diff --git a/.clang-tidy b/.clang-tidy
index 89014476956..e43f3d550a7 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -1,8 +1,9 @@
 ---
 # NOTE there must be no spaces before the '-', so put the comma last.
-# The check bugprone-unchecked-optional-access is also turned off atm
-# because it causes clang-tidy to hang randomly. The tracking issue
+# The check bugprone-unchecked-optional-access is also turned on.
+# Note that it can cause clang-tidy to hang randomly. The tracking issue
 # can be found at https://github.com/llvm/llvm-project/issues/69369.
+# When that happens, we can disable it on the problematic code by NOLINT.
 InheritParentConfig: true
 Checks: '
 bugprone-*,
@@ -12,7 +13,6 @@ bugprone-*,
 -bugprone-lambda-function-name,
 -bugprone-reserved-identifier,
 -bugprone-swapped-arguments,
--bugprone-unchecked-optional-access,
 clang-analyzer-core.*,
 clang-analyzer-cplusplus.*,
 clang-analyzer-nullability.*,
diff --git a/aten/src/ATen/DeviceAccelerator.cpp b/aten/src/ATen/DeviceAccelerator.cpp
index 8d4410f9638..ed3c0ae8ee6 100644
--- a/aten/src/ATen/DeviceAccelerator.cpp
+++ b/aten/src/ATen/DeviceAccelerator.cpp
@@ -54,6 +54,7 @@ bool isAccelerator(c10::DeviceType device_type) {
   }
 }
 
+// NOLINTBEGIN(bugprone-unchecked-optional-access)
 c10::DeviceIndex deviceCount() {
   const auto device_type = getAccelerator(false);
   if (!device_type.has_value()) {
@@ -99,5 +100,6 @@ void synchronizeDevice(c10::DeviceIndex device_index) {
   // impl.synchronizeDevice should can be safely called from any device
   impl.synchronizeDevice(device_index);
 }
+// NOLINTEND(bugprone-unchecked-optional-access)
 
 } // namespace at::accelerator
diff --git a/aten/src/ATen/core/jit_type.h b/aten/src/ATen/core/jit_type.h
index 2c8fbf8a09a..c15e5f72af2 100644
--- a/aten/src/ATen/core/jit_type.h
+++ b/aten/src/ATen/core/jit_type.h
@@ -656,10 +656,11 @@ struct TORCH_API TensorType : public SharedType {
     const auto& shape = sizes();
 
     for (size_t i = 0; i < shape.size(); i++) {
-      if (!shape[i].has_value()) {
+      auto const &s = shape[i];
+      if (!s.has_value()) {
         return std::optional<size_t>{};
       }
-      prod *= shape[i].value();
+      prod *= s.value();
     }
     return prod;
   }
@@ -727,10 +728,11 @@
 
   TensorTypePtr contiguous() const {
     auto cloned = clone();
-    TORCH_INTERNAL_ASSERT(sizes().concrete_sizes().has_value());
+    auto concrete_sizes = sizes().concrete_sizes();
+    TORCH_INTERNAL_ASSERT(concrete_sizes.has_value());
     auto strides = computeStrideProps(
-        *sizes().concrete_sizes(),
-        contiguousStridesOf(*sizes().concrete_sizes()));
+        *concrete_sizes,
+        contiguousStridesOf(*concrete_sizes));
     cloned->strides_ = strides;
     return cloned;
   }
@@ -1516,8 +1518,8 @@ struct TORCH_API FunctionType : public NamedType {
   FunctionType(torch::jit::Function* function);
   std::string annotation_str_impl(
       [[maybe_unused]] const TypePrinter& printer = nullptr) const override {
-    const auto& n = name().value();
-    return n.qualifiedName();
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
+    return name()->qualifiedName();
   }
   torch::jit::Function* function_;
 };
@@ -2133,6 +2135,7 @@ struct MatchTypeReturn {
     return !reason_.has_value();
   }
   const std::string& reason() const {
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     return reason_.value();
   }
 
@@ -2181,6 +2184,7 @@ struct TORCH_API InterfaceType : public NamedType {
   }
 
   std::string str() const override {
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     return std::string("InterfaceType<") + name()->name() + ">";
   }
 
@@ -2208,6 +2212,7 @@
 
   std::string annotation_str_impl(
       [[maybe_unused]] const TypePrinter& printer = nullptr) const override {
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     return name()->qualifiedName();
   }
 
diff --git a/aten/src/ATen/core/type.cpp b/aten/src/ATen/core/type.cpp
index 8f797f6075f..b94e3cd6bd8 100644
--- a/aten/src/ATen/core/type.cpp
+++ b/aten/src/ATen/core/type.cpp
@@ -904,6 +904,7 @@ bool ListType::isSubtypeOfExt(const Type& rhs_, std::ostream* why_not) const {
 std::string TupleType::str() const {
   std::stringstream ss;
   if (schema_ && name().has_value()) {
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     ss << name()->qualifiedName();
   } else {
     ss << "(";
diff --git a/aten/src/ATen/functorch/BatchRulesRandomness.cpp b/aten/src/ATen/functorch/BatchRulesRandomness.cpp
index 6f57ced0077..b578047dd6f 100644
--- a/aten/src/ATen/functorch/BatchRulesRandomness.cpp
+++ b/aten/src/ATen/functorch/BatchRulesRandomness.cpp
@@ -16,6 +16,7 @@
 // registered to FuncTorchVmapMode. This is because we need to interpose on
 // random operations even if they're not on a BatchedTensor.
 
+// NOLINTBEGIN(bugprone-unchecked-optional-access)
 namespace at::functorch {
 
 template <typename F, F Func, typename... ExtraArgs>
@@ -501,3 +502,4 @@ TORCH_LIBRARY_IMPL(aten, FuncTorchVmapMode, m) {
 }
 
 } // namespace at::functorch
+// NOLINTEND(bugprone-unchecked-optional-access)
diff --git a/aten/src/ATen/functorch/BatchRulesReduceOps.cpp b/aten/src/ATen/functorch/BatchRulesReduceOps.cpp
index 69a54ab990c..c8a6b4a82f2 100644
--- a/aten/src/ATen/functorch/BatchRulesReduceOps.cpp
+++ b/aten/src/ATen/functorch/BatchRulesReduceOps.cpp
@@ -11,6 +11,7 @@
 
 #include <utility>
 
+// NOLINTBEGIN(bugprone-unchecked-optional-access)
 namespace at::functorch {
 
 static bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
@@ -510,3 +511,4 @@ TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {
 }
 
 } // namespace at::functorch
+// NOLINTEND(bugprone-unchecked-optional-access)
diff --git a/aten/src/ATen/functorch/BatchRulesScatterOps.cpp b/aten/src/ATen/functorch/BatchRulesScatterOps.cpp
index 0102d3a71ae..6ebbff31c82 100644
--- a/aten/src/ATen/functorch/BatchRulesScatterOps.cpp
+++ b/aten/src/ATen/functorch/BatchRulesScatterOps.cpp
@@ -14,6 +14,7 @@
 
 #include <vector>
 
+// NOLINTBEGIN(bugprone-unchecked-optional-access)
 namespace at::functorch {
 
 namespace {
@@ -1283,3 +1284,4 @@ TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {
 }
 
 } // namespace at::functorch
+// NOLINTEND(bugprone-unchecked-optional-access)
diff --git a/aten/src/ATen/functorch/BatchRulesViews.cpp b/aten/src/ATen/functorch/BatchRulesViews.cpp
index 000e80b2d2e..cd1d0e1487f 100644
--- a/aten/src/ATen/functorch/BatchRulesViews.cpp
+++ b/aten/src/ATen/functorch/BatchRulesViews.cpp
@@ -156,6 +156,7 @@ const Tensor& resize__plumbing(
       "resize_: batching rule only supports None or Contiguous MemoryFormat");
   auto maybe_layer = maybeCurrentDynamicLayer();
   vmap_check_escaped(maybe_layer, "resize__plumbing");
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   int64_t cur_level = maybe_layer->layerId();
   if (!isBatchedAtLevel(self, cur_level)) {
     c10::impl::ExcludeDispatchKeyGuard guard2(DispatchKey::FuncTorchBatched);
diff --git a/aten/src/ATen/functorch/DynamicLayer.cpp b/aten/src/ATen/functorch/DynamicLayer.cpp
index 9bdf155affc..652858acc3a 100644
--- a/aten/src/ATen/functorch/DynamicLayer.cpp
+++ b/aten/src/ATen/functorch/DynamicLayer.cpp
@@ -41,6 +41,7 @@ DynamicLayer::DynamicLayer(
   }
   switch (transform_type) {
     case TransformType::Vmap:
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
       interpreter_ = Interpreter::Vmap(layerId, std::move(batchSize.value()), randomness.value());
       break;
     case TransformType::Grad:
@@ -50,6 +51,7 @@ DynamicLayer::DynamicLayer(
       interpreter_ = Interpreter::Jvp(layerId, prev_fwd_grad_mode.value());
       break;
     case TransformType::Functionalize:
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
       interpreter_ = Interpreter::Functionalize(layerId, functionalize_add_back_views.value());
       break;
     default:
@@ -345,9 +347,7 @@ void foreachTensorInplaceWithFlag(std::vector<IValue>& args, int64_t begin, int64_t end,
     if (!ivalue.isTensor()) {
       continue;
     }
-    Tensor value = ivalue.toTensor();
-    Tensor replacement = func(value, flag);
-    args[idx] = std::move(replacement);
+    args[idx] = func(ivalue.toTensor(), flag);
     // sanity checks
     if (ivalue.toTensor().defined()) {
       TORCH_INTERNAL_ASSERT(args[idx].toTensor().defined());
diff --git a/aten/src/ATen/functorch/LegacyVmapTransforms.cpp b/aten/src/ATen/functorch/LegacyVmapTransforms.cpp
index ace12bc9c45..662aaeb8e5c 100644
--- a/aten/src/ATen/functorch/LegacyVmapTransforms.cpp
+++ b/aten/src/ATen/functorch/LegacyVmapTransforms.cpp
@@ -118,6 +118,7 @@ static Tensor moveDimToFrontAndExpand(Tensor tensor, std::optional<int64_t> dim,
 // to `batch_sizes`
 VmapPhysicalViewVec
 MultiBatchVmapTransform::logicalToPhysical(ITensorListRef logical_tensors) {
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   auto cur_level = maybeCurrentDynamicLayer().value().layerId();
   c10::SymInt bdim_size = -1;
 
diff --git a/c10/cuda/CUDACachingAllocator.cpp b/c10/cuda/CUDACachingAllocator.cpp
index 93e915939f7..74ac1d0ff36 100644
--- a/c10/cuda/CUDACachingAllocator.cpp
+++ b/c10/cuda/CUDACachingAllocator.cpp
@@ -406,6 +406,7 @@ struct ExpandableSegment {
           DriverAPI::get()->cuMemCreate_(&handle, segment_size_, &prop, 0);
       if (status == CUDA_ERROR_OUT_OF_MEMORY) {
         for (auto j : c10::irange(begin, i)) {
+          // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
           auto h = handles_.at(j).value();
           handles_.at(j) = std::nullopt;
           C10_CUDA_DRIVER_CHECK(DriverAPI::get()->cuMemRelease_(h.handle));
@@ -444,6 +445,7 @@ struct ExpandableSegment {
     ShareHeader header{getpid(), segment_size_, end - begin};
     buf.write((const char*)&header, sizeof(ShareHeader));
     for (auto i : c10::irange(begin, end)) {
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
       auto& handle = handles_.at(i).value();
       if (!handle.fd) {
         int fd = 0;
@@ -493,6 +495,7 @@ struct ExpandableSegment {
     close((int)pidfd);
     for (auto& h : segment->handles_) {
       C10_CUDA_DRIVER_CHECK(
+          // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
          DriverAPI::get()->cuMemRelease_(h.value().handle));
       h = std::nullopt;
     }
@@ -555,6 +558,7 @@ struct ExpandableSegment {
             ptr_ + i * segment_size_,
             segment_size_,
             0,
+            // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
             handles_.at(i).value().handle,
             0ULL));
       }
@@ -579,6 +583,7 @@ struct ExpandableSegment {
       C10_CUDA_CHECK(cudaDeviceSynchronize());
     }
     for (auto i : c10::irange(begin, end)) {
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
       Handle h = handles_.at(i).value();
       handles_.at(i) = std::nullopt;
       C10_CUDA_DRIVER_CHECK(DriverAPI::get()->cuMemUnmap_(
diff --git a/c10/util/OptionalArrayRef.h b/c10/util/OptionalArrayRef.h
index ae4f4f1f2c6..90610eb7d12 100644
--- a/c10/util/OptionalArrayRef.h
+++ b/c10/util/OptionalArrayRef.h
@@ -162,6 +162,7 @@ class OptionalArrayRef final {
   }
 
   constexpr const ArrayRef<T>& value() const& {
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     return wrapped_opt_array_ref.value();
   }
 
diff --git a/torch/csrc/api/include/torch/nn/functional/loss.h b/torch/csrc/api/include/torch/nn/functional/loss.h
index 918f77a0a33..b81bf47cf54 100644
--- a/torch/csrc/api/include/torch/nn/functional/loss.h
+++ b/torch/csrc/api/include/torch/nn/functional/loss.h
@@ -404,7 +404,7 @@ inline Tensor smooth_l1_loss(
   TORCH_CHECK(
       !options.beta().has_value(),
       "expected beta not to be provided in 'options', but got ",
-      options.beta().value());
+      options.beta());
   return detail::smooth_l1_loss(input, target, options.reduction(), beta);
 }
 
diff --git a/torch/csrc/api/src/nn/modules/embedding.cpp b/torch/csrc/api/src/nn/modules/embedding.cpp
index c1171d34633..0ed8a7b17c0 100644
--- a/torch/csrc/api/src/nn/modules/embedding.cpp
+++ b/torch/csrc/api/src/nn/modules/embedding.cpp
@@ -25,8 +25,9 @@ void EmbeddingImpl::reset() {
     TORCH_CHECK(
         options.padding_idx() >= -options.num_embeddings(),
         "Padding_idx must be within num_embedding");
-    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
-    options.padding_idx(options.num_embeddings() + *options.padding_idx());
+    options.padding_idx(
+        // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
+        options.num_embeddings() + *options.padding_idx());
   }
 }
 
@@ -49,6 +50,7 @@ void EmbeddingImpl::reset_parameters() {
   torch::nn::init::normal_(weight);
   if (options.padding_idx().has_value()) {
     torch::NoGradGuard no_grad;
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
    weight[*options.padding_idx()].fill_(0);
   }
 }
@@ -56,11 +58,13 @@
 void EmbeddingImpl::pretty_print(std::ostream& stream) const {
   stream << "torch::nn::Embedding(num_embeddings=" << options.num_embeddings()
          << ", embedding_dim=" << options.embedding_dim();
-  if (options.padding_idx().has_value()) {
-    stream << ", padding_idx=" << *options.padding_idx();
+  auto const& padding_idx_opt = options.padding_idx();
+  if (padding_idx_opt.has_value()) {
+    stream << ", padding_idx=" << padding_idx_opt.value();
   }
-  if (options.max_norm().has_value()) {
-    stream << ", max_norm=" << *options.max_norm();
+  auto const& max_norm_opt = options.max_norm();
+  if (max_norm_opt.has_value()) {
+    stream << ", max_norm=" << max_norm_opt.value();
   }
   if (options.norm_type() != 2) {
     stream << ", norm_type=" << options.norm_type();
@@ -93,8 +97,9 @@ EmbeddingBagImpl::EmbeddingBagImpl(EmbeddingBagOptions options_)
 }
 
 void EmbeddingBagImpl::reset() {
-  if (options.padding_idx().has_value()) {
-    auto padding_idx = options.padding_idx().value();
+  auto const& padding_idx_opt = options.padding_idx();
+  if (padding_idx_opt.has_value()) {
+    auto padding_idx = padding_idx_opt.value();
     if (padding_idx > 0) {
       TORCH_CHECK(
           padding_idx < options.num_embeddings(),
@@ -122,9 +127,10 @@ void EmbeddingBagImpl::reset() {
 }
 
 void EmbeddingBagImpl::reset_parameters() {
-  if (options.padding_idx().has_value()) {
+  auto const& padding_idx_opt = options.padding_idx();
+  if (padding_idx_opt.has_value()) {
     torch::NoGradGuard no_grad;
-    weight[options.padding_idx().value()].fill_(0);
+    weight[*padding_idx_opt].fill_(0);
   }
   torch::nn::init::normal_(weight);
 }
@@ -151,8 +157,9 @@ void EmbeddingBagImpl::pretty_print(std::ostream& stream) const {
   stream << "torch::nn::EmbeddingBag(num_embeddings="
          << options.num_embeddings()
          << ", embedding_dim=" << options.embedding_dim();
-  if (options.max_norm().has_value()) {
-    stream << ", max_norm=" << *options.max_norm();
+  auto const& max_norm_opt = options.max_norm();
+  if (max_norm_opt.has_value()) {
+    stream << ", max_norm=" << *max_norm_opt;
   }
   if (options.norm_type() != 2) {
     stream << ", norm_type=" << options.norm_type();
@@ -171,8 +178,9 @@ void EmbeddingBagImpl::pretty_print(std::ostream& stream) const {
     stream << ", include_last_offset=" << std::boolalpha
            << options.include_last_offset();
   }
-  if (options.padding_idx().has_value()) {
-    stream << ", padding_idx=" << options.padding_idx().value();
+  auto const& padding_idx_opt = options.padding_idx();
+  if (padding_idx_opt.has_value()) {
+    stream << ", padding_idx=" << padding_idx_opt.value();
   }
   stream << ")";
 }
diff --git a/torch/csrc/distributed/rpc/rref_impl.cpp b/torch/csrc/distributed/rpc/rref_impl.cpp
index a004a5e2b1c..ecf3cbd9991 100644
--- a/torch/csrc/distributed/rpc/rref_impl.cpp
+++ b/torch/csrc/distributed/rpc/rref_impl.cpp
@@ -15,6 +15,7 @@ namespace {
 
 // If the type is subtype of named type, return its qualifiedname, otherwise
 // return its type str.
+// NOLINTBEGIN(bugprone-unchecked-optional-access)
 std::string getTypeStr(const c10::TypePtr& type) {
   switch (type->kind()) {
     case c10::TypeKind::FunctionType:
@@ -29,6 +30,7 @@ std::string getTypeStr(const c10::TypePtr& type) {
       return type->annotation_str();
   }
 }
+// NOLINTEND(bugprone-unchecked-optional-access)
 
 } // namespace
 