diff --git a/.clang-format b/.clang-format index f789a97304f..5c093e39f94 100644 --- a/.clang-format +++ b/.clang-format @@ -44,7 +44,9 @@ ContinuationIndentWidth: 4 Cpp11BracedListStyle: true DerivePointerAlignment: false DisableFormat: false -ForEachMacros: [ FOR_EACH_RANGE, FOR_EACH, ] +ForEachMacros: + - FOR_EACH_RANGE + - FOR_EACH IncludeCategories: - Regex: '^<.*\.h(pp)?>' Priority: 1 @@ -79,7 +81,11 @@ SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false -Standard: Cpp11 +Standard: c++17 +StatementMacros: + - PyObject_HEAD + - PyObject_VAR_HEAD + - PyException_HEAD TabWidth: 8 UseTab: Never --- diff --git a/caffe2/.clang-format b/caffe2/.clang-format index 1307bf22efb..78603034433 100644 --- a/caffe2/.clang-format +++ b/caffe2/.clang-format @@ -43,7 +43,9 @@ ContinuationIndentWidth: 4 Cpp11BracedListStyle: true DerivePointerAlignment: false DisableFormat: false -ForEachMacros: [ FOR_EACH_RANGE, FOR_EACH, ] +ForEachMacros: + - FOR_EACH_RANGE + - FOR_EACH IncludeCategories: - Regex: '^<.*\.h(pp)?>' Priority: 1 @@ -81,7 +83,11 @@ SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false -Standard: Cpp11 +Standard: c++17 +StatementMacros: + - PyObject_HEAD + - PyObject_VAR_HEAD + - PyException_HEAD TabWidth: 8 UseTab: Never ... 
diff --git a/torch/csrc/Device.h b/torch/csrc/Device.h index 665c38bf035..eb39fcd69c6 100644 --- a/torch/csrc/Device.h +++ b/torch/csrc/Device.h @@ -7,7 +7,8 @@ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) struct TORCH_API THPDevice { - PyObject_HEAD at::Device device; + PyObject_HEAD + at::Device device; }; TORCH_API extern PyTypeObject THPDeviceType; diff --git a/torch/csrc/Dtype.h b/torch/csrc/Dtype.h index 4e0689c9ab2..2dd7a99caa8 100644 --- a/torch/csrc/Dtype.h +++ b/torch/csrc/Dtype.h @@ -7,7 +7,8 @@ constexpr int DTYPE_NAME_LEN = 64; struct TORCH_API THPDtype { - PyObject_HEAD at::ScalarType scalar_type; + PyObject_HEAD + at::ScalarType scalar_type; // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) char name[DTYPE_NAME_LEN + 1]; }; diff --git a/torch/csrc/Event.h b/torch/csrc/Event.h index 70a37d2eea6..3bbc7d37939 100644 --- a/torch/csrc/Event.h +++ b/torch/csrc/Event.h @@ -5,7 +5,8 @@ #include struct TORCH_API THPEvent { - PyObject_HEAD c10::Event event; + PyObject_HEAD + c10::Event event; }; TORCH_API extern PyTypeObject* THPEventClass; TORCH_API extern PyTypeObject THPEventType; diff --git a/torch/csrc/Generator.h b/torch/csrc/Generator.h index 57656c471ec..4fef5911bab 100644 --- a/torch/csrc/Generator.h +++ b/torch/csrc/Generator.h @@ -6,7 +6,8 @@ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) struct THPGenerator { - PyObject_HEAD at::Generator cdata; + PyObject_HEAD + at::Generator cdata; }; // Creates a new Python object wrapping the default at::Generator. 
diff --git a/torch/csrc/Layout.h b/torch/csrc/Layout.h index 265582e0ddf..3b6844c9bad 100644 --- a/torch/csrc/Layout.h +++ b/torch/csrc/Layout.h @@ -9,7 +9,8 @@ const int LAYOUT_NAME_LEN = 64; struct THPLayout { - PyObject_HEAD at::Layout layout; + PyObject_HEAD + at::Layout layout; // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) char name[LAYOUT_NAME_LEN + 1]; }; diff --git a/torch/csrc/MemoryFormat.h b/torch/csrc/MemoryFormat.h index 7f60a0ba028..566270e70ab 100644 --- a/torch/csrc/MemoryFormat.h +++ b/torch/csrc/MemoryFormat.h @@ -9,7 +9,8 @@ const int MEMORY_FORMAT_NAME_LEN = 64; struct THPMemoryFormat { - PyObject_HEAD at::MemoryFormat memory_format; + PyObject_HEAD + at::MemoryFormat memory_format; // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) char name[MEMORY_FORMAT_NAME_LEN + 1]; }; diff --git a/torch/csrc/QScheme.h b/torch/csrc/QScheme.h index fcb75304c0e..f604772fb82 100644 --- a/torch/csrc/QScheme.h +++ b/torch/csrc/QScheme.h @@ -9,7 +9,8 @@ constexpr int QSCHEME_NAME_LEN = 64; struct THPQScheme { - PyObject_HEAD at::QScheme qscheme; + PyObject_HEAD + at::QScheme qscheme; // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) char name[QSCHEME_NAME_LEN + 1]; }; diff --git a/torch/csrc/Stream.h b/torch/csrc/Stream.h index 91f1abe0516..0c979391071 100644 --- a/torch/csrc/Stream.h +++ b/torch/csrc/Stream.h @@ -6,7 +6,8 @@ #include struct THPStream { - PyObject_HEAD int64_t stream_id; + PyObject_HEAD + int64_t stream_id; int64_t device_type; int64_t device_index; }; diff --git a/torch/csrc/TypeInfo.h b/torch/csrc/TypeInfo.h index 97d12e4eea5..6841312e4a9 100644 --- a/torch/csrc/TypeInfo.h +++ b/torch/csrc/TypeInfo.h @@ -5,7 +5,8 @@ #include struct THPDTypeInfo { - PyObject_HEAD at::ScalarType type; + PyObject_HEAD + at::ScalarType type; }; struct THPFInfo : THPDTypeInfo {}; diff --git a/torch/csrc/autograd/python_cpp_function.h
b/torch/csrc/autograd/python_cpp_function.h index 832ab1c7677..b530621f349 100644 --- a/torch/csrc/autograd/python_cpp_function.h +++ b/torch/csrc/autograd/python_cpp_function.h @@ -11,7 +11,8 @@ namespace torch::autograd { struct THPCppFunction { - PyObject_HEAD std::shared_ptr cdata; + PyObject_HEAD + std::shared_ptr cdata; }; template diff --git a/torch/csrc/autograd/python_function.h b/torch/csrc/autograd/python_function.h index 0bf3c8bbab7..b1c3eb25ee5 100644 --- a/torch/csrc/autograd/python_function.h +++ b/torch/csrc/autograd/python_function.h @@ -95,7 +95,7 @@ inline bool ensure_tuple(THPObjectPtr& obj) { struct THPFunction { PyObject_HEAD - PyObject* needs_input_grad; + PyObject* needs_input_grad; // Python tuple of tensors whose variables we should save. Set // by Python with 'save_for_backward'. If nullptr, no tensors were diff --git a/torch/csrc/dynamo/guards.cpp b/torch/csrc/dynamo/guards.cpp index 5345a418288..53dcd0f6044 100644 --- a/torch/csrc/dynamo/guards.cpp +++ b/torch/csrc/dynamo/guards.cpp @@ -46,7 +46,8 @@ // Manually create _PyTupleIterObject struct typedef struct { - PyObject_HEAD Py_ssize_t it_index; + PyObject_HEAD + Py_ssize_t it_index; PyTupleObject* it_seq; /* Set to NULL when iterator is exhausted */ } _PyTupleIterObject; diff --git a/torch/csrc/fx/node.cpp b/torch/csrc/fx/node.cpp index dc96737abda..06c0683db34 100644 --- a/torch/csrc/fx/node.cpp +++ b/torch/csrc/fx/node.cpp @@ -8,7 +8,8 @@ /////////////////////////////// struct NodeBase { - PyObject_HEAD bool _erased; + PyObject_HEAD + bool _erased; NodeBase* _prev; NodeBase* _next; }; @@ -111,7 +112,8 @@ bool NodeBase_init(PyObject* module) { //////////////////////////////// struct NodeIter { - PyObject_HEAD bool _reversed; + PyObject_HEAD + bool _reversed; NodeBase* _root; NodeBase* _cur; }; diff --git a/torch/csrc/profiler/python/init.cpp b/torch/csrc/profiler/python/init.cpp index 66164692063..7f77dbad5eb 100644 --- a/torch/csrc/profiler/python/init.cpp +++ 
b/torch/csrc/profiler/python/init.cpp @@ -12,7 +12,8 @@ #include struct THPCapturedTraceback { - PyObject_HEAD std::shared_ptr data; + PyObject_HEAD + std::shared_ptr data; }; static int THPCapturedTraceback_traverse( @@ -136,7 +137,8 @@ namespace torch::profiler { namespace { struct RecordFunctionFast { - PyObject_HEAD PyObject* name; + PyObject_HEAD + PyObject* name; PyObject* input_values; PyObject* keyword_values; std::unique_ptr guard; diff --git a/torch/csrc/utils/disable_torch_function.cpp b/torch/csrc/utils/disable_torch_function.cpp index ade676d5e14..818c3c4a00d 100644 --- a/torch/csrc/utils/disable_torch_function.cpp +++ b/torch/csrc/utils/disable_torch_function.cpp @@ -34,8 +34,8 @@ void set_disabled_torch_dispatch_impl(PyObject* value) { typedef struct { PyObject_HEAD - /* Type-specific fields go here. */ - at::impl::TorchFunctionDisabledState old_state; + /* Type-specific fields go here. */ + at::impl::TorchFunctionDisabledState old_state; } DisableTorchFunctionSubclass; PyObject* DisableTorchFunctionSubclass__enter( @@ -131,8 +131,8 @@ PyObject* THPModule_DisableTorchFunctionSubclassType() { typedef struct { PyObject_HEAD - /* Type-specific fields go here. */ - at::impl::TorchFunctionDisabledState old_state; + /* Type-specific fields go here. */ + at::impl::TorchFunctionDisabledState old_state; } DisableTorchFunction; PyObject* DisableTorchFunction__enter(PyObject* self, PyObject* unused) {