pytorch/caffe2/operators/square_root_divide_op.cc
Nikita Shulga a9b0a921d5 Disable avoid-non-const-global-variables lint check (#62008)
Summary:
The GoogleTest `TEST` macro, as well as `DEFINE_DISPATCH`, is non-compliant with this check

All changes but the ones to `.clang-tidy` are generated using following script:
```
for i in `find . -type f -iname "*.c*" -or -iname "*.h"|xargs grep cppcoreguidelines-avoid-non-const-global-variables|cut -f1 -d:|sort|uniq`;  do sed -i "/\/\/ NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)/d" $i; done
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/62008

Reviewed By: driazati, r-barnes

Differential Revision: D29838584

Pulled By: malfet

fbshipit-source-id: 1b2f8602c945bd4ce50a9bfdd204755556e31d13
2021-07-22 18:04:40 -07:00

43 lines
1.1 KiB
C++

#include "caffe2/operators/square_root_divide_op.h"
namespace caffe2 {
// Register the CPU implementation of SquareRootDivide with the operator registry.
REGISTER_CPU_OPERATOR(SquareRootDivide, SquareRootDivideOp<CPUContext>);
// Schema: exactly 2 inputs (DATA, SCALE) and 1 output; the output may be
// written in place over input 0 (AllowInplace({{0, 0}})).
// The R"DOC(...)DOC" string below is user-visible documentation emitted at
// runtime, so it is left byte-for-byte untouched here.
OPERATOR_SCHEMA(SquareRootDivide)
.NumInputs(2)
.NumOutputs(1)
.AllowInplace({{0, 0}})
.SetDoc(R"DOC(
Given DATA tensor with first dimension N and SCALE vector of the same size N
produces an output tensor with same dimensions as DATA. Which consists of DATA
slices. i-th slice is divided by sqrt(SCALE[i]) elementwise. If SCALE[i] == 0
output slice is identical to the input one (no scaling)
Example:
Data = [
[2.0, 4.0],
[9.0, 12.0]
]
SCALE = [4, 9]
OUTPUT = [
[1.0, 2.0],
[3.0, 4.0]
]
)DOC");
// Gradient maker for SquareRootDivide.
//
// Since d/dDATA (DATA / sqrt(SCALE)) = 1 / sqrt(SCALE), the backward pass is
// just the forward operator applied to the incoming output gradient with the
// same SCALE input: GI(0) = SquareRootDivide(GO(0), I(1)).
class GetSquareRootDivideGradient : public GradientMakerBase {
using GradientMakerBase::GradientMakerBase;
vector<OperatorDef> GetGradientDefs() override {
// Name the blob lists explicitly rather than building them inline.
const vector<string> grad_op_inputs{GO(0), I(1)};
const vector<string> grad_op_outputs{GI(0)};
return SingleGradientDef(
"SquareRootDivide", "", grad_op_inputs, grad_op_outputs);
}
};
// Associate the gradient maker above with the SquareRootDivide operator so
// the autodiff machinery can generate its backward op.
REGISTER_GRADIENT(SquareRootDivide, GetSquareRootDivideGradient);
} // namespace caffe2