change a bunch of inexpensive DCHECKs to CAFFE_ENFORCEs

Summary: There is no need to disable inexpensive assertions in mode/opt, and doing so makes it incredibly difficult to debug model problems. So changed a bunch of them to CAFFE_ENFORCEs.

Reviewed By: Yangqing

Differential Revision: D5517902

fbshipit-source-id: 9154d0114db159e8136a482fb6508e92084af97a
Aapo Kyrola 2017-07-28 11:25:08 -07:00 committed by Facebook Github Bot
parent f2090debb0
commit 071127cc07
23 changed files with 144 additions and 138 deletions
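For readers unfamiliar with the two macro families: glog-style DCHECK_* assertions compile to no-ops in optimized builds such as mode/opt, so a failed shape check simply falls through and the error only surfaces later as a crash or silently wrong output, whereas CAFFE_ENFORCE_* is always evaluated and throws caffe2::EnforceNotMet with a descriptive message. A minimal sketch of the rewrite pattern applied throughout this commit (the helper function below is hypothetical; only the macros from caffe2/core/logging.h and their variadic message form are taken from the diff):

#include "caffe2/core/logging.h"

// Hypothetical shape check, mirroring the pattern in AccuracyOp below.
void CheckLabelShape(int label_ndim, int label_dim0, int N) {
  // Before: compiled out in opt builds, so nothing fires when shapes disagree.
  //   DCHECK_EQ(label_ndim, 1);
  //   DCHECK_EQ(label_dim0, N) << "bad label size: " << label_dim0;
  // After: always checked; a failure throws caffe2::EnforceNotMet. Note the
  // streaming "<< msg" form becomes extra comma-separated message arguments.
  CAFFE_ENFORCE_EQ(label_ndim, 1);
  CAFFE_ENFORCE_EQ(label_dim0, N, "bad label size: ", label_dim0);
}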

@ -7,11 +7,11 @@ bool AccuracyOp<float, CPUContext>::RunOnDevice() {
auto& X = Input(PREDICTION);
auto& label = Input(LABEL);
auto* Y = Output(0);
DCHECK_EQ(X.ndim(), 2);
CAFFE_ENFORCE_EQ(X.ndim(), 2);
int N = X.dim32(0);
int D = X.dim32(1);
DCHECK_EQ(label.ndim(), 1);
DCHECK_EQ(label.dim32(0), N);
CAFFE_ENFORCE_EQ(label.ndim(), 1);
CAFFE_ENFORCE_EQ(label.dim32(0), N);
Y->Resize(vector<TIndex>());
const auto* Xdata = X.data<float>();
const auto* labelData = label.data<int>();
@ -37,7 +37,7 @@ bool AccuracyOp<float, CPUContext>::RunOnDevice() {
++correct;
}
}
DCHECK_LE(correct, N);
CAFFE_ENFORCE_LE(correct, N);
*(Y->mutable_data<float>()) = static_cast<float>(correct) / N;
return true;

@ -48,11 +48,11 @@ bool AccuracyOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(PREDICTION);
auto& label = Input(LABEL);
auto* Y = Output(0);
DCHECK_EQ(X.ndim(), 2);
CAFFE_ENFORCE_EQ(X.ndim(), 2);
int N = X.dim32(0);
int D = X.dim32(1);
DCHECK_EQ(label.ndim(), 1);
DCHECK_EQ(label.dim32(0), N);
CAFFE_ENFORCE_EQ(label.ndim(), 1);
CAFFE_ENFORCE_EQ(label.dim32(0), N);
Y->Resize(vector<TIndex>());
float* Ydata = Y->mutable_data<float>();
math::Set<float, CUDAContext>(1, 0, Ydata, &context_);

@ -19,8 +19,8 @@ bool ClipGradientOp<float, CPUContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
DCHECK_GT(Y.size(), 0);
DCHECK_EQ(dY.size(), Y.size());
CAFFE_ENFORCE_GT(Y.size(), 0);
CAFFE_ENFORCE_EQ(dY.size(), Y.size());
dX->ResizeLike(Y);
const float* Ydata = Y.data<float>();
const float* dYdata = dY.data<float>();

@ -44,7 +44,7 @@ template <>
bool ClipOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
DCHECK_GT(X.size(), 0);
CAFFE_ENFORCE_GT(X.size(), 0);
Y->ResizeLike(X);
ClipKernel<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
@ -57,8 +57,8 @@ bool ClipGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
DCHECK_GT(Y.size(), 0);
DCHECK_EQ(dY.size(), Y.size());
CAFFE_ENFORCE_GT(Y.size(), 0);
CAFFE_ENFORCE_EQ(dY.size(), Y.size());
dX->ResizeLike(Y);
ClipGradientKernel<<<CAFFE_GET_BLOCKS(Y.size()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(

@ -523,8 +523,8 @@ bool CudnnConvOp::DoRunWithType() {
if (InputSize() == 3) {
auto& bias = Input(BIAS);
DCHECK_EQ(bias.ndim(), 1);
DCHECK_EQ(bias.dim32(0), M);
CAFFE_ENFORCE_EQ(bias.ndim(), 1);
CAFFE_ENFORCE_EQ(bias.dim32(0), M);
CUDNN_ENFORCE(cudnnAddTensor(
cudnn_wrapper_.inline_cudnn_handle(),

@ -149,10 +149,10 @@ bool CudnnConvTransposeOp<T>::RunOnDevice() {
M = X.dim32(3);
H_out = Y->dim32(1);
W_out = Y->dim32(2);
DCHECK_EQ(filter.dim32(1), kernel_h_);
DCHECK_EQ(filter.dim32(1), kernel_h_);
DCHECK_EQ(filter.dim32(2), kernel_w_);
DCHECK_EQ(filter.dim32(3), C);
CAFFE_ENFORCE_EQ(filter.dim32(1), kernel_h_);
CAFFE_ENFORCE_EQ(filter.dim32(1), kernel_h_);
CAFFE_ENFORCE_EQ(filter.dim32(2), kernel_w_);
CAFFE_ENFORCE_EQ(filter.dim32(3), C);
break;
case StorageOrder::NCHW:
N = X.dim32(0);
@ -161,16 +161,16 @@ bool CudnnConvTransposeOp<T>::RunOnDevice() {
W = X.dim32(3);
H_out = Y->dim32(2);
W_out = Y->dim32(3);
DCHECK_EQ(filter.dim32(1), C);
DCHECK_EQ(filter.dim32(2), kernel_h_);
DCHECK_EQ(filter.dim32(3), kernel_w_);
CAFFE_ENFORCE_EQ(filter.dim32(1), C);
CAFFE_ENFORCE_EQ(filter.dim32(2), kernel_h_);
CAFFE_ENFORCE_EQ(filter.dim32(3), kernel_w_);
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
DCHECK_EQ(bias.ndim(), 1);
DCHECK_EQ(bias.dim32(0), C);
CAFFE_ENFORCE_EQ(bias.ndim(), 1);
CAFFE_ENFORCE_EQ(bias.dim32(0), C);
// Set up the cudnn algorithms & workspace if necessary
bool input_changed = (X.dims() != cudnn_input_dims_);
@ -343,8 +343,8 @@ bool CudnnConvTransposeGradientOp<T>::RunOnDevice() {
auto& dY = Input(OUTPUT_GRAD);
auto* dfilter = Output(FILTER_GRAD);
auto* dbias = Output(BIAS_GRAD);
DCHECK_EQ(X.ndim(), 4);
DCHECK_EQ(filter.ndim(), 4);
CAFFE_ENFORCE_EQ(X.ndim(), 4);
CAFFE_ENFORCE_EQ(filter.ndim(), 4);
int C = 0;
switch (order_) {
case StorageOrder::NHWC:
@ -366,10 +366,10 @@ bool CudnnConvTransposeGradientOp<T>::RunOnDevice() {
M = X.dim32(3);
H_out = dY.dim32(1);
W_out = dY.dim32(2);
DCHECK_EQ(filter.dim32(1), kernel_h_);
DCHECK_EQ(filter.dim32(1), kernel_h_);
DCHECK_EQ(filter.dim32(2), kernel_w_);
DCHECK_EQ(filter.dim32(3), C);
CAFFE_ENFORCE_EQ(filter.dim32(1), kernel_h_);
CAFFE_ENFORCE_EQ(filter.dim32(1), kernel_h_);
CAFFE_ENFORCE_EQ(filter.dim32(2), kernel_w_);
CAFFE_ENFORCE_EQ(filter.dim32(3), C);
break;
case StorageOrder::NCHW:
N = X.dim32(0);
@ -378,9 +378,9 @@ bool CudnnConvTransposeGradientOp<T>::RunOnDevice() {
W = X.dim32(3);
H_out = dY.dim32(2);
W_out = dY.dim32(3);
DCHECK_EQ(filter.dim32(1), C);
DCHECK_EQ(filter.dim32(2), kernel_h_);
DCHECK_EQ(filter.dim32(3), kernel_w_);
CAFFE_ENFORCE_EQ(filter.dim32(1), C);
CAFFE_ENFORCE_EQ(filter.dim32(2), kernel_h_);
CAFFE_ENFORCE_EQ(filter.dim32(3), kernel_w_);
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;

@ -31,11 +31,12 @@ bool LabelCrossEntropyOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& label = Input(1);
auto* Y = Output(0);
DCHECK_EQ(X.ndim(), 2);
CAFFE_ENFORCE_EQ(X.ndim(), 2);
int N = X.dim32(0);
int D = X.dim32(1);
DCHECK((label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1));
DCHECK_EQ(label.dim32(0), N);
CAFFE_ENFORCE(
(label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1));
CAFFE_ENFORCE_EQ(label.dim32(0), N);
Y->Resize(vector<TIndex>(size_t(1), N));
LabelCrossEntropyKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
@ -50,13 +51,14 @@ bool LabelCrossEntropyGradientOp<float, CUDAContext>::RunOnDevice() {
auto& label = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
DCHECK_EQ(X.ndim(), 2);
CAFFE_ENFORCE_EQ(X.ndim(), 2);
int N = X.dim32(0);
int D = X.dim32(1);
DCHECK((label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1));
DCHECK_EQ(label.dim32(0), N);
DCHECK_EQ(dY.ndim(), 1);
DCHECK_EQ(dY.dim32(0), N);
CAFFE_ENFORCE(
(label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1));
CAFFE_ENFORCE_EQ(label.dim32(0), N);
CAFFE_ENFORCE_EQ(dY.ndim(), 1);
CAFFE_ENFORCE_EQ(dY.dim32(0), N);
dX->ResizeLike(X);
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->mutable_data<float>(), &context_);

@ -37,7 +37,7 @@ bool DropoutGradientOp<float, CPUContext>::RunOnDevice() {
auto& dY = Input(0);
auto& mask = Input(1);
auto* dX = Output(0);
DCHECK_EQ(dY.size(), mask.size());
CAFFE_ENFORCE_EQ(dY.size(), mask.size());
dX->Resize(dY.dims());
if (is_test_) {
if (dX != &dY) {

@ -58,7 +58,7 @@ bool DropoutGradientOp<float, CUDAContext>::RunOnDevice() {
auto& dY = Input(0);
auto& mask = Input(1);
auto* dX = Output(0);
DCHECK_EQ(dY.size(), mask.size());
CAFFE_ENFORCE_EQ(dY.size(), mask.size());
dX->Resize(dY.dims());
if (is_test_) {
if (dX != &dY) {

@ -16,8 +16,8 @@ class DropoutOp final : public Operator<Context> {
: Operator<Context>(operator_def, ws),
ratio_(OperatorBase::GetSingleArgument<float>("ratio", 0.5)),
is_test_(OperatorBase::GetSingleArgument<int>("is_test", 0)) {
DCHECK_GE(ratio_, 0);
DCHECK_LT(ratio_, 1);
CAFFE_ENFORCE_GE(ratio_, 0);
CAFFE_ENFORCE_LT(ratio_, 1);
}
bool RunOnDevice() override;
@ -36,8 +36,8 @@ class DropoutGradientOp final : public Operator<Context> {
: Operator<Context>(operator_def, ws),
ratio_(OperatorBase::GetSingleArgument<float>("ratio", 0.5)),
is_test_(OperatorBase::GetSingleArgument<int>("is_test", 0)) {
DCHECK_GE(ratio_, 0);
DCHECK_LT(ratio_, 1);
CAFFE_ENFORCE_GE(ratio_, 0);
CAFFE_ENFORCE_LT(ratio_, 1);
}
bool RunOnDevice() override;

@ -41,8 +41,8 @@ class CuDNNDropoutOp final : public Operator<CUDAContext> {
cudnn_wrapper_(&context_),
ratio_(OperatorBase::GetSingleArgument<float>("ratio", 0.5)),
is_test_(OperatorBase::GetSingleArgument<int>("is_test", 0)) {
DCHECK_GE(ratio_, 0);
DCHECK_LT(ratio_, 1);
CAFFE_ENFORCE_GE(ratio_, 0);
CAFFE_ENFORCE_LT(ratio_, 1);
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&data_desc_));
CUDNN_ENFORCE(cudnnCreateDropoutDescriptor(&dropout_desc_));
@ -83,8 +83,8 @@ class CuDNNDropoutGradientOp final : public Operator<CUDAContext> {
cudnn_wrapper_(&context_),
ratio_(OperatorBase::GetSingleArgument<float>("ratio", 0.5)),
is_test_(OperatorBase::GetSingleArgument<int>("is_test", 0)) {
DCHECK_GE(ratio_, 0);
DCHECK_LT(ratio_, 1);
CAFFE_ENFORCE_GE(ratio_, 0);
CAFFE_ENFORCE_LT(ratio_, 1);
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&data_desc_));
CUDNN_ENFORCE(cudnnCreateDropoutDescriptor(&dropout_desc_));

@ -333,8 +333,8 @@ bool DivGradientOp<Context>::RunOnDevice() {
auto& dZ = Input(2);
auto* dX = Output(0);
auto* dY = Output(1);
DCHECK_GT(Y.size(), 0);
DCHECK_GT(Z.size(), 0);
CAFFE_ENFORCE_GT(Y.size(), 0);
CAFFE_ENFORCE_GT(Z.size(), 0);
dX->ResizeLike(Y);
dY->ResizeLike(Y);

@ -32,7 +32,7 @@ bool ReluGradientOp<float, CPUContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
DCHECK_EQ(dY.size(), Y.size());
CAFFE_ENFORCE_EQ(dY.size(), Y.size());
dX->ResizeLike(Y);
const float* Ydata = Y.data<float>();

@ -23,7 +23,7 @@ template <>
bool ReluOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
DCHECK_GT(X.size(), 0);
CAFFE_ENFORCE_GT(X.size(), 0);
Y->ResizeLike(X);
ReluKernel<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
@ -36,8 +36,8 @@ bool ReluGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
DCHECK_GT(Y.size(), 0);
DCHECK_EQ(dY.size(), Y.size());
CAFFE_ENFORCE_GT(Y.size(), 0);
CAFFE_ENFORCE_EQ(dY.size(), Y.size());
dX->ResizeLike(Y);
ReluGradientKernel<<<CAFFE_GET_BLOCKS(Y.size()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(

@ -47,7 +47,7 @@ template <>
bool ReluOp<float16, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
DCHECK_GT(X.size(), 0);
CAFFE_ENFORCE_GT(X.size(), 0);
Y->ResizeLike(X);
if (X.size() % 2 == 0) {
ReluKernelHalf2<<<CAFFE_GET_BLOCKS(X.size() / 2), CAFFE_CUDA_NUM_THREADS,
@ -69,8 +69,8 @@ bool ReluGradientOp<float16, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
DCHECK_GT(Y.size(), 0);
DCHECK_EQ(dY.size(), Y.size());
CAFFE_ENFORCE_GT(Y.size(), 0);
CAFFE_ENFORCE_EQ(dY.size(), Y.size());
dX->ResizeLike(Y);
ReluGradientKernelHalf<<<CAFFE_GET_BLOCKS(Y.size()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(

@ -296,15 +296,15 @@ bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
DCHECK_GE(T.ndim(), 2);
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), D);
CAFFE_ENFORCE_GE(T.ndim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
DCHECK_EQ(T.size(), N);
CAFFE_ENFORCE_EQ(T.size(), N);
} else {
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), 1);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
@ -399,9 +399,9 @@ bool SpatialSoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
D = X.dim32(1);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
DCHECK_EQ(X.ndim(), 4);
DCHECK_EQ(T.ndim(), 3);
DCHECK_EQ(T.dim32(0), N);
CAFFE_ENFORCE_EQ(X.ndim(), 4);
CAFFE_ENFORCE_EQ(T.ndim(), 3);
CAFFE_ENFORCE_EQ(T.dim32(0), N);
int H = X.dim32(2);
int W = X.dim32(3);
@ -499,15 +499,15 @@ bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
DCHECK_GE(T.ndim(), 2);
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), D);
CAFFE_ENFORCE_GE(T.ndim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
DCHECK_EQ(T.size(), N);
CAFFE_ENFORCE_EQ(T.size(), N);
} else {
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), 1);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
@ -609,8 +609,8 @@ bool SpatialSoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
total_weight_ptr_.Resize(1);
// Spatial mode, compute softmax for each x, y location
DCHECK_EQ(X.ndim(), 4);
DCHECK_EQ(T.ndim(), 3);
CAFFE_ENFORCE_EQ(X.ndim(), 4);
CAFFE_ENFORCE_EQ(T.ndim(), 3);
int H = X.dim32(2);
int W = X.dim32(3);

@ -84,15 +84,15 @@ bool SoftmaxWithLossOp<float, CPUContext>::RunOnDevice() {
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : nullptr);
if (label_prob_mode_) {
DCHECK_GE(T.ndim(), 2);
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), D);
CAFFE_ENFORCE_GE(T.ndim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
DCHECK_EQ(T.size(), N);
CAFFE_ENFORCE_EQ(T.size(), N);
} else {
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), 1);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
@ -190,15 +190,15 @@ bool SoftmaxWithLossGradientOp<float, CPUContext>::RunOnDevice() {
dX->ResizeLike(X);
if (label_prob_mode_) {
DCHECK_GE(T.ndim(), 2);
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), D);
CAFFE_ENFORCE_GE(T.ndim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
DCHECK_EQ(T.size(), N);
CAFFE_ENFORCE_EQ(T.size(), N);
} else {
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), 1);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}

@ -22,8 +22,8 @@ bool SpatialBNGradientOp<CPUContext>::RunOnDevice() {
const int sample_size = H * W * D;
DCHECK_EQ(scale.ndim(), 1);
DCHECK_EQ(scale.dim32(0), C);
CAFFE_ENFORCE_EQ(scale.ndim(), 1);
CAFFE_ENFORCE_EQ(scale.dim32(0), C);
ConstEigenVectorArrayMap<float> scale_arr(scale.data<float>(), C);
ConstEigenVectorArrayMap<float> mean_arr(Input(SAVED_MEAN).data<float>(), C);

@ -21,10 +21,10 @@ bool SpatialBNOp<CPUContext>::RunOnDevice() {
: 1;
const int sample_size = H * W * D;
DCHECK_EQ(scale.ndim(), 1);
DCHECK_EQ(bias.ndim(), 1);
DCHECK_EQ(scale.dim32(0), C);
DCHECK_EQ(bias.dim32(0), C);
CAFFE_ENFORCE_EQ(scale.ndim(), 1);
CAFFE_ENFORCE_EQ(bias.ndim(), 1);
CAFFE_ENFORCE_EQ(scale.dim32(0), C);
CAFFE_ENFORCE_EQ(bias.dim32(0), C);
ConstEigenVectorArrayMap<float> scale_arr(scale.data<float>(), C);
ConstEigenVectorArrayMap<float> bias_arr(bias.data<float>(), C);

@ -93,7 +93,7 @@ bool CudnnSpatialBNOp::DoRunWithType() {
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
DCHECK_GE(X.ndim(), 3);
CAFFE_ENFORCE_GE(X.ndim(), 3);
const int N = X.dim32(0);
const int C = X.ndim() > 3
? (order_ == StorageOrder::NCHW ? X.dim32(1) : X.dim32(X.ndim() - 1))
@ -105,10 +105,10 @@ bool CudnnSpatialBNOp::DoRunWithType() {
const int D = X.ndim() > 4
? (order_ == StorageOrder::NCHW ? X.dim32(4) : X.dim32(3))
: 1;
DCHECK_EQ(scale.ndim(), 1);
DCHECK_EQ(bias.ndim(), 1);
DCHECK_EQ(scale.dim32(0), C);
DCHECK_EQ(bias.dim32(0), C);
CAFFE_ENFORCE_EQ(scale.ndim(), 1);
CAFFE_ENFORCE_EQ(bias.ndim(), 1);
CAFFE_ENFORCE_EQ(scale.dim32(0), C);
CAFFE_ENFORCE_EQ(bias.dim32(0), C);
// See if we need to reshape.
if (X.dims() != cudnn_input_dims_) {
VLOG(1) << "Setting descriptors.";
@ -141,10 +141,10 @@ bool CudnnSpatialBNOp::DoRunWithType() {
// Run inference mode.
const auto& est_mean = Input(EST_MEAN);
const auto& est_var = Input(EST_VAR);
DCHECK_EQ(est_mean.ndim(), 1);
DCHECK_EQ(est_var.ndim(), 1);
DCHECK_EQ(est_mean.dim32(0), C);
DCHECK_EQ(est_var.dim32(0), C);
CAFFE_ENFORCE_EQ(est_mean.ndim(), 1);
CAFFE_ENFORCE_EQ(est_var.ndim(), 1);
CAFFE_ENFORCE_EQ(est_mean.dim32(0), C);
CAFFE_ENFORCE_EQ(est_var.dim32(0), C);
auto* Y = Output(OUTPUT);
Y->ResizeLike(X);
@ -191,10 +191,10 @@ bool CudnnSpatialBNOp::DoRunWithType() {
math::Set<BNParamType, CUDAContext>(C, 0, running_var_data, &context_);
} else {
// Does not need to do initialization.
DCHECK_EQ(running_mean->ndim(), 1);
DCHECK_EQ(running_var->ndim(), 1);
DCHECK_EQ(running_mean->dim32(0), C);
DCHECK_EQ(running_var->dim32(0), C);
CAFFE_ENFORCE_EQ(running_mean->ndim(), 1);
CAFFE_ENFORCE_EQ(running_var->ndim(), 1);
CAFFE_ENFORCE_EQ(running_mean->dim32(0), C);
CAFFE_ENFORCE_EQ(running_var->dim32(0), C);
running_mean_data = running_mean->template mutable_data<BNParamType>();
running_var_data = running_var->template mutable_data<BNParamType>();
}
@ -248,7 +248,7 @@ bool CudnnSpatialBNGradientOp::DoRunWithType() {
const auto& scale = Input(SCALE);
const auto& dY = Input(OUTPUT_GRAD);
DCHECK_GE(X.ndim(), 3);
CAFFE_ENFORCE_GE(X.ndim(), 3);
const int N = X.dim32(0);
const int C = X.ndim() > 3
? (order_ == StorageOrder::NCHW ? X.dim32(1) : X.dim32(X.ndim() - 1))
@ -260,8 +260,8 @@ bool CudnnSpatialBNGradientOp::DoRunWithType() {
const int D = X.ndim() > 4
? (order_ == StorageOrder::NCHW ? X.dim32(4) : X.dim32(3))
: 1;
DCHECK_EQ(scale.ndim(), 1);
DCHECK_EQ(scale.dim32(0), C);
CAFFE_ENFORCE_EQ(scale.ndim(), 1);
CAFFE_ENFORCE_EQ(scale.dim32(0), C);
// See if we need to reshape.
if (X.dims() != cudnn_input_dims_) {
if (order_ == StorageOrder::NCHW) {

@ -24,8 +24,8 @@ OPERATOR_SCHEMA(SpatialSoftmaxWithLoss)
auto batch_size = logits.dims().Get(0);
auto num_classes = logits.dims().Get(1);
DCHECK_EQ(logits.dims_size(), 4);
DCHECK_EQ(labels.dims_size(), 3);
CAFFE_ENFORCE_EQ(logits.dims_size(), 4);
CAFFE_ENFORCE_EQ(labels.dims_size(), 3);
out[0].set_data_type(logits.data_type());
out[0].add_dims(batch_size);
out[0].add_dims(num_classes);
@ -80,9 +80,9 @@ bool SpatialSoftmaxWithLossOp<float, CPUContext>::RunOnDevice() {
float* Pdata = P->mutable_data<float>();
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : nullptr);
DCHECK_EQ(X.ndim(), 4);
DCHECK_EQ(T.ndim(), 3);
DCHECK_EQ(T.dim32(0), N);
CAFFE_ENFORCE_EQ(X.ndim(), 4);
CAFFE_ENFORCE_EQ(T.ndim(), 3);
CAFFE_ENFORCE_EQ(T.dim32(0), N);
int H = X.dim32(2);
int W = X.dim32(3);
@ -167,9 +167,9 @@ bool SpatialSoftmaxWithLossGradientOp<float, CPUContext>::RunOnDevice() {
N = X.dim32(0);
D = X.dim32(1);
dX->ResizeLike(X);
DCHECK_EQ(T.dim32(0), N);
DCHECK_EQ(X.ndim(), 4);
DCHECK_EQ(T.ndim(), 3);
CAFFE_ENFORCE_EQ(T.dim32(0), N);
CAFFE_ENFORCE_EQ(X.ndim(), 4);
CAFFE_ENFORCE_EQ(T.ndim(), 3);
int H = X.dim32(2);
int W = X.dim32(3);

@ -565,16 +565,16 @@ bool ScatterWeightedSumOp<float, CUDAContext>::RunOnDevice() {
template <>
template <typename Index>
bool ScatterWeightedSumOp<float,CUDAContext>::DoRunWithType() {
DCHECK_EQ(InputSize() % 2, 1);
CAFFE_ENFORCE_EQ(InputSize() % 2, 1);
auto& X0 = Input(0);
auto& weight0 = Input(1);
auto& indices = Input(2);
auto* output = Output(0);
CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required");
DCHECK_GT(X0.size(), 0);
DCHECK_GT(X0.ndim(), 0) << "X0 has to be at least the vector";
DCHECK_EQ(weight0.size(), 1);
CAFFE_ENFORCE_GT(X0.size(), 0);
CAFFE_ENFORCE_GT(X0.ndim(), 0, "X0 has to be at least the vector");
CAFFE_ENFORCE_EQ(weight0.size(), 1);
TIndex M = X0.size();
TIndex N = X0.dim(0);

@ -238,7 +238,7 @@ class ResizeLikeOp : public Operator<Context> {
auto& input0 = Input(0);
auto& input1 = Input(1);
auto* output = Output(0);
DCHECK_EQ(input0.size(), input1.size());
CAFFE_ENFORCE_EQ(input0.size(), input1.size());
output->ResizeLike(Input(1));
context_.template CopyItems<Context, Context>(
input0.meta(),
@ -325,11 +325,11 @@ class WeightedSumOp : public Operator<Context> {
template <typename DstType>
bool DoRunWithType() {
DCHECK_EQ(InputSize() % 2, 0);
CAFFE_ENFORCE_EQ(InputSize() % 2, 0);
auto& X0 = Input(0);
auto& weight0 = Input(1);
DCHECK_GT(X0.size(), 0);
DCHECK_EQ(weight0.size(), 1);
CAFFE_ENFORCE_GT(X0.size(), 0);
CAFFE_ENFORCE_EQ(weight0.size(), 1);
int size = X0.size();
auto* output = Output(0);
output->ResizeLike(X0);
@ -350,8 +350,8 @@ class WeightedSumOp : public Operator<Context> {
return false;
}
auto& weight = Input(i + 1);
DCHECK_EQ(X.size(), size);
DCHECK_EQ(weight.size(), 1);
CAFFE_ENFORCE_EQ(X.size(), size);
CAFFE_ENFORCE_EQ(weight.size(), 1);
math::Axpy<DstType, Context>(
size,
weight.template data<float>(),
@ -422,16 +422,16 @@ class ScatterWeightedSumOp : public Operator<Context> {
template <typename Index, int FixedSize>
bool DoRunWithValue() {
DCHECK_EQ(InputSize() % 2, 1);
CAFFE_ENFORCE_EQ(InputSize() % 2, 1);
auto& X0 = Input(0);
auto& weight0 = Input(1);
auto& indices = Input(2);
auto* output = Output(0);
CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required");
DCHECK_GT(X0.size(), 0);
DCHECK_GT(X0.ndim(), 0) << "X0 has to be at least the vector";
DCHECK_EQ(weight0.size(), 1);
CAFFE_ENFORCE_GT(X0.size(), 0);
CAFFE_ENFORCE_GT(X0.ndim(), 0, "X0 has to be at least the vector");
CAFFE_ENFORCE_EQ(weight0.size(), 1);
TIndex M = X0.size();
TIndex N = X0.dim(0);
TIndex K = indices.size();
@ -443,8 +443,12 @@ class ScatterWeightedSumOp : public Operator<Context> {
if (w0 != 1.0) {
for (int i = 0; i < K; ++i) {
Index idx = idxs[i];
DCHECK(0 <= idx && idx < N) << "Index out of bounds: " << idx
<< ", range 0 to " << N;
CAFFE_ENFORCE(
0 <= idx && idx < N,
"Index out of bounds: ",
idx,
", range 0 to ",
N);
math::ScaleFixedSize<T, Context, FixedSize>(
block_size,
w0,
@ -456,8 +460,8 @@ class ScatterWeightedSumOp : public Operator<Context> {
for (int inp = 3; inp < InputSize(); inp += 2) {
auto& X = Input(inp);
auto& weight = Input(inp + 1);
DCHECK_EQ(X.size(), block_size * K);
DCHECK_EQ(weight.size(), 1);
CAFFE_ENFORCE_EQ(X.size(), block_size * K);
CAFFE_ENFORCE_EQ(weight.size(), 1);
const T* x_data = X.template data<T>();
T w = *weight.template data<T>();
for (int i = 0; i < K; ++i) {
@ -579,12 +583,12 @@ class ScatterAssignOp : public Operator<Context> {
auto* output = Output(0);
CAFFE_ENFORCE_EQ(&input, output, "In place operation is required");
DCHECK_GT(input.ndim(), 0) << "X0 has to be at least the vector";
CAFFE_ENFORCE_GT(input.ndim(), 0, "X0 has to be at least the vector");
TIndex M = input.size();
TIndex N = input.dim(0);
TIndex K = indices.size();
TIndex block_size = M / N;
DCHECK_EQ(slices.size(), block_size * K);
CAFFE_ENFORCE_EQ(slices.size(), block_size * K);
// TODO(dzhulgakov): it can be made to work with arbitrary data type by
// using raw_mutable_data
T* data = output->template mutable_data<T>();