Removed work around for convolution transpose op since the bug has been fixed in v0.18 (#22184)
Summary: Removed the work around for the convolution transpose op since the bug has been fixed in MKL-DNN v0.18.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/22184
Differential Revision: D15982627
Pulled By: bddppq
fbshipit-source-id: 8725d5b5e5b68e029ffb08af12b416bd310c9638
parent 5b87049c66
commit 7ee82d48a8
2 changed files with 3 additions and 59 deletions
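The workaround this commit removes hinged on one check: the deleted need_type_zero_pad multiplies out a memory descriptor's logical dims and its padding_dims, and the two products differ exactly when MKL-DNN has zero-padded some dimension to fit a blocked layout (for example, 7 output channels rounded up to a block of 8). Whenever that happened on a filter descriptor under MKL-DNN v0.17, the operators below routed execution to a CPU fallback op. The following standalone sketch shows the idea; Desc and the sample shapes are stand-ins for illustration, not the real mkldnn_memory_desc_t API.

// Standalone sketch of the check deleted below; `Desc` is a stand-in for
// mkldnn_memory_desc_t, not the real MKL-DNN type.
#include <cstdio>

struct Desc {
  int ndims;
  int dims[4];          // logical shape
  int padding_dims[4];  // shape after rounding dims up to the layout's block size
};

// Mirrors the deleted need_type_zero_pad(): true iff any dim was zero-padded.
static bool need_type_zero_pad(const Desc* pd) {
  if (pd->ndims == 0) {
    return false;
  }
  int p1 = 1, p2 = 1;
  for (int i = 0; i < pd->ndims; i++) {
    p1 *= pd->dims[i];
    p2 *= pd->padding_dims[i];
  }
  return p1 != p2;
}

int main() {
  // A 7-output-channel filter in a layout blocked by 8: channels are padded
  // 7 -> 8, so the products differ and the old code fell back to the CPU op.
  Desc blocked{4, {7, 3, 5, 5}, {8, 3, 5, 5}};
  // An 8-channel filter fills the block exactly: no padding, no fallback.
  Desc exact{4, {8, 3, 5, 5}, {8, 3, 5, 5}};
  std::printf("7 channels -> %d, 8 channels -> %d\n",
              need_type_zero_pad(&blocked), need_type_zero_pad(&exact));
  return 0;
}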
@@ -1,39 +1,19 @@
-#include "caffe2/operators/conv_transpose_op.h"
 #include "caffe2/ideep/operators/conv_transpose_unpool_base_op.h"
-#include "caffe2/ideep/operators/operator_fallback_ideep.h"
-#include <vector>
 
 using namespace caffe2;
 
 namespace {
 
-// TODO: The code below works around correctness issues with particular input shapes
-// in MKL-DNN v0.17, will be removed with the fixes in MKL-DNN 0.18.
-bool need_type_zero_pad(const mkldnn_memory_desc_t *pd) {
-  if (pd->ndims == 0) {
-    return false;
-  }
-  int p1 = 1, p2 = 1;
-  for (int i = 0; i < pd->ndims; i++) {
-    p1 *= pd->dims[i];
-    p2 *= pd->layout_desc.blocking.padding_dims[i];
-  }
-  return (p1 != p2);
-}
-
 class IDEEPConvTransposeOp final : public IDEEPConvTransposeUnpoolBase {
  public:
   USE_IDEEP_DEF_ALIASES();
   USE_IDEEP_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS();
-  // TODO: The code below works around correctness issues with particular input shapes
-  // in MKL-DNN v0.17, will be removed with the fixes in MKL-DNN 0.18.
-  using FALLBACK_OP = IDEEPFallbackOp<ConvTransposeOp<float, CPUContext>>;
 
   IDEEPConvTransposeOp(const OperatorDef& operator_def, Workspace* ws)
       : IDEEPConvTransposeUnpoolBase(operator_def, ws),
         training_mode_(
-            OperatorBase::GetSingleArgument<int>("training_mode", 0)),
-        fallback_(operator_def, ws) {
+            OperatorBase::GetSingleArgument<int>("training_mode", 0)) {
     OPERATOR_NEEDS_FEATURE(
         pad_l() == pad_r() && pad_t() == pad_b(),
         "Uneven padding not supported.");
@@ -82,22 +62,11 @@ class IDEEPConvTransposeOp final : public IDEEPConvTransposeUnpoolBase {
         filter_.feed_from(filter_in);
       }
 
-      // TODO: The code below works around correctness issues with particular input shapes
-      // in MKL-DNN v0.17, will be removed with the fixes in MKL-DNN 0.18.
-      if (need_type_zero_pad(filter_.get_mkldnn_memory_desc_t())) {
-        return fallback_.Run(0);
-      }
     } else {
       CAFFE_ENFORCE_EQ(
           filter.get_dim(1), X.get_dim(1),
           "filter number must be equal to input channel number");
 
-      // TODO: The code below works around correctness issues with particular input shapes
-      // in MKL-DNN v0.17, will be removed with the fixes in MKL-DNN 0.18.
-      if (need_type_zero_pad(filter.get_mkldnn_memory_desc_t())) {
-        return fallback_.Run(0);
-      }
-
       Y_dims = CalcOutputDims(X, filter.get_dim(0));
     }
 
@@ -124,24 +93,16 @@ class IDEEPConvTransposeOp final : public IDEEPConvTransposeUnpoolBase {
   const bool training_mode_;
   ideep::tensor filter_;
   ideep::tensor::descriptor cached_weights_descriptor_;
-  // TODO: The code below works around correctness issues with particular input shapes
-  // in MKL-DNN v0.17, will be removed with the fixes in MKL-DNN 0.18.
-  FALLBACK_OP fallback_;
-
 };
 
 class IDEEPConvTransposeGradientOp final : public IDEEPConvTransposeUnpoolBase {
  public:
   USE_IDEEP_DEF_ALIASES();
   USE_IDEEP_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS();
-  // TODO: The code below works around correctness issues with particular input shapes
-  // in MKL-DNN v0.17, will be removed with the fixes in MKL-DNN 0.18.
-  using FALLBACK_OP = IDEEPFallbackOp<ConvTransposeGradientOp<float, CPUContext>>;
 
   IDEEPConvTransposeGradientOp(const OperatorDef& operator_def, Workspace* ws)
       : IDEEPConvTransposeUnpoolBase(operator_def, ws),
-        no_bias_(OperatorBase::GetSingleArgument<int>("no_bias", false)),
-        fallback_(operator_def, ws) {
+        no_bias_(OperatorBase::GetSingleArgument<int>("no_bias", false)) {
     OPERATOR_NEEDS_FEATURE(
         pad_l() == pad_r() && pad_t() == pad_b(),
         "Uneven padding not supported.");
@@ -181,20 +142,6 @@ class IDEEPConvTransposeGradientOp final : public IDEEPConvTransposeUnpoolBase {
       filter_in.set_public_format(ideep::format::iohw);
       filter_.init(expected_descriptor);
       filter_.feed_from(filter_in);
-
-      // TODO: The code below works around correctness issues with particular input shapes
-      // in MKL-DNN v0.17, will be removed with the fixes in MKL-DNN 0.18.
-      if (((filter_in.get_dim(0) % 8 != 0) && (stride_[0] * stride_[1] != 1)) ||
-          need_type_zero_pad(filter_.get_mkldnn_memory_desc_t())) {
-        return fallback_.Run(0);
-      }
-    } else {
-      // TODO: The code below works around correctness issues with particular input shapes
-      // in MKL-DNN v0.17, will be removed with the fixes in MKL-DNN 0.18.
-      if (((filter_in.get_dim(0) % 8 != 0) && (stride_[0] * stride_[1] != 1)) ||
-          need_type_zero_pad(filter.get_mkldnn_memory_desc_t())) {
-        return fallback_.Run(0);
-      }
     }
 
     if (no_bias_) {
@@ -235,9 +182,6 @@ class IDEEPConvTransposeGradientOp final : public IDEEPConvTransposeUnpoolBase {
 
  private:
   bool no_bias_;
-  // TODO: The code below works around correctness issues with particular input shapes
-  // in MKL-DNN v0.17, will be removed with the fixes in MKL-DNN 0.18.
-  FALLBACK_OP fallback_;
 
   INPUT_TAGS(INPUT, FILTER, OUTPUT_GRAD);
   OUTPUT_TAGS(FILTER_GRAD, BIAS_OR_INPUT_GRAD, INPUT_GRAD);
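Before the second changed file (the Hypothesis test), note what the gradient op's deleted check added on top of need_type_zero_pad: filters whose output-channel count was not a multiple of 8 also took the fallback whenever the stride was non-unit. Below is a minimal sketch of that predicate; the function name and sample values are hypothetical, and the real code read these dims from the ideep tensor and stride_ from the operator arguments.

// Sketch of the shape predicate deleted from the gradient op above.
#include <cstdio>

static bool gradient_needs_fallback(int out_channels, int stride_h, int stride_w,
                                    bool descriptor_is_zero_padded) {
  // Deleted condition: ((filter.get_dim(0) % 8 != 0) && (stride_[0] * stride_[1] != 1))
  //                    || need_type_zero_pad(...)
  return ((out_channels % 8 != 0) && (stride_h * stride_w != 1)) ||
         descriptor_is_zero_padded;
}

int main() {
  // 7 output channels with 2x2 stride: the old gradient op fell back.
  std::printf("%d\n", gradient_needs_fallback(7, 2, 2, false));  // prints 1
  // Unit stride was exempt from the multiple-of-8 check.
  std::printf("%d\n", gradient_needs_fallback(7, 1, 1, false));  // prints 0
  return 0;
}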
@@ -20,7 +20,7 @@ class ConvTransposeTest(hu.HypothesisTestCase):
            adj=st.integers(0, 2),
            size=st.integers(7, 10),
            input_channels=st.integers(1, 8),
-           output_channels=st.integers(1, 7),
+           output_channels=st.integers(1, 8),
            batch_size=st.integers(1, 3),
            use_bias=st.booleans(),
            training_mode=st.booleans(),
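With the fallback paths gone, the test widens output_channels from st.integers(1, 7) to st.integers(1, 8), matching the existing input_channels range. Including 8 also lets Hypothesis draw a filter count that exactly fills an 8-wide channel block, the case the removed "% 8 != 0" checks treated specially; that reading is an inference from the diff, not stated in the commit message.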