pytorch/caffe2/operators/operator_fallback_gpu_test.cc
Jerry Zhang 9f4bcdf075 caffe2::DeviceType -> at::DeviceType (#11254)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/11254
Previously we used `DeviceType` from caffe2.proto directly, but it is a plain `enum` with an implicit conversion to int, which offers no type safety; e.g. we have to explicitly check that a device type is valid in event.h:
```
template <int d>
struct EventCreateFunctionRegisterer {
  explicit EventCreateFunctionRegisterer(EventCreateFunction f) {
    static_assert(d < MaxDeviceTypes, "");
    Event::event_creator_[d] = f;
  }
};
```
at::DeviceType is an `enum class`: it has no implicit conversion to int and therefore provides stronger type-safety guarantees (see the sketch after the list below). In this diff we have done the following refactor (taking CPU as an example):

    1. caffe2::DeviceType → caffe2::DeviceTypeProto
    2. caffe2::CPU → caffe2::PROTO_CPU
    3. caffe2::DeviceType = at::DeviceType
    4. caffe2::CPU = at::DeviceType::CPU

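A minimal standalone sketch of why the `enum class` is safer; the names mirror the mapping above but this is illustrative code, not a line from the diff:
```
enum DeviceTypeProto { PROTO_CPU = 0 };      // plain enum: implicitly converts to int
enum class DeviceType : int { CPU = 0 };     // enum class: no implicit conversion

int a = PROTO_CPU;                           // compiles silently
// int b = DeviceType::CPU;                  // compile error: no implicit conversion
int c = static_cast<int>(DeviceType::CPU);   // conversion must be spelled out
```
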
The bulk of the rename was done with
```
codemod -d caffe2/caffe2 --extensions h,cc,cpp 'device_type\(\), ' 'device_type(), PROTO_'
```
plus some manual changes.
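
As an illustration, a call site touched by this codemod would change along these lines (a hypothetical example, not a line from the diff):
```
// before:
CAFFE_ENFORCE_EQ(option.device_type(), CPU);
// after: the proto enum value now carries the PROTO_ prefix
CAFFE_ENFORCE_EQ(option.device_type(), PROTO_CPU);
```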

In short, after this diff, in C++, caffe2::CPU refers to at::DeviceType::CPU, and the old proto value caffe2::CPU becomes caffe2::PROTO_CPU.
On the Python side, we have a temporary workaround that aliases `caffe2_pb2.CPU = caffe2_pb2.PROTO_CPU` to make the change easier to review; this will be removed later.
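
Putting the new spellings together, a sketch assembled from the mapping above (not copied from the diff):
```
caffe2::DeviceType d = caffe2::CPU;             // now an alias for at::DeviceType::CPU
caffe2::DeviceTypeProto p = caffe2::PROTO_CPU;  // the old proto enum value, renamed
op_def.mutable_device_option()->set_device_type(caffe2::PROTO_CPU);  // proto fields take the proto enum
```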

Reviewed By: ezyang

Differential Revision: D9545704

fbshipit-source-id: 461a28a4ca74e616d3ee183a607078a717fd38a7
2018-09-05 16:28:09 -07:00

#include <iostream>

#include <gtest/gtest.h>
#include "caffe2/core/operator.h"
#include "caffe2/operators/operator_fallback_gpu.h"

namespace caffe2 {

// A CPU-only operator that adds 1 to every element of its input. It has no
// native CUDA implementation, so the CUDA registration below must go through
// GPUFallbackOp.
class IncrementByOneOp final : public Operator<CPUContext> {
 public:
  IncrementByOneOp(const OperatorDef& def, Workspace* ws)
      : Operator<CPUContext>(def, ws) {}
  bool RunOnDevice() override {
    const auto& in = Input(0);
    auto* out = Output(0);
    out->ResizeLike(in);
    const float* in_data = in.template data<float>();
    float* out_data = out->template mutable_data<float>();
    for (int i = 0; i < in.size(); ++i) {
      out_data[i] = in_data[i] + 1.f;
    }
    return true;
  }
};

OPERATOR_SCHEMA(IncrementByOne)
    .NumInputs(1)
    .NumOutputs(1)
    .AllowInplace({{0, 0}});

REGISTER_CPU_OPERATOR(IncrementByOne, IncrementByOneOp);
// On CUDA, the fallback wrapper copies inputs to CPU, runs the CPU op, and
// copies outputs back to the device.
REGISTER_CUDA_OPERATOR(IncrementByOne, GPUFallbackOp<IncrementByOneOp>);

TEST(OperatorFallbackTest, IncrementByOneOp) {
  OperatorDef op_def = CreateOperatorDef(
      "IncrementByOne", "", vector<string>{"X"}, vector<string>{"X"});
  Workspace ws;
  Tensor source_tensor(vector<TIndex>{2, 3}, CPU);
  for (int i = 0; i < 6; ++i) {
    source_tensor.mutable_data<float>()[i] = i;
  }
  ws.CreateBlob("X")->GetMutableTensor(CPU)->CopyFrom(source_tensor);
  unique_ptr<OperatorBase> op(CreateOperator(op_def, &ws));
  EXPECT_TRUE(op.get() != nullptr);
  EXPECT_TRUE(op->Run());
  const TensorCPU& output = ws.GetBlob("X")->Get<TensorCPU>();
  EXPECT_EQ(output.ndim(), 2);
  EXPECT_EQ(output.dim(0), 2);
  EXPECT_EQ(output.dim(1), 3);
  for (int i = 0; i < 6; ++i) {
    EXPECT_EQ(output.data<float>()[i], i + 1);
  }
}

TEST(OperatorFallbackTest, GPUIncrementByOneOp) {
  if (!HasCudaGPU()) return;
  OperatorDef op_def = CreateOperatorDef(
      "IncrementByOne", "", vector<string>{"X"}, vector<string>{"X"});
  // Run on CUDA: the proto device option takes the renamed PROTO_ enum value.
  op_def.mutable_device_option()->set_device_type(PROTO_CUDA);
  Workspace ws;
  Tensor source_tensor(vector<TIndex>{2, 3}, CPU);
  for (int i = 0; i < 6; ++i) {
    source_tensor.mutable_data<float>()[i] = i;
  }
  ws.CreateBlob("X")->GetMutableTensor(CUDA)->CopyFrom(source_tensor);
  unique_ptr<OperatorBase> op(CreateOperator(op_def, &ws));
  EXPECT_TRUE(op.get() != nullptr);
  EXPECT_TRUE(op->Run());
  const TensorCUDA& output = ws.GetBlob("X")->Get<TensorCUDA>();
  // Copy the result back to CPU to inspect it.
  Tensor output_cpu(output, CPU);
  EXPECT_EQ(output.ndim(), 2);
  EXPECT_EQ(output.dim(0), 2);
  EXPECT_EQ(output.dim(1), 3);
  for (int i = 0; i < 6; ++i) {
    EXPECT_EQ(output_cpu.data<float>()[i], i + 1);
  }
}

} // namespace caffe2