Replace the remaining usages of IntList in caffe2 with IntArrayRef

Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/18282

Differential Revision: D14569269

Pulled By: bddppq

fbshipit-source-id: 5fc33701b83f9efdec4b456d2691764831d10e7f
This commit is contained in:
Junjie Bai 2019-03-21 16:24:45 -07:00 committed by Facebook Github Bot
parent 979db03722
commit 46439c78d0
3 changed files with 3 additions and 3 deletions

View file

@@ -295,7 +295,7 @@ class ConvPoolOpBase : public Operator<Context> {
}
static void InferOutputSize64(
-      const at::IntList& input_dims,
+      const at::IntArrayRef& input_dims,
const int output_channel,
const StorageOrder order,
const bool global_pooling,

View file

@@ -69,7 +69,7 @@ class ConvPoolDNNLowPOpBase : public ConvPoolOpBase<CPUContext> {
return &Outputs()[idx]->template GetMutable<int8::Int8TensorCPU>()->t;
}
-  Tensor* OutputTensorCPU_(int idx, at::IntList dims, at::TensorOptions options) {
+  Tensor* OutputTensorCPU_(int idx, at::IntArrayRef dims, at::TensorOptions options) {
auto* t = &Outputs()[idx]->template GetMutable<int8::Int8TensorCPU>()->t;
ReinitializeTensor(t, dims, options.device(CPU));
return t;

View file

@@ -122,7 +122,7 @@ class DNNLowPOp : public Operator<CPUContext> {
}
}
-  Tensor* OutputTensorCPU_(int idx, at::IntList dims, at::TensorOptions options) {
+  Tensor* OutputTensorCPU_(int idx, at::IntArrayRef dims, at::TensorOptions options) {
if (dequantize_output_) {
return Output(idx, dims, options.device(CPU));
} else {