pytorch/caffe2/operators/quantized/int8_dequantize_op.h
Richard Barnes 1433160a36 use irange for loops 6 (#66742)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/66742

Modified loops in files under fbsource/fbcode/caffe2/ from the format

`for(TYPE var=x0;var<x_max;var++)`

to the format

`for(const auto var: irange(x_max))`

This was achieved by running r-barnes's loop upgrader script (D28874212), with some modifications to exclude all files under /torch/jit; a number of reversions and unused-variable suppression warnings were added by hand.
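
For illustration, a minimal sketch of the before/after pattern (the function and container here are hypothetical, not taken from this diff):

```cpp
#include <c10/util/irange.h>

#include <vector>

// Hypothetical example of the loop style this diff migrates to.
float Sum(const std::vector<float>& xs) {
  float acc = 0.0f;
  // Before: for (size_t i = 0; i < xs.size(); i++) { acc += xs[i]; }
  // After: c10::irange(n) yields 0, 1, ..., n - 1 with a deduced index type.
  for (const auto i : c10::irange(xs.size())) {
    acc += xs[i];
  }
  return acc;
}
```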

Test Plan: Sandcastle

Reviewed By: malfet

Differential Revision: D31705366

fbshipit-source-id: be58222426c192406a7f93c21582c3f6f2082401
2021-12-07 16:07:50 -08:00


#ifndef CAFFE2_OPERATORS_INT8_DEQUANTIZE_OP_H_
#define CAFFE2_OPERATORS_INT8_DEQUANTIZE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor_int8.h"
#include "caffe2/operators/quantized/int8_utils.h"
#include <c10/util/irange.h>
namespace caffe2 {
namespace int8 {
namespace {
// Affine dequantization: out[i] = (in[i] - X_offset) * X_scale.
void Int8Dequantize(
    const uint8_t* in,
    float* out,
    const int64_t N,
    const float X_scale,
    const int32_t X_offset) {
  for (const auto i : c10::irange(N)) {
    out[i] = (static_cast<int32_t>(in[i]) - X_offset) * X_scale;
  }
}

} // namespace

// CPU operator that converts a quantized Int8TensorCPU input into a float tensor.
class Int8DequantizeOp final : public Operator<CPUContext> {
 public:
  using Operator<CPUContext>::Operator;

  bool RunOnDevice() override {
    const auto& X = Inputs()[0]->template Get<Int8TensorCPU>();

    auto* Y = Output(0, X.t.sizes(), at::dtype<float>());
    int32_t X_offset = X.zero_point;
    auto X_scale = X.scale;

    Int8Dequantize(
        X.t.data<uint8_t>(),
        Y->mutable_data<float>(),
        X.t.numel(),
        X_scale,
        X_offset);
    return true;
  }
};
} // namespace int8
} // namespace caffe2
#endif // CAFFE2_OPERATORS_INT8_DEQUANTIZE_OP_H_