mirror of
https://github.com/saymrwulf/pytorch.git
synced 2026-05-14 20:57:59 +00:00
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/40494
Resubmit the diff because D22124313 (1ec4337b7d) was reverted due to CI test failures
Added the int8_gen_quant_params.cc to CMakeLists.txt to fix the CI failures
Test Plan: buck test caffe2/caffe2/quantization/server:
Reviewed By: hx89
Differential Revision: D22204244
fbshipit-source-id: a2c8b668f199cc5b0c5894086f554f7c459b1ad7
21 lines
704 B
C++
21 lines
704 B
C++
#include "caffe2/operators/quantized/int8_quantize_op.h"
namespace caffe2 {
|
|
|
|
REGISTER_CPU_OPERATOR(Int8Quantize, int8::Int8QuantizeOp);
|
|
|
|
OPERATOR_SCHEMA(Int8Quantize)
|
|
.IdenticalTypeAndShape()
|
|
.Arg("Y_scale", "Output tensor quantization scale")
|
|
.Arg("Y_zero_point", "Output tensor quantization offset")
|
|
.NumInputs(1, 3)
|
|
.NumOutputs(1)
|
|
.Input(0, "X", "FP32 Tensor X.")
|
|
.Input(
|
|
1,
|
|
"Qparam",
|
|
"Optional Qparam blob that constans quant param computed on activation histogram data"
|
|
"Will overwrite Y_scale and Y_zero_point argument if specified")
|
|
.Output(0, "Y", "Int8 Tensor qX representing X with linear quantization.");
|
|
|
|
} // namespace caffe2
|