mirror of
https://github.com/saymrwulf/pytorch.git
synced 2026-05-14 20:57:59 +00:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/17684 Adding tensor and cost inference functions to more int8 operators. Reviewed By: yinghai Differential Revision: D14174746 fbshipit-source-id: dfad975fa75899565c8fb61f1b7747a9206ebd22
33 lines
1.1 KiB
C++
// Registration and schema for the quantized (int8) Flatten operator.
// The actual kernel lives in Int8FlattenOp (see the header); shape
// inference is shared with the float Flatten op via
// TensorInferenceForFlatten from flatten_op.h.
#include "caffe2/operators/quantized/int8_flatten_op.h"

#include "caffe2/operators/flatten_op.h"

namespace caffe2 {

REGISTER_CPU_OPERATOR(Int8Flatten, int8::Int8FlattenOp);

OPERATOR_SCHEMA(Int8Flatten)
    .NumInputs(1)
    .NumOutputs(1)
    // Reuses the float Flatten shape-inference function: output shape is
    // (prod(d_0..d_{axis-1}), prod(d_axis..d_n)), independent of dtype.
    .TensorInferenceFunction(TensorInferenceForFlatten)
    .SetDoc(R"DOC(
Flattens the input tensor into a 2D matrix. If input tensor has shape
(d_0, d_1, ... d_n) then the output will have shape
(d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn)
)DOC")
    .Input(0, "input", "A Int8 tensor of rank >= axis.")
    .Output(
        0,
        "output",
        "A 2D Int8 tensor with the contents of the input tensor, "
        "with input dimensions up to axis flattened to the outer dimension "
        "of the output and remaining input dimensions flattened into the inner "
        "dimension of the output.")
    .Arg("Y_scale", "Output tensor quantization scale")
    .Arg("Y_zero_point", "Output tensor quantization offset")
    .Arg(
        "axis",
        "(Default to 1) Indicate up to which input dimensions "
        "(exclusive) should be flattened to the outer dimension of the output");

} // namespace caffe2