mirror of
https://github.com/saymrwulf/pytorch.git
synced 2026-05-14 20:57:59 +00:00
[SR] Fix quantized linear tests not managing outputs (#75776)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/75776 The output was returned directly instead of a clone, so the output of the relevant op would not be managed. ghstack-source-id: 154935103 Test Plan: CI Reviewed By: navahgar Differential Revision: D35633469 fbshipit-source-id: 7b08b7368e0349a12abf8802a4c625ffecdc5abb (cherry picked from commit 24bed9ba4da39cff7f3b40f5e49dfded2552b373)
This commit is contained in:
parent
04b3313379
commit
3fa77fa51a
1 changed file with 2 additions and 2 deletions
|
|
@ -2133,7 +2133,7 @@ TEST(StaticRuntime, QuantizedLinearDynamicFp16) {
|
|||
%packed_params = quantized::linear_prepack_fp16(%weights, %bias)
|
||||
%output = quantized::linear_dynamic_fp16(%input, %packed_params)
|
||||
%ret = aten::clone(%output, %bias)
|
||||
return (%output)
|
||||
return (%ret)
|
||||
)IR";
|
||||
at::Tensor weight = torch::randn({3, 2}, torch::kFloat);
|
||||
at::Tensor input = torch::randn({3, 2}, torch::kFloat);
|
||||
|
|
@ -2154,7 +2154,7 @@ TEST(StaticRuntime, QuantizedLinearReluDynamicFp16) {
|
|||
%packed_params = quantized::linear_prepack_fp16(%weights, %bias)
|
||||
%output = quantized::linear_relu_dynamic_fp16(%input, %packed_params)
|
||||
%ret = aten::clone(%output, %bias)
|
||||
return (%output)
|
||||
return (%ret)
|
||||
)IR";
|
||||
at::Tensor weight = torch::randn({3, 2}, torch::kFloat);
|
||||
at::Tensor input = torch::randn({3, 2}, torch::kFloat);
|
||||
|
|
|
|||
Loading…
Reference in a new issue