// Mirror of https://github.com/saymrwulf/pytorch.git (synced 2026-05-15).
// Provenance: PyTorch PR #54403 (InferenceMode). Key behavior notes from
// that change:
//   1. All tensors created in InferenceMode are inference tensors, except
//      for view ops: a view's output has the same is_inference_tensor
//      property as its input (a view of a normal tensor inside
//      InferenceMode is a normal tensor, exactly like creating a view
//      inside NoGradMode; a view of an inference tensor outside
//      InferenceMode is an inference tensor).
//   2. All ops are allowed inside InferenceMode, faster than normal mode.
//   3. Inference tensors cannot be saved for backward.
// Reviewed By: ezyang. Differential Revision: D27316483.
// 73 lines, 2.3 KiB, C++.
#include <torch/script.h>

#include <gtest/gtest.h>

#include <test/cpp/api/support.h>

using namespace torch::autograd;
using namespace torch::test;

TEST(GradModeTest, TestRequiresGradFunctionalOp) {
  // With grad mode disabled, a functional (out-of-place) op produces a
  // leaf output that does not require grad, no matter what the input's
  // requires_grad flag was.
  torch::AutoGradMode guard(false);
  for (const bool input_requires_grad : {true, false}) {
    auto input =
        torch::ones({1, 2, 3}).set_requires_grad(input_requires_grad);

    const auto result = input * input;
    ASSERT_FALSE(result.requires_grad());
    ASSERT_TRUE(result.is_leaf());
  }
}
TEST(GradModeTest, TestRequiresGradInplaceOp) {
  // In-place ops under disabled grad mode are permitted even on tensors
  // that require grad, and they leave the requires_grad flag untouched.
  torch::AutoGradMode guard(false);
  for (const bool flag : {true, false}) {
    auto tensor = torch::ones({1, 2, 3}).set_requires_grad(flag);

    tensor.mul_(2);
    ASSERT_EQ(tensor.requires_grad(), flag);
  }
}
TEST(GradModeTest, TestRequiresGradViewOp) {
  // Views taken while grad mode is disabled propagate the base tensor's
  // requires_grad flag, and the view is a leaf (no grad_fn is recorded).
  torch::AutoGradMode guard(false);
  for (const bool flag : {true, false}) {
    auto base = torch::ones({1, 2, 3}).set_requires_grad(flag);

    auto view = base.view({2, 3});
    ASSERT_EQ(view.requires_grad(), flag);
    ASSERT_TRUE(view.is_leaf());
  }
}
TEST(GradModeTest, TestRequiresGradViewOpExiting) {
  // A view created while grad mode is disabled is tagged with
  // CreationMeta::NO_GRAD_MODE. After the guard is exited, the view still
  // propagates requires_grad to downstream ops, but in-place mutation of
  // it is rejected when it requires grad.
  for (const bool flag : {true, false}) {
    auto source = torch::ones({1, 2, 3}).set_requires_grad(flag);
    auto base = source.clone();
    torch::Tensor no_grad_view;
    torch::Tensor result;

    {
      torch::AutoGradMode guard(false);
      // Dispatch path: VariableType, InplaceOrView, CPU kernels.
      no_grad_view = base.view({2, 3});
      assert_tensor_creation_meta(
          no_grad_view, torch::autograd::CreationMeta::NO_GRAD_MODE);
      ASSERT_EQ(no_grad_view.requires_grad(), flag);
      ASSERT_TRUE(no_grad_view.is_leaf());
    }

    result = no_grad_view * no_grad_view;
    ASSERT_EQ(result.requires_grad(), flag);
    if (flag) {
      result.backward(torch::ones_like(result));
      // TODO: this behavior is a side effect of issue #11390.
      ASSERT_FALSE(no_grad_view.grad().defined());
    }

    if (flag) {
      // Mutating a NO_GRAD_MODE view that requires grad must throw
      // (dispatch path: VariableType, InplaceOrView, CPU kernels).
      ASSERT_THROWS_WITH(
          no_grad_view.mul_(2),
          "A view was created in no_grad mode and is being modified inplace");
    } else {
      no_grad_view.mul_(2);
    }

    result = no_grad_view.view({2, 3});
    ASSERT_EQ(result.requires_grad(), flag);
    assert_tensor_creation_meta(
        result, torch::autograd::CreationMeta::NO_GRAD_MODE);
  }
}