Mirror of https://github.com/saymrwulf/pytorch.git, synced 2026-05-15 21:00:47 +00:00.
Summary: libshm_manager doesn't need to depend on all of libtorch. It only uses tiny tempfile.h which can be moved to c10. I could just duplicate the file too, but it's not worth it as c10 is small enough. Pull Request resolved: https://github.com/pytorch/pytorch/pull/17019 Differential Revision: D14052688 Pulled By: dzhulgakov fbshipit-source-id: 8797d15f8c7c49c49d40b7ab2f43aa3bf6becb0c
53 lines
1.3 KiB
C++
53 lines
1.3 KiB
C++
#include <gtest/gtest.h>
|
|
|
|
#include <torch/nn/init.h>
|
|
#include <torch/nn/modules/linear.h>
|
|
#include <torch/types.h>
|
|
#include <torch/utils.h>
|
|
|
|
#include <test/cpp/api/support.h>
|
|
|
|
// Verifies that torch::NoGradGuard disables gradient tracking for the
// duration of its scope: after a forward pass and backward() performed
// under the guard, no gradient is accumulated on the module's parameters.
TEST(NoGradTest, SetsGradModeCorrectly) {
  torch::manual_seed(0);
  torch::NoGradGuard no_grad;
  torch::nn::Linear linear(5, 2);
  auto input = torch::randn({10, 5}, torch::requires_grad());
  auto output = linear->forward(input);
  torch::Tensor loss = output.sum();
  loss.backward();
  // With grad mode off, no autograd graph was recorded, so the weight
  // never receives a gradient.
  ASSERT_FALSE(linear->weight.grad().defined());
}
|
|
|
|
struct AutogradTest : torch::test::SeedingFixture {
|
|
AutogradTest() {
|
|
x = torch::randn({3, 3}, torch::requires_grad());
|
|
y = torch::randn({3, 3});
|
|
z = x * y;
|
|
}
|
|
torch::Tensor x, y, z;
|
|
};
|
|
|
|
// Backward through z = x * y: the gradient w.r.t. x should equal y.
TEST_F(AutogradTest, CanTakeDerivatives) {
  z.backward();
  const auto dx = x.grad();
  ASSERT_TRUE(dx.allclose(y));
}
|
|
|
|
// Reducing z to a zero-dim scalar first must still propagate gradients:
// d(sum(x * y))/d(x) == y.
TEST_F(AutogradTest, CanTakeDerivativesOfZeroDimTensors) {
  auto scalar = z.sum();
  scalar.backward();
  ASSERT_TRUE(x.grad().allclose(y));
}
|
|
|
|
// Passing an explicit output gradient of 2 scales the accumulated
// gradient accordingly: d(sum(x * y))/d(x) * 2 == y * 2.
TEST_F(AutogradTest, CanPassCustomGradientInputs) {
  const auto grad_output = torch::ones({}) * 2;
  z.sum().backward(grad_output);
  ASSERT_TRUE(x.grad().allclose(y * 2));
}
|
|
|
|
// A leaf tensor that requires grad rejects plain in-place mutation, but
// torch::nn::init routines can still initialize it (presumably they
// suspend grad mode internally — behavior pinned by this test).
TEST(NNInitTest, CanInitializeTensorThatRequiresGrad) {
  auto leaf = torch::empty({3, 4}, torch::requires_grad());
  // Direct fill_ on the grad-requiring leaf must throw.
  ASSERT_THROWS_WITH(
      leaf.fill_(1),
      "a leaf Variable that requires grad "
      "has been used in an in-place operation");
  // ones_ succeeds and returns the tensor: 3 * 4 ones sum to 12.
  ASSERT_EQ(torch::nn::init::ones_(leaf).sum().item<int32_t>(), 12);
}
|