From 006cfebf3dcc4bc0cba344c25243baebaeeefbb7 Mon Sep 17 00:00:00 2001
From: Alban Desmaison
Date: Fri, 8 Jan 2021 06:36:01 -0800
Subject: [PATCH] Update autograd related comments (#50166)

Summary:
Remove outdated comments and update to use new paths.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/50166

Reviewed By: zou3519

Differential Revision: D25824942

Pulled By: albanD

fbshipit-source-id: 7dc694891409e80e1804eddcdcc50cc21b60f822
---
 c10/core/TensorImpl.h           | 12 ------------
 tools/autograd/derivatives.yaml |  2 +-
 2 files changed, 1 insertion(+), 13 deletions(-)

diff --git a/c10/core/TensorImpl.h b/c10/core/TensorImpl.h
index 47e1c865f99..d8b803f906f 100644
--- a/c10/core/TensorImpl.h
+++ b/c10/core/TensorImpl.h
@@ -582,9 +582,6 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
 
   /**
    * Set whether or not a tensor requires gradient.
-   *
-   * It is only valid to call this method on a Variable.
-   * See Note [Tensor versus Variable in C++].
    */
  void set_requires_grad(bool requires_grad);
 
@@ -594,27 +591,18 @@
    * we can automatically differentiate back to them. A tensor that
    * requires gradient and has no history is a "leaf" tensor, which we
    * accumulate gradients into.
-   *
-   * It is only valid to call this method on a Variable.
-   * See Note [Tensor versus Variable in C++].
    */
   bool requires_grad() const;
 
   /**
    * Return a mutable reference to the gradient. This is conventionally
    * used as `t.grad() = x` to set a gradient to a completely new tensor.
-   *
-   * It is only valid to call this method on a Variable.
-   * See Note [Tensor versus Variable in C++].
    */
   at::Tensor& mutable_grad();
 
   /**
    * Return the accumulated gradient of a tensor. This gradient is written
    * into when performing backwards, when this tensor is a leaf tensor.
-   *
-   * It is only valid to call this method on a Variable.
-   * See Note [Tensor versus Variable in C++].
    */
   const at::Tensor& grad() const;
 
diff --git a/tools/autograd/derivatives.yaml b/tools/autograd/derivatives.yaml
index 9bf266da394..5bd0451545c 100644
--- a/tools/autograd/derivatives.yaml
+++ b/tools/autograd/derivatives.yaml
@@ -86,7 +86,7 @@
 # e.g., it is used by _cudnn_rnn
 #
 # If you need a complex expression, e.g., with local variables,
-# write a _backward function in tools/autograd/templates/Functions.cpp
+# write a _backward function in torch/csrc/autograd/FunctionsManual.cpp
 # and invoke it from here. By the way, go read
 # https://github.com/zdevito/ATen/issues/163; this describes an
 # important hazard that occurs when porting backwards from Python to C++
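
Note (not part of the patch): the surviving doc comments above describe TensorImpl's autograd accessors. A minimal sketch of how they behave, exercised through the public at::Tensor API that forwards to these TensorImpl methods; it assumes a standard libtorch setup, and the tensor names are illustrative only.

#include <torch/torch.h>
#include <iostream>

int main() {
  // Leaf tensor: requires_grad() is true and there is no history,
  // so backward() accumulates gradients directly into it.
  auto t = torch::ones({2, 2}, torch::requires_grad());

  auto loss = (t * t).sum();
  loss.backward();

  // grad() returns the accumulated gradient: d(loss)/dt = 2 * t.
  std::cout << t.grad() << "\n";

  // mutable_grad() returns a mutable reference, conventionally used
  // to replace the gradient with a completely new tensor.
  t.mutable_grad() = torch::zeros({2, 2});
  std::cout << t.grad() << "\n";
}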
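
Likewise for the derivatives.yaml hunk: a hedged sketch of the kind of _backward helper the updated comment points at. Both my_op and my_op_backward are hypothetical names used only to illustrate the pattern; real helpers of this shape live in torch/csrc/autograd/FunctionsManual.cpp and are invoked from derivatives.yaml formulas.

#include <torch/torch.h>

using torch::Tensor;

// Hypothetical helper in the style of FunctionsManual.cpp. A matching
// derivatives.yaml entry (also hypothetical) would look roughly like:
//   - name: my_op(Tensor self) -> Tensor
//     self: my_op_backward(grad, self)
Tensor my_op_backward(const Tensor& grad, const Tensor& self) {
  // Local variables keep the expression readable, which is exactly
  // what the single-line yaml formula cannot accommodate.
  auto s = self.sigmoid();    // intermediate value
  return grad * s * (1 - s);  // d/dx sigmoid(x) = s * (1 - s)
}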