s/foward/forward/g (#58497)
Summary: Annoying typo. Prompted by these profiling results: https://github.com/pytorch/pytorch/issues/56419#issuecomment-825787828

Pull Request resolved: https://github.com/pytorch/pytorch/pull/58497

Reviewed By: malfet

Differential Revision: D28521081

Pulled By: Chillee

fbshipit-source-id: ab91a2e167dd7d3387fd56106a6cff81f7a32f10
commit 79a258f448
parent ccad77aa22

10 changed files with 14 additions and 14 deletions
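The title is a sed expression. A minimal Python sketch of an equivalent repo-wide replacement (illustrative only — the extension list and approach are assumptions, not the command the author actually ran; note from the diff below that the real substitution also lowercased the capitalized `Foward`, so it was evidently applied case-insensitively):

import pathlib

# Replace "foward" with "forward" across source files; also handle the
# capitalized variant seen in ModuleDirectFowardSubmodCall below, which
# the commit likewise lowercased to "forward".
for ext in ("*.py", "*.cpp", "*.md", "*.rst"):
    for path in pathlib.Path(".").rglob(ext):
        text = path.read_text(encoding="utf-8")
        if "foward" in text or "Foward" in text:
            fixed = text.replace("foward", "forward").replace("Foward", "forward")
            path.write_text(fixed, encoding="utf-8")
            print(f"fixed {path}")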
@@ -389,7 +389,7 @@ Tensor _fft_c2r_mkl(const Tensor& self, IntArrayRef dim, int64_t normalization,
   auto input = self;
   if (dim.size() > 1) {
     auto c2c_dims = dim.slice(0, dim.size() - 1);
-    input = _fft_c2c_mkl(self, c2c_dims, normalization, /*foward=*/false);
+    input = _fft_c2c_mkl(self, c2c_dims, normalization, /*forward=*/false);
     dim = dim.slice(dim.size() - 1);
   }
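Worth noting what the fixed argument comment labels: `_fft_c2r_mkl` computes a multi-dim complex-to-real transform as a c2c transform over all dims but the last, then a 1-d c2r over the last dim. A sketch verifying that decomposition with the public `torch.fft` API (shapes and tolerances are illustrative; the internal `_fft_c2c_mkl` is not exercised directly):

import torch

# An N-d inverse c2r FFT equals an inverse c2c FFT over the leading
# dims followed by a 1-d inverse c2r FFT over the last dim, mirroring
# the decomposition in _fft_c2r_mkl above.
x = torch.randn(4, 5, 3, dtype=torch.complex64)

full = torch.fft.irfftn(x, dim=(0, 1, 2))

step = torch.fft.ifftn(x, dim=(0, 1))   # c2c over all dims but the last
step = torch.fft.irfft(step, dim=2)     # c2r over the last dim

torch.testing.assert_close(full, step, rtol=1e-4, atol=1e-5)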
@@ -9,7 +9,7 @@ class TrainerBase(ABC):
     BATCH_LEVEL_METRIC = "batch_level_metric"
     BATCH_ALL = "batch_all"
-    FORWARD_METRIC = "foward_metric"
+    FORWARD_METRIC = "forward_metric"
     FORWARD_PASS = "forward_pass"
     BACKWARD_METRIC = "backward_metric"
     BACKWARD = "backward"
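These class attributes are plain label strings. A hypothetical sketch of how such keys are typically used, tagging per-phase timings in a training loop (this is not the actual trainer code):

import time

# Hypothetical illustration: the constants above serve as dictionary
# keys that tag per-phase timing measurements in a training loop.
FORWARD_METRIC = "forward_metric"
BACKWARD_METRIC = "backward_metric"

metrics = {FORWARD_METRIC: [], BACKWARD_METRIC: []}

start = time.perf_counter()
# ... forward pass would run here ...
metrics[FORWARD_METRIC].append(time.perf_counter() - start)

start = time.perf_counter()
# ... backward pass would run here ...
metrics[BACKWARD_METRIC].append(time.perf_counter() - start)

print(metrics)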
BIN  benchmarks/tensorexpr/nnc.png  (new file, 53 KiB; binary not shown)
@@ -474,7 +474,7 @@ Debugging the Generated Code
 Because FX generates the ``forward()`` function on :class:`GraphModule`\s, using
 traditional debugging techniques like ``print`` statements or ``pdb`` is
-not as straightfoward. Luckily, we have several techniques we can use
+not as straightforward. Luckily, we have several techniques we can use
 for debugging the generated code.
 
 Use ``pdb``
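A minimal, self-contained illustration of the docs' point using public FX APIs (the module and input are made up): printing the generated source is often the quickest substitute for `print` debugging, and `pdb` can still step into the generated `forward()`:

import torch
import torch.fx

class M(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x) + 1.0

gm = torch.fx.symbolic_trace(M())

# Inspect the generated forward() source rather than print-debugging it.
print(gm.code)

# pdb can still step into the generated code: uncomment to try.
# import pdb; pdb.set_trace()
out = gm(torch.randn(3))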
@@ -5,7 +5,7 @@ from typing import Tuple
 import torch
 from jit.test_hooks_modules import (
-    ModuleDirectFowardSubmodCall, ModuleForwardSingleInput,
+    ModuleDirectforwardSubmodCall, ModuleForwardSingleInput,
     ModuleForwardTupleInput, create_forward_tuple_input,
     create_module_forward_multiple_inputs, create_module_forward_single_input,
     create_module_hook_return_nothing,
@@ -184,7 +184,7 @@ class TestHooks(JitTestCase):
         self.assertNotEqual(m_scripted("a"), m_scripted.forward("a"))
 
     def test_submodule_direct_forward_invocation(self):
-        m_submod_forward_call = ModuleDirectFowardSubmodCall(
+        m_submod_forward_call = ModuleDirectforwardSubmodCall(
             "outer_mod_name", "inner_mod_name"
         )
         m_submod_call = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
@@ -46,7 +46,7 @@ class ModuleForwardSingleInput(torch.nn.Module):
         return self.submodule(input)
 
 
-class ModuleDirectFowardSubmodCall(torch.nn.Module):
+class ModuleDirectforwardSubmodCall(torch.nn.Module):
     def __init__(self, name: str, submodule_name: str):
         super().__init__()
         self.name = name
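For context on what `ModuleDirectforwardSubmodCall` exercises: invoking `submodule.forward(x)` directly bypasses `__call__` and therefore skips any registered hooks. A standalone sketch (names are illustrative):

import torch

class Inner(torch.nn.Module):
    def forward(self, x):
        return x + 1

inner = Inner()
# A forward hook that rescales the output; returning a value from a
# forward hook replaces the module's output.
inner.register_forward_hook(lambda mod, inp, out: out * 10)

print(inner(torch.tensor(1.0)))          # __call__ runs hooks -> tensor(20.)
print(inner.forward(torch.tensor(1.0)))  # direct forward skips hooks -> tensor(2.)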
@@ -254,12 +254,12 @@ class TestOptimizer(TestCase):
         FileCheck().check_count("prim::CallMethod[name=\"forward\"]", 2, exactly=True) \
             .run(bn_no_forward_scripted_module.foo.graph)
 
-        bn_fold_no_foward_scripted_module = optimize_for_mobile(bn_no_forward_scripted_module, preserved_methods=['foo'])
-        self.assertEqual(len(torch.jit.export_opnames(bn_fold_no_foward_scripted_module)), 1)
+        bn_fold_no_forward_scripted_module = optimize_for_mobile(bn_no_forward_scripted_module, preserved_methods=['foo'])
+        self.assertEqual(len(torch.jit.export_opnames(bn_fold_no_forward_scripted_module)), 1)
         bn_input = torch.rand(1, 1, 6, 6)
         torch.testing.assert_allclose(
             bn_no_forward_scripted_module.foo(bn_input),
-            bn_fold_no_foward_scripted_module.foo(bn_input),
+            bn_fold_no_forward_scripted_module.foo(bn_input),
             rtol=1e-2,
             atol=1e-3)
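A self-contained sketch of the API under test here: scripting a module, then running `optimize_for_mobile` with `preserved_methods` so a non-`forward` method survives optimization (the module is made up, not the test fixture):

import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

class M(torch.nn.Module):
    def forward(self, x):
        return x * 2

    @torch.jit.export
    def foo(self, x):
        return x + 1

scripted = torch.jit.script(M())
# preserved_methods keeps `foo` callable on the optimized module.
opt = optimize_for_mobile(scripted, preserved_methods=['foo'])
print(torch.jit.export_opnames(opt))
print(opt.foo(torch.ones(2)))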
@@ -374,7 +374,7 @@ object around in C++, all its code will stay around and methods will be
 invokable.
 
 **`Module`**: A view over a `ClassType` and the `Object` that holds its state.
-Also responsible for turning unqualified names (e.g. `foward()`) into
+Also responsible for turning unqualified names (e.g. `forward()`) into
 qualified ones for lookup in the owning `CompilationUnit` (e.g.
 `__torch__.MyModule.forward`). Owns the `Object`, which transitively owns the
 `CompilationUnit`.
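The name qualification described above is visible from Python: printing a scripted module's graph shows the qualified type name its `CompilationUnit` assigned. A small sketch (the exact mangled prefix can vary by version):

import torch

class MyModule(torch.nn.Module):
    def forward(self, x):
        return x + 1

sm = torch.jit.script(MyModule())
# The graph prints the qualified type name, e.g. a `%self` value typed
# as "__torch__.MyModule", matching the scheme described above.
print(sm.graph)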
@@ -370,8 +370,8 @@ struct DifferentiableGraphBackward : public autograd::Node {
 // to the output Variables if present.
 struct DifferentiableGraphOp {
   DifferentiableGraphOp(Gradient grad)
-      : f_ptr(std::make_shared<GraphExecutor>(grad.f, "<foward op>")),
-        legacy_f(grad.f, "<foward op>"),
+      : f_ptr(std::make_shared<GraphExecutor>(grad.f, "<forward op>")),
+        legacy_f(grad.f, "<forward op>"),
         grad(std::move(grad)),
         grad_executor(this->grad.df, "<backward op>"),
         num_inputs(this->grad.f->inputs().size()),
@@ -496,7 +496,7 @@ class ThreeWorkersRemoteModuleTest(CommonRemoteModuleTest):
         self.assertFalse(attrs["is_scriptable"])
 
         # Test the installed methods on worker1's can be initiated by worker2 over RPC layer.
-        # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward``` or ``foward_async``,
+        # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward``` or ``forward_async``,
         # not have another worker to initiate forward over the RPC layer.
         args = (torch.ones(1), 2, "3")
         ret1 = rpc.rpc_sync(dst_worker2_name, remote_forward, (remote_module, args))
@@ -537,7 +537,7 @@ class ThreeWorkersRemoteModuleTest(CommonRemoteModuleTest):
         self.assertFalse(attrs["is_scriptable"])
 
         # Test the installed methods on worker1's can be initiated by worker2 over RPC layer.
-        # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward``` or ``foward_async``,
+        # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward``` or ``forward_async``,
         # not have another worker to initiate forward over the RPC layer.
         args = (torch.ones(1), 2, "3")
         ret1 = rpc.rpc_sync(dst_worker2_name, remote_forward, (remote_module, args))
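The `forward`/`forward_async` pair the note refers to is generated on `RemoteModule` from the wrapped module's `forward` signature. A single-process sketch, assuming a world of size 1 with the module owned by the caller (the test itself spans three workers, and the address/port values are illustrative):

import os
import torch
import torch.distributed.rpc as rpc
from torch.distributed.nn.api.remote_module import RemoteModule

os.environ.setdefault("MASTER_ADDR", "localhost")
os.environ.setdefault("MASTER_PORT", "29500")
rpc.init_rpc("worker0", rank=0, world_size=1)

# RemoteModule installs forward (blocking) and forward_async (returns a
# Future), both generated from the wrapped module's forward signature.
m = RemoteModule("worker0/cpu", torch.nn.Linear, args=(4, 2))
print(m.forward(torch.randn(3, 4)).shape)                # synchronous call
print(m.forward_async(torch.randn(3, 4)).wait().shape)   # Future-based call

rpc.shutdown()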