diff --git a/aten/src/ATen/native/mkl/SpectralOps.cpp b/aten/src/ATen/native/mkl/SpectralOps.cpp
index a79c8a36752..896d3416b50 100644
--- a/aten/src/ATen/native/mkl/SpectralOps.cpp
+++ b/aten/src/ATen/native/mkl/SpectralOps.cpp
@@ -389,7 +389,7 @@ Tensor _fft_c2r_mkl(const Tensor& self, IntArrayRef dim, int64_t normalization,
   auto input = self;
   if (dim.size() > 1) {
     auto c2c_dims = dim.slice(0, dim.size() - 1);
-    input = _fft_c2c_mkl(self, c2c_dims, normalization, /*foward=*/false);
+    input = _fft_c2c_mkl(self, c2c_dims, normalization, /*forward=*/false);
     dim = dim.slice(dim.size() - 1);
   }
 
diff --git a/benchmarks/distributed/rpc/parameter_server/trainers/TrainerBase.py b/benchmarks/distributed/rpc/parameter_server/trainers/TrainerBase.py
index 5d17e6e84e0..d83b9874479 100644
--- a/benchmarks/distributed/rpc/parameter_server/trainers/TrainerBase.py
+++ b/benchmarks/distributed/rpc/parameter_server/trainers/TrainerBase.py
@@ -9,7 +9,7 @@ class TrainerBase(ABC):
 
     BATCH_LEVEL_METRIC = "batch_level_metric"
     BATCH_ALL = "batch_all"
-    FORWARD_METRIC = "foward_metric"
+    FORWARD_METRIC = "forward_metric"
     FORWARD_PASS = "forward_pass"
     BACKWARD_METRIC = "backward_metric"
     BACKWARD = "backward"
diff --git a/benchmarks/tensorexpr/nnc.png b/benchmarks/tensorexpr/nnc.png
new file mode 100644
index 00000000000..f2f33ccf0be
Binary files /dev/null and b/benchmarks/tensorexpr/nnc.png differ
diff --git a/docs/source/fx.rst b/docs/source/fx.rst
index 33f33683411..60d2490100a 100644
--- a/docs/source/fx.rst
+++ b/docs/source/fx.rst
@@ -474,7 +474,7 @@ Debugging the Generated Code
 
 Because FX generates the ``forward()`` function on :class:`GraphModule`\s, using
 traditional debugging techniques like ``print`` statements or ``pdb`` is
-not as straightfoward. Luckily, we have several techniques we can use
+not as straightforward. Luckily, we have several techniques we can use
 for debugging the generated code.
 
 Use ``pdb``
diff --git a/test/jit/test_hooks.py b/test/jit/test_hooks.py
index dd8b3e96f57..0c7cff56d5f 100644
--- a/test/jit/test_hooks.py
+++ b/test/jit/test_hooks.py
@@ -5,7 +5,7 @@ from typing import Tuple
 
 import torch
 from jit.test_hooks_modules import (
-    ModuleDirectFowardSubmodCall, ModuleForwardSingleInput,
+    ModuleDirectForwardSubmodCall, ModuleForwardSingleInput,
     ModuleForwardTupleInput, create_forward_tuple_input,
     create_module_forward_multiple_inputs, create_module_forward_single_input,
     create_module_hook_return_nothing,
@@ -184,7 +184,7 @@ class TestHooks(JitTestCase):
         self.assertNotEqual(m_scripted("a"), m_scripted.forward("a"))
 
     def test_submodule_direct_forward_invocation(self):
-        m_submod_forward_call = ModuleDirectFowardSubmodCall(
+        m_submod_forward_call = ModuleDirectForwardSubmodCall(
             "outer_mod_name", "inner_mod_name"
         )
         m_submod_call = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
diff --git a/test/jit/test_hooks_modules.py b/test/jit/test_hooks_modules.py
index c97890826ab..0297b34a448 100644
--- a/test/jit/test_hooks_modules.py
+++ b/test/jit/test_hooks_modules.py
@@ -46,7 +46,7 @@ class ModuleForwardSingleInput(torch.nn.Module):
         return self.submodule(input)
 
 
-class ModuleDirectFowardSubmodCall(torch.nn.Module):
+class ModuleDirectForwardSubmodCall(torch.nn.Module):
     def __init__(self, name: str, submodule_name: str):
         super().__init__()
         self.name = name
diff --git a/test/test_mobile_optimizer.py b/test/test_mobile_optimizer.py
index da439267263..78ebb550d02 100644
--- a/test/test_mobile_optimizer.py
+++ b/test/test_mobile_optimizer.py
@@ -254,12 +254,12 @@ class TestOptimizer(TestCase):
         FileCheck().check_count("prim::CallMethod[name=\"forward\"]", 2, exactly=True) \
             .run(bn_no_forward_scripted_module.foo.graph)
 
-        bn_fold_no_foward_scripted_module = optimize_for_mobile(bn_no_forward_scripted_module, preserved_methods=['foo'])
-        self.assertEqual(len(torch.jit.export_opnames(bn_fold_no_foward_scripted_module)), 1)
+        bn_fold_no_forward_scripted_module = optimize_for_mobile(bn_no_forward_scripted_module, preserved_methods=['foo'])
+        self.assertEqual(len(torch.jit.export_opnames(bn_fold_no_forward_scripted_module)), 1)
         bn_input = torch.rand(1, 1, 6, 6)
         torch.testing.assert_allclose(
             bn_no_forward_scripted_module.foo(bn_input),
-            bn_fold_no_foward_scripted_module.foo(bn_input),
+            bn_fold_no_forward_scripted_module.foo(bn_input),
             rtol=1e-2,
             atol=1e-3)
 
diff --git a/torch/csrc/jit/docs/serialization.md b/torch/csrc/jit/docs/serialization.md
index b5187794e16..8c3461a9abe 100644
--- a/torch/csrc/jit/docs/serialization.md
+++ b/torch/csrc/jit/docs/serialization.md
@@ -374,7 +374,7 @@ object around in C++, all its code will stay around and methods will be
 invokable.
 
 **`Module`**: A view over a `ClassType` and the `Object` that holds its state.
-Also responsible for turning unqualified names (e.g. `foward()`) into
+Also responsible for turning unqualified names (e.g. `forward()`) into
 qualified ones for lookup in the owning `CompilationUnit` (e.g.
 `__torch__.MyModule.forward`). Owns the `Object`, which transitively owns the
 `CompilationUnit`.
diff --git a/torch/csrc/jit/runtime/graph_executor.cpp b/torch/csrc/jit/runtime/graph_executor.cpp
index 487efe20e3a..236e2712b82 100644
--- a/torch/csrc/jit/runtime/graph_executor.cpp
+++ b/torch/csrc/jit/runtime/graph_executor.cpp
@@ -370,8 +370,8 @@ struct DifferentiableGraphBackward : public autograd::Node {
 // to the output Variables if present.
 struct DifferentiableGraphOp {
   DifferentiableGraphOp(Gradient grad)
-      : f_ptr(std::make_shared<GraphExecutor>(grad.f, "<foward op>")),
-        legacy_f(grad.f, "<foward op>"),
+      : f_ptr(std::make_shared<GraphExecutor>(grad.f, "<forward op>")),
+        legacy_f(grad.f, "<forward op>"),
         grad(std::move(grad)),
         grad_executor(this->grad.df, "<backward op>"),
         num_inputs(this->grad.f->inputs().size()),
diff --git a/torch/testing/_internal/distributed/nn/api/remote_module_test.py b/torch/testing/_internal/distributed/nn/api/remote_module_test.py
index 06e9c21ee59..b4236f3224c 100644
--- a/torch/testing/_internal/distributed/nn/api/remote_module_test.py
+++ b/torch/testing/_internal/distributed/nn/api/remote_module_test.py
@@ -496,7 +496,7 @@ class ThreeWorkersRemoteModuleTest(CommonRemoteModuleTest):
         self.assertFalse(attrs["is_scriptable"])
 
         # Test the installed methods on worker1's can be initiated by worker2 over RPC layer.
-        # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward``` or ``foward_async``,
+        # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward`` or ``forward_async``,
         # not have another worker to initiate forward over the RPC layer.
         args = (torch.ones(1), 2, "3")
         ret1 = rpc.rpc_sync(dst_worker2_name, remote_forward, (remote_module, args))
@@ -537,7 +537,7 @@ class ThreeWorkersRemoteModuleTest(CommonRemoteModuleTest):
         self.assertFalse(attrs["is_scriptable"])
 
         # Test the installed methods on worker1's can be initiated by worker2 over RPC layer.
-        # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward``` or ``foward_async``,
+        # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward`` or ``forward_async``,
         # not have another worker to initiate forward over the RPC layer.
         args = (torch.ones(1), 2, "3")
         ret1 = rpc.rpc_sync(dst_worker2_name, remote_forward, (remote_module, args))
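
For context on the fx.rst hunk above, a minimal sketch (not part of the patch; `MyModule` is a made-up example module) of the ``pdb`` technique that passage introduces: trace a module, inspect the generated ``forward()`` source via ``gm.code``, then drop into the debugger and step into the call.

# Sketch only: illustrates the fx.rst advice on debugging FX-generated code.
# MyModule is a hypothetical example module, not part of this patch.
import pdb

import torch
import torch.fx


class MyModule(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x) + 1.0


gm = torch.fx.symbolic_trace(MyModule())
print(gm.code)   # print the source of the generated forward()

pdb.set_trace()  # step ("s") into the next call to walk through the generated code
out = gm(torch.rand(3))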