From e8645fa2b9e82f083c7f461314140d6771ca9812 Mon Sep 17 00:00:00 2001 From: Wouter Devriendt Date: Mon, 5 Aug 2024 17:21:56 +0000 Subject: [PATCH] [Doc] fix some typos (found by codespell and typos) (#132544) Applying doc fixes from PR https://github.com/pytorch/pytorch/pull/127267 - with CLA Pull Request resolved: https://github.com/pytorch/pytorch/pull/132544 Approved by: https://github.com/kit1980 --- aten/src/ATen/Context.cpp | 2 +- aten/src/ATen/core/dispatch/OperatorEntry.cpp | 2 +- docs/cpp/source/notes/tensor_basics.rst | 2 +- docs/source/cond.rst | 4 ++-- docs/source/distributed.checkpoint.rst | 2 +- docs/source/distributed.pipelining.rst | 4 ++-- docs/source/export.rst | 2 +- docs/source/fx.rst | 2 +- docs/source/jit_language_reference.rst | 2 +- docs/source/jit_language_reference_v2.rst | 2 +- docs/source/notes/extending.rst | 6 +++--- docs/source/notes/numerical_accuracy.rst | 2 +- docs/source/rpc/rref.rst | 2 +- docs/source/scripts/exportdb/blurb.txt | 2 +- docs/source/torch.compiler_fine_grain_apis.rst | 4 ++-- test/test_overrides.py | 4 ++-- test/test_python_dispatch.py | 2 +- torch/_library/fake_class_registry.py | 2 +- 18 files changed, 24 insertions(+), 24 deletions(-) diff --git a/aten/src/ATen/Context.cpp b/aten/src/ATen/Context.cpp index 2c7ddbae6d8..107c874ee05 100644 --- a/aten/src/ATen/Context.cpp +++ b/aten/src/ATen/Context.cpp @@ -471,7 +471,7 @@ Allocator* getCPUAllocator() { } // override_allow_tf32_flag = true -// means the allow_tf32 flags are overrided and tf32 is force disabled +// means the allow_tf32 flags are overridden and tf32 is force disabled // override_allow_tf32_flag = false // means the original allow_tf32 flags are followed thread_local bool override_allow_tf32_flag = false; diff --git a/aten/src/ATen/core/dispatch/OperatorEntry.cpp b/aten/src/ATen/core/dispatch/OperatorEntry.cpp index 45bd548cdfc..aa99e9d2fdf 100644 --- a/aten/src/ATen/core/dispatch/OperatorEntry.cpp +++ b/aten/src/ATen/core/dispatch/OperatorEntry.cpp @@ -152,7 +152,7 @@ OperatorEntry::AnnotatedKernelContainerIterator OperatorEntry::registerKernel( // Suppress the warning for Meta key as we are overriding C++ meta functions with python meta functions // for some ops if (dispatch_key != DispatchKey::Meta) { - TORCH_WARN_ONCE("Warning only once for all operators, other operators may also be overrided.\n", + TORCH_WARN_ONCE("Warning only once for all operators, other operators may also be overridden.\n", " Overriding a previously registered kernel for the same operator and the same dispatch key\n", " operator: ", (schema_.has_value() ? toString(schema_->schema) : toString(name_)), "\n", " ", (this->schema_.has_value() ? this->schema_->debug : "no debug info"), "\n", diff --git a/docs/cpp/source/notes/tensor_basics.rst b/docs/cpp/source/notes/tensor_basics.rst index ed3687989e1..1eab1c832b5 100644 --- a/docs/cpp/source/notes/tensor_basics.rst +++ b/docs/cpp/source/notes/tensor_basics.rst @@ -1,7 +1,7 @@ Tensor Basics ============= -The ATen tensor library backing PyTorch is a simple tensor library thats exposes +The ATen tensor library backing PyTorch is a simple tensor library that exposes the Tensor operations in Torch directly in C++14. ATen's API is auto-generated from the same declarations PyTorch uses so the two APIs will track each other over time. diff --git a/docs/source/cond.rst b/docs/source/cond.rst index 9d1fb7515ff..c43ce4fd6d9 100644 --- a/docs/source/cond.rst +++ b/docs/source/cond.rst @@ -21,7 +21,7 @@ and can logically be seen as implemented as follows. 
Its unique power lies in its ability of expressing **data-dependent control flow**: it lowers to a conditional operator (`torch.ops.higher_order.cond`), which preserves predicate, true function and false functions. -This unlocks great flexibilty in writing and deploying models that change model architecture based on +This unlocks great flexibility in writing and deploying models that change model architecture based on the **value** or **shape** of inputs or intermediate outputs of tensor operations. .. warning:: @@ -109,7 +109,7 @@ This gives us an exported program as shown below: Notice that `torch.cond` is lowered to `torch.ops.higher_order.cond`, its predicate becomes a Symbolic expression over the shape of input, and branch functions becomes two sub-graph attributes of the top level graph module. -Here is another exmaple that showcases how to express a data-dependet control flow: +Here is another example that showcases how to express a data-dependent control flow: .. code-block:: python diff --git a/docs/source/distributed.checkpoint.rst b/docs/source/distributed.checkpoint.rst index 573faa429b7..9e458db31e5 100644 --- a/docs/source/distributed.checkpoint.rst +++ b/docs/source/distributed.checkpoint.rst @@ -97,7 +97,7 @@ Due to legacy design decisions, the state dictionaries of `FSDP` and `DDP` may h To tackle these challenges, we offer a collection of APIs for users to easily manage state_dicts. `get_model_state_dict` returns a model state dictionary with keys consistent with those returned by the unparallelized model state dictionary. Similarly, `get_optimizer_state_dict` provides the optimizer state dictionary with keys uniform across all parallelisms applied. To achieve this consistency, `get_optimizer_state_dict` converts parameter IDs to fully qualified names identical to those found in the unparallelized model state dictionary. -Note that results returned by hese APIs can be used directly with the `torch.distributed.checkpoint.save()` and `torch.distributed.checkpoint.load()` methods without requiring any additional conversions. +Note that results returned by these APIs can be used directly with the `torch.distributed.checkpoint.save()` and `torch.distributed.checkpoint.load()` methods without requiring any additional conversions. Note that this feature is experimental, and API signatures might change in the future. diff --git a/docs/source/distributed.pipelining.rst b/docs/source/distributed.pipelining.rst index 00cfde50d0d..841ab9a4b4d 100644 --- a/docs/source/distributed.pipelining.rst +++ b/docs/source/distributed.pipelining.rst @@ -52,7 +52,7 @@ Overall, the ``pipelining`` package provides the following features: * Splitting of model code based on simple specification. * Rich support for pipeline schedules, including GPipe, 1F1B, - Interleaved 1F1B and Looped BFS, and providing the infrastruture for writing + Interleaved 1F1B and Looped BFS, and providing the infrastructure for writing customized schedules. * First-class support for cross-host pipeline parallelism, as this is where PP is typically used (over slower interconnects). @@ -149,7 +149,7 @@ model. self.tok_embeddings = nn.Embedding(...) - # Using a ModuleDict lets us delete layers witout affecting names, + # Using a ModuleDict lets us delete layers without affecting names, # ensuring checkpoints will correctly save and load. 
self.layers = torch.nn.ModuleDict() for layer_id in range(model_args.n_layers): diff --git a/docs/source/export.rst b/docs/source/export.rst index 739659c950c..f4192d477fd 100644 --- a/docs/source/export.rst +++ b/docs/source/export.rst @@ -505,7 +505,7 @@ Input Tensor Shapes By default, ``torch.export`` will trace the program specializing on the input tensors' shapes, unless a dimension is specified as dynamic via the -``dynamic_shapes`` argumen to ``torch.export``. This means that if there exists +``dynamic_shapes`` argument to ``torch.export``. This means that if there exists shape-dependent control flow, ``torch.export`` will specialize on the branch that is being taken with the given sample inputs. For example: diff --git a/docs/source/fx.rst b/docs/source/fx.rst index 0a0af6254a5..ac1f2349a95 100644 --- a/docs/source/fx.rst +++ b/docs/source/fx.rst @@ -355,7 +355,7 @@ properties on the nodes as we see them at runtime. That might look like: attr_itr = self.mod for i, atom in enumerate(target_atoms): if not hasattr(attr_itr, atom): - raise RuntimeError(f"Node referenced nonexistant target {'.'.join(target_atoms[:i])}") + raise RuntimeError(f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}") attr_itr = getattr(attr_itr, atom) return attr_itr diff --git a/docs/source/jit_language_reference.rst b/docs/source/jit_language_reference.rst index b342a26ef9c..ccd3cf873d7 100644 --- a/docs/source/jit_language_reference.rst +++ b/docs/source/jit_language_reference.rst @@ -376,7 +376,7 @@ Python enums can be used in TorchScript without any extra annotation or code: After an enum is defined, it can be used in both TorchScript and Python interchangeably like any other TorchScript type. The type of the values of an enum must be ``int``, -``float``, or ``str``. All values must be of the same type; heterogenous types for enum +``float``, or ``str``. All values must be of the same type; heterogeneous types for enum values are not supported. diff --git a/docs/source/jit_language_reference_v2.rst b/docs/source/jit_language_reference_v2.rst index b09cd13b7f2..0863f9bbd2b 100644 --- a/docs/source/jit_language_reference_v2.rst +++ b/docs/source/jit_language_reference_v2.rst @@ -830,7 +830,7 @@ TorchScript Type System Definition TSMetaType ::= "Any" TSPrimitiveType ::= "int" | "float" | "double" | "complex" | "bool" | "str" | "None" - TSStructualType ::= TSTuple | TSNamedTuple | TSList | TSDict | TSOptional | + TSStructuralType ::= TSTuple | TSNamedTuple | TSList | TSDict | TSOptional | TSUnion | TSFuture | TSRRef | TSAwait TSTuple ::= "Tuple" "[" (TSType ",")* TSType "]" TSNamedTuple ::= "namedtuple" "(" (TSType ",")* TSType ")" diff --git a/docs/source/notes/extending.rst b/docs/source/notes/extending.rst index 04c4e705e19..1f80e36a48e 100644 --- a/docs/source/notes/extending.rst +++ b/docs/source/notes/extending.rst @@ -638,10 +638,10 @@ keyword arguments like :func:`torch.add` does:: For speed and flexibility the ``__torch_function__`` dispatch mechanism does not check that the signature of an override function matches the signature of the -function being overrided in the :mod:`torch` API. For some applications ignoring +function being overridden in the :mod:`torch` API. For some applications ignoring optional arguments would be fine but to ensure full compatibility with :class:`Tensor`, user implementations of torch API functions should take care to -exactly emulate the API of the function that is being overrided. +exactly emulate the API of the function that is being overridden. 
Functions in the :mod:`torch` API that do not have explicit overrides will return ``NotImplemented`` from ``__torch_function__``. If all operands with @@ -860,7 +860,7 @@ signature of the original ``PyTorch`` function:: Finally, ``torch.overrides.get_ignored_functions`` returns a tuple of functions -that explicitly cannot be overrided by ``__torch_function__``. This list can be +that explicitly cannot be overridden by ``__torch_function__``. This list can be useful to confirm that a function that isn't present in the dictionary returned by ``get_overridable_functions`` cannot be overridden. diff --git a/docs/source/notes/numerical_accuracy.rst b/docs/source/notes/numerical_accuracy.rst index f1ad3183087..086b3dd3508 100644 --- a/docs/source/notes/numerical_accuracy.rst +++ b/docs/source/notes/numerical_accuracy.rst @@ -4,7 +4,7 @@ Numerical accuracy ================== In modern computers, floating point numbers are represented using IEEE 754 standard. -For more details on floating point arithmetics and IEEE 754 standard, please see +For more details on floating point arithmetic and IEEE 754 standard, please see `Floating point arithmetic `_ In particular, note that floating point provides limited accuracy (about 7 decimal digits for single precision floating point numbers, about 16 decimal digits for double precision diff --git a/docs/source/rpc/rref.rst b/docs/source/rpc/rref.rst index 212669f42fd..3f858e58686 100644 --- a/docs/source/rpc/rref.rst +++ b/docs/source/rpc/rref.rst @@ -132,7 +132,7 @@ to Y, and Y forks to Z: OwnerRRef -> A -> Y -> Z If all of Z's messages, including the delete message, are processed by the -owner before Y's messages. the owner will learn of Z's deletion befores +owner before Y's messages. the owner will learn of Z's deletion before knowing Y exists. Nevertheless, this does not cause any problem. Because, at least one of Y's ancestors will be alive (A) and it will prevent the owner from deleting the ``OwnerRRef``. More specifically, if the diff --git a/docs/source/scripts/exportdb/blurb.txt b/docs/source/scripts/exportdb/blurb.txt index 7e32cb9b807..a910efc81a3 100644 --- a/docs/source/scripts/exportdb/blurb.txt +++ b/docs/source/scripts/exportdb/blurb.txt @@ -6,4 +6,4 @@ everything that is supported by exportdb, but it covers the most common and confusing use cases that users will run into. If you have a feature that you think needs a stronger guarantee from us to -support in export please create an issue in the pytorch/pytorch repo wih a module:export tag. +support in export please create an issue in the pytorch/pytorch repo with a module:export tag. diff --git a/docs/source/torch.compiler_fine_grain_apis.rst b/docs/source/torch.compiler_fine_grain_apis.rst index 5057628cf74..9c0ebf29187 100644 --- a/docs/source/torch.compiler_fine_grain_apis.rst +++ b/docs/source/torch.compiler_fine_grain_apis.rst @@ -9,7 +9,7 @@ TorchDynamo APIs for fine-grained tracing ``torch.compile`` performs TorchDynamo tracing on the whole user model. However, it is possible that a small part of the model code cannot be -handeled by ``torch.compiler``. In this case, you might want to disable +handled by ``torch.compiler``. In this case, you might want to disable the compiler on that particular portion, while running compilation on the rest of the model. This section describe the existing APIs that use to define parts of your code in which you want to skip compilation @@ -22,7 +22,7 @@ disable compilation are listed in the following table: :header: "API", "Description", "When to use?" 
:widths: auto - "``torch.compiler.disable``", "Disables Dynamo on the decorated function as well as recursively invoked functions.", "Excellent for unblocking a user, if a small portion of the model cannot be handeled with ``torch.compile``." + "``torch.compiler.disable``", "Disables Dynamo on the decorated function as well as recursively invoked functions.", "Excellent for unblocking a user, if a small portion of the model cannot be handled with ``torch.compile``." "``torch._dynamo.disallow_in_graph``", "Disallows the marked op in the TorchDynamo graph. TorchDynamo causes graph break, and runs the op in the eager (no compile) mode.\n\nThis is suitable for the ops, while ``torch.compiler.disable`` is suitable for decorating functions.", "This API is excellent for both debugging and unblocking if a custom op like ``torch.ops.fbgemm.*`` is causing issues with the ``torch.compile`` function." "``torch.compile.allow_in_graph``", "The annotated callable goes as is in the TorchDynamo graph. For example, a black-box for TorchDynamo Dynamo.\n\nNote that AOT Autograd will trace through it, so the ``allow_in_graph`` is only a Dynamo-level concept.", "This API is useful for portions of the model which have known TorchDynamo hard-to-support features, like hooks or ``autograd.Function``. However, each usage of ``allow_in_graph`` **must be carefully screened** (no graph breaks, no closures)." "``torch._dynamo.graph_break``", "Adds a graph break. The code before and after the graph break goes through TorchDynamo.", "**Rarely useful for deployment** - If you think you need this, most probably you need either ``disable`` or ``disallow_in_graph``." diff --git a/test/test_overrides.py b/test/test_overrides.py index efa75a00999..2aaebeb24a8 100644 --- a/test/test_overrides.py +++ b/test/test_overrides.py @@ -394,7 +394,7 @@ class TestTorchFunctionOverride(TestCase): cls._stack.close() def test_mean_semantics(self): - """Test that a function with one argument can be overrided""" + """Test that a function with one argument can be overridden""" t1 = DiagonalTensor(5, 2) t2 = SubTensor([[1, 2], [1, 2]]) t3 = SubDiagonalTensor(5, 2) @@ -410,7 +410,7 @@ class TestTorchFunctionOverride(TestCase): has_torch_function(object()) def test_mm_semantics(self): - """Test that a function with multiple arguments can be overrided""" + """Test that a function with multiple arguments can be overridden""" t1 = DiagonalTensor(5, 2) t2 = torch.eye(5) * 2 t3 = SubTensor([[1, 2], [1, 2]]) diff --git a/test/test_python_dispatch.py b/test/test_python_dispatch.py index 54c6041fdcc..293fc8a87f5 100644 --- a/test/test_python_dispatch.py +++ b/test/test_python_dispatch.py @@ -235,7 +235,7 @@ class TestPythonRegistration(TestCase): self.assertFalse(torch.mul(x, y)._is_zerotensor()) # Assert that a user can't override the behavior of a (ns, op, dispatch_key) - # combination if someone overrided the behavior for the same before them + # combination if someone overridden the behavior for the same before them with self.assertRaisesRegex( RuntimeError, "already a kernel registered from python" ): diff --git a/torch/_library/fake_class_registry.py b/torch/_library/fake_class_registry.py index 82142f8ceae..3c2689f5328 100644 --- a/torch/_library/fake_class_registry.py +++ b/torch/_library/fake_class_registry.py @@ -55,7 +55,7 @@ class FakeClassRegistry: def register(self, full_qualname: str, fake_class=None) -> None: if self.has_impl(full_qualname): log.warning( - "%s is already registered. 
Previous fake class is overrided with %s.", + "%s is already registered. Previous fake class is overridden with %s.", full_qualname, fake_class, )
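
The subject line credits the `codespell` and `typos` tools for surfacing these mistakes. As a rough,
illustrative sketch only (the exact invocation is not recorded in this patch, and the path list below is
an example, not the one used for the PR), a local spelling pass over the touched trees could be scripted
like this, assuming `codespell` is installed (e.g. via `pip install codespell`):

    # Hypothetical helper, not part of the PyTorch repo: run codespell over a few source/doc
    # trees and print its findings, which are reported one per line as
    # "<file>:<line>: <typo> ==> <suggestion>".
    import subprocess

    paths = ["docs/source", "docs/cpp/source", "aten/src/ATen", "torch", "test"]
    result = subprocess.run(["codespell", *paths], capture_output=True, text=True)
    print(result.stdout)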