# Owner(s): ["module: ProxyTensor"]

from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch._dynamo
import unittest
import warnings
import operator
from collections.abc import Iterable
from torch.nn.utils import stateless
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps
from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode
from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode
from torch._decomp import decomposition_table
from torch.fx.experimental.symbolic_shapes import (
    eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets,
    guard_int, GuardOnDataDependentSymNode
)
from torch.testing._internal.custom_op_db import custom_op_db
from torch.testing._internal.hop_db import hop_db
from torch.testing._internal.common_device_type import ops
import torch.testing._internal.optests as optests
from torch._C import _disabled_torch_function_impl
from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule
from torch.utils._pytree import tree_map
from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts
from torch import nn
import torch._functorch.config
import re
import functools
import itertools

aten = torch.ops.aten

HAS_CUDA = torch.cuda.is_available()

def strip_end(s, suffix):
    if suffix and s.endswith(suffix):
        return s[:-len(suffix)]
    else:
        return s

def show_guards(gm):
    names = [strip_end(n, "_1") for n in fx_placeholder_targets(gm)]
    return "\n".join(
        gm.shape_env.produce_guards(fx_placeholder_vals(gm), names, _simplified=True, input_contexts=None)
    )
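
# Illustrative use of show_guards (a sketch only; the lambda and input below are
# made up for the example):
#
#   gm = make_fx(lambda x: x.view(-1), tracing_mode="symbolic")(torch.randn(2, 3))
#   print(show_guards(gm))  # one simplified shape guard per line
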
def process_failures():
    """
    Takes a file containing failures like

    FAILED test/test_proxy_tensor.py::TestProxyTensorOpInfoCPU::test_make_fx_symbolic_exhaustive___getitem___cpu_float32 - RuntimeError: aten.size.default - couldn't find symbolic meta function/decomposition # noqa: B950

    and processes them into a list of opinfo xfails
    """
    # Use a context manager so the failures file is closed even if parsing raises.
    with open('pytest_failures') as f:
        failures = f.readlines()
    failures = [i.strip() for i in failures]

    def process_failure_string(s, matcher):
        out = re.search(matcher, s)
        return out.groups()

    SYMBOLIC_TRACE_MATCH = r'exhaustive_(.*)_cpu.*: (.*)'
    failures = [process_failure_string(s, SYMBOLIC_TRACE_MATCH) for s in failures]

    def create_normalized_name(op):
        if op.variant_test_name == '':
            s = op.name
        else:
            s = f"{op.name}.{op.variant_test_name}"
        return s.replace('.', '_')

    remap_opinfo = {create_normalized_name(op): (op.name, op.variant_test_name) for op in op_db}

    print("symbolic_tensor_failures = {")
    for failure, reason in failures:
        print(f"    xfail{remap_opinfo[failure]},  # {reason}")
    print("}")
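
# Illustrative invocation (a sketch; assumes a `pytest_failures` file captured
# from a pytest run sits in the working directory):
#
#   process_failures()  # prints a ready-to-paste `symbolic_tensor_failures` dict
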
USE_TORCHVISION = False
try:
    import torchvision
    USE_TORCHVISION = True
except ImportError:
    warnings.warn("Couldn't import torchvision. Some of our tests use it, try "
                  "to install it with commands from pytorch.org, post-fixed with "
                  "`--no-deps` to avoid overwriting the pytorch installation",
                  UserWarning)


def _create_new_input(x):
    # Produce a fresh input with the same metadata as x, so callers can check
    # that a traced graph wasn't specialized to the exact tensors it saw.
    if not isinstance(x, torch.Tensor):
        return x
    if x.dtype != torch.float:
        return x + 1
    if x.is_leaf:
        return torch.rand_like(x, requires_grad=x.requires_grad)
    else:
        return torch.rand_like(x)


"""
Delays a cos from being executed on the wrapped tensor until it is used.
Simulates how a CommTensor is used.
"""
class UnwrapTensor(torch.Tensor):
    @staticmethod
    def __new__(cls, tensor: torch.Tensor):
        r = torch.Tensor._make_wrapper_subclass(
            cls,
            tensor.size(),
            dtype=tensor.dtype,
            device=tensor.device,
            layout=tensor.layout,
            requires_grad=tensor.requires_grad,
        )
        r._tensor = tensor
        return r

    def __repr__(self):
        # TODO: consider all_gather the local tensors for better debugging
        return f"UnwrapTensor({self._tensor})"

    __torch_function__ = _disabled_torch_function_impl

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        def unwrap(e):
            ret = e
            if isinstance(e, UnwrapTensor):
                # This is where the delayed cos finally runs: at first use.
                ret = e._tensor.cos()
            return ret

        args = tree_map(unwrap, args)
        kwargs = tree_map(unwrap, kwargs)
        return func(*args, **kwargs)


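# Minimal sketch of UnwrapTensor's behavior (illustrative; not exercised here
# directly): any op on the wrapper first applies the delayed cos.
#
#   t = UnwrapTensor(torch.ones(3))
#   out = t * 2  # dispatch unwraps t to t._tensor.cos() before running mul
#   assert torch.allclose(out, torch.ones(3).cos() * 2)
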
class TestGenericProxyTensor(TestCase):
    # WARNING: if any of your inputs are index tensors, DO NOT use this
    # function
    def _test(self, f, inps):
        fx_f = make_fx(f, tracing_mode=self.tracing_mode)(*inps)
        # Run both the traced graph and the original function on *fresh* inputs
        # (see _create_new_input) to check the trace didn't bake in the originals.
        new_inps = tree_map(_create_new_input, inps)
        r1 = fx_f(*new_inps)
        r2 = f(*new_inps)
        self.assertEqual(r1, r2)
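
    # A minimal sketch of what _test exercises (illustrative only):
    #
    #   fx_f = make_fx(torch.sin)(torch.randn(3))  # capture sin as an aten fx graph
    #   fx_f(torch.randn(3))                       # rerun the captured graph on new data
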
def test_pre_dispatch_mode_stack(self):
        def f(a):
            b = torch.ones(4, 4)
            return torch.matmul(a, b)
        # We expect to see matmul in the trace - it should NOT be decomposed into mm.
        # Also, torch.ones() doesn't show up in the trace.
        # This is annoying but expected: ones() never dispatches to the Autograd dispatch key,
        # so our mode never sees it - it goes directly to the BackendSelect key.
        inp = torch.ones(4, 4)
        # Test that make_fx(pre_dispatch=True) clears caches properly.
        from torch._dispatch.python import enable_python_dispatcher
        with enable_python_dispatcher():
            out1 = f(inp)
        fx_g = make_fx(f, pre_dispatch=True)(inp)
        self.assertExpectedInline(fx_g.code.strip(), """\
def forward(self, a_1):
    ones = torch.ops.aten.ones.default([4, 4], device = device(type='cpu'), pin_memory = False)
    matmul = torch.ops.aten.matmul.default(a_1, ones); a_1 = ones = None
    return matmul""")

def test_pre_dispatch_linear(self):
        def f(a, b, c):
            return torch.nn.functional.linear(a, b, c)
        a = torch.ones(4, 4)
        b = torch.ones(4, 4)
        c = torch.ones(4)
        fx_g = make_fx(f, pre_dispatch=True)(a, b, c)
        out1 = f(a, b, c)
        out2 = fx_g(a, b, c)
        self.assertEqual(out1, out2)

def test_pre_dispatch_no_grad(self):
        def f(a):
            b = a.sin()
            torch.set_grad_enabled(False)
            c = b.cos()
            torch.set_grad_enabled(True)
            return b + c.sin()
        a1 = torch.randn(4, requires_grad=True)
        a2 = a1.clone().detach().requires_grad_(True)
        a_tmp = a1.clone().detach().requires_grad_(True)
        fx_g = make_fx(f, pre_dispatch=True)(a_tmp)
        out1 = f(a1)
        out2 = fx_g(a2)
        self.assertEqual(out1, out2)
        out1.sum().backward()
        out2.sum().backward()
        self.assertEqual(a1.grad, a2.grad)

def test_make_fx_simple(self):
        def f(x):
            return torch.sin(x)
        self._test(f, (torch.randn(3),))

def test_scalar_device(self, device='cpu'):
        def f(a, b):
            return a + b
        self._test(f, [torch.randn(3, device=device), torch.tensor(5)])

def test_isolated_graphmodule(self):
        def is_any_sum(gm):
            return any(node.target == torch.ops.aten.sum.default for node in gm.graph.nodes)

        def is_any_digamma(gm):
            return any(node.target == torch.ops.aten.digamma.default for node in gm.graph.nodes)

        def is_any_sigmoid(gm):
            return any(node.target == torch.ops.aten.sigmoid.default for node in gm.graph.nodes)

        def inner(x):
            return torch.sum(x)

        def f(x):
            gm = get_isolated_graphmodule(inner, (x,), {})
            self.assertTrue(is_any_sum(gm))
            return x + torch.randn(x.shape)

        # get_isolated_graphmodule uses make_fx internally; that inner trace
        # shouldn't be traced by the outer make_fx call
        traced = make_fx(f)(torch.randn(3))
        self.assertFalse(is_any_sum(traced))

        # When factory functions are used, they should not be traced
        # by the outer make_fx call
        def inner_with_factory():
            val = torch.tensor(float(1))
            val.add_(2)
            return torch.full((10, 10), val).sum()

        def f1(x):
            gm = get_isolated_graphmodule(inner_with_factory, (), {})
            self.assertTrue(is_any_sum(gm))
            return torch.sigmoid(x)

        def f2(x):
            gm = get_isolated_graphmodule(f1, (x,), {})
            self.assertFalse(is_any_sum(gm))
            self.assertTrue(is_any_sigmoid(gm))
            return torch.digamma(x)

        traced = make_fx(f2)(torch.randn(3))
        self.assertFalse(is_any_sum(traced))
        self.assertFalse(is_any_sigmoid(traced))
        self.assertTrue(is_any_digamma(traced))

        # Verify that nested make_fx calls don't leak factory functions into the
        # outer graph. Verify that `make_fx` itself does not leak its execution.
        def f2(x):
            gm = make_fx(f1)(x)
            self.assertFalse(is_any_sum(gm))
            self.assertTrue(is_any_sigmoid(gm))
            return torch.digamma(x)

        traced = make_fx(f2)(torch.randn(3))
        self.assertFalse(is_any_sum(traced))
        self.assertFalse(is_any_sigmoid(traced))
        self.assertTrue(is_any_digamma(traced))

        # Verify that the `forward` function of a graph module produced as a
        # side effect of an interior `make_fx` is still traced
        def f3(x):
            gm = make_fx(f1)(x)
            self.assertFalse(is_any_sum(gm))
            self.assertTrue(is_any_sigmoid(gm))
            # `gm.forward` is still traced
            return torch.digamma(gm(x))

        traced = make_fx(f3)(torch.randn(3))
        self.assertFalse(is_any_sum(traced))
        self.assertTrue(is_any_sigmoid(traced))
        self.assertTrue(is_any_digamma(traced))

        # Verify interaction with non-ProxyTensor modes
        from torch.testing._internal.logging_tensor import LoggingTensorMode

        def f1_logging(x):
            with LoggingTensorMode():
                gm = get_isolated_graphmodule(inner_with_factory, (), {})
            self.assertTrue(is_any_sum(gm))
            return torch.sigmoid(x)

        def f2_logging(x):
            with LoggingTensorMode(), LoggingTensorMode():
                gm = get_isolated_graphmodule(f1_logging, (x,), {})
            self.assertFalse(is_any_sum(gm))
            self.assertTrue(is_any_sigmoid(gm))
            return torch.digamma(x)

        traced = make_fx(f2_logging)(torch.randn(3))
        self.assertFalse(is_any_sum(traced))
        self.assertFalse(is_any_sigmoid(traced))
        self.assertTrue(is_any_digamma(traced))

        # Verify interaction with another tensor subclass
        # This case currently doesn't work and should raise an error
        # See: https://github.com/pytorch/pytorch/pull/81764#issuecomment-1200472068
        from torch.testing._internal.logging_tensor import LoggingTensor

        def f1_logging_tensor(x):
            gm = get_isolated_graphmodule(inner_with_factory, (), {})
            self.assertTrue(is_any_sum(gm))
            return torch.sigmoid(x)

        def f2_logging_tensor(x):
            x = LoggingTensor(x)
            gm = get_isolated_graphmodule(f1_logging_tensor, (x,), {})
            self.assertFalse(is_any_sum(gm))
            self.assertTrue(is_any_sigmoid(gm))
            return torch.digamma(x)

        traced = make_fx(f2_logging_tensor)(torch.randn(3))
        self.assertFalse(is_any_sum(traced))
        self.assertFalse(is_any_sigmoid(traced))  # this fails, sigmoid is traced with LoggingTensor
        self.assertTrue(is_any_digamma(traced))

# See https://github.com/pytorch/pytorch/issues/97541
def test_empty_like_doesnt_burn_in_defaults(self):
        def f(x):
            return torch.empty_like(x)
        out = make_fx(f)(torch.randn(3))
        self.assertExpectedInline(out.code.strip(), """\
def forward(self, x_1):
    empty_like = torch.ops.aten.empty_like.default(x_1, pin_memory = False); x_1 = None
    return empty_like""")

def test_proxy_tensor_mode_with_decomp_table_preserves_proxy(self):
        def f(x):
            y = x.new_zeros(x.size())
            y.copy_(x)
            return y

        def _new_zeros_decomp(inp, size, dtype=None, layout=None, device=None, pin_memory=None):
            return torch.zeros(size, dtype=inp.dtype, device=inp.device)

        factory_func_decomp = {torch.ops.aten.new_zeros.default: _new_zeros_decomp}

        # When new_zeros() decomposes into torch.zeros(), we expect ProxyTensorMode
        # to still be (re-entrantly) enabled, so that the `torch.zeros()` call
        # returns a ProxyTensor.
        out = make_fx(f, decomposition_table=factory_func_decomp)(torch.ones(2))
        self.assertExpectedInline(out.code, """\



def forward(self, x_1):
    zeros = torch.ops.aten.zeros.default([2], dtype = torch.float32, device = device(type='cpu'), pin_memory = False)
    copy_ = torch.ops.aten.copy_.default(zeros, x_1); zeros = x_1 = None
    return copy_
""")

def test_make_fx_reentrant_dispatch(self):
        def f(x):
            return torch.ops.aten.norm.Scalar(x, 2.0)

        def norm_decomp(x, p=2.0):
            if p != 2.0:
                raise RuntimeError("can't handle with p != 2")
            return torch.sqrt(torch.sum(torch.square(x)))

        decomp = {torch.ops.aten.norm.Scalar: norm_decomp}

        traced = make_fx(f, decomposition_table=decomp, tracing_mode=self.tracing_mode)(torch.rand(3))

        for n in traced.graph.nodes:
            self.assertTrue("square" not in str(n.target))
            self.assertTrue("norm" not in str(n.target))
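
    # The decomp above re-enters dispatch: the sqrt/sum/square calls it makes must
    # themselves be traced (and square further decomposed), which is why neither
    # "norm" nor "square" may appear in the final graph.
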
@unittest.skipIf(not USE_TORCHVISION, "test requires torchvision")
def test_resnet18_backward_trace(self):
        mod = torchvision.models.resnet18()

        # An old version of this test called the module directly. This works
        # for tracing_mode == "real", but for fake tensors, we also have to
        # ensure that the parameters and buffers get wrapped in fake tensors
        # because free fake tensors are not supported. Fortunately functional_call
        # does precisely this for us.
        def f(x, params, buffers):
            for p in params.values():
                p.grad = None
            loss = torch.func.functional_call(mod, {**params, **buffers}, (x,)).sum()
            # I could have done this with the functional API, but there is
            # plenty of exercising this; I want to show mutating API still
            # works
            loss.backward()
            return [p.grad for p in params.values()]

        inp = torch.randn(3, 3, 250, 250)
        self._test(f, [inp, dict(mod.named_parameters()), dict(mod.named_buffers())])

def test_varargs(self):
        def f(*args):
            return sum(args)

        self._test(f, [torch.randn(2), torch.randn(2)])

def test_proxy_tensor(self):
        def f_grad(x):
            val = x.cos().cos().sum()
            return torch.autograd.grad(val, x)

        def f_backward(x):
            val = x.cos().cos().sum()
            val.backward()
            return x.grad

        for f in [f_grad, f_backward]:
            self._test(f, [torch.randn(3, requires_grad=True)])

def test_pickle_issue89626(self):
        import pickle
        x = torch.randn(2)
        make_fx(lambda x: x * 2, tracing_mode=self.tracing_mode)(x)
        pickle.dumps(x)

def test_inplace_metadata(self):
        def f(x):
            x = x.clone()
            x.unsqueeze_(-1)
            assert x.shape[-1] == 1
            return x

        self._test(f, [torch.randn(5)])

def test_mode_tracing_factory_function(self):
        def f(x):
            return x + torch.randn(x.shape)

        # default behavior should trace factory functions
        traced = make_fx(f, tracing_mode=self.tracing_mode)(torch.randn(3))
        self.assertTrue(
            any(
                node.target == aten.randn.default
                for node in traced.graph.nodes
            )
        )

def test_pre_dispatch_functionalization(self):
        def f(x):
            a = FunctionalTensorMode(pre_dispatch=True, export=True)
            with a:
                x_unwrapped = FunctionalTensor.to_functional(x)
                y = torch.matmul(x_unwrapped, x_unwrapped)
                y = y + x_unwrapped
                y.mul_(5)
                y_unwrapped = torch._from_functional_tensor(y.elem)
                return y_unwrapped

        from torch._dispatch.python import enable_python_dispatcher

        with enable_python_dispatcher():
            inp = torch.randn(4, 4)
            gm = make_fx(f, pre_dispatch=True)(inp)

        # TODO actually not decompose
        self.assertExpectedInline(gm.code.strip(), """\
def forward(self, x_1):
    matmul = torch.ops.aten.matmul.default(x_1, x_1)
    add = torch.ops.aten.add.Tensor(matmul, x_1); matmul = x_1 = None
    mul = torch.ops.aten.mul.Tensor(add, 5); add = None
    return mul""")

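    # Note how functionalization rewrote the in-place y.mul_(5) above into the
    # out-of-place aten.mul in the expected graph.
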
def test_pre_dispatch_functionalization_view_op(self):
        def f(x):
            a = FunctionalTensorMode(pre_dispatch=True, export=True)
            with a:
                x_unwrapped = FunctionalTensor.to_functional(x)
                y = torch.matmul(x_unwrapped, x_unwrapped)
                x_unwrapped = x_unwrapped.transpose(1, 0)
                y = y + x_unwrapped
                y = y.view(2, 8)
                y_unwrapped = torch._from_functional_tensor(y.elem)
                return y_unwrapped

        from torch._dispatch.python import enable_python_dispatcher

        with enable_python_dispatcher():
            inp = torch.randn(4, 4)
            gm = make_fx(f, pre_dispatch=True)(inp)

        # TODO actually not decompose
        self.assertExpectedInline(gm.code.strip(), """\
def forward(self, x_1):
    matmul = torch.ops.aten.matmul.default(x_1, x_1)
    transpose = torch.ops.aten.transpose.int(x_1, 1, 0); x_1 = None
    add = torch.ops.aten.add.Tensor(matmul, transpose); matmul = transpose = None
    view = torch.ops.aten.view.default(add, [2, 8]); add = None
    return view""")

def test_val_metadata_mutation(self):
        def f(x):
            y = x.clone()
            y.unsqueeze_(0)
            return y

        traced = make_fx(f, tracing_mode=self.tracing_mode)(torch.randn(3, requires_grad=True))
        self.assertEqual([
            tuple(node.meta['val'].shape)
            for node in traced.graph.nodes
            if 'val' in node.meta
        ], [(3,), (3,), (1, 3)])

def test_make_fx_overloads(self):
        def f(x):
            return x.cos() + torch.randn(x.shape)

        traced = make_fx(f, tracing_mode=self.tracing_mode)(torch.randn(3))

        self.assertTrue(all(isinstance(node.target, torch._ops.OpOverload)
                            for node in traced.graph.nodes if node.op == 'call_function'))

def test_tensor_constants(self):
        def f():
            val = torch.tensor(float('inf'))
            return torch.full((100, 100), val)

        self._test(f, [])

def test_allclose(self):
        def f(a, b):
            return torch.allclose(a, b)

        def test_f():
            make_fx(f, tracing_mode=self.tracing_mode)(
                torch.zeros(3), torch.zeros(3)
            )

        # allclose returns a Python bool computed from tensor data, so tracing it
        # is data-dependent: fake/symbolic modes raise instead of guessing.
        if self.tracing_mode != "real":
            self.assertRaises(DataDependentOutputException, test_f)
        else:
            self.assertRaisesRegex(RuntimeError, "data-dependent", test_f)

def test_constant_proxy_tensor_mut(self):
        def f():
            val = torch.tensor(float(1))
            val.add_(2)
            return torch.full((100, 100), val)

        g = make_fx(f, tracing_mode=self.tracing_mode)()
        self.assertEqual(g(), f())
        # In case we mutated shared state in the g graph!
        self.assertEqual(g(), f())

def test_constant_unbind(self):
        def f():
            val = torch.tensor([2])
            r, = torch.unbind(val, 0)
            return r.item()

        g = make_fx(f, tracing_mode=self.tracing_mode)()
        self.assertEqual(g(), f())

def test_constant_blowup(self):
        def f():
            val = torch.tensor([2])
            # repeat() grows the constant past what constant propagation will keep
            # tracking, so the .item() read becomes data-dependent and must raise.
            blowup = val.repeat(1000)
            return bool(blowup.sum().item() == 2)

        def test_f():
            make_fx(f, tracing_mode=self.tracing_mode)()

        self.assertRaisesRegex(RuntimeError, "data-dependent", test_f)

def test_constant_random(self):
        def f():
            val = torch.tensor([2.0])
            val.normal_()
            return bool(val.item() == 2.1)

        def test_f():
            make_fx(f, tracing_mode=self.tracing_mode)()

        self.assertRaisesRegex(RuntimeError, "data-dependent", test_f)

def test_decomposition_interpreter(self):
        def fn(x):
            return torch.nn.functional.silu(x)

        x = torch.rand((4, 4))
        fx_module = make_fx(fn, tracing_mode=self.tracing_mode, decomposition_table=None)(x)

        found_silu = False
        for n in fx_module.graph.nodes:
            if n.target == torch.ops.aten.silu or n.target == torch.ops.aten.silu.default:
                found_silu = True

        self.assertTrue(found_silu)

        new_graph = torch.fx.Graph()
        silu_decomp_table = {torch.ops.aten.silu.default: decomposition_table[torch.ops.aten.silu.default]}
        DecompositionInterpreter(
            fx_module,
            new_graph=new_graph,
            decomposition_table=silu_decomp_table,
        ).run(x)

        decomposed_module = torch.fx.GraphModule(fx_module, new_graph)

        for n in decomposed_module.graph.nodes:
            self.assertTrue(n.target != torch.ops.aten.silu)
            self.assertTrue(n.target != torch.ops.aten.silu.default)

        self.assertEqual(fx_module(x), decomposed_module(x))

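    # Note: passing decomposition_table directly to make_fx decomposes *during*
    # tracing; DecompositionInterpreter (used above) instead decomposes an
    # already-traced graph as a separate pass over the fx graph.
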
def test_make_fx_model_fwd_bwd(self):
        class Foo(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(5, 5)

            def forward(self, x):
                return self.linear(x).relu()

        model = Foo()

        def f(x, params):
            out = torch.func.functional_call(model, params, x).sum()
            out.backward()
            return list(params.values())

        input = torch.randn(3, 5, requires_grad=True)
        params = dict(model.named_parameters())
        fx_f = make_fx(f, tracing_mode=self.tracing_mode)(input, params)
        # fx may change the order of parameters in the returned list, so compare
        # each traced output against both eager outputs
        self.assertTrue(
            torch.allclose(fx_f(input, params)[0], f(input, params)[0])
            or
            torch.allclose(fx_f(input, params)[0], f(input, params)[1])
        )
        self.assertTrue(
            torch.allclose(fx_f(input, params)[1], f(input, params)[0])
            or
            torch.allclose(fx_f(input, params)[1], f(input, params)[1])
        )

def test_make_fx_model_double_param(self):
        class Emformer(torch.nn.Module):
            def __init__(
                self,
                input_dim: int = 256,
            ) -> None:
                super().__init__()

                self.layer_norm = torch.nn.LayerNorm(input_dim)

            def forward(mod_self, x):  # noqa: B902
                self.assertTrue(isinstance(mod_self.layer_norm.weight, torch.Tensor))
                y = mod_self.layer_norm(x)
                self.assertTrue(isinstance(mod_self.layer_norm.weight, torch.Tensor))
                z = mod_self.layer_norm(y)
                return z

        gm = make_fx(Emformer())(torch.randn(16, 1, 256))
        ops = {n.target for n in gm.graph.nodes if n.op == 'call_function'}
        self.assertEqual(len(ops), 2)

def test_make_fx_model_fwd_bwd_wgtupdate(self):
|
2022-07-25 12:43:17 +00:00
|
|
|
class Foo(torch.nn.Module):
|
2024-08-01 07:22:48 +00:00
|
|
|
def __init__(self) -> None:
|
2022-07-25 12:43:17 +00:00
|
|
|
super().__init__()
|
|
|
|
|
self.linear = torch.nn.Linear(5, 5)
|
|
|
|
|
|
|
|
|
|
def forward(self, x):
|
|
|
|
|
return self.linear(x).relu()
|
|
|
|
|
|
|
|
|
|
model = Foo()
|
|
|
|
|
|
|
|
|
|
def f(args, params, buffers):
            for p in params.values():
                p.grad = None
            if not isinstance(args, Iterable):
                args = [args]
            params_and_buffers = {**params, **buffers}
            out = torch.func.functional_call(model, params_and_buffers, args)
            out.sum().backward()
            return [p - 1e-4 * p.grad for p in params.values()]

        input = torch.randn(3, 5, requires_grad=True)
        params = dict(model.named_parameters())
        buffers = dict(model.named_buffers())
        fx_f = make_fx(f, tracing_mode=self.tracing_mode)(input, params, buffers)
        # fx may change the order of parameters in the returned list, so
        # compare each traced output against both eager outputs; atol is
        # loosened from the 1e-08 default to 1e-03 because the traced and
        # eager runs differ slightly numerically.
        self.assertTrue(
            torch.allclose(fx_f(input, params, buffers)[0], f(input, params, buffers)[0], atol=1e-03)
            or
            torch.allclose(fx_f(input, params, buffers)[0], f(input, params, buffers)[1], atol=1e-03)
        )
        self.assertTrue(
            torch.allclose(fx_f(input, params, buffers)[1], f(input, params, buffers)[0], atol=1e-03)
            or
            torch.allclose(fx_f(input, params, buffers)[1], f(input, params, buffers)[1], atol=1e-03)
        )
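
    # Tensor subclasses (here UnwrapTensor) created inside the traced function
    # should be traceable, both standalone and mixed with plain tensors.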
    def test_trace_subclasses(self):
        def f1(x):
            x = UnwrapTensor(x)
            y = x * 2
            return y

        def f2(x):
            wrapped = UnwrapTensor(x)
            y = x * wrapped
            return y

        inp = [torch.randn(5)]
        self._test(f1, inp)
        self._test(f2, inp)
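
    # A decomposition may return NotImplemented to decline a particular call;
    # that call should then be traced as-is rather than decomposed.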
    def test_partial_decomp(self):
        def f(a, b, c):
            x = torch.addmm(a, b, c)
            y = torch.addmm(a, b, c, beta=2, alpha=1)
            return x + y

        inps = [torch.randn(5, 5), torch.randn(5, 5), torch.randn(5, 5)]
        fx_g = make_fx(f)(*inps)

        def addmm(a, b, c, beta=1, alpha=1):
            if beta == 1 and alpha == 1:
                return NotImplemented
            return beta * a + alpha * (b @ c)

        decomposed_fx = make_fx(f, decomposition_table={aten.addmm.default: addmm})(*inps)

        self.assertEqual(fx_g(*inps), decomposed_fx(*inps))
        self.assertEqual(len([n for n in fx_g.graph.nodes if n.target == aten.addmm.default]), 2)
        self.assertEqual(len([n for n in decomposed_fx.graph.nodes if n.target == aten.addmm.default]), 1)
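
    # Decompositions should also apply to ops on captured (closed-over)
    # tensors, not just on explicit graph inputs.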
    def test_decomp_of_capture(self):
        val = torch.randn(5)

        def f(x):
            return x.t() + val.t()

        def nop(x):
            return x.cos()

        traced = make_fx(f, decomposition_table={torch.ops.aten.t.default: nop})(torch.randn(5))
        self.assertEqual(len([n for n in traced.graph.nodes if n.target == torch.ops.aten.t.default]), 0)
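
    # Autocast caches casted weights; tracing the same function twice under
    # autocast should still yield structurally identical graphs.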
    @unittest.skipIf(not HAS_CUDA, 'CUDA-only test')
    def test_amp_cache(self):
        layer = torch.nn.Conv2d(3, 3, 3).cuda()

        def f(x, w):
            return torch.nn.functional.conv2d(x, w, stride=layer.stride)

        inp = torch.randn(4, 3, 10, 10, device='cuda')
        with torch.autocast('cuda'):
            out_graph = make_fx(f)(inp, layer.weight).graph
            out_graph2 = make_fx(f)(inp, layer.weight).graph

        self.assertEqual(len(out_graph.nodes), len(out_graph2.nodes))
        for a, b in zip(out_graph.nodes, out_graph2.nodes):
            self.assertEqual(a.op, b.op)
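
    # Contiguity and memory-format information must be propagated faithfully
    # to the tensors observed inside the traced function.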
    def test_strides(self):
        def f(x):
            self.assertTrue(x.is_contiguous())
            self.assertFalse(x.is_contiguous(memory_format=torch.channels_last))
            x = x.permute(0, 3, 1, 2)
            self.assertFalse(x.is_contiguous())
            self.assertTrue(x.is_contiguous(memory_format=torch.channels_last))
            return x
        make_fx(f)(torch.randn(2, 3, 4, 5))

        def f(x):
            self.assertTrue(x.is_contiguous())
            y = x[:, 1]
            self.assertFalse(y.is_contiguous())
            y = x[:, ::2]
            self.assertFalse(y.is_contiguous())
            return x.cos()

        make_fx(f)(torch.randn(2, 3, 4, 5))

    def test_pr_86917(self):
        # Tests the issue brought up here https://github.com/pytorch/pytorch/pull/86917#issuecomment-1283155344
        def f(a, b):
            return torch.ops.aten.nll_loss_forward(a, b, None, 1, 10)

        self._test(f, [torch.randn(1, 10), torch.zeros(1, dtype=torch.long)])


class TestGenericProxyTensorReal(TestGenericProxyTensor):
    tracing_mode = "real"


class TestGenericProxyTensorFake(TestGenericProxyTensor):
    tracing_mode = "fake"


class TestGenericProxyTensorSymbolic(TestGenericProxyTensor):
    tracing_mode = "symbolic"


del TestGenericProxyTensor
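

# The private _error_on_data_dependent_ops flag controls whether make_fx
# raises on data-dependent operations such as float(tensor); with the flag
# disabled, the real-mode traces below should complete without error.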
class TestRealProxyTensor(TestCase):
    def test_error_on_data_dependent_ops(self):
        def f():
            x = torch.randn([])
            y = torch.randn([])
            assert torch.allclose(x * y, y * x)
            z = float(x)
            z2 = float(y)

        # Smoke tests
        make_fx(f, _error_on_data_dependent_ops=False)()
        make_fx(f, pre_dispatch=True, _error_on_data_dependent_ops=False)()


class TestFakeProxyTensor(TestCase):
    def test_issue82547(self):
        x = nn.Parameter(torch.randn(3, 3))

        def f():
            return torch.ops.aten.t.default(x)
        self.assertRaisesRegex(Exception, "Please convert all Tensors", lambda: make_fx(f, tracing_mode="fake")())

        class A(torch.Tensor):
            pass

        x = A(torch.randn(3, 3))
        self.assertRaisesRegex(TypeError, "Multiple dispatch failed", lambda: make_fx(f, tracing_mode="fake")())
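
    # Constant tensors created inside the traced function should coexist with
    # fake tensor inputs, and the resulting graph should run on real tensors.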
    def test_use_fake_and_tensor(self):
        def f(x, y):
            z = torch.tensor([2.0, 3.0])
            return x + y + z

        g = make_fx(f, tracing_mode="fake")(torch.randn(2), torch.randn(2))
        x, y = torch.randn(2), torch.randn(2)
        self.assertEqual(g(x, y), f(x, y))
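
    # A free variable that is a fake tensor (created under an ambient
    # FakeTensorMode) should be usable from a real-mode trace.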
    def test_free_fake(self):
        def f(x):
            return torch.add(x, y)

        with FakeTensorMode() as fake_mode:
            y = torch.randn(2)
            make_fx(f, tracing_mode="real")(torch.randn(2))
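
    # When tracing aten._fused_adam.default in fake mode, the fused-op node
    # and the getitem nodes that unpack its outputs should all carry a 'val'
    # meta entry.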
    def test_fused_adam(self):
        # See https://github.com/pytorch/pytorch/issues/99356
        params = [torch.randn(10, 10) for _ in range(10)]
        grads = [torch.randn(10, 10) for _ in range(10)]
        exp_avgs = [torch.randn(10, 10) for _ in range(10)]
        exp_avg_sqs = [torch.randn(10, 10) for _ in range(10)]
        max_exp_avg_sqs = [torch.randn(10, 10) for _ in range(10)]
        state_steps = [torch.tensor(0) for _ in range(10)]

        def fused_adam(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps):
            (new_params, _, _, _, _) = aten._fused_adam.default(
                params,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
                lr=0.1,
                beta1=0.9,
                beta2=0.999,
                weight_decay=0.01,
                eps=1e-8,
                amsgrad=False,
                maximize=False,
            )

            for p, new_p in zip(params, new_params):
                p.copy_(new_p)

            return params

        gm = make_fx(fused_adam, tracing_mode='fake')(
            params,
            grads,
            exp_avgs,
            exp_avg_sqs,
            max_exp_avg_sqs,
            state_steps,
        )
        ensure_ops_have_val = [aten._fused_adam.default, operator.getitem]
        for n in gm.graph.nodes:
            if n.op == "call_function" and n.target in ensure_ops_have_val:
                self.assertIn('val', n.meta)

    def test_alias(self):
        def f(x):
            return torch.ops.aten.alias(x)

        r = str(make_fx(f, tracing_mode="fake")(torch.randn(2)).code).strip()
        # NB: this should not have a detach call
        self.assertExpectedInline(r, """\
def forward(self, x_1):
    alias = torch.ops.aten.alias.default(x_1); x_1 = None
    return alias""")

    def test_meta(self):
        def f(x):
            a = x.cos()
            b = torch.var_mean(a, dim=0)
            c = b * 2
            return c

        out = make_fx(f, tracing_mode="fake")(torch.randn(5, 5))
        for n in out.graph.nodes:
            if n.op == 'output':
                continue
            self.assertTrue('val' in n.meta)


def _get_node(fx_g, cond):
    for n in fx_g.graph.nodes:
        if cond(n):
            return n
    raise AssertionError


def _get_free_symbols(shape_env):
    vars = tuple(shape_env.var_to_val.keys())
    return len([var for var in vars if var not in shape_env.replacements])


def _trace(f, *args):
    inps = [torch.randn(arg) for arg in args]
    return make_fx(f, tracing_mode="symbolic")(*inps)
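
# For example, _trace(lambda x: x * 2, (3, 4)) builds one random (3, 4) input
# and traces the doubling function under symbolic shapes.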


# TODO: Need to test the guards themselves specifically as well
class TestSymbolicTracing(TestCase):
    def _test_dynamic(self, fn, trace_inputs, test_inputs, assert_eq=True):
        """
        Tests fn traced with trace_inputs against test_inputs.
        Also returns the traced module, whose .shape_env carries the guards.
        """
        trace_inputs = [torch.randn(shape) for shape in trace_inputs]
        traced_f = make_fx(fn, tracing_mode="symbolic")(*trace_inputs)
        for input in test_inputs:
            input = [torch.randn(shape) for shape in input]
            rx, ry = traced_f(*input), fn(*input)
            if assert_eq:
                self.assertEqual(rx, ry)
        return traced_f
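
    # DebugInterpreter re-runs a symbolically traced graph on real inputs and
    # checks the recorded sizes/strides against reality, catching both guard
    # violations and incorrect meta kernels.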
    def test_debug_interpreter(self):
        import torch.library
        from torch.library import Library

        foo = Library("foo", "DEF")  # noqa: TOR901
        foo.define("foo(Tensor self) -> Tensor")

        # Operator where meta and cpu disagree on strides
        @torch.library.impl(foo, "foo", "CPU")
        def foo_cpu(x):
            return x.clone().T

        @torch.library.impl(foo, "foo", "Meta")
        def foo_meta(x):
            return x.clone()

        def f(x):
            return torch.ops.foo.foo.default(x)

        gm = make_fx(f, tracing_mode="symbolic")(torch.randn(2, 2))
        from torch._functorch.compilers import DebugInterpreter

        interp = DebugInterpreter(gm)

        # input mismatch is caught (indicates guard problem)
        self.assertRaisesRegex(
            AssertionError, r"3 != 1",
            lambda: interp.run(torch.randn(3, 3).T),
        )

        # Catch the incorrect meta
        self.assertRaisesRegex(
            AssertionError, r"\(3, 1\) != \(1, 3\)",
            lambda: interp.run(torch.randn(3, 3))
        )

    def test_int_input(self):
        def f(x, y):
            return x.view(y)

        r = str(make_fx(f, tracing_mode="symbolic")(torch.empty(3, 4), 12).code).strip()
        self.assertExpectedInline(r, """\
def forward(self, x_1, y_1):
    view = torch.ops.aten.view.default(x_1, [y_1]); x_1 = y_1 = None
    return view""")

    def test_resize_from_zero(self):
        def f(x, y):
            x.resize_(y.size(0))

        r = str(make_fx(f, tracing_mode="symbolic")(torch.empty(0), torch.empty(2)).code).strip()
        self.assertExpectedInline(r, """\
def forward(self, x_1, y_1):
    sym_size_int = torch.ops.aten.sym_size.int(y_1, 0); y_1 = None
    resize_ = torch.ops.aten.resize_.default(x_1, [sym_size_int]); x_1 = sym_size_int = resize_ = None
    return None""")

    def test_broadcast_shapes(self):
        def f(x, y):
            return torch.functional.broadcast_shapes(x.size(), y.size()[0])

        r = str(make_fx(f, tracing_mode="symbolic")(torch.empty(3, 1), torch.empty(5)).code).strip()
        self.assertExpectedInline(r, """\
def forward(self, x_1, y_1):
    sym_size_int = torch.ops.aten.sym_size.int(x_1, 0); x_1 = None
    sym_size_int_1 = torch.ops.aten.sym_size.int(y_1, 0); y_1 = None
    return (sym_size_int, sym_size_int_1)""")
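
    # When symbolic sizes are passed in explicitly as graph inputs, the trace
    # should reuse those symbols directly instead of re-deriving them with
    # sym_size calls (contrast with test_non_deduped_shape below).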
    def test_deduped_shape(self):
        def f(s0, s1, x, y):
            return torch.functional.broadcast_shapes(x.size(), y.size()[0]), torch.empty(x.shape[0])

        x = torch.empty(3, 1)
        y = torch.empty(5)
        from torch.fx.experimental.symbolic_shapes import ShapeEnv
        shape_env = ShapeEnv()

        with FakeTensorMode(shape_env=shape_env, static_shapes=False) as fake_mode:
            x = fake_mode.from_tensor(x)
            y = fake_mode.from_tensor(y)
            r = str(make_fx(f, tracing_mode="real")(x.shape[0], y.shape[0], x, y).code).strip()
            self.assertExpectedInline(r, """\
def forward(self, s0_1, s1_1, x_1, y_1):
    empty = torch.ops.aten.empty.memory_format([s0_1], device = device(type='cpu'), pin_memory = False)
    return ((s0_1, s1_1), empty)""")

    def test_non_deduped_shape(self):
        def f(x, y):
            return torch.functional.broadcast_shapes(x.size(), y.size()[0]), torch.empty(x.shape[0])

        x = torch.empty(3, 1)
        y = torch.empty(5)
        from torch.fx.experimental.symbolic_shapes import ShapeEnv
        shape_env = ShapeEnv()

        with FakeTensorMode(shape_env=shape_env, static_shapes=False) as fake_mode:
            x = fake_mode.from_tensor(x)
            y = fake_mode.from_tensor(y)
            r = str(make_fx(f, tracing_mode="real")(x, y).code).strip()
            self.assertExpectedInline(r, """\
def forward(self, x_1, y_1):
    sym_size_int = torch.ops.aten.sym_size.int(x_1, 0); x_1 = None
    sym_size_int_1 = torch.ops.aten.sym_size.int(y_1, 0); y_1 = None
    empty = torch.ops.aten.empty.memory_format([sym_size_int], device = device(type='cpu'), pin_memory = False)
    return ((sym_size_int, sym_size_int_1), empty)""")
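
    # The data-dependent assert below should turn into a shape guard
    # (x.shape[0] <= 19) that eval_guards can then check against new inputs.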
    def test_unary(self):
        def f(x):
            assert x.shape[0] < 20
            return x.cos()

        test_inputs = []
        test_inputs.append([(2, 5)])
        test_inputs.append([(6, 8)])
        gm = self._test_dynamic(f, [(3, 4)], test_inputs)
        self.assertTrue(eval_guards(gm, torch.randn(4, 5)))
        self.assertEqual(repr(bind_symbols(gm, torch.randn(4, 5))), "{s0: 4, s1: 5}")
        self.assertFalse(eval_guards(gm, torch.randn(25, 5)))
        self.assertExpectedInline(show_guards(gm), """L['x'].size()[0] <= 19""")

    def test_repeat_interleave(self):
        def f(src_tokens, beam_size_src):
            return src_tokens.repeat_interleave(beam_size_src.size(0), 0)

        prompt_size = 64
        vocab_size = 64
        batch_size = 4
        src_tokens = torch.randint(1, vocab_size, (batch_size, prompt_size))
        gm = make_fx(f, tracing_mode="symbolic")(src_tokens, torch.randn(5))
        self.assertEqual(len(gm.shape_env.guards), 0)

    def test_non_symint_size_spec(self):
        # this isn't really a proxy tensor test, but it's the most convenient
        # way to get a fake tensor with symbolic sizes
        def f(x):
            torch._C._non_sym_sizes(x)
            return x + 1

        x = torch.randn(2, 3)
        make_fx(f, tracing_mode="symbolic")(x)

    # https://github.com/pytorch/pytorch/issues/108195
    def test_symbolic_repeat_interleave(self):
        def f(y, x):
            return y.repeat_interleave(x, dim=1)

        y = torch.tensor([[1, 2], [3, 4]])
        x = torch.tensor([2, 3])
        r = str(make_fx(f, tracing_mode="symbolic")(y, x).code).strip()
        self.assertExpectedInline(r, """\
def forward(self, y_1, x_1):
    repeat_interleave = torch.ops.aten.repeat_interleave.Tensor(x_1); x_1 = None
    index_select = torch.ops.aten.index_select.default(y_1, 1, repeat_interleave); y_1 = repeat_interleave = None
    return index_select""")
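
    # Unbacked ints from .item() flow through size arithmetic: viewing the
    # concatenation of a*stride and b*stride elements as (a + b, stride)
    # exercises divisibility (mod/gcd) reasoning on unbacked symbols.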
    def test_mod_gcd_unbacked(self):
        def f(_a, _b, _stride):
            a = _a.item()
            b = _b.item()
            stride = _stride.item()
            torch._check_is_size(a)
            torch._check_is_size(b)
            torch._check_is_size(stride)
            ta = torch.randn(a * stride)
            tb = torch.randn(b * stride)
            r = torch.cat([ta, tb])
            return r.view(a + b, stride)

        _a = torch.tensor(30)
        _b = torch.tensor(20)
        _stride = torch.tensor(10)
        r = str(make_fx(f, tracing_mode="symbolic")(_a, _b, _stride).code).strip()
        self.assertExpectedInline(r, """\
def forward(self, _a_1, _b_1, _stride_1):
    _local_scalar_dense = torch.ops.aten._local_scalar_dense.default(_a_1); _a_1 = None
    _local_scalar_dense_1 = torch.ops.aten._local_scalar_dense.default(_b_1); _b_1 = None
    _local_scalar_dense_2 = torch.ops.aten._local_scalar_dense.default(_stride_1); _stride_1 = None
    mul = _local_scalar_dense * _local_scalar_dense_2
    randn = torch.ops.aten.randn.default([mul], device = device(type='cpu'), pin_memory = False); mul = None
    mul_1 = _local_scalar_dense_1 * _local_scalar_dense_2
    randn_1 = torch.ops.aten.randn.default([mul_1], device = device(type='cpu'), pin_memory = False); mul_1 = None
    cat = torch.ops.aten.cat.default([randn, randn_1]); randn = randn_1 = None
    add = _local_scalar_dense + _local_scalar_dense_1; _local_scalar_dense = _local_scalar_dense_1 = None
    view = torch.ops.aten.view.default(cat, [add, _local_scalar_dense_2]); cat = add = _local_scalar_dense_2 = None
    return view""")

    def test_cumsum_unbacked(self):
        def f(x):
            y = x.item()
            z = torch.randn((3, y, 3))
            return z.cumsum(0)

        r = str(make_fx(f, tracing_mode="symbolic")(torch.tensor([5])).code).strip()
        self.assertExpectedInline(
            r, """\
def forward(self, x_1):
    _local_scalar_dense = torch.ops.aten._local_scalar_dense.default(x_1); x_1 = None
    randn = torch.ops.aten.randn.default([3, _local_scalar_dense, 3], device = device(type='cpu'), pin_memory = False); _local_scalar_dense = None
    cumsum = torch.ops.aten.cumsum.default(randn, 0); randn = None
    return cumsum"""  # noqa: B950
        )
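
    # Supplying output_size= lets repeat_interleave trace with the
    # user-provided .item() value as the output length instead of allocating
    # a fresh unbacked symbol for it.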
    def test_repeat_interleave_unbacked_output_size(self):
        def f(x, y):
            s = x.sum().item()
            return y.repeat_interleave(x, dim=0, output_size=s)

        r = str(make_fx(f, tracing_mode="symbolic")(torch.tensor([2, 3]), torch.randn(2)).code).strip()
        self.assertExpectedInline(
            r, """\
def forward(self, x_1, y_1):
    sum_1 = torch.ops.aten.sum.default(x_1)
    _local_scalar_dense = torch.ops.aten._local_scalar_dense.default(sum_1); sum_1 = None
    repeat_interleave = torch.ops.aten.repeat_interleave.Tensor(x_1, output_size = _local_scalar_dense); x_1 = _local_scalar_dense = None
    index_select = torch.ops.aten.index_select.default(y_1, 0, repeat_interleave); y_1 = repeat_interleave = None
    return index_select"""  # noqa: B950
        )

    def test_arange_unbacked_output_size(self):
        def f(x):
            return torch.arange(0, x)

        r = str(make_fx(f, tracing_mode="symbolic")(torch.tensor(10)).code).strip()
        self.assertExpectedInline(
            r, """\
def forward(self, x_1):
    _local_scalar_dense = torch.ops.aten._local_scalar_dense.default(x_1); x_1 = None
    arange = torch.ops.aten.arange.start(0, _local_scalar_dense, device = device(type='cpu'), pin_memory = False); _local_scalar_dense = None
    return arange"""  # noqa: B950
        )

    def test_adv_index_batch(self):
        def f(src_tokens):
            bsz, src_len = src_tokens.size()[:2]
            start_step = src_tokens.shape[1]
            beam_size = 1
            generate_size = 64
            max_len = src_len + generate_size
            tokens = torch.zeros(bsz * beam_size, max_len).to(src_tokens).long().fill_(0)
            tokens[:, :start_step] = src_tokens.repeat_interleave(beam_size, 0)
            return tokens

        prompt_size = 64
        vocab_size = 64
        batch_size = 4
        src_tokens = torch.randint(1, vocab_size, (batch_size, prompt_size))
        gm = make_fx(f, tracing_mode="symbolic")(src_tokens)
        # Guards to rule out batch_size == sys.maxsize (wobbling between 2 and
        # 1 ok)
        self.assertEqual(len(gm.shape_env.guards), 1)

    @unittest.skipIf(not HAS_CUDA, 'CUDA-only test')
    def test_cpu_scalar_cuda(self):
        # Extracted from wave2vec2
        def f(a, b):
            return (a * b) @ b

        r = str(
            make_fx(f, tracing_mode="symbolic")(
                torch.tensor(1.0), torch.randn(2, 2, device='cuda')
            ).code
        ).strip()
        self.assertExpectedInline(r, """\
def forward(self, a_1, b_1):
    mul = torch.ops.aten.mul.Tensor(a_1, b_1); a_1 = None
    mm = torch.ops.aten.mm.default(mul, b_1); mul = b_1 = None
    return mm""")

    def test_binary_broadcast(self):
        def f(a, b):
            c = a * b
            return c

        test_inputs = []
        test_inputs.append([(1, 5), (3, 1)])
        test_inputs.append([(1, 4), (4, 1)])
        shape_env = self._test_dynamic(f, [(1, 2), (3, 1)], test_inputs).shape_env
        assert len(shape_env.guards) == 0
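
    # Size arithmetic (a.shape[0] * 2) must itself be traced into the graph
    # as sym_size/mul nodes so that the output size stays dynamic.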
    def test_multiply_shape(self):
        def f(a):
            return torch.empty(a.shape[0] * 2)

        r = str(make_fx(f, tracing_mode="symbolic")(torch.empty(4)).code).strip()
        self.assertExpectedInline(r, """\
def forward(self, a_1):
    sym_size_int = torch.ops.aten.sym_size.int(a_1, 0); a_1 = None
    mul = sym_size_int * 2; sym_size_int = None
    empty = torch.ops.aten.empty.memory_format([mul], device = device(type='cpu'), pin_memory = False); mul = None
    return empty""")
|
def test_item(self):
    def f(a):
        r = a.item()
        return r * a

    r = str(make_fx(f, tracing_mode="symbolic")(torch.randn(1)).code).strip()
    self.assertExpectedInline(r, """\
def forward(self, a_1):
    _local_scalar_dense = torch.ops.aten._local_scalar_dense.default(a_1)
    mul = torch.ops.aten.mul.Tensor(a_1, _local_scalar_dense); a_1 = _local_scalar_dense = None
    return mul""")

def test_tensor_symfloat(self):
    def f(a):
        r = torch.tensor(a.size(0) ** 2.0)
        assert r.dtype is torch.float
        return r

    gm = make_fx(f, tracing_mode="symbolic")(torch.randn(2))
    r = str(gm.code).strip()
    # NB: this specializes, which is fine; the point is to make sure the
    # dtype inference is correct
    self.assertExpectedInline(r, """\
def forward(self, a_1):
    _tensor_constant0 = self._tensor_constant0
    lift_fresh_copy = torch.ops.aten.lift_fresh_copy.default(_tensor_constant0); _tensor_constant0 = None
    return lift_fresh_copy""")
    self.assertEqual(gm._tensor_constant0, torch.tensor(4.0))

Introduce branchless implementations of TensorImpl bools (#94473)
This is the main payload of this diff stack. With it, we are able to construct a 1D tensor from an unbacked SymInt with guards that are equivalent to asserting that the size is non-negative (which makes sense!). To get here, I had to arrange for all of the guards that occur when doing contiguity tests to be lazy. This was done by writing non-branching implementations of each of the tests in the `sympy_is_contiguous` etc. functions, and then using those implementations when we don't branch.
I also had to do some bug fixes for `is_non_overlapping_and_dense`, as unbacked SymInts were largely untested previously (and that was the only time you would actually hit the Python version of the code). In particular, we now consistently pass separate sizes/strides lists into each of the boolean computation functions (and only pack them into a single argument list when going to Sympy, which doesn't support lists of variables in custom functions.)
Finally, to actually test that this is doing something, I add a simple assumptions system from https://github.com/pytorch/pytorch/pull/90985 and use this to get the end to end test test_item_to_constructor passing. Soon, I intend to replace this with a range analysis system which will be used for assumptions in the short term. (We still might use Z3, but for all the stray assumptions I've seen range analysis will be good enough.)
Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/94473
Approved by: https://github.com/albanD
2023-02-15 14:37:23 +00:00
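A hedged sketch of the "branchless" idea described above, with illustrative names rather than PyTorch's actual `sympy_is_contiguous` helpers: instead of early-returning per dimension, which would force a guard on every symbolic comparison, the predicate is accumulated as a single boolean expression that a symbolic backend can leave unevaluated.

# Hypothetical branchless contiguity predicate. With plain ints this
# returns a bool; with symbolic sizes/strides the same expression can
# stay symbolic instead of branching (and hence guarding) per dim.
def is_contiguous_branchless(sizes, strides):
    ok = True
    expected_stride = 1
    for size, stride in reversed(list(zip(sizes, strides))):
        # size-1 dims are contiguous regardless of their stride
        ok = ok & ((size == 1) | (stride == expected_stride))
        expected_stride = expected_stride * size
    return ok

assert is_contiguous_branchless([2, 3], [3, 1])
assert not is_contiguous_branchless([2, 3], [1, 1])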
def test_item_to_constructor(self):
    def f(a):
        r = a.item()
        return torch.empty(r)

    r = str(make_fx(f, tracing_mode="symbolic")(torch.randint(5, (1,))).code).strip()
    self.assertExpectedInline(
        r, """\
def forward(self, a_1):
    _local_scalar_dense = torch.ops.aten._local_scalar_dense.default(a_1); a_1 = None
    empty = torch.ops.aten.empty.memory_format([_local_scalar_dense], device = device(type='cpu'), pin_memory = False); _local_scalar_dense = None
    return empty""" # noqa: B950
    )

def test_setitem_symint(self):
    # from moco
    # https://github.com/pytorch/pytorch/issues/101939
    def f(x):
        x[0] = x.size(0)
        return x

    r = str(make_fx(f, tracing_mode="symbolic")(torch.randn(10)).code).strip()
    self.assertExpectedInline(
        r, """\
def forward(self, x_1):
    sym_size_int = torch.ops.aten.sym_size.int(x_1, 0)
    scalar_tensor = torch.ops.aten.scalar_tensor.default(sym_size_int, dtype = torch.float32, layout = torch.strided, device = device(type='cpu')); sym_size_int = None
    select = torch.ops.aten.select.int(x_1, 0, 0)
    copy_ = torch.ops.aten.copy_.default(select, scalar_tensor); select = scalar_tensor = copy_ = None
    return x_1""" # noqa: B950
    )

def test_dynamic_pointwise_scalar(self):
    def f(gravity, mask):
        gravity[mask, 0] = gravity[mask, 0] * -1

    r = str(make_fx(f, tracing_mode="symbolic")(
        torch.randn((12, 4)),
        torch.randint(0, 2, (12,), dtype=torch.bool)
    ).code).strip()
    self.assertExpectedInline(r, """\
def forward(self, gravity_1, mask_1):
    select = torch.ops.aten.select.int(gravity_1, 1, 0)
    index = torch.ops.aten.index.Tensor(select, [mask_1]); select = None
    mul = torch.ops.aten.mul.Tensor(index, -1); index = None
    select_1 = torch.ops.aten.select.int(gravity_1, 1, 0); gravity_1 = None
    index_put_ = torch.ops.aten.index_put_.default(select_1, [mask_1], mul); select_1 = mask_1 = mul = index_put_ = None
    return None""")

def test_reflect_r_over_x(self):
    def reflect_R_over_x(R):
        reflect = torch.eye(3, device=R.device)
        reflect[0, 0] = -1
        return reflect @ R @ reflect

    def f(crop_camera, mask):
        crop_camera[mask] = reflect_R_over_x(crop_camera[mask])

    r = str(make_fx(f, tracing_mode="symbolic")(
        torch.randn((12, 3, 3)),
        torch.randint(0, 2, (12,), dtype=torch.bool)
    ).code).strip()
    self.assertExpectedInline(r, """\
def forward(self, crop_camera_1, mask_1):
    index = torch.ops.aten.index.Tensor(crop_camera_1, [mask_1])
    eye = torch.ops.aten.eye.default(3, device = device(type='cpu'), pin_memory = False)
    _tensor_constant0 = self._tensor_constant0
    lift_fresh_copy = torch.ops.aten.lift_fresh_copy.default(_tensor_constant0); _tensor_constant0 = None
    select = torch.ops.aten.select.int(eye, 0, 0)
    select_1 = torch.ops.aten.select.int(select, 0, 0); select = None
    copy_ = torch.ops.aten.copy_.default(select_1, lift_fresh_copy); select_1 = lift_fresh_copy = copy_ = None
    sym_size_int = torch.ops.aten.sym_size.int(index, 0)
    expand = torch.ops.aten.expand.default(eye, [sym_size_int, 3, 3])
    view = torch.ops.aten.view.default(expand, [sym_size_int, 3, 3]); expand = None
    sym_size_int_1 = torch.ops.aten.sym_size.int(crop_camera_1, 1)
    sym_size_int_2 = torch.ops.aten.sym_size.int(crop_camera_1, 2)
    expand_1 = torch.ops.aten.expand.default(index, [sym_size_int, sym_size_int_1, sym_size_int_2]); index = None
    view_1 = torch.ops.aten.view.default(expand_1, [sym_size_int, sym_size_int_1, sym_size_int_2]); expand_1 = sym_size_int_1 = sym_size_int_2 = None
    bmm = torch.ops.aten.bmm.default(view, view_1); view = view_1 = None
    view_2 = torch.ops.aten.view.default(bmm, [sym_size_int, 3, 3]); bmm = None
    mul_4 = sym_size_int * 3
    view_3 = torch.ops.aten.view.default(view_2, [mul_4, 3]); view_2 = mul_4 = None
    mm = torch.ops.aten.mm.default(view_3, eye); view_3 = eye = None
    _unsafe_view = torch.ops.aten._unsafe_view.default(mm, [sym_size_int, 3, 3]); mm = sym_size_int = None
    index_put_ = torch.ops.aten.index_put_.default(crop_camera_1, [mask_1], _unsafe_view); crop_camera_1 = mask_1 = _unsafe_view = index_put_ = None
    return None""") # noqa: B950

def test_unbacked_slice(self):
    def f(x, m):
        x = x[m]
        return x[slice(None, None, None), slice(None, None, None), slice(None, 2, None)]

    make_fx(f, tracing_mode="symbolic")(
        torch.randn((12, 3, 3)),
        torch.randint(0, 2, (12,), dtype=torch.bool)
    )

@unittest.skipIf(not USE_TORCHVISION, "test requires torchvision")
def test_unbacked_batch_resnet(self):
    mod = torchvision.models.resnet18()

    def f(x, mask, params, buffers):
        for p in itertools.chain([x, mask], params.values(), buffers.values()):
            for s in p.shape:
                guard_int(s)
        x = x[mask]
        torch._check(x.shape[0] >= 1)
        for p in params.values():
            p.grad = None
        return torch.func.functional_call(mod, {**params, **buffers}, (x,)).sum()

    make_fx(f, tracing_mode="symbolic")(
        torch.randn(3, 3, 250, 250),
        torch.randint(0, 2, (3,), dtype=torch.bool),
        dict(mod.named_parameters()),
        dict(mod.named_buffers()),
    )

def test_boolean_index(self):
    def f(images, handedness, valid):
        images = images[valid]
        handedness = handedness[valid]
        right_hand_mask = handedness == 1
        images[right_hand_mask] = images[right_hand_mask].flip(-1)

    r = str(make_fx(f, tracing_mode="symbolic")(
        torch.randint(0, 256, (512, 1, 96, 96)),
        torch.randint(0, 1, (512,)),
        torch.randint(0, 2, (512,), dtype=torch.bool)
    ).code).strip()
    self.assertExpectedInline(r, """\
def forward(self, images_1, handedness_1, valid_1):
    index = torch.ops.aten.index.Tensor(images_1, [valid_1]); images_1 = None
    index_1 = torch.ops.aten.index.Tensor(handedness_1, [valid_1]); handedness_1 = valid_1 = None
    eq = torch.ops.aten.eq.Scalar(index_1, 1); index_1 = None
    index_2 = torch.ops.aten.index.Tensor(index, [eq])
    flip = torch.ops.aten.flip.default(index_2, [-1]); index_2 = None
    index_put_ = torch.ops.aten.index_put_.default(index, [eq], flip); index = eq = flip = index_put_ = None
    return None""")

def test_neg_shape(self):
    def f(a):
        return torch.empty(-a.shape[0] + 10)

    r = str(make_fx(f, tracing_mode="symbolic")(torch.empty(2)).code).strip()
    self.assertExpectedInline(r, """\
def forward(self, a_1):
    sym_size_int = torch.ops.aten.sym_size.int(a_1, 0); a_1 = None
    neg = -sym_size_int; sym_size_int = None
    add = neg + 10; neg = None
    empty = torch.ops.aten.empty.memory_format([add], device = device(type='cpu'), pin_memory = False); add = None
    return empty""")

def test_unbacked_unification(self):
    def f(x, y):
        z = torch.zeros(x.item())
        return z + y

    r = str(make_fx(f, tracing_mode="symbolic")(torch.tensor(10), torch.randn(10)).code).strip()
    self.assertExpectedInline(r, """\
def forward(self, x_1, y_1):
    _local_scalar_dense = torch.ops.aten._local_scalar_dense.default(x_1); x_1 = None
    zeros = torch.ops.aten.zeros.default([_local_scalar_dense], device = device(type='cpu'), pin_memory = False); _local_scalar_dense = None
    add = torch.ops.aten.add.Tensor(zeros, y_1); zeros = y_1 = None
    return add""") # noqa: B950

def test_reshape_divisibility_unbacked(self):
    def f(x):
        i0 = x.item()
        r = torch.zeros(i0, 4, 20)
        r = r.transpose(2, 1)
        return r.reshape(-1, 80)

    make_fx(f, tracing_mode="symbolic")(torch.tensor(24))

Allow inferring divisibility on unbacked SymInts and do replacement trick (#113165)
We want something like torch.empty(i0, 12).view(4, -1, 12) to work. Right now, it chokes on guards on data-dependent accesses. It turns out we are very close to having it work based on experiments in https://github.com/pytorch/pytorch/issues/112347 if we do the replacement trick, setting i0 = i1 * 4 to explicitly encode the divisibility; this is good enough for Sympy to be able to handle the rest.
There are two parts to this PR.
* First, we must discover that there is this divisibility constraint. The place where this happens on view is in `infer_size`; however, we are unable to discover the modulus test with `expect_true` because the condition is currently written with a Python boolean operator that forces guarding too early: `numel == newsize or (dim is not None and newsize > 0 and numel % newsize == 0)`. We rewrite this into an equivalent version which tests on dim being None or not first, before performing individual tests. The main nontrivial reasoning here is that I must show that my set of tests in the `dim is None` branch are sufficient when `numel == newsize`. However, if `numel == newsize`, then the modulus must pass. Thus this is equivalent.
* Given the modifications to `infer_size`, this suffices to produce a runtime assert `Eq(Mod(192*i0, 2304), 0)`. Now we must simply turn this into the replacement automatically. I wasn't really sure how to use Sympy to do this for me, so I just manually pattern matched for this particular expression form, and if it exists do the replacements.
Note that this is kind of only useful for export, because Inductor chokes on views involving unbacked SymInts. That will be follow-up work.
Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/113165
Approved by: https://github.com/lezcano, https://github.com/aakhundov
2023-11-10 18:19:35 +00:00
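A small sympy sketch of the replacement trick described above (symbol names are illustrative): once the runtime assert Eq(Mod(192*i0, 2304), 0) is known, substituting i0 = 12*i1 for a fresh i1 encodes the divisibility explicitly, and sympy can discharge the rest.

import sympy

i0 = sympy.Symbol("i0", integer=True, positive=True)
expr = sympy.Mod(192 * i0, 2304)  # the deferred assert from .view(12, -1, 192)
print(expr)                        # stays symbolic: Mod(192*i0, 2304)

# The replacement trick: i0 = 12*i1 bakes in the divisibility.
i1 = sympy.Symbol("i1", integer=True, positive=True)
print(expr.subs(i0, 12 * i1))      # 0 -- the assert now simplifies away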
def test_view_divisibility_unbacked(self):
    def f(x):
        i0 = x.item()
        r = torch.zeros(i0, 192)
        return r.view(12, -1, 192)

    make_fx(f, tracing_mode="symbolic")(torch.tensor(24))

Teach FakeTensor to fill in item_memo when converting scalar CPU tensor (#126245)
This PR requires a little justification, but let's start with what it does first:
1. When you have a 0d CPU scalar int64/float64 tensor input to a graph, we will preallocate a backed SymInt/SymFloat corresponding to what you would get if you call item() on this tensor. This means you can freely change your input to be a Python int/float or a Tensor with an item() call and end up with exactly the same level of expressivity (specifically, you can guard on the internal SymInt/SymFloat no matter what). By default, the source of the backed SymInt/SymFloat is `L['tensor'].item()`, but if you have promoted a float input into a Tensor, we will cancel out `torch.as_tensor(L['float']).item()` into just `L['float']`.
2. We switch wrap_symfloat to use this, instead of hand crafting the new SymNodeVariable. Everything works out, except that we carefully pass the item() result to tracked fakes (and not the fake Tensor argument)
OK, so why do this at all? There is some marginal benefit where now some item() calls on scalar inputs can be guarded on, but IMO this is a pretty marginal benefit, and if it was the only reason, I wouldn't do this. The real reason for this is that I need to be able to propagate fake tensors through the graphs that are produced by Dynamo, and if I am doing the old custom wrap_symfloat logic, there's no way I can do this, because ordinarily an item() call will cause an unbacked SymInt when I reallocate.
The other obvious way to solve the problem above is to make a HOP alternative to item() that "bakes in" the backed SymInt it's supposed to return. But this strategy seems more parsimonious, and it does have the marginal benefit I mentioned above. The main downside is that what I have to do next is make it so that when I run tensor computation, I also apply the equivalent operations to the SymInt/SymFloat as well. That's next PR.
Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/126245
Approved by: https://github.com/eellison
ghstack dependencies: #126637
2024-05-21 20:16:23 +00:00
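A hedged end-to-end illustration of the behavior described above (the observable effect, not the implementation; assumes a build where this change has landed): with a 0d CPU scalar tensor input, .item() inside the compiled region produces a backed symbol, so branching on it no longer raises a data-dependent guard error.

import torch

@torch.compile(fullgraph=True)
def f(x, y):
    s = x.item()  # backed SymFloat for a 0d CPU scalar input, per the PR above
    return y * s if s > 0 else y - s  # guarding on s is permitted

print(f(torch.tensor(2.0), torch.randn(3)))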
Don't intersect when clamping for size oblivious (#123675)
Fixes https://github.com/pytorch/pytorch/issues/123651
Previously, when we performed a size oblivious test, we would only modify the lower bound, e.g., if we knew something had range `[0, 100]`, the size oblivious test would do `[2, 100]`. But what if your original range was `[0, 1]`? Naively intersecting this with `[2, sympy.oo]` would result in an empty set: that's a big no-no. And in general, this intersection is kind of questionable: if your original range was `[0, 2]`, do we really want to assume that this quantity is exactly equal to 2 in the size oblivious test?
So here's an idea: when we're doing a size oblivious test, just forget about the max bound entirely. The idea is that the max bound probably wasn't actually helping you discharge the size oblivious test (because size oblivious tests are all about "well, if we can assume the thing isn't zero or one, we know what the static value is"). So you can use the max bound OR you can use the size oblivious bound, but you're not allowed to use both at the same time. (It doesn't actually seem necessary to use the max bound, but it would be easy to permit this without using the size oblivious refinement.)
Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123675
Approved by: https://github.com/PaulZhang12
2024-04-10 19:27:02 +00:00
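A toy sketch of the interval logic described above, with a hypothetical helper name (this is not PyTorch's internal representation): the size-oblivious range keeps max(lower, 2) as the new lower bound and deliberately drops the upper bound, instead of intersecting with [2, oo), which would empty out a range like [0, 1].

import sympy

def size_oblivious_range(lower, upper):
    # The old behavior amounted to sympy.Interval(sympy.Max(lower, 2), upper),
    # which collapses to EmptySet for the range [0, 1]. The new behavior
    # ignores the upper bound entirely.
    return sympy.Interval(sympy.Max(lower, 2), sympy.oo)

print(sympy.Interval(2, 1))        # EmptySet -- the old failure mode
print(size_oblivious_range(0, 1))  # Interval(2, oo)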
@unittest.skipIf(not HAS_CUDA, 'CUDA-only test')
def test_view_divisibility_unbacked_relatively_prime(self):
    # See https://github.com/pytorch/pytorch/issues/123651
    def f(x):
        i0 = x.item()
        torch._check_is_size(i0)
        # To trigger the original issue, the max bound has to
        # be chosen such that 448 / 447 < 2 (which it is.)
        torch._check(i0 <= 448)
        return torch.zeros(256 * i0).view(-1, 447)

    make_fx(f, tracing_mode="symbolic")(torch.tensor(256 * 447, device="cuda"))

def test_unbacked_unify_guard(self):
    def f(x, y):
        z = torch.zeros(x.item())
        torch._check(z.size(0) == y.size(0))  # refines i0 = s0
        if z.size(0) == 4:
            return y * 2
        else:
            return y + 2

    r = str(make_fx(f, tracing_mode="symbolic")(torch.tensor(10), torch.randn(10)).code).strip()
    self.assertExpectedInline(r, """\
def forward(self, x_1, y_1):
    _local_scalar_dense = torch.ops.aten._local_scalar_dense.default(x_1); x_1 = None
    zeros = torch.ops.aten.zeros.default([_local_scalar_dense], device = device(type='cpu'), pin_memory = False); _local_scalar_dense = zeros = None
    add = torch.ops.aten.add.Tensor(y_1, 2); y_1 = None
    return add""") # noqa: B950

@unittest.skipIf(not HAS_CUDA, 'CUDA-only test')
@unittest.expectedFailure
def test_unbacked_unify_guard_transitivity(self):
    def f(x1, x2, y):
        z1 = torch.zeros(x1.item())
        z2 = torch.zeros(x2.item())
        torch._check(z1.size(0) == z2.size(0))  # refines i0 = i1
        torch._check(z2.size(0) == y.size(0))  # refines i0 = s0
        if z1.size(0) == 4:
            return y * 2
        else:
            return y + 2

    gm = make_fx(f, tracing_mode="symbolic")(
        torch.tensor(10, device="cuda"),
        torch.tensor(10, device="cuda"),
        torch.randn(10, device="cuda")
    )

Codegen runtime asserts in Inductor (#124874)
This completely subsumes https://github.com/pytorch/pytorch/pull/120816
This makes use of the unbacked binding machinery to teach Inductor how to generate deferred runtime asserts directly. There is some back story about why I did it this way, let me explain.
Previously, our strategy for generating runtime asserts was that Dynamo would insert them into the FX graph after finishing tracing, and we would attempt to code generate them based on the FX graph. This is a good strategy for export, where we immediately export the graph. However, this strategy was afflicted by problems in eager, where we reuse the same ShapeEnv as before. In particular, on subsequent graph passes, we would immediately turn all of these assertions into noops, because when we evaluated their expressions, we would see that because we had a deferred runtime assert in the ShapeEnv, we know "oh, of course this expression is True" already. Oops!
So, with this PR, we take the attitude that as long as the ShapeEnv sticks around, the ShapeEnv's list of deferred runtime asserts is the source of truth, and we don't put anything in the graph. So we just need to decide when to actually generate asserts, and the place I picked was Inductor lowering, since we already have an AssertScalar buffer concept, and so I just need to insert them at this point. AssertScalar also uses raw sympy.Expr rather than SymInt/Bool, so it is easier to prevent unrestricted simplification at this point.
There are a few things jumbled together in this PR. I can split them if you want, but some of the changes are before I changed my strategy, but they're useful changes anyway.
**torch/_dynamo/output_graph.py** and **torch/_inductor/lowering.py** - Here, we stop putting deferred runtime asserts in the graph. I also have to make sure we don't DCE unused symbol arguments; we're going to get some goofy graph arguments this way, will be good to restore that optimization eventually. We also just disable codegen for `_assert_scalar` entirely; we assume that ShapeEnv will be good enough to capture all of these.
**torch/_inductor/codegen/wrapper.py** and **torch/_inductor/ir.py** - Add a way to codegen sizevars without forcing simplification
**torch/_inductor/graph.py** - The main logic. Our strategy is to interpose in the same place we are testing that unbacked SymInts are properly showing up in lowered code. The logic is directly analogous to the logic in the existing insert deferred runtime asserts FX pass, but it's simpler because sympy expressions can be directly stored on inductor IR nodes.
**torch/fx/experimental/symbolic_shapes.py** - For extra safety, we have a way of freezing runtime asserts, so that if you try to add more we error. This prevents us from adding runtime asserts after we've done lowering. There's a funny interaction with backwards which there's a comment for in graph.py
**torch/fx/passes/runtime_assert.py** - This is not really needed in this PR, but I rewrote the runtime assert logic to use unbacked_bindings rather than inferring it by looking for unbacked SymInts. Now, keypaths are translated into FX node accessors. Unfortunately, I couldn't delete the old inference code, because you still need it to find backed SymInts from arguments (as this pass may be used on graphs which don't explicitly bind all their shape variables as arguments). There are some new tests exercising this.
TODO: I think we need to generate asserts for replacements too. This is a preexisting problem that the old FX pass had too.
Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124874
Approved by: https://github.com/jansel
ghstack dependencies: #124864
2024-04-29 05:13:31 +00:00
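A minimal sketch of the flow this enables, using the same insert_deferred_runtime_asserts pass the test code below exercises (the traced function here is illustrative): torch._check calls are recorded on the ShapeEnv rather than in the graph, and the pass materializes them as explicit assert nodes on demand.

import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts

def f(x):
    i0 = x.item()          # unbacked SymInt
    torch._check(i0 >= 2)  # recorded on the ShapeEnv, not as a graph node
    return torch.zeros(i0)

gm = make_fx(f, tracing_mode="symbolic")(torch.tensor(4))
insert_deferred_runtime_asserts(gm, gm.shape_env, "example")
gm.recompile()
print(gm.code)  # now carries explicit runtime-assert nodes for i0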
    insert_deferred_runtime_asserts(gm, gm.shape_env, "test")
    gm.recompile()
    r = str(gm.code).strip()
    # self.assertExpectedInline(
    #     r, """""" # noqa: B950
    # )

@unittest.skipIf(not HAS_CUDA, 'CUDA-only test')
def test_unbacked_unify_dependency_violation(self):
    def f(x1, x2, x3, y):
        z1 = x1.item()
        torch._check(z1 // 9 == 1)
        z2 = x2.item()
        z3 = x3.item()
        torch._check(z1 == z2 + z3)
        return y * 2
        if z2 + z3 == z1:
            return y * 2
        else:
            return y + 3

    # NB: inputs are done as CUDA to ensure they aren't queried to be
    # backed
    gm = make_fx(f, tracing_mode="symbolic")(
        torch.tensor(10, device="cuda"), torch.tensor(5, device="cuda"),
        torch.tensor(5, device="cuda"), torch.randn(1, device="cuda")
    )

    insert_deferred_runtime_asserts(gm, gm.shape_env, "test")
    gm.recompile()

Teach FakeTensor to fill in item_memo when converting scalar CPU tensor (#126245)
This PR requires a little justification, but let's start with what it does first:
1. When you have a 0d CPU scalar int64/float64 tensor input to a graph, we will preallocate a backed SymInt/SymFloat corresponding to what you would get if you call item() on this tensor. This means you can freely change your input to be a Python int/float or a Tensor with an item() call and end up with exactly the same level of expressivity (specifically, you can guard on the internal SymInt/SymFloat no matter what). By default, the source of the backed SymInt/SymFloat is `L['tensor'].item()`, but if you have promoted a float input into a Tensor, we will cancel out `torch.as_tensor(L['float']).item()` into just `L['float']`.
2. We switch wrap_symfloat to use this, instead of hand crafting the new SymNodeVariable. Everything works out, except that we carefully pass the item() result to tracked fakes (and not the fake Tensor argument)
OK, so why do this at all? There is some marginal benefit where now some item() calls on scalar inputs can be guarded on, but IMO this is a pretty marginal benefit, and if it were the only reason, I wouldn't do this. The real reason for this is that I need to be able to propagate fake tensors through the graphs that are produced by Dynamo, and if I am doing the old custom wrap_symfloat logic, there's no way I can do this, because ordinarily an item() call will cause an unbacked SymInt when I reallocate.
The other obvious way to solve the problem above is to make a HOP alternative to item() that "bakes in" the backed SymInt it's supposed to return. But this strategy seems more parsimonious, and it does have the marginal benefit I mentioned above. The main downside is that what I have to do next is make it so that when I run tensor computation, I also apply the equivalent operations to the SymInt/SymFloat as well. That's the next PR.
Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/126245
Approved by: https://github.com/eellison
ghstack dependencies: #126637
2024-05-21 20:16:23 +00:00
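A rough sketch of what this buys in practice; the `torch.compile` invocation and the guard behavior in the comments are assumptions based on the description above, not code from this file:
```
import torch

@torch.compile(dynamic=True)
def f(x, s):
    # item() on a 0d CPU scalar tensor reuses the preallocated backed SymFloat,
    # so this traces with the same expressivity as passing a plain Python float
    return x * s.item()

f(torch.randn(4), torch.tensor(2.0))
```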
|
|
|
self.assertEqual(gm(
|
|
|
|
|
torch.tensor(12, device="cuda"), torch.tensor(6, device="cuda"),
|
|
|
|
|
torch.tensor(6, device="cuda"), torch.tensor([1.0], device="cuda")),
|
|
|
|
|
torch.tensor([2.0], device="cuda")
|
|
|
|
|
)
|
Codegen runtime asserts in Inductor (#124874)
This completely subsumes https://github.com/pytorch/pytorch/pull/120816
This makes use of the unbacked binding machinery to teach Inductor how to generate deferred runtime asserts directly. There is some back story about why I did it this way; let me explain.
Previously, our strategy for generating runtime asserts was that Dynamo would insert them into the FX graph after finishing tracing, and we would attempt to code-generate them based on the FX graph. This is a good strategy for export, where we immediately export the graph. However, this strategy was afflicted by problems in eager, where we reuse the same ShapeEnv as before. In particular, on subsequent graph passes, we would immediately turn all of these assertions into noops, because when we evaluated their expressions, the deferred runtime assert already recorded in the ShapeEnv told us "oh, of course this expression is True". Oops!
So, with this PR, we take the attitude that as long as the ShapeEnv sticks around, the ShapeEnv's list of deferred runtime asserts is the source of truth, and we don't put anything in the graph. So we just need to decide when to actually generate asserts, and the place I picked was Inductor lowering, since we already have an AssertScalar buffer concept, and so I just need to insert them at this point. AssertScalar also uses raw sympy.Expr rather than SymInt/Bool, so it is easier to prevent unrestricted simplification at this point.
There are a few things jumbled together in this PR. I can split them if you want; some of the changes predate my change of strategy, but they're useful changes anyway.
**torch/_dynamo/output_graph.py** and **torch/_inductor/lowering.py** - Here, we stop putting deferred runtime asserts in the graph. I also have to make sure we don't DCE unused symbol arguments; we're going to get some goofy graph arguments this way, and it will be good to restore that optimization eventually. We also just disable codegen for `_assert_scalar` entirely; we assume that ShapeEnv will be good enough to capture all of these.
**torch/_inductor/codegen/wrapper.py** and **torch/_inductor/ir.py** - Add a way to codegen sizevars without forcing simplification
**torch/_inductor/graph.py** - The main logic. Our strategy is to interpose in the same place we are testing that unbacked SymInts are properly showing up in lowered code. The logic is directly analogous to the logic in the existing insert deferred runtime asserts FX pass, but it's simpler because sympy expressions can be directly stored on inductor IR nodes.
**torch/fx/experimental/symbolic_shapes.py** - For extra safety, we have a way of freezing runtime asserts, so that if you try to add more we error. This prevents us from adding runtime asserts after we've done lowering. There's a funny interaction with backwards, for which there's a comment in graph.py
**torch/fx/passes/runtime_assert.py** - This is not really needed in this PR, but I rewrote the runtime assert logic to use unbacked_bindings rather than inferring it by looking for unbacked SymInts. Now, keypaths are translated into FX node accessors. Unfortunately, I couldn't delete the old inference code, because you still need it to find backed SymInts from arguments (as this pass may be used on graphs which don't explicitly bind all their shape variables as arguments). There are some new tests exercising this.
TODO: I think we need to generate asserts for replacements too. This is a preexisting problem that the old FX pass had too.
Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124874
Approved by: https://github.com/jansel
ghstack dependencies: #124864
2024-04-29 05:13:31 +00:00
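A minimal sketch of the rewritten FX pass, mirroring the `insert_deferred_runtime_asserts(gm, gm.shape_env, ...)` call exercised in the test above; the function `f` is illustrative:
```
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts

def f(x):
    u = x.item()             # unbacked SymInt, tracked via unbacked_bindings
    torch._check_is_size(u)  # recorded in the ShapeEnv as a deferred runtime assert
    return torch.zeros(u)

gm = make_fx(f, tracing_mode="symbolic")(torch.tensor(4))
insert_deferred_runtime_asserts(gm, gm.shape_env, "example")  # materialize asserts as graph nodes
gm.recompile()
```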
|
|
|
with self.assertRaises(RuntimeError):
|
Teach FakeTensor to fill in item_memo when converting scalar CPU tensor (#126245) (full commit message above)
2024-05-21 20:16:23 +00:00
|
|
|
gm(
|
|
|
|
|
torch.tensor(20, device="cuda"), torch.tensor(10, device="cuda"),
|
|
|
|
|
torch.tensor(10, device="cuda"), torch.tensor([1.0], device="cuda")
|
|
|
|
|
)
|
Codegen runtime asserts in Inductor (#124874) (full commit message above)
2024-04-29 05:13:31 +00:00
|
|
|
|
2023-11-01 18:57:42 +00:00
|
|
|
|
2023-08-15 15:49:48 +00:00
|
|
|
def test_split_unbacked_sizes(self):
|
|
|
|
|
def f(lengths, values):
|
|
|
|
|
# tolist not directly supported atm
|
|
|
|
|
sizes = [lengths[i].item() for i in range(lengths.size(0))]
|
|
|
|
|
for s in sizes:
|
2024-05-01 21:01:26 +00:00
|
|
|
# TODO(avik): no assertion generated with torch._check_is_size?
|
2023-10-10 19:48:16 +00:00
|
|
|
torch._constrain_as_size(s)
|
2023-08-15 15:49:48 +00:00
|
|
|
return torch.split(values, sizes)
|
|
|
|
|
|
|
|
|
|
r = str(make_fx(f, tracing_mode="symbolic")(
|
|
|
|
|
torch.tensor([2, 3, 4]),
|
|
|
|
|
torch.randn(9)
|
|
|
|
|
).code).strip()
|
|
|
|
|
self.assertExpectedInline(r, """\
|
|
|
|
|
def forward(self, lengths_1, values_1):
|
|
|
|
|
select = torch.ops.aten.select.int(lengths_1, 0, 0)
|
|
|
|
|
_local_scalar_dense = torch.ops.aten._local_scalar_dense.default(select); select = None
|
|
|
|
|
select_1 = torch.ops.aten.select.int(lengths_1, 0, 1)
|
|
|
|
|
_local_scalar_dense_1 = torch.ops.aten._local_scalar_dense.default(select_1); select_1 = None
|
|
|
|
|
select_2 = torch.ops.aten.select.int(lengths_1, 0, 2); lengths_1 = None
|
|
|
|
|
_local_scalar_dense_2 = torch.ops.aten._local_scalar_dense.default(select_2); select_2 = None
|
2024-08-01 03:18:37 +00:00
|
|
|
sym_constrain_range_for_size = torch.ops.aten.sym_constrain_range_for_size.default(_local_scalar_dense); sym_constrain_range_for_size = None
|
|
|
|
|
sym_constrain_range_for_size_1 = torch.ops.aten.sym_constrain_range_for_size.default(_local_scalar_dense_1); sym_constrain_range_for_size_1 = None
|
|
|
|
|
sym_constrain_range_for_size_2 = torch.ops.aten.sym_constrain_range_for_size.default(_local_scalar_dense_2); sym_constrain_range_for_size_2 = None
|
2023-08-15 15:49:48 +00:00
|
|
|
split_with_sizes = torch.ops.aten.split_with_sizes.default(values_1, [_local_scalar_dense, _local_scalar_dense_1, _local_scalar_dense_2]); values_1 = _local_scalar_dense = _local_scalar_dense_1 = _local_scalar_dense_2 = None
|
|
|
|
|
getitem = split_with_sizes[0]
|
|
|
|
|
getitem_1 = split_with_sizes[1]
|
|
|
|
|
getitem_2 = split_with_sizes[2]; split_with_sizes = None
|
|
|
|
|
return (getitem, getitem_1, getitem_2)""") # noqa: B950
|
|
|
|
|
|
2023-02-23 19:54:36 +00:00
|
|
|
def test_invalidate_nonzero(self):
|
|
|
|
|
ok = False
|
|
|
|
|
|
|
|
|
|
def f(a):
|
|
|
|
|
nonlocal ok
|
|
|
|
|
b = a.clone()
|
|
|
|
|
x = b.nonzero()
|
|
|
|
|
x1 = b.nonzero()
|
|
|
|
|
x2 = b.nonzero()
|
|
|
|
|
assert x1.shape[0] == x2.shape[0]
|
|
|
|
|
ok = True
|
|
|
|
|
b.normal_()
|
|
|
|
|
y = b.nonzero()
|
|
|
|
|
try:
|
|
|
|
|
bool(x1.shape[0] == y.shape[0])
|
|
|
|
|
self.fail("didn't raise exception")
|
|
|
|
|
except GuardOnDataDependentSymNode:
|
|
|
|
|
pass
|
|
|
|
|
|
|
|
|
|
make_fx(f, tracing_mode="symbolic")(torch.randn(4))
|
|
|
|
|
|
Add propagate_real_tensors mode for unbacked (#125115)
A common complaint when working with data-dependent code in PyTorch is that it's hard to tell how far you are from the finish line: every time a GuardOnDataDependentSymNode error is hit, you have to somehow fix or work around it to see the next one.
This PR adds a new mode `torch._functorch.config.fake_tensor_propagate_real_tensors` which modifies fake tensors to also propagate real tensors. This means that when we try to guard on a data-dependent SymNode, we can actually produce a real result. We also produce a warning which you should consult to figure out what the crux points are.
I ran this on vision_maskrcnn. In the baseline (without this mode), the model has 27 graph breaks, resulting in 40 graphs. With this mode on, the model has only 11 graph breaks, resulting in 15 graphs (the remaining graph breaks are due to missing functionality for item() on float tensors and some other missing Dynamo features.) You get a list of things that would have errored like this:
```
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Max(1, u1) < 2) -> True
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u1), 1)) -> True
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u1), 1)) -> True
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Ne(Max(1, u1), 1)) -> False
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Max(1, u0) < 2) -> True
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u0), 1)) -> True
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u0), 1)) -> True
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Ne(Max(1, u0), 1)) -> False
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Max(1, u1) < 2) -> True
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u1), 1)) -> True
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u1), 1)) -> True
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Ne(Max(1, u1), 1)) -> False
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Max(1, u0) < 2) -> True
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u0), 1)) -> True
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u0), 1)) -> True
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Ne(Max(1, u0), 1)) -> False
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Max(1, u1) < 2) -> False
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u1), 1)) -> False
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Ne(Max(1, u1), 1)) -> True
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Max(1, u0) < 2) -> False
WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u0), 1)) -> False
```
Potential later follow ups:
* Improve the warning messages (in particular, should provide user frames)
* GC real tensors when they are no longer needed by tracing. Right now, this will use A LOT of memory, as if your GC were broken and every intermediate tensor were kept live
Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125115
Approved by: https://github.com/IvanKobzarev
2024-05-01 00:08:35 +00:00
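A minimal sketch of turning the mode on around symbolic tracing, assuming a data-dependent guard that would otherwise raise GuardOnDataDependentSymNode:
```
import torch
from torch.fx.experimental.proxy_tensor import make_fx

def f(x):
    n = x.nonzero().shape[0]  # data-dependent: an unbacked SymInt
    if n > 2:                 # would raise GuardOnDataDependentSymNode without the mode
        return x.cos()
    return x.sin()

with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True):
    make_fx(f, tracing_mode="symbolic")(torch.randn(8))  # guard resolves via the real tensor, with a warning
```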
|
|
|
@torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True)
|
|
|
|
|
def test_invalidate_nonzero_propagate_real_tensors(self):
|
|
|
|
|
def f(a):
|
|
|
|
|
b = a.clone()
|
|
|
|
|
x = b.nonzero()
|
|
|
|
|
x1 = b.nonzero()
|
|
|
|
|
x2 = b.nonzero()
|
|
|
|
|
assert x1.shape[0] == x2.shape[0]
|
|
|
|
|
b.normal_()
|
|
|
|
|
y = b.nonzero()
|
|
|
|
|
# Because you're not actually going to generate exactly zero with
|
|
|
|
|
# normal_ lol
|
|
|
|
|
assert x1.shape[0] == y.shape[0]
|
|
|
|
|
|
|
|
|
|
make_fx(f, tracing_mode="symbolic")(torch.randn(4))
|
|
|
|
|
|
2022-10-17 02:16:14 +00:00
|
|
|
def test_sqrt_size(self):
|
|
|
|
|
def f(a):
|
|
|
|
|
return a / a.size(-1) ** 0.5
|
|
|
|
|
|
|
|
|
|
r = str(make_fx(f, tracing_mode="symbolic")(torch.empty(4)).code).strip()
|
|
|
|
|
self.assertExpectedInline(r, """\
|
|
|
|
|
def forward(self, a_1):
|
2023-11-06 19:38:49 +00:00
|
|
|
sym_size_int = torch.ops.aten.sym_size.int(a_1, 0)
|
Complete revamp of float/promotion sympy handling (#126905)
At a high level, the idea behind this PR is:
* Make it clearer what the promotion and int/float rules for various Sympy operations are. Operators that previously were polymorphic over int/float are now split into separate operators for clarity. We never do mixed int/float addition/multiplication etc in sympy, instead, we always promote to the appropriate operator. (However, equality is currently not done correctly.)
* Enforce strict typing on ValueRanges: if you have a ValueRange for a float, the lower and upper MUST be floats, and so forth for integers.
The story begins in **torch/utils/_sympy/functions.py**. Here, I make some changes to how we represent certain operations in sympy expressions:
* FloorDiv now only supports integer inputs; to do float floor division, do a truediv and then a trunc. Additionally, we remove the divide out addition by gcd optimization, because sympy gcd is over fields and is willing to generate rationals (but rationals are bad for ValueRange strict typing).
* ModularIndexing, LShift, RShift now assert they are given integer inputs.
* Mod only supports integer inputs; eventually we will support FloatMod (left for later work, when we build out Sympy support for floating operations). Unfortunately, I couldn't assert integer inputs here, because of a bad interaction with sympy's inequality solver that is used by the offline solver
* TrueDiv is split into FloatTrueDiv and IntTrueDiv. This allows us to eventually generate accurate code for Python-semantics IntTrueDiv, which is written in a special way to preserve precision when the inputs are >= 2**53, beyond what you would get by first coercing the integers to floats and then doing true division.
* Trunc is split to TruncToFloat and TruncToInt.
* Round is updated to return a float, not an int, making it consistent with the round op handler in Inductor. To get Python-style conversion to int, we call TruncToInt on the result.
* RoundDecimal is updated to consistently only ever return a float
* Add ToFloat for explicit coercion to float (required so we can enforce strict ValueRanges typing)
In **torch/__init__.py**, we modify SymInt and SymFloat to appropriately call into new bindings that route to these refined sympy operations. Also, we modify `torch.sym_min` and `torch.sym_max` to have promotion semantics (if one argument is a float, the return result is always a float), making them inconsistent with builtins.min/max, but making it possible to do type analysis without runtime information.
We also need to introduce some new op handlers in **torch/_inductor/ops_handler.py**:
* `to_int` for truncation to int64, directly corresponding to TruncToInt; this can be implemented by trunc and dtype, but with a dedicated handler it is more convenient for roundtripping in Sympy
* `int_truediv` for Python-style integer true division, which has higher precision than casting to floats and then running `truediv`
These changes have consequences. First, we need to make some administrative changes:
* Actually wire up these Sympy functions from SymInt/SymFloat in **torch/fx/experimental/sym_node.py**, including the new promotion rules (promote2)
* Add support for new Sympy functions in **torch/utils/_sympy/interp.py**, **torch/utils/_sympy/reference.py**
* In particular, in torch.utils._sympy.reference, we have a strong preference NOT to do nontrivial compute; instead, everything in the ops handler should map to a single sympy function
* TODO: I chose to roundtrip mod back to our Mod function, but I think I'm going to have to deal with the C/Python inconsistency to fix tests here
* Add printer support for the Sympy functions in **torch/_inductor/codegen/common.py**, **torch/_inductor/codegen/cpp_utils.py**, **torch/_inductor/codegen/triton.py**. `int_truediv` and mixed-precision equality are currently not implemented soundly, so we will lose precision in codegen for large values. TODO: The additions here are not exhaustive yet
* Update ValueRanges logic to use new sympy functions in **torch/utils/_sympy/value_ranges.py**. In general, we prefer to use the new Sympy function rather than try to roll things by hand, which is what was done previously for many VR analysis functions.
In **torch/fx/experimental/symbolic_shapes.py** we need to make some symbolic reasoning adjustments:
* Avoid generation of rational subexpressions by removing simplification of `x // y` into `floor(x / y)`. This simplification then triggers an addition simplification rule `(x + y) / c --> x / c + y / c` which is bad because x / c is a rational number now
* `_assert_bound_is_rational` is no more; we no longer generate rational bounds
* Don't intersect non-int value ranges with the `int_range`
* Support more sympy Functions for guard SYMPY_INTERP
* Assert the type of value range is consistent with the variable type
The new asserts uncovered necessary bug fixes:
* **torch/_inductor/codegen/cpp.py**, **torch/_inductor/select_algorithm.py**, **torch/_inductor/sizevars.py** - Ensure Wild/Symbol objects manually allocated in Inductor are marked `is_integer` so they're accepted when building expressions
* **torch/_inductor/utils.py** - make sure you actually pass in sympy.Expr to these functions
* **torch/_inductor/ir.py** - make_contiguous_strides_for takes int/SymInt, not sympy.Expr!
* **torch/export/dynamic_shapes.py** - don't use infinity to represent int ranges, instead use sys.maxsize - 1
Because of the removal of some symbolic reasoning that produced rationals, some of our symbolic reasoning has gotten worse and we are unable to simplify some guards. Check the TODO at **test/test_proxy_tensor.py**
**Reland notes.** This requires this internal fbcode diff https://www.internalfb.com/phabricator/paste/view/P1403322587 but I cannot prepare the diff codev due to https://fb.workplace.com/groups/osssupport/posts/26343544518600814/
It also requires this Executorch PR https://github.com/pytorch/executorch/pull/3911 but the ET PR can be landed prior to this landing.
Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/126905
Approved by: https://github.com/xadupre, https://github.com/lezcano
2024-06-09 01:12:54 +00:00
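A small sketch of the `torch.sym_max` promotion semantics described above; the plain-number behavior shown in the comments is inferred from this message, so treat it as an assumption:
```
import builtins
import torch

print(builtins.max(3, 2.0))   # 3   -> builtins keeps the winning argument's type (int)
print(torch.sym_max(3, 2.0))  # 3.0 -> any float argument promotes the result to float
print(torch.sym_float(3))     # 3.0 -> explicit int-to-float coercion, mirroring ToFloat
```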
|
|
|
sym_float = torch.sym_float(sym_size_int); sym_size_int = None
|
|
|
|
|
pow_1 = sym_float ** 0.5; sym_float = None
|
2022-10-17 02:16:14 +00:00
|
|
|
div = torch.ops.aten.div.Tensor(a_1, pow_1); a_1 = pow_1 = None
|
|
|
|
|
return div""")
|
|
|
|
|
|
2023-12-23 03:34:58 +00:00
|
|
|
def test_make_fx_with_custom_tracer_preserving_nn_module_stack(self):
|
|
|
|
|
|
|
|
|
|
class Bar(torch.nn.Module):
|
2024-08-01 07:22:48 +00:00
|
|
|
def __init__(self) -> None:
|
2023-12-23 03:34:58 +00:00
|
|
|
super().__init__()
|
|
|
|
|
|
|
|
|
|
def forward(self, x):
|
|
|
|
|
return x + 1
|
|
|
|
|
|
|
|
|
|
class Foo(torch.nn.Module):
|
2024-08-01 07:22:48 +00:00
|
|
|
def __init__(self) -> None:
|
2023-12-23 03:34:58 +00:00
|
|
|
super().__init__()
|
|
|
|
|
self.bar = Bar()
|
|
|
|
|
|
|
|
|
|
def forward(self, x):
|
|
|
|
|
return x + self.bar(x)
|
|
|
|
|
|
|
|
|
|
gm = make_fx(Foo())(torch.randn(4, 4))
|
|
|
|
|
for node in gm.graph.nodes:
|
|
|
|
|
self.assertTrue("nn_module_stack" not in node.meta)
|
|
|
|
|
|
|
|
|
|
foo = Foo()
|
|
|
|
|
|
|
|
|
|
def functional_call(*args, **kwargs):
|
|
|
|
|
with stateless._reparametrize_module(foo, {}):
|
|
|
|
|
return foo(*args, **kwargs)
|
|
|
|
|
|
|
|
|
|
functional_call._orig_mod = foo
|
|
|
|
|
|
|
|
|
|
gm_with_stack = make_fx(functional_call, record_module_stack=True)(torch.randn(4, 4))
|
|
|
|
|
found = False
|
|
|
|
|
for node in gm_with_stack.graph.nodes:
|
|
|
|
|
if "nn_module_stack" in node.meta:
|
|
|
|
|
if len(node.meta["nn_module_stack"]) == 1:
|
|
|
|
|
self.assertTrue("custom_tracer_preserving_nn_module_stack.<locals>.Foo" in str(node.meta["nn_module_stack"]))
|
|
|
|
|
found = True
|
|
|
|
|
elif len(node.meta["nn_module_stack"]) == 2:
|
|
|
|
|
self.assertTrue("preserving_nn_module_stack.<locals>.Bar" in str(node.meta["nn_module_stack"]))
|
|
|
|
|
found = True
|
|
|
|
|
else:
|
|
|
|
|
# there can be at most 2 levels
|
|
|
|
|
self.assertTrue(False)
|
|
|
|
|
|
|
|
|
|
self.assertTrue(found)
|
|
|
|
|
|
|
|
|
|
gm_without_stack = make_fx(functional_call)(torch.randn(4, 4))
|
|
|
|
|
for node in gm_without_stack.graph.nodes:
|
|
|
|
|
self.assertTrue("nn_module_stack" not in node.meta)
|
2022-10-17 02:16:14 +00:00
|
|
|
|
2022-09-02 15:53:59 +00:00
|
|
|
def test_symint_to_tensor(self):
|
|
|
|
|
def f(a):
|
|
|
|
|
return a / a.shape[0]
|
|
|
|
|
|
|
|
|
|
r = str(make_fx(f, tracing_mode="symbolic")(torch.empty(4)).code).strip()
|
|
|
|
|
self.assertExpectedInline(r, """\
|
|
|
|
|
def forward(self, a_1):
|
2023-11-06 19:38:49 +00:00
|
|
|
sym_size_int = torch.ops.aten.sym_size.int(a_1, 0)
|
|
|
|
|
div = torch.ops.aten.div.Tensor(a_1, sym_size_int); a_1 = sym_size_int = None
|
2022-09-02 15:53:59 +00:00
|
|
|
return div""")
|
|
|
|
|
|
|
|
|
|
r = str(make_fx(f, tracing_mode="symbolic", decomposition_table=decomposition_table)(torch.empty(4)).code).strip()
|
|
|
|
|
self.assertExpectedInline(r, """\
|
|
|
|
|
def forward(self, a_1):
|
2023-11-06 19:38:49 +00:00
|
|
|
sym_size_int = torch.ops.aten.sym_size.int(a_1, 0)
|
|
|
|
|
sym_float = torch.sym_float(sym_size_int); sym_size_int = None
|
2022-09-02 15:53:59 +00:00
|
|
|
div = torch.ops.prims.div.default(a_1, sym_float); a_1 = sym_float = None
|
|
|
|
|
return div""")
|
|
|
|
|
|
2022-07-23 19:03:38 +00:00
|
|
|
def test_cat(self):
|
|
|
|
|
def f(a, b):
|
|
|
|
|
val = torch.mul(a, b)
|
|
|
|
|
out = torch.cat([val, val])
|
|
|
|
|
if out.shape[0] * out.shape[1] > 20:
|
|
|
|
|
out = out.cos()
|
|
|
|
|
return out
|
|
|
|
|
|
|
|
|
|
test_inputs = []
|
|
|
|
|
test_inputs.append([(1, 5), (6, 1)])
|
|
|
|
|
test_inputs.append([(1, 4), (3, 1)])
|
2022-12-10 13:19:57 +00:00
|
|
|
gm = self._test_dynamic(f, [(1, 6), (8, 1)], test_inputs)
|
|
|
|
|
self.assertTrue(eval_guards(gm, torch.randn(1, 10), torch.randn(6, 1)))
|
|
|
|
|
self.assertFalse(eval_guards(gm, torch.randn(1, 2), torch.randn(4, 1)))
|
2023-04-03 20:11:34 +00:00
|
|
|
self.assertExpectedInline(show_guards(gm), """2*L['a'].size()[1]*L['b'].size()[0] > 20""")
|
2022-07-23 19:03:38 +00:00
|
|
|
|
2022-08-13 19:03:13 +00:00
|
|
|
def test_new_empty(self):
|
|
|
|
|
def f(a, b):
|
Convert SymInt tracing to mode based tracing (#83380)
We're on our way to deleting ProxyTensor entirely (see https://github.com/pytorch/pytorch/pull/83330 ), but before we can do that, we have to delete ProxySymInt first. Here's the plan.
Changes in torch.fx.experimental.symbolic_shapes
* The general idea is to do mode based tracing. This means we need a mode that can interpose on all SymInt operations. There are a few ways to do this, but I've done it the easy way: (1) I have a separate mode for SymInt operations specifically called SymDispatchMode, and (2) this mode operates on PySymInt (and not the basic SymInt which is user visible). I elided Int from the name because if we add SymFloats I want to use the same mode to handle those as well, and I used Dispatch rather than Function because this is the "inner" dispatch operating on PySymInt and not SymInt (this is not a perfect analogy, but SymFunctionMode definitely seemed wrong as you still must go through the C++ binding.) The mode is entirely implemented in Python for ease of implementation. We could have implemented this more symmetrically to TorchFunctionMode in C++, but I leave that as later work; this API is unlikely to get used by others (unlike TorchFunctionMode). One downside to not doing the mode in C++ is that we still have to do the hop via a preexisting PySymInt to wrap; this is currently not a big deal as conversion to SymInts only really happens when there is already another SymInt floating around. SymDispatchMode is pared down from TorchDispatchMode; there is no ancestor tracking since I don't expect people to be mixing up SymDispatchModes.
* I made some improvements for tracing. When I invoke the SymDispatchMode handler, I would like constants to show up as constants, so they can be directly inlined into the FX graph (rather than going through a wrapping process first, and then the wrapped SymInt being used in the operation). To do this, I directly track if a PySymInt is a constant at construction time. Only wrapped PySymInts are constants.
* For convenience, PySymInts now support all magic methods that regular SymInts do. This is so that redispatch inside the SymDispatchMode can be written the idiomatic way `func(*args, **kwargs)` where func is an operator. The original names are retained for direct C++ calls.
Changes in torch.fx.experimental.proxy_tensor
* OK, so we got a new SymDispatchMode, so we define a ProxySymDispatchMode and activate it when we start tracing. This mode is currently unconditionally activated although technically we only need to activate it when doing symbolic tracing (it doesn't matter either way as there are no SymInts if you are not doing symbolic tracing).
* We delete ProxySymInt. To do this, we must now record the proxy for the SymInt some other way. Based on discussion with Chillee, it is more intuitive to him if the proxies are still recorded on the SymInt in some way. So we store them in the `__dict__` of the PySymInt, indexed by Tracer. An improvement is to make this a weak map, so that we remove all of these entries when the tracer dies. In an original version of this PR, I keyed on the mode itself, but tracer is better as it is accessible from both modes (and as you will see, we will need to fetch the map from both the ProxySymDispatchMode as well as the ProxyTorchDispatchMode.) The implementation of SymDispatchMode now simply retrieves the proxies, performs the underlying operation as well as the FX graph recording, and then records the output proxy to the PySymInt. Note that FX tracing does not work with proxies and SymInts, so we manually call `call_function` to ensure that the correct operations get recorded to the graph. This means conventional FX retracing with proxies only will not work with these graphs, but there wasn't really any reason to do this (as opposed to `make_fx` retracing) anyway. Constants are detected and converted directly into Python integers.
* SymInts can show up as arguments to tensor operations, so they must be accounted for in ProxyTorchDispatchMode as well. This is done by searching for SymInt arguments and converting them into proxies before the proxy call. This can be done more efficiently in a single `tree_map` but I'm lazy. The helper `unwrap_symint_proxy` conveniently implements the unwrapping in one place given a tracer; unfortunately it cannot be shared with SymDispatchMode as SymDispatchMode gets PySymInts, but ProxyTensorMode gets SymInts. Similarly, tensors that are returned from tensor operations can have SymInts in their shapes, which need fresh proxies allocated. To avoid leaking internal details of SymInt shape computation to the tensor operation graph, these SymInts are always given proxies derived from `x.size(dim)` call on their return tensor. We also need to do this for strides and numel but have not done so yet. Furthermore, we must avoid tracing internal SymInt calls while we run meta operations on the true operation; this is achieved by also disabling SymInt tracing on the inside of tensor tracing. This is analogous to how tensor tracing is disabled inside the implementation of tracing mode, but unfortunately we are unable to use the same mechanism (this would have been easier if the two modes could be combined somehow, and I am amenable to suggestions to try harder to achieve this.)
* Because there are no more ProxySymInts, we no longer need to do anything to unwrap SymInt. Furthermore, we do not need to reallocate ProxySymInts on class creation.
* If a bare SymInt without a Proxy is encountered, it is assumed that this must be a constant. `create_arg` handles this case. Non-constant free SymInts result in an assert error.
* The initial input handling in `dispatch_trace` involves traversing all of the input tensors, traversing over their shapes, and assigning proxies for the SymInts in shapes in the same way we handle proxies for the output tensors.
The preexisting testing is inadequate but will be better after I rebase past https://github.com/pytorch/pytorch/pull/82209
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/83380
Approved by: https://github.com/samdow
2022-08-16 03:03:13 +00:00
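A minimal sketch of the observable effect: with mode-based SymInt tracing, size computations are recorded as graph nodes rather than baked-in constants (the exact node names are an assumption):
```
import torch
from torch.fx.experimental.proxy_tensor import make_fx

def f(x):
    return x.view(x.shape[0] * x.shape[1])

gm = make_fx(f, tracing_mode="symbolic")(torch.randn(3, 4))
print(gm.code)  # expect sym_size and mul calls feeding the view, per the message above
```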
|
|
|
return a.new_empty(b.shape[0], b.shape[1] * 2)
|
2022-08-13 19:03:13 +00:00
|
|
|
|
2022-12-10 13:19:57 +00:00
|
|
|
self._test_dynamic(f, [(2, 4), (4, 5)], [[(2, 3), (5, 7)], [(3, 7), (9, 3)]], assert_eq=False).shape_env
|
2022-08-13 19:03:13 +00:00
|
|
|
|
2022-10-03 16:29:49 +00:00
|
|
|
def test_size_with_tensor(self):
|
2023-09-21 02:33:18 +00:00
|
|
|
# I think I messed up writing this test case originally; it seems
|
|
|
|
|
# I'm supposed to hit an error case, but the code here works in both
|
|
|
|
|
# eager and tracing
|
2022-10-03 16:29:49 +00:00
|
|
|
def f(tensor):
|
|
|
|
|
max_size = torch.tensor([800, 1216], dtype=torch.int64)
|
|
|
|
|
batch_shape = [2] + list(tensor.shape[:-2]) + list(max_size)
|
|
|
|
|
return tensor.new_empty(batch_shape)
|
|
|
|
|
|
|
|
|
|
a = torch.randn(3, 800, 1199)
|
2023-09-21 02:33:18 +00:00
|
|
|
f(a)
|
|
|
|
|
make_fx(f, tracing_mode="symbolic")(a)
|
2022-10-03 16:29:49 +00:00
|
|
|
|
2024-01-14 02:57:37 +00:00
|
|
|
def test_fake_tensor_as_size(self):
|
|
|
|
|
def f(x):
|
|
|
|
|
r = torch.zeros([x])
|
|
|
|
|
return r
|
|
|
|
|
|
|
|
|
|
fx_g = make_fx(f, tracing_mode="symbolic")(torch.tensor(4))
|
|
|
|
|
self.assertExpectedInline(fx_g.code.strip(), """\
|
|
|
|
|
def forward(self, x_1):
|
|
|
|
|
_local_scalar_dense = torch.ops.aten._local_scalar_dense.default(x_1); x_1 = None
|
|
|
|
|
zeros = torch.ops.aten.zeros.default([_local_scalar_dense], device = device(type='cpu'), pin_memory = False); _local_scalar_dense = None
|
|
|
|
|
return zeros""") # noqa: B950
|
|
|
|
|
|
2022-08-13 19:03:13 +00:00
|
|
|
def test_expand(self):
|
|
|
|
|
def f(a):
|
|
|
|
|
b = torch.mul(a, a)
|
|
|
|
|
c = b.expand(a.shape)
|
|
|
|
|
return c
|
|
|
|
|
|
|
|
|
|
self._test_dynamic(f, [(3,)], [[(3,)], [(4,)], [(2,)]])
|
|
|
|
|
self._test_dynamic(f, [(5, 1)], [[(4, 1)], [(3, 1)], [(6, 1)]])
|
|
|
|
|
|
2022-10-19 03:19:22 +00:00
|
|
|
def test_metadata(self):
|
2022-08-31 00:29:55 +00:00
|
|
|
def f(a, b):
|
|
|
|
|
d = a.new_empty(a.shape[0] + b.shape[0])
|
|
|
|
|
return d
|
|
|
|
|
fx_g = make_fx(f, tracing_mode="symbolic")(torch.randn(5), torch.randn(4))
|
|
|
|
|
meta_c = _get_node(fx_g, lambda x: x.target == aten.new_empty.default)
|
|
|
|
|
meta_d = _get_node(fx_g, lambda x: x.target == operator.add)
|
2023-01-19 21:16:12 +00:00
|
|
|
self.assertTrue(meta_c.meta['val'].shape[0].node.expr == meta_d.meta['val'].node.expr)
|
2022-10-19 03:19:22 +00:00
|
|
|
|
|
|
|
|
def test_metadata_fresh(self):
|
|
|
|
|
def f(x):
|
|
|
|
|
assert x.shape[0] == 3
|
|
|
|
|
return x.cos()
|
|
|
|
|
|
|
|
|
|
fx_g = make_fx(f, tracing_mode="symbolic")(torch.randn(3))
|
|
|
|
|
meta_cos = _get_node(fx_g, lambda x: x.target == aten.cos.default)
|
|
|
|
|
meta_inp = _get_node(fx_g, lambda x: x.op == 'placeholder')
|
2023-07-15 03:13:12 +00:00
|
|
|
self.assertTrue(meta_cos.meta['val'].shape[0] == 3)
|
2022-10-19 03:19:22 +00:00
|
|
|
# Checks if the input expr has been updated even though the constraint
|
|
|
|
|
# happened afterwards
|
2023-07-15 03:13:12 +00:00
|
|
|
self.assertTrue(meta_inp.meta['val'].shape[0] == 3)
|
2022-10-19 03:19:22 +00:00
|
|
|
|
2022-11-19 23:10:34 +00:00
|
|
|
def test_elementwise_meta_with_sym_numbers(self):
|
|
|
|
|
def f(x, offset, as_sym_float=False):
|
|
|
|
|
x0 = x.size()[0]
|
|
|
|
|
if as_sym_float:
|
Split SymNode into its own file (#112037)
This PR:
- Moves TrueDiv, LShift, RShift, IsNonOverlappingAndDenseIndicator to `_sympy.functions.py`
- Moves SymNode to `fx.experimental.sym_node`.
- This file does not have any SymPy dependencies at import time
- It installs the magic methods in Sym{Bool,Int,Float}.
- N.b. With this split, we may be able to move Sym{Bool,Int,Float} to this file, and remove quite a few of the hacks around these classes
- Imports `sym_node` in `torch/__init__.py` rather than the whole `symbolic_shapes.py`.
This breaks the import-time dependency between torch and SymPy
Pull Request resolved: https://github.com/pytorch/pytorch/pull/112037
Approved by: https://github.com/peterbell10
ghstack dependencies: #112035, #112036
2023-10-26 20:00:33 +00:00
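A tiny sketch of the resulting import structure; the claim that the module avoids pulling in sympy at import time follows the message above and is otherwise an assumption:
```
import torch.fx.experimental.sym_node  # installs the magic methods on Sym{Bool,Int,Float}
import torch

print(torch.sym_float(2))  # 2.0; the symbolic variants route through SymNode
```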
|
|
|
x0 = torch.sym_float(x0)
|
2022-11-19 23:10:34 +00:00
|
|
|
return torch.add(x0, offset)
|
|
|
|
|
|
|
|
|
|
fx_g = make_fx(f, tracing_mode="symbolic")(torch.rand(2, 3), 2.0, False)
|
|
|
|
|
meta_add = _get_node(fx_g, lambda x: x.target == aten.add.Tensor)
|
|
|
|
|
self.assertEqual(meta_add.meta['val'].shape, ())
|
|
|
|
|
self.assertEqual(meta_add.meta['val'].dtype, torch.float32)
|
|
|
|
|
|
|
|
|
|
fx_g = make_fx(f, tracing_mode="symbolic")(torch.rand(2, 3), 2, False)
|
|
|
|
|
meta_add = _get_node(fx_g, lambda x: x.target == aten.add.Tensor)
|
|
|
|
|
self.assertEqual(meta_add.meta['val'].shape, ())
|
|
|
|
|
self.assertEqual(meta_add.meta['val'].dtype, torch.int64)
|
|
|
|
|
|
|
|
|
|
fx_g = make_fx(f, tracing_mode="symbolic")(torch.rand(2, 3), 2, True)
|
|
|
|
|
meta_add = _get_node(fx_g, lambda x: x.target == aten.add.Tensor)
|
|
|
|
|
self.assertEqual(meta_add.meta['val'].shape, ())
|
|
|
|
|
self.assertEqual(meta_add.meta['val'].dtype, torch.float32)
|
2022-08-13 19:03:13 +00:00
|
|
|
|
2022-10-03 07:11:53 +00:00
|
|
|
def test_return_symint(self):
|
|
|
|
|
def f(x):
|
|
|
|
|
return x.shape[0], x.cos(), x.shape[0] / 5
|
|
|
|
|
self._test_dynamic(f, [(5,)], [[(4,)], [(12,)]])
|
|
|
|
|
|
|
|
|
|
def f(x):
|
|
|
|
|
return x.shape
|
|
|
|
|
self._test_dynamic(f, [(5, 3)], [[(4, 6)]])
|
|
|
|
|
|
2022-10-29 15:45:32 +00:00
|
|
|
def test_rmethod(self):
|
|
|
|
|
def f(x):
|
|
|
|
|
return x.size(0) + x
|
|
|
|
|
self._test_dynamic(f, [(5,)], [[(4,)], [(12,)]])
|
|
|
|
|
|
2022-10-25 04:04:16 +00:00
|
|
|
def test_mega_guard(self):
|
|
|
|
|
def f(a, b):
|
|
|
|
|
assert a.shape[0] == b.shape[0] * 2
|
|
|
|
|
return a.cos()
|
|
|
|
|
fx_g = make_fx(f, tracing_mode="symbolic")(torch.randn(16), torch.randn(8))
|
2022-12-29 05:32:31 +00:00
|
|
|
from torch._dynamo.source import LocalSource
|
2022-12-10 13:19:57 +00:00
|
|
|
self.assertExpectedInline(
|
2023-04-22 07:33:12 +00:00
|
|
|
str(fx_g.shape_env.produce_guards(fx_placeholder_vals(fx_g), [LocalSource("a"), LocalSource("b")], ignore_static=False)), # noqa: B950
|
2023-04-03 20:11:34 +00:00
|
|
|
"""["L['a'].size()[0] == 2*L['b'].size()[0]", "L['a'].stride()[0] == 1", "L['a'].storage_offset() == 0", "L['b'].stride()[0] == 1", "L['b'].storage_offset() == 0", "2 <= L['b'].size()[0]"]""" # noqa: B950
|
2022-12-10 13:19:57 +00:00
|
|
|
)
|
2023-04-22 07:33:12 +00:00
|
|
|
self.assertExpectedInline(
|
|
|
|
|
str(fx_g.shape_env.produce_guards(fx_placeholder_vals(fx_g), [LocalSource("a"), LocalSource("b")], ignore_static=True)), # noqa: B950
|
|
|
|
|
"""["L['a'].size()[0] == 2*L['b'].size()[0]", "2 <= L['b'].size()[0]"]""" # noqa: B950
|
|
|
|
|
)
|
2022-10-25 04:04:16 +00:00
|
|
|
|
2023-06-29 14:39:48 +00:00
|
|
|
def test_guard_upperbound_range_refinement(self):
|
|
|
|
|
def f(a):
|
|
|
|
|
assert a.shape[0] > 5 and a.shape[0] > 12
|
|
|
|
|
return a.cos()
|
|
|
|
|
tensor = make_fx(f, tracing_mode="symbolic")(torch.randn(15))
|
Refine value ranges on inequalities (#120800)
This is basically done the obvious way. For better or worse, I jammed this into what used to be `_maybe_guard_eq` but now is `_maybe_guard_rel`. I was careful to test all the off by one conditions, and each permutation. Let me know if you think I missed anything. Importantly, this now works for unbacked SymInts.
While testing, I noticed we are silently duck sizing all symbolic variables in `test_dynamic_shapes.py`. This may or may not be covering up bugs.
Along the way, I had to fix a bug in export constraints, where we weren't checking that the final var_to_range was consistent with what the user requested at top level.
After I implemented all this, I realized that applying this to non-unbacked SymInts was duplicative with @ysiraichi's previous work on https://github.com/pytorch/pytorch/pull/97963 . The upside is I now understand what Yukio was trying to do in the original PR, and I think my new logic is simpler and less error prone. In Yukio's earlier diff, Yukio tried very hard to avoid changing what guards we actually issue (since this would cause tests to wobble). Thus, when he refined a range, he also saved the guard that actually caused the range to refine. In this PR, I don't bother saving these guards; instead I just tighten var_to_range directly and rely on generating guards on this to be correct. The key insight is that if I assert `x < y`, it's always safe to emit (potentially) more restrictive range guards, because this won't invalidate our guards, it will just make them a little too strong (but actually, I think we are precise along the way.) If these guards make it unnecessary to test `x < y`, because now the ranges for x and y are disjoint, this is fine, we've subsumed the x < y guard and can just not bother testing it. If I've gotten it right, TV will agree with me.
In fact, I had a bug in this PR which TV didn't catch, which is that when we have a recorded var_to_guards for a symbol, we unconditionally never generate the range guard for it, even if the var_to_guards is potentially inconsistent with var_to_range (because var_to_range was updated separately). With var_to_guards removed, I don't have to worry about this inconsistency.
Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/120800
Approved by: https://github.com/Skylion007, https://github.com/avikchaudhuri, https://github.com/ysiraichi
2024-02-29 16:47:58 +00:00
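A small sketch of inequality-driven range refinement on an unbacked SymInt, the case this PR extends coverage to; `torch._check` is the public way to assert these inequalities:
```
import torch
from torch.fx.experimental.proxy_tensor import make_fx

def f(x):
    u = x.item()          # unbacked SymInt
    torch._check(u > 12)  # refines u's value range to [13, inf)
    torch._check(u > 5)   # already implied by the refined range
    return torch.zeros(u)

make_fx(f, tracing_mode="symbolic")(torch.tensor(20))
```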
|
|
|
self.assertExpectedInline(show_guards(tensor), """13 <= L['a'].size()[0]""")
|
2023-06-29 14:39:48 +00:00
|
|
|
|
|
|
|
|
def test_guard_lowerbound_range_refinement(self):
|
|
|
|
|
def f(a):
|
|
|
|
|
assert a.shape[0] < 20 and a.shape[0] < 30
|
|
|
|
|
return a.cos()
|
|
|
|
|
tensor = make_fx(f, tracing_mode="symbolic")(torch.randn(15))
|
Refine value ranges on inequalities (#120800) (full commit message above)
2024-02-29 16:47:58 +00:00
|
|
|
self.assertExpectedInline(show_guards(tensor), """L['a'].size()[0] <= 19""")
|
2023-06-29 14:39:48 +00:00
|
|
|
|
2023-07-18 19:38:31 +00:00
|
|
|
def test_guard_upperbound_range_refinement_multivariate(self):
|
|
|
|
|
def f(a):
|
|
|
|
|
assert a.shape[0] > 5 and a.shape[0] > 12
|
|
|
|
|
assert a.shape[1] > 5 and a.shape[1] > a.shape[0]
|
|
|
|
|
return a.cos()
|
|
|
|
|
tensor = make_fx(f, tracing_mode="symbolic")(torch.randn((15, 20)))
|
|
|
|
|
self.assertExpectedInline(show_guards(tensor), """\
|
|
|
|
|
L['a'].size()[1] > L['a'].size()[0]
|
Refine value ranges on inequalities (#120800) (full commit message above)
2024-02-29 16:47:58 +00:00
|
|
|
13 <= L['a'].size()[0]
|
|
|
|
|
14 <= L['a'].size()[1]""")
|
2023-07-18 19:38:31 +00:00
|
|
|
|
|
|
|
|
def test_guard_lowerbound_range_refinement_multivariate(self):
|
|
|
|
|
def f(a):
|
|
|
|
|
assert a.shape[0] < 20 and a.shape[0] < 30
|
|
|
|
|
assert a.shape[1] < 30 and a.shape[1] < a.shape[0]
|
|
|
|
|
return a.cos()
|
|
|
|
|
tensor = make_fx(f, tracing_mode="symbolic")(torch.randn((15, 5)))
|
|
|
|
|
self.assertExpectedInline(
|
|
|
|
|
show_guards(tensor),
|
|
|
|
|
"""\
|
|
|
|
|
L['a'].size()[1] < L['a'].size()[0]
|
Refine value ranges on inequalities (#120800) (full commit message above)
2024-02-29 16:47:58 +00:00
|
|
|
L['a'].size()[0] <= 19
|
|
|
|
|
L['a'].size()[1] <= 18""")
|
2023-07-18 19:38:31 +00:00
|
|
|
|
2022-10-26 16:37:10 +00:00
|
|
|
def test_sym_storage_offset(self):
|
|
|
|
|
def f(x, y):
|
|
|
|
|
return x + y
|
|
|
|
|
|
|
|
|
|
inp = (torch.randn(8)[3:], torch.randn(5))
|
|
|
|
|
fx_g = make_fx(f, tracing_mode="symbolic")(*inp)
|
|
|
|
|
inp = (torch.randn(8)[3:], torch.randn(5))
|
|
|
|
|
self.assertEqual(fx_g(*inp), f(*inp))
|
2022-10-25 04:04:16 +00:00
|
|
|
|
2022-09-16 22:59:44 +00:00
|
|
|
def _assert_no_guards(self, fx_g, free_symbols):
|
2022-09-21 14:00:52 +00:00
|
|
|
assert _get_free_symbols(fx_g.shape_env) == free_symbols, fx_g.shape_env.var_to_val
|
|
|
|
|
assert len(fx_g.shape_env.get_nontrivial_guards()) == 0, fx_g.shape_env.format_guards()
|
2022-09-16 22:59:44 +00:00
|
|
|
|
|
|
|
|
def test_guards_equal(self):
|
|
|
|
|
def f(a, b):
|
|
|
|
|
return a * b
|
|
|
|
|
|
2022-09-28 21:28:26 +00:00
|
|
|
# NB: Numbers are carefully chosen to avoid duck shaping from applying
|
|
|
|
|
|
|
|
|
|
fx_g = _trace(f, (5, 6), (5, 6))
|
2022-09-16 22:59:44 +00:00
|
|
|
self._assert_no_guards(fx_g, 2)
|
|
|
|
|
|
2022-09-28 21:28:26 +00:00
|
|
|
fx_g = _trace(f, (5, 6, 7), (5, 6, 7))
|
2022-09-16 22:59:44 +00:00
|
|
|
self._assert_no_guards(fx_g, 3)
|
|
|
|
|
|
2022-09-28 21:28:26 +00:00
|
|
|
fx_g = _trace(f, (5, 1), (1, 6))
|
|
|
|
|
self._assert_no_guards(fx_g, 2)
|
2022-09-16 22:59:44 +00:00
|
|
|
|
|
|
|
|
def f(a, b, c, d):
|
|
|
|
|
a = a + b
|
|
|
|
|
cat = torch.cat([c, d])
|
|
|
|
|
return a + cat
|
|
|
|
|
|
|
|
|
|
fx_g = _trace(f, 7, 7, 4, 3)
|
|
|
|
|
self._assert_no_guards(fx_g, 2)
|
|
|
|
|
|
2022-09-17 18:11:51 +00:00
|
|
|
def f(a, b, c, d, e):
|
|
|
|
|
vals = [a, b, c, d, e]
|
|
|
|
|
x = a
|
|
|
|
|
for idx in range(len(vals) - 1):
|
|
|
|
|
x = torch.cat([x, vals[idx]]) + vals[idx + 1]
|
|
|
|
|
return x
|
|
|
|
|
|
|
|
|
|
fx_g = _trace(f, 2, 4, 8, 16, 32)
|
|
|
|
|
self._assert_no_guards(fx_g, 1)
|
|
|
|
|
|
2022-09-16 22:59:44 +00:00
|
|
|
def f(a, b):
|
|
|
|
|
a = a.view(b.shape[0])
|
|
|
|
|
return a + b.sum()
|
|
|
|
|
|
|
|
|
|
fx_g = _trace(f, (4, 2), 8)
|
|
|
|
|
self._assert_no_guards(fx_g, 2)
|
|
|
|
|
|
2022-09-28 21:28:26 +00:00
|
|
|
fx_g = _trace(f, (4, 2), (8, 5))
|
2022-09-16 22:59:44 +00:00
|
|
|
self._assert_no_guards(fx_g, 3)
|
|
|
|
|
|
|
|
|
|
fx_g = _trace(f, (2, 3, 4), 24)
|
|
|
|
|
self._assert_no_guards(fx_g, 3)
|
|
|
|
|
|
|
|
|
|
def test_nonidentity_transitive_guards(self):
|
|
|
|
|
def f(a, b, c, d, e):
|
|
|
|
|
vals = [a, b, c, d, e]
|
|
|
|
|
cat_vals = []
|
|
|
|
|
for idx in range(len(vals) - 1):
|
|
|
|
|
cat_vals.append(torch.cat([vals[idx], vals[idx]]))
|
|
|
|
|
final_vals = []
|
|
|
|
|
for a, b in reversed(list(zip(cat_vals, vals[1:]))):
|
|
|
|
|
final_vals.append(a + b)
|
|
|
|
|
return final_vals
|
|
|
|
|
|
|
|
|
|
fx_g = _trace(f, 2, 4, 8, 16, 32)
|
2023-02-12 22:04:01 +00:00
|
|
|
self.assertExpectedInline(show_guards(fx_g), """""")
|
2022-09-16 22:59:44 +00:00
|
|
|
|
2023-11-06 16:26:53 +00:00
|
|
|

    @torch.fx.experimental._config.patch(translation_validation=True)
    def test_constant_specialization(self):
        def f(t):
            assert t.shape[0] == 10
            return t

        tensor = make_fx(f, tracing_mode="symbolic")(torch.randn(10))
        self.assertExpectedInline(show_guards(tensor), """""")
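
    # Note (as I read it): evaluating `t.shape[0] == 10` specializes the
    # symbolic input size to the constant 10, so no nontrivial guards remain,
    # and the translation_validation patch has the shape env cross-check that
    # specialization with its Z3-based validator.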


make_fx_failures = {
    # unknown
    xfail('allclose'),
    xfail('equal'),
    # empty
    skip('new_empty'),
    skip('empty_like'),
    skip('empty'),
    skip('empty_permuted'),
    # flaky
    skip('linalg.lstsq', 'grad_oriented'),
    skip('nn.functional.max_unpool1d', '', device_type='cpu'),
    skip('nn.functional.max_unpool2d', '', device_type='cpu'),
    skip('nn.functional.max_unpool3d', '', device_type='cpu'),
    skip('linalg.lstsq'),  # flaky, probably just a precision issue

    # data-dependent control flow
    skip('item'),
    xfail('cov'),
    xfail('nn.functional.gaussian_nll_loss'),
    xfail('tensor_split'),
    xfail('corrcoef'),
    xfail('quantile'),
    xfail('nanquantile'),

    # Seems like it's creating a sparse tensor that isn't captured by tensor.is_sparse
    xfail('sparse.sampled_addmm'),
    xfail('sparse.mm', 'reduce'),

    # proxy tensor doesn't support sparse correctly right now
    skip('to_sparse'),
    # segfaults
    skip('block_diag'),

    # AssertionError: Tensor-likes are not close!
    skip('empty_strided', '', device_type='cpu'),
}
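
# How these sets are consumed (sketch): each xfail(...)/skip(...) entry is a
# rule from common_methods_invocations naming an op (and optionally a variant
# and device type); skipOps below applies them to the matching OpInfo tests,
# e.g.
#
#     @skipOps('TestProxyTensorOpInfo', 'test_make_fx_exhaustive', make_fx_failures)
#
# turns the listed ops into expected failures (xfail) or outright skips (skip).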

only_real_tensor_failures = {
    xfail('narrow'),
}

only_fake_tensor_failures = {
    xfail('narrow'),
}

fake_tensor_failures = {
    # ASAN failures due to divide by 0
    skip('nn.functional.nll_loss'),
}

symbolic_tensor_failures = {
    xfail('combinations', ''),
    xfail('geqrf', ''),  # aten.geqrf.default - couldn't find symbolic meta function/decomposition
    xfail('histogram', ''),  # Could not run 'aten::histogram.bin_ct' with arguments from the 'Meta' backend. This c...
    xfail('histogramdd', ''),  # aten._histogramdd_bin_edges.default - couldn't find symbolic meta function/decomposition
    xfail('nanquantile', ''),  # Could not run 'aten::equal' with arguments from the 'Meta' backend.
    xfail('nn.functional.binary_cross_entropy', ''),  # aten.new_empty.default - couldn't find symbolic meta function/decom...
    xfail('nn.functional.cross_entropy', ''),  # aten.size.default - couldn't find symbolic meta function/decomposition
    xfail('nn.functional.ctc_loss'),  # aten._ctc_loss.Tensor - couldn't find symbolic meta function/decomposition
    xfail('quantile', ''),  # Could not run 'aten::equal' with arguments from the 'Meta' backend.
    xfail('unique_consecutive', ''),  # aten.unique_consecutive.default - couldn't find symbolic meta function/decomposition

    xfail('max_pool2d_with_indices_backward', ''),  # Expected a value of type 'List[int]' for argument 'kernel_size' but...

    # many complex operators have incorrect striding/metadata
    xfail('fft.fft', ''),
    xfail('fft.hfft2', ''),
    xfail('fft.hfft', ''),
    xfail('fft.hfftn', ''),
    xfail('fft.ifft', ''),
    xfail('fft.ihfft2', ''),
    xfail('fft.ihfft', ''),
    xfail('fft.ihfftn', ''),
    xfail('fft.irfft2', ''),
    xfail('fft.irfft', ''),
    xfail('fft.irfftn', ''),
    xfail('fft.rfft2', ''),
    xfail('fft.rfft', ''),
    xfail('fft.rfftn', ''),

    xfail('stft', ''),
}

symbolic_tensor_segfaults = {
    skip('nn.functional.batch_norm'),  # Segfault??
}

symbolic_tensor_failures.update(symbolic_tensor_segfaults)
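
# Folding the segfaulting ops into symbolic_tensor_failures means they are
# skipped rather than xfailed by every symbolic test below: a segfault would
# take down the whole test process, so xfail cannot be used for them.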

inplace_symbolic_tensor_failures = {
    # bugs
    xfail('float_power', ''),  # base given to float_power_ has dtype Float but the operation's result requires dtype Double
}

out_symbolic_tensor_failures = {
    # Cast error details: Unable to cast (...) to Tensor
    #
    # This happens because the test is set up to call the out variant using the `out` kwarg:
    #
    #     torch._some_op(arg1, arg2, out=(out1, out2, out3))
    #
    # However, this only works on torch ops, not aten ops. For `_batch_norm_with_update`,
    # this fails because the op has no python bindings, so it doesn't support the `out` kwarg
    # way of calling its out variant.
    xfail('_batch_norm_with_update', ''),
    xfail('_native_batch_norm_legit', ''),
    xfail('angle', ''),
    xfail('argmax', ''),
    xfail('argmin', ''),
    xfail('fft.fft2', ''),
    xfail('fft.fftn', ''),
    xfail('fft.ifft2', ''),
    xfail('fft.ifftn', ''),
    xfail('gather', ''),
    xfail('linalg.pinv', ''),
    xfail('linalg.pinv', 'hermitian'),
    xfail('lu', ''),
    xfail('scatter_add', ''),
    xfail('scatter', ''),
    xfail('take_along_dim', ''),
    xfail('triangular_solve', ''),

    # SymIntArrayRef expected to contain only concrete integers
    xfail('ones', ''),
    xfail('randn', ''),
    xfail('zeros', ''),

    # RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides
    xfail('index_reduce', 'prod'),
    xfail('index_reduce', 'mean'),
    xfail('index_reduce', 'amax'),
    xfail('index_reduce', 'amin'),
}

out_symbolic_tensor_segfaults = {
    skip('nanmean', ''),
}

out_symbolic_tensor_failures.update(out_symbolic_tensor_segfaults)


# Copies inputs to inplace operations to avoid inplace modifications
# to leaves requiring gradient
def _get_safe_inplace(inplace_variant):
    @functools.wraps(inplace_variant)
    def _fn(t, *args, **kwargs):
        return inplace_variant(t.clone(), *args, **kwargs)

    return _fn
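
# Illustrative usage (torch.Tensor.add_ here is just a stand-in inplace op):
#
#     safe_add_ = _get_safe_inplace(torch.Tensor.add_)
#     t = torch.ones(3, requires_grad=True)
#     out = safe_add_(t, 1)   # mutates the clone; `t` itself is untouched
#
# Cloning also sidesteps the autograd error from in-place ops on leaf tensors
# that require grad, and keeps OpInfo sample inputs reusable across samples.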


def _test_make_fx_helper(self, device, dtype, op, tracing_mode, inplace=False, out=False):
    fn = _get_safe_inplace(op.get_inplace()) if inplace else op.op
    sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)

    # Limit ourselves to first 100 inputs so symbolic tracing tests don't take too long
    count = 100
    if out:
        count = 5
    for sample_input in itertools.islice(sample_inputs_itr, count):
        if inplace and sample_input.broadcasts_input:
            continue
        args = [sample_input.input] + list(sample_input.args)
        kwargs = sample_input.kwargs
        if out:
            expected = fn(*args, **kwargs)
            kwargs['out'] = expected

        try:
            optests.make_fx_check(fn, args, kwargs, tracing_mode, self.assertEqual,
                                  randomize_data=True)
        except DynamicOutputShapeException:
            self.skipTest("Dynamic output shape operation in trace")


def skipIfNameMatches(pattern):
    """
    Decorator to skip a test if its name matches the given pattern.
    """
    def decorator(test_func):
        @functools.wraps(test_func)
        def wrapper(*args, **kwargs):
            if re.match(pattern, test_func.__name__):
                raise unittest.SkipTest(f"Test '{test_func.__name__}' skipped because its name matches the pattern '{pattern}'")
            return test_func(*args, **kwargs)
        return wrapper
    return decorator
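
# Illustrative usage (hypothetical test name):
#
#     @skipIfNameMatches(r"test_flaky_.*")
#     def test_flaky_thing(self):
#         ...
#
# Note that re.match anchors at the start of the string, so the pattern only
# needs to match a prefix of the test name.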


# Auto functionalize shouldn't work with make_fx directly
filtered_hop_db = [op for op in hop_db if op.name != "auto_functionalize"]


@unittest.skipIf(not torch._dynamo.is_dynamo_supported(), "Cond requires dynamo")
class TestProxyTensorOpInfo(TestCase):
    @ops(op_db + filtered_hop_db + custom_op_db, allowed_dtypes=(torch.float,))
    @skipOps('TestProxyTensorOpInfo', 'test_make_fx_exhaustive', make_fx_failures.union(only_real_tensor_failures))
    def test_make_fx_exhaustive(self, device, dtype, op):
        _test_make_fx_helper(self, device, dtype, op, "real")

    @ops(op_db + filtered_hop_db + custom_op_db, allowed_dtypes=(torch.float,))
    @skipOps('TestProxyTensorOpInfo', 'test_make_fx_fake_exhaustive',
             make_fx_failures.union(fake_tensor_failures, only_fake_tensor_failures))
    def test_make_fx_fake_exhaustive(self, device, dtype, op):
        _test_make_fx_helper(self, device, dtype, op, "fake")

    @ops(op_db + filtered_hop_db + custom_op_db, allowed_dtypes=(torch.float,))
    @skipOps('TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive',
             make_fx_failures | fake_tensor_failures | symbolic_tensor_failures)
    def test_make_fx_symbolic_exhaustive(self, device, dtype, op):
        _test_make_fx_helper(self, device, dtype, op, "symbolic")

    @ops(op_db + custom_op_db, allowed_dtypes=(torch.float,))
    @skipOps('TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive_inplace',
             make_fx_failures | fake_tensor_failures | symbolic_tensor_failures | inplace_symbolic_tensor_failures)
    def test_make_fx_symbolic_exhaustive_inplace(self, device, dtype, op):
        if not op.get_inplace():
            self.skipTest("No inplace variant for this op")
        _test_make_fx_helper(self, device, dtype, op, "symbolic", inplace=True)

    @ops(op_db + custom_op_db, allowed_dtypes=(torch.float,))
    @skipOps('TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive_out',
             make_fx_failures | fake_tensor_failures | symbolic_tensor_failures | out_symbolic_tensor_failures)
    def test_make_fx_symbolic_exhaustive_out(self, device, dtype, op):
        if not op.supports_out:
            self.skipTest("Op doesn't support out")
        _test_make_fx_helper(self, device, dtype, op, "symbolic", out=True)
only_for = ("cpu")
|
|
|
|
|
instantiate_device_type_tests(TestProxyTensorOpInfo, globals(), only_for=only_for)
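
# instantiate_device_type_tests generates concrete per-device classes from the
# generic one above (with only_for=("cpu",), just TestProxyTensorOpInfoCPU) and
# drops the generic class so only the instantiated tests get collected.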


if __name__ == '__main__':
    run_tests()