Mirror of https://github.com/saymrwulf/pytorch.git
synced 2026-05-14 20:57:59 +00:00
Fix errors from [7k github models](https://github.com/pytorch/torchdynamo/issues/1884) ``` Traceback (most recent call last): File "/scratch/ybliang/work/repos/pytorch/torch/_dynamo/utils.py", line 1062, in get_fake_value return wrap_fake_exception( File "/scratch/ybliang/work/repos/pytorch/torch/_dynamo/utils.py", line 739, in wrap_fake_exception return fn() File "/scratch/ybliang/work/repos/pytorch/torch/_dynamo/utils.py", line 1063, in <lambda> lambda: run_node(tx.output, node, args, kwargs, nnmodule) File "/scratch/ybliang/work/repos/pytorch/torch/_dynamo/utils.py", line 1112, in run_node raise RuntimeError( RuntimeError: Failed running call_function <function einsum at 0x7fd8f246a4c0>(*('i,j->ij', FakeTensor(FakeTensor(..., device='meta', size=(4,)), cpu), FakeTensor(FakeTensor(..., device='meta', size=(2,)), cuda:0)), **{}): Unhandled FakeTensor Device Propagation for aten.mul.Tensor, found two different devices cpu, cuda:0 (scroll up for backtrace) ``` The root cause is: ```tensor.type()``` should return ```torch.cuda.FloatTensor``` rather than ```torch.FloatTensor``` if it's on GPU. Pull Request resolved: https://github.com/pytorch/pytorch/pull/90021 Approved by: https://github.com/jansel |
| Name | | |
|---|---|---|
| .. | | |
| optimizations | ||
| variables | ||
| __init__.py | ||
| allowed_functions.py | ||
| bytecode_analysis.py | ||
| bytecode_transformation.py | ||
| codegen.py | ||
| config.py | ||
| convert_frame.py | ||
| debug_utils.py | ||
| eval_frame.py | ||
| exc.py | ||
| guards.py | ||
| logging.py | ||
| mutation_guard.py | ||
| output_graph.py | ||
| profiler.py | ||
| replay_record.py | ||
| resume_execution.py | ||
| side_effects.py | ||
| skipfiles.py | ||
| source.py | ||
| symbolic_convert.py | ||
| test_case.py | ||
| test_minifier_common.py | ||
| testing.py | ||
| types.py | ||
| utils.py | ||