diff --git a/torch/autograd/_functions/tensor.py b/torch/autograd/_functions/tensor.py
index 457a11bb41b..8e334d5dc40 100644
--- a/torch/autograd/_functions/tensor.py
+++ b/torch/autograd/_functions/tensor.py
@@ -1,4 +1,5 @@
 from functools import reduce
+import warnings
 import torch
 import torch._utils
 from ..function import Function
@@ -8,6 +9,8 @@ class Type(Function):
 
     @staticmethod
     def forward(ctx, i, dest_type):
+        warnings.warn("torch.autograd._functions.Type is deprecated as of PyTorch 2.1, please use "
+                      "torch.tensor.to(dtype=dtype) instead.")
         ctx.input_type = type(i)
         ctx.input_device = -1 if not i.is_cuda else i.get_device()
         return i.type(dest_type)
diff --git a/torch/autograd/graph.py b/torch/autograd/graph.py
index dc657e18827..182a5e8f56f 100644
--- a/torch/autograd/graph.py
+++ b/torch/autograd/graph.py
@@ -258,16 +258,17 @@ class save_on_cpu(saved_tensors_hooks):
         >>> # all intermediary tensors are released (deleted) after the call to backward
 
     """
 
-    def __init__(self, pin_memory=False):
+    def __init__(self, pin_memory=False, device_type="cuda"):
+        device_module = getattr(torch, device_type, torch.cuda)
+
         def pack_to_cpu(tensor):
             if not pin_memory:
                 return (tensor.device, tensor.cpu())
-
             packed = torch.empty(
                 tensor.size(),
                 dtype=tensor.dtype,
                 layout=tensor.layout,
-                pin_memory=(torch.cuda.is_available() and not tensor.is_sparse))
+                pin_memory=(device_module.is_available() and not tensor.is_sparse))
             packed.copy_(tensor)
             return (tensor.device, packed)