From 2961ea80f5d5fbcb9ef86d95ce08f1fa603863bf Mon Sep 17 00:00:00 2001
From: shibo19 <18207133434@163.com>
Date: Fri, 9 Jun 2023 05:04:57 +0000
Subject: [PATCH] Deprecate "Type" and support more devices for save_on_cpu
 (#103245)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixes #ISSUE_NUMBER
1. The class "Type" is no longer used anywhere, so a deprecation warning is
   added ahead of removing it in the future.
2. Add an argument (default "cuda") to save_on_cpu so that it can support
   more device types (like privateuse1).
Pull Request resolved: https://github.com/pytorch/pytorch/pull/103245
Approved by: https://github.com/soulitzer
---
 torch/autograd/_functions/tensor.py | 3 +++
 torch/autograd/graph.py             | 7 ++++---
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/torch/autograd/_functions/tensor.py b/torch/autograd/_functions/tensor.py
index 457a11bb41b..8e334d5dc40 100644
--- a/torch/autograd/_functions/tensor.py
+++ b/torch/autograd/_functions/tensor.py
@@ -1,4 +1,5 @@
 from functools import reduce
+import warnings
 import torch
 import torch._utils
 from ..function import Function
@@ -8,6 +9,8 @@ class Type(Function):
 
     @staticmethod
     def forward(ctx, i, dest_type):
+        warnings.warn("torch.autograd._functions.Type is deprecated as of PyTorch 2.1, please use "
+                      "torch.tensor.to(dtype=dtype) instead.")
         ctx.input_type = type(i)
         ctx.input_device = -1 if not i.is_cuda else i.get_device()
         return i.type(dest_type)
diff --git a/torch/autograd/graph.py b/torch/autograd/graph.py
index dc657e18827..182a5e8f56f 100644
--- a/torch/autograd/graph.py
+++ b/torch/autograd/graph.py
@@ -258,16 +258,17 @@ class save_on_cpu(saved_tensors_hooks):
         >>> # all intermediary tensors are released (deleted) after the call to backward
 
     """
 
-    def __init__(self, pin_memory=False):
+    def __init__(self, pin_memory=False, device_type="cuda"):
+        device_module = getattr(torch, device_type, torch.cuda)
+
         def pack_to_cpu(tensor):
             if not pin_memory:
                 return (tensor.device, tensor.cpu())
-
             packed = torch.empty(
                 tensor.size(),
                 dtype=tensor.dtype,
                 layout=tensor.layout,
-                pin_memory=(torch.cuda.is_available() and not tensor.is_sparse))
+                pin_memory=(device_module.is_available() and not tensor.is_sparse))
             packed.copy_(tensor)
             return (tensor.device, packed)