Deprecate "Type" and support more devices for save_on_cpu (#103245)

Fixes #ISSUE_NUMBER
1. The class named "Type" is no longer used anywhere, so I added a deprecation warning so it can be removed in the future.
2. Added an argument (default "cuda") to save_on_cpu so that it can support more device types (like privateuse1).
Pull Request resolved: https://github.com/pytorch/pytorch/pull/103245
Approved by: https://github.com/soulitzer
This commit is contained in:
shibo19 2023-06-09 05:04:57 +00:00 committed by PyTorch MergeBot
parent c037088ac4
commit 2961ea80f5
2 changed files with 7 additions and 3 deletions

View file

@@ -1,4 +1,5 @@
from functools import reduce
import warnings
import torch
import torch._utils
from ..function import Function
@@ -8,6 +9,8 @@ class Type(Function):
# Deprecated autograd Function.forward: casts the input tensor `i` to
# `dest_type`, warning callers to migrate to Tensor.to(dtype=...) as of
# PyTorch 2.1. NOTE(review): this is a diff fragment — original indentation
# was stripped by the diff rendering; code bytes kept as shown.
@staticmethod
def forward(ctx, i, dest_type):
warnings.warn("torch.autograd._functions.Type is deprecated as of PyTorch 2.1, please use "
"torch.tensor.to(dtype=dtype) instead.")
# Remember the original tensor class — presumably so backward (not
# visible here) can cast the gradient back; TODO confirm.
ctx.input_type = type(i)
# -1 encodes "not a CUDA tensor"; otherwise record the CUDA device index.
ctx.input_device = -1 if not i.is_cuda else i.get_device()
return i.type(dest_type)

View file

@@ -258,16 +258,17 @@ class save_on_cpu(saved_tensors_hooks):
>>> # all intermediary tensors are released (deleted) after the call to backward
"""
def __init__(self, pin_memory=False):
def __init__(self, pin_memory=False, device_type="cuda"):
device_module = getattr(torch, device_type, torch.cuda)
def pack_to_cpu(tensor):
if not pin_memory:
return (tensor.device, tensor.cpu())
packed = torch.empty(
tensor.size(),
dtype=tensor.dtype,
layout=tensor.layout,
pin_memory=(torch.cuda.is_available() and not tensor.is_sparse))
pin_memory=(device_module.is_available() and not tensor.is_sparse))
packed.copy_(tensor)
return (tensor.device, packed)