Revert "Add support for directly passing symint to empty"

This reverts commit 05664a957e.

Reverted https://github.com/pytorch/pytorch/pull/79494 on behalf of https://github.com/ezyang due to conflicts with an earlier diff that needs to be reverted
PyTorch MergeBot 2022-06-15 13:49:56 +00:00
parent 83e575c510
commit aa9d25efc0
6 changed files with 0 additions and 98 deletions

@@ -275,71 +275,6 @@ TensorBase empty_meta(
return empty_meta(size, dtype, memory_format_opt);
}
TensorBase empty_symint_meta(
SymIntArrayRef size,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt
) {
auto device = device_or_default(device_opt);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device.type() == DeviceType::Meta);
// NB: because there is no SparseMeta (yet), non-strided layout is
// exerciseable
TORCH_CHECK_NOT_IMPLEMENTED(
layout_or_default(layout_opt) == Layout::Strided,
"non-strided meta tensors not supported yet"
);
auto scalar_type = dtype_or_default(dtype_opt);
auto *allocator = GetAllocator(kMeta);
constexpr c10::DispatchKeySet meta_dks(c10::DispatchKey::Meta);
// TODO: do this. Note that naive implementation will choke on truly
// unknown sizes without on the fly reasoning
// at::detail::check_size_nonnegative(size);
at::detail::raise_warning_for_complex_half(scalar_type);
caffe2::TypeMeta dtype = scalarTypeToTypeMeta(scalar_type);
SymInt size_bytes = dtype.itemsize();
for (auto s : size) {
size_bytes = size_bytes * s;
}
auto storage_impl = c10::make_intrusive<StorageImpl>(
c10::StorageImpl::use_byte_size_t(),
size_bytes,
allocator,
/*resizeable=*/true);
auto tensor = detail::make_tensor_base<TensorImpl>(
std::move(storage_impl), meta_dks, dtype);
int64_t dim = size.size();
std::vector<SymInt> strides;
strides.resize(dim);
// TODO: Move this into TensorImpl
auto memory_format = memory_format_opt.value_or(MemoryFormat::Contiguous);
switch (memory_format) {
case MemoryFormat::Contiguous: {
if (dim > 0) {
const auto last_idx = dim - 1;
strides.at(last_idx) = 1;
for (auto i = last_idx - 1; i >= 0; --i) {
// TODO: max with 1
strides.at(i) = strides.at(i+1) * size.at(i+1);
}
}
break;
}
default:
TORCH_CHECK(0, "other memory format not implemented yet");
}
tensor.unsafeGetTensorImpl()->set_sym_sizes_and_strides(size, strides);
return tensor;
}
TensorBase empty_meta(
IntArrayRef size, const TensorOptions &options) {
return at::detail::empty_meta(

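For reference, the removed empty_symint_meta built a meta tensor in two arithmetic steps: it multiplied the dtype's item size by every (possibly symbolic) dimension to get the storage byte count, and then filled in contiguous strides from right to left. Below is a minimal standalone sketch of that arithmetic using plain int64_t in place of c10::SymInt so it compiles without PyTorch; the helper names (contiguous_strides, storage_bytes) are illustrative only and are not part of the ATen API.

#include <cstdint>
#include <iostream>
#include <vector>

// Contiguous strides: each stride is the product of all sizes to its right,
// mirroring the MemoryFormat::Contiguous loop in the reverted code.
std::vector<int64_t> contiguous_strides(const std::vector<int64_t>& sizes) {
  const int64_t dim = static_cast<int64_t>(sizes.size());
  std::vector<int64_t> strides(dim);
  if (dim > 0) {
    const int64_t last_idx = dim - 1;
    strides.at(last_idx) = 1;
    for (int64_t i = last_idx - 1; i >= 0; --i) {
      strides.at(i) = strides.at(i + 1) * sizes.at(i + 1);
    }
  }
  return strides;
}

// Storage size in bytes: itemsize times the product of all sizes, as the
// reverted code computed before creating the StorageImpl.
int64_t storage_bytes(int64_t itemsize, const std::vector<int64_t>& sizes) {
  int64_t bytes = itemsize;
  for (int64_t s : sizes) {
    bytes *= s;
  }
  return bytes;
}

int main() {
  const std::vector<int64_t> sizes{2, 3, 4};
  // A contiguous 2x3x4 float32 tensor: strides {12, 4, 1}, 96 bytes of storage.
  for (int64_t s : contiguous_strides(sizes)) {
    std::cout << s << ' ';
  }
  std::cout << '\n' << storage_bytes(/*itemsize=*/4, sizes) << " bytes\n";
  return 0;
}

With real c10::SymInt sizes both products stay symbolic, which is why the removed code left check_size_nonnegative commented out (see the TODO above).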

@@ -87,14 +87,6 @@ TORCH_API TensorBase empty_meta(
c10::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt);
TORCH_API TensorBase empty_symint_meta(
SymIntArrayRef size,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt);
TORCH_API TensorBase empty_meta(IntArrayRef size, const TensorOptions& options);
TORCH_API TensorBase

@@ -24,18 +24,6 @@ Tensor empty_meta(
size, dtype_opt, layout_opt, device_opt, pin_memory_opt, memory_format_opt);
}
Tensor empty_symint_meta(
SymIntArrayRef size,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt
) {
return at::detail::empty_symint_meta(
size, dtype_opt, layout_opt, device_opt, pin_memory_opt, memory_format_opt);
}
Tensor empty_strided_meta(
IntArrayRef size,
IntArrayRef stride,

@@ -1967,10 +1967,6 @@
SparseCsrCPU, SparseCsrCUDA: empty_sparse_compressed
QuantizedCPU, QuantizedCUDA: empty_unknown_quantized
- func: empty.SymInt(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
dispatch:
Meta: empty_symint_meta
# We do not make new_empty a composite that calls into new_empty_strided, as the strided version
# is significantly more difficult to implement by different backends
- func: new_empty(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

@@ -39,11 +39,9 @@ SymInt SymInt::operator*(SymInt sci) const {
// TODO: technically we need to check that the classes match
if (!a) {
a = common->wrap(data_);
toSymInt(a); //
}
if (!b) {
b = common->wrap(sci.data_);
toSymInt(b);
}
return SymInt::toSymInt(a->add(b));
}

@@ -290,13 +290,6 @@ class TestPySymInt(TestCase):
self.assertTrue(str(expand_x.sym_size(1)), str(x.sym_size(0)))
self.assertTrue(str(expand_x.sym_size(1)), str(result.sym_size(0)))
@skipIfNoSympy
def test_meta_symint(self):
shape_env = ShapeEnv()
a0 = shape_env.create_symint("a0", 2)
r = torch.empty(a0, device='meta')
self.assertIsInstance(r.sym_size(0), CPP_SYMINT_CLASS)
if __name__ == '__main__':
run_tests()