mirror of
https://github.com/saymrwulf/pytorch.git
synced 2026-05-14 20:57:59 +00:00
[LT] Remove torch::lazy::convertShapes (#71291)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/71291 This commit removes torch::lazy::convertShapes since it's no longer used. In addition, it replaces a numel logic within LTCTensorImpl. Test Plan: ./build/bin/test_lazy CI in lazy_tensor_staging branch Reviewed By: wconstab Differential Revision: D33575084 Pulled By: alanwaketan fbshipit-source-id: b104ef39fd552822e1f4069eab2cb942d48423a6
This commit is contained in:
parent
c7d1501e4d
commit
680d61daab
4 changed files with 1 addition and 41 deletions
|
|
@@ -80,22 +80,5 @@ TEST(ShapeTest, Ostream) {
|
|||
EXPECT_EQ(shape.to_string(), ss.str());
|
||||
}
|
||||
|
||||
TEST(ShapeTest, ConvertShapes) {
|
||||
auto shape1 = Shape(c10::ScalarType::Long, {1, 2, 3});
|
||||
auto shape2 = Shape(c10::ScalarType::Float, {1, 2});
|
||||
|
||||
auto shapes1 = convertShapes({}, {});
|
||||
EXPECT_TRUE(shapes1.empty());
|
||||
|
||||
auto shapes2 = convertShapes({c10::ScalarType::Long}, {{1, 2, 3}});
|
||||
EXPECT_EQ(shapes2.size(), 1);
|
||||
EXPECT_EQ(shapes2[0], shape1);
|
||||
|
||||
auto shapes3 = convertShapes({c10::ScalarType::Long, c10::ScalarType::Float}, {{1, 2, 3}, {1, 2}});
|
||||
EXPECT_EQ(shapes3.size(), 2);
|
||||
EXPECT_EQ(shapes3[0], shape1);
|
||||
EXPECT_EQ(shapes3[1], shape2);
|
||||
}
|
||||
|
||||
} // namespace lazy
|
||||
} // namespace torch
|
||||
|
|
|
|||
|
|
@@ -20,20 +20,6 @@ std::ostream& operator<<(std::ostream& out, const Shape& shape) {
|
|||
return out << shape.to_string();
|
||||
}
|
||||
|
||||
std::vector<Shape> convertShapes(
|
||||
const std::vector<at::ScalarType>& dtypes,
|
||||
const std::vector<std::vector<int64_t>>& shapes) {
|
||||
TORCH_INTERNAL_ASSERT(dtypes.size() == shapes.size());
|
||||
|
||||
std::vector<Shape> shape;
|
||||
shape.reserve(dtypes.size());
|
||||
for (const auto i : c10::irange(dtypes.size())) {
|
||||
shape.emplace_back(dtypes[i], shapes[i]);
|
||||
}
|
||||
|
||||
return shape;
|
||||
}
|
||||
|
||||
size_t Shape::numel() const {
|
||||
size_t elts = 1;
|
||||
for (auto size : sizes_) {
|
||||
|
|
|
|||
|
|
@@ -36,10 +36,5 @@ class TORCH_API Shape {
|
|||
|
||||
TORCH_API std::ostream& operator<<(std::ostream& out, const Shape& shape);
|
||||
|
||||
// TODO(alanwaketan): Rethink how code-gen uses shapes.
|
||||
TORCH_API std::vector<Shape> convertShapes(
|
||||
const std::vector<at::ScalarType>& dtypes,
|
||||
const std::vector<std::vector<int64_t>>& shapes);
|
||||
|
||||
} // namespace lazy
|
||||
} // namespace torch
|
||||
|
|
|
|||
|
|
@@ -139,11 +139,7 @@ void LTCTensorImpl::setup_size_properties() {
|
|||
// implementation uses in its APIs.
|
||||
auto shape = tensor_.shape();
|
||||
// We can't call refresh_numel() given we override sizes() too.
|
||||
// TODO(alanwaketan): Replace the following with Shape.numel().
|
||||
numel_ = 1;
|
||||
for (auto dim : shape.Get().sizes()) {
|
||||
numel_ *= dim;
|
||||
}
|
||||
numel_ = shape.Get().numel();
|
||||
sizes_and_strides_.set_sizes(shape.Get().sizes());
|
||||
// We can't call empty_tensor_restride(c10::MemoryFormat::Contiguous) given we override sizes() too.
|
||||
std::vector<int64_t> updated_strides;
|
||||
|
|
|
|||
Loading…
Reference in a new issue