Set up aten/src/ATen/functorch directory; move some files there (#84648)

This PR:
- sets up aten/src/ATen/functorch in PyTorch's build system
- Moves BatchedTensorImpl.h and BatchedTensorImpl.cpp
there as a test.

Test Plan:
- functorch build and test should pass

Differential Revision: [D39315051](https://our.internmc.facebook.com/intern/diff/D39315051)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/84648
Approved by: https://github.com/ezyang
This commit is contained in:
Richard Zou 2022-09-09 08:00:04 -07:00 committed by PyTorch MergeBot
parent 8e57ce63a1
commit 0a89bdf989
16 changed files with 20 additions and 18 deletions

View file

@ -133,6 +133,7 @@ filegroup(
name = "aten_base_cpp",
srcs = glob([
"aten/src/ATen/*.cpp",
"aten/src/ATen/functorch/*.cpp",
"aten/src/ATen/detail/*.cpp",
"aten/src/ATen/cpu/*.cpp",
]),

View file

@ -56,8 +56,8 @@ if(NOT BUILD_CAFFE2 AND NOT BUILD_LITE_INTERPRETER)
EXCLUDE(ATen_CORE_TEST_SRCS "${ATen_CORE_TEST_SRCS}" ${ATen_CORE_EXCLUDED_TEST_SRCS})
endif()
file(GLOB base_h "*.h" "detail/*.h" "cpu/*.h" "cpu/vec/vec512/*.h" "cpu/vec/vec256/*.h" "cpu/vec/*.h" "quantized/*.h")
file(GLOB base_cpp "*.cpp" "detail/*.cpp" "cpu/*.cpp")
file(GLOB base_h "*.h" "detail/*.h" "cpu/*.h" "cpu/vec/vec512/*.h" "cpu/vec/vec256/*.h" "cpu/vec/*.h" "quantized/*.h" "functorch/*.h")
file(GLOB base_cpp "*.cpp" "detail/*.cpp" "cpu/*.cpp" "functorch/*.cpp")
file(GLOB cuda_h "cuda/*.h" "cuda/detail/*.h" "cuda/*.cuh" "cuda/detail/*.cuh")
file(GLOB cuda_cpp "cuda/*.cpp" "cuda/detail/*.cpp")
file(GLOB cuda_nvrtc_stub_h "cuda/nvrtc_stub/*.h")

View file

@ -3,7 +3,7 @@
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <functorch/csrc/BatchedTensorImpl.h>
#include <ATen/functorch/BatchedTensorImpl.h>
#include <ATen/WrapDimUtils.h>
#include <c10/util/Exception.h>

View file

@ -12,8 +12,6 @@
#include <ATen/SmallVector.h>
#include <ATen/Tensor.h>
#include <functorch/csrc/Macros.h>
namespace at {
namespace functorch {
@ -42,7 +40,7 @@ constexpr int64_t kBatchDimsStackSize = 5;
//
// bt.sizes() returns (5, 7); bt.sum(0) performs a reduction over the (public)
// dim 0, which is equivalent to dim 3 in the underlying ones(2, 3, 5, 7) tensor.
struct BatchedTensorImpl : public c10::TensorImpl {
struct TORCH_API BatchedTensorImpl : public c10::TensorImpl {
explicit BatchedTensorImpl(at::DispatchKeySet key_set, Tensor value, int64_t dim, int64_t level);
// Returns batch dimension of this tensor
@ -136,10 +134,10 @@ inline std::bitset<kVmapNumLevels> createVmapLevelsBitset(int64_t level) {
}
// Use this to construct a BatchedTensor from a regular Tensor
FUNCTORCH_API Tensor makeBatched(const Tensor& tensor, int64_t dim, int64_t level);
TORCH_API Tensor makeBatched(const Tensor& tensor, int64_t dim, int64_t level);
// Adds a batch dim to `tensor`, returning a BatchedTensor
FUNCTORCH_API Tensor addBatchDim(const Tensor& tensor, int64_t dim, int64_t level);
TORCH_API Tensor addBatchDim(const Tensor& tensor, int64_t dim, int64_t level);
// Certain dispatch keys must be propagated to the BatchedTensor (or, in general,
// any wrapper Tensor subclasses). This is because there are methods on Tensor

View file

@ -807,6 +807,7 @@ def define_buck_targets(
("aten/src", "ATen/*.h"),
("aten/src", "ATen/cpu/**/*.h"),
("aten/src", "ATen/detail/*.h"),
("aten/src", "ATen/functorch/**/*.h"),
("aten/src", "ATen/quantized/*.h"),
("aten/src", "ATen/vulkan/*.h"),
("aten/src", "ATen/metal/*.h"),
@ -869,6 +870,7 @@ def define_buck_targets(
("", "torch/custom_class_detail.h"),
# Add again due to namespace difference from aten_header.
("", "aten/src/ATen/*.h"),
("", "aten/src/ATen/functorch/**/*.h"),
("", "aten/src/ATen/quantized/*.h"),
],
exclude = [

View file

@ -6,7 +6,7 @@
#include <functorch/csrc/DynamicLayer.h>
#include <functorch/csrc/TensorWrapper.h>
#include <functorch/csrc/BatchedTensorImpl.h>
#include <ATen/functorch/BatchedTensorImpl.h>
#include <functorch/csrc/BatchRulesHelper.h>
#include <torch/library.h>

View file

@ -1,5 +1,5 @@
#include <functorch/csrc/Interpreter.h>
#include <functorch/csrc/BatchedTensorImpl.h>
#include <ATen/functorch/BatchedTensorImpl.h>
#include <functorch/csrc/TensorWrapper.h>
#include <functorch/csrc/VmapInterpreter.h>
#include <functorch/csrc/FunctionalizeInterpreter.h>

View file

@ -7,7 +7,7 @@
#pragma once
#include <functorch/csrc/Macros.h>
#include <functorch/csrc/BatchedTensorImpl.h>
#include <ATen/functorch/BatchedTensorImpl.h>
namespace at {
namespace functorch {

View file

@ -6,7 +6,7 @@
#include <functorch/csrc/TensorWrapper.h>
#include <functorch/csrc/DynamicLayer.h>
#include <functorch/csrc/BatchedTensorImpl.h>
#include <ATen/functorch/BatchedTensorImpl.h>
namespace at { namespace functorch {

View file

@ -5,7 +5,7 @@
// LICENSE file in the root directory of this source tree.
#pragma once
#include <ATen/Tensor.h>
#include <functorch/csrc/BatchedTensorImpl.h>
#include <ATen/functorch/BatchedTensorImpl.h>
#include <functorch/csrc/DynamicLayer.h>
// NOTE: [vmap plumbing]

View file

@ -3,7 +3,7 @@
#include <ATen/ATen.h>
#include <ATen/WrapDimUtils.h>
#include <functorch/csrc/TensorWrapper.h>
#include <functorch/csrc/BatchedTensorImpl.h>
#include <ATen/functorch/BatchedTensorImpl.h>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <c10/util/irange.h>

View file

@ -6,7 +6,7 @@
#include <functorch/csrc/TensorWrapper.h>
#include <functorch/csrc/DynamicLayer.h>
#include <functorch/csrc/BatchedTensorImpl.h>
#include <ATen/functorch/BatchedTensorImpl.h>
#include <torch/library.h>
#include <ATen/core/dispatch/Dispatcher.h>

View file

@ -7,7 +7,7 @@
#include <torch/library.h>
#include <ATen/ATen.h>
#include <functorch/csrc/LegacyVmapTransforms.h>
#include <functorch/csrc/BatchedTensorImpl.h>
#include <ATen/functorch/BatchedTensorImpl.h>
#include <functorch/csrc/PlumbingHelper.h>
#include <functorch/csrc/DynamicLayer.h>
#include <ATen/core/dispatch/Dispatcher.h>

View file

@ -13,7 +13,7 @@
#include <vector>
//#include <torch/csrc/autograd/python_variable.h>
#include <torch/csrc/Export.h>
#include <functorch/csrc/BatchedTensorImpl.h>
#include <ATen/functorch/BatchedTensorImpl.h>
#include <functorch/csrc/DynamicLayer.h>
#include <ATen/ATen.h>
#include <memory>

View file

@ -10,7 +10,7 @@
#include <functorch/csrc/TensorWrapper.h>
#include <functorch/csrc/DynamicLayer.h>
#include <functorch/csrc/BatchedTensorImpl.h>
#include <ATen/functorch/BatchedTensorImpl.h>
#include <functorch/csrc/LegacyVmapTransforms.h>
#include <functorch/csrc/BatchedFallback.h>
#include <functorch/csrc/BatchRulesHelper.h>

View file

@ -996,6 +996,7 @@ def main():
'include/ATen/cuda/detail/*.cuh',
'include/ATen/cuda/detail/*.h',
'include/ATen/cudnn/*.h',
'include/ATen/functorch/*.h',
'include/ATen/ops/*.h',
'include/ATen/hip/*.cuh',
'include/ATen/hip/*.h',