From 67d0d14908bbfa786baddeab4282fccf931041dc Mon Sep 17 00:00:00 2001
From: Peter Goldsborough
Date: Fri, 4 May 2018 08:04:57 -0700
Subject: [PATCH] Rename autograd namespace to torch and change torch.h into
 python.h (#7267)

* Rename autograd namespace to torch and change torch.h into python.h
* Include torch.h instead of python.h in test/cpp/api
* Change some mentions of torch.h to python.h in C++ extensions
* Set paths directly, without find_path
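
A before/after sketch of what the rename means for code using the C++ API
(illustrative only; hasCuda() is taken from the test changes below, the
surrounding scaffolding is invented for this example):

    // Before this patch:
    //   #include <torch/autograd.h>   // C++ API header
    //   using namespace autograd;     // C++ API namespace
    // After this patch the C++ API lives in <torch/torch.h> and namespace
    // torch; <torch/python.h> (formerly torch/torch.h) is reserved for
    // Python-binding code such as C++ extensions.
    #include <torch/torch.h>

    int main() {
      // Free functions such as hasCuda() moved from autograd:: to torch::.
      if (torch::hasCuda()) {
        // exercise CUDA-only code paths
      }
      return 0;
    }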
---
 setup.py                                      |  7 ++--
 test/cpp/api/container.cpp                    |  4 +-
 test/cpp/api/integration.cpp                  |  4 +-
 test/cpp/api/{main.cpp.cpp => main.cpp}       |  4 +-
 test/cpp/api/misc.cpp                         |  4 +-
 test/cpp/api/optim.cpp                        |  4 +-
 test/cpp/api/rnn.cpp                          |  4 +-
 test/cpp/api/serialization.cpp                |  4 +-
 test/cpp_extensions/cuda_extension.cpp        |  2 +-
 test/cpp_extensions/doubler.h                 |  2 +-
 test/cpp_extensions/extension.cpp             |  2 +-
 test/cpp_extensions/jit_extension.cpp         |  2 +-
 test/cpp_extensions/jit_extension2.cpp        |  2 +-
 test/test_cpp_extensions.py                   |  2 +-
 tools/cpp_build/libtorch/CMakeLists.txt       | 11 +++---
 torch/csrc/api/include/torch/containers.h     |  6 +--
 torch/csrc/api/include/torch/detail.h         |  6 +--
 torch/csrc/api/include/torch/optimizers.h     |  4 +-
 .../api/include/torch/python.h}               |  0
 torch/csrc/api/include/torch/serialization.h  | 38 +++++++++----------
 .../api/include/torch/{autograd.h => torch.h} |  0
 torch/csrc/api/src/containers.cpp             |  4 +-
 torch/csrc/api/src/detail.cpp                 |  4 +-
 torch/csrc/api/src/optimizers.cpp             |  4 +-
 torch/csrc/{torch.cpp => api/src/python.cpp}  |  2 +-
 torch/csrc/tensor/python_tensor.cpp           |  6 ++-
 torch/utils/cpp_extension.py                  |  6 +--
 27 files changed, 70 insertions(+), 68 deletions(-)
 rename test/cpp/api/{main.cpp.cpp => main.cpp} (90%)
 rename torch/{torch.h => csrc/api/include/torch/python.h} (100%)
 rename torch/csrc/api/include/torch/{autograd.h => torch.h} (100%)
 rename torch/csrc/{torch.cpp => api/src/python.cpp} (93%)

diff --git a/setup.py b/setup.py
index b1558fa19e6..cb5c341a978 100644
--- a/setup.py
+++ b/setup.py
@@ -312,7 +312,8 @@ class build_deps(Command):
         self.copy_tree('torch/csrc', 'torch/lib/include/torch/csrc/')
         self.copy_tree('third_party/pybind11/include/pybind11/',
                        'torch/lib/include/pybind11')
-        self.copy_file('torch/torch.h', 'torch/lib/include/torch/torch.h')
+        self.copy_file('torch/csrc/api/include/torch/python.h',
+                       'torch/lib/include/torch/python.h')
 
 
 build_dep_cmds = {}
@@ -600,7 +601,7 @@ main_sources = [
     "torch/csrc/DynamicTypes.cpp",
     "torch/csrc/assertions.cpp",
     "torch/csrc/byte_order.cpp",
-    "torch/csrc/torch.cpp",
+    "torch/csrc/api/src/python.cpp",
     "torch/csrc/utils.cpp",
     "torch/csrc/utils/cuda_lazy_init.cpp",
     "torch/csrc/utils/device.cpp",
@@ -901,6 +902,6 @@ if __name__ == '__main__':
             'lib/include/torch/csrc/jit/*.h',
             'lib/include/torch/csrc/utils/*.h',
             'lib/include/torch/csrc/cuda/*.h',
-            'lib/include/torch/torch.h',
+            'lib/include/torch/python.h',
         ]
     })
diff --git a/test/cpp/api/container.cpp b/test/cpp/api/container.cpp
index be6ba9005ba..67e8bc7ba86 100644
--- a/test/cpp/api/container.cpp
+++ b/test/cpp/api/container.cpp
@@ -1,8 +1,8 @@
 #include <catch.hpp>
 
-#include <torch/autograd.h>
+#include <torch/torch.h>
 
-using namespace autograd;
+using namespace torch;
 
 AUTOGRAD_CONTAINER_CLASS(TestModel) {
  public:
diff --git a/test/cpp/api/integration.cpp b/test/cpp/api/integration.cpp
index 47032f89ace..e94a626b795 100644
--- a/test/cpp/api/integration.cpp
+++ b/test/cpp/api/integration.cpp
@@ -1,8 +1,8 @@
 #include <catch.hpp>
 
-#include <torch/autograd.h>
+#include <torch/torch.h>
 
-using namespace autograd;
+using namespace torch;
 
 #include
 
diff --git a/test/cpp/api/main.cpp.cpp b/test/cpp/api/main.cpp
similarity index 90%
rename from test/cpp/api/main.cpp.cpp
rename to test/cpp/api/main.cpp
index 860e981874c..a00d271830c 100644
--- a/test/cpp/api/main.cpp.cpp
+++ b/test/cpp/api/main.cpp
@@ -1,7 +1,7 @@
 #define CATCH_CONFIG_RUNNER
 #include <catch.hpp>
 
-#include <torch/autograd.h>
+#include <torch/torch.h>
 
 #include <iostream>
 
@@ -16,7 +16,7 @@ int main(int argc, char* argv[]) {
     return return_code;
   }
 
-  if (!autograd::hasCuda()) {
+  if (!torch::hasCuda()) {
     std::cerr << "CUDA not available. Disabling CUDA tests" << std::endl;
     // ~ disables the [cuda] tag.
     session.configData().testsOrTags.push_back("~[cuda]");
diff --git a/test/cpp/api/misc.cpp b/test/cpp/api/misc.cpp
index fb9c2337509..d55c15d4b26 100644
--- a/test/cpp/api/misc.cpp
+++ b/test/cpp/api/misc.cpp
@@ -1,8 +1,8 @@
 #include <catch.hpp>
 
-#include <torch/autograd.h>
+#include <torch/torch.h>
 
-using namespace autograd;
+using namespace torch;
 
 TEST_CASE("misc") {
   SECTION("no_grad") {
diff --git a/test/cpp/api/optim.cpp b/test/cpp/api/optim.cpp
index da97e27fed2..78d42f34014 100644
--- a/test/cpp/api/optim.cpp
+++ b/test/cpp/api/optim.cpp
@@ -1,8 +1,8 @@
 #include <catch.hpp>
 
-#include <torch/autograd.h>
+#include <torch/torch.h>
 
-using namespace autograd;
+using namespace torch;
 
 bool test_optimizer_xor(Optimizer optim, std::shared_ptr<ContainerImpl> model) {
   float running_loss = 1;
diff --git a/test/cpp/api/rnn.cpp b/test/cpp/api/rnn.cpp
index f951496c00b..93947b63fc7 100644
--- a/test/cpp/api/rnn.cpp
+++ b/test/cpp/api/rnn.cpp
@@ -1,8 +1,8 @@
 #include <catch.hpp>
 
-#include <torch/autograd.h>
+#include <torch/torch.h>
 
-using namespace autograd;
+using namespace torch;
 
 template <typename Func>
 bool test_RNN_xor(Func&& model_maker, bool cuda = false) {
diff --git a/test/cpp/api/serialization.cpp b/test/cpp/api/serialization.cpp
index e211ae8cf1e..328668c8b62 100644
--- a/test/cpp/api/serialization.cpp
+++ b/test/cpp/api/serialization.cpp
@@ -1,10 +1,10 @@
 #include <catch.hpp>
 
-#include <torch/autograd.h>
+#include <torch/torch.h>
 
 #include "cereal/archives/portable_binary.hpp"
 
-using namespace autograd;
+using namespace torch;
 
 TEST_CASE("serialization") {
   SECTION("undefined") {
diff --git a/test/cpp_extensions/cuda_extension.cpp b/test/cpp_extensions/cuda_extension.cpp
index 963850acc27..0d21c197561 100644
--- a/test/cpp_extensions/cuda_extension.cpp
+++ b/test/cpp_extensions/cuda_extension.cpp
@@ -1,4 +1,4 @@
-#include <torch/torch.h>
+#include <torch/python.h>
 
 // Declare the function from cuda_extension.cu. It will be compiled
 // separately with nvcc and linked with the object file of cuda_extension.cpp
diff --git a/test/cpp_extensions/doubler.h b/test/cpp_extensions/doubler.h
index 7b4b105b13a..b2d76dbe838 100644
--- a/test/cpp_extensions/doubler.h
+++ b/test/cpp_extensions/doubler.h
@@ -1,4 +1,4 @@
-#include <torch/torch.h>
+#include <torch/python.h>
 
 struct Doubler {
   Doubler(int A, int B) {
diff --git a/test/cpp_extensions/extension.cpp b/test/cpp_extensions/extension.cpp
index 9994e172936..0c158fc0d42 100644
--- a/test/cpp_extensions/extension.cpp
+++ b/test/cpp_extensions/extension.cpp
@@ -1,4 +1,4 @@
-#include <torch/torch.h>
+#include <torch/python.h>
 
 at::Tensor sigmoid_add(at::Tensor x, at::Tensor y) {
   return x.sigmoid() + y.sigmoid();
diff --git a/test/cpp_extensions/jit_extension.cpp b/test/cpp_extensions/jit_extension.cpp
index e62be5b38ba..abb01bb6c8e 100644
--- a/test/cpp_extensions/jit_extension.cpp
+++ b/test/cpp_extensions/jit_extension.cpp
@@ -1,4 +1,4 @@
-#include <torch/torch.h>
+#include <torch/python.h>
 
 #include "doubler.h"
diff --git a/test/cpp_extensions/jit_extension2.cpp b/test/cpp_extensions/jit_extension2.cpp
index e197308c3d5..b2e5911e3cf 100644
--- a/test/cpp_extensions/jit_extension2.cpp
+++ b/test/cpp_extensions/jit_extension2.cpp
@@ -1,4 +1,4 @@
-#include <torch/torch.h>
+#include <torch/python.h>
 
 using namespace at;
diff --git a/test/test_cpp_extensions.py b/test/test_cpp_extensions.py
index b1b473ca64b..1562ac96fbd 100644
--- a/test/test_cpp_extensions.py
+++ b/test/test_cpp_extensions.py
@@ -151,7 +151,7 @@ class TestCppExtension(common.TestCase):
         '''
 
         cpp_source2 = '''
-        #include <torch/torch.h>
+        #include <torch/python.h>
        at::Tensor sin_add(at::Tensor x, at::Tensor y);
        PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
          m.def("sin_add", &sin_add, "sin(x) + sin(y)");
diff --git a/tools/cpp_build/libtorch/CMakeLists.txt b/tools/cpp_build/libtorch/CMakeLists.txt
index a1c2aade5c2..61e4d9e3c3f 100644
--- a/tools/cpp_build/libtorch/CMakeLists.txt
+++ b/tools/cpp_build/libtorch/CMakeLists.txt
@@ -10,14 +10,13 @@ if (VERBOSE)
   message(STATUS "ATEN_BUILD_PATH is ${ATEN_BUILD_PATH}")
 endif()
 
-find_path(ATEN_INCLUDE_DIR ATen/ATen.h PATHS "${ATEN_PATH}/src/" NO_DEFAULT_PATH)
-find_path(ATEN_BUILD_INCLUDE_DIR ATen/Type.h PATHS "${ATEN_BUILD_PATH}/src/ATen" NO_DEFAULT_PATH)
+set(ATEN_INCLUDE_DIR "${ATEN_PATH}/src/")
+set(ATEN_BUILD_INCLUDE_DIR "${ATEN_BUILD_PATH}/src/ATen")
+set(TORCH_SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../../../torch")
+
 find_library(ATEN_LIBRARY ATen PATHS "${ATEN_BUILD_PATH}/src/ATen" NO_DEFAULT_PATH)
-
 find_library(NANOPB_LIBRARY protobuf-nanopb PATHS "${NANOPB_BUILD_PATH}" NO_DEFAULT_PATH)
 
-find_path(TORCH_SRC_DIR torch.h PATHS "${CMAKE_CURRENT_SOURCE_DIR}/../../../torch" NO_DEFAULT_PATH)
-
 set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
 set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
@@ -256,7 +255,7 @@ target_include_directories(torch
   "${ATEN_INCLUDE_DIR}/TH"
   "${ATEN_BUILD_INCLUDE_DIR}"
   "${ATEN_BUILD_PATH}/src/TH"
-  "${TORCH_SRC_DIR}/../"
+  "${TORCH_SRC_DIR}/.."
   "${CMAKE_CURRENT_SOURCE_DIR}")
 
 if (NOT NO_API)
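
For context, a complete minimal extension after this change looks as follows
(assembled from extension.cpp and the PYBIND11_MODULE pattern in
test_cpp_extensions.py above; the docstring text is illustrative):

    // C++ extensions now include <torch/python.h> rather than <torch/torch.h>,
    // which from this patch on refers to the pure C++ API.
    #include <torch/python.h>

    at::Tensor sigmoid_add(at::Tensor x, at::Tensor y) {
      return x.sigmoid() + y.sigmoid();
    }

    PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
      m.def("sigmoid_add", &sigmoid_add, "sigmoid(x) + sigmoid(y)");
    }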
"${CMAKE_CURRENT_SOURCE_DIR}") if (NOT NO_API) diff --git a/torch/csrc/api/include/torch/containers.h b/torch/csrc/api/include/torch/containers.h index c722ed5ad57..7370d548511 100644 --- a/torch/csrc/api/include/torch/containers.h +++ b/torch/csrc/api/include/torch/containers.h @@ -5,9 +5,9 @@ #include "torch/csrc/autograd/variable.h" #define AUTOGRAD_CONTAINER_CLASS(Type) \ - class Type : public autograd::Container_CRTP + class Type : public torch::Container_CRTP -namespace autograd { +namespace torch { class ContainerImpl { public: virtual ~ContainerImpl() = default; @@ -438,4 +438,4 @@ class RNN : public RNNBase { } }; -} // namespace autograd +} // namespace torch diff --git a/torch/csrc/api/include/torch/detail.h b/torch/csrc/api/include/torch/detail.h index 487a7c83216..0f8c2ec3791 100644 --- a/torch/csrc/api/include/torch/detail.h +++ b/torch/csrc/api/include/torch/detail.h @@ -12,7 +12,7 @@ #include "torch/csrc/utils/auto_gpu.h" #define AUTOGRAD_OPTIMIZER_CLASS(Type) \ - class Type : public autograd::Optimizer_CRTP + class Type : public torch::Optimizer_CRTP #define AUTOGRAD_KWARG(CLS, TYP, NAME, DEFAULT, OPTION) \ TYP NAME##_ = DEFAULT; \ CLS& NAME(TYP x = OPTION) { \ @@ -25,7 +25,7 @@ namespace tag = torch::autograd; using IntVec = decltype(std::declval().vec()); } // namespace -namespace autograd { +namespace torch { namespace detail { extern tag::Engine engine; } @@ -67,4 +67,4 @@ int getNumGPUs(); bool hasCuda(); bool hasCudnn(); -} // namespace autograd +} // namespace torch diff --git a/torch/csrc/api/include/torch/optimizers.h b/torch/csrc/api/include/torch/optimizers.h index 1d8ded1fcd1..f08174197e8 100644 --- a/torch/csrc/api/include/torch/optimizers.h +++ b/torch/csrc/api/include/torch/optimizers.h @@ -6,7 +6,7 @@ #include "cereal/access.hpp" #include "cereal/cereal.hpp" -namespace autograd { +namespace torch { class OptimizerImpl { public: OptimizerImpl(Container model) : model_(model) {} @@ -137,4 +137,4 @@ AUTOGRAD_OPTIMIZER_CLASS(Adam) { std::unordered_map max_exp_avg_sq_buffer_; }; -} // namespace autograd +} // namespace torch diff --git a/torch/torch.h b/torch/csrc/api/include/torch/python.h similarity index 100% rename from torch/torch.h rename to torch/csrc/api/include/torch/python.h diff --git a/torch/csrc/api/include/torch/serialization.h b/torch/csrc/api/include/torch/serialization.h index 1a253ae3438..12c845eeccf 100644 --- a/torch/csrc/api/include/torch/serialization.h +++ b/torch/csrc/api/include/torch/serialization.h @@ -9,7 +9,7 @@ #include "cereal/types/unordered_map.hpp" #include "cereal/types/vector.hpp" -namespace autograd { +namespace torch { // Some convenience functions for saving and loading template @@ -35,12 +35,12 @@ void load(std::istream& stream, T* obj) { template void save(std::string const& path, T const& obj) { std::ofstream os(path, std::ios::binary); - autograd::save(os, obj); + torch::save(os, obj); } template void load(std::string const& path, T& obj) { std::ifstream is(path, std::ios::binary); - autograd::load(is, obj); + torch::load(is, obj); } namespace detail { @@ -108,21 +108,21 @@ inline at::Backend backendFromId(int32_t id) { } } // namespace detail -} // namespace autograd +} // namespace torch // This is super ugly and I don't know how to simplify it -CEREAL_REGISTER_TYPE(autograd::SGD); -CEREAL_REGISTER_POLYMORPHIC_RELATION(autograd::OptimizerImpl, autograd::SGD); -CEREAL_REGISTER_TYPE(autograd::Adagrad); +CEREAL_REGISTER_TYPE(torch::SGD); +CEREAL_REGISTER_POLYMORPHIC_RELATION(torch::OptimizerImpl, torch::SGD); 
diff --git a/torch/csrc/api/include/torch/serialization.h b/torch/csrc/api/include/torch/serialization.h
index 1a253ae3438..12c845eeccf 100644
--- a/torch/csrc/api/include/torch/serialization.h
+++ b/torch/csrc/api/include/torch/serialization.h
@@ -9,7 +9,7 @@
 #include "cereal/types/unordered_map.hpp"
 #include "cereal/types/vector.hpp"
 
-namespace autograd {
+namespace torch {
 
 // Some convenience functions for saving and loading
 
@@ -35,12 +35,12 @@ void load(std::istream& stream, T* obj) {
 template <typename T>
 void save(std::string const& path, T const& obj) {
   std::ofstream os(path, std::ios::binary);
-  autograd::save(os, obj);
+  torch::save(os, obj);
 }
 template <typename T>
 void load(std::string const& path, T& obj) {
   std::ifstream is(path, std::ios::binary);
-  autograd::load(is, obj);
+  torch::load(is, obj);
 }
 
 namespace detail {
@@ -108,21 +108,21 @@ inline at::Backend backendFromId(int32_t id) {
   }
 }
 
 } // namespace detail
-} // namespace autograd
+} // namespace torch
 
 // This is super ugly and I don't know how to simplify it
-CEREAL_REGISTER_TYPE(autograd::SGD);
-CEREAL_REGISTER_POLYMORPHIC_RELATION(autograd::OptimizerImpl, autograd::SGD);
-CEREAL_REGISTER_TYPE(autograd::Adagrad);
+CEREAL_REGISTER_TYPE(torch::SGD);
+CEREAL_REGISTER_POLYMORPHIC_RELATION(torch::OptimizerImpl, torch::SGD);
+CEREAL_REGISTER_TYPE(torch::Adagrad);
 CEREAL_REGISTER_POLYMORPHIC_RELATION(
-    autograd::OptimizerImpl,
-    autograd::Adagrad);
-CEREAL_REGISTER_TYPE(autograd::RMSprop);
+    torch::OptimizerImpl,
+    torch::Adagrad);
+CEREAL_REGISTER_TYPE(torch::RMSprop);
 CEREAL_REGISTER_POLYMORPHIC_RELATION(
-    autograd::OptimizerImpl,
-    autograd::RMSprop);
-CEREAL_REGISTER_TYPE(autograd::Adam);
-CEREAL_REGISTER_POLYMORPHIC_RELATION(autograd::OptimizerImpl, autograd::Adam);
+    torch::OptimizerImpl,
+    torch::RMSprop);
+CEREAL_REGISTER_TYPE(torch::Adam);
+CEREAL_REGISTER_POLYMORPHIC_RELATION(torch::OptimizerImpl, torch::Adam);
 
 namespace cereal {
@@ -162,11 +162,11 @@ loadBinary(BinaryInputArchive& archive, void* data, std::size_t size) {
 template <class Archive>
 void save(Archive& archive, at::Tensor const& tensor) {
   if (!tensor.defined()) {
-    int32_t typeId = ::autograd::detail::scalarTypeId(at::ScalarType::Undefined);
+    int32_t typeId = ::torch::detail::scalarTypeId(at::ScalarType::Undefined);
     archive(CEREAL_NVP(typeId));
     return;
   } else {
-    int32_t typeId = ::autograd::detail::scalarTypeId(tensor.type().scalarType());
+    int32_t typeId = ::torch::detail::scalarTypeId(tensor.type().scalarType());
     archive(CEREAL_NVP(typeId));
   }
   auto sizes = std::vector<int64_t>();
@@ -175,7 +175,7 @@ void save(Archive& archive, at::Tensor const& tensor) {
     sizes.push_back(s);
   }
   auto contig = tensor.toBackend(at::kCPU).contiguous();
-  int32_t backend = ::autograd::detail::backendId(tensor.type().backend());
+  int32_t backend = ::torch::detail::backendId(tensor.type().backend());
 
   archive(CEREAL_NVP(backend), CEREAL_NVP(sizes));
   agimpl::saveBinary(
@@ -195,7 +195,7 @@ void load(Archive& archive, at::Tensor& tensor) {
   at::ScalarType type;
   int32_t typeId;
   archive(CEREAL_NVP(typeId));
-  type = ::autograd::detail::scalarTypeFromId(typeId);
+  type = ::torch::detail::scalarTypeFromId(typeId);
   if (type == at::ScalarType::Undefined) {
     tensor = at::Tensor();
     return;
@@ -206,7 +206,7 @@ void load(Archive& archive, at::Tensor& tensor) {
   auto buf = std::vector<uint8_t>();
   archive(CEREAL_NVP(backendId), CEREAL_NVP(sizes));
 
-  at::Backend backend = ::autograd::detail::backendFromId(backendId);
+  at::Backend backend = ::torch::detail::backendFromId(backendId);
   if (!tensor.defined() || tensor.type().scalarType() != type) {
     tensor = at::getType(backend, type).tensor();
   }
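
Usage sketch for the renamed serialization helpers above (MyModel and the
file name are placeholders; the overloads are the stream/path pairs declared
in this header, previously autograd::save and autograd::load):

    #include <torch/torch.h>

    void checkpoint(MyModel& model) {
      torch::save("model.cereal", model);  // was autograd::save
      torch::load("model.cereal", model);  // was autograd::load
    }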
diff --git a/torch/csrc/api/include/torch/autograd.h b/torch/csrc/api/include/torch/torch.h
similarity index 100%
rename from torch/csrc/api/include/torch/autograd.h
rename to torch/csrc/api/include/torch/torch.h
diff --git a/torch/csrc/api/src/containers.cpp b/torch/csrc/api/src/containers.cpp
index 2b5f487b4de..66b540f238c 100644
--- a/torch/csrc/api/src/containers.cpp
+++ b/torch/csrc/api/src/containers.cpp
@@ -1,6 +1,6 @@
 #include "torch/containers.h"
 
-namespace autograd {
+namespace torch {
 std::map<std::string, Variable> ContainerImpl::parameters() const {
   std::map<std::string, Variable> ret;
   for (auto pair : children_) {
@@ -634,4 +634,4 @@ variable_list Dropout2d::forward(variable_list inputs) {
   return lst;
 }
 
-} // namespace autograd
+} // namespace torch
diff --git a/torch/csrc/api/src/detail.cpp b/torch/csrc/api/src/detail.cpp
index 6b01713a2c4..063dceb8372 100644
--- a/torch/csrc/api/src/detail.cpp
+++ b/torch/csrc/api/src/detail.cpp
@@ -13,7 +13,7 @@
 
 #include "torch/detail.h"
 
-namespace autograd {
+namespace torch {
 namespace detail {
 tag::Engine engine;
 }
@@ -68,4 +68,4 @@ bool hasCudnn() {
   return hasCuda() && AT_CUDNN_ENABLED();
 }
 
-} // namespace autograd
+} // namespace torch
diff --git a/torch/csrc/api/src/optimizers.cpp b/torch/csrc/api/src/optimizers.cpp
index 2c90d8b39a7..3ffc3210183 100644
--- a/torch/csrc/api/src/optimizers.cpp
+++ b/torch/csrc/api/src/optimizers.cpp
@@ -1,6 +1,6 @@
 #include "torch/optimizers.h"
 
-namespace autograd {
+namespace torch {
 
 void OptimizerImpl::zero_grad() {
   for (auto p : model_->parameters()) {
@@ -196,4 +196,4 @@ void Adam::init_state() {
   exp_avg_sq_buffer_.clear();
 }
 
-} // namespace autograd
+} // namespace torch
diff --git a/torch/csrc/torch.cpp b/torch/csrc/api/src/python.cpp
similarity index 93%
rename from torch/csrc/torch.cpp
rename to torch/csrc/api/src/python.cpp
index e3202006be7..d37f8f80e2f 100644
--- a/torch/csrc/torch.cpp
+++ b/torch/csrc/api/src/python.cpp
@@ -1,4 +1,4 @@
-#include <torch/torch.h>
+#include <torch/python.h>
 
 #include
 #include
diff --git a/torch/csrc/tensor/python_tensor.cpp b/torch/csrc/tensor/python_tensor.cpp
index d76b13032d2..ad5e8a550f5 100644
--- a/torch/csrc/tensor/python_tensor.cpp
+++ b/torch/csrc/tensor/python_tensor.cpp
@@ -2,9 +2,8 @@
 
 #include <structmember.h>
 #include
 
-#include
-#include "torch/torch.h"
+#include "torch/csrc/api/include/torch/python.h"
 #include "torch/csrc/assertions.h"
 #include "torch/csrc/Dtype.h"
 #include "torch/csrc/DynamicTypes.h"
@@ -20,6 +19,9 @@
 #include "torch/csrc/utils/tensor_new.h"
 #include "torch/csrc/utils/tensor_types.h"
 
+#include
+#include
+
 namespace torch { namespace tensor {
 
 using namespace at;
diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py
index cb90794541e..a4eee65c30e 100644
--- a/torch/utils/cpp_extension.py
+++ b/torch/utils/cpp_extension.py
@@ -504,7 +504,7 @@ def load_inline(name,
     the necessary header includes, as well as the (pybind11) binding code. More
     precisely, strings passed to ``cpp_sources`` are first concatenated into a
     single ``.cpp`` file. This file is then prepended with ``#include
-    <torch/torch.h>``.
+    <torch/python.h>``.
 
     Furthermore, if the ``functions`` argument is supplied, bindings will be
     automatically generated for each function specified. ``functions`` can
@@ -551,7 +551,7 @@ def load_inline(name,
     if isinstance(cuda_sources, str):
         cuda_sources = [cuda_sources]
 
-    cpp_sources.insert(0, '#include <torch/torch.h>')
+    cpp_sources.insert(0, '#include <torch/python.h>')
 
     # If `functions` is supplied, we create the pybind11 bindings for the user.
     # Here, `functions` is (or becomes, after some processing) a map from
@@ -747,7 +747,7 @@ def _write_ninja_file(path,
     sources = [os.path.abspath(file) for file in sources]
     includes = [os.path.abspath(file) for file in extra_include_paths]
 
-    # include_paths() gives us the location of torch/torch.h
+    # include_paths() gives us the location of torch/python.h
     includes += include_paths(with_cuda)
     # sysconfig.get_paths()['include'] gives us the location of Python.h
     includes.append(sysconfig.get_paths()['include'])
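
For reference, a source string passed to torch.utils.cpp_extension.load_inline
now effectively compiles as the following translation unit (reconstructed from
the load_inline docstring and cpp_source2 above; the body of sin_add is an
assumption, the test only shows its declaration and binding):

    #include <torch/python.h>  // prepended automatically by load_inline

    at::Tensor sin_add(at::Tensor x, at::Tensor y) {
      return x.sin() + y.sin();  // assumed definition
    }

    PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
      m.def("sin_add", &sin_add, "sin(x) + sin(y)");
    }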