Rename autograd namespace to torch and change torch.h into python.h (#7267)

* Rename autograd namespace to torch and change torch.h into python.h

* Include torch.h instead of python.h in test/cpp/api

* Change some mentions of torch.h to python.h in C++ extensions

* Set paths directly, without find_path
Peter Goldsborough 2018-05-04 08:04:57 -07:00 committed by GitHub
parent bcffb5aa1d
commit 67d0d14908
27 changed files with 70 additions and 68 deletions
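In practical terms, the rename is mechanical for extension authors: the header that pulls in the Python bindings is now <torch/python.h> instead of <torch/torch.h>, and symbols that used to live in the autograd:: namespace now live in torch::. A minimal sketch of a complete extension source after this commit (sigmoid_add is the function used by the extension tests below; the rest is standard pybind11 boilerplate from those same tests):

// Minimal C++ extension after this commit: only the include line differs
// from the pre-rename version, which used <torch/torch.h>.
#include <torch/python.h>

at::Tensor sigmoid_add(at::Tensor x, at::Tensor y) {
  return x.sigmoid() + y.sigmoid();
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("sigmoid_add", &sigmoid_add, "sigmoid(x) + sigmoid(y)");
}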

View file

@@ -312,7 +312,8 @@ class build_deps(Command):
 self.copy_tree('torch/csrc', 'torch/lib/include/torch/csrc/')
 self.copy_tree('third_party/pybind11/include/pybind11/',
 'torch/lib/include/pybind11')
-self.copy_file('torch/torch.h', 'torch/lib/include/torch/torch.h')
+self.copy_file('torch/csrc/api/include/torch/python.h',
+'torch/lib/include/torch/python.h')
 build_dep_cmds = {}
@@ -600,7 +601,7 @@ main_sources = [
 "torch/csrc/DynamicTypes.cpp",
 "torch/csrc/assertions.cpp",
 "torch/csrc/byte_order.cpp",
-"torch/csrc/torch.cpp",
+"torch/csrc/api/src/python.cpp",
 "torch/csrc/utils.cpp",
 "torch/csrc/utils/cuda_lazy_init.cpp",
 "torch/csrc/utils/device.cpp",
@@ -901,6 +902,6 @@ if __name__ == '__main__':
 'lib/include/torch/csrc/jit/*.h',
 'lib/include/torch/csrc/utils/*.h',
 'lib/include/torch/csrc/cuda/*.h',
-'lib/include/torch/torch.h',
+'lib/include/torch/python.h',
 ]
 })

View file

@@ -1,8 +1,8 @@
 #include <catch.hpp>
-#include <torch/autograd.h>
+#include <torch/torch.h>
-using namespace autograd;
+using namespace torch;
 AUTOGRAD_CONTAINER_CLASS(TestModel) {
 public:

View file

@@ -1,8 +1,8 @@
 #include <catch.hpp>
-#include <torch/autograd.h>
+#include <torch/torch.h>
-using namespace autograd;
+using namespace torch;
 #include <iostream>

View file

@@ -1,7 +1,7 @@
 #define CATCH_CONFIG_RUNNER
 #include <catch.hpp>
-#include <torch/autograd.h>
+#include <torch/torch.h>
 #include <iostream>
@@ -16,7 +16,7 @@ int main(int argc, char* argv[]) {
 return return_code;
 }
-if (!autograd::hasCuda()) {
+if (!torch::hasCuda()) {
 std::cerr << "CUDA not available. Disabling CUDA tests" << std::endl;
 // ~ disables the [cuda] tag.
 session.configData().testsOrTags.push_back("~[cuda]");

View file

@@ -1,8 +1,8 @@
 #include <catch.hpp>
-#include <torch/autograd.h>
+#include <torch/torch.h>
-using namespace autograd;
+using namespace torch;
 TEST_CASE("misc") {
 SECTION("no_grad") {

View file

@@ -1,8 +1,8 @@
 #include <catch.hpp>
-#include <torch/autograd.h>
+#include <torch/torch.h>
-using namespace autograd;
+using namespace torch;
 bool test_optimizer_xor(Optimizer optim, std::shared_ptr<ContainerList> model) {
 float running_loss = 1;

View file

@@ -1,8 +1,8 @@
 #include <catch.hpp>
-#include <torch/autograd.h>
+#include <torch/torch.h>
-using namespace autograd;
+using namespace torch;
 template <typename R, typename Func>
 bool test_RNN_xor(Func&& model_maker, bool cuda = false) {

View file

@@ -1,10 +1,10 @@
 #include <catch.hpp>
-#include <torch/autograd.h>
+#include <torch/torch.h>
 #include "cereal/archives/portable_binary.hpp"
-using namespace autograd;
+using namespace torch;
 TEST_CASE("serialization") {
 SECTION("undefined") {

View file

@@ -1,4 +1,4 @@
-#include <torch/torch.h>
+#include <torch/python.h>
 // Declare the function from cuda_extension.cu. It will be compiled
 // separately with nvcc and linked with the object file of cuda_extension.cpp

View file

@@ -1,4 +1,4 @@
-#include <torch/torch.h>
+#include <torch/python.h>
 struct Doubler {
 Doubler(int A, int B) {

View file

@@ -1,4 +1,4 @@
-#include <torch/torch.h>
+#include <torch/python.h>
 at::Tensor sigmoid_add(at::Tensor x, at::Tensor y) {
 return x.sigmoid() + y.sigmoid();

View file

@@ -1,4 +1,4 @@
-#include <torch/torch.h>
+#include <torch/python.h>
 #include "doubler.h"

View file

@@ -1,4 +1,4 @@
-#include <torch/torch.h>
+#include <torch/python.h>
 using namespace at;

View file

@@ -151,7 +151,7 @@ class TestCppExtension(common.TestCase):
 '''
 cpp_source2 = '''
-#include <torch/torch.h>
+#include <torch/python.h>
 at::Tensor sin_add(at::Tensor x, at::Tensor y);
 PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
 m.def("sin_add", &sin_add, "sin(x) + sin(y)");

View file

@@ -10,14 +10,13 @@ if (VERBOSE)
 message(STATUS "ATEN_BUILD_PATH is ${ATEN_BUILD_PATH}")
 endif()
-find_path(ATEN_INCLUDE_DIR ATen/ATen.h PATHS "${ATEN_PATH}/src/" NO_DEFAULT_PATH)
-find_path(ATEN_BUILD_INCLUDE_DIR ATen/Type.h PATHS "${ATEN_BUILD_PATH}/src/ATen" NO_DEFAULT_PATH)
+set(ATEN_INCLUDE_DIR "${ATEN_PATH}/src/")
+set(ATEN_BUILD_INCLUDE_DIR "${ATEN_BUILD_PATH}/src/ATen")
+set(TORCH_SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../../../torch")
 find_library(ATEN_LIBRARY ATen PATHS "${ATEN_BUILD_PATH}/src/ATen" NO_DEFAULT_PATH)
 find_library(NANOPB_LIBRARY protobuf-nanopb PATHS "${NANOPB_BUILD_PATH}" NO_DEFAULT_PATH)
-find_path(TORCH_SRC_DIR torch.h PATHS "${CMAKE_CURRENT_SOURCE_DIR}/../../../torch" NO_DEFAULT_PATH)
 set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
 set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
@@ -256,7 +255,7 @@ target_include_directories(torch
 "${ATEN_INCLUDE_DIR}/TH"
 "${ATEN_BUILD_INCLUDE_DIR}"
 "${ATEN_BUILD_PATH}/src/TH"
-"${TORCH_SRC_DIR}/../"
+"${TORCH_SRC_DIR}/.."
 "${CMAKE_CURRENT_SOURCE_DIR}")
 if (NOT NO_API)

View file

@@ -5,9 +5,9 @@
 #include "torch/csrc/autograd/variable.h"
 #define AUTOGRAD_CONTAINER_CLASS(Type) \
-class Type : public autograd::Container_CRTP<Type>
+class Type : public torch::Container_CRTP<Type>
-namespace autograd {
+namespace torch {
 class ContainerImpl {
 public:
 virtual ~ContainerImpl() = default;
@@ -438,4 +438,4 @@ class RNN : public RNNBase<RNN> {
 }
 };
-} // namespace autograd
+} // namespace torch

View file

@@ -12,7 +12,7 @@
 #include "torch/csrc/utils/auto_gpu.h"
 #define AUTOGRAD_OPTIMIZER_CLASS(Type) \
-class Type : public autograd::Optimizer_CRTP<Type>
+class Type : public torch::Optimizer_CRTP<Type>
 #define AUTOGRAD_KWARG(CLS, TYP, NAME, DEFAULT, OPTION) \
 TYP NAME##_ = DEFAULT; \
 CLS& NAME(TYP x = OPTION) { \
@@ -25,7 +25,7 @@ namespace tag = torch::autograd;
 using IntVec = decltype(std::declval<at::IntList>().vec());
 } // namespace
-namespace autograd {
+namespace torch {
 namespace detail {
 extern tag::Engine engine;
 }
@@ -67,4 +67,4 @@ int getNumGPUs();
 bool hasCuda();
 bool hasCudnn();
-} // namespace autograd
+} // namespace torch
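To make the macro change concrete: a class declared with the renamed macro now derives from the CRTP base in the torch namespace. A sketch of the expansion only, with the class body elided for illustration:

// AUTOGRAD_OPTIMIZER_CLASS(SGD) { /* ... */ }; now expands to roughly:
class SGD : public torch::Optimizer_CRTP<SGD> { /* ... */ };
// whereas before this commit the same declaration expanded to:
// class SGD : public autograd::Optimizer_CRTP<SGD> { /* ... */ };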

View file

@@ -6,7 +6,7 @@
 #include "cereal/access.hpp"
 #include "cereal/cereal.hpp"
-namespace autograd {
+namespace torch {
 class OptimizerImpl {
 public:
 OptimizerImpl(Container model) : model_(model) {}
@@ -137,4 +137,4 @@ AUTOGRAD_OPTIMIZER_CLASS(Adam) {
 std::unordered_map<std::string, at::Tensor> max_exp_avg_sq_buffer_;
 };
-} // namespace autograd
+} // namespace torch

View file

@@ -9,7 +9,7 @@
 #include "cereal/types/unordered_map.hpp"
 #include "cereal/types/vector.hpp"
-namespace autograd {
+namespace torch {
 // Some convenience functions for saving and loading
 template <typename T>
@@ -35,12 +35,12 @@ void load(std::istream& stream, T* obj) {
 template <typename T>
 void save(std::string const& path, T const& obj) {
 std::ofstream os(path, std::ios::binary);
-autograd::save(os, obj);
+torch::save(os, obj);
 }
 template <typename T>
 void load(std::string const& path, T& obj) {
 std::ifstream is(path, std::ios::binary);
-autograd::load(is, obj);
+torch::load(is, obj);
 }
 namespace detail {
@@ -108,21 +108,21 @@ inline at::Backend backendFromId(int32_t id) {
 }
 } // namespace detail
-} // namespace autograd
+} // namespace torch
 // This is super ugly and I don't know how to simplify it
-CEREAL_REGISTER_TYPE(autograd::SGD);
-CEREAL_REGISTER_POLYMORPHIC_RELATION(autograd::OptimizerImpl, autograd::SGD);
-CEREAL_REGISTER_TYPE(autograd::Adagrad);
+CEREAL_REGISTER_TYPE(torch::SGD);
+CEREAL_REGISTER_POLYMORPHIC_RELATION(torch::OptimizerImpl, torch::SGD);
+CEREAL_REGISTER_TYPE(torch::Adagrad);
 CEREAL_REGISTER_POLYMORPHIC_RELATION(
-autograd::OptimizerImpl,
-autograd::Adagrad);
-CEREAL_REGISTER_TYPE(autograd::RMSprop);
+torch::OptimizerImpl,
+torch::Adagrad);
+CEREAL_REGISTER_TYPE(torch::RMSprop);
 CEREAL_REGISTER_POLYMORPHIC_RELATION(
-autograd::OptimizerImpl,
-autograd::RMSprop);
-CEREAL_REGISTER_TYPE(autograd::Adam);
-CEREAL_REGISTER_POLYMORPHIC_RELATION(autograd::OptimizerImpl, autograd::Adam);
+torch::OptimizerImpl,
+torch::RMSprop);
+CEREAL_REGISTER_TYPE(torch::Adam);
+CEREAL_REGISTER_POLYMORPHIC_RELATION(torch::OptimizerImpl, torch::Adam);
 namespace cereal {
@@ -162,11 +162,11 @@ loadBinary(BinaryInputArchive& archive, void* data, std::size_t size) {
 template <class Archive>
 void save(Archive& archive, at::Tensor const& tensor) {
 if (!tensor.defined()) {
-int32_t typeId = ::autograd::detail::scalarTypeId(at::ScalarType::Undefined);
+int32_t typeId = ::torch::detail::scalarTypeId(at::ScalarType::Undefined);
 archive(CEREAL_NVP(typeId));
 return;
 } else {
-int32_t typeId = ::autograd::detail::scalarTypeId(tensor.type().scalarType());
+int32_t typeId = ::torch::detail::scalarTypeId(tensor.type().scalarType());
 archive(CEREAL_NVP(typeId));
 }
 auto sizes = std::vector<int64_t>();
@@ -175,7 +175,7 @@ void save(Archive& archive, at::Tensor const& tensor) {
 sizes.push_back(s);
 }
 auto contig = tensor.toBackend(at::kCPU).contiguous();
-int32_t backend = ::autograd::detail::backendId(tensor.type().backend());
+int32_t backend = ::torch::detail::backendId(tensor.type().backend());
 archive(CEREAL_NVP(backend), CEREAL_NVP(sizes));
 agimpl::saveBinary(
@@ -195,7 +195,7 @@ void load(Archive& archive, at::Tensor& tensor) {
 at::ScalarType type;
 int32_t typeId;
 archive(CEREAL_NVP(typeId));
-type = ::autograd::detail::scalarTypeFromId(typeId);
+type = ::torch::detail::scalarTypeFromId(typeId);
 if (type == at::ScalarType::Undefined) {
 tensor = at::Tensor();
 return;
@@ -206,7 +206,7 @@ void load(Archive& archive, at::Tensor& tensor) {
 auto buf = std::vector<uint8_t>();
 archive(CEREAL_NVP(backendId), CEREAL_NVP(sizes));
-at::Backend backend = ::autograd::detail::backendFromId(backendId);
+at::Backend backend = ::torch::detail::backendFromId(backendId);
 if (!tensor.defined() || tensor.type().scalarType() != type) {
 tensor = at::getType(backend, type).tensor();
 }
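The path-based save/load helpers above keep their signatures; only the qualifying namespace changes at call sites. A minimal usage sketch, assuming this serialization header is on the include path (the file name and the tensor being round-tripped are illustrative):

// Round-trip an at::Tensor through the cereal-backed helpers. Before this
// commit these calls were spelled autograd::save / autograd::load.
void roundtrip(at::Tensor& t) {
  torch::save("tensor.bin", t);  // opens a std::ofstream and archives t
  torch::load("tensor.bin", t);  // opens a std::ifstream and restores t
}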

View file

@@ -1,6 +1,6 @@
 #include "torch/containers.h"
-namespace autograd {
+namespace torch {
 std::map<std::string, Variable> ContainerImpl::parameters() const {
 std::map<std::string, Variable> ret;
 for (auto pair : children_) {
@@ -634,4 +634,4 @@ variable_list Dropout2d::forward(variable_list inputs) {
 return lst;
 }
-} // namespace autograd
+} // namespace torch

View file

@@ -13,7 +13,7 @@
 #include "torch/detail.h"
-namespace autograd {
+namespace torch {
 namespace detail {
 tag::Engine engine;
 }
@@ -68,4 +68,4 @@ bool hasCudnn() {
 return hasCuda() && AT_CUDNN_ENABLED();
 }
-} // namespace autograd
+} // namespace torch

View file

@@ -1,6 +1,6 @@
 #include "torch/optimizers.h"
-namespace autograd {
+namespace torch {
 void OptimizerImpl::zero_grad() {
 for (auto p : model_->parameters()) {
@@ -196,4 +196,4 @@ void Adam::init_state() {
 exp_avg_sq_buffer_.clear();
 }
-} // namespace autograd
+} // namespace torch

View file

@@ -1,4 +1,4 @@
-#include <torch/torch.h>
+#include <torch/csrc/api/include/torch/python.h>
 #include <torch/csrc/autograd/generated/VariableType.h>
 #include <torch/csrc/autograd/variable.h>

View file

@@ -2,9 +2,8 @@
 #include <structmember.h>
 #include <pybind11/pybind11.h>
-#include <sstream>
-#include "torch/torch.h"
+#include "torch/csrc/api/include/torch/python.h"
 #include "torch/csrc/assertions.h"
 #include "torch/csrc/Dtype.h"
 #include "torch/csrc/DynamicTypes.h"
@@ -20,6 +19,9 @@
 #include "torch/csrc/utils/tensor_new.h"
 #include "torch/csrc/utils/tensor_types.h"
+#include <sstream>
+#include <vector>
 namespace torch { namespace tensor {
 using namespace at;

View file

@@ -504,7 +504,7 @@ def load_inline(name,
 the necessary header includes, as well as the (pybind11) binding code. More
 precisely, strings passed to ``cpp_sources`` are first concatenated into a
 single ``.cpp`` file. This file is then prepended with ``#include
-<torch/torch.h>``.
+<torch/python.h>``.
 Furthermore, if the ``functions`` argument is supplied, bindings will be
 automatically generated for each function specified. ``functions`` can
@@ -551,7 +551,7 @@ def load_inline(name,
 if isinstance(cuda_sources, str):
 cuda_sources = [cuda_sources]
-cpp_sources.insert(0, '#include <torch/torch.h>')
+cpp_sources.insert(0, '#include <torch/python.h>')
 # If `functions` is supplied, we create the pybind11 bindings for the user.
 # Here, `functions` is (or becomes, after some processing) a map from
@@ -747,7 +747,7 @@ def _write_ninja_file(path,
 sources = [os.path.abspath(file) for file in sources]
 includes = [os.path.abspath(file) for file in extra_include_paths]
-# include_paths() gives us the location of torch/torch.h
+# include_paths() gives us the location of torch/python.h
 includes += include_paths(with_cuda)
 # sysconfig.get_paths()['include'] gives us the location of Python.h
 includes.append(sysconfig.get_paths()['include'])
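Putting the docstring and the header insert together: for the sin_add source used in the extension tests above, the single .cpp file that load_inline compiles after this commit looks roughly like the sketch below. The function body and the generated binding text are assumptions, reconstructed from the declaration and the m.def descriptor in the test diff.

// Sketch of the concatenated translation unit load_inline produces:
// the prepended header, then the user's cpp_sources, then the bindings.
#include <torch/python.h>

at::Tensor sin_add(at::Tensor x, at::Tensor y) {
  return x.sin() + y.sin();
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("sin_add", &sin_add, "sin(x) + sin(y)");
}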