#ifndef TH_GENERIC_FILE
Canonicalize all includes in PyTorch. (#14849)
Summary:
Anywhere we used #include "foo.h", we now say #include <foo.h>.
Paths are adjusted to be rooted out of aten/src, torch/lib, or
the root level directory.
I modified CMakeLists.txt by hand to remove TH and THC from
the include paths.
I used the following script to do the canonicalization:
```
import subprocess
import re
import os.path
files = subprocess.check_output(['git', 'ls-files']).decode('utf-8').rstrip().split('\n')
for fn in files:
    if not any(fn.endswith(suff) for suff in ['.cu', '.cpp', '.in', '.h', '.hpp', '.cu', '.cuh', '.cc']):
        continue
    if not any(fn.startswith(pref) for pref in ["aten/", "torch/"]):
        continue
    with open(fn, 'r') as f:
        c = f.read()
    def fmt(p):
        return "#include <{}>".format(p)
    def repl(m):
        p = m.group(1)
        if p in ["dlfcn.h", "unistd.h", "nvrtc.h", "cuda.h", "cuda_runtime.h", "cstdint", "cudnn.h", "Python.h", "cusparse.h", "cuda_runtime_api.h", "cuda_fp16.h", "cublas_v2.h", "stdint.h", "curand_kernel.h"]:
            return fmt(p)
        if any(p.startswith(pref) for pref in ["torch/csrc", "c10/", "ATen/", "caffe2/", "TH/", "THC/", "Eigen/", "gtest/", "zdl/", "gloo/", "onnx/", "miopen/"]):
            return fmt(p)
        for root in ["aten/src", "torch/lib", ""]:
            for bad_root in [os.path.dirname(fn), "aten/src/TH", "aten/src/THC", "torch/csrc"]:
                new_p = os.path.relpath(os.path.join(bad_root, p), root)
                if not new_p.startswith("../") and (os.path.exists(os.path.join(root, new_p)) or os.path.exists(os.path.join(root, new_p + ".in"))):
                    return fmt(new_p)
        print("ERROR: ", fn, p)
        return m.group(0)
    new_c = re.sub(r'#include "([^"]+)"', repl, c)
    if new_c != c:
        print(fn)
        with open(fn, 'w') as f:
            f.write(new_c)
```
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/14849
Reviewed By: dzhulgakov
Differential Revision: D13363445
Pulled By: ezyang
fbshipit-source-id: 52361f878a672785f9306c9e9ab2513128092b68
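To illustrate the kind of rewrite the script performs (the paths here are representative, not taken from the actual diff):
```
// before: quote include, resolved against whatever -I paths happen to apply
#include "THStorage.h"
// after: angle-bracket include, rooted at aten/src
#include <TH/THStorage.h>
```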
#define TH_GENERIC_FILE "torch/csrc/generic/Storage.cpp"
#else
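// Sketch of the generic-file mechanism (illustrative, not the exact include
// chain): a host translation unit defines TH_GENERIC_FILE to point at this
// file and then includes a THGenerate*Types.h header, which re-includes this
// file once per scalar type, with the storage macros and scalar_t bound to
// that type -- stamping out THPByteStorage_*, THPFloatStorage_*, and so on.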
PyObject *THPStorageClass = nullptr;
Don't override Tensor, Storage macros defined outside torch/csrc in t… (#8243)
* Don't override Tensor, Storage macros defined outside torch/csrc in torch/csrc.
This PR does the following:
1) Removes THSTensor macros in torch/csrc, which aren't used.
2) For macros defined outside of torch/csrc (THTensor, THTensor_, THStorage, THStorage_):
a) No longer override them, i.e. previously THTensor could actually be THCTensor if a generic file was included from a file including THCP.h.
b) Instead, introduce new macros THW* (e.g. THWTensor) to represent a (potentially empty) wildcard character.
In addition to making this code easier to read and codemod, this allows us to more freely change TH/THC; for example:
currently in the THC random code, the state is cast to THByteTensor*; this happens to work because the macros don't happen to override THByteTensor.
But if THByteTensor just becomes an alias of THTensor (which is the plan for a single tensor type), then this no longer works.
The whole thing was previously a bit of a mess because you really had to understand which macros are redefined and which aren't.
We could also rename the macros that live in torch/csrc (e.g. the THPTensor macros), but since that is more self-contained, I punted for now.
* Don't change the plugin.
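A minimal sketch of the wildcard scheme described above (illustrative only; the real definitions live in the torch/csrc headers):
```
// THW* resolves to a TH or THC name depending on whether this generic
// file is being compiled for the CUDA backend (THC_GENERIC_FILE defined).
#ifdef THC_GENERIC_FILE
#define THWStorage THCStorage
#define THWStorage_(NAME) THCStorage_(NAME)
#else
#define THWStorage THStorage
#define THWStorage_(NAME) THStorage_(NAME)
#endif
```
With this in place, generic code like the functions below can spell THWStorage_(free) once and have it resolve to the right backend.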
PyObject * THPStorage_(New)(THWStorage *ptr)
{
  AT_ASSERT(ptr);
  PyTypeObject *type = (PyTypeObject *)THPStorageClass;
  PyObject *obj = type->tp_alloc(type, 0);
  if (obj) {
    ((THPStorage *)obj)->cdata = ptr;
  } else {
    THWStorage_(free)(LIBRARY_STATE ptr);
  }
  return obj;
}
static void THPStorage_(dealloc)(THPStorage* self)
{
  THWStorage_(free)(LIBRARY_STATE self->cdata);
  Py_TYPE(self)->tp_free((PyObject*)self);
}
Introduce SupervisedPtr, delete THAllocator and THCDeviceAllocator (#9358)
Summary:
See Note [Supervisor deleter] for how SupervisedPtr works.
This design is not the obvious one, but there were a lot of
constraints feeding into it:
- It must support the reallocation usage-pattern, where, given
an existing Storage, we allocate a new region of memory,
copy the existing data to it, and then deallocate the old
region of memory.
- Creation of a deleter for memory MUST avoid dynamic allocations
in the common case. We've done some benchmarking in Caffe2
where dynamic allocation for deleters is ruinously expensive,
and it's really hard to avoid these performance tarpits in
very general function wrappers like std::function or
folly::Function (while benchmarking this, we discovered that
folly::Function's move constructor was way more expensive
than it should be).
- We need to be able to deallocate data that comes from external
sources, e.g., dlpack and numpy tensors. Most notably,
you often cannot deallocate these with merely the void*
data pointer; you need some extra, out-of-band information
(e.g., the managing struct) to deallocate it. Sometimes,
you may even want to resize data living in an external source!
- The "core" allocators need to support being wrapped in a Thrust
allocator, so you need to implement the following two functions:
char* allocate(size_t);
void deallocate(char*, size_t);
- We need to support tensors which contain non-POD, non-trivially
copyable data; specifically tensors of std::string. This is
an upcoming requirement from Caffe2. It's dirty AF, but
it's really useful.
- It should use C++ standard library types like std::unique_ptr
(which is hugely problematic because std::unique_ptr doesn't
call the deleter when the pointer is null.)
Here is the billing of changes:
- Built-in support for realloc() has been DROPPED ENTIRELY.
Instead, you're expected to allocate and then copy from
the old memory to the new memory if you want to do a
reallocation. This is what you'd generally have expected
to occur; and axing realloc() from the design lets us avoid
some tricky correctness issues with std::realloc(), namely
the fact that we must refuse the realloc if the type of the
elements is not trivially copyable. If it really matters,
we can add this back, but there really needs to be a good
explanation WHY you need fast resizing reallocations (by and
large, people don't resize their storages, and it should
be acceptable to have a performance degradation when they
do).
- TH_STORAGE_FREEMEM is no more; instead, if you want a
storage which doesn't free its result, you just give it
an empty deleter.
- What we used to call an "allocator" (really, a combined
object for allocating/deleting) has been split into two
concepts: an allocator, and a smart pointer (SupervisedPtr)
which knows how to delete data.
- Unlike previously, where THAllocator/THCDeviceAllocator
could have a per-tensor context storing extra information
(e.g., a pointer to the metadata you need to actually
free the tensor), there is no context in the allocator or
the deleter of the smart pointer; instead, the smart
pointer directly holds an owning reference to the
metadata necessary to free the data. This metadata
is *freshly manufactured* upon every allocation, which
permits us to resize tensors even in the absence of
built-in support for realloc().
- By default, allocators don't support "raw" allocations
and deallocations with raw pointers. This is because
some allocations may return a different context every
time, in which case you need to reconstruct the context
at delete time (because all you got was a void*, not
a unique_ptr that carries the deleter).
- The diff between at::Allocator and THCDeviceAllocator is a
bit larger:
- It used to return a cudaError_t. Now, allocators
are expected to check the error status immediately and throw
an exception if there was an error. It turns out that this
is what was immediately done after all occurrences of
allocate/release, so it wasn't a big deal (although some
subsidiary interfaces had to themselves be converted to
not return cudaError_t).
There is one notable exception to this, and it is how
we handle CUDA OOM: if this occurs, we attempt to return
unused memory to the system and try again. This is now
handled by a catch-all try-catch block. The cost of
catching the exception is probably the least of your worries
if you're about to OOM.
- It used to take the CUDA stream to perform the allocation
on as an argument. However, it turned out that at all call
sites, this stream was the stream for the current device.
So we can push this into the allocator (and the choice,
in the future, could be made explicitly by twiddling
thread local state.)
- It held two extra methods, emptyCache and cacheInfo, specifically
for interacting with some state in THCCachingAllocator.
But this "generality" was a lie, since THCCachingAllocator
was the only allocator that actually implemented these
methods, and there is actually a bunch of code in THC
which assumes that it is the caching allocator that is
the underlying allocator for CUDA allocations. So I
folded these two methods into this interface as
THCCachingAllocator_emptyCache and THCCachingAllocator_cacheInfo.
- It held its context directly inside the THCDeviceAllocator
struct. This context has been moved out into whatever
is holding the at::Allocator*.
- The APIs for getting at allocators/deleters are now a little different.
- Previously there were a bunch of static variables you could get
the address of (e.g., &THDefaultAllocator); now there is a
function getTHDefaultAllocator().
- Some "allocators" didn't actually know how to allocate (e.g.,
the IPC "allocator"). These have been deleted; instead, you
can wrap the produced pointers into SupervisedPtr using
an appropriate makeSupervisedPtr() static method.
- Storage sharing was a lot of work to wrangle, but I think I've
tamed the beast.
- THMapAllocator and its "subclasses" have been refactored to
be proper, honest to goodness C++ classes. I used the enum
argument trick to get "named" constructors. We use inheritance
to add refcounting and management (in libshm). What we previously
called the "Context" class (Context has been dropped from the name)
is now the supervisor for the data.
- Sometimes, we need to pull out the file descriptor from a
tensor. Previously, it was pulled out of the allocator context.
Now, we pull it out of the supervisor of the SupervisedPtr,
using the static method fromSupervisedPtr(), which uses the
deleter as the typeid, and refines the type if it matches.
- I renamed the std::function deleter into
InefficientStdFunctionSupervisor, to emphasize the fact that it does
a dynamic allocation to save the std::function deleter.
TODO:
- Windows libshm is in shambles and needs to be fixed.
Perhaps for the future:
- newFromFd now unconditionally calls cudaPointerGetAttributes
even though this is unnecessary, because we know what the device
is from higher up in the call stack. We can fix this by making
newWithDataAndAllocator also take an explicit device argument.
- Consider statically distinguishing between allocators that
support raw_allocate/raw_deallocate, and those which don't.
The Thrust constraint applies only to the CUDA device allocator;
you never need to allocate CPU memory this way.
- Really want to get rid of storage views. Ugh.
Nontrivial bugs I noticed when preparing this patch:
- I forgot to placement-new unique pointers and attempted to
assign them directly on uninitialized memory; very bad! Sam
Gross has encouraged me to replace this with a proper constructor,
but I keep putting it off, because once everything goes in
StorageImpl there really will be a proper constructor.
- I rewrote a number of APIs to use newWithDataAndAllocator
instead of newWithAllocator, calling the allocator at the
call site (because they required "allocation context" which
we no longer give to "allocators"). When I did this, I forgot
to insert the multiplication with sizeof(real) to scale from
numels to number of bytes.
- The implementation of swap on storages was missing it for
scalarType and backend. It was benign (because the only case
we call swap is when these are the same), but I fixed it anyway.
- I accidentally returned a nullptr unique_ptr with no deleter,
even though there was a legitimate one. This matters, because
some code still shoves its hands in the deleter context to
get extra metadata about the function.
- I used std::move() on a unique_ptr, and then did a boolean
test on the pointer afterwards (always false!)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/9358
Reviewed By: SsnL
Differential Revision: D8811822
Pulled By: ezyang
fbshipit-source-id: 4befe2d12c3e7fd62bad819ff52b054a9bf47c75
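The resulting shape -- a data pointer plus an owning, out-of-band context and a plain function-pointer deleter -- can be sketched as follows (a simplified illustration, not the actual c10 type; compare the at::DataPtr that the slicing code later in this file constructs from exactly such a context/deleter pair):
```
// Sketch: the deleter is a raw function pointer over an opaque context,
// so building one requires no dynamic allocation in the common case.
// The context (the "supervisor") owns whatever out-of-band metadata is
// needed to free the data, e.g. the managing struct of a dlpack buffer.
struct SupervisedPtrSketch {
  void* data;                  // payload visible to callers
  void* ctx;                   // out-of-band metadata owning the data
  void (*ctx_deleter)(void*);  // freeing the context frees the data

  ~SupervisedPtrSketch() {
    // Unlike std::unique_ptr, run the deleter unconditionally: a null data
    // pointer with a live context still needs cleanup.
    if (ctx_deleter) ctx_deleter(ctx);
  }
};
```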
static THWStorage* THPStorage_(newWithAllocator)(int64_t size, at::Allocator* allocator)
{
#if defined(THC_GENERIC_FILE)
  THPUtils_setError(THPStorageStr " does not support custom allocators");
  return nullptr;
#else
  return THWStorage_(newWithAllocator)(LIBRARY_STATE size, allocator);
#endif
}
static PyObject * THPStorage_(pynew)(PyTypeObject *type, PyObject *args, PyObject *kwargs)
{
  HANDLE_TH_ERRORS
  Py_ssize_t num_args = args ? PyTuple_Size(args) : 0;

  THPStoragePtr self((THPStorage *)type->tp_alloc(type, 0));
  THPUtils_assert(self, "failed to allocate a " THPStorageStr " object");
  c10::Allocator* allocator = nullptr;

  // Internally we allow constructing with a keyword-only argument cdata
  if (kwargs != nullptr) {
    PyObject *allocator_ptr = PyDict_GetItemString(kwargs, "allocator");
    if (allocator_ptr) {
      THPUtils_assert(THPUtils_checkLong(allocator_ptr), "invalid allocator");
      allocator = static_cast<c10::Allocator*>(PyLong_AsVoidPtr(allocator_ptr));
      PyDict_DelItemString(kwargs, "allocator");
    }

    Py_ssize_t num_kwargs = PyDict_Size(kwargs);
    if (num_args == 0) {
      PyObject *cdata_ptr = PyDict_GetItemString(kwargs, "cdata");
      if (num_kwargs == 1 && cdata_ptr && THPUtils_checkLong(cdata_ptr)) {
        THWStorage *ptr = (THWStorage*)PyLong_AsVoidPtr(cdata_ptr);
        self->cdata = ptr;
        return (PyObject*)self.release();
      }
    }
    THPUtils_assert(num_kwargs == 0, THPStorageStr "(): invalid keyword arguments");
  }

  // torch.Storage()
  if (num_args == 0) {
    if (allocator) {
      self->cdata = THPStorage_(newWithAllocator)(0, allocator);
    } else {
      self->cdata = THWStorage_(new)(LIBRARY_STATE_NOARGS);
    }
    return (PyObject*)self.release();
  }

  PyObject *first_arg = PyTuple_GET_ITEM(args, 0);

  // torch.Storage(size)
  if (num_args == 1 && THPUtils_checkLong(first_arg)) {
    int64_t size = THPUtils_unpackLong(first_arg);
    if (allocator) {
      self->cdata = THPStorage_(newWithAllocator)(size, allocator);
    } else {
      self->cdata = THWStorage_(newWithSize)(LIBRARY_STATE size);
    }
    return (PyObject*)self.release();
  }

  // torch.Storage(view_source, [offset, [size]])
  if (num_args < 4 && THPStorage_(Check)(first_arg)) {
    THPUtils_setError("storage views not supported");
    return nullptr;
  }

  // torch.Storage(sequence)
  if (num_args == 1 && PySequence_Check(first_arg)) {
    Py_ssize_t length = PySequence_Length(first_arg);
    THPUtils_assert(length >= 0, "couldn't obtain the length of %s",
                    THPUtils_typename(first_arg));
    self->cdata = THWStorage_(newWithSize)(LIBRARY_STATE length);
    THPObjectPtr item;
    try {
      for (Py_ssize_t i = 0; i < length; i++) {
        item = PySequence_GetItem(first_arg, i);
Rename real to scalar_t. (#11163)
Summary:
This is necessary to allow us to use the complex header
which defines real (and is very sad if real is macro'ed).
We should also fix accreal, ureal, Real and REAL, but
only 'real' is the real blocker.
```
codemod -d aten/src/TH --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t
codemod -d aten/src/THC --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t
codemod -d aten/src/THNN --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t
codemod -d aten/src/THCUNN --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t
```
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/11163
Reviewed By: SsnL
Differential Revision: D9619906
Pulled By: ezyang
fbshipit-source-id: 922cb3a763c0bffecbd81200c1cefc6b8ea70942
        scalar_t value = THPUtils_(unpackReal)(item.get());
#if !defined(THC_GENERIC_FILE)
        self->cdata->unsafe_data<scalar_t>()[i] = value;
#else
        // TODO: this might be slow - consider batched updates?
        THCStorage_(set)(LIBRARY_STATE self->cdata, i, value);
#endif
      }
    } catch (const std::exception &e) {
      THPUtils_setError("tried to construct a storage from a sequence (%s), "
                        "but one of the items was of type %s instead of %s",
                        THPUtils_typename(first_arg),
                        THPUtils_typename(item.get()),
                        THPUtils_typeTraits<scalar_t>::python_type_str);
      return nullptr;
    }
    return (PyObject*)self.release();
  }

  THPUtils_invalidArguments(args, kwargs, THPStorageStr " constructor", 6,
                            "no arguments",
                            "(int size)",
                            "(Sequence data)",
                            "(" THPStorageStr " view_source)",
                            "(" THPStorageStr " view_source, int offset)",
                            "(" THPStorageStr " view_source, int offset, int size)");
  return nullptr;
  END_HANDLE_TH_ERRORS
}
static Py_ssize_t THPStorage_(length)(THPStorage *self)
{
  HANDLE_TH_ERRORS
  return THWStorage_(size)(LIBRARY_STATE self->cdata);
  END_HANDLE_TH_ERRORS_RET(-1)
}
static PyObject * THPStorage_(get)(THPStorage *self, PyObject *index)
{
  HANDLE_TH_ERRORS
  /* Integer index */
  if (THPUtils_checkLong(index)) {
    int64_t nindex = THPUtils_unpackLong(index);
    if (nindex < 0)
      nindex += THWStorage_(size)(LIBRARY_STATE self->cdata);
    if (nindex < 0 || nindex >= self->cdata->numel()) {
      PyErr_Format(PyExc_IndexError, "index %" PRId64 " out of range for storage of "
                   "size %" PRId64, (int64_t) nindex, (int64_t) self->cdata->numel());
      return nullptr;
    }
    scalar_t value = THWStorage_(get)(LIBRARY_STATE self->cdata, nindex);
    return THPUtils_(newReal)(value);
  /* Slice index */
  } else if (PySlice_Check(index)) {
Reimplement storage slicing. (#11314)
Summary:
In #9466 I got rid of storage views and eliminated all places where
they were used... OR SO I THOUGHT. In actuality, under certain
conditions (specifically, if you trained a CUDA multiprocessing model
shared over CUDA IPC and then serialized your parameters), you could
also serialize storage slices to the saved model format. In #9466,
I "fixed" the case when you loaded the legacy model format (really,
just unshared the storages--not strictly kosher, but if you aren't
updating the parameters, it shouldn't matter), but NOT the modern
model format, so such models would fail.
So, I could have applied the legacy model format fix too, but
hyperfraise remarked that he had applied a fix that was effectively
the same as unsharing the storages, and it had caused his model to
behave differently. So I looked into it again, and realized that
using a custom deleter, I could simulate the same behavior as the old
storage slices. So back they come.
In principle, I could also reimplement storage views entirely using
our allocators, but I'm not going to do that unless someone really
really wants it.
Fixes #10120.
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/11314
Reviewed By: ailzhang
Differential Revision: D9671966
Pulled By: ezyang
fbshipit-source-id: fd863783d03b6a6421d6b9ae21ce2f0e44a0dcce
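The custom-deleter trick is the aliasing-ownership pattern: the slice points into the middle of the parent's buffer, and its deleter drops a reference that keeps the parent alive. A standalone sketch with std::shared_ptr (the code below does the same thing with c10's intrusive_ptr refcounting):
```
#include <cstddef>
#include <memory>
#include <vector>

// Returns a pointer into the middle of parent's buffer. The shared_ptr
// aliasing constructor stores the offset pointer, while the control block
// keeps the whole parent vector alive until the slice is released.
std::shared_ptr<float> make_slice(
    const std::shared_ptr<std::vector<float>>& parent, std::size_t offset) {
  return std::shared_ptr<float>(parent, parent->data() + offset);
}
```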
    Py_ssize_t start, stop, slicelength, step;
    int64_t len = THWStorage_(size)(LIBRARY_STATE self->cdata);
    if (!THPUtils_parseSlice(index, len, &start, &stop, &step, &slicelength))
      return nullptr;
    if (step != 1) {
      THPUtils_setError("Trying to slice with a step of %" PRId64 ", but only a step of "
                        "1 is supported", (int64_t)step);
      return nullptr;
    }

    scalar_t *data = THWStorage_(data)(LIBRARY_STATE self->cdata);

    at::StorageImpl* old_storage = self->cdata;
    c10::raw::intrusive_ptr::incref(old_storage);
    at::Storage new_storage(c10::make_intrusive<at::StorageImpl>(
      old_storage->dtype(),
      slicelength,
      at::DataPtr(static_cast<void*>(data + start),
                  old_storage,
                  [](void* s) { c10::raw::intrusive_ptr::decref(static_cast<at::StorageImpl*>(s)); },
                  old_storage->device()),
      old_storage->allocator(),
      /* resizable */ false));

    PyObject *_ret = THPStorage_(New)(new_storage.unsafeReleaseStorageImpl());
    return _ret;
  }
  PyErr_Format(PyExc_TypeError, "can't index a " THPStorageStr " with %s",
               THPUtils_typename(index));
  return nullptr;
  END_HANDLE_TH_ERRORS
}
static int THPStorage_(set)(THPStorage *self, PyObject *index, PyObject *value)
|
2016-05-02 21:06:53 +00:00
|
|
|
{
|
2016-05-05 18:58:13 +00:00
|
|
|
HANDLE_TH_ERRORS
|
2016-09-21 02:37:20 +00:00
|
|
|
if (!THPUtils_(checkReal)(value)) {
|
2016-09-22 03:36:39 +00:00
|
|
|
THPUtils_setError("can only set storage content with a %s, but got "
|
Rename real to scalar_t. (#11163)
Summary:
This is necessary to allow us to use the complex header
which defines real (and is very sad if real is macro'ed).
We should also fix accreal, ureal, Real and REAL, but
only 'real' is the real blocker.
```
codemod -d aten/src/TH --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t
codemod -d aten/src/THC --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t
codemod -d aten/src/THNN --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t
codemod -d aten/src/THCUNN --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t
```
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/11163
Reviewed By: SsnL
Differential Revision: D9619906
Pulled By: ezyang
fbshipit-source-id: 922cb3a763c0bffecbd81200c1cefc6b8ea70942
2018-09-02 22:24:01 +00:00
|
|
|
"%s instead", THPUtils_typeTraits<scalar_t>::python_type_str,
|
2016-09-22 03:36:39 +00:00
|
|
|
THPUtils_typename(value));
|
2016-05-03 13:11:32 +00:00
|
|
|
return -1;
|
2016-09-21 02:37:20 +00:00
|
|
|
}
|
2016-05-03 13:58:51 +00:00
|
|
|
|
Rename real to scalar_t. (#11163)
Summary:
This is necessary to allow us to use the complex header
which defines real (and is very sad if real is macro'ed).
We should also fix accreal, ureal, Real and REAL, but
only 'real' is the real blocker.
```
codemod -d aten/src/TH --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t
codemod -d aten/src/THC --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t
codemod -d aten/src/THNN --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t
codemod -d aten/src/THCUNN --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t
```
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/11163
Reviewed By: SsnL
Differential Revision: D9619906
Pulled By: ezyang
fbshipit-source-id: 922cb3a763c0bffecbd81200c1cefc6b8ea70942
2018-09-02 22:24:01 +00:00
|
|
|
scalar_t rvalue = THPUtils_(unpackReal)(value);
|
2016-09-21 02:37:20 +00:00
|
|
|
if (THPUtils_checkLong(index)) {
|
2017-11-08 18:51:35 +00:00
|
|
|
int64_t nindex = THPUtils_unpackLong(index);
|
Don't override Tensor, Storage macros defined outside torch/csrc in t… (#8243)
* Don't override Tensor, Storage macros defined outside torch/csrc in torch/csrc.
This PR does the following:
1) Removes THSTensor macros in torch/csrc, which aren't used.
2) For macros defined outside of torch/csrc (THTensor, THTensor_, THStorage, THStorage_):
a) No longer override them, i.e. previously THTensor could actually be THCTensor if a generic file was included from a file including THCP.h.
b) Instead, introduce new macros THW* (e.g. THWTensor) to represent a (potentially empty) wildcard character.
In addition to making this code easier to read and codemod, this allows us to more freely change TH/THC; for example:
currently in the THC random code, the state is casted to THByteTensor*; this happens to work because the macros don't happen to override THByteTensor.
But if THByteTensor just becomes an alias of THTensor (which is the plan for a single tensor type), then this no longer works.
The whole thing is a bit of a mess previously because you really have to understand which macros and redefined and which aren't.
We could also rename the macros that live in torch/csrc (e.g. the THPTensor macros), but since that is more self contained, I punted for now.
* Don't change the plugin.
2018-06-07 20:10:10 +00:00
|
|
|
THWStorage_(set)(LIBRARY_STATE self->cdata, nindex, rvalue);
|
2016-05-03 13:58:51 +00:00
|
|
|
return 0;
|
|
|
|
|
} else if (PySlice_Check(index)) {
|
2017-02-20 00:50:48 +00:00
|
|
|
Py_ssize_t start, stop, slicelength, step;
|
Don't override Tensor, Storage macros defined outside torch/csrc in t… (#8243)
* Don't override Tensor, Storage macros defined outside torch/csrc in torch/csrc.
This PR does the following:
1) Removes THSTensor macros in torch/csrc, which aren't used.
2) For macros defined outside of torch/csrc (THTensor, THTensor_, THStorage, THStorage_):
a) No longer override them, i.e. previously THTensor could actually be THCTensor if a generic file was included from a file including THCP.h.
b) Instead, introduce new macros THW* (e.g. THWTensor) to represent a (potentially empty) wildcard character.
In addition to making this code easier to read and codemod, this allows us to more freely change TH/THC; for example:
currently in the THC random code, the state is casted to THByteTensor*; this happens to work because the macros don't happen to override THByteTensor.
But if THByteTensor just becomes an alias of THTensor (which is the plan for a single tensor type), then this no longer works.
The whole thing is a bit of a mess previously because you really have to understand which macros and redefined and which aren't.
We could also rename the macros that live in torch/csrc (e.g. the THPTensor macros), but since that is more self contained, I punted for now.
* Don't change the plugin.
2018-06-07 20:10:10 +00:00
|
|
|
    int64_t len = THWStorage_(size)(LIBRARY_STATE self->cdata);
    if (!THPUtils_parseSlice(index, len, &start, &stop, &step, &slicelength))
      return -1;
    if (step != 1) {
      THPUtils_setError("Trying to slice with a step of %" PRId64 ", but only a step of "
          "1 is supported", (int64_t)step);
      return -1;
    }
    // TODO: check the bounds only once
    // TODO: fill?
    for (; start < stop; start++)
      THWStorage_(set)(LIBRARY_STATE self->cdata, start, rvalue);
    return 0;
  }
  THPUtils_setError("can't index a " THPStorageStr " with %s",
      THPUtils_typename(index));
  return -1;
  END_HANDLE_TH_ERRORS_RET(-1)
}
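
// The three slots below wire this type into CPython's mapping protocol:
// len(s), s[i] / s[a:b], and s[i] = v / s[a:b] = v respectively. The
// slice-assignment path above backs Python-level writes such as (a usage
// sketch, assuming a CPU float storage):
//   s = torch.FloatStorage(10)
//   s[2:5] = 1.0   # writes elements 2, 3 and 4; any step other than 1 errors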
static PyMappingMethods THPStorage_(mappingmethods) = {
  (lenfunc)THPStorage_(length),
  (binaryfunc)THPStorage_(get),
  (objobjargproc)THPStorage_(set)
};
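
// Statically allocated Python type object for the storage. Slots left as
// nullptr fall back to CPython's defaults; tp_methods, tp_members and
// tp_getset are filled in by THPStorage_(init) below once the per-type
// method tables have been assembled.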
// TODO: implement equality
PyTypeObject THPStorageType = {
  PyVarObject_HEAD_INIT(nullptr, 0)
  "torch._C." THPStorageBaseStr,            /* tp_name */
  sizeof(THPStorage),                       /* tp_basicsize */
  0,                                        /* tp_itemsize */
  (destructor)THPStorage_(dealloc),         /* tp_dealloc */
  nullptr,                                  /* tp_print */
  nullptr,                                  /* tp_getattr */
  nullptr,                                  /* tp_setattr */
  nullptr,                                  /* tp_reserved */
  nullptr,                                  /* tp_repr */
  nullptr,                                  /* tp_as_number */
  nullptr,                                  /* tp_as_sequence */
  &THPStorage_(mappingmethods),             /* tp_as_mapping */
  nullptr,                                  /* tp_hash */
  nullptr,                                  /* tp_call */
  nullptr,                                  /* tp_str */
  nullptr,                                  /* tp_getattro */
  nullptr,                                  /* tp_setattro */
  nullptr,                                  /* tp_as_buffer */
  Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
  nullptr,                                  /* tp_doc */
  nullptr,                                  /* tp_traverse */
  nullptr,                                  /* tp_clear */
  nullptr,                                  /* tp_richcompare */
  0,                                        /* tp_weaklistoffset */
  nullptr,                                  /* tp_iter */
  nullptr,                                  /* tp_iternext */
  nullptr,   /* will be assigned in init */ /* tp_methods */
  nullptr,   /* will be assigned in init */ /* tp_members */
  nullptr,                                  /* tp_getset */
  nullptr,                                  /* tp_base */
  nullptr,                                  /* tp_dict */
  nullptr,                                  /* tp_descr_get */
  nullptr,                                  /* tp_descr_set */
  0,                                        /* tp_dictoffset */
  nullptr,                                  /* tp_init */
  nullptr,                                  /* tp_alloc */
  THPStorage_(pynew),                       /* tp_new */
};
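
// Expose the raw TH storage pointer as a read-only integer attribute;
// Python-side code reads _cdata to identify the underlying buffer.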
static struct PyMemberDef THPStorage_(members)[] = {
  {(char*)"_cdata", T_ULONGLONG, offsetof(THPStorage, cdata), READONLY, nullptr},
  {nullptr}
};
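
// HANDLE_TH_ERRORS / END_HANDLE_TH_ERRORS bracket the getter bodies so that
// C++ exceptions are translated into Python exceptions instead of unwinding
// across the CPython boundary.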
static PyObject * THPStorage_(device)(THPStorage* self) {
  HANDLE_TH_ERRORS
  return THPDevice_New(self->cdata->device());
  END_HANDLE_TH_ERRORS
}

static PyObject * THPStorage_(dtype)(THPStorage *self)
{
  HANDLE_TH_ERRORS
  return torch::autograd::utils::wrap(
      torch::getDtype(at::typeMetaToScalarType(self->cdata->dtype())));
  END_HANDLE_TH_ERRORS
}

typedef PyObject *(*getter)(PyObject *, void *);

static struct PyGetSetDef THPStorage_(properties)[] = {
  {"device", (getter)THPStorage_(device), nullptr, nullptr, nullptr},
  {"dtype", (getter)THPStorage_(dtype), nullptr, nullptr, nullptr},
  {nullptr}
};
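
// From Python these surface as read-only attributes, e.g. (a usage sketch,
// assuming a freshly constructed CPU storage):
//   s = torch.FloatStorage(4)
//   s.device   # -> device(type='cpu')
//   s.dtype    # -> torch.float32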
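
// Dispatch table for storage copies: each entry pairs a source Python storage
// type with the TH routine that copies that source into this storage's scalar
// type. The generic copy bindings consult the table at runtime, which is what
// lets dst.copy_(src) convert across dtypes (and, in the THC build, devices).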
extern THPCopyList THWStorage_(copy_functions);
THPCopyList THWStorage_(copy_functions);

void THPStorage_(initCopyMethods)()
{
  auto& h = THWStorage_(copy_functions);
  // copy from CPU types
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THPByteStorageType, h, &THWStorage_(copyByte));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THPCharStorageType, h, &THWStorage_(copyChar));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THPShortStorageType, h, &THWStorage_(copyShort));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THPIntStorageType, h, &THWStorage_(copyInt));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THPLongStorageType, h, &THWStorage_(copyLong));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THPHalfStorageType, h, &THWStorage_(copyHalf));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THPFloatStorageType, h, &THWStorage_(copyFloat));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THPDoubleStorageType, h, &THWStorage_(copyDouble));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THPBoolStorageType, h, &THWStorage_(copyBool));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THPBFloat16StorageType, h, &THWStorage_(copyBFloat16));

#ifdef THC_GENERIC_FILE
  // copy from GPU types
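  // In the THC build this table belongs to the GPU storage type, so the
  // copyCuda* entries below register device-source copies alongside the
  // CPU-source entries above.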
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPByteStorageType, h, &THWStorage_(copyCudaByte));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPCharStorageType, h, &THWStorage_(copyCudaChar));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPShortStorageType, h, &THWStorage_(copyCudaShort));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPIntStorageType, h, &THWStorage_(copyCudaInt));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPLongStorageType, h, &THWStorage_(copyCudaLong));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPFloatStorageType, h, &THWStorage_(copyCudaFloat));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPDoubleStorageType, h, &THWStorage_(copyCudaDouble));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPHalfStorageType, h, &THWStorage_(copyCudaHalf));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPBoolStorageType, h, &THWStorage_(copyCudaBool));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPBFloat16StorageType, h, &THWStorage_(copyCudaBFloat16));

  // add CPU <- GPU copies to base type
  /// #define THPCpuStorage TH_CONCAT_3(THP, Real, Storage)
  #define THCpuStorage_(name) TH_CONCAT_4(TH, Real, Storage_, name)
  extern THPCopyList THCpuStorage_(copy_functions);
  auto& b = THCpuStorage_(copy_functions);
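  // THCpuStorage_(name) expands to the CPU storage's TH functions
  // (THRealStorage_##name) even though this file is compiled for THC, so the
  // registrations below land in the CPU type's copy table and teach it how to
  // pull data back from CUDA storages.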
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPByteStorageType, b, &THCpuStorage_(copyCudaByte));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPCharStorageType, b, &THCpuStorage_(copyCudaChar));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPShortStorageType, b, &THCpuStorage_(copyCudaShort));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPIntStorageType, b, &THCpuStorage_(copyCudaInt));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPLongStorageType, b, &THCpuStorage_(copyCudaLong));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPFloatStorageType, b, &THCpuStorage_(copyCudaFloat));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPDoubleStorageType, b, &THCpuStorage_(copyCudaDouble));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPHalfStorageType, b, &THCpuStorage_(copyCudaHalf));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPBoolStorageType, b, &THCpuStorage_(copyCudaBool));
  THPInsertStorageCopyFunction<THPStorage, THPStorage>(&THCPBFloat16StorageType, b, &THCpuStorage_(copyCudaBFloat16));
#undef THCpuStorage
#undef THCpuStorage_
#endif
}

#include <torch/csrc/generic/StorageMethods.cpp>
#include <torch/csrc/generic/StorageSharing.cpp>
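
// Called once per scalar type while the extension module is initialized:
// finishes the type object, publishes it as torch._C.<Real>StorageBase, and
// fills in the copy dispatch table.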
bool THPStorage_(init)(PyObject *module)
{
  static std::vector<PyMethodDef> methods;
  THPUtils_addPyMethodDefs(methods, THPStorage_(methods));
  THPUtils_addPyMethodDefs(methods, THPStorage_(sharingMethods));

  THPStorageType.tp_methods = methods.data();
  THPStorageType.tp_members = THPStorage_(members);
  THPStorageType.tp_getset = THPStorage_(properties);
  if (PyType_Ready(&THPStorageType) < 0)
    return false;
  Py_INCREF(&THPStorageType);
  PyModule_AddObject(module, THPStorageBaseStr, (PyObject *)&THPStorageType);
  THPStorage_(initCopyMethods)();
  return true;
}
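
// postInit runs after the Python-side class (e.g. torch.FloatStorage) has
// been defined on top of the Base type: it caches that class and registers it
// keyed by backend and scalar type, so C++ code can later map a Storage back
// to the matching Python object.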
void THPStorage_(postInit)(PyObject *module)
{
  THPStorageClass = PyObject_GetAttrString(module, (char*)TH_CONCAT_STRING_2(Real, Storage));
  if (!THPStorageClass) throw python_error();

  at::Backend backend = at::Backend::CPU;
#ifdef THC_GENERIC_FILE
  backend = at::Backend::CUDA;
#endif

#ifdef THQUANTIZED
  backend = at::Backend::QuantizedCPU;
#endif

  torch::registerStoragePyTypeObject((PyTypeObject*)THPStorageClass, backend, TH_CONCAT_2(at::k, Real));
}

#endif