2020-09-23 02:42:28 +00:00
|
|
|
#include <gtest/gtest.h>
|
|
|
|
|
|
Specialize Optional[T] to T (or subtype for Tensor) or None when executing graph (#18407)
Summary:
This patch specializes `Optional[Tensor]` graph inputs to either a `DimensionedTensorType` (if a Tensor is passed) or `NoneType`. Other `Optional[T]` are specialized to `T` or `None`.
- For unwrapping (checked and unchecked) we need to keep the output type, as IR code that follows unwrapping may not work with NoneType (just as it doesn't deal with Optional). While it would not be hit during execution, it will run against the (legitimate) assumptions of the analysis passes.
- Function lookup currently will not match NoneType when it expects optional (I'm not entirely sure why this doesn't lead to unhappyness currently, but hey), I amend this at the level of the function matching code (`operator.cpp`), but see Adam's comments. We would run into trouble if we needed to select between functions whose signature only differs in Optional types with different subtypes, but we would have the same problem when calling them directly, so I would think this is OK.
- It would enable throwing away branches we can't hit. This also reduces the "blockyness" of the graph, so it may be easier to apply optimizations (e.g. fuse things in `if t is None: ...` and outside the `if`.
- Arguments passed into `Optional[Tensor]` arguments will get shape information, which is very handy.
- It get's rid of the problem that tensors passed into Optional arguments get requires_grad set erroneously #18270 (though that also affects lists, which aren't fixed here).
- `Optional[List[int]]` is needed for #18697.
- We're changing typing in a more subtle way than the `TensorType`->`DimensionedTensorType`.
- In particular, specializing to NoneType loses the Type information captured in the `OptionalType` element type.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18407
Reviewed By: zdevito
Differential Revision: D15216808
Pulled By: eellison
fbshipit-source-id: 01f1a7643deaf4962c3f55eff2070d54b0e54b69
2019-05-06 21:54:10 +00:00
|
|
|
#include <torch/jit.h>
|
2019-03-15 20:53:23 +00:00
|
|
|
#include "test/cpp/jit/test_utils.h"
|
2020-02-27 20:18:24 +00:00
|
|
|
#include "torch/csrc/jit/runtime/argument_spec.h"
|
2019-03-15 20:53:23 +00:00
|
|
|
|
|
|
|
|
namespace torch {
|
|
|
|
|
namespace jit {
|
|
|
|
|
|
2020-09-23 02:42:28 +00:00
|
|
|
namespace {
|
|
|
|
|
|
2019-03-15 20:53:23 +00:00
|
|
|
// Returns the CUDA device index of `v`, or -1 when the tensor is not on CUDA.
int device(const autograd::Variable& v) {
  if (!v.device().is_cuda()) {
    return -1;
  }
  // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
  return v.get_device();
}
|
|
|
|
|
|
|
|
|
|
// True when both index lists have the same length and identical elements.
bool isEqual(at::IntArrayRef lhs, at::IntArrayRef rhs) {
  if (lhs.size() != rhs.size()) {
    return false;
  }
  return std::equal(lhs.begin(), lhs.end(), rhs.begin());
}
|
|
|
|
|
|
|
|
|
|
// Checks that the metadata recorded in `ti` matches the live variable `v`:
// definedness, device, requires_grad, scalar type, sizes, and strides.
bool isEqual(const CompleteArgumentInfo& ti, const autograd::Variable& v) {
  if (!ti.defined()) {
    // An undefined spec entry only matches an undefined variable.
    return !v.defined();
  }
  if (ti.device() != device(v) || ti.requires_grad() != v.requires_grad()) {
    return false;
  }
  return ti.type() == v.scalar_type() && isEqual(ti.sizes(), v.sizes()) &&
      isEqual(ti.strides(), v.strides());
}
|
|
|
|
|
|
Specialize Optional[T] to T (or subtype for Tensor) or None when executing graph (#18407)
Summary:
This patch specializes `Optional[Tensor]` graph inputs to either a `DimensionedTensorType` (if a Tensor is passed) or `NoneType`. Other `Optional[T]` are specialized to `T` or `None`.
- For unwrapping (checked and unchecked) we need to keep the output type, as IR code that follows unwrapping may not work with NoneType (just as it doesn't deal with Optional). While it would not be hit during execution, it will run against the (legitimate) assumptions of the analysis passes.
- Function lookup currently will not match NoneType when it expects optional (I'm not entirely sure why this doesn't lead to unhappyness currently, but hey), I amend this at the level of the function matching code (`operator.cpp`), but see Adam's comments. We would run into trouble if we needed to select between functions whose signature only differs in Optional types with different subtypes, but we would have the same problem when calling them directly, so I would think this is OK.
- It would enable throwing away branches we can't hit. This also reduces the "blockyness" of the graph, so it may be easier to apply optimizations (e.g. fuse things in `if t is None: ...` and outside the `if`.
- Arguments passed into `Optional[Tensor]` arguments will get shape information, which is very handy.
- It get's rid of the problem that tensors passed into Optional arguments get requires_grad set erroneously #18270 (though that also affects lists, which aren't fixed here).
- `Optional[List[int]]` is needed for #18697.
- We're changing typing in a more subtle way than the `TensorType`->`DimensionedTensorType`.
- In particular, specializing to NoneType loses the Type information captured in the `OptionalType` element type.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18407
Reviewed By: zdevito
Differential Revision: D15216808
Pulled By: eellison
fbshipit-source-id: 01f1a7643deaf4962c3f55eff2070d54b0e54b69
2019-05-06 21:54:10 +00:00
|
|
|
// Checks that the (coarser) metadata recorded in `ti` matches `v`: unlike the
// CompleteArgumentInfo overload, only the dimension count is compared, not
// concrete sizes/strides.
bool isEqual(const ArgumentInfo& ti, const autograd::Variable& v) {
  if (!ti.defined()) {
    // An undefined spec entry only matches an undefined variable.
    return !v.defined();
  }
  if (ti.device() != device(v) || ti.requires_grad() != v.requires_grad()) {
    return false;
  }
  return ti.type() == v.scalar_type() && ti.dim() == v.dim();
}
|
|
|
|
|
|
2020-03-26 18:15:49 +00:00
|
|
|
// Builds a randomly-initialized variable with the given options and shape.
autograd::Variable var(
    at::TensorOptions t,
    at::IntArrayRef sizes,
    bool requires_grad) {
  auto data = at::rand(sizes, t);
  return autograd::make_variable(std::move(data), requires_grad);
}
|
|
|
|
|
// Convenience factory for an undefined (empty) variable.
autograd::Variable undef() {
  return {};
}
|
2020-09-23 02:42:28 +00:00
|
|
|
} // namespace
|
2019-03-15 20:53:23 +00:00
|
|
|
|
Make PyTorch code-base clang-tidy compliant (#56892)
Summary:
This is an automatic change generated by the following script:
```
#!/usr/bin/env python3
from subprocess import check_output, check_call
import os
def get_compiled_files_list():
import json
with open("build/compile_commands.json") as f:
data = json.load(f)
files = [os.path.relpath(node['file']) for node in data]
for idx, fname in enumerate(files):
if fname.startswith('build/') and fname.endswith('.DEFAULT.cpp'):
files[idx] = fname[len('build/'):-len('.DEFAULT.cpp')]
return files
def run_clang_tidy(fname):
check_call(["python3", "tools/clang_tidy.py", "-c", "build", "-x", fname,"-s"])
changes = check_output(["git", "ls-files", "-m"])
if len(changes) == 0:
return
check_call(["git", "commit","--all", "-m", f"NOLINT stubs for {fname}"])
def main():
git_files = check_output(["git", "ls-files"]).decode("ascii").split("\n")
compiled_files = get_compiled_files_list()
for idx, fname in enumerate(git_files):
if fname not in compiled_files:
continue
if fname.startswith("caffe2/contrib/aten/"):
continue
print(f"[{idx}/{len(git_files)}] Processing {fname}")
run_clang_tidy(fname)
if __name__ == "__main__":
main()
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/56892
Reviewed By: H-Huang
Differential Revision: D27991944
Pulled By: malfet
fbshipit-source-id: 5415e1eb2c1b34319a4f03024bfaa087007d7179
2021-04-28 21:09:06 +00:00
|
|
|
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
2020-09-23 02:42:28 +00:00
|
|
|
TEST(ArgumentSpecTest, CompleteArgumentSpec_CUDA) {
|
2019-12-05 18:53:32 +00:00
|
|
|
auto const CF = at::CPU(at::kFloat);
|
|
|
|
|
auto const CD = at::CPU(at::kDouble);
|
|
|
|
|
auto const GF = at::CUDA(at::kFloat);
|
|
|
|
|
auto const GD = at::CUDA(at::kDouble);
|
2019-03-15 20:53:23 +00:00
|
|
|
|
2021-01-09 22:34:42 +00:00
|
|
|
auto list = createStack(
|
|
|
|
|
{var(CF, {1}, true),
|
|
|
|
|
var(CD, {1, 2}, false),
|
|
|
|
|
var(GF, {}, true),
|
Make PyTorch code-base clang-tidy compliant (#56892)
Summary:
This is an automatic change generated by the following script:
```
#!/usr/bin/env python3
from subprocess import check_output, check_call
import os
def get_compiled_files_list():
import json
with open("build/compile_commands.json") as f:
data = json.load(f)
files = [os.path.relpath(node['file']) for node in data]
for idx, fname in enumerate(files):
if fname.startswith('build/') and fname.endswith('.DEFAULT.cpp'):
files[idx] = fname[len('build/'):-len('.DEFAULT.cpp')]
return files
def run_clang_tidy(fname):
check_call(["python3", "tools/clang_tidy.py", "-c", "build", "-x", fname,"-s"])
changes = check_output(["git", "ls-files", "-m"])
if len(changes) == 0:
return
check_call(["git", "commit","--all", "-m", f"NOLINT stubs for {fname}"])
def main():
git_files = check_output(["git", "ls-files"]).decode("ascii").split("\n")
compiled_files = get_compiled_files_list()
for idx, fname in enumerate(git_files):
if fname not in compiled_files:
continue
if fname.startswith("caffe2/contrib/aten/"):
continue
print(f"[{idx}/{len(git_files)}] Processing {fname}")
run_clang_tidy(fname)
if __name__ == "__main__":
main()
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/56892
Reviewed By: H-Huang
Differential Revision: D27991944
Pulled By: malfet
fbshipit-source-id: 5415e1eb2c1b34319a4f03024bfaa087007d7179
2021-04-28 21:09:06 +00:00
|
|
|
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
2021-01-09 22:34:42 +00:00
|
|
|
var(GD, {4, 5, 6}, false),
|
|
|
|
|
undef()});
|
2019-03-15 20:53:23 +00:00
|
|
|
|
|
|
|
|
// make sure we have some non-standard strides
|
|
|
|
|
list[1].toTensor().transpose_(0, 1);
|
|
|
|
|
|
|
|
|
|
// same list but different backing values
|
2021-01-09 22:34:42 +00:00
|
|
|
auto list2 = createStack(
|
|
|
|
|
{var(CF, {1}, true),
|
|
|
|
|
var(CD, {1, 2}, false),
|
|
|
|
|
var(GF, {}, true),
|
Make PyTorch code-base clang-tidy compliant (#56892)
Summary:
This is an automatic change generated by the following script:
```
#!/usr/bin/env python3
from subprocess import check_output, check_call
import os
def get_compiled_files_list():
import json
with open("build/compile_commands.json") as f:
data = json.load(f)
files = [os.path.relpath(node['file']) for node in data]
for idx, fname in enumerate(files):
if fname.startswith('build/') and fname.endswith('.DEFAULT.cpp'):
files[idx] = fname[len('build/'):-len('.DEFAULT.cpp')]
return files
def run_clang_tidy(fname):
check_call(["python3", "tools/clang_tidy.py", "-c", "build", "-x", fname,"-s"])
changes = check_output(["git", "ls-files", "-m"])
if len(changes) == 0:
return
check_call(["git", "commit","--all", "-m", f"NOLINT stubs for {fname}"])
def main():
git_files = check_output(["git", "ls-files"]).decode("ascii").split("\n")
compiled_files = get_compiled_files_list()
for idx, fname in enumerate(git_files):
if fname not in compiled_files:
continue
if fname.startswith("caffe2/contrib/aten/"):
continue
print(f"[{idx}/{len(git_files)}] Processing {fname}")
run_clang_tidy(fname)
if __name__ == "__main__":
main()
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/56892
Reviewed By: H-Huang
Differential Revision: D27991944
Pulled By: malfet
fbshipit-source-id: 5415e1eb2c1b34319a4f03024bfaa087007d7179
2021-04-28 21:09:06 +00:00
|
|
|
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
2021-01-09 22:34:42 +00:00
|
|
|
var(GD, {4, 5, 6}, false),
|
|
|
|
|
undef()});
|
2019-03-15 20:53:23 +00:00
|
|
|
list2[1].toTensor().transpose_(0, 1);
|
|
|
|
|
|
|
|
|
|
CompleteArgumentSpec a(true, list);
|
|
|
|
|
CompleteArgumentSpec b(true, list);
|
|
|
|
|
ASSERT_EQ(a.hashCode(), b.hashCode());
|
|
|
|
|
|
|
|
|
|
ASSERT_EQ(a, b);
|
|
|
|
|
CompleteArgumentSpec d(true, list2);
|
|
|
|
|
ASSERT_EQ(d, a);
|
|
|
|
|
ASSERT_EQ(d.hashCode(), a.hashCode());
|
|
|
|
|
|
|
|
|
|
for (size_t i = 0; i < list.size(); ++i) {
|
|
|
|
|
ASSERT_TRUE(isEqual(a.at(i), list[i].toTensor()));
|
|
|
|
|
}
|
|
|
|
|
CompleteArgumentSpec no_grad(/*with_grad=*/false, list);
|
|
|
|
|
ASSERT_TRUE(no_grad != a);
|
|
|
|
|
|
|
|
|
|
std::unordered_set<CompleteArgumentSpec> spec;
|
Specialize Optional[T] to T (or subtype for Tensor) or None when executing graph (#18407)
Summary:
This patch specializes `Optional[Tensor]` graph inputs to either a `DimensionedTensorType` (if a Tensor is passed) or `NoneType`. Other `Optional[T]` are specialized to `T` or `None`.
- For unwrapping (checked and unchecked) we need to keep the output type, as IR code that follows unwrapping may not work with NoneType (just as it doesn't deal with Optional). While it would not be hit during execution, it will run against the (legitimate) assumptions of the analysis passes.
- Function lookup currently will not match NoneType when it expects optional (I'm not entirely sure why this doesn't lead to unhappyness currently, but hey), I amend this at the level of the function matching code (`operator.cpp`), but see Adam's comments. We would run into trouble if we needed to select between functions whose signature only differs in Optional types with different subtypes, but we would have the same problem when calling them directly, so I would think this is OK.
- It would enable throwing away branches we can't hit. This also reduces the "blockyness" of the graph, so it may be easier to apply optimizations (e.g. fuse things in `if t is None: ...` and outside the `if`.
- Arguments passed into `Optional[Tensor]` arguments will get shape information, which is very handy.
- It get's rid of the problem that tensors passed into Optional arguments get requires_grad set erroneously #18270 (though that also affects lists, which aren't fixed here).
- `Optional[List[int]]` is needed for #18697.
- We're changing typing in a more subtle way than the `TensorType`->`DimensionedTensorType`.
- In particular, specializing to NoneType loses the Type information captured in the `OptionalType` element type.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18407
Reviewed By: zdevito
Differential Revision: D15216808
Pulled By: eellison
fbshipit-source-id: 01f1a7643deaf4962c3f55eff2070d54b0e54b69
2019-05-06 21:54:10 +00:00
|
|
|
spec.insert(a); // we use a below, so no move
|
2019-03-15 20:53:23 +00:00
|
|
|
ASSERT_TRUE(spec.count(b) > 0);
|
|
|
|
|
ASSERT_EQ(spec.count(no_grad), 0);
|
|
|
|
|
spec.insert(std::move(no_grad));
|
|
|
|
|
ASSERT_EQ(spec.count(CompleteArgumentSpec(true, list)), 1);
|
|
|
|
|
|
|
|
|
|
list2[1].toTensor().transpose_(0, 1);
|
|
|
|
|
CompleteArgumentSpec c(true, list2); // same as list, except for one stride
|
|
|
|
|
ASSERT_FALSE(c == a);
|
|
|
|
|
ASSERT_EQ(spec.count(c), 0);
|
|
|
|
|
|
|
|
|
|
Stack stack = {var(CF, {1, 2}, true), 3, var(CF, {1, 2}, true)};
|
|
|
|
|
CompleteArgumentSpec with_const(true, stack);
|
|
|
|
|
ASSERT_EQ(with_const.at(2).sizes().size(), 2);
|
|
|
|
|
}
|
|
|
|
|
|
2020-09-23 02:42:28 +00:00
|
|
|
// TODO: this test was disabled for unknown reasons and doesn't run.
|
|
|
|
|
// static size_t hashCode(const TensorTypePtr& ptr) {
|
|
|
|
|
// return std::hash<TensorType>()(*ptr.get());
|
|
|
|
|
// }
|
2019-07-30 20:02:26 +00:00
|
|
|
|
2020-09-23 02:42:28 +00:00
|
|
|
// TEST(ArgumentSpecTest, VaryingShape) {
|
|
|
|
|
// c10::VaryingShape<int64_t> vs(c10::optional<size_t>{});
|
|
|
|
|
// auto ptt_empty1 = TensorType::create({}, {}, vs, vs, false);
|
|
|
|
|
// auto ptt_empty2 = TensorType::create({}, {}, vs, vs, false);
|
|
|
|
|
// ASSERT_EQ(hashCode(ptt_empty1), hashCode(ptt_empty2));
|
2019-07-30 20:02:26 +00:00
|
|
|
|
2020-09-23 02:42:28 +00:00
|
|
|
// c10::VaryingShape<int64_t> vs22(std::vector<int64_t>{2, 2});
|
|
|
|
|
// auto ptt_vs22_vs22_1 = TensorType::create({}, {}, vs22, vs22, false);
|
|
|
|
|
// auto ptt_vs22_vs22_2 = TensorType::create({}, {}, vs22, vs22, false);
|
|
|
|
|
// ASSERT_EQ(hashCode(ptt_vs22_vs22_1), hashCode(ptt_vs22_vs22_2));
|
2019-07-30 20:02:26 +00:00
|
|
|
|
2020-09-23 02:42:28 +00:00
|
|
|
// c10::VaryingShape<int64_t> vs23(std::vector<int64_t>{2, 3});
|
|
|
|
|
// auto ptt_vs22_vs23_2 = TensorType::create({}, {}, vs22, vs23, false);
|
|
|
|
|
// ASSERT_NE(hashCode(ptt_vs22_vs22_1), hashCode(ptt_vs22_vs23_2));
|
2019-07-30 20:02:26 +00:00
|
|
|
|
2020-09-23 02:42:28 +00:00
|
|
|
// auto ptt_vs22_vs22_1_true = TensorType::create({}, {}, vs22, vs22, true);
|
|
|
|
|
// auto ptt_vs22_vs22_2_true = TensorType::create({}, {}, vs22, vs22, true);
|
|
|
|
|
// ASSERT_EQ(hashCode(ptt_vs22_vs22_1_true), hashCode(ptt_vs22_vs22_2_true));
|
2019-07-30 20:02:26 +00:00
|
|
|
|
2020-09-23 02:42:28 +00:00
|
|
|
// auto ptt_vs22_vs22_1_false = TensorType::create({}, {}, vs22, vs22, false);
|
|
|
|
|
// ASSERT_NE(hashCode(ptt_vs22_vs22_1_true), hashCode(ptt_vs22_vs22_1_false));
|
|
|
|
|
// }
|
2019-07-30 20:02:26 +00:00
|
|
|
|
Make PyTorch code-base clang-tidy compliant (#56892)
Summary:
This is an automatic change generated by the following script:
```
#!/usr/bin/env python3
from subprocess import check_output, check_call
import os
def get_compiled_files_list():
import json
with open("build/compile_commands.json") as f:
data = json.load(f)
files = [os.path.relpath(node['file']) for node in data]
for idx, fname in enumerate(files):
if fname.startswith('build/') and fname.endswith('.DEFAULT.cpp'):
files[idx] = fname[len('build/'):-len('.DEFAULT.cpp')]
return files
def run_clang_tidy(fname):
check_call(["python3", "tools/clang_tidy.py", "-c", "build", "-x", fname,"-s"])
changes = check_output(["git", "ls-files", "-m"])
if len(changes) == 0:
return
check_call(["git", "commit","--all", "-m", f"NOLINT stubs for {fname}"])
def main():
git_files = check_output(["git", "ls-files"]).decode("ascii").split("\n")
compiled_files = get_compiled_files_list()
for idx, fname in enumerate(git_files):
if fname not in compiled_files:
continue
if fname.startswith("caffe2/contrib/aten/"):
continue
print(f"[{idx}/{len(git_files)}] Processing {fname}")
run_clang_tidy(fname)
if __name__ == "__main__":
main()
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/56892
Reviewed By: H-Huang
Differential Revision: D27991944
Pulled By: malfet
fbshipit-source-id: 5415e1eb2c1b34319a4f03024bfaa087007d7179
2021-04-28 21:09:06 +00:00
|
|
|
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
2020-09-23 02:42:28 +00:00
|
|
|
TEST(ArgumentSpecTest, Basic_CUDA) {
|
Specialize Optional[T] to T (or subtype for Tensor) or None when executing graph (#18407)
Summary:
This patch specializes `Optional[Tensor]` graph inputs to either a `DimensionedTensorType` (if a Tensor is passed) or `NoneType`. Other `Optional[T]` are specialized to `T` or `None`.
- For unwrapping (checked and unchecked) we need to keep the output type, as IR code that follows unwrapping may not work with NoneType (just as it doesn't deal with Optional). While it would not be hit during execution, it will run against the (legitimate) assumptions of the analysis passes.
- Function lookup currently will not match NoneType when it expects optional (I'm not entirely sure why this doesn't lead to unhappyness currently, but hey), I amend this at the level of the function matching code (`operator.cpp`), but see Adam's comments. We would run into trouble if we needed to select between functions whose signature only differs in Optional types with different subtypes, but we would have the same problem when calling them directly, so I would think this is OK.
- It would enable throwing away branches we can't hit. This also reduces the "blockyness" of the graph, so it may be easier to apply optimizations (e.g. fuse things in `if t is None: ...` and outside the `if`.
- Arguments passed into `Optional[Tensor]` arguments will get shape information, which is very handy.
- It get's rid of the problem that tensors passed into Optional arguments get requires_grad set erroneously #18270 (though that also affects lists, which aren't fixed here).
- `Optional[List[int]]` is needed for #18697.
- We're changing typing in a more subtle way than the `TensorType`->`DimensionedTensorType`.
- In particular, specializing to NoneType loses the Type information captured in the `OptionalType` element type.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18407
Reviewed By: zdevito
Differential Revision: D15216808
Pulled By: eellison
fbshipit-source-id: 01f1a7643deaf4962c3f55eff2070d54b0e54b69
2019-05-06 21:54:10 +00:00
|
|
|
auto& CF = at::CPU(at::kFloat);
|
|
|
|
|
auto& CD = at::CPU(at::kDouble);
|
|
|
|
|
auto& GF = at::CUDA(at::kFloat);
|
|
|
|
|
auto& GD = at::CUDA(at::kDouble);
|
|
|
|
|
|
|
|
|
|
auto graph = jit::compile(R"JIT(
|
|
|
|
|
def fn(a, b, c, d, e):
|
|
|
|
|
return a, b, c, d, e
|
|
|
|
|
)JIT")
|
|
|
|
|
->get_function("fn")
|
|
|
|
|
.graph();
|
|
|
|
|
|
|
|
|
|
ArgumentSpecCreator arg_spec_creator(*graph);
|
|
|
|
|
|
2021-01-09 22:34:42 +00:00
|
|
|
auto list = createStack(
|
|
|
|
|
{var(CF, {1}, true),
|
|
|
|
|
var(CD, {1, 2}, false),
|
|
|
|
|
var(GF, {}, true),
|
Make PyTorch code-base clang-tidy compliant (#56892)
Summary:
This is an automatic change generated by the following script:
```
#!/usr/bin/env python3
from subprocess import check_output, check_call
import os
def get_compiled_files_list():
import json
with open("build/compile_commands.json") as f:
data = json.load(f)
files = [os.path.relpath(node['file']) for node in data]
for idx, fname in enumerate(files):
if fname.startswith('build/') and fname.endswith('.DEFAULT.cpp'):
files[idx] = fname[len('build/'):-len('.DEFAULT.cpp')]
return files
def run_clang_tidy(fname):
check_call(["python3", "tools/clang_tidy.py", "-c", "build", "-x", fname,"-s"])
changes = check_output(["git", "ls-files", "-m"])
if len(changes) == 0:
return
check_call(["git", "commit","--all", "-m", f"NOLINT stubs for {fname}"])
def main():
git_files = check_output(["git", "ls-files"]).decode("ascii").split("\n")
compiled_files = get_compiled_files_list()
for idx, fname in enumerate(git_files):
if fname not in compiled_files:
continue
if fname.startswith("caffe2/contrib/aten/"):
continue
print(f"[{idx}/{len(git_files)}] Processing {fname}")
run_clang_tidy(fname)
if __name__ == "__main__":
main()
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/56892
Reviewed By: H-Huang
Differential Revision: D27991944
Pulled By: malfet
fbshipit-source-id: 5415e1eb2c1b34319a4f03024bfaa087007d7179
2021-04-28 21:09:06 +00:00
|
|
|
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
2021-01-09 22:34:42 +00:00
|
|
|
var(GD, {4, 5, 6}, false),
|
|
|
|
|
undef()});
|
Specialize Optional[T] to T (or subtype for Tensor) or None when executing graph (#18407)
Summary:
This patch specializes `Optional[Tensor]` graph inputs to either a `DimensionedTensorType` (if a Tensor is passed) or `NoneType`. Other `Optional[T]` are specialized to `T` or `None`.
- For unwrapping (checked and unchecked) we need to keep the output type, as IR code that follows unwrapping may not work with NoneType (just as it doesn't deal with Optional). While it would not be hit during execution, it will run against the (legitimate) assumptions of the analysis passes.
- Function lookup currently will not match NoneType when it expects optional (I'm not entirely sure why this doesn't lead to unhappyness currently, but hey), I amend this at the level of the function matching code (`operator.cpp`), but see Adam's comments. We would run into trouble if we needed to select between functions whose signature only differs in Optional types with different subtypes, but we would have the same problem when calling them directly, so I would think this is OK.
- It would enable throwing away branches we can't hit. This also reduces the "blockyness" of the graph, so it may be easier to apply optimizations (e.g. fuse things in `if t is None: ...` and outside the `if`.
- Arguments passed into `Optional[Tensor]` arguments will get shape information, which is very handy.
- It get's rid of the problem that tensors passed into Optional arguments get requires_grad set erroneously #18270 (though that also affects lists, which aren't fixed here).
- `Optional[List[int]]` is needed for #18697.
- We're changing typing in a more subtle way than the `TensorType`->`DimensionedTensorType`.
- In particular, specializing to NoneType loses the Type information captured in the `OptionalType` element type.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18407
Reviewed By: zdevito
Differential Revision: D15216808
Pulled By: eellison
fbshipit-source-id: 01f1a7643deaf4962c3f55eff2070d54b0e54b69
2019-05-06 21:54:10 +00:00
|
|
|
|
|
|
|
|
// make sure we have some non-standard strides
|
|
|
|
|
list[1].toTensor().transpose_(0, 1);
|
|
|
|
|
|
|
|
|
|
// same list but different backing values
|
2021-01-09 22:34:42 +00:00
|
|
|
auto list2 = createStack(
|
|
|
|
|
{var(CF, {1}, true),
|
|
|
|
|
var(CD, {1, 2}, false),
|
|
|
|
|
var(GF, {}, true),
|
Make PyTorch code-base clang-tidy compliant (#56892)
Summary:
This is an automatic change generated by the following script:
```
#!/usr/bin/env python3
from subprocess import check_output, check_call
import os
def get_compiled_files_list():
import json
with open("build/compile_commands.json") as f:
data = json.load(f)
files = [os.path.relpath(node['file']) for node in data]
for idx, fname in enumerate(files):
if fname.startswith('build/') and fname.endswith('.DEFAULT.cpp'):
files[idx] = fname[len('build/'):-len('.DEFAULT.cpp')]
return files
def run_clang_tidy(fname):
check_call(["python3", "tools/clang_tidy.py", "-c", "build", "-x", fname,"-s"])
changes = check_output(["git", "ls-files", "-m"])
if len(changes) == 0:
return
check_call(["git", "commit","--all", "-m", f"NOLINT stubs for {fname}"])
def main():
git_files = check_output(["git", "ls-files"]).decode("ascii").split("\n")
compiled_files = get_compiled_files_list()
for idx, fname in enumerate(git_files):
if fname not in compiled_files:
continue
if fname.startswith("caffe2/contrib/aten/"):
continue
print(f"[{idx}/{len(git_files)}] Processing {fname}")
run_clang_tidy(fname)
if __name__ == "__main__":
main()
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/56892
Reviewed By: H-Huang
Differential Revision: D27991944
Pulled By: malfet
fbshipit-source-id: 5415e1eb2c1b34319a4f03024bfaa087007d7179
2021-04-28 21:09:06 +00:00
|
|
|
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
2021-01-09 22:34:42 +00:00
|
|
|
var(GD, {4, 5, 6}, false),
|
|
|
|
|
undef()});
|
Specialize Optional[T] to T (or subtype for Tensor) or None when executing graph (#18407)
Summary:
This patch specializes `Optional[Tensor]` graph inputs to either a `DimensionedTensorType` (if a Tensor is passed) or `NoneType`. Other `Optional[T]` are specialized to `T` or `None`.
- For unwrapping (checked and unchecked) we need to keep the output type, as IR code that follows unwrapping may not work with NoneType (just as it doesn't deal with Optional). While it would not be hit during execution, it will run against the (legitimate) assumptions of the analysis passes.
- Function lookup currently will not match NoneType when it expects optional (I'm not entirely sure why this doesn't lead to unhappyness currently, but hey), I amend this at the level of the function matching code (`operator.cpp`), but see Adam's comments. We would run into trouble if we needed to select between functions whose signature only differs in Optional types with different subtypes, but we would have the same problem when calling them directly, so I would think this is OK.
- It would enable throwing away branches we can't hit. This also reduces the "blockyness" of the graph, so it may be easier to apply optimizations (e.g. fuse things in `if t is None: ...` and outside the `if`.
- Arguments passed into `Optional[Tensor]` arguments will get shape information, which is very handy.
- It get's rid of the problem that tensors passed into Optional arguments get requires_grad set erroneously #18270 (though that also affects lists, which aren't fixed here).
- `Optional[List[int]]` is needed for #18697.
- We're changing typing in a more subtle way than the `TensorType`->`DimensionedTensorType`.
- In particular, specializing to NoneType loses the Type information captured in the `OptionalType` element type.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18407
Reviewed By: zdevito
Differential Revision: D15216808
Pulled By: eellison
fbshipit-source-id: 01f1a7643deaf4962c3f55eff2070d54b0e54b69
2019-05-06 21:54:10 +00:00
|
|
|
list2[1].toTensor().transpose_(0, 1);
|
|
|
|
|
|
|
|
|
|
ArgumentSpec a = arg_spec_creator.create(true, list);
|
|
|
|
|
ArgumentSpec b = arg_spec_creator.create(true, list);
|
|
|
|
|
ASSERT_EQ(a.hashCode(), b.hashCode());
|
|
|
|
|
|
|
|
|
|
ASSERT_EQ(a, b);
|
|
|
|
|
ArgumentSpec d = arg_spec_creator.create(true, list2);
|
|
|
|
|
ASSERT_EQ(d, a);
|
|
|
|
|
ASSERT_EQ(d.hashCode(), a.hashCode());
|
|
|
|
|
|
|
|
|
|
for (size_t i = 0; i < list.size(); ++i) {
|
|
|
|
|
ASSERT_TRUE(isEqual(a.tensorAt(i), list[i].toTensor()));
|
|
|
|
|
}
|
|
|
|
|
ArgumentSpec no_grad = arg_spec_creator.create(/*with_grad=*/false, list);
|
|
|
|
|
ASSERT_TRUE(no_grad != a);
|
|
|
|
|
|
|
|
|
|
std::unordered_set<ArgumentSpec> spec;
|
|
|
|
|
spec.insert(a); // we still need a for the test below
|
|
|
|
|
ASSERT_TRUE(spec.count(b) > 0);
|
|
|
|
|
ASSERT_EQ(spec.count(no_grad), 0);
|
|
|
|
|
spec.insert(std::move(no_grad));
|
|
|
|
|
ASSERT_EQ(spec.count(arg_spec_creator.create(true, list)), 1);
|
|
|
|
|
|
|
|
|
|
list2[1].toTensor().transpose_(0, 1);
|
|
|
|
|
ArgumentSpec c = arg_spec_creator.create(
|
|
|
|
|
true, list2); // same as list, except for one stride, used to be
|
|
|
|
|
// different, now the same
|
|
|
|
|
ASSERT_TRUE(c == a);
|
|
|
|
|
ASSERT_EQ(spec.count(c), 1);
|
|
|
|
|
}
|
|
|
|
|
|
2019-03-15 20:53:23 +00:00
|
|
|
} // namespace jit
|
|
|
|
|
} // namespace torch
|