2021-02-17 15:15:45 +00:00
|
|
|
#include <torch/csrc/jit/backends/backend.h>
|
2021-06-11 17:14:56 +00:00
|
|
|
#include <torch/csrc/jit/backends/backend_debug_handler.h>
|
2021-04-06 07:53:59 +00:00
|
|
|
#include <torch/csrc/jit/backends/backend_preprocess.h>
|
2021-02-17 15:15:45 +00:00
|
|
|
|
|
|
|
|
namespace torch {
|
|
|
|
|
namespace jit {
|
|
|
|
|
// This test JIT backend is intended to do the minimal amount of work
|
|
|
|
|
// necessary to test that the JIT backend registration endpoints and
|
|
|
|
|
// code generation are working correctly. It is not intended to
|
|
|
|
|
// produce numerically correct results.
|
Adds a bool is_available() method to the backend contract (#53068)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/53068
Adds a ```bool is_available()``` method to the backend contract: it returns ```true``` if ```compile()``` and ```execute()``` can be called; ```false``` otherwise.
It is used to implement the following changes in the ```LoweredModule```:
* ```compile()``` in ```__setstate__``` will run if ```is_available()```, else ```__setstate__``` throws an exception (“Backend not available.”).
* ```compile()``` at ```LoweredModule``` creation will run if ```is_available()```, else a WARNING will be thrown.
* ```execute()``` will only be executed if ```is_available()``` returns true; else throws an exception (“Backend not available.”).
The goal of these changes is to ensure we have a well defined behaviour for the different combinations of backend availability on-host and on-target.
More specifically, backends may have different capabilities to compile and/or execute the Module, depending whether this happens on-host (i.e. where the program is being written) or on-target (where the program is being executed).
First of all, we know that "preprocess" always takes place, and that only happens on-host at creation time. So, we can assume that any compilation is needed/possible on-host then all of it could be pushed here.
Overall, we want to ensure the following:
**On host**
| compile | execute | Outcome |
| -- | -- | -- |
| No | No | On module creation, LoweredModule is generated, with a warning (since compilation and execution can still take place on-target). On module load, throws an exception (since execution is not possible). |
| No | Yes | This configuration should not be possible. This assumes the full compiler is not available, even if some work was done in preprocess the program cannot be finalized for execution. |
| Yes | No | In this case, the expectation would be for is_available() to return false, and compilation logic to move into preprocess. |
| Yes | Yes | All good. This is the only case that is_available() should return true. |
**On target**
| compile | execute | Outcome |
| -- | -- | -- |
| No | No | Loading the LoweredModule throws an exception. Since execution is not possible. |
| No | Yes | Basically this is another instance of Yes/Yes: compilation per se may not be possible on device, which means compile() can be called without issue but it is a no-op, and thus is_available should return true. Consequently, loading the LoweredModule: Succeeds, if the preprocessed module is ready for execution. Fails with exception otherwise. |
| Yes | No | This configuration should not be possible. Just putting here for completeness. |
| Yes | Yes | All good. This, along with No/Yes case (because compilation is assumed to have happened on-host, so it's just another instance of Yes/Yes), are the cases where is_available() should return true. |
**Refactoring existing code**
This change also updates other backends (Glow) code, to implement the is_available() method to have the same behaviour as before this change (i.e. always available).
This should not cause backward incompatibilities with already saved models since we're adding a new method to the PyTorchBackendInterface.
Models saved with the old interface that didn't have is_available() will still find the other 2 methods in the bound object (i.e. compile and execute), and the saved LoweredModule logic will be the old one.
**Future**
We plan to use is_available() to implement support for fallback to the PyTorch interpreter.
ghstack-source-id: 123498571
Test Plan: Added C++ (test_backend.cpp) and Python (test_backends.py) tests to validate the exceptions.
Reviewed By: jackm321, spaugh, iseeyuan
Differential Revision: D26615833
fbshipit-source-id: 562e8b11db25784348b5f86bbc4179aedf15e0d3
2021-03-10 08:21:34 +00:00
|
|
|
template <bool isAvailable>
|
2021-02-17 15:15:45 +00:00
|
|
|
class TestBackend : public PyTorchBackendInterface {
|
|
|
|
|
public:
|
|
|
|
|
// Constructor.
|
Make PyTorch code-base clang-tidy compliant (#56892)
Summary:
This is an automatic change generated by the following script:
```
#!/usr/bin/env python3
from subprocess import check_output, check_call
import os
def get_compiled_files_list():
import json
with open("build/compile_commands.json") as f:
data = json.load(f)
files = [os.path.relpath(node['file']) for node in data]
for idx, fname in enumerate(files):
if fname.startswith('build/') and fname.endswith('.DEFAULT.cpp'):
files[idx] = fname[len('build/'):-len('.DEFAULT.cpp')]
return files
def run_clang_tidy(fname):
check_call(["python3", "tools/clang_tidy.py", "-c", "build", "-x", fname,"-s"])
changes = check_output(["git", "ls-files", "-m"])
if len(changes) == 0:
return
check_call(["git", "commit","--all", "-m", f"NOLINT stubs for {fname}"])
def main():
git_files = check_output(["git", "ls-files"]).decode("ascii").split("\n")
compiled_files = get_compiled_files_list()
for idx, fname in enumerate(git_files):
if fname not in compiled_files:
continue
if fname.startswith("caffe2/contrib/aten/"):
continue
print(f"[{idx}/{len(git_files)}] Processing {fname}")
run_clang_tidy(fname)
if __name__ == "__main__":
main()
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/56892
Reviewed By: H-Huang
Differential Revision: D27991944
Pulled By: malfet
fbshipit-source-id: 5415e1eb2c1b34319a4f03024bfaa087007d7179
2021-04-28 21:09:06 +00:00
|
|
|
// NOLINTNEXTLINE(modernize-use-equals-default)
|
2021-02-17 15:15:45 +00:00
|
|
|
explicit TestBackend() {}
|
2023-06-24 02:34:20 +00:00
|
|
|
virtual ~TestBackend() override = default;
|
2021-02-17 15:15:45 +00:00
|
|
|
|
Adds a bool is_available() method to the backend contract (#53068)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/53068
Adds a ```bool is_available()``` method to the backend contract: it returns ```true``` if ```compile()``` and ```execute()``` can be called; ```false``` otherwise.
It is used to implement the following changes in the ```LoweredModule```:
* ```compile()``` in ```__setstate__``` will run if ```is_available()```, else ```__setstate__``` throws an exception (“Backend not available.”).
* ```compile()``` at ```LoweredModule``` creation will run if ```is_available()```, else a WARNING will be thrown.
* ```execute()``` will only be executed if ```is_available()``` returns true; else throws an exception (“Backend not available.”).
The goal of these changes is to ensure we have a well defined behaviour for the different combinations of backend availability on-host and on-target.
More specifically, backends may have different capabilities to compile and/or execute the Module, depending whether this happens on-host (i.e. where the program is being written) or on-target (where the program is being executed).
First of all, we know that "preprocess" always takes place, and that only happens on-host at creation time. So, we can assume that any compilation is needed/possible on-host then all of it could be pushed here.
Overall, we want to ensure the following:
**On host**
| compile | execute | Outcome |
| -- | -- | -- |
| No | No | On module creation, LoweredModule is generated, with a warning (since compilation and execution can still take place on-target). On module load, throws an exception (since execution is not possible). |
| No | Yes | This configuration should not be possible. This assumes the full compiler is not available, even if some work was done in preprocess the program cannot be finalized for execution. |
| Yes | No | In this case, the expectation would be for is_available() to return false, and compilation logic to move into preprocess. |
| Yes | Yes | All good. This is the only case that is_available() should return true. |
**On target**
| compile | execute | Outcome |
| -- | -- | -- |
| No | No | Loading the LoweredModule throws an exception. Since execution is not possible. |
| No | Yes | Basically this is another instance of Yes/Yes: compilation per se may not be possible on device, which means compile() can be called without issue but it is a no-op, and thus is_available should return true. Consequently, loading the LoweredModule: Succeeds, if the preprocessed module is ready for execution. Fails with exception otherwise. |
| Yes | No | This configuration should not be possible. Just putting here for completeness. |
| Yes | Yes | All good. This, along with No/Yes case (because compilation is assumed to have happened on-host, so it's just another instance of Yes/Yes), are the cases where is_available() should return true. |
**Refactoring existing code**
This change also updates other backends (Glow) code, to implement the is_available() method to have the same behaviour as before this change (i.e. always available).
This should not cause backward incompatibilities with already saved models since we're adding a new method to the PyTorchBackendInterface.
Models saved with the old interface that didn't have is_available() will still find the other 2 methods in the bound object (i.e. compile and execute), and the saved LoweredModule logic will be the old one.
**Future**
We plan to use is_available() to implement support for fallback to the PyTorch interpreter.
ghstack-source-id: 123498571
Test Plan: Added C++ (test_backend.cpp) and Python (test_backends.py) tests to validate the exceptions.
Reviewed By: jackm321, spaugh, iseeyuan
Differential Revision: D26615833
fbshipit-source-id: 562e8b11db25784348b5f86bbc4179aedf15e0d3
2021-03-10 08:21:34 +00:00
|
|
|
bool is_available() override {
|
|
|
|
|
return isAvailable;
|
|
|
|
|
}
|
|
|
|
|
|
2021-02-17 15:15:45 +00:00
|
|
|
c10::impl::GenericDict compile(
|
|
|
|
|
c10::IValue processed,
|
|
|
|
|
c10::impl::GenericDict method_compile_spec) override {
|
|
|
|
|
auto spec =
|
|
|
|
|
c10::impl::toTypedDict<std::string, at::IValue>(method_compile_spec);
|
|
|
|
|
|
|
|
|
|
// Return the same string as a value for every key in method_compile_spec.
|
|
|
|
|
auto handles = c10::Dict<std::string, std::string>();
|
|
|
|
|
for (const auto& it : spec) {
|
|
|
|
|
handles.insert(it.key(), it.key());
|
|
|
|
|
}
|
|
|
|
|
return c10::impl::toGenericDict(handles);
|
|
|
|
|
}
|
|
|
|
|
c10::impl::GenericList execute(
|
|
|
|
|
c10::IValue handle,
|
|
|
|
|
c10::impl::GenericList inputs) override {
|
|
|
|
|
TORCH_INTERNAL_ASSERT(handle.isString());
|
|
|
|
|
TORCH_INTERNAL_ASSERT(inputs.size() > 0);
|
|
|
|
|
|
|
|
|
|
c10::List<at::Tensor> output_list;
|
|
|
|
|
|
|
|
|
|
// Implement simple accumulator and negative accumulator (?) ops. Return one
|
|
|
|
|
// or both of them depending on the handle to make sure multiple outputs are
|
|
|
|
|
// handled.
|
|
|
|
|
c10::IValue value = inputs[0];
|
|
|
|
|
at::Tensor accum = value.toTensor();
|
|
|
|
|
accum = accum.clone();
|
|
|
|
|
at::Tensor sub_accum = value.toTensor();
|
|
|
|
|
sub_accum = sub_accum.clone();
|
|
|
|
|
|
|
|
|
|
for (size_t i = 1, e = inputs.size(); i < e; ++i) {
|
|
|
|
|
value = inputs[i];
|
|
|
|
|
accum.add_(value.toTensor(), 1.0);
|
|
|
|
|
sub_accum.sub_(value.toTensor(), 1.0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (handle.toStringRef() == "accum") {
|
|
|
|
|
output_list.emplace_back(accum);
|
|
|
|
|
} else if (handle.toStringRef() == "sub_accum") {
|
|
|
|
|
output_list.emplace_back(sub_accum);
|
|
|
|
|
} else if (handle.toStringRef() == "forward") {
|
|
|
|
|
output_list.emplace_back(accum);
|
|
|
|
|
output_list.emplace_back(sub_accum);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return c10::impl::toList(output_list);
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
2021-02-26 19:51:29 +00:00
|
|
|
namespace {

// Backend "preprocess" step, run on-host when the module is lowered.
// This test backend does no real preprocessing: it returns the module's
// underlying IValue unchanged. The compile spec and debug-handle generator
// are accepted to satisfy the preprocess signature but are unused here.
c10::IValue preprocess(
    const Module& mod,
    const c10::Dict<IValue, IValue>& method_compile_spec,
    const BackendDebugHandleGenerator& generate_debug_handles) {
  return mod._ivalue();
}

// Register the always-available flavor of the test backend, plus its
// preprocess function, under the name used by the backend tests.
constexpr auto backend_name = "test_backend";
static auto cls_available =
    torch::jit::backend<TestBackend<true>>(backend_name);
static auto pre_reg = backend_preprocess_register(backend_name, preprocess);

// Register a never-available flavor (is_available() == false) so the tests
// can exercise the "Backend not available." error paths.
constexpr auto backend_unavailable_name = "test_backend_unavailable";
static auto cls_unavailable =
    torch::jit::backend<TestBackend<false>>(backend_unavailable_name);
static auto pre_reg_unavailable =
    backend_preprocess_register(backend_unavailable_name, preprocess);

} // namespace
|
2021-02-17 15:15:45 +00:00
|
|
|
} // namespace jit
|
|
|
|
|
} // namespace torch
|