#include "test/cpp/jit/test_base.h"
#include "test/cpp/jit/test_utils.h"

#include <stdexcept>

namespace torch {
namespace jit {
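
// Exercises prim::TypeCheck in the interpreter. As I understand it, the node
// compares each runtime input against an expected tensor type (dtype, sizes
// and strides, device, requires_grad) and returns the inputs together with a
// boolean that is true only when every input matches.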
void testTypeCheck() {
  {
    auto graph = std::make_shared<Graph>();
    std::unordered_map<std::string, Value*> vmap;
    parseIR(
        R"IR(
graph(%a.1 : Tensor,
      %b.1 : Tensor):
  %t0 : Float(2:2, 2:1, device=cpu, requires_grad=1), %t1 : Float(3:3, 3:1), %type_matched : bool = prim::TypeCheck(%a.1, %b.1)
  return (%t0, %t1, %type_matched)
  )IR",
        &*graph,
        vmap);
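    // As I read this IR, Float(2:2, 2:1) lists size:stride pairs per
    // dimension, so the check expects %a.1 to be a contiguous 2x2 float CPU
    // tensor with requires_grad set, and %b.1 a contiguous 3x3 float tensor.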
    Code function(graph, "");
    InterpreterState interp(function);
    {
      // TypeCheck should pass: shape, requires_grad, and device all match.
      auto a = at::zeros({2, 2}, at::kFloat);
      auto b = at::ones({3, 3}, at::kFloat);
      a.set_requires_grad(true);
      a = a.to(at::kCPU);
      std::vector<IValue> stack({a, b});
      interp.run(stack);
      ASSERT_TRUE(exactlyEqual(stack[0].toTensor(), a));
      ASSERT_TRUE(exactlyEqual(stack[1].toTensor(), b));
      ASSERT_TRUE(stack[2].toBool());
    }
    {
      auto a = at::zeros({2, 2}, at::kFloat);
      auto b = at::ones({2, 2}, at::kFloat); // Size mismatch
      a.set_requires_grad(true);
      a = a.to(at::kCPU);
      std::vector<IValue> stack({a, b});
      interp.run(stack);
      ASSERT_FALSE(stack[2].toBool());
    }
    {
      auto a = at::zeros({2, 2}, at::kFloat);
      auto b = at::ones({3, 3}, at::kFloat);
      a = a.to(at::kCPU);
      a.set_requires_grad(false); // Gradient mismatch
      std::vector<IValue> stack({a, b});
      interp.run(stack);
      ASSERT_FALSE(stack[2].toBool());
    }
    {
      auto a = at::zeros({2, 2}, at::kFloat);
      auto b = at::ones({3, 3}, at::kFloat);
      a = a.to(at::kCPU);
      a.set_requires_grad(true);
      a = a.to(at::kInt); // Scalar type mismatch
      std::vector<IValue> stack({a, b});
      interp.run(stack);
      ASSERT_FALSE(stack[2].toBool());
    }
    {
      auto a = at::zeros({2, 2}, at::kFloat);
      auto b = at::ones({3, 3}, at::kFloat);
      a.set_requires_grad(true);
      a = a.to(at::kCUDA); // Device mismatch
      std::vector<IValue> stack({a, b});
      interp.run(stack);
      ASSERT_FALSE(stack[2].toBool());
    }
  }
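
  // The two blocks below feed prim::TypeCheck malformed signatures. My
  // understanding is that the node must have at least one input and must
  // produce exactly num_inputs + 1 outputs (the refined inputs plus the
  // boolean flag), so both graphs are expected to trip an internal assert.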
  try { // Test empty TypeCheck raises an internal assertion
    auto graph = std::make_shared<Graph>();
    std::unordered_map<std::string, Value*> vmap;
    parseIR(
        R"IR(
graph(%a.1 : Tensor,
      %b.1 : Tensor):
  %type_matched : bool = prim::TypeCheck()
  return (%type_matched)
  )IR",
        &*graph,
        vmap);
  } catch (const std::exception& e) {
  }
  try { // Test for assertion if num_inputs + 1 != num_outputs
    auto graph = std::make_shared<Graph>();
    std::unordered_map<std::string, Value*> vmap;
    parseIR(
        R"IR(
graph(%a.1 : Tensor,
      %b.1 : Tensor):
  %type_matched : bool = prim::TypeCheck(%a.1)
  return (%type_matched)
  )IR",
        &*graph,
        vmap);
  } catch (const std::exception& e) {
  }
}
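
// Runs a single LSTM cell graph built by build_lstm() through the interpreter
// and checks the outputs against the eager-mode lstm() reference from the
// test utilities. The tensors live on CUDA, so this presumably only runs in
// CUDA-enabled builds.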
void testInterp() {
  constexpr int batch_size = 4;
  constexpr int input_size = 256;
  constexpr int seq_len = 32;

  int hidden_size = 2 * input_size;

  auto input = at::randn({seq_len, batch_size, input_size}, at::kCUDA);
  auto hx = at::randn({batch_size, hidden_size}, at::kCUDA);
  auto cx = at::randn({batch_size, hidden_size}, at::kCUDA);
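  // Both weight matrices have 4 * hidden_size rows, one block per LSTM gate
  // (input, forget, cell, output). t_def() is a helper from the test
  // utilities; I take it to put the weights into the (transposed) layout
  // that build_lstm() expects.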
  auto w_ih = t_def(at::randn({4 * hidden_size, input_size}, at::kCUDA));
  auto w_hh = t_def(at::randn({4 * hidden_size, hidden_size}, at::kCUDA));

  auto lstm_g = build_lstm();
  Code lstm_function(lstm_g, "");
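  // Drive the interpreter and compare against the reference lstm() helper.
  // Note that only the first timestep, input[0], is fed to the cell.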
  InterpreterState lstm_interp(lstm_function);
  auto outputs = run(lstm_interp, {input[0], hx, cx, w_ih, w_hh});
  std::tie(hx, cx) = lstm(input[0], hx, cx, w_ih, w_hh);

  ASSERT_TRUE(exactlyEqual(outputs[0], hx));
  ASSERT_TRUE(exactlyEqual(outputs[1], cx));
}
} // namespace jit
} // namespace torch