// pytorch — test/cpp/api/module.cpp
// Unit tests for the C++ frontend's torch::nn::Module base class.

#include "catch_utils.hpp"
#include <torch/nn/module.h>
#include <torch/nn/modules/linear.h>
#include <torch/nn/modules/rnn.h>
#include <torch/tensor.h>
#include <torch/utils.h>
#include <test/cpp/api/util.h>
using namespace torch::nn;
using namespace torch::test;
using Catch::StartsWith;
// Minimal Module subclasses used to exercise name() demangling and as<>()
// downcasting in the tests below.
struct AGIUnit : torch::nn::Module {};
namespace test {
// Same class name inside a namespace, to verify the namespace is preserved
// by name() demangling.
struct AGIUnit : torch::nn::Module {};
struct AGIUnit2 : torch::nn::Module {
// An explicitly supplied name overrides the demangled default.
AGIUnit2() : torch::nn::Module("Foo") {}
};
} // namespace test
CATCH_TEST_CASE("module/training-mode") {
torch::manual_seed(0);
Linear module(3, 4);
// Modules start out in training mode after construction.
CATCH_REQUIRE(module->is_training());
CATCH_SECTION("Enable eval mode") {
module->eval();
CATCH_REQUIRE(!module->is_training());
}
CATCH_SECTION("Enable train mode") {
module->train();
CATCH_REQUIRE(module->is_training());
}
}
CATCH_TEST_CASE("module/zero-grad") {
torch::manual_seed(0);
Linear module(3, 4);
// Run a forward/backward pass so every parameter accumulates a gradient.
auto input = torch::ones({8, 3}, torch::requires_grad());
auto loss = module->forward(input).sum();
loss.backward();
// After backward(), all gradients should be defined and non-zero.
for (auto& param : module->parameters()) {
auto grad = param->grad();
CATCH_REQUIRE(grad.defined());
CATCH_REQUIRE(grad.sum().toCFloat() != 0);
}
// zero_grad() keeps gradients defined but resets their values to zero.
module->zero_grad();
for (auto& param : module->parameters()) {
auto grad = param->grad();
CATCH_REQUIRE(grad.defined());
CATCH_REQUIRE(grad.sum().toCFloat() == 0);
}
}
CATCH_TEST_CASE("module/zero-grad-with-undefined") {
// A module with two parameters, only one of which participates in the graph.
struct TestModule : torch::nn::Module {
TestModule() {
x = register_parameter("x", torch::ones(5, at::requires_grad()));
y = register_parameter("y", torch::ones(5, at::requires_grad()));
}
torch::Tensor x, y;
};
TestModule module;
// Only `x` is used in the computation, so only x's gradient gets defined.
auto doubled = module.x * 2;
doubled.sum().backward();
CATCH_REQUIRE(module.x.grad().defined());
CATCH_REQUIRE(!module.y.grad().defined());
// zero_grad() must zero x's gradient without materializing one for `y`.
module.zero_grad();
CATCH_REQUIRE(module.x.grad().defined());
CATCH_REQUIRE(!module.y.grad().defined());
CATCH_REQUIRE(module.x.grad().sum().toCFloat() == 0);
}
// Verifies Module::name() returns the (demangled) class name, or the
// explicitly supplied name. (Removed a stray scrape artifact — a commit
// timestamp line — that was not valid C++.)
CATCH_TEST_CASE("module/name") {
// CHECK instead of REQUIRE because demangling may fail.
AGIUnit agi;
// Call it twice just to make sure there are no bugs in the lazy
// initialization semantics.
CATCH_CHECK(agi.name() == "AGIUnit");
CATCH_CHECK(agi.name() == "AGIUnit");
CATCH_SECTION("correctly demangled") {
CATCH_CHECK(test::AGIUnit().name() == "test::AGIUnit");
CATCH_CHECK(test::AGIUnit2().name() == "Foo");
}
}
CATCH_TEST_CASE("module/as") {
Linear module(3, 4);
// as<T>() works directly on the module holder...
CATCH_REQUIRE(module->as<Linear>() == module.get());
CATCH_REQUIRE(module->as<LinearImpl>() == module.get());
CATCH_REQUIRE(module->as<Module>() == module.get());
CATCH_REQUIRE(module->as<AGIUnit>() == nullptr);
// ...and through a shared_ptr to the Module base...
std::shared_ptr<Module> base = module.ptr();
CATCH_REQUIRE(base->as<Linear>() == module.get());
CATCH_REQUIRE(base->as<LinearImpl>() == module.get());
CATCH_REQUIRE(base->as<Module>() == module.get());
CATCH_REQUIRE(base->as<AGIUnit>() == nullptr);
// ...and through a plain Module reference.
Module& base_ref = *base.get();
CATCH_REQUIRE(base_ref.as<Linear>() == module.get());
CATCH_REQUIRE(base_ref.as<LinearImpl>() == module.get());
CATCH_REQUIRE(base_ref.as<Module>() == module.get());
CATCH_REQUIRE(base_ref.as<AGIUnit>() == nullptr);
if (auto* linear = base_ref.as<Linear>()) {
CATCH_REQUIRE(linear->weight.ndimension() == 2);
}
// A mismatched cast yields nullptr; a matching one yields the object itself.
AGIUnit unit;
CATCH_REQUIRE(unit.as<Linear>() == nullptr);
CATCH_REQUIRE(unit.as<LinearImpl>() == nullptr);
CATCH_REQUIRE(unit.as<AGIUnit>() == &unit);
}
CATCH_TEST_CASE("module/conversions", "[multi-cuda]") {
torch::manual_seed(0);
Linear module(128, 64);
CATCH_SECTION("starts as float on CPU") {
for (auto& param : module->parameters()) {
CATCH_REQUIRE(param->device() == torch::Device(torch::kCPU));
CATCH_REQUIRE(param->dtype() == torch::kFloat32);
}
}
CATCH_SECTION("to(CUDA)") {
// Move to CUDA device 0, then re-move to device 1; parameters must follow.
module->to({torch::kCUDA, 0});
for (auto& param : module->parameters()) {
CATCH_REQUIRE(param->device().type() == torch::Device::Type::CUDA);
CATCH_REQUIRE(param->device().index() == 0);
}
module->to({at::kCUDA, 1});
for (auto& param : module->parameters()) {
CATCH_REQUIRE(param->device().type() == torch::Device::Type::CUDA);
CATCH_REQUIRE(param->device().index() == 1);
}
}
CATCH_SECTION("to(CPU)") {
module->to(torch::Device(torch::kCPU));
for (auto& param : module->parameters()) {
CATCH_REQUIRE(param->device().type() == torch::Device::Type::CPU);
}
}
CATCH_SECTION("to(Int32)") {
module->to(torch::kInt32);
for (auto& param : module->parameters()) {
CATCH_REQUIRE(param->dtype() == torch::kInt32);
}
}
CATCH_SECTION("to(Float64)") {
module->to(torch::kFloat64);
for (auto& param : module->parameters()) {
CATCH_REQUIRE(param->dtype() == torch::kFloat64);
}
}
CATCH_SECTION("to(CUDA, Byte)") {
// Device and dtype can be changed in a single to() call.
module->to(torch::Device(torch::kCUDA, 1), torch::kUInt8);
for (auto& param : module->parameters()) {
CATCH_REQUIRE(param->device().type() == torch::Device::Type::CUDA);
CATCH_REQUIRE(param->device().index() == 1);
}
for (auto& param : module->parameters()) {
CATCH_REQUIRE(param->dtype() == torch::kUInt8);
}
}
}
CATCH_TEST_CASE("module/clone") {
torch::manual_seed(0);
CATCH_SECTION(
"a module that does not override clone() throws when clone() is called") {
struct UnCloneable : Module {};
UnCloneable module;
CATCH_REQUIRE_THROWS_WITH(
module.clone(), StartsWith("clone() has not been implemented"));
}
CATCH_SECTION(
"a module that overrides clone() does not throw when clone() is called ") {
struct Cloneable : Module {
std::shared_ptr<Module> clone(
at::optional<torch::Device> device = at::nullopt) const override {
return nullptr;
}
};
Cloneable module;
CATCH_REQUIRE_NOTHROW(module.clone());
}
CATCH_SECTION("Cloning creates distinct parameters") {
struct TestModule : public Cloneable<TestModule> {
TestModule() {
reset();
}
void reset() override {
l1 = register_module("l1", Linear(10, 3));
l2 = register_module("l2", Linear(3, 5));
l3 = register_module("l3", Linear(5, 100));
buffer = register_buffer("buf", torch::ones({2, 2}));
}
Make Sequential ref-counted (#9151) Summary: In the C++ API, `Sequential` currently was not refcounted itself, but stored `shared_ptr<AnyModule>` to get the reference semantics. This is unfortunate because most modules in the API are accessed via `->`, e.g. `Linear l(1, 2); l->forward(...);`. `Sequential` was different in that it had value semantics itself, thus was accessed via `.`. This PR makes `Sequential` store `AnyModule` (without extra indirection), and uses the same pImpl mechanism we use for all other modules to make `Sequential` have reference semantics itself. This makes it consistent with the rest of the library. It also removes one level of indirection inside of `Sequential`, which is cool. One thing I had to change was that the `ModuleHolder` with which the whole pImpl thing is implemented previously did some tricks to make `Linear(3, 4)` actually construct `Linear(LinearOptions(3, 4))`. This doesn't work well with `Sequential` since it takes a variadic parameter pack. Instead, I made `ModuleHolder` forward all arguments to the underlying module, and then further pushed the trick to forward parameters to modules' options types into the actual Modules. This adds one constructor per Module in the library. This is not something user modules have to do (unless they want this nice forwarding themselves). It makes the code simpler overall. ezyang ebetica apaszke Pull Request resolved: https://github.com/pytorch/pytorch/pull/9151 Reviewed By: ezyang Differential Revision: D8809298 Pulled By: goldsborough fbshipit-source-id: da68452c3de912fbc67af330ba93b5220de6909f
2018-07-12 00:15:08 +00:00
Linear l1{nullptr}, l2{nullptr}, l3{nullptr};
torch::Tensor buffer;
};
auto module = std::make_shared<TestModule>();
torch::NoGradGuard no_grad;
auto module2 = module->clone();
auto params1 = module->parameters();
auto params2 = module2->parameters();
CATCH_REQUIRE(params1.size() == 6);
CATCH_REQUIRE(params2.size() == 6);
for (auto& param : params1) {
CATCH_REQUIRE(!pointer_equal(param.value, params2[param.key]));
CATCH_REQUIRE(param->allclose(params2[param.key]));
param->add_(2);
}
for (auto& param : params1) {
CATCH_REQUIRE(!param->allclose(params2[param.key]));
}
auto buffers1 = module->buffers();
auto buffers2 = module2->buffers();
CATCH_REQUIRE(buffers1.size() == 1);
CATCH_REQUIRE(buffers2.size() == 1);
for (auto& buffer : buffers1) {
CATCH_REQUIRE(!pointer_equal(buffer.value, buffers2[buffer.key]));
CATCH_REQUIRE(buffer->allclose(buffers2[buffer.key]));
buffer->add_(2);
}
for (auto& buffer : buffers1) {
CATCH_REQUIRE(!buffer->allclose(buffers2[buffer.key]));
}
}
CATCH_SECTION("Cloning preserves external references") {
struct TestModule : public Cloneable<TestModule> {
TestModule() {
reset();
}
void reset() override {
Create ATen tensors via TensorOptions (#7869) * Created TensorOptions Storing the type in TensorOptions to solve the Variable problem Created convenience creation functions for TensorOptions and added tests Converted zeros to TensorOptions Converted rand to TensorOptions Fix codegen for TensorOptions and multiple arguments Put TensorOptions convenience functions into torch namespace too All factory functions except *_like support TensorOptions Integrated with recent JIT changes Support *_like functions Fix in place modification Some cleanups and fixes Support sparse_coo_tensor Fix bug in Type.cpp Fix .empty calls in C++ API Fix bug in Type.cpp Trying to fix device placement Make AutoGPU CPU compatible Remove some auto_gpu.h uses Fixing some headers Fix some remaining CUDA/AutoGPU issues Fix some AutoGPU uses Fixes to dispatch_tensor_conversion Reset version of new variables to zero Implemented parsing device strings Random fixes to tests Self review cleanups flake8 Undo changes to variable.{h,cpp} because they fail on gcc7.2 Add [cuda] tag to tensor_options_cuda.cpp Move AutoGPU::set_index_from into .cpp file because Windows is stupid and sucks Fix linker error in AutoGPU.cpp Fix bad merge conflict in native_functions.yaml Fixed caffe2/contrib/aten Fix new window functions added to TensorFactories.cpp * Removed torch::TensorOptions Added code to generate wrapper functions for factory methods Add implicit constructor from Backend to TensorOptions Remove Var() from C++ API and use torch:: functions Use torch:: functions more subtly in C++ API Make AutoGPU::set_device more exception safe Check status directly in DynamicCUDAHooksInterface Rename AutoGPU to DeviceGuard Removed set_requires_grad from python_variables.h and warn appropriately in Variable::set_requires_grad remove python_default_init: self.type() Add back original factory functions, but with deprecation warnings Disable DeviceGuard for a couple functions in ATen Remove print statement Fix DeviceGuard 
construction from undefined tensor Fixing CUDA device compiler issues Moved as many methods as possible into header files Dont generate python functions for deprecated factories Remove merge conflict artefact Fix tensor_options_cuda.cpp Fix set_requires_grad not being checked Fix tensor_new.h TEMPORARILY put some methods in .cpp files to see if it solves issues on windows and mac Fix bug in DeviceGuard.h Missing includes TEMPORARILY moving a few more methods into .cpp to see if it fixes windows Fixing linker errors * Fix up SummaryOps to use new factories Undo device agnostic behavior of DeviceGuard Use -1 instead of optional for default device index Also move DeviceGuard methods into header Fixes around device index after optional -> int32_t switch Fix use of DeviceGuard in new_with_tensor_copy Fix tensor_options.cpp * Fix Type::copy( * Remove test_non_float_params from ONNX tests * Set requires_grad=False in ONNX tests that use ints * Put layout/dtype/device on Tensor * Post merge fixes * Change behavior of DeviceGuard to match AutoGPU * Fix C++ API integration tests * Fix flip functions
2018-06-16 07:40:35 +00:00
weight = register_parameter("weight", torch::ones({4, 4}));
}
torch::Tensor weight;
};
auto module = std::make_shared<TestModule>();
{
torch::NoGradGuard no_grad;
module->weight += 1;
}
CATCH_REQUIRE(pointer_equal(module->weight, module->parameters()["weight"]));
CATCH_REQUIRE(module->weight.allclose(module->parameters()["weight"]));
auto module2 = std::dynamic_pointer_cast<TestModule>(
std::shared_ptr<Module>(module->clone()));
CATCH_REQUIRE(!pointer_equal(module2->weight, module->weight));
CATCH_REQUIRE(pointer_equal(module2->weight, module2->parameters()["weight"]));
CATCH_REQUIRE(module2->weight.allclose(module2->parameters()["weight"]));
CATCH_REQUIRE(module2->weight.allclose(module->weight));
CATCH_REQUIRE(!pointer_equal(module2->weight, module->parameters()["weight"]));
}
CATCH_SECTION("Cloning copies the values of variables of submodules") {
struct TestModule : public Cloneable<TestModule> {
TestModule() {
reset();
}
void reset() override {
Create ATen tensors via TensorOptions (#7869) * Created TensorOptions Storing the type in TensorOptions to solve the Variable problem Created convenience creation functions for TensorOptions and added tests Converted zeros to TensorOptions Converted rand to TensorOptions Fix codegen for TensorOptions and multiple arguments Put TensorOptions convenience functions into torch namespace too All factory functions except *_like support TensorOptions Integrated with recent JIT changes Support *_like functions Fix in place modification Some cleanups and fixes Support sparse_coo_tensor Fix bug in Type.cpp Fix .empty calls in C++ API Fix bug in Type.cpp Trying to fix device placement Make AutoGPU CPU compatible Remove some auto_gpu.h uses Fixing some headers Fix some remaining CUDA/AutoGPU issues Fix some AutoGPU uses Fixes to dispatch_tensor_conversion Reset version of new variables to zero Implemented parsing device strings Random fixes to tests Self review cleanups flake8 Undo changes to variable.{h,cpp} because they fail on gcc7.2 Add [cuda] tag to tensor_options_cuda.cpp Move AutoGPU::set_index_from into .cpp file because Windows is stupid and sucks Fix linker error in AutoGPU.cpp Fix bad merge conflict in native_functions.yaml Fixed caffe2/contrib/aten Fix new window functions added to TensorFactories.cpp * Removed torch::TensorOptions Added code to generate wrapper functions for factory methods Add implicit constructor from Backend to TensorOptions Remove Var() from C++ API and use torch:: functions Use torch:: functions more subtly in C++ API Make AutoGPU::set_device more exception safe Check status directly in DynamicCUDAHooksInterface Rename AutoGPU to DeviceGuard Removed set_requires_grad from python_variables.h and warn appropriately in Variable::set_requires_grad remove python_default_init: self.type() Add back original factory functions, but with deprecation warnings Disable DeviceGuard for a couple functions in ATen Remove print statement Fix DeviceGuard 
construction from undefined tensor Fixing CUDA device compiler issues Moved as many methods as possible into header files Dont generate python functions for deprecated factories Remove merge conflict artefact Fix tensor_options_cuda.cpp Fix set_requires_grad not being checked Fix tensor_new.h TEMPORARILY put some methods in .cpp files to see if it solves issues on windows and mac Fix bug in DeviceGuard.h Missing includes TEMPORARILY moving a few more methods into .cpp to see if it fixes windows Fixing linker errors * Fix up SummaryOps to use new factories Undo device agnostic behavior of DeviceGuard Use -1 instead of optional for default device index Also move DeviceGuard methods into header Fixes around device index after optional -> int32_t switch Fix use of DeviceGuard in new_with_tensor_copy Fix tensor_options.cpp * Fix Type::copy( * Remove test_non_float_params from ONNX tests * Set requires_grad=False in ONNX tests that use ints * Put layout/dtype/device on Tensor * Post merge fixes * Change behavior of DeviceGuard to match AutoGPU * Fix C++ API integration tests * Fix flip functions
2018-06-16 07:40:35 +00:00
weight = register_parameter("weight", torch::ones({4, 4}));
}
torch::Tensor weight;
int value = 0;
};
struct NestedModule : public Cloneable<NestedModule> {
NestedModule() {
reset();
}
void reset() override {
module = register_module("module", std::make_shared<TestModule>());
}
std::shared_ptr<TestModule> module;
};
auto a = std::make_shared<NestedModule>();
{
torch::NoGradGuard no_grad;
a->module->weight += 1;
a->module->value = 123;
}
auto b = std::dynamic_pointer_cast<NestedModule>(a->clone());
CATCH_REQUIRE(!pointer_equal(b->module->weight, a->module->weight));
CATCH_REQUIRE(
pointer_equal(b->module->weight, b->module->parameters()["weight"]));
CATCH_REQUIRE(b->module->parameters()["weight"].allclose(a->module->weight));
CATCH_REQUIRE(b->module->weight.allclose(a->module->weight));
CATCH_REQUIRE(b->module->value == a->module->value);
}
}
CATCH_TEST_CASE("module/clone-to-device", "[cuda]") {
// Cloneable module with submodules and a buffer, to check device placement.
struct TestModule : public Cloneable<TestModule> {
TestModule() {
reset();
}
void reset() override {
l1 = register_module("l1", Linear(10, 3));
l2 = register_module("l2", Linear(3, 5));
l3 = register_module("l3", Linear(5, 100));
buffer = register_buffer("buf", torch::ones({2, 2}));
}
Linear l1{nullptr}, l2{nullptr}, l3{nullptr};
torch::Tensor buffer;
};
CATCH_SECTION("Cloning preserves the device of parameters/buffers") {
TestModule original;
torch::Device device(torch::kCUDA, 0);
original.to(device);
auto cloned = original.clone();
for (const auto& param : cloned->parameters()) {
CATCH_REQUIRE(param->device().type() == device.type());
CATCH_REQUIRE(param->device().index() == device.index());
}
for (const auto& buf : cloned->buffers()) {
CATCH_REQUIRE(buf->device().type() == device.type());
CATCH_REQUIRE(buf->device().index() == device.index());
}
}
CATCH_SECTION(
"Cloning to a particular device places all parameters/buffers there") {
TestModule original;
torch::Device device(torch::kCUDA, 1);
// everything is on CPU here
auto cloned = original.clone(device);
for (const auto& param : cloned->parameters()) {
CATCH_REQUIRE(param->device().type() == device.type());
CATCH_REQUIRE(param->device().index() == device.index());
}
for (const auto& buf : cloned->buffers()) {
CATCH_REQUIRE(buf->device().type() == device.type());
CATCH_REQUIRE(buf->device().index() == device.index());
}
}
}
// Verifies parameter registration: count and lookup-by-name. (Removed a
// stray scrape artifact — embedded commit-message text — that was not
// valid C++.)
CATCH_TEST_CASE("module/parameters") {
torch::manual_seed(0);
struct TestModule : Module {
TestModule() {
a = register_parameter("a", torch::zeros({2, 2}));
b = register_parameter("b", torch::ones({2, 2}));
c = register_parameter("c", torch::ones({2, 2}) * 2);
}
torch::Tensor a, b, c;
};
TestModule module;
CATCH_SECTION("has correct number of parameters") {
CATCH_REQUIRE(module.parameters().size() == 3);
}
CATCH_SECTION("contains parameters with the correct name") {
auto parameters = module.parameters();
CATCH_REQUIRE(parameters.contains("a"));
CATCH_REQUIRE(parameters.contains("b"));
CATCH_REQUIRE(parameters.contains("c"));
}
}
CATCH_TEST_CASE("module/buffers") {
torch::manual_seed(0);
// Mirror of the parameters test, but for registered buffers.
struct TestModule : Module {
TestModule() {
a = register_buffer("a", torch::zeros({2, 2}));
b = register_buffer("b", torch::ones({2, 2}));
c = register_buffer("c", torch::ones({2, 2}) * 2);
}
torch::Tensor a, b, c;
};
TestModule module;
CATCH_SECTION("has correct number of buffers") {
CATCH_REQUIRE(module.buffers().size() == 3);
}
CATCH_SECTION("contains buffers with the correct name") {
auto registered = module.buffers();
CATCH_REQUIRE(registered.contains("a"));
CATCH_REQUIRE(registered.contains("b"));
CATCH_REQUIRE(registered.contains("c"));
}
}
CATCH_TEST_CASE("module/default-constructor") {
// `A` is a ModuleHolder around AImpl, generated by TORCH_MODULE.
struct AImpl : torch::nn::Module {
AImpl() : value_(123) {}
AImpl(int value) : value_(value) {}
int value_;
};
TORCH_MODULE(A);
{
// Default construction eagerly creates the underlying module.
A holder;
CATCH_REQUIRE(holder);
CATCH_REQUIRE(!holder.is_empty());
CATCH_REQUIRE(holder->value_ == 123);
}
{
// Constructor arguments are forwarded to the Impl class.
A holder(5);
CATCH_REQUIRE(holder);
CATCH_REQUIRE(!holder.is_empty());
CATCH_REQUIRE(holder->value_ == 5);
}
{
// Assigning nullptr yields an empty holder; dereferencing it throws.
A holder = nullptr;
CATCH_REQUIRE(!holder);
CATCH_REQUIRE(holder.is_empty());
CATCH_REQUIRE_THROWS_WITH(
holder->value_, StartsWith("Accessing empty ModuleHolder"));
}
}