introduce INTERN_DISABLE_AUTOGRAD flag to create inference-only library for mobile

Summary:
This is the first of a series of changes to reduce build size by cutting
autograd functions from the mobile build.

When INTERN_DISABLE_AUTOGRAD is set:
* On the CMake side we exclude Functions.h/cpp, VariableType*.h/cpp, and
  VariableTypeManual.cpp from the build. We still keep variable_factories.h,
  as we rely on it to create variables instead of tensors.
* In source code we gate a couple of autograd references (in
  autograd/variable.cpp) with C10_MOBILE (technically we should use a
  dedicated C macro, but its maintenance cost is higher than the CMake
  macro's, as we have several build systems to update); see the sketch
  after this list.
* Pass the --disable-autograd flag to the codegen script, which then stops
  generating Functions/VariableType code; for variable_factories.h it stops
  generating tracing code.
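
To illustrate the second bullet, a minimal sketch of the C10_MOBILE gating
pattern (the function and body here are hypothetical, not the actual
variable.cpp change):

    // Hypothetical example: gating an autograd reference behind C10_MOBILE.
    #include <c10/macros/Macros.h>
    #include <c10/util/Exception.h>

    void run_backward_example() {
    #ifdef C10_MOBILE
      // Inference-only mobile build: generated autograd code is compiled out.
      TORCH_CHECK(false, "autograd is not available in this mobile build");
    #else
      // Full build: safe to reference the generated autograd machinery here.
      // ...
    #endif
    }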

Edit: in this diff we will keep Functions.h/cpp to avoid changing source code.

Why do we need this change if mobile (with USE_STATIC_DISPATCH=ON) already
avoids calling VariableType and the rest of autograd?
It is meant to reduce the static library size for the iOS build, where it is
relatively harder to strip unused code with a linker-level approach.
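
For context, the "linker approach" means dead-code stripping along these
lines (illustrative commands, not part of this diff):

    # Compile with per-function sections, then let the linker drop unused ones.
    clang++ -ffunction-sections -fdata-sections -c foo.cpp -o foo.o
    clang++ -Wl,--gc-sections foo.o -o app    # GNU-style linkers (Android NDK)
    clang++ -Wl,-dead_strip foo.o -o app      # Apple ld64 equivalent

With a static library the stripping only happens at the final app link, so
cutting the code out at codegen time is the more dependable option here.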

Why do we need to make such an involved change to the codegen script?
There isn't a global config system in codegen - autograd/env.py provides
similar functionality, but it explicitly asks not to add anything there, so
the flag is threaded through as a parameter instead (sketched below).
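
A simplified sketch of that threading (signatures trimmed; see the actual
diff below for the real ones):

    # The flag is passed explicitly from layer to layer rather than read
    # from a shared config module.
    def gen_variable_factories(out, disable_autograd=False):
        pass  # generation elided

    def gen_autograd(out, disable_autograd=False):
        gen_variable_factories(out, disable_autograd=disable_autograd)

    def generate_code(out, disable_autograd=False):
        gen_autograd(out, disable_autograd=disable_autograd)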

Test Plan:
- will check CI;
- test mobile build in sample app;

Differential Revision: D17202733

Pulled By: ljk53

fbshipit-source-id: 5701c6639b39ce58aba9bf5489a08d30d1dcd299
Jiakai Liu authored on 2019-09-10 10:18:19 -07:00 (committed by Facebook Github Bot)
parent 41cf5564fe, commit 8485710143
7 changed files with 53 additions and 23 deletions


@@ -301,6 +301,7 @@ if (INTERN_BUILD_MOBILE AND NOT BUILD_CAFFE2_MOBILE)
   set(USE_FBGEMM OFF)
   set(USE_STATIC_DISPATCH ON)
   set(INTERN_DISABLE_ONNX ON)
+  set(INTERN_DISABLE_AUTOGRAD ON)
 endif()
 
 # ---[ Utils


@@ -248,22 +248,32 @@ if (NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
   set(GENERATED_CXX_TORCH
     "${TORCH_SRC_DIR}/csrc/autograd/generated/Functions.cpp"
-    "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_0.cpp"
-    "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_1.cpp"
-    "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_2.cpp"
-    "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_3.cpp"
-    "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_4.cpp"
     "${TORCH_SRC_DIR}/csrc/jit/generated/register_aten_ops_0.cpp"
     "${TORCH_SRC_DIR}/csrc/jit/generated/register_aten_ops_1.cpp"
     "${TORCH_SRC_DIR}/csrc/jit/generated/register_aten_ops_2.cpp"
   )
+  if(NOT INTERN_DISABLE_AUTOGRAD)
+    list(APPEND GENERATED_CXX_TORCH
+      "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_0.cpp"
+      "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_1.cpp"
+      "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_2.cpp"
+      "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_3.cpp"
+      "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_4.cpp"
+    )
+  endif()
 
   set(GENERATED_H_TORCH
-    "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType.h"
     "${TORCH_SRC_DIR}/csrc/autograd/generated/Functions.h"
     "${TORCH_SRC_DIR}/csrc/autograd/generated/variable_factories.h"
   )
+  if(NOT INTERN_DISABLE_AUTOGRAD)
+    list(APPEND GENERATED_H_TORCH
+      "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType.h"
+    )
+  endif()
 
   set(GENERATED_CXX_PYTHON
     "${TORCH_SRC_DIR}/csrc/autograd/generated/python_functions.cpp"
     "${TORCH_SRC_DIR}/csrc/autograd/generated/python_variable_methods.cpp"
@@ -293,6 +303,7 @@ if (NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
     "${PYTHON_EXECUTABLE}" tools/setup_helpers/generate_code.py
       --declarations-path "${CMAKE_BINARY_DIR}/aten/src/ATen/Declarations.yaml"
       --nn-path "aten/src"
+      $<$<BOOL:${INTERN_DISABLE_AUTOGRAD}>:--disable-autograd>
     DEPENDS
     "${CMAKE_BINARY_DIR}/aten/src/ATen/Declarations.yaml"
     "${CMAKE_CURRENT_LIST_DIR}/../aten/src/THNN/generic/THNN.h"
@@ -352,7 +363,6 @@ if (NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
     ${TORCH_SRC_DIR}/csrc/autograd/record_function.cpp
     ${TORCH_SRC_DIR}/csrc/autograd/saved_variable.cpp
     ${TORCH_SRC_DIR}/csrc/autograd/variable.cpp
-    ${TORCH_SRC_DIR}/csrc/autograd/VariableTypeManual.cpp
     ${TORCH_SRC_DIR}/csrc/jit/autodiff.cpp
     ${TORCH_SRC_DIR}/csrc/jit/attributes.cpp
     ${TORCH_SRC_DIR}/csrc/jit/argument_spec.cpp
@@ -448,6 +458,12 @@ if (NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
     ${TORCH_SRC_DIR}/csrc/jit/function.cpp
   )
 
+  if (NOT INTERN_DISABLE_AUTOGRAD)
+    list(APPEND TORCH_SRCS
+      ${TORCH_SRC_DIR}/csrc/autograd/VariableTypeManual.cpp
+    )
+  endif()
+
   if (NOT INTERN_BUILD_MOBILE)
     list(APPEND TORCH_SRCS
       ${TORCH_SRC_DIR}/csrc/api/src/jit.cpp


@@ -180,7 +180,7 @@ def load_deprecated_signatures(aten_decls, deprecated_path):
     return declarations
 
 
-def gen_autograd(aten_path, out, autograd_dir):
+def gen_autograd(aten_path, out, autograd_dir, disable_autograd=False):
     aten_decls = load_aten_declarations(aten_path)
 
     # Parse and load derivatives.yaml
@@ -191,21 +191,19 @@ def gen_autograd(aten_path, out, autograd_dir):
     template_path = os.path.join(autograd_dir, 'templates')
 
     # Generate VariableType.h/cpp
-    from .gen_variable_type import gen_variable_type
-    gen_variable_type(out, aten_decls, template_path)
+    if not disable_autograd:
+        from .gen_variable_type import gen_variable_type
+        gen_variable_type(out, aten_decls, template_path)
 
     # Generate Functions.h/cpp
     from .gen_autograd_functions import gen_autograd_functions_lib
     gen_autograd_functions_lib(
         out, autograd_functions, template_path)
 
-    # Load deprecated signatures
-    deprecated = load_deprecated_signatures(
-        aten_decls, os.path.join(autograd_dir, 'deprecated.yaml'))
-
     # Generate variable_factories.h
     from .gen_variable_factories import gen_variable_factories
-    gen_variable_factories(out, aten_decls, template_path)
+    gen_variable_factories(
+        out, aten_decls, template_path, disable_autograd=disable_autograd)
 
 
 def gen_autograd_python(aten_path, out, autograd_dir):
def gen_autograd_python(aten_path, out, autograd_dir):


@@ -34,20 +34,21 @@ def fully_qualified_type(argument_type):
     return "{}at::{}".format(argument_type[:index], argument_type[index:])
 
 
-def gen_variable_factories(out, declarations, template_path):
+def gen_variable_factories(out, declarations, template_path, disable_autograd=False):
     function_definitions = []
     for decl in declarations:
         has_tensor_options = any(a["simple_type"] == "TensorOptions" for a in decl["arguments"])
         is_namespace_fn = 'namespace' in decl['method_of']
         if (has_tensor_options or decl["name"].endswith("_like")) and is_namespace_fn:
-            function_definitions.append(process_function(decl, has_tensor_options))
+            function_definitions.append(
+                process_function(decl, has_tensor_options, disable_autograd=disable_autograd))
     write(out,
           "variable_factories.h",
           CodeTemplate.from_file(template_path + "/variable_factories.h"),
           {"function_definitions": function_definitions})
 
 
-def process_function(decl, has_tensor_options):
+def process_function(decl, has_tensor_options, disable_autograd):
     formals = []
     actuals = []
     for argument in decl["arguments"]:
@@ -65,7 +66,10 @@ def process_function(decl, has_tensor_options):
         # it's a tensor
         actuals.append('{}.options().is_variable(false)'.format(actuals[0]))
 
-    pre_record_trace, post_record_trace = format_trace(decl)
+    if not disable_autograd:
+        pre_record_trace, post_record_trace = format_trace(decl)
+    else:
+        pre_record_trace, post_record_trace = '', ''
 
     return FUNCTION_TEMPLATE.substitute(
         name=decl["name"], formals=formals, actuals=actuals, requires_grad=requires_grad,
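
The net effect on variable_factories.h is that the tracing prologue and
epilogue substitute as empty strings. A toy model of that substitution (the
real FUNCTION_TEMPLATE in tools/autograd has more fields than shown here):

    from string import Template

    # Toy stand-in for FUNCTION_TEMPLATE.
    TOY_TEMPLATE = Template(
        "inline at::Tensor ${name}() {\n"
        "  ${pre_record_trace}\n"
        "  at::Tensor tensor = at::${name}();\n"
        "  ${post_record_trace}\n"
        "  return tensor;\n"
        "}\n")

    def render(name, disable_autograd):
        pre, post = ('', '') if disable_autograd else ('// begin trace', '// end trace')
        return TOY_TEMPLATE.substitute(
            name=name, pre_record_trace=pre, post_record_trace=post)

    print(render("zeros", disable_autograd=True))  # emits no tracing lines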


@@ -23,7 +23,8 @@ def generate_code(ninja_global=None,
                   declarations_path=None,
                   nn_path=None,
                   install_dir=None,
-                  subset=None):
+                  subset=None,
+                  disable_autograd=False):
     # cwrap depends on pyyaml, so we can't import it earlier
     root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
     sys.path.insert(0, root)
@@ -41,7 +42,12 @@ def generate_code(ninja_global=None,
         gen_autograd_python(declarations_path or DECLARATIONS_PATH, autograd_gen_dir, 'tools/autograd')
 
     if subset == "libtorch" or not subset:
-        gen_autograd(declarations_path or DECLARATIONS_PATH, autograd_gen_dir, 'tools/autograd')
+        gen_autograd(
+            declarations_path or DECLARATIONS_PATH,
+            autograd_gen_dir,
+            'tools/autograd',
+            disable_autograd=disable_autograd,
+        )
 
         gen_jit_dispatch(declarations_path or DECLARATIONS_PATH, jit_gen_dir, 'tools/jit/templates')
@@ -55,6 +61,12 @@ def main():
         '--subset',
         help='Subset of source files to generate. Can be "libtorch" or "pybindings". Generates both when omitted.'
     )
+    parser.add_argument(
+        '--disable-autograd',
+        default=False,
+        action='store_true',
+        help='It can skip generating autograd related code when the flag is set',
+    )
     options = parser.parse_args()
     generate_code(
         options.ninja_global,
@@ -62,6 +74,7 @@
         options.nn_path,
         options.install_dir,
         options.subset,
+        options.disable_autograd,
     )
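
Putting it together, a mobile build ends up invoking the generator roughly
like this (paths as in the custom command above; <build> is a placeholder
for the CMake binary directory):

    python tools/setup_helpers/generate_code.py \
        --declarations-path <build>/aten/src/ATen/Declarations.yaml \
        --nn-path aten/src \
        --disable-autograd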


@@ -3,7 +3,6 @@
 #include <torch/csrc/autograd/function.h>
 #include <torch/csrc/autograd/functions/basic_ops.h>
 #include <torch/csrc/autograd/functions/utils.h>
-#include <torch/csrc/autograd/generated/Functions.h>
 #include <torch/csrc/autograd/variable.h>
 #include <ATen/ATen.h>


@@ -6,7 +6,6 @@
 #include <torch/csrc/autograd/functions/accumulate_grad.h>
 #include <torch/csrc/autograd/functions/tensor.h>
 #include <torch/csrc/autograd/generated/Functions.h>
-#include <torch/csrc/autograd/generated/VariableType.h>
 #include <ATen/ATen.h>
 #include <c10/util/Exception.h>