Build system changes (#8627)

* All changes needed to get rid of process_github.sh

* Allow thnn_h_path to be passed explicitly
Author: anderspapitto
Committed by: Soumith Chintala
Date: 2018-06-20 14:45:26 -07:00
Parent: 0acddd6cee
Commit: 48e90e3339
13 changed files with 85 additions and 26 deletions

View file

@@ -19,7 +19,16 @@ struct Handle {
  }
  ~Handle() {
    if (handle) {
      // this is because of something dumb in the ordering of
      // destruction. Sometimes atexit, the cuda context (or something)
      // would already be destroyed by the time this gets destroyed. It
      // happens in fbcode setting. @colesbury and I decided to not destroy
      // the handle as a workaround.
      //   - @soumith
#ifdef NO_CUDNN_DESTROY_HANDLE
#else
      cudnnDestroy(handle);
#endif
    }
  }
};
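
The guard above only takes effect when NO_CUDNN_DESTROY_HANDLE is defined at compile time; this commit does not show where that macro gets set. As a purely hypothetical illustration (the extension name, source file, and build script are made up), a setuptools-driven build could pass it through define_macros:

# Hypothetical sketch: defining NO_CUDNN_DESTROY_HANDLE for the compiler from
# a setuptools build script. The internal build that actually sets this macro
# is not part of this commit.
from setuptools import Extension

cudnn_ext = Extension(
    name='example_cudnn_binding',            # made-up extension name
    sources=['example_cudnn_binding.cpp'],   # made-up source file
    define_macros=[('NO_CUDNN_DESTROY_HANDLE', None)],  # skip cudnnDestroy at exit
)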

View file

@@ -379,8 +379,8 @@ class build_deps(PytorchCommand):
        # Use copies instead of symbolic files.
        # Windows has very poor support for them.
        sym_files = ['tools/shared/cwrap_common.py']
        orig_files = ['aten/src/ATen/common_with_cwrap.py']
        sym_files = ['tools/shared/cwrap_common.py', 'tools/shared/_utils_internal.py']
        orig_files = ['aten/src/ATen/common_with_cwrap.py', 'torch/_utils_internal.py']
        for sym_file, orig_file in zip(sym_files, orig_files):
            if os.path.exists(sym_file):
                os.remove(sym_file)
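
The loop above only removes a stale copy; the step that re-creates it is outside the visible hunk. A minimal sketch of the copy-instead-of-symlink idea, reusing the file lists from this hunk (the shutil.copyfile call is an assumption about what the surrounding build code does):

# Sketch: copy the shared sources into tools/shared instead of symlinking,
# because Windows handles symlinks poorly. Only the file lists appear in the
# hunk above; the copy call itself is assumed.
import os
import shutil

sym_files = ['tools/shared/cwrap_common.py', 'tools/shared/_utils_internal.py']
orig_files = ['aten/src/ATen/common_with_cwrap.py', 'torch/_utils_internal.py']

for sym_file, orig_file in zip(sym_files, orig_files):
    if os.path.exists(sym_file):
        os.remove(sym_file)               # drop any stale copy first
    shutil.copyfile(orig_file, sym_file)  # then materialize a fresh copy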

View file

@@ -27,6 +27,7 @@ import errno
import torch
import torch.cuda
from torch._utils_internal import get_writable_path
from torch._six import string_classes
import torch.backends.cudnn
import torch.backends.mkl
@@ -505,7 +506,7 @@ def download_file(url, binary=True):
    from urllib import request, error
    filename = os.path.basename(urlsplit(url)[2])
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
    path = os.path.join(data_dir, filename)
    if os.path.exists(path):
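
In the open-source tree, get_writable_path is a no-op (see the new torch/_utils_internal.py at the end of this commit); the indirection exists so a build that installs the test data into a read-only location can substitute its own logic. A hedged sketch of what such an override might look like, purely for illustration:

# Hypothetical override of get_writable_path, not part of this commit: fall
# back to a temporary directory when the requested path is not writable.
import os
import tempfile

def get_writable_path(path):
    if os.access(path, os.W_OK):
        return path
    return tempfile.mkdtemp(suffix=os.path.basename(path))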

View file

@@ -17,11 +17,12 @@ import torch.nn.functional as F
from torch.autograd import Variable
from common import TestCase
from torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR
BACKEND = os.environ['BACKEND']
TEMP_DIR = os.environ['TEMP_DIR']
INIT_METHOD = os.getenv('INIT_METHOD', 'env://')
MASTER_PORT = '29500'
MASTER_ADDR = '127.0.0.1'
DEFAULT_TIMEOUT = 15
CUSTOMIZED_TIMEOUT = {'test_DistributedDataParallel': 25}
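
MASTER_ADDR now comes from torch._utils_internal, so a replacement of that module can point the distributed tests at a different rendezvous host. How the constants are consumed is not visible in this hunk; the sketch below shows the usual env:// rendezvous wiring (init_process_group is standard torch.distributed API, but the surrounding setup here is an assumption about the test harness):

# Hedged sketch of an env:// rendezvous using constants like the ones above;
# the exported variables and the single-process world size are illustrative.
import os
import torch.distributed as dist

MASTER_ADDR = '127.0.0.1'   # in this commit, imported from torch._utils_internal
MASTER_PORT = '29500'

os.environ['MASTER_ADDR'] = MASTER_ADDR
os.environ['MASTER_PORT'] = MASTER_PORT
os.environ['WORLD_SIZE'] = '1'
os.environ['RANK'] = '0'
dist.init_process_group(backend='gloo', init_method='env://')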

View file

@@ -13,6 +13,7 @@ import unittest
import warnings
import pickle
import gzip
from torch._utils_internal import get_file_path, get_file_path_2
from torch.utils.dlpack import from_dlpack, to_dlpack
from torch._utils import _rebuild_tensor
from itertools import product, combinations
@@ -6656,7 +6657,10 @@ class TestTorch(TestCase):
            return module
        with filecontext_lambda() as checkpoint:
            fname = os.path.join(os.path.dirname(__file__), 'data/network1.py')
            try:
                fname = get_file_path_2(os.path.dirname(__file__), 'data', 'network1.py')
            except IOError:
                fname = get_file_path_2(os.path.dirname(__file__), 'data', 'network1.pyc')
            module = import_module(tmpmodule_name, fname)
            torch.save(module.Net(), checkpoint)
@@ -6669,7 +6673,10 @@ class TestTorch(TestCase):
            self.assertEquals(len(w), 0)
            # Replace the module with different source
            fname = os.path.join(os.path.dirname(__file__), 'data/network2.py')
            try:
                fname = get_file_path_2(os.path.dirname(__file__), 'data', 'network2.py')
            except IOError:
                fname = get_file_path_2(os.path.dirname(__file__), 'data', 'network2.pyc')
            module = import_module(tmpmodule_name, fname)
            checkpoint.seek(0)
            with warnings.catch_warnings(record=True) as w:
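
The get_file_path_2 added by this commit is just os.path.join and never raises (see torch/_utils_internal.py below), so the IOError branch only matters when that module is replaced by a build that resolves files differently. A hypothetical override for which the fallback to the compiled .pyc makes sense:

# Hypothetical replacement of get_file_path_2, not part of this commit. It
# refuses to return a path that does not exist, which is what makes the
# try/except IOError with the '.pyc' fallback in the test above meaningful.
import os

def get_file_path_2(*path_components):
    path = os.path.join(*path_components)
    if not os.path.exists(path):
        raise IOError('no such file: {}'.format(path))
    return path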

View file

@@ -530,6 +530,7 @@ class TestLuaReader(TestCase):
        return input, target.sub(1)
@unittest.skipIf('SKIP_TEST_BOTTLENECK' in os.environ.keys(), 'SKIP_TEST_BOTTLENECK is set')
class TestBottleneck(TestCase):
    def _run(self, command):
        """Returns (return-code, stdout, stderr)"""

View file

@@ -1,5 +1 @@
from .generate_wrappers import generate_wrappers, wrap_function
try:
    from .generate_wrappers import import_module
except ImportError:
    pass
from .generate_wrappers import generate_wrappers, wrap_function, import_module

View file

@@ -3,17 +3,16 @@ import sys
from string import Template, ascii_lowercase
from ..cwrap import cwrap
from ..cwrap.plugins import NNExtension, NullableArguments, AutoGPU
from ..shared import import_module
BASE_PATH = os.path.realpath(os.path.join(__file__, '..', '..', '..'))
WRAPPER_PATH = os.path.join(BASE_PATH, 'torch', 'csrc', 'nn')
THNN_UTILS_PATH = os.path.join(BASE_PATH, 'torch', '_thnn', 'utils.py')
from ..shared._utils_internal import get_file_path
THNN_H_PATH = get_file_path('torch', 'lib', 'THNN.h')
THCUNN_H_PATH = get_file_path('torch', 'lib', 'THCUNN.h')
try:
    from torch._thnn import utils as thnn_utils
except ImportError:
    from ..shared import import_module
    thnn_utils = import_module('torch._thnn.utils', THNN_UTILS_PATH)
THNN_UTILS_PATH = get_file_path('torch', '_thnn', 'utils.py')
thnn_utils = import_module('torch._thnn.utils', THNN_UTILS_PATH)
FUNCTION_TEMPLATE = Template("""\
[[
@@ -105,7 +104,7 @@ def generate_wrappers(nn_root=None, install_dir=None, template_path=None):
def wrap_nn(thnn_h_path, install_dir, template_path):
    wrapper = '#include <TH/TH.h>\n\n\n'
    nn_functions = thnn_utils.parse_header(thnn_h_path or thnn_utils.THNN_H_PATH)
    nn_functions = thnn_utils.parse_header(thnn_h_path or THNN_H_PATH)
    for fn in nn_functions:
        for t in ['Float', 'Double']:
            wrapper += wrap_function(fn.name, t, fn.arguments)
@@ -124,7 +123,7 @@ def wrap_nn(thnn_h_path, install_dir, template_path):
def wrap_cunn(thcunn_h_path, install_dir, template_path):
    wrapper = '#include <TH/TH.h>\n'
    wrapper += '#include <THC/THC.h>\n\n\n'
    cunn_functions = thnn_utils.parse_header(thcunn_h_path or thnn_utils.THCUNN_H_PATH)
    cunn_functions = thnn_utils.parse_header(thcunn_h_path or THCUNN_H_PATH)
    for fn in cunn_functions:
        for t in ['CudaHalf', 'Cuda', 'CudaDouble']:
            wrapper += wrap_function(fn.name, t, fn.arguments)
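
import_module (re-exported from tools.nnwrap above) loads torch/_thnn/utils.py from an explicit file path so the wrapper generator can run before torch itself is importable. Its implementation is not part of this diff; below is a minimal sketch of that kind of path-based import using importlib, with the caveat that the real helper in tools/shared may differ (for example, it likely also supported Python 2):

# Minimal sketch of a path-based import helper in the spirit of import_module;
# the actual implementation lives in tools/shared and is not shown in this diff.
import importlib.util

def import_module(name, path):
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# Usage mirroring the hunk above:
# thnn_utils = import_module('torch._thnn.utils', THNN_UTILS_PATH)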

View file

@@ -77,6 +77,7 @@ def generate_code(ninja_global=None,
    sys.path.insert(0, root)
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers
    # Build THNN/THCUNN.cwrap and then THNN/THCUNN.cpp. These are primarily

View file

@@ -137,6 +137,10 @@ configure_file("${CAFFE2_PATH}/aten/src/ATen/common_with_cwrap.py"
               "${TOOLS_PATH}/shared/cwrap_common.py"
               COPYONLY)
configure_file("${CAFFE2_PATH}/torch/_utils_internal.py"
               "${TOOLS_PATH}/shared/_utils_internal.py"
               COPYONLY)
add_custom_command(
    OUTPUT
    "${TORCH_SRC_DIR}/csrc/nn/THNN.cpp"

View file

@@ -8,9 +8,11 @@ It has a CUDA counterpart, that enables you to run your tensor computations
on an NVIDIA GPU with compute capability >= 3.0.
"""
import os
import sys
import platform
from ._utils import _import_dotted_name
from ._utils_internal import get_file_path, prepare_multiprocessing_environment
from .version import __version__
from ._six import string_classes as _string_classes
@@ -233,8 +235,8 @@ _tensor_classes = set()
def manager_path():
    if platform.system() == 'Windows':
        return b""
    import os
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'lib', 'torch_shm_manager')
    path = get_file_path('torch', 'lib', 'torch_shm_manager')
    prepare_multiprocessing_environment(get_file_path('torch'))
    if not os.path.exists(path):
        raise RuntimeError("Unable to find torch_shm_manager at " + path)
    return path.encode('utf-8')
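
prepare_multiprocessing_environment is a no-op in the torch/_utils_internal.py added below; the hook is called just before the existence check so that a build which packages torch differently can first stage torch_shm_manager into a real directory. A purely speculative sketch of such an override (the packaged source path is invented):

# Hypothetical override, not part of this commit: materialize the bundled
# torch_shm_manager binary on disk before torch/__init__.py checks for it.
import os
import shutil

def prepare_multiprocessing_environment(path):
    staged = os.path.join(path, 'lib', 'torch_shm_manager')
    if not os.path.exists(staged):
        os.makedirs(os.path.dirname(staged), exist_ok=True)
        shutil.copyfile('/opt/packaged/torch_shm_manager', staged)  # invented source path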

View file

@@ -4,9 +4,15 @@ import importlib
# in fbcode, this fails in some cases, but we don't need it, therefore the try-catch
try:
    THNN_H_PATH = os.path.join(os.path.dirname(__file__), '..', 'lib', 'THNN.h')
    THCUNN_H_PATH = os.path.join(os.path.dirname(__file__), '..', 'lib', 'THCUNN.h')
except Exception:
    # when compiling a cffi extension, this works. When compiling
    # torch itself, it doesn't work because the parent module can't
    # yet be imported. However that's fine because we don't need it in
    # that case.
    from .._utils_internal import get_file_path
    THNN_H_PATH = get_file_path('torch', 'lib', 'THNN.h')
    THCUNN_H_PATH = get_file_path('torch', 'lib', 'THCUNN.h')
except Exception as e:
    pass

torch/_utils_internal.py (new file, 32 lines added)
View file

@@ -0,0 +1,32 @@
from __future__ import absolute_import, division, print_function, unicode_literals

import os

# this arbitrary-looking assortment of functionality is provided here
# to have a central place for overrideable behavior. The motivating
# use is the FB build environment, where this source file is replaced
# by an equivalent.

if os.path.basename(os.path.dirname(__file__)) == 'shared':
    torch_parent = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
else:
    torch_parent = os.path.dirname(os.path.dirname(__file__))


def get_file_path(*path_components):
    return os.path.join(torch_parent, *path_components)


def get_file_path_2(*path_components):
    return os.path.join(*path_components)


def get_writable_path(path):
    return path


def prepare_multiprocessing_environment(path):
    pass


TEST_MASTER_ADDR = '127.0.0.1'
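
The 'shared' check above exists because setup.py and the CMake rule earlier in this commit copy this file to tools/shared/_utils_internal.py; from either location, torch_parent should resolve to the repository root. A quick illustrative walk-through of that path arithmetic (the /repo paths are made up):

# Illustrative only: how torch_parent resolves from both copies of this file.
import os

for f in ('/repo/torch/_utils_internal.py',            # in-tree location
          '/repo/tools/shared/_utils_internal.py'):    # copy made by the build
    if os.path.basename(os.path.dirname(f)) == 'shared':
        parent = os.path.dirname(os.path.dirname(os.path.dirname(f)))
    else:
        parent = os.path.dirname(os.path.dirname(f))
    print(f, '->', parent)  # both print '/repo'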