#!/usr/bin/env python3

import argparse
import copy
from datetime import datetime
from distutils.util import strtobool
# Import LooseVersion directly from distutils.version rather than via
# setuptools' re-export, which is an undocumented dependency that changed
# with setuptools-59.6.0 (https://github.com/pytorch/pytorch/pull/69904).
from distutils.version import LooseVersion
import functools
import os
import pathlib
import shutil
import signal
import subprocess
import sys
import tempfile
import json
from typing import Dict, Optional, List, cast, Any

import torch
from torch.utils import cpp_extension
from torch.testing._internal.common_utils import (
    IS_CI,
    FILE_SCHEMA,
    TEST_WITH_ROCM,
    shell,
    set_cwd,
    parser as common_parser,
)
import torch.distributed as dist
from torch.multiprocessing import Pool, get_context

REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent

try:
    # using tools/ to optimize test run.
    sys.path.append(str(REPO_ROOT))
    from tools.stats.export_test_times import TEST_TIMES_FILE
    from tools.testing.test_selections import (
        get_reordered_tests,
        get_test_case_configs,
        calculate_shards,
    )

    HAVE_TEST_SELECTION_TOOLS = True
except ImportError:
    HAVE_TEST_SELECTION_TOOLS = False
    print(
        "Unable to import test_selections from tools/testing. Running without test selection stats..."
    )
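
# Illustrative sketch (not executed; the exact signatures live in
# tools/testing/test_selections): callers are expected to guard on the flag
# above before using the optional tooling, e.g.
#
#   if HAVE_TEST_SELECTION_TOOLS:
#       selected_tests = get_reordered_tests(selected_tests)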


def discover_tests(
        base_dir: Optional[pathlib.Path] = None,
        blocklisted_patterns: Optional[List[str]] = None,
        blocklisted_tests: Optional[List[str]] = None,
        extra_tests: Optional[List[str]] = None) -> List[str]:
    """
    Searches for all Python files starting with test_, excluding those matching
    the given blocklist patterns or blocklisted test names.
    """
    def skip_test_p(name: str) -> bool:
        rc = False
        if blocklisted_patterns is not None:
            rc |= any(name.startswith(pattern) for pattern in blocklisted_patterns)
        if blocklisted_tests is not None:
            rc |= name in blocklisted_tests
        return rc

    cwd = pathlib.Path(__file__).resolve().parent if base_dir is None else base_dir
    all_py_files = list(cwd.glob('**/test_*.py'))
    rc = [str(fname.relative_to(cwd))[:-3] for fname in all_py_files]
    # Invert slashes on Windows
    if sys.platform == "win32":
        rc = [name.replace('\\', '/') for name in rc]
    rc = [test for test in rc if not skip_test_p(test)]
    if extra_tests is not None:
        rc += extra_tests
    return sorted(rc)
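
# Illustrative example (not executed), assuming a tree containing test_foo.py
# and sub/test_bar.py under base_dir:
#
#   discover_tests(base_dir=pathlib.Path("base_dir"))
#   # -> ["sub/test_bar", "test_foo"]   (sorted, ".py" stripped, slashes
#   #    normalized on Windows, blocklisted names filtered out, and any
#   #    extra_tests appended before sorting)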


TESTS = discover_tests(
    blocklisted_patterns=[
        'ao',
        'bottleneck_test',
        'custom_backend',
        'custom_operator',
        'fx',  # executed by test_fx.py
        'jit',  # executed by test_jit.py
        'mobile',
        'onnx',
        'package',  # executed by test_package.py
        'quantization',  # executed by test_quantization.py
        'autograd',  # executed by test_autograd.py
    ],
    blocklisted_tests=[
        'test_bundled_images',
        'test_cpp_extensions_aot',
        'test_determination',
        'test_jit_fuser',
        'test_jit_simple',
        'test_jit_string',
        'test_kernel_launch_checks',
        'test_metal',
        # Right now we have a separate CI job for running MPS
        'test_mps',
        'test_nnapi',
        'test_segment_reductions',
        'test_static_runtime',
        'test_throughput_benchmark',
        'test_typing',
        "distributed/bin/test_script",
        "distributed/elastic/multiprocessing/bin/test_script",
        "distributed/launcher/bin/test_script",
        "distributed/launcher/bin/test_script_init_method",
        "distributed/launcher/bin/test_script_is_torchelastic_launched",
        "distributed/launcher/bin/test_script_local_rank",
        "distributed/test_c10d_spawn",
        'distributions/test_transforms',
        'distributions/test_utils',
    ],
    extra_tests=[
        "test_cpp_extensions_aot_ninja",
        "test_cpp_extensions_aot_no_ninja",
        "distributed/elastic/timer/api_test",
        "distributed/elastic/timer/local_timer_example",
        "distributed/elastic/timer/local_timer_test",
        "distributed/elastic/events/lib_test",
        "distributed/elastic/metrics/api_test",
        "distributed/elastic/utils/logging_test",
        "distributed/elastic/utils/util_test",
        "distributed/elastic/utils/distributed_test",
        "distributed/elastic/multiprocessing/api_test",
    ]
)
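
# The resulting TESTS entries are module paths relative to this directory,
# e.g. (in a typical checkout) "test_torch" or "distributed/test_c10d_common".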

# The doctests are a special case that don't correspond to a file that
# discover_tests can enable.
TESTS = TESTS + ['doctests']

FSDP_TEST = [test for test in TESTS if test.startswith("distributed/fsdp")]

# Tests that need to be run with pytest.
USE_PYTEST_LIST = [
    "distributed/pipeline/sync/skip/test_api",
    "distributed/pipeline/sync/skip/test_gpipe",
    "distributed/pipeline/sync/skip/test_inspect_skip_layout",
    "distributed/pipeline/sync/skip/test_leak",
    "distributed/pipeline/sync/skip/test_portal",
    "distributed/pipeline/sync/skip/test_stash_pop",
    "distributed/pipeline/sync/skip/test_tracker",
    "distributed/pipeline/sync/skip/test_verify_skippables",
    "distributed/pipeline/sync/test_balance",
    "distributed/pipeline/sync/test_bugs",
    "distributed/pipeline/sync/test_checkpoint",
    "distributed/pipeline/sync/test_copy",
    "distributed/pipeline/sync/test_deferred_batch_norm",
    "distributed/pipeline/sync/test_dependency",
    "distributed/pipeline/sync/test_inplace",
    "distributed/pipeline/sync/test_microbatch",
    "distributed/pipeline/sync/test_phony",
    "distributed/pipeline/sync/test_pipe",
    "distributed/pipeline/sync/test_pipeline",
    "distributed/pipeline/sync/test_stream",
    "distributed/pipeline/sync/test_transparency",
    "distributed/pipeline/sync/test_worker",
    "distributions/test_constraints",
    "distributions/test_transforms",
    "distributions/test_utils",
    "test_typing",
    "distributed/elastic/events/lib_test",
    "distributed/elastic/agent/server/test/api_test",
    "test_deploy",
]

WINDOWS_BLOCKLIST = [
    "distributed/nn/jit/test_instantiator",
    "distributed/rpc/test_faulty_agent",
    "distributed/rpc/test_tensorpipe_agent",
    "distributed/rpc/test_share_memory",
    "distributed/rpc/cuda/test_tensorpipe_agent",
    "distributed/pipeline/sync/skip/test_api",
    "distributed/pipeline/sync/skip/test_gpipe",
    "distributed/pipeline/sync/skip/test_inspect_skip_layout",
    "distributed/pipeline/sync/skip/test_leak",
    "distributed/pipeline/sync/skip/test_portal",
    "distributed/pipeline/sync/skip/test_stash_pop",
    "distributed/pipeline/sync/skip/test_tracker",
    "distributed/pipeline/sync/skip/test_verify_skippables",
    "distributed/pipeline/sync/test_balance",
    "distributed/pipeline/sync/test_bugs",
    "distributed/pipeline/sync/test_checkpoint",
    "distributed/pipeline/sync/test_copy",
    "distributed/pipeline/sync/test_deferred_batch_norm",
    "distributed/pipeline/sync/test_dependency",
    "distributed/pipeline/sync/test_inplace",
    "distributed/pipeline/sync/test_microbatch",
    "distributed/pipeline/sync/test_phony",
    "distributed/pipeline/sync/test_pipe",
    "distributed/pipeline/sync/test_pipeline",
    "distributed/pipeline/sync/test_stream",
    "distributed/pipeline/sync/test_transparency",
    "distributed/pipeline/sync/test_worker",
    "distributed/elastic/agent/server/test/api_test",
    "distributed/elastic/multiprocessing/api_test",
    "distributed/_shard/checkpoint/test_checkpoint",
    "distributed/_shard/checkpoint/test_file_system_checkpoint",
    "distributed/_shard/sharding_spec/test_sharding_spec",
    "distributed/_shard/sharding_plan/test_sharding_plan",
    "distributed/_shard/sharded_tensor/test_megatron_prototype",
    "distributed/_shard/sharded_tensor/test_sharded_tensor",
    "distributed/_shard/sharded_tensor/test_sharded_tensor_reshard",
    "distributed/_shard/sharded_tensor/ops/test_chunk",
    "distributed/_shard/sharded_tensor/ops/test_elementwise_ops",
    "distributed/_shard/sharded_tensor/ops/test_embedding",
    "distributed/_shard/sharded_tensor/ops/test_embedding_bag",
    "distributed/_shard/sharded_tensor/ops/test_binary_cmp",
    "distributed/_shard/sharded_tensor/ops/test_init",
    "distributed/_shard/sharded_tensor/ops/test_linear",
    "distributed/_shard/sharded_tensor/ops/test_math_ops",
    "distributed/_shard/sharded_tensor/ops/test_matrix_ops",
    "distributed/_shard/sharded_tensor/ops/test_softmax",
    "distributed/_shard/sharded_optim/test_sharded_optim",
    "distributed/_shard/test_partial_tensor",
    "distributed/_shard/test_replicated_tensor",
] + FSDP_TEST

ROCM_BLOCKLIST = [
    "distributed/rpc/test_faulty_agent",
    "distributed/rpc/test_tensorpipe_agent",
    "distributed/rpc/test_share_memory",
    "distributed/rpc/cuda/test_tensorpipe_agent",
    "distributed/_shard/checkpoint/test_checkpoint",
    "distributed/_shard/checkpoint/test_file_system_checkpoint",
    "distributed/_shard/sharding_spec/test_sharding_spec",
    "distributed/_shard/sharding_plan/test_sharding_plan",
    "distributed/_shard/sharded_tensor/test_megatron_prototype",
    "distributed/_shard/sharded_tensor/test_sharded_tensor",
    "distributed/_shard/sharded_tensor/test_sharded_tensor_reshard",
    "distributed/_shard/sharded_tensor/ops/test_chunk",
    "distributed/_shard/sharded_tensor/ops/test_elementwise_ops",
    "distributed/_shard/sharded_tensor/ops/test_embedding",
    "distributed/_shard/sharded_tensor/ops/test_embedding_bag",
    "distributed/_shard/sharded_tensor/ops/test_binary_cmp",
    "distributed/_shard/sharded_tensor/ops/test_init",
    "distributed/_shard/sharded_tensor/ops/test_linear",
    "distributed/_shard/sharded_tensor/ops/test_math_ops",
    "distributed/_shard/sharded_tensor/ops/test_matrix_ops",
    "distributed/_shard/sharded_tensor/ops/test_softmax",
    "distributed/_shard/sharded_optim/test_sharded_optim",
    "distributed/_shard/test_partial_tensor",
    "distributed/_shard/test_replicated_tensor",
    "test_determination",
    "test_jit_legacy",
]

RUN_PARALLEL_BLOCKLIST = [
    "test_cpp_extensions_jit",
    "test_cpp_extensions_open_device_registration",
    "test_jit_disabled",
    "test_mobile_optimizer",
    "test_multiprocessing",
    "test_multiprocessing_spawn",
    "test_namedtuple_return_api",
    "test_overrides",
    "test_show_pickle",
    "test_tensorexpr",
    "test_cuda_primary_ctx",
    "test_cuda_trace",
] + FSDP_TEST

# A subset of the TESTS list that validates that PyTorch's ops, modules, and
# autograd behave as expected
CORE_TEST_LIST = [
    "test_autograd",
    "test_modules",
    "test_nn",
    "test_ops",
    "test_ops_gradients",
    "test_ops_jit",
    "test_torch",
]

# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST
SLOW_TEST_THRESHOLD = 300

DISTRIBUTED_TESTS_CONFIG = {}


if dist.is_available():
    DISTRIBUTED_TESTS_CONFIG["test"] = {"WORLD_SIZE": "1"}
    if not TEST_WITH_ROCM and dist.is_mpi_available():
        DISTRIBUTED_TESTS_CONFIG["mpi"] = {
            "WORLD_SIZE": "3",
            "TEST_REPORT_SOURCE_OVERRIDE": "dist-mpi",
        }
    if dist.is_nccl_available():
        DISTRIBUTED_TESTS_CONFIG["nccl"] = {
            "WORLD_SIZE": "2" if torch.cuda.device_count() == 2 else "3",
            "TEST_REPORT_SOURCE_OVERRIDE": "dist-nccl",
        }
    if dist.is_gloo_available():
        DISTRIBUTED_TESTS_CONFIG["gloo"] = {
            "WORLD_SIZE": "2" if torch.cuda.device_count() == 2 else "3",
            "TEST_REPORT_SOURCE_OVERRIDE": "dist-gloo",
        }
    if dist.is_ucc_available():
        DISTRIBUTED_TESTS_CONFIG["ucc"] = {
            "WORLD_SIZE": "2" if torch.cuda.device_count() == 2 else "3",
            "TEST_REPORT_SOURCE_OVERRIDE": "dist-ucc",
            "UCX_TLS": "tcp",
            "UCC_TLS": "nccl,ucp",
            "UCC_TL_UCP_TUNE": "cuda:0",  # don't use UCP TL on CUDA as it is not well supported
        }
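
# Sketch of how an entry above is typically consumed (env_for_backend is a
# hypothetical helper, not defined in this file): each per-backend dict is a
# set of environment overrides applied before launching that backend's
# distributed tests.
#
#   def env_for_backend(backend: str) -> Dict[str, str]:
#       env = os.environ.copy()
#       env["BACKEND"] = backend
#       env.update(DISTRIBUTED_TESTS_CONFIG[backend])
#       return env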

# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
SIGNALS_TO_NAMES_DICT = {
    getattr(signal, n): n for n in dir(signal) if n.startswith("SIG") and "_" not in n
}
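
# Example (not executed): on Linux, a child process killed by SIGSEGV exits
# with return code -11, which the mapping above turns into a readable name:
#
#   ret_code = -11
#   SIGNALS_TO_NAMES_DICT.get(-ret_code, "Unknown signal")  # -> "SIGSEGV"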

CPP_EXTENSIONS_ERROR = """
Ninja (https://ninja-build.org) is required for some of the C++ extensions
tests, but it could not be found. Install ninja with `pip install ninja`
or `conda install ninja`. Alternatively, disable said tests with
`run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.
"""

PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))

JIT_EXECUTOR_TESTS = [
    "test_jit_profiling",
    "test_jit_legacy",
    "test_jit_fuser_legacy",
]

DISTRIBUTED_TESTS = [test for test in TESTS if test.startswith("distributed")]


def discover_functorch_tests():
    pytorch_root = pathlib.Path(__file__).resolve().parent.parent
    functorch_test_dir = os.path.join(pytorch_root, 'functorch', 'test')
    result = discover_tests(pathlib.Path(functorch_test_dir))
    result = [os.path.join(functorch_test_dir, r) for r in result]

    # Sanity check
    assert len(result) >= 8
    return result


FUNCTORCH_TESTS = discover_functorch_tests()
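
# Each FUNCTORCH_TESTS entry is an absolute path with the ".py" suffix
# stripped, e.g. (illustrative) "<repo-root>/functorch/test/test_ops".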

TESTS_REQUIRING_LAPACK = [
    "distributions/test_constraints",
    "distributions/test_distributions",
]


def print_to_stderr(message):
    print(message, file=sys.stderr)


def get_executable_command(options, allow_pytest, disable_coverage=False):
    if options.coverage and not disable_coverage:
        executable = ["coverage", "run", "--parallel-mode", "--source=torch"]
    else:
        executable = [sys.executable, "-bb"]
    if options.pytest:
        if allow_pytest:
            executable += ["-m", "pytest"]
        else:
            print_to_stderr(
                "Pytest cannot be used for this test. Falling back to unittest."
            )
    return executable
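
# For instance (illustrative), with options.coverage and options.pytest both
# set and pytest allowed, the returned command prefix is:
#
#   ["coverage", "run", "--parallel-mode", "--source=torch", "-m", "pytest"]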


def run_test(
    test_module,
    test_directory,
    options,
    launcher_cmd=None,
    extra_unittest_args=None,
    env=None,
) -> int:
    unittest_args = options.additional_unittest_args.copy()
    if options.verbose:
        unittest_args.append(f'-{"v" * options.verbose}')  # in case of pytest
    if test_module in RUN_PARALLEL_BLOCKLIST:
        unittest_args = [
            arg for arg in unittest_args if not arg.startswith("--run-parallel")
        ]
    if extra_unittest_args:
        assert isinstance(extra_unittest_args, list)
        unittest_args.extend(extra_unittest_args)

    # If using pytest, replace -f with equivalent -x
    if options.pytest:
        unittest_args = [arg if arg != "-f" else "-x" for arg in unittest_args]
    elif IS_CI:
        # use the downloaded test cases configuration, not supported in pytest
        unittest_args.extend(["--import-slow-tests", "--import-disabled-tests"])

    # Extra arguments are not supported with pytest
    executable = get_executable_command(
        options, allow_pytest=not extra_unittest_args
    )

    # Can't call `python -m unittest test_*` here because it doesn't run code
    # in `if __name__ == '__main__': `. So call `python test_*.py` instead.
    argv = [test_module + ".py"] + unittest_args

    os.makedirs(REPO_ROOT / "test" / "test-reports", exist_ok=True)
    log_fd, log_path = tempfile.mkstemp(dir=REPO_ROOT / "test" / "test-reports",
                                        prefix="{}_".format(test_module.replace("\\", "-").replace("/", "-")))
    os.close(log_fd)
    command = (launcher_cmd or []) + executable + argv
    print_to_stderr("Executing {} ... [{}]".format(command, datetime.now()))
    with open(log_path, "w") as f:
        ret_code = shell(command, test_directory, stdout=f, stderr=f, env=env)
    print_log_file(test_module, log_path)
    os.remove(log_path)
    return ret_code
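
# Typical call (illustrative): run one module with an extra unittest flag and
# report a nonzero exit code.
#
#   ret = run_test("test_torch", test_directory, options,
#                  extra_unittest_args=["--subprocess"])
#   if ret != 0:
#       print_to_stderr(f"test_torch failed with exit code {ret}")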


def test_cuda_primary_ctx(test_module, test_directory, options):
    return run_test(
        test_module, test_directory, options, extra_unittest_args=["--subprocess"]
    )


run_test_with_subprocess = functools.partial(run_test, extra_unittest_args=["--subprocess"])


def get_run_test_with_subprocess_fn():
    return lambda test_module, test_directory, options: run_test_with_subprocess(test_module, test_directory, options)
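
# The functools.partial above is equivalent to this sketch:
#
#   def run_test_with_subprocess(test_module, test_directory, options):
#       return run_test(test_module, test_directory, options,
#                       extra_unittest_args=["--subprocess"])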
|
|
|
|
|
|
|
|
|
|
|
2021-06-24 16:19:16 +00:00
|
|
|
def _test_cpp_extensions_aot(test_directory, options, use_ninja):
|
Add option to use ninja to compile ahead-of-time cpp_extensions (#32495)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/32495
Background
------------------------------
Previously, ninja was used to compile+link inline cpp_extensions and
ahead-of-time cpp_extensions were compiled with distutils. This PR adds
the ability to compile (but not link) ahead-of-time cpp_extensions with ninja.
The main motivation for this is to speed up cpp_extension builds: distutils
does not make use of parallelism. With this PR, using the new option, on my machine,
- torchvision compilation goes from 3m43s to 49s
- nestedtensor compilation goes from 2m0s to 28s.
User-facing changes
------------------------------
I added a `use_ninja` flag to BuildExtension. This defaults to
`True`. When `use_ninja` is True:
- it will attempt to use ninja.
- If we cannot use ninja, then this throws a warning and falls back to
distutils.
- Situations we cannot use ninja: Windows (NYI, I'll open a new issue
for this), if ninja cannot be found on the system.
Implementation Details
------------------------------
This PR makes this change in two steps. Please me know if it would be
easier to review this if I split this up into a stacked diff.
Those changes are:
1) refactor _write_ninja_file to separate the policy (what compiler flags
to pass) from the mechanism (how to write the ninja file and do compilation).
2) call _write_ninja_file and _run_ninja_build while building
ahead-of-time cpp_extensions. These are only used to compile objects;
distutils still handles the linking.
Change 1: refactor _write_ninja_file to seperate policy from mechanism
- I split _write_ninja_file into: _write_ninja_file and
_write_ninja_file_to_build_library
- I renamed _build_extension_module to _run_ninja_build
Change 2: Call _write_ninja_file while building ahead-of-time
cpp_extensions
- _write_ninja_file_and_compile_objects calls _write_ninja_file to only
build object files.
- We monkey-patch distutils.CCompiler.compile to call
_write_ninja_files_and_compile_objects
- distutils still handles the linking step. The linking step is not a
bottleneck so it was not a concern.
- This change only works on unix-based systems. Our code for windows
goes down a different codepath and I did not want to mess with that.
- If a system does not support ninja, we raise a warning and fall back
to the original compilation path.
Test Plan
------------------------------
Adhoc testing
- I built torchvision using pytorch master and printed out the build
commands. Next, I used this branch to build torchvision and looked at
the ninja file. I compared the ninja file with the build commands and
asserted that they were functionally the same.
- I repeated the above for pytorch/nestedtensor.
PyTorch test suite
- I split `test_cpp_extensions` into `test_cpp_extensions_aot` and
`test_cpp_extensions_jit`. The AOT (ahead-of-time) version tests
ahead-of-time and the JIT version tests just-in-time (not to be confused
with TorchScript)
- `test_cpp_extensions_aot` gets run TWICE by run_test.py, once with
a module that was built with ninja, and once with a module that was
built without ninja.
- run_test.py asserts that when we are building with use_ninja=True,
ninja is actually available on the system.
Test Plan: Imported from OSS
Differential Revision: D19730432
Pulled By: zou3519
fbshipit-source-id: 819590d01cf65e8da5a1e8019b8b3084792fee90
2020-02-06 02:44:19 +00:00
|
|
|
if use_ninja:
|
|
|
|
|
try:
|
|
|
|
|
cpp_extension.verify_ninja_availability()
|
|
|
|
|
except RuntimeError:
|
|
|
|
|
print(CPP_EXTENSIONS_ERROR)
|
|
|
|
|
return 1
|
|
|
|
|
|
|
|
|
|
# Wipe the build folder, if it exists already
|
2021-08-25 18:19:49 +00:00
|
|
|
cpp_extensions_test_dir = os.path.join(test_directory, "cpp_extensions")
|
|
|
|
|
cpp_extensions_test_build_dir = os.path.join(cpp_extensions_test_dir, "build")
|
Add option to use ninja to compile ahead-of-time cpp_extensions (#32495)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/32495
Background
------------------------------
Previously, ninja was used to compile+link inline cpp_extensions and
ahead-of-time cpp_extensions were compiled with distutils. This PR adds
the ability to compile (but not link) ahead-of-time cpp_extensions with ninja.
The main motivation for this is to speed up cpp_extension builds: distutils
does not make use of parallelism. With this PR, using the new option, on my machine,
- torchvision compilation goes from 3m43s to 49s
- nestedtensor compilation goes from 2m0s to 28s.
User-facing changes
------------------------------
I added a `use_ninja` flag to BuildExtension. This defaults to
`True`. When `use_ninja` is True:
- it will attempt to use ninja.
- If we cannot use ninja, then this throws a warning and falls back to
distutils.
- Situations we cannot use ninja: Windows (NYI, I'll open a new issue
for this), if ninja cannot be found on the system.
Implementation Details
------------------------------
This PR makes this change in two steps. Please me know if it would be
easier to review this if I split this up into a stacked diff.
Those changes are:
1) refactor _write_ninja_file to separate the policy (what compiler flags
to pass) from the mechanism (how to write the ninja file and do compilation).
2) call _write_ninja_file and _run_ninja_build while building
ahead-of-time cpp_extensions. These are only used to compile objects;
distutils still handles the linking.
Change 1: refactor _write_ninja_file to seperate policy from mechanism
- I split _write_ninja_file into: _write_ninja_file and
_write_ninja_file_to_build_library
- I renamed _build_extension_module to _run_ninja_build
Change 2: Call _write_ninja_file while building ahead-of-time
cpp_extensions
- _write_ninja_file_and_compile_objects calls _write_ninja_file to only
build object files.
- We monkey-patch distutils.CCompiler.compile to call
_write_ninja_files_and_compile_objects
- distutils still handles the linking step. The linking step is not a
bottleneck so it was not a concern.
- This change only works on unix-based systems. Our code for windows
goes down a different codepath and I did not want to mess with that.
- If a system does not support ninja, we raise a warning and fall back
to the original compilation path.
Test Plan
------------------------------
Adhoc testing
- I built torchvision using pytorch master and printed out the build
commands. Next, I used this branch to build torchvision and looked at
the ninja file. I compared the ninja file with the build commands and
asserted that they were functionally the same.
- I repeated the above for pytorch/nestedtensor.
PyTorch test suite
- I split `test_cpp_extensions` into `test_cpp_extensions_aot` and
`test_cpp_extensions_jit`. The AOT (ahead-of-time) version tests
ahead-of-time and the JIT version tests just-in-time (not to be confused
with TorchScript)
- `test_cpp_extensions_aot` gets run TWICE by run_test.py, once with
a module that was built with ninja, and once with a module that was
built without ninja.
- run_test.py asserts that when we are building with use_ninja=True,
ninja is actually available on the system.
Test Plan: Imported from OSS
Differential Revision: D19730432
Pulled By: zou3519
fbshipit-source-id: 819590d01cf65e8da5a1e8019b8b3084792fee90
2020-02-06 02:44:19 +00:00
|
|
|
if os.path.exists(cpp_extensions_test_build_dir):
|
|
|
|
|
shutil.rmtree(cpp_extensions_test_build_dir)
|
|
|
|
|
|
|
|
|
|
# Build the test cpp extensions modules
|
|
|
|
|
shell_env = os.environ.copy()
|
2021-08-25 18:19:49 +00:00
|
|
|
shell_env["USE_NINJA"] = str(1 if use_ninja else 0)
|
|
|
|
|
cmd = [sys.executable, "setup.py", "install", "--root", "./install"]
|
Add option to use ninja to compile ahead-of-time cpp_extensions (#32495)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/32495
Background
------------------------------
Previously, ninja was used to compile+link inline cpp_extensions and
ahead-of-time cpp_extensions were compiled with distutils. This PR adds
the ability to compile (but not link) ahead-of-time cpp_extensions with ninja.
The main motivation for this is to speed up cpp_extension builds: distutils
does not make use of parallelism. With this PR, using the new option, on my machine,
- torchvision compilation goes from 3m43s to 49s
- nestedtensor compilation goes from 2m0s to 28s.
User-facing changes
------------------------------
I added a `use_ninja` flag to BuildExtension. This defaults to
`True`. When `use_ninja` is True:
- it will attempt to use ninja.
- If we cannot use ninja, then this throws a warning and falls back to
distutils.
- Situations we cannot use ninja: Windows (NYI, I'll open a new issue
for this), if ninja cannot be found on the system.
Implementation Details
------------------------------
This PR makes this change in two steps. Please me know if it would be
easier to review this if I split this up into a stacked diff.
Those changes are:
1) refactor _write_ninja_file to separate the policy (what compiler flags
to pass) from the mechanism (how to write the ninja file and do compilation).
2) call _write_ninja_file and _run_ninja_build while building
ahead-of-time cpp_extensions. These are only used to compile objects;
distutils still handles the linking.
Change 1: refactor _write_ninja_file to seperate policy from mechanism
- I split _write_ninja_file into: _write_ninja_file and
_write_ninja_file_to_build_library
- I renamed _build_extension_module to _run_ninja_build
Change 2: Call _write_ninja_file while building ahead-of-time
cpp_extensions
- _write_ninja_file_and_compile_objects calls _write_ninja_file to only
build object files.
- We monkey-patch distutils.CCompiler.compile to call
_write_ninja_files_and_compile_objects
- distutils still handles the linking step. The linking step is not a
bottleneck so it was not a concern.
- This change only works on unix-based systems. Our code for windows
goes down a different codepath and I did not want to mess with that.
- If a system does not support ninja, we raise a warning and fall back
to the original compilation path.
Test Plan
------------------------------
Adhoc testing
- I built torchvision using pytorch master and printed out the build
commands. Next, I used this branch to build torchvision and looked at
the ninja file. I compared the ninja file with the build commands and
asserted that they were functionally the same.
- I repeated the above for pytorch/nestedtensor.
PyTorch test suite
- I split `test_cpp_extensions` into `test_cpp_extensions_aot` and
`test_cpp_extensions_jit`. The AOT (ahead-of-time) version tests
ahead-of-time and the JIT version tests just-in-time (not to be confused
with TorchScript)
- `test_cpp_extensions_aot` gets run TWICE by run_test.py, once with
a module that was built with ninja, and once with a module that was
built without ninja.
- run_test.py asserts that when we are building with use_ninja=True,
ninja is actually available on the system.
Test Plan: Imported from OSS
Differential Revision: D19730432
Pulled By: zou3519
fbshipit-source-id: 819590d01cf65e8da5a1e8019b8b3084792fee90
2020-02-06 02:44:19 +00:00
|
|
|
return_code = shell(cmd, cwd=cpp_extensions_test_dir, env=shell_env)
|
2018-04-10 18:31:23 +00:00
|
|
|
if return_code != 0:
|
|
|
|
|
return return_code
|
2021-08-25 18:19:49 +00:00
|
|
|
if sys.platform != "win32":
|
|
|
|
|
return_code = shell(
|
|
|
|
|
cmd,
|
|
|
|
|
cwd=os.path.join(cpp_extensions_test_dir, "no_python_abi_suffix_test"),
|
|
|
|
|
env=shell_env,
|
|
|
|
|
)
|
2018-11-28 01:33:54 +00:00
|
|
|
if return_code != 0:
|
|
|
|
|
return return_code
|
2018-03-09 21:02:02 +00:00
|
|
# "install" the test modules and run tests
|
2021-08-25 18:19:49 +00:00
|
|
|
python_path = os.environ.get("PYTHONPATH", "")
|
2021-06-24 16:19:16 +00:00
|
|
|
from shutil import copyfile
|
2021-08-25 18:19:49 +00:00
|
|
|
|
2022-06-02 21:35:56 +00:00
|
|
|
os.environ['USE_NINJA'] = shell_env['USE_NINJA']
|
2021-08-25 18:19:49 +00:00
|
|
|
test_module = "test_cpp_extensions_aot" + ("_ninja" if use_ninja else "_no_ninja")
|
|
|
|
|
copyfile(
|
|
|
|
|
test_directory + "/test_cpp_extensions_aot.py",
|
|
|
|
|
test_directory + "/" + test_module + ".py",
|
|
|
|
|
)
|
2018-03-11 00:16:40 +00:00
|
|
|
try:
|
2021-08-25 18:19:49 +00:00
|
|
|
cpp_extensions = os.path.join(test_directory, "cpp_extensions")
|
|
|
|
|
install_directory = ""
|
2018-04-29 16:10:03 +00:00
|
|
|
# install directory is the one that is named site-packages
|
2021-08-25 18:19:49 +00:00
|
|
|
for root, directories, _ in os.walk(os.path.join(cpp_extensions, "install")):
|
2018-04-29 16:10:03 +00:00
|
|
|
for directory in directories:
|
2021-08-25 18:19:49 +00:00
|
|
|
if "-packages" in directory:
|
2018-04-29 16:10:03 +00:00
|
|
|
install_directory = os.path.join(root, directory)
|
2018-04-02 17:53:25 +00:00
|
|
|
|
2021-08-25 18:19:49 +00:00
|
|
|
assert install_directory, "install_directory must not be empty"
|
|
|
|
|
os.environ["PYTHONPATH"] = os.pathsep.join([install_directory, python_path])
|
2020-07-28 15:12:41 +00:00
|
|
|
return run_test(test_module, test_directory, options)
|
2018-03-11 00:16:40 +00:00
|
|
|
finally:
|
2021-08-25 18:19:49 +00:00
|
|
|
os.environ["PYTHONPATH"] = python_path
|
|
|
|
|
if os.path.exists(test_directory + "/" + test_module + ".py"):
|
|
|
|
|
os.remove(test_directory + "/" + test_module + ".py")
|
2022-06-02 21:35:56 +00:00
|
|
|
os.environ.pop('USE_NINJA')
|
2018-03-09 21:02:02 +00:00
|
|
|
|
|
|
|
|
|
2020-07-28 15:12:41 +00:00
|
|
|
def test_cpp_extensions_aot_ninja(test_module, test_directory, options):
|
2021-06-24 16:19:16 +00:00
|
|
|
return _test_cpp_extensions_aot(test_directory, options, use_ninja=True)
|
def test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):
    return _test_cpp_extensions_aot(test_directory, options, use_ninja=False)
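
# Usage sketch: both AOT variants are dispatched through CUSTOM_HANDLERS below, so
# `python run_test.py -i test_cpp_extensions_aot_ninja test_cpp_extensions_aot_no_ninja`
# exercises the same test file against a ninja-built and a distutils-built module.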
def test_distributed(test_module, test_directory, options):
    # MPI tests are broken with Python-3.9
    mpi_available = subprocess.call(
        "command -v mpiexec", shell=True
    ) == 0 and sys.version_info < (3, 9)
    if options.verbose and not mpi_available:
        print_to_stderr("MPI not available -- MPI backend tests will be skipped")
    config = DISTRIBUTED_TESTS_CONFIG
    for with_init_file in {True, False}:
        # Run all the distributed backends in parallel; trying to also run the
        # env and file init methods in parallel ends in failures where the
        # subprocesses time out
        pool = Pool(processes=len(config))
        return_codes = []
        tmp_dirs = []

        for backend, env_vars in config.items():
            if sys.platform == "win32" and backend != "gloo":
                continue
            if backend == "mpi" and not mpi_available:
                continue
            if sys.platform == "win32" and not with_init_file:
                continue
            tmp_dir = tempfile.mkdtemp()
            tmp_dirs.append(tmp_dir)
            if options.verbose:
                init_str = "with {} init_method"
                with_init = init_str.format("file" if with_init_file else "env")
                print_to_stderr(
                    "Running distributed tests for the {} backend {}".format(
                        backend, with_init
                    )
                )
            old_environ = dict(os.environ)
            os.environ["TEMP_DIR"] = tmp_dir
            os.environ["BACKEND"] = backend
            os.environ["INIT_METHOD"] = "env://"
            os.environ.update(env_vars)
            if with_init_file:
                if test_module == "test_distributed_spawn":
                    init_method = f"{FILE_SCHEMA}{tmp_dir}/"
                else:
                    init_method = f"{FILE_SCHEMA}{tmp_dir}/shared_init_file"
                os.environ["INIT_METHOD"] = init_method
            try:
                os.mkdir(os.path.join(tmp_dir, "barrier"))
                os.mkdir(os.path.join(tmp_dir, "test_dir"))
                if backend == "mpi":
                    # probe mpiexec for the --allow-run-as-root and --noprefix options
                    with open(os.devnull, "w") as devnull:
                        allowrunasroot_opt = (
                            "--allow-run-as-root"
                            if subprocess.call(
                                'mpiexec --allow-run-as-root -n 1 bash -c ""',
                                shell=True,
                                stdout=devnull,
                                stderr=subprocess.STDOUT,
                            )
                            == 0
                            else ""
                        )
                        noprefix_opt = (
                            "--noprefix"
                            if subprocess.call(
                                f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c ""',
                                shell=True,
                                stdout=devnull,
                                stderr=subprocess.STDOUT,
                            )
                            == 0
                            else ""
                        )

                    mpiexec = ["mpiexec", "-n", "3", noprefix_opt, allowrunasroot_opt]
                    return_code = pool.apply_async(
                        run_test,
                        args=(test_module, test_directory, options),
                        kwds={
                            "launcher_cmd": mpiexec,
                            "env": os.environ.copy(),
                        },
                    )
                else:
                    return_code = pool.apply_async(
                        run_test,
                        args=(test_module, test_directory, options),
                        kwds={
                            "extra_unittest_args": ["--subprocess"],
                            "env": os.environ.copy(),
                        },
                    )

                return_codes.append(return_code)
            finally:
                # restore the environment mutated for this backend
                os.environ.clear()
                os.environ.update(old_environ)
        # Close the pool and wait for all the processes to finish
        pool.close()
        pool.join()

        for tmp_dir in tmp_dirs:
            shutil.rmtree(tmp_dir)

        for return_code in return_codes:
            # AsyncResult.get() blocks until the worker finishes and returns
            # run_test's exit code
            if return_code.get() != 0:
                return return_code.get()

    return 0
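
# Illustrative sketch only (the real DISTRIBUTED_TESTS_CONFIG is defined earlier
# in this file): it maps each backend name to the extra environment variables its
# subprocess run needs; a hypothetical entry might look like
#   {"gloo": {"WORLD_SIZE": "2", "TEST_REPORT_SOURCE_OVERRIDE": "dist-gloo"}}
# test_distributed() above starts one pool worker per such backend.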
def run_doctests(test_module, test_directory, options):
    """
    Assumes the incoming test module is called "doctests", and simply executes
    the xdoctest runner on the torch library itself.
    """
    import xdoctest

    enabled = {
        # TODO: expose these options to the user
        # Temporarily disable all feature-conditional tests
        # 'lapack': 'auto',
        # 'cuda': 'auto',
        # 'cuda1': 'auto',
        # 'qengine': 'auto',
        'lapack': 0,
        'cuda': 0,
        'cuda1': 0,
        'qengine': 0,
    }

    # Resolve "auto" based on a test to determine if the feature is available.
    if enabled['cuda'] == 'auto' and torch.cuda.is_available():
        enabled['cuda'] = True

    if enabled['cuda1'] == 'auto' and torch.cuda.is_available() and torch.cuda.device_count() > 1:
        enabled['cuda1'] = True

    if enabled['lapack'] == 'auto' and torch._C.has_lapack:
        enabled['lapack'] = True

    if enabled['qengine'] == 'auto':
        try:
            # Is there a better check if quantization is enabled?
            import torch.nn.quantized as nnq  # NOQA
            torch.backends.quantized.engine = 'qnnpack'
            torch.backends.quantized.engine = 'fbgemm'
        except (ImportError, RuntimeError):
            ...
        else:
            enabled['qengine'] = True

    # Set doctest environment variables
    if enabled['cuda']:
        os.environ['TORCH_DOCTEST_CUDA'] = '1'

    if enabled['cuda1']:
        os.environ['TORCH_DOCTEST_CUDA1'] = '1'

    if enabled['lapack']:
        os.environ['TORCH_DOCTEST_LAPACK'] = '1'

    if enabled['qengine']:
        os.environ['TORCH_DOCTEST_QENGINE'] = '1'

    pkgpath = os.path.dirname(torch.__file__)
    xdoctest_config = {
        'global_exec': r'\n'.join([
            'from torch import nn',
            'import torch.nn.functional as F',
            'import torch',
        ]),
        'style': 'google',
        'options': '+IGNORE_WHITESPACE',
    }
    xdoctest_verbose = max(1, options.verbose)
    run_summary = xdoctest.runner.doctest_module(
        os.fspath(pkgpath), config=xdoctest_config, verbose=xdoctest_verbose,
        command=options.xdoctest_command, argv=[])
    result = 1 if run_summary.get('n_failed', 0) else 0
    return result
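
# Usage sketch: `python run_test.py -i doctests` parses and syntax-checks the
# torch doctests under the default `--xdoctest-command=list`; passing
# `--xdoctest-command=all` executes them instead (assumes xdoctest is installed).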
def print_log_file(test: str, file_path: str) -> None:
    with open(file_path, "r") as f:
        print_to_stderr("")
        print_to_stderr(f"PRINT LOG FILE of {test} ({file_path})")
        # ##[group]/##[endgroup] are GitHub Actions workflow commands that fold
        # the log file into a collapsible group in the CI log view
        print_to_stderr(f"##[group]PRINT LOG FILE of {test} ({file_path})")
        print_to_stderr(f.read())
        print_to_stderr("##[endgroup]")
        print_to_stderr(f"FINISHED PRINT LOG FILE of {test} ({file_path})")
        print_to_stderr("")
def run_test_ops(test_module, test_directory, options):
    if 'slow-gradcheck' in os.getenv("BUILD_ENVIRONMENT", ""):
        # slow-gradcheck has a lot of tests that take a long time, so don't
        # bother parallelizing; it's also run on the periodic workflow, so we
        # don't care about TTS (time to signal) as much
        return run_test(test_module, test_directory, copy.deepcopy(options),
                        extra_unittest_args=["--use-pytest", '-vv', '-x', '--reruns=2', '-rfEX'],
                        )
    NUM_PROCS = 3
    return_codes = []
    os.environ["NUM_PARALLEL_PROCS"] = str(NUM_PROCS)
    pool = get_context("spawn").Pool(NUM_PROCS)
    for i in range(NUM_PROCS):
        return_code = pool.apply_async(run_test, args=(test_module, test_directory, copy.deepcopy(options)),
                                       kwds={"extra_unittest_args": ["--use-pytest", '-vv', '-x', '--reruns=2', '-rfEX',
                                                                     f'--shard-id={i}', f'--num-shards={NUM_PROCS}',
                                                                     "-k=not _linalg_cholesky_"],
                                             })
        return_codes.append(return_code)
    pool.close()
    pool.join()
    del os.environ['NUM_PARALLEL_PROCS']

    for return_code in return_codes:
        if return_code.get() != 0:
            return return_code.get()
    return_code = run_test(test_module, test_directory, copy.deepcopy(options),
                           extra_unittest_args=["--use-pytest", '-vv', '-x', '--reruns=2', '-rfEX',
                                                "-k=_linalg_cholesky_"],
                           )
    return return_code
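
# Parallelization sketch for the ops suites: three pytest shards run the bulk of
# the tests concurrently, with `-k "not _linalg_cholesky_"` carving the cholesky
# OpInfo tests out of every shard; those then run in one serial pass at the end,
# presumably because they are too resource-hungry to run alongside other shards.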
CUSTOM_HANDLERS = {
    "test_cuda_primary_ctx": test_cuda_primary_ctx,
    "test_cuda_trace": get_run_test_with_subprocess_fn(),
    "test_cpp_extensions_aot_no_ninja": test_cpp_extensions_aot_no_ninja,
    "test_cpp_extensions_aot_ninja": test_cpp_extensions_aot_ninja,
    "distributed/test_distributed_spawn": test_distributed,
    "distributed/algorithms/quantization/test_quantization": test_distributed,
    "distributed/test_c10d_nccl": get_run_test_with_subprocess_fn(),
    "distributed/test_c10d_gloo": get_run_test_with_subprocess_fn(),
    "distributed/test_c10d_common": get_run_test_with_subprocess_fn(),
    "distributed/test_c10d_spawn_gloo": get_run_test_with_subprocess_fn(),
    "distributed/test_c10d_spawn_nccl": get_run_test_with_subprocess_fn(),
    "distributed/test_store": get_run_test_with_subprocess_fn(),
    "distributed/test_pg_wrapper": get_run_test_with_subprocess_fn(),
    "distributed/rpc/test_faulty_agent": get_run_test_with_subprocess_fn(),
    "distributed/rpc/test_tensorpipe_agent": get_run_test_with_subprocess_fn(),
    "distributed/rpc/test_share_memory": get_run_test_with_subprocess_fn(),
    "distributed/rpc/cuda/test_tensorpipe_agent": get_run_test_with_subprocess_fn(),
    "doctests": run_doctests,
    "test_ops": run_test_ops,
    "test_ops_gradients": run_test_ops,
    "test_ops_jit": run_test_ops,
}
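
# Dispatch sketch: the driver looks each test module up here and falls back to
# the generic runner when no custom handler is registered, roughly
#   handler = CUSTOM_HANDLERS.get(test_module, run_test)
#   return_code = handler(test_module, test_directory, options)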
def parse_test_module(test):
    return test.split(".")[0]


class TestChoices(list):
    def __init__(self, *args, **kwargs):
        super(TestChoices, self).__init__(args[0])

    def __contains__(self, item):
        return list.__contains__(self, parse_test_module(item))
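
# TestChoices lets argparse accept fully qualified test names while validating
# only the module prefix, e.g.
#   "test_autograd.TestAutograd.test_backward" in TestChoices(["test_autograd"])  # -> True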
def parse_args():
    parser = argparse.ArgumentParser(
        description="Run the PyTorch unit test suite",
        epilog="where TESTS is any of: {}".format(", ".join(TESTS)),
        formatter_class=argparse.RawTextHelpFormatter,
        parents=[common_parser],
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="print verbose information and test-by-test results",
    )
    parser.add_argument("--jit", action="store_true", help="run all jit tests")
    parser.add_argument(
        "--distributed-tests",
        action="store_true",
        help="run all distributed tests",
    )
    parser.add_argument(
        "--functorch",
        action="store_true",
        help=(
            "If this flag is present, we will only run functorch tests. "
            "If this flag is not present, we will not run any functorch tests. "
            "This requires functorch to already be installed."
        ),
    )
    parser.add_argument(
        "-core",
        "--core",
        action="store_true",
        help="Only run core tests, or tests that validate PyTorch's ops, modules, "
        "and autograd. They are defined by CORE_TEST_LIST.",
    )
    parser.add_argument(
        "-pt",
        "--pytest",
        action="store_true",
        help="If true, use `pytest` to execute the tests. E.g., this runs "
        "TestTorch with pytest in verbose and coverage mode: "
        "python run_test.py -vci torch -pt",
    )
    parser.add_argument(
        "-c",
        "--coverage",
        action="store_true",
        help="enable coverage",
        default=PYTORCH_COLLECT_COVERAGE,
    )
    parser.add_argument(
        "-i",
        "--include",
        nargs="+",
        choices=TestChoices(TESTS),
        default=TESTS,
        metavar="TESTS",
        help="select a set of tests to include (defaults to ALL tests)."
        " tests must be a part of the TESTS list defined in run_test.py",
    )
    parser.add_argument(
        "-x",
        "--exclude",
        nargs="+",
        choices=TESTS,
        metavar="TESTS",
        default=[],
        help="select a set of tests to exclude",
    )
    parser.add_argument(
        "-f",
        "--first",
        choices=TESTS,
        metavar="TESTS",
        help="select the test to start from (excludes previous tests)",
    )
    parser.add_argument(
        "-l",
        "--last",
        choices=TESTS,
        metavar="TESTS",
        help="select the last test to run (excludes following tests)",
    )
    parser.add_argument(
        "--bring-to-front",
        nargs="+",
        choices=TestChoices(TESTS),
        default=[],
        metavar="TESTS",
        help="select a set of tests to run first. This can be used in situations"
        " where you want to run all tests, but care more about some set, "
        "e.g. after making a change to a specific component",
    )
    parser.add_argument(
        "--ignore-win-blocklist",
        action="store_true",
        help="always run blocklisted windows tests",
    )
    # NS: Disable target determination until it can be made more reliable
    # parser.add_argument(
    #     "--determine-from",
    #     help="File of affected source filenames to determine which tests to run.",
    # )
    parser.add_argument(
        "--continue-through-error",
        action="store_true",
        help="Runs the full test suite despite one of the tests failing",
        default=strtobool(os.environ.get("CONTINUE_THROUGH_ERROR", "False")),
    )
    parser.add_argument(
        "additional_unittest_args",
        nargs="*",
        help="additional arguments passed through to unittest, e.g., "
        "python run_test.py -i sparse -- TestSparse.test_factory_size_check",
    )
    parser.add_argument(
        "--shard",
        nargs=2,
        type=int,
        help="runs a shard of the tests (taking into account other selections), e.g., "
        "--shard 2 3 will break up the selected tests into 3 shards and run the tests "
        "in the 2nd shard (the first number should not exceed the second)",
    )
    parser.add_argument(
        "--exclude-jit-executor",
        action="store_true",
        help="exclude tests that are run for a specific jit config",
    )
    parser.add_argument(
        "--exclude-distributed-tests",
        action="store_true",
        help="exclude distributed tests",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Only list the tests that will run.",
    )
    parser.add_argument(
        "--xdoctest-command",
        default='list',
        help=(
            "Control the specific doctest action. "
            "Use 'list' to simply parse doctests and check syntax. "
            "Use 'all' to execute all doctests or specify a specific "
            "doctest to run"
        ),
    )
    return parser.parse_args()
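
# Example invocations (a sketch): `python run_test.py -v -i test_nn test_autograd`
# runs just those two suites verbosely; `python run_test.py --shard 2 3` runs the
# second of three shards of the selected tests.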
def find_test_index(test, selected_tests, find_last_index=False):
    """Find the index of the first or last occurrence of a given test/test module in the list of selected tests.

    This function is used to determine the indices when slicing the list of selected tests when
    ``options.first`` (:attr:`find_last_index`=False) and/or ``options.last`` (:attr:`find_last_index`=True) are used.

    :attr:`selected_tests` can be a list that contains multiple consecutive occurrences of tests
    as part of the same test module, e.g.:

    ```
    selected_tests = ['autograd', 'cuda', **'torch.TestTorch.test_acos',
                      'torch.TestTorch.test_tan', 'torch.TestTorch.test_add'**, 'utils']
    ```

    If :attr:`test`='torch' and :attr:`find_last_index`=False, the result should be **2**.
    If :attr:`test`='torch' and :attr:`find_last_index`=True, the result should be **4**.

    Args:
        test (str): Name of test to lookup
        selected_tests (list): List of tests
        find_last_index (bool, optional): should we lookup the index of first or last
            occurrence (first is default)

    Returns:
        index of the first or last occurrence of the given test
    """
    idx = 0
    found_idx = -1
    for t in selected_tests:
        if t.startswith(test):
            found_idx = idx
            if not find_last_index:
                break
        idx += 1
    return found_idx
def exclude_tests(exclude_list, selected_tests, exclude_message=None):
    for exclude_test in exclude_list:
        tests_copy = selected_tests[:]
        for test in tests_copy:
            if test.startswith(exclude_test):
                if exclude_message is not None:
                    print_to_stderr("Excluding {} {}".format(test, exclude_message))
                selected_tests.remove(test)
    return selected_tests
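
# Example: exclude_tests(["distributed"], ["distributed/test_store", "test_nn"])
# removes every selected test whose name starts with "distributed", leaving
# ["test_nn"].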
def get_selected_tests(options):
    selected_tests = options.include

    # filter if there are JIT-only or distributed-only test options
    if options.jit:
        selected_tests = list(
            filter(lambda test_name: "jit" in test_name, selected_tests)
        )

    if options.distributed_tests:
        selected_tests = list(
            filter(lambda test_name: test_name in DISTRIBUTED_TESTS, selected_tests)
        )

    # Filter to only run core tests when --core option is specified
    if options.core:
        selected_tests = list(
            filter(lambda test_name: test_name in CORE_TEST_LIST, selected_tests)
        )

    if options.functorch:
        selected_tests = FUNCTORCH_TESTS
    # process reordering
    if options.bring_to_front:
        to_front = set(options.bring_to_front)
        selected_tests = options.bring_to_front + list(
            filter(lambda name: name not in to_front, selected_tests)
        )

    if options.first:
        first_index = find_test_index(options.first, selected_tests)
        selected_tests = selected_tests[first_index:]

    if options.last:
        last_index = find_test_index(options.last, selected_tests, find_last_index=True)
        selected_tests = selected_tests[: last_index + 1]
2018-03-09 21:02:02 +00:00
|
|
|
|

    # process exclusion
    if options.exclude_jit_executor:
        options.exclude.extend(JIT_EXECUTOR_TESTS)

    if options.exclude_distributed_tests:
        options.exclude.extend(DISTRIBUTED_TESTS)

    # These tests fail on CUDA 11.6; disable them temporarily until
    # https://github.com/pytorch/pytorch/issues/75375 is resolved.
    if torch.version.cuda is not None and LooseVersion(torch.version.cuda) >= "11.6":
        options.exclude.extend(["distributions/test_constraints"])
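    # Note: LooseVersion compares components numerically, so the check above
    # also matches e.g. CUDA "11.10", which a plain string comparison would
    # order before "11.6" ("11.10" < "11.6" lexicographically, yet
    # LooseVersion("11.10") >= "11.6" is True).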

    selected_tests = exclude_tests(options.exclude, selected_tests)

    if sys.platform == "win32" and not options.ignore_win_blocklist:
        target_arch = os.environ.get("VSCMD_ARG_TGT_ARCH")
        if target_arch != "x64":
            WINDOWS_BLOCKLIST.append("cpp_extensions_aot_no_ninja")
            WINDOWS_BLOCKLIST.append("cpp_extensions_aot_ninja")
            WINDOWS_BLOCKLIST.append("cpp_extensions_jit")
            WINDOWS_BLOCKLIST.append("jit")
            WINDOWS_BLOCKLIST.append("jit_fuser")

        # This exclusion is caused by https://github.com/pytorch/pytorch/issues/69460
        # and should be removed once that issue is resolved.
        if torch.version.cuda is not None and LooseVersion(torch.version.cuda) >= "11.5":
            WINDOWS_BLOCKLIST.append("test_cpp_extensions_aot")
            WINDOWS_BLOCKLIST.append("test_cpp_extensions_aot_ninja")
            WINDOWS_BLOCKLIST.append("test_cpp_extensions_aot_no_ninja")

        selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests, "on Windows")

    elif TEST_WITH_ROCM:
        selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, "on ROCm")

    # sharding
    if options.shard:
        assert len(options.shard) == 2, "Unexpected shard format"
        assert min(options.shard) > 0, "Shards must be positive numbers"
        which_shard, num_shards = options.shard
        assert (
            which_shard <= num_shards
        ), "Selected shard must be less than or equal to total number of shards"
        assert num_shards <= len(
            selected_tests
        ), f"Number of shards must be less than or equal to {len(selected_tests)}"

        if num_shards == 1:
            return selected_tests

        # Download previous test times to make sharding decisions
        path = os.path.join(str(REPO_ROOT), TEST_TIMES_FILE)
        if os.path.exists(path):
            with open(path, "r") as f:
                test_file_times = cast(Dict[str, Any], json.load(f))
        else:
            test_file_times = {}
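        # The stats file is keyed by CI job config, mapping test files to
        # runtimes; a plausible (hypothetical) shape:
        #   {"linux-focal-py3.8": {"test_nn": 812.3, "test_torch": 540.1}, ...}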
        test_config = os.environ.get("TEST_CONFIG")
        if test_config not in test_file_times:
            print(
                "::warning:: Gathered no stats from artifacts. Proceeding with default sharding plan."
            )
            selected_tests = selected_tests[which_shard - 1 :: num_shards]
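            # Round-robin fallback: with num_shards=3 and seven tests, shard 1
            # takes indices 0, 3, 6; shard 2 takes 1, 4; shard 3 takes 2, 5.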
        else:
            print("Found test time stats from artifacts")
            test_file_times_config = test_file_times[test_config]
            shards = calculate_shards(num_shards, selected_tests, test_file_times_config)
            # Each shard is a (total_time, tests) pair; pick ours (1-indexed).
            _, tests_from_shard = shards[which_shard - 1]
            selected_tests = tests_from_shard

    # skip all distributed tests if distributed package is not available.
    if not dist.is_available():
        selected_tests = exclude_tests(DISTRIBUTED_TESTS, selected_tests,
                                       "PyTorch is built without distributed support.")

    # skip tests that require LAPACK when it's not available
    if not torch._C.has_lapack:
        selected_tests = exclude_tests(TESTS_REQUIRING_LAPACK, selected_tests,
                                       "PyTorch is built without LAPACK support.")

    return selected_tests

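
# For reference, a minimal greedy sketch of time-based sharding with the same
# call shape as calculate_shards(num_shards, tests, job_times) used above:
# assign each test, longest first, to the currently lightest shard, returning
# (total_time, tests) pairs. This is an illustrative stand-in, not the actual
# calculate_shards implementation.
def _calculate_shards_sketch(num_shards, tests, job_times):
    shards = [[0.0, []] for _ in range(num_shards)]
    # Longest-processing-time-first keeps the shards roughly balanced.
    for test in sorted(tests, key=lambda t: job_times.get(t, 0.0), reverse=True):
        lightest = min(shards, key=lambda s: s[0])
        lightest[0] += job_times.get(test, 0.0)
        lightest[1].append(test)
    return [(total, names) for total, names in shards]
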

def run_test_module(test: str, test_directory: str, options) -> Optional[str]:
    test_module = parse_test_module(test)

    # Printing the date here can help diagnose which tests are slow
    print_to_stderr("Running {} ... [{}]".format(test, datetime.now()))
    # Special test modules get a custom handler; everything else uses run_test.
    handler = CUSTOM_HANDLERS.get(test_module, run_test)
    return_code = handler(test_module, test_directory, options)
    assert isinstance(return_code, int) and not isinstance(
        return_code, bool
    ), f"While running {test} got non-integer return code {return_code}"
    if return_code == 0:
        return None

    message = f"{test} failed!"
    if return_code < 0:
        # subprocess.Popen returns the child process' exit signal as
        # return code -N, where N is the signal number.
        signal_name = SIGNALS_TO_NAMES_DICT[-return_code]
        message += f" Received signal: {signal_name}"
    return message

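
# Worked example of the negative-return-code convention above: a child killed
# by SIGSEGV is reported by subprocess as returncode == -signal.SIGSEGV, and
# SIGNALS_TO_NAMES_DICT (assumed here to map signal numbers back to their
# names, as used above) recovers the name. Hypothetical helper, never called.
def _signal_name_example():
    return_code = -signal.SIGSEGV  # e.g. -11 on Linux
    assert SIGNALS_TO_NAMES_DICT[-return_code] == "SIGSEGV"
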

def main():
    options = parse_args()

    test_directory = str(REPO_ROOT / "test")
    selected_tests = get_selected_tests(options)

    if options.verbose:
        print_to_stderr("Selected tests:\n {}".format("\n ".join(selected_tests)))

    if options.dry_run:
        return

    if options.coverage and not PYTORCH_COLLECT_COVERAGE:
        shell(["coverage", "erase"])

    if IS_CI:
        selected_tests = get_reordered_tests(selected_tests)
        # downloading test cases configuration to local environment
        get_test_case_configs(dirpath=test_directory)

    has_failed = False
    failure_messages = []
    try:
        for test in selected_tests:
            # Deep-copy the options so per-test tweaks don't leak into later tests.
            options_clone = copy.deepcopy(options)
            if test in USE_PYTEST_LIST:
                options_clone.pytest = True
            err_message = run_test_module(test, test_directory, options_clone)
            if err_message is None:
                continue
            has_failed = True
            failure_messages.append(err_message)
            if not options_clone.continue_through_error:
                raise RuntimeError(err_message)
            print_to_stderr(err_message)
    finally:
        if options.coverage:
            from coverage import Coverage

            with set_cwd(test_directory):
                cov = Coverage()
                if PYTORCH_COLLECT_COVERAGE:
                    # Merge coverage data collected by the test subprocesses.
                    cov.load()
                    cov.combine(strict=False)
                    cov.save()
                if not PYTORCH_COLLECT_COVERAGE:
                    cov.html_report()

    if options.continue_through_error and has_failed:
        for err in failure_messages:
            print_to_stderr(err)
        sys.exit(1)


if __name__ == "__main__":
    main()
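
# Typical invocations, assuming the flag spellings behind options.include,
# options.verbose and options.shard defined by parse_args earlier in this file:
#
#   python test/run_test.py --include test_nn test_torch --verbose
#   python test/run_test.py --shard 2 3    # run the 2nd of 3 shards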
|