mirror of
https://github.com/saymrwulf/pytorch.git
synced 2026-05-14 20:57:59 +00:00
Revert D23544563: Benchmarks: re-enable profiling-te configuration.
Test Plan: revert-hammer
Differential Revision:
D23544563 (ac1f471fe2)
Original commit changeset: 98659e8860fa
fbshipit-source-id: 5dab7044699f59c709e64d178758f5f462ebb788
This commit is contained in:
parent
1b2da9ed82
commit
626e410e1d
3 changed files with 15 additions and 22 deletions
|
|
@@ -338,10 +338,8 @@ test_benchmarks() {
|
|||
pip_install --user "requests"
|
||||
BENCHMARK_DATA="benchmarks/.data"
|
||||
mkdir -p ${BENCHMARK_DATA}
|
||||
pytest benchmarks/fastrnns/test_bench.py --benchmark-sort=Name --benchmark-json=${BENCHMARK_DATA}/fastrnns_legacy_old.json --fuser=old --executor=legacy
|
||||
python benchmarks/upload_scribe.py --pytest_bench_json ${BENCHMARK_DATA}/fastrnns_legacy_old.json
|
||||
pytest benchmarks/fastrnns/test_bench.py --benchmark-sort=Name --benchmark-json=${BENCHMARK_DATA}/fastrnns_profiling_te.json --fuser=te --executor=profiling
|
||||
python benchmarks/upload_scribe.py --pytest_bench_json ${BENCHMARK_DATA}/fastrnns_profiling_te.json
|
||||
pytest benchmarks/fastrnns/test_bench.py --benchmark-sort=Name --benchmark-json=${BENCHMARK_DATA}/fastrnns.json
|
||||
python benchmarks/upload_scribe.py --pytest_bench_json ${BENCHMARK_DATA}/fastrnns.json
|
||||
assert_git_not_dirty
|
||||
fi
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,17 +0,0 @@
|
|||
import pytest # noqa: F401
|
||||
|
||||
default_rnns = ['cudnn', 'aten', 'jit', 'jit_premul', 'jit_premul_bias', 'jit_simple',
|
||||
'jit_multilayer', 'py']
|
||||
default_cnns = ['resnet18', 'resnet18_jit', 'resnet50', 'resnet50_jit']
|
||||
all_nets = default_rnns + default_cnns
|
||||
|
||||
def pytest_generate_tests(metafunc):
|
||||
# This creates lists of tests to generate, can be customized
|
||||
if metafunc.cls.__name__ == "TestBenchNetwork":
|
||||
metafunc.parametrize('net_name', all_nets, scope="class")
|
||||
metafunc.parametrize("executor", [metafunc.config.getoption("executor")], scope="class")
|
||||
metafunc.parametrize("fuser", [metafunc.config.getoption("fuser")], scope="class")
|
||||
|
||||
def pytest_addoption(parser):
    """Register the benchmark-selection command line options.

    ``--fuser`` defaults to "old" and ``--executor`` defaults to "legacy";
    pytest_generate_tests reads both to parametrize TestBenchNetwork.
    """
    options = (
        ("--fuser", "old", "fuser to use for benchmarks"),
        ("--executor", "legacy", "executor to use for benchmarks"),
    )
    for name, default, help_text in options:
        parser.addoption(name, default=default, help=help_text)
|
||||
|
|
@@ -4,8 +4,20 @@ import torch
|
|||
from .fuser import set_fuser
|
||||
from .runner import get_nn_runners
|
||||
|
||||
default_rnns = ['cudnn', 'aten', 'jit', 'jit_premul', 'jit_premul_bias', 'jit_simple',
|
||||
'jit_multilayer', 'py']
|
||||
default_cnns = ['resnet18', 'resnet18_jit', 'resnet50', 'resnet50_jit']
|
||||
all_nets = default_rnns + default_cnns
|
||||
|
||||
def pytest_generate_tests(metafunc):
|
||||
# This creates lists of tests to generate, can be customized
|
||||
if metafunc.cls.__name__ == "TestBenchNetwork":
|
||||
metafunc.parametrize('net_name', all_nets, scope="class")
|
||||
metafunc.parametrize("executor_and_fuser", ["legacy-old"], scope="class")
|
||||
|
||||
@pytest.fixture(scope='class')
|
||||
def modeldef(request, net_name, executor, fuser):
|
||||
def modeldef(request, net_name, executor_and_fuser):
|
||||
executor, fuser = executor_and_fuser.split("-")
|
||||
set_fuser(fuser, executor)
|
||||
|
||||
# Given a 'net_name' provided by generate_tests, build the thing
|
||||
|
|
|
|||
Loading…
Reference in a new issue