mirror of
https://github.com/saymrwulf/uhd.git
synced 2026-05-16 21:10:10 +00:00
devtest: Make Python tests their own type of devtest
Like the example test, the Python-based test is now codified. The "special sauce" for Python tests revolves around the fact that they require the Python API, so we have to fail gracefully if the Python API was never enabled, but hard-fail when the Python API is enabled but not working. This creates a new type of devtest (UHDPythonTestCase) and moves uhd_python_api_test over to the new type.
This commit is contained in:
parent
509b3b0961
commit
8a33db6022
3 changed files with 68 additions and 41 deletions
|
|
@ -37,4 +37,3 @@ class gpio_test(uhd_example_test_case):
|
|||
print(app.stderr)
|
||||
self.report_example_results(test_name, run_results)
|
||||
return run_results
|
||||
|
||||
|
|
|
|||
|
|
@ -8,35 +8,16 @@
|
|||
|
||||
import os
|
||||
from uhd_test_base import shell_application
|
||||
from uhd_test_base import uhd_test_case
|
||||
try:
|
||||
import uhd
|
||||
except ImportError:
|
||||
uhd = None
|
||||
from uhd_test_base import UHDPythonTestCase
|
||||
|
||||
# For what we're doing here, ruamel.yaml and yaml are compatible, and we'll use
|
||||
# whatever we can find
|
||||
try:
|
||||
from ruamel import yaml
|
||||
except:
|
||||
import yaml
|
||||
|
||||
|
||||
class uhd_python_api_test(uhd_test_case):
|
||||
class uhd_python_api_test(UHDPythonTestCase):
|
||||
""" Run multi_usrp_test """
|
||||
def test_api(self):
|
||||
test_name = 'uhd_python_api_test'
|
||||
|
||||
def run_test(self, test_name, test_args):
|
||||
"""
|
||||
Run test and report results.
|
||||
"""
|
||||
if uhd is None:
|
||||
print("UHD module not found -- checking for Python API")
|
||||
config_info_app = shell_application('uhd_config_info')
|
||||
config_info_app.run(['--enabled-components'])
|
||||
if "Python API" in config_info_app.stdout:
|
||||
raise RuntimeError("Python API enabled, but cannot load uhd module!")
|
||||
print("Skipping test, Python API not installed.")
|
||||
self.report_result("python_api_tester", 'status', 'Skipped')
|
||||
return
|
||||
devtest_src_dir = os.getenv('_UHD_DEVTEST_SRC_DIR', '')
|
||||
multi_usrp_test_path = \
|
||||
os.path.join(devtest_src_dir, 'multi_usrp_test.py')
|
||||
|
|
@ -71,11 +52,4 @@ class uhd_python_api_test(uhd_test_case):
|
|||
'status',
|
||||
'Passed' if run_results['passed'] else 'Failed',
|
||||
)
|
||||
self.assertTrue(
|
||||
run_results['passed'],
|
||||
msg="Errors occurred during test python_api_test. "
|
||||
"Check log file for details.\n"
|
||||
"Run results:\n{r}".format(
|
||||
r=yaml.dump(run_results, default_flow_style=False)
|
||||
)
|
||||
)
|
||||
return run_results
|
||||
|
|
|
|||
|
|
@ -16,8 +16,9 @@ import unittest
|
|||
import re
|
||||
import time
|
||||
import logging
|
||||
import importlib
|
||||
from subprocess import Popen, PIPE
|
||||
# For what we're doing here, ruamel.yaml and yaml are copatible, and we'll use
|
||||
# For what we're doing here, ruamel.yaml and yaml are compatible, and we'll use
|
||||
# whatever we can find
|
||||
try:
|
||||
from ruamel import yaml
|
||||
|
|
@ -105,7 +106,6 @@ class uhd_test_case(unittest.TestCase):
|
|||
"""
|
||||
Override this to add own setup code per test.
|
||||
"""
|
||||
pass
|
||||
|
||||
def setUp(self):
|
||||
self.name = self.__class__.__name__
|
||||
|
|
@ -146,7 +146,6 @@ class uhd_test_case(unittest.TestCase):
|
|||
|
||||
def tear_down(self):
|
||||
"""Nothing to do."""
|
||||
pass
|
||||
|
||||
def tearDown(self):
|
||||
self.tear_down()
|
||||
|
|
@ -178,7 +177,6 @@ class uhd_example_test_case(uhd_test_case):
|
|||
"""
|
||||
Override this to add specific setup code.
|
||||
"""
|
||||
pass
|
||||
|
||||
def set_up(self):
|
||||
"""Called by the unit testing framework on tests. """
|
||||
|
|
@ -248,12 +246,68 @@ class uhd_example_test_case(uhd_test_case):
|
|||
or (self.usrp_info['product'] in test_args.get('products', [])):
|
||||
run_results = self.run_test(test_name, test_args)
|
||||
passed = bool(run_results)
|
||||
errors = ''
|
||||
if isinstance(run_results, dict):
|
||||
passed = run_results['passed']
|
||||
errors = run_results.pop("errors", None)
|
||||
if not passed:
|
||||
print("Error log:", file=sys.stderr)
|
||||
print(errors)
|
||||
errors = run_results.pop("errors", None)
|
||||
if not passed:
|
||||
print("Error log:", file=sys.stderr)
|
||||
print(errors)
|
||||
self.assertTrue(
|
||||
passed,
|
||||
msg="Errors occurred during test `{t}'. "
|
||||
"Check log file for details.\n"
|
||||
"Run results:\n{r}".format(
|
||||
t=test_name,
|
||||
r=yaml.dump(run_results, default_flow_style=False)
|
||||
)
|
||||
)
|
||||
|
||||
class UHDPythonTestCase(uhd_test_case):
|
||||
"""
|
||||
Helper class for Python test cases. These require the uhd module, but that's
|
||||
not always available. We thus test for its existence.
|
||||
"""
|
||||
|
||||
def run_test(self, test_name, test_args):
|
||||
"""
|
||||
Override this to run the actual example.
|
||||
|
||||
Needs to return either a boolean or a dict with key 'passed' to determine
|
||||
pass/fail.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def test_all(self):
|
||||
"""
|
||||
Hook for test runner. Needs to be a class method that starts with 'test'.
|
||||
Calls run_test().
|
||||
"""
|
||||
try:
|
||||
self.uhd = importlib.import_module('uhd')
|
||||
except ImportError:
|
||||
print("UHD module not found -- checking for Python API")
|
||||
config_info_app = shell_application('uhd_config_info')
|
||||
config_info_app.run(['--enabled-components'])
|
||||
if "Python API" in config_info_app.stdout:
|
||||
raise RuntimeError("Python API enabled, but cannot load uhd module!")
|
||||
self.log.info("Skipping test, Python API not installed.")
|
||||
self.report_result("python_api_tester", 'status', 'Skipped')
|
||||
return
|
||||
# Now: The actual test
|
||||
test_params = getattr(self, 'test_params', {'default': {},})
|
||||
for test_name, test_args in test_params.items():
|
||||
time.sleep(15) # Wait for X300 devices to reclaim them
|
||||
if 'products' not in test_args \
|
||||
or (self.usrp_info['product'] in test_args.get('products', [])):
|
||||
run_results = self.run_test(test_name, test_args)
|
||||
passed = bool(run_results)
|
||||
if isinstance(run_results, dict):
|
||||
passed = run_results['passed']
|
||||
errors = run_results.pop("errors", None)
|
||||
if not passed:
|
||||
print("Error log:", file=sys.stderr)
|
||||
print(errors)
|
||||
self.assertTrue(
|
||||
passed,
|
||||
msg="Errors occurred during test `{t}'. "
|
||||
|
|
|
|||
Loading…
Reference in a new issue