mirror of
https://github.com/saymrwulf/uhd.git
synced 2026-05-16 21:10:10 +00:00
Usually, devtest is run via make (or ninja), and will use the correct Python interpreter. When running directly on the command line, it is important to pick the right Python interpreter so it will work with the Python API. Here, we change the default interpreter from Python 2 to 3, because that's the more common version, and will be the only option for upcoming UHD 4.0 anyway.
52 lines
1.6 KiB
Python
Executable file
52 lines
1.6 KiB
Python
Executable file
#!/usr/bin/env python3
|
|
#
|
|
# Copyright 2015 Ettus Research LLC
|
|
# Copyright 2018 Ettus Research, a National Instruments Company
|
|
#
|
|
# SPDX-License-Identifier: GPL-3.0-or-later
|
|
#
|
|
""" Run the test for tx_burst """
|
|
|
|
import re
|
|
from uhd_test_base import uhd_example_test_case
|
|
|
|
class uhd_tx_bursts_test(uhd_example_test_case):
    """
    Run the tx_bursts example and verify it completes successfully.
    """
    # Test cases: maps a test name to the argument dictionary that is
    # forwarded to the tx_bursts example in run_test().
    tests = {
        'default': {
            'nsamps': 10000,   # Number of samples per burst
            'rate': 5e6,       # Sample rate in samples/second
            'channels': '0',   # Channel list, passed verbatim to --channels
        },
    }

    def setup_example(self):
        """
        Set args.
        """
        self.test_params = uhd_tx_bursts_test.tests

    def run_test(self, test_name, test_args):
        """
        Run the tx_bursts example and scrape for the failure messages.

        Arguments:
        test_name -- Key into the tests dict, used for logging/reporting.
        test_args -- Dict of example arguments. Requires 'nsamps' and
                     'channels'; 'rate' (default 1e6) and 'subdev' are
                     optional.

        Returns the run_results dict from run_example(), augmented with
        'passed' and 'async_burst_ack_found' flags.
        """
        # Note: the key in the tests dict is 'channels' (plural); using
        # 'channel' here would always log 'Channel = None'.
        self.log.info('Running test {name}, Channel = {channel}, Sample Rate = {rate}'.format(
            name=test_name,
            channel=test_args.get('channels'),
            rate=test_args.get('rate'),
        ))
        # Run example:
        args = [
            self.create_addr_args_str(),
            '--nsamps', str(test_args['nsamps']),
            '--channels', str(test_args['channels']),
            '--rate', str(test_args.get('rate', 1e6)),
        ]
        if 'subdev' in test_args:
            args.append('--subdev')
            args.append(test_args['subdev'])
        (app, run_results) = self.run_example('tx_bursts', args)
        # Evaluate pass/fail: the example must have exited cleanly.
        run_results['passed'] = all([
            app.returncode == 0,
        ])
        # The example prints 'success' once the async burst ACK is received.
        run_results['async_burst_ack_found'] = \
            re.search('success', app.stdout) is not None
        self.report_example_results(test_name, run_results)
        return run_results
|
|
|