Diffstat (limited to 'host/tests/devtest/uhd_test_base.py')
-rwxr-xr-x | host/tests/devtest/uhd_test_base.py | 70
1 file changed, 62 insertions, 8 deletions
diff --git a/host/tests/devtest/uhd_test_base.py b/host/tests/devtest/uhd_test_base.py
index 80ce5b9f0..698ba4be3 100755
--- a/host/tests/devtest/uhd_test_base.py
+++ b/host/tests/devtest/uhd_test_base.py
@@ -16,8 +16,9 @@ import unittest
 import re
 import time
 import logging
+import importlib
 from subprocess import Popen, PIPE
-# For what we're doing here, ruamel.yaml and yaml are copatible, and we'll use
+# For what we're doing here, ruamel.yaml and yaml are compatible, and we'll use
 # whatever we can find
 try:
     from ruamel import yaml
@@ -105,7 +106,6 @@ class uhd_test_case(unittest.TestCase):
         """
         Override this to add own setup code per test.
         """
-        pass
 
     def setUp(self):
         self.name = self.__class__.__name__
@@ -146,7 +146,6 @@ class uhd_test_case(unittest.TestCase):
 
     def tear_down(self):
         """Nothing to do."""
-        pass
 
     def tearDown(self):
         self.tear_down()
@@ -178,7 +177,6 @@ class uhd_example_test_case(uhd_test_case):
         """
         Override this to add specific setup code.
         """
-        pass
 
     def set_up(self):
         """Called by the unit testing framework on tests. """
@@ -248,12 +246,68 @@ class uhd_example_test_case(uhd_test_case):
                     or (self.usrp_info['product'] in test_args.get('products', [])):
                 run_results = self.run_test(test_name, test_args)
                 passed = bool(run_results)
+                errors = ''
                 if isinstance(run_results, dict):
                     passed = run_results['passed']
-                    errors = run_results.pop("errors", None)
-                    if not passed:
-                        print("Error log:", file=sys.stderr)
-                        print(errors)
+                    errors = run_results.pop("errors", None)
+                if not passed:
+                    print("Error log:", file=sys.stderr)
+                    print(errors)
+                self.assertTrue(
+                    passed,
+                    msg="Errors occurred during test `{t}'. "
+                        "Check log file for details.\n"
+                        "Run results:\n{r}".format(
+                            t=test_name,
+                            r=yaml.dump(run_results, default_flow_style=False)
+                        )
+                )
+
+class UHDPythonTestCase(uhd_test_case):
+    """
+    Helper class for Python test cases. These require the uhd module, but that's
+    not always available. We thus test for its existence.
+    """
+
+    def run_test(self, test_name, test_args):
+        """
+        Override this to run the actual example.
+
+        Needs to return either a boolean or a dict with key 'passed' to determine
+        pass/fail.
+        """
+        raise NotImplementedError
+
+    def test_all(self):
+        """
+        Hook for test runner. Needs to be a class method that starts with 'test'.
+        Calls run_test().
+        """
+        try:
+            self.uhd = importlib.import_module('uhd')
+        except ImportError:
+            print("UHD module not found -- checking for Python API")
+            config_info_app = shell_application('uhd_config_info')
+            config_info_app.run(['--enabled-components'])
+            if "Python API" in config_info_app.stdout:
+                raise RuntimeError("Python API enabled, but cannot load uhd module!")
+            self.log.info("Skipping test, Python API not installed.")
+            self.report_result("python_api_tester", 'status', 'Skipped')
+            return
+        # Now: The actual test
+        test_params = getattr(self, 'test_params', {'default': {},})
+        for test_name, test_args in test_params.items():
+            time.sleep(15) # Wait for X300 devices to reclaim them
+            if 'products' not in test_args \
+                    or (self.usrp_info['product'] in test_args.get('products', [])):
+                run_results = self.run_test(test_name, test_args)
+                passed = bool(run_results)
+                if isinstance(run_results, dict):
+                    passed = run_results['passed']
+                    errors = run_results.pop("errors", None)
+                    if not passed:
+                        print("Error log:", file=sys.stderr)
+                        print(errors)
                 self.assertTrue(
                     passed,
                     msg="Errors occurred during test `{t}'. "
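For context, a devtest built on the new UHDPythonTestCase only needs to implement run_test(); test_all() handles importing the uhd module, cross-checking against uhd_config_info when the import fails, and skipping gracefully when the Python API is not installed. The sketch below is not part of this commit: the class name, the use of self.args_str for device arguments, and the rate-check logic are illustrative assumptions.

# Hypothetical devtest based on UHDPythonTestCase (illustration only, not part
# of this change). Assumes uhd_test_base.py is importable and that self.args_str
# carries the device arguments, as other devtests in this directory use it.
from uhd_test_base import UHDPythonTestCase

class uhd_python_rate_test(UHDPythonTestCase):
    """ Minimal Python API smoke test: set and read back an RX sample rate. """
    test_name = 'uhd_python_rate_test'

    def run_test(self, test_name, test_args):
        # self.uhd is set by UHDPythonTestCase.test_all() after the import check
        usrp = self.uhd.usrp.MultiUSRP(self.args_str)
        usrp.set_rx_rate(1e6)
        rate = usrp.get_rx_rate()
        passed = abs(rate - 1e6) < 1.0
        # Returning a dict (instead of a bare bool) lets test_all() report
        # pass/fail and print an error log on failure
        return {
            'passed': passed,
            'rx_rate': rate,
            'errors': '' if passed else 'RX rate readback mismatch',
        }

As with the example-based tests, returning a dict rather than a plain boolean is what allows test_all() to surface the error log and dump the full run results in the assertion message when a test fails.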