author    Matthew Crymble <matthew.crymble@ni.com>    2021-11-15 14:57:07 -0600
committer Aaron Rossetto <aaron.rossetto@ni.com>      2021-11-30 07:33:28 -0800
commit    f24d6561a842baffbce9ddcdc9802b98f5fa2af0 (patch)
tree      977e76a8cb93844a0366612da8fae12cb693f0a9 /host/tests/pytests
parent    df7f65d858f723fa528f6375737e73d127fa9c1b (diff)
tests: add automated streaming tests
Diffstat (limited to 'host/tests/pytests')
-rw-r--r--  host/tests/pytests/conftest.py            59
-rw-r--r--  host/tests/pytests/test_length_utils.py   20
-rw-r--r--  host/tests/pytests/test_streaming.py     303
3 files changed, 382 insertions, 0 deletions
diff --git a/host/tests/pytests/conftest.py b/host/tests/pytests/conftest.py
new file mode 100644
index 000000000..dfca5d69f
--- /dev/null
+++ b/host/tests/pytests/conftest.py
@@ -0,0 +1,59 @@
+import test_length_utils
+
+dut_type_list = [
+ "N310",
+ "N320",
+ "B210",
+ "E320",
+ "X310",
+ "X310_TwinRx",
+ "X410"
+]
+
+
+test_length_list = [
+ test_length_utils.Test_Length_Smoke,
+ test_length_utils.Test_Length_Full,
+ test_length_utils.Test_Length_Stress
+]
+
+
+def pytest_addoption(parser):
+ parser.addoption(
+ "--addr",
+ type=str,
+ help="address of first 10 GbE interface",)
+ parser.addoption(
+ "--second_addr",
+ type=str,
+ help="address of second 10 GbE interface")
+ parser.addoption(
+ "--name",
+ type=str,
+ help="name of B2xx device")
+ parser.addoption(
+ "--mgmt_addr",
+ type=str,
+ help="address of management interface. only needed for DPDK test cases")
+ parser.addoption(
+ "--dut_type",
+ type=str,
+ required=True,
+ choices=dut_type_list,
+ help="")
+ parser.addoption(
+ "--test_length",
+ type=str,
+ default=test_length_utils.Test_Length_Full,
+ choices=test_length_list,
+ help="")
+ parser.addoption(
+ "--uhd_build_dir",
+ required=True,
+ type=str,
+ help="")
+
+
+def pytest_configure(config):
+ # register additional markers
+    config.addinivalue_line("markers", "dpdk: run with DPDK enabled")
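
For reference, a typical invocation of the suite using the options defined above
might look like this (addresses and paths are illustrative, not part of this
patch); the registered dpdk marker lets DPDK cases be deselected with pytest's
standard marker filter:

    pytest test_streaming.py --dut_type N310 --test_length smoke \
        --addr 192.168.10.2 --second_addr 192.168.20.2 \
        --mgmt_addr 10.0.10.2 --uhd_build_dir ~/uhd/host/build \
        -m "not dpdk"
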
diff --git a/host/tests/pytests/test_length_utils.py b/host/tests/pytests/test_length_utils.py
new file mode 100644
index 000000000..db4eeab13
--- /dev/null
+++ b/host/tests/pytests/test_length_utils.py
@@ -0,0 +1,20 @@
+from collections import namedtuple
+import pytest
+
+# These constants provide a way to run a quick smoke test for PRs, a more
+# exhaustive set of tests for nightly runs, and long-running stress tests over
+# the weekend:
+#
+# smoke: subset of tests, short duration
+# full: all test cases, short duration
+# stress: subset of tests, long duration
+Test_Length_Smoke = "smoke"
+Test_Length_Full = "full"
+Test_Length_Stress = "stress"
+
+test_length_params = namedtuple('test_length_params', 'iterations duration')
+
+def select_test_cases_by_length(test_length, test_cases):
+ if test_length == Test_Length_Full:
+ return [test_case[1] for test_case in test_cases]
+ else:
+        return [test_case[1] for test_case in test_cases if test_length in test_case[0]]
\ No newline at end of file
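
To make the selection semantics concrete, here is a minimal standalone sketch
(names are re-declared locally so the snippet runs by itself):

    Test_Length_Smoke = "smoke"
    Test_Length_Full = "full"
    Test_Length_Stress = "stress"

    def select_test_cases_by_length(test_length, test_cases):
        # "full" keeps every case; any other length keeps only the cases whose
        # marker set (the first element of each pair) contains that length
        if test_length == Test_Length_Full:
            return [case for _, case in test_cases]
        return [case for markers, case in test_cases if test_length in markers]

    cases = [
        ({}, "case_full_only"),                                 # runs in "full" only
        ({Test_Length_Smoke, Test_Length_Stress}, "case_all"),  # smoke, stress, and full
    ]

    assert select_test_cases_by_length(Test_Length_Full, cases) == ["case_full_only", "case_all"]
    assert select_test_cases_by_length(Test_Length_Smoke, cases) == ["case_all"]
    assert select_test_cases_by_length(Test_Length_Stress, cases) == ["case_all"]
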
diff --git a/host/tests/pytests/test_streaming.py b/host/tests/pytests/test_streaming.py
new file mode 100644
index 000000000..a7e26354a
--- /dev/null
+++ b/host/tests/pytests/test_streaming.py
@@ -0,0 +1,303 @@
+import pytest
+from pathlib import Path
+import batch_run_benchmark_rate
+import test_length_utils
+from test_length_utils import Test_Length_Smoke, Test_Length_Full, Test_Length_Stress
+
+ARGNAMES_DUAL_10G = ["dual_10G", "rate", "rx_rate", "rx_channels", "tx_rate", "tx_channels"]
+ARGNAMES = ["rate", "rx_rate", "rx_channels", "tx_rate", "tx_channels"]
+
+def parametrize_test_length(metafunc, test_length, fast_params, stress_params):
+ argnames = ["iterations", "duration"]
+
+ # select how long to run tests
+    if test_length in (Test_Length_Smoke, Test_Length_Full):
+ argvalues = [
+ # iterations duration test case ID
+ # ------------------------------------------------------------
+ pytest.param(fast_params.iterations, fast_params.duration, id="fast"),
+ ]
+    elif test_length == Test_Length_Stress:
+ argvalues = [
+ # iterations duration test case ID
+ # ----------------------------------------------------------
+ pytest.param(stress_params.iterations, stress_params.duration, id="stress"),
+ ]
+
+ metafunc.parametrize(argnames, argvalues)
+
+
+def generate_N310_test_cases(metafunc, test_length):
+ test_cases = [
+ # Test Lengths dual_10G rate rx_rate rx_channels tx_rate tx_channels test case ID
+ # ----------------------------------------------------------------------------------------------------------------------------------------------
+ [{}, pytest.param(False, 153.6e6, 153.6e6, "0", 0, "", id="1x10GbE-1xRX@153.6e6")],
+ [{}, pytest.param(False, 153.6e6, 153.6e6, "0,1", 0, "", id="1x10GbE-2xRX@153.6e6")],
+ [{}, pytest.param(False, 153.6e6, 0, "", 153.6e6, "0", id="1x10GbE-1xTX@153.6e6")],
+ [{}, pytest.param(False, 153.6e6, 0, "", 153.6e6, "0,1", id="1x10GbE-2xTX@153.6e6")],
+ [{}, pytest.param(False, 153.6e6, 153.6e6, "0", 153.6e6, "0", id="1x10GbE-1xTRX@153.6e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(False, 153.6e6, 153.6e6, "0,1", 153.6e6, "0,1", id="1x10GbE-2xTRX@153.6e6")],
+ [{}, pytest.param(False, 125e6, 125e6, "0,1", 125e6, "0,1", id="1x10GbE-2xTRX@125e6")],
+ [{}, pytest.param(False, 62.5e6, 62.5e6, "0,1,2,3", 0, "", id="1x10GbE-4xRX@62.5e6")],
+ [{}, pytest.param(False, 62.5e6, 0, "", 62.5e6, "0,1,2,3", id="1x10GbE-4xTX@62.5e6")],
+ [{Test_Length_Smoke, Test_Length_Stress}, pytest.param(False, 62.5e6, 62.5e6, "0,1,2,3", 62.5e6, "0,1,2,3", id="1x10GbE-4xTRX@62.5e6")],
+ [{}, pytest.param(True, 153.6e6, 153.6e6, "0,1", 0, "", id="2x10GbE-2xRX@153.6e6")],
+ [{}, pytest.param(True, 153.6e6, 0, "", 153.6e6, "0,1", id="2x10GbE-2xTX@153.6e6")],
+ [{}, pytest.param(True, 153.6e6, 153.6e6, "0,1", 153.6e6, "0,1", id="2x10GbE-2xTRX@153.6e6")],
+ [{}, pytest.param(True, 153.6e6, 153.6e6, "0,1,2,3", 0, "", id="2x10GbE-4xRX@153.6e6")],
+ [{}, pytest.param(True, 153.6e6, 0, "", 153.6e6, "0,1,2,3", id="2x10GbE-4xTX@153.6e6")],
+ [{}, pytest.param(True, 153.6e6, 153.6e6, "0,1,2,3", 153.6e6, "0,1,2,3", id="2x10GbE-4xTRX@153.6e6")],
+ [{}, pytest.param(True, 125e6, 125e6, "0,1,2,3", 0, "", id="2x10GbE-4xRX@125e6")],
+        [{}, pytest.param(True, 125e6, 0, "", 125e6, "0,1,2,3", id="2x10GbE-4xTX@125e6")],
+        [{Test_Length_Smoke, Test_Length_Stress}, pytest.param(True, 125e6, 125e6, "0,1,2,3", 125e6, "0,1,2,3", id="2x10GbE-4xTRX@125e6")],
+ ]
+
+ argvalues = test_length_utils.select_test_cases_by_length(test_length, test_cases)
+ metafunc.parametrize(ARGNAMES_DUAL_10G, argvalues)
+
+ fast_params = test_length_utils.test_length_params(iterations=10, duration=30)
+ stress_params = test_length_utils.test_length_params(iterations=2, duration=600)
+ parametrize_test_length(metafunc, test_length, fast_params, stress_params)
+
+
+def generate_N320_test_cases(metafunc, test_length):
+ test_cases = [
+ # Test Lengths dual_10G rate rx_rate rx_channels tx_rate tx_channels test case ID
+ # ---------------------------------------------------------------------------------------------------------------------------------------------
+ [{}, pytest.param(False, 250e6, 250e6, "0", 0, "", id="1x10GbE-1xRX@250e6")],
+ [{}, pytest.param(False, 250e6, 0, "", 250e6, "0", id="1x10GbE-1xTX@250e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(False, 250e6, 250e6, "0", 250e6, "0", id="1x10GbE-1xTRX@250e6")],
+ [{}, pytest.param(True, 250e6, 250e6, "0,1", 0, "", id="2x10GbE-2xRX@250e6")],
+ [{}, pytest.param(True, 250e6, 0, "", 250e6, "0,1", id="2x10GbE-2xTX@250e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(True, 250e6, 250e6, "0,1", 250e6, "0,1", id="2x10GbE-2xTRX@250e6")],
+ ]
+
+ argvalues = test_length_utils.select_test_cases_by_length(test_length, test_cases)
+ metafunc.parametrize(ARGNAMES_DUAL_10G, argvalues)
+
+ fast_params = test_length_utils.test_length_params(iterations=10, duration=30)
+ stress_params = test_length_utils.test_length_params(iterations=2, duration=600)
+ parametrize_test_length(metafunc, test_length, fast_params, stress_params)
+
+
+def generate_B210_test_cases(metafunc, test_length):
+ test_cases = [
+ # Test Lengths rate rx_rate rx_channels tx_rate tx_channels test case ID
+ # ------------------------------------------------------------------------------------------------------------------------------
+ [{}, pytest.param(61.44e6, 61.44e6, "0", 0, "", id="1xRX@61.44e6")],
+ [{}, pytest.param(30.72e6, 30.72e6, "0,1", 0, "", id="2xRX@30.72e6")],
+ [{}, pytest.param(61.44e6, 0, "", 61.44e6, "0", id="1xTX@61.44e6")],
+ [{}, pytest.param(30.72e6, 0, "", 30.72e6, "0,1", id="2xTX@30.72e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(30.72e6, 30.72e6, "0", 30.72e6, "0", id="1xTRX@30.72e6")],
+ [{}, pytest.param(15.36e6, 15.36e6, "0,1", 15.36e6, "0,1", id="2xTRX@15.36e6")],
+ ]
+
+ argvalues = test_length_utils.select_test_cases_by_length(test_length, test_cases)
+ metafunc.parametrize(ARGNAMES, argvalues)
+
+ fast_params = test_length_utils.test_length_params(iterations=10, duration=30)
+ stress_params = test_length_utils.test_length_params(iterations=2, duration=600)
+ parametrize_test_length(metafunc, test_length, fast_params, stress_params)
+
+
+def generate_E320_test_cases(metafunc, test_length):
+ test_cases = [
+ # Test Lengths rate rx_rate rx_channels tx_rate tx_channels test case ID
+ # ------------------------------------------------------------------------------------------------------------------------------
+ [{}, pytest.param(61.44e6, 61.44e6, "0", 0, "", id="1xRX@61.44e6")],
+ [{}, pytest.param(61.44e6, 61.44e6, "0,1", 0, "", id="2xRX@61.44e6")],
+ [{}, pytest.param(61.44e6, 0, "", 61.44e6, "0", id="1xTX@61.44e6")],
+ [{}, pytest.param(61.44e6, 0, "", 61.44e6, "0,1", id="2xTX@61.44e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(61.44e6, 61.44e6, "0", 61.44e6, "0", id="1xTRX@61.44e6")],
+ [{}, pytest.param(61.44e6, 61.44e6, "0,1", 61.44e6, "0,1", id="2xTRX@61.44e6")],
+ ]
+
+ argvalues = test_length_utils.select_test_cases_by_length(test_length, test_cases)
+ metafunc.parametrize(ARGNAMES, argvalues)
+
+ fast_params = test_length_utils.test_length_params(iterations=10, duration=30)
+ stress_params = test_length_utils.test_length_params(iterations=2, duration=600)
+ parametrize_test_length(metafunc, test_length, fast_params, stress_params)
+
+def generate_X310_test_cases(metafunc, test_length):
+ test_cases = [
+ # Test Lengths dual_10G rate rx_rate rx_channels tx_rate tx_channels test case ID
+ # ---------------------------------------------------------------------------------------------------------------------------------------------
+ [{}, pytest.param(False, 200e6, 200e6, "0", 0, "", id="1x10GbE-1xRX@200e6")],
+ [{}, pytest.param(False, 100e6, 100e6, "0,1", 0, "", id="1x10GbE-2xRX@100e6")],
+ [{}, pytest.param(False, 200e6, 0, "", 200e6, "0", id="1x10GbE-1xTX@200e6")],
+ [{}, pytest.param(False, 100e6, 0, "", 100e6, "0,1", id="1x10GbE-2xTX@100e6")],
+ [{}, pytest.param(False, 200e6, 200e6, "0", 200e6, "0", id="1x10GbE-1xTRX@200e6")],
+        [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(False, 100e6, 100e6, "0,1", 100e6, "0,1", id="1x10GbE-2xTRX@100e6")],
+ [{}, pytest.param(True, 200e6, 200e6, "0,1", 0, "", id="2x10GbE-2xRX@200e6")],
+ [{}, pytest.param(True, 200e6, 0, "", 200e6, "0,1", id="2x10GbE-2xTX@200e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(True, 200e6, 200e6, "0,1", 200e6, "0,1", id="2x10GbE-2xTRX@200e6")],
+ ]
+
+ argvalues = test_length_utils.select_test_cases_by_length(test_length, test_cases)
+ metafunc.parametrize(ARGNAMES_DUAL_10G, argvalues)
+
+ fast_params = test_length_utils.test_length_params(iterations=10, duration=60)
+ stress_params = test_length_utils.test_length_params(iterations=2, duration=600)
+ parametrize_test_length(metafunc, test_length, fast_params, stress_params)
+
+def generate_X310_TwinRx_test_cases(metafunc, test_length):
+ test_cases = [
+ # Test Lengths dual_10G rate rx_rate rx_channels tx_rate tx_channels test case ID
+ # --------------------------------------------------------------------------------------------------------------------------------------------
+ [{}, pytest.param(False, 100e6, 100e6, "0,1,2", 0, "", id="1x10GbE-3xRX@100e6")],
+        [{}, pytest.param(False, 50e6, 50e6, "0,1,2,3", 0, "", id="1x10GbE-4xRX@50e6")],
+        [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(True, 100e6, 100e6, "0,1,2,3", 0, "", id="2x10GbE-4xRX@100e6")],
+ ]
+
+ argvalues = test_length_utils.select_test_cases_by_length(test_length, test_cases)
+ metafunc.parametrize(ARGNAMES_DUAL_10G, argvalues)
+
+ fast_params = test_length_utils.test_length_params(iterations=10, duration=30)
+ stress_params = test_length_utils.test_length_params(iterations=2, duration=600)
+ parametrize_test_length(metafunc, test_length, fast_params, stress_params)
+
+def generate_X410_test_cases(metafunc, test_length):
+ test_cases = [
+ # Test Lengths dual_10G rate rx_rate rx_channels tx_rate tx_channels test case ID
+ # ------------------------------------------------------------------------------------------------------------------------------
+ [{}, pytest.param(False, 200e6, 200e6, "0", 0, "", id="1x10GbE-1xRX@200e6")],
+ [{}, pytest.param(False, 200e6, 100e6, "0,1", 0, "", id="1x10GbE-2xRX@100e6")],
+ [{}, pytest.param(False, 200e6, 0, "", 200e6, "0", id="1x10GbE-1xTX@200e6")],
+ [{}, pytest.param(False, 200e6, 0, "", 100e6, "0,1", id="1x10GbE-2xTX@100e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(False, 200e6, 200e6, "0", 200e6, "0", id="1x10GbE-1xTRX@200e6")],
+ [{}, pytest.param(False, 200e6, 100e6, "0,1", 100e6, "0,1", id="1x10GbE-2xTRX@100e6")],
+ [{}, pytest.param(True, 200e6, 200e6, "0,1", 0, "", id="2x10GbE-2xRX@200e6")],
+ [{}, pytest.param(True, 200e6, 0, "", 200e6, "0,1", id="2x10GbE-2xTX@200e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(True, 200e6, 100e6, "0,1", 100e6, "0,1", id="2x10GbE-2xTRX@100e6")],
+ ]
+
+ argvalues = test_length_utils.select_test_cases_by_length(test_length, test_cases)
+ metafunc.parametrize(ARGNAMES_DUAL_10G, argvalues)
+
+ fast_params = test_length_utils.test_length_params(iterations=10, duration=60)
+ stress_params = test_length_utils.test_length_params(iterations=2, duration=600)
+ parametrize_test_length(metafunc, test_length, fast_params, stress_params)
+
+
+def pytest_generate_tests(metafunc):
+ dut_type = metafunc.config.getoption("dut_type")
+ test_length = metafunc.config.getoption("test_length")
+
+ metafunc.parametrize("dut_type", [dut_type])
+
+    if dut_type.lower() != "b210":
+        argvalues_DPDK = [
+            # use_dpdk    test case ID    marks
+            pytest.param(True, id="DPDK", marks=pytest.mark.dpdk),
+            pytest.param(False, id="NO DPDK")
+        ]
+        metafunc.parametrize("use_dpdk", argvalues_DPDK)
+    else:
+        # B210 is a USB device, so DPDK does not apply; still parametrize a
+        # fixed value so test_streaming's use_dpdk argument is provided
+        metafunc.parametrize("use_dpdk", [pytest.param(False, id="NO DPDK")])
+
+ if dut_type.lower() == 'n310':
+ generate_N310_test_cases(metafunc, test_length)
+ elif dut_type.lower() == 'n320':
+ generate_N320_test_cases(metafunc, test_length)
+ elif dut_type.lower() == 'b210':
+ generate_B210_test_cases(metafunc, test_length)
+ elif dut_type.lower() == 'e320':
+ generate_E320_test_cases(metafunc, test_length)
+ elif dut_type.lower() == 'x310':
+ generate_X310_test_cases(metafunc, test_length)
+ elif dut_type.lower() == 'x310_twinrx':
+ generate_X310_TwinRx_test_cases(metafunc, test_length)
+    elif dut_type.lower() == 'x410':
+        generate_X410_test_cases(metafunc, test_length)
+
+    # B210 and E320 cases are parametrized with ARGNAMES (no dual_10G), so
+    # give that argument a fixed value to satisfy test_streaming's signature
+    if dut_type.lower() in ('b210', 'e320'):
+        metafunc.parametrize("dual_10G", [False])
+
+
+def test_streaming(pytestconfig, dut_type, use_dpdk, dual_10G, rate, rx_rate, rx_channels,
+ tx_rate, tx_channels, iterations, duration):
+
+ benchmark_rate_path = Path(pytestconfig.getoption('uhd_build_dir')) / 'examples/benchmark_rate'
+
+ # construct device args string
+ device_args = f"master_clock_rate={rate},"
+
+ if dut_type == "B210":
+ device_args += f"name={pytestconfig.getoption('name')},"
+ else:
+ device_args += f"addr={pytestconfig.getoption('addr')},"
+
+ if dual_10G:
+ device_args += f"second_addr={pytestconfig.getoption('second_addr')},"
+
+ if use_dpdk:
+ device_args += f"use_dpdk=1,mgmt_addr={pytestconfig.getoption('mgmt_addr')}"
+
+ # construct benchmark_rate params dictionary
+ benchmark_rate_params = {
+ "args": device_args,
+ "duration": duration,
+ }
+
+ if rx_channels:
+ benchmark_rate_params["rx_rate"] = rx_rate
+ benchmark_rate_params["rx_channels"] = rx_channels
+
+ if tx_channels:
+ benchmark_rate_params["tx_rate"] = tx_rate
+ benchmark_rate_params["tx_channels"] = tx_channels
+
+ # run benchmark rate
+ print()
+ results = batch_run_benchmark_rate.run(benchmark_rate_path, iterations, benchmark_rate_params)
+ stats = batch_run_benchmark_rate.calculate_stats(results)
+ print(batch_run_benchmark_rate.get_summary_string(stats, iterations, benchmark_rate_params))
+
+ # compare results against thresholds
+ dropped_samps_threshold = 0
+ overruns_threshold = 2
+ rx_timeouts_threshold = 0
+ rx_seq_err_threshold = 0
+
+ underruns_threshold = 2
+ tx_timeouts_threshold = 0
+ tx_seq_err_threshold = 0
+
+ late_cmds_threshold = 0
+
+ # TODO: define custom failed assertion explanations to avoid extra output
+ # https://docs.pytest.org/en/6.2.x/assert.html#defining-your-own-explanation-for-failed-assertions
+
+ if rx_channels:
+ assert stats.avg_vals.dropped_samps <= dropped_samps_threshold, \
+ f"""Number of dropped samples exceeded threshold.
+ Expected dropped samples: <= {dropped_samps_threshold}
+ Actual dropped samples: {stats.avg_vals.dropped_samps}"""
+ assert stats.avg_vals.overruns <= overruns_threshold, \
+ f"""Number of overruns exceeded threshold.
+ Expected overruns: <= {overruns_threshold}
+ Actual overruns: {stats.avg_vals.overruns}"""
+ assert stats.avg_vals.rx_timeouts <= rx_timeouts_threshold, \
+ f"""Number of rx timeouts exceeded threshold.
+ Expected rx timeouts: <= {rx_timeouts_threshold}
+ Actual rx timeouts: {stats.avg_vals.rx_timeouts}"""
+ assert stats.avg_vals.rx_seq_errs <= rx_seq_err_threshold, \
+ f"""Number of rx sequence errors exceeded threshold.
+ Expected rx sequence errors: <= {rx_seq_err_threshold}
+ Actual rx sequence errors: {stats.avg_vals.rx_seq_errs}"""
+
+ if tx_channels:
+ assert stats.avg_vals.underruns <= underruns_threshold, \
+ f"""Number of underruns exceeded threshold.
+ Expected underruns: <= {underruns_threshold}
+ Actual underruns: {stats.avg_vals.underruns}"""
+ assert stats.avg_vals.tx_timeouts <= tx_timeouts_threshold, \
+ f"""Number of tx timeouts exceeded threshold.
+ Expected tx timeouts: <= {tx_timeouts_threshold}
+ Actual tx timeouts: {stats.avg_vals.tx_timeouts}"""
+ assert stats.avg_vals.tx_seq_errs <= tx_seq_err_threshold, \
+ f"""Number of tx sequence errors exceeded threshold.
+ Expected tx sequence errors: <= {tx_seq_err_threshold}
+ Actual tx sequence errors: {stats.avg_vals.tx_seq_errs}"""
+
+ assert stats.avg_vals.late_cmds <= late_cmds_threshold, \
+ f"""Number of late commands exceeded threshold.
+ Expected late commands: <= {late_cmds_threshold}
+ Actual late commands: {stats.avg_vals.late_cmds}"""
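
test_streaming relies on three functions from batch_run_benchmark_rate (run,
calculate_stats, and get_summary_string), a helper that is not part of this
patch. As a rough sketch of the assumed contract, run() presumably maps the
params dictionary onto benchmark_rate's command-line flags (--args,
--duration, --rx_rate, --rx_channels, --tx_rate, and --tx_channels are all
real benchmark_rate options) and invokes the example repeatedly. A
hypothetical stand-in, for illustration only:

    import subprocess

    def run(benchmark_rate_path, iterations, params):
        """Run the benchmark_rate example `iterations` times, collecting stdout."""
        cmd = [str(benchmark_rate_path)]
        for key, value in params.items():
            # e.g. {"args": "addr=...", "duration": 30}
            #   -> ["--args=addr=...", "--duration=30"]
            cmd.append(f"--{key}={value}")
        return [subprocess.run(cmd, capture_output=True, text=True, check=False).stdout
                for _ in range(iterations)]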