 .ci/templates/job-uhd-streaming-tests-beauty.yml |  25 +
 .ci/templates/job-uhd-streaming-tests.yml        |  83 ++
 .ci/templates/stages-uhd-pipeline.yml            |  16 +
 .ci/uhd-pipeline-pr.yml                          |  11 +
 .ci/uhd-pipeline.yml                             |  11 +
 host/tests/pytests/conftest.py                   |  59 ++
 host/tests/pytests/test_length_utils.py          |  20 +
 host/tests/pytests/test_streaming.py             | 303 +++
 8 files changed, 528 insertions(+), 0 deletions(-)
diff --git a/.ci/templates/job-uhd-streaming-tests-beauty.yml b/.ci/templates/job-uhd-streaming-tests-beauty.yml
new file mode 100644
index 000000000..0f9065830
--- /dev/null
+++ b/.ci/templates/job-uhd-streaming-tests-beauty.yml
@@ -0,0 +1,25 @@
+parameters:
+- name: testOS
+ type: string
+ values:
+ - ubuntu2004
+- name: uhdSrcDir
+ type: string
+- name: testLength
+ type: string
+
+jobs:
+- template: job-uhd-streaming-tests.yml
+ parameters:
+ suiteName: 'beauty'
+ testOS: '${{ parameters.testOS }}'
+ testLength: '${{ parameters.testLength }}'
+ toolset: 'make'
+ uhdSrcDir: '${{ parameters.uhdSrcDir }}'
+ dutMatrix:
+ beauty-N320-0 XG:
+ dutName: 'beauty-N320-0'
+ dutType: 'N320'
+ dutAddr: '192.168.10.2'
+ dutSecondAddr: '192.168.20.2'
+ dutMgmtAddr: '10.0.57.13'
diff --git a/.ci/templates/job-uhd-streaming-tests.yml b/.ci/templates/job-uhd-streaming-tests.yml
new file mode 100644
index 000000000..9a9b3168d
--- /dev/null
+++ b/.ci/templates/job-uhd-streaming-tests.yml
@@ -0,0 +1,83 @@
+parameters:
+- name: suiteName
+ type: string
+- name: testOS
+ type: string
+ values:
+ - ubuntu2004
+- name: toolset
+ type: string
+ values:
+ - make
+- name: uhdSrcDir
+ type: string
+- name: dutMatrix
+ type: object
+- name: testLength
+ type: string
+ values:
+ - 'smoke'
+ - 'full'
+ - 'stress'
+
+jobs:
+- job: uhd_streaming_tests_${{ parameters.suiteName }}
+ displayName: uhd streaming tests ${{ parameters.suiteName }}
+ timeoutInMinutes: 180
+ pool:
+ name: de-dre-lab
+ demands:
+ - suiteName -equals ${{ parameters.suiteName }}
+ strategy:
+ matrix: ${{ parameters.dutMatrix }}
+ workspace:
+ clean: outputs
+ steps:
+ - checkout: self
+ clean: true
+ - download: current
+ artifact: ${{ parameters.testOS }}-${{ parameters.toolset }}
+ displayName: download pipeline artifact ${{ parameters.testOS }}-${{ parameters.toolset }}
+ - task: ExtractFiles@1
+ inputs:
+ archiveFilePatterns: $(Pipeline.Workspace)/${{ parameters.testOS }}-${{ parameters.toolset }}/${{ parameters.testOS }}-${{ parameters.toolset }}.tar.gz
+ destinationFolder: $(Build.BinariesDirectory)
+ cleanDestinationFolder: true
+ - script: |
+ cd ${{ parameters.uhdSrcDir }}/host/tests/streaming_performance
+ sudo ./setup.sh --auto
+ sleep 5
+ displayName: setup interfaces for use without DPDK
+ - script: |
+ set -x
+ export PYTHONPATH=${{ parameters.uhdSrcDir }}/host/tests/streaming_performance
+ cd ${{ parameters.uhdSrcDir }}/host/tests/pytests
+ python3 -m pytest -s test_streaming.py -m "not dpdk" --dut_type $(dutType) --test_length ${{ parameters.testLength }} \
+ --addr $(dutAddr) --second_addr $(dutSecondAddr) --mgmt_addr $(dutMgmtAddr) \
+ --uhd_build_dir $(Build.BinariesDirectory)/uhddev/build --junitxml $(Common.TestResultsDirectory)/TEST-$(dutName).xml
+ continueOnError: true
+ displayName: Run streaming tests on $(dutName)
+ - script: |
+ cd ${{ parameters.uhdSrcDir }}/host/tests/streaming_performance
+ sudo ./setup.sh --auto --dpdk
+ sleep 5
+ displayName: setup interfaces for use with DPDK
+ - script: |
+ set -x
+ export PYTHONPATH=${{ parameters.uhdSrcDir }}/host/tests/streaming_performance
+ cd ${{ parameters.uhdSrcDir }}/host/tests/pytests
+ sudo --preserve-env=PYTHONPATH python3 -m pytest -s test_streaming.py -m "dpdk" --dut_type $(dutType) --test_length ${{ parameters.testLength }} \
+ --addr $(dutAddr) --second_addr $(dutSecondAddr) --mgmt_addr $(dutMgmtAddr) \
+ --uhd_build_dir $(Build.BinariesDirectory)/uhddev/build --junitxml $(Common.TestResultsDirectory)/TEST-$(dutName)-dpdk.xml
+ continueOnError: true
+ displayName: Run streaming tests with DPDK on $(dutName)
+ - task: PublishTestResults@2
+ inputs:
+ testResultsFormat: 'JUnit'
+ testResultsFiles: '$(Common.TestResultsDirectory)/TEST-*.xml'
+ testRunTitle: $(dutName) streaming tests
+ buildConfiguration: 'Release'
+ mergeTestResults: true
+ failTaskOnFailedTests: false
+ displayName: Upload streaming test results
+
diff --git a/.ci/templates/stages-uhd-pipeline.yml b/.ci/templates/stages-uhd-pipeline.yml
index f5ff5c227..a67dbed97 100644
--- a/.ci/templates/stages-uhd-pipeline.yml
+++ b/.ci/templates/stages-uhd-pipeline.yml
@@ -14,6 +14,11 @@ parameters:
- name: release_binaries
type: boolean
default: false
+- name: testLength
+ type: string
+- name: run_streaming_tests
+ type: boolean
+ default: false
variables:
- template: ../uhd-pipeline-vars.yml
@@ -71,3 +76,14 @@ stages:
parameters:
testOS: ubuntu2004
uhdSrcDir: $(Build.SourcesDirectory)
+
+- stage: test_streaming_stage
+ displayName: Test UHD Streaming
+ dependsOn: build_uhd_stage
+ condition: and(succeeded('build_uhd_stage'), ${{ parameters.run_streaming_tests }})
+ jobs:
+ - template: job-uhd-streaming-tests-beauty.yml
+ parameters:
+ testOS: ubuntu2004
+ uhdSrcDir: $(Build.SourcesDirectory)
+      testLength: ${{ parameters.testLength }}
\ No newline at end of file
diff --git a/.ci/uhd-pipeline-pr.yml b/.ci/uhd-pipeline-pr.yml
index 9887214bf..2c1c7d247 100644
--- a/.ci/uhd-pipeline-pr.yml
+++ b/.ci/uhd-pipeline-pr.yml
@@ -14,6 +14,15 @@ parameters:
- name: release_binaries
type: boolean
default: false
+- name: testLength
+ type: string
+ values:
+ - 'smoke'
+ - 'full'
+ - 'stress'
+- name: run_streaming_tests
+ type: boolean
+  default: false
trigger: none
@@ -38,3 +47,5 @@ extends:
custom_boost_version: ${{ parameters.custom_boost_version }}
custom_boost_version_url: ${{ parameters.custom_boost_version_url }}
release_binaries: ${{ parameters.release_binaries }}
+ testLength: ${{ parameters.testLength }}
+ run_streaming_tests: ${{ parameters.run_streaming_tests }}
diff --git a/.ci/uhd-pipeline.yml b/.ci/uhd-pipeline.yml
index 8ea2eb4f5..b2ffffc6f 100644
--- a/.ci/uhd-pipeline.yml
+++ b/.ci/uhd-pipeline.yml
@@ -8,6 +8,15 @@ parameters:
- name: release_binaries
type: boolean
default: false
+- name: testLength
+ type: string
+ values:
+ - 'smoke'
+ - 'full'
+ - 'stress'
+- name: run_streaming_tests
+ type: boolean
+  default: false
trigger:
batch: true
@@ -31,3 +40,5 @@ extends:
template: templates/stages-uhd-pipeline.yml
parameters:
release_binaries: ${{ parameters.release_binaries }}
+ testLength: ${{ parameters.testLength }}
+ run_streaming_tests: ${{ parameters.run_streaming_tests }}
diff --git a/host/tests/pytests/conftest.py b/host/tests/pytests/conftest.py
new file mode 100644
index 000000000..dfca5d69f
--- /dev/null
+++ b/host/tests/pytests/conftest.py
@@ -0,0 +1,59 @@
+import test_length_utils
+
+dut_type_list = [
+ "N310",
+ "N320",
+ "B210",
+ "E320",
+ "X310",
+ "X310_TwinRx",
+ "X410"
+]
+
+
+test_length_list = [
+ test_length_utils.Test_Length_Smoke,
+ test_length_utils.Test_Length_Full,
+ test_length_utils.Test_Length_Stress
+]
+
+
+def pytest_addoption(parser):
+ parser.addoption(
+ "--addr",
+ type=str,
+ help="address of first 10 GbE interface",)
+ parser.addoption(
+ "--second_addr",
+ type=str,
+ help="address of second 10 GbE interface")
+ parser.addoption(
+ "--name",
+ type=str,
+ help="name of B2xx device")
+ parser.addoption(
+ "--mgmt_addr",
+ type=str,
+        help="address of management interface; only needed for DPDK test cases")
+ parser.addoption(
+ "--dut_type",
+ type=str,
+ required=True,
+ choices=dut_type_list,
+        help="type of device under test")
+ parser.addoption(
+ "--test_length",
+ type=str,
+ default=test_length_utils.Test_Length_Full,
+ choices=test_length_list,
+        help="length of test run: smoke, full, or stress")
+ parser.addoption(
+ "--uhd_build_dir",
+ required=True,
+ type=str,
+        help="path to UHD build directory")
+
+
+def pytest_configure(config):
+ # register additional markers
+    config.addinivalue_line("markers", "dpdk: run with DPDK enabled")
diff --git a/host/tests/pytests/test_length_utils.py b/host/tests/pytests/test_length_utils.py
new file mode 100644
index 000000000..db4eeab13
--- /dev/null
+++ b/host/tests/pytests/test_length_utils.py
@@ -0,0 +1,20 @@
+from collections import namedtuple
+import pytest
+
+# This provides a way to run a quick smoke test for PRs, a more exhaustive set of
+# tests for nightly runs, and long-running tests for weekend stress testing.
+#
+# smoke: subset of tests, short duration
+# full: all test cases, short duration
+# stress: subset of tests, long duration
+Test_Length_Smoke = "smoke"
+Test_Length_Full = "full"
+Test_Length_Stress = "stress"
+
+test_length_params = namedtuple('test_length_params', 'iterations duration')
+
+def select_test_cases_by_length(test_length, test_cases):
+ if test_length == Test_Length_Full:
+ return [test_case[1] for test_case in test_cases]
+ else:
+        return [test_case[1] for test_case in test_cases if test_length in test_case[0]]
\ No newline at end of file
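
For context, a minimal sketch (Python, with a hypothetical case list) of how select_test_cases_by_length above behaves with the set-tagged case lists that test_streaming.py below passes in: "full" keeps every case, while "smoke" and "stress" keep only the cases whose leading set names that length.

    import pytest
    import test_length_utils
    from test_length_utils import Test_Length_Smoke, Test_Length_Stress

    # Hypothetical case list: the first element is the set of non-"full" test
    # lengths the case should also run in; the second is the pytest.param that
    # ends up in metafunc.parametrize().
    example_cases = [
        [{}, pytest.param(10e6, id="light-case")],
        [{Test_Length_Smoke, Test_Length_Stress}, pytest.param(100e6, id="heavy-case")],
    ]

    # "full" keeps everything; "smoke" keeps only cases tagged with Test_Length_Smoke.
    assert len(test_length_utils.select_test_cases_by_length("full", example_cases)) == 2
    assert len(test_length_utils.select_test_cases_by_length("smoke", example_cases)) == 1
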
diff --git a/host/tests/pytests/test_streaming.py b/host/tests/pytests/test_streaming.py
new file mode 100644
index 000000000..a7e26354a
--- /dev/null
+++ b/host/tests/pytests/test_streaming.py
@@ -0,0 +1,303 @@
+import pytest
+from pathlib import Path
+import batch_run_benchmark_rate
+import test_length_utils
+from test_length_utils import Test_Length_Smoke, Test_Length_Full, Test_Length_Stress
+
+ARGNAMES_DUAL_10G = ["dual_10G", "rate", "rx_rate", "rx_channels", "tx_rate", "tx_channels"]
+ARGNAMES = ["rate", "rx_rate", "rx_channels", "tx_rate", "tx_channels"]
+
+def parametrize_test_length(metafunc, test_length, fast_params, stress_params):
+ argnames = ["iterations", "duration"]
+
+ # select how long to run tests
+ if(test_length == Test_Length_Smoke or test_length == Test_Length_Full):
+ argvalues = [
+ # iterations duration test case ID
+ # ------------------------------------------------------------
+ pytest.param(fast_params.iterations, fast_params.duration, id="fast"),
+ ]
+ elif(test_length == Test_Length_Stress):
+ argvalues = [
+ # iterations duration test case ID
+ # ----------------------------------------------------------
+ pytest.param(stress_params.iterations, stress_params.duration, id="stress"),
+ ]
+
+ metafunc.parametrize(argnames, argvalues)
+
+
+def generate_N310_test_cases(metafunc, test_length):
+ test_cases = [
+ # Test Lengths dual_10G rate rx_rate rx_channels tx_rate tx_channels test case ID
+ # ----------------------------------------------------------------------------------------------------------------------------------------------
+ [{}, pytest.param(False, 153.6e6, 153.6e6, "0", 0, "", id="1x10GbE-1xRX@153.6e6")],
+ [{}, pytest.param(False, 153.6e6, 153.6e6, "0,1", 0, "", id="1x10GbE-2xRX@153.6e6")],
+ [{}, pytest.param(False, 153.6e6, 0, "", 153.6e6, "0", id="1x10GbE-1xTX@153.6e6")],
+ [{}, pytest.param(False, 153.6e6, 0, "", 153.6e6, "0,1", id="1x10GbE-2xTX@153.6e6")],
+ [{}, pytest.param(False, 153.6e6, 153.6e6, "0", 153.6e6, "0", id="1x10GbE-1xTRX@153.6e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(False, 153.6e6, 153.6e6, "0,1", 153.6e6, "0,1", id="1x10GbE-2xTRX@153.6e6")],
+ [{}, pytest.param(False, 125e6, 125e6, "0,1", 125e6, "0,1", id="1x10GbE-2xTRX@125e6")],
+ [{}, pytest.param(False, 62.5e6, 62.5e6, "0,1,2,3", 0, "", id="1x10GbE-4xRX@62.5e6")],
+ [{}, pytest.param(False, 62.5e6, 0, "", 62.5e6, "0,1,2,3", id="1x10GbE-4xTX@62.5e6")],
+ [{Test_Length_Smoke, Test_Length_Stress}, pytest.param(False, 62.5e6, 62.5e6, "0,1,2,3", 62.5e6, "0,1,2,3", id="1x10GbE-4xTRX@62.5e6")],
+ [{}, pytest.param(True, 153.6e6, 153.6e6, "0,1", 0, "", id="2x10GbE-2xRX@153.6e6")],
+ [{}, pytest.param(True, 153.6e6, 0, "", 153.6e6, "0,1", id="2x10GbE-2xTX@153.6e6")],
+ [{}, pytest.param(True, 153.6e6, 153.6e6, "0,1", 153.6e6, "0,1", id="2x10GbE-2xTRX@153.6e6")],
+ [{}, pytest.param(True, 153.6e6, 153.6e6, "0,1,2,3", 0, "", id="2x10GbE-4xRX@153.6e6")],
+ [{}, pytest.param(True, 153.6e6, 0, "", 153.6e6, "0,1,2,3", id="2x10GbE-4xTX@153.6e6")],
+ [{}, pytest.param(True, 153.6e6, 153.6e6, "0,1,2,3", 153.6e6, "0,1,2,3", id="2x10GbE-4xTRX@153.6e6")],
+ [{}, pytest.param(True, 125e6, 125e6, "0,1,2,3", 0, "", id="2x10GbE-4xRX@125e6")],
+        [{},                                      pytest.param(True,  125e6,   0,       "",          125e6,   "0,1,2,3",   id="2x10GbE-4xTX@125e6")],
+        [{Test_Length_Smoke, Test_Length_Stress}, pytest.param(True,  125e6,   125e6,   "0,1,2,3",   125e6,   "0,1,2,3",   id="2x10GbE-4xTRX@125e6")],
+ ]
+
+ argvalues = test_length_utils.select_test_cases_by_length(test_length, test_cases)
+ metafunc.parametrize(ARGNAMES_DUAL_10G, argvalues)
+
+ fast_params = test_length_utils.test_length_params(iterations=10, duration=30)
+ stress_params = test_length_utils.test_length_params(iterations=2, duration=600)
+ parametrize_test_length(metafunc, test_length, fast_params, stress_params)
+
+
+def generate_N320_test_cases(metafunc, test_length):
+ test_cases = [
+ # Test Lengths dual_10G rate rx_rate rx_channels tx_rate tx_channels test case ID
+ # ---------------------------------------------------------------------------------------------------------------------------------------------
+ [{}, pytest.param(False, 250e6, 250e6, "0", 0, "", id="1x10GbE-1xRX@250e6")],
+ [{}, pytest.param(False, 250e6, 0, "", 250e6, "0", id="1x10GbE-1xTX@250e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(False, 250e6, 250e6, "0", 250e6, "0", id="1x10GbE-1xTRX@250e6")],
+ [{}, pytest.param(True, 250e6, 250e6, "0,1", 0, "", id="2x10GbE-2xRX@250e6")],
+ [{}, pytest.param(True, 250e6, 0, "", 250e6, "0,1", id="2x10GbE-2xTX@250e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(True, 250e6, 250e6, "0,1", 250e6, "0,1", id="2x10GbE-2xTRX@250e6")],
+ ]
+
+ argvalues = test_length_utils.select_test_cases_by_length(test_length, test_cases)
+ metafunc.parametrize(ARGNAMES_DUAL_10G, argvalues)
+
+ fast_params = test_length_utils.test_length_params(iterations=10, duration=30)
+ stress_params = test_length_utils.test_length_params(iterations=2, duration=600)
+ parametrize_test_length(metafunc, test_length, fast_params, stress_params)
+
+
+def generate_B210_test_cases(metafunc, test_length):
+ test_cases = [
+ # Test Lengths rate rx_rate rx_channels tx_rate tx_channels test case ID
+ # ------------------------------------------------------------------------------------------------------------------------------
+ [{}, pytest.param(61.44e6, 61.44e6, "0", 0, "", id="1xRX@61.44e6")],
+ [{}, pytest.param(30.72e6, 30.72e6, "0,1", 0, "", id="2xRX@30.72e6")],
+ [{}, pytest.param(61.44e6, 0, "", 61.44e6, "0", id="1xTX@61.44e6")],
+ [{}, pytest.param(30.72e6, 0, "", 30.72e6, "0,1", id="2xTX@30.72e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(30.72e6, 30.72e6, "0", 30.72e6, "0", id="1xTRX@30.72e6")],
+ [{}, pytest.param(15.36e6, 15.36e6, "0,1", 15.36e6, "0,1", id="2xTRX@15.36e6")],
+ ]
+
+ argvalues = test_length_utils.select_test_cases_by_length(test_length, test_cases)
+ metafunc.parametrize(ARGNAMES, argvalues)
+
+ fast_params = test_length_utils.test_length_params(iterations=10, duration=30)
+ stress_params = test_length_utils.test_length_params(iterations=2, duration=600)
+ parametrize_test_length(metafunc, test_length, fast_params, stress_params)
+
+
+def generate_E320_test_cases(metafunc, test_length):
+ test_cases = [
+ # Test Lengths rate rx_rate rx_channels tx_rate tx_channels test case ID
+ # ------------------------------------------------------------------------------------------------------------------------------
+ [{}, pytest.param(61.44e6, 61.44e6, "0", 0, "", id="1xRX@61.44e6")],
+ [{}, pytest.param(61.44e6, 61.44e6, "0,1", 0, "", id="2xRX@61.44e6")],
+ [{}, pytest.param(61.44e6, 0, "", 61.44e6, "0", id="1xTX@61.44e6")],
+ [{}, pytest.param(61.44e6, 0, "", 61.44e6, "0,1", id="2xTX@61.44e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(61.44e6, 61.44e6, "0", 61.44e6, "0", id="1xTRX@61.44e6")],
+ [{}, pytest.param(61.44e6, 61.44e6, "0,1", 61.44e6, "0,1", id="2xTRX@61.44e6")],
+
+ ]
+
+ argvalues = test_length_utils.select_test_cases_by_length(test_length, test_cases)
+ metafunc.parametrize(ARGNAMES, argvalues)
+
+ fast_params = test_length_utils.test_length_params(iterations=10, duration=30)
+ stress_params = test_length_utils.test_length_params(iterations=2, duration=600)
+ parametrize_test_length(metafunc, test_length, fast_params, stress_params)
+
+def generate_X310_test_cases(metafunc, test_length):
+ test_cases = [
+ # Test Lengths dual_10G rate rx_rate rx_channels tx_rate tx_channels test case ID
+ # ---------------------------------------------------------------------------------------------------------------------------------------------
+ [{}, pytest.param(False, 200e6, 200e6, "0", 0, "", id="1x10GbE-1xRX@200e6")],
+ [{}, pytest.param(False, 100e6, 100e6, "0,1", 0, "", id="1x10GbE-2xRX@100e6")],
+ [{}, pytest.param(False, 200e6, 0, "", 200e6, "0", id="1x10GbE-1xTX@200e6")],
+ [{}, pytest.param(False, 100e6, 0, "", 100e6, "0,1", id="1x10GbE-2xTX@100e6")],
+ [{}, pytest.param(False, 200e6, 200e6, "0", 200e6, "0", id="1x10GbE-1xTRX@200e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(False, 100e6, 100e6, "0,1", 100e6, "0", id="1x10GbE-2xTRX@100e6")],
+ [{}, pytest.param(True, 200e6, 200e6, "0,1", 0, "", id="2x10GbE-2xRX@200e6")],
+ [{}, pytest.param(True, 200e6, 0, "", 200e6, "0,1", id="2x10GbE-2xTX@200e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(True, 200e6, 200e6, "0,1", 200e6, "0,1", id="2x10GbE-2xTRX@200e6")],
+ ]
+
+ argvalues = test_length_utils.select_test_cases_by_length(test_length, test_cases)
+ metafunc.parametrize(ARGNAMES_DUAL_10G, argvalues)
+
+ fast_params = test_length_utils.test_length_params(iterations=10, duration=60)
+ stress_params = test_length_utils.test_length_params(iterations=2, duration=600)
+ parametrize_test_length(metafunc, test_length, fast_params, stress_params)
+
+def generate_X310_TwinRx_test_cases(metafunc, test_length):
+ test_cases = [
+ # Test Lengths dual_10G rate rx_rate rx_channels tx_rate tx_channels test case ID
+ # --------------------------------------------------------------------------------------------------------------------------------------------
+ [{}, pytest.param(False, 100e6, 100e6, "0,1,2", 0, "", id="1x10GbE-3xRX@100e6")],
+        [{},                                      pytest.param(False, 50e6,    50e6,    "0,1,2,3",   0,       "",          id="1x10GbE-4xRX@50e6")],
+        [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(True,  100e6,   100e6,   "0,1,2,3",   0,       "",          id="2x10GbE-4xRX@100e6")],
+ ]
+
+ argvalues = test_length_utils.select_test_cases_by_length(test_length, test_cases)
+ metafunc.parametrize(ARGNAMES_DUAL_10G, argvalues)
+
+ fast_params = test_length_utils.test_length_params(iterations=10, duration=30)
+ stress_params = test_length_utils.test_length_params(iterations=2, duration=600)
+ parametrize_test_length(metafunc, test_length, fast_params, stress_params)
+
+def generate_X410_test_cases(metafunc, test_length):
+ test_cases = [
+ # Test Lengths dual_10G rate rx_rate rx_channels tx_rate tx_channels test case ID
+ # ------------------------------------------------------------------------------------------------------------------------------
+ [{}, pytest.param(False, 200e6, 200e6, "0", 0, "", id="1x10GbE-1xRX@200e6")],
+ [{}, pytest.param(False, 200e6, 100e6, "0,1", 0, "", id="1x10GbE-2xRX@100e6")],
+ [{}, pytest.param(False, 200e6, 0, "", 200e6, "0", id="1x10GbE-1xTX@200e6")],
+ [{}, pytest.param(False, 200e6, 0, "", 100e6, "0,1", id="1x10GbE-2xTX@100e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(False, 200e6, 200e6, "0", 200e6, "0", id="1x10GbE-1xTRX@200e6")],
+ [{}, pytest.param(False, 200e6, 100e6, "0,1", 100e6, "0,1", id="1x10GbE-2xTRX@100e6")],
+ [{}, pytest.param(True, 200e6, 200e6, "0,1", 0, "", id="2x10GbE-2xRX@200e6")],
+ [{}, pytest.param(True, 200e6, 0, "", 200e6, "0,1", id="2x10GbE-2xTX@200e6")],
+ [{Test_Length_Stress, Test_Length_Smoke}, pytest.param(True, 200e6, 100e6, "0,1", 100e6, "0,1", id="2x10GbE-2xTRX@100e6")],
+ ]
+
+ argvalues = test_length_utils.select_test_cases_by_length(test_length, test_cases)
+ metafunc.parametrize(ARGNAMES_DUAL_10G, argvalues)
+
+ fast_params = test_length_utils.test_length_params(iterations=10, duration=60)
+ stress_params = test_length_utils.test_length_params(iterations=2, duration=600)
+ parametrize_test_length(metafunc, test_length, fast_params, stress_params)
+
+
+def pytest_generate_tests(metafunc):
+ dut_type = metafunc.config.getoption("dut_type")
+ test_length = metafunc.config.getoption("test_length")
+
+ metafunc.parametrize("dut_type", [dut_type])
+
+ if dut_type.lower() != "b210":
+ argvalues_DPDK = [
+ # use_dpdk test case ID marks
+ pytest.param(True, id="DPDK", marks=pytest.mark.dpdk),
+ pytest.param(False, id="NO DPDK",)
+ ]
+ metafunc.parametrize("use_dpdk", argvalues_DPDK)
+
+ if dut_type.lower() == 'n310':
+ generate_N310_test_cases(metafunc, test_length)
+ elif dut_type.lower() == 'n320':
+ generate_N320_test_cases(metafunc, test_length)
+ elif dut_type.lower() == 'b210':
+ generate_B210_test_cases(metafunc, test_length)
+ elif dut_type.lower() == 'e320':
+ generate_E320_test_cases(metafunc, test_length)
+ elif dut_type.lower() == 'x310':
+ generate_X310_test_cases(metafunc, test_length)
+ elif dut_type.lower() == 'x310_twinrx':
+ generate_X310_TwinRx_test_cases(metafunc, test_length)
+ elif dut_type.lower() == 'x410':
+ generate_X410_test_cases(metafunc, test_length)
+
+
+def test_streaming(pytestconfig, dut_type, use_dpdk, dual_10G, rate, rx_rate, rx_channels,
+ tx_rate, tx_channels, iterations, duration):
+
+ benchmark_rate_path = Path(pytestconfig.getoption('uhd_build_dir')) / 'examples/benchmark_rate'
+
+ # construct device args string
+ device_args = f"master_clock_rate={rate},"
+
+ if dut_type == "B210":
+ device_args += f"name={pytestconfig.getoption('name')},"
+ else:
+ device_args += f"addr={pytestconfig.getoption('addr')},"
+
+ if dual_10G:
+ device_args += f"second_addr={pytestconfig.getoption('second_addr')},"
+
+ if use_dpdk:
+ device_args += f"use_dpdk=1,mgmt_addr={pytestconfig.getoption('mgmt_addr')}"
+
+ # construct benchmark_rate params dictionary
+ benchmark_rate_params = {
+ "args": device_args,
+ "duration": duration,
+ }
+
+ if rx_channels:
+ benchmark_rate_params["rx_rate"] = rx_rate
+ benchmark_rate_params["rx_channels"] = rx_channels
+
+ if tx_channels:
+ benchmark_rate_params["tx_rate"] = tx_rate
+ benchmark_rate_params["tx_channels"] = tx_channels
+
+ # run benchmark rate
+ print()
+ results = batch_run_benchmark_rate.run(benchmark_rate_path, iterations, benchmark_rate_params)
+ stats = batch_run_benchmark_rate.calculate_stats(results)
+ print(batch_run_benchmark_rate.get_summary_string(stats, iterations, benchmark_rate_params))
+
+ # compare results against thresholds
+ dropped_samps_threshold = 0
+ overruns_threshold = 2
+ rx_timeouts_threshold = 0
+ rx_seq_err_threshold = 0
+
+ underruns_threshold = 2
+ tx_timeouts_threshold = 0
+ tx_seq_err_threshold = 0
+
+ late_cmds_threshold = 0
+
+ # TODO: define custom failed assertion explanations to avoid extra output
+ # https://docs.pytest.org/en/6.2.x/assert.html#defining-your-own-explanation-for-failed-assertions
+
+ if rx_channels:
+ assert stats.avg_vals.dropped_samps <= dropped_samps_threshold, \
+ f"""Number of dropped samples exceeded threshold.
+ Expected dropped samples: <= {dropped_samps_threshold}
+ Actual dropped samples: {stats.avg_vals.dropped_samps}"""
+ assert stats.avg_vals.overruns <= overruns_threshold, \
+ f"""Number of overruns exceeded threshold.
+ Expected overruns: <= {overruns_threshold}
+ Actual overruns: {stats.avg_vals.overruns}"""
+ assert stats.avg_vals.rx_timeouts <= rx_timeouts_threshold, \
+ f"""Number of rx timeouts exceeded threshold.
+ Expected rx timeouts: <= {rx_timeouts_threshold}
+ Actual rx timeouts: {stats.avg_vals.rx_timeouts}"""
+ assert stats.avg_vals.rx_seq_errs <= rx_seq_err_threshold, \
+ f"""Number of rx sequence errors exceeded threshold.
+ Expected rx sequence errors: <= {rx_seq_err_threshold}
+ Actual rx sequence errors: {stats.avg_vals.rx_seq_errs}"""
+
+ if tx_channels:
+ assert stats.avg_vals.underruns <= underruns_threshold, \
+ f"""Number of underruns exceeded threshold.
+ Expected underruns: <= {underruns_threshold}
+ Actual underruns: {stats.avg_vals.underruns}"""
+ assert stats.avg_vals.tx_timeouts <= tx_timeouts_threshold, \
+ f"""Number of tx timeouts exceeded threshold.
+ Expected tx timeouts: <= {tx_timeouts_threshold}
+ Actual tx timeouts: {stats.avg_vals.tx_timeouts}"""
+ assert stats.avg_vals.tx_seq_errs <= tx_seq_err_threshold, \
+ f"""Number of tx sequence errors exceeded threshold.
+ Expected tx sequence errors: <= {tx_seq_err_threshold}
+ Actual tx sequence errors: {stats.avg_vals.tx_seq_errs}"""
+
+ assert stats.avg_vals.late_cmds <= late_cmds_threshold, \
+ f"""Number of late commands exceeded threshold.
+ Expected late commands: <= {late_cmds_threshold}
+ Actual late commands: {stats.avg_vals.late_cmds}"""
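
As an illustration of the device_args string that test_streaming assembles, a dual-10GbE DPDK run against the beauty-N320-0 entry from job-uhd-streaming-tests-beauty.yml would produce arguments along these lines (a sketch only; the addresses are taken from that file and the 250e6 rate from the N320 cases):

    # Sketch: mirrors the f-string concatenation in test_streaming() for an
    # N320, dual_10G=True, use_dpdk=True case; nothing here is run by the tests.
    rate = 250e6
    device_args = f"master_clock_rate={rate},"
    device_args += "addr=192.168.10.2,"
    device_args += "second_addr=192.168.20.2,"
    device_args += "use_dpdk=1,mgmt_addr=10.0.57.13"
    print(device_args)
    # -> master_clock_rate=250000000.0,addr=192.168.10.2,second_addr=192.168.20.2,use_dpdk=1,mgmt_addr=10.0.57.13
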