aboutsummaryrefslogtreecommitdiffstats
path: root/host/tests/streaming_performance
diff options
context:
space:
mode:
Diffstat (limited to 'host/tests/streaming_performance')
-rw-r--r--host/tests/streaming_performance/CMakeLists.txt14
-rwxr-xr-xhost/tests/streaming_performance/batch_run_benchmark_rate.py174
-rw-r--r--host/tests/streaming_performance/parse_benchmark_rate.py149
-rw-r--r--host/tests/streaming_performance/run_benchmark_rate.py86
4 files changed, 423 insertions, 0 deletions
diff --git a/host/tests/streaming_performance/CMakeLists.txt b/host/tests/streaming_performance/CMakeLists.txt
new file mode 100644
index 000000000..0ea828737
--- /dev/null
+++ b/host/tests/streaming_performance/CMakeLists.txt
@@ -0,0 +1,14 @@
+#
+# Copyright 2019 Ettus Research, a National Instruments Brand
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+
+set(streaming_performance_files
+ parse_benchmark_rate.py
+ run_benchmark_rate.py
+ batch_run_benchmark_rate.py
+ run_X300_performance_tests.py
+)
+
+UHD_INSTALL(PROGRAMS ${streaming_performance_files} DESTINATION ${PKG_LIB_DIR}/tests/streaming_performance COMPONENT tests)
diff --git a/host/tests/streaming_performance/batch_run_benchmark_rate.py b/host/tests/streaming_performance/batch_run_benchmark_rate.py
new file mode 100755
index 000000000..ad47cfd5c
--- /dev/null
+++ b/host/tests/streaming_performance/batch_run_benchmark_rate.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python3
+"""
+Copyright 2019 Ettus Research, A National Instruments Brand
+
+SPDX-License-Identifier: GPL-3.0-or-later
+
+Runs the benchmark rate C++ example for a specified number of iterations and
+aggregates results.
+
+Example usage:
+batch_run_benchmark_rate.py --path <benchmark_rate_dir>/benchmark_rate --iterations 1 --args "addr=192.168.30.2" --rx_rate 1e6
+"""
+import argparse
+import collections
+import parse_benchmark_rate
+import run_benchmark_rate
+
# Aggregated statistics computed over a batch of benchmark_rate runs. Each
# field holds a parse_benchmark_rate.Results tuple: the per-field average,
# minimum, maximum, and non-zero occurrence count across all iterations.
Results = collections.namedtuple(
    'Results',
    ['avg_vals', 'min_vals', 'max_vals', 'non_zero_vals'])
+
def calculate_stats(results):
    """
    Calculates performance metrics from list of parsed benchmark rate results.

    Returns a Results tuple whose fields hold the per-field average, minimum,
    maximum, and non-zero counts over all iterations.
    """
    return Results(
        avg_vals=parse_benchmark_rate.average(results),
        min_vals=parse_benchmark_rate.min_vals(results),
        max_vals=parse_benchmark_rate.max_vals(results),
        non_zero_vals=parse_benchmark_rate.non_zero_vals(results))
+
def run(path, iterations, benchmark_rate_params, stop_on_error=True):
    """
    Runs benchmark rate multiple times and returns a list of parsed results.

    Arguments:
        path: path to the benchmark_rate executable
        iterations: number of successfully parsed runs to collect
        benchmark_rate_params: dict of arguments passed to benchmark_rate
        stop_on_error: when True, raise on unparsable output; when False,
            print a warning and retry the iteration

    Raises:
        RuntimeError: output could not be parsed and stop_on_error is True.
    """
    print("Running benchmark rate {} times with the following arguments: ".format(iterations))
    for key, val in benchmark_rate_params.items():
        print("{:14} {}".format(key, val))

    parsed_results = []
    iteration = 0
    while iteration < iterations:
        proc = run_benchmark_rate.run(path, benchmark_rate_params)
        result = parse_benchmark_rate.parse(proc.stdout.decode('ASCII'))
        # Identity check: parse() returns None (not a falsy Results) on failure.
        if result is not None:
            parsed_results.append(result)
            iteration += 1
        elif stop_on_error:
            msg = "Could not parse results of benchmark_rate\n"
            msg += "Benchmark rate arguments:\n"
            msg += str(proc.args) + "\n"
            msg += "Stderr capture:\n"
            msg += proc.stderr.decode('ASCII')
            msg += "Stdout capture:\n"
            msg += proc.stdout.decode('ASCII')
            raise RuntimeError(msg)
        else:
            # Failed iteration is retried: the counter only advances on success.
            print("Failed to parse benchmark rate results")
            print(proc.stderr.decode('ASCII'))

    return parsed_results
+
def get_summary_string(summary, params=None):
    """
    Returns summary info in a string resembling benchmark_rate output.

    Arguments:
        summary: Results tuple produced by calculate_stats()
        params: optional dict of benchmark_rate arguments; when given, the
            expected vs. actual received/transmitted sample counts are
            appended to the summary.
    """
    # Interleave avg/min/max/non-zero per counter, in display order, instead
    # of spelling out forty positional format arguments.
    counter_fields = (
        "received_samps", "dropped_samps", "overruns", "transmitted_samps",
        "tx_seq_errs", "rx_seq_errs", "underruns", "late_cmds",
        "tx_timeouts", "rx_timeouts")
    stat_values = []
    for field in counter_fields:
        for stats in (summary.avg_vals, summary.min_vals,
                      summary.max_vals, summary.non_zero_vals):
            stat_values.append(getattr(stats, field))

    statistics_msg = """
Benchmark rate summary:
  Num received samples: avg {}, min {}, max {}, non-zero {}
  Num dropped samples: avg {}, min {}, max {}, non-zero {}
  Num overruns detected: avg {}, min {}, max {}, non-zero {}
  Num transmitted samples: avg {}, min {}, max {}, non-zero {}
  Num sequence errors (Tx): avg {}, min {}, max {}, non-zero {}
  Num sequence errors (Rx): avg {}, min {}, max {}, non-zero {}
  Num underruns detected: avg {}, min {}, max {}, non-zero {}
  Num late commands: avg {}, min {}, max {}, non-zero {}
  Num timeouts (Tx): avg {}, min {}, max {}, non-zero {}
  Num timeouts (Rx): avg {}, min {}, max {}, non-zero {}
""".format(*stat_values)

    if params is not None:
        # benchmark_rate defaults to a 10 second run when no duration given.
        duration = int(params["duration"]) if "duration" in params else 10

        num_rx_channels = summary.min_vals.num_rx_channels
        rx_rate = summary.min_vals.rx_rate
        if num_rx_channels != 0:
            expected_samps = num_rx_channels * duration * rx_rate
            percent = summary.avg_vals.received_samps / expected_samps * 100
            statistics_msg += " Expected samps (Rx): {}\n".format(expected_samps)
            statistics_msg += " Actual samps % (Rx): {}\n".format(round(percent, 1))

        num_tx_channels = summary.min_vals.num_tx_channels
        tx_rate = summary.min_vals.tx_rate
        if num_tx_channels != 0:
            expected_samps = num_tx_channels * duration * tx_rate
            percent = summary.avg_vals.transmitted_samps / expected_samps * 100
            statistics_msg += " Expected samps (Tx): {}\n".format(expected_samps)
            statistics_msg += " Actual samps % (Tx): {}\n".format(round(percent, 1))

    return statistics_msg
+
def parse_args():
    """
    Parse the command line arguments for batch run benchmark rate.

    Returns a (path, iterations, benchmark_rate_params) tuple; any argument
    recognized by run_benchmark_rate is split off into the params dict.
    """
    benchmark_rate_params, leftover = run_benchmark_rate.parse_known_args()
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", type=str, required=True, help="path to benchmark rate example")
    parser.add_argument("--iterations", type=int, default=100, help="number of iterations to run")
    args = parser.parse_args(leftover)
    return args.path, args.iterations, benchmark_rate_params
+
if __name__ == "__main__":
    # Collect results over all iterations, then print aggregate statistics.
    path, iterations, params = parse_args()  # removed stray trailing semicolon
    results = run(path, iterations, params)
    summary = calculate_stats(results)
    print(get_summary_string(summary, params))
diff --git a/host/tests/streaming_performance/parse_benchmark_rate.py b/host/tests/streaming_performance/parse_benchmark_rate.py
new file mode 100644
index 000000000..e30c3e6aa
--- /dev/null
+++ b/host/tests/streaming_performance/parse_benchmark_rate.py
@@ -0,0 +1,149 @@
+"""
+Copyright 2019 Ettus Research, A National Instruments Brand
+
+SPDX-License-Identifier: GPL-3.0-or-later
+
+Helper script that parses the results of benchmark_rate and extracts numeric
+values printed at the end of execution.
+"""
+
+import collections
+import re
+import csv
+
# Numeric values extracted from a single benchmark_rate run: the channel and
# rate configuration that was tested, plus every counter printed in the
# example's closing summary.
Results = collections.namedtuple(
    'Results',
    ['num_rx_channels', 'num_tx_channels', 'rx_rate', 'tx_rate',
     'received_samps', 'dropped_samps', 'overruns', 'transmitted_samps',
     'tx_seq_errs', 'rx_seq_errs', 'underruns', 'late_cmds',
     'tx_timeouts', 'rx_timeouts'])
+
def average(results):
    """
    Returns the average of a list of results.

    Each field of the returned Results is the mean of that field across all
    entries (true division, so values are floats).
    """
    count = len(results)
    # zip(*results) transposes the list of tuples into per-field columns.
    return Results(*(sum(column) / count for column in zip(*results)))
+
def min_vals(results):
    """
    Returns the minimum values of a list of results.

    Each field of the returned Results is the smallest value of that field
    across all entries.
    """
    # Transpose into per-field columns and take the minimum of each.
    return Results(*map(min, zip(*results)))
+
def max_vals(results):
    """
    Returns the maximum values of a list of results.

    Each field of the returned Results is the largest value of that field
    across all entries.
    """
    # Transpose into per-field columns and take the maximum of each.
    return Results(*map(max, zip(*results)))
+
def non_zero_vals(results):
    """
    Returns the number of non-zero values from a list of results.

    Each field of the returned Results counts how many entries had a value
    greater than zero in that field.
    """
    counts = [sum(1 for value in column if value > 0)
              for column in zip(*results)]
    return Results(*counts)
+
def parse(result_str):
    """
    Parses benchmark results and returns numerical values.

    Arguments:
        result_str: full stdout capture of a benchmark_rate run

    Returns a Results tuple, or None when the summary section is missing.
    """
    # Parse rx rate. Raw strings keep regex escapes (\., \d) from being
    # treated as (deprecated) invalid string escapes.
    rx_rate = 0.0
    num_rx_channels = 0
    expr = r"Testing receive rate ([0-9]+\.[0-9]+) Msps on (\d+) channels"
    match = re.search(expr, result_str)
    if match is not None:
        rx_rate = float(match.group(1)) * 1.0e6
        num_rx_channels = int(match.group(2))

    # Parse tx rate
    tx_rate = 0.0
    num_tx_channels = 0
    expr = r"Testing transmit rate ([0-9]+\.[0-9]+) Msps on (\d+) channels"
    match = re.search(expr, result_str)
    if match is not None:
        tx_rate = float(match.group(1)) * 1.0e6
        num_tx_channels = int(match.group(2))

    # Parse results: all ten counters must appear, in this order, to match.
    expr = "Benchmark rate summary:"
    expr += r"\s*Num received samples:\s*(\d+)"
    expr += r"\s*Num dropped samples:\s*(\d+)"
    expr += r"\s*Num overruns detected:\s*(\d+)"
    expr += r"\s*Num transmitted samples:\s*(\d+)"
    expr += r"\s*Num sequence errors \(Tx\):\s*(\d+)"
    expr += r"\s*Num sequence errors \(Rx\):\s*(\d+)"
    expr += r"\s*Num underruns detected:\s*(\d+)"
    expr += r"\s*Num late commands:\s*(\d+)"
    expr += r"\s*Num timeouts \(Tx\):\s*(\d+)"
    expr += r"\s*Num timeouts \(Rx\):\s*(\d+)"
    match = re.search(expr, result_str)
    if match is None:
        return None
    return Results(
        num_rx_channels = num_rx_channels,
        num_tx_channels = num_tx_channels,
        rx_rate = rx_rate,
        tx_rate = tx_rate,
        received_samps = int(match.group(1)),
        dropped_samps = int(match.group(2)),
        overruns = int(match.group(3)),
        transmitted_samps = int(match.group(4)),
        tx_seq_errs = int(match.group(5)),
        rx_seq_errs = int(match.group(6)),
        underruns = int(match.group(7)),
        late_cmds = int(match.group(8)),
        tx_timeouts = int(match.group(9)),
        rx_timeouts = int(match.group(10)))
+
def write_benchmark_rate_csv(results, file_name):
    """
    Write parsed results to a CSV file.

    The header row holds the Results field names; each subsequent row is one
    parsed benchmark_rate run.
    """
    with open(file_name, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(results[0]._fields)
        writer.writerows(results)
+
if __name__ == "__main__":
    # Self-test entry point: parses a canned benchmark_rate transcript so the
    # regexes in parse() can be exercised without hardware attached. The
    # literal below must keep the exact label spelling/order parse() expects.
    result_str = """
    [00:00:00.000376] Creating the usrp device with: addr=192.168.30.2, second_addr=192.168.40.2...
    [00:00:05.63100253] Testing receive rate 200.000000 Msps on 2 channels
    [00:00:05.73100253] Testing transmit rate 100.000000 Msps on 1 channels
    [00:00:15.113339078] Benchmark complete.

    Benchmark rate summary:
      Num received samples:     10000
      Num dropped samples:      200
      Num overruns detected:    10
      Num transmitted samples:  20000
      Num sequence errors (Tx): 5
      Num sequence errors (Rx): 6
      Num underruns detected:   20
      Num late commands:        2
      Num timeouts (Tx):        0
      Num timeouts (Rx):        100

    Done!
    """
    print("Parsing hardcoded string for testing only")
    print(parse(result_str))
diff --git a/host/tests/streaming_performance/run_benchmark_rate.py b/host/tests/streaming_performance/run_benchmark_rate.py
new file mode 100644
index 000000000..1989b6d92
--- /dev/null
+++ b/host/tests/streaming_performance/run_benchmark_rate.py
@@ -0,0 +1,86 @@
+"""
+Copyright 2019 Ettus Research, A National Instruments Brand
+
+SPDX-License-Identifier: GPL-3.0-or-later
+
+Helper script that provides a Python interface to run the benchmark rate C++
+example.
+"""
+import argparse
+import subprocess
+
def run(path, params):
    """
    Run benchmark rate and return a CompletedProcess object.

    Each key/value pair in params is forwarded as a "--key value" argument
    pair; stdout and stderr of the example are captured.
    """
    command = [path]
    for name, value in params.items():
        command += ["--" + str(name), str(value)]
    return subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
def create_parser():
    """
    Build the argparse parser covering every benchmark_rate option.

    All options are free-form strings forwarded verbatim to the C++ example.
    """
    options = (
        ("args", "single uhd device address args"),
        ("duration", "duration for the test in seconds"),
        ("rx_subdev", "specify the device subdev for RX"),
        ("tx_subdev", "specify the device subdev for TX"),
        ("rx_stream_args", "stream args for RX streamer"),
        ("tx_stream_args", "stream args for TX streamer"),
        ("rx_rate", "specify to perform a RX rate test (sps)"),
        ("tx_rate", "specify to perform a TX rate test (sps)"),
        ("rx_otw", "specify the over-the-wire sample mode for RX"),
        ("tx_otw", "specify the over-the-wire sample mode for TX"),
        ("rx_cpu", "specify the host/cpu sample mode for RX"),
        ("tx_cpu", "specify the host/cpu sample mode for TX"),
        ("ref", "clock reference (internal, external, mimo, gpsdo)"),
        ("pps", "PPS source (internal, external, mimo, gpsdo)"),
        ("random", "Run with random values of samples in send() and recv()"),
        ("rx_channels", "which RX channel(s) to use"),
        ("tx_channels", "which TX channel(s) to use"),
    )
    parser = argparse.ArgumentParser()
    for name, help_text in options:
        parser.add_argument("--" + name, type=str, help=help_text)
    return parser
+
def parse_args():
    """
    Parse the command line arguments for benchmark rate, and returns arguments
    in a dict.

    Options that were not specified on the command line are omitted.
    """
    parser = create_parser()
    params = vars(parser.parse_args())
    # Identity check ("is not None") is the correct test for unset options.
    return {key: val for key, val in params.items() if val is not None}
+
def parse_known_args():
    """
    Parse the command line arguments for benchmark rate. Returns a dict
    containing the benchmark rate args, and a list containing args that
    are not recognized by benchmark rate.
    """
    parser = create_parser()
    params, rest = parser.parse_known_args()
    params = vars(params)
    # Identity check ("is not None") is the correct test for unset options.
    return {key: val for key, val in params.items() if val is not None}, rest
+
if __name__ == "__main__":
    # Test entry point: requires --path to the benchmark_rate example; every
    # other argument is forwarded to the example itself.
    benchmark_rate_params, rest = parse_known_args()
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", type=str, required=True, help="path to benchmark rate example")
    params = parser.parse_args(rest)  # removed stray trailing semicolon
    proc = run(params.path, benchmark_rate_params)

    print("ARGS")
    print(proc.args)

    print("RETURNCODE")
    print(proc.returncode)

    print("STDERR")
    print(proc.stderr.decode('ASCII'))

    print("STDOUT")
    print(proc.stdout.decode('ASCII'))