author    Ciro Nishiguchi <ciro.nishiguchi@ni.com>    2019-12-10 14:28:22 -0600
committer Brent Stapleton <brent.stapleton@ettus.com> 2019-12-18 09:58:58 -0800
commit    ed273d6353995f356fc8624566628f8219528cd0 (patch)
tree      7bf7f9ccaf7cc3f44eba147ec2aa503a770bf37f /host/tests/streaming_performance
parent    8fab267ea6ff0a77fd0689cc90689a3850c2ec74 (diff)
tests: Add more tests for max rate streaming
Add more tests to compare streaming rates with previous releases, and change the benchmark summary output from a plain string to a table.
Diffstat (limited to 'host/tests/streaming_performance')
-rwxr-xr-x  host/tests/streaming_performance/batch_run_benchmark_rate.py  191
-rwxr-xr-x  host/tests/streaming_performance/run_E3xx_max_rate_tests.py     4
-rwxr-xr-x  host/tests/streaming_performance/run_N3xx_max_rate_tests.py   361
-rwxr-xr-x  host/tests/streaming_performance/run_X3xx_max_rate_tests.py     2
4 files changed, 469 insertions, 89 deletions
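
The main change in batch_run_benchmark_rate.py is the table-formatted summary: instead of raw counts, each row reports received and transmitted samples as a percentage of the count expected for the run (channels x duration x rate). A minimal sketch of that calculation, using illustrative numbers rather than code from the patch:

def pct_of_expected(samps, num_channels, duration, rate):
    """Return samples as a percentage of the count expected for the run."""
    expected = num_channels * duration * rate
    return round(samps / expected * 100, 1) if expected > 0 else 0

# e.g. a 2-channel RX run at 250 Msps for 15 s that received 7.4e9 samples
print(pct_of_expected(7.4e9, num_channels=2, duration=15, rate=250e6))  # 98.7

This is why the "rx samps" and "tx samps" columns in the new table sit near 100 when streaming keeps up with the requested rate.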
diff --git a/host/tests/streaming_performance/batch_run_benchmark_rate.py b/host/tests/streaming_performance/batch_run_benchmark_rate.py
index ad47cfd5c..6874ad131 100755
--- a/host/tests/streaming_performance/batch_run_benchmark_rate.py
+++ b/host/tests/streaming_performance/batch_run_benchmark_rate.py
@@ -12,6 +12,7 @@ batch_run_benchmark_rate.py --path <benchmark_rate_dir>/benchmark_rate --iterati
"""
import argparse
import collections
+import re
import parse_benchmark_rate
import run_benchmark_rate
@@ -71,90 +72,112 @@ def run(path, iterations, benchmark_rate_params, stop_on_error=True):
return parsed_results
-def get_summary_string(summary, params=None):
+def get_summary_string(stats, iterations, params):
"""
- Returns summary info in a string resembling benchmark_rate output.
+ Returns summary info in a table format.
"""
- statistics_msg = """
-Benchmark rate summary:
- Num received samples: avg {}, min {}, max {}, non-zero {}
- Num dropped samples: avg {}, min {}, max {}, non-zero {}
- Num overruns detected: avg {}, min {}, max {}, non-zero {}
- Num transmitted samples: avg {}, min {}, max {}, non-zero {}
- Num sequence errors (Tx): avg {}, min {}, max {}, non-zero {}
- Num sequence errors (Rx): avg {}, min {}, max {}, non-zero {}
- Num underruns detected: avg {}, min {}, max {}, non-zero {}
- Num late commands: avg {}, min {}, max {}, non-zero {}
- Num timeouts (Tx): avg {}, min {}, max {}, non-zero {}
- Num timeouts (Rx): avg {}, min {}, max {}, non-zero {}
-""".format(
- summary.avg_vals.received_samps,
- summary.min_vals.received_samps,
- summary.max_vals.received_samps,
- summary.non_zero_vals.received_samps,
- summary.avg_vals.dropped_samps,
- summary.min_vals.dropped_samps,
- summary.max_vals.dropped_samps,
- summary.non_zero_vals.dropped_samps,
- summary.avg_vals.overruns,
- summary.min_vals.overruns,
- summary.max_vals.overruns,
- summary.non_zero_vals.overruns,
- summary.avg_vals.transmitted_samps,
- summary.min_vals.transmitted_samps,
- summary.max_vals.transmitted_samps,
- summary.non_zero_vals.transmitted_samps,
- summary.avg_vals.tx_seq_errs,
- summary.min_vals.tx_seq_errs,
- summary.max_vals.tx_seq_errs,
- summary.non_zero_vals.tx_seq_errs,
- summary.avg_vals.rx_seq_errs,
- summary.min_vals.rx_seq_errs,
- summary.max_vals.rx_seq_errs,
- summary.non_zero_vals.rx_seq_errs,
- summary.avg_vals.underruns,
- summary.min_vals.underruns,
- summary.max_vals.underruns,
- summary.non_zero_vals.underruns,
- summary.avg_vals.late_cmds,
- summary.min_vals.late_cmds,
- summary.max_vals.late_cmds,
- summary.non_zero_vals.late_cmds,
- summary.avg_vals.tx_timeouts,
- summary.min_vals.tx_timeouts,
- summary.max_vals.tx_timeouts,
- summary.non_zero_vals.tx_timeouts,
- summary.avg_vals.rx_timeouts,
- summary.min_vals.rx_timeouts,
- summary.max_vals.rx_timeouts,
- summary.non_zero_vals.rx_timeouts)
-
- if params is not None:
- duration = 10
- if "duration" in params:
- duration = int(params["duration"])
-
- num_rx_channels = summary.min_vals.num_rx_channels
- rx_rate = summary.min_vals.rx_rate
-
- if num_rx_channels != 0:
- avg_samps = summary.avg_vals.received_samps
- expected_samps = num_rx_channels * duration * rx_rate
- percent = (avg_samps) / expected_samps * 100
- statistics_msg += " Expected samps (Rx): {}\n".format(expected_samps)
- statistics_msg += " Actual samps % (Rx): {}\n".format(round(percent, 1))
-
- num_tx_channels = summary.min_vals.num_tx_channels
- tx_rate = summary.min_vals.tx_rate
-
- if num_tx_channels != 0:
- avg_samps = summary.avg_vals.transmitted_samps
- expected_samps = num_tx_channels * duration * tx_rate
- percent = (avg_samps) / expected_samps * 100
- statistics_msg += " Expected samps (Tx): {}\n".format(expected_samps)
- statistics_msg += " Actual samps % (Tx): {}\n".format(round(percent, 1))
-
- return statistics_msg
+ header = "| stat | rx samps | tx samps | rx dropped | overrun | rx seq | tx seq | underrun | rx tmo | tx tmo | late |"
+ ruler = "|------|----------|----------|------------|---------|--------|--------|----------|--------|--------|------|"
+
+ def get_params_row(results, iterations, duration, mcr):
+ """
+ Returns a row containing the test setup, e.g.:
+ 1 rx, 1 tx, rate 6.452e+06 sps, 1 iterations, 10s duration
+ """
+ rate = max(results.rx_rate, results.tx_rate)
+ s = ""
+ s += "{} rx".format(int(results.num_rx_channels))
+ s += ", "
+ s += "{} tx".format(int(results.num_tx_channels))
+ s += ", "
+ s += "rate {:.3e} sps".format(round(rate, 2))
+ s += ", "
+ s += "{} iterations".format(iterations)
+ s += ", "
+ s += "{}s duration".format(duration)
+ if mcr is not None:
+ s += ", "
+ s += "mcr {}".format(mcr)
+
+ return "| " + s + " "*(len(ruler)-len(s)-3) + "|"
+
+ def get_table_row(results, iterations, duration, stat_label):
+ """
+ Returns a row of numeric results.
+ """
+ expected_samps = results.num_rx_channels * duration * results.rx_rate
+ rx_samps = 0
+ rx_dropped = 0
+
+ if expected_samps > 0:
+ rx_samps = results.received_samps / expected_samps * 100
+ rx_dropped = results.dropped_samps / expected_samps * 100
+
+ tx_samps = 0
+ expected_samps = results.num_tx_channels * duration * results.tx_rate
+
+ if expected_samps > 0:
+ tx_samps = results.transmitted_samps / expected_samps * 100
+
+ s = (
+ "| {} ".format(stat_label) +
+ "| {:>8} ".format(round(rx_samps, 1)) +
+ "| {:>8} ".format(round(tx_samps, 1)) +
+ "| {:>10} ".format(round(rx_dropped, 1)) +
+ "| {:>7} ".format(round(results.overruns, 1)) +
+ "| {:>6} ".format(round(results.rx_seq_errs, 1)) +
+ "| {:>6} ".format(round(results.tx_seq_errs, 1)) +
+ "| {:>8.1e} ".format(round(results.underruns, 1)) +
+ "| {:>6} ".format(round(results.rx_timeouts, 1)) +
+ "| {:>6} ".format(round(results.tx_timeouts, 1)) +
+ "| {:>4} ".format(round(results.late_cmds, 1))
+ )
+
+ return s + "|"
+
+ def get_non_zero_row(results):
+ """
+ Returns a row with the number of non-zero values for each statistic.
+ """
+ s = (
+ "| nz " +
+ "| {:>8} ".format(int(results.received_samps)) +
+ "| {:>8} ".format(int(results.transmitted_samps)) +
+ "| {:>10} ".format(int(results.dropped_samps)) +
+ "| {:>7} ".format(int(results.overruns)) +
+ "| {:>6} ".format(int(results.rx_seq_errs)) +
+ "| {:>6} ".format(int(results.tx_seq_errs)) +
+ "| {:>8} ".format(int(results.underruns)) +
+ "| {:>6} ".format(int(results.rx_timeouts)) +
+ "| {:>6} ".format(int(results.tx_timeouts)) +
+ "| {:>4} ".format(int(results.late_cmds))
+ )
+
+ return s + "|"
+
+ duration = 10
+ if "duration" in params:
+ duration = int(params["duration"])
+
+ mcr = None
+ if "args" in params:
+ args = params["args"]
+ expr = ""
+ expr += r"master_clock_rate\s*=\s*(\d[\deE+-.]*)"
+ match = re.search(expr, args)
+ if match:
+ mcr = match.group(1)
+
+ s = ""
+ s += header + "\n"
+ s += ruler + "\n"
+ s += get_params_row(stats.avg_vals, iterations, duration, mcr) + "\n"
+ s += get_table_row(stats.avg_vals, iterations, duration, "avg") + "\n"
+ s += get_table_row(stats.min_vals, iterations, duration, "min") + "\n"
+ s += get_table_row(stats.max_vals, iterations, duration, "max") + "\n"
+ s += get_non_zero_row(stats.non_zero_vals) + "\n"
+
+ return s
def parse_args():
"""
@@ -170,5 +193,5 @@ def parse_args():
if __name__ == "__main__":
path, iterations, params = parse_args();
results = run(path, iterations, params)
- summary = calculate_stats(results)
- print(get_summary_string(summary, params))
+ stats = calculate_stats(results)
+ print(get_summary_string(stats, iterations, params))
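
Besides the table rows, get_summary_string() now extracts master_clock_rate from the device args so it can appear in the parameters row. A short sketch of that extraction, reusing the regular expression added above; the helper name and sample strings are illustrative only:

import re

def extract_mcr(args):
    """Return the master_clock_rate value from a UHD device-args string, or None."""
    match = re.search(r"master_clock_rate\s*=\s*(\d[\deE+-.]*)", args)
    return match.group(1) if match else None

print(extract_mcr("addr=192.168.10.2,master_clock_rate=250e6"))  # 250e6
print(extract_mcr("type=x300"))                                  # None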
diff --git a/host/tests/streaming_performance/run_E3xx_max_rate_tests.py b/host/tests/streaming_performance/run_E3xx_max_rate_tests.py
index d53f03a46..ce1daa109 100755
--- a/host/tests/streaming_performance/run_E3xx_max_rate_tests.py
+++ b/host/tests/streaming_performance/run_E3xx_max_rate_tests.py
@@ -11,7 +11,7 @@ benchmark rate C++ example with different streaming parameters.
To run all the tests, execute it with all supported options for the test_type
parameter:
E320_XG Runs E320 tests with single and dual 10 GbE links
- E120_Liberio Runs E310 tests with Liberio
+ E310_Liberio Runs E310 tests with Liberio
Example usage:
run_E3xx_max_rate_tests.py --path <benchmark_rate_dir>/benchmark_rate --addr 192.168.10.2 --second_addr 192.168.20.2 --test_type E320_XG
@@ -60,7 +60,7 @@ def run_test(path, params, iterations, label):
print(label + "\n")
results = batch_run_benchmark_rate.run(path, iterations, params, False)
stats = batch_run_benchmark_rate.calculate_stats(results)
- print(batch_run_benchmark_rate.get_summary_string(stats))
+ print(batch_run_benchmark_rate.get_summary_string(stats, iterations, params))
def run_E320_tests_for_single_10G(path, addr, iterations, duration):
"""
diff --git a/host/tests/streaming_performance/run_N3xx_max_rate_tests.py b/host/tests/streaming_performance/run_N3xx_max_rate_tests.py
index 62010e3b8..f4b593577 100755
--- a/host/tests/streaming_performance/run_N3xx_max_rate_tests.py
+++ b/host/tests/streaming_performance/run_N3xx_max_rate_tests.py
@@ -12,6 +12,8 @@ To run all the tests, execute it with all supported options for the test_type
parameter:
N310_XG Runs N310 tests with single and dual 10 GbE links
N310_Liberio Runs N310 tests with Liberio
+ N320_XG Runs N320 tests with single and dual 10 GbE links
+ N320_Liberio Runs N320 tests with Liberio
Example usage:
run_N3xx_max_rate_tests.py --path <benchmark_rate_dir>/benchmark_rate --addr 192.168.10.2 --second_addr 192.168.20.2 --test_type N310_XG
@@ -24,8 +26,14 @@ import batch_run_benchmark_rate
Test_Type_N310_XG = "N310_XG"
Test_Type_N310_Liberio = "N310_Liberio"
+Test_Type_N320_XG = "N320_XG"
+Test_Type_N320_Liberio = "N320_Liberio"
-test_type_list = [Test_Type_N310_XG, Test_Type_N310_Liberio]
+test_type_list = [
+ Test_Type_N310_XG,
+ Test_Type_N310_Liberio,
+ Test_Type_N320_Liberio,
+ Test_Type_N320_XG]
def parse_args():
"""
@@ -65,7 +73,7 @@ def run_test(path, params, iterations, label):
print(label + "\n")
results = batch_run_benchmark_rate.run(path, iterations, params)
stats = batch_run_benchmark_rate.calculate_stats(results)
- print(batch_run_benchmark_rate.get_summary_string(stats, params))
+ print(batch_run_benchmark_rate.get_summary_string(stats, iterations, params))
def run_N310_tests_for_single_10G(path, addr, iterations, duration):
"""
@@ -624,6 +632,347 @@ def run_N310_tests_for_Liberio_315(path, iterations, duration):
params["tx_channels"] = "0,1,2,3"
run_test(path, params, iterations, "4xTRX @{}".format(rate))
+def run_N320_tests_for_Liberio_315(path, iterations, duration):
+ """
+ Runs tests that are in the neighborhood of max rate for Liberio
+ """
+ def base_params():
+ return {
+ "args" : "master_clock_rate=200e6",
+ "duration" : duration
+ }
+
+ # Run RX at 6.896552 Msps with one channel
+ rate = "6.896552e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ run_test(path, params, iterations, "1xRX @{}".format(rate))
+
+ # Run RX at 8 Msps with one channel
+ rate = "8.0e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ run_test(path, params, iterations, "1xRX @{}".format(rate))
+
+ # Run RX at 9.090909 Msps with one channel
+ rate = "9.090909e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ run_test(path, params, iterations, "1xRX @{}".format(rate))
+
+ # Run RX at 10 Msps with one channel
+ rate = "10.0e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ run_test(path, params, iterations, "1xRX @{}".format(rate))
+
+ # Run TX at 2 Msps with one channel
+ rate = "2.0e6"
+ params = base_params()
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTX @{}".format(rate))
+
+ # Run TX at 2.985075 Msps with one channel
+ rate = "2.985075e6"
+ params = base_params()
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTX @{}".format(rate))
+
+ # Run TX at 4 Msps with one channel
+ rate = "4.0e6"
+ params = base_params()
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTX @{}".format(rate))
+
+ # Run TX at 6.060606 Msps with one channel
+ rate = "6.060606e6"
+ params = base_params()
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTX @{}".format(rate))
+
+ # Run TX at 6.896552 Msps with one channel
+ rate = "6.896552e6"
+ params = base_params()
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTX @{}".format(rate))
+
+ # Run TRX at 2.985075 Msps with one channel
+ rate = "2.985075e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTRX @{}".format(rate))
+
+ # Run TRX at 4 Msps with one channel
+ rate = "4.0e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTRX @{}".format(rate))
+
+ # Run TRX at 5 Msps with one channel
+ rate = "5.0e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTRX @{}".format(rate))
+
+ # Run TRX at 0.5 Msps with two channels
+ rate = "0.5e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0,1"
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0,1"
+ run_test(path, params, iterations, "2xTRX @{}".format(rate))
+
+ # Run TRX at 0.75 Msps with two channels
+ rate = "0.75e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0,1"
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0,1"
+ run_test(path, params, iterations, "2xTRX @{}".format(rate))
+
+ # Run TRX at 1.0 Msps with two channels
+ rate = "1.0e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0,1"
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0,1"
+ run_test(path, params, iterations, "2xTRX @{}".format(rate))
+
+def run_N320_tests_for_Liberio_master_next(path, iterations, duration):
+ """
+ Runs tests that are in the neighborhood of max rate for Liberio
+ """
+ def base_params():
+ return {
+ "args" : "master_clock_rate=200e6",
+ "duration" : duration
+ }
+
+ # Run RX at 2 Msps with one channel
+ rate = "2e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ run_test(path, params, iterations, "1xRX @{}".format(rate))
+
+ # Run RX at 2.985075 Msps with one channel
+ rate = "2.985075e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ run_test(path, params, iterations, "1xRX @{}".format(rate))
+
+ # Run RX at 4 Msps with one channel
+ rate = "4e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ run_test(path, params, iterations, "1xRX @{}".format(rate))
+
+ # Run TX at 2.985075 Msps with one channel
+ rate = "2.985075e6"
+ params = base_params()
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTX @{}".format(rate))
+
+ # Run TX at 4 Msps with one channel
+ rate = "4.0e6"
+ params = base_params()
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTX @{}".format(rate))
+
+ # Run TX at 5 Msps with one channel
+ rate = "5.0e6"
+ params = base_params()
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTX @{}".format(rate))
+
+ # Run TX at 6.060606 Msps with one channel
+ rate = "6.060606e6"
+ params = base_params()
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTX @{}".format(rate))
+
+ # Run TX at 6.896552 Msps with one channel
+ rate = "6.896552e6"
+ params = base_params()
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTX @{}".format(rate))
+
+ # Run TRX at 2 Msps with one channel
+ rate = "2e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTRX @{}".format(rate))
+
+ # Run TRX at 2.985075 Msps with one channel
+ rate = "2.985075e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTRX @{}".format(rate))
+
+ # Run TRX at 4 Msps with one channel
+ rate = "4.0e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTRX @{}".format(rate))
+
+ # Run TRX at 5 Msps with one channel
+ rate = "5.0e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTRX @{}".format(rate))
+
+ # Run TRX at 0.5 Msps with two channels
+ rate = "0.5e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0,1"
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0,1"
+ run_test(path, params, iterations, "2xTRX @{}".format(rate))
+
+ # Run TRX at 0.75 Msps with two channels
+ rate = "0.75e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0,1"
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0,1"
+ run_test(path, params, iterations, "2xTRX @{}".format(rate))
+
+ # Run TRX at 1.0 Msps with two channels
+ rate = "1.0e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0,1"
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0,1"
+ run_test(path, params, iterations, "2xTRX @{}".format(rate))
+
+ # Run TRX at 1.25 Msps with two channels
+ rate = "1.25e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0,1"
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0,1"
+ run_test(path, params, iterations, "2xTRX @{}".format(rate))
+
+ # Run TRX at 1.5 Msps with two channels
+ rate = "1.5e6"
+ params = base_params()
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0,1"
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0,1"
+ run_test(path, params, iterations, "2xTRX @{}".format(rate))
+
+def run_N320_tests_for_single_10G(path, addr, iterations, duration):
+ """
+ Runs tests that are in the neighborhood of max rate for single 10 GbE
+ """
+ def base_params(rate):
+ return {
+ "args" : "addr={},master_clock_rate={}".format(addr, rate),
+ "duration" : duration
+ }
+
+ # Run RX at 250 Msps with one channel
+ rate = "250e6"
+ params = base_params(rate)
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ run_test(path, params, iterations, "1xRX @{}".format(rate))
+
+ # Run TX at 250 Msps with one channel
+ rate = "250e6"
+ params = base_params(rate)
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ run_test(path, params, iterations, "1xTX @{}".format(rate))
+
+ # Run TRX at 250 Msps with one channel
+ rate = "250e6"
+ params = base_params(rate)
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0"
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0"
+ run_test(path, params, iterations, "1xTRX @{}".format(rate))
+
+def run_N320_tests_for_dual_10G(path, addr, second_addr, iterations, duration):
+ """
+ Runs tests that are in the neighborhood of max rate for dual 10 GbE
+ """
+ def base_params(rate):
+ return {
+ "args" : "addr={},second_addr={},master_clock_rate={}".format(addr, second_addr, rate),
+ "duration" : duration
+ }
+
+ # Run RX at 250 Msps with two channels
+ rate = "250e6"
+ params = base_params(rate)
+ params["rx_rate"] = rate
+ params["rx_channels"] = "0,1"
+ run_test(path, params, iterations, "2xRX @{}".format(rate))
+
+ # Run TX at 250 Msps with two channels
+ rate = "250e6"
+ params = base_params(rate)
+ params["tx_rate"] = rate
+ params["tx_channels"] = "0,1"
+ run_test(path, params, iterations, "2xTX @{}".format(rate))
+
+ # Run TRX at 250 Msps with two channels
+ rate = "250e6"
+ params = base_params(rate)
+ params["tx_rate"] = rate
+ params["rx_rate"] = rate
+ params["tx_channels"] = "0,1"
+ params["rx_channels"] = "0,1"
+ run_test(path, params, iterations, "2xTRX @{}".format(rate))
+
def main():
path, test_type, addr, second_addr = parse_args()
start_time = time.time()
@@ -639,6 +988,14 @@ def main():
#run_N310_tests_for_Liberio_315(path, 10, 30)
run_N310_tests_for_Liberio_master_next(path, 10, 30)
+ if test_type == Test_Type_N320_Liberio:
+ #run_N320_tests_for_Liberio_315(path, 10, 30)
+ run_N320_tests_for_Liberio_master_next(path, 10, 30)
+
+ if test_type == Test_Type_N320_XG:
+ run_N320_tests_for_single_10G(path, addr, 10, 30)
+ run_N320_tests_for_dual_10G(path, addr, second_addr, 10, 30)
+
end_time = time.time()
elapsed = end_time - start_time
print("Elapsed time: {}".format(datetime.timedelta(seconds=elapsed)))
diff --git a/host/tests/streaming_performance/run_X3xx_max_rate_tests.py b/host/tests/streaming_performance/run_X3xx_max_rate_tests.py
index c317ef689..6feebf3c7 100755
--- a/host/tests/streaming_performance/run_X3xx_max_rate_tests.py
+++ b/host/tests/streaming_performance/run_X3xx_max_rate_tests.py
@@ -65,7 +65,7 @@ def run_test(path, params, iterations, label):
print(label + "\n")
results = batch_run_benchmark_rate.run(path, iterations, params)
stats = batch_run_benchmark_rate.calculate_stats(results)
- print(batch_run_benchmark_rate.get_summary_string(stats))
+ print(batch_run_benchmark_rate.get_summary_string(stats, iterations, params))
def run_tests_for_single_10G(path, addr, iterations, duration):
base_params = {