#!/usr/bin/env python
#
# Copyright 2015-2016 Ettus Research LLC
# Copyright 2018 Ettus Research, a National Instruments Company
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
""" Test using benchmark_rate. """

import re

from uhd_test_base import uhd_example_test_case

# Default thresholds forwarded to benchmark_rate's --overrun-threshold,
# --underrun-threshold, --drop-threshold and --seq-threshold options.
DEFAULT_O_THRESHOLD = 1000
DEFAULT_U_THRESHOLD = 1000
DEFAULT_D_THRESHOLD = 50
DEFAULT_S_THRESHOLD = 50


class uhd_benchmark_rate_test(uhd_example_test_case):
"""
Run benchmark_rate in various configurations.
"""
    # Test cases; typically populated externally, e.g. by a device-specific
    # devtest script assigning to this class attribute.
    tests = {}

    def setup_example(self):
        """
        Set the test parameters.
        """
        self.test_params = uhd_benchmark_rate_test.tests

    def run_test(self, test_name, test_args):
        """
        Run benchmark_rate with the given parameters, parse its output, and
        write the results to file. Tx and Rx are enabled individually via
        test_args['direction'], but a test only passes if both directions
        moved samples.
        """
# rel_samp_err_threshold = 0.1 # 10% off is still quite generous
samp_rate = test_args.get('rate', 1e6)
duration = test_args.get('duration', 1)
chan = test_args.get('chan', '0')
n_chans = len(chan.split(","))
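        # Total samples expected per direction, summed across channels: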
expected_samples = n_chans * duration * samp_rate
self.log.info('Running test {n}, Channel = {c}, Sample Rate = {r}'.format(
n=test_name, c=chan, r=samp_rate,
))
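        # Assemble the benchmark_rate command line; the threshold options let
        # the example itself flag failure when the error counters exceed them.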
args = [
self.create_addr_args_str(),
'--duration', str(duration),
'--channels', str(chan),
'--overrun-threshold',
str(test_args.get('acceptable-overruns', DEFAULT_O_THRESHOLD)),
'--underrun-threshold',
str(test_args.get('acceptable-underruns', DEFAULT_U_THRESHOLD)),
'--drop-threshold',
str(test_args.get('acceptable-D', DEFAULT_D_THRESHOLD)),
'--seq-threshold',
str(test_args.get('acceptable-S', DEFAULT_S_THRESHOLD)),
]
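        # Enable Tx and/or Rx according to the test's 'direction' setting.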
if 'tx' in test_args.get('direction', ''):
args.append('--tx_rate')
args.append(str(samp_rate))
if 'rx' in test_args.get('direction', ''):
args.append('--rx_rate')
args.append(str(samp_rate))
(app, run_results) = self.run_example('benchmark_rate', args)
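        # Parse the summary counters from the example's stdout; -1 marks a
        # counter whose line was not found.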
match = re.search(r'(Num received samples):\s*(.*)', app.stdout)
run_results['num_rx_samples'] = int(match.group(2)) if match else -1
if run_results['num_rx_samples'] != -1:
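            # Relative deviation from the expected count; 'rx_buffer' is
            # presumably a buffering allowance subtracted from the total.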
run_results['rel_rx_samples_error'] = 1.0 * abs(
run_results['num_rx_samples']
- test_args.get('rx_buffer', 0)
- expected_samples
) / expected_samples
        else:
            # Sentinel: no Rx sample count could be parsed from the output.
            run_results['rel_rx_samples_error'] = 100
match = re.search(r'(Num dropped samples):\s*(.*)', app.stdout)
run_results['num_rx_dropped'] = int(match.group(2)) if match else -1
match = re.search(r'(Num overflows detected):\s*(.*)', app.stdout)
run_results['num_rx_overruns'] = int(match.group(2)) if match else -1
match = re.search(r'(Num transmitted samples):\s*(.*)', app.stdout)
run_results['num_tx_samples'] = int(match.group(2)) if match else -1
if run_results['num_tx_samples'] != -1:
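            # Same relative-error computation for the Tx side.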
run_results['rel_tx_samples_error'] = 1.0 * abs(
run_results['num_tx_samples']
- test_args.get('tx_buffer', 0)
- expected_samples
) / expected_samples
        else:
            # Sentinel: no Tx sample count could be parsed from the output.
            run_results['rel_tx_samples_error'] = 100
match = re.search(r'(Num sequence errors \(Tx\)):\s*(.*)', app.stdout)
run_results['num_tx_seqerrs'] = int(match.group(2)) if match else -1
match = re.search(r'(Num underflows detected):\s*(.*)', app.stdout)
run_results['num_tx_underruns'] = int(match.group(2)) if match else -1
match = re.search(r'(Num timeouts \(Rx\)):\s*(.*)', app.stdout)
run_results['num_timeouts_rx'] = int(match.group(2)) if match else -1
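        # Final verdict: underruns are checked against the test-specific
        # limit, all other error counters must be zero, and both directions
        # must have moved samples.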
run_results['passed'] = all([
run_results['return_code'] == 0,
run_results['num_rx_dropped'] == 0,
run_results['num_tx_seqerrs'] == 0,
run_results['num_tx_underruns'] <= test_args.get('acceptable-underruns', 0),
run_results['num_rx_samples'] > 0,
run_results['num_tx_samples'] > 0,
run_results['num_timeouts_rx'] == 0,
# run_results['rel_rx_samples_error'] < rel_samp_err_threshold,
# run_results['rel_tx_samples_error'] < rel_samp_err_threshold,
])
self.report_example_results(test_name, run_results)
return run_results
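

# Illustrative sketch only (not part of the original file): a device-specific
# devtest script would typically populate the test table before this class is
# collected by the test runner. The test name and all parameter values below
# are assumed for illustration.
#
# uhd_benchmark_rate_test.tests = {
#     'mimo_fast': {
#         'duration': 10,          # seconds
#         'direction': 'tx,rx',    # both directions must pass
#         'chan': '0,1',           # two channels
#         'rate': 12.5e6,          # samples per second
#         'acceptable-underruns': 500,
#     },
# }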