1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
|
#!/usr/bin/env python
import os
import sys
import yaml
import unittest
import re
import time
import logging
from subprocess import Popen, PIPE, STDOUT
from usrp_probe import get_usrp_list
#--------------------------------------------------------------------------
# Application
#--------------------------------------------------------------------------
class shell_application(object):
    """
    Wrapper for applications that are in $PATH.
    Note: The CMake infrastructure makes sure all examples and utils are in $PATH.
    """
    def __init__(self, name):
        # Executable name; resolved through $PATH by Popen.
        self.name = name
        self.stdout = ''
        self.stderr = ''
        self.returncode = None
        self.exec_time = None
    def run(self, args=None):
        """ Run the application with the given argument list and block until it
        exits. Stores captured stdout/stderr, the return code, and the
        wall-clock execution time on self. """
        # Use None instead of a mutable default argument ([]), which would be
        # shared between all calls of run().
        if args is None:
            args = []
        cmd_line = [self.name]
        cmd_line.extend(args)
        start_time = time.time()
        proc = Popen(cmd_line, stdout=PIPE, stderr=PIPE, close_fds=True)
        self.stdout, self.stderr = proc.communicate()
        self.returncode = proc.returncode
        self.exec_time = time.time() - start_time
#--------------------------------------------------------------------------
# Test case base
#--------------------------------------------------------------------------
class uhd_test_case(unittest.TestCase):
    """
    Base class for UHD test cases.

    Reads the device args and the results/log file locations from environment
    variables set by the devtest runner, keeps a per-serial/per-class results
    dictionary, and writes it back to the results file on teardown.
    """
    test_name = '--TEST--'
    def set_up(self):
        """
        Override this to add own setup code per test.
        """
        pass
    def setUp(self):
        """ Identify the device under test, merge any existing results file,
        set up logging, then call the per-test set_up() hook. """
        self.name = self.__class__.__name__
        self.test_id = self.id().split('.')[-1]
        self.results = {}
        self.results_file = os.getenv('_UHD_TEST_RESULTSFILE', "")
        if self.results_file and os.path.isfile(self.results_file):
            # Merge with results written by previously-run test cases
            # instead of clobbering them on tearDown().
            with open(self.results_file) as res_file:
                self.results = yaml.safe_load(res_file.read()) or {}
        self.args_str = os.getenv('_UHD_TEST_ARGS_STR', "")
        self.usrp_info = get_usrp_list(self.args_str)[0]
        serial = self.usrp_info['serial']
        # dict.has_key() is Python-2-only; `in` works on both 2 and 3.
        if serial not in self.results:
            self.results[serial] = {}
        if self.name not in self.results[serial]:
            self.results[serial][self.name] = {}
        self.setup_logger()
        self.set_up()
    def setup_logger(self):
        " Add logging infrastructure "
        self.log = logging.getLogger("devtest.{name}".format(name=self.name))
        self.log_file = os.getenv('_UHD_TEST_LOGFILE', "devtest.log")
        #self.log_level = int(os.getenv('_UHD_TEST_LOG_LEVEL', logging.DEBUG))
        #self.print_level = int(os.getenv('_UHD_TEST_PRINT_LEVEL', logging.WARNING))
        self.log_level = logging.DEBUG
        self.print_level = logging.WARNING
        file_handler = logging.FileHandler(self.log_file)
        file_handler.setLevel(self.log_level)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(self.print_level)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        file_handler.setFormatter(formatter)
        console_handler.setFormatter(formatter)
        # The logger passes everything through; the handlers do the filtering.
        self.log.setLevel(logging.DEBUG)
        self.log.addHandler(file_handler)
        self.log.addHandler(console_handler)
        self.log.info("Starting test with device: {dev}".format(dev=self.args_str))
    def tear_down(self):
        """ Override this to add own teardown code per test. """
        pass
    def tearDown(self):
        """ Call the per-test tear_down() hook, then persist results. """
        self.tear_down()
        if self.results_file:
            with open(self.results_file, 'w') as res_file:
                res_file.write(yaml.dump(self.results, default_flow_style=False))
    def report_result(self, testname, key, value):
        """ Store a result as a key/value pair.
        After completion, all results for one test are written to the results file.
        """
        test_results = self.results[self.usrp_info['serial']][self.name]
        if testname not in test_results:
            test_results[testname] = {}
        test_results[testname][key] = value
    def create_addr_args_str(self, argname="args"):
        """ Returns an args string, usually '--args "type=XXX,serial=YYY" """
        if len(self.args_str) == 0:
            return ''
        return '--{}={}'.format(argname, self.args_str)
    def filter_warnings(self, errstr):
        """ Searches errstr for UHD warnings, removes them, and puts them into a separate string.
        Returns (errstr, warnstr), where errstr no longer has warning. """
        # A warning block is the "UHD Warning:" line plus all following
        # indented (space-prefixed) continuation lines.
        warn_re = re.compile(r"UHD Warning:\n(?: .*\n)+")
        warnstr = "\n".join(warn_re.findall(errstr)).strip()
        errstr = warn_re.sub('', errstr).strip()
        return (errstr, warnstr)
    def filter_stderr(self, stderr, run_results=None):
        """ Filters the output to stderr. run_results is a dictionary.
        This function will:
        - Remove UUUUU... strings, since they are generally not a problem.
        - Remove all DDDD and SSSS strings, and add run_results['has_S'] = True
          and run_results['has_D'] = True.
        - Remove warnings and put them in run_results['warnings']
        - Put the filtered error string into run_results['errors'] and returns the dictionary
        """
        # A mutable default ({}) would be mutated and shared between calls;
        # create a fresh dict when the caller does not supply one.
        if run_results is None:
            run_results = {}
        errstr, run_results['warnings'] = self.filter_warnings(stderr)
        # Scan for underruns and sequence errors / dropped packets not detected in the counter
        errstr = re.sub('UU+', '', errstr)
        (errstr, n_subs) = re.subn('SS+', '', errstr)
        if n_subs:
            run_results['has_S'] = True
        (errstr, n_subs) = re.subn('DD+', '', errstr)
        if n_subs:
            run_results['has_D'] = True
        errstr = re.sub("\n\n+", "\n", errstr)
        run_results['errors'] = errstr.strip()
        return run_results
class uhd_example_test_case(uhd_test_case):
    """
    A test case that runs an example.
    """
    def setup_example(self):
        """
        Override this to add specific setup code.
        """
        pass
    def set_up(self):
        """ Hook called by the base class's setUp(); defers to setup_example(). """
        self.setup_example()
    def run_test(self, test_name, test_args):
        """
        Override this to run the actual example.
        Needs to return either a boolean or a dict with key 'passed' to determine
        pass/fail.
        """
        raise NotImplementedError
    def run_example(self, example, args):
        """
        Run `example' (which has to be a UHD example or utility) with `args'.
        Return results and the app object.
        """
        self.log.info("Running example: `{example} {args}'".format(example=example, args=" ".join(args)))
        app = shell_application(example)
        app.run(args)
        run_results = {
            'return_code': app.returncode,
            'passed': False,
            'has_D': False,
            'has_S': False,
        }
        # filter_stderr() fills in warnings/errors and flips has_D/has_S.
        run_results = self.filter_stderr(app.stderr, run_results)
        self.log.info('STDERR Output:')
        self.log.info(str(app.stderr))
        return (app, run_results)
    def report_example_results(self, test_name, run_results):
        """ Log every result of one example run and store it via report_result().
        Also derives summary 'status' and 'errors' entries when available. """
        for key in sorted(run_results):
            self.log.info('{key} = {val}'.format(key=key, val=run_results[key]))
            self.report_result(
                test_name,
                key, run_results[key]
            )
        # dict.has_key() is Python-2-only; `in` works on both 2 and 3.
        if 'passed' in run_results:
            self.report_result(
                test_name,
                'status',
                'Passed' if run_results['passed'] else 'Failed',
            )
        if 'errors' in run_results:
            self.report_result(
                test_name,
                'errors',
                'Yes' if run_results['errors'] else 'No',
            )
    def test_all(self):
        """
        Hook for test runner. Needs to be a class method that starts with 'test'.
        Calls run_test().
        """
        # .items() (not the Python-2-only .iteritems()) works on both 2 and 3.
        for test_name, test_args in self.test_params.items():
            # Skip tests that are restricted to product types other than the
            # device under test's.
            if 'products' in test_args and self.usrp_info['product'] not in test_args['products']:
                continue
            run_results = self.run_test(test_name, test_args)
            passed = bool(run_results)
            if isinstance(run_results, dict):
                passed = run_results['passed']
            self.assertTrue(
                passed,
                msg="Errors occurred during test `{t}'. Check log file for details.\nRun results:\n{r}".format(
                    t=test_name, r=yaml.dump(run_results, default_flow_style=False)
                )
            )
|