author     Martin Braun <martin.braun@ettus.com>  2015-01-26 12:17:17 +0100
committer  Martin Braun <martin.braun@ettus.com>  2015-10-19 17:33:17 -0700
commit     508311768ad4f9538eecda16f1cae899ea31184b (patch)
tree       5d244df138615d8fc055c07dc156cf72066af667 /host/utils/converter_benchmark.py
parent     4a6c47682daff6fae533647487297967ae559c49 (diff)
tools: Added converter benchmark tool
Diffstat (limited to 'host/utils/converter_benchmark.py')
-rw-r--r--  host/utils/converter_benchmark.py  193
1 file changed, 193 insertions(+), 0 deletions(-)
diff --git a/host/utils/converter_benchmark.py b/host/utils/converter_benchmark.py
new file mode 100644
index 000000000..c3cab8753
--- /dev/null
+++ b/host/utils/converter_benchmark.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 Ettus Research LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+"""
+Wrap the converter_benchmark tool and produce prettier results.
+"""
+
+from __future__ import print_function
+import argparse
+import csv
+import subprocess
+import sys
+
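+# The following dicts map column names in the benchmark's CSV output to
+# human-readable titles: INTRO_SETUP for the one-off intro lines, and
+# TABLE_SETUP for the per-converter stats table.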
+INTRO_SETUP = {
+    'n_samples': {
+        'title': 'Samples per iteration',
+    },
+    'iterations': {
+        'title': 'Number of iterations',
+    },
+}
+
+TABLE_SETUP = {
+    'prio': {
+        'title': 'Priority',
+    },
+    'duration_ms': {
+        'title': 'Total Duration (ms)',
+    },
+    'avg_duration_ms': {
+        'title': 'Avg. Duration (ms)',
+    },
+}
+
+def run_benchmark(args):
+    """ Run the tool with the given arguments, return the section between the {{{ }}} markers. """
+    call_args = ['./converter_benchmark',]
+    # Re-assemble the command line from the parsed args. Underscores in
+    # attribute names map back to dashes in the option names.
+    for k, v in args.__dict__.iteritems():
+        k = k.replace('_', '-')
+        if v is None:
+            continue
+        if k in ('debug-converter', 'hex'):
+            # Boolean switches: pass the flag itself, no value.
+            if v:
+                call_args.append('--{0}'.format(k))
+            continue
+        call_args.append('--{0}'.format(k))
+        call_args.append(str(v))
+    print(call_args)
+    try:
+        output = subprocess.check_output(call_args)
+    except subprocess.CalledProcessError as ex:
+        print(ex.output)
+        sys.exit(ex.returncode)
+    # The tool prints a free-form header, then the CSV payload between
+    # '{{{' and '}}}'. Split those apart and sanity-check the framing.
+    header_out, csv_output = output.split('{{{', 1)
+    csv_output = csv_output.split('}}}', 1)
+    assert len(csv_output) == 2 and csv_output[1].strip() == ''
+    return header_out, csv_output[0]
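+
+# For reference, the parsing above implies the wrapped binary frames its
+# output like this (a sketch inferred from the code, not a documented format;
+# the column names are the ones looked up by the table printers below):
+#
+#   <free-form header text>
+#   {{{
+#   prio,duration_ms,avg_duration_ms,n_samples,iterations
+#   0,1052.3,1.0523,1000000,1000
+#   }}}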
+
+def print_stats_table(args, csv_output):
+    """
+    Pretty-print the benchmark results as a table.
+    """
+    reader = csv.reader(csv_output.strip().split('\n'), delimiter=',')
+    title_row = reader.next()
+    row_widths = [0,] * len(TABLE_SETUP)
+    for idx, row in enumerate(reader):
+        if idx == 0:
+            # Print intro:
+            for k, v in INTRO_SETUP.iteritems():
+                print("{title}: {value}".format(
+                    title=v['title'],
+                    value=row[title_row.index(k)],
+                ))
+            print("")
+            # Print table header. Column order is whatever the dict iteration
+            # order of TABLE_SETUP happens to be; it is the same for all the
+            # loops below, so the columns stay aligned.
+            for col_idx, item in enumerate(TABLE_SETUP):
+                print(" {title} ".format(title=TABLE_SETUP[item]['title']), end='')
+                row_widths[col_idx] = len(TABLE_SETUP[item]['title'])
+                if col_idx < len(TABLE_SETUP) - 1:
+                    print("|", end='')
+            print("")
+            for col_idx, item in enumerate(TABLE_SETUP):
+                print("-" * (row_widths[col_idx] + 2), end='')
+                if col_idx < len(TABLE_SETUP) - 1:
+                    print("+", end='')
+            print("")
+        # Print actual row data
+        for col_idx, item in enumerate(TABLE_SETUP):
+            format_str = " {{item:>{n}}} ".format(n=row_widths[col_idx])
+            print(format_str.format(item=row[title_row.index(item)]), end='')
+            if col_idx < len(TABLE_SETUP) - 1:
+                print("|", end='')
+        print("")
+
+def print_debug_table(args, csv_output):
+    """
+    Print debug output (input/output sample pairs).
+    """
+    reader = csv.reader(csv_output.strip().split('\n'), delimiter=';')
+    # Zero-padding widths for hex display, keyed by the base type of the
+    # format (the part before the first underscore):
+    print_widths_hex = {
+        'u8': 2,
+        'sc16': 8,
+        'fc32': 16,
+        's16': 4,
+    }
+    if args.hex:
+        # 'in' is a Python keyword, so that attribute is fetched via getattr().
+        format_str = "{{0[0]:0>{n_in}}} => {{0[1]:0>{n_out}}}".format(
+            n_in=print_widths_hex[getattr(args, 'in').split('_', 1)[0]],
+            n_out=print_widths_hex[args.out.split('_', 1)[0]]
+        )
+    else:
+        format_str = "{0[0]}\t=>\t{0[1]}"
+    for row in reader:
+        print(format_str.format(row))
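+
+# In --hex mode this prints one zero-padded input/output pair per line, e.g.
+# for --in sc16 --out fc32 (illustrative values; sc16 full scale maps to 1.0):
+#
+#   7fff0000 => 3f80000000000000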
+
+def setup_argparse():
+    """ Configure arg parser. """
+    parser = argparse.ArgumentParser(
+        description="UHD Converter Benchmark + Debugging Utility.",
+    )
+    parser.add_argument(
+        "-i", "--in", required=True,
+        help="Input format (e.g. 'sc16')",
+    )
+    parser.add_argument(
+        "-o", "--out", required=True,
+        help="Output format (e.g. 'sc16')",
+    )
+    parser.add_argument(
+        "-s", "--samples", type=int,
+        help="Number of samples per iteration",
+    )
+    parser.add_argument(
+        "-N", "--iterations", type=int,
+        help="Number of iterations per benchmark",
+    )
+    parser.add_argument(
+        "-p", "--priorities",
+        help="Converter priorities. Can be 'default', 'all', or a comma-separated list of priorities.",
+    )
+    parser.add_argument(
+        "--max-prio", type=int,
+        help="Largest available priority (advanced feature)",
+    )
+    parser.add_argument(
+        "--n-inputs", type=int,
+        help="Number of input vectors",
+    )
+    parser.add_argument(
+        "--n-outputs", type=int,
+        help="Number of output vectors",
+    )
+    parser.add_argument(
+        "--seed-mode", choices=('random', 'incremental'),
+        help="How to initialize the data: random or incremental",
+    )
+    parser.add_argument(
+        "--debug-converter", action='store_true',
+        help="Skip the benchmark and print conversion results instead. Implies iterations==1 and runs only a single converter.",
+    )
+    parser.add_argument(
+        "--hex", action='store_true',
+        help="In debug mode, display data as hex values.",
+    )
+    return parser
+
+def main():
+    """ Go, go, go! """
+    args = setup_argparse().parse_args()
+    print("Running converter benchmark...")
+    header_out, csv_output = run_benchmark(args)
+    print(header_out)
+    if args.debug_converter:
+        print_debug_table(args, csv_output)
+    else:
+        print_stats_table(args, csv_output)
+
+if __name__ == "__main__":
+    main()
+