Diffstat (limited to 'tools')
-rw-r--r--   tools/kitchen_sink/kitchen_sink.cpp                           | 171
-rw-r--r--   tools/uhd_txrx_debug_prints/uhd_txrx_debug_prints_README.md   |  17
-rwxr-xr-x   tools/uhd_txrx_debug_prints/uhd_txrx_debug_prints_graph.py    | 415
3 files changed, 577 insertions, 26 deletions
diff --git a/tools/kitchen_sink/kitchen_sink.cpp b/tools/kitchen_sink/kitchen_sink.cpp index c7265bea4..771329425 100644 --- a/tools/kitchen_sink/kitchen_sink.cpp +++ b/tools/kitchen_sink/kitchen_sink.cpp @@ -278,12 +278,16 @@ typedef struct RxParams { bool single_packets; bool size_map; size_t rx_sample_limit; - std::ofstream* capture_file; + std::vector<std::ofstream*> capture_files; bool set_rx_freq; double rx_freq; double rx_freq_delay; double rx_lo_offset; bool interleave_rx_file_samples; + bool ignore_late_start; + bool ignore_bad_packets; + bool ignore_timeout; + bool ignore_unexpected_error; } RX_PARAMS; static uint64_t recv_samp_count_progress = 0; @@ -465,25 +469,37 @@ void benchmark_rx_rate( recv_done.notify_one(); } - if (params.capture_file != NULL) + if (params.capture_files.empty() == false) { - if (params.interleave_rx_file_samples) + size_t channel_count = rx_stream->get_num_channels(); + + if ((channel_count == 1) || ((channel_count > 1) && (params.capture_files.size() == 1))) { - for (size_t i = 0; i < recv_samps; ++i) + if (params.interleave_rx_file_samples) + { + for (size_t i = 0; i < recv_samps; ++i) + { + for (size_t j = 0; j < channel_count; ++j) + { + params.capture_files[0]->write((const char*)buffs[j] + (bytes_per_samp * i), bytes_per_samp); + } + } + } + else { - size_t channel_count = rx_stream->get_num_channels(); - for (size_t j = 0; j < channel_count; ++j) + for (size_t i = 0; i < channel_count; ++i) { - params.capture_file->write((const char*)buffs[j] + (bytes_per_samp * i), bytes_per_samp); + size_t num_bytes = recv_samps * bytes_per_samp; + params.capture_files[0]->write((const char*)buffs[i], num_bytes); } } } else { - for (size_t i = 0; i < rx_stream->get_num_channels(); ++i) + for (size_t n = 0; n < channel_count; ++n) { size_t num_bytes = recv_samps * bytes_per_samp; - params.capture_file->write((const char*)buffs[i], num_bytes); + params.capture_files[n]->write((const char*)buffs[n], num_bytes); } } } @@ -498,13 +514,17 @@ void benchmark_rx_rate( //} //handle the error codes - switch(md.error_code) { + switch(md.error_code) + { case uhd::rx_metadata_t::ERROR_CODE_NONE: - if (had_an_overflow) { + { + if (had_an_overflow) + { had_an_overflow = false; num_dropped_samps += (md.time_spec - last_time).to_ticks(rate); // FIXME: Check this as 'num_dropped_samps' has come out -ve } break; + } // ERROR_CODE_OVERFLOW can indicate overflow or sequence error case uhd::rx_metadata_t::ERROR_CODE_OVERFLOW: // 'recv_samps' should be 0 @@ -523,13 +543,43 @@ void benchmark_rx_rate( ss << HEADER_RX"(" << get_stringified_time() << ") "; ss << boost::format("Timeout") << std::endl; std::cout << ss.str(); + if (params.ignore_timeout == false) + sig_int_handler(-1); + break; + } + + case uhd::rx_metadata_t::ERROR_CODE_LATE_COMMAND: + { + std::stringstream ss; + ss << HEADER_RX"(" << get_stringified_time() << ") "; + ss << boost::format("Late command") << std::endl; + std::cout << ss.str(); + if (params.ignore_late_start == false) + sig_int_handler(-1); + break; + } + + case uhd::rx_metadata_t::ERROR_CODE_BAD_PACKET: + { + std::stringstream ss; + ss << HEADER_RX"(" << get_stringified_time() << ") "; + ss << boost::format("Bad packet") << std::endl; + std::cout << ss.str(); + if (params.ignore_bad_packets == false) + sig_int_handler(-1); break; } default: - std::cerr << HEADER_RX"Error code: " << md.error_code << std::endl; - std::cerr << HEADER_RX"Unexpected error on recv, continuing..." 
<< std::endl; + { + std::stringstream ss; + ss << HEADER_RX"(" << get_stringified_time() << ") "; + ss << (boost::format("Unexpected error (code: %d)") % md.error_code) << std::endl; + std::cout << ss.str(); + if (params.ignore_unexpected_error == false) + sig_int_handler(-1); break; + } } print_msgs(); @@ -550,11 +600,14 @@ void benchmark_rx_rate( rx_stream->issue_stream_cmd(uhd::stream_cmd_t::STREAM_MODE_STOP_CONTINUOUS); } - if (params.capture_file != NULL) + if (params.capture_files.empty() == false) { - std::cout << HEADER_RX"Closing capture file..." << std::endl; - delete params.capture_file; - params.capture_file = NULL; + std::cout << HEADER_RX"Closing capture files..." << std::endl; + + for (size_t n = 0; n < params.capture_files.size(); ++n) + delete params.capture_files[n]; + + params.capture_files.clear(); } l.lock(); @@ -628,7 +681,7 @@ void benchmark_tx_rate( size_t total_packet_count = (total_length / max_samps_per_packet) + ((total_length % max_samps_per_packet) ? 1 : 0); if ((params.use_tx_eob) && (params.tx_time_between_bursts > 0)) packet_time += uhd::time_spec_t(params.tx_time_between_bursts); - size_t max_late_count = (size_t)(rate / (double)packet_time.to_ticks(rate)) * total_packet_count; + size_t max_late_count = (size_t)(rate / (double)packet_time.to_ticks(rate)) * total_packet_count * tx_stream->get_num_channels(); // Also need to take into account number of radios // Will be much higher L values (e.g. 31K) on e.g. B200 when entire TX pipeline is full of late packets (large size due to total TX buffering throughout transport & DSP) @@ -1139,7 +1192,7 @@ int UHD_SAFE_MAIN(int argc, char *argv[]){ ("progress-interval", po::value<double>(&progress_interval)->default_value(progress_interval), "seconds between bandwidth updates (0 disables)") ("rx-progress-interval", po::value<double>(&rx_progress_interval), "seconds between RX bandwidth updates (0 disables)") ("tx-progress-interval", po::value<double>(&tx_progress_interval), "seconds between TX bandwidth updates (0 disables)") - ("tx-offset", po::value<double>(&tx_time_offset), "seconds that TX should be in front of RX when following") + ("tx-offset", po::value<double>(&tx_time_offset)->default_value(0.0), "seconds that TX should be in front of RX when following") ("tx-length", po::value<size_t>(&tx_burst_length)->default_value(0), "TX burst length in samples (0: maximum packet size)") ("tx-flush", po::value<size_t>(&tx_flush_length)->default_value(0), "samples to flush TX with after burst") ("tx-burst-separation", po::value<double>(&tx_time_between_bursts), "seconds between TX bursts") @@ -1177,6 +1230,10 @@ int UHD_SAFE_MAIN(int argc, char *argv[]){ ("recover-late", "recover from excessive late TX packets") ("disable-async", "disable the async message thread") ("interleave-rx-file-samples", "interleave individual samples (default is interleaving buffers)") + ("ignore-late-start", "continue receiving even if stream command was late") + ("ignore-bad-packets", "continue receiving after a bad packet") + ("ignore-timeout", "continue receiving after timeout") + ("ignore-unexpected", "continue receiving after unexpected error") // record TX/RX times // Optional interruption // simulate u / o at random / pulses @@ -1234,6 +1291,10 @@ int UHD_SAFE_MAIN(int argc, char *argv[]){ bool recover_late = (vm.count("recover-late") > 0); bool enable_async = (vm.count("disable-async") == 0); bool interleave_rx_file_samples = (vm.count("interleave-rx-file-samples") > 0); + bool ignore_late_start = (vm.count("ignore-late-start") > 0); + 
bool ignore_bad_packets = (vm.count("ignore-bad-packets") > 0); + bool ignore_timeout = (vm.count("ignore-timeout") > 0); + bool ignore_unexpected_error = (vm.count("ignore-unexpected") > 0); boost::posix_time::time_duration interrupt_timeout_duration(boost::posix_time::seconds(long(interrupt_timeout)) + boost::posix_time::microseconds(long((interrupt_timeout - floor(interrupt_timeout))*1e6))); @@ -1273,6 +1334,36 @@ int UHD_SAFE_MAIN(int argc, char *argv[]){ return ~0; } + bool rx_filename_has_format = false; + if (rx_channel_nums.size() > 0) + { + std::string str0; + try + { + str0 = boost::str(boost::format(rx_file) % 0); + rx_filename_has_format = true; + } + catch (...) + { + } + + bool format_different = false; + try + { + std::string str1(boost::str(boost::format(rx_file) % 1)); + format_different = (str0 != str1); + } + catch (...) + { + } + + if ((rx_filename_has_format) && (format_different == false)) + { + std::cout << HEADER_ERROR "Multi-channel RX capture filename format did not produce unique names" << std::endl; + return ~0; + } + } + if ((tx_rx_sync) || (tx_follows_rx)) { if (tx_channel_nums.size() == 0) @@ -1462,11 +1553,33 @@ int UHD_SAFE_MAIN(int argc, char *argv[]){ } } - rx_params.capture_file = NULL; if (rx_file.empty() == false) { - std::cout << boost::format(HEADER_RX"Capturing to \"%s\"") % rx_file << std::endl; - rx_params.capture_file = new std::ofstream(rx_file.c_str(), std::ios::out); + if (rx_filename_has_format == false) + { + if (rx_stream->get_num_channels() == 1) + { + std::cout << boost::format(HEADER_RX"Capturing single channel to \"%s\"") % rx_file << std::endl; + } + else + { + if (interleave_rx_file_samples) + std::cout << boost::format(HEADER_RX"Capturing all %d channels as interleaved samples to \"%s\"") % rx_stream->get_num_channels() % rx_file << std::endl; + else + std::cout << boost::format(HEADER_RX"Capturing all %d channels as interleaved buffers to \"%s\"") % rx_stream->get_num_channels() % rx_file << std::endl; + } + + rx_params.capture_files.push_back(new std::ofstream(rx_file.c_str(), std::ios::out)); + } + else + { + for (size_t n = 0; n < rx_stream->get_num_channels(); ++n) + { + std::cout << boost::format(HEADER_RX"Capturing channel %d to \"%s\"") % n % (boost::str(boost::format(rx_file) % n)) << std::endl; + std::string rx_file_name(boost::str(boost::format(rx_file) % n)); + rx_params.capture_files.push_back(new std::ofstream(rx_file_name.c_str(), std::ios::out)); + } + } } std::cout << boost::format( @@ -1488,6 +1601,10 @@ int UHD_SAFE_MAIN(int argc, char *argv[]){ rx_params.rx_freq_delay = rx_freq_delay; rx_params.rx_lo_offset = rx_lo_offset; rx_params.interleave_rx_file_samples = interleave_rx_file_samples; + rx_params.ignore_late_start = ignore_late_start; + rx_params.ignore_bad_packets = ignore_bad_packets; + rx_params.ignore_timeout = ignore_timeout; + rx_params.ignore_unexpected_error = ignore_unexpected_error; thread_group.create_thread(boost::bind( &benchmark_rx_rate, @@ -1500,7 +1617,7 @@ int UHD_SAFE_MAIN(int argc, char *argv[]){ if (tx_channel_nums.size() > 0) { //create a transmit streamer size_t bytes_per_tx_sample = uhd::convert::get_bytes_per_item(tx_cpu); - std::cout << boost::format(HEADER_TX"CPU bytes per TX sample: %d for '%s'") % bytes_per_tx_sample % rx_cpu << std::endl; + std::cout << boost::format(HEADER_TX"CPU bytes per TX sample: %d for '%s'") % bytes_per_tx_sample % tx_cpu << std::endl; size_t wire_bytes_per_tx_sample = uhd::convert::get_bytes_per_item(tx_otw); std::cout << boost::format(HEADER_TX"OTW bytes 
per TX sample: %d for '%s'") % wire_bytes_per_tx_sample % tx_otw << std::endl;
@@ -1635,11 +1752,11 @@ int UHD_SAFE_MAIN(int argc, char *argv[]){
         else
             std::cout << HEADER "Waiting for Q..." << std::endl;
 
-        while (stop_signal_called == false)
+        do
         {
             // FIXME: Stop time
 
-            if (kbhit(interactive_sleep))
+            if (kbhit(0))
             {
                 char c = fgetc(stdin);
                 if (c == EOF)
@@ -1669,7 +1786,9 @@ int UHD_SAFE_MAIN(int argc, char *argv[]){
             }
 
             print_msgs();
-        }
+
+            abort_event.timed_wait(l_stop, boost::posix_time::milliseconds(interactive_sleep));
+        } while (stop_signal_called == false);
     }
     else if (duration > 0)
     {
diff --git a/tools/uhd_txrx_debug_prints/uhd_txrx_debug_prints_README.md b/tools/uhd_txrx_debug_prints/uhd_txrx_debug_prints_README.md
new file mode 100644
index 000000000..1425a4d96
--- /dev/null
+++ b/tools/uhd_txrx_debug_prints/uhd_txrx_debug_prints_README.md
@@ -0,0 +1,17 @@
+UHD TX/RX DEBUG PRINTS
+======================
+
+A tool for extensive debugging with UHD.
+
+Install
+-------
+Activate it by ticking `UHD_TXRX_DEBUG_PRINTS` in cmake-gui for your UHD installation. Then recompile and reinstall UHD.
+
+Use
+---
+Run your application and redirect stderr to a file.
+This is typically done with<br>
+`app_call 2> dbg_print_file.txt`<br>
+After the application has finished, offline processing of the gathered data is done with the Python script<br>
+`uhd_txrx_debug_prints_graph.py`<br>
+There are a lot of functions that help to preprocess the data and that describe the actual meaning of all the data points. In the end, though, it comes down to what the user wants to plot and inspect.
\ No newline at end of file diff --git a/tools/uhd_txrx_debug_prints/uhd_txrx_debug_prints_graph.py b/tools/uhd_txrx_debug_prints/uhd_txrx_debug_prints_graph.py new file mode 100755 index 000000000..b0f6681f6 --- /dev/null +++ b/tools/uhd_txrx_debug_prints/uhd_txrx_debug_prints_graph.py @@ -0,0 +1,415 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2013-2014 Ettus Research LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# +""" +Plots the output of the streamers that is produced when DEBUG_TXRX +is enabled. +""" + +import argparse +import matplotlib.pyplot as plt +import numpy as np + +# This is a top level function to load a debug file. It will return a list of lists with all the data. +def get_data(filename): + res = [] + with open(filename) as f: + for line in f.readlines(): + line = line.rstrip('\n') + s = line.split(',') + res.append(s) + res = fix_known_data_corruption_patterns(res) + return res + +# Sometimes 'O' etc. get printed to stderr. This disturbs further processing. Thus, known pattern will be removed here. +def fix_known_data_corruption_patterns(data): + # Some lines might be corrupted in the way that e.g. UHD prints sth else in the same line. + # O, D, Press Enter... are known patterns. These should be fixed. + counts = {'O': 0, 'D': 0, 'exit_msg': 0} + for i in range(len(data)): + if data[i][0].startswith('O'): + counts['O'] += 1 + data[i][0] = data[i][0].replace('O', '') + if data[i][0].startswith('D'): + counts['D'] += 1 + data[i][0] = data[i][0].replace('D', '') + if data[i][0].startswith('Press Enter to quit: '): + counts['exit_msg'] += 1 + data[i][0] = data[i][0].replace('Press Enter to quit: ', '') + print counts + return data + +# Extract lines with a certain prefix. +def extract_lines(prefix, data): + res = [] + for line in data: + if line[0] == prefix: + res.append(line[1:]) + return res + +# boolean values are stored as true/false. Convert them to real booleans again. +def convert_to_bool(data): + res = [] + for d in data: + res.append(d == "true") + return res + +# The different outputs have different structures. With this function you can conert them back to their actual type. 
+def convert_data_set(data, reqtype): + zdata = zip(*data) + res = [] + if len(zdata) == 0: + return res + for i in range(len(reqtype)): + if reqtype[i] == np.bool: + res.append(np.asarray(convert_to_bool(zdata[i]), dtype=np.bool)) + else: + res.append(np.asarray(zdata[i], dtype=reqtype[i])) + return res + +# Wrapper for super_send_packet_handler data to be used with convert_data_set +def convert_super_send_packet_handler_data(data): + # wallclock, timeout, avail_samps, sent_samps, sob, eob, has_time_spec, time_spec (ticks) + reqtype = [np.uint64, np.float, np.int, np.int, np.bool, np.bool, np.bool, np.uint64] + cdata = convert_data_set(data, reqtype) + return cdata + +# same o' same o' +def convert_super_recv_packet_handler_data(data): + # wallclock, timeout, requested_samps, received_samps, one_packet, error_code, sob, eob, more_fragments, fragment_offset, has_timespec, time_spec + reqtype = [np.uint64, np.float, np.int, np.int, np.bool, np.int, np.bool, np.bool, np.bool, np.int, np.bool, np.uint64] + cdata = convert_data_set(data, reqtype) + return cdata + +def extract_super_recv_packet_handler_data(data): + pref1 = "super_recv_packet_handler" + pref2 = "recv" + super_recv = extract_lines(pref1, data) + recv = extract_lines(pref2, super_recv) + recv = convert_super_recv_packet_handler_data(recv) + return recv + +# Sometimes TX or RX is interrupted by system jiffies. Those are found by this function. +def find_jiffy(data, thr): + res = [] + last = data[0] + for i in range(len(data)): + if data[i] - last > thr: + res.append([last, data[i]]) + last = data[i] + return res + + +# Get difference between tx and rx wallclock +def get_diff(tx, rx): + print "get diff, len(rx) = ", len(rx) + diff = [0] * len(rx[0]) + for i in range(len(diff)): + r = rx[3][i] + idx = rx[0][i] - 1 # call count starts at 1. idx is 0 based. + t = tx[3][idx] + diff[i] = t - r + return diff + + +def bps(samps, time): + bp = [] + last = time[0]-1000 + for i in range(len(samps)): + td = time[i] - last + last = time[i] + td = td /1e6 + bp.append(samps[i] * 4 / td) + return bp + + +# same as the other wrappers this time for libusb1 +def extract_libusb(trx, data): + pr1 = "libusb1_zero_copy" + pr2 = "libusb_async_cb" + ldata = extract_lines(pr1, data) + edata = extract_lines(pr2, ldata) + d = extract_lines(trx, edata) + # buff_num, actual_length, status, end_time, start_time + reqtype = [np.int, np.int, np.int, np.uint64, np.uint64] + return convert_data_set(d, reqtype) + + +# Extract data for stream buffers. Typically there are 16 TX and 16 RX buffers. And there numbers are static. Though the number of buffers might be changed and the constant parameters must be adjusted in this case. 
+def extract_txrx(data): + tx = [[], [], [], [], []] + rx = [[], [], [], [], []] + for i in range(len(data[0])): + print data[0][i] + if data[0][i] > 31 and data[0][i] < 48: + rx[0].append(data[0][i]) + rx[1].append(data[1][i]) + rx[2].append(data[2][i]) + rx[3].append(data[3][i]) + rx[4].append(data[4][i]) + #print "tx\t", data[0][i], "\t", data[3][i], "\t", data[4][i] + if data[0][i] > 47 and data[0][i] < 64: + tx[0].append(data[0][i]) + tx[1].append(data[1][i]) + tx[2].append(data[2][i]) + tx[3].append(data[3][i]) + tx[4].append(data[4][i]) + #print "rx\t", data[0][i], "\t", data[3][i], "\t", data[4][i] + return [tx, rx] + +# Calculate momentary throughput +def throughput(data): + start = data[2][0] + total = data[1][-1] - data[2][0] + print total + thr = np.zeros(total) + + for i in range(len(data[0])): + s = data[2][i] - start + f = data[1][i] - start + ticks = data[1][i] - data[2][i] + pertick = 1. * data[0][i] / ticks + vals = [pertick] * ticks + thr[s:f] = np.add(thr[s:f], vals) + #print pertick + print np.shape(thr) + return thr + + +# Calculate a moving average +def ma(data, wl): + ap = np.zeros(wl) + data = np.concatenate((ap, data, ap)) + print np.shape(data) + res = np.zeros(len(data)-wl) + for i in range(len(data)-wl): + av = np.sum(data[i:i+wl]) / wl + res[i] = av + print i, "\t", av + return res + + +def get_x_axis(stamps): + scale = 10e-6 + return np.multiply(stamps, scale) + + +# plot status codes. +def plot_status_codes_over_time(data, fignum): + print "printing status numbers over time" + + # extract and convert the data + recv = extract_super_recv_packet_handler_data(data) + libusb_rx = extract_libusb("rx", data) + libusb_tx = extract_libusb("tx", data) + + # Plot all data + plt.figure(fignum) # Make sure these plots are printed to a new figure. + #plt.plot(get_x_axis(libusb_rx[3]), libusb_rx[2], marker='x', label='RX') + #plt.plot(get_x_axis(libusb_tx[3]), libusb_tx[2], marker='x', label='TX') + + pos = 5 + recv_error_codes = recv[pos] + plt.plot(get_x_axis(recv[0]), recv_error_codes, marker='x', label='recv') + plt.title("Status codes over timestamps") + plt.ylabel("status codes") + plt.xlabel("timestamps") + plt.grid() + plt.legend() + + for i in range(len(recv[0])): + if recv[pos][i] == 8: + xaxis = get_x_axis([recv[0][i]]) + plt.axvline(xaxis, color='b') + + + # Get some statistics and print them too + codes = [] + code_dict = {} + for i in range(len(recv_error_codes)): + if recv_error_codes[i] != 0: + code = rx_metadata_error_codes[recv_error_codes[i]] + pair = [i, code] + codes.append(pair) + if not code_dict.has_key(code): + code_dict[code] = 0 + code_dict[code] += 1 + print codes + print code_dict + + +# plot rtt times as peaks. That's the fast and easy way. +def plot_rtt_times(data, fignum): + print "plot RTT times" + rx = extract_libusb("rx", data) + tx = extract_libusb("tx", data) + + scale = 10e-6 + rx_diff = np.multiply(np.subtract(rx[3], rx[4]), scale) + tx_diff = np.multiply(np.subtract(tx[3], tx[4]), scale) + + plt.figure(fignum) + plt.plot(get_x_axis(rx[3]), rx_diff, marker='x', ls='', label="rx RTT") + plt.plot(get_x_axis(tx[3]), tx_diff, marker='x', ls='', label="tx RTT") + plt.title("Round trip times") + plt.ylabel("RTT (us)") + plt.grid() + plt.legend() + + +# plot RTT as actual lines as long as buffers are on the fly. +# This can take a long time if the function has to print a lot of small lines. Careful with this! 
+def plot_rtt_lines(data, fignum): + print "plot RTT lines" + rx = extract_libusb("rx", data) + #tx = extract_libusb("tx", data) + + if len(data) == 0 or len(rx) == 0: + return + + plt.figure(fignum) + for i in range(len(rx[0])): + if rx[0][i] > -1: + start = rx[4][i] + stop = rx[3][i] + status = rx[2][i] + + val = rx[0][i]#(stop - start) * scale + if not status == 0: + if status == 2: + print "status = ", status + val +=0.5 + xaxis = get_x_axis([start, stop]) + plt.plot(xaxis, [val, val], marker='x') + plt.ylabel('buffer number') + + # Careful with these lines here. + # Basically they should always have these values to separate CTRL, TX, RX buffer number blocks. + # But these values can be adjusted. Thus, it requires you to adjust these lines + plt.axhline(15.5) + plt.axhline(31.5) + plt.axhline(47.5) + plt.grid() + + +# only plot on-the-fly buffers. +def plot_buff_otf(trx, nrange, data): + d = extract_libusb(trx, data) + res = [[], []] + num = 0 + for i in range(len(d[0])): + if d[0][i] in nrange: + res + + +# If there are still unknown lines after cleanup, they can be caught and printed here. +# This way you can check what got caught but shouldn't have been caught. +def get_unknown_lines(data): + # These are the 3 known starting lines. More might be added in the future. + known = ['super_recv_packet_handler', 'super_send_packet_handler', 'libusb1_zero_copy'] + res = [] + for i in range(len(data)): + if not data[i][0] in known: + print data[i] + res.append(data[i]) + return res + +# LUT for all the return codes +rx_metadata_error_codes = {0x0: "NONE", 0x1: "TIMEOUT", 0x2: "LATE_COMMAND", 0x4: "BROKEN_CHAIN", 0x8: "OVERFLOW", + 0xc: "ALIGNMENT", 0xf: "BAD_PACKET"} + + +def parse_args(): + """ Parse args, yo """ + parser = argparse.ArgumentParser(description='Plot tool for debug prints.') + parser.add_argument('-f', '--filename', default=None, help='Debug output file') + return parser.parse_args() + +# Be really careful with the input files. They get pretty huge in a small time. +# Doing some of the plotting can eat up a lot of time then. +# Although this file contains a lot of functions by now, it is still left to the user to use everythin correctly. +def main(): + args = parse_args() + filename = args.filename + print "get data from: ", filename + #pref1 = "super_recv_packet_handler" + #pref2 = "recv" + + # Here you get the data from a file + data = get_data(filename) + #print "data len: ", len(data) + + # extract lines with unknown content. + unknown = get_unknown_lines(data) + + # plot status codes and RTT lines. 
+ plot_status_codes_over_time(data, 0) + plot_rtt_lines(data, 0) + #plot_rtt_times(data, 0) + + #[tx, rx] = extract_txrx(data) + #print "txlen\t", len(tx[0]) + #print "rxlen\t", len(rx[0]) + + + #print "plot data" + #plt.title(filename) + #plt.plot(tx_data[0], tx_data[7], 'r', marker='o', label='TX') + #plt.plot(rx_data[0], rx_data[11], 'g', marker='x', label='RX') + #for j in jiffies: + # plt.axvline(x=j[0], color='r') + # plt.axvline(x=j[1], color='g') + # + #plt.xlabel('wallclock (us)') + #plt.ylabel('timestamp (ticks)') + #plt.legend() + + #lencal = 200000 + + # calculate and plot rx throughput + #rxthr = throughput([rx[1], rx[3], rx[4]]) + #plt.plot(rxthr[0:lencal]) + #rxav = ma(rxthr[0:lencal], 2000) + #plt.plot(rxav) + + # calculate and plot tx throughput + #txthr = throughput([tx[1], tx[3], tx[4]]) + #plt.plot(txthr[0:lencal]) + #txav = ma(txthr[0:lencal], 20) + #plt.plot(txav) + + + #bp = bps(data[1], data[2]) + #print np.sum(bp) + #ave = np.sum(bp)/len(bp) + #print ave + #plt.plot(tx_data[0], tx_data[2], 'r') + #plt.plot(rx_data[0], rx_data[2], 'b') + #plt.plot(bp, 'g', marker='+') + #plt.plot(np.multiply(data[1], 1e6), 'r', marker='o') + #plt.plot(np.multiply(data[3],1e2), 'b', marker='x') + #plt.plot(ave) + + # in the end, put a grid on the graph and show it. + plt.grid() + plt.show() + +if __name__ == '__main__': + print "[WARNING] This tool is in alpha status. Only use if you know what you're doing!" + main() + |
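The workflow described in the README above (capture with `app_call 2> dbg_print_file.txt`, then post-process) can be driven either by running `uhd_txrx_debug_prints_graph.py -f dbg_print_file.txt` directly or by calling its helpers from a separate session. The sketch below is a minimal, hypothetical example of the latter: the module name on the import line and the capture filename are assumptions, while `get_data`, `plot_status_codes_over_time`, and `plot_rtt_lines` are functions added in this commit (note the script targets Python 2).

```python
# Minimal sketch (Python 2, to match the script): load a debug-print capture
# and reuse the plotting helpers added in this commit.
import matplotlib.pyplot as plt

# Assumes the added script is importable as a module (e.g. it is on PYTHONPATH).
import uhd_txrx_debug_prints_graph as dbg

data = dbg.get_data("dbg_print_file.txt")   # parse lines, strip known corruption ('O', 'D', ...)
dbg.plot_status_codes_over_time(data, 0)    # figure 0: RX metadata error codes over wallclock time
dbg.plot_rtt_lines(data, 1)                 # figure 1: libusb buffer round-trip lines (can be slow)
plt.show()
```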