author    | Philip Balister <philip@opensdr.com> | 2010-10-21 14:17:47 -0400
committer | Philip Balister <philip@opensdr.com> | 2010-10-21 14:17:47 -0400
commit    | c31c2d7ef8e4e02854fe51a8a3b8856649464918 (patch)
tree      | d338268cbdecf21c78a2c77e6e7f95e6f669c4ee /host/lib/usrp/usrp1/io_impl.cpp
parent    | 3dc06cddaf0a0cf32f76be7077d1427ff4b71a7e (diff)
parent    | 090b0dd0d38f23cd1d7c4c6a7b0317a6fdfe2b9b (diff)
Merge branch 'usrp_e' of ettus.sourcerepo.com:ettus/uhdpriv into usrp_e
Diffstat (limited to 'host/lib/usrp/usrp1/io_impl.cpp')
-rw-r--r-- | host/lib/usrp/usrp1/io_impl.cpp | 477
1 file changed, 232 insertions, 245 deletions
diff --git a/host/lib/usrp/usrp1/io_impl.cpp b/host/lib/usrp/usrp1/io_impl.cpp
index 92e8bc20a..6728d9b15 100644
--- a/host/lib/usrp/usrp1/io_impl.cpp
+++ b/host/lib/usrp/usrp1/io_impl.cpp
@@ -33,76 +33,148 @@
 using namespace uhd::usrp;
 using namespace uhd::transport;
 namespace asio = boost::asio;
 
-struct usrp1_send_state {
-    uhd::transport::managed_send_buffer::sptr send_buff;
-    size_t bytes_written;
-    size_t underrun_poll_samp_count;
+static const size_t alignment_padding = 512;
 
-    size_t bytes_free()
-    {
-        if (send_buff != NULL)
-            return send_buff->size() - bytes_written;
-        else
-            return 0;
+/***********************************************************************
+ * Helper struct to associate an offset with a buffer
+ **********************************************************************/
+class offset_send_buffer{
+public:
+    typedef boost::shared_ptr<offset_send_buffer> sptr;
+
+    static sptr make(managed_send_buffer::sptr buff, size_t offset = 0){
+        return sptr(new offset_send_buffer(buff, offset));
     }
-};
 
-struct usrp1_recv_state {
-    uhd::transport::managed_recv_buffer::sptr recv_buff;
-    size_t bytes_read;
-    size_t overrun_poll_samp_count;
+    //member variables
+    managed_send_buffer::sptr buff;
+    size_t offset; /* in bytes */
 
-    size_t bytes_avail()
-    {
-        if (recv_buff != NULL)
-            return recv_buff->size() - bytes_read;
-        else
-            return 0;
-    }
+private:
+    offset_send_buffer(managed_send_buffer::sptr buff, size_t offset):
+        buff(buff), offset(offset){/* NOP */}
 };
 
 /***********************************************************************
  * IO Implementation Details
  **********************************************************************/
-struct usrp1_impl::io_impl {
-    io_impl();
-    ~io_impl(void);
-
-    //state handling for buffer management
-    usrp1_recv_state recv_state;
-    usrp1_send_state send_state;
-
-    //send transport management
-    bool get_send_buffer(zero_copy_if::sptr zc_if);
-    size_t copy_convert_send_samps(const void *buff, size_t num_samps,
-                                   size_t sample_offset, const io_type_t io_type,
-                                   otw_type_t otw_type);
-    bool conditional_buff_commit(bool force);
-    bool check_underrun(usrp_ctrl::sptr ctrl_if,
-                        size_t poll_interval, bool force);
-
-    //recv transport management
-    bool get_recv_buffer(zero_copy_if::sptr zc_if);
-    size_t copy_convert_recv_samps(void *buff, size_t num_samps,
-                                   size_t sample_offset, const io_type_t io_type,
-                                   otw_type_t otw_type);
-    bool check_overrun(usrp_ctrl::sptr ctrl_if,
-                       size_t poll_interval, bool force);
+struct usrp1_impl::io_impl{
+    io_impl(zero_copy_if::sptr data_transport):
+        data_transport(data_transport),
+        underflow_poll_samp_count(0),
+        overflow_poll_samp_count(0),
+        curr_buff_committed(true),
+        curr_buff(offset_send_buffer::make(data_transport->get_send_buff()))
+    {
+        /* NOP */
+    }
+
+    ~io_impl(void){
+        flush_send_buff();
+    }
+
+    zero_copy_if::sptr data_transport;
+
+    //state management for the vrt packet handler code
+    vrt_packet_handler::recv_state packet_handler_recv_state;
+    vrt_packet_handler::send_state packet_handler_send_state;
+
+    //state management for overflow and underflow
+    size_t underflow_poll_samp_count;
+    size_t overflow_poll_samp_count;
+
+    //wrapper around the actual send buffer interface
+    //all of this to ensure only aligned lengths are committed
+    //NOTE: you must commit before getting a new buffer
+    //since the vrt packet handler obeys this, we are ok
+    bool curr_buff_committed;
+    offset_send_buffer::sptr curr_buff;
+    void commit_send_buff(offset_send_buffer::sptr, offset_send_buffer::sptr, size_t);
+    void flush_send_buff(void);
+    bool get_send_buffs(vrt_packet_handler::managed_send_buffs_t &, double);
 };
 
-usrp1_impl::io_impl::io_impl()
-{
-    send_state.send_buff = uhd::transport::managed_send_buffer::sptr();
-    recv_state.recv_buff = uhd::transport::managed_recv_buffer::sptr();
+/*!
+ * Perform an actual commit on the send buffer:
+ * Copy the remainder of alignment to the next buffer.
+ * Commit the current buffer at multiples of alignment.
+ */
+void usrp1_impl::io_impl::commit_send_buff(
+    offset_send_buffer::sptr curr,
+    offset_send_buffer::sptr next,
+    size_t num_bytes
+){
+    //total number of bytes now in the current buffer
+    size_t bytes_in_curr_buffer = curr->offset + num_bytes;
+
+    //calculate how many to commit and remainder
+    size_t num_bytes_remaining = bytes_in_curr_buffer % alignment_padding;
+    size_t num_bytes_to_commit = bytes_in_curr_buffer - num_bytes_remaining;
+
+    //copy the remainder into the next buffer
+    std::memcpy(
+        next->buff->cast<char *>() + next->offset,
+        curr->buff->cast<char *>() + num_bytes_to_commit,
+        num_bytes_remaining
+    );
+
+    //update the offset into the next buffer
+    next->offset += num_bytes_remaining;
+
+    //commit the current buffer
+    curr->buff->commit(num_bytes_to_commit);
+    curr_buff_committed = true;
 }
 
-usrp1_impl::io_impl::~io_impl(void)
-{
-    /* NOP */
+/*!
+ * Flush the current buffer by padding out to alignment and committing.
+ */
+void usrp1_impl::io_impl::flush_send_buff(void){
+    //calculate the number of bytes to alignment
+    size_t bytes_to_pad = (-1*curr_buff->offset)%alignment_padding;
+
+    //get the buffer, clear, and commit (really current buffer)
+    vrt_packet_handler::managed_send_buffs_t buffs(1);
+    if (this->get_send_buffs(buffs, 0.1)){
+        std::memset(buffs[0]->cast<void *>(), 0, bytes_to_pad);
+        buffs[0]->commit(bytes_to_pad);
+    }
 }
 
-void usrp1_impl::io_init(void)
-{
+/*!
+ * Get a managed send buffer with the alignment padding:
+ * Always grab the next send buffer so we can timeout here.
+ */
+bool usrp1_impl::io_impl::get_send_buffs(
+    vrt_packet_handler::managed_send_buffs_t &buffs, double timeout
+){
+    UHD_ASSERT_THROW(curr_buff_committed and buffs.size() == 1);
+
+    //try to get a new managed buffer with timeout
+    offset_send_buffer::sptr next_buff(offset_send_buffer::make(data_transport->get_send_buff(timeout)));
+    if (not next_buff->buff.get()) return false; /* propagate timeout here */
+
+    //calculate the buffer pointer and size given the offset
+    //references to the buffers are held in the bound function
+    buffs[0] = managed_send_buffer::make_safe(
+        boost::asio::buffer(
+            curr_buff->buff->cast<char *>() + curr_buff->offset,
+            curr_buff->buff->size() - curr_buff->offset
+        ),
+        boost::bind(&usrp1_impl::io_impl::commit_send_buff, this, curr_buff, next_buff, _1)
+    );
+
+    //store the next buffer for the next call
+    curr_buff = next_buff;
+    curr_buff_committed = false;
+
+    return true;
+}
+
+/***********************************************************************
+ * Initialize internals within this file
+ **********************************************************************/
+void usrp1_impl::io_init(void){
     _rx_otw_type.width = 16;
     _rx_otw_type.shift = 0;
    _rx_otw_type.byteorder = otw_type_t::BO_LITTLE_ENDIAN;
@@ -111,216 +183,131 @@ void usrp1_impl::io_init(void)
     _tx_otw_type.shift = 0;
     _tx_otw_type.byteorder = otw_type_t::BO_LITTLE_ENDIAN;
 
-    _io_impl = UHD_PIMPL_MAKE(io_impl, ());
+    _io_impl = UHD_PIMPL_MAKE(io_impl, (_data_transport));
 }
 
 /***********************************************************************
- * Data Send
+ * Data send + helper functions
  **********************************************************************/
-bool usrp1_impl::io_impl::get_send_buffer(zero_copy_if::sptr zc_if)
-{
-    if (send_state.send_buff == NULL) {
-
-        send_state.send_buff = zc_if->get_send_buff();
-        if (send_state.send_buff == NULL)
-            return false;
-
-        send_state.bytes_written = 0;
-    }
-
-    return true;
-}
-
-size_t usrp1_impl::io_impl::copy_convert_send_samps(const void *buff,
-                                                    size_t num_samps,
-                                                    size_t sample_offset,
-                                                    const io_type_t io_type,
-                                                    otw_type_t otw_type)
-{
-    UHD_ASSERT_THROW(send_state.bytes_free() % otw_type.get_sample_size() == 0);
-
-    size_t samps_free = send_state.bytes_free() / otw_type.get_sample_size();
-    size_t copy_samps = std::min(num_samps - sample_offset, samps_free);
-
-    const boost::uint8_t *io_mem =
-        reinterpret_cast<const boost::uint8_t *>(buff);
-
-    boost::uint8_t *otw_mem = send_state.send_buff->cast<boost::uint8_t *>();
-
-    convert_io_type_to_otw_type(io_mem + sample_offset * io_type.size,
-                                io_type,
-                                otw_mem + send_state.bytes_written,
-                                otw_type,
-                                copy_samps);
-
-    send_state.bytes_written += copy_samps * otw_type.get_sample_size();
-    send_state.underrun_poll_samp_count += copy_samps;
-
-    return copy_samps;
-}
-
-bool usrp1_impl::io_impl::conditional_buff_commit(bool force)
-{
-    if (send_state.bytes_written % 512)
-        return false;
-
-    if (force || send_state.bytes_free() == 0) {
-        send_state.send_buff->commit(send_state.bytes_written);
-        send_state.send_buff = uhd::transport::managed_send_buffer::sptr();
-        return true;
-    }
-
-    return false;
+static void usrp1_bs_vrt_packer(
+    boost::uint32_t *,
+    vrt::if_packet_info_t &if_packet_info
+){
+    if_packet_info.num_header_words32 = 0;
+    if_packet_info.num_packet_words32 = if_packet_info.num_payload_words32;
 }
 
-bool usrp1_impl::io_impl::check_underrun(usrp_ctrl::sptr ctrl_if,
-                                         size_t poll_interval,
-                                         bool force)
-{
-    unsigned char underrun = 0;
-
-    bool ready_to_poll = send_state.underrun_poll_samp_count > poll_interval;
-
-    if (force || ready_to_poll) {
-        int ret = ctrl_if->usrp_control_read(VRQ_GET_STATUS,
-                                             0,
-                                             GS_TX_UNDERRUN,
-                                             &underrun, sizeof(char));
-        if (ret < 0)
-            std::cerr << "USRP: underrun check failed" << std::endl;
-        if (underrun)
-            std::cerr << "U" << std::flush;
-
-        send_state.underrun_poll_samp_count = 0;
-    }
-
-    return (bool) underrun;
+size_t usrp1_impl::get_max_send_samps_per_packet(void) const {
+    return (_data_transport->get_send_frame_size() - alignment_padding)
+        / _tx_otw_type.get_sample_size()
+        / _tx_subdev_spec.size()
+    ;
 }
 
-size_t usrp1_impl::send(const std::vector<const void *> &buffs,
-                        size_t num_samps,
-                        const tx_metadata_t &,
-                        const io_type_t &io_type,
-                        send_mode_t)
-{
-    UHD_ASSERT_THROW(buffs.size() == 1);
-
-    size_t total_samps_sent = 0;
-
-    while (total_samps_sent < num_samps) {
-        if (!_io_impl->get_send_buffer(_data_transport))
-            return 0;
-
-        total_samps_sent += _io_impl->copy_convert_send_samps(buffs[0],
-                                                              num_samps,
-                                                              total_samps_sent,
-                                                              io_type,
-                                                              _tx_otw_type);
-        if (total_samps_sent == num_samps)
-            _io_impl->conditional_buff_commit(true);
-        else
-            _io_impl->conditional_buff_commit(false);
-
-        _io_impl->check_underrun(_ctrl_transport,
-                                 _tx_samps_per_poll_interval, false);
+size_t usrp1_impl::send(
+    const std::vector<const void *> &buffs, size_t num_samps,
+    const tx_metadata_t &metadata, const io_type_t &io_type,
+    send_mode_t send_mode, double timeout
+){
+    size_t num_samps_sent = vrt_packet_handler::send(
+        _io_impl->packet_handler_send_state, //last state of the send handler
+        buffs, num_samps, //buffer to fill
+        metadata, send_mode, //samples metadata
+        io_type, _tx_otw_type, //input and output types to convert
+        _clock_ctrl->get_master_clock_freq(), //master clock tick rate
+        &usrp1_bs_vrt_packer,
+        boost::bind(&usrp1_impl::io_impl::get_send_buffs, _io_impl.get(), _1, timeout),
+        get_max_send_samps_per_packet(),
+        0, //vrt header offset
+        _tx_subdev_spec.size() //num channels
+    );
+
+    //Don't honor sob because it is normal to be always bursting...
+
+    //handle eob flag (commit the buffer)
+    if (metadata.end_of_burst) _io_impl->flush_send_buff();
+
+    //handle the polling for underflow conditions
+    _io_impl->underflow_poll_samp_count += num_samps_sent;
+    if (_io_impl->underflow_poll_samp_count >= _tx_samps_per_poll_interval){
+        _io_impl->underflow_poll_samp_count = 0; //reset count
+        boost::uint8_t underflow = 0;
+        ssize_t ret = _ctrl_transport->usrp_control_read(
+            VRQ_GET_STATUS, 0, GS_TX_UNDERRUN,
+            &underflow, sizeof(underflow)
+        );
+        if (ret < 0) std::cerr << "USRP: underflow check failed" << std::endl;
+        else if (underflow) std::cerr << "U" << std::flush;
     }
 
-    return total_samps_sent;
+    return num_samps_sent;
 }
 
 /***********************************************************************
- * Data Recv
+ * Data recv + helper functions
  **********************************************************************/
-bool usrp1_impl::io_impl::get_recv_buffer(zero_copy_if::sptr zc_if)
-{
-    if ((recv_state.recv_buff == NULL) || (recv_state.bytes_avail() == 0)) {
-
-        recv_state.recv_buff = zc_if->get_recv_buff();
-        if (recv_state.recv_buff == NULL)
-            return false;
-
-        recv_state.bytes_read = 0;
-    }
-
-    return true;
+static void usrp1_bs_vrt_unpacker(
+    const boost::uint32_t *,
+    vrt::if_packet_info_t &if_packet_info
+){
+    if_packet_info.packet_type = vrt::if_packet_info_t::PACKET_TYPE_DATA;
+    if_packet_info.num_payload_words32 = if_packet_info.num_packet_words32;
+    if_packet_info.num_header_words32 = 0;
+    if_packet_info.packet_count = 0;
+    if_packet_info.sob = false;
+    if_packet_info.eob = false;
+    if_packet_info.has_sid = false;
+    if_packet_info.has_cid = false;
+    if_packet_info.has_tsi = false;
+    if_packet_info.has_tsf = false;
+    if_packet_info.has_tlr = false;
 }
 
-size_t usrp1_impl::io_impl::copy_convert_recv_samps(void *buff,
-                                                    size_t num_samps,
-                                                    size_t sample_offset,
-                                                    const io_type_t io_type,
-                                                    otw_type_t otw_type)
-{
-    UHD_ASSERT_THROW(recv_state.bytes_avail() % otw_type.get_sample_size() == 0);
-
-    size_t samps_avail = recv_state.bytes_avail() / otw_type.get_sample_size();
-    size_t copy_samps = std::min(num_samps - sample_offset, samps_avail);
-
-    const boost::uint8_t *otw_mem =
-        recv_state.recv_buff->cast<const boost::uint8_t *>();
-
-    boost::uint8_t *io_mem = reinterpret_cast<boost::uint8_t *>(buff);
-
-    convert_otw_type_to_io_type(otw_mem + recv_state.bytes_read,
-                                otw_type,
-                                io_mem + sample_offset * io_type.size,
-                                io_type,
-                                copy_samps);
-
-    recv_state.bytes_read += copy_samps * otw_type.get_sample_size();
-    recv_state.overrun_poll_samp_count += copy_samps;
-
-    return copy_samps;
+static bool get_recv_buffs(
+    zero_copy_if::sptr zc_if, double timeout,
+    vrt_packet_handler::managed_recv_buffs_t &buffs
+){
+    UHD_ASSERT_THROW(buffs.size() == 1);
+    buffs[0] = zc_if->get_recv_buff(timeout);
+    return buffs[0].get() != NULL;
 }
 
-bool usrp1_impl::io_impl::check_overrun(usrp_ctrl::sptr ctrl_if,
-                                        size_t poll_interval,
-                                        bool force)
-{
-    unsigned char overrun = 0;
-
-    bool ready_to_poll = recv_state.overrun_poll_samp_count > poll_interval;
-
-    if (force || ready_to_poll) {
-        int ret = ctrl_if->usrp_control_read(VRQ_GET_STATUS,
-                                             0,
-                                             GS_RX_OVERRUN,
-                                             &overrun, sizeof(char));
-        if (ret < 0)
-            std::cerr << "USRP: overrrun check failed" << std::endl;
-        if (overrun)
-            std::cerr << "O" << std::flush;
-
-        recv_state.overrun_poll_samp_count = 0;
-    }
-
-    return (bool) overrun;
+size_t usrp1_impl::get_max_recv_samps_per_packet(void) const {
    return _data_transport->get_recv_frame_size()
+        / _rx_otw_type.get_sample_size()
+        / _rx_subdev_spec.size()
+    ;
 }
 
-size_t usrp1_impl::recv(const std::vector<void *> &buffs,
-                        size_t num_samps,
-                        rx_metadata_t &,
-                        const io_type_t &io_type,
-                        recv_mode_t,
-                        size_t)
-{
-    UHD_ASSERT_THROW(buffs.size() == 1);
-
-    size_t total_samps_recv = 0;
-
-    while (total_samps_recv < num_samps) {
-
-        if (!_io_impl->get_recv_buffer(_data_transport))
-            return 0;
-
-        total_samps_recv += _io_impl->copy_convert_recv_samps(buffs[0],
-                                                              num_samps,
-                                                              total_samps_recv,
-                                                              io_type,
-                                                              _rx_otw_type);
-        _io_impl->check_overrun(_ctrl_transport,
-                                _rx_samps_per_poll_interval, false);
+size_t usrp1_impl::recv(
+    const std::vector<void *> &buffs, size_t num_samps,
+    rx_metadata_t &metadata, const io_type_t &io_type,
+    recv_mode_t recv_mode, double timeout
+){
+    size_t num_samps_recvd = vrt_packet_handler::recv(
+        _io_impl->packet_handler_recv_state, //last state of the recv handler
+        buffs, num_samps, //buffer to fill
+        metadata, recv_mode, //samples metadata
+        io_type, _rx_otw_type, //input and output types to convert
+        _clock_ctrl->get_master_clock_freq(), //master clock tick rate
+        &usrp1_bs_vrt_unpacker,
+        boost::bind(&get_recv_buffs, _data_transport, timeout, _1),
+        &vrt_packet_handler::handle_overflow_nop,
+        0, //vrt header offset
+        _rx_subdev_spec.size() //num channels
+    );
+
+    //handle the polling for overflow conditions
+    _io_impl->overflow_poll_samp_count += num_samps_recvd;
+    if (_io_impl->overflow_poll_samp_count >= _rx_samps_per_poll_interval){
+        _io_impl->overflow_poll_samp_count = 0; //reset count
+        boost::uint8_t overflow = 0;
+        ssize_t ret = _ctrl_transport->usrp_control_read(
+            VRQ_GET_STATUS, 0, GS_RX_OVERRUN,
+            &overflow, sizeof(overflow)
+        );
+        if (ret < 0) std::cerr << "USRP: overflow check failed" << std::endl;
+        else if (overflow) std::cerr << "O" << std::flush;
    }
 
-    return total_samps_recv;
+    return num_samps_recvd;
 }
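
The core of this patch is the alignment rule enforced by commit_send_buff and flush_send_buff above: bytes staged in the current send buffer are committed to the transport only in multiples of alignment_padding (512), the unaligned remainder is copied to the front of the next buffer, and an end-of-burst flush zero-pads the tail out to the next boundary. The standalone sketch below illustrates only that arithmetic; the helper names split_at_alignment and pad_to_alignment are hypothetical and not part of UHD or this patch.

```cpp
// Standalone illustration of the 512-byte alignment rule (hypothetical helpers,
// not UHD API): split a staged byte count into an aligned commit plus a carry,
// and compute the zero-padding needed to flush a partially filled buffer.
#include <cstddef>
#include <iostream>

static const std::size_t alignment_padding = 512;

struct split_result {
    std::size_t bytes_to_commit; // committed from the current buffer now
    std::size_t bytes_to_carry;  // copied to the front of the next buffer
};

// Mirrors the arithmetic in commit_send_buff: `offset` bytes were already
// staged in the current buffer, `num_bytes` have just been written after them.
static split_result split_at_alignment(std::size_t offset, std::size_t num_bytes){
    const std::size_t total = offset + num_bytes;
    split_result r;
    r.bytes_to_carry  = total % alignment_padding;
    r.bytes_to_commit = total - r.bytes_to_carry;
    return r;
}

// Mirrors the arithmetic in flush_send_buff: zero bytes needed to reach the
// next 512-byte boundary. (0 - offset) % 512 is well defined for unsigned
// size_t because 512 is a power of two and divides the unsigned wrap-around
// modulus, so it equals (512 - offset % 512) % 512.
static std::size_t pad_to_alignment(std::size_t offset){
    return (std::size_t(0) - offset) % alignment_padding;
}

int main(){
    const split_result r = split_at_alignment(100, 1000); // 1100 bytes staged in total
    std::cout << r.bytes_to_commit << " bytes committed, "  // 1024
              << r.bytes_to_carry  << " bytes carried\n";   // 76
    std::cout << pad_to_alignment(r.bytes_to_carry)
              << " pad bytes needed to flush the carried remainder\n"; // 436
    return 0;
}
```

With these numbers, an end-of-burst flush of the 76 carried bytes zero-fills 436 bytes, so every buffer handed to the transport remains a whole multiple of 512 bytes, which is the invariant the new send path maintains.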