summaryrefslogtreecommitdiffstats
path: root/host/lib/transport
diff options
context:
space:
mode:
authorJosh Blum <josh@joshknows.com>2011-02-05 12:37:20 -0800
committerJosh Blum <josh@joshknows.com>2011-02-05 12:37:20 -0800
commit5d10aa397f044ea6781b4977c14171abfb4a727d (patch)
treec4578313720b2be2f4ce4d7c5e05c8e8a2a3b7e7 /host/lib/transport
parent3d32e7dc3b28cc384185c580035c0011a5beb167 (diff)
downloaduhd-5d10aa397f044ea6781b4977c14171abfb4a727d.tar.gz
uhd-5d10aa397f044ea6781b4977c14171abfb4a727d.tar.bz2
uhd-5d10aa397f044ea6781b4977c14171abfb4a727d.zip
uhd: change bounded_buffer implementation and code using it
The bounded buffer now uses the detail idiom to hide its implementation while still allowing it to inline well. The whole sptr/make idiom was removed from bounded buffer in favor of direct construction. Code using bounded buffer was changed for the new API: access operators were replaced and calls to the factory function removed.
Diffstat (limited to 'host/lib/transport')
-rw-r--r--[-rwxr-xr-x]host/lib/transport/gen_vrt_if_packet.py2
-rw-r--r--host/lib/transport/libusb1_zero_copy.cpp15
-rw-r--r--host/lib/transport/udp_zero_copy_asio.cpp32
3 files changed, 23 insertions, 26 deletions
diff --git a/host/lib/transport/gen_vrt_if_packet.py b/host/lib/transport/gen_vrt_if_packet.py
index bf740ffa9..3ba562d68 100755..100644
--- a/host/lib/transport/gen_vrt_if_packet.py
+++ b/host/lib/transport/gen_vrt_if_packet.py
@@ -205,7 +205,7 @@ void vrt::if_hdr_unpack_$(suffix)(
if_packet_info.has_tsf = true;
if_packet_info.tsf = boost::uint64_t($(XE_MACRO)(packet_buff[$num_header_words])) << 32;
#set $num_header_words += 1
- if_packet_info.tsf |= boost::uint64_t($(XE_MACRO)(packet_buff[$num_header_words])) << 0;
+ if_packet_info.tsf |= $(XE_MACRO)(packet_buff[$num_header_words]);
#set $num_header_words += 1
#else
if_packet_info.has_tsf = false;
diff --git a/host/lib/transport/libusb1_zero_copy.cpp b/host/lib/transport/libusb1_zero_copy.cpp
index 311a8953b..d4c82617c 100644
--- a/host/lib/transport/libusb1_zero_copy.cpp
+++ b/host/lib/transport/libusb1_zero_copy.cpp
@@ -99,8 +99,7 @@ private:
bool _input;
//! hold a bounded buffer of completed transfers
- typedef bounded_buffer<libusb_transfer *> lut_buff_type;
- lut_buff_type::sptr _completed_list;
+ bounded_buffer<libusb_transfer *> _completed_list;
//! a list of all transfer structs we allocated
std::vector<libusb_transfer *> _all_luts;
@@ -134,7 +133,7 @@ static void callback(libusb_transfer *lut){
* \param pointer to libusb_transfer
*/
void usb_endpoint::callback_handle_transfer(libusb_transfer *lut){
- _completed_list->push_with_wait(lut);
+ _completed_list.push_with_wait(lut);
}
@@ -153,9 +152,9 @@ usb_endpoint::usb_endpoint(
):
_handle(handle),
_endpoint(endpoint),
- _input(input)
+ _input(input),
+ _completed_list(num_transfers)
{
- _completed_list = lut_buff_type::make(num_transfers);
_buffer_pool = buffer_pool::make(num_transfers, transfer_size);
for (size_t i = 0; i < num_transfers; i++){
_all_luts.push_back(allocate_transfer(_buffer_pool->at(i), transfer_size));
@@ -163,7 +162,7 @@ usb_endpoint::usb_endpoint(
//input luts are immediately submitted to be filled
//output luts go into the completed list as free buffers
if (_input) this->submit(_all_luts.back());
- else _completed_list->push_with_wait(_all_luts.back());
+ else _completed_list.push_with_wait(_all_luts.back());
}
}
@@ -272,8 +271,8 @@ void usb_endpoint::print_transfer_status(libusb_transfer *lut){
libusb_transfer *usb_endpoint::get_lut_with_wait(double timeout){
boost::this_thread::disable_interruption di; //disable because the wait can throw
- libusb_transfer *lut;
- if (_completed_list->pop_with_timed_wait(lut, timeout)) return lut;
+ libusb_transfer *lut = NULL;
+ if (_completed_list.pop_with_timed_wait(lut, timeout)) return lut;
return NULL;
}
diff --git a/host/lib/transport/udp_zero_copy_asio.cpp b/host/lib/transport/udp_zero_copy_asio.cpp
index f083b97d8..0f16e7d14 100644
--- a/host/lib/transport/udp_zero_copy_asio.cpp
+++ b/host/lib/transport/udp_zero_copy_asio.cpp
@@ -72,6 +72,9 @@ public:
_num_recv_frames(size_t(hints.cast<double>("num_recv_frames", DEFAULT_NUM_FRAMES))),
_send_frame_size(size_t(hints.cast<double>("send_frame_size", udp_simple::mtu))),
_num_send_frames(size_t(hints.cast<double>("num_send_frames", DEFAULT_NUM_FRAMES))),
+ _recv_buffer_pool(buffer_pool::make(_num_recv_frames, _recv_frame_size)),
+ _send_buffer_pool(buffer_pool::make(_num_send_frames, _send_frame_size)),
+ _pending_recv_buffs(_num_recv_frames), _pending_send_buffs(_num_send_frames),
_concurrency_hint(hints.cast<size_t>("concurrency_hint", CONCURRENCY_HINT)),
_io_service(_concurrency_hint)
{
@@ -96,17 +99,13 @@ public:
}
void init(void){
- //allocate all recv frames and release them to begin xfers
- _pending_recv_buffs = pending_buffs_type::make(_num_recv_frames);
- _recv_buffer_pool = buffer_pool::make(_num_recv_frames, _recv_frame_size);
- for (size_t i = 0; i < _num_recv_frames; i++){
+ //release recv frames for use
+ for (size_t i = 0; i < get_num_recv_frames(); i++){
release(_recv_buffer_pool->at(i));
}
- //allocate all send frames and push them into the fifo
- _pending_send_buffs = pending_buffs_type::make(_num_send_frames);
- _send_buffer_pool = buffer_pool::make(_num_send_frames, _send_frame_size);
- for (size_t i = 0; i < _num_send_frames; i++){
+ //push send frames into the fifo
+ for (size_t i = 0; i < get_num_send_frames(); i++){
handle_send(_send_buffer_pool->at(i));
}
@@ -138,7 +137,7 @@ public:
//! handle a recv callback -> push the filled memory into the fifo
UHD_INLINE void handle_recv(void *mem, size_t len){
- _pending_recv_buffs->push_with_pop_on_full(boost::asio::buffer(mem, len));
+ _pending_recv_buffs.push_with_pop_on_full(boost::asio::buffer(mem, len));
}
////////////////////////////////////////////////////////////////////
@@ -148,7 +147,7 @@ public:
managed_recv_buffer::sptr get_recv_buff(double timeout){
boost::this_thread::disable_interruption di; //disable because the wait can throw
asio::mutable_buffer buff;
- if (_pending_recv_buffs->pop_with_timed_wait(buff, timeout)){
+ if (_pending_recv_buffs.pop_with_timed_wait(buff, timeout)){
return managed_recv_buffer::make_safe(
buff, boost::bind(
&udp_zero_copy_asio_impl::release, this,
@@ -191,7 +190,7 @@ public:
//if the condition is true, call receive and return the managed buffer
if (
::select(_sock_fd+1, &rset, NULL, NULL, &tv) > 0
- and _pending_recv_buffs->pop_with_haste(buff)
+ and _pending_recv_buffs.pop_with_haste(buff)
){
return managed_recv_buffer::make_safe(
asio::buffer(
@@ -220,7 +219,7 @@ public:
//! handle a send callback -> push the emptied memory into the fifo
UHD_INLINE void handle_send(void *mem){
- _pending_send_buffs->push_with_pop_on_full(boost::asio::buffer(mem, this->get_send_frame_size()));
+ _pending_send_buffs.push_with_pop_on_full(boost::asio::buffer(mem, this->get_send_frame_size()));
}
////////////////////////////////////////////////////////////////////
@@ -230,7 +229,7 @@ public:
managed_send_buffer::sptr get_send_buff(double timeout){
boost::this_thread::disable_interruption di; //disable because the wait can throw
asio::mutable_buffer buff;
- if (_pending_send_buffs->pop_with_timed_wait(buff, timeout)){
+ if (_pending_send_buffs.pop_with_timed_wait(buff, timeout)){
return managed_send_buffer::make_safe(
buff, boost::bind(
&udp_zero_copy_asio_impl::commit, this,
@@ -257,7 +256,7 @@ public:
////////////////////////////////////////////////////////////////////
managed_send_buffer::sptr get_send_buff(double){
asio::mutable_buffer buff;
- if (_pending_send_buffs->pop_with_haste(buff)){
+ if (_pending_send_buffs.pop_with_haste(buff)){
return managed_send_buffer::make_safe(
buff, boost::bind(
&udp_zero_copy_asio_impl::commit, this,
@@ -283,11 +282,10 @@ public:
private:
//memory management -> buffers and fifos
boost::thread_group _thread_group;
- buffer_pool::sptr _send_buffer_pool, _recv_buffer_pool;
- typedef bounded_buffer<asio::mutable_buffer> pending_buffs_type;
- pending_buffs_type::sptr _pending_recv_buffs, _pending_send_buffs;
const size_t _recv_frame_size, _num_recv_frames;
const size_t _send_frame_size, _num_send_frames;
+ buffer_pool::sptr _recv_buffer_pool, _send_buffer_pool;
+ bounded_buffer<asio::mutable_buffer> _pending_recv_buffs, _pending_send_buffs;
//asio guts -> socket and service
size_t _concurrency_hint;