author     Josh Blum <josh@joshknows.com>  2011-02-10 13:58:38 -0800
committer  Josh Blum <josh@joshknows.com>  2011-02-10 13:58:38 -0800
commit  1be2590962669f307dce24ccb0b0011b3f3f25f5 (patch)
tree    bebf7fe400e2b794809a9c34a58105d1ed64b8d2 /host/lib
parent  1daf74483ed3e5e5a70f856aecefa96f30b2cbc2 (diff)
uhd: tweaks to bounded buffer
Added push_with_haste. The wait methods now try the haste variant first to avoid the time compare/wait when it is not needed. Switched the libusb and UDP zero-copy implementations over to the new calls; tests pass.
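For context, a minimal sketch of the haste-first idea, written with C++11 standard primitives rather than the Boost types the 2011 code base used. The method names mirror the bounded buffer calls touched in the diff below, but the body is purely illustrative and is not the actual uhd::transport::bounded_buffer implementation:

#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <queue>

// Illustrative fixed-capacity queue: the "haste" calls never block, and the
// timed-wait call tries the haste path first so the common, uncontended case
// never touches the clock or the condition variable.
template <typename T>
class toy_bounded_buffer {
public:
    explicit toy_bounded_buffer(std::size_t capacity) : _capacity(capacity) {}

    // Non-blocking push: succeed only if there is room right now.
    bool push_with_haste(const T &elem) {
        std::unique_lock<std::mutex> lock(_mutex);
        if (_queue.size() >= _capacity) return false;
        _queue.push(elem);
        lock.unlock();
        _not_empty.notify_one();
        return true;
    }

    // Non-blocking pop: succeed only if an element is already queued.
    bool pop_with_haste(T &elem) {
        std::unique_lock<std::mutex> lock(_mutex);
        if (_queue.empty()) return false;
        elem = _queue.front();
        _queue.pop();
        lock.unlock();
        _not_full.notify_one();
        return true;
    }

    // Timed pop: haste first; only an empty queue pays for the timed wait.
    bool pop_with_timed_wait(T &elem, double timeout) {
        if (pop_with_haste(elem)) return true;
        std::unique_lock<std::mutex> lock(_mutex);
        const auto deadline = std::chrono::steady_clock::now() +
            std::chrono::duration<double>(timeout);
        if (not _not_empty.wait_until(lock, deadline,
                [this] { return not _queue.empty(); })) {
            return false; // timed out with nothing to pop
        }
        elem = _queue.front();
        _queue.pop();
        lock.unlock();
        _not_full.notify_one();
        return true;
    }

private:
    const std::size_t _capacity;
    std::mutex _mutex;
    std::condition_variable _not_empty, _not_full;
    std::queue<T> _queue;
};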
Diffstat (limited to 'host/lib')
-rw-r--r--  host/lib/transport/libusb1_zero_copy.cpp  |  4 ++--
-rw-r--r--  host/lib/transport/udp_zero_copy_asio.cpp | 10 +++++-----
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/host/lib/transport/libusb1_zero_copy.cpp b/host/lib/transport/libusb1_zero_copy.cpp
index ca37f351f..6fab5ae6f 100644
--- a/host/lib/transport/libusb1_zero_copy.cpp
+++ b/host/lib/transport/libusb1_zero_copy.cpp
@@ -132,7 +132,7 @@ static void callback(libusb_transfer *lut){
* \param pointer to libusb_transfer
*/
void usb_endpoint::callback_handle_transfer(libusb_transfer *lut){
- _completed_list.push_with_wait(lut);
+ _completed_list.push_with_haste(lut);
}
@@ -161,7 +161,7 @@ usb_endpoint::usb_endpoint(
//input luts are immediately submitted to be filled
//output luts go into the completed list as free buffers
if (_input) this->submit(_all_luts.back());
- else _completed_list.push_with_wait(_all_luts.back());
+ else _completed_list.push_with_haste(_all_luts.back());
}
}
diff --git a/host/lib/transport/udp_zero_copy_asio.cpp b/host/lib/transport/udp_zero_copy_asio.cpp
index 87c5ec823..c45b196cf 100644
--- a/host/lib/transport/udp_zero_copy_asio.cpp
+++ b/host/lib/transport/udp_zero_copy_asio.cpp
@@ -216,14 +216,14 @@ public:
managed_recv_buffer::sptr get_recv_buff(double timeout){
udp_zero_copy_asio_mrb *mrb = NULL;
- if (is_recv_socket_ready(timeout) and _pending_recv_buffs.pop_with_haste(mrb)){
+ if (is_recv_socket_ready(timeout) and _pending_recv_buffs.pop_with_timed_wait(mrb, timeout)){
return mrb->get_new(::recv(_sock_fd, mrb->cast<char *>(), _recv_frame_size, 0));
}
return managed_recv_buffer::sptr();
}
UHD_INLINE void handle_recv(udp_zero_copy_asio_mrb *mrb){
- _pending_recv_buffs.push_with_pop_on_full(mrb);
+ _pending_recv_buffs.push_with_haste(mrb);
}
void release(udp_zero_copy_asio_mrb *mrb){
@@ -245,16 +245,16 @@ public:
* - A managed buffer is always available.
* - The queue can never be over-filled.
******************************************************************/
- managed_send_buffer::sptr get_send_buff(double){
+ managed_send_buffer::sptr get_send_buff(double timeout){
udp_zero_copy_asio_msb *msb = NULL;
- if (_pending_send_buffs.pop_with_haste(msb)){
+ if (_pending_send_buffs.pop_with_timed_wait(msb, timeout)){
return msb->get_new(_send_frame_size);
}
return managed_send_buffer::sptr();
}
UHD_INLINE void handle_send(udp_zero_copy_asio_msb *msb){
- _pending_send_buffs.push_with_pop_on_full(msb);
+ _pending_send_buffs.push_with_haste(msb);
}
void commit(udp_zero_copy_asio_msb *msb, size_t len){
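The send-side change above means the timeout argument to get_send_buff() is now honored rather than ignored: the call blocks for at most that long waiting for a frame to free up, and returns an empty sptr otherwise. A hedged caller-side sketch, assuming the uhd::transport::zero_copy_if and managed_send_buffer interfaces of that era (illustrative, not code from this commit):

#include <uhd/transport/zero_copy.hpp>
#include <cstddef>
#include <cstring>

// Send one payload through a zero-copy transport, waiting at most 100 ms
// for a free send frame; gives up (drops the packet) if none frees up.
// num_bytes must not exceed the transport's send frame size.
static void send_one_packet(
    uhd::transport::zero_copy_if::sptr xport,
    const void *payload, std::size_t num_bytes
){
    uhd::transport::managed_send_buffer::sptr buff = xport->get_send_buff(0.1);
    if (not buff) return; // timed out: no frame became available in 100 ms

    std::memcpy(buff->cast<void *>(), payload, num_bytes);
    buff->commit(num_bytes); // hand the filled frame back to the transport
}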