aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMartin Braun <martin.braun@ettus.com>2019-08-19 17:14:11 -0700
committerMartin Braun <martin.braun@ettus.com>2019-11-26 11:49:40 -0800
commit4c6ee083ef13184ba828d73b9dd2a95d7fa5453f (patch)
tree3364e7e147ed229732fca66282323c65d5de4102
parenta4274c19ebb42db112aa68a26fa499a52b9dd103 (diff)
downloaduhd-4c6ee083ef13184ba828d73b9dd2a95d7fa5453f.tar.gz
uhd-4c6ee083ef13184ba828d73b9dd2a95d7fa5453f.tar.bz2
uhd-4c6ee083ef13184ba828d73b9dd2a95d7fa5453f.zip
rfnoc: Add nirio_link link object
-rw-r--r--host/lib/include/uhdlib/transport/nirio_link.hpp210
-rw-r--r--host/lib/transport/CMakeLists.txt1
-rw-r--r--host/lib/transport/nirio_link.cpp365
3 files changed, 576 insertions, 0 deletions
diff --git a/host/lib/include/uhdlib/transport/nirio_link.hpp b/host/lib/include/uhdlib/transport/nirio_link.hpp
new file mode 100644
index 000000000..055874146
--- /dev/null
+++ b/host/lib/include/uhdlib/transport/nirio_link.hpp
@@ -0,0 +1,210 @@
+//
+// Copyright 2019 Ettus Research, a National Instruments Brand
+//
+// SPDX-License-Identifier: GPL-3.0-or-later
+//
+
+#ifndef INCLUDED_UHD_TRANSPORT_NIRIO_LINK_HPP
+#define INCLUDED_UHD_TRANSPORT_NIRIO_LINK_HPP
+
+#include <uhd/config.hpp>
+#include <uhd/transport/buffer_pool.hpp>
+#include <uhd/transport/nirio/nirio_fifo.h>
+#include <uhd/transport/nirio/niriok_proxy.h>
+#include <uhd/transport/nirio/niusrprio_session.h>
+#include <uhd/types/device_addr.hpp>
+#include <uhdlib/transport/adapter_info.hpp>
+#include <uhdlib/transport/link_base.hpp>
+#include <uhdlib/transport/links.hpp>
+#include <memory>
+#include <vector>
+
+namespace uhd { namespace transport {
+
+using fifo_data_t = uint64_t;
+
+/*! NI-RIO frame_buff
+ *
+ * NI-RIO internally manages the memory, so these frame_buff objects just need
+ * a way of internally manipulating the _data pointer
+ */
+class nirio_frame_buff : public frame_buff
+{
+public:
+ nirio_frame_buff()
+ {
+ _data = nullptr;
+ }
+
+ fifo_data_t** get_fifo_ptr_ref()
+ {
+ return reinterpret_cast<fifo_data_t**>(&_data);
+ };
+};
+
+class nirio_adapter_info : public adapter_info
+{
+public:
+ nirio_adapter_info(const std::string& resource) : _resource(resource) {}
+
+ ~nirio_adapter_info() {}
+
+ std::string to_string()
+ {
+ return std::string("NIRIO:") + _resource;
+ }
+
+ bool operator==(const nirio_adapter_info& rhs) const
+ {
+ return (_resource == rhs._resource);
+ }
+
+private:
+ const std::string _resource;
+};
+
+/*! Link object for talking to NI-RIO USRPs (X310 variants using PCIe)
+ *
+ * \b Note: This link cannot release frame buffers out of order, which means it
+ * can't be used with an IO service that does that.
+ */
+class nirio_link : public recv_link_base<nirio_link>, public send_link_base<nirio_link>
+{
+public:
+ using sptr = std::shared_ptr<nirio_link>;
+
+ ~nirio_link();
+
+    /*! Make a new NI-RIO link.
+     *
+     * \param fpga_session NI-RIO FPGA session used to create the DMA FIFOs
+     * \param instance DMA channel (FIFO) index used by this link
+     * \param params Values for frame sizes, num frames, and buffer sizes
+     * \param hints Device address hints which may override the default
+     *              frame/buffer parameters (e.g. recv_frame_size)
+     */
+ static sptr make(uhd::niusrprio::niusrprio_session::sptr fpga_session,
+ const uint32_t instance,
+ const link_params_t& params,
+ const uhd::device_addr_t& hints);
+
+ /*!
+ * Get the physical adapter ID used for this link
+ */
+ adapter_id_t get_send_adapter_id() const
+ {
+ return _adapter_id;
+ }
+
+ /*!
+ * Get the physical adapter ID used for this link
+ */
+ adapter_id_t get_recv_adapter_id() const
+ {
+ return _adapter_id;
+ }
+
+private:
+ using recv_link_base_t = recv_link_base<nirio_link>;
+ using send_link_base_t = send_link_base<nirio_link>;
+
+ // Friend declarations to allow base classes to call private methods
+ friend recv_link_base_t;
+ friend send_link_base_t;
+
+ nirio_link(uhd::niusrprio::niusrprio_session::sptr fpga_session,
+ uint32_t instance,
+ const link_params_t& params);
+
+ /**************************************************************************
+ * NI-RIO specific helpers
+ *************************************************************************/
+ void _flush_rx_buff();
+
+ void _wait_until_stream_ready();
+
+ /**************************************************************************
+ * recv_link/send_link API
+ *************************************************************************/
+ // Methods called by recv_link_base
+ UHD_FORCE_INLINE size_t get_recv_buff_derived(frame_buff& buff, int32_t timeout_ms)
+ {
+ using namespace uhd::niusrprio;
+ nirio_status status = 0;
+ size_t elems_acquired = 0;
+ size_t elems_remaining = 0;
+ // This will modify the data pointer in buff if successful:
+ fifo_data_t** data_ptr = static_cast<nirio_frame_buff&>(buff).get_fifo_ptr_ref();
+ nirio_status_chain(_recv_fifo->acquire(*data_ptr,
+ _link_params.recv_frame_size / sizeof(fifo_data_t),
+ static_cast<uint32_t>(timeout_ms * 1000),
+ elems_acquired,
+ elems_remaining),
+ status);
+ const size_t length = elems_acquired * sizeof(fifo_data_t);
+
+ if (nirio_status_not_fatal(status)) {
+ return length;
+ } else if (status == NiRio_Status_CommunicationTimeout) {
+ nirio_status_to_exception(status, "NI-RIO PCIe data transfer failed.");
+ }
+ return 0; // zero for timeout or error.
+ }
+
+ UHD_FORCE_INLINE void release_recv_buff_derived(frame_buff& /*buff*/)
+ {
+ _recv_fifo->release(_link_params.recv_frame_size / sizeof(fifo_data_t));
+ }
+
+ // Methods called by send_link_base
+ UHD_FORCE_INLINE bool get_send_buff_derived(frame_buff& buff, int32_t timeout_ms)
+ {
+ using namespace uhd::niusrprio;
+ nirio_status status = 0;
+ size_t elems_acquired = 0;
+ size_t elems_remaining = 0;
+ // This will modify the data pointer in buff if successful:
+ fifo_data_t** data_ptr = static_cast<nirio_frame_buff&>(buff).get_fifo_ptr_ref();
+ nirio_status_chain(_send_fifo->acquire(*data_ptr,
+ _link_params.send_frame_size / sizeof(fifo_data_t),
+ static_cast<uint32_t>(timeout_ms * 1000),
+ elems_acquired,
+ elems_remaining),
+ status);
+ // const size_t length = elems_acquired * sizeof(fifo_data_t);
+
+ if (nirio_status_not_fatal(status)) {
+ return true;
+ } else if (status == NiRio_Status_CommunicationTimeout) {
+ nirio_status_to_exception(status, "NI-RIO PCIe data transfer failed.");
+ }
+ return false;
+ }
+
+ UHD_FORCE_INLINE void release_send_buff_derived(frame_buff& /*buff*/)
+ {
+ _send_fifo->release(_link_params.send_frame_size / sizeof(fifo_data_t));
+ }
+
+ /**************************************************************************
+ * Private attributes
+ *************************************************************************/
+ //! Reference to the NI-RIO session
+ niusrprio::niusrprio_session::sptr _fpga_session;
+ //! DMA channel index
+ const uint32_t _fifo_instance;
+ //! Recv and send FIFO objects
+ uhd::niusrprio::nirio_fifo<fifo_data_t>::sptr _recv_fifo, _send_fifo;
+
+ const link_params_t _link_params;
+
+ std::vector<nirio_frame_buff> _recv_buffs;
+ std::vector<nirio_frame_buff> _send_buffs;
+
+ adapter_id_t _adapter_id;
+};
+
+}} // namespace uhd::transport
+
+
+#endif /* INCLUDED_UHD_TRANSPORT_NIRIO_LINK_HPP */
diff --git a/host/lib/transport/CMakeLists.txt b/host/lib/transport/CMakeLists.txt
index dd83164bf..50b95155a 100644
--- a/host/lib/transport/CMakeLists.txt
+++ b/host/lib/transport/CMakeLists.txt
@@ -129,6 +129,7 @@ LIBUHD_APPEND_SOURCES(
if(ENABLE_X300)
LIBUHD_APPEND_SOURCES(
+ ${CMAKE_CURRENT_SOURCE_DIR}/nirio_link.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nirio_zero_copy.cpp
)
endif(ENABLE_X300)
diff --git a/host/lib/transport/nirio_link.cpp b/host/lib/transport/nirio_link.cpp
new file mode 100644
index 000000000..d35d512a3
--- /dev/null
+++ b/host/lib/transport/nirio_link.cpp
@@ -0,0 +1,365 @@
+//
+// Copyright 2019 Ettus Research, a National Instruments Brand
+//
+// SPDX-License-Identifier: GPL-3.0-or-later
+//
+
+#include <uhd/utils/log.hpp>
+#include <uhd/utils/safe_call.hpp>
+#include <uhdlib/transport/adapter.hpp>
+#include <uhdlib/transport/links.hpp>
+#include <uhdlib/transport/nirio_link.hpp>
+#include <chrono>
+
+// X300 regs
+#include "../usrp/x300/x300_regs.hpp"
+
+using namespace uhd::transport;
+using namespace std::chrono_literals;
+using namespace uhd::niusrprio;
+
+/******************************************************************************
+ * Local helpers
+ *****************************************************************************/
+namespace {
+
+#if defined(_WIN32) || defined(__WIN32__) || defined(WIN32)
+# include <windows.h>
+size_t get_page_size()
+{
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwPageSize;
+}
+#else
+# include <unistd.h>
+size_t get_page_size()
+{
+ return size_t(sysconf(_SC_PAGESIZE));
+}
+#endif
+const size_t page_size = get_page_size();
+
+} // namespace
+
+#define PROXY _fpga_session->get_kernel_proxy()
+
+/******************************************************************************
+ * Structors
+ *****************************************************************************/
+nirio_link::nirio_link(uhd::niusrprio::niusrprio_session::sptr fpga_session,
+ uint32_t instance,
+ const link_params_t& params)
+ : recv_link_base_t(params.num_recv_frames, params.recv_frame_size)
+ , send_link_base_t(params.num_send_frames, params.send_frame_size)
+ , _fpga_session(fpga_session)
+ , _fifo_instance(instance)
+ , _link_params(params)
+{
+ UHD_LOG_TRACE("NIRIO", "Creating PCIe transport for channel " << instance);
+ UHD_LOGGER_TRACE("NIRIO")
+ << boost::format("nirio zero-copy RX transport configured with frame size = "
+ "%u, #frames = %u, buffer size = %u\n")
+ % _link_params.recv_frame_size % _link_params.num_recv_frames
+ % (_link_params.recv_frame_size * _link_params.num_recv_frames);
+ UHD_LOGGER_TRACE("NIRIO")
+ << boost::format("nirio zero-copy TX transport configured with frame size = "
+ "%u, #frames = %u, buffer size = %u\n")
+ % _link_params.send_frame_size % _link_params.num_send_frames
+ % (_link_params.send_frame_size * _link_params.num_send_frames);
+
+ nirio_status status = 0;
+ size_t actual_depth = 0, actual_size = 0;
+
+ // Disable DMA streams in case last shutdown was unclean (cleanup, so don't status
+ // chain)
+ PROXY->poke(PCIE_TX_DMA_REG(DMA_CTRL_STATUS_REG, _fifo_instance), DMA_CTRL_DISABLED);
+ PROXY->poke(PCIE_RX_DMA_REG(DMA_CTRL_STATUS_REG, _fifo_instance), DMA_CTRL_DISABLED);
+
+ _wait_until_stream_ready();
+
+ // Configure frame width
+ nirio_status_chain(
+ PROXY->poke(PCIE_TX_DMA_REG(DMA_FRAME_SIZE_REG, _fifo_instance),
+ static_cast<uint32_t>(_link_params.send_frame_size / sizeof(fifo_data_t))),
+ status);
+ nirio_status_chain(
+ PROXY->poke(PCIE_RX_DMA_REG(DMA_FRAME_SIZE_REG, _fifo_instance),
+ static_cast<uint32_t>(_link_params.recv_frame_size / sizeof(fifo_data_t))),
+ status);
+ // Config 32-bit word flipping and enable DMA streams
+ nirio_status_chain(PROXY->poke(PCIE_TX_DMA_REG(DMA_CTRL_STATUS_REG, _fifo_instance),
+ DMA_CTRL_SW_BUF_U32 | DMA_CTRL_ENABLED),
+ status);
+ nirio_status_chain(PROXY->poke(PCIE_RX_DMA_REG(DMA_CTRL_STATUS_REG, _fifo_instance),
+ DMA_CTRL_SW_BUF_U32 | DMA_CTRL_ENABLED),
+ status);
+
+ // Create FIFOs
+ nirio_status_chain(_fpga_session->create_rx_fifo(_fifo_instance, _recv_fifo), status);
+ nirio_status_chain(_fpga_session->create_tx_fifo(_fifo_instance, _send_fifo), status);
+
+ if ((_recv_fifo.get() != NULL) && (_send_fifo.get() != NULL)) {
+ // Initialize FIFOs
+ nirio_status_chain(_recv_fifo->initialize((_link_params.recv_frame_size
+ * _link_params.num_recv_frames)
+ / sizeof(fifo_data_t),
+ _link_params.recv_frame_size / sizeof(fifo_data_t),
+ actual_depth,
+ actual_size),
+ status);
+ nirio_status_chain(_send_fifo->initialize((_link_params.send_frame_size
+ * _link_params.num_send_frames)
+ / sizeof(fifo_data_t),
+ _link_params.send_frame_size / sizeof(fifo_data_t),
+ actual_depth,
+ actual_size),
+ status);
+
+ PROXY->get_rio_quirks().add_tx_fifo(_fifo_instance);
+
+ nirio_status_chain(_recv_fifo->start(), status);
+ nirio_status_chain(_send_fifo->start(), status);
+
+ if (!nirio_status_not_fatal(status)) {
+ UHD_LOG_ERROR("NIRIO", "Fatal error while creating RX/TX FIFOs!");
+ }
+ } else {
+ nirio_status_chain(NiRio_Status_ResourceNotInitialized, status);
+ }
+
+ nirio_status_to_exception(status, "Could not create nirio_link!");
+
+ // Preallocate empty frame_buffs
+ // We don't need to reserve any memory, because the DMA engine will do that
+ // for us. We just need to create a bunch of frame_buff objects.
+ _recv_buffs.reserve(_link_params.num_recv_frames);
+ _send_buffs.reserve(_link_params.num_send_frames);
+ for (size_t i = 0; i < _link_params.num_recv_frames; i++) {
+ _recv_buffs.emplace_back();
+ recv_link_base_t::preload_free_buff(&_recv_buffs.back());
+ }
+ for (size_t i = 0; i < _link_params.num_send_frames; i++) {
+ _send_buffs.emplace_back();
+ send_link_base_t::preload_free_buff(&_send_buffs.back());
+ }
+
+ // Create adapter info
+ auto info = nirio_adapter_info(_fpga_session->get_resource());
+ auto& ctx = adapter_ctx::get();
+ _adapter_id = ctx.register_adapter(info);
+}
+
+nirio_link::~nirio_link()
+{
+ PROXY->get_rio_quirks().remove_tx_fifo(_fifo_instance);
+
+ // Disable DMA streams (cleanup, so don't status chain)
+ PROXY->poke(PCIE_TX_DMA_REG(DMA_CTRL_STATUS_REG, _fifo_instance), DMA_CTRL_DISABLED);
+ PROXY->poke(PCIE_RX_DMA_REG(DMA_CTRL_STATUS_REG, _fifo_instance), DMA_CTRL_DISABLED);
+
+ UHD_SAFE_CALL(_flush_rx_buff();)
+
+ // Stop DMA channels. Stop is called in the fifo dtor but
+ // it doesn't hurt to do it here.
+ _send_fifo->stop();
+ _recv_fifo->stop();
+}
+
+nirio_link::sptr nirio_link::make(uhd::niusrprio::niusrprio_session::sptr fpga_session,
+ const uint32_t instance,
+ const uhd::transport::link_params_t& default_params,
+ const uhd::device_addr_t& hints)
+{
+ UHD_ASSERT_THROW(default_params.num_recv_frames != 0);
+ UHD_ASSERT_THROW(default_params.num_send_frames != 0);
+ UHD_ASSERT_THROW(default_params.recv_frame_size != 0);
+ UHD_ASSERT_THROW(default_params.send_frame_size != 0);
+ UHD_ASSERT_THROW(default_params.recv_buff_size != 0);
+ UHD_ASSERT_THROW(default_params.send_buff_size != 0);
+
+ // Initialize xport_params
+ link_params_t link_params = default_params;
+
+ // The kernel buffer for this transport must be (num_frames * frame_size) big. Unlike
+ // ethernet, where the kernel buffer size is independent of the circular buffer size
+ // for the transport, it is possible for users to over constrain the system when they
+ // set the num_frames and the buff_size. So we give buff_size priority over num_frames
+ // and throw an error if they conflict.
+
+ // RX
+ link_params.recv_frame_size =
+ size_t(hints.cast<double>("recv_frame_size", default_params.recv_frame_size));
+
+ size_t usr_num_recv_frames = static_cast<size_t>(
+ hints.cast<double>("num_recv_frames", double(default_params.num_recv_frames)));
+ size_t usr_recv_buff_size = static_cast<size_t>(
+ hints.cast<double>("recv_buff_size", double(default_params.recv_buff_size)));
+
+ if (hints.has_key("recv_buff_size")) {
+ if (usr_recv_buff_size % page_size != 0) {
+ throw uhd::value_error(
+ (boost::format("recv_buff_size must be multiple of %d") % page_size)
+ .str());
+ }
+ }
+
+ if (hints.has_key("recv_frame_size") and hints.has_key("num_recv_frames")) {
+ if (usr_num_recv_frames * link_params.recv_frame_size % page_size != 0) {
+ throw uhd::value_error(
+ (boost::format(
+ "num_recv_frames * recv_frame_size must be an even multiple of %d")
+ % page_size)
+ .str());
+ }
+ }
+
+ if (hints.has_key("num_recv_frames") and hints.has_key("recv_buff_size")) {
+ if (usr_recv_buff_size < link_params.recv_frame_size)
+ throw uhd::value_error("recv_buff_size must be equal to or greater than "
+ "(num_recv_frames * recv_frame_size)");
+
+ if ((usr_recv_buff_size / link_params.recv_frame_size) != usr_num_recv_frames)
+ throw uhd::value_error(
+ "Conflicting values for recv_buff_size and num_recv_frames");
+ }
+
+ if (hints.has_key("recv_buff_size")) {
+ link_params.num_recv_frames = std::max<size_t>(
+ 1, usr_recv_buff_size / link_params.recv_frame_size); // Round down
+ } else if (hints.has_key("num_recv_frames")) {
+ link_params.num_recv_frames = usr_num_recv_frames;
+ }
+
+ if (link_params.num_recv_frames * link_params.recv_frame_size % page_size != 0) {
+ throw uhd::value_error(
+ (boost::format(
+ "num_recv_frames * recv_frame_size must be an even multiple of %d")
+ % page_size)
+ .str());
+ }
+
+ // TX
+ link_params.send_frame_size =
+ size_t(hints.cast<double>("send_frame_size", default_params.send_frame_size));
+
+ size_t usr_num_send_frames = static_cast<size_t>(
+ hints.cast<double>("num_send_frames", default_params.num_send_frames));
+ size_t usr_send_buff_size = static_cast<size_t>(
+ hints.cast<double>("send_buff_size", default_params.send_buff_size));
+
+ if (hints.has_key("send_buff_size")) {
+ if (usr_send_buff_size % page_size != 0) {
+ throw uhd::value_error(
+ (boost::format("send_buff_size must be multiple of %d") % page_size)
+ .str());
+ }
+ }
+
+ if (hints.has_key("send_frame_size") and hints.has_key("num_send_frames")) {
+ if (usr_num_send_frames * link_params.send_frame_size % page_size != 0) {
+ throw uhd::value_error(
+ (boost::format(
+ "num_send_frames * send_frame_size must be an even multiple of %d")
+ % page_size)
+ .str());
+ }
+ }
+
+ if (hints.has_key("num_send_frames") and hints.has_key("send_buff_size")) {
+ if (usr_send_buff_size < link_params.send_frame_size)
+ throw uhd::value_error("send_buff_size must be equal to or greater than "
+ "(num_send_frames * send_frame_size)");
+
+ if ((usr_send_buff_size / link_params.send_frame_size) != usr_num_send_frames)
+ throw uhd::value_error(
+ "Conflicting values for send_buff_size and num_send_frames");
+ }
+
+ if (hints.has_key("send_buff_size")) {
+ link_params.num_send_frames = std::max<size_t>(
+ 1, usr_send_buff_size / link_params.send_frame_size); // Round down
+ } else if (hints.has_key("num_send_frames")) {
+ link_params.num_send_frames = usr_num_send_frames;
+ }
+
+ if (link_params.num_send_frames * link_params.send_frame_size % page_size != 0) {
+ throw uhd::value_error(
+ (boost::format(
+ "num_send_frames * send_frame_size must be an even multiple of %d")
+ % page_size)
+ .str());
+ }
+
+ return nirio_link::sptr(new nirio_link(fpga_session, instance, link_params));
+}
+
+
+/******************************************************************************
+ * NI-RIO-specific helpers
+ *****************************************************************************/
+void nirio_link::_flush_rx_buff()
+{
+ // acquire is called with 0 elements requested first to
+ // get the number of elements in the buffer and then
+ // repeatedly with the number of remaining elements
+ // until the buffer is empty
+ for (size_t num_elems_requested = 0, num_elems_acquired = 0, num_elems_remaining = 1;
+ num_elems_remaining;
+ num_elems_requested = num_elems_remaining) {
+ fifo_data_t* elems_buffer = NULL;
+ nirio_status status = _recv_fifo->acquire(elems_buffer,
+ num_elems_requested,
+ 0, // timeout
+ num_elems_acquired,
+ num_elems_remaining);
+ // throw exception if status is fatal
+ nirio_status_to_exception(
+ status, "NI-RIO PCIe data transfer failed during flush.");
+ _recv_fifo->release(num_elems_acquired);
+ }
+}
+
+void nirio_link::_wait_until_stream_ready()
+{
+ constexpr auto TIMEOUT_IN_MS = 100ms;
+
+ uint32_t reg_data = 0xffffffff;
+ bool tx_busy = true, rx_busy = true;
+ boost::posix_time::ptime start_time;
+ boost::posix_time::time_duration elapsed;
+ nirio_status status = NiRio_Status_Success;
+
+ nirio_status_chain(
+ PROXY->peek(PCIE_TX_DMA_REG(DMA_CTRL_STATUS_REG, _fifo_instance), reg_data),
+ status);
+ tx_busy = (reg_data & DMA_STATUS_BUSY) > 0;
+ nirio_status_chain(
+ PROXY->peek(PCIE_RX_DMA_REG(DMA_CTRL_STATUS_REG, _fifo_instance), reg_data),
+ status);
+ rx_busy = (reg_data & DMA_STATUS_BUSY) > 0;
+
+ if (nirio_status_not_fatal(status) && (tx_busy || rx_busy)) {
+ const auto end_time = std::chrono::steady_clock::now() + TIMEOUT_IN_MS;
+ do {
+ std::this_thread::sleep_for(50ms); // Avoid flooding the bus
+ nirio_status_chain(
+ PROXY->peek(
+ PCIE_TX_DMA_REG(DMA_CTRL_STATUS_REG, _fifo_instance), reg_data),
+ status);
+ tx_busy = (reg_data & DMA_STATUS_BUSY) > 0;
+ nirio_status_chain(
+ PROXY->peek(
+ PCIE_RX_DMA_REG(DMA_CTRL_STATUS_REG, _fifo_instance), reg_data),
+ status);
+ rx_busy = (reg_data & DMA_STATUS_BUSY) > 0;
+ } while (nirio_status_not_fatal(status) && (tx_busy || rx_busy)
+ && (std::chrono::steady_clock::now() < end_time));
+
+ if (tx_busy || rx_busy) {
+ nirio_status_chain(NiRio_Status_FpgaBusy, status);
+ }
+
+        nirio_status_to_exception(status, "Could not create nirio_link transport.");
+ }
+}