aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMartin Braun <martin.braun@ettus.com>2019-11-12 10:28:31 -0800
committerMartin Braun <martin.braun@ettus.com>2019-11-26 12:21:33 -0800
commit59fae330b0345c4a587862322b16379e563e673b (patch)
tree4b686e3239d0811f98ab1a63bc321812f0baef52
parent59a436038445613b6552c804601297405d736fce (diff)
downloaduhd-59fae330b0345c4a587862322b16379e563e673b.tar.gz
uhd-59fae330b0345c4a587862322b16379e563e673b.tar.bz2
uhd-59fae330b0345c4a587862322b16379e563e673b.zip
detail::graph: Add shutdown capability
In the existing graph, the shutdown was simply a release. However, any outstanding actions would trigger warnings (because released graphs aren't supposed to still have actions being passed around), which would sometimes be visible at the end of an application. This is a safer solution than simply releasing, because it explicitly sets a shutdown flag that all graph-affecting functions (property propagation and action handling) respect. Once the flag is set, the graph can no longer be booted up again.
-rw-r--r--host/lib/include/uhdlib/rfnoc/graph.hpp16
-rw-r--r--host/lib/rfnoc/graph.cpp29
-rw-r--r--host/lib/rfnoc/rfnoc_graph.cpp4
3 files changed, 35 insertions, 14 deletions
diff --git a/host/lib/include/uhdlib/rfnoc/graph.hpp b/host/lib/include/uhdlib/rfnoc/graph.hpp
index 286f2303f..c7b06d636 100644
--- a/host/lib/include/uhdlib/rfnoc/graph.hpp
+++ b/host/lib/include/uhdlib/rfnoc/graph.hpp
@@ -63,6 +63,15 @@ public:
*/
void release();
+ /*! Shutdown graph: Permanently release
+ *
+ * This will release the graph permanently and safely. All ongoing property
+ * and action handling is completed and then disabled (this means that
+ * calling shutdown while blocks are still working will cause actions to not
+ * get delivered).
+ */
+ void shutdown();
+
/*! Return a list of all edges
*/
std::vector<graph_edge_t> enumerate_edges();
@@ -271,16 +280,15 @@ private:
//! Flag to ensure serialized handling of actions
std::atomic_flag _action_handling_ongoing;
- //! Mutex for to avoid the user from sending one message before another
- // message is sent
- std::recursive_mutex _action_mutex;
-
//! Changes to the release/commit state of the graph are locked with this mutex
std::recursive_mutex _release_mutex;
//! This counter gets decremented everytime commit() is called. When zero,
// the graph is committed.
size_t _release_count{1};
+
+ //! A flag indicating whether the graph has shut down. Protected by _release_mutex
+ bool _shutdown{false};
};
diff --git a/host/lib/rfnoc/graph.cpp b/host/lib/rfnoc/graph.cpp
index 7be0c0035..7dc72420c 100644
--- a/host/lib/rfnoc/graph.cpp
+++ b/host/lib/rfnoc/graph.cpp
@@ -8,8 +8,9 @@
#include <uhd/utils/log.hpp>
#include <uhdlib/rfnoc/graph.hpp>
#include <uhdlib/rfnoc/node_accessor.hpp>
-#include <boost/graph/topological_sort.hpp>
#include <boost/graph/filtered_graph.hpp>
+#include <boost/graph/topological_sort.hpp>
+#include <limits>
#include <utility>
using namespace uhd::rfnoc;
@@ -194,6 +195,14 @@ void graph_t::release()
_release_count++;
}
+void graph_t::shutdown()
+{
+ std::lock_guard<std::recursive_mutex> l(_release_mutex);
+ UHD_LOG_TRACE(LOG_ID, "graph::shutdown()");
+ _shutdown = true;
+ _release_count = std::numeric_limits<size_t>::max();
+}
+
std::vector<graph_t::graph_edge_t> graph_t::enumerate_edges()
{
auto e_iterators = boost::edges(_graph);
@@ -215,16 +224,19 @@ std::vector<graph_t::graph_edge_t> graph_t::enumerate_edges()
void graph_t::resolve_all_properties(
resolve_context context, rfnoc_graph_t::vertex_descriptor initial_node)
{
- node_accessor_t node_accessor{};
-
if (boost::num_vertices(_graph) == 0) {
return;
}
+
+ node_accessor_t node_accessor{};
// We can't release during property propagation, so we lock this entire
// method to make sure that a) different threads can't interfere with each
// other, and b) that we don't release the graph while this method is still
// running.
std::lock_guard<std::recursive_mutex> l(_release_mutex);
+ if (_shutdown) {
+ return;
+ }
if (_release_count) {
node_ref_t current_node = boost::get(vertex_property_t(), _graph, initial_node);
UHD_LOG_TRACE(LOG_ID,
@@ -373,7 +385,11 @@ void graph_t::enqueue_action(
// We can't release during action handling, so we lock this entire
// method to make sure that we don't release the graph while this method is
// still running.
+ // It also prevents a different thread from throwing in their own actions.
std::lock_guard<std::recursive_mutex> release_lock(_release_mutex);
+ if (_shutdown) {
+ return;
+ }
if (_release_count) {
UHD_LOG_WARNING(LOG_ID,
"Action propagation is not enabled, graph is not committed! Will not "
@@ -381,16 +397,13 @@ void graph_t::enqueue_action(
<< action->key << "'");
return;
}
- // First, make sure that once we start action handling, no other node from
- // a different thread can throw in their own actions
- std::lock_guard<std::recursive_mutex> l(_action_mutex);
// Check if we're already in the middle of handling actions. In that case,
// we're already in the loop below, and then all we want to do is to enqueue
// this action tuple. The first call to enqueue_action() within this thread
// context will have handling_ongoing == false.
const bool handling_ongoing = _action_handling_ongoing.test_and_set();
-
+ // In any case, stash the new action at the end of the action queue
_action_queue.emplace_back(std::make_tuple(src_node, src_edge, action));
if (handling_ongoing) {
UHD_LOG_TRACE(LOG_ID,
@@ -450,7 +463,7 @@ void graph_t::enqueue_action(
// Release the action handling flag
_action_handling_ongoing.clear();
- // Now, the _action_mutex is released, and someone else can start sending
+ // Now, the _release_mutex is released, and someone else can start sending
// actions.
}
diff --git a/host/lib/rfnoc/rfnoc_graph.cpp b/host/lib/rfnoc/rfnoc_graph.cpp
index 6ebfe8612..32a9b0071 100644
--- a/host/lib/rfnoc/rfnoc_graph.cpp
+++ b/host/lib/rfnoc/rfnoc_graph.cpp
@@ -82,8 +82,8 @@ public:
~rfnoc_graph_impl()
{
- UHD_LOG_TRACE(LOG_ID, "Releasing detail::graph...");
- _graph->release();
+ UHD_LOG_TRACE(LOG_ID, "Shutting down detail::graph...");
+ _graph->shutdown();
UHD_LOG_TRACE(LOG_ID, "Shutting down all blocks ...");
_block_registry->shutdown();
_graph.reset();