author     Brent Stapleton <brent.stapleton@ettus.com>    2017-11-27 18:21:23 -0800
committer  Martin Braun <martin.braun@ettus.com>          2017-12-22 15:05:57 -0800
commit     19cdfb4148338a94bdc75194e0fadad993c37758 (patch)
tree       b308cd3225fae0c68551c21ad19941d22b56176b /mpm
parent     48d5d72fd76bbf615ea88fa9d5177471ed3d15a9 (diff)
mpm: Reset periph manager on update
Upon updating certain components (the FPGA, for example), the
Peripheral Manager is restarted and the device tree overlay is
reapplied. To facilitate this, the RPC server intercepts calls to the
update_component function and handles the reset itself.
Tested on the RJ45 Ethernet connection. This probably won't work as
well over SFP, since the SFP link goes down when the overlay is removed.
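
For context, a component update goes through MPM's claim/RPC flow. Below is a
minimal client-side sketch, assuming the mprpc client library (which MPM uses
server-side), the MPM RPC port 49601, a hypothetical hostname, and claim()
semantics inferred from this diff; the metadata is only known to carry an 'id':

# Hypothetical client-side sketch; hostname, port value, and claim()
# arguments are assumptions, not part of this commit.
from mprpc import RPCClient

client = RPCClient('ni-n3xx-31ABCDE', 49601)    # assumed MPM RPC port
token = client.call('claim', 'update-session')  # must hold the claim first
with open('n3xx.bin', 'rb') as img:
    data = img.read()
metadata = {'id': 'fpga'}  # 'fpga' has 'reset': True, so MPM resets itself
# update_component takes lists, so several components (e.g. FPGA + DTS)
# can be written in one call
client.call('update_component', token, [metadata], [data])
client.call('unclaim', token)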
Diffstat (limited to 'mpm')
-rwxr-xr-x  mpm/python/usrp_hwd.py                      |  5
-rw-r--r--  mpm/python/usrp_mpm/periph_manager/base.py  |  8
-rw-r--r--  mpm/python/usrp_mpm/periph_manager/n310.py  | 16
-rw-r--r--  mpm/python/usrp_mpm/rpc_server.py           | 91
4 files changed, 105 insertions, 15 deletions
diff --git a/mpm/python/usrp_hwd.py b/mpm/python/usrp_hwd.py
index fef02087a..d6e860a0c 100755
--- a/mpm/python/usrp_hwd.py
+++ b/mpm/python/usrp_hwd.py
@@ -137,7 +137,8 @@ def main():
     # with cmake (-DMPM_DEVICE).
     # mgr is thus derived from PeriphManagerBase (see periph_manager/base.py)
     log.info("Spawning periph manager...")
-    mgr = periph_manager(args)
+    mgr_generator = lambda: periph_manager(args)
+    mgr = mgr_generator()
     discovery_info = {
         "type": mgr.get_device_info().get("type", "n/a"),
         "serial": mgr.get_device_info().get("serial", "n/a"),
@@ -162,7 +163,7 @@ def main():
     )
     log.info("Spawning RPC process...")
     _PROCESSES.append(
-        mpm.spawn_rpc_process(mpm.mpmtypes.MPM_RPC_PORT, shared, mgr))
+        mpm.spawn_rpc_process(mpm.mpmtypes.MPM_RPC_PORT, shared, mgr, mgr_generator))
     log.info("Processes launched. Registering signal handlers.")
     signal.signal(signal.SIGTERM, kill_time)
     signal.signal(signal.SIGINT, kill_time)
diff --git a/mpm/python/usrp_mpm/periph_manager/base.py b/mpm/python/usrp_mpm/periph_manager/base.py
index 87544e451..2be8c7570 100644
--- a/mpm/python/usrp_mpm/periph_manager/base.py
+++ b/mpm/python/usrp_mpm/periph_manager/base.py
@@ -352,6 +352,13 @@ class PeriphManagerBase(object):
         self.log.trace("Resetting SID pool...")
         self._available_endpoints = list(range(256))
 
+    def tear_down(self):
+        """
+        Tear down all members that need to be specially handled before
+        deconstruction.
+        """
+        self.log.debug("Teardown called for Peripheral Manager base.")
+
     @no_claim
     def list_updateable_components(self):
         """
@@ -456,7 +463,6 @@ class PeriphManagerBase(object):
             update_func(filepath, metadata)
         return True
 
-
     def load_fpga_image(self, target=None):
         """
         load a new fpga image
diff --git a/mpm/python/usrp_mpm/periph_manager/n310.py b/mpm/python/usrp_mpm/periph_manager/n310.py
index 7adbc85f0..79c866aa4 100644
--- a/mpm/python/usrp_mpm/periph_manager/n310.py
+++ b/mpm/python/usrp_mpm/periph_manager/n310.py
@@ -31,6 +31,7 @@ from ..net import get_mac_addr
 from ..mpmtypes import SID
 from usrp_mpm.rpc_server import no_rpc
 from usrp_mpm import net
+from usrp_mpm import dtoverlay
 from ..sysfs_gpio import SysFSGPIO
 from ..ethtable import EthDispatcherTable
 from ..liberiotable import LiberioDispatcherTable
@@ -476,10 +477,12 @@ class n310(PeriphManagerBase):
        'fpga': {
            'callback': "update_fpga",
            'path': '/lib/firmware/n3xx.bin',
+           'reset': True,
        },
        'dts': {
            'callback': "update_dts",
            'path': '/lib/firmware/n3xx.dts',
+           'reset': False,
        },
    }
 
@@ -586,6 +589,19 @@ class n310(PeriphManagerBase):
         for xport_mgr in itervalues(self._xport_mgrs):
             xport_mgr.deinit()
 
+    def tear_down(self):
+        """
+        Tear down all members that need to be specially handled before
+        deconstruction.
+        For N310, this means the overlay.
+        """
+        active_overlays = self.list_active_overlays()
+        self.log.trace("N310 has active device tree overlays: {}".format(
+            active_overlays
+        ))
+        for overlay in active_overlays:
+            dtoverlay.rm_overlay(overlay)
+
     ###########################################################################
     # Transport API
     ###########################################################################
diff --git a/mpm/python/usrp_mpm/rpc_server.py b/mpm/python/usrp_mpm/rpc_server.py
index 2cd44dfe5..38c9107ee 100644
--- a/mpm/python/usrp_mpm/rpc_server.py
+++ b/mpm/python/usrp_mpm/rpc_server.py
@@ -54,22 +54,20 @@ class MPMServer(RPCServer):
     RPC calls to appropriate calls in the periph_manager and
     dboard_managers.
     """
     # This is a list of methods in this class which require a claim
-    default_claimed_methods = ['init', 'reclaim', 'unclaim']
+    default_claimed_methods = ['init', 'update_component', 'reclaim', 'unclaim']
-    def __init__(self, state, mgr, *args, **kwargs):
+    def __init__(self, state, mgr, mgr_generator=None, *args, **kwargs):
         self.log = get_main_logger().getChild('RPCServer')
         self._state = state
         self._timer = Greenlet()
         self.session_id = None
         self.periph_manager = mgr
+        self._mgr_generator = mgr_generator
         self._db_methods = []
         self._mb_methods = []
         self.claimed_methods = copy.copy(self.default_claimed_methods)
         self._last_error = ""
-        self._update_component_commands(mgr, '', '_mb_methods')
-        for db_slot, dboard in enumerate(mgr.dboards):
-            cmd_prefix = 'db_' + str(db_slot) + '_'
-            self._update_component_commands(dboard, cmd_prefix, '_db_methods')
+        self._init_rpc_calls(mgr)
         # We call the server __init__ function here, and not earlier, because
         # first the commands need to be registered
         super(MPMServer, self).__init__(
@@ -78,6 +76,15 @@ class MPMServer(RPCServer):
             **kwargs
         )
 
+    def _init_rpc_calls(self, mgr):
+        """
+        Register all RPC calls for the motherboard and daughterboards
+        """
+        self._update_component_commands(mgr, '', '_mb_methods')
+        for db_slot, dboard in enumerate(mgr.dboards):
+            cmd_prefix = 'db_' + str(db_slot) + '_'
+            self._update_component_commands(dboard, cmd_prefix, '_db_methods')
+
     def _check_token_valid(self, token):
         """
         Returns True iff:
@@ -93,7 +100,6 @@ class MPMServer(RPCServer):
             len(token) == TOKEN_LEN and \
             self._state.claim_token.value == token
 
-
     def _update_component_commands(self, component, namespace, storage):
         """
         Detect available methods for an object and add them to the RPC server.
@@ -137,6 +143,7 @@ class MPMServer(RPCServer):
             raise RuntimeError("Invalid token!")
         try:
             return function(*args)
+
         except Exception as ex:
             self.log.error(
                 "Uncaught exception in method %s: %s",
@@ -251,6 +258,66 @@ class MPMServer(RPCServer):
         self._reset_timer()
         return result
 
+    def reset_mgr(self):
+        """
+        Reset the Peripheral Manager for this RPC server.
+        """
+        # reassign
+        self.periph_manager.tear_down()
+        self.periph_manager = None
+        if self._mgr_generator is None:
+            raise RuntimeError("Can't reset peripheral manager: no generator function.")
+        self.periph_manager = self._mgr_generator()
+        self._init_rpc_calls(self.periph_manager)
+
+    def update_component(self, token, file_metadata_l, data_l):
+        """
+        Updates the device component files specified by the metadata and data
+        :param file_metadata_l: List of dictionaries of strings containing metadata
+        :param data_l: List of binary strings with the file contents to be written
+        """
+        self._timer.kill()  # Stop the timer, update_component can take some time.
+        # Check the claimed status
+        if not self._check_token_valid(token):
+            self._last_error =\
+                "Attempt to update component without valid claim from {}".format(
+                    self.client_host
+                )
+            self.log.error(self._last_error)
+            raise RuntimeError("Attempt to update component without valid claim.")
+        result = self.periph_manager.update_component(file_metadata_l, data_l)
+        if not result:
+            component_ids = [metadata['id'] for metadata in file_metadata_l]
+            raise RuntimeError("Failed to update components: {}".format(component_ids))
+
+        # Check if we need to reset the peripheral manager
+        reset_now = False
+        for metadata, data in zip(file_metadata_l, data_l):
+            # Make sure the component is in the updateable_components
+            component_id = metadata['id']
+            if component_id in self.periph_manager.updateable_components:
+                # Check if updating that component means the PM should be reset
+                if self.periph_manager.updateable_components[component_id]['reset']:
+                    reset_now = True
+            else:
+                self.log.debug("ID {} not in updateable components ({})".format(
+                    component_id, self.periph_manager.updateable_components))
+
+        try:
+            self.log.trace("Reset after updating component? {}".format(reset_now))
+            if reset_now:
+                self.reset_mgr()
+                self.log.debug("Reset the periph manager")
+        except Exception as ex:
+            self.log.error(
+                "Error in update_component while resetting: {}".format(
+                    ex
+                ))
+            self._last_error = str(ex)
+
+        self.log.debug("End of update_component")
+        self._reset_timer()
+
     def reclaim(self, token):
         """
         reclaim an MPM device with a token. This operation will fail
@@ -286,8 +353,8 @@ class MPMServer(RPCServer):
         self._state.claim_status.value = False
         self._state.claim_token.value = b''
         self.session_id = None
-        self.periph_manager.claimed = False
         try:
+            self.periph_manager.claimed = False
             self.periph_manager.set_connection_type(None)
             self.periph_manager.deinit()
         except Exception as ex:
@@ -334,14 +401,14 @@
 
 
-def _rpc_server_process(shared_state, port, mgr):
+def _rpc_server_process(shared_state, port, mgr, mgr_generator):
     """
     This is the actual process that's running the RPC server.
     """
     connections = Pool(1000)
     server = StreamServer(
         ('0.0.0.0', port),
-        handle=MPMServer(shared_state, mgr),
+        handle=MPMServer(shared_state, mgr, mgr_generator),
         spawn=connections)
     # catch signals and stop the stream server
     signal(signal.SIGTERM, lambda *args: server.stop())
@@ -349,12 +416,12 @@
     server.serve_forever()
 
 
-def spawn_rpc_process(state, udp_port, mgr):
+def spawn_rpc_process(state, udp_port, mgr, mgr_generator):
     """
     Returns a process that contains the RPC server
     """
-    proc_args = [udp_port, state, mgr]
+    proc_args = [udp_port, state, mgr, mgr_generator]
     proc = Process(target=_rpc_server_process, args=proc_args)
     proc.start()
     return proc
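
The N310 tear_down() above delegates overlay removal to usrp_mpm.dtoverlay. On
kernels with configfs device tree overlay support, removing an overlay amounts
to deleting its configfs directory. The following is a rough sketch of that
mechanism under those assumptions, not MPM's actual helper (see
mpm/python/usrp_mpm/dtoverlay.py for the real implementation):

# Rough sketch of configfs-based overlay handling; the base path is the
# standard configfs location, function names mirror the ones used above.
import os

OVERLAY_BASE = '/sys/kernel/config/device-tree/overlays'

def list_active_overlays():
    """List overlays currently applied via configfs."""
    return os.listdir(OVERLAY_BASE)

def rm_overlay(name):
    """Remove an applied device tree overlay by deleting its configfs dir."""
    os.rmdir(os.path.join(OVERLAY_BASE, name))

Removing the overlay also removes the FPGA-backed peripherals it declares
(which is why the commit message warns about updating over SFP), so the RPC
server rebuilds the Peripheral Manager and re-registers all RPC calls
afterwards via reset_mgr().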