path: root/dpd/src/Heuristics.py
author     andreas128 <Andreas>  2017-09-29 18:50:41 +0200
committer  andreas128 <Andreas>  2017-09-29 18:50:41 +0200
commit     5e3ca125bfc56d31b9c9ef819eab9171b73b7333 (patch)
tree       297d400f754742faf6bfc4fc7eff9858163c845c /dpd/src/Heuristics.py
parent     64e0193824262e93e7cd67bb8c3af68d278f990c (diff)
Cleanup
Diffstat (limited to 'dpd/src/Heuristics.py')
-rw-r--r--    dpd/src/Heuristics.py    13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/dpd/src/Heuristics.py b/dpd/src/Heuristics.py
index a32ccff..f98490d 100644
--- a/dpd/src/Heuristics.py
+++ b/dpd/src/Heuristics.py
@@ -1,26 +1,33 @@
 # -*- coding: utf-8 -*-
 #
-# DPD Calculation Engine, heuristics we use to tune the parameters
+# DPD Calculation Engine, heuristics we use to tune the parameters.
 #
 # http://www.opendigitalradio.org
 # Licence: The MIT License, see notice at the end of this file
 import numpy as np
+
 def get_learning_rate(idx_run):
+    """Gradually reduce learning rate from lr_max to lr_min within
+    idx_max steps, then keep the learning rate at lr_min"""
     idx_max = 10.0
     lr_min = 0.05
     lr_max = 0.4
     lr_delta = lr_max - lr_min
     idx_run = min(idx_run, idx_max)
-    learning_rate = lr_max - lr_delta * idx_run/idx_max
+    learning_rate = lr_max - lr_delta * idx_run / idx_max
     return learning_rate
+
 def get_n_meas(idx_run):
+    """Gradually increase number of measurements used to extract
+    a statistic from n_meas_min to n_meas_max within idx_max steps,
+    then keep number of measurements at n_meas_max"""
     idx_max = 10.0
     n_meas_min = 10
     n_meas_max = 20
     n_meas_delta = n_meas_max - n_meas_min
     idx_run = min(idx_run, idx_max)
-    learning_rate = n_meas_delta * idx_run/idx_max + n_meas_min
+    learning_rate = n_meas_delta * idx_run / idx_max + n_meas_min
     return int(np.round(learning_rate))
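
For reference, both helpers implement the same linear ramp: the value changes linearly over the first idx_max = 10 iterations and stays constant afterwards. A minimal usage sketch follows; the import path dpd.src.Heuristics is an assumption and depends on how the DPD engine is packaged and run:

# Hedged sketch: prints how the two heuristics evolve over the first runs.
# The import path below is an assumption, not confirmed by this commit.
from dpd.src.Heuristics import get_learning_rate, get_n_meas

for idx_run in range(13):
    # The learning rate ramps down from 0.4 to 0.05 and the number of
    # measurements ramps up from 10 to 20; both saturate once idx_run >= 10.
    print(idx_run, get_learning_rate(idx_run), get_n_meas(idx_run))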