
test all the different loss functions in each test

Merged Bas Nijholt requested to merge test_loss_functions into master
Files changed: 2
@@ -228,6 +228,11 @@ class Learner1D(BaseLearner):
         self.losses = {}
         self.losses_combined = {}
+        # When the scale changes by a factor 2, the losses are
+        # recomputed. This is tunable such that we can test
+        # the learner's behavior in the tests.
+        self._recompute_losses_factor = 2
         self.data = {}
         self.pending_points = set()
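The attribute exists purely so that tests can tune how eagerly losses are recomputed. A minimal sketch of that usage, assuming the public adaptive API; the function, factor value, and loop are illustrative and not taken from this MR's test file:

import adaptive

def test_losses_are_recomputed_often():
    # Hypothetical test, not part of this MR: lowering the factor
    # makes the scale check shown in the second hunk fire sooner.
    learner = adaptive.Learner1D(lambda x: x**2, bounds=(-1, 1))
    learner._recompute_losses_factor = 1.1
    for _ in range(50):
        xs, _ = learner.ask(1)
        for x in xs:
            learner.tell(x, x**2)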
@@ -447,7 +452,7 @@ class Learner1D(BaseLearner):
         self._update_losses(x, real=True)
         # If the scale has increased enough, recompute all losses.
-        if self._scale[1] > 2 * self._oldscale[1]:
+        if self._scale[1] > self._recompute_losses_factor * self._oldscale[1]:
             for interval in self.losses:
                 self._update_interpolated_loss_in_interval(*interval)
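In numbers: with the default factor of 2, all interpolated losses are recomputed only once the y-scale has more than doubled since the last full recomputation. A small illustration of the condition (variable names mirror the diff; the values are made up):

_recompute_losses_factor = 2
_scale = [1.0, 3.2]     # current (x, y) scale
_oldscale = [1.0, 1.5]  # (x, y) scale at the last full recomputation

# 3.2 > 2 * 1.5, so every interval's loss would be recomputed here.
assert _scale[1] > _recompute_losses_factor * _oldscale[1]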
@@ -563,10 +568,10 @@ class Learner1D(BaseLearner):
def finite_loss(loss, xs):
# If the loss is infinite we return the
# distance between the two points.
loss = loss if not math.isinf(loss)
else (xs[1] - xs[0]) / self._scale[0]
if math.isinf(loss):
loss = (xs[1] - xs[0]) / self._scale[0]
# We round the loss to 12 digets such that losses
# We round the loss to 12 digits such that losses
# are equal up to numerical precision will be considered
# equal.
return round(loss, ndigits=12)
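Pulled out of the class, the helper behaves like the sketch below. Here self._scale[0] is passed in as an explicit x_scale argument, and the assertions are illustrative:

import math

def finite_loss(loss, xs, x_scale):
    # Infinite losses are replaced by the (scaled) interval width,
    # so intervals with unknown loss stay comparable to the rest.
    if math.isinf(loss):
        loss = (xs[1] - xs[0]) / x_scale
    # Round to 12 digits so that losses which are equal up to
    # numerical precision compare as equal.
    return round(loss, ndigits=12)

assert finite_loss(math.inf, (0.0, 0.5), x_scale=1.0) == 0.5
assert finite_loss(0.1234567890123456, (0, 1), 1.0) == 0.123456789012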