Commit 2f2e80d0 authored by Bas Nijholt

Merge branch 'test_loss_functions' into 'master'

test all the different loss functions in each test

See merge request !135
parents d82fbae7 d2d955e0
Pipeline #13632 passed
adaptive/learner/learner1D.py
@@ -123,6 +123,7 @@ def triangle_loss(xs, ys):
def curvature_loss_function(area_factor=1, euclid_factor=0.02, horizontal_factor=0.02):
# XXX: add a doc-string
@uses_nth_neighbors(1)
def curvature_loss(xs, ys):
xs_middle = xs[1:3]
@@ -227,6 +228,11 @@ class Learner1D(BaseLearner):
self.losses = {}
self.losses_combined = {}
# When the scale changes by a factor of 2, the losses are
# recomputed. This factor is tunable so that we can test
# the learner's behavior in the tests.
self._recompute_losses_factor = 2
self.data = {}
self.pending_points = set()
@@ -446,7 +452,7 @@ class Learner1D(BaseLearner):
self._update_losses(x, real=True)
# If the scale has increased enough, recompute all losses.
if self._scale[1] > 2 * self._oldscale[1]:
if self._scale[1] > self._recompute_losses_factor * self._oldscale[1]:
for interval in self.losses:
self._update_interpolated_loss_in_interval(*interval)
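A minimal sketch (not part of the diff) of why this change matters: making the previously hard-coded factor of 2 an instance attribute lets the tests below set it to 1, so that any increase of the scale triggers a full recomputation of the losses. Assuming any scalar function f on the bounds:

    from adaptive import Learner1D

    def f(x):
        return x**3

    learner = Learner1D(f, bounds=(-1, 1))
    # With the factor at 1 the check above becomes
    # `scale > 1 * oldscale`, so every growth of the
    # scale recomputes all interval losses.
    learner._recompute_losses_factor = 1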
@@ -562,8 +568,13 @@
def finite_loss(loss, xs):
# If the loss is infinite we return the
# distance between the two points.
return (loss if not math.isinf(loss)
else (xs[1] - xs[0]) / self._scale[0])
if math.isinf(loss):
loss = (xs[1] - xs[0]) / self._scale[0]
# We round the loss to 12 digits such that losses
# that are equal up to numerical precision will be
# considered equal.
return round(loss, ndigits=12)
quals = [(-finite_loss(loss, x), x, 1)
for x, loss in self.losses_combined.items()]
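Rounding before building `quals` means that two losses differing only by floating-point noise compare equal, so the candidate ordering no longer depends on bit-level noise. A minimal illustration (not part of the diff):

    a = 0.1 + 0.2  # 0.30000000000000004 in floating point
    b = 0.3
    assert a != b  # a raw comparison still sees the noise
    # After rounding to 12 digits the two losses are considered equal.
    assert round(a, ndigits=12) == round(b, ndigits=12)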
adaptive/tests/test_learners.py
@@ -15,18 +15,39 @@ import numpy as np
import pytest
import scipy.spatial
from ..learner import (AverageLearner, BalancingLearner, DataSaver,
import adaptive
from adaptive.learner import (AverageLearner, BalancingLearner, DataSaver,
IntegratorLearner, Learner1D, Learner2D, LearnerND)
from ..runner import simple
from adaptive.runner import simple
try:
import skopt
from ..learner import SKOptLearner
from adaptive.learner import SKOptLearner
except ModuleNotFoundError:
SKOptLearner = None
LOSS_FUNCTIONS = {
Learner1D: ('loss_per_interval', (
adaptive.learner.learner1D.default_loss,
adaptive.learner.learner1D.uniform_loss,
adaptive.learner.learner1D.curvature_loss_function(),
)),
Learner2D: ('loss_per_triangle', (
adaptive.learner.learner2D.default_loss,
adaptive.learner.learner2D.uniform_loss,
adaptive.learner.learner2D.minimize_triangle_surface_loss,
adaptive.learner.learner2D.resolution_loss_function(),
)),
LearnerND: ('loss_per_simplex', (
adaptive.learner.learnerND.default_loss,
adaptive.learner.learnerND.std_loss,
adaptive.learner.learnerND.uniform_loss,
)),
}
def generate_random_parametrization(f):
"""Return a realization of 'f' with parameters bound to random values.
@@ -74,7 +95,6 @@ def maybe_skip(learner):
# All parameters except the first must be annotated with a callable that
# returns a random value for that parameter.
@learn_with(Learner1D, bounds=(-1, 1))
def quadratic(x, m: uniform(0, 10), b: uniform(0, 1)):
return m * x**2 + b
@@ -108,20 +128,33 @@ def gaussian(n):
# Decorators for tests.
def run_with(*learner_types):
# Create a sequence of learner parameters by adding all
# possible loss functions to an existing parameter set.
def add_loss_to_params(learner_type, existing_params):
if learner_type not in LOSS_FUNCTIONS:
return [existing_params]
loss_param, loss_functions = LOSS_FUNCTIONS[learner_type]
loss_params = [{loss_param: f} for f in loss_functions]
return [dict(**existing_params, **lp) for lp in loss_params]
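A sketch (not part of the diff) of what the expansion produces; the import path assumes the test module ships with the package, and the 'bounds' parameter set is hypothetical:

    from adaptive.learner import Learner1D
    from adaptive.tests.test_learners import add_loss_to_params

    params = add_loss_to_params(Learner1D, {'bounds': (-1, 1)})
    # One dict per registered loss function, each with an extra
    # 'loss_per_interval' entry: default_loss, uniform_loss,
    # and the curvature loss.
    assert len(params) == 3
    assert all('loss_per_interval' in p for p in params)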
def run_with(*learner_types, with_all_loss_functions=True):
pars = []
for l in learner_types:
has_marker = isinstance(l, tuple)
if has_marker:
marker, l = l
for f, k in learner_function_combos[l]:
# Check if learner was marked with our `xfail` decorator
# XXX: doesn't work when feeding kwargs to xfail.
if has_marker:
pars.append(pytest.param(l, f, dict(k),
marks=[marker]))
else:
pars.append((l, f, dict(k)))
ks = add_loss_to_params(l, k) if with_all_loss_functions else [k]
for k in ks:
# Check if learner was marked with our `xfail` decorator
# XXX: doesn't work when feeding kwargs to xfail.
if has_marker:
pars.append(pytest.param(l, f, dict(k),
marks=[marker]))
else:
pars.append((l, f, dict(k)))
return pytest.mark.parametrize('learner_type, f, learner_kwargs', pars)
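With the default `with_all_loss_functions=True`, a decorated test now runs once per loss function registered for its learner type; passing `False` keeps the old single parametrization. A sketch of a decorated test (the test name is hypothetical, the body mirrors the existing tests):

    @run_with(Learner1D)
    def test_example(learner_type, f, learner_kwargs):
        # Runs three times: once per entry in LOSS_FUNCTIONS[Learner1D].
        learner = learner_type(generate_random_parametrization(f),
                               **learner_kwargs)
        simple(learner, goal=lambda l: l.npoints > 10)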
@@ -196,22 +229,19 @@ def test_learner_accepts_lists(learner_type, bounds):
simple(learner, goal=lambda l: l.npoints > 10)
@run_with(xfail(Learner1D), Learner2D, LearnerND)
@run_with(Learner1D, Learner2D, LearnerND)
def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs):
"""Adding already existing data is an idempotent operation.
Either it is idempotent, or it is an error.
This is the only sane behaviour.
This test will fail for the Learner1D because the losses are normalized by
_scale which is updated after every point. After one iteration of adding
points, the _scale could be different from what it was when calculating
the losses of the intervals. Re-adding the points a second time means
that the losses are now all normalized by the correct _scale.
"""
f = generate_random_parametrization(f)
learner = learner_type(f, **learner_kwargs)
control = learner_type(f, **learner_kwargs)
if learner_type is Learner1D:
learner._recompute_losses_factor = 1
control._recompute_losses_factor = 1
N = random.randint(10, 30)
control.ask(N)
@@ -265,14 +295,11 @@ def test_adding_non_chosen_data(learner_type, f, learner_kwargs):
assert set(pls) == set(cpls)
@run_with(xfail(Learner1D), xfail(Learner2D), xfail(LearnerND), AverageLearner)
@run_with(Learner1D, xfail(Learner2D), xfail(LearnerND), AverageLearner)
def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs):
"""The order of calls to 'tell' between calls to 'ask'
is arbitrary.
This test will fail for the Learner1D for the same reason as described in
the doc-string in `test_adding_existing_data_is_idempotent`.
This test will fail for the Learner2D because
`interpolate.interpnd.estimate_gradients_2d_global` will give different
outputs based on the order of the triangles and values in
@@ -282,6 +309,10 @@ def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs):
learner = learner_type(f, **learner_kwargs)
control = learner_type(f, **learner_kwargs)
if learner_type is Learner1D:
learner._recompute_losses_factor = 1
control._recompute_losses_factor = 1
N = random.randint(10, 30)
control.ask(N)
xs, _ = learner.ask(N)
@@ -353,7 +384,7 @@ def test_learner_performance_is_invariant_under_scaling(learner_type, f, learner
learner = learner_type(lambda x: yscale * f(np.array(x) / xscale),
**l_kwargs)
npoints = random.randrange(1000, 2000)
npoints = random.randrange(300, 500)
for n in range(npoints):
cxs, _ = control.ask(1)
......@@ -366,10 +397,11 @@ def test_learner_performance_is_invariant_under_scaling(learner_type, f, learner
assert np.allclose(xs_unscaled, cxs)
# Check if the losses are close
assert abs(learner.loss() - control.loss()) / learner.loss() < 1e-11
assert math.isclose(learner.loss(), control.loss(), rel_tol=1e-10)
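The replaced expression normalized the difference by `learner.loss()` alone, so the tolerance was asymmetric in its two arguments; `math.isclose` compares the difference against the larger magnitude of the two values. A quick sketch:

    import math

    a, b = 1.0, 1.0 + 5e-11
    assert abs(a - b) / a < 1e-10             # old style: normalized by `a` only
    assert math.isclose(a, b, rel_tol=1e-10)  # symmetric: |a-b| <= rel_tol * max(|a|, |b|)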
@run_with(Learner1D, Learner2D, LearnerND, AverageLearner)
@run_with(Learner1D, Learner2D, LearnerND, AverageLearner,
with_all_loss_functions=False)
def test_balancing_learner(learner_type, f, learner_kwargs):
"""Test if the BalancingLearner works with the different types of learners."""
learners = [learner_type(generate_random_parametrization(f), **learner_kwargs)
@@ -403,19 +435,22 @@ def test_balancing_learner(learner_type, f, learner_kwargs):
@run_with(Learner1D, Learner2D, LearnerND, AverageLearner,
maybe_skip(SKOptLearner), IntegratorLearner)
maybe_skip(SKOptLearner), IntegratorLearner,
with_all_loss_functions=False)
def test_saving(learner_type, f, learner_kwargs):
f = generate_random_parametrization(f)
learner = learner_type(f, **learner_kwargs)
control = learner_type(f, **learner_kwargs)
if learner_type is Learner1D:
learner._recompute_losses_factor = 1
control._recompute_losses_factor = 1
simple(learner, lambda l: l.npoints > 100)
fd, path = tempfile.mkstemp()
try:
learner.save(path)
control.load(path)
if learner_type is not Learner1D:
# Because different scales result in different losses
np.testing.assert_almost_equal(learner.loss(), control.loss())
np.testing.assert_almost_equal(learner.loss(), control.loss())
# Check that the control is runnable
simple(control, lambda l: l.npoints > 200)
@@ -424,12 +459,18 @@ def test_saving(learner_type, f, learner_kwargs):
@run_with(Learner1D, Learner2D, LearnerND, AverageLearner,
maybe_skip(SKOptLearner), IntegratorLearner)
maybe_skip(SKOptLearner), IntegratorLearner,
with_all_loss_functions=False)
def test_saving_of_balancing_learner(learner_type, f, learner_kwargs):
f = generate_random_parametrization(f)
learner = BalancingLearner([learner_type(f, **learner_kwargs)])
control = BalancingLearner([learner_type(f, **learner_kwargs)])
if learner_type is Learner1D:
for l, c in zip(learner.learners, control.learners):
l._recompute_losses_factor = 1
c._recompute_losses_factor = 1
simple(learner, lambda l: l.learners[0].npoints > 100)
folder = tempfile.mkdtemp()
@@ -437,11 +478,10 @@ def test_saving_of_balancing_learner(learner_type, f, learner_kwargs):
return folder + 'test'
try:
learner.save(fname)
control.load(fname)
if learner_type is not Learner1D:
# Because different scales result in different losses
np.testing.assert_almost_equal(learner.loss(), control.loss())
learner.save(fname=fname)
control.load(fname=fname)
np.testing.assert_almost_equal(learner.loss(), control.loss())
# Check that the control is runnable
simple(control, lambda l: l.learners[0].npoints > 200)
@@ -450,21 +490,27 @@ def test_saving_of_balancing_learner(learner_type, f, learner_kwargs):
@run_with(Learner1D, Learner2D, LearnerND, AverageLearner,
maybe_skip(SKOptLearner), IntegratorLearner)
maybe_skip(SKOptLearner), IntegratorLearner,
with_all_loss_functions=False)
def test_saving_with_datasaver(learner_type, f, learner_kwargs):
f = generate_random_parametrization(f)
g = lambda x: {'y': f(x), 't': random.random()}
arg_picker = operator.itemgetter('y')
learner = DataSaver(learner_type(g, **learner_kwargs), arg_picker)
control = DataSaver(learner_type(g, **learner_kwargs), arg_picker)
if learner_type is Learner1D:
learner.learner._recompute_losses_factor = 1
control.learner._recompute_losses_factor = 1
simple(learner, lambda l: l.npoints > 100)
fd, path = tempfile.mkstemp()
try:
learner.save(path)
control.load(path)
if learner_type is not Learner1D:
# Because different scales result in different losses
np.testing.assert_almost_equal(learner.loss(), control.loss())
np.testing.assert_almost_equal(learner.loss(), control.loss())
assert learner.extra_data == control.extra_data
# Check that the control is runnable