Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found
Select Git revision

Target

Select target project
No results found
Select Git revision
Show changes
......@@ -166,6 +166,9 @@ class Learner2D(BaseLearner):
----------
data : dict
Sampled points and values.
pending_points : set
Points that still have to be evaluated and are currently
interpolated, see `data_combined`.
stack_size : int, default 10
The size of the new candidate points stack. Set it to 1
to recalculate the best points at each call to `ask`.
......@@ -180,7 +183,7 @@ class Learner2D(BaseLearner):
-------
data_combined : dict
Sampled points and values so far including
the unknown interpolated ones.
the unknown interpolated points in `pending_points`.
Notes
-----
......@@ -217,7 +220,7 @@ class Learner2D(BaseLearner):
self.bounds = tuple((float(a), float(b)) for a, b in bounds)
self.data = OrderedDict()
self._stack = OrderedDict()
self._interp = set()
self.pending_points = set()
self.xy_mean = np.mean(self.bounds, axis=1)
self._xy_scale = np.ptp(self.bounds, axis=1)
......@@ -263,14 +266,14 @@ class Learner2D(BaseLearner):
@property
def bounds_are_done(self):
return not any((p in self._interp or p in self._stack)
return not any((p in self.pending_points or p in self._stack)
for p in self._bounds_points)
def data_combined(self):
# Interpolate the unfinished points
data_combined = copy(self.data)
if self._interp:
points_interp = list(self._interp)
if self.pending_points:
points_interp = list(self.pending_points)
if self.bounds_are_done:
values_interp = self.ip()(self._scale(points_interp))
else:
......@@ -303,17 +306,17 @@ class Learner2D(BaseLearner):
point = tuple(point)
if value is None:
self._interp.add(point)
self.pending_points.add(point)
self._ip_combined = None
else:
self.data[point] = value
self._interp.discard(point)
self.pending_points.discard(point)
self._ip = None
self._stack.pop(point, None)
def _fill_stack(self, stack_till=1):
if len(self.data) + len(self._interp) < self.ndim + 1:
if len(self.data) + len(self.pending_points) < self.ndim + 1:
raise ValueError("too few points...")
# Interpolate
......@@ -366,7 +369,7 @@ class Learner2D(BaseLearner):
self._stack = OrderedDict(zip(points[:self.stack_size],
loss_improvements))
for point in points[:n]:
self._interp.discard(point)
self.pending_points.discard(point)
return points[:n], loss_improvements[:n]
......@@ -379,7 +382,7 @@ class Learner2D(BaseLearner):
return self._loss
def remove_unfinished(self):
self._interp = set()
self.pending_points = set()
for p in self._bounds_points:
if p not in self.data:
self._stack[p] = np.inf
......
......@@ -14,12 +14,6 @@ import pytest
from ..learner import *
from ..runner import simple, replay_log
try:
import skopt
with_scikit_optimize = True
except ModuleNotFoundError:
with_scikit_optimize = False
def generate_random_parametrization(f):
"""Return a realization of 'f' with parameters bound to random values.
......@@ -129,23 +123,7 @@ def ask_randomly(learner, rounds, points):
return xs, ls
@pytest.mark.skipif(not with_scikit_optimize,
                    reason='scikit-optimize is not installed')
def test_skopt_learner_runs():
    """Smoke-test the SKOptLearner.

    It makes very few behavioural guarantees, so we only exercise the
    most basic ask/tell cycle on a noisy 1D objective.
    """
    def g(x, noise_level=0.1):
        # Damped sine with additive Gaussian noise.
        damped_sine = np.sin(5 * x) * (1 - np.tanh(x ** 2))
        return damped_sine + np.random.randn() * noise_level

    learner = SKOptLearner(g, dimensions=[(-2., 2.)])
    n_rounds = 11
    for _ in range(n_rounds):
        (x,), _ = learner.ask(1)
        y = learner.function(x)
        learner.tell(x, y)
# Tests
@run_with(Learner1D)
def test_uniform_sampling1D(learner_type, f, learner_kwargs):
......@@ -373,6 +351,37 @@ def test_learner_performance_is_invariant_under_scaling(learner_type, f, learner
assert abs(learner.loss() - control.loss()) / learner.loss() < 1e-11
# XXX: the LearnerND currently fails because there is no `add_data=False` argument in ask.
@run_with(Learner1D, Learner2D, xfail(LearnerND), AverageLearner)
def test_balancing_learner(learner_type, f, learner_kwargs):
    """Test if the BalancingLearner works with the different types of learners."""
    learners = [learner_type(generate_random_parametrization(f), **learner_kwargs)
                for i in range(5)]

    learner = BalancingLearner(learners)

    # Emulate parallel execution
    stash = []

    for i in range(200):
        xs, _ = learner.ask(10)
        # Save 5 random points out of `xs` for later
        random.shuffle(xs)
        for _ in range(5):
            stash.append(xs.pop())

        for x in xs:
            learner.tell(x, learner.function(x))

        # Evaluate and add 5 random points from `stash`
        random.shuffle(stash)
        for _ in range(5):
            # Bug fix: evaluate the function at the *stashed* point.
            # The original passed `learner.function(x)`, where `x` was the
            # stale last point of the loop above, so stashed points were
            # told with the wrong value.
            point = stash.pop()
            learner.tell(point, learner.function(point))

    assert all(l.npoints > 20 for l in learner.learners)
@pytest.mark.xfail
@run_with(Learner1D, Learner2D, LearnerND)
def test_convergence_for_arbitrary_ordering(learner_type, f, learner_kwargs):
......@@ -392,22 +401,3 @@ def test_learner_subdomain(learner_type, f, learner_kwargs):
perform 'similarly' to learners defined on that subdomain only."""
# XXX: not sure how to implement this. How do we measure "performance"?
raise NotImplementedError()
def test_faiure_case_LearnerND():
    """Replay a recorded ask/tell sequence that previously broke LearnerND."""
    # NOTE(review): "faiure" looks like a typo for "failure", but the name
    # is the test's public identifier, so it is kept unchanged.
    learner = LearnerND(lambda *x: x, bounds=[(-1, 1), (-1, 1), (-1, 1)])
    recorded_log = [
        ('ask', 4),
        ('tell', (-1, -1, -1), 1.607873907219222e-101),
        ('tell', (-1, -1, 1), 1.607873907219222e-101),
        ('ask', 2),
        ('tell', (-1, 1, -1), 1.607873907219222e-101),
        ('tell', (-1, 1, 1), 1.607873907219222e-101),
        ('ask', 2),
        ('tell', (1, -1, 1), 2.0),
        ('tell', (1, -1, -1), 2.0),
        ('ask', 2),
        ('tell', (0.0, 0.0, 0.0), 4.288304431237686e-06),
        ('tell', (1, 1, -1), 2.0),
    ]
    replay_log(learner, recorded_log)
# -*- coding: utf-8 -*-
import random
import numpy as np
import pytest
# Optional-dependency guard: SKOptLearner tests run only when
# scikit-optimize is importable.
try:
    import skopt
    from ..learner import SKOptLearner
except ModuleNotFoundError:
    with_scikit_optimize = False
else:
    # Set the flag only after *both* imports succeed.  The original set it
    # to True before importing SKOptLearner, so a failure of that second
    # import could leave the flag inconsistent with the available names.
    with_scikit_optimize = True
@pytest.mark.skipif(not with_scikit_optimize,
                    reason='scikit-optimize is not installed')
def test_skopt_learner_runs():
    """The SKOptLearner provides very few guarantees about its
    behaviour, so we only test the most basic usage: several
    ask/tell rounds on a noisy 1D function must not raise.
    """
    def g(x, noise_level=0.1):
        return (np.sin(5 * x) * (1 - np.tanh(x ** 2))
                + np.random.randn() * noise_level)

    learner = SKOptLearner(g, dimensions=[(-2., 2.)])
    for step in range(11):
        points, _ = learner.ask(1)
        x = points[0]
        learner.tell(x, learner.function(x))