Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (7)
@@ -14,12 +14,19 @@ class AverageLearner(BaseLearner):
     The learned function must depend on an integer input variable that
     represents the source of randomness.

-    Parameters:
-    -----------
+    Parameters
+    ----------
     atol : float
         Desired absolute tolerance
     rtol : float
         Desired relative tolerance
+
+    Attributes
+    ----------
+    data : dict
+        Sampled points and values.
+    pending_points : set
+        Points that still have to be evaluated.
     """

     def __init__(self, function, atol=None, rtol=None):
@@ -31,6 +38,7 @@ class AverageLearner(BaseLearner):
             rtol = np.inf

         self.data = {}
+        self.pending_points = set()
         self.function = function
         self.atol = atol
         self.rtol = rtol
@@ -40,28 +48,35 @@ class AverageLearner(BaseLearner):
     @property
     def n_requested(self):
-        return len(self.data)
+        return len(self.data) + len(self.pending_points)

     def ask(self, n, add_data=True):
         points = list(range(self.n_requested, self.n_requested + n))
+
+        if any(p in self.data or p in self.pending_points for p in points):
+            # This means some of the points `< self.n_requested` do not exist.
+            points = list(set(range(self.n_requested + n))
+                          - set(self.data)
+                          - set(self.pending_points))[:n]
+
         loss_improvements = [self.loss_improvement(n) / n] * n
         if add_data:
             self.tell_many(points, itertools.repeat(None))
         return points, loss_improvements

     def tell(self, n, value):
-        value_is_new = not (n in self.data and value == self.data[n])
-        if not value_is_new:
-            value_old = self.data[n]
-        self.data[n] = value
-        if value is not None:
+        if n in self.data:
+            # The point has already been added before.
+            return
+
+        if value is None:
+            self.pending_points.add(n)
+        else:
+            self.data[n] = value
+            self.pending_points.discard(n)
             self.sum_f += value
             self.sum_f_sq += value**2
-            if value_is_new:
-                self.npoints += 1
-            else:
-                self.sum_f -= value_old
-                self.sum_f_sq -= value_old**2
+            self.npoints += 1

     @property
     def mean(self):
@@ -94,7 +109,7 @@ class AverageLearner(BaseLearner):
     def remove_unfinished(self):
         """Remove uncomputed data from the learner."""
-        pass
+        self.pending_points = set()

     def plot(self):
         hv = ensure_holoviews()
......
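Note (not part of the merge request): a minimal usage sketch of the pending-point bookkeeping added to AverageLearner above, assuming the adaptive package is installed and exports AverageLearner with the interface shown in this diff. Points handed out by ask() are tracked in pending_points until a real value arrives via tell().

import random
from adaptive import AverageLearner

def noisy(seed):
    # Any function of an integer seed; the seed is the source of randomness.
    random.seed(seed)
    return random.gauss(0, 1)

learner = AverageLearner(noisy, atol=None, rtol=0.01)
points, _ = learner.ask(3)        # fresh learner, so points == [0, 1, 2]; all become pending
assert learner.n_requested == 3   # data plus pending_points, per the new property
for p in points:
    learner.tell(p, noisy(p))     # each tell moves p from pending_points into data
print(learner.mean)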
# -*- coding: utf-8 -*-

from ..learner import AverageLearner


def test_only_returns_new_points():
    learner = AverageLearner(lambda x: x, atol=None, rtol=0.01)

    # Only tell it n = 5...10
    for i in range(5, 10):
        learner.tell(i, 1)

    learner.tell(0, None)  # This means it shouldn't return 0 anymore

    assert learner.ask(1)[0][0] == 1
    assert learner.ask(1)[0][0] == 2
    assert learner.ask(1)[0][0] == 3
    assert learner.ask(1)[0][0] == 4
    assert learner.ask(1)[0][0] == 10
# -*- coding: utf-8 -*-

import random

import numpy as np

from ..learner import Learner1D
from ..runner import simple, replay_log


def test_pending_loss_intervals():
    # https://gitlab.kwant-project.org/qt/adaptive/issues/99
    l = Learner1D(lambda x: x, (0, 4))

    l.tell(0, 0)
    l.tell(1, 0)
    l.tell(2, 0)
    assert set(l.losses_combined.keys()) == {(0, 1), (1, 2)}
    l.ask(1)
    assert set(l.losses_combined.keys()) == {(0, 1), (1, 2), (2, 4)}
    l.tell(3.5, 0)
    assert set(l.losses_combined.keys()) == {
        (0, 1), (1, 2), (2, 3.5), (3.5, 4.0)}


def test_loss_interpolation_for_unasked_point():
    # https://gitlab.kwant-project.org/qt/adaptive/issues/99
    l = Learner1D(lambda x: x, (0, 4))

    l.tell(0, 0)
    l.tell(1, 0)
    l.tell(2, 0)

    assert l.ask(1) == ([4], [np.inf])
    assert l.losses == {(0, 1): 0.25, (1, 2): 0.25}
    assert l.losses_combined == {(0, 1): 0.25, (1, 2): 0.25, (2, 4.0): np.inf}

    # assert l.ask(1) == ([3], [np.inf])  # XXX: This doesn't return np.inf as loss_improvement...
    l.ask(1)
    assert l.losses == {(0, 1): 0.25, (1, 2): 0.25}
    assert l.losses_combined == {
        (0, 1): 0.25, (1, 2): 0.25, (2, 3.0): np.inf, (3.0, 4.0): np.inf}

    l.tell(4, 0)
    assert l.losses_combined == {
        (0, 1): 0.25, (1, 2): 0.25, (2, 3): 0.25, (3, 4): 0.25}


def test_first_iteration():
    """Edge cases where we ask for a few points at the start."""
    learner = Learner1D(lambda x: None, (-1, 1))
    points, loss_improvements = learner.ask(2)
    assert set(points) == set(learner.bounds)

    learner = Learner1D(lambda x: None, (-1, 1))
    points, loss_improvements = learner.ask(3)
    assert set(points) == set([-1, 0, 1])

    learner = Learner1D(lambda x: None, (-1, 1))
    points, loss_improvements = learner.ask(1)
    assert len(points) == 1 and points[0] in learner.bounds
    rest = set([-1, 0, 1]) - set(points)
    points, loss_improvements = learner.ask(2)
    assert set(points) == set(rest)

    learner = Learner1D(lambda x: None, (-1, 1))
    points, loss_improvements = learner.ask(1)
    to_see = set(learner.bounds) - set(points)
    points, loss_improvements = learner.ask(1)
    assert set(points) == set(to_see)

    learner = Learner1D(lambda x: None, (-1, 1))
    learner.tell(1, 0)
    points, loss_improvements = learner.ask(1)
    assert points == [-1]

    learner = Learner1D(lambda x: None, (-1, 1))
    learner.tell(-1, 0)
    points, loss_improvements = learner.ask(1)
    assert points == [1]


def test_loss_interpolation():
    learner = Learner1D(lambda _: 0, bounds=(-1, 1))

    learner.tell(-1, 0)
    learner.tell(1, 0)
    for i in range(100):
        # Add 100 points with either None or 0
        if random.random() < 0.9:
            learner.tell(random.uniform(-1, 1), None)
        else:
            learner.tell(random.uniform(-1, 1), 0)

    for (x1, x2), loss in learner.losses_combined.items():
        expected_loss = (x2 - x1) / 2
        assert abs(expected_loss - loss) < 1e-15, (expected_loss, loss)


def _run_on_discontinuity(x_0, bounds):

    def f(x):
        return -1 if x < x_0 else +1

    learner = Learner1D(f, bounds)
    while learner.loss() > 0.1:
        (x,), _ = learner.ask(1)
        learner.tell(x, learner.function(x))

    return learner


def test_termination_on_discontinuities():
    learner = _run_on_discontinuity(0, (-1, 1))
    smallest_interval = min(abs(a - b) for a, b in learner.losses.keys())
    assert smallest_interval >= np.finfo(float).eps

    learner = _run_on_discontinuity(1, (-2, 2))
    smallest_interval = min(abs(a - b) for a, b in learner.losses.keys())
    assert smallest_interval >= np.finfo(float).eps

    learner = _run_on_discontinuity(0.5E3, (-1E3, 1E3))
    smallest_interval = min(abs(a - b) for a, b in learner.losses.keys())
    assert smallest_interval >= 0.5E3 * np.finfo(float).eps


def test_order_adding_points():
    # and https://gitlab.kwant-project.org/qt/adaptive/issues/98
    l = Learner1D(lambda x: x, (0, 1))
    l.tell_many([1, 0, 0.5], [0, 0, 0])
    assert l.losses_combined == {(0, 0.5): 0.5, (0.5, 1): 0.5}
    assert l.losses == {(0, 0.5): 0.5, (0.5, 1): 0.5}
    l.ask(1)


def test_adding_existing_point_passes_silently():
    # See https://gitlab.kwant-project.org/qt/adaptive/issues/97
    l = Learner1D(lambda x: x, (0, 4))
    l.tell(0, 0)
    l.tell(1, 0)
    l.tell(2, 0)
    l.tell(1, None)


def test_loss_at_machine_precision_interval_is_zero():
    """The loss of an interval smaller than _dx_eps
    should be set to zero."""
    def f(x):
        return 1 if x == 0 else 0

    def goal(l):
        return l.loss() < 0.01 or l.npoints >= 1000

    learner = Learner1D(f, bounds=(-1, 1))
    simple(learner, goal=goal)

    # this means loss < 0.01 was reached
    assert learner.npoints != 1000


def small_deviations(x):
    return 0 if x <= 1 else 1 + 10**(-random.randint(12, 14))


def test_small_deviations():
    """This tests whether the Learner1D can handle small deviations.
    See https://gitlab.kwant-project.org/qt/adaptive/merge_requests/73 and
    https://gitlab.kwant-project.org/qt/adaptive/issues/61."""

    eps = 5e-14
    learner = Learner1D(small_deviations, bounds=(1 - eps, 1 + eps))

    # Some non-determinism is needed to make this test fail so we keep
    # a list of points that will be evaluated later to emulate
    # parallel execution
    stash = []

    for i in range(100):
        xs, _ = learner.ask(10)

        # Save 5 random points out of `xs` for later
        random.shuffle(xs)
        for _ in range(5):
            stash.append(xs.pop())

        for x in xs:
            learner.tell(x, learner.function(x))

        # Evaluate and add 5 random points from `stash`
        random.shuffle(stash)
        for _ in range(5):
            x = stash.pop()
            learner.tell(x, learner.function(x))

        if learner.loss() == 0:
            # If this condition is met, the learner can't return any
            # more points.
            break
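Aside (not part of the merge request): two of the tests above rely on behaviour that is easy to miss. In test_loss_interpolation the expected loss (x2 - x1) / 2 is the interval length divided by the domain size, which is 2 for bounds (-1, 1); that is what Learner1D's default loss reduces to when the function is constant. And simple() from adaptive.runner, used in test_loss_at_machine_precision_interval_is_zero, is a sequential ask/tell loop. A rough sketch of such a runner, which may differ from the real implementation in details:

def simple_runner(learner, goal):
    # Keep asking for one point at a time and feeding the exact function
    # value back, until the goal condition is satisfied.
    while not goal(learner):
        xs, _ = learner.ask(1)
        for x in xs:
            learner.tell(x, learner.function(x))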
# -*- coding: utf-8 -*-

from ..learner import LearnerND
from ..runner import replay_log


def test_faiure_case_LearnerND():
    log = [
        ('ask', 4),
        ('tell', (-1, -1, -1), 1.607873907219222e-101),
        ('tell', (-1, -1, 1), 1.607873907219222e-101),
        ('ask', 2),
        ('tell', (-1, 1, -1), 1.607873907219222e-101),
        ('tell', (-1, 1, 1), 1.607873907219222e-101),
        ('ask', 2),
        ('tell', (1, -1, 1), 2.0),
        ('tell', (1, -1, -1), 2.0),
        ('ask', 2),
        ('tell', (0.0, 0.0, 0.0), 4.288304431237686e-06),
        ('tell', (1, 1, -1), 2.0)
    ]

    learner = LearnerND(lambda *x: x, bounds=[(-1, 1), (-1, 1), (-1, 1)])
    replay_log(learner, log)
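Aside (not part of the merge request): the regression test above replays a recorded log of ('ask', n) and ('tell', point, value) entries against a fresh learner. A replayer compatible with that log format, written here as an illustration only, since the real adaptive.runner.replay_log may differ in details:

def replay(learner, log):
    # Each log entry is a method name followed by its positional arguments.
    for method, *args in log:
        getattr(learner, method)(*args)   # e.g. learner.tell((-1, -1, -1), 1.607e-101)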
@@ -108,7 +108,8 @@ def run_with(*learner_types):
             # Check if learner was marked with our `xfail` decorator
             # XXX: doesn't work when feeding kwargs to xfail.
             if is_xfail:
-                pars.append(pytest.param(l, f, dict(k), marks=[pytest.mark.xfail]))
+                pars.append(pytest.param(l, f, dict(k),
+                                         marks=[pytest.mark.xfail]))
             else:
                 pars.append((l, f, dict(k)))
     return pytest.mark.parametrize('learner_type, f, learner_kwargs', pars)
@@ -163,42 +164,6 @@ def test_uniform_sampling1D(learner_type, f, learner_kwargs):
     assert max(ivals) / min(ivals) < 2 + 1e-8


-def test_learner1D_loss_interpolation_for_unasked_point():
-    # https://gitlab.kwant-project.org/qt/adaptive/issues/99
-    l = Learner1D(lambda x: x, (0, 4))
-    l.tell(0, 0)
-    l.tell(1, 0)
-    l.tell(2, 0)
-
-    assert l.ask(1) == ([4], [np.inf])
-    assert l.losses == {(0, 1): 0.25, (1, 2): 0.25}
-    assert l.losses_combined == {(0, 1): 0.25, (1, 2): 0.25, (2, 4.0): np.inf}
-
-    # assert l.ask(1) == ([3], [np.inf])  # XXX: This doesn't return np.inf as loss_improvement...
-    l.ask(1)
-    assert l.losses == {(0, 1): 0.25, (1, 2): 0.25}
-    assert l.losses_combined == {(0, 1): 0.25, (1, 2): 0.25, (2, 3.0): np.inf, (3.0, 4.0): np.inf}
-
-    l.tell(4, 0)
-    assert l.losses_combined == {(0, 1): 0.25, (1, 2): 0.25, (2, 3): 0.25, (3, 4): 0.25}
-
-
-def test_learner1D_pending_loss_intervals():
-    # https://gitlab.kwant-project.org/qt/adaptive/issues/99
-    l = Learner1D(lambda x: x, (0, 4))
-    l.tell(0, 0)
-    l.tell(1, 0)
-    l.tell(2, 0)
-    assert set(l.losses_combined.keys()) == {(0, 1), (1, 2)}
-    l.ask(1)
-    assert set(l.losses_combined.keys()) == {(0, 1), (1, 2), (2, 4)}
-    l.tell(3.5, 0)
-    assert set(l.losses_combined.keys()) == {(0, 1), (1, 2), (2, 3.5), (3.5, 4.0)}
-
-
 @pytest.mark.xfail
 @run_with(Learner2D, LearnerND)
 def test_uniform_sampling2D(learner_type, f, learner_kwargs):
@@ -408,158 +373,6 @@ def test_learner_performance_is_invariant_under_scaling(learner_type, f, learner_kwargs):
     assert abs(learner.loss() - control.loss()) / learner.loss() < 1e-11


-def test_learner1d_first_iteration():
-    """Edge cases where we ask for a few points at the start."""
-    learner = Learner1D(lambda x: None, (-1, 1))
-    points, loss_improvements = learner.ask(2)
-    assert set(points) == set(learner.bounds)
-
-    learner = Learner1D(lambda x: None, (-1, 1))
-    points, loss_improvements = learner.ask(3)
-    assert set(points) == set([-1, 0, 1])
-
-    learner = Learner1D(lambda x: None, (-1, 1))
-    points, loss_improvements = learner.ask(1)
-    assert len(points) == 1 and points[0] in learner.bounds
-    rest = set([-1, 0, 1]) - set(points)
-    points, loss_improvements = learner.ask(2)
-    assert set(points) == set(rest)
-
-    learner = Learner1D(lambda x: None, (-1, 1))
-    points, loss_improvements = learner.ask(1)
-    to_see = set(learner.bounds) - set(points)
-    points, loss_improvements = learner.ask(1)
-    assert set(points) == set(to_see)
-
-    learner = Learner1D(lambda x: None, (-1, 1))
-    learner.tell(1, 0)
-    points, loss_improvements = learner.ask(1)
-    assert points == [-1]
-
-    learner = Learner1D(lambda x: None, (-1, 1))
-    learner.tell(-1, 0)
-    points, loss_improvements = learner.ask(1)
-    assert points == [1]
-
-
-def test_learner1d_loss_interpolation():
-    learner = Learner1D(lambda _: 0, bounds=(-1, 1))
-
-    learner.tell(-1, 0)
-    learner.tell(1, 0)
-    for i in range(100):
-        # Add a 100 points with either None or 0
-        if random.random() < 0.9:
-            learner.tell(random.uniform(-1, 1), None)
-        else:
-            learner.tell(random.uniform(-1, 1), 0)
-
-    for (x1, x2), loss in learner.losses_combined.items():
-        expected_loss = (x2 - x1) / 2
-        assert abs(expected_loss - loss) < 1e-15, (expected_loss, loss)
-
-
-def _run_on_discontinuity(x_0, bounds):
-
-    def f(x):
-        return -1 if x < x_0 else +1
-
-    learner = Learner1D(f, bounds)
-    while learner.loss() > 0.1:
-        (x,), _ = learner.ask(1)
-        learner.tell(x, learner.function(x))
-
-    return learner
-
-
-def test_termination_on_discontinuities():
-    learner = _run_on_discontinuity(0, (-1, 1))
-    smallest_interval = min(abs(a - b) for a, b in learner.losses.keys())
-    assert smallest_interval >= np.finfo(float).eps
-
-    learner = _run_on_discontinuity(1, (-2, 2))
-    smallest_interval = min(abs(a - b) for a, b in learner.losses.keys())
-    assert smallest_interval >= np.finfo(float).eps
-
-    learner = _run_on_discontinuity(0.5E3, (-1E3, 1E3))
-    smallest_interval = min(abs(a - b) for a, b in learner.losses.keys())
-    assert smallest_interval >= 0.5E3 * np.finfo(float).eps
-
-
-def test_order_adding_points():
-    # and https://gitlab.kwant-project.org/qt/adaptive/issues/98
-    l = Learner1D(lambda x: x, (0, 1))
-    l.tell_many([1, 0, 0.5], [0, 0, 0])
-    assert l.losses_combined == {(0, 0.5): 0.5, (0.5, 1): 0.5}
-    assert l.losses == {(0, 0.5): 0.5, (0.5, 1): 0.5}
-    l.ask(1)
-
-
-def test_adding_existing_point_passes_silently():
-    # See https://gitlab.kwant-project.org/qt/adaptive/issues/97
-    l = Learner1D(lambda x: x, (0, 4))
-    l.tell(0, 0)
-    l.tell(1, 0)
-    l.tell(2, 0)
-    l.tell(1, None)
-
-
-def test_loss_at_machine_precision_interval_is_zero():
-    """The loss of an interval smaller than _dx_eps
-    should be set to zero."""
-    def f(x):
-        return 1 if x == 0 else 0
-
-    def goal(l):
-        return l.loss() < 0.01 or l.npoints >= 1000
-
-    learner = Learner1D(f, bounds=(-1, 1))
-    simple(learner, goal=goal)
-
-    # this means loss < 0.01 was reached
-    assert learner.npoints != 1000
-
-
-def small_deviations(x):
-    return 0 if x <= 1 else 1 + 10**(-random.randint(12, 14))
-
-
-def test_small_deviations():
-    """This tests whether the Learner1D can handle small deviations.
-    See https://gitlab.kwant-project.org/qt/adaptive/merge_requests/73 and
-    https://gitlab.kwant-project.org/qt/adaptive/issues/61."""
-
-    eps = 5e-14
-    learner = Learner1D(small_deviations, bounds=(1 - eps, 1 + eps))
-
-    # Some non-determinism is needed to make this test fail so we keep
-    # a list of points that will be evaluated later to emulate
-    # parallel execution
-    stash = []
-
-    for i in range(100):
-        xs, _ = learner.ask(10)
-
-        # Save 5 random points out of `xs` for later
-        random.shuffle(xs)
-        for _ in range(5):
-            stash.append(xs.pop())
-
-        for x in xs:
-            learner.tell(x, learner.function(x))
-
-        # Evaluate and add 5 random points from `stash`
-        random.shuffle(stash)
-        for _ in range(5):
-            learner.tell(stash.pop(), learner.function(x))
-
-        if learner.loss() == 0:
-            # If this condition is met, the learner can't return any
-            # more points.
-            break
-
-
 @pytest.mark.xfail
 @run_with(Learner1D, Learner2D, LearnerND)
 def test_convergence_for_arbitrary_ordering(learner_type, f, learner_kwargs):
......