From 03e68f4ae28146f851afc0d15cb5d2dae23fb7eb Mon Sep 17 00:00:00 2001
From: Bas Nijholt <basnijholt@gmail.com>
Date: Fri, 23 Nov 2018 12:01:56 +0100
Subject: [PATCH] add '_recompute_losses_factor' so that it can be set in
 tests

Setting this factor to 1 in a test forces the losses to be recomputed on
every scale increase, so fewer tests need to be marked as xfail.
---
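
Note: a minimal sketch (not part of the commit) of the testing pattern
this change enables, assuming adaptive's public Learner1D API.  Setting
the new private attribute to 1 forces a full loss recomputation on
every scale increase, which makes re-adding existing data idempotent:

    import numpy as np
    from adaptive import Learner1D

    def f(x):
        return np.tanh(10 * x)  # arbitrary smooth test function

    learner = Learner1D(f, bounds=(-1, 1))
    control = Learner1D(f, bounds=(-1, 1))
    # Recompute all losses on every scale increase instead of only
    # when the scale doubles (the default factor of 2).
    learner._recompute_losses_factor = 1
    control._recompute_losses_factor = 1

    N = 20
    control.ask(N)
    xs, _ = learner.ask(N)
    for x in xs:
        y = f(x)
        learner.tell(x, y)
        control.tell(x, y)

    # Re-adding already-known data should now be a no-op.
    for x in xs:
        learner.tell(x, f(x))

    assert learner.losses == control.losses
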
 adaptive/learner/learner1D.py   |  7 ++++++-
 adaptive/tests/test_learners.py | 20 +++++++++-----------
 2 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/adaptive/learner/learner1D.py b/adaptive/learner/learner1D.py
index f1559954..ebac1b2d 100644
--- a/adaptive/learner/learner1D.py
+++ b/adaptive/learner/learner1D.py
@@ -228,6 +228,11 @@ class Learner1D(BaseLearner):
         self.losses = {}
         self.losses_combined = {}
 
+        # When the scale changes by a factor of 2, all losses are
+        # recomputed. This factor is tunable so that the tests can
+        # exercise the learner's behavior.
+        self._recompute_losses_factor = 2
+
         self.data = {}
         self.pending_points = set()
 
@@ -447,7 +452,7 @@ class Learner1D(BaseLearner):
         self._update_losses(x, real=True)
 
         # If the scale has increased enough, recompute all losses.
-        if self._scale[1] > 2 * self._oldscale[1]:
+        if self._scale[1] > self._recompute_losses_factor * self._oldscale[1]:
 
             for interval in self.losses:
                 self._update_interpolated_loss_in_interval(*interval)
diff --git a/adaptive/tests/test_learners.py b/adaptive/tests/test_learners.py
index 3cbe43ce..6f99e3da 100644
--- a/adaptive/tests/test_learners.py
+++ b/adaptive/tests/test_learners.py
@@ -229,22 +229,19 @@ def test_learner_accepts_lists(learner_type, bounds):
     simple(learner, goal=lambda l: l.npoints > 10)
 
 
-@run_with(xfail(Learner1D), Learner2D, LearnerND)
+@run_with(Learner1D, Learner2D, LearnerND)
 def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs):
     """Adding already existing data is an idempotent operation.
 
     Either it is idempotent, or it is an error.
     This is the only sane behaviour.
-
-    This test will fail for the Learner1D because the losses are normalized by
-    _scale which is updated after every point. After one iteration of adding
-    points, the _scale could be different from what it was when calculating
-    the losses of the intervals. Readding the points a second time means
-    that the losses are now all normalized by the correct _scale.
     """
     f = generate_random_parametrization(f)
     learner = learner_type(f, **learner_kwargs)
     control = learner_type(f, **learner_kwargs)
+    if learner_type is Learner1D:
+        learner._recompute_losses_factor = 1
+        control._recompute_losses_factor = 1
 
     N = random.randint(10, 30)
     control.ask(N)
@@ -298,14 +295,11 @@ def test_adding_non_chosen_data(learner_type, f, learner_kwargs):
     assert set(pls) == set(cpls)
 
 
-@run_with(xfail(Learner1D), xfail(Learner2D), xfail(LearnerND), AverageLearner)
+@run_with(Learner1D, xfail(Learner2D), xfail(LearnerND), AverageLearner)
 def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs):
     """The order of calls to 'tell' between calls to 'ask'
     is arbitrary.
 
-    This test will fail for the Learner1D for the same reason as described in
-    the doc-string in `test_adding_existing_data_is_idempotent`.
-
     This test will fail for the Learner2D because
     `interpolate.interpnd.estimate_gradients_2d_global` will give different
     outputs based on the order of the triangles and values in
@@ -315,6 +309,10 @@ def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs):
     learner = learner_type(f, **learner_kwargs)
     control = learner_type(f, **learner_kwargs)
 
+    if learner_type is Learner1D:
+        learner._recompute_losses_factor = 1
+        control._recompute_losses_factor = 1
+
     N = random.randint(10, 30)
     control.ask(N)
     xs, _ = learner.ask(N)
-- 
GitLab
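
For reference, a toy sketch of the mechanism behind
'_recompute_losses_factor' (not adaptive's actual implementation; the
class and names below are hypothetical).  Losses are y-differences
normalized by the current scale; renormalizing every stored loss on
each new point would be expensive, so a full recomputation is deferred
until the scale has grown by the given factor.  Until then, intervals
far from the newest point keep losses normalized by a stale scale,
which is why the affected tests were previously xfailed:

    class ToyLossStore:
        def __init__(self, recompute_factor=2):
            self.recompute_factor = recompute_factor
            self.data = {}          # x -> y
            self.losses = {}        # (x_left, x_right) -> normalized loss
            self._scale = 1e-15     # current peak-to-peak of the y-values
            self._oldscale = 1e-15  # scale at the last full recomputation

        def _loss(self, a, b):
            # Normalize the y-difference by the *current* scale.
            return abs(self.data[b] - self.data[a]) / self._scale

        def tell(self, x, y):
            self.data[x] = y
            ys = self.data.values()
            self._scale = max(max(ys) - min(ys), 1e-15)
            xs = sorted(self.data)
            i = xs.index(x)
            # The new point splits an old interval; drop its stale loss.
            if 0 < i < len(xs) - 1:
                self.losses.pop((xs[i - 1], xs[i + 1]), None)
            # Cheap update: only the intervals touching the new point.
            for j in (i - 1, i):
                if 0 <= j < len(xs) - 1:
                    self.losses[(xs[j], xs[j + 1])] = self._loss(xs[j], xs[j + 1])
            # Full recomputation only once the scale has grown enough;
            # with recompute_factor=1 this happens on every increase,
            # so stored losses never depend on the insertion order.
            if self._scale > self.recompute_factor * self._oldscale:
                self.losses = {(a, b): self._loss(a, b)
                               for a, b in zip(xs, xs[1:])}
                self._oldscale = self._scale

With the default factor of 2 the intermediate state of 'losses' depends
on the order in which points arrive; with a factor of 1 it does not,
which is exactly what the updated tests exploit via
'learner._recompute_losses_factor = 1'.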