More efficient 'tell_many'

Merged. Bas Nijholt requested to merge efficient_tell_many into master.
1 file   +38  -16
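For context: tell_many feeds a batch of precomputed points into a learner in one call, instead of calling tell point by point, and this MR makes that batch path cheaper for Learner1D while keeping the resulting learner state equivalent. A minimal usage sketch, not part of the MR (the function 'peak' and the grid of points are illustrative only; only adaptive's public Learner1D API is used):

import numpy as np
from adaptive import Learner1D

def peak(x, offset=0.8, a=0.01):
    # Illustrative 1D test function, similar in spirit to those in the test.
    return x + a**2 / (a**2 + (x - offset)**2)

xs = np.linspace(-1, 1, 101)
ys = [peak(x) for x in xs]

# Telling points one at a time updates the learner's bookkeeping per call ...
learner_loop = Learner1D(peak, bounds=(-1, 1))
for x, y in zip(xs, ys):
    learner_loop.tell(x, y)

# ... while tell_many ingests the whole batch at once; the final state should
# be equivalent, which is what test_tell_many verifies.
learner_batch = Learner1D(peak, bounds=(-1, 1))
learner_batch.tell_many(xs, ys)

assert learner_loop.npoints == learner_batch.npoints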
@@ -249,24 +249,46 @@ def test_tell_many():
         y = x + a**2 / (a**2 + (x - offset)**2)
         return [y, 0.5 * y, y**2]
 
-    def test_equal(learner, learner2):
-        assert learner2.pending_points == learner.pending_points
-        assert learner2.neighbors == learner.neighbors
-        assert learner2.data == learner.data
-        assert learner2._scale == learner._scale
-        assert learner2._bbox[0] == learner._bbox[0]
-        assert (np.array(learner2._bbox[1]) == np.array(learner._bbox[1])).all()
-        assert not learner.losses_combined.keys() - learner2.losses_combined.keys()
-        assert abs(sum(learner.losses_combined.values()) - sum(learner2.losses_combined.values())) < 1e-13
-        assert learner2.neighbors_combined == learner.neighbors_combined
-        assert learner2.losses == learner.losses
+    def assert_equal_dicts(d1, d2):
+        xs1, ys1 = zip(*sorted(d1.items()))
+        xs2, ys2 = zip(*sorted(d2.items()))
+        ys1 = np.array(ys1, dtype=np.float)
+        ys2 = np.array(ys2, dtype=np.float)
+        np.testing.assert_almost_equal(xs1, xs2)
+        np.testing.assert_almost_equal(ys1, ys2)
+
+    def assert_equal_sets(s1, s2):
+        xs1 = np.sort(list(s1))
+        xs2 = np.sort(list(s2))
+        np.testing.assert_almost_equal(xs1, xs2)
+
+    def test_equal(l1, l2):
+        assert_equal_dicts(l1.neighbors, l2.neighbors)
+        assert_equal_dicts(l1.neighbors_combined, l2.neighbors_combined)
+        assert_equal_dicts(l1.data, l2.data)
+        assert_equal_dicts(l2.losses, l1.losses)
+        assert_equal_dicts(l2.losses_combined, l1.losses_combined)
+        assert_equal_sets(l1.pending_points, l2.pending_points)
+        np.testing.assert_almost_equal(l1._bbox[1], l2._bbox[1])
+        assert l1._scale == l2._scale
+        assert l1._bbox[0] == l2._bbox[0]
 
     for function in [f, f_vec]:
         learner = Learner1D(function, bounds=(-1, 1))
+        learner2 = Learner1D(function, bounds=(-1, 1))
         simple(learner, goal=lambda l: l.npoints > 200)
         xs, ys = zip(*learner.data.items())
-        learner2 = Learner1D(function, bounds=(-1, 1))
-        learner2.tell_many(*zip(*learner.data.items()))
+
+        # Make the scale huge to not get a scale doubling
+        x = 1e-6
+        max_value = 1e6 if learner.vdim == 1 else np.array(learner.vdim * [1e6])
+        learner.tell(x, max_value)
+        learner2.tell(x, max_value)
+
+        for x in xs:
+            learner2.tell_pending(x)
+
+        learner2.tell_many(xs, ys)
         test_equal(learner, learner2)
 
     # Test non-determinism. We keep a list of points that will be
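The removed test_equal compared learner attributes with exact '==' (plus a hand-rolled 1e-13 tolerance for the summed losses); the new assert_equal_dicts and assert_equal_sets helpers sort the items and compare them with np.testing.assert_almost_equal, which tolerates the tiny floating-point differences that appear when the same points are processed in a different order. A standalone illustration of why exact equality is too strict here, not part of the MR:

import numpy as np

values = np.random.RandomState(0).rand(10_000)
forward = sum(values)          # accumulate left to right
backward = sum(values[::-1])   # same numbers, reversed order

print(forward == backward)     # often False: rounding depends on the order
np.testing.assert_almost_equal(forward, backward)  # passes: difference is tiny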
@@ -274,8 +296,8 @@ def test_tell_many():
     def _random_run(learner, learner2, scale_doubling=True):
         if not scale_doubling:
             # Make the scale huge to not get a scale doubling
-            x = 0.0000001
-            max_value = 1000000000
+            x = 1e-6
+            max_value = 1e6
             learner.tell(x, max_value)
             learner2.tell(x, max_value)
@@ -308,7 +330,7 @@ def test_tell_many():
         if scale_doubling:
             # Double the scale to trigger the loss updates
             max_value = max(learner.data.values())
-            x = 0.0000000001
+            x = 1e-6
             learner.tell(x, max_value * 10)
             learner2.tell(x, max_value * 10)
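Both comments about the "scale" refer to Learner1D's internal bookkeeping of the x- and y-range of the data, kept in the private _scale attribute that test_equal also compares; when the y-range grows past a threshold, all losses are recomputed, which is the code path the test either avoids or deliberately triggers. A hedged sketch of that mechanism, not part of the MR (the exact recompute threshold is an implementation detail):

from adaptive import Learner1D

learner = Learner1D(lambda x: x, bounds=(-1, 1))
learner.tell_many([-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0])
print(learner._scale)    # [x-range, y-range] of the data seen so far

# The same trick as in the test: one huge value inflates the y-scale up front,
# so later, ordinary-sized values can no longer double it.
learner.tell(1e-6, 1e6)
print(learner._scale)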