Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found
Select Git revision

Target

Select target project
No results found
Select Git revision
Show changes
Commits on Source (36)
image: quantumtinkerer/research
test:
script:
- pip install -r test-requirements.txt
......@@ -7,3 +8,9 @@ test:
artifacts:
paths:
- htmlcov
authors check:
script:
- MISSING_AUTHORS=$(git shortlog -s HEAD | sed -e "s/^[0-9\t ]*//"| xargs -i sh -c 'grep -q "{}" AUTHORS.md || echo "{} missing from authors"')
- if [ ! -z "$MISSING_AUTHORS" ]; then { echo $MISSING_AUTHORS; exit 1; }; fi
allow_failure: true
# Making an Adaptive release
This document guides a contributor through creating a release of Adaptive.
## Preflight checks
The following checks should be made *before* tagging the release.
#### Check that all issues are resolved
Check that all the issues and merge requests for the appropriate
[milestone](https://gitlab.kwant-project.org/qt/adaptive/issues)
have been resolved. Any unresolved issues should have their milestone
bumped.
#### Ensure that all tests pass
For major and minor releases we will be tagging the ``master`` branch.
This should be as simple as verifying that the
[latest CI pipeline](https://gitlab.kwant-project.org/qt/adaptive/pipelines)
succeeded.
#### Verify that `AUTHORS.md` is up-to-date
The following command shows the number of commits per author since the last
annotated tag:
```
t=$(git describe --abbrev=0); echo Commits since $t; git shortlog -s $t..
```
## Make a release, but do not publish it yet
### Tag the release
Make an **annotated, signed** tag for the release. The tag must have the name:
```
git tag -s v<version> -m "version <version>"
```
### Build a source tarball and wheels and test it
```
rm -fr build dist
python setup.py sdist bdist_wheel
```
This creates the file `dist/adaptive-<version>.tar.gz`. It is a good idea to unpack it
and check that the tests run:
```
tar xzf dist/adaptive*.tar.gz
cd adaptive-*
py.test .
```
## Upload to PyPI
```
twine upload dist/*
```
## Update the [conda-forge recipe](https://github.com/conda-forge/adaptive-feedstock)
* Fork the [feedstock repo](https://github.com/conda-forge/adaptive-feedstock)
* Change the version number and sha256 in `recipe/meta.yaml` and commit to your fork
* Open a [Pull Request](https://github.com/conda-forge/adaptive-feedstock/compare)
* Type `@conda-forge-admin, please rerender` as a comment
* When the tests succeed, merge
......@@ -64,21 +64,24 @@ class BalancingLearner(BaseLearner):
def _ask_and_tell(self, n):
points = []
loss_improvements = []
for _ in range(n):
loss_improvements = []
improvements_per_learner = []
pairs = []
for index, learner in enumerate(self.learners):
if index not in self._points:
self._points[index] = learner.ask(
n=1, add_data=False)
point, loss_improvement = self._points[index]
loss_improvements.append(loss_improvement[0])
improvements_per_learner.append(loss_improvement[0])
pairs.append((index, point[0]))
x, _ = max(zip(pairs, loss_improvements), key=itemgetter(1))
x, l = max(zip(pairs, improvements_per_learner),
key=itemgetter(1))
points.append(x)
loss_improvements.append(l)
self.tell(x, None)
return points, None
return points, loss_improvements
def ask(self, n, add_data=True):
"""Chose points for learners."""
......
......@@ -137,44 +137,64 @@ class Learner1D(BaseLearner):
self._scale, self.data)
self.losses[x_left, x_right] = loss
start = self.neighbors_combined.bisect_right(x_left)
end = self.neighbors_combined.bisect_left(x_right)
for i in range(start, end):
a, b = (self.neighbors_combined.iloc[i],
self.neighbors_combined.iloc[i + 1])
# Iterate over all interpolated intervals in between
# x_left and x_right and set the newly interpolated loss.
a, b = x_left, None
while b != x_right:
b = self.neighbors_combined[a][1]
self.losses_combined[a, b] = (b - a) * loss / dx
if start == end:
self.losses_combined[x_left, x_right] = loss
a = b
def update_losses(self, x, real=True):
    """Update the loss bookkeeping after adding the point `x`.

    Parameters
    ----------
    x : the newly added point.
    real : bool
        True if `x` has an evaluated value, False if `x` is pending
        (only interpolated / combined losses are touched then).
    """
    # When we add a new point x, we should update the losses.
    # (x_left, x_right) are the "real" neighbors of 'x'.
    x_left, x_right = self.find_neighbors(x, self.neighbors)
    # (a, b) are the neighbors of the combined interpolated
    # and "real" intervals.
    a, b = self.find_neighbors(x, self.neighbors_combined)

    # (a, b) is split into (a, x) and (x, b) so if (a, b) exists
    self.losses_combined.pop((a, b), None)  # we get rid of (a, b).

    if real:
        # We need to update all interpolated losses in the intervals
        # (x_left, x) and (x, x_right), since the addition of the
        # point 'x' could change their loss.
        self.update_interpolated_loss_in_interval(x_left, x)
        self.update_interpolated_loss_in_interval(x, x_right)

        # Since 'x' is in between (x_left, x_right),
        # we get rid of the interval.
        self.losses.pop((x_left, x_right), None)
        self.losses_combined.pop((x_left, x_right), None)
    elif x_left is not None and x_right is not None:
        # 'x' happens to be in between two real points,
        # so we can interpolate the losses.
        dx = x_right - x_left
        loss = self.losses[x_left, x_right]
        self.losses_combined[a, x] = (x - a) * loss / dx
        self.losses_combined[x, b] = (b - x) * loss / dx

    # (no real point left of x) or (no real point right of a)
    left_loss_is_unknown = ((x_left is None) or
                            (not real and x_right is None))
    if (a is not None) and left_loss_is_unknown:
        self.losses_combined[a, x] = float('inf')

    # (no real point right of x) or (no real point left of b)
    right_loss_is_unknown = ((x_right is None) or
                             (not real and x_left is None))
    if (b is not None) and right_loss_is_unknown:
        self.losses_combined[x, b] = float('inf')
def find_neighbors(self, x, neighbors):
    """Return the (left, right) neighbors of `x` in the sorted
    mapping `neighbors`.

    If `x` is already a key, its stored neighbor pair is returned.
    Otherwise the nearest keys on either side are returned; `None`
    stands in for a missing neighbor at either boundary.
    """
    if x in neighbors:
        return neighbors[x]
    pos = neighbors.bisect_left(x)
    keys = neighbors.keys()
    # pos == 0 means there is no key left of x;
    # pos == len(neighbors) means there is no key right of x.
    x_left = keys[pos - 1] if pos != 0 else None
    x_right = keys[pos] if pos != len(neighbors) else None
    return x_left, x_right
def update_neighbors(self, x, neighbors):
......@@ -212,9 +232,16 @@ class Learner1D(BaseLearner):
self._scale[1] = self._bbox[1][1] - self._bbox[1][0]
def tell(self, x, y):
real = y is not None
if x in self.data:
# The point is already evaluated before
return
real = y is not None
if real:
# either it is a float/int, if not, try casting to a np.array
if not isinstance(y, (float, int)):
y = np.asarray(y, dtype=float)
# Add point to the real data dict
self.data[x] = y
# remove from set of pending points
......
This diff is collapsed.
......@@ -163,6 +163,42 @@ def test_uniform_sampling1D(learner_type, f, learner_kwargs):
assert max(ivals) / min(ivals) < 2 + 1e-8
def test_learner1D_loss_interpolation_for_unasked_point():
    """A pending point whose neighbors are not both real must get an
    infinite combined loss until its value is told."""
    # https://gitlab.kwant-project.org/qt/adaptive/issues/99
    l = Learner1D(lambda x: x, (0, 4))
    l.tell(0, 0)
    l.tell(1, 0)
    l.tell(2, 0)

    # The untried right boundary has no real right neighbor -> inf loss.
    assert l.ask(1) == ([4], [np.inf])
    assert l.losses == {(0, 1): 0.25, (1, 2): 0.25}
    assert l.losses_combined == {(0, 1): 0.25, (1, 2): 0.25, (2, 4.0): np.inf}

    # assert l.ask(1) == ([3], [np.inf]) # XXX: This doesn't return np.inf as loss_improvement...
    l.ask(1)
    assert l.losses == {(0, 1): 0.25, (1, 2): 0.25}
    assert l.losses_combined == {(0, 1): 0.25, (1, 2): 0.25, (2, 3.0): np.inf, (3.0, 4.0): np.inf}

    # Once the boundary value is known, the pending intervals get
    # interpolated (finite) losses.
    l.tell(4, 0)
    assert l.losses_combined == {(0, 1): 0.25, (1, 2): 0.25, (2, 3): 0.25, (3, 4): 0.25}
def test_learner1D_pending_loss_intervals():
    """Asking for and telling pending points must keep the set of
    combined-loss intervals consistent with the known points."""
    # https://gitlab.kwant-project.org/qt/adaptive/issues/99
    l = Learner1D(lambda x: x, (0, 4))
    l.tell(0, 0)
    l.tell(1, 0)
    l.tell(2, 0)
    assert set(l.losses_combined.keys()) == {(0, 1), (1, 2)}
    # Asking introduces a pending interval up to the untried boundary.
    l.ask(1)
    assert set(l.losses_combined.keys()) == {(0, 1), (1, 2), (2, 4)}
    # Telling a point inside the pending interval splits it.
    l.tell(3.5, 0)
    assert set(l.losses_combined.keys()) == {(0, 1), (1, 2), (2, 3.5), (3.5, 4.0)}
@pytest.mark.xfail
@run_with(Learner2D, LearnerND)
def test_uniform_sampling2D(learner_type, f, learner_kwargs):
......@@ -188,6 +224,19 @@ def test_uniform_sampling2D(learner_type, f, learner_kwargs):
assert max(distances) < math.sqrt(dx**2 + dy**2)
@pytest.mark.parametrize('learner_type, bounds', [
    (Learner1D, (-1, 1)),
    (Learner2D, [(-1, 1), (-1, 1)]),
    (LearnerND, [(-1, 1), (-1, 1), (-1, 1)]),
])
def test_learner_accepts_lists(learner_type, bounds):
    """Vector-valued functions (returning a list) should be accepted
    by Learner1D, Learner2D and LearnerND alike."""
    def f(x):
        return [0, 1]

    learner = learner_type(f, bounds=bounds)
    simple(learner, goal=lambda l: l.npoints > 10)
@run_with(xfail(Learner1D), Learner2D, LearnerND)
def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs):
"""Adding already existing data is an idempotent operation.
......@@ -438,6 +487,24 @@ def test_termination_on_discontinuities():
assert smallest_interval >= 0.5E3 * np.finfo(float).eps
def test_order_adding_points():
    """Losses must not depend on the order in which points are told."""
    # See https://gitlab.kwant-project.org/qt/adaptive/issues/98
    l = Learner1D(lambda x: x, (0, 1))
    # Points are deliberately given out of order.
    l.tell_many([1, 0, 0.5], [0, 0, 0])
    assert l.losses_combined == {(0, 0.5): 0.5, (0.5, 1): 0.5}
    assert l.losses == {(0, 0.5): 0.5, (0.5, 1): 0.5}
    # Asking afterwards must not raise.
    l.ask(1)
def test_adding_existing_point_passes_silently():
    """Telling an already-evaluated point again (even as pending,
    with y=None) must not raise."""
    # See https://gitlab.kwant-project.org/qt/adaptive/issues/97
    l = Learner1D(lambda x: x, (0, 4))
    l.tell(0, 0)
    l.tell(1, 0)
    l.tell(2, 0)
    # Re-tell an existing point as pending; must be a silent no-op.
    l.tell(1, None)
def test_loss_at_machine_precision_interval_is_zero():
"""The loss of an interval smaller than _dx_eps
should be set to zero."""
......