diff --git a/AUTHORS.md b/AUTHORS.md index aa414900e5b21d8b02c33e157f406b0e321254e3..3da642df6d1392011733b153bd67c62047636246 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -1,4 +1,4 @@ -# Adaptive Authors +## Authors Below is a list of the contributors to Adaptive: + [Anton Akhmerov](<https://antonakhmerov.org>) diff --git a/README.rst b/README.rst index f92ed5d7567ff98920597701568287eddb99dfaa..c32d133a0130b81e71628a1319a4e705a7c99b7f 100644 --- a/README.rst +++ b/README.rst @@ -1,12 +1,10 @@ .. summary-start -.. _logo-adaptive: +|logo| adaptive +=============== -|image0| adaptive -================= - -|PyPI| |Conda| |Downloads| |pipeline status| |DOI| |Binder| |Join the -chat at https://gitter.im/python-adaptive/adaptive| |Documentation Status| +|PyPI| |Conda| |Downloads| |Pipeline status| |DOI| |Binder| |Gitter| +|Documentation| |GitHub| **Tools for adaptive parallel sampling of mathematical functions.** @@ -126,7 +124,9 @@ We would like to give credits to the following people: Mathematical Software, 37 (3), art. no. 26, 2010. - Pauli Virtanen for his ``AdaptiveTriSampling`` script (no longer available online since SciPy Central went down) which served as - inspiration for the ``~adaptive.Learner2D``. + inspiration for the `~adaptive.Learner2D`. + +.. credits-end For general discussion, we have a `Gitter chat channel <https://gitter.im/python-adaptive/adaptive>`_. If you find any @@ -136,21 +136,23 @@ or submit a `merge request <https://gitlab.kwant-project.org/qt/adaptive/merge_requests>`_. .. references-start -.. |image0| image:: https://gitlab.kwant-project.org/qt/adaptive/uploads/d20444093920a4a0499e165b5061d952/logo.png +.. |logo| image:: https://adaptive.readthedocs.io/en/latest/_static/logo.png .. |PyPI| image:: https://img.shields.io/pypi/v/adaptive.svg :target: https://pypi.python.org/pypi/adaptive -.. |Conda| image:: https://anaconda.org/conda-forge/adaptive/badges/installer/conda.svg +.. |Conda| image:: https://img.shields.io/badge/install%20with-conda-green.svg :target: https://anaconda.org/conda-forge/adaptive -.. |Downloads| image:: https://anaconda.org/conda-forge/adaptive/badges/downloads.svg +.. |Downloads| image:: https://img.shields.io/conda/dn/conda-forge/adaptive.svg :target: https://anaconda.org/conda-forge/adaptive -.. |pipeline status| image:: https://gitlab.kwant-project.org/qt/adaptive/badges/master/pipeline.svg +.. |Pipeline status| image:: https://gitlab.kwant-project.org/qt/adaptive/badges/master/pipeline.svg :target: https://gitlab.kwant-project.org/qt/adaptive/pipelines .. |DOI| image:: https://zenodo.org/badge/113714660.svg - :target: https://zenodo.org/badge/latestdoi/113714660 + :target: https://doi.org/10.5281/zenodo.1446400 .. |Binder| image:: https://mybinder.org/badge.svg :target: https://mybinder.org/v2/gh/python-adaptive/adaptive/master?filepath=learner.ipynb -.. |Join the chat at https://gitter.im/python-adaptive/adaptive| image:: https://img.shields.io/gitter/room/nwjs/nw.js.svg +.. |Gitter| image:: https://img.shields.io/gitter/room/nwjs/nw.js.svg :target: https://gitter.im/python-adaptive/adaptive -.. |Documentation Status| image:: https://readthedocs.org/projects/adaptive/badge/?version=latest +.. |Documentation| image:: https://readthedocs.org/projects/adaptive/badge/?version=latest :target: https://adaptive.readthedocs.io/en/latest/?badge=latest +.. |GitHub| image:: https://img.shields.io/github/stars/python-adaptive/adaptive.svg?style=social + :target: https://github.com/python-adaptive/adaptive/stargazers .. 
references-end diff --git a/adaptive/learner/average_learner.py b/adaptive/learner/average_learner.py index 312101bfeb2e8790bbde008b81f7ca545a46655f..704a9bb8bbec42267f087726529c12ea05603274 100644 --- a/adaptive/learner/average_learner.py +++ b/adaptive/learner/average_learner.py @@ -27,6 +27,8 @@ class AverageLearner(BaseLearner): Sampled points and values. pending_points : set Points that still have to be evaluated. + npoints : int + Number of evaluated points. """ def __init__(self, function, atol=None, rtol=None): @@ -48,7 +50,7 @@ class AverageLearner(BaseLearner): @property def n_requested(self): - return len(self.data) + len(self.pending_points) + return self.npoints + len(self.pending_points) def ask(self, n, tell_pending=True): points = list(range(self.n_requested, self.n_requested + n)) @@ -59,7 +61,7 @@ class AverageLearner(BaseLearner): - set(self.data) - set(self.pending_points))[:n] - loss_improvements = [self.loss_improvement(n) / n] * n + loss_improvements = [self._loss_improvement(n) / n] * n if tell_pending: for p in points: self.tell_pending(p) @@ -81,10 +83,13 @@ class AverageLearner(BaseLearner): @property def mean(self): + """The average of all values in `data`.""" return self.sum_f / self.npoints @property def std(self): + """The corrected sample standard deviation of the values + in `data`.""" n = self.npoints if n < 2: return np.inf @@ -106,7 +111,7 @@ class AverageLearner(BaseLearner): return max(standard_error / self.atol, standard_error / abs(self.mean) / self.rtol) - def loss_improvement(self, n): + def _loss_improvement(self, n): loss = self.loss() if np.isfinite(loss): return loss - self.loss(n=self.npoints + n) @@ -118,6 +123,12 @@ class AverageLearner(BaseLearner): self.pending_points = set() def plot(self): + """Returns a histogram of the evaluated data. + + Returns + ------- + holoviews.element.Histogram + A histogram of the evaluated data.""" hv = ensure_holoviews() vals = [v for v in self.data.values() if v is not None] if not vals: diff --git a/adaptive/learner/balancing_learner.py b/adaptive/learner/balancing_learner.py index 764758ba6feb1aff38218336c477ca68046863d4..f8ec0b08ac491dd67b492dc7a625162348e7f77c 100644 --- a/adaptive/learner/balancing_learner.py +++ b/adaptive/learner/balancing_learner.py @@ -22,7 +22,7 @@ class BalancingLearner(BaseLearner): Parameters ---------- - learners : sequence of `BaseLearner` + learners : sequence of `~adaptive.BaseLearner`\s The learners from which to choose. These must all have the same type. cdims : sequence of dicts, or (keys, iterable of values), optional Constant dimensions; the parameters that label the learners. Used @@ -42,6 +42,13 @@ class BalancingLearner(BaseLearner): >>> cdims = (['A', 'B'], [(True, 0), (True, 1), ... (False, 0), (False, 1)]) + Attributes + ---------- + learners : list + The sequence of `~adaptive.BaseLearner`\s. + function : callable + A function that calls the functions of the underlying learners. + Its signature is ``function(learner_index, point)``. strategy : 'loss_improvements' (default), 'loss', or 'npoints' The points that the `BalancingLearner` choses can be either based on: the best 'loss_improvements', the smallest total 'loss' of the @@ -51,13 +58,13 @@ class BalancingLearner(BaseLearner): Notes ----- - This learner compares the 'loss' calculated from the "child" learners. + This learner compares the `loss` calculated from the "child" learners. This requires that the 'loss' from different learners *can be meaningfully compared*. 
For the moment we enforce this restriction by requiring that all learners are the same type but (depending on the internals of the learner) it may be that the loss cannot be compared *even between learners - of the same type*. In this case the `BalancingLearner` will behave in an - undefined way. + of the same type*. In this case the `~adaptive.BalancingLearner` will + behave in an undefined way. Change the `strategy` in that case. """ def __init__(self, learners, *, cdims=None, strategy='loss_improvements'): @@ -81,6 +88,12 @@ class BalancingLearner(BaseLearner): @property def strategy(self): + """Can be either 'loss_improvements' (default), 'loss', or 'npoints'. + The points that the `BalancingLearner` chooses can be either based on: + the best 'loss_improvements', the smallest total 'loss' of the + child learners, or the number of points per learner, using 'npoints'. + One can dynamically change the strategy while the simulation is + running by changing the ``learner.strategy`` attribute.""" return self._strategy @strategy.setter @@ -122,7 +135,7 @@ class BalancingLearner(BaseLearner): points = [] loss_improvements = [] for _ in range(n): - losses = self.losses(real=False) + losses = self._losses(real=False) max_ind = np.argmax(losses) xs, ls = self.learners[max_ind].ask(1) points.append((max_ind, xs[0])) @@ -165,7 +178,7 @@ class BalancingLearner(BaseLearner): self._loss.pop(index, None) self.learners[index].tell_pending(x) - def losses(self, real=True): + def _losses(self, real=True): losses = [] loss_dict = self._loss if real else self._pending_loss @@ -178,7 +191,7 @@ class BalancingLearner(BaseLearner): @cache_latest def loss(self, real=True): - losses = self.losses(real) + losses = self._losses(real) return max(losses) def plot(self, cdims=None, plotter=None, dynamic=True): @@ -215,8 +228,8 @@ class BalancingLearner(BaseLearner): Returns ------- dm : `holoviews.core.DynamicMap` (default) or `holoviews.core.HoloMap` - A `DynamicMap` (dynamic=True) or `HoloMap` (dynamic=False) with - sliders that are defined by `cdims`. + A `DynamicMap` ``(dynamic=True)`` or `HoloMap` + ``(dynamic=False)`` with sliders that are defined by `cdims`. """ hv = ensure_holoviews() cdims = cdims or self._cdims_default @@ -295,7 +308,7 @@ class BalancingLearner(BaseLearner): Notes ----- The order of the child learners inside `learner.learners` is the same - as `adaptive.utils.named_product(**combos)`. + as ``adaptive.utils.named_product(**combos)``. """ learners = [] arguments = named_product(**combos) @@ -313,7 +326,7 @@ class BalancingLearner(BaseLearner): folder : str Directory in which the learners's data will be saved. compress : bool, default True - Compress the data upon saving using 'gzip'. When saving + Compress the data upon saving using `gzip`. When saving using compression, one must load it with compression too. Notes @@ -364,7 +377,7 @@ class BalancingLearner(BaseLearner): Example ------- - See the example in the 'BalancingLearner.save' doc-string. + See the example in the `BalancingLearner.save` doc-string. """ for l in self.learners: l.load(os.path.join(folder, l.fname), compress=compress) diff --git a/adaptive/learner/base_learner.py b/adaptive/learner/base_learner.py index b33cc0180c317fe07127c23334a08411d9d09221..eed2c5a8dbd6bb30293d87cf70b9d734fa7adfe9 100644 --- a/adaptive/learner/base_learner.py +++ b/adaptive/learner/base_learner.py @@ -20,6 +20,9 @@ class BaseLearner(metaclass=abc.ABCMeta): npoints : int, optional The number of evaluated points that have been added to the learner. 
Subclasses do not *have* to implement this attribute. +    pending_points : set, optional +        Points that have been requested but have not been evaluated yet. +        Subclasses do not *have* to implement this attribute. Notes ----- @@ -118,10 +121,11 @@ class BaseLearner(metaclass=abc.ABCMeta): Notes ----- -        There are __two ways__ of naming the files: -        1. Using the 'fname' argument in 'learner.save(fname='example.p') -        2. Setting the 'fname' attribute, like -        'learner.fname = "data/example.p"' and then 'learner.save()'. +        There are **two ways** of naming the files: + +        1. Using the ``fname`` argument in ``learner.save(fname='example.p')`` +        2. Setting the ``fname`` attribute, like +        ``learner.fname = "data/example.p"`` and then ``learner.save()``. """ fname = fname or self.fname data = self._get_data() @@ -142,7 +146,7 @@ class BaseLearner(metaclass=abc.ABCMeta): Notes ----- -        See the notes in the 'BaseLearner.save' doc-string. +        See the notes in the `save` doc-string. """ fname = fname or self.fname with suppress(FileNotFoundError, EOFError): @@ -157,6 +161,9 @@ class BaseLearner(metaclass=abc.ABCMeta): @property def fname(self): +        """Filename for the learner when it is saved (or loaded) using +        `~adaptive.BaseLearner.save` (or `~adaptive.BaseLearner.load`). +        """ # This is a property because then it will be availible in the DataSaver try: return self._fname diff --git a/adaptive/learner/data_saver.py b/adaptive/learner/data_saver.py index 832c1a9e1d55955701ce0c9e34e2bb1cacf4e261..8e035994fb6f851724110b297d7792e6ea71e63f 100644 --- a/adaptive/learner/data_saver.py +++ b/adaptive/learner/data_saver.py @@ -35,11 +35,13 @@ class DataSaver: def __getattr__(self, attr): return getattr(self.learner, attr) +    @copy_docstring_from(BaseLearner.tell) def tell(self, x, result): y = self.arg_picker(result) self.extra_data[x] = result self.learner.tell(x, y) +    @copy_docstring_from(BaseLearner.tell_pending) def tell_pending(self, x): self.learner.tell_pending(x) diff --git a/adaptive/learner/integrator_learner.py b/adaptive/learner/integrator_learner.py index 931d45315b5b25811345b812fd7b93a11b0dce6a..0290a387a4bf5f05df89aaed0494cf6a3d92da10 100644 --- a/adaptive/learner/integrator_learner.py +++ b/adaptive/learner/integrator_learner.py @@ -487,6 +487,7 @@ class IntegratorLearner(BaseLearner): @property def npoints(self): +        """Number of evaluated points.""" return len(self.done_points) @property diff --git a/adaptive/learner/learner1D.py b/adaptive/learner/learner1D.py index 343a9def2184b96833ecebcfd2e01ab322fd7fbf..8868800d8d49b48e5e9a9af81b3c3d81d5b44daa 100644 --- a/adaptive/learner/learner1D.py +++ b/adaptive/learner/learner1D.py @@ -34,7 +34,7 @@ def uniform_loss(interval, scale, function_values): def default_loss(interval, scale, function_values): -    """Calculate loss on a single interval +    """Calculate loss on a single interval. Currently returns the rescaled length of the interval. If one of the y-values is missing, returns 0 (so the intervals with missing data are @@ -148,6 +148,12 @@ class Learner1D(BaseLearner): @property def vdim(self): +        """Length of the output of ``learner.function``. +        If the output is unsized (when it's a scalar) +        then `vdim = 1`. + +        As long as no data is known, `vdim = 1`. 
+ """ if self._vdim is None: if self.data: y = next(iter(self.data.values())) @@ -162,6 +168,7 @@ class Learner1D(BaseLearner): @property def npoints(self): + """Number of evaluated points.""" return len(self.data) @cache_latest @@ -169,7 +176,7 @@ class Learner1D(BaseLearner): losses = self.losses if real else self.losses_combined return max(losses.values()) if len(losses) > 0 else float('inf') - def update_interpolated_loss_in_interval(self, x_left, x_right): + def _update_interpolated_loss_in_interval(self, x_left, x_right): if x_left is not None and x_right is not None: dx = x_right - x_left if dx < self._dx_eps: @@ -187,13 +194,13 @@ class Learner1D(BaseLearner): self.losses_combined[a, b] = (b - a) * loss / dx a = b - def update_losses(self, x, real=True): + def _update_losses(self, x, real=True): # When we add a new point x, we should update the losses # (x_left, x_right) are the "real" neighbors of 'x'. - x_left, x_right = self.find_neighbors(x, self.neighbors) + x_left, x_right = self._find_neighbors(x, self.neighbors) # (a, b) are the neighbors of the combined interpolated # and "real" intervals. - a, b = self.find_neighbors(x, self.neighbors_combined) + a, b = self._find_neighbors(x, self.neighbors_combined) # (a, b) is splitted into (a, x) and (x, b) so if (a, b) exists self.losses_combined.pop((a, b), None) # we get rid of (a, b). @@ -202,8 +209,8 @@ class Learner1D(BaseLearner): # We need to update all interpolated losses in the interval # (x_left, x) and (x, x_right). Since the addition of the point # 'x' could change their loss. - self.update_interpolated_loss_in_interval(x_left, x) - self.update_interpolated_loss_in_interval(x, x_right) + self._update_interpolated_loss_in_interval(x_left, x) + self._update_interpolated_loss_in_interval(x, x_right) # Since 'x' is in between (x_left, x_right), # we get rid of the interval. @@ -230,7 +237,7 @@ class Learner1D(BaseLearner): self.losses_combined[x, b] = float('inf') @staticmethod - def find_neighbors(x, neighbors): + def _find_neighbors(x, neighbors): if x in neighbors: return neighbors[x] pos = neighbors.bisect_left(x) @@ -239,14 +246,14 @@ class Learner1D(BaseLearner): x_right = keys[pos] if pos != len(neighbors) else None return x_left, x_right - def update_neighbors(self, x, neighbors): + def _update_neighbors(self, x, neighbors): if x not in neighbors: # The point is new - x_left, x_right = self.find_neighbors(x, neighbors) + x_left, x_right = self._find_neighbors(x, neighbors) neighbors[x] = [x_left, x_right] neighbors.get(x_left, [None, None])[1] = x neighbors.get(x_right, [None, None])[0] = x - def update_scale(self, x, y): + def _update_scale(self, x, y): """Update the scale with which the x and y-values are scaled. For a learner where the function returns a single scalar the scale @@ -291,16 +298,16 @@ class Learner1D(BaseLearner): if not self.bounds[0] <= x <= self.bounds[1]: return - self.update_neighbors(x, self.neighbors_combined) - self.update_neighbors(x, self.neighbors) - self.update_scale(x, y) - self.update_losses(x, real=True) + self._update_neighbors(x, self.neighbors_combined) + self._update_neighbors(x, self.neighbors) + self._update_scale(x, y) + self._update_losses(x, real=True) # If the scale has increased enough, recompute all losses. 
if self._scale[1] > 2 * self._oldscale[1]: for interval in self.losses: -            self.update_interpolated_loss_in_interval(*interval) +            self._update_interpolated_loss_in_interval(*interval) self._oldscale = deepcopy(self._scale) @@ -309,8 +316,8 @@ # The point is already evaluated before return self.pending_points.add(x) -        self.update_neighbors(x, self.neighbors_combined) -        self.update_losses(x, real=False) +        self._update_neighbors(x, self.neighbors_combined) +        self._update_losses(x, real=False) def tell_many(self, xs, ys, *, force=False): if not force and not (len(xs) > 0.5 * len(self.data) and len(xs) > 2): @@ -379,10 +386,10 @@ if ival in self.losses: # If this interval does not exist it should already # have an inf loss. -                self.update_interpolated_loss_in_interval(*ival) +                self._update_interpolated_loss_in_interval(*ival) def ask(self, n, tell_pending=True): -        """Return n points that are expected to maximally reduce the loss.""" +        """Return 'n' points that are expected to maximally reduce the loss.""" points, loss_improvements = self._ask_points_without_adding(n) if tell_pending: @@ -392,7 +399,7 @@ return points, loss_improvements def _ask_points_without_adding(self, n): -        """Return n points that are expected to maximally reduce the loss. +        """Return 'n' points that are expected to maximally reduce the loss. Without altering the state of the learner""" # Find out how to divide the n points over the intervals # by finding positive integer n_i that minimize max(L_i / n_i) subject @@ -466,6 +473,14 @@ return points, loss_improvements def plot(self): +        """Returns a plot of the evaluated data. + +        Returns +        ------- +        plot : `holoviews.element.Scatter` (if vdim=1)\ +            else `holoviews.element.Path` +            Plot of the evaluated data. +        """ hv = ensure_holoviews() if not self.data: p = hv.Scatter([]) * hv.Path([]) diff --git a/adaptive/learner/learner2D.py b/adaptive/learner/learner2D.py index bbc3f64483d95e5fe88402ab005c47fab570cba8..11ac92a4691fdbaf87fef462169f51914be03d39 100644 --- a/adaptive/learner/learner2D.py +++ b/adaptive/learner/learner2D.py @@ -15,6 +15,19 @@ from ..utils import cache_latest # Learner2D and helper functions. def deviations(ip): +    """Returns the deviation of the linear estimate. + +    This is useful when defining custom loss functions. + +    Parameters +    ---------- +    ip : `scipy.interpolate.LinearNDInterpolator` instance + +    Returns +    ------- +    numpy array +        The deviation per triangle. +    """ values = ip.values / (ip.values.ptp(axis=0).max() or 1) gradients = interpolate.interpnd.estimate_gradients_2d_global( ip.tri, values, tol=1e-6) @@ -37,6 +50,20 @@ def areas(ip): +    """Returns the area per triangle of the triangulation inside +    a `LinearNDInterpolator` instance. + +    This is useful when defining custom loss functions. + +    Parameters +    ---------- +    ip : `scipy.interpolate.LinearNDInterpolator` instance + +    Returns +    ------- +    numpy array +        The area per triangle in ``ip.tri``. +    """ p = ip.tri.points[ip.tri.vertices] q = p[:, :-1, :] - p[:, -1, None, :] areas = abs(q[:, 0, 0] * q[:, 1, 1] - q[:, 0, 1] * q[:, 1, 0]) / 2 @@ -289,10 +316,17 @@ @property def npoints(self): +        """Number of evaluated points.""" return len(self.data) @property def vdim(self): +        """Length of the output of ``learner.function``. +        If the output is unsized (when it's a scalar) +        then `vdim = 1`. + +        As long as no data is known, `vdim = 1`. 
+        """ if self._vdim is None and self.data: try: value = next(iter(self.data.values())) @@ -337,11 +371,15 @@ return points_combined, values_combined def data_combined(self): +        """Like `data`, but this includes the points in +        `pending_points` for which the values are interpolated.""" # Interpolate the unfinished points points, values = self._data_combined() return {tuple(k): v for k, v in zip(points, values)} def ip(self): +        """A `scipy.interpolate.LinearNDInterpolator` instance +        containing the learner's data.""" if self._ip is None: points, values = self._data_in_bounds() points = self._scale(points) @@ -349,6 +387,9 @@ return self._ip def ip_combined(self): +        """A `scipy.interpolate.LinearNDInterpolator` instance +        containing the learner's data *and* interpolated data of +        the `pending_points`.""" if self._ip_combined is None: points, values = self._data_combined() points = self._scale(points) diff --git a/adaptive/learner/learnerND.py b/adaptive/learner/learnerND.py index 274ae16ff9f9a6505c0c58edff67e0d710974a3d..c8081878d84004d802d0a2f6ef0f6be10998e03b 100644 --- a/adaptive/learner/learnerND.py +++ b/adaptive/learner/learnerND.py @@ -188,10 +188,17 @@ class LearnerND(BaseLearner): @property def npoints(self): +        """Number of evaluated points.""" return len(self.data) @property def vdim(self): +        """Length of the output of ``learner.function``. +        If the output is unsized (when it's a scalar) +        then `vdim = 1`. + +        As long as no data is known, `vdim = 1`. +        """ if self._vdim is None and self.data: try: value = next(iter(self.data.values())) @@ -205,6 +212,8 @@ return all(p in self.data for p in self._bounds_points) def ip(self): +        """A `scipy.interpolate.LinearNDInterpolator` instance +        containing the learner's data.""" # XXX: take our own triangulation into account when generating the ip return interpolate.LinearNDInterpolator(self.points, self.values) @@ -227,10 +236,12 @@ @property def values(self): +        """Get the values from `data` as a numpy array.""" return np.array(list(self.data.values()), dtype=float) @property def points(self): +        """Get the points from `data` as a numpy array.""" return np.array(list(self.data.keys()), dtype=float) def tell(self, point, value): @@ -262,6 +273,7 @@ return simplex in self.tri.simplices def inside_bounds(self, point): +        """Check whether a point is inside the bounds.""" return all(mn <= p <= mx for p, (mn, mx) in zip(point, self.bounds)) def tell_pending(self, point, *, simplex=None): diff --git a/adaptive/learner/skopt_learner.py b/adaptive/learner/skopt_learner.py index 2b778ba513f2d1627575523062505012466cd218..9aac8d4c1e44ca4acd53c8e4c44c5133f608503f 100644 --- a/adaptive/learner/skopt_learner.py +++ b/adaptive/learner/skopt_learner.py @@ -8,18 +8,18 @@ from ..utils import cache_latest class SKOptLearner(Optimizer, BaseLearner): -    """Learn a function minimum using 'skopt.Optimizer'. +    """Learn a function minimum using ``skopt.Optimizer``. -    This is an 'Optimizer' from 'scikit-optimize', +    This is an ``Optimizer`` from ``scikit-optimize``, with the necessary methods added to make it conform -    to the 'adaptive' learner interface. +    to the ``adaptive`` learner interface. Parameters ---------- function : callable The function to learn. **kwargs : -        Arguments to pass to 'skopt.Optimizer'. +        Arguments to pass to ``skopt.Optimizer``. 
""" def __init__(self, function, **kwargs): @@ -63,6 +63,7 @@ class SKOptLearner(Optimizer, BaseLearner): @property def npoints(self): + """Number of evaluated points.""" return len(self.Xi) def plot(self, nsamples=200): diff --git a/adaptive/runner.py b/adaptive/runner.py index ce2b61ad8625e6f0fb377bb932e7ea0698ff178a..b6d0de494833ded337cbab87cd9045b42eb1327f 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -54,7 +54,7 @@ else: class BaseRunner: - """Base class for runners that use concurrent.futures.Executors. + """Base class for runners that use `concurrent.futures.Executors`. Parameters ---------- @@ -346,7 +346,7 @@ class BlockingRunner(BaseRunner): class AsyncRunner(BaseRunner): - """Run a learner asynchronously in an executor using asyncio. + """Run a learner asynchronously in an executor using `asyncio`. Parameters ---------- @@ -548,7 +548,7 @@ class AsyncRunner(BaseRunner): Parameters ---------- save_kwargs : dict - Key-word arguments for 'learner.save(**save_kwargs)'. + Key-word arguments for ``learner.save(**save_kwargs)``. interval : int Number of seconds between saving the learner. @@ -586,7 +586,7 @@ def simple(learner, goal): Parameters ---------- - learner : adaptive.BaseLearner + learner : ~`adaptive.BaseLearner` instance goal : callable The end condition for the calculation. This function must take the learner as its sole argument, and return True if we should stop. @@ -605,9 +605,10 @@ def replay_log(learner, log): Parameters ---------- - learner : learner.BaseLearner + learner : `~adaptive.BaseLearner` instance + New learner where the log will be applied. log : list - contains tuples: '(method_name, *args)'. + contains tuples: ``(method_name, *args)``. """ for method, *args in log: getattr(learner, method)(*args) diff --git a/docs/source/_static/logo.png b/docs/source/_static/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..ff7a0f4e8abb071626512f5eb9e9d9df245fba6f Binary files /dev/null and b/docs/source/_static/logo.png differ diff --git a/docs/source/conf.py b/docs/source/conf.py index 5a70aa0c5e81f5d33841b3d72a8360a05cebee0e..707b987bf0f4b0f0fcd5052a3f23046a845c5e09 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -43,6 +43,7 @@ release = adaptive.__version__ extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', + 'sphinx.ext.autosectionlabel', 'sphinx.ext.intersphinx', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', diff --git a/docs/source/rest_of_readme.rst b/docs/source/docs.rst similarity index 85% rename from docs/source/rest_of_readme.rst rename to docs/source/docs.rst index 2fcab204047d145169723a0cc74ffb384f3bd821..299cfc46a82cc30cf58e60f27d7f96c934ab7dd5 100644 --- a/docs/source/rest_of_readme.rst +++ b/docs/source/docs.rst @@ -19,9 +19,13 @@ The following learners are implemented: - `~adaptive.AverageLearner`, For stochastic functions where you want to average the result over many evaluations, - `~adaptive.IntegratorLearner`, for - when you want to intergrate a 1D function ``f: ℠→ â„``, + when you want to intergrate a 1D function ``f: ℠→ â„``. + +Meta-learners (to be used with other learners): + - `~adaptive.BalancingLearner`, for when you want to run several learners at once, - selecting the “best†one each time you get more points. + selecting the “best†one each time you get more points, +- `~adaptive.DataSaver`, for when your function doesn't just return a scalar or a vector. 
In addition to the learners, ``adaptive`` also provides primitives for running the sampling across several cores and even several machines, @@ -47,8 +51,6 @@ on the *Play* :fa:`play` button or move the sliders. adaptive.notebook_extension() %output holomap='scrubber' - - `adaptive.Learner1D` ~~~~~~~~~~~~~~~~~~~~ @@ -82,8 +84,6 @@ on the *Play* :fa:`play` button or move the sliders. (get_hm(uniform_loss).relabel('homogeneous samping') + get_hm(default_loss).relabel('with adaptive')) - - `adaptive.Learner2D` ~~~~~~~~~~~~~~~~~~~~ @@ -111,8 +111,6 @@ on the *Play* :fa:`play` button or move the sliders. plots = {n: plot(learner, n) for n in range(4, 1010, 20)} hv.HoloMap(plots, kdims=['npoints']).collate() - - `adaptive.AverageLearner` ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -134,15 +132,31 @@ on the *Play* :fa:`play` button or move the sliders. plots = {n: plot(learner, n) for n in range(10, 10000, 200)} hv.HoloMap(plots, kdims=['npoints']) +`adaptive.LearnerND` +~~~~~~~~~~~~~~~~~~~~ -see more in the :ref:`Tutorial Adaptive`. +.. jupyter-execute:: + :hide-code: + def sphere(xyz): + import numpy as np + x, y, z = xyz + a = 0.4 + return np.exp(-(x**2 + y**2 + z**2 - 0.75**2)**2/a**4) -.. include:: ../../README.rst - :start-after: not-in-documentation-end + learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)]) + adaptive.runner.simple(learner, lambda l: l.npoints == 3000) + + learner.plot_3D() +see more in the :ref:`Tutorial Adaptive`. -Authors -------- +.. include:: ../../README.rst + :start-after: not-in-documentation-end + :end-before: credits-end .. mdinclude:: ../../AUTHORS.md + +.. include:: ../../README.rst + :start-after: credits-end + :end-before: references-start diff --git a/docs/source/index.rst b/docs/source/index.rst index 42f0d3932fec75fbbb9cc4d2dddc9190c3472c13..c95748e14bc6c2b01fc9430be5eb51e5f76bf9f8 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -16,6 +16,6 @@ :maxdepth: 2 :hidden: - rest_of_readme + docs tutorial/tutorial reference/adaptive diff --git a/docs/source/reference/adaptive.learner.base_learner.rst b/docs/source/reference/adaptive.learner.base_learner.rst index 28ff6160661cc6dadb041c48ce9addddf05eb913..7a908ab5763cdfe96c150d221881b7ce2b91a9d2 100644 --- a/docs/source/reference/adaptive.learner.base_learner.rst +++ b/docs/source/reference/adaptive.learner.base_learner.rst @@ -1,4 +1,4 @@ -adaptive.learner.BaseLearner +adaptive.BaseLearner ============================ .. autoclass:: adaptive.learner.BaseLearner diff --git a/docs/source/reference/adaptive.learner.triangulation.rst b/docs/source/reference/adaptive.learner.triangulation.rst new file mode 100644 index 0000000000000000000000000000000000000000..8e4e4dfc2dd972c71c2ebedd1fb85a2f621d49d3 --- /dev/null +++ b/docs/source/reference/adaptive.learner.triangulation.rst @@ -0,0 +1,7 @@ +adaptive.learner.triangulation module +===================================== + +.. automodule:: adaptive.learner.triangulation + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/reference/adaptive.rst b/docs/source/reference/adaptive.rst index ab5049c9547f2f678c3b73f12cad949f585e6ced..54eeb9aaabfae7105ae136b6722a93cb49b93fee 100644 --- a/docs/source/reference/adaptive.rst +++ b/docs/source/reference/adaptive.rst @@ -7,6 +7,7 @@ Learners .. 
toctree:: adaptive.learner.average_learner +   adaptive.learner.base_learner adaptive.learner.balancing_learner adaptive.learner.data_saver adaptive.learner.integrator_learner @@ -22,6 +23,7 @@ Runners adaptive.runner.Runner adaptive.runner.AsyncRunner adaptive.runner.BlockingRunner +   adaptive.runner.BaseRunner adaptive.runner.extras Other diff --git a/docs/source/reference/adaptive.runner.extras.rst b/docs/source/reference/adaptive.runner.extras.rst index 00b809c584c55dd5f72b7bc3dd4a2a48f156b713..90786f4e8afa2007331289447354dd9b18529eb4 100644 --- a/docs/source/reference/adaptive.runner.extras.rst +++ b/docs/source/reference/adaptive.runner.extras.rst @@ -1,5 +1,5 @@ -adaptive.runner.simple -====================== +Runner extras +============= Simple executor --------------- @@ -9,7 +9,7 @@ Simple executor Sequential excecutor -------------------- -.. autofunction:: adaptive.runner.SequentialExecutor +.. autoclass:: adaptive.runner.SequentialExecutor Replay log diff --git a/docs/source/tutorial/tutorial.custom_loss.rst b/docs/source/tutorial/tutorial.custom_loss.rst index f19151f428796013a3e2c6e7c7d64c51a3e3e357..b1cca2951424a751d522f945ec5cfdd86a58e5f3 100644 --- a/docs/source/tutorial/tutorial.custom_loss.rst +++ b/docs/source/tutorial/tutorial.custom_loss.rst @@ -8,7 +8,7 @@ Custom adaptive logic for 1D and 2D .. seealso:: The complete source code of this tutorial can be found in -    :jupyter-download:notebook:`tutorial.custom-loss-function` +    :jupyter-download:notebook:`tutorial.custom-loss` .. jupyter-execute:: :hide-code: diff --git a/docs/source/tutorial/tutorial.rst b/docs/source/tutorial/tutorial.rst index 2afd001140696e18204acfad221e8549d493e695..e14760823dbe0338d99d20fc2739e5514edce1e7 100644 --- a/docs/source/tutorial/tutorial.rst +++ b/docs/source/tutorial/tutorial.rst @@ -22,6 +22,7 @@ on the following packages - ``bokeh`` - ``ipywidgets`` +We recommend starting with the :ref:`Tutorial `~adaptive.Learner1D``. .. note:: Because this documentation consists of static html, the ``live_plot``
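The hunks above document ``AverageLearner.mean``, ``AverageLearner.std``, and the ``npoints`` attribute. A minimal sketch (not part of the patch) of how these fit together, assuming the adaptive API as it appears in this changeset; the noise model and tolerance are arbitrary illustrative choices:

.. code:: python

    import random

    import adaptive

    def noisy(seed):
        # The AverageLearner asks for integer points, which double as seeds.
        random.seed(seed)
        return random.gauss(1.0, 0.1)

    learner = adaptive.AverageLearner(noisy, rtol=0.01)  # rtol value is arbitrary
    adaptive.runner.simple(learner, lambda l: l.npoints >= 1000)

    print(learner.npoints)  # number of evaluated points (documented above)
    print(learner.mean)     # the average of all values in ``data``
    print(learner.std)      # the corrected sample standard deviation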
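The new ``strategy`` property docstring in ``balancing_learner.py`` says the strategy can be changed while a simulation runs. A hedged sketch of that pattern; the child functions, offsets, and point budgets are made up:

.. code:: python

    import adaptive

    def make_f(offset):
        def f(x):
            return (x - offset) ** 2
        return f

    learners = [adaptive.Learner1D(make_f(o), bounds=(-1, 1)) for o in (0.0, 0.3)]
    balancer = adaptive.BalancingLearner(learners, strategy='loss_improvements')

    # Run for a while on 'loss_improvements', then rebalance by point count,
    # as the docstring above allows.
    adaptive.runner.simple(balancer, lambda b: sum(c.npoints for c in b.learners) >= 100)
    balancer.strategy = 'npoints'
    adaptive.runner.simple(balancer, lambda b: sum(c.npoints for c in b.learners) >= 200)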
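The ``copy_docstring_from`` decorators added in ``data_saver.py`` only copy documentation; the runtime behaviour of ``DataSaver`` is unchanged. A hypothetical usage sketch (the dictionary keys are invented):

.. code:: python

    import operator

    import adaptive

    def f(x):
        # A function that returns more than just the value to learn.
        return {'y': x ** 2, 'elapsed': 0.1}

    learner = adaptive.DataSaver(adaptive.Learner1D(f, bounds=(-1, 1)),
                                 arg_picker=operator.itemgetter('y'))

    # Attribute access is forwarded to the wrapped learner via __getattr__,
    # so ``npoints`` below comes from the underlying Learner1D.
    adaptive.runner.simple(learner, lambda l: l.npoints >= 50)
    print(len(learner.extra_data))  # the full result dicts are kept in extra_data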
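Finally, the reworked ``BaseLearner.save`` notes describe two ways of naming the files. A short sketch of both, assuming a writable working directory (the filename is arbitrary):

.. code:: python

    import adaptive

    def f(x):
        return x ** 2

    learner = adaptive.Learner1D(f, bounds=(-1, 1))
    adaptive.runner.simple(learner, lambda l: l.npoints >= 20)

    learner.save(fname='example.p')   # way 1: pass ``fname`` explicitly

    learner.fname = 'example.p'       # way 2: set the attribute once...
    learner.save()                    # ...then save (and later load) without it

    restored = adaptive.Learner1D(f, bounds=(-1, 1))
    restored.fname = 'example.p'
    restored.load()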