Commit 846a781a authored by Bas Nijholt

add another comparison figure

parent 56c110c9
Pipeline #21147 failed
%% Cell type:code id: tags:
```
import numpy as np
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
golden_mean = (np.sqrt(5) - 1) / 2  # Aesthetic ratio
fig_width_pt = 246.0  # Columnwidth
inches_per_pt = 1 / 72.27  # Convert pt to inches
fig_width = fig_width_pt * inches_per_pt
fig_height = fig_width * golden_mean  # Height in inches
fig_size = [fig_width, fig_height]
params = {
    "backend": "ps",
    "axes.labelsize": 13,
    "font.size": 13,
    "legend.fontsize": 10,
    "xtick.labelsize": 10,
    "ytick.labelsize": 10,
    "text.usetex": True,
    "figure.figsize": fig_size,
    "font.family": "serif",
    "font.serif": "Computer Modern Roman",
    "legend.frameon": True,
    "savefig.dpi": 300,
}
plt.rcParams.update(params)
plt.rc("text.latex", preamble=[r"\usepackage{xfrac}", r"\usepackage{siunitx}"]) plt.rc("text.latex", preamble=[r"\usepackage{xfrac}", r"\usepackage{siunitx}"])
``` ```
%% Cell type:markdown id: tags:
# Fig 1.
%% Cell type:code id: tags:
```
np.random.seed(1)
xs = np.array([0.1, 0.3, 0.35, 0.45])
f = lambda x: x**3
ys = f(xs)
means = lambda x: np.convolve(x, np.ones(2) / 2, mode="valid")
xs_means = means(xs)
ys_means = means(ys)
fig, ax = plt.subplots(figsize=fig_size)
ax.scatter(xs, ys, c="k")
ax.plot(xs, ys, c="k")
ax.annotate(
    text=r"$L_{1,2} = \sqrt{\Delta x^2 + \Delta y^2}$",  # `text`, not `s`, in matplotlib >= 3.3
    xy=(np.mean([xs[0], xs[1]]), np.mean([ys[0], ys[1]])),
    xytext=(xs[0] + 0.05, ys[0] - 0.05),
    arrowprops=dict(arrowstyle="->"),
    ha="center",
    zorder=10,
)
for i, (x, y) in enumerate(zip(xs, ys)):
    sign = [1, -1][i % 2]
    ax.annotate(
        text=fr"$x_{i+1}, y_{i+1}$",
        xy=(x, y),
        xytext=(x + 0.01, y + sign * 0.04),
        arrowprops=dict(arrowstyle="->"),
        ha="center",
    )
ax.scatter(xs, ys, c="green", s=5, zorder=5, label="existing data")
losses = np.hypot(xs[1:] - xs[:-1], ys[1:] - ys[:-1])
ax.scatter(xs_means, ys_means, c="red", s=300 * losses, zorder=8, label="candidate points")
xs_dense = np.linspace(xs[0], xs[-1], 400)
ax.plot(xs_dense, f(xs_dense), alpha=0.3, zorder=7, label="function")
ax.legend()
ax.axis("off")
plt.savefig("figures/loss_1D.pdf", bbox_inches="tight", transparent=True)
plt.show()
```
%% Cell type:markdown id: tags:
# Fig 2.
%% Cell type:code id: tags:
```
import adaptive

def f(x, offset=0.123):
    a = 0.02
    return x + a**2 / (a**2 + (x - offset)**2)

def g(x):
    return np.tanh(x * 40)

def h(x):
    return np.sin(100 * x) * np.exp(-x**2 / 0.1**2)

funcs = [
    dict(function=f, bounds=(-1, 1), title="peak"),
    dict(function=g, bounds=(-1, 1), title="tanh"),
    dict(function=h, bounds=(-0.3, 0.3), title="wave packet"),
]
fig, axs = plt.subplots(2, len(funcs), figsize=(fig_width, 1.5 * fig_height))
n_points = 50
for i, ax in enumerate(axs.T.flatten()):
    ax.xaxis.set_ticks([])
    ax.yaxis.set_ticks([])
    if i % 2 == 0:
        # homogeneous
        d = funcs[i // 2]
        xs = np.linspace(*d['bounds'], n_points)
        ys = d['function'](xs)
        ax.set_title(rf"\textrm{{{d['title']}}}")
    else:
        # adaptive
        d = funcs[(i - 1) // 2]
        loss = adaptive.learner.learner1D.curvature_loss_function()
        learner = adaptive.Learner1D(d['function'], bounds=d['bounds'], loss_per_interval=loss)
        adaptive.runner.simple(learner, goal=lambda l: l.npoints >= n_points)
        xs, ys = zip(*sorted(learner.data.items()))
    xs_dense = np.linspace(*d['bounds'], 1000)
    ax.plot(xs_dense, d['function'](xs_dense), c='red', alpha=0.3, lw=0.5)
    ax.scatter(xs, ys, s=0.5, c='k')
axs[0][0].set_ylabel(r'$\textrm{homogeneous}$')
axs[1][0].set_ylabel(r'$\textrm{adaptive}$')
plt.savefig("figures/adaptive_vs_grid.pdf", bbox_inches="tight", transparent=True)
```
Each candidate point has a loss $L$ indicated by the size of the red dots.
The candidate point with the largest loss will be chosen, which in this case is the one with $L_{1,2}$.
](figures/loss_1D.pdf){#fig:loss_1D}
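The selection rule in this caption is compact enough to write out. A minimal sketch, assuming the `xs` and `ys` arrays from the Fig 1 cell above (an illustration of the rule, not Adaptive's internal implementation):

```
# Pick the next point by distance-based loss (illustrative sketch).
losses = np.hypot(np.diff(xs), np.diff(ys))  # L_{i,i+1} for each interval
i_max = np.argmax(losses)                    # interval with the largest loss
x_new = (xs[i_max] + xs[i_max + 1]) / 2      # bisect that interval
```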
![Comparison of homogeneous sampling (top) with adaptive sampling (bottom) for different one-dimensional functions (red).
We see that when the function has a distinct feature---such as with the peak and tanh---adaptive sampling performs much better.
When the features are homogeneously spaced, such as with the wave packet, adaptive sampling is not as effective as in the other cases.](figures/adaptive_vs_grid.pdf){#fig:adaptive_vs_grid}
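To make "performs much better" concrete, the two strategies can be compared numerically. The sketch below reuses the peak function from the Fig 2 cell; the mean-absolute-error metric and linear interpolation are assumptions chosen for illustration, not the paper's benchmark:

```
# Compare homogeneous vs. adaptive sampling of the peak function.
# The MAE-on-a-dense-grid metric is an illustrative choice.
import numpy as np
import adaptive

def f(x, a=0.02, offset=0.123):
    return x + a**2 / (a**2 + (x - offset)**2)

xs_dense = np.linspace(-1, 1, 1000)

xs_hom = np.linspace(-1, 1, 50)  # 50 homogeneously spaced points
err_hom = np.mean(np.abs(np.interp(xs_dense, xs_hom, f(xs_hom)) - f(xs_dense)))

loss = adaptive.learner.learner1D.curvature_loss_function()
learner = adaptive.Learner1D(f, bounds=(-1, 1), loss_per_interval=loss)
adaptive.runner.simple(learner, goal=lambda l: l.npoints >= 50)
xs_ada, ys_ada = zip(*sorted(learner.data.items()))  # 50 adaptive points
err_ada = np.mean(np.abs(np.interp(xs_dense, xs_ada, ys_ada) - f(xs_dense)))
# err_ada is expected to come out well below err_hom
```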
#### We provide a reference implementation, the Adaptive package, and demonstrate its performance.
We provide a reference implementation, the open-source Python package called Adaptive[@Nijholt2019a], which has previously been used in several scientific publications[@vuik2018reproducing; @laeven2019enhanced; @bommer2019spin; @melo2019supercurrent].
It has algorithms for $f \colon \R^N \to \R^M$, where $N, M \in \mathbb{Z}^+$, but which work best when $N$ is small; integration in $\R$; and the averaging of stochastic functions.
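The interface for higher-dimensional functions mirrors the 1D cells above. A minimal sketch assuming the package's public API (the `ring` function is a hypothetical example chosen for illustration):

```
# 2D sketch: same learner/runner pattern as in 1D (illustrative function).
import numpy as np
import adaptive

def ring(xy):
    x, y = xy
    return x + np.exp(-(x**2 + y**2 - 0.75**2)**2 / 0.01)

learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.01)
```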