Quantum Tinkerer / adaptive-paper / Commits

Commit ca7a7cc5, authored 5 years ago by Bas Nijholt

add 2D line loss

Parent: 2baa6d68

Showing 1 changed file: figures.ipynb (+112 additions, −41 deletions)
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Adaptive paper figures"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## imports and function definitions"
]
},
{
"cell_type": "code",
"execution_count": null,
...
@@ -101,8 +115,9 @@
 "\n",
 "def phase_diagram(xy, fname):\n",
 "    ip, _ = phase_diagram_setup(fname)\n",
-"    return np.round(ip(xy))\n",
+"    zs = ip(xy)\n",
+"    return np.round(zs)\n",
+"    \n",
 "\n",
 "def density(x, eps=0):\n",
 "    e = [0.8, 0.2]\n",
...
@@ -127,14 +142,15 @@
 "\n",
 "\n",
 "funcs_2D = [\n",
-"    dict(function=ring, bounds=[(-1, 1), (-1, 1)], npoints=33),\n",
+"    dict(function=ring, bounds=[(-1, 1), (-1, 1)], npoints=33, title=\"ring\"),\n",
 "    dict(\n",
-"        function=phase_diagram,\n",
+"        function=functools.partial(phase_diagram, fname=\"phase_diagram.pickle\"),\n",
 "        bounds=phase_diagram_setup(\"phase_diagram.pickle\")[1],\n",
 "        npoints=100,\n",
 "        fname=\"phase_diagram.pickle\",\n",
 "        title=\"PD\",\n",
 "    ),\n",
-"    dict(function=level_crossing, bounds=[(-1, 1), (-3, 3)], npoints=50),\n",
+"    dict(function=level_crossing, bounds=[(-1, 1), (-3, 3)], npoints=50, title=\"LC\"),\n",
 "]"
 ]
 },
...
@@ -142,7 +158,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# loss_1D"
+"## Interval and loss"
 ]
 },
 {
...
@@ -200,7 +216,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# adaptive_vs_grid"
+"## Learner1D vs grid"
 ]
 },
 {
...
@@ -245,7 +261,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# adaptive_2D"
+"## Learner2D vs grid"
 ]
 },
 {
...
@@ -337,7 +353,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# Algo explaination"
+"## Algorithm explanation: Learner1D step-by-step"
 ]
 },
 {
...
@@ -494,7 +510,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# Line loss"
+"## Line loss visualization"
 ]
 },
 {
...
@@ -573,7 +589,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# Line loss error"
+"## Line loss L1-error (N)"
 ]
 },
 {
...
@@ -582,52 +598,104 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import scipy\n",
+"from adaptive import Learner1D, LearnerND\n",
 "from scipy import interpolate\n",
 "\n",
 "\n",
-"def err(xs, ys, xs_rand):\n",
-"    ip = scipy.interpolate.interp1d(xs, ys)\n",
+"def err_1D(xs, ys, xs_rand, f):\n",
+"    ip = interpolate.interp1d(xs, ys)\n",
 "    abserr = np.abs(ip(xs_rand) - f(xs_rand))\n",
 "    return np.average(abserr ** 2) ** 0.5\n",
 "\n",
 "\n",
-"def get_err(learner, N, bounds):\n",
-"    xs_rand = np.random.uniform(*bounds, size=100_000)\n",
+"def get_err_1D(learner, N):\n",
+"    xs_rand = np.random.uniform(*learner.bounds, size=100_000)\n",
 "\n",
-"    xs = np.linspace(*bounds, N)\n",
+"    xs = np.linspace(*learner.bounds, N)\n",
 "    ys = f(xs)\n",
-"    err_lin = err(xs, ys, xs_rand)\n",
+"    err_lin = err_1D(xs, ys, xs_rand, learner.function)\n",
 "\n",
 "    xs, ys = zip(*learner.data.items())\n",
 "    xs, ys = xs[:N], ys[:N]\n",
-"    ip = scipy.interpolate.interp1d(xs, ys)\n",
-"    err_learner = err(xs, ys, xs_rand)\n",
+"    ip = interpolate.interp1d(xs, ys)\n",
+"    err_learner = err_1D(xs, ys, xs_rand, learner.function)\n",
 "    return err_lin, err_learner\n",
 "\n",
-"N_max = 10000\n",
-"Ns = np.geomspace(4, N_max, 100).astype(int)\n",
-"loss = adaptive.learner.learner1D.curvature_loss_function()\n",
-"\n",
-"fig, ax = plt.subplots(1, 1, figsize=(fig_width, fig_height))\n",
-"\n",
-"for i, d in enumerate(funcs_1D):\n",
-"    f = d[\"function\"]\n",
-"    bounds = d[\"bounds\"]\n",
-"    learner = adaptive.Learner1D(f, bounds, loss_per_interval=loss)\n",
-"    adaptive.runner.simple(learner, goal=lambda l: l.npoints >= N_max)\n",
-"    errs = [get_err(learner, N, bounds) for N in Ns]\n",
-"    err_hom, err_adaptive = zip(*errs)\n",
-"    color = f\"C{i}\"\n",
-"    label = \"abc\"[i]\n",
-"    ax.loglog(Ns, err_hom, ls=\"--\", c=color)\n",
-"    ax.loglog(Ns, err_adaptive, label=f\"$\\mathrm{{({label})}}$ {d['title']}\", c=color)\n",
-"\n",
-"    d[\"err_hom\"] = err_hom\n",
-"    d[\"err_adaptive\"] = err_adaptive\n",
+"def err_2D(zs, zs_rand):\n",
+"    abserr = np.abs(zs - zs_rand)\n",
+"    return np.average(abserr ** 2) ** 0.5\n",
+"\n",
+"def get_err_2D(learner, N):\n",
+"    xys_rand = np.vstack(\n",
+"        [\n",
+"            np.random.uniform(*learner.bounds[0], size=int(100_000**0.5)),\n",
+"            np.random.uniform(*learner.bounds[1], size=int(100_000**0.5)),\n",
+"        ]\n",
+"    )\n",
+"\n",
+"    xys_hom = np.array(\n",
+"        [\n",
+"            (x, y)\n",
+"            for x in np.linspace(*learner.bounds[0], int(N**0.5))\n",
+"            for y in np.linspace(*learner.bounds[1], int(N**0.5))\n",
+"        ]\n",
+"    )\n",
+"\n",
+"    try:\n",
+"        # Vectorized\n",
+"        zs_hom = f(xys_hom.T)\n",
+"        zs_rand = f(xys_rand)\n",
+"    except:\n",
+"        # Non-vectorized\n",
+"        zs_hom = np.array([f(xy) for xy in xys_hom])\n",
+"        zs_rand = np.array([f(xy) for xy in xys_rand.T])\n",
+"\n",
+"    ip = interpolate.LinearNDInterpolator(xys_hom, zs_hom)\n",
+"    zs = ip(xys_rand.T)\n",
+"    err_lin = err_2D(zs, zs_rand)\n",
+"\n",
+"    ip = interpolate.LinearNDInterpolator(learner.points[:len(xys_hom)], learner.values[:len(xys_hom)])\n",
+"    zs = ip(xys_rand.T)\n",
+"    err_learner = err_2D(zs, zs_rand)\n",
+"\n",
+"    return err_lin, err_learner\n",
+"\n",
+"N_max = 10000\n",
+"Ns = np.geomspace(4, N_max, 20).astype(int)\n",
+"\n",
+"fig, axs = plt.subplots(2, 1, figsize=(fig_width, 1.6 * fig_height))\n",
+"\n",
+"loss_1D = adaptive.learner.learner1D.curvature_loss_function()\n",
+"loss_2D = adaptive.learner.learnerND.curvature_loss_function()\n",
+"\n",
+"for ax, funcs, loss, Learner, get_err in zip(\n",
+"    axs,\n",
+"    [funcs_1D, funcs_2D],\n",
+"    [loss_1D, loss_2D],\n",
+"    [Learner1D, LearnerND],\n",
+"    [get_err_1D, get_err_2D],\n",
+"):\n",
+"    ax.set_xlabel(\"$N$\")\n",
+"    ax.set_ylabel(r\"$\\text{Err}_{1}(\\tilde{f})$\")\n",
+"\n",
+"    for i, d in enumerate(funcs):\n",
+"        f = d[\"function\"]\n",
+"        bounds = d[\"bounds\"]\n",
+"        learner = Learner(f, bounds, loss)\n",
+"        adaptive.runner.simple(learner, goal=lambda l: l.npoints >= N_max)\n",
+"        errs = [get_err(learner, N) for N in Ns]\n",
+"        err_hom, err_adaptive = zip(*errs)\n",
+"        color = f\"C{i}\"\n",
+"        label = \"abc\"[i]\n",
+"        ax.loglog(Ns, err_hom, ls=\"--\", c=color)\n",
+"        ax.loglog(\n",
+"            Ns, err_adaptive, label=f\"$\\mathrm{{({label})}}$ {d['title']}\", c=color\n",
+"        )\n",
+"\n",
+"        d[\"err_hom\"] = err_hom\n",
+"        d[\"err_adaptive\"] = err_adaptive\n",
 "\n",
 "plt.legend()\n",
-"ax.set_xlabel(\"$N$\")\n",
-"ax.set_ylabel(r\"$\\text{Err}_{1}(\\tilde{f})$\")\n",
 "plt.savefig(\"figures/line_loss_error.pdf\", bbox_inches=\"tight\", transparent=True)\n",
 "plt.show()"
 ]
...
@@ -640,6 +708,9 @@
 "source": [
 "# Error reduction\n",
 "for d in funcs_1D:\n",
-"    print(d[\"err_hom\"][-1] / d[\"err_adaptive\"][-1])"
+"    print(d[\"err_hom\"][-1] / d[\"err_adaptive\"][-1])\n",
+"\n",
+"for d in funcs_2D:\n",
+"    print(d[\"err_hom\"][-1] / d[\"err_adaptive\"][-1])"
 ]
 },
...
@@ -647,7 +718,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# Iso surface"
+"## Iso-line plots"
 ]
 },
 {
...
%% Cell type:markdown id: tags:
# Adaptive paper figures
%% Cell type:markdown id: tags:
## Imports and function definitions
%% Cell type:code id: tags:
```
import numpy as np
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
%matplotlib inline
# %config InlineBackend.figure_format = 'svg'
golden_mean = (np.sqrt(5) - 1) / 2 # Aesthetic ratio
fig_width_pt = 246.0 # Columnwidth
inches_per_pt = 1 / 72.27 # Convert pt to inches
fig_width = fig_width_pt * inches_per_pt
fig_height = fig_width * golden_mean # height in inches
fig_size = [fig_width, fig_height]
params = {
"backend": "ps",
"axes.labelsize": 13,
"font.size": 13,
"legend.fontsize": 10,
"xtick.labelsize": 10,
"ytick.labelsize": 10,
"text.usetex": True,
"figure.figsize": fig_size,
"font.family": "serif",
"font.serif": "Computer Modern Roman",
"legend.frameon": True,
"savefig.dpi": 300,
}
plt.rcParams.update(params)
plt.rc("text.latex", preamble=[r"\usepackage{xfrac}", r"\usepackage{siunitx}"])
import adaptive
from scipy import interpolate
import functools
import itertools
import holoviews.plotting.mpl
import time
import matplotlib.tri as mtri
```
%% Cell type:code id: tags:
```
# 1D funcs
def f(x, offset=0.123):
a = 0.02
return x + a ** 2 / (a ** 2 + (x - offset) ** 2)
def g(x):
return np.tanh(x * 40)
def h(x):
return np.sin(100 * x) * np.exp(-x ** 2 / 0.1 ** 2)
funcs_1D = [
dict(function=f, bounds=(-1, 1), title="peak"),
dict(function=g, bounds=(-1, 1), title="tanh"),
dict(function=h, bounds=(-0.3, 0.3), title="wave packet"),
]
# 2D funcs
def ring(xy, offset=0.123):
a = 0.2
x, y = xy
return x + np.exp(-(x ** 2 + y ** 2 - 0.75 ** 2) ** 2 / a ** 4)
@functools.lru_cache()
def phase_diagram_setup(fname):
data = adaptive.utils.load(fname)
points = np.array(list(data.keys()))
values = np.array(list(data.values()), dtype=float)
bounds = [
(points[:, 0].min(), points[:, 0].max()),
(points[:, 1].min(), points[:, 1].max()),
]
ll, ur = np.reshape(bounds, (2, 2)).T
inds = np.all(np.logical_and(ll <= points, points <= ur), axis=1)
points, values = points[inds], values[inds].reshape(-1, 1)
return interpolate.LinearNDInterpolator(points, values), bounds
def phase_diagram(xy, fname):
ip, _ = phase_diagram_setup(fname)
    zs = ip(xy)
    return np.round(zs)
def density(x, eps=0):
e = [0.8, 0.2]
delta = [0.5, 0.5, 0.5]
c = 3
omega = [0.02, 0.05]
H = np.array(
[
[e[0] + 1j * omega[0], delta[0], delta[1]],
[delta[0], e[1] + c * x + 1j * omega[1], delta[1]],
[delta[1], delta[2], e[1] - c * x + 1j * omega[1]],
]
)
H += np.eye(3) * eps
return np.trace(np.linalg.inv(H)).imag
def level_crossing(xy):
x, y = xy
return density(x, y) + y
funcs_2D = [
    dict(function=ring, bounds=[(-1, 1), (-1, 1)], npoints=33, title="ring"),
    dict(
        function=functools.partial(phase_diagram, fname="phase_diagram.pickle"),
        bounds=phase_diagram_setup("phase_diagram.pickle")[1],
        npoints=100,
        fname="phase_diagram.pickle",
        title="PD",
    ),
    dict(function=level_crossing, bounds=[(-1, 1), (-3, 3)], npoints=50, title="LC"),
]
```
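%% Cell type:markdown id: tags:
A note on the `functools.partial` change above: the learners call `function(xy)` with a single argument, while `phase_diagram` also needs `fname`, so the filename is baked in with a partial. A minimal sketch of the equivalence (assuming `phase_diagram.pickle` exists next to the notebook):
%% Cell type:code id: tags:
```
import functools

# f_pd(xy) is equivalent to phase_diagram(xy, fname="phase_diagram.pickle"),
# so it can be passed anywhere a one-argument function is expected.
f_pd = functools.partial(phase_diagram, fname="phase_diagram.pickle")
```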
%% Cell type:markdown id: tags:
## Interval and loss
%% Cell type:code id: tags:
```
np.random.seed(1)
xs = np.array([0.1, 0.3, 0.35, 0.45])
f = lambda x: x ** 3
ys = f(xs)
means = lambda x: np.convolve(x, np.ones(2) / 2, mode="valid")
xs_means = means(xs)
ys_means = means(ys)
fig, ax = plt.subplots(figsize=fig_size)
ax.scatter(xs, ys, c="k")
ax.plot(xs, ys, c="k")
ax.annotate(
s=r"$L_{1,2} = \sqrt{\Delta x^2 + \Delta y^2}$",
xy=(np.mean([xs[0], xs[1]]), np.mean([ys[0], ys[1]])),
xytext=(xs[0] + 0.05, ys[0] - 0.05),
arrowprops=dict(arrowstyle="->"),
ha="center",
zorder=10,
)
for i, (x, y) in enumerate(zip(xs, ys)):
sign = [1, -1][i % 2]
ax.annotate(
s=fr"$x_{i+1}, y_{i+1}$",
xy=(x, y),
xytext=(x + 0.01, y + sign * 0.04),
arrowprops=dict(arrowstyle="->"),
ha="center",
)
ax.scatter(xs, ys, c="green", s=5, zorder=5, label="existing data")
losses = np.hypot(xs[1:] - xs[:-1], ys[1:] - ys[:-1])
ax.scatter(
xs_means, ys_means, c="red", s=300 * losses, zorder=8, label="candidate points"
)
xs_dense = np.linspace(xs[0], xs[-1], 400)
ax.plot(xs_dense, f(xs_dense), alpha=0.3, zorder=7, label="function")
ax.legend()
ax.axis("off")
plt.savefig("figures/loss_1D.pdf", bbox_inches="tight", transparent=True)
plt.show()
```
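%% Cell type:markdown id: tags:
The red candidate points above are sized by the distance loss of their interval. As a standalone function this is, in sketch form (adaptive's actual `default_loss` additionally rescales x and y before taking the length):
%% Cell type:code id: tags:
```
import numpy as np

def distance_loss(xs, ys):
    # Euclidean length of the segment spanning one interval,
    # i.e. L = sqrt(dx**2 + dy**2), as annotated in the figure.
    (x_left, x_right), (y_left, y_right) = xs, ys
    return np.hypot(x_right - x_left, y_right - y_left)
```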
%% Cell type:markdown id: tags:
## Learner1D vs grid
%% Cell type:code id: tags:
```
fig, axs = plt.subplots(2, len(funcs_1D), figsize=(fig_width, 1.5 * fig_height))
n_points = 50
for i, ax in enumerate(axs.T.flatten()):
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
kind = "homogeneous" if i % 2 == 0 else "adaptive"
index = i // 2 if kind == "homogeneous" else (i - 1) // 2
d = funcs_1D[index]
bounds = d["bounds"]
f = d["function"]
if kind == "homogeneous":
xs = np.linspace(*bounds, n_points)
ys = f(xs)
ax.set_title(rf"\textrm{{{d['title']}}}")
elif kind == "adaptive":
loss = adaptive.learner.learner1D.curvature_loss_function()
learner = adaptive.Learner1D(
f, bounds=bounds, loss_per_interval=loss
)
adaptive.runner.simple(learner, goal=lambda l: l.npoints >= n_points)
xs, ys = zip(*sorted(learner.data.items()))
xs_dense = np.linspace(*bounds, 1000)
ax.plot(xs_dense, f(xs_dense), c="red", alpha=0.3, lw=0.5)
ax.scatter(xs, ys, s=0.5, c="k")
axs[0][0].set_ylabel(r"$\textrm{homogeneous}$")
axs[1][0].set_ylabel(r"$\textrm{adaptive}$")
plt.savefig("figures/Learner1D.pdf", bbox_inches="tight", transparent=True)
```
%% Cell type:markdown id: tags:
## Learner2D vs grid
%% Cell type:code id: tags:
```
fig, axs = plt.subplots(2, len(funcs_2D), figsize=(fig_width, 1.5 * fig_height))
plt.subplots_adjust(hspace=-0.1, wspace=0.1)
with_tri = False
for i, ax in enumerate(axs.T.flatten()):
label = "abcdef"[i]
ax.text(
0.5,
1.05,
f"$\mathrm{{({label})}}$",
transform=ax.transAxes,
horizontalalignment="center",
verticalalignment="bottom",
)
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
kind = "homogeneous" if i % 2 == 0 else "adaptive"
d = funcs_2D[i // 2] if kind == "homogeneous" else funcs_2D[(i - 1) // 2]
bounds = d["bounds"]
npoints = d["npoints"]
f = d["function"]
fname = d.get("fname")
if fname is not None:
f = functools.partial(f, fname=fname)
if kind == "homogeneous":
xs, ys = [np.linspace(*bound, npoints) for bound in bounds]
data = {xy: f(xy) for xy in itertools.product(xs, ys)}
learner = adaptive.Learner2D(f, bounds=bounds)
learner.data = data
d["learner_hom"] = learner
elif kind == "adaptive":
learner = adaptive.Learner2D(f, bounds=bounds)
if fname is not None:
learner.load(fname)
learner.data = {
k: v for i, (k, v) in enumerate(learner.data.items()) if i <= npoints ** 2
}
adaptive.runner.simple(learner, goal=lambda l: l.npoints >= npoints ** 2)
d["learner"] = learner
if with_tri:
tri = learner.ip().tri
triang = mtri.Triangulation(*tri.points.T, triangles=tri.vertices)
ax.triplot(triang, c="w", lw=0.2, alpha=0.8)
values = np.array(list(learner.data.values()))
ax.imshow(
learner.plot(npoints if kind == "homogeneous" else None).Image.I.data,
extent=(-0.5, 0.5, -0.5, 0.5),
interpolation="none",
)
ax.set_xticks([])
ax.set_yticks([])
axs[0][0].set_ylabel(r"$\textrm{homogeneous}$")
axs[1][0].set_ylabel(r"$\textrm{adaptive}$")
plt.savefig("figures/Learner2D.pdf", bbox_inches="tight", transparent=True)
```
%% Cell type:code id: tags:
```
learner = adaptive.Learner1D(funcs_1D[0]['function'], bounds=funcs_1D[0]['bounds'])
times = []
for i in range(10000):
t_start = time.time()
points, _ = learner.ask(1)
t_end = time.time()
times.append(t_end - t_start)
learner.tell(points[0], learner.function(points[0]))
plt.plot(np.cumsum(times))
```
%% Cell type:markdown id: tags:
## Algorithm explanation: Learner1D step-by-step
%% Cell type:code id: tags:
```
fig, axs = plt.subplots(1, 1, figsize=(fig_width, 3 * fig_height))
def f(x, offset=0.123):
a = 0.2
return a ** 2 / (a ** 2 + (x - offset) ** 2)
learner = adaptive.Learner1D(
f, (0, 2), loss_per_interval=adaptive.learner.learner1D.curvature_loss_function()
)
learner._recompute_losses_factor = 0.1
xs_dense = np.linspace(*learner.bounds, 400)
ys_dense = f(xs_dense)
step = 0.4
for i in range(11):
offset = -i * step
x = learner.ask(1)[0][0]
y = f(x)
learner.tell(x, y)
xs, ys = map(np.array, zip(*sorted(learner.data.items())))
ys = ys + offset
if i >= 1:
axs.plot(xs_dense, ys_dense + offset, c="k", alpha=0.3, zorder=0)
axs.plot(xs, ys, zorder=1, c="k")
axs.scatter(xs, ys, alpha=1, zorder=2, c="k")
(x_left, x_right), loss = list(learner.losses.items())[
0
] # it's a ItemSortedDict
(y_left, y_right) = [
learner.data[x_left] + offset,
learner.data[x_right] + offset,
]
axs.scatter([x_left, x_right], [y_left, y_right], c="r", s=10, zorder=3)
x_mid = np.mean((x_left, x_right))
y_mid = np.interp(x_mid, (x_left, x_right), (y_left, y_right))
axs.scatter(x_mid, y_mid, zorder=4, marker="x", c="green")
axs.text(
-0.1,
0.5,
(r"$\mathrm{iterations}$" + "\n" + "$\longleftarrow$"),
transform=axs.transAxes,
horizontalalignment="center",
verticalalignment="center",
rotation=90,
fontsize=18,
)
# legend
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
class LargestInterval:
pass
class Interval:
pass
class Function:
pass
class IntervalHandler:
def __init__(self, with_inner=True, length=20, *args, **kwargs):
super().__init__(*args, **kwargs)
self.with_inner = with_inner
self.length = length
def legend_artist(self, legend, orig_handle, fontsize, handlebox):
x0, y0 = handlebox.xdescent, handlebox.ydescent
offsets = [0, self.length]
line = mlines.Line2D((0, offsets[-1]), (0, 0), zorder=0, c="k")
handlebox.add_artist(line)
for offset in offsets:
circle1 = mpatches.Circle(
[x0 + offset, y0], 4, facecolor="k", lw=3, zorder=1
)
handlebox.add_artist(circle1)
if self.with_inner:
circle2 = mpatches.Circle(
[x0 + offset, y0], 3, facecolor="red", lw=3, zorder=1
)
handlebox.add_artist(circle2)
class FunctionHandler:
def __init__(self, xs, ys, *args, **kwargs):
super().__init__(*args, **kwargs)
self.xs = xs / xs.ptp() * 20
self.ys = ys - ys.mean()
def legend_artist(self, legend, orig_handle, fontsize, handlebox):
x0, y0 = handlebox.xdescent, handlebox.ydescent
line = mlines.Line2D(self.xs, self.ys * 10, zorder=0, c="k", alpha=0.3)
handlebox.add_artist(line)
plt.legend(
[
Function(),
mlines.Line2D([], [], marker="o", lw=0, c="k"),
LargestInterval(),
Interval(),
mlines.Line2D([], [], marker="x", lw=0, c="green"),
],
[
"original function",
"known point",
"interval",
"largest loss interval",
"next candidate point",
],
handler_map={
LargestInterval: IntervalHandler(False),
Interval: IntervalHandler(True),
Function: FunctionHandler(xs, ys),
},
bbox_to_anchor=(0.25, 0.9, 1.0, 0.0),
ncol=1,
)
# On grid, uncomment for homogeneous plot
# axs.plot(learner.bounds, [-(i + 0.5) * step, -(i + 0.5) * step], c='k', ls='--')
# xs_hom = np.linspace(*learner.bounds, i)
# ys_hom = f(xs_hom) - (i + 3) * step
# axs.plot(xs_hom, ys_hom, zorder=1, c="k")
# axs.scatter(xs_hom, ys_hom, alpha=1, zorder=2, c="k")
axs.axis("off")
plt.savefig("figures/algo.pdf", bbox_inches="tight", transparent=True)
plt.show()
```
%% Cell type:markdown id: tags:
## Line loss visualization
%% Cell type:code id: tags:
```
from matplotlib.patches import Polygon
fig, axs = plt.subplots(5, 1, figsize=(fig_width, 1.5 * fig_height))
f = lambda x: np.sin(x)**2
xs = np.array([0, 1.3, 3, 5, 7, 8])
ys = f(xs)
def plot(xs, ax):
ys = f(xs)
xs_dense = np.linspace(xs[0], xs[-1], 300)
ys_dense = f(xs_dense)
ax.plot(xs_dense, ys_dense, alpha=0.3, c="k")
ax.plot(xs, ys, c="k")
ax.scatter(xs, ys, zorder=10, s=14, c="k")
plot(xs, axs[0])
plot(xs, axs[1])
for i, ax in enumerate(axs):
ax.axis("off")
ax.set_ylim(-1.5, 1.5)
label = "abcde"[i]
ax.text(
0.5,
0.9,
f"$\mathrm{{({label})}}$",
transform=ax.transAxes,
horizontalalignment="center",
verticalalignment="bottom",
)
def plot_tri(xs, ax):
ys = f(xs)
for i in range(len(xs)):
if i == 0 or i == len(xs) - 1:
continue
color = f"C{i}"
verts = [(xs[i - 1], ys[i - 1]), (xs[i], ys[i]), (xs[i + 1], ys[i + 1])]
poly = Polygon(verts, facecolor=color, alpha=0.4)
ax.add_patch(poly)
ax.scatter([xs[i]], [ys[i]], c=color, s=6, zorder=11)
ax.plot(
[xs[i - 1], xs[i + 1]], [ys[i - 1], ys[i + 1]], c="k", ls="--", alpha=0.3
)
plot_tri(xs, axs[1])
for i in [2, 3, 4]:
ax = axs[i]
learner = adaptive.Learner1D(
f,
(xs[0], xs[-1]),
loss_per_interval=adaptive.learner.learner1D.curvature_loss_function(),
)
learner.tell_many(xs, ys)
x_new = learner.ask(1)[0][0]
learner.tell(x_new, f(x_new))
xs, ys = zip(*sorted(learner.data.items()))
plot(xs, ax)
plot_tri(xs, ax)
plt.savefig("figures/line_loss.pdf", bbox_inches="tight", transparent=True)
plt.show()
```
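%% Cell type:markdown id: tags:
The shaded triangles in panels (b)-(e) illustrate what the curvature loss measures: every interior point spans a triangle with its two neighbours, and the larger its area, the more the piecewise-linear interpolant deviates from the dashed chord. A minimal sketch of that area term (illustrative only; adaptive's `curvature_loss_function` combines such a volume term with the distance loss and per-axis rescaling):
%% Cell type:code id: tags:
```
def triangle_area(p_left, p_mid, p_right):
    # Half the absolute cross product of the two edge vectors.
    (x0, y0), (x1, y1), (x2, y2) = p_left, p_mid, p_right
    return 0.5 * abs((x1 - x0) * (y2 - y0) - (x2 - x0) * (y1 - y0))
```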
%% Cell type:markdown id: tags:
## Line loss L1-error (N)
%% Cell type:code id: tags:
```
from adaptive import Learner1D, LearnerND
from scipy import interpolate


def err_1D(xs, ys, xs_rand, f):
    ip = interpolate.interp1d(xs, ys)
    abserr = np.abs(ip(xs_rand) - f(xs_rand))
    return np.average(abserr ** 2) ** 0.5


def get_err_1D(learner, N):
    xs_rand = np.random.uniform(*learner.bounds, size=100_000)

    # Homogeneous reference: N equidistant points through the same function.
    xs = np.linspace(*learner.bounds, N)
    ys = learner.function(xs)
    err_lin = err_1D(xs, ys, xs_rand, learner.function)

    # Adaptive: the first N points the learner chose.
    xs, ys = zip(*learner.data.items())
    xs, ys = xs[:N], ys[:N]
    err_learner = err_1D(xs, ys, xs_rand, learner.function)
    return err_lin, err_learner


def err_2D(zs, zs_rand):
    abserr = np.abs(zs - zs_rand)
    return np.average(abserr ** 2) ** 0.5


def get_err_2D(learner, N):
    f = learner.function
    xys_rand = np.vstack(
        [
            np.random.uniform(*learner.bounds[0], size=int(100_000 ** 0.5)),
            np.random.uniform(*learner.bounds[1], size=int(100_000 ** 0.5)),
        ]
    )

    xys_hom = np.array(
        [
            (x, y)
            for x in np.linspace(*learner.bounds[0], int(N ** 0.5))
            for y in np.linspace(*learner.bounds[1], int(N ** 0.5))
        ]
    )

    try:
        # Vectorized evaluation
        zs_hom = f(xys_hom.T)
        zs_rand = f(xys_rand)
    except Exception:
        # Non-vectorized fallback
        zs_hom = np.array([f(xy) for xy in xys_hom])
        zs_rand = np.array([f(xy) for xy in xys_rand.T])

    ip = interpolate.LinearNDInterpolator(xys_hom, zs_hom)
    zs = ip(xys_rand.T)
    err_lin = err_2D(zs, zs_rand)

    ip = interpolate.LinearNDInterpolator(
        learner.points[: len(xys_hom)], learner.values[: len(xys_hom)]
    )
    zs = ip(xys_rand.T)
    err_learner = err_2D(zs, zs_rand)

    return err_lin, err_learner


N_max = 10000
Ns = np.geomspace(4, N_max, 20).astype(int)

fig, axs = plt.subplots(2, 1, figsize=(fig_width, 1.6 * fig_height))

loss_1D = adaptive.learner.learner1D.curvature_loss_function()
loss_2D = adaptive.learner.learnerND.curvature_loss_function()

for ax, funcs, loss, Learner, get_err in zip(
    axs,
    [funcs_1D, funcs_2D],
    [loss_1D, loss_2D],
    [Learner1D, LearnerND],
    [get_err_1D, get_err_2D],
):
    ax.set_xlabel("$N$")
    ax.set_ylabel(r"$\text{Err}_{1}(\tilde{f})$")

    for i, d in enumerate(funcs):
        f = d["function"]
        bounds = d["bounds"]
        learner = Learner(f, bounds, loss)
        adaptive.runner.simple(learner, goal=lambda l: l.npoints >= N_max)
        errs = [get_err(learner, N) for N in Ns]
        err_hom, err_adaptive = zip(*errs)
        color = f"C{i}"
        label = "abc"[i]
        ax.loglog(Ns, err_hom, ls="--", c=color)
        ax.loglog(
            Ns, err_adaptive, label=rf"$\mathrm{{({label})}}$ {d['title']}", c=color
        )

        d["err_hom"] = err_hom
        d["err_adaptive"] = err_adaptive

plt.legend()
plt.savefig("figures/line_loss_error.pdf", bbox_inches="tight", transparent=True)
plt.show()
```
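%% Cell type:markdown id: tags:
For reference, `err_1D` and `err_2D` estimate the same quantity: the root-mean-square error of the piecewise-linear interpolant $\tilde{f}$, evaluated by Monte Carlo over $M$ points drawn uniformly from the bounds,

$$\mathrm{Err}(\tilde{f}) = \left( \frac{1}{M} \sum_{k=1}^{M} \left| \tilde{f}(x_k) - f(x_k) \right|^2 \right)^{1/2},$$

computed once for the homogeneous grid and once for the first $N$ adaptively chosen points.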
%% Cell type:code id: tags:
```
# Error reduction
for d in funcs_1D:
    print(d["err_hom"][-1] / d["err_adaptive"][-1])

for d in funcs_2D:
    print(d["err_hom"][-1] / d["err_adaptive"][-1])
```
%% Cell type:markdown id: tags:
## Iso-line plots
%% Cell type:code id: tags:
```
from functools import lru_cache
import numpy as np
import scipy.linalg
import scipy.spatial
import kwant
@lru_cache()
def create_syst(unit_cell):
lat = kwant.lattice.Polyatomic(unit_cell, [(0, 0, 0)])
syst = kwant.Builder(kwant.TranslationalSymmetry(*lat.prim_vecs))
syst[lat.shape(lambda _: True, (0, 0, 0))] = 6
syst[lat.neighbors()] = -1
return kwant.wraparound.wraparound(syst).finalized()
def get_brillouin_zone(unit_cell):
syst = create_syst(unit_cell)
A = get_A(syst)
neighbours = kwant.linalg.lll.voronoi(A)
lattice_points = np.concatenate(([[0, 0, 0]], neighbours))
lattice_points = 2 * np.pi * (lattice_points @ A.T)
vor = scipy.spatial.Voronoi(lattice_points)
brillouin_zone = vor.vertices[vor.regions[vor.point_region[0]]]
return scipy.spatial.ConvexHull(brillouin_zone)
def momentum_to_lattice(k, syst):
A = get_A(syst)
k, residuals = scipy.linalg.lstsq(A, k)[:2]
if np.any(abs(residuals) > 1e-7):
raise RuntimeError(
"Requested momentum doesn't correspond to any lattice momentum."
)
return k
def get_A(syst):
B = np.asarray(syst._wrapped_symmetry.periods).T
return np.linalg.pinv(B).T
def energies(k, unit_cell):
syst = create_syst(unit_cell)
k_x, k_y, k_z = momentum_to_lattice(k, syst)
params = {"k_x": k_x, "k_y": k_y, "k_z": k_z}
H = syst.hamiltonian_submatrix(params=params)
energies = np.linalg.eigvalsh(H)
return min(energies)
from functools import partial
from ipywidgets import interact_manual
# Define the lattice vectors of some common unit cells
lattices = dict(
    hexagonal=((0, 1, 0), (np.cos(-np.pi / 6), np.sin(-np.pi / 6), 0), (0, 0, 1)),
    simple_cubic=((1, 0, 0), (0, 1, 0), (0, 0, 1)),
    fcc=((0, 0.5, 0.5), (0.5, 0.5, 0), (0.5, 0, 0.5)),
    bcc=((-0.5, 0.5, 0.5), (0.5, -0.5, 0.5), (0.5, 0.5, -0.5)),
)
def isoline_loss_function(y_iso, sigma, priority=1):
from adaptive.learner.learnerND import default_loss
def gaussian(x, mu, sigma):
return np.exp(-(x - mu) ** 2 / sigma ** 2)
def loss(simplex, ys):
distance = np.mean([abs(y_iso - y) for y in ys])
return priority * gaussian(distance, 0, sigma) + default_loss(simplex, ys)
return loss
loss = isoline_loss_function(y_iso=0.1, sigma=1, priority=0.0)
learners = []
fnames = []
for name, unit_cell in lattices.items():
hull = get_brillouin_zone(unit_cell)
learner = adaptive.LearnerND(
partial(energies, unit_cell=unit_cell), hull, loss_per_simplex=loss
)
fnames.append(name)
learners.append(learner)
mapping = dict(zip(fnames, learners))
learner = adaptive.BalancingLearner(learners, strategy="npoints")
def select(name, learners, fnames):
return learners[fnames.index(name)]
def iso(unit_cell, level=8.5):
l = select(unit_cell, learners, fnames)
adaptive.runner.simple(l, goal=lambda l: l.npoints > 1000)
return l.plot_isosurface(level=level)
interact_manual(iso, level=(-6, 9, 0.1), unit_cell=list(lattices.keys()))
```
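%% Cell type:markdown id: tags:
The combined loss above weights each simplex by its distance to the target level: with $d = \operatorname{mean}_i |y_{\mathrm{iso}} - y_i|$ over the simplex values,

$$L(\Delta) = \mathrm{priority} \cdot e^{-d^2 / \sigma^2} + L_{\mathrm{default}}(\Delta),$$

so simplices that straddle the iso-level (small $d$) are refined first, while the default loss keeps the rest of the domain from being starved.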
%% Cell type:code id: tags:
```
def f(xy):
x, y = xy
return x ** 2 + y ** 3
bounds = [(-1, 1), (-1, 1)]
npoints = 17**2
def isoline_loss_function(y_iso, sigma, priority=1):
from adaptive.learner.learnerND import default_loss
def gaussian(x, mu, sigma):
return np.exp(-(x - mu) ** 2 / sigma ** 2 / 2)
def loss(simplex, values, value_scale):
distance = np.mean([abs(y_iso * value_scale - y) for y in values])
return priority * gaussian(distance, 0, sigma) + default_loss(simplex, values, value_scale)
return loss
level = 0.1
loss = isoline_loss_function(level, sigma=0.4, priority=0.5)
with_tri = True
fig, axs = plt.subplots(1, 2, figsize=(fig_width, fig_height))
plt.subplots_adjust(wspace=0.3)
for i, ax in enumerate(axs.flatten()):
learner = adaptive.LearnerND(f, bounds, loss_per_simplex=loss)
label = "ab"[i]
ax.text(
0.5,
1.05,
f"$\mathrm{{({label})}}$",
transform=ax.transAxes,
horizontalalignment="center",
verticalalignment="bottom",
)
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
kind = "homogeneous" if i == 0 else "adaptive"
if kind == "homogeneous":
xs, ys = [np.linspace(*bound, int(npoints**0.5)) for bound in bounds]
data = {xy: f(xy) for xy in itertools.product(xs, ys)}
learner.data = data
elif kind == "adaptive":
learner.data = {
k: v for i, (k, v) in enumerate(learner.data.items()) if i <= npoints
}
adaptive.runner.simple(learner, goal=lambda l: l.npoints >= npoints)
if with_tri:
xy = np.array([learner.tri.get_vertices(s)
for s in learner.tri.simplices])
print(f'{len(xy)} triangles')
triang = mtri.Triangulation(*xy.reshape(-1, 2).T)
ax.triplot(triang, c="w", lw=0.2, alpha=0.8)
# Isolines
vertices, lines = learner._get_iso(level, which='line')
paths = np.array([[vertices[i], vertices[j]] for i, j in lines]).T
print('{} line segments'.format(len(paths.T)))
ax.plot(*paths, c='k')
values = np.array(list(learner.data.values()))
ax.imshow(
learner.plot(npoints if kind == "homogeneous" else None).Image.I.data,
extent=(-1, 1, -1, 1),
interpolation="none",
)
ax.set_xticks([])
ax.set_yticks([])
axs[0].set_ylabel(r"$\textrm{homogeneous}$")
axs[1].set_ylabel(r"$\textrm{adaptive}$")
plt.savefig("figures/isoline.pdf", bbox_inches="tight", transparent=True)
```