diff --git a/figures.ipynb b/figures.ipynb
index eee3291227390e2be97cf16175f95786caa4336b..37a3a6bcd4bf79255cccd5531e4031a707473359 100644
--- a/figures.ipynb
+++ b/figures.ipynb
@@ -1,5 +1,19 @@
 {
  "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Adaptive paper figures"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## imports and function definitions"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -101,8 +115,9 @@
     "\n",
     "def phase_diagram(xy, fname):\n",
     "    ip, _ = phase_diagram_setup(fname)\n",
-    "    return np.round(ip(xy))\n",
-    "\n",
+    "    zs = ip(xy)\n",
+    "    return np.round(zs)\n",
+    "    \n",
     "\n",
     "def density(x, eps=0):\n",
     "    e = [0.8, 0.2]\n",
@@ -127,14 +142,15 @@
     "\n",
     "\n",
     "funcs_2D = [\n",
-    "    dict(function=ring, bounds=[(-1, 1), (-1, 1)], npoints=33),\n",
+    "    dict(function=ring, bounds=[(-1, 1), (-1, 1)], npoints=33, title=\"ring\"),\n",
     "    dict(\n",
-    "        function=phase_diagram,\n",
+    "        function=functools.partial(phase_diagram, fname=\"phase_diagram.pickle\"),\n",
     "        bounds=phase_diagram_setup(\"phase_diagram.pickle\")[1],\n",
     "        npoints=100,\n",
     "        fname=\"phase_diagram.pickle\",\n",
+    "        title=\"PD\",\n",
     "    ),\n",
-    "    dict(function=level_crossing, bounds=[(-1, 1), (-3, 3)], npoints=50),\n",
+    "    dict(function=level_crossing, bounds=[(-1, 1), (-3, 3)], npoints=50, title=\"LC\"),\n",
     "]"
    ]
   },
@@ -142,7 +158,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# loss_1D"
+    "## Interval and loss"
    ]
   },
   {
@@ -200,7 +216,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# adaptive_vs_grid"
+    "## Learner1D vs grid"
    ]
   },
   {
@@ -245,7 +261,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# adaptive_2D"
+    "## Learner2D vs grid"
    ]
   },
   {
@@ -337,7 +353,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Algo explaination"
+    "## Algorithm explaination: Learner1D step-by-step"
    ]
   },
   {
@@ -494,7 +510,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Line loss"
+    "## Line loss visualization"
    ]
   },
   {
@@ -573,7 +589,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Line loss error"
+    "## Line loss L1-error(N)"
    ]
   },
   {
@@ -582,52 +598,107 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import scipy\n",
+    "from adaptive import Learner1D, LearnerND\n",
+    "from scipy import interpolate\n",
+    "\n",
     "\n",
-    "def err(xs, ys, xs_rand):\n",
-    "    ip = scipy.interpolate.interp1d(xs, ys)\n",
+    "def err_1D(xs, ys, xs_rand, f):\n",
+    "    ip = interpolate.interp1d(xs, ys)\n",
     "    abserr = np.abs(ip(xs_rand) - f(xs_rand))\n",
     "    return np.average(abserr ** 2) ** 0.5\n",
     "\n",
     "\n",
-    "def get_err(learner, N, bounds):\n",
-    "    xs_rand = np.random.uniform(*bounds, size=100_000)\n",
+    "def get_err_1D(learner, N):\n",
+    "    xs_rand = np.random.uniform(*learner.bounds, size=100_000)\n",
     "\n",
-    "    xs = np.linspace(*bounds, N)\n",
+    "    xs = np.linspace(*learner.bounds, N)\n",
     "    ys = f(xs)\n",
-    "    err_lin = err(xs, ys, xs_rand)\n",
+    "    err_lin = err_1D(xs, ys, xs_rand, learner.function)\n",
     "\n",
     "    xs, ys = zip(*learner.data.items())\n",
     "    xs, ys = xs[:N], ys[:N]\n",
-    "    ip = scipy.interpolate.interp1d(xs, ys)\n",
-    "    err_learner = err(xs, ys, xs_rand)\n",
+    "    ip = interpolate.interp1d(xs, ys)\n",
+    "    err_learner = err_1D(xs, ys, xs_rand, learner.function)\n",
     "    return err_lin, err_learner\n",
     "\n",
+    "def err_2D(zs, zs_rand):\n",
+    "    abserr = np.abs(zs - zs_rand)\n",
+    "    return np.average(abserr ** 2) ** 0.5\n",
     "\n",
-    "N_max = 10000\n",
-    "Ns = np.geomspace(4, N_max, 100).astype(int)\n",
-    "loss = adaptive.learner.learner1D.curvature_loss_function()\n",
+    "def get_err_2D(learner, N):\n",
+    "    xys_rand = np.vstack(\n",
+    "        [\n",
+    "            np.random.uniform(*learner.bounds[0], size=int(100_000**0.5)),\n",
+    "            np.random.uniform(*learner.bounds[1], size=int(100_000**0.5)),\n",
+    "        ]\n",
+    "    )\n",
     "\n",
-    "fig, ax = plt.subplots(1, 1, figsize=(fig_width, fig_height))\n",
+    "    xys_hom = np.array(\n",
+    "        [\n",
+    "            (x, y)\n",
+    "            for x in np.linspace(*learner.bounds[0], int(N**0.5))\n",
+    "            for y in np.linspace(*learner.bounds[1], int(N**0.5))\n",
+    "        ]\n",
+    "    )\n",
     "\n",
-    "for i, d in enumerate(funcs_1D):\n",
-    "    f = d[\"function\"]\n",
-    "    bounds = d[\"bounds\"]\n",
-    "    learner = adaptive.Learner1D(f, bounds, loss_per_interval=loss)\n",
-    "    adaptive.runner.simple(learner, goal=lambda l: l.npoints >= N_max)\n",
-    "    errs = [get_err(learner, N, bounds) for N in Ns]\n",
-    "    err_hom, err_adaptive = zip(*errs)\n",
-    "    color = f\"C{i}\"\n",
-    "    label = \"abc\"[i]\n",
-    "    ax.loglog(Ns, err_hom, ls=\"--\", c=color)\n",
-    "    ax.loglog(Ns, err_adaptive, label=f\"$\\mathrm{{({label})}}$ {d['title']}\", c=color)\n",
-    "\n",
-    "    d[\"err_hom\"] = err_hom\n",
-    "    d[\"err_adaptive\"] = err_adaptive\n",
+    "    try:\n",
+    "        # Vectorized\n",
+    "        zs_hom = f(xys_hom.T)\n",
+    "        zs_rand = f(xys_rand)\n",
+    "    except:\n",
+    "        # Non-vectorized\n",
+    "        zs_hom = np.array([f(xy) for xy in xys_hom])\n",
+    "        zs_rand = np.array([f(xy) for xy in xys_rand.T])\n",
+    "\n",
+    "    ip = interpolate.LinearNDInterpolator(xys_hom, zs_hom)\n",
+    "    zs = ip(xys_rand.T)\n",
+    "    err_lin = err_2D(zs, zs_rand)\n",
+    "\n",
+    "    ip = interpolate.LinearNDInterpolator(learner.points[:len(xys_hom)], learner.values[:len(xys_hom)])\n",
+    "    zs = ip(xys_rand.T)\n",
+    "    err_learner = err_2D(zs, zs_rand)\n",
+    "\n",
+    "    return err_lin, err_learner\n",
+    "\n",
+    "N_max = 10000\n",
+    "Ns = np.geomspace(4, N_max, 20).astype(int)\n",
+    "\n",
+    "fig, axs = plt.subplots(2, 1, figsize=(fig_width, 1.6 * fig_height))\n",
+    "\n",
+    "loss_1D = adaptive.learner.learner1D.curvature_loss_function()\n",
+    "loss_2D = adaptive.learner.learnerND.curvature_loss_function()\n",
+    "\n",
+    "for ax, funcs, loss, Learner, get_err in zip(\n",
+    "    axs,\n",
+    "    [funcs_1D, funcs_2D],\n",
+    "    [loss_1D, loss_2D],\n",
+    "    [Learner1D, LearnerND],\n",
+    "    [get_err_1D, get_err_2D],\n",
+    "):\n",
+    "    ax.set_xlabel(\"$N$\")\n",
+    "    ax.set_ylabel(r\"$\\text{Err}_{1}(\\tilde{f})$\")\n",
+    "\n",
+    "    for i, d in enumerate(funcs):\n",
+    "        f = d[\"function\"]\n",
+    "        bounds = d[\"bounds\"]\n",
+    "        learner = Learner(f, bounds, loss)\n",
+    "        adaptive.runner.simple(learner, goal=lambda l: l.npoints >= N_max)\n",
+    "        errs = [get_err(learner, N) for N in Ns]\n",
+    "        err_hom, err_adaptive = zip(*errs)\n",
+    "        color = f\"C{i}\"\n",
+    "        label = \"abc\"[i]\n",
+    "        ax.loglog(Ns, err_hom, ls=\"--\", c=color)\n",
+    "        ax.loglog(\n",
+    "            Ns, err_adaptive, label=f\"$\\mathrm{{({label})}}$ {d['title']}\", c=color\n",
+    "        )\n",
+    "\n",
+    "        d[\"err_hom\"] = err_hom\n",
+    "        d[\"err_adaptive\"] = err_adaptive\n",
     "\n",
     "plt.legend()\n",
-    "ax.set_xlabel(\"$N$\")\n",
-    "ax.set_ylabel(r\"$\\text{Err}_{1}(\\tilde{f})$\")\n",
     "plt.savefig(\"figures/line_loss_error.pdf\", bbox_inches=\"tight\", transparent=True)\n",
     "plt.show()"
    ]
@@ -640,6 +708,9 @@
    "source": [
     "# Error reduction\n",
     "for d in funcs_1D:\n",
+    "    print(d[\"err_hom\"][-1] / d[\"err_adaptive\"][-1])\n",
+    "\n",
+    "for d in funcs_2D:\n",
     "    print(d[\"err_hom\"][-1] / d[\"err_adaptive\"][-1])"
    ]
   },
@@ -647,7 +718,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Iso surface"
+    "## iso-line plots"
    ]
   },
   {