diff --git a/figures.ipynb b/figures.ipynb
index 063bea03b721cefa2a0d2ae2520c06d651d8e509..8187dba0f45838e964cfcd51d825f3d819038047 100644
--- a/figures.ipynb
+++ b/figures.ipynb
@@ -22,6 +22,7 @@
    "source": [
     "import functools\n",
     "import itertools\n",
+    "import os\n",
     "import pickle\n",
     "\n",
     "import holoviews.plotting.mpl\n",
@@ -146,9 +147,9 @@
     "        bounds=phase_diagram_setup(\"phase_diagram.pickle\")[1],\n",
     "        npoints=100,\n",
     "        fname=\"phase_diagram.pickle\",\n",
-    "        title=\"PD\",\n",
+    "        title=\"phase diagram\",\n",
     "    ),\n",
-    "    dict(function=level_crossing, bounds=[(-1, 1), (-3, 3)], npoints=50, title=\"LC\"),\n",
+    "    dict(function=level_crossing, bounds=[(-1, 1), (-3, 3)], npoints=50, title=\"level crossing\"),\n",
     "]"
    ]
   },
@@ -631,8 +632,8 @@
     "def get_err_2D(learner, N):\n",
     "    xys_rand = np.vstack(\n",
     "        [\n",
-    "            np.random.uniform(*learner.bounds[0], size=int(100_000 ** 0.5)),\n",
-    "            np.random.uniform(*learner.bounds[1], size=int(100_000 ** 0.5)),\n",
+    "            np.random.uniform(*learner.bounds[0], size=int(100_000)),\n",
+    "            np.random.uniform(*learner.bounds[1], size=int(100_000)),\n",
     "        ]\n",
     "    )\n",
     "\n",
@@ -665,33 +666,38 @@
     "    return err_lin, err_learner\n",
     "\n",
     "\n",
+    "recalculate = False\n",
     "N_max = 10_000\n",
     "Ns = np.geomspace(4, N_max, 50).astype(int)\n",
-    "\n",
-    "loss_1D = adaptive.learner.learner1D.curvature_loss_function()\n",
-    "loss_2D = adaptive.learner.learnerND.curvature_loss_function()\n",
-    "\n",
-    "err = collections.defaultdict(dict)\n",
-    "for i, (funcs, loss, Learner, get_err) in enumerate(\n",
-    "    zip(\n",
-    "        [funcs_1D, funcs_2D],\n",
-    "        [loss_1D, loss_2D],\n",
-    "        [Learner1D, LearnerND],\n",
-    "        [get_err_1D, get_err_2D],\n",
-    "    )\n",
-    "):\n",
-    "    for d in funcs:\n",
-    "        learner = Learner(d[\"function\"], d[\"bounds\"], loss)\n",
-    "        adaptive.runner.simple(learner, goal=lambda l: l.npoints >= N_max)\n",
-    "        errs = [get_err(learner, N) for N in Ns]\n",
-    "        err_hom, err_adaptive = zip(*errs)\n",
-    "        err[i][d[\"title\"]] = (err_hom, err_adaptive)\n",
-    "\n",
-    "        d[\"err_hom\"] = err_hom\n",
-    "        d[\"err_adaptive\"] = err_adaptive\n",
-    "\n",
-    "with open(\"error_line_loss.pickle\", \"wb\") as f:\n",
-    "    pickle.dump(err, f)"
+    "fname = \"error_line_loss.pickle\"\n",
+    "\n",
+    "if not os.path.exists(fname) and not recalculate:\n",
+    "    loss_1D = adaptive.learner.learner1D.curvature_loss_function()\n",
+    "    loss_2D = adaptive.learner.learnerND.curvature_loss_function()\n",
+    "\n",
+    "    err = collections.defaultdict(dict)\n",
+    "    for i, (funcs, loss, Learner, get_err) in enumerate(\n",
+    "        zip(\n",
+    "            [funcs_1D, funcs_2D],\n",
+    "            [loss_1D, loss_2D],\n",
+    "            [Learner1D, LearnerND],\n",
+    "            [get_err_1D, get_err_2D],\n",
+    "        )\n",
+    "    ):\n",
+    "        for d in funcs:\n",
+    "            learner = Learner(d[\"function\"], d[\"bounds\"], loss)\n",
+    "            adaptive.runner.simple(learner, goal=lambda l: l.npoints >= N_max)\n",
+    "            errs = [get_err(learner, N) for N in Ns]\n",
+    "            err_hom, err_adaptive = zip(*errs)\n",
+    "            err[i][d[\"title\"]] = (err_hom, err_adaptive)\n",
+    "\n",
+    "    with open(fname, \"wb\") as f:\n",
+    "        pickle.dump(err, f)\n",
+    "else:\n",
+    "    with open(fname, \"rb\") as f:\n",
+    "        err = pickle.load(f)"
    ]
   },
   {
@@ -700,10 +704,6 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import pickle, os\n",
-    "with open('error_line_loss.pickle', 'rb') as f:\n",
-    "    err = pickle.load(f)\n",
-    "    \n",
     "fig, axs = plt.subplots(2, 1, figsize=(fig_width, 1.6 * fig_height))\n",
     "plt.subplots_adjust(hspace=0.3)\n",
     "\n",
@@ -715,8 +715,14 @@
     "    for j, (title, (err_hom, err_adaptive)) in enumerate(err[i].items()):\n",
     "        color = f\"C{j}\"\n",
     "        label = \"abc\"[j]\n",
-    "        ax.loglog(Ns, err_hom, ls=\"--\", c=color)\n",
-    "        ax.loglog(Ns, err_adaptive, label=f\"$\\mathrm{{({label})}}$ {title}\", c=color)\n",
+    "        label = f\"$\\mathrm{{({label})}}$ {title}\"\n",
+    "#         ax.loglog(Ns, err_hom, ls=\"--\", c=color)\n",
+    "#         ax.loglog(Ns, err_adaptive, label=label, c=color)\n",
+    "        error = np.array(err_hom) / np.array(err_adaptive)\n",
+    "        if i == 0:\n",
+    "            ax.loglog(Ns[:36], error[:36], c=color, label=label)\n",
+    "        else:\n",
+    "            ax.loglog(Ns, error, c=color, label=label)\n",
     "        ax.legend()\n",
     "\n",
     "plt.savefig(\"figures/line_loss_error.pdf\", bbox_inches=\"tight\", transparent=True)\n",
@@ -734,7 +740,7 @@
     "for title, (err_hom, err_adaptive) in err[0].items():\n",
     "    print(title, err_hom[-1] / err_adaptive[-1])\n",
     "\n",
-    "print(\"2D\")\n",
+    "print(\"\\n2D\")\n",
     "for title, (err_hom, err_adaptive) in err[1].items():\n",
     "    print(title, err_hom[-1] / err_adaptive[-1])"
    ]