diff --git a/figures.ipynb b/figures.ipynb
index a7b1c466b7fc1bb2f46783ec918cb1ae0944acf5..81f605af2470d3146df7d13d0c55d3414c146967 100644
--- a/figures.ipynb
+++ b/figures.ipynb
@@ -479,6 +479,13 @@
     "plt.show()"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Line loss"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -550,6 +557,101 @@
     "plt.savefig(\"figures/line_loss.pdf\", bbox_inches=\"tight\", transparent=True)\n",
     "plt.show()"
    ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Line loss error"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import scipy\n",
+    "\n",
+    "\n",
+    "def f(x, offset=0.123):\n",
+    "    a = 0.02\n",
+    "    return x + a ** 2 / (a ** 2 + (x - offset) ** 2)\n",
+    "\n",
+    "\n",
+    "def g(x):\n",
+    "    return np.tanh(x * 40)\n",
+    "\n",
+    "\n",
+    "def h(x):\n",
+    "    return np.sin(100 * x) * np.exp(-x ** 2 / 0.1 ** 2)\n",
+    "\n",
+    "\n",
+    "funcs_1D = [\n",
+    "    dict(function=f, bounds=(-1, 1), title=\"peak\"),\n",
+    "    dict(function=g, bounds=(-1, 1), title=\"tanh\"),\n",
+    "    dict(function=h, bounds=(-0.3, 0.3), title=\"wave packet\"),\n",
+    "]\n",
+    "\n",
+    "\n",
+    "def err(xs, ys, xs_rand):\n",
+    "    ip = scipy.interpolate.interp1d(xs, ys)\n",
+    "    abserr = np.abs(ip(xs_rand) - f(xs_rand))\n",
+    "    return np.average(abserr ** 2) ** 0.5\n",
+    "\n",
+    "\n",
+    "def get_err(learner, N, bounds):\n",
+    "    xs_rand = np.random.uniform(*bounds, size=100_000)\n",
+    "\n",
+    "    xs = np.linspace(*bounds, N)\n",
+    "    ys = f(xs)\n",
+    "    err_lin = err(xs, ys, xs_rand)\n",
+    "\n",
+    "    xs, ys = zip(*learner.data.items())\n",
+    "    xs, ys = xs[:N], ys[:N]\n",
+    "    ip = scipy.interpolate.interp1d(xs, ys)\n",
+    "    err_learner = err(xs, ys, xs_rand)\n",
+    "    return err_lin, err_learner\n",
+    "\n",
+    "\n",
+    "N_max = 10000\n",
+    "Ns = np.geomspace(4, N_max, 100).astype(int)\n",
+    "loss = adaptive.learner.learner1D.curvature_loss_function()\n",
+    "\n",
+    "fig, ax = plt.subplots(1, 1, figsize=(fig_width, fig_height))\n",
+    "\n",
+    "for i, d in enumerate(funcs_1D):\n",
+    "    f = d[\"function\"]\n",
+    "    bounds = d[\"bounds\"]\n",
+    "    learner = adaptive.Learner1D(f, bounds, loss_per_interval=loss)\n",
+    "    adaptive.runner.simple(learner, goal=lambda l: l.npoints >= N_max)\n",
+    "    errs = [get_err(learner, N, bounds) for N in Ns]\n",
+    "    err_hom, err_adaptive = zip(*errs)\n",
+    "    color = f\"C{i}\"\n",
+    "    label = \"abc\"[i]\n",
+    "    ax.loglog(Ns, err_hom, ls=\"--\", c=color)\n",
+    "    ax.loglog(Ns, err_adaptive, label=f\"$\\mathrm{{({label})}}$ {d['title']}\", c=color)\n",
+    "\n",
+    "    d[\"err_hom\"] = err_hom\n",
+    "    d[\"err_adaptive\"] = err_adaptive\n",
+    "\n",
+    "plt.legend()\n",
+    "ax.set_xlabel(\"$N$\")\n",
+    "ax.set_ylabel(r\"$\\text{Err}_{1}(\\tilde{f})$\")\n",
+    "plt.savefig(\"figures/line_loss_error.pdf\", bbox_inches=\"tight\", transparent=True)\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Error reduction\n",
+    "for d in funcs_1D:\n",
+    "    print(d[\"err_hom\"][-1] / d[\"err_adaptive\"][-1])"
+   ]
   }
  ],
  "metadata": {
diff --git a/figures/line_loss_error.pdf b/figures/line_loss_error.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..6aeeccb01b7e0a336579f6ea16c2baa77792d805
Binary files /dev/null and b/figures/line_loss_error.pdf differ
diff --git a/paper.md b/paper.md
index 67a292764d5ef7a5a8b0d3b5808f695f3c3e4603..03aa7d1e158efba80a5b94b7f6f54b895d685fb3 100755
--- a/paper.md
+++ b/paper.md
@@ -202,14 +202,29 @@ Inspired by a method commonly employed in digital cartography for coastline simp
 Here, at each point (ignoring the boundary points), we compute the effective area associated with its triangle, see Fig. @fig:line_loss(b).
The loss of an interval is then the average area of the two adjacent triangles.
By Taylor expanding $f$ around $x$, one can show that the area of the triangles is governed by the local second derivative of $f$.
-We can generalize this loss to $N$ dimensions
+We can generalize this loss to $N$ dimensions, where the triangle is replaced by an $(N+1)$-dimensional simplex.
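+
+As an illustration, the sketch below computes this triangle-area loss for a sorted set of points in one dimension (a minimal sketch; the helper names `triangle_area` and `interval_losses` are ours, while the figures use Adaptive's own `adaptive.learner.learner1D.curvature_loss_function`):
+
+```python
+import numpy as np
+
+
+def triangle_area(p0, p1, p2):
+    # Shoelace formula for the area of the triangle spanned by three (x, y) points.
+    (x0, y0), (x1, y1), (x2, y2) = p0, p1, p2
+    return 0.5 * abs((x1 - x0) * (y2 - y0) - (x2 - x0) * (y1 - y0))
+
+
+def interval_losses(points):
+    # One triangle per interior point; the loss of an interval is the average
+    # area of the triangles adjacent to it (only one at the boundaries).
+    areas = [triangle_area(*points[i : i + 3]) for i in range(len(points) - 2)]
+    losses = []
+    for i in range(len(points) - 1):
+        adjacent = [areas[j] for j in (i - 1, i) if 0 <= j < len(areas)]
+        losses.append(sum(adjacent) / len(adjacent))
+    return losses
+
+
+points = [(x, np.tanh(40 * x)) for x in np.linspace(-1, 1, 6)]
+print(interval_losses(points))  # the interval crossing x = 0 has the largest loss
+```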
 
 ![Line loss visualization.
-We start with 6 points (a) on the function (grey).
+In this example, we start with 6 points (a) on the function (grey).
For each point except the endpoints, the effective area is determined by its associated triangle (b).
 The loss of each interval can be computed by taking the average area of the adjacent triangles.
Subplots (c), (d), and (e) show the subsequent iterations following (b).](figures/line_loss.pdf){#fig:line_loss}
 
+To compare sampling strategies, we need to define an error metric.
+We construct a linear interpolation $\tilde{f}$ of the sampled points as an approximation of $f$.
+We calculate the error in the $L^{1}$-norm, defined as
+$$
+\text{Err}_{1}(\tilde{f})=\left\Vert \tilde{f}-f\right\Vert _{L^{1}}=\int_{a}^{b}\left|\tilde{f}(x)-f(x)\right|\text{d}x.
+$$
+This error approaches zero as the approximation improves.
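+
+As a minimal sketch (the helper `l1_error` is an illustrative name, not the exact code used for the figure), this integral can be estimated by Monte-Carlo sampling:
+
+```python
+import numpy as np
+from scipy.interpolate import interp1d
+
+
+def l1_error(f, xs, ys, a, b, n_samples=100_000, seed=0):
+    # Monte-Carlo estimate of Err_1: the integral of |f_tilde - f| over [a, b]
+    # is (b - a) times the mean of |f_tilde - f| at uniformly drawn points.
+    rng = np.random.default_rng(seed)
+    f_tilde = interp1d(xs, ys)  # linear interpolant of the sampled points
+    x = rng.uniform(a, b, n_samples)
+    return (b - a) * np.mean(np.abs(f_tilde(x) - f(x)))
+```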
+
+![The $L^{1}$-norm error as a function of number of points $N$ for the functions in Fig. @fig:adaptive_vs_grid (a,b,c).
+The dashed lines correspond to homogeneous sampling and the solid lines to adaptive sampling with the line loss.
+In all cases adaptive sampling performs better, with the error a factor of 1.6 to 20 lower at $N=10000$.
+](figures/line_loss_error.pdf){#fig:line_loss_error}
+
+Figure @fig:line_loss_error shows this error as a function of the number of points $N$.
+
 <!-- https://bost.ocks.org/mike/simplify/ -->
 
 ## A parallelizable adaptive integration algorithm based on cquad