From eadc6e52420126e2b66d0db5ec4c54de8622fb17 Mon Sep 17 00:00:00 2001
From: Bas Nijholt <basnijholt@gmail.com>
Date: Mon, 23 Sep 2019 14:31:37 +0200
Subject: [PATCH] separate plotting and calculation

---
 figures.ipynb | 74 +++++++++++++++++++++++++++++----------------------
 1 file changed, 42 insertions(+), 32 deletions(-)

diff --git a/figures.ipynb b/figures.ipynb
index c497a6d..d954248 100644
--- a/figures.ipynb
+++ b/figures.ipynb
@@ -598,6 +598,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "import collections\n",
     "from adaptive import Learner1D, LearnerND\n",
     "from scipy import interpolate\n",
     "\n",
@@ -612,7 +613,7 @@
     "    xs_rand = np.random.uniform(*learner.bounds, size=100_000)\n",
     "\n",
     "    xs = np.linspace(*learner.bounds, N)\n",
-    "    ys = f(xs)\n",
+    "    ys = learner.function(xs)\n",
     "    err_lin = err_1D(xs, ys, xs_rand, learner.function)\n",
     "\n",
     "    xs, ys = zip(*learner.data.items())\n",
@@ -646,12 +647,12 @@
     "\n",
     "    try:\n",
     "        # Vectorized\n",
-    "        zs_hom = f(xys_hom.T)\n",
-    "        zs_rand = f(xys_rand)\n",
+    "        zs_hom = learner.function(xys_hom.T)\n",
+    "        zs_rand = learner.function(xys_rand)\n",
     "    except:\n",
     "        # Non-vectorized\n",
-    "        zs_hom = np.array([f(xy) for xy in xys_hom])\n",
-    "        zs_rand = np.array([f(xy) for xy in xys_rand.T])\n",
+    "        zs_hom = np.array([learner.function(xy) for xy in xys_hom])\n",
+    "        zs_rand = np.array([learner.function(xy) for xy in xys_rand.T])\n",
     "\n",
     "    ip = interpolate.LinearNDInterpolator(xys_hom, zs_hom)\n",
     "    zs = ip(xys_rand.T)\n",
@@ -664,40 +665,49 @@
     "    return err_lin, err_learner\n",
     "\n",
     "\n",
-    "N_max = 10000\n",
+    "N_max = 10000\n",
     "Ns = np.geomspace(4, N_max, 20).astype(int)\n",
     "\n",
-    "fig, axs = plt.subplots(2, 1, figsize=(fig_width, 1.6 * fig_height))\n",
-    "\n",
     "loss_1D = adaptive.learner.learner1D.curvature_loss_function()\n",
     "loss_2D = adaptive.learner.learnerND.curvature_loss_function()\n",
     "\n",
-    "for ax, funcs, loss, Learner, get_err in zip(\n",
-    "    axs,\n",
-    "    [funcs_1D, funcs_2D],\n",
-    "    [loss_1D, loss_2D],\n",
-    "    [Learner1D, LearnerND],\n",
-    "    [get_err_1D, get_err_2D],\n",
+    "err = collections.defaultdict(dict)\n",
+    "for i, (funcs, loss, Learner, get_err) in enumerate(\n",
+    "    zip(\n",
+    "        [funcs_1D, funcs_2D],\n",
+    "        [loss_1D, loss_2D],\n",
+    "        [Learner1D, LearnerND],\n",
+    "        [get_err_1D, get_err_2D],\n",
+    "    )\n",
     "):\n",
-    "    ax.set_xlabel(\"$N$\")\n",
-    "    ax.set_ylabel(r\"$\\text{Err}_{1}(\\tilde{f})$\")\n",
-    "\n",
-    "    for i, d in enumerate(funcs):\n",
-    "        f = d[\"function\"]\n",
-    "        bounds = d[\"bounds\"]\n",
-    "        learner = Learner(f, bounds, loss)\n",
+    "    for d in funcs:\n",
+    "        learner = Learner(d[\"function\"], d[\"bounds\"], loss)\n",
     "        adaptive.runner.simple(learner, goal=lambda l: l.npoints >= N_max)\n",
     "        errs = [get_err(learner, N) for N in Ns]\n",
     "        err_hom, err_adaptive = zip(*errs)\n",
-    "        color = f\"C{i}\"\n",
-    "        label = \"abc\"[i]\n",
-    "        ax.loglog(Ns, err_hom, ls=\"--\", c=color)\n",
-    "        ax.loglog(\n",
-    "            Ns, err_adaptive, label=f\"$\\mathrm{{({label})}}$ {d['title']}\", c=color\n",
-    "        )\n",
+    "        err[i][d[\"title\"]] = (err_hom, err_adaptive)\n",
     "\n",
     "        d[\"err_hom\"] = err_hom\n",
-    "        d[\"err_adaptive\"] = err_adaptive\n",
+    "        d[\"err_adaptive\"] = err_adaptive"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fig, axs = plt.subplots(2, 1, figsize=(fig_width, 1.6 * fig_height))\n",
+    "\n",
+    "for i, ax in enumerate(axs):\n",
+    "    ax.set_xlabel(\"$N$\")\n",
+    "    ax.set_ylabel(r\"$\\text{Err}_{1}(\\tilde{f})$\")\n",
+    "\n",
+    "    for j, (title, (err_hom, err_adaptive)) in enumerate(err[i].items()):\n",
+    "        color = f\"C{j}\"\n",
+    "        label = \"abc\"[j]\n",
+    "        ax.loglog(Ns, err_hom, ls=\"--\", c=color)\n",
+    "        ax.loglog(Ns, err_adaptive, label=rf\"$\\mathrm{{({label})}}$ {title}\", c=color)\n",
     "\n",
     "plt.legend()\n",
     "plt.savefig(\"figures/line_loss_error.pdf\", bbox_inches=\"tight\", transparent=True)\n",
@@ -711,11 +721,11 @@
    "outputs": [],
    "source": [
     "# Error reduction\n",
-    "for d in funcs_1D:\n",
-    "    print(d[\"err_hom\"][-1] / d[\"err_adaptive\"][-1])\n",
+    "for title, (err_hom, err_adaptive) in err[0].items():\n",
+    "    print(title, err_hom[-1] / err_adaptive[-1])\n",
     "\n",
-    "for d in funcs_2D:\n",
-    "    print(d[\"err_hom\"][-1] / d[\"err_adaptive\"][-1])"
+    "for title, (err_hom, err_adaptive) in err[1].items():\n",
+    "    print(title, err_hom[-1] / err_adaptive[-1])"
    ]
   },
   {
-- 
GitLab