diff --git a/adaptive/learner/learner2D.py b/adaptive/learner/learner2D.py
index 11ac92a4691fdbaf87fef462169f51914be03d39..f8d37b821466050944bcc63d050c1ec3f85c3de8 100644
--- a/adaptive/learner/learner2D.py
+++ b/adaptive/learner/learner2D.py
@@ -89,7 +89,7 @@ def uniform_loss(ip):
     return np.sqrt(areas(ip))
 
 
-def resolution_loss(ip, min_distance=0, max_distance=1):
+def resolution_loss_function(min_distance=0, max_distance=1):
     """Loss function that is similar to the `default_loss` function, but you
-    can set the maximimum and minimum size of a triangle.
+    can set the maximum and minimum size of a triangle.
 
@@ -104,27 +104,25 @@ def resolution_loss(ip, min_distance=0, max_distance=1):
     ...     x, y = xy
     ...     return x**2 + y**2
     >>>
-    >>> from functools import partial
-    >>> loss = partial(resolution_loss, min_distance=0.01)
+    >>> loss = resolution_loss_function(min_distance=0.01, max_distance=1)
     >>> learner = adaptive.Learner2D(f,
-    ...                              bounds=[(-1, -1), (1, 1)],
+    ...                              bounds=[(-1, 1), (-1, 1)],
     ...                              loss_per_triangle=loss)
     >>>
     """
-    A = areas(ip)
-    dev = np.sum(deviations(ip), axis=0)
-
-    # similar to the default_loss
-    loss = np.sqrt(A) * dev + A
+    def resolution_loss(ip):
+        loss = default_loss(ip)
+        A = areas(ip)
 
-    # Setting areas with a small area to zero such that they won't be chosen again
-    loss[A < min_distance**2] = 0
+        # Setting areas with a small area to zero such that they won't be chosen again
+        loss[A < min_distance**2] = 0
 
-    # Setting triangles that have a size larger than max_distance to infinite loss
-    # such that these triangles will be picked
-    loss[A > max_distance**2] = np.inf
+        # Setting triangles that have a size larger than max_distance to infinite loss
+        # such that these triangles will be picked
+        loss[A > max_distance**2] = np.inf
 
-    return loss
+        return loss
+    return resolution_loss
 
 
 def minimize_triangle_surface_loss(ip):
diff --git a/docs/source/reference/adaptive.learner.learner2D.rst b/docs/source/reference/adaptive.learner.learner2D.rst
index f19d144b16c827b3815562feb7e72036b661e1b7..11d14e3c2fc9df37b40c6d031f8ea2d9b5741bbd 100644
--- a/docs/source/reference/adaptive.learner.learner2D.rst
+++ b/docs/source/reference/adaptive.learner.learner2D.rst
@@ -15,7 +15,7 @@ Custom loss functions
 
 .. autofunction:: adaptive.learner.learner2D.uniform_loss
 
-.. autofunction:: adaptive.learner.learner2D.resolution_loss
+.. autofunction:: adaptive.learner.learner2D.resolution_loss_function
 
 
 Helper functions
diff --git a/docs/source/tutorial/tutorial.custom_loss.rst b/docs/source/tutorial/tutorial.custom_loss.rst
index a00ee9a5bcab3165e3fd3f03938e7adefe3bef94..053e58725c3f8fa9e1168ae2d7b0b79ff6bdb599 100644
--- a/docs/source/tutorial/tutorial.custom_loss.rst
+++ b/docs/source/tutorial/tutorial.custom_loss.rst
@@ -49,7 +49,7 @@ tl;dr, one can use the following *loss functions* that
 + `adaptive.learner.learner2D.default_loss`
 + `adaptive.learner.learner2D.uniform_loss`
 + `adaptive.learner.learner2D.minimize_triangle_surface_loss`
-+ `adaptive.learner.learner2D.resolution_loss`
++ `adaptive.learner.learner2D.resolution_loss_function`
 
 
 Uniform sampling
@@ -132,34 +132,24 @@ small (0 loss).
 
     %%opts EdgePaths (color='w') Image [logz=True colorbar=True]
 
-    def resolution_loss(ip, min_distance=0, max_distance=1):
+    def resolution_loss_function(min_distance=0, max_distance=1):
         """min_distance and max_distance should be in between 0 and 1
         because the total area is normalized to 1."""
+        def resolution_loss(ip):
+            from adaptive.learner.learner2D import areas, default_loss
+            loss = default_loss(ip)
+            A = areas(ip)
 
-        from adaptive.learner.learner2D import areas, deviations
+            # Setting areas with a small area to zero such that they won't be chosen again
+            loss[A < min_distance**2] = 0
 
-        A = areas(ip)
-
-        # 'deviations' returns an array of shape '(n, len(ip))', where
-        # 'n' is the  is the dimension of the output of the learned function
-        # In this case we know that the learned function returns a scalar,
-        # so 'deviations' returns an array of shape '(1, len(ip))'.
-        # It represents the deviation of the function value from a linear estimate
-        # over each triangular subdomain.
-        dev = deviations(ip)[0]
-
-        # we add terms of the same dimension: dev == [distance], A == [distance**2]
-        loss = np.sqrt(A) * dev + A
-
-        # Setting areas with a small area to zero such that they won't be chosen again
-        loss[A < min_distance**2] = 0
-
-        # Setting triangles that have a size larger than max_distance to infinite loss
-        loss[A > max_distance**2] = np.inf
-
-        return loss
+            # Setting triangles that have a size larger than max_distance to infinite loss
+            loss[A > max_distance**2] = np.inf
 
-    loss = partial(resolution_loss, min_distance=0.01)
+            return loss
+        return resolution_loss
+
+    loss = resolution_loss_function(min_distance=0.01)
 
     learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss)
     runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.02)
@@ -169,4 +157,4 @@ Awesome! We zoom in on the singularity, but not at the expense of
 sampling the rest of the domain a reasonable amount.
 
 The above strategy is available as
-`adaptive.learner.learner2D.resolution_loss`.
+`adaptive.learner.learner2D.resolution_loss_function`.
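
To sanity-check the refactor end to end, here is a minimal sketch that exercises the new factory (it combines the docstring's toy paraboloid with the tutorial's runner goal; `adaptive.runner.simple` is used only to keep the run in-process, any runner works):

```python
import adaptive
from adaptive.learner.learner2D import resolution_loss_function

def f(xy):
    # Toy paraboloid from the docstring example.
    x, y = xy
    return x**2 + y**2

# min_distance and max_distance are fractions of the normalized domain:
# triangles with area below min_distance**2 get zero loss (never refined
# again); above max_distance**2 they get infinite loss (refined first).
loss = resolution_loss_function(min_distance=0.01, max_distance=1)

learner = adaptive.Learner2D(f, bounds=[(-1, 1), (-1, 1)], loss_per_triangle=loss)
adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.02)
```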