diff --git a/kwant/lattice.py b/kwant/lattice.py
index cb4e0634e41d7e40b6705577d5bb151b90eae1e4..899ce5474216421b97f35b922a51a62cd85b9602 100644
--- a/kwant/lattice.py
+++ b/kwant/lattice.py
@@ -286,7 +286,8 @@ class Polyatomic:
         Parameters
         ----------
         n : integer
-            Order of the hoppings to return.
+            Order of the hoppings to return. Note that the zeroth neighbor is
+            the site itself or any other site at the same position.
         eps : float
             Tolerance relative to the length of the shortest lattice vector for
             when to consider lengths to be approximately equal.
@@ -309,6 +310,7 @@ class Polyatomic:
         sls = self.sublattices
         shortest_hopping = sls[0].n_closest(
             sls[0].pos(([0] * sls[0].lattice_dim)), 2)[-1]
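+        # Keep a relative tolerance for `n_closest` below; `eps` itself is
+        # converted into an absolute length tolerance on the next line.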
+        rtol = eps
         eps *= np.linalg.norm(self.vec(shortest_hopping))
         nvec = len(self._prim_vecs)
         sublat_pairs = [(i, j) for (i, j) in product(sls, sls)
@@ -323,36 +325,25 @@ class Polyatomic:
                     continue
             return True
 
+        # Find the `n` closest neighbors (with multiplicity) for each pair of
+        # sublattices; this is guaranteed to include the `n` closest neighbors
+        # overall.
+        sites = []
+        for i, j in sublat_pairs:
+            origin = Site(j, ta.zeros(nvec)).pos
+            tags = i.n_closest(origin, n=n+1, group_by_length=True, rtol=rtol)
+            ij_dist = [np.linalg.norm(Site(i, tag).pos - origin)
+                          for tag in tags]
+            sites.append((tags, (j, i), ij_dist))
+        distances = np.r_[tuple((i[2] for i in sites))]
+        distances = np.sort(distances)
+        group_boundaries = np.where(np.diff(distances) > eps)[0]
+        # Find the largest distance within the `n`-th group.
+        if len(group_boundaries) == n:
+            n_dist = distances[-1]
+        else:
+            n_dist = distances[group_boundaries[n]]
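+        # (Illustrative example: for a monatomic square lattice and `n = 1`
+        # the sorted distances are [0, 1, 1, 1, 1], the only group boundary
+        # is at index 0, and `n_dist` is 1.)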
 
-        # Find the correct number of neighbors to calculate on each lattice.
-        cutoff = n + 2
-        while True:
-            max_dist = []
-            sites = []
-            for i, j in sublat_pairs:
-                origin = Site(j, ta.zeros(nvec)).pos
-                tags = i.n_closest(origin, n=cutoff**nvec)
-
-                ij_dist = [np.linalg.norm(Site(i, tag).pos - origin)
-                              for tag in tags]
-                sites.append((tags, (j, i), ij_dist))
-            max_dist = [i[2][-1] for i in sites]
-            distances = np.r_[tuple((i[2] for i in sites))]
-            distances = np.sort(distances)
-            group_boundaries = np.argwhere(np.diff(distances) > eps)
-            if len(group_boundaries) < n:
-                cutoff += 1
-                continue
-            try:
-                n_dist = distances[group_boundaries[n]]
-            except IndexError:
-                cutoff += 1
-                continue
-            if np.all(max_dist > n_dist):
-                break
-            cutoff += 1
-
-        # We now have all the required sites, we need to find n-th.
+        # We now have all the required sites; select the ones in the `n`-th group.
         result = []
         for group in sites:
             tags, distance = group[0], group[2]
@@ -485,7 +476,7 @@ class Monatomic(builder.SiteFamily, Polyatomic):
             raise ValueError("Dimensionality mismatch.")
         return tag
 
-    def n_closest(self, pos, n=1):
+    def n_closest(self, pos, n=1, group_by_length=False, rtol=1e-9):
         """Find n sites closest to position `pos`.
 
         Returns
@@ -494,7 +485,8 @@ class Monatomic(builder.SiteFamily, Polyatomic):
             An array with sites coordinates.
         """
         # TODO (Anton): transform to tinyarrays, once ta indexing is better.
-        return np.dot(lll.cvp(pos - self.offset, self.reduced_vecs, n),
+        return np.dot(lll.cvp(pos - self.offset, self.reduced_vecs,
+                              n=n, group_by_length=group_by_length, rtol=rtol),
                       self.transf.T)
 
     def closest(self, pos):
diff --git a/kwant/linalg/lll.py b/kwant/linalg/lll.py
index 0fdb599df8fd46f47dd8fcdbd9afa5fd8638ed60..b4cca6141460c22009425beb3116f8c8fcdd85f5 100644
--- a/kwant/linalg/lll.py
+++ b/kwant/linalg/lll.py
@@ -9,6 +9,7 @@
 __all__ = ['lll', 'cvp', 'voronoi']
 
 import numpy as np
+from scipy import linalg as la
 from itertools import product
 
 
@@ -109,12 +110,12 @@ def lll(basis, c=1.34):
     return vecs, np.array(np.round(coefs), int)
 
 
-def cvp(vec, basis, n=1):
+def cvp(vec, basis, n=1, group_by_length=False, rtol=1e-09):
     """
     Solve the n-closest vector problem for a vector, given a basis.
 
     This algorithm performs poorly in general, so it should be supplied
-    with LLL-reduced bases.
+    with an LLL-reduced basis.
 
     Parameters
     ----------
@@ -123,12 +124,21 @@ def cvp(vec, basis, n=1):
     basis : 2d array-like of floats
         Sequence of basis vectors
     n : int
-        Number of lattice vectors closest to the point that need to be found.
+        Number of lattice vectors closest to the point that need to be found.
+        If `group_by_length=True`, all vectors within the `n` shortest
+        distinct distances are found.
+    group_by_length : bool
+        Whether to count all vectors at distances that coincide within `rtol`
+        as a single one towards `n`. Useful for finding the `n`-th nearest
+        neighbors of high-symmetry points.
+    rtol : float
+        If `group_by_length=True`, relative tolerance when deciding whether
+        distances are equal.
 
     Returns
     -------
     coords : numpy array
-        An array with the coefficients of the lattice vectors closest to the
+        An array with the coefficients of the `n` closest lattice vectors,
+        or, if `group_by_length=True`, of all lattice vectors within the
+        `n` shortest distinct distances from the
         requested point.
 
     Notes
@@ -142,32 +152,84 @@ def cvp(vec, basis, n=1):
     if basis.ndim != 2:
         raise ValueError('`basis` must be a 2d array-like object.')
     vec = np.asarray(vec)
-    # TODO: change to rcond=None once we depend on numpy >= 1.14.
-    center_coords = np.linalg.lstsq(basis.T, vec, rcond=-1)[0]
-    center_coords = np.array(np.round(center_coords), int)
-    # Cutoff radius for n-th nearest neighbor.
-    rad = 1
-    nth_dist = np.inf
+    # Compute the coordinates of the projection of `vec` onto the space
+    # spanned by `basis`, in units of the `basis` vectors.
+    vec_bas = la.lstsq(basis.T, vec)[0]
+    # Round the coordinates; this finds the lattice point which is
+    # the center of the parallelepiped that contains `vec`, so `vec` is
+    # guaranteed to lie in the parallelepiped with edges `basis`
+    # centered at `center_coords`.
+    center_coords = np.array(np.round(vec_bas), int)
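+    # (E.g. with `basis = np.eye(2)` and `vec = [0.4, 1.6]`, `center_coords`
+    # is `[0, 2]`.)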
+    # Make `vec` the projection of `vec` onto the space spanned by `basis`.
+    vec = vec_bas @ basis
+    bbt = basis @ basis.T
+    # The volume of a parallelepiped spanned by `basis` is `sqrt(det(bbt))`.
+    # (We use this instead of `det(basis)` because basis does not necessarily
+    # span the full space, hence not always a square matrix.)
+    # We calculate the radius `rad` of the largest sphere that can be inscribed
+    # in the parallelepiped spanned by `basis`. The area of each face spanned
+    # by one less vector in `basis` is given by the above formula applied to `bbt`
+    # with one row and column discarded (first minor). We utilize the relation
+    # between entries of the inverse matrix and first minors. The perpendicular
+    # size of the parallelepiped is given by the volume of the parallelepiped
+    # divided by the area of the face. We choose the smallest perpendicular
+    # size as the diameter of the largest sphere inside.
+    rad = 0.5 / np.sqrt(np.max(np.diag(la.inv(bbt))))
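+    # (Illustrative check: for an orthonormal basis such as `np.eye(3)`,
+    # `bbt` is the identity and `rad = 0.5`, the radius of the largest
+    # sphere inside a unit cube.)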
+
+    l = 1
     while True:
-        r = np.round(rad * np.linalg.cond(basis)) + 1
-        points = np.mgrid[tuple(slice(i - r, i + r) for i in center_coords)]
+        # Generate all lattice points in and on the boundary of a
+        # parallelepiped of size `2*l` (in units of `basis`) around
+        # `center_coords`.
+        points = np.mgrid[tuple(slice(i - l, i + l + 1) for i in center_coords)]
         points = points.reshape(basis.shape[0], -1).T
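+        # (For example, with `l = 1` in three dimensions this is the
+        # 3x3x3 block of 27 points around `center_coords`.)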
+        # If there are fewer than `n` points, enlarge the block.
         if len(points) < n:
-            rad += 1
+            l += 1
             continue
-        point_coords = np.dot(points, basis)
-        point_coords -= vec.T
-        distances = np.sqrt(np.sum(point_coords**2, 1))
+        point_coords = points @ basis
+        point_coords = point_coords - vec.T
+        distances = la.norm(point_coords, axis=1)
         order = np.argsort(distances)
-        new_nth_dist = distances[order[n - 1]]
-        if new_nth_dist < nth_dist:
-            nth_dist = new_nth_dist
-            rad += 1
+        distances = distances[order]
+        points = points[order]
+        if group_by_length:
+            # Group points that are equidistant within `rtol * rad`.
+            group_boundaries = np.where(np.diff(distances) > rtol * rad)[0]
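+            # (Illustrative: for `basis = np.eye(3)`, `vec = 0` and `n = 2`
+            # the first two groups are the origin and its 6 nearest
+            # neighbors, so 7 points are eventually returned; see the tests.)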
+            if len(group_boundaries) + 1 < n:
+                # Enlarge the block if there are fewer than `n` groups.
+                l += 1
+                continue
+            elif len(group_boundaries) == n - 1:
+                # If there are exactly `n` groups, choose largest distance.
+                dist_n = distances[-1]
+                points_keep = points
+            else:
+                # If there are more than `n` groups, choose the largest
+                # distance in the `n`-th group.
+                dist_n = distances[group_boundaries[n-1]]
+                # Only return points that are in the first `n` groups.
+                points_keep = points[:group_boundaries[n-1] + 1]
         else:
-            return np.array(points[order[:n]], int)
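+            # Without grouping, no distance tolerance is needed in the
+            # termination check below.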
+            rtol = 0
+            # Choose the `n`-th distance.
+            dist_n = distances[n-1]
+            # Only return the first `n` points.
+            points_keep = points[:n]
+        # We have covered all lattice points within a distance `(2*l-1) * rad`
+        # of the parallelepiped spanned by `basis` and centered at
+        # `center_coords`. Because `vec` is guaranteed to lie in this
+        # parallelepiped, if the current `dist_n` is smaller than
+        # `(2*l-1) * rad` we have surely found all closer lattice points.
+        if dist_n < (2*l - 1 - rtol) * rad:
+            return np.array(points_keep, int)
+        else:
+            # Otherwise there may be closer lattice points that we have not
+            # found yet; increase `l`.
+            l += 1
+            continue
 
 
-def voronoi(basis):
+def voronoi(basis, reduced=False, rtol=1e-09):
     """
     Return an array of lattice vectors forming its voronoi cell.
 
@@ -175,27 +237,53 @@ def voronoi(basis):
     ----------
     basis : 2d array-like of floats
         Basis vectors for which the Voronoi neighbors have to be found.
+    reduced : bool
+        If False, exactly `2 (2^D - 1)` vectors are returned (where `D`
+        is the number of lattice vectors); these are not always the minimal
+        set of Voronoi vectors.
+        If True, only the minimal set of Voronoi vectors is returned.
+    rtol : float
+        Tolerance when deciding whether a vector is in the minimal set;
+        vectors whose associated faces are small compared to the size of
+        the unit cell are discarded.
 
     Returns
     -------
     voronoi_neighbors : numpy array of ints
-        All the lattice vectors that may potentially neighbor the origin.
-
-    Notes
-    -----
-    This algorithm does not calculate the minimal Voronoi cell of the lattice
-    and can be optimized. Its main aim is flood-fill, however, and better
-    safe than sorry.
+        All the lattice vectors that (potentially) neighbor the origin.
+        Array of length `2n`, where the second half contains the negatives
+        (`-1` times) of the vectors in the first half.
     """
     basis = np.asarray(basis)
     if basis.ndim != 2:
         raise ValueError('`basis` must be a 2d array-like object.')
+    # Find the halved lattice points: every face of the VC contains half
+    # of the lattice vector that is normal to the face.
+    # These points all potentially lie on a face of a VC,
+    # but not necessarily of the VC centered at the origin.
     displacements = list(product(*(len(basis) * [[0, .5]])))[1:]
-    vertices = np.array([cvp(np.dot(vec, basis), basis)[0] for vec in
+    # Find the nearest lattice point; this is the lattice point whose
+    # VC the face belongs to.
+    vertices = np.array([cvp(vec @ basis, basis)[0] for vec in
                          displacements])
+    # The lattice vector for a face is exactly twice the vector to the
+    # closest lattice point from the halved lattice point on the face.
     vertices = np.array(np.round((vertices - displacements) * 2), int)
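+    # (E.g. for `basis = np.eye(2)` the displacements are (0, 0.5), (0.5, 0)
+    # and (0.5, 0.5), giving candidate face vectors along (0, ±1), (±1, 0)
+    # and (±1, ±1).)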
-    for i in range(len(vertices)):
-        if not np.any(vertices[i]):
-            vertices[i] += 2 * np.array(displacements[i])
+    if reduced:
+        # Discard vertices that are not associated with a face of the VC.
+        # This happens if half of the lattice vector is outside the convex
+        # polytope defined by the rest of the vertices in `keep`.
+        bbt = basis @ basis.T
+        products = vertices @ bbt @ vertices.T
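+        # `products[i, j]` is the real-space inner product of face vectors
+        # `i` and `j`, since `bbt` is the Gram matrix of the basis vectors.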
+        keep = np.array([True] * len(vertices))
+        for i in range(len(vertices)):
+            # Keep `vertices[i]` only if the projection of half of
+            # `vertices[i]` onto every other vector in `vertices[keep]`
+            # (in units of that vector's length) is smaller than `0.5`.
+            mask = np.array([False if k == i else b for k, b in enumerate(keep)])
+            projections = 0.5 * products[i, mask] / np.diag(products)[mask]
+            if not np.all(np.abs(projections) < 0.5 - rtol):
+                keep[i] = False
+        vertices = vertices[keep]
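+        # (Illustrative: for `basis = np.eye(2)` the diagonal vector coming
+        # from the displacement (0.5, 0.5) is discarded here, since its
+        # projection onto the axis vectors equals 0.5, leaving the four face
+        # neighbors (±1, 0) and (0, ±1) once the negatives are appended below.)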
+
     vertices = np.concatenate([vertices, -vertices])
     return vertices
diff --git a/kwant/linalg/tests/test_lll.py b/kwant/linalg/tests/test_lll.py
index 2d2616615813faab5b3284095a24dab830cf6c72..0a4b2b350246144f733f32a640333772b85fa94d 100644
--- a/kwant/linalg/tests/test_lll.py
+++ b/kwant/linalg/tests/test_lll.py
@@ -32,3 +32,35 @@ def test_cvp():
                 point = 50 * rng.randn(j)
                 assert np.array_equal(lll.cvp(point, mat, 10)[:3],
                                       lll.cvp(point, mat, 3))
+
+    # Test equidistant vectors
+    # Cubic lattice
+    basis = np.eye(3)
+    vec = np.zeros((3))
+    assert len(lll.cvp(vec, basis, n=2, group_by_length=True)) == 7
+    assert len(lll.cvp(vec, basis, n=3, group_by_length=True)) == 19
+    vec = 0.5 * np.array([1, 1, 1])
+    assert len(lll.cvp(vec, basis, group_by_length=True)) == 8
+    vec = 0.5 * np.array([1, 1, 0])
+    assert len(lll.cvp(vec, basis, group_by_length=True)) == 4
+    vec = 0.5 * np.array([1, 0, 0])
+    assert len(lll.cvp(vec, basis, group_by_length=True)) == 2
+    # Square lattice with offset
+    offset = np.array([0, 0, 1])
+    basis = np.eye(3)[:2]
+    vec = np.zeros((3)) + rng.rand() * offset
+    assert len(lll.cvp(vec, basis, n=2, group_by_length=True)) == 5
+    assert len(lll.cvp(vec, basis, n=3, group_by_length=True)) == 9
+    vec = 0.5 * np.array([1, 1, 0]) + rng.rand() * offset
+    assert len(lll.cvp(vec, basis, group_by_length=True)) == 4
+    vec = 0.5 * np.array([1, 0, 0]) + rng.rand() * offset
+    assert len(lll.cvp(vec, basis, group_by_length=True)) == 2
+    # Hexagonal lattice
+    basis = np.array([[1, 0], [-0.5, 0.5 * np.sqrt(3)]])
+    vec = np.zeros((2))
+    assert len(lll.cvp(vec, basis, n=2, group_by_length=True)) == 7
+    assert len(lll.cvp(vec, basis, n=3, group_by_length=True)) == 13
+    vec = np.array([0.5, 0.5 / np.sqrt(3)])
+    assert len(lll.cvp(vec, basis, group_by_length=True)) == 3
+    assert len(lll.cvp(vec, basis, n=2, group_by_length=True)) == 6
+    assert len(lll.cvp(vec, basis, n=3, group_by_length=True)) == 12