diff --git a/kwant/graph/dijkstra.pyx b/kwant/graph/dijkstra.pyx new file mode 100644 index 0000000000000000000000000000000000000000..846a04bcb159ab713515d6fbbb5faebb32aee1cf --- /dev/null +++ b/kwant/graph/dijkstra.pyx @@ -0,0 +1,401 @@ +# +# (2018) Modified by Kwant Authors +# +# Modifications +# ============= +# Merged and modified from scipy/sparse/csgraph/_shortest_path.pyx +# +# All shortest path algorithms except for Dijkstra's removed. +# Implementation of Dijkstra's algorithm modified to allow for specific +# use-cases required by Flux. The changes are documented in the docstring. +# +# +# Copyright (c) 2001, 2002 Enthought, Inc. +# All rights reserved. +# +# Copyright (c) 2003-2017 SciPy Developers. +# All rights reserved. +# +# Copyright (c) 2011 Jake Vanderplas <vanderplas@astro.washington.edu> +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# a. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# b. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# c. Neither the name of Enthought nor the names of the SciPy Developers +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS +# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, +# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. + +import numpy as np +cimport numpy as np +from numpy.math cimport INFINITY as inf + +from libc.stdlib cimport malloc, free +from libc.string cimport memset + +ctypedef np.float64_t DTYPE_t +ctypedef np.int32_t ITYPE_t +ITYPE = np.int32 + + +def dijkstra_directed( + object graph, + ITYPE_t[:] sources, + ITYPE_t[:] targets, + bint return_paths=True, + bint return_predecessors=False): + """Modified directed Dijkstra algorithm. + + Edges with infinite weight are treated as if they do not exist. + + The shortest paths between edge pairs 'zip(sources, targets)' + are found. + + If 'len(sources) == 1' then this routine can be used to + compute the one-to-all distances by passing an integer + greater than any node in 'graph' as the 'target'. + In this case 'return_predecessors' may be specified, and + the predecessor matrix will also be returned. + + 'return_predecessors' and 'return_paths' are mutually exclusive. 
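+
+    For example, the predecessor tree of every node reachable from node 0
+    in an 'n'-node CSR graph 'g' could be obtained roughly as::
+
+        sources = np.array([0], dtype=np.int32)
+        targets = np.array([n], dtype=np.int32)   # beyond any node: no early exit
+        _, pred = dijkstra_directed(g, sources, targets,
+                                    return_paths=False,
+                                    return_predecessors=True)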
+ + Returns + ------- + if return_paths: + (paths, path_lengths) + elif return_predecessors: + (path_lengths, predecessors) + else: + path_lengths + + """ +# Implementation of Dijkstra's algorithm modified to allow for +# early exit (when target is found) and return the path from source +# to target, rather than the whole predecessor matrix. In addition +# graph edges with infinite weight are treated as if they do not exist. + + cdef ITYPE_t[:] csr_indices = graph.indices, csr_indptr = graph.indptr + cdef DTYPE_t[:] csr_weights = graph.data + + # This implementation of Dijkstra's algorithm is very tightly coupled + # to our use-case in 'flux', so we allow ourselves to assert + assert sources.shape[0] == targets.shape[0] + assert graph.shape[0] == graph.shape[1] + assert not (return_predecessors and return_paths) + assert not (return_predecessors and (sources.shape[0] > 1)) + + cdef unsigned int num_links = sources.shape[0], num_nodes = graph.shape[0] + + cdef unsigned int i, k, j_source, j_target, j_current + cdef ITYPE_t j + + cdef DTYPE_t next_val + + cdef FibonacciHeap heap + cdef FibonacciNode *v + cdef FibonacciNode *current_node + cdef FibonacciNode* nodes = <FibonacciNode*> malloc(num_nodes * sizeof(FibonacciNode)) + + cdef ITYPE_t[:] pred = np.empty((num_nodes,), dtype=ITYPE) + + + # outputs + cdef DTYPE_t[:] path_lengths = np.zeros((num_links,), float) + cdef list paths + if return_paths: + paths = [] + + for i in range(num_links): + j_source = sources[i] + j_target = targets[i] + + for k in range(num_nodes): + initialize_node(&nodes[k], k) + pred[:] = -1 # only useful for debugging + + heap.min_node = NULL + insert_node(&heap, &nodes[j_source]) + + while heap.min_node: + v = remove_min(&heap) + v.state = SCANNED + + if v.index == j_target: + path_lengths[i] = v.val + if return_paths: + paths.append(_calculate_path(pred, j_source, j_target)) + break # next iteration of outer 'for' loop + + for j in range(csr_indptr[v.index], csr_indptr[v.index + 1]): + if csr_weights[j] == inf: + # Treat infinite weight links as missing + continue + j_current = csr_indices[j] + current_node = &nodes[j_current] + if current_node.state != SCANNED: + next_val = v.val + csr_weights[j] + if current_node.state == NOT_IN_HEAP: + current_node.state = IN_HEAP + current_node.val = next_val + insert_node(&heap, current_node) + pred[j_current] = v.index + elif current_node.val > next_val: + decrease_val(&heap, current_node, + next_val) + pred[j_current] = v.index + + free(nodes) + if return_paths: + return paths, path_lengths + elif return_predecessors: + return path_lengths, pred + else: + return path_lengths + + +cdef list _calculate_path(ITYPE_t[:] pred, int j_source, int j_target): + visited = [] + cdef int node = j_target + while node != j_source: + visited.append(node) + node = pred[node] + visited.append(j_source) + return visited + + +###################################################################### +# FibonacciNode structure +# This structure and the operations on it are the nodes of the +# Fibonacci heap. 
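+#  In Dijkstra's algorithm the heap serves as the priority queue of
+#  tentative distances: insertion and decrease-key cost O(1) amortized,
+#  while extract-min costs O(log n) amortized, which keeps the early-exit
+#  search over the CSR graph cheap.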
+# +cdef enum FibonacciState: + SCANNED + NOT_IN_HEAP + IN_HEAP + + +cdef struct FibonacciNode: + unsigned int index + unsigned int rank + FibonacciState state + DTYPE_t val + FibonacciNode* parent + FibonacciNode* left_sibling + FibonacciNode* right_sibling + FibonacciNode* children + + +cdef void initialize_node(FibonacciNode* node, + unsigned int index, + DTYPE_t val=0): + # Assumptions: - node is a valid pointer + # - node is not currently part of a heap + node.index = index + node.val = val + node.rank = 0 + node.state = NOT_IN_HEAP + + node.parent = NULL + node.left_sibling = NULL + node.right_sibling = NULL + node.children = NULL + + +cdef FibonacciNode* rightmost_sibling(FibonacciNode* node): + # Assumptions: - node is a valid pointer + cdef FibonacciNode* temp = node + while(temp.right_sibling): + temp = temp.right_sibling + return temp + + +cdef FibonacciNode* leftmost_sibling(FibonacciNode* node): + # Assumptions: - node is a valid pointer + cdef FibonacciNode* temp = node + while(temp.left_sibling): + temp = temp.left_sibling + return temp + + +cdef void add_child(FibonacciNode* node, FibonacciNode* new_child): + # Assumptions: - node is a valid pointer + # - new_child is a valid pointer + # - new_child is not the sibling or child of another node + new_child.parent = node + + if node.children: + add_sibling(node.children, new_child) + else: + node.children = new_child + new_child.right_sibling = NULL + new_child.left_sibling = NULL + node.rank = 1 + + +cdef void add_sibling(FibonacciNode* node, FibonacciNode* new_sibling): + # Assumptions: - node is a valid pointer + # - new_sibling is a valid pointer + # - new_sibling is not the child or sibling of another node + cdef FibonacciNode* temp = rightmost_sibling(node) + temp.right_sibling = new_sibling + new_sibling.left_sibling = temp + new_sibling.right_sibling = NULL + new_sibling.parent = node.parent + if new_sibling.parent: + new_sibling.parent.rank += 1 + + +cdef void remove(FibonacciNode* node): + # Assumptions: - node is a valid pointer + if node.parent: + node.parent.rank -= 1 + if node.left_sibling: + node.parent.children = node.left_sibling + elif node.right_sibling: + node.parent.children = node.right_sibling + else: + node.parent.children = NULL + + if node.left_sibling: + node.left_sibling.right_sibling = node.right_sibling + if node.right_sibling: + node.right_sibling.left_sibling = node.left_sibling + + node.left_sibling = NULL + node.right_sibling = NULL + node.parent = NULL + + +###################################################################### +# FibonacciHeap structure +# This structure and operations on it use the FibonacciNode +# routines to implement a Fibonacci heap + +ctypedef FibonacciNode* pFibonacciNode + + +cdef struct FibonacciHeap: + FibonacciNode* min_node + pFibonacciNode[100] roots_by_rank # maximum number of nodes is ~2^100. 
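+# 'roots_by_rank' is scratch space for 'remove_min': while consolidating the
+# root list it keeps at most one tree of each rank and links pairs of trees
+# with equal rank, so 100 slots are ample for any graph that fits in memory.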
+ + +cdef void insert_node(FibonacciHeap* heap, + FibonacciNode* node): + # Assumptions: - heap is a valid pointer + # - node is a valid pointer + # - node is not the child or sibling of another node + if heap.min_node: + add_sibling(heap.min_node, node) + if node.val < heap.min_node.val: + heap.min_node = node + else: + heap.min_node = node + + +cdef void decrease_val(FibonacciHeap* heap, + FibonacciNode* node, + DTYPE_t newval): + # Assumptions: - heap is a valid pointer + # - newval <= node.val + # - node is a valid pointer + # - node is not the child or sibling of another node + # - node is in the heap + node.val = newval + if node.parent and (node.parent.val >= newval): + remove(node) + insert_node(heap, node) + elif heap.min_node.val > node.val: + heap.min_node = node + + +cdef void link(FibonacciHeap* heap, FibonacciNode* node): + # Assumptions: - heap is a valid pointer + # - node is a valid pointer + # - node is already within heap + + cdef FibonacciNode *linknode + cdef FibonacciNode *parent + cdef FibonacciNode *child + + if heap.roots_by_rank[node.rank] == NULL: + heap.roots_by_rank[node.rank] = node + else: + linknode = heap.roots_by_rank[node.rank] + heap.roots_by_rank[node.rank] = NULL + + if node.val < linknode.val or node == heap.min_node: + remove(linknode) + add_child(node, linknode) + link(heap, node) + else: + remove(node) + add_child(linknode, node) + link(heap, linknode) + + +cdef FibonacciNode* remove_min(FibonacciHeap* heap): + # Assumptions: - heap is a valid pointer + # - heap.min_node is a valid pointer + cdef FibonacciNode *temp + cdef FibonacciNode *temp_right + cdef FibonacciNode *out + cdef unsigned int i + + # make all min_node children into root nodes + if heap.min_node.children: + temp = leftmost_sibling(heap.min_node.children) + temp_right = NULL + + while temp: + temp_right = temp.right_sibling + remove(temp) + add_sibling(heap.min_node, temp) + temp = temp_right + + heap.min_node.children = NULL + + # choose a root node other than min_node + temp = leftmost_sibling(heap.min_node) + if temp == heap.min_node: + if heap.min_node.right_sibling: + temp = heap.min_node.right_sibling + else: + out = heap.min_node + heap.min_node = NULL + return out + + # remove min_node, and point heap to the new min + out = heap.min_node + remove(heap.min_node) + heap.min_node = temp + + # re-link the heap + for i in range(100): + heap.roots_by_rank[i] = NULL + + while temp: + if temp.val < heap.min_node.val: + heap.min_node = temp + temp_right = temp.right_sibling + link(heap, temp) + temp = temp_right + + return out diff --git a/kwant/physics/__init__.py b/kwant/physics/__init__.py index c3e0564129bc323d324909f6a2de6a262d5e173d..d59fc85e3f6f531df879fde2d50e46b3dd3fb5ca 100644 --- a/kwant/physics/__init__.py +++ b/kwant/physics/__init__.py @@ -1,5 +1,4 @@ -# Copyright 2011-2013 Kwant authors. -# +# Copyright 2011-2018 Kwant authors. # This file is part of Kwant. It is subject to the license terms in the file # LICENSE.rst found in the top-level directory of this distribution and at # http://kwant-project.org/license. A list of Kwant authors can be found in @@ -10,7 +9,7 @@ # Merge the public interface of all submodules. __all__ = [] -for module in ['leads', 'dispersion', 'noise', 'symmetry']: +for module in ['leads', 'dispersion', 'noise', 'symmetry', 'gauge']: exec('from . 
import {0}'.format(module)) exec('from .{0} import *'.format(module)) exec('__all__.extend({0}.__all__)'.format(module)) diff --git a/kwant/physics/gauge.py b/kwant/physics/gauge.py new file mode 100644 index 0000000000000000000000000000000000000000..164d3185ac1d18a9f00fe16eed7a73a92d91b3fc --- /dev/null +++ b/kwant/physics/gauge.py @@ -0,0 +1,408 @@ +# Copyright 2011-2018 Kwant authors. +# +# This file is part of Kwant. It is subject to the license terms in the file +# LICENSE.rst found in the top-level directory of this distribution and at +# http://kwant-project.org/license. A list of Kwant authors can be found in +# the file AUTHORS.rst at the top-level directory of this distribution and at +# http://kwant-project.org/authors. +"""Functions for fixing the magnetic gauge automatically in a Kwant system.""" + +import functools as ft + +import numpy as np +import scipy +from scipy.integrate import dblquad + +from .. import system, builder + +from ..graph.dijkstra import dijkstra_directed + +__all__ = ['magnetic_gauge'] + + +### Integation + +# Integrate vector field over triangle, for internal use by 'surface_integral' +# Triangle is (origin, origin + v1, origin + v2), 'n' is np.cross(v1, v2) + +def _quad_triangle(f, origin, v1, v2, n, tol): + if np.dot(n, n) < tol**2: # does triangle have significant area? + return 0 + + def g(x, y): + return np.dot(n, f(origin + x * v1 + y * v2)) + + result, *_ = dblquad(g, 0, 1, lambda x: 0, lambda x: 1-x) + return result.real + + +def _const_triangle(f, origin, v1, v2, n, tol): + return np.dot(f, n) / 2 + + +def _average_triangle(f, origin, v1, v2, n, tol): + return np.dot(n, f(origin + 1/3 * (v1 + v2))) / 2 + + +def surface_integral(f, loop, tol=1e-8, average=False): + """Calculate the surface integral of 'f' over a surface enclosed by 'loop'. + + This function only works for *divergence free* vector fields, where the + surface integral depends only on the boundary. + + Parameters + ---------- + f : callable or real n-vector + The vector field for which to calculate the surface integral. + If callable, takes a real n-vector as argument and returns a + real n-vector. + loop : sequence of vectors + Ordered sequence of real n-vectors (positions) that define the + vertices of the polygon that encloses the surface to integrate over. + tol : float, default: 1e-8 + Error tolerance on the result. + average : bool, default: False + If True, approximate the integral over each triangle using a + single function evaluation at the centre of the triangle. + """ + if callable(f): + integrator = _average_triangle if average else _quad_triangle + else: + integrator = _const_triangle + + origin, *points = loop + integral = 0 + # Split loop into triangles with 1 vertex on 'origin', evaluate + # the integral over each triangle and sum the result + for p1, p2 in zip(points, points[1:]): + v1 = p1 - origin + v2 = p2 - origin + n = np.cross(v1, v2) + integral += integrator(f, origin, v1, v2, n, tol) + return integral + + +### Loop finding graph algorithm + +def find_loops(graph, subgraph): + """ + Parameters + ---------- + graph : COO matrix + The complete undirected graph, where the values of the matrix are + the weights of the corresponding graph links. + subgraph : COO matrix + An subgraph of 'graph', with missing edges denoted by infinities. + Must have the same sparsity structure as 'graph'. + + Returns + ------- + A sequence of paths which are partially contained in the subgraph. + The loop is formed by adding a link between the first and last node. 
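+    For example, a returned path '[3, 7, 2]' represents the closed loop
+    3 -> 7 -> 2 -> 3, obtained by adding the link (2, 3).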
+
+    The ordering is such that the paths are made of links that belong to
+    the subgraph or to the previous closed loops.
+    """
+    # For each link we do 1 update of 'subgraph' and a call to
+    # 'dijkstra_directed'. It is cheaper to update the CSR
+    # matrix rather than convert to LIL and back every iteration.
+    subgraph = subgraph.tocsr()
+    graph = graph.tocsr()
+    assert same_sparsity_structure(subgraph, graph)
+
+    # Links in graph, but not in subgraph.
+    links_to_find = scipy.sparse.triu(graph - subgraph).tocoo()
+    links_to_find = np.vstack((links_to_find.row, links_to_find.col)).transpose()
+
+    links_to_find, min_length = order_links(subgraph, links_to_find)
+
+    # Find shortest path between each link in turn, updating the subgraph with
+    # the links as we go.
+    loops = []
+    while len(links_to_find) > 0:
+        frm, to = links_to_find[0]
+        (path,), (path_length,) = dijkstra_directed(subgraph,
+                                                    sources=np.array([frm]),
+                                                    targets=np.array([to]))
+
+        # Reorder links that are still to find based on the loop length in
+        # the updated graph. We only reorder when the path length for *this*
+        # link is a "little bit" longer than the previously determined minimum.
+        # The "little bit" is needed so we don't needlessly re-order the links
+        # on amorphous lattices.
+        if path_length > min_length * 1.1:
+            links_to_find, min_length = order_links(subgraph, links_to_find)
+        else:
+            # Assumes that 'graph' and 'subgraph' have the same sparsity structure.
+            assign_csr(subgraph, graph, (frm, to))
+            assign_csr(subgraph, graph, (to, frm))
+            loops.append(path)
+            links_to_find = links_to_find[1:]
+
+    return loops
+
+
+def order_links(subgraph, links_to_find):
+    if len(links_to_find) == 0:
+        return [], None
+    # Order 'links_to_find' by length of shortest path between the nodes of the link
+    path_lengths = dijkstra_directed(subgraph,
+                                     sources=links_to_find[:, 0],
+                                     targets=links_to_find[:, 1],
+                                     return_paths=False)
+    idxs = np.argsort(path_lengths)
+    return links_to_find[idxs], path_lengths[idxs[0]]
+
+
+### Generic sparse matrix utilities
+
+def assign_csr(a, b, element):
+    """Assign a single element from a CSR matrix to another.
+
+    Parameters
+    ----------
+    a : CSR matrix
+    b : CSR matrix or scalar
+        If a CSR matrix, must have the same sparsity structure
+        as 'a'. If a scalar, must be the same dtype as 'a'.
+    element: (int, int)
+        Row and column indices of the element to set.
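+
+    For example, 'assign_csr(tree, graph, (i, j))' copies the weight of the
+    link (i, j) from 'graph' into 'tree', while 'assign_csr(tree, np.inf, (i, j))'
+    overwrites that entry with infinity, marking the link as absent.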
+ """ + assert isinstance(a, scipy.sparse.csr_matrix) + row, col = element + for j in range(a.indptr[row], a.indptr[row + 1]): + if a.indices[j] == col: + break + else: + raise ValueError('{} not in sparse matrix'.format(element)) + if isinstance(b, scipy.sparse.csr_matrix): + a.data[j] = b.data[j] + else: + a.data[j] = b + + +def same_sparsity_structure(a, b): + a = a.tocsr().sorted_indices() + b = b.tocsr().sorted_indices() + return (np.array_equal(a.indices, b.indices) + and np.array_equal(a.indptr, b.indptr)) + + +def add_coo_matrices(*mats, shape): + """Add a sequence of COO matrices by appending their constituent arrays.""" + values = np.hstack([mat.data for mat in mats]) + rows = np.hstack([mat.row for mat in mats]) + cols = np.hstack([mat.col for mat in mats]) + return scipy.sparse.coo_matrix((values, (rows, cols)), shape=shape) + + +def shift_diagonally(mat, shift, shape): + """Shift the row/column indices of a COO matrix.""" + return scipy.sparse.coo_matrix( + (mat.data, (mat.row + shift, mat.col + shift)), + shape=shape) + + +def distance_matrix(links, pos, shape): + """Return the distances between the provided links as a COO matrix. + + Parameters + ---------- + links : sequence of pairs of int + The links for which to find the lengths. + pos : callable: int -> vector + Map from link ends (integers) to realspace position. + shape : tuple + """ + if len(links) == 0: # numpy does not like 'if array' + return scipy.sparse.coo_matrix(shape) + links = np.array(links) + distances = np.array([pos(i) - pos(j) for i, j in links]) + distances = np.linalg.norm(distances, axis=1) + return scipy.sparse.coo_matrix((distances, links.T), shape=shape) + + +### Loop finding +# +# All of these functions take a finalized Kwant system and return +# a sequence of loops. Each loop is a sequence of sites (integers) +# that one visits when traversing the loop. The first and final sites +# are assumed to be linked, which closes the loop. The links that one +# traverses when going round a loop is thus: +# +# list(zip(loop, loop[1:])) + [(loop[-1], loop[0])] +# +# These loops are later used to fix the magnetic gauge in the system. +# All of the links except the final one are assumed to have their gauge +# fixed (i.e. the phase across them is known), and gauge of the final +# link is the one to be determined. + + +def loops_in_finite(syst): + """Find the loops in a finite system with no leads. + + The site indices in the returned loops are those of the system, + so they may be used as indices to 'syst.sites', or with 'syst.pos'. + """ + assert isinstance(syst, system.FiniteSystem) and syst.leads == [] + nsites = len(syst.sites) + + # Fix the gauge across the minimum spanning tree of the system graph. + graph = distance_matrix(list(syst.graph), + pos=syst.pos, shape=(nsites, nsites)) + spanning_tree = shortest_distance_forest(graph) + return find_loops(graph, spanning_tree) + + +def shortest_distance_forest(graph): + # Grow a forest of minimum distance trees for all connected components of the graph + graph = graph.tocsr() + tree = graph.copy() + # set every entry in tree to infinity + tree.data[:] = np.inf + unvisited = set(range(graph.shape[0])) + # set the target node to be greater than any node in the graph. + # This way we explore the whole graph. 
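+    # Since this target can never be reached, the early-exit branch of
+    # 'dijkstra_directed' never triggers; the search stops only when the
+    # heap is empty, i.e. once every node connected to the root has been
+    # assigned a predecessor.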
+ end = np.array([graph.shape[0] + 1], dtype=np.int32) + + while unvisited: + # Choose an arbitrary element as the root + root = unvisited.pop() + root = np.array([root], dtype=np.int32) + _, pred = dijkstra_directed(graph, sources=root, targets=end, + return_predecessors=True, return_paths=False) + for i, p in enumerate(pred): + # -1 if node 'i' has no predecessor. Either it is the root node, + # or it was not reached. + if p != -1: + unvisited.remove(i) + assign_csr(tree, graph, (i, p)) + assign_csr(tree, graph, (p, i)) + return tree + + +### Phase calculation + +def calculate_phases(loops, pos, previous_phase, flux): + """Calculate the phase across the terminal links of a set of loops + + Parameters + ---------- + loops : sequence of sequences of int + The loops over which to calculate the flux. We wish to find the phase + over the link connecting the first and last sites in each loop. + The phase across all other links in a given loop is assumed known. + pos : callable : int -> ndarray + A map from site (integer) to realspace position. + previous_phase : callable + Takes a dict that maps from links to phases, and a loop and returns + the sum of the phases across each link in the loop, *except* the link + between the first and last site in the loop. + flux : callable + Takes a sequence of positions and returns the magnetic flux through the + surface defined by the provided loop. + + Returns + ------- + phases : dict : (int, int) -> float + A map from links to the phase across those links. + """ + phases = dict() + for loop in loops: + tail, head = loop[-1], loop[0] + integral = flux([pos(p) for p in loop]) + phases[tail, head] = integral - previous_phase(phases, loop) + return phases + + +# These functions are to be used with 'calculate_phases'. +# 'phases' always stores *either* the phase across (i, j) *or* +# (j, i), and never both. If a phase is not present it is assumed to +# be zero. + + +def _previous_phase_finite(phases, path): + previous_phase = 0 + for i, j in zip(path, path[1:]): + previous_phase += phases.get((i, j), 0) + previous_phase -= phases.get((j, i), 0) + return previous_phase + + +### High-level interface +# +# These functions glue all the above functionality together. + +# Wrapper for phase dict that takes high-level sites +def _finite_wrapper(syst, phases, a, b): + i = syst.id_by_site[a] + j = syst.id_by_site[b] + # We only store *either* (i, j) *or* (j, i). If not present + # then the phase is zero by definition. + return phases.get((i, j), -phases.get((j, i), 0)) + + +def _gauge_finite(syst): + loops = loops_in_finite(syst) + + def _gauge(syst_field, tol=1E-8, average=False): + integrate = ft.partial(surface_integral, syst_field, + tol=tol, average=average) + phases = calculate_phases( + loops, + syst.pos, + _previous_phase_finite, + integrate, + ) + return ft.partial(_finite_wrapper, syst, phases) + + return _gauge + + +def magnetic_gauge(syst): + """Fix the magnetic gauge for a finalized system. + + Fix the magnetic gauge for a Kwant system. This can + be used to later calculate the Peierls phases that + should be applied to each hopping, given a magnetic field. + + Parameters + ---------- + syst : kwant.builder.FiniteSystem + May not have leads attached (this restriction will + be lifted in the future). + + Returns + ------- + gauge : callable + When called with a magnetic field as argument, returns + another callable 'phase' that returns the Peierls phase to + apply to a given hopping. 
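+        The 'phase' callable takes the two sites of a hopping, in the
+        same order as the arguments of a Kwant value function (see the
+        example below).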
+ + Examples + -------- + The following illustrates basic usage: + + >>> import numpy as np + >>> import kwant + >>> + >>> def hopping(a, b, t, phi): + >>> return -t * np.exp(-1j * phi(a, b)) + >>> + >>> syst = make_system(hopping).finalized() + >>> gauge = kwant.physics.magnetic_gauge(syst) + >>> + >>> def B(pos): + >>> return np.exp(-np.sum(pos * pos)) + >>> + >>> kwant.hamiltonian_submatrix(syst, params=dict(t=1, phi=gauge(B)) + """ + if isinstance(syst, builder.FiniteSystem): + if syst.leads: + raise ValueError('Can only fix magnetic gauge for finite systems ' + 'without leads') + else: + return _gauge_finite(syst) + else: + raise TypeError('Can only fix magnetic gauge for finite systems ' + 'without leads') diff --git a/kwant/physics/tests/test_gauge.py b/kwant/physics/tests/test_gauge.py new file mode 100644 index 0000000000000000000000000000000000000000..762c4a0c3010538201cf32344c24bb829d3526f4 --- /dev/null +++ b/kwant/physics/tests/test_gauge.py @@ -0,0 +1,308 @@ +from collections import namedtuple, Counter +from math import sqrt +import numpy as np +import pytest + +from ... import lattice +from ...builder import HoppingKind, Builder, NoSymmetry, Site +from .. import gauge + +## Utilities + +# TODO: remove in favour of 'scipy.stats.special_ortho_group' once +# we depend on scipy 0.18 +class special_ortho_group_gen: + + def rvs(self, dim): + H = np.eye(dim) + D = np.empty((dim,)) + for n in range(dim-1): + x = np.random.normal(size=(dim-n,)) + D[n] = np.sign(x[0]) if x[0] != 0 else 1 + x[0] += D[n]*np.sqrt((x*x).sum()) + # Householder transformation + Hx = (np.eye(dim-n) + - 2.*np.outer(x, x)/(x*x).sum()) + mat = np.eye(dim) + mat[n:, n:] = Hx + H = np.dot(H, mat) + D[-1] = (-1)**(dim-1)*D[:-1].prod() + # Equivalent to np.dot(np.diag(D), H) but faster, apparently + H = (D*H.T).T + return H + +special_ortho_group = special_ortho_group_gen() + + +square_lattice = lattice.square(norbs=1, name='square') +honeycomb_lattice = lattice.honeycomb(norbs=1, name='honeycomb') +cubic_lattice = lattice.cubic(norbs=1, name='cubic') + +def rectangle(W, L): + return ( + lambda s: 0 <= s.pos[0] < L and 0 <= s.pos[1] < W, + (L/2, W/2) + ) + + +def ring(r_inner, r_outer): + return ( + lambda s: r_inner <= np.linalg.norm(s.pos) <= r_outer, + ((r_inner + r_outer) / 2, 0) + ) + +def wedge(W): + return ( + lambda s: (0 <= s.pos[0] < W) and (0 <= s.pos[1] <= s.pos[0]), + (0, 0) + ) + + +def half_ring(r_inner, r_outer): + in_ring, _ = ring(r_inner, r_outer) + return ( + lambda s: s.pos[0] <= 0 and in_ring(s), + (-(r_inner + r_outer) / 2, 0) + ) + + +def cuboid(a, b, c): + return ( + lambda s: 0 <= s.pos[0] < a and 0 <= s.pos[1] < b and 0 <= s.pos[2] < c, + (a/2, b/2, c/2) + ) + +def hypercube(dim, W): + return ( + lambda s: all(0 <= x < W for x in s.pos), + (W / 2,) * dim + ) + + +def circle(r): + return ( + lambda s: np.linalg.norm(s.pos) < r, + (0, 0) + ) + +def ball(dim, r): + return ( + lambda s: np.linalg.norm(s.pos) < r, + (0,) * dim + ) + + +def model(lat, neighbors): + syst = Builder(lattice.TranslationalSymmetry(*lat.prim_vecs)) + if hasattr(lat, 'sublattices'): + for l in lat.sublattices: + zv = (0,) * len(l.prim_vecs) + syst[l(*zv)] = None + else: + zv = (0,) * len(l.prim_vecs) + syst[lat(*zv)] = None + for r in range(neighbors): + syst[lat.neighbors(r + 1)] = None + return syst + + +def check_loop_kind(loop_kind): + (_, first_fam_a, prev_fam_b), *rest = loop_kind + for (_, fam_a, fam_b) in rest: + if prev_fam_b != fam_a: + raise ValueError('Invalid loop kind: does not close') + prev_fam_b = 
fam_b + # loop closes + net_delta = np.sum([hk.delta for hk in loop_kind]) + if first_fam_a != fam_b or np.any(net_delta != 0): + raise ValueError('Invalid loop kind: does not close') + + +def available_loops(syst, loop_kind): + + def maybe_loop(site): + loop = [site] + a = site + for delta, family_a, family_b in loop_kind: + b = Site(family_b, a.tag + delta, True) + if family_a != a.family or (a, b) not in syst: + return None + loop.append(b) + a = b + return loop + + check_loop_kind(loop_kind) + + return list(filter(None, map(maybe_loop, syst.sites()))) + + +def loop_to_links(loop): + return list(zip(loop, loop[1:])) + + +def no_symmetry(lat, neighbors): + return NoSymmetry() + + +def translational_symmetry(lat, neighbors): + return lattice.TranslationalSymmetry(int((neighbors + 1)/2) * lat.prim_vecs[0]) + + +## Tests + +# Tests that phase around a loop is equal to the flux through the loop. +# First we define the loops that we want to test, for various latticeutils. +# If a system does not support a particular kind of loop, they will simply +# not be generated. + +Loop = namedtuple('Loop', ('path', 'flux')) + +square_loops = [([HoppingKind(d, square_lattice) for d in l.path], l.flux) + for l in [ + # 1st nearest neighbors + Loop(path=[(1, 0), (0, 1), (-1, 0), (0, -1)], flux=1), + # 2nd nearest neighbors + Loop(path=[(1, 0), (0, 1), (-1, -1)], flux=0.5), + Loop(path=[(1, 0), (-1, 1), (0, -1)], flux=0.5), + # 3rd nearest neighbors + Loop(path=[(2, 0), (0, 1), (-2, 0), (0, -1)], flux=2), + Loop(path=[(2, 0), (-1, 1), (-1, 0), (0, -1)], flux=1.5), +]] + +a, b = honeycomb_lattice.sublattices +honeycomb_loops = [([HoppingKind(d, a, b) for *d, a, b in l.path], l.flux) + for l in [ + # 1st nearest neighbors + Loop(path=[(0, 0, a, b), (-1, 1, b, a), (0, -1, a, b), (0, 0, b, a), + (1, -1, a, b), (0, 1, b, a)], + flux=sqrt(3)/2), + # 2nd nearest neighbors + Loop(path=[(-1, 1, a, a), (0, -1, a, a), (1, 0, a, a)], + flux=sqrt(3)/4), + Loop(path=[(-1, 0, b, b), (1, -1, b, b), (0, 1, b, b)], + flux=sqrt(3)/4), +]] + +cubic_loops = [([HoppingKind(d, cubic_lattice) for d in l.path], l.flux) + for l in [ + # 1st nearest neighbors + Loop(path=[(1, 0, 0), (0, 1, 0), (-1, 0, 0), (0, -1, 0)], flux=1), + Loop(path=[(0, 1, 0), (0, 0, 1), (0, -1, 0), (0, 0, -1)], flux=0), + Loop(path=[(1, 0, 0), (0, 0, 1), (-1, 0, 0), (0, 0, -1)], flux=0), + # 2nd nearest neighbors + Loop(path=[(1, 0, 0), (-1, 1, 0), (0, -1, 0)], flux=0.5), + Loop(path=[(1, 0, 0), (0, 1, 0), (-1, -1, 0)], flux=0.5), + Loop(path=[(1, 0, 0), (-1, 0, 1), (0, 0, -1)], flux=0), + Loop(path=[(1, 0, 0), (0, 0, 1), (-1, 0, -1)], flux=0), + Loop(path=[(0, 1, 0), (0, -1, 1), (0, 0, -1)], flux=0), + Loop(path=[(0, 1, 0), (0, 0, 1), (0, -1, -1)], flux=0), + # 3rd nearest neighbors + Loop(path=[(1, 1, 1), (0, 0, -1), (-1, -1, 0)], flux=0), + Loop(path=[(1, 1, 1), (-1, 0, -1), (0, -1, 0)], flux=0.5), +]] + +square = (square_lattice, square_loops) +honeycomb = (honeycomb_lattice, honeycomb_loops) +cubic = (cubic_lattice, cubic_loops) + + +def _test_phase_loops(syst, phases, loops): + for loop_kind, loop_flux in loops: + for loop in available_loops(syst, loop_kind): + loop_phase = sum(phases(a, b) for a, b in loop_to_links(loop)) + assert np.isclose(loop_phase, loop_flux) + + +@pytest.mark.parametrize("neighbors", [1, 2, 3]) +@pytest.mark.parametrize("symmetry", [no_symmetry], + ids=['finite']) +@pytest.mark.parametrize("lattice, loops", [square, honeycomb, cubic], + ids=['square', 'honeycomb', 'cubic']) +def test_phases(lattice, neighbors, symmetry, loops): + 
"""Check that the phases around common loops are equal to the flux, for + finite and infinite systems with uniform magnetic field. + """ + W = 4 + dim = len(lattice.prim_vecs) + field = np.array([0, 0, 1]) if dim == 3 else 1 + + syst = Builder(symmetry(lattice, neighbors)) + syst.fill(model(lattice, neighbors), *hypercube(dim, W)) + + this_gauge = gauge.magnetic_gauge(syst.finalized()) + phases = this_gauge(field) + + _test_phase_loops(syst, phases, loops) + + +# Test internal parts of magnetic_gauge + +@pytest.mark.parametrize("shape", + [rectangle(5, 5), circle(4), + half_ring(5, 10)], + ids=['rectangle', 'circle', 'half-ring'] + ) +@pytest.mark.parametrize("lattice", [square_lattice, honeycomb_lattice], + ids=['square', 'honeycomb']) +@pytest.mark.parametrize("neighbors", [1, 2, 3]) +def test_minimal_cycle_basis(lattice, neighbors, shape): + """Check that for lattice models on genus 0 shapes, nearly + all loops have the same (minimal) length. This is not an + equality, as there may be weird loops on the edges. + """ + syst = Builder() + syst.fill(model(lattice, neighbors), *shape) + syst = syst.finalized() + + loops = gauge.loops_in_finite(syst) + loop_counts = Counter(map(len, loops)) + min_loop = min(loop_counts) + # arbitrarily allow 1% of slightly longer loops; + # we don't make stronger guarantees about the quality + # of our loop basis + assert loop_counts[min_loop] / len(loops) > 0.99, loop_counts + + +def random_loop(n, max_radius=10, planar=False): + """Return a loop of 'n' points. + + The loop is in the x-y plane if 'planar is False', otherwise + each point is given a random perturbation in the z direction + """ + theta = np.sort(2 * np.pi * np.random.rand(n)) + r = max_radius * np.random.rand(n) + if planar: + z = np.zeros((n,)) + else: + z = 2 * (max_radius / 5) * (np.random.rand(n) - 1) + return np.array([r * np.cos(theta), r * np.sin(theta), z]).transpose() + + +def test_constant_surface_integral(): + field_direction = np.random.rand(3) + field_direction /= np.linalg.norm(field_direction) + loop = random_loop(7) + + integral = gauge.surface_integral + + I = integral(lambda r: field_direction, loop) + assert np.isclose(I, integral(field_direction, loop)) + assert np.isclose(I, integral(lambda r: field_direction, loop, average=True)) + + +def circular_field(r_vec): + return np.array([r_vec[1], -r_vec[0], 0]) + + +def test_invariant_surface_integral(): + """Surface integral should be identical if we apply a random + rotation to loop and vector field. + """ + integral = gauge.surface_integral + # loop with random orientation + orig_loop = loop = random_loop(7) + I = integral(circular_field, loop) + for _ in range(4): + rot = special_ortho_group.rvs(3) + loop = orig_loop @ rot.transpose() + assert np.isclose(I, integral(lambda r: rot @ circular_field(rot.transpose() @ r), loop)) diff --git a/setup.py b/setup.py index 5808b9b0f09d255eab761e172245776e3ab18957..57bf904cd5e8222a9c8b8012df24b31d5d9baf3f 100755 --- a/setup.py +++ b/setup.py @@ -532,6 +532,8 @@ def main(): dict(sources=['kwant/graph/core.pyx'], depends=['kwant/graph/core.pxd', 'kwant/graph/defs.h', 'kwant/graph/defs.pxd'])), + ('kwant.graph.dijkstra', + dict(sources=['kwant/graph/dijkstra.pyx'])), ('kwant.linalg.lapack', dict(sources=['kwant/linalg/lapack.pyx'])), ('kwant.linalg._mumps',