Remove workarounds for limitations that have been fixed in our dependencies

parent 2b6dda7f
......@@ -9,21 +9,8 @@ Otherwise, this package has only very limited functionality of its own.
Generic functionality
---------------------
..
TODO: Once we depend on Sphinx 1.8, the documentation of __version__ can be
put into the "docstring": https://github.com/sphinx-doc/sphinx/issues/344
The version of Kwant is available under the name ``__version__``.
This string respects `PEP 440 <https://www.python.org/dev/peps/pep-0440/>`_
and has the following format
- Released version: '1.3.0', '1.3.1', etc.
- Alpha version: '1.2.0a0', '1.2.0a1', etc.
- Beta version: '1.1.0b0', '1.1.0b1', etc.
- Development version (derived from ``git describe --first-parent --dirty``):
'1.3.2.dev27+gdecf6893', '1.1.1.dev10+gabcd012.dirty', etc.
- Development version with incomplete information: 'unknown',
'unknown+g0123abc', etc.
.. autodata:: kwant.__version__
.. autosummary::
:toctree: generated/
......
......@@ -11,6 +11,20 @@ __all__ = []
from . import version
version.ensure_python()
__version__ = version.version
"""The version of Kwant is available under the name ``__version__``.
This string respects `PEP 440 <https://www.python.org/dev/peps/pep-0440/>`_
and has the following format
- Released version: '1.3.0', '1.3.1', etc.
- Alpha version: '1.2.0a0', '1.2.0a1', etc.
- Beta version: '1.1.0b0', '1.1.0b1', etc.
- Development version (derived from ``git describe --first-parent --dirty``):
'1.3.2.dev27+gdecf6893', '1.1.1.dev10+gabcd012.dirty', etc.
- Development version with incomplete information: 'unknown',
'unknown+g0123abc', etc.
"""
import numpy # Needed by C. Gohlke's Windows package.
import warnings
......
......@@ -269,11 +269,11 @@ if mpl_available:
phi = np.linspace(0, pi, 21)
xyz = np.c_[np.cos(phi), np.sin(phi), 0 * phi].T.reshape(-1, 1, 21)
# TODO: use np.block once we depend on numpy >= 1.13.
unit_sphere = np.vstack([
np.hstack([xyz[0], xyz[2]]),
np.hstack([xyz[1], xyz[0]]),
np.hstack([xyz[2], xyz[1]]),
unit_sphere = np.block([
[xyz[0], xyz[2]],
[xyz[1], xyz[0]],
[xyz[2], xyz[1]],
])
def projected_length(ax, length):
......
......@@ -90,16 +90,8 @@ def make_sparse(ham, args, params, CGraph gr, diag,
rows_cols[1, k] = j + from_off[n_fs]
k += 1
# Hack around a bug in Scipy + Python 3 + memoryviews
# see https://github.com/scipy/scipy/issues/5123 for details.
# TODO: remove this once we depend on scipy >= 0.18.
np_data = np.asarray(data)
np_rows_cols = np.asarray(rows_cols)
np_to_off = np.asarray(to_off)
np_from_off = np.asarray(from_off)
return sp.coo_matrix((np_data[:k], np_rows_cols[:, :k]),
shape=(np_to_off[-1], np_from_off[-1]))
return sp.coo_matrix((data[:k], rows_cols[:, :k]),
shape=(to_off[-1], from_off[-1]))
@cython.boundscheck(False)
......@@ -161,16 +153,8 @@ def make_sparse_full(ham, args, params, CGraph gr, diag,
rows_cols[0, k + 1] = rows_cols[1, k] = j + from_off[fs]
k += 2
# hack around a bug in Scipy + Python 3 + memoryviews
# see https://github.com/scipy/scipy/issues/5123 for details
# TODO: remove this once we depend on scipy >= 0.18.
np_data = np.asarray(data)
np_rows_cols = np.asarray(rows_cols)
np_to_off = np.asarray(to_off)
np_from_off = np.asarray(from_off)
return sp.coo_matrix((np_data[:k], np_rows_cols[:, :k]),
shape=(np_to_off[-1], np_from_off[-1]))
return sp.coo_matrix((data[:k], rows_cols[:, :k]),
shape=(to_off[-1], from_off[-1]))
@cython.boundscheck(False)
......
......@@ -39,7 +39,7 @@ extra_ns.update({'kron': sympy.physics.quantum.TensorProduct,
extra_ns.update({'sigma_{}'.format(c): p for c, p in zip('0xyz', pauli)})
# workaround for https://github.com/sympy/sympy/issues/12060
# TODO: remove this once https://github.com/sympy/sympy/issues/12060 is fixed
del extra_ns['I']
del extra_ns['pi']
......
......@@ -328,7 +328,7 @@ def build_discretized(tb_hamiltonian, coords, *, grid=None, locals=None,
is_diagonal = lambda m: np.allclose(m, np.diag(np.diagonal(m)))
if not (lat.prim_vecs.shape[0] == grid_dim and
is_diagonal(lat.prim_vecs)):
raise ValueError('"grid" is expected to by an orthogonal lattice '
raise ValueError('"grid" has to be an orthogonal lattice '
'of dimension matching number of "coords".')
if (lat.norbs is not None) and (lat.norbs != norbs):
......
......@@ -541,10 +541,7 @@ class TranslationalSymmetry(system.Symmetry):
def __init__(self, *periods):
self.periods = ta.array(periods)
if self.periods.ndim != 2:
# TODO: remove the second part of the following message once
# everybody got used to it.
msg = ("TranslationalSymmetry takes 1d sequences as parameters.\n"
"See What's new in Kwant 0.2 in the documentation.")
msg = ("TranslationalSymmetry takes 1d sequences as parameters.")
raise ValueError(msg)
if np.linalg.matrix_rank(periods) < len(periods):
raise ValueError("Translational symmetry periods must be "
......
......@@ -101,8 +101,7 @@ def lll(basis, c=1.34):
if abs(u[i+1, i]) > 0.5:
ll_reduce(i+1)
i = max(i-1, 0)
# TODO: change to rcond=None once we depend on numpy >= 1.14.
coefs = np.linalg.lstsq(vecs_orig.T, vecs.T, rcond=-1)[0]
coefs = np.linalg.lstsq(vecs_orig.T, vecs.T, rcond=None)[0]
if not np.allclose(np.round(coefs), coefs, atol=1e-6):
raise RuntimeError('LLL algorithm instability.')
if not is_c_reduced(vecs, c):
......
......@@ -24,36 +24,6 @@ dot = np.dot
__all__ = ['selfenergy', 'modes', 'PropagatingModes', 'StabilizedModes']
# TODO: Use scipy block_diag once we depend on scipy>=0.19
try:
# Throws ValueError, but if fixed ensure that works as intended
bdiag_broken = block_diag(np.zeros((1,1)), np.zeros((2,0))).shape != (3, 1)
except ValueError: # skip coverage
bdiag_broken = True
if bdiag_broken: # skip coverage
def block_diag(*matrices):
    """Assemble the input matrices into one block-diagonal matrix.

    Equivalent to scipy.linalg.block_diag, except that matrices with a
    zero-sized dimension are also handled correctly.
    """
    total_rows, total_cols = np.sum([m.shape for m in matrices], axis=0)
    result = np.zeros((total_rows, total_cols), dtype='complex')
    row = col = 0
    for m in matrices:
        n_rows, n_cols = m.shape
        result[row:row + n_rows, col:col + n_cols] = m
        row += n_rows
        col += n_cols
    return result
# TODO: Remove the workaround once we depend on scipy >= 1.0
def lstsq(a, b):
    """Solve the least-squares problem ``a x = b``.

    Behaves like ``scipy.linalg.lstsq`` (returning only the solution),
    but also accepts matrices ``a`` whose second dimension is zero.
    """
    if a.shape[1]:
        return la.lstsq(a, b)[0]
    # No unknowns: the solution is an empty matrix of the common dtype.
    return np.empty((0, 0), dtype=np.common_type(a, b))
def nonzero_symm_projection(matrix):
"""Check whether a discrete symmetry relation between two blocks of the
Hamiltonian vanishes or not.
......@@ -838,7 +808,7 @@ def make_proper_modes(lmbdainv, psi, extract, tol, particle_hole,
# reverse the order of the product at the end.
wf_neg_k = particle_hole.dot(
(full_psi[:, :N][:, positive_k]).conj())[:, ::-1]
rot = lstsq(orig_neg_k, wf_neg_k)
rot = la.lstsq(orig_neg_k, wf_neg_k)[0]
full_psi[:, :N][:, positive_k[::-1]] = wf_neg_k
psi[:, :N][:, positive_k[::-1]] = \
psi[:, :N][:, positive_k[::-1]].dot(rot)
......@@ -853,7 +823,7 @@ def make_proper_modes(lmbdainv, psi, extract, tol, particle_hole,
# Reverse order at the end to match momenta of opposite sign.
wf_neg_k = particle_hole.dot(
full_psi[:, N:][:, positive_k].conj())[:, ::-1]
rot = lstsq(orig_neg_k, wf_neg_k)
rot = la.lstsq(orig_neg_k, wf_neg_k)[0]
full_psi[:, N:][:, positive_k[::-1]] = wf_neg_k
psi[:, N:][:, positive_k[::-1]] = \
psi[:, N:][:, positive_k[::-1]].dot(rot)
......@@ -865,7 +835,7 @@ def make_proper_modes(lmbdainv, psi, extract, tol, particle_hole,
# of propagating modes, not either left or right movers.
out_orig = full_psi[:, nmodes//2:]
out = time_reversal.dot(full_psi[:, :nmodes//2].conj())
rot = lstsq(out_orig, out)
rot = la.lstsq(out_orig, out)[0]
full_psi[:, nmodes//2:] = out
psi[:, nmodes//2:] = psi[:, nmodes//2:].dot(rot)
......
from collections import namedtuple, Counter
import warnings
from math import sqrt
import numpy as np
from scipy.stats import special_ortho_group
import pytest
import kwant
......@@ -13,31 +15,6 @@ from .. import gauge
## Utilities
# TODO: remove in favour of 'scipy.stats.special_ortho_group' once
# we depend on scipy 0.18
class special_ortho_group_gen:
    """Drop-in stand-in for ``scipy.stats.special_ortho_group``.

    Samples a random rotation matrix (an element of SO(dim)) by
    composing successive Householder reflections and then fixing the
    signs so that the determinant is +1.
    """

    def rvs(self, dim):
        """Return one random ``dim x dim`` special orthogonal matrix."""
        rot = np.eye(dim)
        signs = np.empty((dim,))
        for step in range(dim - 1):
            vec = np.random.normal(size=(dim - step,))
            signs[step] = np.sign(vec[0]) if vec[0] != 0 else 1
            vec[0] += signs[step] * np.sqrt((vec * vec).sum())
            # Householder reflection built from vec.
            reflector = (np.eye(dim - step)
                         - 2. * np.outer(vec, vec) / (vec * vec).sum())
            embedded = np.eye(dim)
            embedded[step:, step:] = reflector
            rot = np.dot(rot, embedded)
        # Choose the last sign so the overall determinant comes out +1.
        signs[-1] = (-1) ** (dim - 1) * signs[:-1].prod()
        # Row-wise scaling; same as np.dot(np.diag(signs), rot) but cheaper.
        return (signs * rot.T).T
special_ortho_group = special_ortho_group_gen()
square_lattice = lattice.square(norbs=1, name='square')
honeycomb_lattice = lattice.honeycomb(norbs=1, name='honeycomb')
cubic_lattice = lattice.cubic(norbs=1, name='cubic')
......
......@@ -134,8 +134,7 @@ def gaussian(n, sym='A', v=1., rng=None):
h = randn(n, n) + 1j * randn(n, n)
# Ensure Hermiticity.
# TODO: write this as h += h.T.conj() once we rely on numpy >= 1.13.0
h = h + h.T.conj()
h += h.T.conj()
# Ensure Chiral symmetry.
if c(sym):
......
......@@ -18,17 +18,6 @@ from .._common import ensure_isinstance, deprecate_args
from .. import system, physics
from functools import reduce
# Until v0.13.0, scipy.sparse did not support making block matrices out of
# matrices with one dimension being zero:
# https://github.com/scipy/scipy/issues/2127 Additionally, 'scipy.sparse.bmat'
# didn't support matrices with zero size until v0.18:
# https://github.com/scipy/scipy/issues/5976. For now we use NumPy dense
# matrices as a replacement.
# TODO: Once we depend on scipy >= 0.18, code for the special cases can be
# removed from _make_linear_sys, _solve_linear_sys and possibly other places
# marked by the line "See comment about zero-shaped sparse matrices at the top
# of common.py".
LinearSys = namedtuple('LinearSys', ['lhs', 'rhs', 'indices', 'num_orb'])
......@@ -296,11 +285,8 @@ class SparseSolver(metaclass=abc.ABCMeta):
zero_rows = (lhs.shape[0] - mats[0].shape[0] -
mats[1].shape[0])
if zero_rows:
zero_mat = sprhsmat((zero_rows, mats[0].shape[1]))
bmat = [[mats[0]], [mats[1]], [zero_mat]]
else:
bmat = [[mats[0]], [mats[1]]]
zero_mat = sprhsmat((zero_rows, mats[0].shape[1]))
bmat = [[mats[0]], [mats[1]], [zero_mat]]
rhs[i] = sp.bmat(bmat, format=self.rhsformat)
elif mats is None:
......@@ -407,9 +393,7 @@ class SparseSolver(metaclass=abc.ABCMeta):
return SMatrix(np.zeros((len_kv, len_rhs)), lead_info,
out_leads, in_leads, check_hermiticity)
# See comment about zero-shaped sparse matrices at the top of common.py.
rhs = sp.bmat([[i for i in linsys.rhs if i.shape[1]]],
format=self.rhsformat)
rhs = sp.bmat([linsys.rhs], format=self.rhsformat)
flhs = self._factorized(linsys.lhs)
data = self._solve_linear_sys(flhs, rhs, kept_vars)
......@@ -505,9 +489,7 @@ class SparseSolver(metaclass=abc.ABCMeta):
return GreensFunction(np.zeros((len_kv, len_rhs)), lead_info,
out_leads, in_leads, check_hermiticity)
# See comment about zero-shaped sparse matrices at the top of common.py.
rhs = sp.bmat([[i for i in linsys.rhs if i.shape[1]]],
format=self.rhsformat)
rhs = sp.bmat([linsys.rhs], format=self.rhsformat)
flhs = self._factorized(linsys.lhs)
data = self._solve_linear_sys(flhs, rhs, kept_vars)
......@@ -569,9 +551,7 @@ class SparseSolver(metaclass=abc.ABCMeta):
factored = self._factorized(linsys.lhs)
# See comment about zero-shaped sparse matrices at the top of common.py.
rhs = sp.bmat([[i for i in linsys.rhs if i.shape[1]]],
format=self.rhsformat)
rhs = sp.bmat([linsys.rhs], format=self.rhsformat)
for j in range(0, rhs.shape[1], self.nrhs):
jend = min(j + self.nrhs, rhs.shape[1])
psi = self._solve_linear_sys(factored, rhs[:, j:jend],
......
......@@ -15,16 +15,7 @@ from . import common
# Note: previous code would have failed if UMFPACK was provided by scikit
import scipy.sparse.linalg.dsolve.linsolve as linsolve
# check if we are actually using UMFPACK or rather SuperLU
# TODO: remove the try (only using the except clause) once we depend on
# scipy >= 0.14.0.
try:
uses_umfpack = linsolve.isUmfpack
except AttributeError:
uses_umfpack = linsolve.useUmfpack
if uses_umfpack:
umfpack = linsolve.umfpack
uses_umfpack = linsolve.useUmfpack
if uses_umfpack:
def factorized(A, piv_tol=1.0, sym_piv_tol=1.0):
......@@ -51,6 +42,7 @@ if uses_umfpack:
x1 = solve(rhs1) # Uses the LU factors.
x2 = solve(rhs2) # Uses again the LU factors.
"""
umfpack = linsolve.umfpack
if not sp.isspmatrix_csc(A):
A = sp.csc_matrix(A)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment