diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..b74a9ddeeac4b35c223b1655378a841982c260d6
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,13 @@
+*~
+MANIFEST
+*.pyc
+*.so
+/kwant/*/*.c
+/kwant/_static_version.py
+/build
+/dist
+/doc/build
+/doc/source/reference/generated/
+/doc/source/images/*.png
+/doc/source/images/*.pdf
+/doc/source/images/.*_flag
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000000000000000000000000000000000000..fbf40cfdc0fe02a65a524b642322fc57d3c90f4c
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,17 @@
+# This file specifies the files to be included in the source distribution
+# in addition to the default ones.
+
+recursive-include kwant *.pxd
+recursive-include kwant *.h
+recursive-include kwant test_*.py
+
+include TODO.txt
+recursive-include examples *.py
+
+include doc/other/*[a-zA-Z]
+include doc/Makefile
+recursive-include doc/source *.rst *.py *.svg
+recursive-include doc/source/_static *[a-zA-Z]
+recursive-include doc/templates *[a-zA-Z]
+prune doc/source/reference/generated
+recursive-include doc/sphinxext *.py *.txt *.in
diff --git a/README.txt b/README.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9db4534a1458e5a123de49dab71b9b78591c8659
--- /dev/null
+++ b/README.txt
@@ -0,0 +1,110 @@
+=============================================================
+kwant, a package for numerical quantum transport calculations
+=============================================================
+
+Licence
+=======
+
+This software is NOT TO BE DISTRIBUTED, neither in part nor as a whole.
+
+The only exception to this is the ``doc/sphinxext`` subdirectory, which is free
+software.  (See the file ``LICENSE.txt`` in that subdirectory.)
+
+
+Installation
+============
+
+The prerequisites are
+
+ - More or less current versions of `Python <http://python.org>`_ and `SciPy
+   <http://scipy.org>`_.  Python 2.6 and scipy 0.7.2 should be enough.
+
+ - `Cython <http://cython.org/>`_ -- Version 0.13 works for us.
+
+optional:
+
+ - `pycairo <http://cairographics.org/pycairo/>`_ (for plotting)
+
+ - `matplotlib <http://matplotlib.sourceforge.net/>`_ (for some of the
+   examples)
+
+kwant can be build and installed using distutils, following standard python
+conventions.  To build and install, run the following commands in the root
+directory of the package. ::
+
+    python setup.py build
+    python setup.py install
+
+The second command has to be run as root (e.g. prefixing it with ``sudo``).  By
+default the package will be installed under ``/usr/local``.  You can change
+this using the ``--prefix`` option, e.g.::
+
+    python setup.py install --prefix=/opt
+
+If you would like to install kwant into your home directory only you can use ::
+
+    python setup.py install --home=~
+
+This does not require superuser priviledges.  If you install kwant in this way
+be sure to tell python where to find it.  This can be done by setting the
+``PYTHONPATH`` environment variable::
+
+    export PYTHONPATH=$HOME/lib/python
+
+You can make this setting permanent by adding this line to your the file
+``.bashrc`` in your home directory.
+
+To check successful installation try executing some examples in the
+``examples`` subdirectory.
+
+
+Documentation
+=============
+
+To build the documentation, kwant has to be installed as described in the
+previous section.  The `sphinx documentation generator
+<http://sphinx.pocoo.org/>`_ is required.
+
+HTML documentation can be built by entering the ``doc`` subdirectory of the
+kwant package and executing ``make html``.  PDF documentation is generated by
+executing ``make latex`` followed by a ``make all-pdf`` in ``doc/build/latex``.
+
+Because of some quirks of how sphinx works, it might be necessary to execute
+``make clean`` between building HTML and PDF documentation.  If this is not
+done, sphinx might mistakenly use PNG files for PDF output.
+
+Please consult the documentation for further information on how to use kwant.
+
+
+Hacking
+=======
+
+To work on the library itself it is useful to build it in-place.  This can be
+done with the following command ::
+
+    python setup.py build_ext -i
+
+The ``kwant`` subdirectory of the source distribution will be thus turned into
+a proper python package which can be imported.  To be able to import kwant from
+within python, one can either work in the root directory of the distribution
+(where the subdirectory ``kwant`` is located), or make a (symbolic) link from
+somewhere in the Python search path to the the package subdirectory.
+
+Some conventions to keep in mind:
+
+* Please keep the code consistent by adhering to the already used naming and
+  formatting conventions.  We generally follow the `"Style Guide for Python
+  Code" <http://www.python.org/dev/peps/pep-0008/>`_ and the `"Docstring
+  Conventions" <http://www.python.org/dev/peps/pep-0257/>`_.
+
+* Write tests for all the important functionality you add.  Be sure not to
+  break existing tests.
+
+
+Tests
+=====
+
+We use the `nose testing framework
+<http://somethingaboutorange.com/mrl/projects/nose/>`_.  To run the tests,
+execute the command ``nosetests`` from the root directory of the package after
+it has been built in place.
diff --git a/TODO.txt b/TODO.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4efe011278e4261036cba1cf5d0d7f5ece005722
--- /dev/null
+++ b/TODO.txt
@@ -0,0 +1,34 @@
+Roughly in order of importance.                                     -*-org-*-
+
+* Define a few benchmarks and check performance.  Optimize the code.
+
+* Write a fast tiny array module.
+  (If this turns out to be a performance bottleneck.)
+
+* Provide support for calculating and nicely plotting LDOS.
+  Make a tutorial example for this.
+
+* Allow attaching lead with further then nearest slice hoppings.
+  The most easy way to do this is increasing the period of the lead.
+
+* Optionally show site coordinates when plotting a system.
+
+* Add support for easily adding magnetic field to a system.
+
+* Generalize InfiniteSystem to multiple directions.
+
+* Add support for optimization of lead fundamental domains.
+
+* Write a module to generate "functional" random numbers.
+  This is a good starting point:
+  http://www.cs.umbc.edu/~olano/papers/GPUTEA.pdf
+
+* Write a RGF solver which uses graph/slicer.
+
+* Implement the C solver interface.
+
+* Wrap TB_SIM as a solver.
+
+* Wrap umfpack or some other sparse linear algebra library with Cython.
+  Use it directly in sparse solver.  This will allow to fine-tune the solution
+  of sparse systems.
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..abe401adc84b60ffbe53ad557ef3df2911120ed6
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,156 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = build
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+
+# In difference to the original Makefile, we convert all SVG files to PDF for
+# LaTeX output.  For HTML output, we don't create PNGs but rather use the SVG
+# files directly.
+IMAGESOURCES    = $(shell find source -name "*.svg")
+GENERATEDPDF    = $(patsubst %.svg, %.pdf, $(IMAGESOURCES))
+
+# Tutorial images.
+TUTORIAL1A_IMAGES = source/images/tutorial1a_result.png source/images/tutorial1a_result.pdf source/images/tutorial1a_sys.png source/images/tutorial1a_sys.pdf
+TUTORIAL2A_IMAGES = source/images/tutorial2a_result.png source/images/tutorial2a_result.pdf
+TUTORIAL2B_IMAGES = source/images/tutorial2b_result.png source/images/tutorial2b_result.pdf
+TUTORIAL2C_IMAGES = source/images/tutorial2c_result.png source/images/tutorial2c_result.pdf source/images/tutorial2c_sys.png source/images/tutorial2c_sys.pdf source/images/tutorial2c_note1.png source/images/tutorial2c_note1.pdf source/images/tutorial2c_note2.png source/images/tutorial2c_note2.pdf
+TUTORIAL3A_IMAGES = source/images/tutorial3a_result.png source/images/tutorial3a_result.pdf
+TUTORIAL3B_IMAGES = source/images/tutorial3b_result.png source/images/tutorial3b_result.pdf source/images/tutorial3b_sys.png source/images/tutorial3b_sys.pdf
+TUTORIAL4_IMAGES = source/images/tutorial4_result.png source/images/tutorial4_result.pdf source/images/tutorial4_sys1.png source/images/tutorial4_sys1.pdf source/images/tutorial4_sys2.png source/images/tutorial4_sys2.pdf source/images/tutorial4_bs.png source/images/tutorial4_bs.pdf
+ALL_TUTORIAL_IMAGES = $(TUTORIAL1A_IMAGES)  $(TUTORIAL2A_IMAGES) $(TUTORIAL2B_IMAGES) $(TUTORIAL2C_IMAGES) $(TUTORIAL3A_IMAGES) $(TUTORIAL3B_IMAGES) $(TUTORIAL4_IMAGES)
+
+.PHONY: help clean realclean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html      to make standalone HTML files"
+	@echo "  dirhtml   to make HTML files named index.html in directories"
+	@echo "  pickle    to make pickle files"
+	@echo "  json      to make JSON files"
+	@echo "  htmlhelp  to make HTML files and a HTML help project"
+	@echo "  qthelp    to make HTML files and a qthelp project"
+	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  changes   to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck to check all external links for integrity"
+	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	-rm -rf $(BUILDDIR)/* $(GENERATEDPDF)
+	-rm -rf source/reference/generated
+
+realclean: clean
+	-rm -f $(ALL_TUTORIAL_IMAGES) source/images/.*_flag
+
+html:	$(ALL_TUTORIAL_IMAGES)
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml: $(ALL_TUTORIAL_IMAGES)
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+pickle: $(ALL_TUTORIAL_IMAGES)
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:   $(ALL_TUTORIAL_IMAGES)
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp: $(ALL_TUTORIAL_IMAGES)
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp: $(ALL_TUTORIAL_IMAGES)
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/kwant.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/kwant.qhc"
+
+latex:  $(GENERATEDPDF) $(ALL_TUTORIAL_IMAGES)
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+	      "run these through (pdf)latex."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
+%.pdf: %.svg
+	inkscape --export-pdf=$@ $<
+
+# Generation of tutorial images.  This requires some serious make trickery, see
+# http://article.gmane.org/gmane.comp.gnu.make.general/5806
+$(TUTORIAL1A_IMAGES): source/images/.tutorial1a_flag
+	@:
+source/images/.tutorial1a_flag: source/images/tutorial1a.py
+	cd source/images/ && python tutorial1a.py
+	touch source/images/.tutorial1a_flag
+
+$(TUTORIAL2A_IMAGES): source/images/.tutorial2a_flag
+	@:
+source/images/.tutorial2a_flag: source/images/tutorial2a.py
+	cd source/images/ && python tutorial2a.py
+	touch source/images/.tutorial2a_flag
+
+$(TUTORIAL2B_IMAGES): source/images/.tutorial2b_flag
+	@:
+source/images/.tutorial2b_flag: source/images/tutorial2b.py
+	cd source/images/ && python tutorial2b.py
+	touch source/images/.tutorial2b_flag
+
+$(TUTORIAL2C_IMAGES): source/images/.tutorial2c_flag
+	@:
+source/images/.tutorial2c_flag: source/images/tutorial2c.py
+	cd source/images/ && python tutorial2c.py
+	touch source/images/.tutorial2c_flag
+
+$(TUTORIAL3A_IMAGES): source/images/.tutorial3a_flag
+	@:
+source/images/.tutorial3a_flag: source/images/tutorial3a.py
+	cd source/images/ && python tutorial3a.py
+	touch source/images/.tutorial3a_flag
+
+$(TUTORIAL3B_IMAGES): source/images/.tutorial3b_flag
+	@:
+source/images/.tutorial3b_flag: source/images/tutorial3b.py
+	cd source/images/ && python tutorial3b.py
+	touch source/images/.tutorial3b_flag
+
+$(TUTORIAL4_IMAGES): source/images/.tutorial4_flag
+	@:
+source/images/.tutorial4_flag: source/images/tutorial4.py
+	cd source/images/ && python tutorial4.py
+	touch source/images/.tutorial4_flag
diff --git a/doc/other/linear_system.pdf b/doc/other/linear_system.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..384562b861868e7d3b961721c4a74698f6837c7f
Binary files /dev/null and b/doc/other/linear_system.pdf differ
diff --git a/doc/other/reduce_modes.pdf b/doc/other/reduce_modes.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..a01aad6df8305badad3a88190589bfb5608c26a2
Binary files /dev/null and b/doc/other/reduce_modes.pdf differ
diff --git a/doc/source/_static/kwant.css b/doc/source/_static/kwant.css
new file mode 100644
index 0000000000000000000000000000000000000000..00875a2d0f364fe12f4e97a3a64fff874cc3b121
--- /dev/null
+++ b/doc/source/_static/kwant.css
@@ -0,0 +1,232 @@
+@import "default.css";
+
+/**
+ * Spacing fixes
+
+
+div.body p, div.body dd, div.body li {
+  line-height: 125%;
+}
+
+ul.simple {
+    margin-top: 0;
+    margin-bottom: 0;
+    padding-top: 0;
+    padding-bottom: 0;
+}
+*/
+
+/* spacing around blockquoted fields in parameters/attributes/returns */
+/*Essential. Otherwise there is way too much space around*/
+td.field-body > blockquote {
+    margin-top: 0.1em;
+    margin-bottom: 0.5em;
+}
+
+/* THE NEXT TWO ARE EVENTUALLY IMPORTANT I THINK */
+/* spacing around example code
+div.highlight > pre {
+    padding: 2px 5px 2px 5px;
+}
+*/
+
+/* spacing in see also definition lists
+dl.last > dd {
+    margin-top: 1px;
+    margin-bottom: 5px;
+    margin-left: 30px;
+}
+*/
+/* hide overflowing content in the sidebar
+div.sphinxsidebarwrapper p.topless {
+    overflow: hidden;
+}
+*/
+
+/**
+ * Hide dummy toctrees
+
+
+ul {
+  padding-top: 0;
+  padding-bottom: 0;
+  margin-top: 0;
+  margin-bottom: 0;
+}
+ul li {
+  padding-top: 0;
+  padding-bottom: 0;
+  margin-top: 0;
+  margin-bottom: 0;
+}
+ul li a.reference {
+  padding-top: 0;
+  padding-bottom: 0;
+  margin-top: 0;
+  margin-bottom: 0;
+}
+
+*/
+
+/**
+ * Make high-level subsections easier to distinguish from top-level ones
+
+div.body h3 {
+  background-color: transparent;
+}
+
+div.body h4 {
+  border: none;
+  background-color: transparent;
+}
+*/
+
+/**
+ * Scipy colors
+
+body {
+  background-color: rgb(100,135,220);
+}
+
+div.document {
+  background-color: rgb(230,230,230);
+}
+
+div.sphinxsidebar {
+  background-color: rgb(230,230,230);
+}
+
+div.related {
+  background-color: rgb(100,135,220);
+}
+
+div.sphinxsidebar h3 {
+  color: rgb(0,102,204);
+}
+
+div.sphinxsidebar h3 a {
+  color: rgb(0,102,204);
+}
+
+div.sphinxsidebar h4 {
+  color: rgb(0,82,194);
+}
+
+div.sphinxsidebar p {
+  color: black;
+}
+
+div.sphinxsidebar a {
+  color: #355f7c;
+}
+
+div.sphinxsidebar ul.want-points {
+  list-style: disc;
+}
+*/
+
+.field-list th {
+  color: rgb(0,50,150);
+  background-color: #EEE8AA;
+  white-space: nowrap; /*Essential. Otherwise the colons can break
+			 into a new line*/
+}
+
+
+
+/**
+ * Extra admonitions
+
+
+div.tip {
+  background-color: #ffffe4;
+  border: 1px solid #ee6;
+}
+
+div.plot-output {
+  clear-after: both;
+}
+
+div.plot-output .figure {
+  float: left;
+  text-align: center;
+  margin-bottom: 0;
+  padding-bottom: 0;
+}
+
+div.plot-output .caption {
+  margin-top: 2;
+  padding-top: 0;
+}
+
+div.plot-output p.admonition-title {
+  display: none;
+}
+
+div.plot-output:after {
+  content: "";
+  display: block;
+  height: 0;
+  clear: both;
+}
+*/
+
+
+/*
+div.admonition-example {
+    background-color: #e4ffe4;
+    border: 1px solid #ccc;
+}*/
+
+
+/**
+ * Styling for field lists
+ */
+
+table.field-list th {
+  border-left: 2px solid #999 !important;
+  padding-left: 5px;
+}
+
+table.field-list {
+  border-collapse: separate; /*Essential. Otherwise Parameters and Returns
+			       are sharing one solid colored field. That looks
+			       weird.*/
+  border-spacing: 10px;
+}
+
+/**
+ * Styling for footnotes
+
+
+table.footnote td, table.footnote th {
+  border: none;
+}
+*/
+
+div.specialnote-title {
+    font-size: 105%;
+    font-weight: bold;
+    font-color: #3B4D3C;
+    background-color: #DCE4DC;
+    padding: 1em;
+    padding-top: 0.4em;
+    padding-bottom: 0.4em;
+    margin-top: 1em;
+    margin-bottom: 0px;
+    border-width: 1px;
+    border-color: #546C55;
+    border-style: solid;
+}
+
+div.specialnote-body {
+    background-color: #DCE4DC;
+    padding: 1em;
+    padding-top: 0.1em;
+    padding-bottom: 0.4em;
+    margin-top: 0px;
+    border-width: 1px;
+    border-top-width: 0px;
+    border-color: #546C55;
+    border-style: solid;
+}
diff --git a/doc/source/_static/togglediv.js b/doc/source/_static/togglediv.js
new file mode 100644
index 0000000000000000000000000000000000000000..01f50d9a75e467582e30f87758c8aad883b0dc8d
--- /dev/null
+++ b/doc/source/_static/togglediv.js
@@ -0,0 +1,14 @@
+function togglediv(id) {
+    var buttontext;
+
+    buttontext = $("#" + id + "-button").text();
+
+    if(buttontext == 'show') {
+        $("#" + id + "-button").text('hide');
+    }
+    else {
+        $("#" + id + "-button").text('show');
+    }
+
+    $("#" + id + "-body").slideToggle('swing');
+}
diff --git a/doc/source/conf.py b/doc/source/conf.py
new file mode 100644
index 0000000000000000000000000000000000000000..476550f65968ea680de20e0dacbf2d9ba71c81fe
--- /dev/null
+++ b/doc/source/conf.py
@@ -0,0 +1,217 @@
+# -*- coding: utf-8 -*-
+#
+# kwant documentation build configuration file, created by
+# sphinx-quickstart on Tue Jan 11 09:39:28 2011.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+import kwant
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+
+sys.path.insert(0, os.path.abspath('../sphinxext'))
+
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
+              'sphinx.ext.todo', 'sphinx.ext.pngmath', 'numpydoc',
+              'kwantdoc']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['../templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'kwant'
+copyright = u'2011-2012, A. R. Akhmerov, C. W. Groth, X. Waintal, M. Wimmer'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = kwant.version.version[:]
+
+for i, s in enumerate(release):
+    if s not in '0123456790.':
+        break
+
+# The short X.Y version.
+version = release[:i]
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = []
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+default_role = "autolink"
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# Do not show all class members automatically in the class documentation
+numpydoc_show_class_members = False
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'default'
+html_style = 'kwant.css'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+html_use_modindex = False
+
+# This is needed too.
+html_domain_indices = False
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'kwantdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+latex_paper_size = 'a4'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+  ('index', 'kwant.tex', u'kwant Documentation',
+   u'A. R. Akhmerov, C. W. Groth, X. Waintal, M. Wimmer',
+   'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+latex_use_modindex = False
+
+# This is needed too.
+latex_domain_indices = False
+
+# -- Options for autodoc -------------------------------------------------------
+# Generate stub pages for autosummary directives.
+autosummary_generate = True
+
+autoclass_content = "both"
+autodoc_default_flags = ['show-inheritance']
diff --git a/doc/source/images/html.py b/doc/source/images/html.py
new file mode 100644
index 0000000000000000000000000000000000000000..120f7de2110c088c243dd72b3be884a431a0eb79
--- /dev/null
+++ b/doc/source/images/html.py
@@ -0,0 +1,4 @@
+# Default width of figures in pixels
+figwidth_px = 600
+# Width for smaller figures
+figwidth_small_px = 400
diff --git a/doc/source/images/latex.py b/doc/source/images/latex.py
new file mode 100644
index 0000000000000000000000000000000000000000..870c858e678a4d1a9f1a9ebdec2b5664f7410e5d
--- /dev/null
+++ b/doc/source/images/latex.py
@@ -0,0 +1,13 @@
+pt_to_in = 1./72.
+
+# Default width of figures in pts
+figwidth_pt = 300
+
+# Width for smaller figures
+figwidth_small_pt = 200
+figwidth_small_in = figwidth_small_pt * pt_to_in
+
+# Sizes for matplotlib figures
+mpl_width_in = figwidth_pt * pt_to_in
+mpl_label_size = 10 # font sizes in points
+mpl_tick_size = 9
diff --git a/doc/source/images/tutorial1a.py b/doc/source/images/tutorial1a.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce906eadcb3f961bd8ca51bc339b616ec5affd33
--- /dev/null
+++ b/doc/source/images/tutorial1a.py
@@ -0,0 +1,120 @@
+# Physics background
+# ------------------
+#  Conductance of a quantum wire; subbands
+#
+# Kwant features highlighted
+# --------------------------
+#  - Builder for setting up transport systems easily
+#  - Making scattering region and leads
+#  - Using the simple sparse solver for computing Landauer conductance
+
+import kwant
+
+import latex, html
+
+# First, define the tight-binding system
+
+sys = kwant.Builder()
+
+# Here, we are only working with square lattices
+
+lat = kwant.lattice.Square()
+sys.default_site_group = lat
+
+t = 1.0
+W = 10
+L = 30
+
+# Define the scattering region
+
+for i in xrange(L):
+    for j in xrange(W):
+        sys[(i, j)] = 4 * t
+
+        # hoppig in y-direction
+        if j > 0 :
+            sys[(i, j), (i, j-1)] = - t
+
+        #hopping in x-direction
+        if i > 0:
+            sys[(i, j), (i-1, j)] = -t
+
+# Then, define the leads:
+
+# First the lead to the left
+
+# (Note: in the current version, TranslationalSymmetry takes a
+# realspace vector)
+sym_lead0 = kwant.TranslationalSymmetry([lat.vec((-1, 0))])
+lead0 = kwant.Builder(sym_lead0)
+lead0.default_site_group = lat
+
+for j in xrange(W):
+    lead0[(0, j)] = 4 * t
+
+    if j > 0:
+        lead0[(0, j), (0, j-1)] = - t
+
+    lead0[(1, j), (0, j)] = - t
+
+# Then the lead to the right
+
+sym_lead1 = kwant.TranslationalSymmetry([lat.vec((1, 0))])
+lead1 = kwant.Builder(sym_lead1)
+lead1.default_site_group = lat
+
+for j in xrange(W):
+    lead1[(0, j)] = 4 * t
+
+    if j > 0:
+        lead1[(0, j), (0, j-1)] = - t
+
+    lead1[(1, j), (0, j)] = - t
+
+# Then attach the leads to the system
+
+sys.attach_lead(lead0)
+sys.attach_lead(lead1)
+
+# finalize the system
+
+fsys = sys.finalized()
+
+# and plot it, to make sure it's proper
+
+kwant.plot(fsys, "tutorial1a_sys.pdf", width=latex.figwidth_pt)
+kwant.plot(fsys, "tutorial1a_sys.png", width=html.figwidth_px)
+
+# Now that we have the system, we can compute conductance
+
+energies = []
+data = []
+for ie in xrange(100):
+    energy = ie * 0.01
+
+    # compute the scattering matrix at energy energy
+    smatrix = kwant.solvers.sparse.solve(fsys, energy)
+
+    # compute the transmission probability from lead 0 to
+    # lead 1
+    energies.append(energy)
+    data.append(smatrix.transmission(1, 0))
+
+# Use matplotlib to write output
+# We should see conductance steps
+import pylab
+
+pylab.plot(energies, data)
+pylab.xlabel("energy [in units of t]",
+                 fontsize=latex.mpl_label_size)
+pylab.ylabel("conductance [in units of e^2/h]",
+                 fontsize=latex.mpl_label_size)
+fig = pylab.gcf()
+pylab.setp(fig.get_axes()[0].get_xticklabels(),
+           fontsize=latex.mpl_tick_size)
+pylab.setp(fig.get_axes()[0].get_yticklabels(),
+           fontsize=latex.mpl_tick_size)
+fig.set_size_inches(latex.mpl_width_in, latex.mpl_width_in*3./4.)
+fig.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.15)
+fig.savefig("tutorial1a_result.pdf")
+fig.savefig("tutorial1a_result.png", dpi=(html.figwidth_px/latex.mpl_width_in))
diff --git a/doc/source/images/tutorial2a.py b/doc/source/images/tutorial2a.py
new file mode 100644
index 0000000000000000000000000000000000000000..c078ae6efe66c00170151c0dba7a8d56d815563d
--- /dev/null
+++ b/doc/source/images/tutorial2a.py
@@ -0,0 +1,123 @@
+# Physics background
+# ------------------
+#  Gaps in quantum wires with spin-orbit coupling and Zeeman splititng,
+#  as theoretically predicted in
+#   http://prl.aps.org/abstract/PRL/v90/i25/e256601
+#  and (supposedly) experimentally oberved in
+#   http://www.nature.com/nphys/journal/v6/n5/abs/nphys1626.html
+#
+# Kwant features highlighted
+# --------------------------
+#  - Numpy matrices as values in Builder
+
+import kwant
+import numpy
+
+import latex, html
+
+# define sigma-matrices for convenience
+sigma_0 = numpy.eye(2)
+sigma_x = numpy.array([[0, 1], [1, 0]])
+sigma_y = numpy.array([[0, -1j], [1j, 0]])
+sigma_z = numpy.array([[1, 0], [0, -1]])
+
+# First, define the tight-binding system
+
+sys = kwant.Builder()
+
+# Here, we are only working with square lattices
+
+# for simplicity, take lattice constant = 1
+a = 1
+lat = kwant.lattice.Square(a)
+
+t = 1.0
+alpha = 0.5
+e_z = 0.08
+W = 10
+L = 30
+
+# Define the scattering region
+
+def rectangle(pos):
+    (x, y) = pos
+    return ( -0.5 < x < L - 0.5 ) and ( -0.5 < y < W - 0.5 )
+
+sys[lat.shape(rectangle, (0, 0))] = 4 * t * sigma_0 + e_z * sigma_z
+# hoppings in x-direction
+sys[sys.possible_hoppings((1, 0), lat, lat)] = - t * sigma_0 - \
+    1j * alpha * sigma_y
+# hoppings in y-directions
+sys[sys.possible_hoppings((0, 1), lat, lat)] = - t * sigma_0 + \
+    1j * alpha * sigma_x
+
+# Then, define the leads:
+
+# First the lead to the left
+
+# (Note: in the current version, TranslationalSymmetry takes a
+# realspace vector)
+sym_lead0 = kwant.TranslationalSymmetry([lat.vec((-1, 0))])
+lead0 = kwant.Builder(sym_lead0)
+lead0.default_site_group = lat
+
+def lead_shape(pos):
+    (x, y) = pos
+    return (-1 < x < 1) and ( -0.5 < y < W - 0.5 )
+
+lead0[lat.shape(lead_shape, (0, 0))] = 4 * t * sigma_0 + e_z * sigma_z
+# hoppings in x-direction
+lead0[lead0.possible_hoppings((1, 0), lat, lat)] = - t * sigma_0 - \
+    1j * alpha * sigma_y
+# hoppings in y-directions
+lead0[lead0.possible_hoppings((0, 1), lat, lat)] = - t * sigma_0 + \
+    1j * alpha * sigma_x
+
+# Then the lead to the right
+# there we can use a special function that simply reverses the direction
+
+lead1 = lead0.reversed()
+
+# Then attach the leads to the system
+
+sys.attach_lead(lead0)
+sys.attach_lead(lead1)
+
+# finalize the system
+
+fsys = sys.finalized()
+
+# Now that we have the system, we can compute conductance
+
+energies = []
+data = []
+for ie in xrange(100):
+    energy = ie * 0.01 - 0.3
+
+    # compute the scattering matrix at energy energy
+    smatrix = kwant.solvers.sparse.solve(fsys, energy)
+
+    # compute the transmission probability from lead 0 to
+    # lead 1
+    energies.append(energy)
+    data.append(smatrix.transmission(1, 0))
+
+# Use matplotlib to write output
+# We should see conductance steps
+import pylab
+
+pylab.plot(energies, data)
+pylab.xlabel("energy [in units of t]",
+             fontsize=latex.mpl_label_size)
+pylab.ylabel("conductance [in units of e^2/h]",
+             fontsize=latex.mpl_label_size)
+fig = pylab.gcf()
+pylab.setp(fig.get_axes()[0].get_xticklabels(),
+           fontsize=latex.mpl_tick_size)
+pylab.setp(fig.get_axes()[0].get_yticklabels(),
+           fontsize=latex.mpl_tick_size)
+fig.set_size_inches(latex.mpl_width_in, latex.mpl_width_in*3./4.)
+fig.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.15)
+fig.savefig("tutorial2a_result.pdf")
+fig.savefig("tutorial2a_result.png",
+            dpi=(html.figwidth_px/latex.mpl_width_in))
diff --git a/doc/source/images/tutorial2b.py b/doc/source/images/tutorial2b.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e00c7b70776b7e386d58cbacd582bf822c9e457
--- /dev/null
+++ b/doc/source/images/tutorial2b.py
@@ -0,0 +1,115 @@
+# Physics background
+# ------------------
+#  transmission through a quantum well
+#
+# Kwant features highlighted
+# --------------------------
+#  - Functions as values in Builder
+
+import kwant
+
+import latex, html
+
+# First, define the tight-binding system
+
+sys = kwant.Builder()
+
+# Here, we are only working with square lattices
+
+# for simplicity, take lattice constant = 1
+a = 1
+lat = kwant.lattice.Square(a)
+
+t = 1.0
+alpha = 0.5
+e_z = 0.08
+W = 10
+L = 30
+
+# Define the scattering region
+
+def rectangle(pos):
+    (x, y) = pos
+    return ( -0.5 < x < L - 0.5 ) and ( -0.5 < y < W - 0.5 )
+
+def potential(site):
+    (x, y) = site.pos
+    if 10 < x < 20:
+        return pot
+    else:
+        return 0
+
+def onsite(site):
+    return 4 * t + potential(site)
+
+sys[lat.shape(rectangle, (0, 0))] = onsite
+for hopping in lat.nearest:
+    sys[sys.possible_hoppings(*hopping)] = - t
+
+# Then, define the leads:
+
+# First the lead to the left
+
+# (Note: in the current version, TranslationalSymmetry takes a
+# realspace vector)
+sym_lead0 = kwant.TranslationalSymmetry([lat.vec((-1, 0))])
+lead0 = kwant.Builder(sym_lead0)
+lead0.default_site_group = lat
+
+def lead_shape(pos):
+    (x, y) = pos
+    return (-1 < x < 1) and ( -0.5 < y < W - 0.5 )
+
+lead0[lat.shape(lead_shape, (0, 0))] = 4 * t
+for hopping in lat.nearest:
+    lead0[lead0.possible_hoppings(*hopping)] = - t
+
+# Then the lead to the right
+# there we can use a special function that simply reverses the direction
+
+lead1 = lead0.reversed()
+
+# Then attach the leads to the system
+
+sys.attach_lead(lead0)
+sys.attach_lead(lead1)
+
+# finalize the system
+
+fsys = sys.finalized()
+
+# Now that we have the system, we can compute conductance
+
+energy = 0.2
+wellpot = []
+data = []
+for ipot in xrange(100):
+    pot = - ipot * 0.01
+
+    # compute the scattering matrix at energy energy
+    smatrix = kwant.solvers.sparse.solve(fsys, energy)
+
+    # compute the transmission probability from lead 0 to
+    # lead 1
+    wellpot.append(-pot)
+    data.append(smatrix.transmission(1, 0))
+
+# Use matplotlib to write output
+# We should see conductance steps
+import pylab
+
+pylab.plot(wellpot, data)
+pylab.xlabel("well depth [in units of t]",
+             fontsize=latex.mpl_label_size)
+pylab.ylabel("conductance [in units of e^2/h]",
+             fontsize=latex.mpl_label_size)
+fig = pylab.gcf()
+pylab.setp(fig.get_axes()[0].get_xticklabels(),
+           fontsize=latex.mpl_tick_size)
+pylab.setp(fig.get_axes()[0].get_yticklabels(),
+           fontsize=latex.mpl_tick_size)
+fig.set_size_inches(latex.mpl_width_in, latex.mpl_width_in*3./4.)
+fig.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.15)
+fig.savefig("tutorial2b_result.pdf")
+fig.savefig("tutorial2b_result.png",
+            dpi=(html.figwidth_px/latex.mpl_width_in))
diff --git a/doc/source/images/tutorial2c.py b/doc/source/images/tutorial2c.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c1ea86abffbfa5d97b2cc5da3c2eba8a98eb559
--- /dev/null
+++ b/doc/source/images/tutorial2c.py
@@ -0,0 +1,214 @@
+# Physics background
+# ------------------
+#  Flux-dependent transmission through a quantum ring
+#
+# Kwant features highlighted
+# --------------------------
+#  - More complex shapes with lattices
+#  - Allows for discussion of subtleties of `attach_lead` (not in the
+#    example, but in the tutorial main text)
+#  - Modifcations of hoppings/sites after they have been added
+
+from cmath import exp
+from math import pi
+import kwant
+
+import latex, html
+
+# First, define the tight-binding system
+
+sys = kwant.Builder()
+
+# Here, we are only working with square lattices
+
+# for simplicity, take lattice constant = 1
+a = 1
+lat = kwant.lattice.Square(a)
+
+t = 1.0
+W = 10
+r1 = 10
+r2 = 20
+
+# Define the scattering region
+# Now, we aim for a more compelx shape, namely a ring (or annulus)
+
+def ring(pos):
+    (x, y) = pos
+    rsq = x**2 + y**2
+    return ( r1**2 < rsq < r2**2)
+
+sys[lat.shape(ring, (0, 11))] = 4 * t
+for hopping in lat.nearest:
+    sys[sys.possible_hoppings(*hopping)] = - t
+
+# In order to introduce a flux through the ring, we introduce a phase
+# on the hoppings on the line cut through one of the arms
+
+# since we want to change the flux without modifying Builder repeatedly,
+# we define the modified hoppings as a function that takes the flux
+# as a global variable.
+
+def fluxphase(site1, site2):
+    return exp(1j * phi)
+
+# Now go through all the hoppings and modify those in the lower
+# arm of the ring that go from x=0 to x=1
+
+for (site1, site2) in sys.hoppings():
+    ix1, iy1 = site1.tag
+    ix2, iy2 = site2.tag
+
+    hopx = tuple(sorted((ix1, ix2)))
+
+    if hopx == (0, 1) and iy1 == iy2 and iy1 < 0:
+        sys[lat(hopx[1], iy1), lat(hopx[0], iy1)] = fluxphase
+
+# Then, define the leads:
+
+# First the lead to the left
+
+# (Note: in the current version, TranslationalSymmetry takes a
+# realspace vector)
+sym_lead0 = kwant.TranslationalSymmetry([lat.vec((-1, 0))])
+lead0 = kwant.Builder(sym_lead0)
+lead0.default_site_group = lat
+
+def lead_shape(pos):
+    (x, y) = pos
+    return (-1 < x < 1) and ( -W/2 < y < W/2  )
+
+lead0[lat.shape(lead_shape, (0, 0))] = 4 * t
+for hopping in lat.nearest:
+    lead0[lead0.possible_hoppings(*hopping)] = - t
+
+# Then the lead to the right
+# there we can use a special function that simply reverses the direction
+
+lead1 = lead0.reversed()
+
+# Then attach the leads to the system
+
+sys.attach_lead(lead0)
+sys.attach_lead(lead1)
+
+# finalize the system
+
+fsys = sys.finalized()
+
+# and plot it, to make sure it's proper
+
+kwant.plot(fsys, "tutorial2c_sys.pdf", width=latex.figwidth_pt)
+kwant.plot(fsys, "tutorial2c_sys.png", width=html.figwidth_px)
+
+# Now that we have the system, we can compute conductance
+
+energy = 0.15
+phases = []
+data = []
+for iphi in xrange(100):
+    phi = iphi * 0.01 * 3 * 2 * pi
+
+    # compute the scattering matrix at energy energy
+    smatrix = kwant.solve(fsys, energy)
+
+    # compute the transmission probability from lead 0 to
+    # lead 1
+    phases.append(phi / (2 * pi))
+    data.append(smatrix.transmission(1, 0))
+
+# Use matplotlib to write output
+# We should see conductance steps
+import pylab
+
+pylab.plot(phases, data)
+pylab.xlabel("flux [in units of the flux quantum]",
+             fontsize=latex.mpl_label_size)
+pylab.ylabel("conductance [in units of e^2/h]",
+             fontsize=latex.mpl_label_size)
+fig = pylab.gcf()
+pylab.setp(fig.get_axes()[0].get_xticklabels(),
+           fontsize=latex.mpl_tick_size)
+pylab.setp(fig.get_axes()[0].get_yticklabels(),
+           fontsize=latex.mpl_tick_size)
+fig.set_size_inches(latex.mpl_width_in, latex.mpl_width_in*3./4.)
+fig.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.15)
+fig.savefig("tutorial2c_result.pdf")
+fig.savefig("tutorial2c_result.png",
+            dpi=(html.figwidth_px/latex.mpl_width_in))
+
+# Finally, some plots needed for the notes
+
+sys = kwant.Builder()
+
+sys[lat.shape(ring, (0, 11))] = 4 * t
+for hopping in lat.nearest:
+    sys[sys.possible_hoppings(*hopping)] = - t
+
+sym_lead0 = kwant.TranslationalSymmetry([lat.vec((-1, 0))])
+lead0 = kwant.Builder(sym_lead0)
+lead0.default_site_group = lat
+
+def lead_shape(pos):
+    (x, y) = pos
+    return (-1 < x < 1) and ( 0.5 * W < y < 1.5 * W )
+
+lead0[lat.shape(lead_shape, (0, W))] = 4 * t
+for hopping in lat.nearest:
+    lead0[lead0.possible_hoppings(*hopping)] = - t
+
+# Then the lead to the right
+# there we can use a special function that simply reverses the direction
+
+lead1 = lead0.reversed()
+
+# Then attach the leads to the system
+
+sys.attach_lead(lead0)
+sys.attach_lead(lead1)
+
+# finalize the system
+
+fsys = sys.finalized()
+
+# and plot it, to make sure it's proper
+
+kwant.plot(fsys, "tutorial2c_note1.pdf", width=latex.figwidth_small_pt)
+kwant.plot(fsys, "tutorial2c_note1.png", width=html.figwidth_small_px)
+
+sys = kwant.Builder()
+
+sys[lat.shape(ring, (0, 11))] = 4 * t
+for hopping in lat.nearest:
+    sys[sys.possible_hoppings(*hopping)] = - t
+
+sym_lead0 = kwant.TranslationalSymmetry([lat.vec((-1, 0))])
+lead0 = kwant.Builder(sym_lead0)
+lead0.default_site_group = lat
+
+def lead_shape(pos):
+    (x, y) = pos
+    return (-1 < x < 1) and ( -W/2 < y < W/2  )
+
+lead0[lat.shape(lead_shape, (0, 0))] = 4 * t
+for hopping in lat.nearest:
+    lead0[lead0.possible_hoppings(*hopping)] = - t
+
+# Then the lead to the right
+# there we can use a special function that simply reverses the direction
+
+lead1 = lead0.reversed()
+
+# Then attach the leads to the system
+
+sys.attach_lead(lead0)
+sys.attach_lead(lead1, lat(0, 0))
+
+# finalize the system
+
+fsys = sys.finalized()
+
+# and plot it, to make sure it's proper
+
+kwant.plot(fsys, "tutorial2c_note2.pdf", width=latex.figwidth_small_pt)
+kwant.plot(fsys, "tutorial2c_note2.png", width=html.figwidth_small_px)
diff --git a/doc/source/images/tutorial2c_sketch.svg b/doc/source/images/tutorial2c_sketch.svg
new file mode 100644
index 0000000000000000000000000000000000000000..0b2e94288c0eb404ff434ee596e5cbaf7230a032
--- /dev/null
+++ b/doc/source/images/tutorial2c_sketch.svg
@@ -0,0 +1,140 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="250"
+   height="205"
+   id="svg2"
+   sodipodi:version="0.32"
+   inkscape:version="0.47 r22583"
+   sodipodi:docname="tutorial2c_sketch.svg"
+   inkscape:output_extension="org.inkscape.output.svg.inkscape"
+   version="1.0">
+  <defs
+     id="defs4">
+    <inkscape:perspective
+       sodipodi:type="inkscape:persp3d"
+       inkscape:vp_x="0 : 526.18109 : 1"
+       inkscape:vp_y="0 : 1000 : 0"
+       inkscape:vp_z="744.09448 : 526.18109 : 1"
+       inkscape:persp3d-origin="372.04724 : 350.78739 : 1"
+       id="perspective3026" />
+    <inkscape:perspective
+       id="perspective3635"
+       inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+       inkscape:vp_z="1 : 0.5 : 1"
+       inkscape:vp_y="0 : 1000 : 0"
+       inkscape:vp_x="0 : 0.5 : 1"
+       sodipodi:type="inkscape:persp3d" />
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="2.5106232"
+     inkscape:cx="54.499548"
+     inkscape:cy="102.0132"
+     inkscape:document-units="mm"
+     inkscape:current-layer="layer1"
+     showgrid="true"
+     inkscape:object-nodes="true"
+     inkscape:grid-points="true"
+     gridtolerance="1.3"
+     objecttolerance="0.8"
+     gridanglex="8.4666669mm"
+     gridanglez="8.4666669mm"
+     grid_units="mm"
+     inkscape:window-width="1399"
+     inkscape:window-height="974"
+     inkscape:window-x="57"
+     inkscape:window-y="0"
+     inkscape:window-maximized="0"
+     units="pt">
+    <inkscape:grid
+       id="GridFromPre046Settings"
+       type="xygrid"
+       originx="0px"
+       originy="0px"
+       spacingx="2mm"
+       spacingy="2mm"
+       color="#0000ff"
+       empcolor="#ff0400"
+       opacity="0.2"
+       empopacity="0.37647059"
+       empspacing="5"
+       units="mm"
+       visible="true"
+       enabled="true"
+       snapvisiblegridlinesonly="true" />
+  </sodipodi:namedview>
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Ebene 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-114.19886,19.255464)">
+    <path
+       style="fill:none;stroke:#000000;stroke-width:2.91644573;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       d="m 292.43863,75.559443 c 0,29.083897 -23.60435,52.688207 -52.68822,52.688207 -29.08392,0 -52.68818,-23.60431 -52.68818,-52.688207 0,-29.083901 23.60426,-52.688202 52.68818,-52.688202 29.08391,0 52.68822,23.604301 52.68822,52.688202 z"
+       id="path37307" />
+    <g
+       id="g38103"
+       style="fill:none;stroke:#000000;stroke-width:0.625;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+       transform="matrix(4.6663129,0,0,4.6663129,-1242.7479,-57.338034)">
+      <path
+         id="path38105"
+         style="fill:none;stroke:#000000;stroke-width:0.625;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+         d="m 291.10885,24.772314 6.48497,0.02214 c 3.35206,-13.407233 11.42324,-16.3201141 20.32977,-16.3207844 8.90637,-6.702e-4 16.34147,2.3839234 19.96964,16.3207844 l 6.16611,0.03299 m -0.0331,6.854657 -6.1677,-0.03489 c -3.36971,13.202794 -11.16975,17.013197 -20.07612,17.012527 -8.90653,-6.7e-4 -16.67166,-3.363359 -20.32977,-17.012527 l -6.27014,-0.02403"
+         sodipodi:nodetypes="ccsccccscc" />
+    </g>
+    <path
+       sodipodi:type="arc"
+       style="opacity:0.95;fill:#ff0000;fill-opacity:1;stroke:none"
+       id="path3602"
+       sodipodi:cx="64.400978"
+       sodipodi:cy="74.404541"
+       sodipodi:rx="0.98904526"
+       sodipodi:ry="0.98904526"
+       d="m 65.390023,74.404541 c 0,0.546235 -0.44281,0.989045 -0.989045,0.989045 -0.546235,0 -0.989045,-0.44281 -0.989045,-0.989045 0,-0.546235 0.44281,-0.989045 0.989045,-0.989045 0.546235,0 0.989045,0.44281 0.989045,0.989045 z"
+       transform="matrix(4.6663129,0,0,4.6663129,-62.675572,-272.11353)" />
+    <path
+       style="fill:none;stroke:#ff0000;stroke-width:2.33315659;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.66631287, 2.33315643;stroke-dashoffset:0"
+       d="M 237.70766,74.685746 237.44391,183.60434"
+       id="path3604"
+       sodipodi:nodetypes="cc" />
+    <g
+       transform="matrix(1.2202792,0,0,-1.2202792,244.56422,79.414671)"
+       inkscape:label="phi"
+       id="g3637">
+      <g
+         transform="scale(0.1,0.1)"
+         id="g3639">
+        <path
+           id="path3641"
+           style="fill:#ff0000;fill-opacity:1;fill-rule:nonzero;stroke:none"
+           d="m 125.012,52.4102 c 55.929,4.3203 96,37.7695 96,74.1018 0,37.41 -41.153,70.136 -96,74.476 l 0,19.934 c 0,12.098 0,17.09 34.136,17.09 l 11.864,0 0,11 c -12.91,-1.071 -45.91,-1.071 -60.614,-1.071 -14.7066,0 -48.0582,0 -60.9683,1.071 l 0,-11 11.75,0 c 33.832,0 33.832,-4.633 33.832,-17.09 l 0,-19.934 C 40.2812,195.57 2.01172,162.48 2.01172,126.871 2.01172,89.8203 41.6992,57.8086 95.0117,52.4102 l 0,-20.1407 C 95.0117,20.0391 95.0117,15 61.1797,15 l -11.75,0 0,-11 c 12.9101,1.07031 45.9101,1.07031 60.6093,1.07031 14.711,0 48.063,0 60.973,-1.07031 l 0,11 -11.864,0 c -34.136,0 -34.136,4.6797 -34.136,17.2695 l 0,20.1407 z M 95,60 c -50.5781,6.8789 -56.9883,43.84 -56.9883,66.301 0,18.84 3.5586,59.418 56.9883,66.687 L 95,60 z m 30.012,132.988 C 174.871,187.219 185,154.059 185,126.672 185,105.051 179.57,66.4805 125.012,60" />
+      </g>
+    </g>
+  </g>
+</svg>
diff --git a/doc/source/images/tutorial2c_sketch2.svg b/doc/source/images/tutorial2c_sketch2.svg
new file mode 100644
index 0000000000000000000000000000000000000000..2bf7bf06d8887295d88941f5cad2bfcbb026c50b
--- /dev/null
+++ b/doc/source/images/tutorial2c_sketch2.svg
@@ -0,0 +1,319 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="800"
+   height="213"
+   id="svg2"
+   sodipodi:version="0.32"
+   inkscape:version="0.47 r22583"
+   sodipodi:docname="tutorial2c_sketch2.svg"
+   inkscape:output_extension="org.inkscape.output.svg.inkscape"
+   version="1.0">
+  <defs
+     id="defs4">
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend"
+       style="overflow:visible">
+      <path
+         id="path3770"
+         style="font-size:12px;fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6,-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Send"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow1Send"
+       style="overflow:visible">
+      <path
+         id="path3758"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend"
+       style="overflow:visible">
+      <path
+         id="path3764"
+         style="font-size:12px;fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <inkscape:perspective
+       sodipodi:type="inkscape:persp3d"
+       inkscape:vp_x="0 : 526.18109 : 1"
+       inkscape:vp_y="0 : 1000 : 0"
+       inkscape:vp_z="744.09448 : 526.18109 : 1"
+       inkscape:persp3d-origin="372.04724 : 350.78739 : 1"
+       id="perspective3026" />
+    <inkscape:perspective
+       id="perspective3635"
+       inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+       inkscape:vp_z="1 : 0.5 : 1"
+       inkscape:vp_y="0 : 1000 : 0"
+       inkscape:vp_x="0 : 0.5 : 1"
+       sodipodi:type="inkscape:persp3d" />
+    <inkscape:perspective
+       id="perspective2871"
+       inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+       inkscape:vp_z="1 : 0.5 : 1"
+       inkscape:vp_y="0 : 1000 : 0"
+       inkscape:vp_x="0 : 0.5 : 1"
+       sodipodi:type="inkscape:persp3d" />
+    <inkscape:perspective
+       id="perspective3667"
+       inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+       inkscape:vp_z="1 : 0.5 : 1"
+       inkscape:vp_y="0 : 1000 : 0"
+       inkscape:vp_x="0 : 0.5 : 1"
+       sodipodi:type="inkscape:persp3d" />
+    <inkscape:perspective
+       id="perspective3701"
+       inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+       inkscape:vp_z="1 : 0.5 : 1"
+       inkscape:vp_y="0 : 1000 : 0"
+       inkscape:vp_x="0 : 0.5 : 1"
+       sodipodi:type="inkscape:persp3d" />
+    <inkscape:perspective
+       id="perspective3701-2"
+       inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+       inkscape:vp_z="1 : 0.5 : 1"
+       inkscape:vp_y="0 : 1000 : 0"
+       inkscape:vp_x="0 : 0.5 : 1"
+       sodipodi:type="inkscape:persp3d" />
+    <inkscape:perspective
+       id="perspective4576"
+       inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+       inkscape:vp_z="1 : 0.5 : 1"
+       inkscape:vp_y="0 : 1000 : 0"
+       inkscape:vp_x="0 : 0.5 : 1"
+       sodipodi:type="inkscape:persp3d" />
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-0"
+       style="overflow:visible">
+      <path
+         id="path3770-5"
+         style="font-size:12px;fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6,-0.6)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="1.96"
+     inkscape:cx="391.07137"
+     inkscape:cy="106.28166"
+     inkscape:document-units="mm"
+     inkscape:current-layer="layer1"
+     showgrid="true"
+     inkscape:object-nodes="true"
+     inkscape:grid-points="true"
+     gridtolerance="1.3"
+     objecttolerance="0.8"
+     gridanglex="8.4666669mm"
+     gridanglez="8.4666669mm"
+     grid_units="mm"
+     inkscape:window-width="1680"
+     inkscape:window-height="960"
+     inkscape:window-x="0"
+     inkscape:window-y="0"
+     inkscape:window-maximized="0" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Ebene 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(199.8724,26.246742)">
+    <rect
+       style="opacity:0.95;fill:#9c9c9c;fill-opacity:1;stroke:none"
+       id="rect3734"
+       width="20.276669"
+       height="41.462551"
+       x="-45.860664"
+       y="59.196667" />
+    <g
+       id="g3681"
+       transform="matrix(3.8946736,0,0,5.5523606,-554.05901,-371.33435)"
+       style="stroke:#c4c4c4;stroke-width:0.59557372;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none">
+      <path
+         sodipodi:nodetypes="cc"
+         id="path3657"
+         d="m 200.95336,77.53144 72.03134,0"
+         style="fill:none;stroke:#c4c4c4;stroke-width:0.59557372;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+      <path
+         sodipodi:nodetypes="cc"
+         id="path3657-9"
+         d="m 200.87605,84.852566 72.03134,0"
+         style="fill:none;stroke:#c4c4c4;stroke-width:0.59557372;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:2.76955438;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       d="m 406.39299,-24.861965 c -57.92207,0 -104.89686,46.974867 -104.89686,104.896874 0,57.922011 46.97479,104.896891 104.89686,104.896891 57.92192,0 104.89689,-46.97488 104.89689,-104.896891 0,-57.922007 -46.97486,-104.896874 -104.89689,-104.896874 z m 0,42.408802 c 34.52383,0 62.48808,27.964263 62.48808,62.488072 0,34.523811 -27.9643,62.661171 -62.48808,62.661171 -34.52381,0 -62.48806,-28.13736 -62.48806,-62.661171 0,-34.523809 27.96425,-62.488072 62.48806,-62.488072 z"
+       id="path37307" />
+    <path
+       style="fill:none;stroke:#000000;stroke-width:2.76955438;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+       d="m 509.44175,59.196676 34.11393,0 39.97331,0"
+       id="path3657-8"
+       sodipodi:nodetypes="ccc" />
+    <path
+       style="fill:none;stroke:#000000;stroke-width:2.76955438;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+       d="m 509.36221,99.749189 74.08724,0"
+       id="path3657-9-0"
+       sodipodi:nodetypes="cc" />
+    <g
+       id="g3681-4"
+       transform="matrix(-4.9045266,0,0,5.5391089,1140.3731,-370.25841)"
+       style="stroke-width:0.53136313;stroke-miterlimit:4;stroke-dasharray:none">
+      <path
+         sodipodi:nodetypes="cc"
+         id="path3657-83"
+         d="m 200.95336,77.53144 72.03134,0"
+         style="fill:none;stroke:#000000;stroke-width:0.53136313;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+      <path
+         sodipodi:nodetypes="cc"
+         id="path3657-9-9"
+         d="m 200.87605,84.852566 72.03134,0"
+         style="fill:none;stroke:#000000;stroke-width:0.53136313;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+    </g>
+    <path
+       style="fill:none;stroke:#000000;stroke-width:4.15433168;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Mend)"
+       d="m -33.937237,79.776421 56.83818,0"
+       id="path3738"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="fill:none;stroke:#000000;stroke-width:4.15433168;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Mend)"
+       d="m 537.78747,79.080327 56.83819,0"
+       id="path3738-2"
+       sodipodi:nodetypes="cc" />
+    <g
+       style="font-size:3px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;fill:#000000;fill-opacity:1;stroke:none;font-family:Times;-inkscape-font-specification:Times"
+       id="text4596"
+       transform="matrix(5.5391089,0,0,5.5391089,-908.40072,-363.2872)">
+      <path
+         d="m 151.33955,86.880928 0,-0.993164 0.26954,0 0,0.98291 c -1e-5,0.155274 0.0303,0.271973 0.0908,0.350098 0.0605,0.07715 0.15136,0.115723 0.27246,0.115722 0.1455,10e-7 0.26025,-0.04639 0.34423,-0.13916 0.085,-0.09277 0.12744,-0.219237 0.12745,-0.379394 l 0,-0.930176 0.26953,0 0,1.640625 -0.26953,0 0,-0.251953 c -0.0654,0.09961 -0.14161,0.173828 -0.22852,0.222656 -0.0859,0.04785 -0.18604,0.07178 -0.30029,0.07178 -0.18848,0 -0.33155,-0.05859 -0.4292,-0.175781 -0.0977,-0.117187 -0.14649,-0.288574 -0.14649,-0.51416"
+         id="path3652" />
+      <path
+         d="m 154.63545,86.538155 0,0.990234 -0.26953,0 0,-0.981445 c 0,-0.155273 -0.0303,-0.271483 -0.0908,-0.348633 -0.0606,-0.07715 -0.15137,-0.115721 -0.27246,-0.115723 -0.14551,2e-6 -0.26025,0.04639 -0.34424,0.13916 -0.084,0.09277 -0.12598,0.21924 -0.12598,0.379395 l 0,0.927246 -0.27099,0 0,-1.640625 0.27099,0 0,0.254883 c 0.0645,-0.09863 0.14014,-0.172362 0.22706,-0.221192 0.0879,-0.04883 0.18896,-0.07324 0.30322,-0.07324 0.18847,2e-6 0.33105,0.0586 0.42773,0.175781 0.0967,0.116213 0.14502,0.287599 0.14502,0.514161"
+         id="path3654" />
+      <path
+         d="m 155.17598,85.887764 0.26953,0 0,1.640625 -0.26953,0 0,-1.640625 m 0,-0.638672 0.26953,0 0,0.341309 -0.26953,0 0,-0.341309"
+         id="path3656" />
+      <path
+         d="m 156.27461,85.421944 0,0.46582 0.55518,0 0,0.209473 -0.55518,0 0,0.890625 c 0,0.133789 0.0181,0.219727 0.0542,0.257812 0.0371,0.03809 0.11182,0.05713 0.22412,0.05713 l 0.27686,0 0,0.225586 -0.27686,0 c -0.20801,0 -0.35156,-0.03857 -0.43066,-0.115723 -0.0791,-0.07812 -0.11865,-0.219726 -0.11865,-0.424804 l 0,-0.890625 -0.19776,0 0,-0.209473 0.19776,0 0,-0.46582 0.27099,0"
+         id="path3658" />
+      <path
+         d="m 159.32149,85.950752 0,0.251953 c -0.0762,-0.04199 -0.15284,-0.07324 -0.22998,-0.09375 -0.0762,-0.02148 -0.15332,-0.03222 -0.23145,-0.03223 -0.1748,10e-7 -0.31055,0.05567 -0.40722,0.166992 -0.0967,0.110353 -0.14502,0.265626 -0.14502,0.46582 0,0.200196 0.0483,0.355958 0.14502,0.467285 0.0967,0.110352 0.23242,0.165528 0.40722,0.165528 0.0781,0 0.15527,-0.01025 0.23145,-0.03076 0.0771,-0.02148 0.1538,-0.05322 0.22998,-0.09522 l 0,0.249024 c -0.0752,0.03516 -0.15332,0.06152 -0.23438,0.0791 -0.0801,0.01758 -0.16553,0.02637 -0.25635,0.02637 -0.24707,0 -0.44336,-0.07764 -0.58886,-0.23291 -0.14551,-0.155273 -0.21826,-0.364745 -0.21826,-0.628418 0,-0.267577 0.0732,-0.478026 0.21972,-0.631347 0.14746,-0.153319 0.34912,-0.229979 0.60498,-0.229981 0.083,2e-6 0.16406,0.0088 0.24317,0.02637 0.0791,0.0166 0.15576,0.04199 0.22998,0.07617"
+         id="path3660" />
+      <path
+         d="m 161.19063,86.640694 0,0.131836 -1.23926,0 c 0.0117,0.185547 0.0674,0.327148 0.16699,0.424804 0.10059,0.09668 0.24024,0.14502 0.41895,0.14502 0.10351,0 0.20361,-0.01269 0.30029,-0.03809 0.0977,-0.02539 0.19434,-0.06348 0.29004,-0.114258 l 0,0.254883 c -0.0967,0.04102 -0.1958,0.07227 -0.29736,0.09375 -0.10157,0.02148 -0.20459,0.03223 -0.30909,0.03223 -0.26172,0 -0.46923,-0.07617 -0.62255,-0.228515 -0.15235,-0.152344 -0.22852,-0.358398 -0.22852,-0.618164 0,-0.268554 0.0723,-0.481444 0.2168,-0.638672 0.1455,-0.158202 0.3413,-0.237303 0.5874,-0.237305 0.2207,2e-6 0.39502,0.07129 0.52295,0.213867 0.1289,0.141603 0.19336,0.334474 0.19336,0.578614 m -0.26953,-0.0791 c -0.002,-0.14746 -0.0435,-0.265135 -0.12451,-0.353027 -0.0801,-0.08789 -0.18653,-0.131835 -0.31934,-0.131836 -0.15039,1e-6 -0.271,0.04248 -0.36182,0.127441 -0.0898,0.08496 -0.1416,0.204591 -0.15527,0.358887 l 0.96094,-0.0015"
+         id="path3662" />
+      <path
+         d="m 161.62715,85.249092 0.26953,0 0,2.279297 -0.26953,0 0,-2.279297"
+         id="path3664" />
+      <path
+         d="m 162.45918,85.249092 0.26953,0 0,2.279297 -0.26953,0 0,-2.279297"
+         id="path3666" />
+    </g>
+    <g
+       style="font-size:3px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;fill:#000000;fill-opacity:1;stroke:none;font-family:Times;-inkscape-font-specification:Times"
+       id="text4600"
+       transform="matrix(5.5391089,0,0,5.5391089,-908.40072,-363.2872)">
+      <path
+         d="m 170.44072,79.012825 0,0.46582 0.55518,0 0,0.209473 -0.55518,0 0,0.890625 c 0,0.133789 0.0181,0.219727 0.0542,0.257812 0.0371,0.03809 0.11181,0.05713 0.22412,0.05713 l 0.27686,0 0,0.225586 -0.27686,0 c -0.20801,0 -0.35156,-0.03857 -0.43066,-0.115722 -0.0791,-0.07813 -0.11866,-0.219727 -0.11866,-0.424805 l 0,-0.890625 -0.19775,0 0,-0.209473 0.19775,0 0,-0.46582 0.271,0"
+         id="path3669" />
+      <path
+         d="m 172.30254,79.730598 c -0.0303,-0.01758 -0.0635,-0.03027 -0.0996,-0.03808 -0.0352,-0.0088 -0.0742,-0.01318 -0.11719,-0.01318 -0.15235,10e-7 -0.26953,0.04981 -0.35156,0.149414 -0.0811,0.09863 -0.12159,0.240724 -0.12159,0.42627 l 0,0.864257 -0.27099,0 0,-1.640625 0.27099,0 0,0.254883 c 0.0566,-0.09961 0.13038,-0.173338 0.2212,-0.221191 0.0908,-0.04883 0.20117,-0.07324 0.33105,-0.07324 0.0186,1e-6 0.0391,0.0015 0.0615,0.0044 0.0225,0.002 0.0474,0.0054 0.0747,0.01025 l 0.001,0.276855"
+         id="path3671" />
+      <path
+         d="m 173.33379,80.294563 c -0.21778,10e-7 -0.36866,0.0249 -0.45264,0.07471 -0.084,0.04981 -0.12598,0.134767 -0.12598,0.254883 0,0.0957 0.0312,0.171875 0.0937,0.228516 0.0635,0.05566 0.14942,0.0835 0.25782,0.0835 0.14941,0 0.26904,-0.05273 0.35888,-0.158203 0.0908,-0.106445 0.13623,-0.247558 0.13623,-0.42334 l 0,-0.06006 -0.26806,0 m 0.53759,-0.111328 0,0.936035 -0.26953,0 0,-0.249023 c -0.0615,0.09961 -0.13818,0.17334 -0.22998,0.221191 -0.0918,0.04687 -0.2041,0.07031 -0.33691,0.07031 -0.16797,0 -0.30176,-0.04687 -0.40137,-0.140625 -0.0986,-0.09473 -0.14795,-0.221191 -0.14795,-0.379395 0,-0.184569 0.0615,-0.323729 0.18457,-0.41748 0.12402,-0.09375 0.30859,-0.140624 0.55371,-0.140625 l 0.37793,0 0,-0.02637 c 0,-0.124023 -0.041,-0.219726 -0.12304,-0.28711 -0.0811,-0.06836 -0.19532,-0.102537 -0.34278,-0.102539 -0.0937,2e-6 -0.18506,0.01123 -0.27392,0.03369 -0.0889,0.02246 -0.17432,0.05615 -0.25635,0.101074 l 0,-0.249024 c 0.0986,-0.03808 0.19433,-0.0664 0.28711,-0.08496 0.0928,-0.01953 0.1831,-0.02929 0.27099,-0.0293 0.23731,1e-6 0.41455,0.06153 0.53174,0.18457 0.11719,0.123048 0.17578,0.309571 0.17578,0.55957"
+         id="path3673" />
+      <path
+         d="m 175.79765,80.129036 0,0.990234 -0.26953,0 0,-0.981445 c 0,-0.155272 -0.0303,-0.271483 -0.0908,-0.348633 -0.0606,-0.07715 -0.15137,-0.115721 -0.27246,-0.115722 -0.14551,1e-6 -0.26025,0.04639 -0.34424,0.13916 -0.084,0.09277 -0.12597,0.219239 -0.12597,0.379394 l 0,0.927246 -0.271,0 0,-1.640625 0.271,0 0,0.254883 c 0.0645,-0.09863 0.14013,-0.172362 0.22705,-0.221191 0.0879,-0.04883 0.18896,-0.07324 0.30322,-0.07324 0.18847,10e-7 0.33105,0.0586 0.42773,0.175781 0.0967,0.116212 0.14502,0.287599 0.14502,0.51416"
+         id="path3675" />
+      <path
+         d="m 177.38408,79.526985 0,0.254883 c -0.0762,-0.03906 -0.15528,-0.06836 -0.23731,-0.08789 -0.082,-0.01953 -0.16699,-0.0293 -0.25488,-0.0293 -0.13379,2e-6 -0.23437,0.02051 -0.30176,0.06152 -0.0664,0.04102 -0.0996,0.10254 -0.0996,0.18457 0,0.0625 0.0239,0.111818 0.0718,0.147949 0.0479,0.03516 0.14404,0.06885 0.28858,0.101075 l 0.0923,0.02051 c 0.19141,0.04102 0.32715,0.09912 0.40723,0.174317 0.0811,0.07422 0.12158,0.178223 0.12158,0.312012 0,0.152344 -0.0606,0.272949 -0.18164,0.361816 -0.12012,0.08887 -0.28565,0.133301 -0.49658,0.133301 -0.0879,0 -0.17969,-0.0088 -0.27539,-0.02637 -0.0947,-0.0166 -0.19483,-0.04199 -0.3003,-0.07617 l 0,-0.278321 c 0.0996,0.05176 0.19776,0.09082 0.29444,0.117188 0.0967,0.02539 0.19238,0.03809 0.28711,0.03809 0.12695,0 0.2246,-0.02148 0.29297,-0.06445 0.0684,-0.04395 0.10253,-0.105469 0.10253,-0.184571 0,-0.07324 -0.0249,-0.129394 -0.0747,-0.168457 -0.0488,-0.03906 -0.15674,-0.07666 -0.32373,-0.112793 l -0.0937,-0.02197 c -0.167,-0.03516 -0.2876,-0.08887 -0.36182,-0.161133 -0.0742,-0.07324 -0.11133,-0.173339 -0.11133,-0.300293 0,-0.154296 0.0547,-0.273436 0.16406,-0.357422 0.10938,-0.08398 0.26465,-0.125975 0.46582,-0.125976 0.0996,10e-7 0.19336,0.0073 0.28125,0.02197 0.0879,0.01465 0.16895,0.03662 0.24317,0.06592"
+         id="path3677" />
+      <path
+         d="m 177.90263,78.839973 0.26953,0 0,2.279297 -0.26953,0 0,-2.279297"
+         id="path3679" />
+      <path
+         d="m 179.48027,80.294563 c -0.21777,10e-7 -0.36865,0.0249 -0.45264,0.07471 -0.084,0.04981 -0.12597,0.134767 -0.12597,0.254883 0,0.0957 0.0312,0.171875 0.0937,0.228516 0.0635,0.05566 0.14941,0.0835 0.25781,0.0835 0.14941,0 0.26904,-0.05273 0.35889,-0.158203 0.0908,-0.106445 0.13623,-0.247558 0.13623,-0.42334 l 0,-0.06006 -0.26807,0 m 0.5376,-0.111328 0,0.936035 -0.26953,0 0,-0.249023 c -0.0615,0.09961 -0.13819,0.17334 -0.22998,0.221191 -0.0918,0.04687 -0.20411,0.07031 -0.33692,0.07031 -0.16797,0 -0.30176,-0.04687 -0.40136,-0.140625 -0.0986,-0.09473 -0.14795,-0.221191 -0.14795,-0.379395 0,-0.184569 0.0615,-0.323729 0.18457,-0.41748 0.12402,-0.09375 0.30859,-0.140624 0.55371,-0.140625 l 0.37793,0 0,-0.02637 c 0,-0.124023 -0.041,-0.219726 -0.12305,-0.28711 -0.0811,-0.06836 -0.19531,-0.102537 -0.34277,-0.102539 -0.0937,2e-6 -0.18506,0.01123 -0.27393,0.03369 -0.0889,0.02246 -0.17432,0.05615 -0.25635,0.101074 l 0,-0.249024 c 0.0986,-0.03808 0.19434,-0.0664 0.28711,-0.08496 0.0928,-0.01953 0.18311,-0.02929 0.271,-0.0293 0.2373,1e-6 0.41455,0.06153 0.53174,0.18457 0.11718,0.123048 0.17578,0.309571 0.17578,0.55957"
+         id="path3681" />
+      <path
+         d="m 180.84697,79.012825 0,0.46582 0.55518,0 0,0.209473 -0.55518,0 0,0.890625 c 0,0.133789 0.0181,0.219727 0.0542,0.257812 0.0371,0.03809 0.11181,0.05713 0.22412,0.05713 l 0.27686,0 0,0.225586 -0.27686,0 c -0.20801,0 -0.35156,-0.03857 -0.43066,-0.115722 -0.0791,-0.07813 -0.11866,-0.219727 -0.11866,-0.424805 l 0,-0.890625 -0.19775,0 0,-0.209473 0.19775,0 0,-0.46582 0.271,0"
+         id="path3683" />
+      <path
+         d="m 181.7581,79.478645 0.26953,0 0,1.640625 -0.26953,0 0,-1.640625 m 0,-0.638672 0.26953,0 0,0.341309 -0.26953,0 0,-0.341309"
+         id="path3685" />
+      <path
+         d="m 183.22588,79.66761 c -0.14454,2e-6 -0.25879,0.05664 -0.34278,0.169922 -0.084,0.112306 -0.12597,0.266603 -0.12597,0.462891 0,0.196289 0.0415,0.351074 0.12451,0.464355 0.084,0.112305 0.19873,0.168457 0.34424,0.168457 0.14355,0 0.25732,-0.05664 0.3413,-0.169922 0.084,-0.11328 0.12598,-0.267577 0.12598,-0.46289 0,-0.194335 -0.042,-0.348144 -0.12598,-0.461426 -0.084,-0.114257 -0.19775,-0.171385 -0.3413,-0.171387 m 0,-0.228515 c 0.23437,1e-6 0.41845,0.07617 0.55224,0.228515 0.13379,0.152345 0.20068,0.363283 0.20069,0.632813 -1e-5,0.268555 -0.0669,0.479492 -0.20069,0.632812 -0.13379,0.152344 -0.31787,0.228516 -0.55224,0.228516 -0.23536,0 -0.41993,-0.07617 -0.55372,-0.228516 -0.13281,-0.15332 -0.19921,-0.364257 -0.19921,-0.632812 0,-0.26953 0.0664,-0.480468 0.19921,-0.632813 0.13379,-0.152342 0.31836,-0.228514 0.55372,-0.228515"
+         id="path3687" />
+      <path
+         d="m 185.78789,80.129036 0,0.990234 -0.26953,0 0,-0.981445 c -1e-5,-0.155272 -0.0303,-0.271483 -0.0908,-0.348633 -0.0606,-0.07715 -0.15137,-0.115721 -0.27246,-0.115722 -0.14551,1e-6 -0.26026,0.04639 -0.34424,0.13916 -0.084,0.09277 -0.12598,0.219239 -0.12598,0.379394 l 0,0.927246 -0.271,0 0,-1.640625 0.271,0 0,0.254883 c 0.0645,-0.09863 0.14014,-0.172362 0.22705,-0.221191 0.0879,-0.04883 0.18896,-0.07324 0.30322,-0.07324 0.18848,10e-7 0.33106,0.0586 0.42774,0.175781 0.0967,0.116212 0.14502,0.287599 0.14502,0.51416"
+         id="path3689" />
+      <path
+         d="m 187.07402,80.294563 c -0.21777,10e-7 -0.36865,0.0249 -0.45264,0.07471 -0.084,0.04981 -0.12597,0.134767 -0.12597,0.254883 0,0.0957 0.0312,0.171875 0.0937,0.228516 0.0635,0.05566 0.14941,0.0835 0.25781,0.0835 0.14941,0 0.26904,-0.05273 0.35889,-0.158203 0.0908,-0.106445 0.13623,-0.247558 0.13623,-0.42334 l 0,-0.06006 -0.26807,0 m 0.5376,-0.111328 0,0.936035 -0.26953,0 0,-0.249023 c -0.0615,0.09961 -0.13819,0.17334 -0.22998,0.221191 -0.0918,0.04687 -0.20411,0.07031 -0.33692,0.07031 -0.16797,0 -0.30176,-0.04687 -0.40136,-0.140625 -0.0986,-0.09473 -0.14795,-0.221191 -0.14795,-0.379395 0,-0.184569 0.0615,-0.323729 0.18457,-0.41748 0.12402,-0.09375 0.30859,-0.140624 0.55371,-0.140625 l 0.37793,0 0,-0.02637 c 0,-0.124023 -0.041,-0.219726 -0.12305,-0.28711 -0.0811,-0.06836 -0.19531,-0.102537 -0.34277,-0.102539 -0.0937,2e-6 -0.18506,0.01123 -0.27393,0.03369 -0.0889,0.02246 -0.17432,0.05615 -0.25635,0.101074 l 0,-0.249024 c 0.0986,-0.03808 0.19434,-0.0664 0.28711,-0.08496 0.0928,-0.01953 0.18311,-0.02929 0.271,-0.0293 0.2373,1e-6 0.41455,0.06153 0.53174,0.18457 0.11718,0.123048 0.17578,0.309571 0.17578,0.55957"
+         id="path3691" />
+      <path
+         d="m 188.17412,78.839973 0.26953,0 0,2.279297 -0.26953,0 0,-2.279297"
+         id="path3693" />
+      <path
+         d="m 189.76787,79.478645 0.28564,0 0.5127,1.376953 0.51269,-1.376953 0.28565,0 -0.61524,1.640625 -0.36621,0 -0.61523,-1.640625"
+         id="path3695" />
+      <path
+         d="m 193.13994,80.231575 0,0.131836 -1.23926,0 c 0.0117,0.185547 0.0674,0.327149 0.16699,0.424805 0.10059,0.09668 0.24024,0.145019 0.41895,0.145019 0.10351,0 0.20361,-0.01269 0.30029,-0.03809 0.0977,-0.02539 0.19434,-0.06348 0.29004,-0.114258 l 0,0.254883 c -0.0967,0.04102 -0.1958,0.07227 -0.29736,0.09375 -0.10157,0.02148 -0.20459,0.03223 -0.30909,0.03223 -0.26171,0 -0.46923,-0.07617 -0.62255,-0.228516 -0.15235,-0.152343 -0.22852,-0.358398 -0.22852,-0.618164 0,-0.268554 0.0723,-0.481444 0.2168,-0.638672 0.1455,-0.158201 0.34131,-0.237303 0.5874,-0.237304 0.2207,10e-7 0.39502,0.07129 0.52295,0.213867 0.1289,0.141603 0.19336,0.334474 0.19336,0.578613 m -0.26953,-0.0791 c -0.002,-0.147459 -0.0435,-0.265135 -0.12451,-0.353027 -0.0801,-0.08789 -0.18653,-0.131834 -0.31934,-0.131836 -0.15039,2e-6 -0.271,0.04248 -0.36182,0.127442 -0.0898,0.08496 -0.1416,0.204591 -0.15527,0.358886 l 0.96094,-0.0015"
+         id="path3697" />
+      <path
+         d="m 194.75713,79.541634 0,0.251953 c -0.0762,-0.04199 -0.15284,-0.07324 -0.22998,-0.09375 -0.0762,-0.02148 -0.15333,-0.03222 -0.23145,-0.03223 -0.17481,2e-6 -0.31055,0.05567 -0.40723,0.166992 -0.0967,0.110353 -0.14502,0.265626 -0.14502,0.465821 0,0.200196 0.0483,0.355957 0.14502,0.467285 0.0967,0.110352 0.23242,0.165527 0.40723,0.165527 0.0781,0 0.15527,-0.01025 0.23145,-0.03076 0.0771,-0.02148 0.1538,-0.05322 0.22998,-0.09521 l 0,0.249023 c -0.0752,0.03516 -0.15333,0.06152 -0.23438,0.0791 -0.0801,0.01758 -0.16553,0.02637 -0.25635,0.02637 -0.24707,0 -0.44336,-0.07764 -0.58886,-0.23291 -0.14551,-0.155273 -0.21827,-0.364746 -0.21827,-0.628418 0,-0.267577 0.0733,-0.478026 0.21973,-0.631348 0.14746,-0.153319 0.34912,-0.229979 0.60498,-0.22998 0.083,10e-7 0.16406,0.0088 0.24317,0.02637 0.0791,0.0166 0.15576,0.04199 0.22998,0.07617"
+         id="path3699" />
+      <path
+         d="m 195.48955,79.012825 0,0.46582 0.55517,0 0,0.209473 -0.55517,0 0,0.890625 c 0,0.133789 0.0181,0.219727 0.0542,0.257812 0.0371,0.03809 0.11181,0.05713 0.22412,0.05713 l 0.27685,0 0,0.225586 -0.27685,0 c -0.20801,0 -0.35156,-0.03857 -0.43067,-0.115722 -0.0791,-0.07813 -0.11865,-0.219727 -0.11865,-0.424805 l 0,-0.890625 -0.19775,0 0,-0.209473 0.19775,0 0,-0.46582 0.271,0"
+         id="path3701" />
+      <path
+         d="m 197.03642,79.66761 c -0.14453,2e-6 -0.25879,0.05664 -0.34277,0.169922 -0.084,0.112306 -0.12598,0.266603 -0.12598,0.462891 0,0.196289 0.0415,0.351074 0.12451,0.464355 0.084,0.112305 0.19873,0.168457 0.34424,0.168457 0.14356,0 0.25733,-0.05664 0.34131,-0.169922 0.084,-0.11328 0.12598,-0.267577 0.12598,-0.46289 0,-0.194335 -0.042,-0.348144 -0.12598,-0.461426 -0.084,-0.114257 -0.19775,-0.171385 -0.34131,-0.171387 m 0,-0.228515 c 0.23438,1e-6 0.41846,0.07617 0.55225,0.228515 0.13379,0.152345 0.20068,0.363283 0.20068,0.632813 0,0.268555 -0.0669,0.479492 -0.20068,0.632812 -0.13379,0.152344 -0.31787,0.228516 -0.55225,0.228516 -0.23535,0 -0.41992,-0.07617 -0.55371,-0.228516 -0.13281,-0.15332 -0.19922,-0.364257 -0.19922,-0.632812 0,-0.26953 0.0664,-0.480468 0.19922,-0.632813 0.13379,-0.152342 0.31836,-0.228514 0.55371,-0.228515"
+         id="path3703" />
+      <path
+         d="m 199.18535,79.730598 c -0.0303,-0.01758 -0.0635,-0.03027 -0.0996,-0.03808 -0.0352,-0.0088 -0.0742,-0.01318 -0.11719,-0.01318 -0.15234,10e-7 -0.26953,0.04981 -0.35156,0.149414 -0.0811,0.09863 -0.12158,0.240724 -0.12158,0.42627 l 0,0.864257 -0.271,0 0,-1.640625 0.271,0 0,0.254883 c 0.0566,-0.09961 0.13037,-0.173338 0.22119,-0.221191 0.0908,-0.04883 0.20117,-0.07324 0.33105,-0.07324 0.0186,1e-6 0.0391,0.0015 0.0615,0.0044 0.0225,0.002 0.0474,0.0054 0.0747,0.01025 l 10e-4,0.276855"
+         id="path3705" />
+    </g>
+  </g>
+</svg>
diff --git a/doc/source/images/tutorial3a.py b/doc/source/images/tutorial3a.py
new file mode 100644
index 0000000000000000000000000000000000000000..440a000ab681322edf7bab990351332c078708d1
--- /dev/null
+++ b/doc/source/images/tutorial3a.py
@@ -0,0 +1,58 @@
+# Physics background
+# ------------------
+#  band structure of a simple quantum wire in tight-binding approximation
+#
+# Kwant features highlighted
+# --------------------------
+#  - Computing the band structure of a finalized lead.
+
+import kwant
+import numpy as np
+from math import pi
+
+import latex, html
+
+a = 1
+lat = kwant.lattice.Square(a)
+
+t = 1.0
+W = 10
+
+# Define a lead:
+
+sym_lead = kwant.TranslationalSymmetry([lat.vec((-1, 0))])
+lead = kwant.Builder(sym_lead)
+lead.default_site_group = lat
+
+for j in xrange(W):
+    lead[(0, j)] = 4 * t
+
+    if j > 0:
+        lead[(0, j), (0, j-1)] = - t
+
+    lead[(1, j), (0, j)] = - t
+
+# Now compute the band structure
+
+# Only a finalized lead has the information about bandstructure
+flead = lead.finalized()
+
+momenta = np.arange(-pi, pi + .01, 0.02 * pi)
+energy_list = [flead.energies(k) for k in momenta]
+
+import pylab
+pylab.plot(momenta, energy_list)
+pylab.xlabel("momentum [in untis of (lattice constant)^-1]",
+             fontsize=latex.mpl_label_size)
+pylab.ylabel("energy [in units of t]",
+             fontsize=latex.mpl_label_size)
+fig = pylab.gcf()
+pylab.setp(fig.get_axes()[0].get_xticklabels(),
+           fontsize=latex.mpl_tick_size)
+pylab.setp(fig.get_axes()[0].get_yticklabels(),
+           fontsize=latex.mpl_tick_size)
+fig.set_size_inches(latex.mpl_width_in, latex.mpl_width_in*3./4.)
+fig.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.15)
+fig.savefig("tutorial3a_result.pdf")
+fig.savefig("tutorial3a_result.png",
+            dpi=(html.figwidth_px/latex.mpl_width_in))
diff --git a/doc/source/images/tutorial3b.py b/doc/source/images/tutorial3b.py
new file mode 100644
index 0000000000000000000000000000000000000000..dff11c641d2810d5c705c78b2c81c7a567c3f66a
--- /dev/null
+++ b/doc/source/images/tutorial3b.py
@@ -0,0 +1,94 @@
+# Physics background
+# ------------------
+#  Fock-darwin spectrum of a quantum dot (energy spectrum in
+#  as a function of a magnetic field)
+#
+# Kwant features highlighted
+# --------------------------
+#  - Use of `hamiltonian_submatrix` in order to obtain a Hamiltonian
+#    matrix.
+
+
+from cmath import exp
+import kwant
+
+import latex, html
+
+# First, define the tight-binding system
+
+sys = kwant.Builder()
+
+# Here, we are only working with square lattices
+
+# for simplicity, take lattice constant = 1
+a = 1
+lat = kwant.lattice.Square(a)
+
+t = 1.0
+r = 10
+
+# Define the quantum dot
+
+def circle(pos):
+    (x, y) = pos
+    rsq = x**2 + y**2
+    return rsq < r**2
+
+def hopx(site1, site2):
+    y = site1.pos[1]
+    return - t * exp(-1j * B * y)
+
+sys[lat.shape(circle, (0, 0))] = 4 * t
+# hoppings in x-direction
+sys[sys.possible_hoppings((1, 0), lat, lat)] = hopx
+# hoppings in y-directions
+sys[sys.possible_hoppings((0, 1), lat, lat)] = - t
+
+# It's a closed system for a change, so no leads
+
+# finalize the system
+
+fsys = sys.finalized()
+
+# and plot it, to make sure it's proper
+
+kwant.plot(fsys, "tutorial3b_sys.pdf")
+kwant.plot(fsys, "tutorial3b_sys.png")
+
+# In the following, we compute the spectrum of the quantum dot
+# using dense matrix methods. This works in this toy example,
+# as the system is tiny. In a real example, one would want to use
+# sparse matrix methods
+
+import scipy.linalg as la
+
+Bs = []
+energies = []
+for iB in xrange(100):
+    B = iB * 0.002
+
+# Obtain the Hamiltonian as a dense matrix
+    ham_mat = fsys.hamiltonian_submatrix()
+
+    ev = la.eigh(ham_mat, eigvals_only=True)
+
+    Bs.append(B)
+    energies.append(ev[:15])
+
+import pylab
+
+pylab.plot(Bs, energies)
+pylab.xlabel("magnetic field [some arbitrary units]",
+             fontsize=latex.mpl_label_size)
+pylab.ylabel("energy [in units of t]",
+             fontsize=latex.mpl_label_size)
+fig = pylab.gcf()
+pylab.setp(fig.get_axes()[0].get_xticklabels(),
+           fontsize=latex.mpl_tick_size)
+pylab.setp(fig.get_axes()[0].get_yticklabels(),
+           fontsize=latex.mpl_tick_size)
+fig.set_size_inches(latex.mpl_width_in, latex.mpl_width_in*3./4.)
+fig.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.15)
+fig.savefig("tutorial3b_result.pdf")
+fig.savefig("tutorial3b_result.png",
+            dpi=(html.figwidth_px/latex.mpl_width_in))
diff --git a/doc/source/images/tutorial4.py b/doc/source/images/tutorial4.py
new file mode 100644
index 0000000000000000000000000000000000000000..59795c5a582ee0b3fde68928548c9b20107bc3fd
--- /dev/null
+++ b/doc/source/images/tutorial4.py
@@ -0,0 +1,199 @@
+# Physics background
+# ------------------
+#  Transport through a graphene quantum dot with a pn-junction
+#
+# Kwant features highlighted
+# --------------------------
+#  - Application of all the aspects of tutorials 1-3 to a more complicated
+#    lattice, namely graphene
+
+from __future__ import division # so that 1/2 == 0.5, and not 0
+from math import pi, sqrt
+import numpy as np
+
+import kwant
+import latex, html
+
+# For computing eigenvalues
+import scipy.sparse.linalg as sla
+
+# For plotting
+import pylab
+
+
+# Define the graphene lattice
+sin_30, cos_30 = (1/2, np.sqrt(3)/2)
+graphene = kwant.make_lattice([(1, 0), (sin_30, cos_30)],
+                              [(0, 0), (0, 1/np.sqrt(3))])
+a, b = graphene.sublattices
+
+
+def make_system(r=10, w=2.0, pot=0.1):
+
+    #### Define the scattering region. ####
+    # circular scattering region
+    def circle(pos):
+        x, y = pos
+        return x**2 + y**2 < r**2
+
+    sys= kwant.Builder()
+
+    # w: width and pot: potential maximum of the p-n junction
+    def potential(site):
+        (x, y) = site.pos
+        d = y * cos_30 + x * sin_30
+        return pot * np.tanh(d / w)
+
+    sys[graphene.shape(circle, (0,0))] = potential
+
+    # specify the hoppings of the graphene lattice in the
+    # format expected by possibe_hoppings()
+    hoppings = (((0, 0), b, a), ((0, 1), b, a), ((-1, 1), b, a))
+    for hopping in hoppings:
+        sys[sys.possible_hoppings(*hopping)] = - 1
+
+    # Modify the scattering region
+    del sys[a(0,0)]
+    sys[a(-2,1), b(2, 2)] = -1
+
+    # Keep a copy of the closed system without leads, for
+    # eigenvalue computations
+    closed_fsys = sys.finalized()
+
+    #### Define the leads. ####
+    # left lead
+    sym0 = kwant.TranslationalSymmetry([graphene.vec((-1, 0))])
+
+    def lead0_shape(pos):
+        x, y = pos
+        return (-1 < x < 1) and (-0.4 * r < y < 0.4 * r)
+
+    lead0 = kwant.Builder(sym0)
+    lead0[graphene.shape(lead0_shape, (0,0))] = - pot
+    for hopping in hoppings:
+        lead0[lead0.possible_hoppings(*hopping)] = - 1
+
+    # The second lead, going ot the top right
+    sym1 = kwant.TranslationalSymmetry([graphene.vec((0, 1))])
+
+    def lead1_shape(pos):
+        x, y = pos
+        u = x * sin_30 + y * cos_30
+        v = y * sin_30 - x * cos_30
+        return (-1 < u < 1) and (-0.4 * r < v < 0.4 * r)
+
+    lead1 = kwant.Builder(sym1)
+    lead1[graphene.shape(lead1_shape, (0,0))] = pot
+    for hopping in hoppings:
+        lead1[lead1.possible_hoppings(*hopping)] = - 1
+
+    # Attach the leads
+    sys.attach_lead(lead0)
+    sys.attach_lead(lead1)
+
+    return sys.finalized(), closed_fsys, lead0.finalized()
+
+
+def compute_evs(sys):
+    # Compute some eigenvalues of the closed system
+    sparse_mat = sys.hamiltonian_submatrix(sparse=True)
+
+    try:
+        # This requires scipy version >= 0.9.0
+        # Failure (i.e. insufficient scipy version) is not critical
+        # for the remainder of the tutorial, hence the try-block
+        evs = scipy.sparse.linalg.eigs(sparse_mat, 2)[0]
+        print evs
+    except:
+        pass
+
+
+def plot_conductance(fsys, energies):
+    # Compute transmission as a function of energy
+    data = []
+    for energy in energies:
+        smatrix = kwant.solve(fsys, energy)
+        data.append(smatrix.transmission(0, 1))
+
+    pylab.clf()
+    pylab.plot(energies, data)
+    pylab.xlabel("energy [in units of t]",
+                 fontsize=latex.mpl_label_size)
+    pylab.ylabel("conductance [in units of e^2/h]",
+                 fontsize=latex.mpl_label_size)
+    fig = pylab.gcf()
+    pylab.setp(fig.get_axes()[0].get_xticklabels(),
+               fontsize=latex.mpl_tick_size)
+    pylab.setp(fig.get_axes()[0].get_yticklabels(),
+               fontsize=latex.mpl_tick_size)
+    fig.set_size_inches(latex.mpl_width_in, latex.mpl_width_in*3./4.)
+    fig.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.15)
+    fig.savefig("tutorial4_result.pdf")
+    fig.savefig("tutorial4_result.png",
+                dpi=(html.figwidth_px/latex.mpl_width_in))
+
+
+def plot_bandstructure(flead, momenta):
+    # Use the method ``energies`` of the finalized lead to compute
+    # the bandstructure
+    energy_list = [flead.energies(k) for k in momenta]
+
+    pylab.clf()
+    pylab.plot(momenta, energy_list)
+    pylab.xlabel("momentum [in untis of (lattice constant)^-1]",
+                 fontsize=latex.mpl_label_size)
+    pylab.ylabel("energy [in units of t]",
+                 fontsize=latex.mpl_label_size)
+    fig = pylab.gcf()
+    pylab.setp(fig.get_axes()[0].get_xticklabels(),
+               fontsize=latex.mpl_tick_size)
+    pylab.setp(fig.get_axes()[0].get_yticklabels(),
+               fontsize=latex.mpl_tick_size)
+    fig.set_size_inches(latex.mpl_width_in, latex.mpl_width_in*3./4.)
+    fig.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.15)
+    fig.savefig("tutorial4_bs.pdf")
+    fig.savefig("tutorial4_bs.png",
+                dpi=(html.figwidth_px/latex.mpl_width_in))
+
+
+def main():
+    pot = 0.1
+    fsys, closed_fsys, flead = make_system(pot=pot)
+
+    # First, plot the closed system, and compute some eigenvalues
+
+    # To highlight the two sublattices of graphene, we plot one with
+    # a filled, and the other one with an open circle:
+    plotter_symbols = {a: kwant.plotter.Circle(r=0.3),
+                       b: kwant.plotter.Circle(r=0.3,
+                                               fcol=kwant.plotter.white,
+                                               lcol=kwant.plotter.black)}
+
+    kwant.plot(closed_fsys, a=1./sqrt(3.), symbols=plotter_symbols,
+               filename="tutorial4_sys1.pdf",
+               width=latex.figwidth_pt)
+    kwant.plot(closed_fsys, a=1./sqrt(3.), symbols=plotter_symbols,
+               filename="tutorial4_sys1.png",
+               width=html.figwidth_px)
+
+    # Then, plot the system with leads and compute the band structure
+    # of one of the (zigzag) leads, as well as the conductance
+
+    kwant.plot(fsys, a=1/sqrt(3.), symbols=plotter_symbols,
+               filename="tutorial4_sys2.png",
+               width=html.figwidth_px)
+    kwant.plot(fsys, a=1/sqrt(3.), symbols=plotter_symbols,
+               filename="tutorial4_sys2.pdf",
+               width=latex.figwidth_pt)
+
+    momenta = np.arange(-pi, pi + .01, 0.1 * pi)
+    plot_bandstructure(flead, momenta)
+
+    energies = np.arange(-2 * pot, 2 * pot, pot / 10.5)
+    plot_conductance(fsys, energies)
+
+
+# Call the main function if the script gets executed (as opposed to imported).
+# See <http://docs.python.org/library/__main__.html>.
+if __name__ == '__main__':
+    main()
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..52be102cf36c565347fe630a63cc3d541e87bfe6
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1,20 @@
+===================
+kwant documentation
+===================
+
+.. toctree::
+   :maxdepth: 1
+
+   README file (includes installation instructions) <readme>
+
+.. toctree::
+   :maxdepth: 2
+
+   tutorial/index.rst
+
+.. toctree::
+   :maxdepth: 2
+
+   reference/index
+
+* :ref:`genindex`
diff --git a/doc/source/readme.rst b/doc/source/readme.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8ecf341e8f24ee0691316b27e0f658f6e5c654db
--- /dev/null
+++ b/doc/source/readme.rst
@@ -0,0 +1 @@
+.. include:: ../../README.txt
diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f87e423ef059580fbf51703ce9410483593a8756
--- /dev/null
+++ b/doc/source/reference/index.rst
@@ -0,0 +1,19 @@
+Reference guide
+===============
+
+In the following, all kwant modules appear in the order of decreasing end-user
+relevance.
+
+.. toctree::
+   :maxdepth: 1
+
+   kwant
+   kwant.builder
+   kwant.lattice
+   kwant.plotter
+   kwant.solvers
+   kwant.run
+   kwant.system
+   kwant.graph
+   kwant.physics
+   kwant.linalg
diff --git a/doc/source/reference/kwant.builder.rst b/doc/source/reference/kwant.builder.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0ed06ff9892cc63bb879dde42e79a589955ea289
--- /dev/null
+++ b/doc/source/reference/kwant.builder.rst
@@ -0,0 +1,24 @@
+:mod:`kwant.builder` -- High-level construction of systems
+==========================================================
+
+.. module:: kwant.builder
+
+Types
+-----
+.. autosummary::
+   :toctree: generated/
+
+   Builder
+   Site
+   SimpleSiteGroup
+   BuilderLead
+   SelfEnergy
+
+Abstract base classes
+---------------------
+.. autosummary::
+   :toctree: generated/
+
+   SiteGroup
+   Symmetry
+   Lead
diff --git a/doc/source/reference/kwant.graph.rst b/doc/source/reference/kwant.graph.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1bafcd9662859837b3421901a648c972f00ec4de
--- /dev/null
+++ b/doc/source/reference/kwant.graph.rst
@@ -0,0 +1,94 @@
+:mod:`kwant.graph` -- Low-level, efficient directed graphs
+==========================================================
+
+.. module:: kwant.graph
+
+Graphs, as handled by this module, consist of nodes (numbered by integers,
+usually >= 0).  Pairs of nodes can be connected by edges (numbered by integers
+>= 0).  An edge is described by a pair (tail, head) of node numbers and is
+always directed.
+
+The basic work-flow is to
+
+ (1) create an object of type `Graph`,
+
+ (2) add edges to it using the methods `~Graph.add_edge` and
+     `~Graph.add_edges`,
+
+ (3) create a compressed copy of the graph using the method
+     `~Graph.compressed`,
+
+ (4) and use the thus created object for efficient queries.
+
+Example:
+
+>>> import kwant
+>>> g = kwant.graph.Graph()
+>>> g.add_edge(0, 1)
+0
+>>> g.add_edge(0, 2)
+1
+>>> g = g.compressed()
+>>> list(g.out_neighbors(0))
+[1, 2]
+
+Node numbers can be assigned freely, but if they are not consecutive integers
+starting with zero, storage space is wasted in the compressed graph.  Negative
+node numbers are special and can be allowed optionally (see further).
+
+Whenever a method returns multiple edges or nodes (via an iterator), they
+appear in the order in which the edges associated with them were added to the
+graph during construction.
+
+Edge IDs are non-negative integers which identify edges unambiguously.  They
+are assigned automatically when the graph is compressed.  The edge IDs of edges
+with the same tail will occupy a dense interval of integers.  The IDs of edges
+sharing the same tail will be assigned from lowest to highest in the order in
+which these edges had been added.
+
+The method `Graph.compressed` takes a parameter which determines whether the
+graph will be one-way (the default) or two-way.  One-way graphs can be queried
+for the existence of an edge and provide the nodes to which a node points
+(=outgoing neighbors).  In addition, two-way graphs can be queried for the
+nodes which point to a node (=incoming neighbors).
+
+Another parameter of `Graph.compressed`, `edge_nr_translation`, determines
+whether it will be possible to use the method `edge_id` of the compressed
+graph.  This method returns the edge ID of an edge given the edge number that
+was returned when an edge was added.
+
+Negative node numbers can be allowed for a `Graph` (parameter
+`allow_negative_nodes` of the constructor).  Edges with negative nodes are
+considered to be dangling: negative nodes can be neighbors of other nodes, but
+cannot be queried directly for neighbors.  Consequently, "doubly-dangling"
+edges which connect two negative nodes do not make sense and are never allowed.
+The range of values used for the negative node numbers does not influence the
+required storage space in any way.
+
+Compressed graphs have the readonly attributes `~kwant.CGraph.num_nodes` and
+`~kwant.CGraph.num_edges`.
+
+Graph types
+-----------
+.. autosummary::
+   :toctree: generated/
+
+   Graph
+   CGraph
+
+Graph algorithms
+----------------
+.. autosummary::
+   :toctree: generated/
+
+   slice
+   make_undirected
+   remove_duplicates
+   induced_subgraph
+   print_graph
+
+Other
+-----
++--------------+------------------------------------------+
+| `gint_dtype` | Data type used for graph nodes and edges |
++--------------+------------------------------------------+
diff --git a/doc/source/reference/kwant.lattice.rst b/doc/source/reference/kwant.lattice.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9309feea0f695866a8ea9a783e396b8c6b22c1e1
--- /dev/null
+++ b/doc/source/reference/kwant.lattice.rst
@@ -0,0 +1,23 @@
+:mod:`kwant.lattice` -- Bravais lattices
+========================================
+
+.. module:: kwant.lattice
+
+General
+-------
+.. autosummary::
+   :toctree: generated/
+
+   make_lattice
+   TranslationalSymmetry
+   MonatomicLattice
+   PolyatomicLattice
+
+Library of lattices
+-------------------
+.. autosummary::
+   :toctree: generated/
+
+   Chain
+   Honeycomb
+   Square
diff --git a/doc/source/reference/kwant.linalg.lapack.rst b/doc/source/reference/kwant.linalg.lapack.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6a1e026250481df20c4434c061050cd113f86c6b
--- /dev/null
+++ b/doc/source/reference/kwant.linalg.lapack.rst
@@ -0,0 +1,6 @@
+:mod:`kwant.linalg.lapack` -- Low-level access to LAPACK functions
+==================================================================
+
+.. automodule:: kwant.linalg.lapack
+   :members:
+   :undoc-members:
diff --git a/doc/source/reference/kwant.linalg.rst b/doc/source/reference/kwant.linalg.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cecf2bb46b5b3b654cfbf1c9e65f1631b638fd8f
--- /dev/null
+++ b/doc/source/reference/kwant.linalg.rst
@@ -0,0 +1,43 @@
+:mod:`kwant.linalg` -- Linear algebra routines
+==============================================
+
+.. module:: kwant.linalg
+
+LU decomposition
+----------------
+.. autosummary::
+   :toctree: generated/
+
+   lu_factor
+   lu_solve
+   rcond_from_lu
+
+Schur decomposition
+-------------------
+.. autosummary::
+   :toctree: generated/
+
+   convert_r2c_gen_schur
+   convert_r2c_schur
+   evecs_from_gen_schur
+   evecs_from_schur
+   gen_schur
+   order_gen_schur
+   order_schur
+   schur
+
+Eigendecomposition
+------------------
+.. autosummary::
+   :toctree: generated/
+
+   gen_eig
+
+
+Submodules
+----------
+.. toctree::
+   :maxdepth: 1
+
+
+   kwant.linalg.lapack
diff --git a/doc/source/reference/kwant.physics.rst b/doc/source/reference/kwant.physics.rst
new file mode 100644
index 0000000000000000000000000000000000000000..83d0c2f1d36f0b022c535c3a490de607b478d51d
--- /dev/null
+++ b/doc/source/reference/kwant.physics.rst
@@ -0,0 +1,12 @@
+:mod:`kwant.physics` -- Physics-related algorithms
+==================================================
+
+.. module:: kwant.physics
+
+Leads
+-----
+.. autosummary::
+   :toctree: generated/
+
+   modes
+   self_energy
diff --git a/doc/source/reference/kwant.plotter.rst b/doc/source/reference/kwant.plotter.rst
new file mode 100644
index 0000000000000000000000000000000000000000..39245ad194da32afa0522a74e613cad64e3b9ef2
--- /dev/null
+++ b/doc/source/reference/kwant.plotter.rst
@@ -0,0 +1,37 @@
+:mod:`kwant.plotter` -- Plotting of systems
+===========================================
+
+.. module:: kwant.plotter
+
+
+Plotting routine
+----------------
+.. autosummary::
+   :toctree: generated/
+
+   plot
+
+Auxilliary types
+----------------
+.. autosummary::
+   :toctree: generated/
+
+   Circle
+   Polygon
+   Line
+   LineStyle
+   Color
+
+Pre-defined colors
+------------------
++------------------------+
+| `~kwant.plotter.black` |
++------------------------+
+| `~kwant.plotter.white` |
++------------------------+
+| `~kwant.plotter.red`   |
++------------------------+
+| `~kwant.plotter.green` |
++------------------------+
+| `~kwant.plotter.blue`  |
++------------------------+
diff --git a/doc/source/reference/kwant.rst b/doc/source/reference/kwant.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6b7058874341d33a10eb1f8926f91b97568d62b9
--- /dev/null
+++ b/doc/source/reference/kwant.rst
@@ -0,0 +1,42 @@
+:mod:`kwant` -- Top level package
+=================================
+
+.. module:: kwant
+
+Along with the various modules documented in this reference, the top-level
+kwant package provides short names for a few widely used objects from
+several modules.
+
+.. currentmodule:: kwant.builder
+
+From `kwant.builder`
+--------------------
+.. autosummary::
+
+   Builder
+
+.. currentmodule:: kwant.lattice
+
+From `kwant.lattice`
+--------------------
+.. autosummary::
+
+   TranslationalSymmetry
+   make_lattice
+
+.. currentmodule:: kwant.plotter
+
+From `kwant.plotter`
+--------------------
+
+.. autosummary::
+
+   plot
+
+.. currentmodule:: kwant.solvers.sparse
+
+From `kwant.solvers.sparse`
+---------------------------
+.. autosummary::
+
+   solve
diff --git a/doc/source/reference/kwant.run.rst b/doc/source/reference/kwant.run.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cd8ffe89fa191dd64bb9e43ce0aae24112e0655c
--- /dev/null
+++ b/doc/source/reference/kwant.run.rst
@@ -0,0 +1,10 @@
+:mod:`kwant.run` -- Support for running scripts from the system shell
+=====================================================================
+
+.. module:: kwant.run
+
+.. autosummary::
+   :toctree: generated/
+
+   exec_argv
+   randomize
diff --git a/doc/source/reference/kwant.solvers.rst b/doc/source/reference/kwant.solvers.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bde4ca876269cce9baa2ca7aa4e3ad07f56e5416
--- /dev/null
+++ b/doc/source/reference/kwant.solvers.rst
@@ -0,0 +1,12 @@
+:mod:`kwant.solvers` -- Library of solvers
+==========================================
+
+The following solvers are available.  Note that the solvers (with the exception
+of `kwant.solvers.sparse`) have to be imported explicitly.
+
+.. module:: kwant.solvers
+
+.. toctree::
+   :maxdepth: 1
+
+   kwant.solvers.sparse
diff --git a/doc/source/reference/kwant.solvers.sparse.rst b/doc/source/reference/kwant.solvers.sparse.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d1f6b46fdf10aae20410fdda058ac24f4fdf9fa9
--- /dev/null
+++ b/doc/source/reference/kwant.solvers.sparse.rst
@@ -0,0 +1,11 @@
+:mod:`kwant.solvers.sparse` -- Basic sparse matrix solver
+=========================================================
+
+.. module:: kwant.solvers.sparse
+
+.. autosummary::
+   :toctree: generated/
+
+   solve
+   make_linear_sys
+   BlockResult
diff --git a/doc/source/reference/kwant.system.rst b/doc/source/reference/kwant.system.rst
new file mode 100644
index 0000000000000000000000000000000000000000..005da206220d5254896f21dd9fb6aba59f936b47
--- /dev/null
+++ b/doc/source/reference/kwant.system.rst
@@ -0,0 +1,22 @@
+:mod:`kwant.system` -- Low-level interface of tight binding systems
+*******************************************************************
+
+.. currentmodule:: kwant.system
+
+This module is the binding link between constructing tight-binding systems and
+doing calculations with these systems.  It defines the interface which any
+problem-solving algorithm should be able to access, independently on how the
+system was constructed.  This is achieved by using python abstract base classes
+(ABC) -- classes, which help to ensure that any derived classes implement the
+necessary interface.
+
+Any system which is provided to a solver should be derived from the appropriate
+class in this module, and every solver can assume that its input corresponds to
+the interface defined here.
+
+.. autosummary::
+   :toctree: generated/
+
+   System
+   InfiniteSystem
+   FiniteSystem
diff --git a/doc/source/tutorial/index.rst b/doc/source/tutorial/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..27b0f4509f5726f5e458e5ff56097af634f75ea5
--- /dev/null
+++ b/doc/source/tutorial/index.rst
@@ -0,0 +1,15 @@
+Tutorial: Learning kwant through examples
+=========================================
+
+In the following, the most important features of kwant are explained using
+simple, but still physically meaningful examples. Each of the examples
+is commented extensively. In addition, you will find notes about more subtle,
+technical details at the end of each example. At first reading,
+these notes maybe safely skipped.
+
+.. toctree::
+    introduction
+    tutorial1
+    tutorial2
+    tutorial3
+    tutorial4
diff --git a/doc/source/tutorial/introduction.rst b/doc/source/tutorial/introduction.rst
new file mode 100644
index 0000000000000000000000000000000000000000..77035e0f65887ec63ac684d2cd8e67e1232223a3
--- /dev/null
+++ b/doc/source/tutorial/introduction.rst
@@ -0,0 +1,54 @@
+Introduction
+============
+
+kwant is currently suitable for calculating Landauer transport and
+calculating dispersions of various tight binding systems.  Possible future
+extensions are calculations of local density of states, supercurrent, of
+various transport properties like Wigner-Smith delay time or Goos-Hänchen
+shift, Boltzmann transport, etc.
+
+There are two steps in obtaining a numerical solution to a problem: The first
+is defining the problem in a computer-accessible way, the second solving it.
+The aim of a software package like kwant is to make both steps easier.
+
+In kwant, the definition of the problem amounts to the creation of a tight
+binding system.  The solution of the problem, i.e. the calculation of the
+values of physical observables, is achieved by passing the system to a
+*solver*.
+
+The definition of a tight binding system can be seen as nothing else than the
+creation of a huge sparse matrix (the Hamiltonian).  Equivalently, the sparse
+Hamiltonian matrix can be seen as an annotated *graph*: the nodes of the graph
+are the sites of the tight binding system, the edges are the hoppings.  Sites
+are annotated with the corresponding on-site Hamiltonian matrix, hoppings are
+annotated with the corresponding hopping integral matrix.
+
+One of the central goals of kwant is to allow easy creation of such annotated
+graphs that represent tight binding system.  kwant can be made to know about
+the general structure of a particular system, the involved lattices and
+symmetries.  For example, a system with a 1D translational symmetry may be used
+as a lead and attached to a another system.  If both systems have sites which
+belong to the same lattices, the attaching can be done automatically, even if
+the shapes of the systems are irregular.
+
+kwant is a library for the `Python <http://python.org/>`_ programming language.
+Care was taken to fit well with the spirit of the language and to take
+advantage of its expressive power.
+
+Once a tight binding system has been created, solvers provided by kwant can be
+used to compute physical observables.  Solvers expect the system to be in a
+different format than the one used for construction -- the system has to be
+*finalized*.  In a finalized system the tight binding graph is fixed but the
+matrix elements of the Hamiltonian may still change.  The finalized format is
+both more efficient and simpler -- the solvers don't have to deal with the
+various details which were facilitating the construction of the system.
+
+The typical workflow with kwant is as follows:
+
+#. Create an "empty" tight binding system.
+
+#. Set its matrix elements and hoppings.
+
+#. Attach leads (tight binding systems with translational symmetry).
+
+#. Pass the finalized system to a solver.
diff --git a/doc/source/tutorial/tutorial1.rst b/doc/source/tutorial/tutorial1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..88071dd8da690fcefb8127507ece39483868be65
--- /dev/null
+++ b/doc/source/tutorial/tutorial1.rst
@@ -0,0 +1,478 @@
+First steps in kwant: Setting up a simple system and computing transport
+------------------------------------------------------------------------
+
+Transport through a quantum wire
+................................
+
+As first example, we compute the transmission probability
+through a two-dimensional quantum wire. For this we use a tight-binding
+model representing the two-dimensional Schroedinger equation
+
+.. math::
+
+    H = \frac{\hbar^2}{2 m} (\partial_x^2+\partial_y^2) + V(y)
+
+with a hard wall confinement :math:`V(y)` in y-direction.
+
+In order to use kwant, we need to import it:
+
+.. literalinclude:: ../../../examples/tutorial1a.py
+    :lines: 11
+
+Enabling kwant is as easy as this [#]_ !
+
+The first step is now the definition of the system with scattering region
+and leads. For this we make use of the `~kwant.builder.Builder` class
+that allows for a convenient way to define the system. For this we need to
+create an instance of the `~kwant.builder.Builder` class:
+
+.. literalinclude:: ../../../examples/tutorial1a.py
+    :lines: 15
+
+Next, we tell `~kwant.builder.Builder` that we want to work
+with a square lattice (more about the details of this code snippet in
+the notes below).  For simplicity, we set the lattice constant to
+unity:
+
+.. literalinclude:: ../../../examples/tutorial1a.py
+    :lines: 18-20
+
+Since we work with a square lattice, we label the points with two
+integer coordinates `(i, j)`. `~kwant.builder.Builder` then
+allows us to add matrix elements corresponding to lattice points:
+``sys[(i, j)] = ...`` sets the on-site energy for the point `(i, j)`,
+and ``sys[(i1, j1), (i2, j2)] = ...`` the hopping matrix element
+**from** point `(i2, j2)` **to** point `(i1, j1)`.
+
+We now build a rectangular scattering region that is `W`
+lattice points wide and `L` lattice points long:
+
+.. literalinclude:: ../../../examples/tutorial1a.py
+    :lines: 22-24, 27-38
+
+Next, we define the leads. Leads are also constructed using
+`~kwant.builder.Builder`, but in this case, the
+system must have a translational symmetry:
+
+.. literalinclude:: ../../../examples/tutorial1a.py
+    :lines: 46-48
+
+.. note::
+
+    Here it is essential that we write ``lead0.default_site_group = lat``
+    instead of ``lead0.default_site_group = kwant.lattice.Square(a)``.
+    For details see the notes below.
+
+Here, the `~kwant.builder.Builder` takes the translational symmetry
+as an optional parameter. Note that the (real space)
+vector ``lat.vec((-1, 0))`` defining the translational symmetry
+must point in a direction *away* from the scattering region, *into*
+the lead -- hence, lead 0 [#]_ will be the left lead, extending to
+infinity to the left.
+
+For the lead itself it is enough to add the points of one unit cell as well
+as the hoppings inside one unit cell and to the next unit cell of the lead.
+For a square lattice, and a lead in y-direction the unit cell is
+simply a vertical line of points:
+
+.. literalinclude:: ../../../examples/tutorial1a.py
+    :lines: 50-56
+
+Note that here it doesn't matter if you add the hoppings to the next or the
+previous unit cell -- the translational symmetry takes care of that.
+
+We also want to add a lead on the right side. The only difference to
+the left lead is that the vector of the translational
+symmetry must point to the right, the remaining code is the same:
+
+.. literalinclude:: ../../../examples/tutorial1a.py
+    :lines: 60-70
+
+Note that here we added points with x-coordinate 0, just as for the left lead.
+You might object that the right lead should be placed `L`
+(or `L+1`?) points to the right with respect to the left lead. In fact,
+you do not need to worry about that. The `~kwant.builder.Builder` with
+`~kwant.lattice.TranslationalSymmetry` represents a lead which is
+infinitely extended. These isolated, infinite leads can then be simply
+attached at the right position using:
+
+.. literalinclude:: ../../../examples/tutorial1a.py
+    :lines: 74-75
+
+More details about attaching leads can be found in the tutorial
+:ref:`tutorial-abring`.
+
+Now we have finished building our system! We need to finalize it, in
+order to use it for a transport calculation:
+
+.. literalinclude:: ../../../examples/tutorial1a.py
+    :lines: 79
+
+and should plot it, to make sure we didn't make mistakes:
+
+.. literalinclude:: ../../../examples/tutorial1a.py
+    :lines: 83
+
+This command should bring up this picture:
+
+.. image:: /images/tutorial1a_sys.*
+
+The system is represented in the usual way for tight-binding systems:
+dots represent the lattice points `(i, j)`, and for every
+nonzero hopping element between points there is a line connecting these
+points. From the leads, only a few (default 2) unit cells are shown, with
+fading color.
+
+Having successfully created a system, we now can immediately start to compute
+its conductance as a function of energy:
+
+ .. literalinclude:: ../../../examples/tutorial1a.py
+    :lines: 87-98
+
+Currently, there is only one algorithm implemented to compute the
+conductance: :func:`kwant.solve <kwant.solvers.sparse.solve>` which computes
+the scattering matrix `smatrix` solving a sparse linear system.
+`smatrix` itself allows you to directly compute the total
+transmission probability from lead 0 to lead 1 as
+``smatrix.transmission(1, 0)``.
+
+Finally we can use `matplotlib` to make a plot of the computed data
+(although writing to file and using an external viewer such as
+gnuplot or xmgrace is just as viable)
+
+ .. literalinclude:: ../../../examples/tutorial1a.py
+    :lines: 102-108
+
+This should yield the result
+
+.. image:: /images/tutorial1a_result.*
+
+We see a conductance quantized in units of :math:`e^2/h`,
+increasing in steps as the energy is increased. The
+value of the conductance is determined by the number of occupied
+subbands that increases with energy.
+
+
+.. seealso::
+     The full source code can be found in
+     :download:`example/tutorial1a.py <../../../examples/tutorial1a.py>`
+
+.. specialnote:: Technical details
+
+   - In the example above, when building the system, only one direction
+     of hopping is given, i.e. ``sys[(i, j), (i, j-1)] = ...`` and
+     not also ``sys[(i, j-1), (i, j)] = ...``. The reason is that
+     `~kwant.builder.Builder` automatically adds the other
+     direction of the hopping such that the resulting system is Hermitian.
+
+     It however does not hurt the other direction of the hopping, too::
+
+         sys[(1, 0), (0, 0)] = - t
+         sys[(0, 0), (1, 0)] = - t.conj()
+
+     (assuming that `t` is complex) is perfectly fine. However,
+     be aware that also
+
+     ::
+
+         sys[(1, 0), (0, 0)] = - 1
+         sys[(0, 0), (1, 0)] = - 2
+
+     is valid code. In the latter case, the hopping ``sys[(1, 0), (0, 0)]``
+     is overwritten by the last line and also equals to -2.
+
+   - Some more details about
+
+     ::
+
+         lat = kwant.lattices.Square(a)
+         sys.default_site_group = lat
+
+     By setting ``sys.default_site_group = lat`` you specify to
+     `~kwant.builder.Builder` that it should interpret tuples like
+     `(i, j)` as indices in a square lattice.
+
+     Technically, `~kwant.builder.Builder` expects
+     **sites** as indices. Sites themselves have a certain type, and
+     belong to a **site group**. A site group is also used to convert
+     something that represents a site (like a tuple) into a
+     proper `~kwant.builder.Site` object that can be used with
+     `~kwant.builder.Builder`.
+
+     In the above example, `lat` is the site group. By specifying it
+     as the `default_site_group`, `~kwant.builder.Builder`
+     knows that it should use `lat` to interpret any input that is not of
+     type `~kwant.builder.Site`. Instead of using
+     `default_site_group`, one could have manually converted the
+     tuples `(i, j)` into sites ``lat(i, j)``::
+
+         for i in xrange(L):
+             for j in xrange(W):
+                 sys[lat(i, j)] = 4 * t
+
+                 # hoppig in y-direction
+                 if j > 0 :
+                     sys[lat(i, j), lat(i, j-1)] = - t
+
+                 #hopping in x-direction
+                 if i > 0:
+                     sys[lat(i, j), lat(i-1, j)] = -t
+
+     (The concept of site groups and sites allows `~kwant.builder.Builder`
+     to mix arbitrary lattices and site groups)
+
+   - Note that we wrote::
+
+         lat = kwant.lattices.Square(a)
+
+         sys.default_site_group = lat
+         lead0.default_site_group = lat
+
+     instead of::
+
+         sys.default_site_group = kwant.lattices.Square(a)
+         lead0.default_site_group = kwant.lattices.Square(a)
+
+     The reason is that in the latter case, `sys` and `lead0` have two
+     different site groups (although both representing a
+     square lattice), since a site group is represented by a particular
+     instance of the class, not the class itself.
+
+     Hence, the latter example is interpreted as two different
+     square lattices, which will fail when the lead is attached to the
+     system.
+
+   - Note that the vector passed to the `~kwant.lattice.TranslationalSymmetry`
+     (in fact, what is passed is a list of vectors -- there could be more than
+     on periodic direction. However, for a lead there is only one.) is
+     a realspace vector: ``lat.vec((1,0))``. Here, ``lat.vec``
+     converts the integer indices `(1,0)` into a realspace vector. In
+     this particular example, this is trivial (even more as ``a=1``),
+     but it is not so any more for more complicated lattices.
+
+     Even though the translational symmetry vector is specified in
+     realspace, it must be compatible with the lattice symmetries
+     (in principle, there could be more than one lattice). Hence,
+     it will be typically specified using ``lat.vec``, as this
+     is guaranteed to be a proper lattice vector, compatible
+     with the lattice symmetry.
+
+   - Instead of plotting to the screen (which is standard, if the
+     Python Image Library PIL is installed), :func:`plot <kwant.plotter.plot>`
+     can also write to the file specified by the argument `filename`.
+     (for details, see the documentation of :func:`plot <kwant.plotter.plot>`.)
+
+
+.. rubric:: Footnotes
+
+.. [#] http://xkcd.com/353/
+.. [#] Leads are numbered in the python convention, starting from 0.
+
+The same but different: Alternative system building
+...................................................
+
+kwant is very flexible, and often allows you more than one way to
+build up your system. The reason is that `~kwant.builder.Builder`
+is essentially just a container, and allows for different
+ways to be filled. Here we present a more compact rewrite of
+the previous example (still with the same results).
+
+Also, the previous example was written in the form of a pythons script
+with little structure, and everything governed by global variables.
+This is OK for such a simple example, but for larger projects it makes
+sense to structure different functionality into different functional
+entities. In this example we therefore also aim at more structure.
+
+We begin the program collecting all imports in the beginning of the
+file and put the build-up of the system into a separate function
+`make_system`:
+
+.. literalinclude:: ../../../examples/tutorial1b.py
+    :lines: 13-24
+
+Previously, the scattering region was build using two ``for``-loops.
+Instead, we now write:
+
+.. literalinclude:: ../../../examples/tutorial1b.py
+    :lines: 27
+
+Here, all lattice points are added at once in the first line.  The
+construct ``((i, j) for i in xrange(L) for j in xrange(W))`` is a
+generator that iterates over all points in the rectangle as did the
+two ``for``-loops in the previous example. In fact, a
+`~kwant.builder.Builder` can not only be indexed by a single
+lattice point -- it also allows for lists of points, or, as in this
+example, an generator (as is also used in list comprehensions in
+python).
+
+Having added all lattice points in one line, we now turn to the
+hoppings. In this case, an iterable like for the lattice
+points becomes a bit cumbersome, and we use instead another
+feature of kwant:
+
+.. literalinclude:: ../../../examples/tutorial1b.py
+    :lines: 28-29
+
+In regular lattices, one has only very few types of different hoppings
+(by one lattice point in x or y-direction in the case of a square
+lattice considered here). For the square lattice, these types of
+hoppings are stored as a list in ``lat.nearest``, and the ``for``-loop
+runs over all of them.
+`~kwant.builder.Builder.possible_hoppings` takes as an argument
+one type of hopping (more about that in the notes below;
+details on the hopping definition will be discussed in
+:ref:`tutorial_spinorbit`), and generates all
+hoppings of this type that are possible with all the lattice points
+that were added before.  ``sys[sys.possible_hoppings(*hopping)] = -t``
+then sets all of those hopping matrix elements at once.
+
+The leads can be constructed in an analogous way:
+
+.. literalinclude:: ../../../examples/tutorial1b.py
+    :lines: 35-41
+
+Note that in the previous example, we essentially used the same code
+for the right and the left lead, the only difference was the direction
+of the translational symmetry vector. The
+`~kwant.builder.Builder` used for the lead provides a method
+`~kwant.builder.Builder.reversed` that returns a copy of the
+lead, but with it's translational vector reversed.  This can thus be
+used to obtain a lead pointing in the opposite direction, i.e. makes a
+right lead from a left lead:
+
+.. literalinclude:: ../../../examples/tutorial1b.py
+    :lines: 45
+
+The remainder of the code is identical to the previous example
+(except for a bit of reorganization into functions):
+
+.. literalinclude:: ../../../examples/tutorial1b.py
+    :lines: 48-52
+
+and
+
+.. literalinclude:: ../../../examples/tutorial1b.py
+    :lines: 53-63
+
+Finally, we use a python trick to make our example usable both
+as a script, as well as allowing it to be imported as a module.
+We collect all statements that should be executed in the script
+in a ``main()``-function:
+
+.. literalinclude:: ../../../examples/tutorial1b.py
+    :lines: 66-73
+
+Finally, we use the following python construct [#]_ that executes
+``main()`` if the program is used as a script (i.e. executed as
+``python tutorial1b.py``):
+
+.. literalinclude:: ../../../examples/tutorial1b.py
+    :lines: 78-79
+
+If the example however is imported using ``import tutorial1b``,
+``main()`` is not executed automatically. Instead, you can execute it
+manually using ``tutorial1b.main()``.  On the other hand, you also
+have access to the other functions, ``make_system()`` and
+``plot_conductance()``, and can thus play with the parameters.
+
+The result of the example should be identical to the previous one.
+
+.. seealso::
+    The full source code can be found in
+    :download:`examples/tutorial1b.py <../../../examples/tutorial1b.py>`
+
+.. specialnote:: Technical details
+
+   - In
+
+     .. literalinclude:: ../../../examples/tutorial1b.py
+       :lines: 28-29
+
+     we write ``*hopping`` instead of ``hopping``. The reason is as follows:
+     `~kwant.builder.Builder.possible_hoppings` expects the hopping to
+     be defined using three parameters (in particular, a tuple
+     containing a relative lattice vector, and two (sub)lattice objects that
+     indicate the start and end lattice, more about that in
+     a :ref:`later tutorial <tutorial_spinorbit>`). ``lat.nearest``
+     is a list of tuples, with every tuple containing the three
+     parameters expected by `~kwant.builder.Builder.possible_hoppings`.
+
+     Hence, ``hopping`` is a tuple. But passing it to
+     `~kwant.builder.Builder.possible_hoppings` would fail,
+     as three parameters are expected (not a single tuple). ``*hopping``
+     unpacks the tuple into these three separate parameters (see
+     <http://docs.python.org/tutorial/controlflow.html#unpacking-argument-lists>)
+
+   - We have seen different ways to add lattice points to a
+     `~kwant.builder.Builder`. It allows to
+
+     * add single points, specified as sites (or tuples, if
+       a `default_site_group` is specified as in the previous
+       example).
+     * add several points at once using a generator (as in this example)
+     * add several points at once using a list (typically less
+       effective compared to a generator)
+
+     For technical reasons it is not possible to add several points
+     using a tuple of sites. Hence it is worth noting
+     the a subtle detail in
+
+     .. literalinclude:: ../../../examples/tutorial1b.py
+         :lines: 27
+
+     Note that ``((x, y) for x in range(L) for y in range(W))`` is not
+     a tuple, but a generator.
+
+     Let us elaborate a bit more on this using a simpler example:
+
+     >>> a = (0, 1, 2, 3)
+     >>> b = (i for i in xrange(4))
+
+     Here, `a` is a tuple, whereas `b` is a generator. One difference
+     is that one can subscript tuples, but not generators:
+
+     >>> a[0]
+     0
+     >>> b[0]
+     Traceback (most recent call last):
+       File "<stdin>", line 1, in <module>
+     TypeError: 'generator' object is unsubscriptable
+
+     However, both can be used in ``for``-loops, for example.
+
+   - In the example, we have added all the hoppings using
+     `~kwant.builder.Builder.possible_hoppings`. In fact,
+     hoppings can be added in the same fashion as sites, namely specifying
+
+     * a single hopping
+     * several hoppings via a generator
+     * several hoppings via a list
+
+     A hopping is defined using two sites. If several hoppings are
+     added at once, these two sites should be encapsulated in a tuple.
+     In particular, one must write::
+
+         sys[(((0,j+1), (0, j)) for j in xrange(W-1)] = ...
+
+     or::
+
+         sys[[(site1, site2), (site3, site4), ...]] = ...
+
+     You might wonder, why it is then possible to write for a single hopping::
+
+        sys[site1, site2] = ...
+
+     instead of ::
+
+        sys[(site1, site2)] = ...
+
+     In fact, due to the way python handles subscripting, ``sys[site1, site2]``
+     is the same as ``sys[(site1, site2)]``.
+
+     (This is the deeper reason why several sites cannot be added as a tuple --
+     it would be impossible to distinguish whether one would like to add two
+     separate sites, or one hopping.
+
+.. rubric:: Footnotes
+
+.. [#] http://docs.python.org/library/__main__.html
diff --git a/doc/source/tutorial/tutorial2.rst b/doc/source/tutorial/tutorial2.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dc142d0033c7c656444ce66b8e3490e3466b0a78
--- /dev/null
+++ b/doc/source/tutorial/tutorial2.rst
@@ -0,0 +1,415 @@
+Adding more structure to the problem
+------------------------------------
+
+.. _tutorial_spinorbit:
+
+Matrix structure of on-site and hopping elements
+................................................
+
+In the next examples, we will extend the previous examples and add more
+structure. We begin by extending the simple 2DEG-Hamiltonian by
+a Rashba spin-orbit coupling and a Zeeman splitting due to
+an external magnetic field:
+
+.. math::
+
+    H = \frac{\hbar^2}{2 m} (\partial_x^2+\partial_y^2) -
+      i \alpha (\partial_x \sigma_y - \partial_y \sigma_x) +
+      E_\text{Z} \sigma_z +  V(y)
+
+Here :math:`\sigma_{x,y,z}` denote the Pauli matrices.
+
+It turns out that this well studied Rashba-Hamiltonian has some peculiar
+properties in (ballistic) nanowires: It was first predicted theoretically
+in `Phys. Rev. Lett. 90, 256601 (2003)
+<http://prl.aps.org/abstract/PRL/v90/i25/e256601>`_ that such a system should
+exhibit non-monotonic conductance steps due to a spin-orbit gap. Only
+very recently, this non-monotonic behavior has been supposedly
+observed in experiment: `Nature Physics 6, 336 (2010)
+<http://www.nature.com/nphys/journal/v6/n5/abs/nphys1626.html>`_. Here
+we will show that a very simple extension of our previous examples will
+exactly show this behavior (Note though that no care was taken to choose
+realistic parameters).
+
+The tight-binding model corresponding to the Rashba-Hamiltonian
+naturally exhibits a 2x2-matrix structure of onsite energies and hoppings.
+In order to deal with matrices in python, kwant uses the `numpy package
+<numpy.scipy.org>`_. In order to use matrices in our program, we thus also
+have to import that package:
+
+.. literalinclude:: ../../../examples/tutorial2a.py
+    :lines: 19
+
+For convenience, we define the Pauli-matrices first (with `sigma_0` the
+unit matrix):
+
+.. literalinclude:: ../../../examples/tutorial2a.py
+    :lines: 22-25
+
+Previously, we used numbers as the values of our matrix elements.
+However, `~kwant.builder.Builder` also accepts matrices as values, and
+we can simply write:
+
+.. literalinclude:: ../../../examples/tutorial2a.py
+    :lines: 37-44
+
+Note that the Zeeman energy adds to the onsite term, whereas the Rashba
+spin-orbit term adds to the hoppings (due to the derivative operator).
+Furthermore, the hoppings in x and y-direction have a different matrix
+structure. We still use `~kwant.builder.Builder.possible_hoppings`
+to add all the hoppings at once, but we now have to distinguish
+x and y-direction. Because of that, we have to explicitly specify
+the hoppings in the form expected by
+`~kwant.builder.Builder.possible_hoppings`:
+
+- A tuple with relative lattice indices.  For example, `(1, 0)` means
+  hopping from `(i, j)` to `(i+1, j)`, whereas `(1, 1)` would
+  mean hopping to `(i+1, j+1)`.
+- The target lattice (where to hop to)
+- The source lattice (where the hopping originates
+
+Since we are only dealing with a single lattice here, source and target
+lattice are identical, but still must be specified  (for an example
+with hopping between different (sub)lattices, see :ref:`tutorial-graphene`).
+
+Again, it is enough to specify one direction of the hopping (i.e.
+when specifying `(1, 0)` it is not necessary to specify `(-1, 0)`),
+`~kwant.builder.Builder` assures hermiticity.
+
+The leads also allow for a matrix structure,
+
+.. literalinclude:: ../../../examples/tutorial2a.py
+    :lines: 52-58
+
+The remainder of the code is unchanged, and as a result we should obtain
+the following, clearly non-monotonic conductance steps:
+
+.. image:: ../images/tutorial2a_result.*
+
+.. seealso::
+     The full source code can be found in
+     :download:`example/tutorial2a.py <../../../examples/tutorial2a.py>`
+
+.. specialnote:: Technical details
+
+  - It should be emphasized that the relative hopping used for
+    `~kwant.builder.Builder.possible_hoppings` is given in terms of
+    lattice indices, i.e. relative to the Bravais lattice vectors.
+    For a square lattice, the Bravais lattice vectors are simply
+    :math:`(a,0)` and :math:`(0,a)`, and hence the mapping from
+    lattice indices `(i,j)` to realspace and back is trivial.
+    This becomes more involved in more complicated lattices, where
+    the realspace directions corresponding to, for example, `(1,0)`
+    and `(0,1)` need not be orthogonal any more
+    (see :ref:`tutorial-graphene`).
+
+
+Spatially dependent values through functions
+............................................
+
+Up to now, all examples had position-independent matrix-elements
+(and thus translational invariance along the wire, which
+was the origin of the conductance steps). Now, we consider the
+case of a position-dependent potential:
+
+.. math::
+
+    H = \frac{\hbar^2}{2 m} (\partial_x^2+\partial_y^2) + V(x, y)
+
+The position-dependent potential enters in the onsite energies.
+One possibility would be to again set the onsite matrix elements
+of each lattice point individually (as in tutorial1a.py). However,
+changing the potential then implies the need to build up the system again.
+
+Instead, we use a python *function* to define the onsite energies. We
+define the potential profile of a quantum well as:
+
+.. literalinclude:: ../../../examples/tutorial2b.py
+    :lines: 16-18, 22, 28-34
+
+This function takes one argument which is of type
+`~kwant.builder.Site`, from which you can get the realspace
+coordinates using ``site.pos``. Note that we use several global
+variables to define the behavior of `potential()`: `L` and `L_well`
+are variables taken from the namespace of `make_system`, the variable `pot`
+is taken from the global module namespace. By this one can change the
+behavior of `potential()` at another place, for example by setting
+`pot` to a different value. We will use this later to compute
+the transmission as a function of well depth.
+
+kwant now allows us to pass a function as a value to
+`~kwant.builder.Builder`:
+
+.. literalinclude:: ../../../examples/tutorial2b.py
+    :lines: 36-41
+
+For each lattice point, the corresponding site is then passed to the
+function `onsite()`. Note that we had to define `onsite()`, as it is
+not possible to mix values and functions as in ``sys[...] = 4 * t +
+potential``.
+
+For the leads, we just use constant values as before. If we passed a
+function also for the leads (which is perfectly allowed), this
+function would need to be compatible with the translational symmetry
+of the lead -- this should be kept in mind.
+
+Finally, we compute the transmission probability:
+
+.. literalinclude:: ../../../examples/tutorial2b.py
+    :lines: 65, 68-77
+
+Since we change the value of the global variable `pot` to vary the
+well depth, python requires us to write ``global pot`` to `enable
+access to it
+<http://docs.python.org/faq/programming.html#what-are-the-rules-for-local-and-global-variables-in-python>`_.
+Subsequent calls to :func:`kwant.solve <kwant.solvers.sparse.solve>`
+then will use the updated value of pot, and we get the result:
+
+.. image:: ../images/tutorial2b_result.*
+
+Starting from no potential (well depth = 0), we observe the typical
+oscillatory transmission behavior through resonances in the quantum well.
+
+.. seealso::
+     The full source code can be found in
+     :download:`example/tutorial2b.py <../../../examples/tutorial2b.py>`
+
+.. warning::
+
+    If functions are used to set values inside a lead, then they must satisfy
+    the same symmetry as the lead does.  There is (currently) no check and
+    wrong results will be the consequence of a misbehaving function.
+
+.. specialnote:: Technical details
+
+  - Functions can also be used for hoppings. In this case, they take
+    two `~kwant.builder.Site`'s as arguments.
+
+  - In example/tutorial2b.py, line 16
+
+    .. literalinclude:: ../../../examples/tutorial2b.py
+        :lines: 16
+
+    is not really necessary. If this line was left out, the
+    global variable `pot` would in fact be created by the
+    first assignment in `plot_conductance()`.
+
+  - Apart from the realspace position `pos`, `~kwant.builder.Site`
+    has also an attribute `tag` containing the lattice indices
+    of the site.
+
+  - Since we use a global variable to change the value of the
+    potential, let us briefly reflect on how python determines
+    which variable to use.
+
+    In our example, the function `potential()` uses the variable
+    `pot` which is not defined in the function itself. In this case,
+    python looks for the variable in the enclosing scopes, i.e.
+    inside the functions/modules/scripts that enclose the
+    corresponding piece of code. For example, in
+
+    >>> def f():
+    ...     def g():
+    ...         print string
+    ...     return g
+    ...
+    >>> g = f()
+    >>> string = "global"
+    >>> g()
+    global
+
+    function `g()` defined inside `f()` uses the global variable
+    `string` (which was actually created only *after* the
+    definition of `g()`!). Note that this only works as long as
+    one only reads `string`; if `g()` was to write to string,
+    it would need to add ``global string`` to `g()`, as we
+    did in `plot_conductance()`.
+
+    Things change if the function `f()` also contains a variable
+    of the same name:
+
+    >>> def f():
+    ...     def g():
+    ...         print string
+    ...     string = "local"
+    ...     return g
+    ...
+    >>> g = f()
+    >>> g()
+    local
+    >>> string = "global"
+    >>> g()
+    local
+
+    In this case, `g()` always uses the local variable inside `f()`
+    (unless we would add ``global string`` in `g()`).
+
+  - `~kwant.builder.Builder` does not only accept functions as
+    values, but every python object that can be used as a function.
+
+    In particular it allows to use a functor::
+
+        class Functor:
+            def __init__(self, x1, x2):
+                self.x1 = x1
+                self.x2 = x2
+
+            def __call__(self, site):
+                x, y = site.pos
+                if self.x1 < x < self.x2:
+                    return self.pot
+                else:
+                    return 0
+
+        functor = Functor(10, 20)
+
+        sys[...] = functor
+
+        functor.pot = ...
+
+    This approach would in principle also avoid the use of
+    a global variable, as the value of the potential could
+    be changed inside the object.
+
+.. _tutorial-abring:
+
+Nontrivial shapes
+.................
+
+Up to now, we only dealt with simple wire geometries. Now we turn to the case
+of a more complex geometry, namely transport through a quantum ring
+that is pierced by a magnetic flux :math:`\Phi`:
+
+.. image:: ../images/tutorial2c_sketch.*
+
+For a flux line, it is possible to choose a gauge such that a
+charged particle acquires a phase :math:`e\Phi/h` whenever it
+crosses the branch cut originating from the flux line (branch
+cut shown as red dashed line) [#]_. There are more symmetric gauges, but
+this one is most convenient to implement numerically.
+
+Defining such a complex structure adding individual lattice sites
+is possible, but cumbersome. Fortunately, there is a more convenient solution:
+First, define a boolean function defining the desired shape, i.e. a function
+that returns ``True`` whenever a point is inside the shape, and
+``False`` otherwise:
+
+.. literalinclude:: ../../../examples/tutorial2c.py
+    :lines: 20, 24-27, 30-33
+
+Note that this function takes a realspace position as argument (not a
+`~kwant.builder.Site`).
+
+We can now simply add all of the lattice points inside this shape at
+once, using the function `~kwant.lattice.Square.shape`
+provided by the lattice:
+
+.. literalinclude:: ../../../examples/tutorial2c.py
+    :lines: 36-38
+
+Here, ``lat.shape()`` takes as a second parameter a (realspace) point
+that is inside the desired shape. The hoppings can still be added
+using `~kwant.builder.Builder.possible_hoppings` as before.
+
+Up to now, the system contains constant hoppings and onsite energies,
+and we still need to include the phase shift due to the magnetic flux.
+This is done by **overwriting** the values of hoppings in x-direction
+along the branch cut in the lower arm of the ring. For this we select
+all hoppings in x-drection that are of the form `((1, j), (0, j))`
+with ``j<0``:
+
+.. literalinclude:: ../../../examples/tutorial2c.py
+    :lines: 46-58
+
+Here, `crosses_branchcut` is a boolean function that returns ``True`` for
+the desired hoppings. We then use again a generator (this time with
+an ``if``-conditional) to set the value of all hoppings across
+the branch cut to `fluxphase`. The rationale
+behind using a function instead of a constant value for the hopping
+is again that we want to vary the flux through the ring, without
+constantly rebuilding the system -- instead the flux is governed
+by the global variable `phi`.
+
+For the leads, we can also use the ``lat.shape()``-functionality:
+
+.. literalinclude:: ../../../examples/tutorial2c.py
+    :lines: 62-71
+
+Here, the shape must cover *at least* one unit cell of the lead
+(it does not hurt if it covers more unit cells).
+
+Attaching the leads is done as before:
+
+.. literalinclude:: ../../../examples/tutorial2c.py
+    :lines: 78-79
+
+In fact, attaching leads seems not so simple any more for the current
+structure with a scattering region very much different from the lead
+shapes. However, the choice of unit cell together with the
+translational vector allows to place the lead unambiguously in realspace --
+the unit cell is repeated infinitely many times in the direction and
+opposite to the direction of the translational vector.
+kwant examines the lead starting from infinity and traces it
+back (going opposite to the direction of the translational vector)
+until it intersects the scattering region. At this intersection,
+the lead is attached:
+
+.. image:: ../images/tutorial2c_sketch2.*
+
+After the lead has been attached, the system should look like this:
+
+.. image:: ../images/tutorial2c_sys.*
+
+The computation of the conductance goes in the same fashion as before.
+Finally you should get the following result:
+
+.. image:: ../images/tutorial2c_result.*
+
+where one can observe the conductance oscillations with the
+period of one flux quantum.
+
+.. seealso::
+     The full source code can be found in
+     :download:`example/tutorial2c.py <../../../examples/tutorial2c.py>`
+
+.. specialnote:: Technical details
+
+  - Note that in this example, we did not need to set
+    ``sys.default_site_group = lat``. All lattice points were
+    added using functionality from ``lat`` and thus were
+    proper sites already.
+
+  - Leads have to have proper periodicity. Furthermore, the kwant
+    format requires the hopping from the leads to the scattering
+    region to be identical to the hoppings between unit cells in
+    the lead. `~kwant.builder.Builder.attach_lead` takes care of
+    all these details for you! In fact, it even adds points to
+    the scattering region, if proper attaching requires this. This
+    becomes more apparent if we attach the leads a bit further away
+    from the central axis o the ring, as was done in this example:
+
+    .. image:: ../images/tutorial2c_note1.*
+
+  - Per default, `~kwant.builder.Builder.attach_lead` attaches
+    the lead to the "outside" of the structure, by tracing the
+    lead backwards, coming from infinity.
+
+    One can also attach the lead to the inside of the structure,
+    by providing an alternative starting point from where
+    the lead is traced back::
+
+        sys.attach_lead(lead1, (0, 0))
+
+    starts the trace-back in the middle of the ring, resulting
+    in the lead being attached to the inner circle:
+
+    .. image:: ../images/tutorial2c_note2.*
+
+    Note that here the lead is treated as if it would pass over
+    the other arm of the ring, without intersecting it.
+
+.. rubric:: Footnotes
+
+.. [#] The corresponding vector potential is :math:`A_x(x,y)=\Phi \delta(x)
+       \Theta(-y)` which yields the correct magnetic field :math:`B(x,y)=\Phi
+       \delta(x)\delta(y)`.
diff --git a/doc/source/tutorial/tutorial3.rst b/doc/source/tutorial/tutorial3.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d8ec247e52e88cf9b453bc535b684e7f93bc4079
--- /dev/null
+++ b/doc/source/tutorial/tutorial3.rst
@@ -0,0 +1,120 @@
+Beyond transport: Band structures and closed systems
+----------------------------------------------------
+
+Band structure calculations
+...........................
+
+When doing transport simulations, one also often needs to know the
+band structure of the leads, i.e. the energies of the propagating
+plane waves in the leads as a function of momentum. This band structure
+contains information about the number of modes, the velocities, ...
+
+In this example, we aim to compute the bandstructure of a simple
+tight-binding wire.
+
+Computing band structures in kwant is easy. Just define a lead in the
+usual way:
+
+.. literalinclude:: ../../../examples/tutorial3a.py
+    :lines: 18-37
+
+"Usual way" means defining a translational symmetry vector, as well
+as one unit cell of the lead, and the hoppings to neighboring
+unit cells. This information is enough to make the infinite, translationally
+invariant system needed for band structure calculations.
+
+In contrast to previous usage however, you have to *finalize* the lead.  A
+finalized lead has a method/attribute `~kwant.system.InfiniteSystem.energies`
+that allows to compute the eigenenergies of the translational invariant system
+for a given momentum `k`. Computing these eigenenergies for different momenta
+`k` then yields the bandstructure:
+
+.. literalinclude:: ../../../examples/tutorial3a.py
+    :lines: 40 - 57
+
+This gives the result:
+
+.. image:: ../images/tutorial3a_result.*
+
+where we observe the cosine-like dispersion of the square lattice. Close
+to ``k=0`` this agrees well with the quadratic dispersion this tight-binding
+Hamiltonian is approximating.
+
+.. seealso::
+     The full source code can be found in
+     :download:`example/tutorial3a.py <../../../examples/tutorial3a.py>`
+
+.. specialnote:: Technical details
+
+  - Note that we have used `~kwant.system.InfiniteSystem.energies` as if it
+    were a function. In fact, `~kwant.system.InfiniteSystem.energies` is a
+    so-called callable object, i.e. a object that can be used just as a
+    function. We could also have written::
+
+        Energies = flead.energies
+        energy_list = [Energies(k) for k in momenta]
+
+    In fact, this would even be a little bit more efficient, as the object
+    `Energies` would only need to setup the Hamiltonian matrices once (the
+    efficiency difference is small however, as the eigenenergy calculation
+    dominates). For more details, see `~kwant.system.InfiniteSystem.energies`.
+
+
+Closed systems
+..............
+
+Although kwant is (currently) mainly aimed towards transport problem, it
+can also easily be used to compute properties of closed systems -- after
+all, a closed system is nothing more than a scattering region without leads!
+
+In this example, we compute the spectrum of a closed, (approximately)
+circular quantum dot as a function of magnetic field
+(Fock-Darwin spectrum).
+
+To compute the eigenenergies, we will make use of the linear algebra
+functionality of `scipy <www.scipy.org>`_:
+
+.. literalinclude:: ../../../examples/tutorial3b.py
+    :lines: 16
+
+We set up the system using the `shape`-function as in
+:ref:`tutorial-abring`, but do not add any leads:
+
+.. literalinclude:: ../../../examples/tutorial3b.py
+    :lines: 30-47
+
+We add the magnetic field using a function and a global variable as we
+did in the two previous examples. (Here, the gauge is chosen such that
+:math:`A_x(y) = - B y` and :math:`A_y=0`.)
+
+The spectrum can be obtained by diagonalizing the Hamiltonian of the
+system, which in turn can be obtained from the finalized
+system using `~kwant.system.System.hamiltonian_submatrix`:
+
+.. literalinclude:: ../../../examples/tutorial3b.py
+    :lines: 50, 52, 58-69
+
+In this toy model we use dense matrices and dense matrix algebra since
+the system is very small. (In a real application one would probably
+want to use sparse matrix methods.) Finally, we obtain the result:
+
+.. image:: ../images/tutorial3b_result.*
+
+At zero magnetic field several energy levels are degenerate (since our
+quantum dot is rather symmetric). These degeneracies are split
+by the magnetic field, and the eigenenergies flow towards the
+Landau level energies at higher magnetic fields [#]_
+
+.. specialnote:: Technical details
+
+  - `~kwant.system.System.hamiltonian_submatrix` can also return a sparse
+    matrix, if the optional argument ``sparse=True``. The sparse matrix is in
+    scipy's `scipy.sparse.coo_matrix` format, which can be easily be converted
+    to various other sparse matrix formats (see `scipy's documentation
+    <http://docs.scipy.org/doc/scipy/reference/>`_).
+
+.. rubric:: Footnotes
+
+.. [#] Again, in this tutorial example no care was taken into choosing
+       appropriate material parameters or units. For this reason, magnetic
+       field is given only in "arbitrary units"
diff --git a/doc/source/tutorial/tutorial4.rst b/doc/source/tutorial/tutorial4.rst
new file mode 100644
index 0000000000000000000000000000000000000000..788dd5f3fb662a60da5faebf268df96fc7152dfd
--- /dev/null
+++ b/doc/source/tutorial/tutorial4.rst
@@ -0,0 +1,198 @@
+.. _tutorial-graphene:
+
+Using a more complicated lattice (graphene)
+-------------------------------------------
+
+In the following example, we are going to calculate the
+conductance through a graphene quantum dot with a p-n junction
+and two non-collinear leads. In the process, we will touch
+all of the topics that we have seen in the previous tutorials,
+but now for the honeycomb lattice. As you will see, everything
+carries over nicely.
+spectrum of a quantum dot.
+
+We begin from defining the honeycomb lattice of graphene. This is
+in principle already done in `kwant.lattice.Honeycomb`, but we do it
+explicitly here to show how to define a new lattice:
+
+.. literalinclude:: ../../../examples/tutorial4.py
+    :lines: 24-27
+
+The first argument to the `make_lattice` function is the list of primitive
+vectors of the lattice; the second one is the coordinates of basis atoms.
+The honeycomb lattice has two basis atoms. Each type of basis atom by itself
+forms a regular lattice of the same type as well, and those *sublattices*
+are referenced as `a` and `b` above.
+
+In the next step we define the shape of the scattering region (circle again)
+and add all lattice points using the ``shape()``-functionality:
+
+.. literalinclude:: ../../../examples/tutorial4.py
+    :lines: 30-31, 34-39, 41-46
+
+As you can see, this works exactly the same for any kind of lattice.
+We add the onsite energies using a function describing the p-n junction;
+in contrast to the previous examples, the potential value is this time taken
+from the scope of `make_system()`, since we keep the potential fixed
+in this example.
+
+As a next step we add the hoppings, making use of
+`~kwant.builder.Builder.possible_hoppings`. Since we use our home-made
+lattice (instead of `kwant.lattice.Honeycomb`), we have to define
+the hoppings ourselves:
+
+.. literalinclude:: ../../../examples/tutorial4.py
+    :lines: 50
+
+The nearest-neighbor model for graphene contains only
+hoppings between different basis atoms. For these type of
+hoppings, it is not enough to specify relative lattice indices,
+but we also need to specify the proper target and source
+sublattices. Remember that the format of the hopping specification
+is ``(i,j), target, source``. In the previous examples (i.e.
+:ref:`tutorial_spinorbit`) ``target=source=lat``, whereas here
+we have to specify different sublattices. Furthermore,
+note that the directions given by the lattice indices
+`(1, 0)` and `(0, 1)` are not orthogonal any more, as they are given with
+respect to the two primitive vectors ``[(1, 0), (sin_30, cos_30)]``.
+
+Adding the hoppings however still works the same way:
+
+.. literalinclude:: ../../../examples/tutorial4.py
+    :lines: 51-52
+
+Modifying the scattering region is also possible as before. Let's
+do something crazy, and remove an atom in sublattice A
+(which removes also the hoppings from/to this site) as well
+as add an additional link:
+
+.. literalinclude:: ../../../examples/tutorial4.py
+    :lines: 55-56
+
+Note that the conversion from a tuple `(i,j)` to site
+is done be the sublattices `a` and `b`.
+
+Later, we will compute some eigenvalues of the closed
+scattering region without leads. For that, obtain a finalized
+snapshot of the system:
+
+.. literalinclude:: ../../../examples/tutorial4.py
+    :lines: 60
+
+Adding leads to the scattering region is done as before:
+
+.. literalinclude:: ../../../examples/tutorial4.py
+    :lines: 64-93
+
+Note here that the translational vectors ``graphene.vec((-1, 0))`` and
+``graphene.vec((0, 1))`` are *not* orthogonal any more as they would
+have been in a square lattice-- they follow the non-orthogonal
+primitive vectors defined in the beginning.
+
+In the end, we return not only the finalized system with leads, but
+also a finalized copy of the closed system (for eigenvalues)
+as well as a finalized lead (for band structure calculations).
+
+The computation of some eigenvalues of the closed system is done
+in the following piece of code:
+
+.. literalinclude:: ../../../examples/tutorial4.py
+    :lines: 96-101, 104-107
+
+Here we use in contrast to the previous example a sparse matrix and
+the sparse linear algebra functionality of scipy (this requires
+scipy version >= 0.9.0; since the remaining part of the example does not
+depend on this eigenenergy calculation, a ``try``-block simply skips this
+calculation if a lower scipy version is installed.)
+
+The code for computing the band structure and the conductance is identical
+to the previous examples, and needs not be further explained here.
+
+Finally, in the `main()` function we make and
+plot the system:
+
+.. literalinclude:: ../../../examples/tutorial4.py
+    :lines: 135-137, 142-147
+
+Here we customize the plotting: `plotter_symbols` is a dictionary
+which the sublattice objects `a` and `b` as keys, and the
+`~kwant.plotter.Circle` objects specify that the sublattice `a` should
+be drawn using a filled black circle, and `b` using a white circle
+with a black outline. The radius of the circle is given in relative
+units: :func:`plot <kwant.plotter.plot>` uses a typical length
+scale as a reference length. By default, the typical length scale is
+the smallest distance between lattice points.  :func:`plot
+<kwant.plotter.plot>` can find this length by itself, but must then go
+through all hoppings. Alternatively, one can specify the typical
+length scale using the argument `a` as in the example (not to be
+confused with the sublattice `a`) which is here set to the distance
+between carbon atoms in the graphene lattice. Specifying ``r=0.3`` in
+`~kwant.plotter.Circle` hence means that the radius of the circle is
+30% of the carbon-carbon distance. Using this relative units it is
+easy to make good-looking plots where the symbols cover a well-defined
+part of the plot.
+
+Plotting the closed system gives this result:
+
+.. image:: ../images/tutorial4_sys1.*
+
+and computing the eigenvalues of largest magnitude,
+
+.. literalinclude:: ../../../examples/tutorial4.py
+    :lines: 148
+
+should yield two eigenvalues similar to `[ 3.07869311
++1.02714523e-17j, -3.06233144 -6.68085759e-18j]` (round-off might
+change the imaginary part which should in exact arithmetics be equal
+to zero).
+
+The remaining code of `main()` plots the system with leads:
+
+.. image:: ../images/tutorial4_sys2.*
+
+It computes the band structure of one of the leads:
+
+.. image:: ../images/tutorial4_bs.*
+
+showing all the features of a zigzag lead, including the flat
+edge state bands (note that the band structure is not symmetric around
+zero energy, as we have a potential in the leads).
+
+Finally the transmission through the system is computed,
+
+.. image:: ../images/tutorial4_result.*
+
+showing the typical resonance-like transmission probability through
+an open quantum dot
+
+.. seealso::
+    The full source code can be found in
+    :download:`examples/tutorial4.py <../../../examples/tutorial4.py>`
+
+.. specialnote:: Technical details
+
+ - Apart from circles, the `kwant.plotter` module also has regular
+   `~kwant.plotter.Polygon`'s as predefined symbols. It is also
+   easy to define any custom symbol. Furthermore, plotting offers
+   many more options to customize plots. See the documentation of
+   :func:`plot <kwant.plotter.plot>` for more details.
+
+ - In a lattice with more than one basis atom, you can always act either
+   on all sublattice at the same time, or on a single sublattice only.
+
+   For example, you can add lattice points for all sublattices in the
+   current example using::
+
+       sys[graphene.shape(...)] = ...
+
+   or just for a single sublattice::
+
+       sys[a.shape(...)] = ...
+
+   and the same of course with `b`. Also, you can selectively remove points::
+
+       del sys[graphene.shape(...)]
+       del sys[a.shape(...)]
+
+   where the first line removes points in both sublattices, whereas the
+   second line removes only points in one sublattice.
diff --git a/doc/sphinxext/LICENSE.txt b/doc/sphinxext/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e00efc31ec2571318522f822bae7dcf68850c77d
--- /dev/null
+++ b/doc/sphinxext/LICENSE.txt
@@ -0,0 +1,97 @@
+-------------------------------------------------------------------------------
+    The files
+    - numpydoc.py
+    - autosummary.py
+    - autosummary_generate.py
+    - docscrape.py
+    - docscrape_sphinx.py
+    - phantom_import.py
+    have the following license:
+
+Copyright (C) 2008 Stefan van der Walt <stefan@mentat.za.net>, Pauli Virtanen <pav@iki.fi>
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+-------------------------------------------------------------------------------
+    The files
+    - compiler_unparse.py
+    - comment_eater.py
+    - traitsdoc.py
+    have the following license:
+
+This software is OSI Certified Open Source Software.
+OSI Certified is a certification mark of the Open Source Initiative.
+
+Copyright (c) 2006, Enthought, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Enthought, Inc. nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+-------------------------------------------------------------------------------
+    The files
+    - only_directives.py
+    - plot_directive.py
+    originate from Matplotlib (http://matplotlib.sf.net/) which has
+    the following license:
+
+Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved.
+
+1. This LICENSE AGREEMENT is between John D. Hunter (“JDH”), and the Individual or Organization (“Licensee”) accessing and otherwise using matplotlib software in source or binary form and its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, JDH hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use matplotlib 0.98.3 alone or in any derivative version, provided, however, that JDH’s License Agreement and JDH’s notice of copyright, i.e., “Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved” are retained in matplotlib 0.98.3 alone or in any derivative version prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on or incorporates matplotlib 0.98.3 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to matplotlib 0.98.3.
+
+4. JDH is making matplotlib 0.98.3 available to Licensee on an “AS IS” basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 0.98.3 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 0.98.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 0.98.3, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between JDH and Licensee. This License Agreement does not grant permission to use JDH trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement.
+
diff --git a/doc/sphinxext/MANIFEST.in b/doc/sphinxext/MANIFEST.in
new file mode 100644
index 0000000000000000000000000000000000000000..f88ed785c525fabb23241c00cfd6d776b6e4231f
--- /dev/null
+++ b/doc/sphinxext/MANIFEST.in
@@ -0,0 +1,2 @@
+recursive-include tests *.py
+include *.txt
diff --git a/doc/sphinxext/README.txt b/doc/sphinxext/README.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6ba63e6d856b5ae2e09565ca4a6d54b63829caf3
--- /dev/null
+++ b/doc/sphinxext/README.txt
@@ -0,0 +1,45 @@
+=====================================
+numpydoc -- Numpy's Sphinx extensions
+=====================================
+
+Numpy's documentation uses several custom extensions to Sphinx.  These
+are shipped in this ``numpydoc`` package, in case you want to make use
+of them in third-party projects.
+
+The following extensions are available:
+
+  - ``numpydoc``: support for the Numpy docstring format in Sphinx, and add
+    the code description directives ``np:function``, ``np-c:function``, etc.
+    that support the Numpy docstring syntax.
+
+  - ``numpydoc.traitsdoc``: For gathering documentation about Traits attributes.
+
+  - ``numpydoc.plot_directive``: Adaptation of Matplotlib's ``plot::``
+    directive. Note that this implementation may still undergo severe
+    changes or eventually be deprecated.
+
+
+numpydoc
+========
+
+Numpydoc inserts a hook into Sphinx's autodoc that converts docstrings
+following the Numpy/Scipy format to a form palatable to Sphinx.
+
+Options
+-------
+
+The following options can be set in conf.py:
+
+- numpydoc_use_plots: bool
+
+  Whether to produce ``plot::`` directives for Examples sections that
+  contain ``import matplotlib``.
+
+- numpydoc_show_class_members: bool
+
+  Whether to show all members of a class in the Methods and Attributes
+  sections automatically.
+
+- numpydoc_edit_link: bool  (DEPRECATED -- edit your HTML template instead)
+
+  Whether to insert an edit link after docstrings.
diff --git a/doc/sphinxext/__init__.py b/doc/sphinxext/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae9073bc4115fd5fb7ef03381da59f4d9115bca2
--- /dev/null
+++ b/doc/sphinxext/__init__.py
@@ -0,0 +1 @@
+from numpydoc import setup
diff --git a/doc/sphinxext/comment_eater.py b/doc/sphinxext/comment_eater.py
new file mode 100644
index 0000000000000000000000000000000000000000..e11eea90210734962afe50e53ee5892be6eb100e
--- /dev/null
+++ b/doc/sphinxext/comment_eater.py
@@ -0,0 +1,158 @@
+from cStringIO import StringIO
+import compiler
+import inspect
+import textwrap
+import tokenize
+
+from compiler_unparse import unparse
+
+
+class Comment(object):
+    """ A comment block.
+    """
+    is_comment = True
+    def __init__(self, start_lineno, end_lineno, text):
+        # int : The first line number in the block. 1-indexed.
+        self.start_lineno = start_lineno
+        # int : The last line number. Inclusive!
+        self.end_lineno = end_lineno
+        # str : The text block including '#' character but not any leading spaces.
+        self.text = text
+
+    def add(self, string, start, end, line):
+        """ Add a new comment line.
+        """
+        self.start_lineno = min(self.start_lineno, start[0])
+        self.end_lineno = max(self.end_lineno, end[0])
+        self.text += string
+
+    def __repr__(self):
+        return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
+            self.end_lineno, self.text)
+
+
+class NonComment(object):
+    """ A non-comment block of code.
+    """
+    is_comment = False
+    def __init__(self, start_lineno, end_lineno):
+        self.start_lineno = start_lineno
+        self.end_lineno = end_lineno
+
+    def add(self, string, start, end, line):
+        """ Add lines to the block.
+        """
+        if string.strip():
+            # Only add if not entirely whitespace.
+            self.start_lineno = min(self.start_lineno, start[0])
+            self.end_lineno = max(self.end_lineno, end[0])
+
+    def __repr__(self):
+        return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
+            self.end_lineno)
+
+
+class CommentBlocker(object):
+    """ Pull out contiguous comment blocks.
+    """
+    def __init__(self):
+        # Start with a dummy.
+        self.current_block = NonComment(0, 0)
+
+        # All of the blocks seen so far.
+        self.blocks = []
+
+        # The index mapping lines of code to their associated comment blocks.
+        self.index = {}
+
+    def process_file(self, file):
+        """ Process a file object.
+        """
+        for token in tokenize.generate_tokens(file.next):
+            self.process_token(*token)
+        self.make_index()
+
+    def process_token(self, kind, string, start, end, line):
+        """ Process a single token.
+        """
+        if self.current_block.is_comment:
+            if kind == tokenize.COMMENT:
+                self.current_block.add(string, start, end, line)
+            else:
+                self.new_noncomment(start[0], end[0])
+        else:
+            if kind == tokenize.COMMENT:
+                self.new_comment(string, start, end, line)
+            else:
+                self.current_block.add(string, start, end, line)
+
+    def new_noncomment(self, start_lineno, end_lineno):
+        """ We are transitioning from a noncomment to a comment.
+        """
+        block = NonComment(start_lineno, end_lineno)
+        self.blocks.append(block)
+        self.current_block = block
+
+    def new_comment(self, string, start, end, line):
+        """ Possibly add a new comment.
+        
+        Only adds a new comment if this comment is the only thing on the line.
+        Otherwise, it extends the noncomment block.
+        """
+        prefix = line[:start[1]]
+        if prefix.strip():
+            # Oops! Trailing comment, not a comment block.
+            self.current_block.add(string, start, end, line)
+        else:
+            # A comment block.
+            block = Comment(start[0], end[0], string)
+            self.blocks.append(block)
+            self.current_block = block
+
+    def make_index(self):
+        """ Make the index mapping lines of actual code to their associated
+        prefix comments.
+        """
+        for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
+            if not block.is_comment:
+                self.index[block.start_lineno] = prev
+
+    def search_for_comment(self, lineno, default=None):
+        """ Find the comment block just before the given line number.
+
+        Returns None (or the specified default) if there is no such block.
+        """
+        if not self.index:
+            self.make_index()
+        block = self.index.get(lineno, None)
+        text = getattr(block, 'text', default)
+        return text
+
+
+def strip_comment_marker(text):
+    """ Strip # markers at the front of a block of comment text.
+    """
+    lines = []
+    for line in text.splitlines():
+        lines.append(line.lstrip('#'))
+    text = textwrap.dedent('\n'.join(lines))
+    return text
+
+
+def get_class_traits(klass):
+    """ Yield all of the documentation for trait definitions on a class object.
+    """
+    # FIXME: gracefully handle errors here or in the caller?
+    source = inspect.getsource(klass)
+    cb = CommentBlocker()
+    cb.process_file(StringIO(source))
+    mod_ast = compiler.parse(source)
+    class_ast = mod_ast.node.nodes[0]
+    for node in class_ast.code.nodes:
+        # FIXME: handle other kinds of assignments?
+        if isinstance(node, compiler.ast.Assign):
+            name = node.nodes[0].name
+            rhs = unparse(node.expr).strip()
+            doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
+            yield name, rhs, doc
+
diff --git a/doc/sphinxext/compiler_unparse.py b/doc/sphinxext/compiler_unparse.py
new file mode 100644
index 0000000000000000000000000000000000000000..ffcf51b353a106e50b3c4ec5bbd4ab4342bc7528
--- /dev/null
+++ b/doc/sphinxext/compiler_unparse.py
@@ -0,0 +1,860 @@
+""" Turn compiler.ast structures back into executable python code.
+
+    The unparse method takes a compiler.ast tree and transforms it back into
+    valid python code.  It is incomplete and currently only works for
+    import statements, function calls, function definitions, assignments, and
+    basic expressions.
+
+    Inspired by python-2.5-svn/Demo/parser/unparse.py
+
+    fixme: We may want to move to using _ast trees because the compiler for
+           them is about 6 times faster than compiler.compile.
+"""
+
+import sys
+import cStringIO
+from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
+
+def unparse(ast, single_line_functions=False):
+    s = cStringIO.StringIO()
+    UnparseCompilerAst(ast, s, single_line_functions)
+    return s.getvalue().lstrip()
+
+op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
+                  'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
+
+class UnparseCompilerAst:
+    """ Methods in this class recursively traverse an AST and
+        output source code for the abstract syntax; original formatting
+        is disregarged.
+    """
+
+    #########################################################################
+    # object interface.
+    #########################################################################
+
+    def __init__(self, tree, file = sys.stdout, single_line_functions=False):
+        """ Unparser(tree, file=sys.stdout) -> None.
+
+            Print the source for tree to file.
+        """
+        self.f = file
+        self._single_func = single_line_functions
+        self._do_indent = True
+        self._indent = 0
+        self._dispatch(tree)
+        self._write("\n")
+        self.f.flush()
+
+    #########################################################################
+    # Unparser private interface.
+    #########################################################################
+
+    ### format, output, and dispatch methods ################################
+
+    def _fill(self, text = ""):
+        "Indent a piece of text, according to the current indentation level"
+        if self._do_indent:
+            self._write("\n"+"    "*self._indent + text)
+        else:
+            self._write(text)
+
+    def _write(self, text):
+        "Append a piece of text to the current line."
+        self.f.write(text)
+
+    def _enter(self):
+        "Print ':', and increase the indentation."
+        self._write(": ")
+        self._indent += 1
+
+    def _leave(self):
+        "Decrease the indentation level."
+        self._indent -= 1
+
+    def _dispatch(self, tree):
+        "_dispatcher function, _dispatching tree type T to method _T."
+        if isinstance(tree, list):
+            for t in tree:
+                self._dispatch(t)
+            return
+        meth = getattr(self, "_"+tree.__class__.__name__)
+        if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
+            return
+        meth(tree)
+
+
+    #########################################################################
+    # compiler.ast unparsing methods.
+    #
+    # There should be one method per concrete grammar type. They are
+    # organized in alphabetical order.
+    #########################################################################
+
+    def _Add(self, t):
+        self.__binary_op(t, '+')
+
+    def _And(self, t):
+        self._write(" (")
+        for i, node in enumerate(t.nodes):
+            self._dispatch(node)
+            if i != len(t.nodes)-1:
+                self._write(") and (")
+        self._write(")")
+               
+    def _AssAttr(self, t):
+        """ Handle assigning an attribute of an object
+        """
+        self._dispatch(t.expr)
+        self._write('.'+t.attrname)
+ 
+    def _Assign(self, t):
+        """ Expression Assignment such as "a = 1".
+
+            This only handles assignment in expressions.  Keyword assignment
+            is handled separately.
+        """
+        self._fill()
+        for target in t.nodes:
+            self._dispatch(target)
+            self._write(" = ")
+        self._dispatch(t.expr)
+        if not self._do_indent:
+            self._write('; ')
+
+    def _AssName(self, t):
+        """ Name on left hand side of expression.
+
+            Treat just like a name on the right side of an expression.
+        """
+        self._Name(t)
+
+    def _AssTuple(self, t):
+        """ Tuple on left hand side of an expression.
+        """
+
+        # _write each elements, separated by a comma.
+        for element in t.nodes[:-1]:
+            self._dispatch(element)
+            self._write(", ")
+
+        # Handle the last one without writing comma
+        last_element = t.nodes[-1]
+        self._dispatch(last_element)
+
+    def _AugAssign(self, t):
+        """ +=,-=,*=,/=,**=, etc. operations
+        """
+        
+        self._fill()
+        self._dispatch(t.node)
+        self._write(' '+t.op+' ')
+        self._dispatch(t.expr)
+        if not self._do_indent:
+            self._write(';')
+            
+    def _Bitand(self, t):
+        """ Bit and operation.
+        """
+        
+        for i, node in enumerate(t.nodes):
+            self._write("(")
+            self._dispatch(node)
+            self._write(")")
+            if i != len(t.nodes)-1:
+                self._write(" & ")
+                
+    def _Bitor(self, t):
+        """ Bit or operation
+        """
+        
+        for i, node in enumerate(t.nodes):
+            self._write("(")
+            self._dispatch(node)
+            self._write(")")
+            if i != len(t.nodes)-1:
+                self._write(" | ")
+                
+    def _CallFunc(self, t):
+        """ Function call.
+        """
+        self._dispatch(t.node)
+        self._write("(")
+        comma = False
+        for e in t.args:
+            if comma: self._write(", ")
+            else: comma = True
+            self._dispatch(e)
+        if t.star_args:
+            if comma: self._write(", ")
+            else: comma = True
+            self._write("*")
+            self._dispatch(t.star_args)
+        if t.dstar_args:
+            if comma: self._write(", ")
+            else: comma = True
+            self._write("**")
+            self._dispatch(t.dstar_args)
+        self._write(")")
+
+    def _Compare(self, t):
+        self._dispatch(t.expr)
+        for op, expr in t.ops:
+            self._write(" " + op + " ")
+            self._dispatch(expr)
+
+    def _Const(self, t):
+        """ A constant value such as an integer value, 3, or a string, "hello".
+        """
+        self._dispatch(t.value)
+
+    def _Decorators(self, t):
+        """ Handle function decorators (eg. @has_units)
+        """
+        for node in t.nodes:
+            self._dispatch(node)
+
+    def _Dict(self, t):
+        self._write("{")
+        for  i, (k, v) in enumerate(t.items):
+            self._dispatch(k)
+            self._write(": ")
+            self._dispatch(v)
+            if i < len(t.items)-1:
+                self._write(", ")
+        self._write("}")
+
+    def _Discard(self, t):
+        """ Node for when return value is ignored such as in "foo(a)".
+        """
+        self._fill()
+        self._dispatch(t.expr)
+
+    def _Div(self, t):
+        self.__binary_op(t, '/')
+
+    def _Ellipsis(self, t):
+        self._write("...")
+
+    def _From(self, t):
+        """ Handle "from xyz import foo, bar as baz".
+        """
+        # fixme: Are From and ImportFrom handled differently?
+        self._fill("from ")
+        self._write(t.modname)
+        self._write(" import ")
+        for i, (name,asname) in enumerate(t.names):
+            if i != 0:
+                self._write(", ")
+            self._write(name)
+            if asname is not None:
+                self._write(" as "+asname)
+                
+    def _Function(self, t):
+        """ Handle function definitions
+        """
+        if t.decorators is not None:
+            self._fill("@")
+            self._dispatch(t.decorators)
+        self._fill("def "+t.name + "(")
+        defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
+        for i, arg in enumerate(zip(t.argnames, defaults)):
+            self._write(arg[0])
+            if arg[1] is not None:
+                self._write('=')
+                self._dispatch(arg[1])
+            if i < len(t.argnames)-1:
+                self._write(', ')
+        self._write(")")
+        if self._single_func:
+            self._do_indent = False
+        self._enter()
+        self._dispatch(t.code)
+        self._leave()
+        self._do_indent = True
+
+    def _Getattr(self, t):
+        """ Handle getting an attribute of an object
+        """
+        if isinstance(t.expr, (Div, Mul, Sub, Add)):
+            self._write('(')
+            self._dispatch(t.expr)
+            self._write(')')
+        else:
+            self._dispatch(t.expr)
+            
+        self._write('.'+t.attrname)
+        
+    def _If(self, t):
+        self._fill()
+        
+        for i, (compare,code) in enumerate(t.tests):
+            if i == 0:
+                self._write("if ")
+            else:
+                self._write("elif ")
+            self._dispatch(compare)
+            self._enter()
+            self._fill()
+            self._dispatch(code)
+            self._leave()
+            self._write("\n")
+
+        if t.else_ is not None:
+            self._write("else")
+            self._enter()
+            self._fill()
+            self._dispatch(t.else_)
+            self._leave()
+            self._write("\n")
+            
+    def _IfExp(self, t):
+        self._dispatch(t.then)
+        self._write(" if ")
+        self._dispatch(t.test)
+
+        if t.else_ is not None:
+            self._write(" else (")
+            self._dispatch(t.else_)
+            self._write(")")
+
+    def _Import(self, t):
+        """ Handle "import xyz.foo".
+        """
+        self._fill("import ")
+        
+        for i, (name,asname) in enumerate(t.names):
+            if i != 0:
+                self._write(", ")
+            self._write(name)
+            if asname is not None:
+                self._write(" as "+asname)
+
+    def _Keyword(self, t):
+        """ Keyword value assignment within function calls and definitions.
+        """
+        self._write(t.name)
+        self._write("=")
+        self._dispatch(t.expr)
+        
+    def _List(self, t):
+        self._write("[")
+        for  i,node in enumerate(t.nodes):
+            self._dispatch(node)
+            if i < len(t.nodes)-1:
+                self._write(", ")
+        self._write("]")
+
+    def _Module(self, t):
+        if t.doc is not None:
+            self._dispatch(t.doc)
+        self._dispatch(t.node)
+
+    def _Mul(self, t):
+        self.__binary_op(t, '*')
+
+    def _Name(self, t):
+        self._write(t.name)
+
+    def _NoneType(self, t):
+        self._write("None")
+        
+    def _Not(self, t):
+        self._write('not (')
+        self._dispatch(t.expr)
+        self._write(')')
+        
+    def _Or(self, t):
+        self._write(" (")
+        for i, node in enumerate(t.nodes):
+            self._dispatch(node)
+            if i != len(t.nodes)-1:
+                self._write(") or (")
+        self._write(")")
+                
+    def _Pass(self, t):
+        self._write("pass\n")
+
+    def _Printnl(self, t):
+        self._fill("print ")
+        if t.dest:
+            self._write(">> ")
+            self._dispatch(t.dest)
+            self._write(", ")
+        comma = False
+        for node in t.nodes:
+            if comma: self._write(', ')
+            else: comma = True
+            self._dispatch(node)
+
+    def _Power(self, t):
+        self.__binary_op(t, '**')
+
+    def _Return(self, t):
+        self._fill("return ")
+        if t.value:
+            if isinstance(t.value, Tuple):
+                text = ', '.join([ name.name for name in t.value.asList() ])
+                self._write(text)
+            else:
+                self._dispatch(t.value)
+            if not self._do_indent:
+                self._write('; ')
+
+    def _Slice(self, t):
+        self._dispatch(t.expr)
+        self._write("[")
+        if t.lower:
+            self._dispatch(t.lower)
+        self._write(":")
+        if t.upper:
+            self._dispatch(t.upper)
+        #if t.step:
+        #    self._write(":")
+        #    self._dispatch(t.step)
+        self._write("]")
+
+    def _Sliceobj(self, t):
+        for i, node in enumerate(t.nodes):
+            if i != 0:
+                self._write(":")
+            if not (isinstance(node, Const) and node.value is None):
+                self._dispatch(node)
+
+    def _Stmt(self, tree):
+        for node in tree.nodes:
+            self._dispatch(node)
+
+    def _Sub(self, t):
+        self.__binary_op(t, '-')
+
+    def _Subscript(self, t):
+        self._dispatch(t.expr)
+        self._write("[")
+        for i, value in enumerate(t.subs):
+            if i != 0:
+                self._write(",")
+            self._dispatch(value)
+        self._write("]")
+
+    def _TryExcept(self, t):
+        self._fill("try")
+        self._enter()
+        self._dispatch(t.body)
+        self._leave()
+
+        for handler in t.handlers:
+            self._fill('except ')
+            self._dispatch(handler[0])
+            if handler[1] is not None:
+                self._write(', ')
+                self._dispatch(handler[1])
+            self._enter()
+            self._dispatch(handler[2])
+            self._leave()
+            
+        if t.else_:
+            self._fill("else")
+            self._enter()
+            self._dispatch(t.else_)
+            self._leave()
+
+    def _Tuple(self, t):
+
+        if not t.nodes:
+            # Empty tuple.
+            self._write("()")
+        else:
+            self._write("(")
+
+            # _write each elements, separated by a comma.
+            for element in t.nodes[:-1]:
+                self._dispatch(element)
+                self._write(", ")
+
+            # Handle the last one without writing comma
+            last_element = t.nodes[-1]
+            self._dispatch(last_element)
+
+            self._write(")")
+            
+    def _UnaryAdd(self, t):
+        self._write("+")
+        self._dispatch(t.expr)
+        
+    def _UnarySub(self, t):
+        self._write("-")
+        self._dispatch(t.expr)        
+
+    def _With(self, t):
+        self._fill('with ')
+        self._dispatch(t.expr)
+        if t.vars:
+            self._write(' as ')
+            self._dispatch(t.vars.name)
+        self._enter()
+        self._dispatch(t.body)
+        self._leave()
+        self._write('\n')
+        
+    def _int(self, t):
+        self._write(repr(t))
+
+    def __binary_op(self, t, symbol):
+        # Check if parenthesis are needed on left side and then dispatch
+        has_paren = False
+        left_class = str(t.left.__class__)
+        if (left_class in op_precedence.keys() and
+            op_precedence[left_class] < op_precedence[str(t.__class__)]):
+            has_paren = True
+        if has_paren:
+            self._write('(')
+        self._dispatch(t.left)
+        if has_paren:
+            self._write(')')
+        # Write the appropriate symbol for operator
+        self._write(symbol)
+        # Check if parenthesis are needed on the right side and then dispatch
+        has_paren = False
+        right_class = str(t.right.__class__)
+        if (right_class in op_precedence.keys() and
+            op_precedence[right_class] < op_precedence[str(t.__class__)]):
+            has_paren = True
+        if has_paren:
+            self._write('(')
+        self._dispatch(t.right)
+        if has_paren:
+            self._write(')')
+
+    def _float(self, t):
+        # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
+        # We prefer str here.
+        self._write(str(t))
+
+    def _str(self, t):
+        self._write(repr(t))
+        
+    def _tuple(self, t):
+        self._write(str(t))
+
+    #########################################################################
+    # These are the methods from the _ast modules unparse.
+    #
+    # As our needs to handle more advanced code increase, we may want to
+    # modify some of the methods below so that they work for compiler.ast.
+    #########################################################################
+
+#    # stmt
+#    def _Expr(self, tree):
+#        self._fill()
+#        self._dispatch(tree.value)
+#
+#    def _Import(self, t):
+#        self._fill("import ")
+#        first = True
+#        for a in t.names:
+#            if first:
+#                first = False
+#            else:
+#                self._write(", ")
+#            self._write(a.name)
+#            if a.asname:
+#                self._write(" as "+a.asname)
+#
+##    def _ImportFrom(self, t):
+##        self._fill("from ")
+##        self._write(t.module)
+##        self._write(" import ")
+##        for i, a in enumerate(t.names):
+##            if i == 0:
+##                self._write(", ")
+##            self._write(a.name)
+##            if a.asname:
+##                self._write(" as "+a.asname)
+##        # XXX(jpe) what is level for?
+##
+#
+#    def _Break(self, t):
+#        self._fill("break")
+#
+#    def _Continue(self, t):
+#        self._fill("continue")
+#
+#    def _Delete(self, t):
+#        self._fill("del ")
+#        self._dispatch(t.targets)
+#
+#    def _Assert(self, t):
+#        self._fill("assert ")
+#        self._dispatch(t.test)
+#        if t.msg:
+#            self._write(", ")
+#            self._dispatch(t.msg)
+#
+#    def _Exec(self, t):
+#        self._fill("exec ")
+#        self._dispatch(t.body)
+#        if t.globals:
+#            self._write(" in ")
+#            self._dispatch(t.globals)
+#        if t.locals:
+#            self._write(", ")
+#            self._dispatch(t.locals)
+#
+#    def _Print(self, t):
+#        self._fill("print ")
+#        do_comma = False
+#        if t.dest:
+#            self._write(">>")
+#            self._dispatch(t.dest)
+#            do_comma = True
+#        for e in t.values:
+#            if do_comma:self._write(", ")
+#            else:do_comma=True
+#            self._dispatch(e)
+#        if not t.nl:
+#            self._write(",")
+#
+#    def _Global(self, t):
+#        self._fill("global")
+#        for i, n in enumerate(t.names):
+#            if i != 0:
+#                self._write(",")
+#            self._write(" " + n)
+#
+#    def _Yield(self, t):
+#        self._fill("yield")
+#        if t.value:
+#            self._write(" (")
+#            self._dispatch(t.value)
+#            self._write(")")
+#
+#    def _Raise(self, t):
+#        self._fill('raise ')
+#        if t.type:
+#            self._dispatch(t.type)
+#        if t.inst:
+#            self._write(", ")
+#            self._dispatch(t.inst)
+#        if t.tback:
+#            self._write(", ")
+#            self._dispatch(t.tback)
+#
+#
+#    def _TryFinally(self, t):
+#        self._fill("try")
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#        self._fill("finally")
+#        self._enter()
+#        self._dispatch(t.finalbody)
+#        self._leave()
+#
+#    def _excepthandler(self, t):
+#        self._fill("except ")
+#        if t.type:
+#            self._dispatch(t.type)
+#        if t.name:
+#            self._write(", ")
+#            self._dispatch(t.name)
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#    def _ClassDef(self, t):
+#        self._write("\n")
+#        self._fill("class "+t.name)
+#        if t.bases:
+#            self._write("(")
+#            for a in t.bases:
+#                self._dispatch(a)
+#                self._write(", ")
+#            self._write(")")
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#    def _FunctionDef(self, t):
+#        self._write("\n")
+#        for deco in t.decorators:
+#            self._fill("@")
+#            self._dispatch(deco)
+#        self._fill("def "+t.name + "(")
+#        self._dispatch(t.args)
+#        self._write(")")
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#    def _For(self, t):
+#        self._fill("for ")
+#        self._dispatch(t.target)
+#        self._write(" in ")
+#        self._dispatch(t.iter)
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#        if t.orelse:
+#            self._fill("else")
+#            self._enter()
+#            self._dispatch(t.orelse)
+#            self._leave
+#
+#    def _While(self, t):
+#        self._fill("while ")
+#        self._dispatch(t.test)
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#        if t.orelse:
+#            self._fill("else")
+#            self._enter()
+#            self._dispatch(t.orelse)
+#            self._leave
+#
+#    # expr
+#    def _Str(self, tree):
+#        self._write(repr(tree.s))
+##
+#    def _Repr(self, t):
+#        self._write("`")
+#        self._dispatch(t.value)
+#        self._write("`")
+#
+#    def _Num(self, t):
+#        self._write(repr(t.n))
+#
+#    def _ListComp(self, t):
+#        self._write("[")
+#        self._dispatch(t.elt)
+#        for gen in t.generators:
+#            self._dispatch(gen)
+#        self._write("]")
+#
+#    def _GeneratorExp(self, t):
+#        self._write("(")
+#        self._dispatch(t.elt)
+#        for gen in t.generators:
+#            self._dispatch(gen)
+#        self._write(")")
+#
+#    def _comprehension(self, t):
+#        self._write(" for ")
+#        self._dispatch(t.target)
+#        self._write(" in ")
+#        self._dispatch(t.iter)
+#        for if_clause in t.ifs:
+#            self._write(" if ")
+#            self._dispatch(if_clause)
+#
+#    def _IfExp(self, t):
+#        self._dispatch(t.body)
+#        self._write(" if ")
+#        self._dispatch(t.test)
+#        if t.orelse:
+#            self._write(" else ")
+#            self._dispatch(t.orelse)
+#
+#    unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
+#    def _UnaryOp(self, t):
+#        self._write(self.unop[t.op.__class__.__name__])
+#        self._write("(")
+#        self._dispatch(t.operand)
+#        self._write(")")
+#
+#    binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
+#                    "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
+#                    "FloorDiv":"//", "Pow": "**"}
+#    def _BinOp(self, t):
+#        self._write("(")
+#        self._dispatch(t.left)
+#        self._write(")" + self.binop[t.op.__class__.__name__] + "(")
+#        self._dispatch(t.right)
+#        self._write(")")
+#
+#    boolops = {_ast.And: 'and', _ast.Or: 'or'}
+#    def _BoolOp(self, t):
+#        self._write("(")
+#        self._dispatch(t.values[0])
+#        for v in t.values[1:]:
+#            self._write(" %s " % self.boolops[t.op.__class__])
+#            self._dispatch(v)
+#        self._write(")")
+#
+#    def _Attribute(self,t):
+#        self._dispatch(t.value)
+#        self._write(".")
+#        self._write(t.attr)
+#
+##    def _Call(self, t):
+##        self._dispatch(t.func)
+##        self._write("(")
+##        comma = False
+##        for e in t.args:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._dispatch(e)
+##        for e in t.keywords:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._dispatch(e)
+##        if t.starargs:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._write("*")
+##            self._dispatch(t.starargs)
+##        if t.kwargs:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._write("**")
+##            self._dispatch(t.kwargs)
+##        self._write(")")
+#
+#    # slice
+#    def _Index(self, t):
+#        self._dispatch(t.value)
+#
+#    def _ExtSlice(self, t):
+#        for i, d in enumerate(t.dims):
+#            if i != 0:
+#                self._write(': ')
+#            self._dispatch(d)
+#
+#    # others
+#    def _arguments(self, t):
+#        first = True
+#        nonDef = len(t.args)-len(t.defaults)
+#        for a in t.args[0:nonDef]:
+#            if first:first = False
+#            else: self._write(", ")
+#            self._dispatch(a)
+#        for a,d in zip(t.args[nonDef:], t.defaults):
+#            if first:first = False
+#            else: self._write(", ")
+#            self._dispatch(a),
+#            self._write("=")
+#            self._dispatch(d)
+#        if t.vararg:
+#            if first:first = False
+#            else: self._write(", ")
+#            self._write("*"+t.vararg)
+#        if t.kwarg:
+#            if first:first = False
+#            else: self._write(", ")
+#            self._write("**"+t.kwarg)
+#
+##    def _keyword(self, t):
+##        self._write(t.arg)
+##        self._write("=")
+##        self._dispatch(t.value)
+#
+#    def _Lambda(self, t):
+#        self._write("lambda ")
+#        self._dispatch(t.args)
+#        self._write(": ")
+#        self._dispatch(t.body)
+
+
+
diff --git a/doc/sphinxext/docscrape.py b/doc/sphinxext/docscrape.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e3ae28b2e0e8659e3cb5775a72c86f04520d5a8
--- /dev/null
+++ b/doc/sphinxext/docscrape.py
@@ -0,0 +1,507 @@
+"""Extract reference documentation from the NumPy source tree.
+
+"""
+
+import inspect
+import textwrap
+import re
+import pydoc
+from StringIO import StringIO
+from warnings import warn
+
+class Reader(object):
+    """A line-based string reader.
+
+    """
+    def __init__(self, data):
+        """
+        Parameters
+        ----------
+        data : str
+           String with lines separated by '\n'.
+
+        """
+        if isinstance(data,list):
+            self._str = data
+        else:
+            self._str = data.split('\n') # store string as list of lines
+
+        self.reset()
+
+    def __getitem__(self, n):
+        return self._str[n]
+
+    def reset(self):
+        self._l = 0 # current line nr
+
+    def read(self):
+        if not self.eof():
+            out = self[self._l]
+            self._l += 1
+            return out
+        else:
+            return ''
+
+    def seek_next_non_empty_line(self):
+        for l in self[self._l:]:
+            if l.strip():
+                break
+            else:
+                self._l += 1
+
+    def eof(self):
+        return self._l >= len(self._str)
+
+    def read_to_condition(self, condition_func):
+        start = self._l
+        for line in self[start:]:
+            if condition_func(line):
+                return self[start:self._l]
+            self._l += 1
+            if self.eof():
+                return self[start:self._l+1]
+        return []
+
+    def read_to_next_empty_line(self):
+        self.seek_next_non_empty_line()
+        def is_empty(line):
+            return not line.strip()
+        return self.read_to_condition(is_empty)
+
+    def read_to_next_unindented_line(self):
+        def is_unindented(line):
+            return (line.strip() and (len(line.lstrip()) == len(line)))
+        return self.read_to_condition(is_unindented)
+
+    def peek(self,n=0):
+        if self._l + n < len(self._str):
+            return self[self._l + n]
+        else:
+            return ''
+
+    def is_empty(self):
+        return not ''.join(self._str).strip()
+
+
+class NumpyDocString(object):
+    def __init__(self, docstring, config={}):
+        docstring = textwrap.dedent(docstring).split('\n')
+
+        self._doc = Reader(docstring)
+        self._parsed_data = {
+            'Signature': '',
+            'Summary': [''],
+            'Extended Summary': [],
+            'Parameters': [],
+            'Returns': [],
+            'Raises': [],
+            'Warns': [],
+            'Other Parameters': [],
+            'Attributes': [],
+            'Instance Variables': [],
+            'Methods': [],
+            'See Also': [],
+            'Notes': [],
+            'Warnings': [],
+            'References': '',
+            'Examples': '',
+            'index': {}
+            }
+
+        self._parse()
+
+    def __getitem__(self,key):
+        return self._parsed_data[key]
+
+    def __setitem__(self,key,val):
+        if not self._parsed_data.has_key(key):
+            warn("Unknown section %s" % key)
+        else:
+            self._parsed_data[key] = val
+
+    def _is_at_section(self):
+        self._doc.seek_next_non_empty_line()
+
+        if self._doc.eof():
+            return False
+
+        l1 = self._doc.peek().strip()  # e.g. Parameters
+
+        if l1.startswith('.. index::'):
+            return True
+
+        l2 = self._doc.peek(1).strip() #    ---------- or ==========
+        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
+
+    def _strip(self,doc):
+        i = 0
+        j = 0
+        for i,line in enumerate(doc):
+            if line.strip(): break
+
+        for j,line in enumerate(doc[::-1]):
+            if line.strip(): break
+
+        return doc[i:len(doc)-j]
+
+    def _read_to_next_section(self):
+        section = self._doc.read_to_next_empty_line()
+
+        while not self._is_at_section() and not self._doc.eof():
+            if not self._doc.peek(-1).strip(): # previous line was empty
+                section += ['']
+
+            section += self._doc.read_to_next_empty_line()
+
+        return section
+
+    def _read_sections(self):
+        while not self._doc.eof():
+            data = self._read_to_next_section()
+            name = data[0].strip()
+
+            if name.startswith('..'): # index section
+                yield name, data[1:]
+            elif len(data) < 2:
+                yield StopIteration
+            else:
+                yield name, self._strip(data[2:])
+
+    def _parse_param_list(self,content):
+        r = Reader(content)
+        params = []
+        while not r.eof():
+            header = r.read().strip()
+            if ' : ' in header:
+                arg_name, arg_type = header.split(' : ')[:2]
+            else:
+                arg_name, arg_type = header, ''
+
+            desc = r.read_to_next_unindented_line()
+            desc = dedent_lines(desc)
+
+            params.append((arg_name,arg_type,desc))
+
+        return params
+
+
+    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
+                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
+    def _parse_see_also(self, content):
+        """
+        func_name : Descriptive text
+            continued text
+        another_func_name : Descriptive text
+        func_name1, func_name2, :meth:`func_name`, func_name3
+
+        """
+        items = []
+
+        def parse_item_name(text):
+            """Match ':role:`name`' or 'name'"""
+            m = self._name_rgx.match(text)
+            if m:
+                g = m.groups()
+                if g[1] is None:
+                    return g[3], None
+                else:
+                    return g[2], g[1]
+            raise ValueError("%s is not a item name" % text)
+
+        def push_item(name, rest):
+            if not name:
+                return
+            name, role = parse_item_name(name)
+            items.append((name, list(rest), role))
+            del rest[:]
+
+        current_func = None
+        rest = []
+
+        for line in content:
+            if not line.strip(): continue
+
+            m = self._name_rgx.match(line)
+            if m and line[m.end():].strip().startswith(':'):
+                push_item(current_func, rest)
+                current_func, line = line[:m.end()], line[m.end():]
+                rest = [line.split(':', 1)[1].strip()]
+                if not rest[0]:
+                    rest = []
+            elif not line.startswith(' '):
+                push_item(current_func, rest)
+                current_func = None
+                if ',' in line:
+                    for func in line.split(','):
+                        if func.strip():
+                            push_item(func, [])
+                elif line.strip():
+                    current_func = line
+            elif current_func is not None:
+                rest.append(line.strip())
+        push_item(current_func, rest)
+        return items
+
+    def _parse_index(self, section, content):
+        """
+        .. index: default
+           :refguide: something, else, and more
+
+        """
+        def strip_each_in(lst):
+            return [s.strip() for s in lst]
+
+        out = {}
+        section = section.split('::')
+        if len(section) > 1:
+            out['default'] = strip_each_in(section[1].split(','))[0]
+        for line in content:
+            line = line.split(':')
+            if len(line) > 2:
+                out[line[1]] = strip_each_in(line[2].split(','))
+        return out
+
+    def _parse_summary(self):
+        """Grab signature (if given) and summary"""
+        if self._is_at_section():
+            return
+
+        summary = self._doc.read_to_next_empty_line()
+        summary_str = " ".join([s.strip() for s in summary]).strip()
+        if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
+            self['Signature'] = summary_str
+            if not self._is_at_section():
+                self['Summary'] = self._doc.read_to_next_empty_line()
+        else:
+            self['Summary'] = summary
+
+        if not self._is_at_section():
+            self['Extended Summary'] = self._read_to_next_section()
+
+    def _parse(self):
+        self._doc.reset()
+        self._parse_summary()
+
+        for (section,content) in self._read_sections():
+            if not section.startswith('..'):
+                section = ' '.join([s.capitalize() for s in section.split(' ')])
+            if section in ('Parameters', 'Returns', 'Raises', 'Warns',
+                           'Other Parameters', 'Attributes',
+                           'Instance Variables', 'Methods'):
+                self[section] = self._parse_param_list(content)
+            elif section.startswith('.. index::'):
+                self['index'] = self._parse_index(section, content)
+            elif section == 'See Also':
+                self['See Also'] = self._parse_see_also(content)
+            else:
+                self[section] = content
+
+    # string conversion routines
+
+    def _str_header(self, name, symbol='-'):
+        return [name, len(name)*symbol]
+
+    def _str_indent(self, doc, indent=4):
+        out = []
+        for line in doc:
+            out += [' '*indent + line]
+        return out
+
+    def _str_signature(self):
+        if self['Signature']:
+            return [self['Signature'].replace('*','\*')] + ['']
+        else:
+            return ['']
+
+    def _str_summary(self):
+        if self['Summary']:
+            return self['Summary'] + ['']
+        else:
+            return []
+
+    def _str_extended_summary(self):
+        if self['Extended Summary']:
+            return self['Extended Summary'] + ['']
+        else:
+            return []
+
+    def _str_param_list(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            for param,param_type,desc in self[name]:
+                out += ['%s : %s' % (param, param_type)]
+                out += self._str_indent(desc)
+            out += ['']
+        return out
+
+    def _str_section(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            out += self[name]
+            out += ['']
+        return out
+
+    def _str_see_also(self, func_role):
+        if not self['See Also']: return []
+        out = []
+        out += self._str_header("See Also")
+        last_had_desc = True
+        for func, desc, role in self['See Also']:
+            if role:
+                link = ':%s:`%s`' % (role, func)
+            elif func_role:
+                link = ':%s:`%s`' % (func_role, func)
+            else:
+                link = "`%s`_" % func
+            if desc or last_had_desc:
+                out += ['']
+                out += [link]
+            else:
+                out[-1] += ", %s" % link
+            if desc:
+                out += self._str_indent([' '.join(desc)])
+                last_had_desc = True
+            else:
+                last_had_desc = False
+        out += ['']
+        return out
+
+    def _str_index(self):
+        idx = self['index']
+        out = []
+        out += ['.. index:: %s' % idx.get('default','')]
+        for section, references in idx.iteritems():
+            if section == 'default':
+                continue
+            out += ['   :%s: %s' % (section, ', '.join(references))]
+        return out
+
+    def __str__(self, func_role=''):
+        out = []
+        out += self._str_signature()
+        out += self._str_summary()
+        out += self._str_extended_summary()
+        for param_list in ('Parameters', 'Returns', 'Other Parameters',
+                           'Raises', 'Warns'):
+            out += self._str_param_list(param_list)
+        out += self._str_section('Warnings')
+        out += self._str_see_also(func_role)
+        for s in ('Notes','References','Examples'):
+            out += self._str_section(s)
+        for param_list in ('Instance Variables', 'Attributes', 'Methods'):
+            out += self._str_param_list(param_list)
+        out += self._str_index()
+        return '\n'.join(out)
+
+
+def indent(str,indent=4):
+    indent_str = ' '*indent
+    if str is None:
+        return indent_str
+    lines = str.split('\n')
+    return '\n'.join(indent_str + l for l in lines)
+
+def dedent_lines(lines):
+    """Deindent a list of lines maximally"""
+    return textwrap.dedent("\n".join(lines)).split("\n")
+
+def header(text, style='-'):
+    return text + '\n' + style*len(text) + '\n'
+
+
+class FunctionDoc(NumpyDocString):
+    def __init__(self, func, role='func', doc=None, config={}):
+        self._f = func
+        self._role = role # e.g. "func" or "meth"
+
+        if doc is None:
+            if func is None:
+                raise ValueError("No function or docstring given")
+            doc = inspect.getdoc(func) or ''
+        NumpyDocString.__init__(self, doc)
+
+        if not self['Signature'] and func is not None:
+            func, func_name = self.get_func()
+            try:
+                # try to read signature
+                argspec = inspect.getargspec(func)
+                argspec = inspect.formatargspec(*argspec)
+                argspec = argspec.replace('*','\*')
+                signature = '%s%s' % (func_name, argspec)
+            except TypeError, e:
+                signature = '%s()' % func_name
+            self['Signature'] = signature
+
+    def get_func(self):
+        func_name = getattr(self._f, '__name__', self.__class__.__name__)
+        if inspect.isclass(self._f):
+            func = getattr(self._f, '__call__', self._f.__init__)
+        else:
+            func = self._f
+        return func, func_name
+
+    def __str__(self):
+        out = ''
+
+        func, func_name = self.get_func()
+        signature = self['Signature'].replace('*', '\*')
+
+        roles = {'func': 'function',
+                 'meth': 'method'}
+
+        if self._role:
+            if not roles.has_key(self._role):
+                print "Warning: invalid role %s" % self._role
+            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role,''),
+                                             func_name)
+
+        out += super(FunctionDoc, self).__str__(func_role=self._role)
+        return out
+
+
+class ClassDoc(NumpyDocString):
+
+    extra_public_methods = ['__call__']
+
+    def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
+                 config={}):
+        if not inspect.isclass(cls) and cls is not None:
+            raise ValueError("Expected a class or None, but got %r" % cls)
+        self._cls = cls
+
+        if modulename and not modulename.endswith('.'):
+            modulename += '.'
+        self._mod = modulename
+
+        if doc is None:
+            if cls is None:
+                raise ValueError("No class or documentation string given")
+            doc = pydoc.getdoc(cls)
+
+        NumpyDocString.__init__(self, doc)
+
+        if config.get('show_class_members', True):
+            if not self['Methods']:
+                self['Methods'] = [(name, '', '')
+                                   for name in sorted(self.methods)]
+            if not self['Attributes']:
+                self['Attributes'] = [(name, '', '')
+                                      for name in sorted(self.properties)]
+
+    @property
+    def methods(self):
+        if self._cls is None:
+            return []
+        return [name for name,func in inspect.getmembers(self._cls)
+                if ((not name.startswith('_')
+                     or name in self.extra_public_methods)
+                    and callable(func))]
+
+    @property
+    def properties(self):
+        if self._cls is None:
+            return []
+        return [name for name,func in inspect.getmembers(self._cls)
+                if not name.startswith('_') and func is None]
diff --git a/doc/sphinxext/docscrape_sphinx.py b/doc/sphinxext/docscrape_sphinx.py
new file mode 100644
index 0000000000000000000000000000000000000000..28465a22ca675ab2aebb3541e64fc6e49c7c492e
--- /dev/null
+++ b/doc/sphinxext/docscrape_sphinx.py
@@ -0,0 +1,227 @@
+import re, inspect, textwrap, pydoc
+import sphinx
+from docscrape import NumpyDocString, FunctionDoc, ClassDoc
+
+class SphinxDocString(NumpyDocString):
+    def __init__(self, docstring, config={}):
+        self.use_plots = config.get('use_plots', False)
+        NumpyDocString.__init__(self, docstring, config=config)
+
+    # string conversion routines
+    def _str_header(self, name, symbol='`'):
+        return ['.. rubric:: ' + name, '']
+
+    def _str_field_list(self, name):
+        return [':' + name + ':']
+
+    def _str_indent(self, doc, indent=4):
+        out = []
+        for line in doc:
+            out += [' '*indent + line]
+        return out
+
+    def _str_signature(self):
+        return ['']
+        if self['Signature']:
+            return ['``%s``' % self['Signature']] + ['']
+        else:
+            return ['']
+
+    def _str_summary(self):
+        return self['Summary'] + ['']
+
+    def _str_extended_summary(self):
+        return self['Extended Summary'] + ['']
+
+    def _str_param_list(self, name):
+        out = []
+        if self[name]:
+            out += self._str_field_list(name)
+            out += ['']
+            for param,param_type,desc in self[name]:
+                out += self._str_indent(['**%s** : %s' % (param.strip(),
+                                                          param_type)])
+                out += ['']
+                out += self._str_indent(desc,8)
+                out += ['']
+        return out
+
+    @property
+    def _obj(self):
+        if hasattr(self, '_cls'):
+            return self._cls
+        elif hasattr(self, '_f'):
+            return self._f
+        return None
+
+    def _str_member_list(self, name):
+        """
+        Generate a member listing, autosummary:: table where possible,
+        and a table where not.
+
+        """
+        out = []
+        if self[name]:
+            out += ['.. rubric:: %s' % name, '']
+            prefix = getattr(self, '_name', '')
+
+            if prefix:
+                prefix = '~%s.' % prefix
+
+            autosum = []
+            others = []
+            for param, param_type, desc in self[name]:
+                param = param.strip()
+                if not self._obj or hasattr(self._obj, param):
+                    autosum += ["   %s%s" % (prefix, param)]
+                else:
+                    others.append((param, param_type, desc))
+
+            if autosum:
+                out += ['.. autosummary::', '   :toctree:', '']
+                out += autosum
+
+            if others:
+                maxlen_0 = max([len(x[0]) for x in others])
+                maxlen_1 = max([len(x[1]) for x in others])
+                hdr = "="*maxlen_0 + "  " + "="*maxlen_1 + "  " + "="*10
+                fmt = '%%%ds  %%%ds  ' % (maxlen_0, maxlen_1)
+                n_indent = maxlen_0 + maxlen_1 + 4
+                out += [hdr]
+                for param, param_type, desc in others:
+                    out += [fmt % (param.strip(), param_type)]
+                    out += self._str_indent(desc, n_indent)
+                out += [hdr]
+            out += ['']
+        return out
+
+    def _str_section(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            out += ['']
+            content = textwrap.dedent("\n".join(self[name])).split("\n")
+            out += content
+            out += ['']
+        return out
+
+    def _str_see_also(self, func_role):
+        out = []
+        if self['See Also']:
+            see_also = super(SphinxDocString, self)._str_see_also(func_role)
+            out = ['.. seealso::', '']
+            out += self._str_indent(see_also[2:])
+        return out
+
+    def _str_warnings(self):
+        out = []
+        if self['Warnings']:
+            out = ['.. warning::', '']
+            out += self._str_indent(self['Warnings'])
+        return out
+
+    def _str_index(self):
+        idx = self['index']
+        out = []
+        if len(idx) == 0:
+            return out
+
+        out += ['.. index:: %s' % idx.get('default','')]
+        for section, references in idx.iteritems():
+            if section == 'default':
+                continue
+            elif section == 'refguide':
+                out += ['   single: %s' % (', '.join(references))]
+            else:
+                out += ['   %s: %s' % (section, ','.join(references))]
+        return out
+
+    def _str_references(self):
+        out = []
+        if self['References']:
+            out += self._str_header('References')
+            if isinstance(self['References'], str):
+                self['References'] = [self['References']]
+            out.extend(self['References'])
+            out += ['']
+            # Latex collects all references to a separate bibliography,
+            # so we need to insert links to it
+            if sphinx.__version__ >= "0.6":
+                out += ['.. only:: latex','']
+            else:
+                out += ['.. latexonly::','']
+            items = []
+            for line in self['References']:
+                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
+                if m:
+                    items.append(m.group(1))
+            out += ['   ' + ", ".join(["[%s]_" % item for item in items]), '']
+        return out
+
+    def _str_examples(self):
+        examples_str = "\n".join(self['Examples'])
+
+        if (self.use_plots and 'import matplotlib' in examples_str
+                and 'plot::' not in examples_str):
+            out = []
+            out += self._str_header('Examples')
+            out += ['.. plot::', '']
+            out += self._str_indent(self['Examples'])
+            out += ['']
+            return out
+        else:
+            return self._str_section('Examples')
+
+    def __str__(self, indent=0, func_role="obj"):
+        out = []
+        out += self._str_signature()
+        out += self._str_index() + ['']
+        out += self._str_summary()
+        out += self._str_extended_summary()
+        for param_list in ('Parameters', 'Returns', 'Other Parameters',
+                           'Raises', 'Warns'):
+            out += self._str_param_list(param_list)
+        out += self._str_warnings()
+        out += self._str_see_also(func_role)
+        out += self._str_section('Notes')
+        out += self._str_references()
+        out += self._str_examples()
+        for param_list in ('Instance Variables', 'Attributes', 'Methods'):
+            out += self._str_member_list(param_list)
+        out = self._str_indent(out,indent)
+        return '\n'.join(out)
+
+class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
+    def __init__(self, obj, doc=None, config={}):
+        self.use_plots = config.get('use_plots', False)
+        FunctionDoc.__init__(self, obj, doc=doc, config=config)
+
+class SphinxClassDoc(SphinxDocString, ClassDoc):
+    def __init__(self, obj, doc=None, func_doc=None, config={}):
+        self.use_plots = config.get('use_plots', False)
+        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
+
+class SphinxObjDoc(SphinxDocString):
+    def __init__(self, obj, doc=None, config={}):
+        self._f = obj
+        SphinxDocString.__init__(self, doc, config=config)
+
+def get_doc_object(obj, what=None, doc=None, config={}):
+    if what is None:
+        if inspect.isclass(obj):
+            what = 'class'
+        elif inspect.ismodule(obj):
+            what = 'module'
+        elif callable(obj):
+            what = 'function'
+        else:
+            what = 'object'
+    if what == 'class':
+        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
+                              config=config)
+    elif what in ('function', 'method'):
+        return SphinxFunctionDoc(obj, doc=doc, config=config)
+    else:
+        if doc is None:
+            doc = pydoc.getdoc(obj)
+        return SphinxObjDoc(obj, doc, config=config)
diff --git a/doc/sphinxext/kwantdoc.py b/doc/sphinxext/kwantdoc.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2bdd84c86e0b00784327f52a610543352852395
--- /dev/null
+++ b/doc/sphinxext/kwantdoc.py
@@ -0,0 +1,69 @@
+"""Simple sphinx extension that allows for a section that can be
+hidden/shown on click using javascript"""
+
+from docutils import nodes
+from docutils.parsers.rst import directives
+from sphinx.util.compat import Directive
+
+id_count = 0
+
+class specialnote(nodes.General, nodes.Element):
+    pass
+
+class SpecialNote(Directive):
+
+    required_arguments = 1
+    final_argument_whitespace = True
+    has_content = True
+
+    def run(self):
+        global id_count
+
+        self.assert_has_content()
+        text = '\n'.join(self.content)
+
+        classes = directives.class_option(self.arguments[0])
+
+        node = specialnote(text)
+
+        node['classes'].extend(classes)
+        node['title'] = self.arguments[0]
+        node['myid'] = "specialnote-id" + str(id_count)
+        id_count = id_count + 1
+
+        self.state.nested_parse(self.content, self.content_offset, node)
+
+        return [node]
+
+def visit_html(self, node):
+    self.body.append("<div class=\"specialnote-title\" id=\"" + node['myid'] +
+                     "-title\">" + node['title'])
+    self.body.append("</div>")
+    self.body.append("<div class=\"specialnote-body\" id=\"" +
+                     node['myid'] + "-body\">")
+
+def leave_html(self, node):
+    self.body.append("</div>\n")
+    #If javascript is enabled, hide the content by default. Otherwise show.
+    self.body.append("<script language='javascript'>")
+    self.body.append("$('" + "#" + node['myid'] +
+                     "-title').append(\" <a style='float: right;' id=\\\"" +
+                     node['myid'] +
+                     "-button\\\" href=\\\"javascript:togglediv('" +
+                     node['myid'] + "')\\\">show</a>\");")
+    self.body.append("$('" + "#" + node['myid'] +
+                     "-body').css('display', 'none');")
+    self.body.append("</script>\n")
+
+def visit_latex(self, node):
+    self.body.append("\strong{" + node['title'] +"}\n\n")
+
+def leave_latex(self, node):
+    pass
+
+def setup(app):
+    app.add_node(specialnote,
+                 html=(visit_html, leave_html),
+                 latex=(visit_latex, leave_latex))
+
+    app.add_directive('specialnote', SpecialNote)
diff --git a/doc/sphinxext/numpydoc.py b/doc/sphinxext/numpydoc.py
new file mode 100644
index 0000000000000000000000000000000000000000..7679352c2e235850ad9c28ef3943feb6c614af76
--- /dev/null
+++ b/doc/sphinxext/numpydoc.py
@@ -0,0 +1,169 @@
+"""
+========
+numpydoc
+========
+
+Sphinx extension that handles docstrings in the Numpy standard format. [1]
+
+It will:
+
+- Convert Parameters etc. sections to field lists.
+- Convert See Also section to a See also entry.
+- Renumber references.
+- Extract the signature from the docstring, if it can't be determined otherwise.
+
+.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
+
+"""
+
+import sphinx
+
+if sphinx.__version__ < '1.0.1':
+    raise RuntimeError("Sphinx 1.0.1 or newer is required")
+
+import os, re, pydoc
+from docscrape_sphinx import get_doc_object, SphinxDocString
+from sphinx.util.compat import Directive
+import inspect
+
+def mangle_docstrings(app, what, name, obj, options, lines,
+                      reference_offset=[0]):
+
+    cfg = dict(use_plots=app.config.numpydoc_use_plots,
+               show_class_members=app.config.numpydoc_show_class_members)
+
+    if what == 'module':
+        # Strip top title
+        title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
+                              re.I|re.S)
+        lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n")
+    else:
+        doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg)
+        lines[:] = unicode(doc).split(u"\n")
+
+    if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
+           obj.__name__:
+        if hasattr(obj, '__module__'):
+            v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__))
+        else:
+            v = dict(full_name=obj.__name__)
+        lines += [u'', u'.. htmlonly::', '']
+        lines += [u'    %s' % x for x in
+                  (app.config.numpydoc_edit_link % v).split("\n")]
+
+    # replace reference numbers so that there are no duplicates
+    references = []
+    for line in lines:
+        line = line.strip()
+        m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I)
+        if m:
+            references.append(m.group(1))
+
+    # start renaming from the longest string, to avoid overwriting parts
+    references.sort(key=lambda x: -len(x))
+    if references:
+        for i, line in enumerate(lines):
+            for r in references:
+                if re.match(ur'^\d+$', r):
+                    new_r = u"R%d" % (reference_offset[0] + int(r))
+                else:
+                    new_r = u"%s%d" % (r, reference_offset[0])
+                lines[i] = lines[i].replace(u'[%s]_' % r,
+                                            u'[%s]_' % new_r)
+                lines[i] = lines[i].replace(u'.. [%s]' % r,
+                                            u'.. [%s]' % new_r)
+
+    reference_offset[0] += len(references)
+
+def mangle_signature(app, what, name, obj, options, sig, retann):
+    # Do not try to inspect classes that don't define `__init__`
+    if (inspect.isclass(obj) and
+        (not hasattr(obj, '__init__') or
+        'initializes x; see ' in pydoc.getdoc(obj.__init__))):
+        return '', ''
+
+    if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
+    if not hasattr(obj, '__doc__'): return
+
+    doc = SphinxDocString(pydoc.getdoc(obj))
+    if doc['Signature']:
+        sig = re.sub(u"^[^(]*", u"", doc['Signature'])
+        return sig, u''
+
+def setup(app, get_doc_object_=get_doc_object):
+    global get_doc_object
+    get_doc_object = get_doc_object_
+
+    app.connect('autodoc-process-docstring', mangle_docstrings)
+    app.connect('autodoc-process-signature', mangle_signature)
+    app.add_config_value('numpydoc_edit_link', None, False)
+    app.add_config_value('numpydoc_use_plots', None, False)
+    app.add_config_value('numpydoc_show_class_members', True, True)
+
+    # Extra mangling domains
+    app.add_domain(NumpyPythonDomain)
+    app.add_domain(NumpyCDomain)
+
+#------------------------------------------------------------------------------
+# Docstring-mangling domains
+#------------------------------------------------------------------------------
+
+from docutils.statemachine import ViewList
+from sphinx.domains.c import CDomain
+from sphinx.domains.python import PythonDomain
+
+class ManglingDomainBase(object):
+    directive_mangling_map = {}
+
+    def __init__(self, *a, **kw):
+        super(ManglingDomainBase, self).__init__(*a, **kw)
+        self.wrap_mangling_directives()
+
+    def wrap_mangling_directives(self):
+        for name, objtype in self.directive_mangling_map.items():
+            self.directives[name] = wrap_mangling_directive(
+                self.directives[name], objtype)
+
+class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
+    name = 'np'
+    directive_mangling_map = {
+        'function': 'function',
+        'class': 'class',
+        'exception': 'class',
+        'method': 'function',
+        'classmethod': 'function',
+        'staticmethod': 'function',
+        'attribute': 'attribute',
+    }
+
+class NumpyCDomain(ManglingDomainBase, CDomain):
+    name = 'np-c'
+    directive_mangling_map = {
+        'function': 'function',
+        'member': 'attribute',
+        'macro': 'function',
+        'type': 'class',
+        'var': 'object',
+    }
+
+def wrap_mangling_directive(base_directive, objtype):
+    class directive(base_directive):
+        def run(self):
+            env = self.state.document.settings.env
+
+            name = None
+            if self.arguments:
+                m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
+                name = m.group(2).strip()
+
+            if not name:
+                name = self.arguments[0]
+
+            lines = list(self.content)
+            mangle_docstrings(env.app, objtype, name, None, None, lines)
+            self.content = ViewList(lines, self.content.parent)
+
+            return base_directive.run(self)
+
+    return directive
+
diff --git a/doc/sphinxext/phantom_import.py b/doc/sphinxext/phantom_import.py
new file mode 100644
index 0000000000000000000000000000000000000000..c77eeb544e78bd38e9f32b5315026061c9c8a483
--- /dev/null
+++ b/doc/sphinxext/phantom_import.py
@@ -0,0 +1,162 @@
+"""
+==============
+phantom_import
+==============
+
+Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar
+extensions to use docstrings loaded from an XML file.
+
+This extension loads an XML file in the Pydocweb format [1] and
+creates a dummy module that contains the specified docstrings. This
+can be used to get the current docstrings from a Pydocweb instance
+without needing to rebuild the documented module.
+
+.. [1] http://code.google.com/p/pydocweb
+
+"""
+import imp, sys, compiler, types, os, inspect, re
+
+def setup(app):
+    app.connect('builder-inited', initialize)
+    app.add_config_value('phantom_import_file', None, True)
+
+def initialize(app):
+    fn = app.config.phantom_import_file
+    if (fn and os.path.isfile(fn)):
+        print "[numpydoc] Phantom importing modules from", fn, "..."
+        import_phantom_module(fn)
+
+#------------------------------------------------------------------------------
+# Creating 'phantom' modules from an XML description
+#------------------------------------------------------------------------------
+def import_phantom_module(xml_file):
+    """
+    Insert a fake Python module to sys.modules, based on a XML file.
+
+    The XML file is expected to conform to Pydocweb DTD. The fake
+    module will contain dummy objects, which guarantee the following:
+
+    - Docstrings are correct.
+    - Class inheritance relationships are correct (if present in XML).
+    - Function argspec is *NOT* correct (even if present in XML).
+      Instead, the function signature is prepended to the function docstring.
+    - Class attributes are *NOT* correct; instead, they are dummy objects.
+
+    Parameters
+    ----------
+    xml_file : str
+        Name of an XML file to read
+    
+    """
+    import lxml.etree as etree
+
+    object_cache = {}
+
+    tree = etree.parse(xml_file)
+    root = tree.getroot()
+
+    # Sort items so that
+    # - Base classes come before classes inherited from them
+    # - Modules come before their contents
+    all_nodes = dict([(n.attrib['id'], n) for n in root])
+    
+    def _get_bases(node, recurse=False):
+        bases = [x.attrib['ref'] for x in node.findall('base')]
+        if recurse:
+            j = 0
+            while True:
+                try:
+                    b = bases[j]
+                except IndexError: break
+                if b in all_nodes:
+                    bases.extend(_get_bases(all_nodes[b]))
+                j += 1
+        return bases
+
+    type_index = ['module', 'class', 'callable', 'object']
+    
+    def base_cmp(a, b):
+        x = cmp(type_index.index(a.tag), type_index.index(b.tag))
+        if x != 0: return x
+
+        if a.tag == 'class' and b.tag == 'class':
+            a_bases = _get_bases(a, recurse=True)
+            b_bases = _get_bases(b, recurse=True)
+            x = cmp(len(a_bases), len(b_bases))
+            if x != 0: return x
+            if a.attrib['id'] in b_bases: return -1
+            if b.attrib['id'] in a_bases: return 1
+        
+        return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))
+
+    nodes = root.getchildren()
+    nodes.sort(base_cmp)
+
+    # Create phantom items
+    for node in nodes:
+        name = node.attrib['id']
+        doc = (node.text or '').decode('string-escape') + "\n"
+        if doc == "\n": doc = ""
+
+        # create parent, if missing
+        parent = name
+        while True:
+            parent = '.'.join(parent.split('.')[:-1])
+            if not parent: break
+            if parent in object_cache: break
+            obj = imp.new_module(parent)
+            object_cache[parent] = obj
+            sys.modules[parent] = obj
+
+        # create object
+        if node.tag == 'module':
+            obj = imp.new_module(name)
+            obj.__doc__ = doc
+            sys.modules[name] = obj
+        elif node.tag == 'class':
+            bases = [object_cache[b] for b in _get_bases(node)
+                     if b in object_cache]
+            bases.append(object)
+            init = lambda self: None
+            init.__doc__ = doc
+            obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})
+            obj.__name__ = name.split('.')[-1]
+        elif node.tag == 'callable':
+            funcname = node.attrib['id'].split('.')[-1]
+            argspec = node.attrib.get('argspec')
+            if argspec:
+                argspec = re.sub('^[^(]*', '', argspec)
+                doc = "%s%s\n\n%s" % (funcname, argspec, doc)
+            obj = lambda: 0
+            obj.__argspec_is_invalid_ = True
+            obj.func_name = funcname
+            obj.__name__ = name
+            obj.__doc__ = doc
+            if inspect.isclass(object_cache[parent]):
+                obj.__objclass__ = object_cache[parent]
+        else:
+            class Dummy(object): pass
+            obj = Dummy()
+            obj.__name__ = name
+            obj.__doc__ = doc
+            if inspect.isclass(object_cache[parent]):
+                obj.__get__ = lambda: None
+        object_cache[name] = obj
+
+        if parent:
+            if inspect.ismodule(object_cache[parent]):
+                obj.__module__ = parent
+                setattr(object_cache[parent], name.split('.')[-1], obj)
+
+    # Populate items
+    for node in root:
+        obj = object_cache.get(node.attrib['id'])
+        if obj is None: continue
+        for ref in node.findall('ref'):
+            if node.tag == 'class':
+                if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):
+                    setattr(obj, ref.attrib['name'],
+                            object_cache.get(ref.attrib['ref']))
+            else:
+                setattr(obj, ref.attrib['name'],
+                        object_cache.get(ref.attrib['ref']))
diff --git a/doc/sphinxext/plot_directive.py b/doc/sphinxext/plot_directive.py
new file mode 100644
index 0000000000000000000000000000000000000000..80801e7986dc6ad55f6d57a596e85df0e1c2fab8
--- /dev/null
+++ b/doc/sphinxext/plot_directive.py
@@ -0,0 +1,636 @@
+"""
+A special directive for generating a matplotlib plot.
+
+.. warning::
+
+   This is a hacked version of plot_directive.py from Matplotlib.
+   It's very much subject to change!
+
+
+Usage
+-----
+
+Can be used like this::
+
+    .. plot:: examples/example.py
+
+    .. plot::
+
+       import matplotlib.pyplot as plt
+       plt.plot([1,2,3], [4,5,6])
+
+    .. plot::
+
+       A plotting example:
+
+       >>> import matplotlib.pyplot as plt
+       >>> plt.plot([1,2,3], [4,5,6])
+
+The content is interpreted as doctest formatted if it has a line starting
+with ``>>>``.
+
+The ``plot`` directive supports the options
+
+    format : {'python', 'doctest'}
+        Specify the format of the input
+
+    include-source : bool
+        Whether to display the source code. Default can be changed in conf.py
+    
+and the ``image`` directive options ``alt``, ``height``, ``width``,
+``scale``, ``align``, ``class``.
+
+Configuration options
+---------------------
+
+The plot directive has the following configuration options:
+
+    plot_include_source
+        Default value for the include-source option
+
+    plot_pre_code
+        Code that should be executed before each plot.
+
+    plot_basedir
+        Base directory, to which plot:: file names are relative to.
+        (If None or empty, file names are relative to the directoly where
+        the file containing the directive is.)
+
+    plot_formats
+        File formats to generate. List of tuples or strings::
+
+            [(suffix, dpi), suffix, ...]
+
+        that determine the file format and the DPI. For entries whose
+        DPI was omitted, sensible defaults are chosen.
+
+    plot_html_show_formats
+        Whether to show links to the files in HTML.
+
+TODO
+----
+
+* Refactor Latex output; now it's plain images, but it would be nice
+  to make them appear side-by-side, or in floats.
+
+"""
+
+import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap, traceback
+import sphinx
+
+import warnings
+warnings.warn("A plot_directive module is also available under "
+              "matplotlib.sphinxext; expect this numpydoc.plot_directive "
+              "module to be deprecated after relevant features have been "
+              "integrated there.",
+              FutureWarning, stacklevel=2)
+
+
+#------------------------------------------------------------------------------
+# Registration hook
+#------------------------------------------------------------------------------
+
+def setup(app):
+    setup.app = app
+    setup.config = app.config
+    setup.confdir = app.confdir
+    
+    app.add_config_value('plot_pre_code', '', True)
+    app.add_config_value('plot_include_source', False, True)
+    app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
+    app.add_config_value('plot_basedir', None, True)
+    app.add_config_value('plot_html_show_formats', True, True)
+
+    app.add_directive('plot', plot_directive, True, (0, 1, False),
+                      **plot_directive_options)
+
+#------------------------------------------------------------------------------
+# plot:: directive
+#------------------------------------------------------------------------------
+from docutils.parsers.rst import directives
+from docutils import nodes
+
+def plot_directive(name, arguments, options, content, lineno,
+                   content_offset, block_text, state, state_machine):
+    return run(arguments, content, options, state_machine, state, lineno)
+plot_directive.__doc__ = __doc__
+
+def _option_boolean(arg):
+    if not arg or not arg.strip():
+        # no argument given, assume used as a flag
+        return True
+    elif arg.strip().lower() in ('no', '0', 'false'):
+        return False
+    elif arg.strip().lower() in ('yes', '1', 'true'):
+        return True
+    else:
+        raise ValueError('"%s" unknown boolean' % arg)
+
+def _option_format(arg):
+    return directives.choice(arg, ('python', 'lisp'))
+
+def _option_align(arg):
+    return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
+                                   "right"))
+
+plot_directive_options = {'alt': directives.unchanged,
+                          'height': directives.length_or_unitless,
+                          'width': directives.length_or_percentage_or_unitless,
+                          'scale': directives.nonnegative_int,
+                          'align': _option_align,
+                          'class': directives.class_option,
+                          'include-source': _option_boolean,
+                          'format': _option_format,
+                          }
+
+#------------------------------------------------------------------------------
+# Generating output
+#------------------------------------------------------------------------------
+
+from docutils import nodes, utils
+
+try:
+    # Sphinx depends on either Jinja or Jinja2
+    import jinja2
+    def format_template(template, **kw):
+        return jinja2.Template(template).render(**kw)
+except ImportError:
+    import jinja
+    def format_template(template, **kw):
+        return jinja.from_string(template, **kw)
+
+TEMPLATE = """
+{{ source_code }}
+
+{{ only_html }}
+
+   {% if source_link or (html_show_formats and not multi_image) %}
+   (
+   {%- if source_link -%}
+   `Source code <{{ source_link }}>`__
+   {%- endif -%}
+   {%- if html_show_formats and not multi_image -%}
+     {%- for img in images -%}
+       {%- for fmt in img.formats -%}
+         {%- if source_link or not loop.first -%}, {% endif -%}
+         `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
+       {%- endfor -%}
+     {%- endfor -%}
+   {%- endif -%}
+   )
+   {% endif %}
+
+   {% for img in images %}
+   .. figure:: {{ build_dir }}/{{ img.basename }}.png
+      {%- for option in options %}
+      {{ option }}
+      {% endfor %}
+
+      {% if html_show_formats and multi_image -%}
+        (
+        {%- for fmt in img.formats -%}
+        {%- if not loop.first -%}, {% endif -%}
+        `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
+        {%- endfor -%}
+        )
+      {%- endif -%}
+   {% endfor %}
+
+{{ only_latex }}
+
+   {% for img in images %}
+   .. image:: {{ build_dir }}/{{ img.basename }}.pdf
+   {% endfor %}
+
+"""
+
+class ImageFile(object):
+    def __init__(self, basename, dirname):
+        self.basename = basename
+        self.dirname = dirname
+        self.formats = []
+
+    def filename(self, format):
+        return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
+
+    def filenames(self):
+        return [self.filename(fmt) for fmt in self.formats]
+
+def run(arguments, content, options, state_machine, state, lineno):
+    if arguments and content:
+        raise RuntimeError("plot:: directive can't have both args and content")
+
+    document = state_machine.document
+    config = document.settings.env.config
+
+    options.setdefault('include-source', config.plot_include_source)
+
+    # determine input
+    rst_file = document.attributes['source']
+    rst_dir = os.path.dirname(rst_file)
+
+    if arguments:
+        if not config.plot_basedir:
+            source_file_name = os.path.join(rst_dir,
+                                            directives.uri(arguments[0]))
+        else:
+            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
+                                            directives.uri(arguments[0]))
+        code = open(source_file_name, 'r').read()
+        output_base = os.path.basename(source_file_name)
+    else:
+        source_file_name = rst_file
+        code = textwrap.dedent("\n".join(map(str, content)))
+        counter = document.attributes.get('_plot_counter', 0) + 1
+        document.attributes['_plot_counter'] = counter
+        base, ext = os.path.splitext(os.path.basename(source_file_name))
+        output_base = '%s-%d.py' % (base, counter)
+
+    base, source_ext = os.path.splitext(output_base)
+    if source_ext in ('.py', '.rst', '.txt'):
+        output_base = base
+    else:
+        source_ext = ''
+
+    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
+    output_base = output_base.replace('.', '-')
+
+    # is it in doctest format?
+    is_doctest = contains_doctest(code)
+    if options.has_key('format'):
+        if options['format'] == 'python':
+            is_doctest = False
+        else:
+            is_doctest = True
+
+    # determine output directory name fragment
+    source_rel_name = relpath(source_file_name, setup.confdir)
+    source_rel_dir = os.path.dirname(source_rel_name)
+    while source_rel_dir.startswith(os.path.sep):
+        source_rel_dir = source_rel_dir[1:]
+
+    # build_dir: where to place output files (temporarily)
+    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
+                             'plot_directive',
+                             source_rel_dir)
+    if not os.path.exists(build_dir):
+        os.makedirs(build_dir)
+
+    # output_dir: final location in the builder's directory
+    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
+                                            source_rel_dir))
+
+    # how to link to files from the RST file
+    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
+                                 source_rel_dir).replace(os.path.sep, '/')
+    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
+    source_link = dest_dir_link + '/' + output_base + source_ext
+
+    # make figures
+    try:
+        results = makefig(code, source_file_name, build_dir, output_base,
+                          config)
+        errors = []
+    except PlotError, err:
+        reporter = state.memo.reporter
+        sm = reporter.system_message(
+            2, "Exception occurred in plotting %s: %s" % (output_base, err),
+            line=lineno)
+        results = [(code, [])]
+        errors = [sm]
+
+    # generate output restructuredtext
+    total_lines = []
+    for j, (code_piece, images) in enumerate(results):
+        if options['include-source']:
+            if is_doctest:
+                lines = ['']
+                lines += [row.rstrip() for row in code_piece.split('\n')]
+            else:
+                lines = ['.. code-block:: python', '']
+                lines += ['    %s' % row.rstrip()
+                          for row in code_piece.split('\n')]
+            source_code = "\n".join(lines)
+        else:
+            source_code = ""
+
+        opts = [':%s: %s' % (key, val) for key, val in options.items()
+                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
+
+        only_html = ".. only:: html"
+        only_latex = ".. only:: latex"
+
+        if j == 0:
+            src_link = source_link
+        else:
+            src_link = None
+
+        result = format_template(
+            TEMPLATE,
+            dest_dir=dest_dir_link,
+            build_dir=build_dir_link,
+            source_link=src_link,
+            multi_image=len(images) > 1,
+            only_html=only_html,
+            only_latex=only_latex,
+            options=opts,
+            images=images,
+            source_code=source_code,
+            html_show_formats=config.plot_html_show_formats)
+
+        total_lines.extend(result.split("\n"))
+        total_lines.extend("\n")
+
+    if total_lines:
+        state_machine.insert_input(total_lines, source=source_file_name)
+
+    # copy image files to builder's output directory
+    if not os.path.exists(dest_dir):
+        os.makedirs(dest_dir)
+
+    for code_piece, images in results:
+        for img in images:
+            for fn in img.filenames():
+                shutil.copyfile(fn, os.path.join(dest_dir,
+                                                 os.path.basename(fn)))
+
+    # copy script (if necessary)
+    if source_file_name == rst_file:
+        target_name = os.path.join(dest_dir, output_base + source_ext)
+        f = open(target_name, 'w')
+        f.write(unescape_doctest(code))
+        f.close()
+
+    return errors
+
+
+#------------------------------------------------------------------------------
+# Run code and capture figures
+#------------------------------------------------------------------------------
+
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+import matplotlib.image as image
+from matplotlib import _pylab_helpers
+
+import exceptions
+
+def contains_doctest(text):
+    try:
+        # check if it's valid Python as-is
+        compile(text, '<string>', 'exec')
+        return False
+    except SyntaxError:
+        pass
+    r = re.compile(r'^\s*>>>', re.M)
+    m = r.search(text)
+    return bool(m)
+
+def unescape_doctest(text):
+    """
+    Extract code from a piece of text, which contains either Python code
+    or doctests.
+
+    """
+    if not contains_doctest(text):
+        return text
+
+    code = ""
+    for line in text.split("\n"):
+        m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
+        if m:
+            code += m.group(2) + "\n"
+        elif line.strip():
+            code += "# " + line.strip() + "\n"
+        else:
+            code += "\n"
+    return code
+
+def split_code_at_show(text):
+    """
+    Split code at plt.show()
+
+    """
+
+    parts = []
+    is_doctest = contains_doctest(text)
+
+    part = []
+    for line in text.split("\n"):
+        if (not is_doctest and line.strip() == 'plt.show()') or \
+               (is_doctest and line.strip() == '>>> plt.show()'):
+            part.append(line)
+            parts.append("\n".join(part))
+            part = []
+        else:
+            part.append(line)
+    if "\n".join(part).strip():
+        parts.append("\n".join(part))
+    return parts
+
+class PlotError(RuntimeError):
+    pass
+
+def run_code(code, code_path, ns=None):
+    # Change the working directory to the directory of the example, so
+    # it can get at its data files, if any.
+    pwd = os.getcwd()
+    old_sys_path = list(sys.path)
+    if code_path is not None:
+        dirname = os.path.abspath(os.path.dirname(code_path))
+        os.chdir(dirname)
+        sys.path.insert(0, dirname)
+
+    # Redirect stdout
+    stdout = sys.stdout
+    sys.stdout = cStringIO.StringIO()
+
+    # Reset sys.argv
+    old_sys_argv = sys.argv
+    sys.argv = [code_path]
+    
+    try:
+        try:
+            code = unescape_doctest(code)
+            if ns is None:
+                ns = {}
+            if not ns:
+                exec setup.config.plot_pre_code in ns
+            exec code in ns
+        except (Exception, SystemExit), err:
+            raise PlotError(traceback.format_exc())
+    finally:
+        os.chdir(pwd)
+        sys.argv = old_sys_argv
+        sys.path[:] = old_sys_path
+        sys.stdout = stdout
+    return ns
+
+
+#------------------------------------------------------------------------------
+# Generating figures
+#------------------------------------------------------------------------------
+
+def out_of_date(original, derived):
+    """
+    Returns True if derivative is out-of-date wrt original,
+    both of which are full file paths.
+    """
+    return (not os.path.exists(derived)
+            or os.stat(derived).st_mtime < os.stat(original).st_mtime)
+
+
+def makefig(code, code_path, output_dir, output_base, config):
+    """
+    Run a pyplot script *code* and save the images under *output_dir*
+    with file names derived from *output_base*
+
+    """
+
+    # -- Parse format list
+    default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
+    formats = []
+    for fmt in config.plot_formats:
+        if isinstance(fmt, str):
+            formats.append((fmt, default_dpi.get(fmt, 80)))
+        elif type(fmt) in (tuple, list) and len(fmt)==2:
+            formats.append((str(fmt[0]), int(fmt[1])))
+        else:
+            raise PlotError('invalid image format "%r" in plot_formats' % fmt)
+
+    # -- Try to determine if all images already exist
+
+    code_pieces = split_code_at_show(code)
+
+    # Look for single-figure output files first
+    all_exists = True
+    img = ImageFile(output_base, output_dir)
+    for format, dpi in formats:
+        if out_of_date(code_path, img.filename(format)):
+            all_exists = False
+            break
+        img.formats.append(format)
+
+    if all_exists:
+        return [(code, [img])]
+
+    # Then look for multi-figure output files
+    results = []
+    all_exists = True
+    for i, code_piece in enumerate(code_pieces):
+        images = []
+        for j in xrange(1000):
+            img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
+            for format, dpi in formats:
+                if out_of_date(code_path, img.filename(format)):
+                    all_exists = False
+                    break
+                img.formats.append(format)
+
+            # assume that if we have one, we have them all
+            if not all_exists:
+                all_exists = (j > 0)
+                break
+            images.append(img)
+        if not all_exists:
+            break
+        results.append((code_piece, images))
+
+    if all_exists:
+        return results
+
+    # -- We didn't find the files, so build them
+
+    results = []
+    ns = {}
+
+    for i, code_piece in enumerate(code_pieces):
+        # Clear between runs
+        plt.close('all')
+
+        # Run code
+        run_code(code_piece, code_path, ns)
+
+        # Collect images
+        images = []
+        fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
+        for j, figman in enumerate(fig_managers):
+            if len(fig_managers) == 1 and len(code_pieces) == 1:
+                img = ImageFile(output_base, output_dir)
+            else:
+                img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
+                                output_dir)
+            images.append(img)
+            for format, dpi in formats:
+                try:
+                    figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
+                except exceptions.BaseException, err:
+                    raise PlotError(traceback.format_exc())
+                img.formats.append(format)
+
+        # Results
+        results.append((code_piece, images))
+
+    return results
+
+
+#------------------------------------------------------------------------------
+# Relative pathnames
+#------------------------------------------------------------------------------
+
+try:
+    from os.path import relpath
+except ImportError:
+    # Copied from Python 2.7
+    if 'posix' in sys.builtin_module_names:
+        def relpath(path, start=os.path.curdir):
+            """Return a relative version of a path"""
+            from os.path import sep, curdir, join, abspath, commonprefix, \
+                 pardir
+
+            if not path:
+                raise ValueError("no path specified")
+
+            start_list = abspath(start).split(sep)
+            path_list = abspath(path).split(sep)
+
+            # Work out how much of the filepath is shared by start and path.
+            i = len(commonprefix([start_list, path_list]))
+
+            rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
+            if not rel_list:
+                return curdir
+            return join(*rel_list)
+    elif 'nt' in sys.builtin_module_names:
+        def relpath(path, start=os.path.curdir):
+            """Return a relative version of a path"""
+            from os.path import sep, curdir, join, abspath, commonprefix, \
+                 pardir, splitunc
+
+            if not path:
+                raise ValueError("no path specified")
+            start_list = abspath(start).split(sep)
+            path_list = abspath(path).split(sep)
+            if start_list[0].lower() != path_list[0].lower():
+                unc_path, rest = splitunc(path)
+                unc_start, rest = splitunc(start)
+                if bool(unc_path) ^ bool(unc_start):
+                    raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
+                                                                        % (path, start))
+                else:
+                    raise ValueError("path is on drive %s, start on drive %s"
+                                                        % (path_list[0], start_list[0]))
+            # Work out how much of the filepath is shared by start and path.
+            for i in range(min(len(start_list), len(path_list))):
+                if start_list[i].lower() != path_list[i].lower():
+                    break
+            else:
+                i += 1
+
+            rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
+            if not rel_list:
+                return curdir
+            return join(*rel_list)
+    else:
+        raise RuntimeError("Unsupported platform (no relpath available!)")
diff --git a/doc/sphinxext/setup.py b/doc/sphinxext/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..76e3fd81bb4ceb496d4e2e20248cedc3bf4c8677
--- /dev/null
+++ b/doc/sphinxext/setup.py
@@ -0,0 +1,31 @@
+from distutils.core import setup
+import setuptools
+import sys, os
+
+version = "0.4"
+
+setup(
+    name="numpydoc",
+    packages=["numpydoc"],
+    package_dir={"numpydoc": ""},
+    version=version,
+    description="Sphinx extension to support docstrings in Numpy format",
+    # classifiers from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+    classifiers=["Development Status :: 3 - Alpha",
+                 "Environment :: Plugins",
+                 "License :: OSI Approved :: BSD License",
+                 "Topic :: Documentation"],
+    keywords="sphinx numpy",
+    author="Pauli Virtanen and others",
+    author_email="pav@iki.fi",
+    url="http://github.com/numpy/numpy/tree/master/doc/sphinxext",
+    license="BSD",
+    zip_safe=False,
+    install_requires=["Sphinx >= 1.0.1"],
+    package_data={'numpydoc': 'tests', '': ''},
+    entry_points={
+        "console_scripts": [
+            "autosummary_generate = numpydoc.autosummary_generate:main",
+        ],
+    },
+)
diff --git a/doc/sphinxext/tests/test_docscrape.py b/doc/sphinxext/tests/test_docscrape.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fab79832d50b4b219da6fe06acfa67832dc7054
--- /dev/null
+++ b/doc/sphinxext/tests/test_docscrape.py
@@ -0,0 +1,615 @@
+# -*- encoding:utf-8 -*-
+
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+
+from docscrape import NumpyDocString, FunctionDoc, ClassDoc
+from docscrape_sphinx import SphinxDocString, SphinxClassDoc
+from nose.tools import *
+
+doc_txt = '''\
+  numpy.multivariate_normal(mean, cov, shape=None, spam=None)
+
+  Draw values from a multivariate normal distribution with specified
+  mean and covariance.
+
+  The multivariate normal or Gaussian distribution is a generalisation
+  of the one-dimensional normal distribution to higher dimensions.
+
+  Parameters
+  ----------
+  mean : (N,) ndarray
+      Mean of the N-dimensional distribution.
+
+      .. math::
+
+         (1+2+3)/3
+
+  cov : (N,N) ndarray
+      Covariance matrix of the distribution.
+  shape : tuple of ints
+      Given a shape of, for example, (m,n,k), m*n*k samples are
+      generated, and packed in an m-by-n-by-k arrangement.  Because
+      each sample is N-dimensional, the output shape is (m,n,k,N).
+
+  Returns
+  -------
+  out : ndarray
+      The drawn samples, arranged according to `shape`.  If the
+      shape given is (m,n,...), then the shape of `out` is is
+      (m,n,...,N).
+
+      In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
+      value drawn from the distribution.
+
+  Other Parameters
+  ----------------
+  spam : parrot
+      A parrot off its mortal coil.
+
+  Raises
+  ------
+  RuntimeError
+      Some error
+
+  Warns
+  -----
+  RuntimeWarning
+      Some warning
+
+  Warnings
+  --------
+  Certain warnings apply.
+
+  Notes
+  -----
+
+  Instead of specifying the full covariance matrix, popular
+  approximations include:
+
+    - Spherical covariance (`cov` is a multiple of the identity matrix)
+    - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
+
+  This geometrical property can be seen in two dimensions by plotting
+  generated data-points:
+
+  >>> mean = [0,0]
+  >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
+
+  >>> x,y = multivariate_normal(mean,cov,5000).T
+  >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
+
+  Note that the covariance matrix must be symmetric and non-negative
+  definite.
+
+  References
+  ----------
+  .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
+         Processes," 3rd ed., McGraw-Hill Companies, 1991
+  .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
+         2nd ed., Wiley, 2001.
+
+  See Also
+  --------
+  some, other, funcs
+  otherfunc : relationship
+
+  Examples
+  --------
+  >>> mean = (1,2)
+  >>> cov = [[1,0],[1,0]]
+  >>> x = multivariate_normal(mean,cov,(3,3))
+  >>> print x.shape
+  (3, 3, 2)
+
+  The following is probably true, given that 0.6 is roughly twice the
+  standard deviation:
+
+  >>> print list( (x[0,0,:] - mean) < 0.6 )
+  [True, True]
+
+  .. index:: random
+     :refguide: random;distributions, random;gauss
+
+  '''
+doc = NumpyDocString(doc_txt)
+
+
+def test_signature():
+    assert doc['Signature'].startswith('numpy.multivariate_normal(')
+    assert doc['Signature'].endswith('spam=None)')
+
+def test_summary():
+    assert doc['Summary'][0].startswith('Draw values')
+    assert doc['Summary'][-1].endswith('covariance.')
+
+def test_extended_summary():
+    assert doc['Extended Summary'][0].startswith('The multivariate normal')
+
+def test_parameters():
+    assert_equal(len(doc['Parameters']), 3)
+    assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape'])
+
+    arg, arg_type, desc = doc['Parameters'][1]
+    assert_equal(arg_type, '(N,N) ndarray')
+    assert desc[0].startswith('Covariance matrix')
+    assert doc['Parameters'][0][-1][-2] == '   (1+2+3)/3'
+
+def test_other_parameters():
+    assert_equal(len(doc['Other Parameters']), 1)
+    assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam'])
+    arg, arg_type, desc = doc['Other Parameters'][0]
+    assert_equal(arg_type, 'parrot')
+    assert desc[0].startswith('A parrot off its mortal coil')
+
+def test_returns():
+    assert_equal(len(doc['Returns']), 1)
+    arg, arg_type, desc = doc['Returns'][0]
+    assert_equal(arg, 'out')
+    assert_equal(arg_type, 'ndarray')
+    assert desc[0].startswith('The drawn samples')
+    assert desc[-1].endswith('distribution.')
+
+def test_notes():
+    assert doc['Notes'][0].startswith('Instead')
+    assert doc['Notes'][-1].endswith('definite.')
+    assert_equal(len(doc['Notes']), 17)
+
+def test_references():
+    assert doc['References'][0].startswith('..')
+    assert doc['References'][-1].endswith('2001.')
+
+def test_examples():
+    assert doc['Examples'][0].startswith('>>>')
+    assert doc['Examples'][-1].endswith('True]')
+
+def test_index():
+    assert_equal(doc['index']['default'], 'random')
+    print doc['index']
+    assert_equal(len(doc['index']), 2)
+    assert_equal(len(doc['index']['refguide']), 2)
+
+def non_blank_line_by_line_compare(a,b):
+    a = [l for l in a.split('\n') if l.strip()]
+    b = [l for l in b.split('\n') if l.strip()]
+    for n,line in enumerate(a):
+        if not line == b[n]:
+            raise AssertionError("Lines %s of a and b differ: "
+                                 "\n>>> %s\n<<< %s\n" %
+                                 (n,line,b[n]))
+def test_str():
+    non_blank_line_by_line_compare(str(doc),
+"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
+
+Draw values from a multivariate normal distribution with specified
+mean and covariance.
+
+The multivariate normal or Gaussian distribution is a generalisation
+of the one-dimensional normal distribution to higher dimensions.
+
+Parameters
+----------
+mean : (N,) ndarray
+    Mean of the N-dimensional distribution.
+
+    .. math::
+
+       (1+2+3)/3
+
+cov : (N,N) ndarray
+    Covariance matrix of the distribution.
+shape : tuple of ints
+    Given a shape of, for example, (m,n,k), m*n*k samples are
+    generated, and packed in an m-by-n-by-k arrangement.  Because
+    each sample is N-dimensional, the output shape is (m,n,k,N).
+
+Returns
+-------
+out : ndarray
+    The drawn samples, arranged according to `shape`.  If the
+    shape given is (m,n,...), then the shape of `out` is is
+    (m,n,...,N).
+
+    In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
+    value drawn from the distribution.
+
+Other Parameters
+----------------
+spam : parrot
+    A parrot off its mortal coil.
+
+Raises
+------
+RuntimeError : 
+    Some error
+
+Warns
+-----
+RuntimeWarning : 
+    Some warning
+
+Warnings
+--------
+Certain warnings apply.
+
+See Also
+--------
+`some`_, `other`_, `funcs`_
+
+`otherfunc`_
+    relationship
+
+Notes
+-----
+Instead of specifying the full covariance matrix, popular
+approximations include:
+
+  - Spherical covariance (`cov` is a multiple of the identity matrix)
+  - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
+
+This geometrical property can be seen in two dimensions by plotting
+generated data-points:
+
+>>> mean = [0,0]
+>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
+
+>>> x,y = multivariate_normal(mean,cov,5000).T
+>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
+
+Note that the covariance matrix must be symmetric and non-negative
+definite.
+
+References
+----------
+.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
+       Processes," 3rd ed., McGraw-Hill Companies, 1991
+.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
+       2nd ed., Wiley, 2001.
+
+Examples
+--------
+>>> mean = (1,2)
+>>> cov = [[1,0],[1,0]]
+>>> x = multivariate_normal(mean,cov,(3,3))
+>>> print x.shape
+(3, 3, 2)
+
+The following is probably true, given that 0.6 is roughly twice the
+standard deviation:
+
+>>> print list( (x[0,0,:] - mean) < 0.6 )
+[True, True]
+
+.. index:: random
+   :refguide: random;distributions, random;gauss""")
+
+
+def test_sphinx_str():
+    sphinx_doc = SphinxDocString(doc_txt)
+    non_blank_line_by_line_compare(str(sphinx_doc),
+"""
+.. index:: random
+   single: random;distributions, random;gauss
+
+Draw values from a multivariate normal distribution with specified
+mean and covariance.
+
+The multivariate normal or Gaussian distribution is a generalisation
+of the one-dimensional normal distribution to higher dimensions.
+
+:Parameters:
+
+    **mean** : (N,) ndarray
+
+        Mean of the N-dimensional distribution.
+
+        .. math::
+
+           (1+2+3)/3
+
+    **cov** : (N,N) ndarray
+
+        Covariance matrix of the distribution.
+
+    **shape** : tuple of ints
+
+        Given a shape of, for example, (m,n,k), m*n*k samples are
+        generated, and packed in an m-by-n-by-k arrangement.  Because
+        each sample is N-dimensional, the output shape is (m,n,k,N).
+
+:Returns:
+
+    **out** : ndarray
+
+        The drawn samples, arranged according to `shape`.  If the
+        shape given is (m,n,...), then the shape of `out` is is
+        (m,n,...,N).
+        
+        In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
+        value drawn from the distribution.
+
+:Other Parameters:
+
+    **spam** : parrot
+
+        A parrot off its mortal coil.
+ 
+:Raises:
+
+    **RuntimeError** : 
+
+        Some error
+
+:Warns:
+
+    **RuntimeWarning** : 
+
+        Some warning
+
+.. warning::
+
+    Certain warnings apply.
+
+.. seealso::
+    
+    :obj:`some`, :obj:`other`, :obj:`funcs`
+    
+    :obj:`otherfunc`
+        relationship
+    
+.. rubric:: Notes
+
+Instead of specifying the full covariance matrix, popular
+approximations include:
+
+  - Spherical covariance (`cov` is a multiple of the identity matrix)
+  - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
+
+This geometrical property can be seen in two dimensions by plotting
+generated data-points:
+
+>>> mean = [0,0]
+>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
+
+>>> x,y = multivariate_normal(mean,cov,5000).T
+>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
+
+Note that the covariance matrix must be symmetric and non-negative
+definite.
+
+.. rubric:: References
+
+.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
+       Processes," 3rd ed., McGraw-Hill Companies, 1991
+.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
+       2nd ed., Wiley, 2001.
+
+.. only:: latex
+
+   [1]_, [2]_
+
+.. rubric:: Examples
+
+>>> mean = (1,2)
+>>> cov = [[1,0],[1,0]]
+>>> x = multivariate_normal(mean,cov,(3,3))
+>>> print x.shape
+(3, 3, 2)
+
+The following is probably true, given that 0.6 is roughly twice the
+standard deviation:
+
+>>> print list( (x[0,0,:] - mean) < 0.6 )
+[True, True]
+""")
+
+       
+doc2 = NumpyDocString("""
+    Returns array of indices of the maximum values of along the given axis.
+
+    Parameters
+    ----------
+    a : {array_like}
+        Array to look in.
+    axis : {None, integer}
+        If None, the index is into the flattened array, otherwise along
+        the specified axis""")
+
+def test_parameters_without_extended_description():
+    assert_equal(len(doc2['Parameters']), 2)
+
+doc3 = NumpyDocString("""
+    my_signature(*params, **kwds)
+
+    Return this and that.
+    """)
+
+def test_escape_stars():
+    signature = str(doc3).split('\n')[0]
+    assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
+
+doc4 = NumpyDocString(
+    """a.conj()
+
+    Return an array with all complex-valued elements conjugated.""")
+
+def test_empty_extended_summary():
+    assert_equal(doc4['Extended Summary'], [])
+
+doc5 = NumpyDocString(
+    """
+    a.something()
+
+    Raises
+    ------
+    LinAlgException
+        If array is singular.
+
+    Warns
+    -----
+    SomeWarning
+        If needed
+    """)
+
+def test_raises():
+    assert_equal(len(doc5['Raises']), 1)
+    name,_,desc = doc5['Raises'][0]
+    assert_equal(name,'LinAlgException')
+    assert_equal(desc,['If array is singular.'])
+
+def test_warns():
+    assert_equal(len(doc5['Warns']), 1)
+    name,_,desc = doc5['Warns'][0]
+    assert_equal(name,'SomeWarning')
+    assert_equal(desc,['If needed'])
+
+def test_see_also():
+    doc6 = NumpyDocString(
+    """
+    z(x,theta)
+
+    See Also
+    --------
+    func_a, func_b, func_c
+    func_d : some equivalent func
+    foo.func_e : some other func over
+             multiple lines
+    func_f, func_g, :meth:`func_h`, func_j,
+    func_k
+    :obj:`baz.obj_q`
+    :class:`class_j`: fubar
+        foobar
+    """)
+
+    assert len(doc6['See Also']) == 12
+    for func, desc, role in doc6['See Also']:
+        if func in ('func_a', 'func_b', 'func_c', 'func_f',
+                    'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
+            assert(not desc)
+        else:
+            assert(desc)
+
+        if func == 'func_h':
+            assert role == 'meth'
+        elif func == 'baz.obj_q':
+            assert role == 'obj'
+        elif func == 'class_j':
+            assert role == 'class'
+        else:
+            assert role is None
+
+        if func == 'func_d':
+            assert desc == ['some equivalent func']
+        elif func == 'foo.func_e':
+            assert desc == ['some other func over', 'multiple lines']
+        elif func == 'class_j':
+            assert desc == ['fubar', 'foobar']
+
+def test_see_also_print():
+    class Dummy(object):
+        """
+        See Also
+        --------
+        func_a, func_b
+        func_c : some relationship
+                 goes here
+        func_d
+        """
+        pass
+
+    obj = Dummy()
+    s = str(FunctionDoc(obj, role='func'))
+    assert(':func:`func_a`, :func:`func_b`' in s)
+    assert('    some relationship' in s)
+    assert(':func:`func_d`' in s)
+
+doc7 = NumpyDocString("""
+
+        Doc starts on second line.
+
+        """)
+
+def test_empty_first_line():
+    assert doc7['Summary'][0].startswith('Doc starts')
+
+
+def test_no_summary():
+    str(SphinxDocString("""
+    Parameters
+    ----------"""))
+
+
+def test_unicode():
+    doc = SphinxDocString("""
+    öäöäöäöäöåååå
+
+    öäöäöäööäååå
+
+    Parameters
+    ----------
+    ååå : äää
+        ööö
+
+    Returns
+    -------
+    ååå : ööö
+        äää
+
+    """)
+    assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8')
+
+def test_plot_examples():
+    cfg = dict(use_plots=True)
+
+    doc = SphinxDocString("""
+    Examples
+    --------
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot([1,2,3],[4,5,6])
+    >>> plt.show()
+    """, config=cfg)
+    assert 'plot::' in str(doc), str(doc)
+
+    doc = SphinxDocString("""
+    Examples
+    --------
+    .. plot::
+    
+       import matplotlib.pyplot as plt
+       plt.plot([1,2,3],[4,5,6])
+       plt.show()
+    """, config=cfg)
+    assert str(doc).count('plot::') == 1, str(doc)
+
+def test_class_members():
+
+    class Dummy(object):
+        """
+        Dummy class.
+
+        """
+        def spam(self, a, b):
+            """Spam\n\nSpam spam."""
+            pass
+        def ham(self, c, d):
+            """Cheese\n\nNo cheese."""
+            pass
+
+    for cls in (ClassDoc, SphinxClassDoc):
+        doc = cls(Dummy, config=dict(show_class_members=False))
+        assert 'Methods' not in str(doc), (cls, str(doc))
+        assert 'spam' not in str(doc), (cls, str(doc))
+        assert 'ham' not in str(doc), (cls, str(doc))
+
+        doc = cls(Dummy, config=dict(show_class_members=True))
+        assert 'Methods' in str(doc), (cls, str(doc))
+        assert 'spam' in str(doc), (cls, str(doc))
+        assert 'ham' in str(doc), (cls, str(doc))
+
+        if cls is SphinxClassDoc:
+            assert '.. autosummary::' in str(doc), str(doc)
+
+if __name__ == "__main__":
+    import nose
+    nose.run()
+
diff --git a/doc/sphinxext/traitsdoc.py b/doc/sphinxext/traitsdoc.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fcf2c1cd38c90e2ad350e5dfc3acd1caf5cf329
--- /dev/null
+++ b/doc/sphinxext/traitsdoc.py
@@ -0,0 +1,140 @@
+"""
+=========
+traitsdoc
+=========
+
+Sphinx extension that handles docstrings in the Numpy standard format, [1]
+and support Traits [2].
+
+This extension can be used as a replacement for ``numpydoc`` when support
+for Traits is required.
+
+.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
+.. [2] http://code.enthought.com/projects/traits/
+
+"""
+
+import inspect
+import os
+import pydoc
+
+import docscrape
+import docscrape_sphinx
+from docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString
+
+import numpydoc
+
+import comment_eater
+
+class SphinxTraitsDoc(SphinxClassDoc):
+    def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc):
+        if not inspect.isclass(cls):
+            raise ValueError("Initialise using a class. Got %r" % cls)
+        self._cls = cls
+
+        if modulename and not modulename.endswith('.'):
+            modulename += '.'
+        self._mod = modulename
+        self._name = cls.__name__
+        self._func_doc = func_doc
+
+        docstring = pydoc.getdoc(cls)
+        docstring = docstring.split('\n')
+
+        # De-indent paragraph
+        try:
+            indent = min(len(s) - len(s.lstrip()) for s in docstring
+                         if s.strip())
+        except ValueError:
+            indent = 0
+
+        for n,line in enumerate(docstring):
+            docstring[n] = docstring[n][indent:]
+
+        self._doc = docscrape.Reader(docstring)
+        self._parsed_data = {
+            'Signature': '',
+            'Summary': '',
+            'Description': [],
+            'Extended Summary': [],
+            'Parameters': [],
+            'Returns': [],
+            'Raises': [],
+            'Warns': [],
+            'Other Parameters': [],
+            'Traits': [],
+            'Methods': [],
+            'See Also': [],
+            'Notes': [],
+            'References': '',
+            'Example': '',
+            'Examples': '',
+            'index': {}
+            }
+
+        self._parse()
+
+    def _str_summary(self):
+        return self['Summary'] + ['']
+
+    def _str_extended_summary(self):
+        return self['Description'] + self['Extended Summary'] + ['']
+
+    def __str__(self, indent=0, func_role="func"):
+        out = []
+        out += self._str_signature()
+        out += self._str_index() + ['']
+        out += self._str_summary()
+        out += self._str_extended_summary()
+        for param_list in ('Parameters', 'Traits', 'Methods',
+                           'Returns','Raises'):
+            out += self._str_param_list(param_list)
+        out += self._str_see_also("obj")
+        out += self._str_section('Notes')
+        out += self._str_references()
+        out += self._str_section('Example')
+        out += self._str_section('Examples')
+        out = self._str_indent(out,indent)
+        return '\n'.join(out)
+
+def looks_like_issubclass(obj, classname):
+    """ Return True if the object has a class or superclass with the given class
+    name.
+
+    Ignores old-style classes.
+    """
+    t = obj
+    if t.__name__ == classname:
+        return True
+    for klass in t.__mro__:
+        if klass.__name__ == classname:
+            return True
+    return False
+
+def get_doc_object(obj, what=None, config=None):
+    if what is None:
+        if inspect.isclass(obj):
+            what = 'class'
+        elif inspect.ismodule(obj):
+            what = 'module'
+        elif callable(obj):
+            what = 'function'
+        else:
+            what = 'object'
+    if what == 'class':
+        doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config)
+        if looks_like_issubclass(obj, 'HasTraits'):
+            for name, trait, comment in comment_eater.get_class_traits(obj):
+                # Exclude private traits.
+                if not name.startswith('_'):
+                    doc['Traits'].append((name, trait, comment.splitlines()))
+        return doc
+    elif what in ('function', 'method'):
+        return SphinxFunctionDoc(obj, '', config=config)
+    else:
+        return SphinxDocString(pydoc.getdoc(obj), config=config)
+
+def setup(app):
+    # init numpydoc
+    numpydoc.setup(app, get_doc_object)
+
diff --git a/doc/templates/autosummary/class.rst b/doc/templates/autosummary/class.rst
new file mode 100644
index 0000000000000000000000000000000000000000..77c7b9ec512e7bf9ecddd9863fde60d9d96aa06c
--- /dev/null
+++ b/doc/templates/autosummary/class.rst
@@ -0,0 +1,23 @@
+{% extends "!autosummary/class.rst" %}
+
+{% block methods %}
+{% if methods %}
+   .. rubric:: Methods
+   {% for item in methods %}
+      {%- if not item.startswith('_') or item in ['__call__'] %}
+   .. automethod:: {{ name }}.{{ item }}
+      {%- endif -%}
+   {%- endfor %}
+{% endif %}
+{% endblock %}
+
+{% block attributes %}
+{% if attributes %}
+   .. rubric:: Attributes
+   {% for item in attributes %}
+      {%- if not item.startswith('_') %}
+   .. autoattribute:: {{ name }}.{{ item }}
+      {%- endif -%}
+   {%- endfor %}
+{% endif %}
+{% endblock %}
diff --git a/doc/templates/layout.html b/doc/templates/layout.html
new file mode 100644
index 0000000000000000000000000000000000000000..ee28bee929d655a035b39cd9a2eda2a9c0a04573
--- /dev/null
+++ b/doc/templates/layout.html
@@ -0,0 +1,2 @@
+{% extends "!layout.html" %}
+{% set script_files = script_files + ["_static/togglediv.js"] %}
diff --git a/examples/square.py b/examples/square.py
new file mode 100644
index 0000000000000000000000000000000000000000..62a02059d179ffd220eee4bd5a8ecfec1114d86d
--- /dev/null
+++ b/examples/square.py
@@ -0,0 +1,120 @@
+"""An example of how to directly implement a system without using
+kwant.Builder.
+"""
+
+from __future__ import division
+import numpy as np
+import kwant
+from kwant.physics.selfenergy import square_self_energy
+
+__all__ = ['System' ]
+
+class Lead(object):
+    def __init__(self, width, t, potential):
+        self.width = width
+        self.t = t
+        self.potential = potential
+
+    def self_energy(self, fermi_energy):
+        return square_self_energy(self.width, self.t, self.potential,
+                                  fermi_energy)
+
+class System(kwant.system.FiniteSystem):
+    # Override abstract attributes.
+    graph = None
+    lead_neighbor_seqs = None
+
+    def __init__(self, shape, hopping,
+                 potential=0, lead_potentials=(0, 0),
+                 return_scalars_as_matrix=True):
+        """`potential` can be a container (indexed by a pair of integers) or a
+        function (taking a pair of integers as its parameter) or a number.
+        Checked in this order.
+        """
+        assert len(shape) == 2
+        for s in shape:
+            assert int(s) == s
+            assert s >= 1
+
+        self.as_matrix = return_scalars_as_matrix
+        self.shape = shape
+        if hasattr(potential, '__getitem__'):
+            self.pot = potential.__getitem__
+        elif hasattr(potential, '__call__'):
+            self.pot = potential
+        else:
+            self.pot = lambda xy: potential
+        self.t = hopping
+
+        # Build rectangular mesh graph
+        g = kwant.graph.Graph()
+        increment = [1, shape[0]]
+        for along, across in [(0, 1), (1, 0)]:
+            # Add edges in direction "along".
+            if shape[along] < 2: continue
+            edges = np.empty((2 * shape[across], 2), dtype=int)
+            edges[:shape[across], 0] = np.arange(
+                0, shape[across] * increment[across], increment[across])
+            edges[:shape[across], 1] = edges[:shape[across], 0]
+            edges[:shape[across], 1] += increment[along]
+            edges[shape[across]:, (0, 1)] = edges[:shape[across], (1, 0)]
+            g.add_edges(edges)
+            for i in xrange(shape[along] - 2):
+                edges += increment[along]
+                g.add_edges(edges)
+        self.graph = g.compressed()
+
+        self.lead_neighbor_seqs = []
+        for x in [0, shape[0] - 1]:
+            # We have to use list here, as numpy.array does not understand
+            # generators.
+            lead_neighbors = list(self.nodeid_from_pos((x, y))
+                              for y in xrange(shape[1]))
+            self.lead_neighbor_seqs.append(np.array(lead_neighbors))
+
+        self.leads = [Lead(shape[1], hopping, lead_potentials[i])
+                      for i in range(2)]
+
+    def num_orbitals(self, site):
+        """Return the number of orbitals of a site."""
+        return 1
+
+    def hamiltonian(self, i, j):
+        """Return an submatrix of the tight-binding Hamiltonian."""
+        if i == j:
+            # An on-site Hamiltonian has been requested.
+            result = 4 * self.t + self.pot(self.pos_from_nodeid(i))
+        else:
+            # A hopping element has been requested.
+            result = -self.t
+        if self.as_matrix:
+            result = np.array([[result]], dtype=complex)
+        return result
+
+    def nodeid_from_pos(self, pos):
+        for i in xrange(2):
+            assert int(pos[i]) == pos[i]
+            assert pos[i] >= 0 and pos[i] < self.shape[i]
+        return pos[0] + pos[1] * self.shape[0]
+
+    def pos_from_nodeid(self, nodeid):
+        result = (nodeid % self.shape[0]), (nodeid // self.shape[0])
+        assert result[1] >= 0 and result[1] < self.shape[1]
+        return result
+
+
+def main():
+    sys = System((10, 5), 1)
+    energies = [0.04 * i for i in xrange(100)]
+    data = [kwant.solve(sys, energy).transmission(1, 0)
+            for energy in energies]
+
+    import pylab
+    pylab.plot(energies, data)
+    pylab.xlabel("energy [in units of t]")
+    pylab.ylabel("conductance [in units of e^2/h]")
+    pylab.show()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/examples/tests/test_square.py b/examples/tests/test_square.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9b020b9d442167c5cfc68cb11148cb862172a00
--- /dev/null
+++ b/examples/tests/test_square.py
@@ -0,0 +1,37 @@
+from kwant import square
+from nose.tools import assert_equal, assert_raises
+from numpy.testing import assert_almost_equal
+
+def test_nodeid_to_from_pos():
+    s = square.System((3, 4), 1)
+    assert_raises(StandardError, s.nodeid_from_pos, (0, -2))
+    assert_raises(StandardError, s.nodeid_from_pos, (-1, 3))
+    assert_raises(StandardError, s.nodeid_from_pos, (3, 1))
+    assert_raises(StandardError, s.pos_from_nodeid, -1)
+    assert_raises(StandardError, s.pos_from_nodeid, 12)
+    assert_equal(s.nodeid_from_pos((0, 0)), 0)
+    assert_equal(s.nodeid_from_pos(s.pos_from_nodeid(7)), 7)
+    assert_equal(s.pos_from_nodeid(s.nodeid_from_pos((2, 3))), (2, 3))
+
+def test_hamiltonian():
+    sys = square.System((4, 5), 1)
+    for i in xrange(sys.graph.num_nodes):
+        shape = sys.hamiltonian(i, i).shape
+        assert_equal(len(shape), 2)
+        assert_equal(shape[0], sys.num_orbitals(i))
+        for j in sys.graph.out_neighbors(i):
+            m = sys.hamiltonian(i, j)
+            shape = m.shape
+            m_herm = m.T.conj()
+            assert_almost_equal(m, m_herm)
+            assert_almost_equal(m_herm, sys.hamiltonian(j, i))
+
+def test_self_energy():
+    sys = square.System((2, 4), 1)
+    for lead in xrange(len(sys.lead_neighbor_seqs)):
+        n_orb = sum(
+            sys.num_orbitals(site) for site in sys.lead_neighbor_seqs[lead])
+        se = sys.self_energy(lead, 0)
+        assert_equal(len(se.shape), 2)
+        assert_equal(se.shape[0], se.shape[1])
+        assert_equal(se.shape[0], n_orb)
diff --git a/examples/tutorial1a.py b/examples/tutorial1a.py
new file mode 100644
index 0000000000000000000000000000000000000000..64b3b6f7ec66f884767358ae11d50d5ae2c92523
--- /dev/null
+++ b/examples/tutorial1a.py
@@ -0,0 +1,107 @@
+# Physics background
+# ------------------
+#  Conductance of a quantum wire; subbands
+#
+# Kwant features highlighted
+# --------------------------
+#  - Builder for setting up transport systems easily
+#  - Making scattering region and leads
+#  - Using the simple sparse solver for computing Landauer conductance
+
+import kwant
+
+# First, define the tight-binding system
+
+sys = kwant.Builder()
+
+# Here, we are only working with square lattices
+a = 1
+lat = kwant.lattice.Square(a)
+sys.default_site_group = lat
+
+t = 1.0
+W = 10
+L = 30
+
+# Define the scattering region
+
+for i in xrange(L):
+    for j in xrange(W):
+        sys[(i, j)] = 4 * t
+
+        # hoppig in y-direction
+        if j > 0 :
+            sys[(i, j), (i, j-1)] = - t
+
+        #hopping in x-direction
+        if i > 0:
+            sys[(i, j), (i-1, j)] = -t
+
+# Then, define the leads:
+
+# First the lead to the left
+
+# (Note: in the current version, TranslationalSymmetry takes a
+# realspace vector)
+sym_lead0 = kwant.TranslationalSymmetry([lat.vec((-1, 0))])
+lead0 = kwant.Builder(sym_lead0)
+lead0.default_site_group = lat
+
+for j in xrange(W):
+    lead0[(0, j)] = 4 * t
+
+    if j > 0:
+        lead0[(0, j), (0, j-1)] = - t
+
+    lead0[(1, j), (0, j)] = - t
+
+# Then the lead to the right
+
+sym_lead1 = kwant.TranslationalSymmetry([lat.vec((1, 0))])
+lead1 = kwant.Builder(sym_lead1)
+lead1.default_site_group = lat
+
+for j in xrange(W):
+    lead1[(0, j)] = 4 * t
+
+    if j > 0:
+        lead1[(0, j), (0, j-1)] = - t
+
+    lead1[(1, j), (0, j)] = - t
+
+# Then attach the leads to the system
+
+sys.attach_lead(lead0)
+sys.attach_lead(lead1)
+
+# finalize the system
+
+fsys = sys.finalized()
+
+# and plot it, to make sure it's proper
+
+kwant.plot(fsys)
+
+# Now that we have the system, we can compute conductance
+
+energies = []
+data = []
+for ie in xrange(100):
+    energy = ie * 0.01
+
+    # compute the scattering matrix at energy energy
+    smatrix = kwant.solvers.sparse.solve(fsys, energy)
+
+    # compute the transmission probability from lead 0 to
+    # lead 1
+    energies.append(energy)
+    data.append(smatrix.transmission(1, 0))
+
+# Use matplotlib to write output
+# We should see conductance steps
+import pylab
+
+pylab.plot(energies, data)
+pylab.xlabel("energy [in units of t]")
+pylab.ylabel("conductance [in units of e^2/h]")
+pylab.show()
diff --git a/examples/tutorial1b.py b/examples/tutorial1b.py
new file mode 100644
index 0000000000000000000000000000000000000000..cee11853234d23d8267a4b306cbe3f19cc9d55a0
--- /dev/null
+++ b/examples/tutorial1b.py
@@ -0,0 +1,79 @@
+# Physics background
+# ------------------
+#  Conductance of a quantum wire; subbands
+#
+# Kwant features highlighted
+# --------------------------
+#  - Using iterables and possible_hoppings() for making systems
+#  - introducing `reversed()` for the leads
+#
+# Note: Does the same as tutorial1a.py, but using other features of kwant
+#
+
+import kwant
+
+# For plotting
+import pylab
+
+def make_system(a=1, t=1.0, W=10, L=30):
+    # Start with an empty tight-binding system and a single square lattice.
+    # `a` is the lattice constant (by default set to 1 for simplicity.
+    lat = kwant.lattice.Square(a)
+
+    sys = kwant.Builder()
+    sys.default_site_group = lat
+
+    #### Define the scattering region. ####
+    sys[((x, y) for x in range(L) for y in range(W))] = 4 * t
+    for hopping in lat.nearest:
+        sys[sys.possible_hoppings(*hopping)] = -t
+
+    #### Define the leads. ####
+    # First the lead to the left, ...
+    # (Note: in the current version, TranslationalSymmetry takes a
+    # realspace vector)
+    sym_lead0 = kwant.TranslationalSymmetry([lat.vec((-1, 0))])
+    lead0 = kwant.Builder(sym_lead0)
+    lead0.default_site_group = lat
+
+    lead0[((0, j) for j in xrange(W))] = 4 * t
+    for hopping in lat.nearest:
+        lead0[lead0.possible_hoppings(*hopping)] = - t
+
+    # ... then the lead to the right.  We use a method that returns a copy of
+    # `lead0` with its direction reversed.
+    lead1 = lead0.reversed()
+
+    #### Attach the leads and return the finalized system. ####
+    sys.attach_lead(lead0)
+    sys.attach_lead(lead1)
+
+    return sys.finalized()
+
+def plot_conductance(fsys, energies):
+    # Compute conductance
+    data = []
+    for energy in energies:
+        smatrix = kwant.solvers.sparse.solve(fsys, energy)
+        data.append(smatrix.transmission(1, 0))
+
+    pylab.plot(energies, data)
+    pylab.xlabel("energy [in units of t]")
+    pylab.ylabel("conductance [in units of e^2/h]")
+    pylab.show()
+
+
+def main():
+    fsys = make_system()
+
+    # Check that the system looks as intended.
+    kwant.plot(fsys)
+
+    # We should see conductance steps.
+    plot_conductance(fsys, energies=[0.01 * i for i in xrange(100)])
+
+
+# Call the main function if the script gets executed (as opposed to imported).
+# See <http://docs.python.org/library/__main__.html>.
+if __name__ == '__main__':
+    main()
diff --git a/examples/tutorial2a.py b/examples/tutorial2a.py
new file mode 100644
index 0000000000000000000000000000000000000000..30e4ee53ee4275e643b8334e3b4ae2f8f96dac89
--- /dev/null
+++ b/examples/tutorial2a.py
@@ -0,0 +1,96 @@
+# Physics background
+# ------------------
+#  Gaps in quantum wires with spin-orbit coupling and Zeeman splititng,
+#  as theoretically predicted in
+#   http://prl.aps.org/abstract/PRL/v90/i25/e256601
+#  and (supposedly) experimentally oberved in
+#   http://www.nature.com/nphys/journal/v6/n5/abs/nphys1626.html
+#
+# Kwant features highlighted
+# --------------------------
+#  - Numpy matrices as values in Builder
+
+import kwant
+
+# For plotting
+import pylab
+
+# For matrix support
+import numpy
+
+# define Pauli-matrices for convenience
+sigma_0 = numpy.eye(2)
+sigma_x = numpy.array([[0, 1], [1, 0]])
+sigma_y = numpy.array([[0, -1j], [1j, 0]])
+sigma_z = numpy.array([[1, 0], [0, -1]])
+
+
+def make_system(a=1, t=1.0, alpha=0.5, e_z=0.08, W=10, L=30):
+    # Start with an empty tight-binding system and a single square lattice.
+    # `a` is the lattice constant (by default set to 1 for simplicity).
+    lat = kwant.lattice.Square(a)
+
+    sys = kwant.Builder()
+    sys.default_site_group = lat
+
+    #### Define the scattering region. ####
+    sys[((x, y) for x in range(L) for y in range(W))] = 4 * t * sigma_0 + \
+        e_z * sigma_z
+    # hoppings in x-direction
+    sys[sys.possible_hoppings((1, 0), lat, lat)] = - t * sigma_0 - \
+        1j * alpha * sigma_y
+    # hoppings in y-directions
+    sys[sys.possible_hoppings((0, 1), lat, lat)] = - t * sigma_0 + \
+        1j * alpha * sigma_x
+
+    #### Define the leads. ####
+    # left lead
+    sym_lead0 = kwant.TranslationalSymmetry([lat.vec((-1, 0))])
+    lead0 = kwant.Builder(sym_lead0)
+    lead0.default_site_group = lat
+
+    lead0[((0, j) for j in xrange(W))] = 4 * t * sigma_0 + e_z * sigma_z
+    # hoppings in x-direction
+    lead0[lead0.possible_hoppings((1, 0), lat, lat)] = - t * sigma_0 - \
+        1j * alpha * sigma_y
+    # hoppings in y-directions
+    lead0[lead0.possible_hoppings((0, 1), lat, lat)] = - t * sigma_0 + \
+        1j * alpha * sigma_x
+
+    # Then the lead to the right
+    # (again, obtained using reverse()
+    lead1 = lead0.reversed()
+
+    #### Attach the leads and return the finalized system. ####
+    sys.attach_lead(lead0)
+    sys.attach_lead(lead1)
+
+    return sys.finalized()
+
+def plot_conductance(fsys, energies):
+    # Compute conductance
+    data = []
+    for energy in energies:
+        smatrix = kwant.solve(fsys, energy)
+        data.append(smatrix.transmission(1, 0))
+
+    pylab.plot(energies, data)
+    pylab.xlabel("energy [in units of t]")
+    pylab.ylabel("conductance [in units of e^2/h]")
+    pylab.show()
+
+
+def main():
+    fsys = make_system()
+
+    # Check that the system looks as intended.
+    kwant.plot(fsys)
+
+    # We should see non-monotonic conductance steps.
+    plot_conductance(fsys, energies=[0.01 * i - 0.3 for i in xrange(100)])
+
+
+# Call the main function if the script gets executed (as opposed to imported).
+# See <http://docs.python.org/library/__main__.html>.
+if __name__ == '__main__':
+    main()
diff --git a/examples/tutorial2b.py b/examples/tutorial2b.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bd284c5e3dac3eaee79dc01a7c1a6e33df8f618
--- /dev/null
+++ b/examples/tutorial2b.py
@@ -0,0 +1,99 @@
+# Physics background
+# ------------------
+#  transmission through a quantum well
+#
+# Kwant features highlighted
+# --------------------------
+#  - Functions as values in Builder
+
+import kwant
+
+# For plotting
+import pylab
+
+# global variable governing the behavior of potential() in
+# make_system()
+pot = 0
+
+def make_system(a=1, t=1.0, W=10, L=30, L_well=10):
+    # Start with an empty tight-binding system and a single square lattice.
+    # `a` is the lattice constant (by default set to 1 for simplicity.
+    lat = kwant.lattice.Square(a)
+
+    sys = kwant.Builder()
+    sys.default_site_group = lat
+
+    #### Define the scattering region. ####
+    # Potential profile
+    def potential(site):
+        (x, y) = site.pos
+        if (L - L_well) / 2 < x < (L + L_well) / 2:
+            # The potential value is provided using a global variable
+            return pot
+        else:
+            return 0
+
+    def onsite(site):
+        return 4 * t + potential(site)
+
+    sys[((x, y) for x in range(L) for y in range(W))] = onsite
+    for hopping in lat.nearest:
+        sys[sys.possible_hoppings(*hopping)] = -t
+
+    #### Define the leads. ####
+    # First the lead to the left, ...
+    # (Note: in the current version, TranslationalSymmetry takes a
+    # realspace vector)
+    sym_lead0 = kwant.TranslationalSymmetry([lat.vec((-1, 0))])
+    lead0 = kwant.Builder(sym_lead0)
+    lead0.default_site_group = lat
+
+    lead0[((0, j) for j in xrange(W))] = 4 * t
+    for hopping in lat.nearest:
+        lead0[lead0.possible_hoppings(*hopping)] = - t
+
+    # ... then the lead to the right.  We use a method that returns a copy of
+    # `lead0` with its direction reversed.
+    lead1 = lead0.reversed()
+
+    #### Attach the leads and return the finalized system. ####
+    sys.attach_lead(lead0)
+    sys.attach_lead(lead1)
+
+    return sys.finalized()
+
+def plot_conductance(fsys, energy, welldepths):
+    # We specify that we want to not only read, but also write to a
+    # global variable.
+    global pot
+
+    # Compute conductance
+    data = []
+    for welldepth in welldepths:
+        # Set the global variable that defines the potential well depth
+        pot = -welldepth
+
+        smatrix = kwant.solve(fsys, energy)
+        data.append(smatrix.transmission(1, 0))
+
+    pylab.plot(welldepths, data)
+    pylab.xlabel("well depth [in units of t]")
+    pylab.ylabel("conductance [in units of e^2/h]")
+    pylab.show()
+
+
+def main():
+    fsys = make_system()
+
+    # Check that the system looks as intended.
+    kwant.plot(fsys)
+
+    # We should see conductance steps.
+    plot_conductance(fsys, energy=0.2,
+                     welldepths=[0.01 * i for i in xrange(100)])
+
+
+# Call the main function if the script gets executed (as opposed to imported).
+# See <http://docs.python.org/library/__main__.html>.
+if __name__ == '__main__':
+    main()
diff --git a/examples/tutorial2c.py b/examples/tutorial2c.py
new file mode 100644
index 0000000000000000000000000000000000000000..855bbc29d835abaff2fd1999164a1694d2f3b7e8
--- /dev/null
+++ b/examples/tutorial2c.py
@@ -0,0 +1,116 @@
+# Physics background
+# ------------------
+#  Flux-dependent transmission through a quantum ring
+#
+# Kwant features highlighted
+# --------------------------
+#  - More complex shapes with lattices
+#  - Allows for discussion of subtleties of `attach_lead` (not in the
+#    example, but in the tutorial main text)
+#  - Modifcations of hoppings/sites after they have been added
+
+from cmath import exp
+from math import pi
+
+import kwant
+
+# For plotting
+import pylab
+
+def make_system(a=1, t=1.0, W=10, r1=10, r2=20):
+    # Start with an empty tight-binding system and a single square lattice.
+    # `a` is the lattice constant (by default set to 1 for simplicity).
+
+    lat = kwant.lattice.Square(a)
+
+    sys = kwant.Builder()
+
+    #### Define the scattering region. ####
+    # Now, we aim for a more complex shape, namely a ring (or annulus)
+    def ring(pos):
+        (x, y) = pos
+        rsq = x**2 + y**2
+        return ( r1**2 < rsq < r2**2)
+
+    # and add the corresponding lattice points using the `shape`-function
+    sys[lat.shape(ring, (0, 11))] = 4 * t
+    for hopping in lat.nearest:
+        sys[sys.possible_hoppings(*hopping)] = - t
+
+    # In order to introduce a flux through the ring, we introduce a phase
+    # on the hoppings on the line cut through one of the arms
+
+    # since we want to change the flux without modifying Builder repeatedly,
+    # we define the modified hoppings as a function that takes the flux
+    # through the global variable phi.
+    def fluxphase(site1, site2):
+        return exp(1j * phi)
+
+    def crosses_branchcut(hop):
+        ix0, iy0 = hop[0].tag
+
+        # possible_hoppings with the argument (1, 0) below
+        # returns hoppings ordered as ((i+1, j), (i, j))
+        return iy0 < 0 and ix0 == 1 # ix1 == 0 then implied
+
+    # Modify only those hopings in x-direction that cross the branch cut
+    sys[(hop for hop in sys.possible_hoppings((1,0), lat, lat)
+         if crosses_branchcut(hop))] = fluxphase
+
+    #### Define the leads. ####
+    # left lead
+    sym_lead0 = kwant.TranslationalSymmetry([lat.vec((-1, 0))])
+    lead0 = kwant.Builder(sym_lead0)
+
+    def lead_shape(pos):
+        (x, y) = pos
+        return (-1 < x < 1) and ( -W/2 < y < W/2  )
+
+    lead0[lat.shape(lead_shape, (0, 0))] = 4 * t
+    for hopping in lat.nearest:
+        lead0[lead0.possible_hoppings(*hopping)] = - t
+
+    # Then the lead to the right
+    # (again, obtained using reverse()
+    lead1 = lead0.reversed()
+
+    #### Attach the leads and return the finalized system. ####
+    sys.attach_lead(lead0)
+    sys.attach_lead(lead1)
+
+    return sys.finalized()
+
+
+def plot_conductance(fsys, energy, fluxes):
+    # compute conductance
+    # global variable phi controls the flux
+    global phi
+
+    normalized_fluxes = [flux/(2 * pi) for flux in fluxes]
+    data = []
+    for flux in fluxes:
+        phi = flux
+
+        smatrix = kwant.solve(fsys, energy)
+        data.append(smatrix.transmission(1, 0))
+
+    pylab.plot(normalized_fluxes, data)
+    pylab.xlabel("flux [in units of the flux quantum]")
+    pylab.ylabel("conductance [in units of e^2/h]")
+    pylab.show()
+
+
+def main():
+    fsys = make_system()
+
+    # Check that the system looks as intended.
+    kwant.plot(fsys)
+
+    # We should see a conductance that is periodic with the flux quantum
+    plot_conductance(fsys, energy=0.15, fluxes=[0.01 * i * 3 * 2 * pi
+                                                for i in xrange(100)])
+
+# Call the main function if the script gets executed (as opposed to imported).
+# See <http://docs.python.org/library/__main__.html>.
+if __name__ == '__main__':
+    main()
diff --git a/examples/tutorial3a.py b/examples/tutorial3a.py
new file mode 100644
index 0000000000000000000000000000000000000000..63cf1b1363f5041a2e752e69f62f7768da97852c
--- /dev/null
+++ b/examples/tutorial3a.py
@@ -0,0 +1,63 @@
+# Physics background
+# ------------------
+#  band structure of a simple quantum wire in tight-binding approximation
+#
+# Kwant features highlighted
+# --------------------------
+#  - Computing the band structure of a finalized lead.
+
+import kwant
+
+import numpy as np
+from math import pi
+
+# For plotting
+import pylab
+
+
+def make_lead(a=1, t=1.0, W=10):
+    # Start with an empty lead with a single square lattice
+    lat = kwant.lattice.Square(a)
+
+    sym_lead = kwant.TranslationalSymmetry([lat.vec((-1, 0))])
+    lead = kwant.Builder(sym_lead)
+    lead.default_site_group = lat
+
+    # build up one unit cell of the lead, and add the hoppings
+    # to the next unit cell
+    for j in xrange(W):
+        lead[(0, j)] = 4 * t
+
+        if j > 0:
+            lead[(0, j), (0, j-1)] = - t
+
+        lead[(1, j), (0, j)] = - t
+
+    # return a finalized lead
+    return lead.finalized()
+
+
+def plot_bandstructure(flead, momenta):
+    # Use the method ``energies`` of the finalized lead to compute
+    # the bandstructure
+    energy_list = [flead.energies(k) for k in momenta]
+
+    pylab.plot(momenta, energy_list)
+    pylab.xlabel("momentum [in untis of (lattice constant)^-1]")
+    pylab.ylabel("energy [in units of t]")
+    pylab.show()
+
+
+def main():
+    flead = make_lead()
+
+    # list of momenta at which the bands should be computed
+    momenta = np.arange(-pi, pi + .01, 0.02 * pi)
+
+    plot_bandstructure(flead, momenta)
+
+
+# Call the main function if the script gets executed (as opposed to imported).
+# See <http://docs.python.org/library/__main__.html>.
+if __name__ == '__main__':
+    main()
diff --git a/examples/tutorial3b.py b/examples/tutorial3b.py
new file mode 100644
index 0000000000000000000000000000000000000000..11240f01585c20cf7ca02add85ee538e94cd95bf
--- /dev/null
+++ b/examples/tutorial3b.py
@@ -0,0 +1,91 @@
+# Physics background
+# ------------------
+#  Fock-darwin spectrum of a quantum dot (energy spectrum in
+#  as a function of a magnetic field)
+#
+# Kwant features highlighted
+# --------------------------
+#  - Use of `hamiltonian_submatrix` in order to obtain a Hamiltonian
+#    matrix.
+
+
+from cmath import exp
+import kwant
+
+# For eigenvalue computation
+import scipy.linalg as la
+
+# For plotting
+import pylab
+
+def make_system(a=1, t=1.0, r=10):
+    # Start with an empty tight-binding system and a single square lattice.
+    # `a` is the lattice constant (by default set to 1 for simplicity).
+
+    lat = kwant.lattice.Square(a)
+
+    sys = kwant.Builder()
+
+    # Define the quantum dot
+    def circle(pos):
+        (x, y) = pos
+        rsq = x**2 + y**2
+        return rsq < r**2
+
+    def hopx(site1, site2):
+        # The magnetic field is controlled by the global variable B
+        y = site1.pos[1]
+        return - t * exp(-1j * B * y)
+
+    sys[lat.shape(circle, (0, 0))] = 4 * t
+    # hoppings in x-direction
+    sys[sys.possible_hoppings((1, 0), lat, lat)] = hopx
+    # hoppings in y-directions
+    sys[sys.possible_hoppings((0, 1), lat, lat)] = - t
+
+    # It's a closed system for a change, so no leads
+    return sys.finalized()
+
+
+def plot_spectrum(fsys, Bfields):
+    # global variable B controls the magnetic field
+    global B
+
+    # In the following, we compute the spectrum of the quantum dot
+    # using dense matrix methods. This works in this toy example, as
+    # the system is tiny. In a real example, one would want to use
+    # sparse matrix methods
+
+    energies = []
+    for Bfield in Bfields:
+        B = Bfield
+
+        # Obtain the Hamiltonian as a dense matrix
+        ham_mat = fsys.hamiltonian_submatrix()
+
+        ev = la.eigh(ham_mat, eigvals_only=True)
+
+        # we only plot the 15 lowest eigenvalues
+        energies.append(ev[:15])
+
+    pylab.plot(Bfields, energies)
+    pylab.xlabel("magnetic field [some arbitrary units]")
+    pylab.ylabel("energy [in units of t]")
+    pylab.show()
+
+
+def main():
+    fsys = make_system()
+
+    # Check that the system looks as intended.
+    kwant.plot(fsys)
+
+    # We should observe energy levels that flow towards Landau
+    # level energies with increasing magnetic field
+    plot_spectrum(fsys, [iB * 0.002 for iB in xrange(100)])
+
+
+# Call the main function if the script gets executed (as opposed to imported).
+# See <http://docs.python.org/library/__main__.html>.
+if __name__ == '__main__':
+    main()
diff --git a/examples/tutorial4.py b/examples/tutorial4.py
new file mode 100644
index 0000000000000000000000000000000000000000..74ea9e6f74270ab09ac47e5c06bac602cf2e1764
--- /dev/null
+++ b/examples/tutorial4.py
@@ -0,0 +1,165 @@
+# Physics background
+# ------------------
+#  Transport through a graphene quantum dot with a pn-junction
+#
+# Kwant features highlighted
+# --------------------------
+#  - Application of all the aspects of tutorials 1-3 to a more complicated
+#    lattice, namely graphene
+
+from __future__ import division # so that 1/2 == 0.5, and not 0
+from math import pi, sqrt
+import numpy as np
+
+import kwant
+
+# For computing eigenvalues
+import scipy.sparse.linalg as sla
+
+# For plotting
+import pylab
+
+
+# Define the graphene lattice
+sin_30, cos_30 = (1/2, np.sqrt(3)/2)
+graphene = kwant.make_lattice([(1, 0), (sin_30, cos_30)],
+                              [(0, 0), (0, 1/np.sqrt(3))])
+a, b = graphene.sublattices
+
+
+def make_system(r=10, w=2.0, pot=0.1):
+
+    #### Define the scattering region. ####
+    # circular scattering region
+    def circle(pos):
+        x, y = pos
+        return x**2 + y**2 < r**2
+
+    sys= kwant.Builder()
+
+    # w: width and pot: potential maximum of the p-n junction
+    def potential(site):
+        (x, y) = site.pos
+        d = y * cos_30 + x * sin_30
+        return pot * np.tanh(d / w)
+
+    sys[graphene.shape(circle, (0,0))] = potential
+
+    # specify the hoppings of the graphene lattice in the
+    # format expected by possibe_hoppings()
+    hoppings = (((0, 0), b, a), ((0, 1), b, a), ((-1, 1), b, a))
+    for hopping in hoppings:
+        sys[sys.possible_hoppings(*hopping)] = - 1
+
+    # Modify the scattering region
+    del sys[a(0,0)]
+    sys[a(-2,1), b(2, 2)] = -1
+
+    # Keep a copy of the closed system without leads, for
+    # eigenvalue computations
+    closed_fsys = sys.finalized()
+
+    #### Define the leads. ####
+    # left lead
+    sym0 = kwant.TranslationalSymmetry([graphene.vec((-1, 0))])
+
+    def lead0_shape(pos):
+        x, y = pos
+        return (-1 < x < 1) and (-0.4 * r < y < 0.4 * r)
+
+    lead0 = kwant.Builder(sym0)
+    lead0[graphene.shape(lead0_shape, (0,0))] = - pot
+    for hopping in hoppings:
+        lead0[lead0.possible_hoppings(*hopping)] = - 1
+
+    # The second lead, going ot the top right
+    sym1 = kwant.TranslationalSymmetry([graphene.vec((0, 1))])
+
+    def lead1_shape(pos):
+        x, y = pos
+        u = x * sin_30 + y * cos_30
+        v = y * sin_30 - x * cos_30
+        return (-1 < u < 1) and (-0.4 * r < v < 0.4 * r)
+
+    lead1 = kwant.Builder(sym1)
+    lead1[graphene.shape(lead1_shape, (0,0))] = pot
+    for hopping in hoppings:
+        lead1[lead1.possible_hoppings(*hopping)] = - 1
+
+    # Attach the leads
+    sys.attach_lead(lead0)
+    sys.attach_lead(lead1)
+
+    return sys.finalized(), closed_fsys, lead0.finalized()
+
+
+def compute_evs(sys):
+    # Compute some eigenvalues of the closed system
+    sparse_mat = sys.hamiltonian_submatrix(sparse=True)
+
+    try:
+        # This requires scipy version >= 0.9.0
+        # Failure (i.e. insufficient scipy version) is not critical
+        # for the remainder of the tutorial, hence the try-block
+        evs = scipy.sparse.linalg.eigs(sparse_mat, 2)[0]
+        print evs
+    except:
+        pass
+
+
+def plot_conductance(fsys, energies):
+    # Compute transmission as a function of energy
+    data = []
+    for energy in energies:
+        smatrix = kwant.solve(fsys, energy)
+        data.append(smatrix.transmission(0, 1))
+
+    pylab.plot(energies, data)
+    pylab.xlabel("energy [in units of t]")
+    pylab.ylabel("conductance [in units of e^2/h]")
+    pylab.show()
+
+
+def plot_bandstructure(flead, momenta):
+    # Use the method ``energies`` of the finalized lead to compute
+    # the bandstructure
+    energy_list = [flead.energies(k) for k in momenta]
+
+    pylab.plot(momenta, energy_list)
+    pylab.xlabel("momentum [in untis of (lattice constant)^-1]")
+    pylab.ylabel("energy [in units of t]")
+    pylab.show()
+
+
+def main():
+    pot = 0.1
+    fsys, closed_fsys, flead = make_system(pot=pot)
+
+    # First, plot the closed system, and compute some eigenvalues
+
+    # To highlight the two sublattices of graphene, we plot one with
+    # a filled, and the other one with an open circle:
+    plotter_symbols = {a: kwant.plotter.Circle(r=0.3),
+                       b: kwant.plotter.Circle(r=0.3,
+                                               fcol=kwant.plotter.white,
+                                               lcol=kwant.plotter.black)}
+
+    kwant.plot(closed_fsys, a=1./sqrt(3.), symbols=plotter_symbols)
+    compute_evs(closed_fsys)
+
+    # Then, plot the system with leads and compute the band structure
+    # of one of the (zigzag) leads, as well as the conductance
+
+    kwant.plot(fsys, a=1/sqrt(3.), symbols=plotter_symbols)
+
+    momenta = np.arange(-pi, pi + .01, 0.1 * pi)
+    plot_bandstructure(flead, momenta)
+
+    energies = np.arange(-2 * pot, 2 * pot, pot / 10.5)
+    plot_conductance(fsys, energies)
+
+
+# Call the main function if the script gets executed (as opposed to imported).
+# See <http://docs.python.org/library/__main__.html>.
+if __name__ == '__main__':
+    main()
diff --git a/kwant/__init__.py b/kwant/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..887f916559b8953c8c8daee3b330b9432e667d2c
--- /dev/null
+++ b/kwant/__init__.py
@@ -0,0 +1,22 @@
+__all__ = ['system', 'version', 'builder', 'lattice', 'run']
+for module in __all__:
+    exec 'from . import {0}'.format(module)
+
+from .builder import Builder
+__all__.append('Builder')
+
+from .lattice import make_lattice, TranslationalSymmetry
+__all__.extend(['make_lattice', 'TranslationalSymmetry'])
+
+# Importing plotter might not work, but this does not have to be a problem --
+# only no plotting will be available.
+try:
+    from . import plotter
+    from .plotter import plot
+except:
+    pass
+else:
+    __all__.extend(['plotter', 'plot'])
+
+from .solvers.sparse import solve
+__all__.append('solve')
diff --git a/kwant/builder.py b/kwant/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3f3313f578a0d71655de5a1801590287d9dc1e8
--- /dev/null
+++ b/kwant/builder.py
@@ -0,0 +1,1402 @@
+from __future__ import division
+
+__all__ = ['Builder', 'Site', 'SiteGroup', 'SimpleSiteGroup', 'Symmetry',
+           'Lead', 'BuilderLead', 'SelfEnergy']
+
+import struct, abc
+from itertools import izip, islice, chain
+from collections import Iterable, Sequence
+import numpy as np
+from kwant import graph
+from . import system
+
+
+################ Sites and site groups
+
+class Site(object):
+    """A site, member of a `SiteGroup`.
+
+    Sites are the vertices of the graph which describes the tight binding
+    system in a `Builder`.
+
+    A site is uniquely identified by its group and its tag.
+
+    Parameters
+    ----------
+    group : an instance of `SiteGroup`
+        the 'type' of the site.
+    tag : a hashable python object
+        the personal idenifier of the site e. g. its number.
+
+    Attributes
+    ----------
+    pos : sequence of numbers
+        The real space position of the site.  Used for plotting, for example.
+
+    Raises
+    ------
+    ValueError
+        If ``tag`` is not a proper tag for ``group``.
+
+    Notes
+    -----
+    For convenience, ``group(*tag)`` can be used instead of ``Site(group,
+    tag)`` to create a site.
+
+    The parameters of the constructor (see above) are stored as instance
+    variables under the same names.  Given a site ``site``, common things to
+    query are thus ``site.group``, ``site.tag``, and ``site.pos``.
+    """
+    __slots__ = ['group', 'tag']
+
+    def __init__(self, group, tag):
+        self.group = group
+        if not group.verify_tag(tag):
+            msg = 'Tag {0} is not an allowed tag for site group {1}.'
+            raise ValueError(msg.format(repr(tag), repr(group)))
+        self.tag = tag
+
+    def packed(self):
+        """Create a string storing all the site data."""
+        group = self.group
+        return group.packed_group_id + group.pack_tag(self.tag)
+
+    def shifted(self, delta, group=None):
+        """Return a copy of the site, displaced by delta.
+
+        Parameters
+        ----------
+        delta : sequence of integers
+            The vector by which to displace the site.
+        group : `SiteGroup`
+            Site group of the returned site.  If no site group is provided, the
+            original one is kept.
+
+        Returns
+        -------
+        new_site : `Site`
+            A site shifted by `delta` with site group optionally set to
+            `group`.
+
+        Notes
+        -----
+        This method *works* only if the site for which it is called has a tag
+        which is a sequences of integers.  It *make sense* only when this sites
+        lives on a regular lattice, like one provided by `kwant.lattice`.
+        """
+        if group is None:
+            group = self.group
+        tag = self.tag
+        if len(tag) != len(delta):
+            raise ValueError('Dimensionality mismatch')
+        return group(*tuple(a + b for a, b in izip(tag, delta)))
+
+
+    def __hash__(self):
+        return id(self.group) ^ hash(self.tag)
+
+    def __eq__(self, other):
+        return self.group is other.group and self.tag == other.tag
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __repr__(self):
+        return 'Site({0}, {1})'.format(repr(self.group), repr(self.tag))
+
+    @property
+    def pos(self):
+        """Real space position of the site."""
+        return self.group.pos(self.tag)
+
+
+class SiteGroup(object):
+    """
+    Abstract base class for site groups.
+
+    A site group is a 'type' of sites.  All the site groups must inherit from
+    this basic one.  They have to define the following methods `pack_tag`,
+    `unpack_tag` and `verify_tag`.
+
+    Site groups which are intended for use with plotting should also provide a
+    method `pos(tag)`, which returns a vector with real space coordinates of
+    the site belonging to this group with a given tag.
+    """
+    __metaclass__ = abc.ABCMeta
+
+    def __init__(self):
+        self.packed_group_id = pgid_of_group(self)
+
+    def __repr__(self):
+        return '<{0} at {1}>'.format(self.__class__.__name__, hex(id(self)))
+
+    @abc.abstractmethod
+    def pack_tag(self, tag):
+        """Return a string storing tag data."""
+        pass
+
+    @abc.abstractmethod
+    def unpack_tag(self, ptag):
+        """Create a tag given a string with its packed content."""
+        pass
+
+    @abc.abstractmethod
+    def verify_tag(self, tag):
+        """Verify if the tag is a legitimate tag for this site group."""
+        pass
+
+    def __call__(self, *tag):
+        """
+        A convenience function.
+
+        This function allows to write sg(1, 2) instead of Site(sg, (1, 2)).
+        """
+        # Catch a likely and difficult to find mistake.
+        if tag and isinstance(tag[0], tuple):
+            raise ValueError('Use site_group(1, 2) instead of '
+                             'site_group((1, 2))!')
+        return Site(self, tag)
+
+
+class SimpleSiteGroup(SiteGroup):
+    """A site group used as an example and for testing.
+
+    A group of sites tagged by any python objects where object satisfied
+    condition ``object == eval(repr(object))``.
+
+    It exists to provide a basic site group that can be used for testing the
+    builder module without other dependencies.  It can be also used to tag
+    sites with non-numeric objects like strings should this every be useful.
+
+    Due to its low storage efficiency for numbers it is not recommended to use
+    `SimpleSiteGroup` when `kwant.lattice.MonatomicLattice` would also work.
+    """
+    def pack_tag(self, tag):
+        return repr(tag)
+
+    def unpack_tag(self, ptag):
+        return eval(ptag)
+
+    def verify_tag(self, tag):
+        return eval(repr(tag)) == tag
+
+
+# This is used for packing and unpacking group ids (gids).
+gid_pack_fmt = '@P'
+gid_pack_size = len(struct.pack(gid_pack_fmt, id(None)))
+
+def pgid_of_group(group):
+    assert isinstance(group, SiteGroup)
+    return struct.pack(gid_pack_fmt, id(group))
+
+
+# The reason why this is a global function and not a method of Builder is that
+# this functionality is also needed by finalized systems.
+def unpack(psite, group_by_pgid):
+    """Unpack a complete site (packed site group + packed tag).
+
+    This function is for internal use in `builder` module.
+    """
+    pgid = psite[:gid_pack_size]
+    try:
+        group = group_by_pgid[pgid]
+    except:
+        raise RuntimeError('Unknown site group id.')
+    return Site(group, group.unpack_tag(psite[gid_pack_size:]))
+
+
+class SequenceOfSites(Sequence):
+    """An immutable sequence of sites.
+
+    The sites are stored packed, but this is invisible to the user.
+    """
+    def __init__(self, sites):
+        self.group_by_pgid = {}
+        self.psites = []
+        for site in sites:
+            psite = site.packed()
+            pgid = psite[:gid_pack_size]
+            if pgid not in self.group_by_pgid:
+                self.group_by_pgid[pgid] = site.group
+            self.psites.append(psite)
+
+    def __getitem__(self, index):
+        return unpack(self.psites[index], self.group_by_pgid)
+
+    def __len__(self):
+        return len(self.psites)
+
+
+################ Symmetries
+
+class Symmetry(object):
+    """Abstract base class for spatial symmetries.
+
+    Many physical systems possess discrete spatial symmetry, which results in
+    special properties of these systems.  This class is a standard tool to
+    describe discrete spatial symmetries in kwant, where the symmetry of a
+    `Builder` is specified at its creation.  The most important kind of the
+    symmetry is translational symmetry, used to define scattering leads.  This
+    class is designed with translational symmetry in mind, and will possibly be
+    modified/extended in future.
+
+    Each symmetry has a fundamental domain -- a set of sites and hoppings,
+    generating all the possible sites and hoppings upon action of symmetry
+    group elements.  The class derived from `Symmetry` has to implement mapping
+    of any site or hopping (a tuple of two sites) into the fundamental domain,
+    applying a symmetry group element to a site or a hopping, and a method
+    `which` to determine the group element bringing some site from the
+    fundamental domain to the requested one.  Additionally, it has to have a
+    property `num_directions` returning the number of independent symmetry
+    group generators (number of elementary periods for translational symmetry).
+    """
+    __metaclass__ = abc.ABCMeta
+
+    @abc.abstractproperty
+    def num_directions(self):
+        """Number of elementary periods of the symmetry."""
+        pass
+
+    @abc.abstractmethod
+    def which(self, site):
+        """Calculate the domain of the site.
+
+        Return the group element whose action on a certain site from the
+        fundamental domain will result in the given `site`.
+        """
+        pass
+
+    @abc.abstractmethod
+    def act(self, element, a, b=None):
+        """Act with a symmetry group element on a site or hopping."""
+        pass
+
+    def to_fd(self, a, b=None):
+        """Map a site or hopping to the fundamental domain.
+
+        If `b` is None, return a site equivalent to `a` within the fundamental
+        domain.  Otherwise, return a hopping equivalent to `(a, b)` but where
+        the first element belongs to the fundamental domain.
+
+        This default implementation works but may be not efficient.
+        """
+        group_element = tuple(-x for x in self.which(a))
+        return self.act(group_element, a, b)
+
+    def in_fd(self, site):
+        """Tell whether `site` lies within the fundamental domain."""
+        for d in self.which(site):
+            if d != 0:
+                return False
+        return True
+
+
+class NoSymmetry(Symmetry):
+    """A symmetry with a trivial symmetry group."""
+
+    def __repr__(self):
+        return 'NoSymmetry()'
+
+    @property
+    def num_directions(self):
+        return 0
+
+    def which(self, site):
+        return ()
+
+    def act(self, element, a, b=None):
+        if element:
+            raise ValueError('`element` must be empty for NoSymmetry.')
+
+    def to_fd(self, a, b=None):
+        return a if b is None else (a, b)
+
+    def in_fd(self, site):
+        return True
+
+
+################ In-place modifiable graph
+
+def edges(seq):
+    iter = izip(islice(seq, 0, None, 2), islice(seq, 1, None, 2))
+    next(iter)                  # Skip the special loop edge.
+    return iter
+
+
+class Graph(object):
+    """A directed graph optimized for efficient querying and modification.
+
+    The nodes are labeled by python objects which must be usable as dictionary
+    keys.  Each edge, specified by a ``(tail, head)`` pair of nodes, holds an
+    object as a value.  Likewise, each tail which occurs in the graph also
+    holds a value.  (Nodes which only occur as heads are not required to have
+    values.)
+
+    This class is made for internal use.
+    """
+    def __init__(self):
+        # The graph is stored in this dictionary.  For a given `tail`,
+        # self.hvhv_by_tail[tail] is a list alternately storing heads and
+        # values.  (The heads occupy even locations followed by the values at
+        # odd locations.)  Each pair of entries thus describes a single
+        # directed edge of the graph.
+        #
+        # The first pair of entries in each list is special: it always
+        # corresponds to a loop edge.  (The head is equal to the tail.)  This
+        # special edge has two purposes: It is used to store the value
+        # associated with the tail node itself, and it is necessary for the
+        # method getkey_tail which helps to conserve memory by storing equal
+        # node label only once.
+        self.hvhv_by_tail = {}
+
+    def __nonzero__(self):
+        return bool(self.hvhv_by_tail)
+
+    def getkey_tail(self, tail):
+        """Return the object equal to `tail` which is already referenced.
+
+        This method can be used to save memory by avoiding storing several
+        copies of equal node labels."""
+        return self.hvhv_by_tail[tail][0]
+
+    def getitem_tail(self, tail):
+        """Return the value of a tail."""
+        return self.hvhv_by_tail[tail][1]
+
+    def getitem_edge(self, edge):
+        """Return the value of an edge."""
+        tail, head = edge
+        for h, value in edges(self.hvhv_by_tail[tail]):
+            if h == head:
+                return value
+        raise KeyError(edge)
+
+    def setitem_tail(self, tail, value):
+        """Set the value of a tail."""
+        hvhv = self.hvhv_by_tail.setdefault(tail, [])
+        if hvhv:
+            hvhv[1] = value
+        else:
+            hvhv[:] = [tail, value]
+
+    def setitem_edge(self, edge, value):
+        """Set the value of an edge."""
+        tail, head = edge
+        hvhv = self.hvhv_by_tail[tail]
+        heads = hvhv[2::2]
+        try:
+            i = 2 + 2 * heads.index(head)
+        except ValueError:
+            hvhv.append(head)
+            hvhv.append(value)
+        else:
+            hvhv[i] = head
+            hvhv[i + 1] = value
+
+    def delitem_tail(self, tail):
+        """Delete a tail."""
+        del self.hvhv_by_tail[tail]
+
+    def pop_tail(self, tail):
+        """Delete a tail and return its value."""
+        return self.hvhv_by_tail.pop(tail)[1]
+
+    def delitem_edge(self, edge):
+        """Delete an edge."""
+        tail, head = edge
+        hvhv = self.hvhv_by_tail[tail]
+        heads = hvhv[2::2]
+        try:
+            i = 2 + 2 * heads.index(head)
+        except ValueError:
+            raise KeyError(edge)
+        del hvhv[i : i + 2]
+
+    def pop_edge(self, edge):
+        """Delete an edge and return its value."""
+        tail, head = edge
+        hvhv = self.hvhv_by_tail[tail]
+        heads = hvhv[2::2]
+        try:
+            i = 2 + 2 * heads.index(head)
+        except ValueError:
+            raise KeyError(edge)
+        value = hvhv[i + 1]
+        del hvhv[i : i + 2]
+        return value
+
+    def num_edges(self):
+        return sum(len(hvhv) - 2
+                   for hvhv in self.hvhv_by_tail.itervalues()) // 2
+
+    def edges(self):
+        """Return an iterator over all edges."""
+        for tail, hvhv in self.hvhv_by_tail.iteritems():
+            for head in islice(hvhv, 2, None, 2):
+                yield tail, head
+
+    def edge_value_pairs(self):
+        """Return an iterator over all ``(edge, value)`` pairs."""
+        for tail, hvhv in self.hvhv_by_tail.iteritems():
+            for head, value in edges(hvhv):
+                yield (tail, head), value
+
+    def tails(self):
+        """
+        Return a view of (python2: iterator over) all the tails of the graph.
+        """
+        try:
+            return self.hvhv_by_tail.viewkeys()
+        except AttributeError:
+            return iter(self.hvhv_by_tail)
+
+    def tail_value_pairs(self):
+        """Return an iterator over all ``(tails, value)`` pairs. """
+        for tail, hvhv in self.hvhv_by_tail.iteritems():
+            yield tail, hvhv[1]
+
+    def has_tail(self, tail):
+        """Return whether the graph contains a certain tail."""
+        return tail in self.hvhv_by_tail
+
+    def has_edge(self, edge):
+        """Return whether the graph contains a certain edge."""
+        tail, head = edge
+        hvhv = self.hvhv_by_tail.get(tail, [])
+        return head in islice(hvhv, 2, None, 2)
+
+    def out_neighbors(self, tail):
+        hvhv = self.hvhv_by_tail.get(tail, [])
+        return islice(hvhv, 2, None, 2)
+
+    def out_degree(self, tail):
+        hvhv = self.hvhv_by_tail.get(tail, [])
+        return len(hvhv) // 2 - 1
+
+
+################ Support for Hermitian conjugation
+
+def herm_conj(value):
+    """
+    Calculate the hermitian conjugate of a python object.
+
+    If the object is neither a complex number nor a matrix, the original value
+    is returned.  In the context of this module, this is the correct behavior
+    for functions.
+    """
+    if hasattr(value, 'conjugate'):
+        value = value.conjugate()
+        if hasattr(value, 'transpose'):
+            value = value.transpose()
+    return value
+
+
+class HermConjOfFunc(object):
+    """Proxy returning the hermitian conjugate of the original result."""
+    __slots__ = ('function')
+    def __init__(self, function):
+        self.function = function
+
+    def __call__(self, i, j):
+        return herm_conj(self.function(j, i))
+
+
+################ Leads
+
+class Lead(object):
+    """Abstract base class for leads that can be attached to a `Builder`.
+
+    Instance Variables
+    ------------------
+    neighbors : sequence of sites
+    """
+    __metaclass__ = abc.ABCMeta
+
+    @abc.abstractmethod
+    def finalized():
+        """Return a finalized version of the lead.
+
+        Returns
+        -------
+        finalized_lead
+
+        Notes
+        -----
+        The finalized lead must at least have a single method
+        ``self_energy(energy)`` but it can be a full
+        `kwant.system.InfiniteSystem` as well.
+
+        The method ``self_energy`` of the finalized lead must return a square
+        matrix of appropriate size.
+
+        The order of neighbors is assumed to be preserved during finalization.
+        """
+        pass
+
+
+class BuilderLead(Lead):
+    """A lead made from a `Builder` with a spatial symmetry.
+
+    Parameters
+    ----------
+    builder : `Builder`
+        The tight-binding system of a lead. It has to possess appropriate
+        symmetry, and it may not contain hoppings between further than
+        neighboring lead slices.
+    neighbors : sequence of `Site` instances
+        Sequence of sites in the scattering region to which the lead is
+        attached.
+
+    Notes
+    -----
+    The hopping from the scattering region to the lead is assumed to be
+    equal to the hopping from a lead slice to the next one in the direction of
+    the symmetry vector (i.e. the lead is 'leaving' the system and starts
+    with a hopping).
+
+    The given order of neighbors is preserved throughout finalization.
+
+    Every system has an attribute `leads`, which stores a list of
+    `BuilderLead` objects with all the information about the leads that are
+    attached.
+    """
+    def __init__(self, builder, neighbors):
+        self.builder = builder
+        self.neighbors = SequenceOfSites(neighbors)
+
+    def finalized(self):
+        """Return a `kwant.system.InfiniteSystem` corresponding to the
+        compressed lead.
+
+        The order of neighbors is kept during finalization.
+        """
+        return self.builder._finalized_infinite(self.neighbors)
+
+
+class SelfEnergy(Lead):
+    """A general lead defined by its self energy.
+
+    Parameters
+    ----------
+    self_energy_func : function
+        Function which returns the self energy matrix for the neighbors given
+        the energy.
+    neighbors : sequence of `Site` instances
+    """
+    def __init__(self, self_energy_func, neighbors):
+        self.self_energy_func = self_energy_func
+        self.neighbors = SequenceOfSites(neighbors)
+
+    def finalized(self):
+        """Trivial finalization: the object is returned itself."""
+        return self
+
+    def self_energy(self, energy):
+        return self.self_energy_func(energy)
+
+
+################ Builder class
+
+def is_sitelike(key):
+    """Determine whether key is similar to a site.
+
+    Returns True if `key` is potentially sitelike, False if `key` is
+    potentially hoppinglike, None if it is neither."""
+    if isinstance(key, Site):
+        return True
+    if not isinstance(key, tuple):
+        return None
+    if not key:
+        raise KeyError(key)
+    first = key[0]
+    return not (isinstance(first, Site) or isinstance(first, tuple))
+
+
+def for_each_in_key(key, f_site, f_hopp):
+    """Perform an operation on each site or hopping in key.
+
+    Key may be
+    * a single sitelike or hoppinglike object,
+    * a non-tuple iterable of sitelike objects,
+    * a non-tuple iterable of hoppinglike objects.
+    """
+    isl = is_sitelike(key)
+    if isl is not None:
+        if isl:
+            f_site(key)
+        else:
+            f_hopp(key)
+    elif isinstance(key, Iterable) and not isinstance(key, tuple):
+        ikey = iter(key)
+        try:
+            first = next(ikey)
+        except StopIteration:
+            return
+        isl = is_sitelike(first)
+        if isl is None:
+            raise KeyError(first)
+        else:
+            if isl:
+                f_site(first)
+                for sitelike in ikey:
+                    f_site(sitelike)
+            else:
+                f_hopp(first)
+                for hoppinglike in ikey:
+                    f_hopp(hoppinglike)
+    else:
+        raise KeyError(key)
+
+
+# Marker which means for hopping (i, j): this value is given by the Hermitian
+# conjugate the value of the hopping (j, i).  Used by Builder and System.
+other = []
+
+
+class Builder(object):
+    """A tight binding system defined on a graph.
+
+    This is one of the central types in kwant.  It is used to construct tight
+    binding systems in a flexible way.
+
+    The nodes of the graph are `Site` instances.  The edges, i.e. the hoppings,
+    are pairs (2-tuples) of sites.  Each node and each edge has a value
+    associated with it.  That value can be in fact any python object, but
+    currently the only *useful* values are matrices and numbers or functions
+    returning them.  The values associated with nodes are interpreted as
+    on-site Hamiltonians, the ones associated with edges as hopping integrals.
+
+    To make the graph accessible in a way that is natural within the python
+    language it is exposed as a *mapping* (much like a built-in Python
+    dictionary).  Keys are sites or pairs of them.  Possible values are 2d
+    NumPy arrays, numbers (interpreted as 1 by 1 matrices), or functions.
+    Functions receive the site or the hopping (passed to the function as two
+    sites) and are expected to return a valid value.
+
+    Builder instances can be made to automatically respect a `Symmetry` that is
+    passed to them during creation.
+
+    Parameters
+    ----------
+    symmetry : `Symmetry` or `None`
+        The symmetry of the system.
+
+    Instance Variables
+    ------------------
+    default_site_group : `SiteGroup` or `None`
+        Defaults falue is `None`
+
+    Notes
+    -----
+    The instance variable `default_site_group` can be set to a `SiteGroup`
+    instance.  Then, whenever a `Site` would have been acceptable as parameter
+    to the methods of the builder, a non-site ``tag`` object will be also
+    accepted.  The ``tag`` will be converted into a site in the following way:
+    ``Site(default_site_group, tag)``.
+
+    Builder instances automatically ensure that every hopping is Hermitian, so
+    that if ``builder[a, b]`` has been set, there is no need to set
+    ``builder[b, a]``.
+
+    Values which are functions allow to define systems quickly, to modify them
+    without reconstructing, and to save memory for many-orbital models.
+
+    The behavior of builders with a symmetry is slightly more sophisticated.
+    First of all, it is implicitly assumed throughout kwant that **every**
+    function assigned as a value to a builder with a symmetry possesses the
+    same symmetry.  Secondly, all keys are mapped to the fundamental before
+    storing them.  This may produce confusing results when neighbors of a site
+    are queried.
+
+    The methods `possible_hoppings` and `attach_lead` *work* only if the sites
+    affected by them have tags which are sequences of integers.  They *make
+    sense* only when these sites live on a regular lattice, like one provided
+    by `kwant.lattice`.
+
+    .. warning::
+
+        If functions are used to set values in a builder with a symmetry, then
+        they must satisfy the same symmetry.  There is (currently) no check and
+        wrong results will be the consequence of a misbehaving function.
+
+    Examples
+    --------
+    Define a site.
+
+    >>> builder[site] = value
+
+    Print the value of a site.
+
+    >>> print builder[site]
+
+    Define a hopping.
+
+    >>> builder[site1, site2] = value
+
+    Delete a site.
+
+    >>> del builder[site3]
+
+    """
+    def __init__(self, symmetry=None):
+        if symmetry is None:
+            symmetry = NoSymmetry()
+        self.symmetry = symmetry
+        self.default_site_group = None
+        self.leads = []
+        self._ham = Graph()
+        self._group_by_pgid = {}
+
+    def reversed(self):
+        """Return a shallow copy of the builder with the symmetry reversed.
+
+        This method can be used to attach the same infinite system as lead from
+        two opposite sides.  It requires a builder to which an infinite
+        symmetry is associated.
+        """
+        result = object.__new__(Builder)
+        result.symmetry = self.symmetry.reversed()
+        result.default_site_group = self.default_site_group
+        if self.leads:
+            raise ValueError('System to be reversed may not have leads.')
+        result.leads = []
+        result._ham = self._ham
+        result._group_by_pgid = self._group_by_pgid
+        return result
+
+    def _to_site(self, sitelike):
+        """Convert `sitelike` to a site.
+
+        Sitelike can be
+        * a site, (It is returned unmodified.)
+        * a tag. (Works only if self.default_site_group is not None.)
+        """
+        if isinstance(sitelike, Site):
+            return sitelike
+        dsg = self.default_site_group
+        if dsg is not None:
+            return Site(dsg, sitelike)
+        raise KeyError(sitelike)
+
+    def __nonzero__(self):
+        return bool(self._ham)
+
+    def _get_site(self, sitelike):
+        psite = self.symmetry.to_fd(self._to_site(sitelike)).packed()
+        try:
+            return self._ham.getitem_tail(psite)
+        except KeyError:
+            raise KeyError(sitelike)
+
+    def _get_hopping(self, hoppinglike):
+        ts = self._to_site
+        sym = self.symmetry
+        try:
+            a, b = hoppinglike
+        except:
+            raise KeyError(hoppinglike)
+        try:
+            a, b = sym.to_fd(ts(a), ts(b))
+            pa, pb = a.packed(), b.packed()
+            value = self._ham.getitem_edge((pa, pb))
+        except KeyError:
+            raise KeyError(hoppinglike)
+        if value is other:
+            if not sym.in_fd(b):
+                b, a = sym.to_fd(b, a)
+                assert not sym.in_fd(a)
+                pb, pa = b.packed(), a.packed()
+            value = self._ham.getitem_edge((pb, pa))
+            if hasattr(value, '__call__'):
+                assert not isinstance(value, HermConjOfFunc)
+                value = HermConjOfFunc(value)
+            else:
+                value = herm_conj(value)
+        return value
+
+    def __getitem__(self, key):
+        """Get the value of a single site or hopping."""
+        isl = is_sitelike(key)
+        if isl is None:
+            raise KeyError(key)
+        if isl:
+            return self._get_site(key)
+        else:
+            return self._get_hopping(key)
+
+    def __contains__(self, key):
+        """Tell whether the system contains a site or hopping."""
+        isl = is_sitelike(key)
+        if isl is None:
+            raise KeyError(key)
+        if isl:
+            site = self.symmetry.to_fd(self._to_site(key))
+            return self._ham.has_tail(site.packed())
+        else:
+            ts = self._to_site
+            a, b = key
+            a, b = self.symmetry.to_fd(ts(a), ts(b))
+            return self._ham.has_edge((a.packed(), b.packed()))
+
+    def _set_site(self, sitelike, value):
+        """Set a single site."""
+        site = self.symmetry.to_fd(self._to_site(sitelike))
+        psite = site.packed()
+        pgid = psite[:gid_pack_size]
+        if pgid not in self._group_by_pgid:
+            self._group_by_pgid[pgid] = site.group
+        self._ham.setitem_tail(psite, value)
+
+    def _set_hopping(self, hoppinglike, value):
+        """Set a single hopping."""
+        # Avoid nested HermConjOfFunc instances.
+        try:
+            a, b = hoppinglike
+        except:
+            raise KeyError(hoppinglike)
+        if isinstance(value, HermConjOfFunc):
+            a, b = b, a
+            value = value.function
+
+        ham = self._ham
+        gkt = ham.getkey_tail
+        ts = self._to_site
+        sym = self.symmetry
+
+        try:
+            a, b = sym.to_fd(ts(a), ts(b))
+            if sym.in_fd(b):
+                pa, pb = gkt(a.packed()), gkt(b.packed())
+                ham.setitem_edge((pa, pb), value)
+                ham.setitem_edge((pb, pa), other)
+            else:
+                pa, pb = gkt(a.packed()), b.packed()
+                ham.setitem_edge((pa, pb), value)
+                b, a = sym.to_fd(b, a)
+                assert not sym.in_fd(a)
+                pb, pa = gkt(b.packed()), a.packed()
+                ham.setitem_edge((pb, pa), other)
+        except KeyError:
+            raise KeyError(hoppinglike)
+
+    def __setitem__(self, key, value):
+        """Set a single site/hopping or an iterable of them."""
+        for_each_in_key(key,
+                        lambda s: self._set_site(s, value),
+                        lambda h: self._set_hopping(h, value))
+
+    def _del_site(self, sitelike):
+        """Delete a single site and all associated hoppings."""
+        tfd = self.symmetry.to_fd
+        ham = self._ham
+        site = tfd(self._to_site(sitelike))
+        psite = site.packed()
+        try:
+            for pneighbor in ham.out_neighbors(psite):
+                if ham.has_tail(pneighbor):
+                    ham.delitem_edge((pneighbor, psite))
+                else:
+                    neighbor = unpack(pneighbor, self._group_by_pgid)
+                    assert not self.symmetry.in_fd(neighbor)
+                    a, b = tfd(neighbor, site)
+                    ham.delitem_edge((a.packed(), b.packed()))
+        except KeyError:
+            raise KeyError(sitelike)
+        ham.delitem_tail(psite)
+
+    def _del_hopping(self, hoppinglike):
+        """Delete a single hopping."""
+        ham = self._ham
+        gkt = ham.getkey_tail
+        ts = self._to_site
+        sym = self.symmetry
+
+        try:
+            a, b = hoppinglike
+        except:
+            raise KeyError(hoppinglike)
+        try:
+            a, b = sym.to_fd(ts(a), ts(b))
+            if sym.in_fd(b):
+                pa, pb = a.packed(), b.packed()
+                ham.delitem_edge((pa, pb))
+                ham.delitem_edge((pb, pa))
+            else:
+                pa, pb = a.packed(), b.packed()
+                ham.delitem_edge((pa, pb))
+                b, a = sym.to_fd(b, a)
+                assert not sym.in_fd(a)
+                pb, pa = b.packed(), a.packed()
+                ham.delitem_edge((pb, pa))
+        except KeyError:
+            raise KeyError(hoppinglike)
+
+    def __delitem__(self, key):
+        """Delete a single site/hopping or an iterable of them."""
+        for_each_in_key(key,
+                        lambda s: self._del_site(s),
+                        lambda h: self._del_hopping(h))
+
+    def eradicate_dangling(self):
+        """Keep deleting dangling sites until none are left."""
+        ham = self._ham
+        psites = list(psite for psite in ham.tails()
+                      if ham.out_degree(psite) < 2)
+        for psite in psites:
+            if not ham.has_tail(psite): continue
+            while psite:
+                pneighbors = tuple(ham.out_neighbors(psite))
+                if pneighbors:
+                    assert len(pneighbors) == 1
+                    pneighbor = pneighbors[0]
+                    ham.delitem_edge((pneighbor, psite))
+                    if ham.out_degree(pneighbor) > 1:
+                        pneighbor = False
+                else:
+                    pneighbor = False
+                ham.delitem_tail(psite)
+                psite = pneighbor
+
+    def __iter__(self):
+        """Return an iterator over all sites and hoppings."""
+        return chain(self.sites(), self.hoppings())
+
+    def sites(self):
+        """Return an iterator over all sites."""
+        for psite in self._ham.tails():
+            yield unpack(psite, self._group_by_pgid)
+
+    def site_value_pairs(self):
+        """Return an iterator over all (site, value) pairs."""
+        for psite, value in self._ham.tail_value_pairs():
+            yield unpack(psite, self._group_by_pgid), value
+
+    def hoppings(self):
+        """Return an iterator over all hoppings."""
+        gbp = self._group_by_pgid
+        for phopp, value in self._ham.edge_value_pairs():
+            if value is other: continue
+            pa, pb = phopp
+            yield (unpack(pa, gbp), unpack(pb, gbp))
+
+    def hopping_value_pairs(self):
+        """Return an iterator over all (hopping, value) pairs."""
+        gbp = self._group_by_pgid
+        for phopp, value in self._ham.edge_value_pairs():
+            if value is other: continue
+            pa, pb = phopp
+            yield (unpack(pa, gbp), unpack(pb, gbp)), value
+
+    def dangling(self):
+        """Return an iterator over all dangling sites."""
+        ham = self._ham
+        for psite in ham.tails():
+            if ham.out_degree(psite) < 2:
+                yield unpack(psite, self._group_by_pgid)
+
+    def degree(self, sitelike):
+        """Return the number of neighbors of a site."""
+        psite = self.symmetry.to_fd(self._to_site(sitelike)).packed()
+        return self._ham.out_degree(psite)
+
+    def neighbors(self, sitelike):
+        """Return an iterator over all neighbors of a site."""
+        gbp = self._group_by_pgid
+        pa = self.symmetry.to_fd(self._to_site(sitelike)).packed()
+        return (unpack(pb, gbp)
+                for pb in self._ham.out_neighbors(pa))
+
+    def __iadd__(self, other_sys):
+        """Add `other_sys` to the system.
+
+        Sites and hoppings present in both systems are overwritten by those in
+        `other_sys`.  The leads of `other_sys` are appended to the leads of the
+        system being extended.
+        """
+        raise NotImplementedError()
+
+    def possible_hoppings(self, delta, group_b, group_a):
+        """Return all matching possible hoppings between existing sites.
+
+        A hopping ``(a, b)`` matches precisely when the site group of ``a`` is
+        `group_a` and that of ``b`` is `group_b` and ``(a.tag - b.tag)``
+        (interpreted as vectors) equals to `delta`.
+
+        Parameters
+        ----------
+        delta : Sequence of integers
+            The sequence is interpreted as a vector with integer elements.
+        group_a : `~kwant.builder.SiteGroup`
+        grpup_b : `~kwant.builder.SiteGroup`
+
+        Returns
+        -------
+        hoppings : Iterator over hoppings
+           All matching possible hoppings
+        """
+        minus_delta = tuple(-x for x in delta)
+        for site0 in self.sites():
+            group0 = site0.group
+            if group0 is group_a:
+                group1 = group_b
+                d = minus_delta
+            elif group0 is group_b:
+                group1 = group_a
+                d = delta
+            else:
+                continue
+            site1 = site0.shifted(d, group1)
+            if site1 in self:
+                yield site0, site1
+
+    def attach_lead(self, lead_builder, origin=None):
+        """Attach a lead to the builder, possibly adding missing sites.
+
+        Parameters
+        ----------
+        lead_builder : `Builder` with 1D translational symmetry
+            Builder of the lead which has to be attached.
+        origin : `Site`
+            The site which should belong to a domain where the lead should
+            begin. It is used to attach a lead inside the system, e.g. to an
+            inner radius of a ring.
+
+        Raises
+        ------
+        ValueError
+            If `lead_builder` does not have proper symmetry, has hoppings with
+            range of more than one slice, or if it is not completely
+            interrupted by the system.
+
+        Notes
+        -----
+        This method is not fool-proof, i.e. if it returns an error, there is
+        no guarantee that the system stayed unaltered.
+        """
+        sym = lead_builder.symmetry
+
+        if sym.num_directions != 1 \
+                or not all(-1 <= sym.which(hopping[1])[0] <= 1
+                            for hopping in lead_builder.hoppings()):
+            raise ValueError('Only builders with a 1D symmetry and no '
+                             'hoppings between non-neighboring slices '
+                             'are allowed.')
+        try:
+            lead_builder.sites().next()
+        except StopIteration:
+            raise ValueError('Lead to be attached contains no sites.')
+
+        # Check if site groups of the lead are present in the system (catches
+        # a common and a hard to find bug).
+        groups = set(self._group_by_pgid.values())
+        for site in lead_builder.sites():
+            if site.group not in groups:
+                msg = 'Sites with site group {0} do not appear in the ' +\
+                      'system, hence the system does not interrupt the ' +\
+                      'lead. Note that different lattice instances with ' +\
+                      'the same parameters are different site groups. ' +\
+                      'See tutorial for more details.'
+                raise ValueError(msg.format(site.group))
+
+        all_doms = list(sym.which(site)[0]
+                        for site in self.sites() if site in lead_builder)
+        if origin is not None:
+            orig_dom = sym.which(origin)[0]
+            all_doms = [dom for dom in all_doms if dom <= orig_dom]
+        if len(all_doms) == 0:
+            raise ValueError('Builder does not interrupt the lead,'
+                             ' this lead cannot be attached.')
+        max_dom = max(all_doms)
+        min_dom = min(all_doms)
+        del all_doms
+
+        neighbors = set()
+        added = set()
+        # Initialize flood-fill: create the outermost sites.
+        for site in lead_builder.sites():
+            for neighbor in lead_builder.neighbors(site):
+                neighbor = sym.act((max_dom + 1,), neighbor)
+                if sym.which(neighbor)[0] == max_dom:
+                    if neighbor not in self:
+                        self[neighbor] = lead_builder[neighbor]
+                        added.add(neighbor)
+                    neighbors.add(neighbor)
+
+        # Do flood-fill.
+        covered = True
+        while covered:
+            covered = False
+            added2 = set()
+            for site in added:
+                site_dom = sym.which(site)
+                move = lambda x: sym.act(site_dom, x)
+                for site_new in lead_builder.neighbors(site):
+                    site_new = move(site_new)
+                    new_dom = sym.which(site_new)[0]
+                    if new_dom == max_dom + 1:
+                        continue
+                    elif new_dom < min_dom:
+                        raise ValueError('Builder does not interrupt the lead,'
+                                         ' this lead cannot be attached.')
+                    if site_new not in self \
+                       and sym.which(site_new)[0] != max_dom + 1:
+                        self[site_new] = lead_builder[site_new]
+                        added2.add(site_new)
+                        covered = True
+                    self[site_new, site] = lead_builder[site_new, site]
+            added = added2
+
+        self.leads.append(BuilderLead(lead_builder, list(neighbors)))
+        return len(self.leads) - 1
+
+    def finalized(self):
+        """Return a finalized (=usable with solvers) copy of the system.
+
+        Returns
+        -------
+        finalized_system : `kwant.system.FiniteSystem`
+            If there is no symmetry.
+        finalized_system : `kwant.system.InfiniteSystem`
+            If a symmetry is present.
+
+        Notes
+        -----
+        This method does not modify the Builder instance for which it is
+        called.
+
+        Attached leads are also finalized and will be present in the finalized
+        system to be returned.
+
+        Currently, only Builder instances without or with a single `Symmetry`
+        can be finalized.
+        """
+        if self.symmetry.num_directions == 0:
+            return self._finalized_finite()
+        elif self.symmetry.num_directions == 1:
+            return self._finalized_infinite()
+        else:
+            raise ValueError('Currently, only builders without or with a '
+                             'single symmetry can be finalized.')
+
+    def _finalized_finite(self):
+        if self.symmetry.num_directions != 0:
+            raise ValueError(
+                'Only systems without symmetries are supported for now.')
+        ham = self._ham
+
+        #### Make translation tables.
+        id_by_psite = {}
+        psites = []
+        for psite in ham.tails(): # Loop over all packed sites.
+            psite_id = len(psites)
+            psites.append(psite)
+            id_by_psite[psite] = psite_id
+
+        #### Make graph.
+        g = graph.Graph()
+        g.num_nodes = len(psites) # Some sites could not appear in any edge.
+        for tail, head in ham.edges():
+            if tail == head: continue
+            g.add_edge(id_by_psite[tail], id_by_psite[head])
+        g = g.compressed()
+
+        #### Connect leads.
+        finalized_leads = []
+        lead_neighbor_seqs = []
+        for lead_nr, lead in enumerate(self.leads):
+            finalized_leads.append(lead.finalized())
+            lns = [id_by_psite[neighbor.packed()]
+                   for neighbor in lead.neighbors]
+            lead_neighbor_seqs.append(np.array(lns))
+
+        #### Assemble and return result.
+        result = FiniteSystem()
+        result.graph = g
+        result.psites_idxs = np.cumsum([0] + [len(psite) for psite in psites])
+        result.psites = "".join(psites)
+        result.group_by_pgid = self._group_by_pgid
+        result.leads = finalized_leads
+        result.hoppings = [ham.getitem_edge((psites[tail], psites[head]))
+                           for tail, head in g]
+        result.onsite_hamiltonians = [ham.getitem_tail(psite)
+                                      for psite in psites]
+        result.lead_neighbor_seqs = lead_neighbor_seqs
+        result.symmetry = self.symmetry
+        return result
+
+    def _finalized_infinite(self, order_of_neighbors=None):
+        """
+        Finalize this builder instance which has to have exactly a single
+        symmetry direction.
+
+        If order_of_neighbors is not set, the order of the neighbors in the
+        finalized system will be arbitrary.  If order_of_neighbors is set to a
+        sequence of neighbor sites, this order will be kept.
+        """
+        ham = self._ham
+        sym = self.symmetry
+        gbp = self._group_by_pgid
+        if sym.num_directions != 1:
+            raise ValueError('System builder must have a single symmetry'
+                             ' direction.')
+
+        #### For each site of the fundamental domain, determine whether it has
+        #### neighbors or not.
+        plsites_with = []    # Fund. domain sites with neighbors in prev. dom
+        plsites_without = [] # Remaining sites of the fundamental domain
+        for ptail in ham.tails(): # Loop over all sites of the fund. domain.
+            for phead in ham.out_neighbors(ptail):
+                head = unpack(phead, gbp)
+                fd = sym.which(head)[0]
+                if fd == 1:
+                    # Tail belongs to fund. domain, head to the next domain.
+                    plsites_with.append(ptail)
+                    break
+            else:
+                # Tail is a fund. domain site not connected to prev. domain.
+                plsites_without.append(ptail)
+        slice_size = len(plsites_with) + len(plsites_without)
+
+        ### Create list of packed sites `psites` and a lookup table
+        if order_of_neighbors is None:
+            pneighbors = [sym.act((-1,), unpack(s, gbp)).packed()
+                          for s in plsites_with]
+        else:
+            shift = (-sym.which(order_of_neighbors[0])[0] - 1,)
+            plsites_with_set = set(plsites_with)
+            plsites_with = []
+            pneighbors = []
+            for out_of_place_neighbor in order_of_neighbors:
+                # Shift the neighbor domain before the fundamental domain.
+                # That's the right place for the neighbors of a lead to be, but
+                # the neighbors in order_of_neighbors might live in a different
+                # domain.
+                neighbor = sym.act(shift, out_of_place_neighbor)
+                pneighbor = neighbor.packed()
+                plsite = sym.act((1,), neighbor).packed()
+
+                try:
+                    plsites_with_set.remove(plsite)
+                except KeyError:
+                    if (-sym.which(out_of_place_neighbor)[0] - 1,) != shift:
+                        raise ValueError(
+                            'The sites in order_of_neighbors do not all '
+                            'belong to the same lead slice.')
+                    else:
+                        raise ValueError('A site in order_of_neighbors is '
+                                         'not a neighbor:\n' + str(neighbor))
+                pneighbors.append(pneighbor)
+                plsites_with.append(plsite)
+            if plsites_with_set:
+                raise ValueError(
+                    'order_of_neighbors did not contain all neighbors.')
+            del plsites_with_set
+
+        psites = plsites_with + plsites_without + pneighbors
+        del plsites_with
+        del plsites_without
+        del pneighbors
+        id_by_psite = {}
+        for site_id, psite in enumerate(psites):
+            id_by_psite[psite] = site_id
+
+        #### Make graph and extract onsite Hamiltonians.
+        g = graph.Graph()
+        onsite_hamiltonians = []
+        for tail_id, ptail in enumerate(psites[:slice_size]):
+            onsite_hamiltonians.append(ham.getitem_tail(ptail))
+            for phead in ham.out_neighbors(ptail):
+                head_id = id_by_psite.get(phead)
+                if head_id is None:
+                    # Head belongs neither to the fundamental domain nor to the
+                    # previous domain.  Check that it belongs to the next
+                    # domain and ignore it otherwise as an edge corresponding
+                    # to this one has been added already or will be added.
+                    head = unpack(phead, gbp)
+                    fd = sym.which(head)[0]
+                    if fd != 1:
+                        tail = unpack(ptail, gbp)
+                        msg = 'Further-then-nearest-neighbor slices ' \
+                        'are connected by hopping\n{0}.'
+                        raise ValueError(msg.format((tail, head)))
+                    continue
+                if head_id >= slice_size:
+                    # Head belongs to previous domain.  The edge added here
+                    # correspond to one left out just above.
+                    g.add_edge(head_id, tail_id)
+                g.add_edge(tail_id, head_id)
+        del id_by_psite
+        g = g.compressed()
+
+        #### Extract hoppings.
+        hoppings = []
+        for tail_id, head_id in g:
+            ptail = psites[tail_id]
+            phead = psites[head_id]
+            if tail_id >= slice_size:
+                # The tail belongs to the previous domain.  Find the
+                # corresponding hopping with the tail in the fund. domain.
+                t, h = sym.to_fd(unpack(ptail, gbp), unpack(phead, gbp))
+                ptail = t.packed()
+                phead = h.packed()
+            hoppings.append(ham.getitem_edge((ptail, phead)))
+
+        #### Assemble and return result.
+        result = InfiniteSystem()
+        result.slice_size = slice_size
+        result.psites_idxs = np.cumsum([0] + [len(psite) for psite in psites])
+        result.psites = "".join(psites)
+        result.group_by_pgid = self._group_by_pgid
+        result.graph = g
+        result.hoppings = hoppings
+        result.onsite_hamiltonians = onsite_hamiltonians
+        result.symmetry = self.symmetry
+        return result
+
+
+################ Finalized systems
+
+class System(system.System):
+    """Finalized Builder."""
+
+    def hamiltonian(self, i, j):
+        if i == j:
+            value = self.onsite_hamiltonians[i]
+            if hasattr(value, '__call__'):
+                value = value(self.symmetry.to_fd(self.site(i)))
+            return value
+        else:
+            edge_id = self.graph.first_edge_id(i, j)
+            value = self.hoppings[edge_id]
+            conj = value is other
+            if conj:
+                i, j = j, i
+                edge_id = self.graph.first_edge_id(i, j)
+                value = self.hoppings[edge_id]
+            if hasattr(value, '__call__'):
+                site_i = self.site(i)
+                site_j = self.site(j)
+                value = value(*self.symmetry.to_fd(site_i, site_j))
+            if conj:
+                value = herm_conj(value)
+            return value
+
+    def site(self, i):
+        a, b = self.psites_idxs[i : i + 2]
+        return unpack(self.psites[a : b], self.group_by_pgid)
+
+    def pos(self, i):
+        return self.site(i).pos
+
+
+class FiniteSystem(System, system.FiniteSystem):
+    """
+    Finalized `Builder` with leads.
+
+    Usable as input for the solvers in `kwant.solvers`.
+    """
+    pass
+
+
+class InfiniteSystem(System, system.InfiniteSystem):
+    """Finalized infinite system, extracted from a `Builder`."""
diff --git a/kwant/graph/__init__.py b/kwant/graph/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a3dafce72bd31eaad716984c060f0f0d31167de
--- /dev/null
+++ b/kwant/graph/__init__.py
@@ -0,0 +1,8 @@
+"""Functionality for graphs"""
+
+# Merge the public interface of all submodules.
+__all__ = []
+for module in ['core', 'defs', 'slicer', 'utils']:
+    exec 'from . import {0}'.format(module)
+    exec 'from .{0} import *'.format(module)
+    exec '__all__.extend({0}.__all__)'.format(module)
diff --git a/kwant/graph/c_scotch.pxd b/kwant/graph/c_scotch.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..c1b4ab31a90b76ffc242627f6f509c31c08a3644
--- /dev/null
+++ b/kwant/graph/c_scotch.pxd
@@ -0,0 +1,140 @@
+from libc.stdio cimport FILE
+
+cdef extern from "scotch.h":
+     ctypedef struct SCOTCH_Arch:
+          pass
+
+     ctypedef struct SCOTCH_Geom:
+          pass
+
+     ctypedef struct SCOTCH_Graph:
+          double dummy[12]
+
+     ctypedef struct SCOTCH_Mesh:
+          pass
+
+     ctypedef struct SCOTCH_Mapping:
+          pass
+
+     ctypedef struct SCOTCH_Ordering:
+          pass
+
+     ctypedef struct SCOTCH_Strat:
+          pass
+
+     ctypedef int SCOTCH_Idx
+
+     ctypedef int SCOTCH_Num
+
+     void SCOTCH_errorProg(char *)
+     void SCOTCH_errorPrint(char *, ...)
+     void SCOTCH_errorPrintW(char *, ...)
+
+     int SCOTCH_archInit(SCOTCH_Arch *)
+     void SCOTCH_archExit(SCOTCH_Arch *)
+     int SCOTCH_archLoad(SCOTCH_Arch *, FILE *)
+     int SCOTCH_archSave(SCOTCH_Arch *, FILE *)
+     int SCOTCH_archBuild(SCOTCH_Arch *, SCOTCH_Graph *, SCOTCH_Num, SCOTCH_Num *, SCOTCH_Strat *)
+     char *SCOTCH_archName(SCOTCH_Arch *)
+     SCOTCH_Num SCOTCH_archSize(SCOTCH_Arch *)
+     int SCOTCH_archCmplt(SCOTCH_Arch *, SCOTCH_Num)
+     int SCOTCH_archCmpltw(SCOTCH_Arch *, SCOTCH_Num, SCOTCH_Num *)
+     int SCOTCH_archHcub(SCOTCH_Arch *, SCOTCH_Num)
+     int SCOTCH_archMesh2(SCOTCH_Arch *, SCOTCH_Num, SCOTCH_Num)
+     int SCOTCH_archMesh3(SCOTCH_Arch *, SCOTCH_Num, SCOTCH_Num, SCOTCH_Num)
+     int SCOTCH_archTleaf(SCOTCH_Arch *, SCOTCH_Num, SCOTCH_Num *, SCOTCH_Num *)
+     int SCOTCH_archTorus2(SCOTCH_Arch *, SCOTCH_Num, SCOTCH_Num)
+     int SCOTCH_archTorus3(SCOTCH_Arch *, SCOTCH_Num, SCOTCH_Num, SCOTCH_Num)
+     int SCOTCH_archVcmplt(SCOTCH_Arch *)
+     int SCOTCH_archVhcub(SCOTCH_Arch *)
+
+     int SCOTCH_geomInit(SCOTCH_Geom *)
+     void SCOTCH_geomExit(SCOTCH_Geom *)
+     void SCOTCH_geomData(SCOTCH_Geom *, SCOTCH_Num *, double **)
+
+     int SCOTCH_graphInit(SCOTCH_Graph *)
+     void SCOTCH_graphExit(SCOTCH_Graph *)
+     void SCOTCH_graphFree(SCOTCH_Graph *)
+     int SCOTCH_graphLoad(SCOTCH_Graph *, FILE *, SCOTCH_Num, SCOTCH_Num)
+     int SCOTCH_graphSave(SCOTCH_Graph *, FILE *)
+     int SCOTCH_graphBuild(SCOTCH_Graph *, SCOTCH_Num, SCOTCH_Num, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num, SCOTCH_Num *, SCOTCH_Num *)
+     SCOTCH_Num SCOTCH_graphBase(SCOTCH_Graph *, SCOTCH_Num baseval)
+     int SCOTCH_graphCheck(SCOTCH_Graph *)
+     void SCOTCH_graphSize(SCOTCH_Graph *, SCOTCH_Num *, SCOTCH_Num *)
+     void SCOTCH_graphData(SCOTCH_Graph *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num **, SCOTCH_Num **, SCOTCH_Num **, SCOTCH_Num **, SCOTCH_Num *, SCOTCH_Num **, SCOTCH_Num **)
+     void SCOTCH_graphStat(SCOTCH_Graph *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, double *, double *, SCOTCH_Num *, SCOTCH_Num *, double *, double *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, double *, double *)
+     int SCOTCH_graphGeomLoadChac(SCOTCH_Graph *, SCOTCH_Geom *, FILE *, FILE *, char *)
+     int SCOTCH_graphGeomLoadHabo(SCOTCH_Graph *, SCOTCH_Geom *, FILE *, FILE *, char *)
+     int SCOTCH_graphGeomLoadMmkt(SCOTCH_Graph *, SCOTCH_Geom *, FILE *, FILE *, char *)
+     int SCOTCH_graphGeomLoadScot(SCOTCH_Graph *, SCOTCH_Geom *, FILE *, FILE *, char *)
+     int SCOTCH_graphGeomSaveChac(SCOTCH_Graph *, SCOTCH_Geom *, FILE *, FILE *, char *)
+     int SCOTCH_graphGeomSaveMmkt(SCOTCH_Graph *, SCOTCH_Geom *, FILE *, FILE *, char *)
+     int SCOTCH_graphGeomSaveScot(SCOTCH_Graph *, SCOTCH_Geom *, FILE *, FILE *, char *)
+
+     int SCOTCH_graphMapInit(SCOTCH_Graph *, SCOTCH_Mapping *, SCOTCH_Arch *, SCOTCH_Num *)
+     void SCOTCH_graphMapExit(SCOTCH_Graph *, SCOTCH_Mapping *)
+     int SCOTCH_graphMapLoad(SCOTCH_Graph *, SCOTCH_Mapping *, FILE *)
+     int SCOTCH_graphMapSave(SCOTCH_Graph *, SCOTCH_Mapping *, FILE *)
+     int SCOTCH_graphMapView(SCOTCH_Graph *, SCOTCH_Mapping *, FILE *)
+     int SCOTCH_graphMapCompute(SCOTCH_Graph *, SCOTCH_Mapping *, SCOTCH_Strat *)
+     int SCOTCH_graphMap(SCOTCH_Graph *, SCOTCH_Arch *, SCOTCH_Strat *, SCOTCH_Num *)
+     int SCOTCH_graphPart(SCOTCH_Graph *, SCOTCH_Num, SCOTCH_Strat *, SCOTCH_Num *)
+
+     int SCOTCH_graphOrderInit(SCOTCH_Graph *, SCOTCH_Ordering *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *)
+     void SCOTCH_graphOrderExit(SCOTCH_Graph *, SCOTCH_Ordering *)
+     int SCOTCH_graphOrderLoad(SCOTCH_Graph *, SCOTCH_Ordering *, FILE *)
+     int SCOTCH_graphOrderSave(SCOTCH_Graph *, SCOTCH_Ordering *, FILE *)
+     int SCOTCH_graphOrderSaveMap(SCOTCH_Graph *, SCOTCH_Ordering *, FILE *)
+     int SCOTCH_graphOrderSaveTree(SCOTCH_Graph *, SCOTCH_Ordering *, FILE *)
+     int SCOTCH_graphOrderCompute(SCOTCH_Graph *, SCOTCH_Ordering *, SCOTCH_Strat *)
+     int SCOTCH_graphOrderComputeList(SCOTCH_Graph *, SCOTCH_Ordering *, SCOTCH_Num, SCOTCH_Num *, SCOTCH_Strat *)
+     int SCOTCH_graphOrderFactor(SCOTCH_Graph *, SCOTCH_Ordering *, SCOTCH_Graph *)
+     int SCOTCH_graphOrderView(SCOTCH_Graph *, SCOTCH_Ordering *, FILE *)
+     int SCOTCH_graphOrder(SCOTCH_Graph *, SCOTCH_Strat *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *)
+     int SCOTCH_graphOrderList(SCOTCH_Graph *, SCOTCH_Num, SCOTCH_Num *, SCOTCH_Strat *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *)
+     int SCOTCH_graphOrderCheck(SCOTCH_Graph *, SCOTCH_Ordering *)
+
+     int SCOTCH_meshInit(SCOTCH_Mesh *)
+     void SCOTCH_meshExit(SCOTCH_Mesh *)
+     int SCOTCH_meshLoad(SCOTCH_Mesh *, FILE *, SCOTCH_Num)
+     int SCOTCH_meshSave(SCOTCH_Mesh *, FILE *)
+     int SCOTCH_meshBuild(SCOTCH_Mesh *, SCOTCH_Num, SCOTCH_Num, SCOTCH_Num, SCOTCH_Num, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num, SCOTCH_Num *)
+     int SCOTCH_meshCheck(SCOTCH_Mesh *)
+     void SCOTCH_meshSize(SCOTCH_Mesh *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *)
+     void SCOTCH_meshData(SCOTCH_Mesh *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num **, SCOTCH_Num **, SCOTCH_Num **, SCOTCH_Num **, SCOTCH_Num **, SCOTCH_Num *, SCOTCH_Num **, SCOTCH_Num *)
+     void SCOTCH_meshStat(SCOTCH_Mesh *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, double *, double *, SCOTCH_Num *, SCOTCH_Num *, double *, double *, SCOTCH_Num *, SCOTCH_Num *, double *, double *)
+     int SCOTCH_meshGraph(SCOTCH_Mesh *, SCOTCH_Graph *)
+     int SCOTCH_meshGeomLoadHabo(SCOTCH_Mesh *, SCOTCH_Geom *, FILE *, FILE *, char *)
+     int SCOTCH_meshGeomLoadScot(SCOTCH_Mesh *, SCOTCH_Geom *, FILE *, FILE *, char *)
+     int SCOTCH_meshGeomSaveScot(SCOTCH_Mesh *, SCOTCH_Geom *, FILE *, FILE *, char *)
+
+     int SCOTCH_meshOrderInit(SCOTCH_Mesh *, SCOTCH_Ordering *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *)
+     void SCOTCH_meshOrderExit(SCOTCH_Mesh *, SCOTCH_Ordering *)
+     int SCOTCH_meshOrderSave(SCOTCH_Mesh *, SCOTCH_Ordering *, FILE *)
+     int SCOTCH_meshOrderSaveMap(SCOTCH_Mesh *, SCOTCH_Ordering *, FILE *)
+     int SCOTCH_meshOrderSaveTree(SCOTCH_Mesh *, SCOTCH_Ordering *, FILE *)
+     int SCOTCH_meshOrderCompute(SCOTCH_Mesh *, SCOTCH_Ordering *, SCOTCH_Strat *)
+     int SCOTCH_meshOrderComputeList(SCOTCH_Mesh *, SCOTCH_Ordering *, SCOTCH_Num, SCOTCH_Num *, SCOTCH_Strat *)
+     int SCOTCH_meshOrder(SCOTCH_Mesh *, SCOTCH_Strat *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *)
+     int SCOTCH_meshOrderList(SCOTCH_Mesh *, SCOTCH_Num, SCOTCH_Num *, SCOTCH_Strat *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *, SCOTCH_Num *)
+     int SCOTCH_meshOrderCheck(SCOTCH_Mesh *, SCOTCH_Ordering *)
+
+     void SCOTCH_randomReset()
+
+     int SCOTCH_stratInit(SCOTCH_Strat *)
+     void SCOTCH_stratExit(SCOTCH_Strat *)
+     void SCOTCH_stratFree(SCOTCH_Strat *)
+     int SCOTCH_stratSave(SCOTCH_Strat *, FILE *)
+
+     int SCOTCH_stratGraphBipart(SCOTCH_Strat *, char *)
+     int SCOTCH_stratGraphMap(SCOTCH_Strat *, char *)
+     int SCOTCH_stratGraphMapBuild(SCOTCH_Strat *, SCOTCH_Num, SCOTCH_Num, double)
+     int SCOTCH_stratGraphOrder(SCOTCH_Strat *, char *)
+     int SCOTCH_stratGraphOrderBuild(SCOTCH_Strat *, SCOTCH_Num, double)
+     int SCOTCH_stratMeshOrder(SCOTCH_Strat *, char *)
+     int SCOTCH_stratMeshOrderBuild(SCOTCH_Strat *, SCOTCH_Num, double)
+
+     void SCOTCH_memoryTrace()
+     void SCOTCH_memoryUntrace()
+     void SCOTCH_memoryTraceReset()
+     unsigned long SCOTCH_memoryTraceGet()
diff --git a/kwant/graph/c_slicer.pxd b/kwant/graph/c_slicer.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..c248d8023e11ff62a71fa8767aaa8a6abc981331
--- /dev/null
+++ b/kwant/graph/c_slicer.pxd
@@ -0,0 +1,11 @@
+from defs cimport gint
+
+cdef extern from "c_slicer/slicer.h":
+   struct Slicing:
+      int nslices
+      int *slice_ptr, *slices
+
+   Slicing *slice(gint, gint *, gint *, gint, gint *,
+                  gint, gint *)
+
+   void freeSlicing(Slicing *)
diff --git a/kwant/graph/c_slicer/bucket_list.h b/kwant/graph/c_slicer/bucket_list.h
new file mode 100644
index 0000000000000000000000000000000000000000..898124bfde97f89e77968df1eedf62c9adf943c8
--- /dev/null
+++ b/kwant/graph/c_slicer/bucket_list.h
@@ -0,0 +1,292 @@
+#ifndef BUCKET_LIST_H
+#define BUCKET_LIST_H
+
+#include <vector>
+#include <list>
+#include <utility>
+#include <cstdlib>
+
+using std::vector;
+using std::list;
+using std::pair;
+using std::make_pair;
+
+class bucket_list
+{
+ private:
+  vector<list<int> > bucket;
+  int max_value;
+  int lower_bound, upper_bound;
+
+  vector<list<int>::iterator > &reference_list;
+  const vector<int> &index_list;
+
+ public:
+  bucket_list(int _lower_bound, int _upper_bound,
+	      vector<list<int>::iterator > &_reference_list,
+	      const vector<int> &_index_list) :
+    bucket(_upper_bound-_lower_bound+2, list<int>(0)),
+    max_value(_lower_bound-1),
+    lower_bound(_lower_bound), upper_bound(_upper_bound),
+    reference_list(_reference_list), index_list(_index_list)
+  {
+    //note that the vector bucket also contains an entry
+    //for lower_bound-1 !
+    //and we fill it with the default variable "-1"
+    //so that the past the end bucket is never empty.
+    //That makes the algorithms simpler
+    bucket[0].push_back(-1);
+  }
+
+  inline bool empty() const
+  {
+    return max_value<lower_bound;
+  }
+
+  inline int front() const
+  {
+    return bucket[max_value - lower_bound + 1].front();
+  }
+
+  inline void pop_front()
+  {
+    if(!empty()) {
+      bucket[max_value - lower_bound + 1].pop_front();
+
+      if(bucket[max_value - lower_bound + 1].empty()) {
+	while(bucket[--max_value - lower_bound + 1].empty())
+	  ;
+      }
+    }
+  }
+
+  inline int max_key() const
+  {
+    return max_value;
+  }
+
+  inline void push_back(int _key, int _data)
+  {
+    reference_list[index_list[_data]]=bucket[_key - lower_bound + 1].
+      insert(bucket[_key - lower_bound + 1].end(), _data);
+
+    if(_key > max_value) max_value=_key;
+  }
+
+  inline void push_front(int _key, int _data)
+  {
+    reference_list[index_list[_data]]=bucket[_key - lower_bound + 1].
+      insert(bucket[_key - lower_bound + 1].begin(), _data);
+
+    if(_key > max_value) max_value=_key;
+  }
+
+  inline void push_randomly(int _key, int _data)
+  {
+    //do a push_back() or a push_front with equal
+    //probability
+    if(rand()%2) {
+      push_front(_key, _data);
+    }
+    else {
+      push_back(_key, _data);
+    }
+  }
+
+  inline void rearrange_back(int _old_key, int _new_key, int _data)
+  {
+    bucket[_old_key - lower_bound +1].
+      erase(reference_list[index_list[_data]]);
+
+    reference_list[index_list[_data]]=bucket[_new_key - lower_bound + 1].
+      insert(bucket[_new_key - lower_bound + 1].end(), _data);
+
+    if(_new_key > max_value) {
+      max_value=_new_key;
+    }
+    else if(bucket[max_value - lower_bound + 1].empty()) {
+      while(bucket[--max_value - lower_bound + 1].empty())
+	;
+    }
+  }
+
+  inline void rearrange_front(int _old_key, int _new_key, int _data)
+  {
+    bucket[_old_key - lower_bound +1].
+      erase(reference_list[index_list[_data]]);
+
+    reference_list[index_list[_data]]=bucket[_new_key - lower_bound + 1].
+      insert(bucket[_new_key - lower_bound + 1].begin(), _data);
+
+    if(_new_key > max_value) {
+      max_value=_new_key;
+    }
+    else if(bucket[max_value - lower_bound + 1].empty()) {
+      while(bucket[--max_value - lower_bound + 1].empty())
+	;
+    }
+  }
+
+  inline void rearrange_randomly(int _old_key, int _new_key, int _data)
+  {
+    if(rand()%2)
+      rearrange_back(_old_key, _new_key, _data);
+    else
+      rearrange_front(_old_key, _new_key, _data);
+  }
+
+  inline void remove(int _key, int _data)
+  {
+    bucket[_key - lower_bound +1].
+      erase(reference_list[index_list[_data]]);
+
+    if(bucket[max_value - lower_bound + 1].empty()) {
+      while(bucket[--max_value - lower_bound + 1].empty())
+	;
+    }
+  }
+
+ private:
+  inline list<int>::iterator manual_push_back(int _key, int _data)
+  {
+    if(_key > max_value) max_value=_key;
+
+    return bucket[_key - lower_bound + 1].
+      insert(bucket[_key - lower_bound + 1].end(), _data);
+  }
+
+  friend class double_bucket_list;
+};
+
+//-------------------
+
+class double_bucket_list
+{
+ private:
+  vector<bucket_list> bucket;
+
+  int max_value;
+  int lower_bound, upper_bound;
+
+  vector<list<int>::iterator > &reference_list;
+  const vector<int> &index_list;
+
+ public:
+  double_bucket_list(int _lower_bound1, int _upper_bound1,
+	      int _lower_bound2, int _upper_bound2,
+	      vector<list<int>::iterator > &_reference_list,
+	      const vector<int> &_index_list) :
+    bucket(_upper_bound1-_lower_bound1+2,
+	   bucket_list(_lower_bound2, _upper_bound2,
+		       _reference_list, _index_list)),
+    max_value(_lower_bound1-1),
+    lower_bound(_lower_bound1), upper_bound(_upper_bound1),
+    reference_list(_reference_list), index_list(_index_list)
+  {
+    //note that the vector bucket also contains an entry
+    //for lower_bound-1 !
+    //and we push a default entry into the past the
+    //end bucket of the corresponding bucket_list
+    bucket[0].manual_push_back(_lower_bound2, -1);
+  }
+
+  inline bool empty()
+  {
+    return max_value < lower_bound;
+  }
+
+  inline int front() const
+  {
+    return bucket[max_value- lower_bound + 1].front();
+  }
+
+  inline void pop_front()
+  {
+    if(!empty()) {
+      bucket[max_value - lower_bound + 1].pop_front();
+
+      if(bucket[max_value - lower_bound + 1].empty()) {
+	while(bucket[--max_value - lower_bound + 1].empty())
+	  ;
+      }
+    }
+  }
+
+  inline pair<int,int> max_key() const
+  {
+    return make_pair(max_value, bucket[max_value - lower_bound + 1].max_key());
+  }
+
+  inline void push_back(int _key1, int _key2, int _data)
+  {
+    bucket[_key1 - lower_bound + 1].push_back(_key2, _data);
+
+    if(_key1 > max_value) max_value=_key1;
+  }
+
+  inline void push_front(int _key1, int _key2, int _data)
+  {
+    bucket[_key1 - lower_bound + 1].push_front(_key2, _data);
+
+    if(_key1 > max_value) max_value=_key1;
+  }
+
+  inline void push_randomly(int _key1, int _key2, int _data)
+  {
+    if(rand()%2)
+      push_back(_key1, _key2, _data);
+    else
+      push_front(_key1, _key2, _data);
+  }
+
+  inline void rearrange_back(int _old_key1, int _old_key2,
+			     int _new_key1, int _new_key2, int _data)
+  {
+    if(_old_key1 == _new_key1) {
+      bucket[_old_key1 - lower_bound +1].rearrange_back(_old_key2, _new_key2, _data);
+    }
+    else {
+      bucket[_old_key1 - lower_bound +1].remove(_old_key2, _data);
+      bucket[_new_key1 - lower_bound +1].push_back(_new_key2, _data);
+
+      if(_new_key1 > max_value) {
+	max_value=_new_key1;
+      }
+      else if(bucket[max_value - lower_bound + 1].empty()) {
+	while(bucket[--max_value - lower_bound + 1].empty())
+	  ;
+      }
+    }
+  }
+
+  inline void rearrange_front(int _old_key1, int _old_key2,
+			      int _new_key1, int _new_key2, int _data)
+  {
+    if(_old_key1 == _new_key1) {
+      bucket[_old_key1 - lower_bound +1].rearrange_front(_old_key2, _new_key2, _data);
+    }
+    else {
+      bucket[_old_key1 - lower_bound +1].remove(_old_key2, _data);
+      bucket[_new_key1 - lower_bound +1].push_front(_new_key2, _data);
+
+      if(_new_key1 > max_value) {
+	max_value=_new_key1;
+      }
+      else if(bucket[max_value - lower_bound + 1].empty()) {
+	while(bucket[--max_value - lower_bound + 1].empty())
+	  ;
+      }
+    }
+  }
+
+  inline void rearrange_randomly(int _old_key1, int _old_key2,
+				 int _new_key1, int _new_key2, int _data)
+  {
+    if(rand()%2)
+      rearrange_back(_old_key1, _old_key2, _new_key1, _new_key2, _data);
+    else
+      rearrange_front(_old_key1, _old_key2, _new_key1, _new_key2, _data);
+  }
+};
+
+#endif
diff --git a/kwant/graph/c_slicer/graphwrap.h b/kwant/graph/c_slicer/graphwrap.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf483ce6685b37175d604355f761c163a50e3d60
--- /dev/null
+++ b/kwant/graph/c_slicer/graphwrap.h
@@ -0,0 +1,93 @@
+//-*-C++-*-
+#ifndef _GRAPH_WRAPPER_H
+#define _GRAPH_WRAPPER_H
+
+#include <iostream>
+#include <vector>
+#include <deque>
+#include <cmath>
+
+class GraphWrapper
+{
+public:
+  //Some helper classes
+
+  //functor to compare the degree of vertices
+  class DegreeComparator
+  {
+  private:
+    const GraphWrapper &graph;
+
+  public:
+    DegreeComparator( const GraphWrapper &_graph ) : graph(_graph)
+    {
+    }
+
+    bool operator()( int _vertex1, int _vertex2 )
+    {
+      return graph.getEdges(_vertex1).size() < graph.getEdges(_vertex2).size();
+    }
+  };
+
+  template<typename T>
+  class VectorProxy {
+    T *begin_it, *end_it;
+
+  public:
+    VectorProxy(T *_begin_it, T *_end_it) :
+      begin_it(_begin_it), end_it(_end_it)
+    {}
+
+    T *begin() const
+    {
+      return begin_it;
+    }
+
+    T *end() const
+    {
+      return end_it;
+    }
+
+    size_t size() const
+    {
+      return end_it-begin_it;
+    }
+  };
+
+
+protected:
+  //data structure to hold graph in compressed form
+  int *vertex_ptr;
+  int *edges;
+
+  int vertex_num;
+
+public:
+  GraphWrapper(int _vnum, int *_vertex_ptr, int *_edges) :
+    vertex_ptr(_vertex_ptr), edges(_edges), vertex_num(_vnum)
+  {
+  }
+
+public:
+   //information about the graph
+
+   //! number of vertices in the graph
+   inline int size () const
+   {
+     return vertex_num;
+   }
+   //!Get the total number of edges in the graph
+   inline int edgeSize() const
+   {
+     return vertex_ptr[vertex_num];
+   }
+
+  //! functions for accessing the edge structure
+  inline VectorProxy<int> getEdges( int _vertex ) const
+  {
+    return VectorProxy<int>(edges+vertex_ptr[_vertex],
+			    edges+vertex_ptr[_vertex+1]);
+  }
+};
+
+#endif
diff --git a/kwant/graph/c_slicer/partitioner.cc b/kwant/graph/c_slicer/partitioner.cc
new file mode 100644
index 0000000000000000000000000000000000000000..1df24b98696fd5120cbbee6aad4cbcee4d4b4e11
--- /dev/null
+++ b/kwant/graph/c_slicer/partitioner.cc
@@ -0,0 +1,1687 @@
+#include "partitioner.h"
+
+//----------------------------------------------------
+//Functions that do the bisection
+//----------------------------------------------------
+
+void Partitioner::bisectFirst(std::vector<int> &_left, std::vector<int> &_right,
+			      double _tolerance,
+			      int _min_opt_passes,
+			      int _max_opt_passes)
+{
+#if DEBUG_LEVEL>=3
+  cout << "bisectFirst" << endl;
+#endif
+
+  //First, determine the total number of slices
+  int slices=0;
+
+  if(_left.size() && _right.size()) {
+    //a starting block is defined on the left and on the right
+    std::deque<int> vertex_stack(_left.begin(), _left.end());
+    std::deque<int> rank_stack(_left.size(), 1);
+
+    std::vector<bool> locked( parts[0].size(), false);
+    std::vector<bool> rightside( parts[0].size(), false);
+
+    std::vector<int>::const_iterator vtx=_right.begin();
+    while(vtx!=_right.end()) {
+      rightside[*vtx]=true;
+
+      vtx++;
+    }
+
+    bool done=false;
+
+    int current_rank=0;
+
+    //mark all the border vertices as visited
+    std::deque<int>::const_iterator border_vertex = vertex_stack.begin();
+    while(border_vertex != vertex_stack.end()) {
+      locked[sliceIndex[*border_vertex]]=true;
+
+      border_vertex++;
+    }
+
+    while(vertex_stack.size() && !done ) {
+      int vertex = vertex_stack.front();
+      int rank   = rank_stack.front();
+
+      vertex_stack.pop_front();
+      rank_stack.pop_front();
+
+      //Have we reached a new slice?
+      if(rank > current_rank) {
+	slices++;
+	current_rank=rank;
+      }
+
+      //visit the edges
+      int *edge=graph.getEdges(vertex).begin();
+      while(edge != graph.getEdges(vertex).end()) {
+	//Is the node inside the central slice or already at the right
+	//border?
+	if(rightside[*edge]) {
+	  done=true;
+	  break;
+	}
+
+	if(!locked[sliceIndex[*edge]]) {
+	  vertex_stack.push_back(*edge);
+	  rank_stack.push_back(rank+1);
+
+	  locked[sliceIndex[*edge]]=true;
+	}
+
+	edge++;
+      }
+    }
+
+    slices++;
+
+#if DEBUG_LEVEL>=3
+    cout << "slices: " << slices << endl;
+#endif
+  }
+  else if(_left.size() || _right.size()) {
+    //TODO, not implemented yet
+  }
+  else {
+    int left_border_vertex, right_border_vertex;
+
+    //TODO, not implemented yet
+    //slices=findPseudoDiameter(graph, left_border_vertex, right_border_vertex);
+
+#if DEBUG_LEVEL>=3
+    cout << "slices: " << slices << endl;
+#endif
+
+    _left.push_back(left_border_vertex);
+    _right.push_back(right_border_vertex);
+  }
+
+#if DEBUG_LEVEL>=3
+  cout << "BTD: Total number of slices = " << slices << endl;
+#endif
+
+  //Return if there is only one slice
+  if(slices<2) return;
+
+  //Find the maximum number of edges (needed or Fiduccia-Mattheyses)
+  int max_edges=0;
+
+  for(size_t ivertex=0; ivertex < parts[0].size(); ivertex++) {
+    if(graph.getEdges(parts[0][ivertex]).size() > max_edges) {
+      max_edges=graph.getEdges(parts[0][ivertex]).size();
+    }
+  }
+
+  //insert enough space for the parts arrays
+  parts.insert(parts.begin()+1, slices-1, std::vector<int>(0));
+
+  //now do the bisection
+  bisect(0, slices,
+	 _left, _right,
+	 max_edges, _tolerance,
+	 _min_opt_passes, _max_opt_passes);
+}
+
+//-------------
+
+void Partitioner::bisect(int _part, int _part_slices,
+			 vector<int> &_left_border,
+			 vector<int> &_right_border,
+			 int _max_edges, double _tolerance,
+			 int _min_opt_passes, int _max_opt_passes)
+{
+  std::vector<bool> permanently_locked(parts[_part].size(),false);
+  std::vector<bool> locked(parts[_part].size(),false);
+
+  //the BFS searches start from the border vertices
+  std::deque<int> left_stack(_left_border.begin(), _left_border.end()),
+    right_stack(_right_border.begin(), _right_border.end());
+  std::deque<int> left_rank(_left_border.size(), 1),
+    right_rank(_right_border.size(), 1);
+
+  //The number of slices is already given
+
+  int slices=_part_slices;
+
+#if DEBUG_LEVEL>=3
+  cout << "bisect: Total number of slices = " << slices << endl;
+#endif
+
+  //Return if there is only one slice
+  //(should not happen)
+  if(slices<2) return;
+
+  //determine the sizes of the two parts in slices
+  int left_slices=slices/2;
+  int right_slices=slices-left_slices;
+
+  //calculate the projected slice sizes
+  int left_size=left_slices*parts[_part].size()/slices;
+  int right_size=parts[_part].size()-left_size;
+
+  int real_left_size=0, real_right_size=0;
+
+  //compute the new indexes
+  //according to the index of the first slice in the part
+  int left_index=_part;
+  int right_index=left_index+left_slices;
+
+#if DEBUG_LEVEL >= 3
+  cout << "New indices " << left_index << " " << right_index << endl;
+  cout << "New sizes " << left_size << " " << right_size << endl;
+#endif
+
+  //Now do a breadth first search from both sides
+
+  //mark all the border vertices as locked and ineligible
+  std::deque<int>::const_iterator border_vertex;
+
+  border_vertex=left_stack.begin();
+  while(border_vertex != left_stack.end()) {
+    locked[sliceIndex[*border_vertex]]=true;
+    permanently_locked[sliceIndex[*border_vertex]]=true;
+
+    border_vertex++;
+  }
+
+  border_vertex=right_stack.begin();
+  while(border_vertex != right_stack.end()) {
+    locked[sliceIndex[*border_vertex]]=true;
+    permanently_locked[sliceIndex[*border_vertex]]=true;
+
+    border_vertex++;
+  }
+
+  //  cout << "Found border" << endl;
+
+  int max_rank=1;
+
+  while(left_stack.size() || right_stack.size()) {
+
+    //first from right
+    if(max_rank>right_slices) {
+      break;
+    }
+
+    while(right_stack.size()) {
+      int vertex=right_stack.front();
+      int rank=right_rank.front();
+
+      //stop if we have exceeded the desired rank
+      if(rank>max_rank) {
+	break;
+      }
+
+      right_stack.pop_front();
+      right_rank.pop_front();
+
+      inSlice[vertex]=right_index;
+      real_right_size++;
+
+      int *edge=graph.getEdges(vertex).begin();
+      while(edge != graph.getEdges(vertex).end()) {
+
+	if(inSlice[*edge]==_part &&
+	   !locked[sliceIndex[*edge]]) {
+
+	  right_stack.push_back(*edge);
+	  right_rank.push_back(rank+1);
+
+	  locked[sliceIndex[*edge]]=true;
+	  if(rank < right_slices) {
+	    permanently_locked[sliceIndex[*edge]]=true;
+	  }
+	}
+
+	edge++;
+      }
+    }
+
+    //then from left
+    if(max_rank>left_slices) {
+      break;
+    }
+
+    while(left_stack.size()) {
+      int vertex=left_stack.front();
+      int rank=left_rank.front();
+
+      //stop if we have exceeded the desired rank
+      if(rank>max_rank) {
+	break;
+      }
+
+      left_stack.pop_front();
+      left_rank.pop_front();
+
+      inSlice[vertex]=left_index;
+      real_left_size++;
+
+      int *edge=graph.getEdges(vertex).begin();
+      while(edge != graph.getEdges(vertex).end()) {
+
+	if(inSlice[*edge]==_part &&
+	   !locked[sliceIndex[*edge]]) {
+
+	  left_stack.push_back(*edge);
+	  left_rank.push_back(rank+1);
+
+	  locked[sliceIndex[*edge]]=true;
+
+	  if(rank < left_slices) {
+	    permanently_locked[sliceIndex[*edge]]=true;
+	  }
+	}
+
+	edge++;
+      }
+    }
+
+    //next slices
+    max_rank++;
+  }
+
+  distribute(left_stack, left_rank,
+	     left_index, left_size, real_left_size,
+	     right_stack, right_rank,
+	     right_index, right_size, real_right_size,
+	     _part, locked, max_rank);
+
+  //Now optimize the structure according to Fiduccia-Mattheyses
+  for(int i=0; i<_max_opt_passes; i++) {
+    locked=permanently_locked;
+
+    int result=optimize(left_index, left_size, right_index, right_size,
+			locked, _max_edges,
+			static_cast<int>(_tolerance*parts[left_index].size()));
+
+    if(!result ||
+       (i >= _min_opt_passes && result == 1) ) {
+      break;
+    }
+  }
+
+  //create new part arrays
+  {
+    std::vector<int> left_part, right_part;
+
+    for(size_t ivertex=0; ivertex < parts[left_index].size(); ivertex++) {
+      int vertex=parts[left_index][ivertex];
+
+      if(inSlice[vertex]==left_index) {
+	left_part.push_back(vertex);
+      }
+      else {
+	right_part.push_back(vertex);
+      }
+    }
+
+    parts[left_index].swap(left_part);
+    parts[right_index].swap(right_part);
+  }
+
+  //Now update the sliceIndex
+  for(size_t ivertex=0; ivertex < parts[left_index].size(); ivertex++) {
+    sliceIndex[parts[left_index][ivertex]]=ivertex;
+  }
+  for(size_t ivertex=0; ivertex < parts[right_index].size(); ivertex++) {
+    sliceIndex[parts[right_index][ivertex]]=ivertex;
+  }
+
+  //find the internal borders
+  vector<int> internal_left_border, internal_right_border;
+
+  for(size_t ivertex=0; ivertex < parts[left_index].size(); ivertex++) {
+    int vertex=parts[left_index][ivertex];
+
+    int *edge=graph.getEdges(vertex).begin();
+    while(edge != graph.getEdges(vertex).end()) {
+
+      if(inSlice[*edge]==right_index) {
+	internal_left_border.push_back(vertex);
+
+	break;
+      }
+
+      edge++;
+    }
+  }
+
+  for(size_t ivertex=0; ivertex < parts[right_index].size(); ivertex++) {
+    int vertex=parts[right_index][ivertex];
+
+    int *edge=graph.getEdges(vertex).begin();
+    while(edge != graph.getEdges(vertex).end()) {
+
+      if(inSlice[*edge]==left_index) {
+	internal_right_border.push_back(vertex);
+
+	break;
+      }
+
+      edge++;
+    }
+  }
+
+
+  /*  //debug
+      ostringstream convert;
+      convert << "part" << counter++ << ".eps";
+      write2DToEPS(convert.str().c_str());
+
+      cout << "Test " <<counter << " " << parts[left_index].size() << " " << parts[right_index].size() << endl;
+
+  */
+  //Recursively refine the bisection
+  if(left_slices>1) {
+    bisect(left_index, left_slices,
+	   _left_border, internal_left_border,
+	   _max_edges, _tolerance,
+	   _min_opt_passes, _max_opt_passes);
+  }
+  if(right_slices>1) {
+    bisect(right_index, right_slices,
+	   internal_right_border, _right_border,
+	   _max_edges, _tolerance,
+	   _min_opt_passes, _max_opt_passes);
+  }
+}
+
+//-------------------------------------------------------
+//The initial distributions
+//-------------------------------------------------------
+
+void Partitioner::
+NaturalBalanced_distribute(std::deque<int> &_left_stack, std::deque<int> &_left_rank,
+			   int _left_index, int _left_size, int &_real_left_size,
+			   std::deque<int> &_right_stack, std::deque<int> &_right_rank,
+			   int _right_index, int _right_size, int &_real_right_size,
+			   int _part, std::vector<bool> &_locked,
+			   int _current_rank)
+{
+  int max_rank=_current_rank;
+
+  while(_left_stack.size() || _right_stack.size()) {
+
+    //first from right
+    while(_right_stack.size()) {
+      int vertex=_right_stack.front();
+      int rank=_right_rank.front();
+
+      //stop if we have exceeded the desired rank
+      if(rank>max_rank) {
+	break;
+      }
+
+      _right_stack.pop_front();
+      _right_rank.pop_front();
+
+      //Check if we have already exceeded the size of this
+      //part of the bisection
+      if(_real_right_size >= _right_size) {
+	inSlice[vertex]=_left_index;
+	_real_left_size++;
+      }
+      else {
+	inSlice[vertex]=_right_index;
+	_real_right_size++;
+      }
+
+      int *edge=graph.getEdges(vertex).begin();
+      while(edge != graph.getEdges(vertex).end()) {
+
+	if(inSlice[*edge]==_part &&
+	   !_locked[sliceIndex[*edge]]) {
+
+	  _right_stack.push_back(*edge);
+	  _right_rank.push_back(rank+1);
+
+	  _locked[sliceIndex[*edge]]=true;
+	}
+
+	edge++;
+      }
+    }
+
+    //then from left
+    while(_left_stack.size()) {
+      int vertex=_left_stack.front();
+      int rank=_left_rank.front();
+
+      //stop if we have exceeded the desired rank
+      if(rank>max_rank) {
+	break;
+      }
+
+      _left_stack.pop_front();
+      _left_rank.pop_front();
+
+      //Check if we have already exceeded the size of this
+      //part of the bisection
+      if(_real_left_size >= _left_size) {
+	inSlice[vertex]=_right_index;
+	_real_right_size++;
+      }
+      else {
+	inSlice[vertex]=_left_index;
+	_real_left_size++;
+      }
+
+      int *edge=graph.getEdges(vertex).begin();
+      while(edge != graph.getEdges(vertex).end()) {
+
+	if(inSlice[*edge]==_part &&
+	   !_locked[sliceIndex[*edge]]) {
+
+	  _left_stack.push_back(*edge);
+	  _left_rank.push_back(rank+1);
+
+	  _locked[sliceIndex[*edge]]=true;
+
+	}
+
+	edge++;
+      }
+    }
+
+    //next slices
+    max_rank++;
+  }
+
+}
+
+void Partitioner::
+NaturalUnbalanced_distribute(std::deque<int> &_left_stack, std::deque<int> &_left_rank,
+			     int _left_index, int _left_size, int &_real_left_size,
+			     std::deque<int> &_right_stack, std::deque<int> &_right_rank,
+			     int _right_index, int _right_size, int &_real_right_size,
+			     int _part, std::vector<bool> &_locked,
+			     int _current_rank)
+{
+  int max_rank=_current_rank;
+
+  while(_left_stack.size() || _right_stack.size()) {
+
+    //first from right
+    while(_right_stack.size()) {
+      int vertex=_right_stack.front();
+      int rank=_right_rank.front();
+
+      //stop if we have exceeded the desired rank
+      if(rank>max_rank) {
+	break;
+      }
+
+      _right_stack.pop_front();
+      _right_rank.pop_front();
+
+      inSlice[vertex]=_right_index;
+      _real_right_size++;
+
+      int *edge=graph.getEdges(vertex).begin();
+      while(edge != graph.getEdges(vertex).end()) {
+
+	if(inSlice[*edge]==_part &&
+	   !_locked[sliceIndex[*edge]]) {
+
+	  _right_stack.push_back(*edge);
+	  _right_rank.push_back(rank+1);
+
+	  _locked[sliceIndex[*edge]]=true;
+	}
+
+	edge++;
+      }
+    }
+
+    //then from left
+    while(_left_stack.size()) {
+      int vertex=_left_stack.front();
+      int rank=_left_rank.front();
+
+      //stop if we have exceeded the desired rank
+      if(rank>max_rank) {
+	break;
+      }
+
+      _left_stack.pop_front();
+      _left_rank.pop_front();
+
+      inSlice[vertex]=_left_index;
+      _real_left_size++;
+
+      int *edge=graph.getEdges(vertex).begin();
+      while(edge != graph.getEdges(vertex).end()) {
+
+	if(inSlice[*edge]==_part &&
+	   !_locked[sliceIndex[*edge]]) {
+
+	  _left_stack.push_back(*edge);
+	  _left_rank.push_back(rank+1);
+
+	  _locked[sliceIndex[*edge]]=true;
+
+	}
+
+	edge++;
+      }
+    }
+
+    //next slices
+    max_rank++;
+  }
+
+}
+
+void Partitioner::
+Random_distribute(std::deque<int> &_left_stack, std::deque<int> &_left_rank,
+		  int _left_index, int _left_size, int &_real_left_size,
+		  std::deque<int> &_right_stack, std::deque<int> &_right_rank,
+		  int _right_index, int _right_size, int &_real_right_size,
+		  int _part, std::vector<bool> &_locked,
+		  int _current_rank)
+{
+  int max_rank=_current_rank;
+
+  while(_left_stack.size() || _right_stack.size()) {
+
+    //first from right
+    while(_right_stack.size()) {
+      int vertex=_right_stack.front();
+      int rank=_right_rank.front();
+
+      //stop if we have exceeded the desired rank
+      if(rank>max_rank) {
+	break;
+      }
+
+      _right_stack.pop_front();
+      _right_rank.pop_front();
+
+      if(rand()%2) {
+	inSlice[vertex]=_right_index;
+	_real_right_size++;
+      }
+      else {
+	inSlice[vertex]=_left_index;
+	_real_left_size++;
+      }
+
+      int *edge=graph.getEdges(vertex).begin();
+      while(edge != graph.getEdges(vertex).end()) {
+
+	if(inSlice[*edge]==_part &&
+	   !_locked[sliceIndex[*edge]]) {
+
+	  _right_stack.push_back(*edge);
+	  _right_rank.push_back(rank+1);
+
+	  _locked[sliceIndex[*edge]]=true;
+	}
+
+	edge++;
+      }
+    }
+
+    //then from left
+    while(_left_stack.size()) {
+      int vertex=_left_stack.front();
+      int rank=_left_rank.front();
+
+      //stop if we have exceeded the desired rank
+      if(rank>max_rank) {
+	break;
+      }
+
+      _left_stack.pop_front();
+      _left_rank.pop_front();
+
+      if(rand()%2) {
+	inSlice[vertex]=_left_index;
+	_real_left_size++;
+      }
+      else {
+	inSlice[vertex]=_right_index;
+	_real_right_size++;
+      }
+
+      int *edge=graph.getEdges(vertex).begin();
+      while(edge != graph.getEdges(vertex).end()) {
+
+	if(inSlice[*edge]==_part &&
+	   !_locked[sliceIndex[*edge]]) {
+
+	  _left_stack.push_back(*edge);
+	  _left_rank.push_back(rank+1);
+
+	  _locked[sliceIndex[*edge]]=true;
+
+	}
+
+	edge++;
+      }
+    }
+
+    //next slices
+    max_rank++;
+  }
+}
+
+//-------------------------------------------------------
+//Optimizers
+//-------------------------------------------------------
+
+int Partitioner::
+FM_MinCut_optimize(int _left_index, int _left_size,
+		   int _right_index, int _right_size,
+		   std::vector<bool> &_locked, int _pmax, int _tolerance)
+{
+  std::vector<int> gain(_left_size+_right_size, 0);
+
+  std::vector<list<int>::iterator> vertex_list(_left_size+_right_size);
+  std::vector<bucket_list> bucket(2, bucket_list(-_pmax, _pmax,
+						 vertex_list, sliceIndex));
+
+  std::vector<int> change_buffer;
+  int size[2];
+  int part_index[2];
+
+  part_index[0]=_left_index;
+  part_index[1]=_right_index;
+
+  //relative sizes and tolerances
+
+  size[0] = -_left_size;
+  size[1] = -_right_size;
+
+  int tolerance=std::max(1, _tolerance);
+  //In order for Fiduccia-Mattheyses to work,
+  //we may at least allow an imbalance of 1 vertex
+
+#if DEBUG_LEVEL>=3
+  cout << _left_size << " " << _right_size << " " << parts[_left_index].size() << endl;
+#endif
+
+  //Initialize gain values
+
+  //first left part
+  for(size_t ivertex=0; ivertex < parts[_left_index].size(); ivertex++) {
+    int index, part_index;
+    int vertex=parts[_left_index][ivertex];
+
+    if(inSlice[vertex] == _left_index) {
+      part_index=_left_index;
+      index=0;
+    }
+    else {
+      part_index=_right_index;
+      index=1;
+    }
+
+    //Update the partition size
+    size[index]++;
+
+    //For all the free vertices we go through
+
+    //all the edges to compute the gain
+    if(!_locked[sliceIndex[vertex]]) {
+      int *edge=graph.getEdges(vertex).begin();
+
+      while(edge != graph.getEdges(vertex).end()) {
+	if(inSlice[*edge]!=part_index) {
+	  gain[sliceIndex[vertex]]++;
+	}
+	else {
+	  gain[sliceIndex[vertex]]--;
+	}
+
+	edge++;
+      }
+
+      //Finally add the vertex to the bucket list
+      bucket[index].push_back(gain[sliceIndex[vertex]], vertex);
+    }
+  }
+
+#if DEBUG_LEVEL>=3
+  cout << size[0] << " " << size[1] << endl;
+#endif
+
+  //characteristics of the best partition
+  int relative_gain=0;
+
+  int old_size=abs(size[0]);
+  //int old_gain not needed, -> relative gain!
+
+  int best_gain=0;
+  int best_size=abs(size[0]);
+
+  int best_move=0;
+
+  while(true) {
+    //Choose the cell that should be moved
+    int from, to;
+
+    int cur_gain[2];
+
+    cur_gain[0]=bucket[0].max_key();
+    cur_gain[1]=bucket[1].max_key();
+
+    //Only allow moves that improve or maintain balance:
+    //check if partition is imbalanced or could be imbalanced
+    //at the next move
+    //(Note: it doesn't matter whether we compare size[0] or
+    // size[1] with tolerance, since size[0]+size[1]=const
+    if(abs(size[0])>=tolerance) {
+      if(size[0] > size[1]) {
+	cur_gain[1]=-_pmax-1;
+      }
+      else {
+	cur_gain[0]=-_pmax-1;
+      }
+    }
+
+    //Choose the cell with the largest gain
+    //(Note: moves that could cause imbalance
+    // are prevented by the previous checks)
+    if(cur_gain[0] != cur_gain[1]) {
+      from=(cur_gain[0] > cur_gain[1]) ? 0 : 1;
+      to=(cur_gain[0] > cur_gain[1]) ? 1 : 0;
+    }
+    //if the gains are equal, check if no further
+    //moves are possible, otherwise choose
+    //the move that improves balance, if it doesn't matter
+    //take the left slice
+    else {
+      if(cur_gain[0] > -_pmax-1) {
+	from=(size[0] >= size[1]) ? 0 : 1;
+	to=(size[0] >= size[1]) ? 1 : 0;
+      }
+      else {
+	//no further moves are possible
+
+	break;
+      }
+    }
+
+    //Remove the vertex and adjust partition size
+    int vertex=bucket[from].front();
+    bucket[from].pop_front();
+
+    _locked[sliceIndex[vertex]]=true;
+    inSlice[vertex]=part_index[to];
+
+    size[from]--;
+    size[to]++;
+
+    relative_gain+=cur_gain[from];
+    //    relative_gain+=gain[sliceIndex[vertex]];
+
+    change_buffer.push_back(vertex);
+
+    //cout << "Moving vertex " << vertex
+    //    << " with gain " << cur_gain[from] << endl;
+
+    //update gains and adjust bucket structure
+    int *edge=graph.getEdges(vertex).begin();
+    while(edge != graph.getEdges(vertex).end()) {
+
+      if(inSlice[*edge]==part_index[from]) {
+	if(!_locked[sliceIndex[*edge]]) {
+	  int old_gain=gain[sliceIndex[*edge]];
+
+	  gain[sliceIndex[*edge]]+=2;
+
+	  bucket[from].rearrange_back(old_gain, old_gain+2, *edge);
+	}
+      }
+      else if(inSlice[*edge]==part_index[to]) {
+	if(!_locked[sliceIndex[*edge]]) {
+	  int old_gain=gain[sliceIndex[*edge]];
+
+	  gain[sliceIndex[*edge]]-=2;
+
+	  bucket[to].rearrange_back(old_gain, old_gain-2, *edge);
+	}
+      }
+      edge++;
+    }
+
+
+    //Have we found a better partition
+    if(relative_gain > best_gain ||
+       (relative_gain == best_gain && best_size > abs(size[0]))) {
+      best_gain=relative_gain;
+      best_size=abs(size[0]);
+
+      best_move=change_buffer.size();
+    }
+
+  }
+
+  //Undo all the changes that did not improve the
+  //bisection any more:
+  for(size_t ivertex=best_move; ivertex < change_buffer.size(); ivertex++) {
+    int vertex=change_buffer[ivertex];
+
+    if(inSlice[vertex] == _left_index) {
+      inSlice[vertex]=_right_index;
+    }
+    else {
+      inSlice[vertex]=_left_index;
+    }
+  }
+
+  if(best_move == 0) {
+    return 0; //no improvements possible
+  }
+  else {
+    if(best_gain > 0 ||
+       (best_gain == 0 && best_size < old_size) ){
+      return 2; //definite improvement
+    }
+    else {
+      return 1; //found a partition with the same properties as before
+                //(needed to get out of local minima)
+    }
+  }
+}
+
+
+//----------------
+
+int Partitioner::
+FM_MinNetCut_optimize(int _left_index, int _left_size,
+		      int _right_index, int _right_size,
+		      std::vector<bool> &_locked, int _pmax, int _tolerance)
+{
+#if DEBUG_LEVEL>=3
+  cout << "In FM MinNetCut" << endl;
+#endif
+
+  std::vector<int> net_gain(_left_size+_right_size, 0);
+  std::vector<int> edge_gain(_left_size+_right_size, 0);
+
+  std::vector<int> net_distribution[2];
+
+  std::vector<list<int>::iterator> vertex_list(_left_size+_right_size);
+  std::vector<bucket_list> bucket(2, bucket_list(-_pmax, _pmax,
+						 vertex_list, sliceIndex));
+
+  std::vector<int> change_buffer;
+  int size[2];
+  int part_index[2];
+
+  net_distribution[0].resize(_left_size+_right_size);
+  net_distribution[1].resize(_left_size+_right_size);
+
+  part_index[0]=_left_index;
+  part_index[1]=_right_index;
+
+  //relative sizes and tolerances
+
+  size[0] = -_left_size;
+  size[1] = -_right_size;
+
+  int tolerance=std::max(1, _tolerance);
+  //In order for Fiduccia-Mattheyses to work,
+  //we may at least allow an imbalance of 1 vertex
+
+#if DEBUG_LEVEL>=3
+  cout << _left_size << " " << _right_size << " " << parts[_left_index].size() << endl;
+#endif
+
+  //Initialize the distribution of the nets
+  for(size_t ivertex=0; ivertex < parts[_left_index].size(); ivertex++) {
+    int vertex=parts[_left_index][ivertex];
+
+    net_distribution[0][sliceIndex[vertex]]=0;
+    net_distribution[1][sliceIndex[vertex]]=0;
+
+    //the net includes the node itself ...
+    if(inSlice[vertex] == _left_index) {
+      net_distribution[0][sliceIndex[vertex]]++;
+    }
+    else {
+      net_distribution[1][sliceIndex[vertex]]++;
+    }
+
+    //and it's nearest neighbours
+    int *edge=graph.getEdges(vertex).begin();
+
+    while(edge != graph.getEdges(vertex).end()) {
+
+      if(inSlice[*edge] <= _left_index) {
+	net_distribution[0][sliceIndex[vertex]]++;
+      }
+      else {
+	net_distribution[1][sliceIndex[vertex]]++;
+      }
+
+      edge++;
+    }
+  }
+
+  //Initialize gain values
+  for(size_t ivertex=0; ivertex < parts[_left_index].size(); ivertex++) {
+    int index, part_index;
+    int vertex=parts[_left_index][ivertex];
+    int from,to;
+
+    if(inSlice[vertex] == _left_index) {
+      part_index=_left_index;
+      index=0;
+      from=0;to=1;
+    }
+    else {
+      part_index=_right_index;
+      index=1;
+      from=1;to=0;
+    }
+
+    //Update the partition size
+    size[index]++;
+
+    //For all the free vertices we go through
+
+    //all the edges to compute the gain
+    if(!_locked[sliceIndex[vertex]]) {
+      int *edge=graph.getEdges(vertex).begin();
+
+      while(edge != graph.getEdges(vertex).end()) {
+
+	//Update the gain with regard to cut edges
+	if(inSlice[*edge]!=part_index) {
+	  edge_gain[sliceIndex[vertex]]++;
+	}
+	else {
+	  edge_gain[sliceIndex[vertex]]--;	}
+
+	//and with regard to cut nets
+	if(net_distribution[from][sliceIndex[*edge]] == 1) {
+	  net_gain[sliceIndex[vertex]]++;
+	}
+	else if(net_distribution[to][sliceIndex[*edge]] == 0) {
+	  net_gain[sliceIndex[vertex]]--;
+	}
+
+	edge++;
+      }
+
+      //Finally add the vertex to the bucket list
+      bucket[index].push_randomly(net_gain[sliceIndex[vertex]], vertex);
+
+    }
+  }
+
+#if DEBUG_LEVEL>=3
+  cout << size[0] << " " << size[1] << endl;
+#endif
+
+  //characteristics of the best partition
+  int relative_gain=0;
+  int relative_edge_gain=0;
+
+  int old_size=abs(size[0]);
+  //int old_gains not needed -> relative gains!
+
+  int best_gain=0;
+  int best_edge_gain=0;
+  int best_size=abs(size[0]);
+
+  int best_move=0;
+
+  while(true) {
+    //Choose the cell that should be moved
+    int from, to;
+
+    int cur_gain[2];
+
+    cur_gain[0]=bucket[0].max_key();
+    cur_gain[1]=bucket[1].max_key();
+
+    //Only allow moves that improve or maintain balance:
+    //check if partition is imbalanced or could be imbalanced
+    //at the next move
+    //(Note: it doesn't matter whether we compare size[0] or
+    // size[1] with tolerance, since size[0]+size[1]=const
+    if(abs(size[0])>=tolerance) {
+      if(size[0] > size[1]) {
+	cur_gain[1]=-_pmax-1;
+      }
+      else {
+	cur_gain[0]=-_pmax-1;
+      }
+    }
+
+    //Choose the cell with the largest gain
+    //(Note: moves that could cause imbalance
+    // are prevented by the previous checks)
+    if(cur_gain[0] != cur_gain[1]) {
+      from=(cur_gain[0] > cur_gain[1]) ? 0 : 1;
+      to=(cur_gain[0] > cur_gain[1]) ? 1 : 0;
+    }
+    //if the gains are equal, check if no further
+    //moves are possible, otherwise choose
+    //the move that improves balance, if it doesn't matter
+    //take the left slice
+    else {
+      if(cur_gain[0] > -_pmax-1) {
+	from=(size[0] >= size[1]) ? 0 : 1;
+	to=(size[0] >= size[1]) ? 1 : 0;
+      }
+      else {
+	//no further moves are possible
+
+	break;
+      }
+    }
+
+    //Remove the vertex and adjust partition size
+    int vertex=bucket[from].front();
+    bucket[from].pop_front();
+
+    _locked[sliceIndex[vertex]]=true;
+    inSlice[vertex]=part_index[to];
+
+    size[from]--;
+    size[to]++;
+
+    relative_gain+=cur_gain[from];
+    relative_edge_gain+=edge_gain[sliceIndex[vertex]];
+    change_buffer.push_back(vertex);
+
+    //update gains and adjust bucket structure
+    int *edge=graph.getEdges(vertex).begin();
+    while(edge != graph.getEdges(vertex).end()) {
+
+      //----------------
+      //update net gains
+      //----------------
+
+      if(net_distribution[to][sliceIndex[*edge]] == 0) {
+	if(!_locked[sliceIndex[*edge]]) {
+	  int old_gain=net_gain[sliceIndex[*edge]]++;
+
+	  //Note: all the vertices are in the to part
+	  bucket[from].rearrange_randomly(old_gain, old_gain+1, *edge);
+	}
+
+	int *edgedge=graph.getEdges(*edge).begin();
+	while(edgedge != graph.getEdges(*edge).end()) {
+	  if(inSlice[*edgedge]!=_left_index &&
+	     inSlice[*edgedge]!=_right_index) {
+	    edgedge++;
+	    continue;
+	  }
+
+	  if(!_locked[sliceIndex[*edgedge]]) {
+	    int old_gain=net_gain[sliceIndex[*edgedge]]++;
+
+	    //Note: all the vertices are in the from part
+	    bucket[from].rearrange_randomly(old_gain, old_gain+1, *edgedge);
+	  }
+	  edgedge++;
+	}
+      }
+      else if(net_distribution[to][sliceIndex[*edge]] == 1) {
+	if(inSlice[*edge]==part_index[to] && !_locked[sliceIndex[*edge]]) {
+	  int old_gain=net_gain[sliceIndex[*edge]]--;
+
+	  //Note: all the vertices are in the to part
+	  bucket[to].rearrange_randomly(old_gain, old_gain-1, *edge);
+
+	}
+	int *edgedge=graph.getEdges(*edge).begin();
+	while(edgedge != graph.getEdges(*edge).end()) {
+	  if(inSlice[*edgedge]!=_left_index &&
+	     inSlice[*edgedge]!=_right_index) {
+	    edgedge++;
+	    continue;
+	  }
+
+	  if(inSlice[*edgedge]==part_index[to] && !_locked[sliceIndex[*edgedge]]) {
+	    int old_gain=net_gain[sliceIndex[*edgedge]]--;
+
+	    bucket[to].rearrange_randomly(old_gain, old_gain-1, *edgedge);
+
+	    break; //there is only one vertex in the to part
+	    //(well, it's two after the move, nut the moved vertex is locked)
+	  }
+
+	  edgedge++;
+	}
+      }
+
+      net_distribution[from][sliceIndex[*edge]]--;
+      net_distribution[to][sliceIndex[*edge]]++;
+
+      if(net_distribution[from][sliceIndex[*edge]] == 0) {
+	if(!_locked[sliceIndex[*edge]]) {
+	  int old_gain=net_gain[sliceIndex[*edge]]--;
+
+	  //Note: all the vertices are in the to part
+	  bucket[to].rearrange_randomly(old_gain, old_gain-1, *edge);
+	}
+
+	int *edgedge=graph.getEdges(*edge).begin();
+	while(edgedge != graph.getEdges(*edge).end()) {
+	  if(inSlice[*edgedge]!=_left_index &&
+	     inSlice[*edgedge]!=_right_index) {
+	    edgedge++;
+	    continue;
+	  }
+
+	  if(!_locked[sliceIndex[*edgedge]]) {
+	    int old_gain=net_gain[sliceIndex[*edgedge]]--;
+
+	    bucket[to].rearrange_randomly(old_gain, old_gain-1, *edgedge);
+	  }
+	  edgedge++;
+	}
+
+      }
+      else if(net_distribution[from][sliceIndex[*edge]] == 1) {
+	if(inSlice[*edge]==part_index[from] && !_locked[sliceIndex[*edge]]) {
+	  int old_gain=net_gain[sliceIndex[*edge]]++;
+
+	  bucket[from].rearrange_randomly(old_gain, old_gain+1, *edge);
+	}
+
+	int *edgedge=graph.getEdges(*edge).begin();
+	while(edgedge != graph.getEdges(*edge).end()) {
+	  if(inSlice[*edgedge]!=_left_index &&
+	     inSlice[*edgedge]!=_right_index) {
+	    edgedge++;
+	    continue;
+	  }
+
+	  if(inSlice[*edgedge]==part_index[from] && !_locked[sliceIndex[*edgedge]]) {
+	    int old_gain=net_gain[sliceIndex[*edgedge]]++;
+
+	    bucket[from].rearrange_randomly(old_gain, old_gain+1, *edgedge);
+	    break; //there is only one vertex in the from part
+	    //(the other one has been moved)
+	  }
+
+	  edgedge++;
+	}
+      }
+
+      //-----------------
+      //update edge gains
+      //-----------------
+
+      if(inSlice[*edge]==part_index[from]) {
+	if(!_locked[sliceIndex[*edge]]) {
+	  edge_gain[sliceIndex[*edge]]+=2;
+	}
+      }
+      else if(inSlice[*edge]==part_index[to]) {
+	if(!_locked[sliceIndex[*edge]]) {
+	  edge_gain[sliceIndex[*edge]]-=2;
+	}
+      }
+
+      edge++;
+    }
+
+    //Have we found a better partition
+    if(relative_gain > best_gain ||
+       (relative_gain == best_gain && best_size >= abs(size[0]) ) ||
+       (relative_gain == best_gain && best_size == abs(size[0]) && relative_edge_gain>=best_edge_gain) ) {
+      best_gain=relative_gain;
+      best_edge_gain=relative_edge_gain;
+      best_size=abs(size[0]);
+
+      best_move=change_buffer.size();
+    }
+
+  }
+
+  //Undo all the changes that did not improve the
+  //bisection any more:
+  for(size_t ivertex=best_move; ivertex < change_buffer.size(); ivertex++) {
+    int vertex=change_buffer[ivertex];
+
+    if(inSlice[vertex] == _left_index) {
+      inSlice[vertex]=_right_index;
+    }
+    else {
+      inSlice[vertex]=_left_index;
+    }
+  }
+
+  if(best_move == 0) {
+    return 0; //no improvements possible
+  }
+  else {
+    if(best_gain > 0 ||
+       (best_gain == 0 && best_size < old_size) ||
+       (best_gain == 0 && best_size == old_size && best_edge_gain > 0) ){
+      return 2; //definite improvement
+    }
+    else {
+      return 1; //found a partition with the same properties as before
+                //(needed to get out of local minima)
+    }
+  }
+}
+
+//----------------
+
+int Partitioner::
+FM_MinNetCutMinCut_optimize(int _left_index, int _left_size,
+			    int _right_index, int _right_size,
+			    std::vector<bool> &_locked, int _pmax, int _tolerance)
+{
+#if DEBUG_LEVEL>=3
+  cout << "In FM MinNetCut_MinCut" << endl;
+#endif
+
+  std::vector<int> net_gain(_left_size+_right_size, 0);
+  std::vector<int> edge_gain(_left_size+_right_size, 0);
+
+  std::vector<int> net_distribution[2];
+
+  std::vector<list<int>::iterator> vertex_list(_left_size+_right_size);
+  std::vector<double_bucket_list> bucket(2, double_bucket_list(-_pmax, _pmax,
+							       -_pmax, _pmax,
+							       vertex_list, sliceIndex));
+
+  std::vector<int> change_buffer;
+  int size[2];
+  int part_index[2];
+
+  net_distribution[0].resize(_left_size+_right_size);
+  net_distribution[1].resize(_left_size+_right_size);
+
+  part_index[0]=_left_index;
+  part_index[1]=_right_index;
+
+  //relative sizes and tolerances
+
+  size[0] = -_left_size;
+  size[1] = -_right_size;
+
+  int tolerance=std::max(1, _tolerance);
+  //In order for Fiduccia-Mattheyses to work,
+  //we may at least allow an imbalance of 1 vertex
+
+#if DEBUG_LEVEL>=3
+  cout << _left_size << " " << _right_size << " " << parts[_left_index].size() << endl;
+#endif
+
+  //Initialize the distribution of the nets
+  for(size_t ivertex=0; ivertex < parts[_left_index].size(); ivertex++) {
+    int vertex=parts[_left_index][ivertex];
+
+    net_distribution[0][sliceIndex[vertex]]=0;
+    net_distribution[1][sliceIndex[vertex]]=0;
+
+    //the net includes the node itself ...
+    if(inSlice[vertex] == _left_index) {
+      net_distribution[0][sliceIndex[vertex]]++;
+    }
+    else {
+      net_distribution[1][sliceIndex[vertex]]++;
+    }
+
+    //and it's nearest neighbours
+    int *edge=graph.getEdges(vertex).begin();
+
+    while(edge != graph.getEdges(vertex).end()) {
+
+      if(inSlice[*edge] <= _left_index) {
+	net_distribution[0][sliceIndex[vertex]]++;
+      }
+      else {
+	net_distribution[1][sliceIndex[vertex]]++;
+      }
+
+      edge++;
+    }
+  }
+
+  //Initialize gain values
+  for(size_t ivertex=0; ivertex < parts[_left_index].size(); ivertex++) {
+    int index, part_index;
+    int vertex=parts[_left_index][ivertex];
+    int from,to;
+
+    if(inSlice[vertex] == _left_index) {
+      part_index=_left_index;
+      index=0;
+      from=0;to=1;
+    }
+    else {
+      part_index=_right_index;
+      index=1;
+      from=1;to=0;
+    }
+
+    //Update the partition size
+    size[index]++;
+
+    //For all the free vertices we go through
+
+    //all the edges to compute the gain
+    if(!_locked[sliceIndex[vertex]]) {
+      int *edge=graph.getEdges(vertex).begin();
+
+      while(edge != graph.getEdges(vertex).end()) {
+
+	//Update the gain with regard to cut edges
+	if(inSlice[*edge]!=part_index) {
+	  edge_gain[sliceIndex[vertex]]++;
+	}
+	else {
+	  edge_gain[sliceIndex[vertex]]--;
+	}
+
+	//and with regard to cut nets
+	if(net_distribution[from][sliceIndex[*edge]] == 1) {
+	  net_gain[sliceIndex[vertex]]++;
+	}
+	else if(net_distribution[to][sliceIndex[*edge]] == 0) {
+	  net_gain[sliceIndex[vertex]]--;
+	}
+
+	edge++;
+      }
+
+      //Finally add the vertex to the bucket list
+      bucket[index].push_randomly(net_gain[sliceIndex[vertex]],
+				  edge_gain[sliceIndex[vertex]], vertex);
+    }
+  }
+
+#if DEBUG_LEVEL>=3
+  cout << "deviation: " << size[0] << " " << size[1] << endl;
+#endif
+
+  //characteristics of the best partition
+  int relative_gain=0;
+  int relative_edge_gain=0;
+
+  int old_size=abs(size[0]);
+  //int old_gains not needed -> relative gains!
+
+  int best_gain=0;
+  int best_edge_gain=0;
+  int best_size=abs(size[0]);
+
+  int best_move=0;
+
+  while(true) {
+    //Choose the cell that should be moved
+    int from, to;
+
+    int cur_net_gain[2];
+
+    cur_net_gain[0]=bucket[0].max_key().first;
+    cur_net_gain[1]=bucket[1].max_key().first;
+
+    //Only allow moves that improve or maintain balance:
+    //check if partition is imbalanced or could be imbalanced
+    //at the next move
+    //(Note: it doesn't matter whether we compare size[0] or
+    // size[1] with tolerance, since size[0]+size[1]=const
+    if(abs(size[0])>=tolerance) {
+      if(size[0] > size[1]) {
+	cur_net_gain[1]=-_pmax-1;
+      }
+      else {
+	cur_net_gain[0]=-_pmax-1;
+      }
+    }
+
+    //Choose the cell with the largest gain
+    //(Note: moves that could cause imbalance
+    // are prevented by the previous checks)
+    if(cur_net_gain[0] != cur_net_gain[1]) {
+      from=(cur_net_gain[0] > cur_net_gain[1]) ? 0 : 1;
+      to=(cur_net_gain[0] > cur_net_gain[1]) ? 1 : 0;
+    }
+    //if the gains are equal, check if no further
+    //moves are possible, otherwise choose
+    //the move that improves balance, if it doesn't matter
+    //take the left slice
+    else {
+      if(cur_net_gain[0] > -_pmax-1) {
+	from=(size[0] >= size[1]) ? 0 : 1;
+	to=(size[0] >= size[1]) ? 1 : 0;
+      }
+      else {
+	//no further moves are possible
+
+	break;
+      }
+    }
+
+    //Remove the vertex and adjust partition size
+    int vertex=bucket[from].front();
+    bucket[from].pop_front();
+
+    _locked[sliceIndex[vertex]]=true;
+    inSlice[vertex]=part_index[to];
+
+    size[from]--;
+    size[to]++;
+
+    relative_gain+=cur_net_gain[from];
+    relative_edge_gain+=bucket[from].max_key().second;
+
+    change_buffer.push_back(vertex);
+
+    //update gains and adjust bucket structure
+    int *edge=graph.getEdges(vertex).begin();
+    while(edge != graph.getEdges(vertex).end()) {
+
+      //-----------------
+      //update edge gains
+      //-----------------
+      int old_edge_gain=edge_gain[sliceIndex[*edge]];
+
+      if(inSlice[*edge]==part_index[from]) {
+	if(!_locked[sliceIndex[*edge]]) {
+	  edge_gain[sliceIndex[*edge]]+=2;
+
+	  bucket[from].rearrange_randomly(net_gain[sliceIndex[*edge]], old_edge_gain,
+					  net_gain[sliceIndex[*edge]], old_edge_gain+2,
+					  *edge);
+	}
+      }
+      else if(inSlice[*edge]==part_index[to]) {
+	if(!_locked[sliceIndex[*edge]]) {
+	  edge_gain[sliceIndex[*edge]]-=2;
+
+	  bucket[to].rearrange_randomly(net_gain[sliceIndex[*edge]], old_edge_gain,
+					net_gain[sliceIndex[*edge]], old_edge_gain-2,
+					*edge);
+	}
+      }
+
+      //----------------
+      //update net gains
+      //----------------
+
+      if(net_distribution[to][sliceIndex[*edge]] == 0) {
+	if(!_locked[sliceIndex[*edge]]) {
+
+	  int old_gain=net_gain[sliceIndex[*edge]]++;
+
+	  //Note: all the vertices are in the from part
+	  bucket[from].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edge]],
+					  old_gain+1, edge_gain[sliceIndex[*edge]],
+					  *edge);
+	}
+
+	int *edgedge=graph.getEdges(*edge).begin();
+	while(edgedge != graph.getEdges(*edge).end()) {
+	  if(inSlice[*edgedge]!=_left_index &&
+	     inSlice[*edgedge]!=_right_index) {
+	    edgedge++;
+	    continue;
+	  }
+
+	  if(!_locked[sliceIndex[*edgedge]]) {
+	    int old_gain=net_gain[sliceIndex[*edgedge]]++;
+
+	    //Note: all the vertices are in the from part
+	    bucket[from].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edgedge]],
+					    old_gain+1, edge_gain[sliceIndex[*edgedge]],
+					    *edgedge);
+	  }
+	  edgedge++;
+	}
+      }
+      else if(net_distribution[to][sliceIndex[*edge]] == 1) {
+	if(inSlice[*edge]==part_index[to] && !_locked[sliceIndex[*edge]]) {
+	  int old_gain=net_gain[sliceIndex[*edge]]--;
+
+	  //Note: all the vertices are in the to part
+	  bucket[to].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edge]],
+					old_gain-1, edge_gain[sliceIndex[*edge]],
+					*edge);
+
+	}
+	int *edgedge=graph.getEdges(*edge).begin();
+	while(edgedge != graph.getEdges(*edge).end()) {
+	  if(inSlice[*edgedge]!=_left_index &&
+	     inSlice[*edgedge]!=_right_index) {
+	    edgedge++;
+	    continue;
+	  }
+
+	  if(inSlice[*edgedge]==part_index[to] && !_locked[sliceIndex[*edgedge]]) {
+	    int old_gain=net_gain[sliceIndex[*edgedge]]--;
+
+	    bucket[to].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edgedge]],
+					  old_gain-1, edge_gain[sliceIndex[*edgedge]],
+					  *edgedge);
+
+	    break; //there is only one vertex in the to part
+	    //(well, it's two after the move, nut the moved vertex is locked)
+	  }
+
+	  edgedge++;
+	}
+      }
+
+      net_distribution[from][sliceIndex[*edge]]--;
+      net_distribution[to][sliceIndex[*edge]]++;
+
+      if(net_distribution[from][sliceIndex[*edge]] == 0) {
+	if(!_locked[sliceIndex[*edge]]) {
+	  int old_gain=net_gain[sliceIndex[*edge]]--;
+
+	  //Note: all the vertices are in the to part
+	  bucket[to].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edge]],
+					old_gain-1, edge_gain[sliceIndex[*edge]],
+					*edge);
+	}
+
+	int *edgedge=graph.getEdges(*edge).begin();
+	while(edgedge != graph.getEdges(*edge).end()) {
+	  if(inSlice[*edgedge]!=_left_index &&
+	     inSlice[*edgedge]!=_right_index) {
+	    edgedge++;
+	    continue;
+	  }
+	  if(!_locked[sliceIndex[*edgedge]]) {
+	    int old_gain=net_gain[sliceIndex[*edgedge]]--;
+
+	    bucket[to].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edgedge]],
+					  old_gain-1, edge_gain[sliceIndex[*edgedge]],
+					  *edgedge);
+	  }
+	  edgedge++;
+	}
+
+      }
+      else if(net_distribution[from][sliceIndex[*edge]] == 1) {
+	if(inSlice[*edge]==part_index[from] && !_locked[sliceIndex[*edge]]) {
+	  int old_gain=net_gain[sliceIndex[*edge]]++;
+
+	  bucket[from].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edge]],
+					  old_gain+1, edge_gain[sliceIndex[*edge]],
+					  *edge);
+	}
+
+	int *edgedge=graph.getEdges(*edge).begin();
+	while(edgedge != graph.getEdges(*edge).end()) {
+	  if(inSlice[*edgedge]!=_left_index &&
+	     inSlice[*edgedge]!=_right_index) {
+	    edgedge++;
+	    continue;
+	  }
+
+	  if(inSlice[*edgedge]==part_index[from] && !_locked[sliceIndex[*edgedge]]) {
+	    int old_gain=net_gain[sliceIndex[*edgedge]]++;
+
+	    bucket[from].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edgedge]],
+					    old_gain+1, edge_gain[sliceIndex[*edgedge]],
+					    *edgedge);
+	    break; //there is only one vertex in the from part
+	    //(the other one has been moved)
+	  }
+
+	  edgedge++;
+	}
+      }
+
+      edge++;
+    }
+
+    //Have we found a better partition
+    if(relative_gain > best_gain ||
+       (relative_gain == best_gain && relative_edge_gain >= best_edge_gain) ||
+       (relative_gain == best_gain && relative_edge_gain==best_edge_gain && best_size >= abs(size[0]))) {
+      best_gain=relative_gain;
+      best_edge_gain=relative_edge_gain;
+      best_size=abs(size[0]);
+
+      best_move=change_buffer.size();
+    }
+
+  }
+
+#if DEBUG_LEVEL>2
+  cout << "best_move: " << best_move << endl;
+  cout << "best gain: " << best_gain << endl;
+  cout << "best size: " << best_size << endl;
+#endif
+
+  //Undo all the changes that did not improve the
+  //bisection any more:
+  for(size_t ivertex=best_move; ivertex < change_buffer.size(); ivertex++) {
+    int vertex=change_buffer[ivertex];
+
+    if(inSlice[vertex] == _left_index) {
+      inSlice[vertex]=_right_index;
+    }
+    else {
+      inSlice[vertex]=_left_index;
+    }
+  }
+
+  if(best_move == 0) {
+    return 0; //no improvements possible
+  }
+  else {
+    if(best_gain > 0 ||
+       (best_gain == 0 && best_size < old_size) ||
+       (best_gain == 0 && best_size == old_size && best_edge_gain > 0) ) {
+      return 2; //definite improvement
+    }
+    else {
+      return 1; //found a partition with the same properties as before
+                //(needed to get out of local minima)
+    }
+  }
+}
diff --git a/kwant/graph/c_slicer/partitioner.h b/kwant/graph/c_slicer/partitioner.h
new file mode 100644
index 0000000000000000000000000000000000000000..e742a68650c2cf54040144d035b14bc26a23471d
--- /dev/null
+++ b/kwant/graph/c_slicer/partitioner.h
@@ -0,0 +1,183 @@
+//-*-C++-*-
+#ifndef _BLOCK_TRIDIAGONAL_PARTITIONER_H
+#define _BLOCK_TRIDIAGONAL_PARTITIONER_H
+
+#include <vector>
+#include <deque>
+#include <cmath>
+#include <algorithm>
+
+#include "graphwrap.h"
+#include "bucket_list.h"
+//#include "graphalgorithms.h"
+
+/** @short the first graph-partitioner
+ ** *******************************************/
+class Partitioner
+{
+public:
+  enum Distributors {
+    Distribute_Natural_Unbalanced,
+    Distribute_Natural_Balanced,
+    Distribute_Randomly };
+
+  enum Optimizers {
+    Optimize_No,
+    Optimize_FM_MinCut,
+    Optimize_FM_MinNetCut,
+    Optimize_FM_MinNetCutMinCut } ;
+
+public:
+  const GraphWrapper &graph;
+
+  vector<vector<int> > parts;
+
+  vector<int> inSlice;
+  vector<int> sliceIndex;
+
+  enum Distributors distributor;
+  enum Optimizers optimizer;
+
+public:
+  /** @param _graph           the underlying grid
+   ** @param _tolerance
+   ** @param _min_opt_passes
+   ** @param _max_opt_passes
+   ** @param _distributor
+   ** @param _optimizer
+   ** @param _mode            default is TwoBlocks, OneBlock is not implemented yet
+   ** Default behaviour: TwoBlocks, left lead = 1, right lead = 2
+   ** **************************************************************/
+  template<typename Grid>
+  Partitioner( const Grid  &_graph,
+	       std::vector<int> &_left,
+	       std::vector<int> &_right,
+	       double       _tolerance = 0.01,
+	       int          _min_opt_passes = 10,
+	       int          _max_opt_passes = 10,
+	       Distributors _distributor = Distribute_Natural_Balanced,
+	       Optimizers   _optimizer   = Optimize_FM_MinNetCutMinCut) :
+    graph(_graph), parts(1, vector<int>(0)),
+    inSlice(_graph.size(),-1), sliceIndex(_graph.size(),0),
+    distributor(_distributor), optimizer(_optimizer)
+  {
+    parts[0].reserve(_graph.size());
+
+    for(int i=0; i<graph.size(); i++) {
+      parts[0].push_back(i);
+
+      inSlice[i]=0;
+      sliceIndex[i]=parts[0].size()-1;
+    }
+
+    bisectFirst(_left, _right, _tolerance, _min_opt_passes, _max_opt_passes);
+  }
+
+private:
+  /** @short first bisection? **/
+  void bisectFirst( std::vector<int> &, std::vector<int> &, double _tolerance,
+		    int _min_opt_passes, int _max_opt_passes);
+
+  void bisect(int _part, int _part_slices,
+	      std::vector<int> &, std::vector<int> &,
+	      int _max_edges, double _tolerance,
+	      int _min_opt_passes, int _max_opt_passes);
+
+  //interface function that chooses between the different Optimizers
+  inline int optimize(int _left_index, int _left_size,
+		      int _right_index, int _right_size,
+		      std::vector<bool> &_locked, int _pmax, int _tolerance)
+  {
+    if(optimizer == Optimize_FM_MinCut) {
+      return FM_MinCut_optimize(_left_index, _left_size,
+				_right_index, _right_size,
+				_locked, _pmax, _tolerance);
+    }
+    else if(optimizer == Optimize_FM_MinNetCut) {
+      return FM_MinNetCut_optimize(_left_index, _left_size,
+				   _right_index, _right_size,
+				   _locked, _pmax, _tolerance);
+    }
+    else if(optimizer == Optimize_FM_MinNetCutMinCut) {
+      return FM_MinNetCutMinCut_optimize(_left_index, _left_size,
+					 _right_index, _right_size,
+					 _locked, _pmax, _tolerance);
+    }
+    else {
+      return 0;
+    }
+  }
+
+  //
+  inline void distribute(std::deque<int> &_left_stack, std::deque<int> &_left_rank,
+			 int _left_index, int _left_size, int &_real_left_size,
+			 std::deque<int> &_right_stack, std::deque<int> &_right_rank,
+			 int _right_index, int _right_size, int &_real_right_size,
+			 int _part, std::vector<bool> &_locked,
+			 int _current_rank)
+  {
+    if(distributor == Distribute_Natural_Balanced) {
+      NaturalBalanced_distribute(_left_stack, _left_rank,
+				 _left_index, _left_size, _real_left_size,
+				 _right_stack, _right_rank,
+				 _right_index, _right_size, _real_right_size,
+				 _part, _locked, _current_rank);
+    }
+    else if(distributor == Distribute_Natural_Unbalanced) {
+      NaturalUnbalanced_distribute(_left_stack, _left_rank,
+				   _left_index, _left_size, _real_left_size,
+				   _right_stack, _right_rank,
+				   _right_index, _right_size, _real_right_size,
+				   _part, _locked, _current_rank);
+    }
+    else if(distributor == Distribute_Randomly) {
+      Random_distribute(_left_stack, _left_rank,
+			_left_index, _left_size, _real_left_size,
+			_right_stack, _right_rank,
+			_right_index, _right_size, _real_right_size,
+			_part, _locked, _current_rank);
+    }
+  }
+
+
+  //The inital distribution
+  void NaturalBalanced_distribute(std::deque<int> &_left_stack, std::deque<int> &_left_rank,
+				  int _left_index, int _left_size, int &_real_left_size,
+				  std::deque<int> &_right_stack, std::deque<int> &_right_rank,
+				  int _right_index, int _right_size, int &_real_right_size,
+				  int _part,
+				  std::vector<bool> &_locked,
+				  int _current_rank);
+
+  void NaturalUnbalanced_distribute(std::deque<int> &_left_stack, std::deque<int> &_left_rank,
+				    int _left_index, int _left_size, int &_real_left_size,
+				    std::deque<int> &_right_stack, std::deque<int> &_right_rank,
+				    int _right_index, int _right_size, int &_real_right_size,
+				    int _part,
+				    std::vector<bool> &_locked,
+				    int _current_rank);
+
+  void Random_distribute(std::deque<int> &_left_stack, std::deque<int> &_left_rank,
+			 int _left_index, int _left_size, int &_real_left_size,
+			 std::deque<int> &_right_stack, std::deque<int> &_right_rank,
+			 int _right_index, int _right_size, int &_real_right_size,
+			 int _part,
+			 std::vector<bool> &_locked,
+			 int _current_rank);
+
+  //The different optimzers
+  int FM_MinCut_optimize(int _left_index, int _left_size,
+			 int _right_index, int _right_size,
+			 std::vector<bool> &_locked, int _pmax, int _tolerance);
+
+  int FM_MinNetCut_optimize(int _left_index, int _left_size,
+			    int _right_index, int _right_size,
+			    std::vector<bool> &_locked, int _pmax, int _tolerance);
+
+  int FM_MinNetCutMinCut_optimize(int _left_index, int _left_size,
+				  int _right_index, int _right_size,
+				  std::vector<bool> &_locked, int _pmax, int _tolerance);
+
+};
+
+#endif
diff --git a/kwant/graph/c_slicer/slicer.cc b/kwant/graph/c_slicer/slicer.cc
new file mode 100644
index 0000000000000000000000000000000000000000..f55032780ea711de62881adee24e4f15f45df7eb
--- /dev/null
+++ b/kwant/graph/c_slicer/slicer.cc
@@ -0,0 +1,57 @@
+#include <algorithm>
+#include <exception>
+
+#include "graphwrap.h"
+#include "partitioner.h"
+
+#include "slicer.h"
+
+extern "C"
+Slicing *slice(int _node_num,
+	       int *_vertex_ptr,
+	       int *_edges,
+	       int _left_num, int *_left,
+	       int _right_num, int *_right)
+{
+  GraphWrapper graph(_node_num, _vertex_ptr,
+		     _edges);
+
+  vector<int> left(_left, _left+_left_num),
+    right(_right, _right+_right_num);
+
+  Partitioner parts(graph, left, right);
+
+  Slicing *slicing;
+
+  try {
+    slicing=new Slicing;
+
+    slicing->nslices=parts.parts.size();
+    slicing->slice_ptr=new int[parts.parts.size()+1];
+    slicing->slices=new int[graph.size()];
+  }
+  catch(std::bad_alloc &ba) {
+    return NULL;
+  }
+
+  slicing->slice_ptr[0]=0;
+  for(size_t i=0; i<parts.parts.size(); i++) {
+    std::copy(parts.parts[i].begin(),
+	      parts.parts[i].end(),
+	      slicing->slices+slicing->slice_ptr[i]);
+    slicing->slice_ptr[i+1]=slicing->slice_ptr[i]+
+      parts.parts[i].size();
+  }
+
+  return slicing;
+}
+
+extern "C"
+void freeSlicing(Slicing *_slicing)
+{
+  if(_slicing) {
+    delete [] _slicing->slices;
+    delete [] _slicing->slice_ptr;
+    delete _slicing;
+  }
+}
diff --git a/kwant/graph/c_slicer/slicer.h b/kwant/graph/c_slicer/slicer.h
new file mode 100644
index 0000000000000000000000000000000000000000..7707afc529285ba9a5c7e0bf02a8803fee3685e1
--- /dev/null
+++ b/kwant/graph/c_slicer/slicer.h
@@ -0,0 +1,16 @@
+struct Slicing
+{
+  int nslices;
+  int *slice_ptr, *slices;
+};
+
+#ifdef __cplusplus
+extern "C"
+#endif
+struct Slicing *slice(int, int *, int *, int, int *,
+		      int, int *);
+
+#ifdef __cplusplus
+extern "C"
+#endif
+void freeSlicing(struct Slicing *);
diff --git a/kwant/graph/core.pxd b/kwant/graph/core.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..12fa6145d56688d0d9e53b0f4d895e2b90405818
--- /dev/null
+++ b/kwant/graph/core.pxd
@@ -0,0 +1,33 @@
+cimport numpy as np
+from kwant.graph.defs cimport gint
+
+cdef struct Edge:
+    gint tail, head
+
+cdef class Graph:
+    cdef int allow_negative_nodes
+    cdef Edge *edges
+    cdef gint capacity, size, _num_nodes
+    cdef gint num_pp_edges, num_pn_edges, num_np_edges
+
+    cpdef reserve(self, gint capacity)
+    cpdef gint add_edge(self, gint tail, gint head) except -1
+    cdef _add_edges_ndarray_int64(self, np.ndarray[np.int64_t, ndim=2] edges)
+    cdef _add_edges_ndarray_int32(self, np.ndarray[np.int32_t, ndim=2] edges)
+
+cdef class gintArraySlice:
+    cdef gint *begin, *end
+
+cdef class CGraph:
+    cdef readonly bint twoway, edge_nr_translation
+    cdef readonly gint num_nodes, num_edges, num_px_edges, num_xp_edges
+    cdef gint *heads_idxs, *heads
+    cdef gint *tails_idxs, *tails, *edge_ids
+    cdef gint *edge_ids_by_edge_nr, edge_nr_end
+
+cdef class CGraph_malloc(CGraph):
+    pass
+
+cdef class EdgeIterator:
+    cdef CGraph graph
+    cdef gint edge_id, tail
diff --git a/kwant/graph/core.pyx b/kwant/graph/core.pyx
new file mode 100644
index 0000000000000000000000000000000000000000..8f0abd74fd6d53cd24d8a68071fc22323a21af39
--- /dev/null
+++ b/kwant/graph/core.pyx
@@ -0,0 +1,691 @@
+"""Directed graphs optimized for storage and runtime efficiency."""
+
+__all__ = ['Graph', 'CGraph']
+
+# In this module we manage arrays directly with malloc, realloc and free to
+# circumvent two problems with Cython:
+#
+# (1) There are no efficient arrays which allow appending (numpy.ndarray
+#     doesn't).
+#
+# (2) Cython extension types cannot have typed buffers as members.
+#
+# Once these two problems are solved, the corresponding code could be
+# rewritten, probably to use Python's array.array.
+
+# TODO: represent all dangling nodes by -1
+
+# TODO (perhaps): transform Graph into something which behaves like a python
+# sequence.  Allow creation of compressed graphs from any sequence.
+
+from libc.stdlib cimport malloc, realloc, free
+from libc.string cimport memset
+import numpy as np
+cimport numpy as np
+from kwant.graph.defs cimport gint
+
+cdef class Graph:
+    """An uncompressed graph.  Used to make compressed graphs.  (See `CGraph`.)
+    """
+    # The array edges holds `size` elements with space for `capacity`.
+
+    def __init__(self, allow_negative_nodes=False):
+        self.allow_negative_nodes = allow_negative_nodes
+
+    def __dealloc__(self):
+        free(self.edges)
+
+    property num_nodes:
+        def __get__(self):
+            return self._num_nodes
+
+        def __set__(self, value):
+            if value < self._num_nodes:
+                raise ValueError("The number of nodes cannot be decreased.")
+            self._num_nodes = value
+
+    cpdef reserve(self, gint capacity):
+        """Reserve space for edges.
+
+        Parameters
+        ----------
+        capacity : integer
+           Number of edges for which to reserve space.
+
+        Notes
+        -----
+        It is not necessary to call this method, but using it can speed up the
+        creation of graphs.
+        """
+        if capacity <= self.capacity:
+            return
+        self.edges = <Edge*>realloc(self.edges, capacity * sizeof(Edge))
+        if not self.edges:
+            raise MemoryError
+        self.capacity = capacity
+
+    cpdef gint add_edge(self, gint tail, gint head):
+        """Add the directed edge (`tail`, `head`) to the graph.
+
+        Parameters
+        ----------
+        tail : integer
+        head : integer
+
+        Raises
+        ------
+        ValueError
+            If a negative node is added when this has not been allowed
+            explicitly or if an edge is doubly-dangling.
+
+        Returns
+        -------
+        edge_nr : integer
+           The sequential number of the edge.  This number can be used to query
+           for the edge ID of an edge in the compressed graph.
+        """
+        cdef bint neg_tail = tail < 0
+        cdef bint neg_head = head < 0
+        if neg_tail or neg_head:
+            if not self.allow_negative_nodes:
+                raise ValueError(
+                    "Negative node numbers have to be allowed explicitly.")
+            if neg_tail and neg_head:
+                raise ValueError("Doubly-dangling edges are never allowed.")
+            if neg_head:
+                self.num_pn_edges += 1
+            else:
+                self.num_np_edges += 1
+        else:
+            self.num_pp_edges += 1
+
+        if self.size == self.capacity:
+            if self.capacity == 0:
+                self.reserve(8)
+            else:
+                self.reserve(2 * self.capacity)
+        self.edges[self.size].tail = tail
+        self.edges[self.size].head = head
+        self.size += 1
+        self._num_nodes = max(self._num_nodes, tail + 1, head + 1)
+        return self.size - 1
+
+    def add_edges(self, edges):
+        """Add multiple edges in one pass.
+
+        Parameters
+        ----------
+        edges : iterable of 2-sequences of integers
+            The parameter `edges` must be an iterable of elements which
+            describe the edges to be added.  For each edge-element, edge[0] and
+            edge[1] must give, respectively, the tail and the head.  Valid
+            edges are, for example, a list of 2-integer-tuples, or an
+            numpy.ndarray of integers with a shape (n, 2).  The latter case is
+            optimized.
+
+        Returns
+        -------
+        first_edge_nr : integer
+           The sequential number of the first of the added edges.  The numbers
+           of the other added edges are consecutive integers following the
+           number of the first.  Edge numbers can be used to query for the edge
+           ID of an edge in the compressed graph.
+        """
+        result = self.size
+        if isinstance(edges, np.ndarray):
+            if edges.dtype == np.int64:
+                self._add_edges_ndarray_int64(edges)
+            elif edges.dtype == np.int32:
+                self._add_edges_ndarray_int32(edges)
+            else:
+                self._add_edges_ndarray_int64(edges.astype(np.int64))
+        else:
+            for edge in edges:
+                self.add_edge(*edge)
+        return result
+
+    cdef _add_edges_ndarray_int64(self, np.ndarray[np.int64_t, ndim=2] edges):
+        cdef int i
+        for i in range(edges.shape[0]):
+            self.add_edge(edges[i, 0], edges[i, 1])
+
+    cdef _add_edges_ndarray_int32(self, np.ndarray[np.int32_t, ndim=2] edges):
+        cdef int i
+        for i in range(edges.shape[0]):
+            self.add_edge(edges[i, 0], edges[i, 1])
+
+    def compressed(self, bint twoway=False, bint edge_nr_translation=False,
+                   bint allow_lost_edges=False):
+        """Build a CGraph from this graph.
+
+        Parameters
+        ----------
+        twoway : boolean (default: False)
+            If set, it will be possible to query the compressed graph for
+            incoming neighbors.
+        edge_nr_translation : boolean (default: False)
+            If set, it will be possible to call the method `edge_id`.
+        allow_lost_edges : boolean (default: False)
+            If set, negative tails are accepted even with one-way compression.
+
+        Raises
+        ------
+        ValueError
+            When negative tails occur while `twoway` and `allow_lost_edges` are
+            both false.
+
+        Notes
+        -----
+        In a one-way compressed graph, an edge with a negative tail is present
+        only minimally: it is only possible to query the head of such an edge,
+        given the edge ID.  This is why one-way compression of a graph with a
+        negative tail leads to a ValueError being raised, unless
+        `allow_lost_edges` is true.
+        """
+        assert (self.size ==
+                self.num_pp_edges + self.num_pn_edges + self.num_np_edges)
+        if not (twoway or allow_lost_edges or self.num_np_edges == 0):
+            raise ValueError('Edges with negative tails cannot be '
+                             'represented in an one-way compressed graph.')
+
+        cdef gint s, tail, head, edge_nr
+        cdef CGraph_malloc result = CGraph_malloc(twoway, edge_nr_translation,
+                                                  self._num_nodes,
+                                                  self.num_pp_edges,
+                                                  self.num_pn_edges,
+                                                  self.num_np_edges)
+        cdef gint *hbuf = result.heads_idxs + 1, *heads = result.heads
+        cdef gint *tbuf = result.tails_idxs + 1, *tails = result.tails
+        cdef gint *edge_ids = result.edge_ids
+        cdef gint edge_id = 0, num_edges     # = 0 is there to silence warning.
+
+        # `hbuf` is just `heads_idxs` shifted by one.  We will use `hbuf` to
+        # build up `heads` and its end state will be such that `heads_idxs`
+        # will have the right content.  For `tbuf`, replace "head" with tail in
+        # the previous text.
+
+        # Make a histogram of outgoing edges per node in `hbuf` and one of
+        # incoming edges per node in `tbuf`.
+        memset(result.heads_idxs, 0, (self._num_nodes + 1) * sizeof(gint))
+        if twoway:
+            memset(result.tails_idxs, 0, (self._num_nodes + 1) * sizeof(gint))
+        for edge_nr in range(self.size):
+            if self.edges[edge_nr].tail >= 0:
+                hbuf[self.edges[edge_nr].tail] += 1
+            if twoway and self.edges[edge_nr].head >= 0:
+                tbuf[self.edges[edge_nr].head] += 1
+
+        # Replace `hbuf` with its "antiderivative" and then subtract the
+        # original `hbuf` from it.  This is done in one pass.
+        s = 0
+        for tail in range(self._num_nodes):
+            s += hbuf[tail]
+            hbuf[tail] = s - hbuf[tail]
+
+        # Same as before for `tbuf`.
+        if twoway:
+            s = 0
+            for head in range(self._num_nodes):
+                s += tbuf[head]
+                tbuf[head] = s - tbuf[head]
+
+        # Iterate through all edges and build `heads` and `tails`.
+        next_np_edge_id = result.num_px_edges
+        for edge_nr in range(self.size):
+            edge = self.edges[edge_nr]
+            head = edge.head
+            tail = edge.tail
+            if tail >= 0:
+                edge_id = hbuf[tail]
+                hbuf[tail] += 1
+            elif twoway:
+                assert head >= 0
+                edge_id = next_np_edge_id
+                next_np_edge_id += 1
+            else:
+                edge_id = -1
+            if edge_id >= 0:
+                heads[edge_id] = head
+            if twoway and head >= 0:
+                tails[tbuf[head]] = tail
+                edge_ids[tbuf[head]] = edge_id
+                tbuf[head] += 1
+            if edge_nr_translation:
+                result.edge_ids_by_edge_nr[edge_nr] = edge_id
+
+        assert result.num_edges == next_np_edge_id
+        return result
+
+    def write_dot(self, file):
+        """Write a representation of the graph in dot format to `file`.
+
+        That resulting file can be visualized with dot(1) or neato(1) form the
+        graphviz package.
+        """
+        cdef gint edge_nr
+        file.write("digraph g {\n")
+        for edge_nr in range(self.size):
+            file.write("  %d -> %d;\n" %
+                       (self.edges[edge_nr].tail, self.edges[edge_nr].head))
+        file.write("}\n")
+
+
+cdef class gintArraySlice:
+    def __len__(self):
+        return self.end - self.begin
+
+    def __getitem__(self, gint key):
+        cdef gint *result_ptr
+        if key >= 0:
+            result_ptr = self.begin + key
+        else:
+            result_ptr = self.end + key
+        if result_ptr < self.begin or result_ptr >= self.end:
+            raise IndexError('Index out of range.')
+        return result_ptr[0]
+
+cdef class EdgeIterator:
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self.edge_id == self.graph.num_edges:
+            raise StopIteration
+        cdef gint current_edge_id = self.edge_id
+        self.edge_id += 1
+        while current_edge_id >= self.graph.heads_idxs[self.tail + 1]:
+            self.tail += 1
+        return self.tail, self.graph.heads[current_edge_id]
+
+class DisabledFeatureError(RuntimeError):
+    pass
+
+class NodeDoesNotExistError(IndexError):
+    pass
+
+class EdgeDoesNotExistError(IndexError):
+    pass
+
+_need_twoway = 'Enable "twoway" during graph compression.'
+
+cdef class CGraph:
+    """A compressed graph which can be efficiently queried for the existence of
+    edges and outgoing neighbors.
+
+    Objects of this class do not initialize the members themselves, but expect
+    that they hold usable values.  A good way to create them is by compressing
+    a `Graph`.
+
+    Iterating over a graph yields a sequence of (tail, head) pairs of all
+    edges.  The number of an edge in this sequence equals its edge ID.  The
+    built-in function `enumerate` can thus be used to easily iterate over all
+    edges along with their edge IDs.
+    """
+    def __iter__(self):
+        """Return an iterator over (tail, head) of all edges."""
+        cdef EdgeIterator result = EdgeIterator()
+        result.graph = self
+        result.edge_id = 0
+        result.tail = 0
+        return result
+
+    def has_dangling_edges(self):
+        return not self.num_edges == self.num_px_edges == self.num_xp_edges
+
+    def out_neighbors(self, gint node):
+        """Return the nodes a node points to.
+
+        Parameters
+        ----------
+        node : integer
+
+        Returns
+        -------
+        nodes : sequence of integers
+
+        Raises
+        ------
+        NodeDoesNotExistError
+        """
+        if node < 0 or node >= self.num_nodes:
+            raise NodeDoesNotExistError()
+        cdef gintArraySlice result = gintArraySlice()
+        result.begin = &self.heads[self.heads_idxs[node]]
+        result.end = &self.heads[self.heads_idxs[node + 1]]
+        return result
+
+    def out_edge_ids(self, gint node):
+        """Return the IDs of outgoing edges of node.
+
+        Parameters
+        ----------
+        node : integer
+
+        Returns
+        -------
+        edge_ids : sequence of integers
+
+        Raises
+        ------
+        NodeDoesNotExistError
+        """
+        if node < 0 or node >= self.num_nodes:
+            raise NodeDoesNotExistError()
+        return iter(xrange(self.heads_idxs[node], self.heads_idxs[node + 1]))
+
+    def in_neighbors(self, gint node):
+        """Return the nodes which point to a node.
+
+        Parameters
+        ----------
+        node : integer
+
+        Returns
+        -------
+        nodes : sequence of integers
+
+        Raises
+        ------
+        NodeDoesNotExistError
+        DisabledFeatureError
+            If the graph is not two-way compressed.
+        """
+        if not self.twoway:
+            raise DisabledFeatureError(_need_twoway)
+        if node < 0 or node >= self.num_nodes:
+            raise NodeDoesNotExistError()
+        cdef gintArraySlice result = gintArraySlice()
+        result.begin = &self.tails[self.tails_idxs[node]]
+        result.end = &self.tails[self.tails_idxs[node + 1]]
+        return result
+
+    def in_edge_ids(self, gint node):
+        """Return the IDs of incoming edges of a node.
+
+        Parameters
+        ----------
+        node : integer
+
+        Returns
+        -------
+        edge_ids : sequence of integers
+
+        Raises
+        ------
+        NodeDoesNotExistError
+        DisabledFeatureError
+            If the graph is not two-way compressed.
+        """
+        if not self.twoway:
+            raise DisabledFeatureError(_need_twoway)
+        if node < 0 or node >= self.num_nodes:
+            raise NodeDoesNotExistError()
+        cdef gintArraySlice result = gintArraySlice()
+        result.begin = &self.edge_ids[self.tails_idxs[node]]
+        result.end = &self.edge_ids[self.tails_idxs[node + 1]]
+        return result
+
+    def has_edge(self, gint tail, gint head):
+        """Does the graph contain the edge (tail, head)?
+
+        Parameters
+        ----------
+        tail : integer
+        head : integer
+
+        Returns
+        -------
+        had_edge : boolean
+
+        Raises
+        ------
+        NodeDoesNotExistError
+        EdgeDoesNotExistError
+        DisabledFeatureError
+            If `tail` is negative and the graph is not two-way compressed.
+        """
+        cdef gint h, t
+        if tail >= self.num_nodes or head >= self.num_nodes:
+            raise NodeDoesNotExistError()
+        if tail >= 0:
+            for h in self.heads[self.heads_idxs[tail]
+                                : self.heads_idxs[tail + 1]]:
+                if h == head: return True
+        else:
+            if not self.twoway:
+                raise DisabledFeatureError(_need_twoway)
+            if head < 0:
+                raise EdgeDoesNotExistError()
+            for t in self.tails[self.tails_idxs[head]
+                                : self.tails_idxs[head + 1]]:
+                if t == tail: return True
+        return False
+
+    def edge_id(self, gint edge_nr):
+        """Return the edge ID of an edge given its sequential number.
+
+        Parameters
+        ----------
+        edge_nr : integer
+
+        Returns
+        -------
+        edge_id : integer
+
+        Raises
+        ------
+        DisabledFeatureError
+            If `edge_nr_translation` was not enabled during graph compression.
+        EdgeDoesNotExistError
+        """
+        if not self.edge_ids_by_edge_nr:
+            raise DisabledFeatureError(
+                'Enable "edge_nr_translation" during graph compression.')
+        if edge_nr < 0 or edge_nr >= self.edge_nr_end:
+            raise EdgeDoesNotExistError()
+        result = self.edge_ids_by_edge_nr[edge_nr]
+        if result < 0:
+            raise EdgeDoesNotExistError()
+        return result
+
+    def first_edge_id(self, gint tail, gint head):
+        """Return the edge ID of the first edge (tail, head).
+
+        Parameters
+        ----------
+        tail : integer
+        head : integer
+
+        Returns
+        -------
+        edge_id : integer
+
+        Raises
+        ------
+        NodeDoesNotExist
+        EdgeDoesNotExistError
+        DisabledFeatureError
+            If `tail` is negative and the graph is not two-way compressed.
+
+        Notes
+        -----
+        This method is useful for graphs where each edge occurs only once.
+        """
+        if tail >= self.num_nodes or head >= self.num_nodes:
+            raise NodeDoesNotExistError()
+        if tail >= 0:
+            for head_index in xrange(self.heads_idxs[tail],
+                                     self.heads_idxs[tail + 1]):
+                if self.heads[head_index] == head:
+                    return head_index
+        else:
+            if not self.twoway:
+                raise DisabledFeatureError(_need_twoway)
+            for tail_index in xrange(self.tails_idxs[head],
+                                     self.tails_idxs[head + 1]):
+                if self.tails[tail_index] == tail:
+                    return self.edge_ids[tail_index]
+        raise EdgeDoesNotExistError()
+
+    def all_edge_ids(self, gint tail, gint head):
+        """Return an iterator over all edge IDs of edges with a given tail and
+        head.
+
+        Parameters
+        ----------
+        tail : integer
+        head : integer
+
+        Returns
+        -------
+        edge_id : integer
+
+        Raises
+        ------
+        NodeDoesNotExist
+        EdgeDoesNotExistError
+        DisabledFeatureError
+            If `tail` is negative and the graph is not two-way compressed.
+        """
+        if tail >= self.num_nodes or head >= self.num_nodes:
+            raise NodeDoesNotExistError()
+        result = []
+        if tail >= 0:
+            for head_index in xrange(self.heads_idxs[tail],
+                                     self.heads_idxs[tail + 1]):
+                if self.heads[head_index] == head:
+                    result.append(head_index)
+        else:
+            if not self.twoway:
+                raise DisabledFeatureError(_need_twoway)
+            for tail_index in xrange(self.tails_idxs[head],
+                                     self.tails_idxs[head + 1]):
+                if self.tails[tail_index] == tail:
+                    result.append(self.edge_ids[tail_index])
+        return result
+
+    # TODO: optimize this for the case of twofold graphs and low degree.
+    def tail(self, gint edge_id):
+        """Return the tail of an edge, given its edge ID.
+
+        Parameters
+        ----------
+        edge_id : integer
+
+        Returns
+        -------
+        tail : integer
+            If the edge exists and is positive.
+        None
+            If the tail is negative.
+
+        Raises
+        ------
+        EdgeDoesNotExistError
+
+        Notes
+        -----
+        The average performance of this method is O(log num_nodes) for
+        non-negative tails and O(1) for negative ones.
+        """
+        if edge_id < 0 or edge_id >= self.num_edges:
+            raise EdgeDoesNotExistError
+        if edge_id >= self.num_px_edges:
+            assert self.twoway
+            return None
+        cdef gint lower = 0, upper = self.num_nodes, tail = 0
+        while upper - lower > 1:
+            tail = (upper + lower) // 2
+            if edge_id == self.heads_idxs[tail]:
+                return tail
+            if edge_id < self.heads_idxs[tail]:
+                upper = tail
+            else:
+                lower = tail
+        return lower
+
+    def head(self, gint edge_id):
+        """Return the head of an edge, given its edge ID.
+
+        Parameters
+        ----------
+        edge_id : integer
+
+        Raises
+        ------
+        EdgeDoesNotExistError
+
+        Notes
+        -----
+        This method executes in constant time.  It works for all edge IDs,
+        returning both positive and negative heads.
+        """
+        if edge_id < 0 or edge_id >= self.num_edges:
+            raise EdgeDoesNotExistError()
+        return self.heads[edge_id]
+
+    def write_dot(self, file):
+        """Write a representation of the graph in dot format to `file`.
+
+        Parameters
+        ----------
+        file : file-like object
+
+        Notes
+        -----
+        That resulting file can be visualized with dot(1) or neato(1) form the
+        `graphviz <http://graphviz.org/>`_ package.
+        """
+        cdef gint tail
+        file.write("digraph g {\n")
+        for tail in range(self.num_nodes):
+            for head in self.heads[self.heads_idxs[tail]
+                                   : self.heads_idxs[tail + 1]]:
+                file.write("  %d -> %d;\n" % (tail, head))
+        file.write("}\n")
+
+
+cdef class CGraph_malloc(CGraph):
+    """A CGraph which allocates and frees its own memory."""
+
+    def __cinit__(self, twoway, edge_nr_translation, num_nodes,
+                  num_pp_edges, num_pn_edges, num_np_edges):
+        self.twoway = twoway
+        self.edge_nr_translation = edge_nr_translation
+        self.num_nodes = num_nodes
+        self.num_px_edges = num_pp_edges + num_pn_edges
+        self.edge_nr_end = num_pp_edges + num_pn_edges + num_np_edges
+
+        self.heads_idxs = <gint*>malloc((num_nodes + 1) * sizeof(gint))
+        if self.twoway:
+            # The graph is two-way. n->p edges will exist in the compressed
+            # graph.
+            self.num_xp_edges = num_pp_edges + num_np_edges
+            self.num_edges = self.edge_nr_end
+            self.tails_idxs = <gint*>malloc((num_nodes + 1) * sizeof(gint))
+            self.tails = <gint*>malloc(
+                self.num_xp_edges * sizeof(gint))
+            self.edge_ids = <gint*>malloc(
+                self.num_xp_edges * sizeof(gint))
+        else:
+            # The graph is one-way. n->p edges will be ignored.
+            self.num_xp_edges = num_pp_edges
+            self.num_edges = self.num_px_edges
+        self.heads = <gint*>malloc(self.num_edges * sizeof(gint))
+        if edge_nr_translation:
+            self.edge_ids_by_edge_nr = <gint*>malloc(
+                self.edge_nr_end * sizeof(gint))
+        if (not self.heads_idxs or not self.heads
+            or (twoway and (not self.tails_idxs
+                             or not self.tails
+                             or not self.edge_ids))
+            or (edge_nr_translation and not self.edge_ids_by_edge_nr)):
+            raise MemoryError
+
+    def __dealloc__(self):
+        free(self.edge_ids_by_edge_nr)
+        free(self.heads)
+        free(self.edge_ids)
+        free(self.tails)
+        free(self.tails_idxs)
+        free(self.heads_idxs)
diff --git a/kwant/graph/defs.h b/kwant/graph/defs.h
new file mode 100644
index 0000000000000000000000000000000000000000..cbb768047a2da9c47e1ad40c38e27629d73db721
--- /dev/null
+++ b/kwant/graph/defs.h
@@ -0,0 +1,10 @@
+#ifndef DEFS_H
+#define DEFS_H
+
+#include <stdint.h>
+
+// The integer type which will be used to save node and edge id's.  Must be
+// signed.
+typedef int32_t gint;
+
+#endif // DEFS_H
diff --git a/kwant/graph/defs.pxd b/kwant/graph/defs.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..5d574061ac7c2ea6742937629563fbe8a1be9559
--- /dev/null
+++ b/kwant/graph/defs.pxd
@@ -0,0 +1,2 @@
+cdef extern from "defs.h":
+    ctypedef signed int gint
diff --git a/kwant/graph/defs.py b/kwant/graph/defs.py
new file mode 100644
index 0000000000000000000000000000000000000000..472c1f0cccf75402fb116eaeae026b597fec8d19
--- /dev/null
+++ b/kwant/graph/defs.py
@@ -0,0 +1,3 @@
+__all__ = ['gint_dtype']
+import numpy as np
+gint_dtype = np.int32
diff --git a/kwant/graph/dissection.py b/kwant/graph/dissection.py
new file mode 100644
index 0000000000000000000000000000000000000000..a368d094a179eebda016f13bb9356c8dc17ded21
--- /dev/null
+++ b/kwant/graph/dissection.py
@@ -0,0 +1,71 @@
+"""Routines to compute nested dissections of graphs"""
+
+__all__ = ['edge_dissection']
+
+import numpy as np
+from . import core, utils, scotch
+from .defs import gint_dtype
+
+def edge_dissection(gr, minimum_size, is_undirected=False):
+    """Returns a nested dissection tree (represented as a nested number of
+    tuples) for the graph gr, based on edge separators.
+
+    minimum_size indicates the smallest size of a part for which the algorithm
+    should still try to dissect the part.
+
+    If the graph gr is already undirected, setting is_undirected=True avoids
+    the work of making the graph undirected.
+    """
+
+    if isinstance(gr, core.Graph):
+        grc = gr.compressed()
+    elif isinstance(gr, core.CGraph):
+        grc = gr
+    else:
+        raise ValueError('edge_dissection expects a Graph or CGraph!')
+
+    # Unless the graph is undirected from the very beginning, make it
+    # undirected.
+    if not is_undirected:
+        grc = utils.make_undirected(grc)
+
+    return edge_bisection(grc, np.arange(grc.num_nodes, dtype=gint_dtype),
+                          minimum_size)
+
+
+def edge_bisection(grc, nodeids, minimum_size):
+    """This function returns a nested dissection tree (represented as a nested
+    number of tuples) for an undirected graph represented as a CGraph and ids
+    (that go into the tree) given in nodeids.  minimum_size indicates the
+    smallest size of a part for which the algorithm should still try to dissect
+    the part.
+    """
+    parts = scotch.bisect(grc)
+
+    # Count the number of nodes in parts 0 or 1.
+    size2 = np.sum(parts)
+    size1 = grc.num_nodes - size2
+
+    # If the size of one of the parts is zero, we can't further dissect.
+    if size1 == 0 or size2 == 0:
+        return nodeids.tolist()
+
+    # Now extract all nodes that are in part 0.
+    sub_nodeids = nodeids[parts == 0]
+
+    if size1 > minimum_size:
+        subgr = utils.induced_subgraph(grc, parts == 0)
+        left = edge_bisection(subgr, sub_nodeids, minimum_size)
+    else:
+        left = sub_nodeids.tolist()
+
+    # Now extract all nodes that are in part 1.
+    sub_nodeids = nodeids[parts == 1]
+
+    if size2 > minimum_size:
+        subgr = utils.induced_subgraph(grc, parts == 1)
+        right = edge_bisection(subgr, sub_nodeids, minimum_size)
+    else:
+        right = sub_nodeids.tolist()
+
+    return left, right
diff --git a/kwant/graph/scotch.pyx b/kwant/graph/scotch.pyx
new file mode 100644
index 0000000000000000000000000000000000000000..0b9a2efada9c07f8c1aea44635c066841f29f722
--- /dev/null
+++ b/kwant/graph/scotch.pyx
@@ -0,0 +1,66 @@
+"""Wrapper for the graph library SCOTCH"""
+
+__all__ = ['bisect', 'reset']
+
+cimport libc.stdio
+import numpy as np
+cimport numpy as np
+from kwant.graph cimport core
+from kwant.graph import core, defs
+from kwant.graph.c_scotch cimport *
+
+DEF SCOTCH_STRATQUALITY = 1
+DEF SCOTCH_STRATSPEED = 2
+DEF SCOTCH_STRATBALANCE = 4
+DEF SCOTCH_STRATSAFETY = 8
+DEF SCOTCH_STRATSCALABILITY = 16
+
+def bisect(core.CGraph gr,
+           double bal=0.0):
+    """Compute a bisection of a CGraph using SCOTCH, minimizing the number of
+    edges that are cut in the process. The bisection is returned as a numpy
+    array (with a size given by the number of nodes in graph) indicating
+    whether a node i is in part 0 or 1.
+
+    The graph to be bisected must be undirected, i.e.for every edge (i,j)
+    there must also be the edge (j,i). The result of applying bisect
+    on a directed graph is undefined.
+
+    The optional parameter bal defines how precise the bisection should be,
+    i.e. the smaller bal, the more the two parts after the bisection are
+    equally sized (however, this might affect the quality of the cut).
+    """
+    cdef SCOTCH_Graph graph
+    cdef SCOTCH_Strat strat
+
+    cdef np.ndarray[int, ndim=1] parts
+
+    parts=np.empty(gr.num_nodes, dtype=defs.gint_dtype)
+
+    SCOTCH_graphInit(&graph)
+
+    SCOTCH_graphBuild(&graph, 0, gr.num_nodes,
+                      <SCOTCH_Num *>gr.heads_idxs,
+                      NULL, NULL, NULL, gr.heads_idxs[gr.num_nodes],
+                      <SCOTCH_Num *>gr.heads,
+                      NULL,)
+
+    SCOTCH_stratInit(&strat)
+
+    SCOTCH_stratGraphMapBuild(&strat,
+                              SCOTCH_STRATQUALITY, 2, bal)
+
+    SCOTCH_graphPart(&graph, 2,
+                     &strat, <SCOTCH_Num *>parts.data)
+
+    SCOTCH_stratExit(&strat)
+
+    SCOTCH_graphExit(&graph)
+
+    return parts
+
+def reset():
+    """Resets the internal random number generator of SCOTCH. After a reset,
+    SCOTCH returns identical results for bisections, etc.
+    """
+    SCOTCH_randomReset()
diff --git a/kwant/graph/slicer.pyx b/kwant/graph/slicer.pyx
new file mode 100644
index 0000000000000000000000000000000000000000..a07b7b2a2cbbb31b0b27e674fda946190e8a0da7
--- /dev/null
+++ b/kwant/graph/slicer.pyx
@@ -0,0 +1,52 @@
+import numpy as np
+cimport numpy as np
+cimport cython
+from kwant.graph.defs cimport gint
+from kwant.graph.defs import gint_dtype
+from kwant.graph.core cimport CGraph
+cimport kwant.graph.c_slicer as c_slicer
+
+__all__ = ['slice']
+
+@cython.boundscheck(False)
+def slice(CGraph graph, left, right):
+    """
+    TODO: write me.
+    """
+    cdef np.ndarray[gint, ndim=1] leftarr, rightarr, slc
+    cdef c_slicer.Slicing *slicing
+    cdef int i, j, slc_size
+
+    leftarr = np.array(left, dtype=gint_dtype)
+    rightarr = np.array(right, dtype=gint_dtype)
+
+    if leftarr.ndim != 1:
+        raise ValueError("Left cannot be interpreted as a 1D array.")
+
+    if rightarr.ndim != 1:
+        raise ValueError("Right cannot be interpreted as a 1D array.")
+
+    if leftarr.size == 0 or rightarr.size == 0:
+        raise ValueError("Empty boundary arrays are not supported yet.")
+
+    slicing = c_slicer.slice(graph.num_nodes,
+                             graph.heads_idxs,
+                             graph.heads,
+                             leftarr.size, <gint *>leftarr.data,
+                             rightarr.size, <gint *>rightarr.data)
+
+    slclist = []
+    for i in xrange(slicing.nslices):
+        slc_size = slicing.slice_ptr[i+1] - slicing.slice_ptr[i]
+        slc = np.empty(slc_size, dtype=gint_dtype)
+
+        # I don't know, there should be a better way to do that.
+        cslc = slicing.slices + slicing.slice_ptr[i]
+        for j in xrange(slc_size):
+            slc[j] = cslc[j]
+
+        slclist.append(tuple(slc))
+
+    c_slicer.freeSlicing(slicing)
+
+    return tuple(slclist)
diff --git a/kwant/graph/tests/test_core.py b/kwant/graph/tests/test_core.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ec0879e81d36f673668ea4689fae3781eab4628
--- /dev/null
+++ b/kwant/graph/tests/test_core.py
@@ -0,0 +1,173 @@
+from StringIO import StringIO
+from itertools import izip_longest
+import numpy as np
+from nose.tools import assert_equal, assert_raises
+from kwant.graph.core import \
+    Graph, NodeDoesNotExistError, EdgeDoesNotExistError, DisabledFeatureError
+
+def test_empty():
+    graph = Graph()
+    g = graph.compressed()
+    assert not g.twoway
+    assert not g.edge_nr_translation
+    assert_raises(NodeDoesNotExistError, g.out_neighbors, 0)
+    assert_raises(NodeDoesNotExistError, g.has_edge, 0, 0)
+    assert_raises(DisabledFeatureError, g.edge_id, 0)
+    g = graph.compressed(twoway=True, edge_nr_translation=True)
+    assert g.twoway
+    assert g.edge_nr_translation
+    assert_raises(NodeDoesNotExistError, g.in_neighbors, 0)
+    assert_raises(NodeDoesNotExistError, g.out_edge_ids, 0)
+    assert_raises(NodeDoesNotExistError, g.in_edge_ids, 0)
+    assert_raises(EdgeDoesNotExistError, g.edge_id, 0)
+
+def test_num_nodes():
+    graph = Graph()
+    assert_equal(graph.num_nodes, 0)
+    graph.num_nodes = 2
+    assert_equal(graph.num_nodes, 2)
+    assert_raises(ValueError, graph.__setattr__, 'num_nodes', 1)
+    g = graph.compressed()
+    assert_equal(g.num_nodes, 2)
+
+def test_large():
+    num_edges = 1000
+    graph = Graph()
+    for i in xrange(num_edges):
+        graph.add_edge(i, i + 1)
+    g = graph.compressed()
+    g2 = graph.compressed(twoway=True)
+    assert_equal(num_edges, g.num_nodes - 1)
+    for i in xrange(num_edges):
+        assert_equal(tuple(g.out_neighbors(i)), (i + 1,))
+        assert_equal(tuple(g2.in_neighbors(i + 1)), (i,))
+
+def check_dot(dot_expect, graph):
+    output = StringIO()
+    graph.write_dot(output)
+    assert_equal(output.getvalue(), dot_expect)
+    output.close()
+
+def test_small():
+    g = Graph()
+    edges = [(0, 1), (0, 2), (1, 2), (2, 1)]
+    for edge in edges:
+        g.add_edge(*edge)
+    dot_expect = """digraph g {
+  0 -> 1;
+  0 -> 2;
+  1 -> 2;
+  2 -> 1;
+}
+"""
+    check_dot(dot_expect, g)
+    g = g.compressed(twoway=True)
+
+    for edge_should, edge_is in izip_longest(edges, g):
+        assert_equal(edge_should, edge_is)
+
+    edge_ids = []
+    for edge in edges:
+        edge_ids.append(g.first_edge_id(*edge))
+
+    assert_equal(tuple(g.out_neighbors(0)), (1, 2))
+    assert_equal(tuple(g.in_neighbors(0)), ())
+    assert_equal(tuple(g.out_edge_ids(0)), (edge_ids[0], edge_ids[1]))
+    assert_equal(tuple(g.in_edge_ids(0)), ())
+
+    assert_equal(tuple(g.out_neighbors(1)), (2,))
+    assert_equal(tuple(g.in_neighbors(1)), (0, 2))
+    assert_equal(tuple(g.out_edge_ids(1)), (edge_ids[2],))
+    assert_equal(tuple(g.in_edge_ids(1)), (edge_ids[0], edge_ids[3]))
+
+    assert_equal(tuple(g.out_neighbors(2)), (1,))
+    assert_equal(tuple(g.in_neighbors(2)), (0, 1))
+    assert_equal(tuple(g.out_edge_ids(2)), (edge_ids[3],))
+    assert_equal(tuple(g.in_edge_ids(2)), (edge_ids[1], edge_ids[2]))
+
+    assert g.has_edge(0, 1)
+    g.first_edge_id(0, 1)
+    assert not g.has_edge(1, 0)
+    assert_raises(IndexError, g.first_edge_id, 1, 0)
+    check_dot(dot_expect, g)
+
+def test_negative_node_ids():
+    g = Graph()
+    assert_raises(ValueError, g.add_edge, 0, -1)
+
+    g = Graph(allow_negative_nodes=True)
+    g.add_edge(0, -1)
+    g.add_edge(-2, 0)
+    assert_raises(ValueError, g.add_edge, -3, -4)
+    assert_raises(ValueError, g.compressed)
+    g1 = g.compressed(allow_lost_edges=True)
+    assert_equal(g1.num_px_edges, 1)
+    assert_equal(g1.num_xp_edges, 0)
+    assert g1.has_edge(0, -1)
+    assert_raises(DisabledFeatureError, g1.has_edge, -2, 0)
+    assert_equal(tuple(g1.out_neighbors(0)), (-1,))
+    g2 = g.compressed(twoway=True)
+    assert_equal(g2.num_px_edges, 1)
+    assert_equal(g2.num_xp_edges, 1)
+    assert g2.has_edge(0, -1)
+    assert g2.has_edge(-2, 0)
+    assert_equal(tuple(g2.out_neighbors(0)), (-1,))
+    assert_equal(tuple(g2.in_neighbors(0)), (-2,))
+
+def test_add_edges():
+    edges = [(0, 1), (1, 2), (2, 3), (3, 0),
+             (0, 4), (1, 4), (2, 4), (3, 4)]
+
+    def fill0(g):
+        for edge in edges:
+            g.add_edge(*edge)
+    def fill1(g):
+        g.add_edges(edges)
+    def fill2(g):
+        g.add_edges(np.array(edges))
+
+    prev_dot = None
+    for fill in [fill0, fill1, fill2]:
+        g = Graph()
+        fill(g)
+        g = g.compressed()
+        output = StringIO()
+        g.write_dot(output)
+        dot = output.getvalue()
+        if prev_dot is not None:
+            assert_equal(dot, prev_dot)
+        prev_dot = dot
+
+def test_edge_ids():
+    gr = Graph(allow_negative_nodes=True)
+    edges = [(0, -1), (-1, 0), (1, 2), (1, 2), (0, -1), (-1, 0), (-1, 0)]
+    for edge_nr, edge in enumerate(edges):
+        assert_equal(gr.add_edge(*edge), edge_nr)
+
+    g = gr.compressed(twoway=True, edge_nr_translation=True)
+    assert g.twoway
+    assert g.edge_nr_translation
+    assert_equal(sorted(g.out_edge_ids(1)), sorted(g.in_edge_ids(2)))
+    for edge_id in g.out_edge_ids(1):
+        assert_equal(g.tail(edge_id), 1)
+        assert_equal(g.head(edge_id), 2)
+    for i, edge_id in enumerate(g.all_edge_ids(0, -1)):
+        if i == 0:
+            assert_equal(edge_id, g.first_edge_id(0, -1))
+        assert_equal(g.tail(edge_id), 0)
+        assert_equal(g.head(edge_id), -1)
+    assert_equal(i, 1)
+    for i, edge_id in enumerate(g.all_edge_ids(-1, 0)):
+        if i == 0:
+            assert_equal(edge_id, g.first_edge_id(-1, 0))
+        assert_equal(g.tail(edge_id), None)
+        assert_equal(g.head(edge_id), 0)
+    assert_equal(i, 2)
+
+    for edge_nr, edge in enumerate(edges):
+        if edge[0] < 0: continue
+        edge_id = g.edge_id(edge_nr)
+        assert_equal(edge, (g.tail(edge_id), g.head(edge_id)))
+
+    g = gr.compressed(edge_nr_translation=True, allow_lost_edges=True)
+    assert_raises(EdgeDoesNotExistError, g.edge_id, 1)
diff --git a/kwant/graph/tests/test_dissection.py b/kwant/graph/tests/test_dissection.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d9392ee1b390e52847dc20eebb58ac56d97b595
--- /dev/null
+++ b/kwant/graph/tests/test_dissection.py
@@ -0,0 +1,36 @@
+from nose.tools import assert_equal, assert_true
+import numpy as np
+# from kwant.graph import Graph, dissection
+# from kwant.graph.dissection import edge_dissection
+
+def _DISABLED_test_edge_dissection():
+    # REMARK: This test is somewhat limited in the sense that it can only test
+    #         for the general sanity of the output, not if the disection is
+    #         really good (balanced, etc.).  Sanity is checked by making sure
+    #         that every node is included exactly once in the tree.
+    size = 5
+    graph = Graph()
+
+    for i in xrange(size - 1):
+        offset = i * size
+        for j in xrange(size - 1):
+            graph.add_edge(offset + j, offset + j + 1)
+            graph.add_edge(offset + j + 1, offset + j)
+        if i > 0:
+            for j in xrange(size):
+                graph.add_edge(offset + j, offset + j - size)
+                graph.add_edge(offset + j - size, offset + j)
+    g = graph.compressed()
+
+    tree = edge_dissection(g, 1)
+    found = np.zeros(g.num_nodes, dtype = int)
+
+    def parse_tree(entry):
+        if type(entry) is tuple:
+            parse_tree(entry[0])
+            parse_tree(entry[1])
+        else:
+            found[entry] += 1
+
+    parse_tree(tree)
+    assert_true((found == 1).all())
diff --git a/kwant/graph/tests/test_scotch.py b/kwant/graph/tests/test_scotch.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1713990fc35934ca6ea255c1863bb101fb219f7
--- /dev/null
+++ b/kwant/graph/tests/test_scotch.py
@@ -0,0 +1,49 @@
+import numpy as np
+from nose.tools import assert_equal, assert_true
+from kwant.graph import Graph
+# from kwant.graph.scotch import bisect, reset
+
+def _DISABLED_test_bisect():
+    # REMARK: This test is somewhat limited in the sense that it can only test
+    #         for the general sanity of the output, not if the bisection is
+    #         really good (balanced, etc.).
+    size = 5
+    graph = Graph()
+
+    for i in xrange(size-1):
+        offset = i * size
+        for j in xrange(size-1):
+            graph.add_edge(offset + j, offset + j + 1)
+            graph.add_edge(offset + j + 1, offset + j)
+        if i > 0:
+            for j in xrange(size):
+                graph.add_edge(offset + j, offset + j - size)
+                graph.add_edge(offset + j - size, offset + j)
+    g = graph.compressed()
+
+    parts = bisect(g)
+    for i in xrange(g.num_nodes):
+        assert_true(parts[i] == 0 or parts[i] == 1)
+
+def _DISABLED_test_reset():
+    size = 5
+    graph = Graph()
+
+    for i in xrange(size-1):
+        offset = i * size
+        for j in xrange(size-1):
+            graph.add_edge(offset + j, offset + j + 1)
+            graph.add_edge(offset + j + 1, offset + j)
+        if i > 0:
+            for j in xrange(size):
+                graph.add_edge(offset + j, offset + j - size)
+                graph.add_edge(offset + j - size, offset + j)
+    g = graph.compressed()
+
+    # After calling reset, SCOTCH returns identical results.
+    reset()
+    parts1 = bisect(g)
+    reset()
+    parts2 = bisect(g)
+
+    assert_true((parts1 == parts2).all())
diff --git a/kwant/graph/tests/test_slicer.py b/kwant/graph/tests/test_slicer.py
new file mode 100644
index 0000000000000000000000000000000000000000..7761ef5d9ea1503deb160ab3276abed1cd548069
--- /dev/null
+++ b/kwant/graph/tests/test_slicer.py
@@ -0,0 +1,54 @@
+import kwant
+from kwant.graph import slicer
+
+def assert_sanity(graph, slices):
+    # Slices must comprise all of the graph.
+    slclist = [slices[j][i] for j in xrange(len(slices))
+               for i in xrange(len(slices[j]))]
+    slclist.sort()
+    assert slclist == [i for i in xrange(graph.num_nodes)]
+
+    # Nodes may only have neighbors in neighboring slices.
+    for j in xrange(len(slices)):
+        for node in slices[j]:
+            for neigh in graph.out_neighbors(node):
+                if j > 0 and j < len(slices) - 1:
+                    assert (neigh in slices[j] or
+                            neigh in slices[j+1] or
+                            neigh in slices[j-1])
+                elif j == 0:
+                    assert (neigh in slices[j] or
+                            neigh in slices[j+1])
+                else:
+                    assert (neigh in slices[j] or
+                            neigh in slices[j-1])
+
+
+def test_rectangle():
+    l = 10
+    w = 5
+
+    sys = kwant.Builder()
+    lead = kwant.Builder(kwant.TranslationalSymmetry([(-1, 0)]))
+    lat = kwant.lattice.Square()
+    lead[(lat(0, i) for i in xrange(w))] = 0
+    sys[(lat(j, i) for j in xrange(l) for i in xrange(w))] = 0
+    for s in [lead, sys]:
+        for delta in [(1, 0), (0, 1)]:
+            s[s.possible_hoppings(delta, lat, lat)] = -1
+    sys.attach_lead(lead)
+    sys.attach_lead(lead.reversed())
+    fsys = sys.finalized()
+
+    slices = slicer.slice(fsys.graph,
+                          fsys.lead_neighbor_seqs[0],
+                          fsys.lead_neighbor_seqs[1])
+
+    # In the rectangle case, the slicing is very constricted and we know that
+    # all slices must have the same shape.
+    assert len(slices) == l
+
+    for j in xrange(l):
+        assert len(slices[j]) == w
+
+    assert_sanity(fsys.graph, slices)
diff --git a/kwant/graph/tests/test_utils.py b/kwant/graph/tests/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a1a2a95846882b46a1431d4af490ead8b5c12ee
--- /dev/null
+++ b/kwant/graph/tests/test_utils.py
@@ -0,0 +1,126 @@
+import numpy as np
+from nose.tools import assert_equal, assert_true
+from kwant.graph import Graph
+from kwant.graph.utils import \
+ make_undirected, remove_duplicates, induced_subgraph
+from kwant.graph.defs import gint_dtype
+
+def test_make_undirected():
+    graph = Graph(True)
+    graph.add_edge(0, 1)
+    graph.add_edge(1, 0)
+    graph.add_edge(1, 2)
+    graph.add_edge(2, -1)
+    g = graph.compressed()
+
+    # First, test with no duplicates removed,
+    g2 = make_undirected(g, remove_dups=False)
+
+    assert_equal(g2.num_nodes, g.num_nodes)
+    assert_equal(g2.num_edges, 6)
+    assert_true(g2.has_edge(0, 1))
+    assert_true(g2.has_edge(1, 0))
+    assert_true(g2.has_edge(1, 2))
+    assert_true(g2.has_edge(2, 1))
+
+    # then with duplicates removed,
+    g2 = make_undirected(g, remove_dups=True)
+
+    assert_equal(g2.num_nodes, g.num_nodes)
+    assert_equal(g2.num_edges, 4)
+    assert_true(g2.has_edge(0, 1))
+    assert_true(g2.has_edge(1, 0))
+    assert_true(g2.has_edge(1, 2))
+    assert_true(g2.has_edge(2, 1))
+
+    # and finally with weights.
+    g2, edge_w2 = make_undirected(g, remove_dups=True, calc_weights=True)
+
+    assert_equal(g2.num_nodes, g.num_nodes)
+    assert_equal(g2.num_edges, 4)
+    assert_true(g2.has_edge(0, 1))
+    assert_true(g2.has_edge(1, 0))
+    assert_true(g2.has_edge(1, 2))
+    assert_true(g2.has_edge(2, 1))
+    assert_equal(edge_w2[g2.first_edge_id(0,1)], 2)
+    assert_equal(edge_w2[g2.first_edge_id(1,0)], 2)
+    assert_equal(edge_w2[g2.first_edge_id(1,2)], 1)
+    assert_equal(edge_w2[g2.first_edge_id(2,1)], 1)
+
+def test_remove_duplicates():
+    graph = Graph()
+    graph.add_edge(0, 1)
+    graph.add_edge(0, 1)
+    graph.add_edge(1, 2)
+
+    # First test without edge weights,
+    g = graph.compressed()
+    remove_duplicates(g)
+    assert_equal(g.num_edges, 2)
+    assert_true(g.has_edge(0, 1))
+    assert_true(g.has_edge(1, 2))
+
+    # then with edge weights.
+    g = graph.compressed()
+    edge_w = np.array([1,1,1], dtype=gint_dtype)
+    remove_duplicates(g, edge_w)
+    assert_equal(g.num_edges, 2)
+    assert_true(g.has_edge(0, 1))
+    assert_true(g.has_edge(1, 2))
+    assert_equal(edge_w[g.first_edge_id(0,1)], 2)
+    assert_equal(edge_w[g.first_edge_id(1,2)], 1)
+
+
+def test_induced_subgraph():
+    num_nodes = 6
+
+    graph = Graph()
+    for i in xrange(num_nodes-1):
+        graph.add_edge(i, i + 1)
+    graph.add_edge(1, 0)
+    g = graph.compressed()
+
+    # First test select array,
+    select = np.array([True, True, True, False, False, True])
+    g2 = induced_subgraph(g, select)
+    assert_equal(g2.num_nodes, 4)
+    assert_equal(g2.num_edges, 3)
+    assert_true(g2.has_edge(0, 1))
+    assert_true(g2.has_edge(1, 0))
+    assert_true(g2.has_edge(1, 2))
+
+    # then test select function.
+    g2 = induced_subgraph(g, lambda i: select[i])
+    assert_equal(g2.num_nodes, 4)
+    assert_equal(g2.num_edges, 3)
+    assert_true(g2.has_edge(0, 1))
+    assert_true(g2.has_edge(1, 0))
+    assert_true(g2.has_edge(1, 2))
+
+    # Now the same with edge weights.
+    edge_w = np.arange(g.num_edges, dtype=gint_dtype)
+    g2, edge_w2 = induced_subgraph(g, select, edge_w)
+    assert_equal(g2.num_nodes, 4)
+    assert_equal(g2.num_edges, 3)
+    assert_true(g2.has_edge(0, 1))
+    assert_true(g2.has_edge(1, 0))
+    assert_true(g2.has_edge(1, 2))
+    assert_equal(edge_w[g.first_edge_id(0,1)],
+                 edge_w2[g2.first_edge_id(0,1)])
+    assert_equal(edge_w[g.first_edge_id(1,0)],
+                 edge_w2[g2.first_edge_id(1,0)])
+    assert_equal(edge_w[g.first_edge_id(1,2)],
+                 edge_w2[g2.first_edge_id(1,2)])
+
+    g2, edge_w2 = induced_subgraph(g, lambda i: select[i], edge_w)
+    assert_equal(g2.num_nodes, 4)
+    assert_equal(g2.num_edges, 3)
+    assert_true(g2.has_edge(0, 1))
+    assert_true(g2.has_edge(1, 0))
+    assert_true(g2.has_edge(1, 2))
+    assert_equal(edge_w[g.first_edge_id(0,1)],
+                 edge_w2[g2.first_edge_id(0,1)])
+    assert_equal(edge_w[g.first_edge_id(1,0)],
+                 edge_w2[g2.first_edge_id(1,0)])
+    assert_equal(edge_w[g.first_edge_id(1,2)],
+                 edge_w2[g2.first_edge_id(1,2)])
diff --git a/kwant/graph/utils.pyx b/kwant/graph/utils.pyx
new file mode 100644
index 0000000000000000000000000000000000000000..fc5b75ec7c4c2ba51c9c15037488d4b04f1ad6d7
--- /dev/null
+++ b/kwant/graph/utils.pyx
@@ -0,0 +1,275 @@
+"""Utilities to modify compressed graphs"""
+
+__all__ = ['make_undirected', 'remove_duplicates', 'induced_subgraph',
+           'print_graph']
+
+from libc.stdlib cimport malloc, realloc, free
+from libc.string cimport memset
+cimport cython
+cimport numpy as np
+import numpy as np
+from kwant.graph.defs cimport gint
+from kwant.graph.defs import gint_dtype
+from kwant.graph.core cimport CGraph, CGraph_malloc
+from kwant.graph.core import CGraph, CGraph_malloc
+
+@cython.boundscheck(False)
+def make_undirected(CGraph gr, remove_dups=True, calc_weights=False):
+    """undirected_graph(gr) expects a CGraph gr as input, which is interpreted
+    as a directed graph, and returns a CGraph that is explicitely undirected,
+    i.e. for every edge (i,j) there is also the edge (j,i). In the process, the
+    function also removes all 'dangling' links, i.e. edges to or from
+    negative node numbers.
+
+    If remove_dups == True (default value is True), any duplicates of edges
+    will be removed (this applies to the case where there are multiple edges
+    (i,j), not to having (i,j) and (j,i)).
+
+    The effect of the duplicate edges can be retained if calc_weights == True
+    (default value is False), in which case a weight array is returned
+    containing the multiplicity of the edges after the graph has been made
+    undirected.
+
+    As a (somewhat drastic but illustrative) example, if make_undirected is
+    applied to a undirected graph, it will return the same graph again
+    (possibly with the order of edges changed) and a weight array with 2
+    everywhere.  (Of course, in this case one does not need to call
+    make_undirected ...)
+
+    make_undirected() will always return a one-way graph, regardless of
+    whether the input was a two-way graph or not (NOTE: This
+    restriction could be lifted, if necessary). In addition, the
+    original edge_ids are lost -- the resulting graph will have
+    edge_ids that are not related to the original ones. (NOTE: there
+    certainly is a relation, but as long as no-one needs it it remains
+    unspecified)
+    """
+
+    cdef gint i, j, p
+
+    # The undirected graph will have twice as many edges than the directed one
+    # (duplicates will be deleted afterwards).
+    cdef CGraph_malloc ret = CGraph_malloc(False, False, gr.num_nodes,
+                                           gr.heads_idxs[gr.num_nodes] * 2,
+                                           0, 0)
+
+    # In the following we build up the Graph directly in compressed format by
+    # adding for every edge (i,j) [with i,j>=0] also the edge (j,i). Taking
+    # care of possible doubling is done in a second step later.
+
+    # Initialize the new index array:
+    # First, compute a histogram of edges.
+    memset(ret.heads_idxs, 0, (ret.num_nodes + 1) * sizeof(gint))
+
+    # This is using Christoph's trick of building up the graph without
+    # additional buffer array.
+    cdef gint *buffer = ret.heads_idxs + 1
+
+    for i in xrange(gr.num_nodes):
+        for p in xrange(gr.heads_idxs[i], gr.heads_idxs[i+1]):
+            if gr.heads[p] >= 0:
+                buffer[i] += 1
+                buffer[gr.heads[p]] += 1
+
+    cdef gint s = 0
+    for i in xrange(ret.num_nodes):
+        s += buffer[i]
+        buffer[i] = s - buffer[i]
+
+    for i in xrange(gr.num_nodes):
+       for p in xrange(gr.heads_idxs[i], gr.heads_idxs[i+1]):
+           j = gr.heads[p]
+           if j >= 0:
+                ret.heads[buffer[i]] = j
+                buffer[i] += 1
+                ret.heads[buffer[j]] = i
+                buffer[j] += 1
+
+    ret.num_edges = ret.heads_idxs[ret.num_nodes]
+
+    # Now remove duplicates if desired.
+    cdef np.ndarray[gint, ndim=1] weights
+
+    if calc_weights:
+        weights = np.empty(ret.heads_idxs[ret.num_nodes], dtype=gint_dtype)
+        weights[:] = 1
+
+    if remove_dups:
+        if calc_weights:
+            remove_duplicates(ret, weights)
+        else:
+            remove_duplicates(ret)
+
+    if calc_weights:
+        return ret, weights
+    else:
+        return ret
+
+
+@cython.boundscheck(False)
+def remove_duplicates(CGraph gr, np.ndarray[gint, ndim=1] edge_weights=None):
+    """Remove duplicate edges in the CGraph gr (this applies to the case where
+    there are multiple edges (i,j), not to having (i,j) and (j,i)). This
+    function modifes the graph in place.
+
+    If edge_weights is provided, edge_weights is modified such that the new
+    edge weights are the sum of the old edge weights if there are duplicate
+    edges.
+
+    This function only works on simple graphs (not two-way graphs), and
+    it does not work on graphs which have a relation between the edge number
+    (given by the order the edges are added) and the edge_id (given by the
+    order the edges appear in the graph), see the documentation of CGraph.
+    (Both restrictions could be lifted if necessary.) Furthermore, the
+    function does not support negative node numbers, i.e. dangling links
+    (the concept of being duplicate is more complicated there.)
+    """
+    cdef gint i, j , p, q, nnz
+    cdef np.ndarray[gint, ndim=1] w
+
+    if gr.twoway:
+        raise RuntimeError("remove_duplicates does not support two-way "
+                           "graphs")
+
+    if gr.edge_ids_by_edge_nr:
+        raise RuntimeError("remove_duplicates does not support graphs with "
+                           "a relation between the edge number and the edge "
+                           "id")
+
+    # The array w will hold the position of head j in the heads array.
+    w = np.empty(gr.num_nodes, dtype=gint_dtype)
+    w[:]=-1
+
+    nnz=0
+
+    for i in xrange(gr.num_nodes):
+        q = nnz
+
+        for p in xrange(gr.heads_idxs[i], gr.heads_idxs[i+1]):
+            j = gr.heads[p]
+
+            # Check if we have found a previous entry (i,j).  (In this case w
+            # will have an index larger than the indices of all previous edges
+            # with tails < i, as stored in q.)
+            if w[j] >= q:
+                # entry is a duplicate
+                if edge_weights != None:
+                    edge_weights[w[j]] += edge_weights[p]
+            else:
+                w[j] = nnz
+                gr.heads[nnz] = j
+                nnz += 1
+
+        # Fix the index array.
+        gr.heads_idxs[i] = q
+
+    # Finally the new number of nonzeros
+    gr.heads_idxs[gr.num_nodes] = nnz
+    gr.num_edges = nnz
+
+    # Release memory that is not needed any more.
+    gr.heads = <gint *>realloc(gr.heads, nnz * sizeof(gint))
+    if not gr.heads:
+        raise MemoryError
+
+    if edge_weights != None:
+        edge_weights.resize(nnz, refcheck=False)
+
+@cython.boundscheck(False)
+def induced_subgraph(CGraph gr, select,
+                     np.ndarray[gint, ndim=1] edge_weights=None):
+    """Return a subgraph of the CGraph gr by picking all nodes
+    [0:gr.num_nodes] for which select is True. select can be either a
+    numpy array, or a function that takes the node number as
+    input. This function returns a CGraph as well.
+
+    The nodes in the new graph are again numbered sequentially from 0
+    to num_nodes-1, where num_nodes is the number of nodes in the
+    subgraph. The numbering is done such that the ordering of the node
+    numbers in the original and the subgraph are preserved (i.e.
+    if nodes n1 and n2 are both in the subgraph, and
+    original node number of n1 < original node number of n2,
+    then also subgraph node number n1 < subgraph node number n2).
+
+    If edge_weights is provided, the function also returns the edge
+    weights for the subgraph which are simply a subset of the original
+    weights.
+
+    This function returns a simple graph, regardless of whether the
+    input was a two-way graph or not (NOTE: This restriction could be
+    lifted, if necessary). Also, the resulting edge_ids are not
+    related to the original ones in any way (NOTE: There certainly is
+    a relation, but as long no-one needs it, we do not specify
+    it). Also, negative nodes are discarded (NOTE: this restriction
+    can also be lifted).
+    """
+
+    cdef np.ndarray[gint, ndim=1] indextab
+    cdef CGraph_malloc subgr
+    cdef np.ndarray[gint, ndim=1] sub_edge_weights
+    cdef gint sub_num_nodes, sub_num_edges
+    cdef gint i, iedge, edge_count
+
+    # First figure out the new number of nodes.
+    sub_num_nodes = 0
+    indextab = np.empty(gr.num_nodes, dtype=gint_dtype)
+    indextab[:] = -1
+
+    # Pre-evaluating the select functions seems to be more than twice as fast
+    # as calling select() repeatedly in the loop.  The thing is that one cannot
+    # type ndarray as bool in Cython (yet) [Taking bint results in a strange
+    # crash].  It would be possible to cast it into a cython type using
+    # .astype(), but this didn't seem to make any relevant speed difference.
+    if isinstance(select, np.ndarray):
+        selecttab = select
+    else:
+        selecttab = select(np.arange(gr.num_nodes, dtype=gint_dtype))
+
+    for i in xrange(gr.num_nodes):
+        if selecttab[i]:
+            indextab[i] = sub_num_nodes
+            sub_num_nodes += 1
+
+    # Now count the number of new edges.
+    sub_num_edges = 0
+
+    for i in xrange(gr.num_nodes):
+        if indextab[i] > -1:
+            for iedge in xrange(gr.heads_idxs[i], gr.heads_idxs[i + 1]):
+                if indextab[gr.heads[iedge]] > -1:
+                    sub_num_edges += 1
+
+    # Allocate the new graph.
+    subgr = CGraph_malloc(False, False, sub_num_nodes, sub_num_edges, 0, 0)
+
+    if edge_weights != None:
+        sub_edge_weights = np.empty(sub_num_edges, dtype=gint_dtype)
+
+    # Now fill the new edge array.
+    edge_count = 0
+
+    for i in xrange(gr.num_nodes):
+        if indextab[i]>-1:
+            subgr.heads_idxs[indextab[i]] = edge_count
+            for iedge in xrange(gr.heads_idxs[i], gr.heads_idxs[i+1]):
+                if indextab[gr.heads[iedge]] > -1:
+                    subgr.heads[edge_count] = indextab[gr.heads[iedge]]
+                    if edge_weights != None:
+                        sub_edge_weights[edge_count] = edge_weights[iedge]
+                    edge_count += 1
+    subgr.heads_idxs[sub_num_nodes] = edge_count
+
+    subgr.num_edges = edge_count
+
+    if edge_weights != None:
+        return subgr, sub_edge_weights
+    else:
+        return subgr
+
+
+def print_graph(gr):
+    for i in xrange(gr.num_nodes):
+        print i," -> ",
+        for j in gr.out_neighbors(i):
+            print j,
+        print
diff --git a/kwant/lattice.py b/kwant/lattice.py
new file mode 100644
index 0000000000000000000000000000000000000000..13f95cd0d201d48075da777e7f1f54d9f4067b41
--- /dev/null
+++ b/kwant/lattice.py
@@ -0,0 +1,403 @@
+from __future__ import division
+
+__all__ = ['make_lattice', 'TranslationalSymmetry',
+           'PolyatomicLattice', 'MonatomicLattice']
+
+import struct
+from math import sqrt
+from itertools import izip, chain
+import numpy as np
+from . import builder
+
+
+def make_lattice(prim_vecs, basis=None):
+    """
+    Create a Bravais lattice, which may have more than one basis site.
+
+    Parameters
+    ----------
+    prim_vecs : sequence of floats
+        Primitive vectors of a Bravais lattice.
+    basis : sequence of floats
+        Coordinates of the basis sites inside the unit cell.
+
+    Returns
+    -------
+    lattice : either `MonatomicLattice` or `PolyatomicLattice`
+        Resulting lattice.
+
+    Notes
+    -----
+    This function is largely an alias to the constructors of corresponding
+    lattices.
+    """
+    if basis is None:
+        return MonatomicLattice(prim_vecs)
+    else:
+        return PolyatomicLattice(prim_vecs, basis)
+
+
+class PolyatomicLattice(object):
+    """
+    Bravais lattice with a basis containing more than one site.
+
+    Contains monatomic sublattices.
+
+    Parameters
+    ----------
+    prim_vecs : sequence of floats
+        Primitive vectors of a Bravais lattice.
+    basis : sequence of floats
+        Coordinates of the basis sites inside the unit cell.
+
+    Instance Variables
+    ------------------
+    sublattices : list of `MonatomicLattice`
+        Sublattices belonging to this lattice.
+
+    Raises
+    ------
+    ValueError
+        If dimensionalities do not match.
+
+    Notes
+    -----
+
+    """
+    def __init__(self, prim_vecs, basis):
+        prim_vecs = np.asarray(prim_vecs, dtype=float)
+        dim = prim_vecs.shape[1]
+        if prim_vecs.shape[0] > dim:
+            raise ValueError('Number of primitive vectors exceeds '
+                             'the space dimensionality.')
+        basis = np.asarray(basis, dtype=float)
+        if basis.shape[1] != dim:
+            raise ValueError('Basis dimensionality does not match '
+                             'the space dimensionality.')
+        self.sublattices = [MonatomicLattice(prim_vecs, offset)
+                            for offset in basis]
+        # Sequence of primitive vectors of the lattice.
+        self.prim_vecs = prim_vecs
+
+    # TODO (Anton): Currently speed of shape does not seem to cause problem,
+    # but memory usage is excessive. This function might be changed to work
+    # with Builder, so that examined sites are already stored in Builder, and
+    # not in an additional list.
+    def shape(self, function, start):
+        """
+        Yield all the lattice site which belong to a certain shape.
+
+        Parameters
+        ----------
+        function : a boolean function of real space coordinates
+            A function which evaluates to True inside the desired shape.
+        start : real-valued vector
+            The starting point to the flood-fill algorithm.  If the site
+            nearest to `start` is not inside the shape, no sites are returned.
+
+        Returns
+        -------
+        sites : sequence of `Site` objects
+            all the sites that belong to the lattice and fit inside the shape.
+        """
+        dim = len(start)
+        if dim != self.prim_vecs.shape[1]:
+            raise ValueError('Dimensionality of start position does not match'
+                             ' the space dimensionality.')
+        sls = self.sublattices
+
+        # Check if no sites are going to be added, to catch a common error.
+        empty = True
+        for sl in sls:
+            if function(sl(*sl.closest(start)).pos):
+                empty = False
+        if empty:
+            msg = 'No sites close to {0} are inside the desired shape.'
+            raise ValueError(msg.format(start))
+
+        # Continue to flood fill.
+        pending = [sl.closest(start) for sl in sls]
+        examined = set([])
+        while pending:
+            tag = pending.pop()
+            if tag in examined: continue
+            examined.add(tag)
+
+            vec = np.dot(tag, self.prim_vecs)
+            any_hits = False
+            for sl in sls:
+                if not function(vec + sl.offset): continue
+                yield sl(*tag)
+                any_hits = True
+
+            if not any_hits: continue
+            tag = list(tag)
+            for i in xrange(dim):
+                tag[i] += 1
+                pending.append(tuple(tag))
+                tag[i] -= 2
+                pending.append(tuple(tag))
+                tag[i] += 1
+
+    def vec(self, int_vec):
+        """
+        Return coordinates of a Bravais lattice vector in real space.
+
+        Parameters
+        ----------
+        vec : integer vector
+
+        Returns
+        -------
+        output : real vector
+        """
+        return np.dot(int_vec, self.prim_vecs)
+
+
+class MonatomicLattice(PolyatomicLattice, builder.SiteGroup):
+    """
+    A site group of sites belonging to a Bravais lattice.
+
+    Parameters
+    ----------
+    prim_vecs : sequence of floats
+        Primitive vectors of a Bravais lattice.
+    offset : vector of floats
+        Displacement of the lattice origin from the real space
+        coordinates origin.
+    """
+    dim_end = 100
+    pack_fmt_prefix = '='
+    pack_letter = 'i'
+    _pack_item_size = struct.calcsize(pack_fmt_prefix + pack_letter)
+    _pack_fmts = [pack_fmt_prefix + pack_letter * i
+                  for i in xrange(dim_end)]
+    del dim_end, pack_fmt_prefix, pack_letter, i
+
+    def __init__(self, prim_vecs, offset=None):
+        prim_vecs = np.asarray(prim_vecs, dtype=float)
+        dim = prim_vecs.shape[1]
+        if prim_vecs.shape[0] > dim:
+            raise ValueError('Number of primitive vectors exceeds '
+                             'the space dimensionality.')
+        if offset is None:
+            offset = np.zeros(dim)
+        else:
+            offset = np.asarray(offset, dtype=float)
+            if offset.shape != (dim,):
+                raise ValueError('Dimensionality of offset does not match '
+                                 'that of the space.')
+        self.sublattices = [self]
+        self.prim_vecs = prim_vecs
+        self.inv_pv = np.linalg.pinv(prim_vecs)
+        self.offset = offset
+
+        assert 0 < dim < len(self._pack_fmts)
+        builder.SiteGroup.__init__(self)
+        self.dim = dim
+
+    def pack_tag(self, tag):
+        assert len(tag) == self.dim
+        return struct.pack(self._pack_fmts[len(tag)], *tag)
+
+    def verify_tag(self, tag):
+        try:
+            l = len(tag)
+        except:
+            raise ValueError('The tag must be a sequence.')
+        if l != self.dim:
+            return False
+        elif not [isinstance(i, int) for i in tag]:
+            return False
+        else:
+            return True
+
+    def unpack_tag(self, ptag):
+        d = len(ptag)
+        assert(d % self._pack_item_size == 0)
+        d //= self._pack_item_size
+        return struct.unpack(self._pack_fmts[d], ptag)
+
+    def closest(self, pos):
+        """Find the site closest to position `pos`."""
+        return tuple(np.asarray(
+                np.round(np.dot(pos - self.offset, self.inv_pv)),
+                dtype=int))
+
+    def pos(self, tag):
+        """Return the real space position of the site with a given tag."""
+        return np.dot(tag, self.prim_vecs) + self.offset
+
+
+# The following class is designed such that it should avoid floating
+# point precision issues.
+
+class TranslationalSymmetry(builder.Symmetry):
+    """
+    A translational symmetry defined in real space.
+
+    Group elements of this symmetry are integer tuples of appropriate length.
+
+    Parameters
+    ----------
+    periods : list of lists of real-valued variables
+        list of symmetry periods in real space.
+
+    Notes
+    -----
+    This symmetry automatically chooses the fundamental domain for each new
+    `SiteGroup` it encounters. If this site group does not correspond to a
+    Bravais lattice, or if it does not have a commensurate period, an error is
+    produced. A certain flexibility in choice of the fundamental domain can be
+    achieved by calling manually the `add_site_group` method and providing it
+    the `other_vectors` parameter.
+    """
+    def __init__(self, periods):
+        self.periods = np.array(periods)
+        # A dictionary containing cached data required for applying the
+        # symmetry to different site groups.
+        self.site_group_data = {}
+
+    def add_site_group(self, gr, other_vectors=None):
+        """
+        Select a fundamental domain for site group and cache associated data.
+
+        Parameters
+        ----------
+        gr : `SiteGroup`
+            the site group which has to be processed.  Be sure to delete the
+            previously processed site groups from `site_group_data` if you want
+            to modify the cache.
+        other_vectors : list of lists of integers
+            Bravais lattice vectors used to complement the periods in forming
+            a basis. The fundamental domain belongs to the linear space
+            spanned by these vectors.
+
+        Raises
+        ------
+        KeyError
+            If `gr` is already stored in `site_group_data`.
+        ValueError
+            If lattice shape of `gr` cannot have the given `periods`.
+        """
+        if gr in self.site_group_data:
+            raise KeyError('Group already processed, delete it from '
+                           'site_group_data first.')
+        inv = gr.prim_vecs.copy()
+        inv = np.linalg.pinv(inv)
+        bravais_periods = [np.dot(i, inv) for i in self.periods]
+        if not np.allclose(bravais_periods, np.round(bravais_periods),
+                           rtol=0, atol=1e-8) or \
+           not np.allclose([gr.vec(i) for i in bravais_periods],
+                           self.periods):
+            msg = 'Site group {0} does not have commensurate periods with ' +\
+                  'symmetry {1}.'
+            raise ValueError(msg.format(gr, self))
+        bravais_periods = np.array(np.round(bravais_periods), dtype='int')
+        (num_dir, dim) = bravais_periods.shape
+        if other_vectors is None:
+            other_vectors = []
+        for vec in other_vectors:
+            for a in vec:
+                if not isinstance(a, int):
+                    raise ValueError('Only integer other_vectors are allowed.')
+        m = np.zeros((dim, dim), dtype=int)
+
+        m.T[: num_dir] = bravais_periods
+        num_vec = num_dir + len(other_vectors)
+        if len(other_vectors) != 0:
+            m.T[num_dir : num_vec] = other_vectors
+        norms = np.apply_along_axis(np.linalg.norm, 1, m)
+        indices = np.argsort(norms)
+        for coord in zip(indices, range(num_vec, dim)):
+            m[coord] = 1
+
+        det_m = int(round(np.linalg.det(m)))
+        if det_m == 0:
+            raise ValueError('Singular symmetry matrix.')
+
+        det_x_inv_m = \
+            np.array(np.round(det_m * np.linalg.inv(m)), dtype=int)
+        assert (np.dot(m, det_x_inv_m) // det_m == np.identity(dim)).all()
+
+        det_x_inv_m_part = det_x_inv_m[:num_dir, :]
+        m_part = m[:, :num_dir]
+        self.site_group_data[gr] = (det_x_inv_m_part, m_part, det_m)
+
+    @property
+    def num_directions(self):
+        return len(self.periods)
+
+    def which(self, site):
+        try:
+            det_x_inv_m_part, m_part, det_m = self.site_group_data[site.group]
+        except KeyError:
+            self.add_site_group(site.group)
+            return self.which(site)
+        return np.dot(det_x_inv_m_part, site.tag) // det_m
+
+    def act(self, element, a, b=None):
+        try:
+            det_x_inv_m_part, m_part, det_m = self.site_group_data[a.group]
+        except KeyError:
+            self.add_site_group(gr)
+            return self.act(element, a, b)
+        try:
+            delta = np.dot(m_part, element)
+        except ValueError:
+            msg = 'Expecting a {0}-tuple group element, but got `{1}` instead.'
+            raise ValueError(msg.format(self.num_directions, element))
+        if b is None:
+            return a.shifted(delta)
+        else:
+            return a.shifted(delta), b.shifted(delta)
+
+    def to_fd(self, a, b=None):
+        return self.act(-self.which(a), a, b)
+
+    def reversed(self):
+        """Return a reversed copy of the symmetry.
+
+        The result is identical to creating a new symmetry with all the
+        period vectors opposite to the original but with the same fundamental
+        domain.
+        """
+        periods = [[-i for i in j] for j in self.periods]
+        result = TranslationalSymmetry(periods)
+        for gr in self.site_group_data:
+            det_x_inv_m_part, m_part, det_m = self.site_group_data[gr]
+            if self.num_directions % 2:
+                det_m = -det_m
+            else:
+                det_x_inv_m_part = -det_x_inv_m_part
+            m_part = -m_part
+            result.site_group_data[gr] = (det_x_inv_m_part.copy(), m_part,
+                                          det_m)
+        return result
+
+
+################ Library of lattices (to be extended)
+
+class Chain(MonatomicLattice):
+    def __init__(self, a=1):
+        MonatomicLattice.__init__(self, ((a,),))
+        self.nearest = [((1,), self, self)]
+
+
+class Square(MonatomicLattice):
+    def __init__(self, a=1):
+        MonatomicLattice.__init__(self, ((a, 0), (0, a)))
+        self.nearest = [((1, 0), self, self),
+                        ((0, 1), self, self)]
+
+
+class Honeycomb(PolyatomicLattice):
+    def __init__(self, a=1):
+        PolyatomicLattice.__init__(
+            self,
+            ((a, 0), (0.5 * a, 0.5 * a * sqrt(3))),
+            ((0, 0), (0, a / sqrt(3))))
+        self.a, self.b = self.sublattices
+        self.nearest = [((0, 0), self.b, self.a),
+                        ((0, 1), self.b, self.a),
+                        ((-1, 1), self.b, self.a)]
diff --git a/kwant/linalg/__init__.py b/kwant/linalg/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a545165d4cc23ee527add339e51ed93823f2687c
--- /dev/null
+++ b/kwant/linalg/__init__.py
@@ -0,0 +1,8 @@
+__all__ = ['lapack']
+from . import lapack
+
+# Merge the public interface of the other submodules.
+for module in ['decomp_lu', 'decomp_ev', 'decomp_schur']:
+    exec 'from . import {0}'.format(module)
+    exec 'from .{0} import *'.format(module)
+    exec '__all__.extend({0}.__all__)'.format(module)
diff --git a/kwant/linalg/decomp_ev.py b/kwant/linalg/decomp_ev.py
new file mode 100644
index 0000000000000000000000000000000000000000..19a5d126e11f9b5355f61ac5fd8d6c2c5e88fc7b
--- /dev/null
+++ b/kwant/linalg/decomp_ev.py
@@ -0,0 +1,58 @@
+__all__ = ['gen_eig']
+
+from . import lapack
+
+def gen_eig(a, b, left=False, right=True, overwrite_ab=False):
+    """Compute the eigenvalues and -vectors of the matrix pencil (a,b), i.e. of
+    the generalized (unsymmetric) eigenproblem a v = lambda b v where a and b
+    are square (unsymmetric) matrices, v the eigenvector and lambda the
+    eigenvalues.
+
+    The eigenvalues are returned as numerator alpha and denominator beta,
+    i.e. lambda = alpha/beta. This is advantageous, as lambda can be infinity
+    which is well-defined in this case as beta = 0.
+
+    Parameters
+    ----------
+    a : array, shape (M, M)
+    b : array, shape (M, M)
+        `a` and `b` are the two matrices defining the generalized eigenproblem
+    left : boolean
+        Whether to calculate and return left eigenvectors
+    right : boolean
+        Whether to calculate and return right eigenvectors
+
+    overwrite_ab : boolean
+        Whether to overwrite data in `a` and `b` (may improve performance)
+
+    Returns
+    -------
+    alpha : complex array, shape (M,)
+    beta : real or complex array, shape (M,)
+        The eigenvalues in the form ``alpha/beta``
+
+    (if left == True)
+    vl : double or complex array, shape (M, M)
+        The left eigenvector corresponding to the eigenvalue
+        ``alpha[i]/beta[i]`` is the column ``vl[:,i]``.
+
+    (if right == True)
+    vr : double or complex array, shape (M, M)
+        The right eigenvector corresponding to the eigenvalue
+        ``alpha[i]/beta[i]`` is the column ``vr[:,i]``.
+    """
+
+    ltype, a, b = lapack.prepare_for_lapack(overwrite_ab, a, b)
+
+    if a.ndim != 2 or b.ndim != 2:
+        raise ValueError("gen_eig requires both a and be to be matrices")
+
+    if a.shape[0] != a.shape[1]:
+        raise ValueError("gen_eig requires square matrix input")
+
+    if b.shape[0] != a.shape[0] or b.shape[1] != a.shape[1]:
+        raise ValueError("gen_eig requires a and be to have the same shape")
+
+    ggev = getattr(lapack, ltype + "ggev")
+
+    return ggev(a, b, left, right)
diff --git a/kwant/linalg/decomp_lu.py b/kwant/linalg/decomp_lu.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7659a9d8c70a8e7d74e47caf62343dbe0505b44
--- /dev/null
+++ b/kwant/linalg/decomp_lu.py
@@ -0,0 +1,131 @@
+__all__ = ['lu_factor', 'lu_solve', 'rcond_from_lu']
+
+import numpy as np
+from . import lapack
+
+def lu_factor(a, overwrite_a = False):
+    """Compute the LU factorization of a matrix A = P * L * U. The function
+    returns a tuple (lu, p, singular), where lu contains the LU factorization
+    storing the unit lower triangular matrix L in the strictly lower triangle
+    (the unit diagonal is not stored) and the upper triangular matrix U in the
+    upper triangle. p is a vector of pivot indices, and singular a Boolean
+    value indicating whether the matrix A is singular up to machine precision.
+
+    NOTE: This function mimics the behavior of scipy.linalg.lu_factor (except
+    that it has in addition the flag singular). The main reason is that
+    lu_factor in scipy has a bug that depending on the type of numpy matrix
+    passed to it, it would not return what was descirbed in the
+    documentation. This bug will be (probably) fixed in 0.10.0 but until this
+    is standard, this version is better to use.
+
+    Parameters
+    ----------
+    a : array, shape (M, M)
+        Matrix to factorize
+    overwrite_a : boolean
+        Whether to overwrite data in a (may increase performance)
+
+    Returns
+    -------
+    lu : array, shape (N, N)
+        Matrix containing U in its upper triangle, and L in its lower triangle.
+        The unit diagonal elements of L are not stored.
+    piv : array, shape (N,)
+        Pivot indices representing the permutation matrix P:
+        row i of matrix was interchanged with row piv[i].
+    singular : boolean
+        Whether the matrix a is singular (up to machine precision)
+    """
+
+    ltype, a = lapack.prepare_for_lapack(overwrite_a, a)
+
+    if a.ndim != 2:
+        raise ValueError("lu_factor expects a matrix")
+
+    if ltype == 'd':
+        return lapack.dgetrf(a)
+    elif ltype == 'z':
+        return lapack.zgetrf(a)
+    elif ltype == 's':
+        return lapack.sgetrf(a)
+    else:
+        return lapack.cgetrf(a)
+
+def lu_solve((lu, ipiv, singular), b):
+    """Solve a linear system of equations, a x = b, given the LU
+    factorization of a
+
+    Parameters
+    ----------
+    (lu, piv, singular)
+        Factorization of the coefficient matrix a, as given by lu_factor
+    b : array (vector or matrix)
+        Right-hand side
+
+    Returns
+    -------
+    x : array (vector or matrix)
+        Solution to the system
+    """
+
+    if singular:
+        raise RuntimeWarning("In lu_solve: the flag singular indicates "
+                             "a singular matrix. Result of solve step "
+                             "are probably unreliable")
+
+    ltype, lu, b = lapack.prepare_for_lapack(False, lu, b)
+    ipiv = np.ascontiguousarray(np.asanyarray(ipiv), dtype = lapack.int_dtype)
+
+    if b.ndim > 2:
+        raise ValueError("lu_solve: b must be a vector or matrix")
+
+    if lu.shape[0] != b.shape[0]:
+        raise ValueError("lu_solve: incompatible dimensions of b")
+
+    if ltype == 'd':
+        return lapack.dgetrs(lu, ipiv, b)
+    elif ltype == 'z':
+        return lapack.zgetrs(lu, ipiv, b)
+    elif ltype == 's':
+        return lapack.sgetrs(lu, ipiv, b)
+    else:
+        return lapack.cgetrs(lu, ipiv, b)
+
+def rcond_from_lu((lu, ipiv, singular), norm_a, norm = "1"):
+    """Compute the reciprocal condition number from the LU decomposition as
+    returned from lu_factor(), given additionally the norm of the matrix a in
+    norm_a.
+
+    The reciprocal condition number is given as 1/(||A||*||A^-1||), where
+    ||...|| is a matrix norm.
+
+    Parameters
+    ----------
+    (lu, piv, singular)
+        Factorization of the matrix a, as given by lu_factor
+    norm_a : float or complex
+        norm of the original matrix a (type of norm is specified in norm)
+    norm : {'1', 'I'}, optional
+        type of matrix norm which should be used to compute the condition
+        number ("1": 1-norm, "I": infinity norm). Default: '1'.
+
+    Returns
+    -------
+    rcond : float or complex
+        reciprocal condition number of a with respect to the type of matrix
+        norm specified in norm
+    """
+
+    if not norm in ("1", "I"):
+        raise ValueError("norm in rcond_from_lu must be either '1' or 'I'")
+
+    ltype, lu = lapack.prepare_for_lapack(False, lu)
+
+    if ltype == 'd':
+        return lapack.dgecon(lu, norm_a, norm)
+    elif ltype == 'z':
+        return lapack.zgecon(lu, norm_a, norm)
+    elif ltype == 's':
+        return lapack.sgecon(lu, norm_a, norm)
+    else:
+        return lapack.cgecon(lu, norm_a, norm)
diff --git a/kwant/linalg/decomp_schur.py b/kwant/linalg/decomp_schur.py
new file mode 100644
index 0000000000000000000000000000000000000000..96182fc8a00c35ded2e324fc5e3df2bc1788a441
--- /dev/null
+++ b/kwant/linalg/decomp_schur.py
@@ -0,0 +1,694 @@
+__all__ = ['schur', 'convert_r2c_schur', 'order_schur', 'evecs_from_schur',
+           'gen_schur', 'order_gen_schur', 'convert_r2c_gen_schur',
+           'evecs_from_gen_schur']
+
+from math import sqrt
+import numpy as np
+from . import lapack
+
+def schur(a, calc_q=True, calc_ev=True, overwrite_a=False):
+    """Compute the Schur form of a square matrix a.
+
+    The Schur form is a decomposition of the form a = q * t * q^dagger, where q
+    is a unitary matrix and t a upper triagonal matrix when computing the Schur
+    form of a complex matrix, and a quasi-upper triagonal matrix with only 1x1
+    and 2x2 blocks on the diagonal when computing the Schur form of a real
+    matrix (In the latter case, the 1x1 blocks correspond to real eigenvalues,
+    the 2x2 blocks to conjugate pairs of complex eigenvalues).
+
+    The Schur form is closely related to the eigenvalue problem (the entries of
+    the diagonal of the complex Schur form are the eigenvalues of the matrix),
+    and the routine can optionally also return the eigenvalues.
+
+    Parameters
+    ----------
+    a : array, shape (M, M)
+        Matrix for which to compute the Schur form.
+    calc_q : boolean
+        Whether to compute the unitary/orthogonal matrix `q`.
+    calc_ev : boolean
+        Whether to return the eigenvalues as a separate array.
+    overwrite_a : boolean
+        Whether to overwrite data in `a` (may increase performance).
+
+    Returns
+    -------
+    t : array, shape (M, M)
+        Schur form of the original matrix (complex or real, depending on the
+        input matrix).
+
+    (if calc_q == True)
+    q : array, shape (M, M)
+        Unitary transformation matrix.
+
+    (if calc_ev == True)
+    ev: array, shape (M,)
+        Array of eigenvalues of the matrix `a`. Can be complex even if a is
+        real. In the latter case, the complex eigenvalues come in conjugated
+        pairs with the eigenvalue with positive imaginary part coming
+        first.
+
+    Raises
+    ------
+    LinAlgError
+        If the underlying QR iteration fails to converge.
+    """
+
+    ltype, a = lapack.prepare_for_lapack(overwrite_a, a)
+
+    if a.ndim != 2:
+        raise ValueError("Expect matrix as input")
+
+    if a.shape[0] != a.shape[1]:
+        raise ValueError("Expect square matrix")
+
+    gees = getattr(lapack, ltype+"gees")
+
+    return gees(a, calc_q, calc_ev)
+
+def convert_r2c_schur(t, q):
+    """Convert a real Schur form (with possibly 2x2 blocks on the diagonal)
+    into a complex Schur form that is completely triangular.
+
+    This function is equivalent to the scipy.linalg.rsf2csf pendant (though the
+    implementation is different), but there is additionally the guarantee that
+    in the case of a 2x2 block at rows and columns i and i+1, t[i, i] will
+    contain the eigenvalue with the positive part, and t[i+1, i+1] the one with
+    the negative part.  This ensures that the list of eigenvalues (more
+    precisely, their order) returned originally from schur() is still valid for
+    the newly formed complex Schur form.
+
+    Parameters
+    ----------
+    t : array, shape (M, M)
+        Real Schur form of the original matrix
+    q : array, shape (M, M)
+        Schur transformation matrix
+
+    Returns
+    -------
+    t : array, shape (M, M)
+        Complex Schur form of the original matrix
+    q : array, shape (M, M)
+        Schur transformation matrix corresponding to the complex form
+    """
+
+    # First find the positions of 2x2-blocks
+    blockpos = np.diagonal(t, -1).nonzero()[0]
+
+    # Check if there are actually any 2x2-blocks
+    if not blockpos.size:
+        return (t, q)
+    else:
+        t2 = t.astype(np.common_type(t, np.array([], np.complex64)))
+        q2 = q.astype(np.common_type(q, np.array([], np.complex64)))
+
+    for i in blockpos:
+        # Bringing a 2x2 block to complex triangular form is relatively simple:
+        # the 2x2 blocks are guaranteed to be of the form [[a, b], [c, a]],
+        # where b*c < 0. The eigenvalues of this matrix are a +/- i sqrt(-b*c),
+        # the corresponding eigenvectors are [ +/- sqrt(-b*c), c].  The Schur
+        # form can be achieved by a unitary 2x2 matrix with one of the
+        # eigenvectors in the first column, and the second column an orthogonal
+        # vector.
+
+        a = t[i, i]
+        b = t[i, i+1]
+        c = t[i+1, i]
+
+        x = 1j * sqrt(-b * c)
+        y = c
+        norm = sqrt(-b * c + c * c)
+
+        U = np.array([[x/norm, -y/norm],[y/norm, -x/norm]])
+
+        t2[i, i] = a + x
+        t2[i+1, i] = 0.0
+        t2[i, i+1] = -b - c
+        t2[i+1, i+1] = a - x
+
+        t2[:i, i:i+2] = np.dot(t2[:i, i:i+2], U)
+        t2[i:i+2, i+2:] = np.dot(np.conj(U.T), t2[i:i+2, i+2:])
+
+        q2[:, i:i+2] = np.dot(q2[:, i:i+2], U)
+
+    return t2, q2
+
+
+def order_schur(select, t, q, calc_ev=True, overwrite_tq=False):
+    """Reorder the Schur form, selecting a cluster of eigenvalues.
+
+    This function reorders the generalized Schur form such that the cluster of
+    eigenvalues determined by select appears in the leading diagonal block of
+    the Schur form (this is useful, as the Schur vectors corresponding to the
+    leading diagonal block form an orthogonal basis for the subspace of
+    eigenvectors).
+
+    If a real Schur form is reordered, it is converted to complex form
+    (eliminating the 2x2 blocks on the diagonal) if in a complex conjugated
+    pair of eigenvalues only one eigenvalue is chosen.  In this case, the real
+    Schur from cannot be reordered in real form without splitting a 2x2 block
+    on the diagonal, hence switching to complex form is mandatory.
+
+    Parameters
+    ----------
+    t : array, shape (M, M)
+        Schur form
+    q : array, shape (M, M)
+        Unitary/orthogonal transformation matrices.
+    calc_ev : boolean, optional
+        Whether to return the reordered generalized eigenvalues of as two
+        separate arrays. Default: True
+    overwrite_tq : boolean, optional
+        Whether to overwrite data in `t` and `q` (may increase performance)
+        Default: False
+
+    Returns
+    -------
+    t : array, shape (M, M)
+        Reordered Schur form. If the original Schur form is real, and the
+        desired reordering separates complex conjugated pairs of generalized
+        eigenvalues, the resulting Schur form will be complex.
+    q : array, shape (M, M)
+        Unitary/orthogonal transformation matrix. Only computed if q is
+        provided (not None) as input. If the Schur form is converted from real
+        to complex, the transformation matrix is also converted from real
+        orthogonal to complex unitary
+    alpha : array, shape (M)
+    beta : array, shape (M)
+        Reordered eigenvalues. If the reordered Schur form is real, complex
+        conjugated pairs of eigenvalues are ordered such that the eigenvalue
+        with the positive imaginary part comes first.  Only computed if
+        ``calc_ev == True``
+    """
+
+    ltype, t, q = lapack.prepare_for_lapack(overwrite_tq, t, q)
+
+    trsen = getattr(lapack, ltype+"trsen")
+
+    # Figure out if select is a function or array.
+    isfun = isarray = True
+    try:
+        select(0)
+    except:
+        isfun = False
+    try:
+        select[0]
+    except:
+        isarray = False
+
+    if not (isarray or isfun):
+        raise ValueError("select must be either a function or an array")
+    elif isarray:
+        select = np.array(select, dtype = lapack.logical_dtype,
+                          order = 'F')
+    else:
+        select = np.array(np.vectorize(select)(np.arange(t.shape[0])),
+                          dtype= lapack.logical_dtype, order = 'F')
+
+    # Now check if the reordering can actually be done as desired,
+    # if we have a real Schur form (i.e. if the 2x2 blocks would be
+    # separated). If this is the case, convert to complex Schur form first.
+    for i in np.diagonal(t, -1).nonzero()[0]:
+        if bool(select[i]) != bool(select[i+1]):
+            t, q =convert_r2c_schur(t, q)
+            return order_schur(select, t, q, calc_ev, True)
+
+    return trsen(select, t, q, calc_ev)
+
+def evecs_from_schur(t, q, select=None, left=False, right=True,
+                     overwrite_tq=False):
+    """Compute eigenvectors from Schur form.
+
+    This function computes either all or selected eigenvectors for the matrix
+    that is represented by the Schur form t and the unitary matrix q, (not the
+    eigenvectors of t, but of q*t*q^dagger).
+
+    Parameters
+    ----------
+    t : array, shape (M, M)
+        Schur form
+    q : array, shape (M, M)
+        Unitary/orthogonal transformation matrix.
+    select : boolean function or array, optional
+        The value of ``select(i)`` or ``select[i]`` is used to decide whether
+        the eigenvector corresponding to the i-th eigenvalue should be
+        computed or not. If select is not provided (None), all eigenvectors
+        are computed. Default: None
+    left : boolean, optional
+        Whether to compute left eigenvectors. Default: False
+    right : boolean, optional
+        Whether to compute right eigenvectors. Default: True
+    overwrite_tq : boolean, optional
+        Whether to overwrite data in `t` and `q` (may increase performance)
+        Default: False
+
+    Returns
+    -------
+    vl : array, shape(M, N)
+        Left eigenvectors. N is the number of eigenvectors selected b
+        `select`, or equal to M if select is not provided. The eigenvectors
+        may be complex, even if `t` and `q` are real. Only computed if
+        ``left == True``.
+    vr : array, shape(M, N)
+        Right eigenvectors. N is the number of eigenvectors selected by
+        `select`, or equal to M if select is not provided. The eigenvectors
+        may be complex, even if `t` and `q` are real. Only computed if
+        ``right == True``.
+    """
+
+    ltype, t, q = lapack.prepare_for_lapack(overwrite_tq, t, q)
+
+    if (t.shape[0] != t.shape[1] or q.shape[0] != q.shape[1]
+        or t.shape[0] != q.shape[0]):
+        raise ValueError("Invalid Schur decomposition as input")
+
+    trevc = getattr(lapack, ltype+"trevc")
+
+    # check if select is a function or an array
+    if select is not None:
+        isfun = isarray = True
+        try:
+            select(0)
+        except:
+            isfun = False
+
+        try:
+            select[0]
+        except:
+            isarray = False
+
+        if not (isarray or isfun):
+            raise ValueError("select must be either a function, "
+                             "an array or None")
+        elif isarray:
+            selectarr = np.array(select, dtype = lapack.logical_dtype,
+                                 order = 'F')
+        else:
+            selectarr = np.array(np.vectorize(select)(np.arange(t.shape[0])),
+                                 dtype= lapack.logical_dtype, order = 'F')
+    else:
+        selectarr = None
+
+    return trevc(t, q, selectarr, left, right)
+
+def gen_schur(a, b, calc_q=True, calc_z=True, calc_ev=True,
+              overwrite_ab=False):
+    """Compute the generalized Schur form of a matrix pencil (a, b).
+
+    The generalized Schur form is a decomposition of the form a = q * s *
+    z^dagger and b = q * t * z^dagger, where q and z are unitary matrices
+    (orthogonal for real input), t is an upper triagonal matrix with
+    non-negative real diagonal, and s is a upper triangular matrix for complex
+    matrices, and a quasi-upper triangular matrix with only 1x1 and 2x2 blocks
+    on the diagonal for real matrices.  (In the latter case, the 1x1 blocks
+    correspond to real generalized eigenvalues, the 2x2 blocks to conjugate
+    pairs of complex generalized eigenvalues).
+
+    The generalized Schur form is closely related to the generalized eigenvalue
+    problem (the entries of the diagonal of the complex Schur form are the
+    eigenvalues of the matrix, for example), and the routine can optionally
+    also return the generalized eigenvalues in the form (alpha, beta), such
+    that alpha/beta is a generalized eigenvalue of the pencil (a, b) (see also
+    gen_eig()).
+
+    Parameters
+    ----------
+    a : array, shape (M, M)
+    b : array, shape (M, M)
+        Matrix pencil for which to compute the generalized Schur form
+    calc_q : boolean, optional
+    calc_z : boolean, optional
+        Whether to compute the unitary/orthogonal matrices `q` and `z`.
+        Default: True
+    calc_ev : boolean, optional
+        Whether to return the generalized eigenvalues as two separate
+        arrays. Default: True
+    overwrite_ab : boolean, optional
+        Whether to overwrite data in `a` and `b` (may increase performance)
+        Default: False
+
+    Returns
+    -------
+    s : array, shape (M, M)
+    t : array, shape (M, M)
+        Generalized Schur form of the original matrix pencil (`a`,`b`)
+        (complex or real, depending on the input matrices)
+    q : array, shape (M, M)
+    z : array, shape (M, M)
+        Unitary/orthogonal transformation matrices. Only computed if
+        ``calc_q == True`` or ``calc_z == True``, respectively.
+    alpha : array, shape (M)
+    beta : array, shape (M)
+        Generalized eigenvalues of the matrix pencil (`a`, `b`) given
+        as numerator (`alpha`) and denominator (`beta`), such that the
+        generalized eigenvalues are given as ``alpha/beta``. alpha can
+        be complex even if a is real. In the latter case, complex
+        eigenvalues come in conjugated pairs with the eigenvalue with
+        positive imaginary part coming first. Only computed if
+        ``calc_ev == True``.
+
+    Raises
+    ------
+    LinAlError
+        If the underlying QZ iteration fails to converge.
+    """
+
+    ltype, a, b = lapack.prepare_for_lapack(overwrite_ab, a, b)
+
+    if a.ndim != 2 or b.ndim != 2:
+        raise ValueError("Expect matrices as input")
+
+    if a.shape[0] != a.shape[1]:
+        raise ValueError("Expect square matrix a")
+
+    if a.shape[0] != b.shape[0] or a.shape[0] != b.shape[1]:
+        raise ValueError("Shape of b is incompatible to matrix a")
+
+    gges = getattr(lapack, ltype+"gges")
+
+    return gges(a, b, calc_q, calc_z, calc_ev)
+
+def order_gen_schur(select, s, t, q=None, z=None, calc_ev=True,
+                    overwrite_stqz=False):
+    """Reorder the generalized Schur form.
+
+    This function reorders the generalized Schur form such that the cluster of
+    eigenvalues determined by select appears in the leading diagonal blocks of
+    the Schur form (this is useful, as the Schur vectors corresponding to the
+    leading diagonal blocks form an orthogonal basis for the subspace of
+    eigenvectors).
+
+    If a real generalized Schur form is reordered, it is converted to complex
+    form (eliminating the 2x2 blocks on the diagonal) if in a complex
+    conjugated pair of eigenvalues only one eigenvalue is chosen.  In this
+    case, the real Schur from cannot be reordered in real form without
+    splitting a 2x2 block on the diagonal, hence switching to complex form is
+    mandatory.
+
+    Parameters
+    ----------
+    s : array, shape (M, M)
+    t : array, shape (M, M)
+        Matrices describing the generalized Schur form.
+    q : array, shape (M, M), optional
+    z : array, shape (M, M), optional
+        Unitary/orthogonal transformation matrices. Default: None.
+    calc_ev : boolean, optional
+        Whether to return the reordered generalized eigenvalues of as two
+        separate arrays. Default: True.
+    overwrite_stqz : boolean, optional
+        Whether to overwrite data in `s`, `t`, `q`, and `z` (may
+        increase performance) Default: False.
+
+    Returns
+    -------
+    s : array, shape (M, M)
+    t : array, shape (M, M)
+        Reordered general Schur form. If the original Schur form is real, and
+        the desired reordering separates complex conjugated pairs of
+        generalized eigenvalues, the resulting Schur form will be complex.
+    q : array, shape (M, M)
+    z : array, shape (M, M)
+        Unitary/orthogonal transformation matrices. Only computed if
+        `q` and `z` are provided (not None) on entry, respectively. If
+        the generalized Schur form is converted from real to complex,
+        the transformation matrices are also converted from real
+        orthogonal to complex unitary
+    alpha : array, shape (M)
+    beta : array, shape (M)
+        Reordered generalized eigenvalues. If the reordered Schur form is real,
+        complex conjugated pairs of eigenvalues are ordered such that the
+        eigenvalue with the positive imaginary part comes first.  Only computed
+        if ``calc_ev == True``.
+
+    Raises
+    ------
+    LinAlError
+        If the problem is too ill-conditioned.
+    """
+    ltype, s, t, q, z = lapack.prepare_for_lapack(overwrite_stqz, s, t, q, z)
+
+    if (s.ndim != 2 or t.ndim != 2 or
+        (q is not None and q.ndim != 2) or
+        (z is not None and z.ndim != 2)):
+        raise ValueError("Expect matrices as input")
+
+    if ((s.shape[0] != s.shape[1] or t.shape[0] != t.shape[1] or
+         s.shape[0] != t.shape[0]) or
+        (q is not None and (q.shape[0] != q.shape[1] or
+                            s.shape[0] != q.shape[0])) or
+        (z is not None and (z.shape[0] != z.shape[1] or
+                            s.shape[0] != z.shape[0]))):
+        raise ValueError("Invalid Schur decomposition as input")
+
+    tgsen = getattr(lapack, ltype+"tgsen")
+
+    # Figure out if select is a function or array.
+    isfun = isarray = True
+    try:
+        select(0)
+    except:
+        isfun = False
+    try:
+        select[0]
+    except:
+        isarray = False
+
+    if not (isarray or isfun):
+        raise ValueError("select must be either a function or an array")
+    elif isarray:
+        select = np.array(select, dtype = lapack.logical_dtype,
+                          order = 'F')
+    else:
+        select = np.array(np.vectorize(select)(np.arange(t.shape[0])),
+                          dtype= lapack.logical_dtype, order = 'F')
+
+    # Now check if the reordering can actually be done as desired, if we have a
+    # real Schur form (i.e. if the 2x2 blocks would be separated). If this is
+    # the case, convert to complex Schur form first.
+    for i in np.diagonal(s, -1).nonzero()[0]:
+        if bool(select[i]) != bool(select[i+1]):
+            # Convert to complex Schur form
+            if q is not None and z is not None:
+                s, t, q, z = convert_r2c_gen_schur(s, t, q, z)
+            elif q is not None:
+                s, t, q = convert_r2c_gen_schur(s, t, q=q, z=None)
+            elif z is not None:
+                s, t, z = convert_r2c_gen_schur(s, t, q=None, z=z)
+            else:
+                s,t = convert_r2c_gen_schur(s, t)
+
+            return order_gen_schur(select, s, t, q, z, calc_ev, True)
+
+    return tgsen(select, s, t, q, z, calc_ev)
+
+def convert_r2c_gen_schur(s, t, q=None, z=None):
+    """Convert a real generallzed Schur form (with possibly 2x2 blocks on the
+    diagonal) into a complex Schur form that is completely triangular.  If the
+    input is already completely triagonal (real or complex), the input is
+    returned unchanged.
+
+    This function guarantees that in the case of a 2x2 block at rows and
+    columns i and i+1, the converted, complex Schur form will contain the
+    generalized eigenvalue with the positive imaginary part in s[i,i] and
+    t[i,i], and the one with the negative imaginary part in s[i+1,i+1] and
+    t[i+1,i+1].  This ensures that the list of eigenvalues (more precisely,
+    their order) returned originally from gen_schur() is still valid for the
+    newly formed complex Schur form.
+
+    Parameters
+    ----------
+    s : array, shape (M, M)
+    t : array, shape (M, M)
+        Real generalized Schur form of the original matrix
+    q : array, shape (M, M), optional
+    z : array, shape (M, M), optional
+        Schur transformation matrix. Default: None
+
+    Returns
+    -------
+    s : array, shape (M, M)
+    t : array, shape (M, M)
+        Complex generalized Schur form of the original matrix,
+        completely triagonal
+    q : array, shape (M, M)
+    z : array, shape (M, M)
+        Schur transformation matrices corresponding to the complex
+        form. `q` or `z` are only computed if they are provided (not
+        None) on input.
+
+    Raises
+    ------
+    LinAlgError
+        If it fails to convert a 2x2 block into complex form (unlikely).
+    """
+
+    ltype, s, t, q, z = lapack.prepare_for_lapack(True, s, t, q, z)
+    # Note: overwrite=True does not mean much here, the arrays are all copied
+
+    if (s.ndim != 2 or t.ndim != 2 or
+        (q is not None and q.ndim != 2) or
+        (z is not None and z.ndim != 2)):
+        raise ValueError("Expect matrices as input")
+
+    if ((s.shape[0] != s.shape[1] or t.shape[0] != t.shape[1] or
+         s.shape[0] != t.shape[0]) or
+        (q is not None and (q.shape[0] != q.shape[1] or
+                            s.shape[0] != q.shape[0])) or
+        (z is not None and (z.shape[0] != z.shape[1] or
+                            s.shape[0] != z.shape[0]))):
+        raise ValueError("Invalid Schur decomposition as input")
+
+    # First, find the positions of 2x2-blocks.
+    blockpos = np.diagonal(s, -1).nonzero()[0]
+
+    # Check if there are actually any 2x2-blocks.
+    if not blockpos.size:
+        s2 = s
+        t2 = t
+        q2 = q
+        z2 = z
+    else:
+        s2 = s.astype(np.common_type(s, np.array([], np.complex64)))
+        t2 = t.astype(np.common_type(t, np.array([], np.complex64)))
+        if q is not None:
+            q2 = q.astype(np.common_type(q, np.array([], np.complex64)))
+        if z is not None:
+            z2 = z.astype(np.common_type(z, np.array([], np.complex64)))
+
+    for i in blockpos:
+        # In the following, we use gen_schur on individual 2x2 blocks (that are
+        # promoted to complex form) to compute the complex generalized Schur
+        # form. If necessary, order_gen_schur is used to ensure the desired
+        # order of eigenvalues.
+
+        sb, tb, qb, zb, alphab, betab = gen_schur(s2[i:i+2,i:i+2],
+                                                  t2[i:i+2,i:i+2])
+
+        # Ensure order of eigenvalues. (betab is positive)
+        if alphab[0].imag < alphab[1].imag:
+            sb, tb, qb, zb, alphab, betab = order_gen_schur([False, True],
+                                                            sb, tb, qb, zb)
+
+        s2[i:i+2, i:i+2] = sb
+        t2[i:i+2, i:i+2] = tb
+
+        s2[:i, i:i+2] = np.dot(s2[:i, i:i+2], zb)
+        s2[i:i+2, i+2:] = np.dot(qb.T.conj(), s2[i:i+2, i+2:])
+        t2[:i, i:i+2] = np.dot(t2[:i, i:i+2], zb)
+        t2[i:i+2, i+2:] = np.dot(qb.T.conj(), t2[i:i+2, i+2:])
+
+        if q is not None:
+            q2[:, i:i+2] = np.dot(q[:, i:i+2], qb)
+        if z is not None:
+            z2[:, i:i+2] = np.dot(z[:, i:i+2], zb)
+
+    if q is not None and z is not None:
+        return s2, t2, q2, z2
+    elif q is not None:
+        return s2, t2, q2
+    elif z is not None:
+        return s2, t2, z2
+    else:
+        return s2, t2
+
+def evecs_from_gen_schur(s, t, q=None, z=None, select=None,
+                         left=False, right=True, overwrite_qz=False):
+    """Compute eigenvectors from Schur form.
+
+    This function computes either all or selected eigenvectors for the matrix
+    that is represented by the generalized Schur form (s, t) and the unitary
+    matrices q and z, (not the generalized eigenvectors of (s,t), but of
+    (q*s*z^dagger, q*t*z^dagger)).
+
+    Parameters
+    ----------
+    s : array, shape (M, M)
+    t : array, shape (M, M)
+        Generalized Schur form.
+    q : array, shape (M, M), optional
+    z : array, shape (M, M), optional
+        Unitary/orthogonal transformation matrices. If the left eigenvectors
+        are to be computed, `q` must be provided, if the right eigenvectors are
+        to be computed, `z` must be provided.
+    select : boolean function or array, optional
+        The value of ``select(i)`` or ``select[i]`` is used to decide
+        whether the eigenvector corresponding to the i-th eigenvalue
+        should be computed or not. If select is not provided, all
+        eigenvectors are computed. Default: None.
+    left : boolean, optional
+        Whether to compute left eigenvectors. Default: False.
+    right : boolean, optional
+        Whether to compute right eigenvectors. Default: True.
+    overwrite_qz : boolean, optional
+        Whether to overwrite data in `q` and `z` (may increase performance).
+        Note that s and t remain always unchanged Default: False.
+
+    Returns
+    -------
+    (if left == True)
+    vl : array, shape(M, N)
+        Left generalized eigenvectors. N is the number of eigenvectors
+        selected by select, or equal to M if select is not
+        provided. The eigenvectors may be complex, even if `s`, `t`,
+        `q` and `z` are real.
+
+    (if right == True)
+    vr : array, shape(M, N)
+        Right generalized eigenvectors. N is the number of
+        eigenvectors selected by select, or equal to M if select is
+        not provided. The eigenvectors may be complex, even if `s`,
+        `t`, `q` and `z` are real.
+
+    """
+
+    ltype, s, t, q, z = lapack.prepare_for_lapack(overwrite_qz, s, t, q, z)
+
+    if (s.ndim != 2 or t.ndim != 2 or
+        (q is not None and q.ndim != 2) or
+        (z is not None and z.ndim != 2)):
+        raise ValueError("Expect matrices as input")
+
+    if ((s.shape[0] != s.shape[1] or t.shape[0] != t.shape[1] or
+         s.shape[0] != t.shape[0]) or
+        (q is not None and (q.shape[0] != q.shape[1] or
+                            s.shape[0] != q.shape[0])) or
+        (z is not None and (z.shape[0] != z.shape[1] or
+                            s.shape[0] != z.shape[0]))):
+        raise ValueError("Invalid Schur decomposition as input")
+
+    if left and q is None:
+        raise ValueError("Matrix q must be provided for left eigenvectors")
+
+    if right and z is None:
+        raise ValueError("Matrix z must be provided for right eigenvectors")
+
+    tgevc = getattr(lapack, ltype+"tgevc")
+
+    # Check if select is a function or an array.
+    if select is not None:
+        isfun = isarray = True
+        try:
+            select(0)
+        except:
+            isfun = False
+
+        try:
+            select[0]
+        except:
+            isarray = False
+
+        if not (isarray or isfun):
+            raise ValueError("select must be either a function, "
+                             "an array or None")
+        elif isarray:
+            selectarr = np.array(select, dtype = lapack.logical_dtype,
+                                 order = 'F')
+        else:
+            selectarr = np.array(np.vectorize(select)(np.arange(t.shape[0])),
+                                 dtype= lapack.logical_dtype, order = 'F')
+    else:
+        selectarr = None
+
+    return tgevc(s, t, q, z, selectarr, left, right)
diff --git a/kwant/linalg/f_lapack.pxd b/kwant/linalg/f_lapack.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..5940f9974a12960100de7be00f1b78d8937974a3
--- /dev/null
+++ b/kwant/linalg/f_lapack.pxd
@@ -0,0 +1,209 @@
+ctypedef int l_int
+ctypedef int l_logical
+
+cdef extern:
+  void sgetrf_(l_int *, l_int *, float *, l_int *, l_int *, l_int *)
+  void dgetrf_(l_int *, l_int *, double *, l_int *, l_int *, l_int *)
+  void cgetrf_(l_int *, l_int *, float complex *, l_int *, l_int *,
+               l_int *)
+  void zgetrf_(l_int *, l_int *, double complex *, l_int *, l_int *,
+               l_int *)
+
+  void sgetrs_(char *, l_int *, l_int *, float *, l_int *, l_int *,
+               float *, l_int *, l_int *)
+  void dgetrs_(char *, l_int *, l_int *, double *, l_int *, l_int *,
+               double *, l_int *, l_int *)
+  void cgetrs_(char *, l_int *, l_int *, float complex *, l_int *,
+               l_int *, float complex *, l_int *, l_int *)
+  void zgetrs_(char *, l_int *, l_int *, double complex *, l_int *,
+               l_int *, double complex *, l_int *, l_int *)
+
+  void sgecon_(char *, l_int *, float *, l_int *, float *, float *,
+               float *, l_int *, l_int *)
+  void dgecon_(char *, l_int *, double *, l_int *, double *, double *,
+               double *, l_int *, l_int *)
+  void cgecon_(char *, l_int *, float complex *, l_int *, float *,
+               float *, float complex *, float *, l_int *)
+  void zgecon_(char *, l_int *, double complex *, l_int *, double *,
+               double *, double complex *, double *, l_int *)
+
+  void sggev_(char *, char *, l_int *, float *, l_int *, float *, l_int *,
+              float *, float *, float *, float *, l_int *, float *, l_int *,
+              float *, l_int *, l_int *)
+  void dggev_(char *, char *, l_int *, double *, l_int *, double *, l_int *,
+              double *, double *, double *, double *, l_int *,
+              double *, l_int *, double *, l_int *, l_int *)
+  void cggev_(char *, char *, l_int *, float complex *, l_int *,
+              float complex *, l_int *, float complex *, float complex *,
+              float complex *, l_int *, float complex *, l_int *,
+              float complex *, l_int *, float *, l_int *)
+  void zggev_(char *, char *, l_int *, double complex *, l_int *,
+              double complex *, l_int *, double complex *,
+              double complex *, double complex *, l_int *,
+              double complex *, l_int *, double complex *, l_int *,
+              double *, l_int *)
+
+  void sgees_(char *, char *, l_logical (*)(float *, float *),
+              l_int *, float *, l_int *, l_int *,
+              float *, float *, float *, l_int *,
+              float *, l_int *, l_logical *, l_int *)
+  void dgees_(char *, char *, l_logical (*)(double *, double *),
+              l_int *, double *, l_int *, l_int *,
+              double *, double *, double *, l_int *,
+              double *, l_int *, l_logical *, l_int *)
+  void cgees_(char *, char *,
+              l_logical (*)(float complex *),
+              l_int *, float complex *,
+              l_int *, l_int *, float complex *,
+              float complex *, l_int *,
+              float complex *, l_int *, float *,
+              l_logical *, l_int *)
+  void zgees_(char *, char *,
+              l_logical (*)(double complex *),
+              l_int *, double complex *,
+              l_int *, l_int *, double complex *,
+              double complex *, l_int *,
+              double complex *, l_int *,
+              double *, l_logical *, l_int *)
+
+  void strsen_(char *, char *, l_logical *, l_int *,
+               float *, l_int *, float *,
+               l_int *, float *, float *, l_int *,
+               float *, float *, float *, l_int *,
+               l_int *, l_int *, l_int *)
+  void dtrsen_(char *, char *, l_logical *,
+               l_int *, double *, l_int *,
+               double *, l_int *, double *, double *,
+               l_int *, double *, double *, double *,
+               l_int *, l_int *, l_int *, l_int *)
+  void ctrsen_(char *, char *, l_logical *,
+               l_int *, float complex *,
+               l_int *, float complex *,
+               l_int *, float complex *, l_int *,
+               float *, float *, float complex *,
+               l_int *, l_int *)
+  void ztrsen_(char *, char *, l_logical *,
+               l_int *, double complex *,
+               l_int *, double complex *,
+               l_int *, double complex *, l_int *,
+               double *, double *, double complex *,
+               l_int *, l_int *)
+
+  void strevc_(char *, char *, l_logical *,
+               l_int *, float *, l_int *,
+               float *, l_int *, float *, l_int *,
+               l_int *, l_int *, float *, l_int *)
+  void dtrevc_(char *, char *, l_logical *,
+               l_int *, double *, l_int *,
+               double *, l_int *, double *,
+               l_int *, l_int *, l_int *, double *,
+               l_int *)
+  void ctrevc_(char *, char *, l_logical *,
+               l_int *, float complex *,
+               l_int *, float complex *,
+               l_int *, float complex *,
+               l_int *, l_int *, l_int *,
+               float complex *, float *, l_int *)
+  void ztrevc_(char *, char *, l_logical *,
+               l_int *, double complex *,
+               l_int *, double complex *,
+               l_int *, double complex *,
+               l_int *, l_int *, l_int *,
+               double complex *, double *, l_int *)
+
+  void sgges_(char *, char *, char *,
+              l_logical (*)(float *, float *, float *),
+              l_int *, float *, l_int *, float *,
+              l_int *, l_int *, float *, float *,
+              float *, float *, l_int *, float *,
+              l_int *, float *, l_int *, l_logical *,
+              l_int *)
+  void dgges_(char *, char *, char *,
+              l_logical (*)(double *, double *, double *),
+              l_int *, double *, l_int *, double *,
+              l_int *, l_int *, double *, double *,
+              double *, double *, l_int *, double *,
+              l_int *, double *, l_int *,
+              l_logical *, l_int *)
+  void cgges_(char *, char *, char *,
+              l_logical (*)(float complex *, float complex *),
+              l_int *, float complex *,
+              l_int *, float complex *,
+              l_int *, l_int *, float complex *,
+              float complex *, float complex *,
+              l_int *, float complex *,
+              l_int *, float complex *,
+              l_int *, float *, l_logical *, l_int *)
+  void zgges_(char *, char *, char *,
+              l_logical (*)(double complex *, double complex *),
+              l_int *, double complex *,
+              l_int *, double complex *,
+              l_int *, l_int *, double complex *,
+              double complex *,
+              double complex *, l_int *,
+              double complex *, l_int *,
+              double complex *, l_int *,
+              double *, l_logical *, l_int *)
+
+  void stgsen_(l_int *, l_logical *,
+               l_logical *, l_logical *,
+               l_int *, float *, l_int *, float *,
+               l_int *, float *, float *, float *,
+               float *, l_int *, float *, l_int *,
+               l_int *, float *, float *, float *, float *,
+               l_int *, l_int *, l_int *, l_int *)
+  void dtgsen_(l_int *, l_logical *,
+               l_logical *, l_logical *,
+               l_int *, double *, l_int *,
+               double *, l_int *, double *, double *,
+               double *, double *, l_int *, double *,
+               l_int *, l_int *, double *, double *,
+               double *, double *, l_int *, l_int *,
+               l_int *, l_int *)
+  void ctgsen_(l_int *, l_logical *,
+               l_logical *, l_logical *,
+               l_int *, float complex *,
+               l_int *, float complex *,
+               l_int *, float complex *,
+               float complex *,
+               float complex *, l_int *,
+               float complex *, l_int *, l_int *,
+               float *, float *, float *,
+               float complex *, l_int *, l_int *,
+               l_int *, l_int *)
+  void ztgsen_(l_int *, l_logical *,
+               l_logical *, l_logical *,
+               l_int *, double complex *,
+               l_int *, double complex *,
+               l_int *, double complex *,
+               double complex *,
+               double complex *, l_int *,
+               double complex *, l_int *, l_int *,
+               double *, double *, double *,
+               double complex *, l_int *, l_int *,
+               l_int *, l_int *)
+
+  void stgevc_(char *, char *, l_logical *,
+               l_int *, float *, l_int *,
+               float *, l_int *, float *,
+               l_int *, float *, l_int *,
+               l_int *, l_int *, float *, l_int *)
+  void dtgevc_(char *, char *, l_logical *,
+               l_int *, double *, l_int *,
+               double *, l_int *, double *,
+               l_int *, double *, l_int *,
+               l_int *, l_int *, double *, l_int *)
+  void ctgevc_(char *, char *, l_logical *,
+               l_int *, float complex *,
+               l_int *, float complex *,
+               l_int *, float complex *,
+               l_int *, float complex *,
+               l_int *, l_int *, l_int *,
+               float complex *, float *, l_int *)
+  void ztgevc_(char *, char *, l_logical *,
+               l_int *, double complex *,
+               l_int *, double complex *,
+               l_int *, double complex *,
+               l_int *, double complex *,
+               l_int *, l_int *, l_int *,
+               double complex *, double *, l_int *)
diff --git a/kwant/linalg/f_lapack.py b/kwant/linalg/f_lapack.py
new file mode 100644
index 0000000000000000000000000000000000000000..cec1c6b28f0849e4df0315c6d196d5a7be4151f7
--- /dev/null
+++ b/kwant/linalg/f_lapack.py
@@ -0,0 +1,6 @@
+__all__ = ['l_int_dtype', 'l_logical_dtype']
+
+import numpy as np
+
+l_int_dtype = np.int32
+l_logical_dtype = np.int32
diff --git a/kwant/linalg/lapack.pyx b/kwant/linalg/lapack.pyx
new file mode 100644
index 0000000000000000000000000000000000000000..85173fdb0d6c0fa17ae7aa5331c6b0827fa21e5d
--- /dev/null
+++ b/kwant/linalg/lapack.pyx
@@ -0,0 +1,2465 @@
+"""Low-level access to LAPACK functions. """
+
+__all__ = ['sgetrf', 'dgetrf', 'cgetrf', 'zgetrf',
+           'sgetrs', 'dgetrs', 'cgetrs', 'zgetrs',
+           'sgecon', 'dgecon', 'cgecon', 'zgecon',
+           'sggev', 'dggev', 'cggev', 'zggev',
+           'sgees', 'dgees', 'cgees', 'zgees',
+           'strsen', 'dtrsen', 'ctrsen', 'ztrsen',
+           'strevc', 'dtrevc', 'ctrevc', 'ztrevc',
+           'sgges', 'dgges', 'cgges', 'zgges',
+           'stgsen', 'dtgsen', 'ctgsen', 'ztgsen',
+           'stgevc', 'dtgevc', 'ctgevc', 'ztgevc',
+           'prepare_for_lapack']
+
+import numpy as np
+cimport numpy as np
+
+# Strangely absolute imports from kwant.linalg.f_lapack don't work here.  So
+# continue using traditional implicit relative imports.
+import f_lapack
+cimport f_lapack
+from f_lapack cimport l_int, l_logical
+
+int_dtype = f_lapack.l_int_dtype
+logical_dtype = f_lapack.l_logical_dtype
+
+# exceptions
+
+class LinAlgError(RuntimeError):
+    pass
+
+
+# some helper functions
+def filter_args(select, args):
+    return tuple([arg for sel, arg in zip(select, args) if sel])
+
+def assert_fortran_mat(*mats):
+    # This is a workaround for a bug in numpy version < 2.0,
+    # where 1x1 matrices do not have the F_Contiguous flag set correctly.
+    for mat in mats:
+        if (mat is not None and (mat.shape[0] > 1 or mat.shape[1] > 1) and
+            not mat.flags["F_CONTIGUOUS"]):
+            raise ValueError("Input matrix must be Fortran contiguous")
+
+
+# Wrappers for xGETRF
+def sgetrf(np.ndarray[np.float32_t, ndim=2] A):
+    cdef l_int M, N, info
+    cdef np.ndarray[l_int, ndim=1] ipiv
+
+    assert_fortran_mat(A)
+
+    M = A.shape[0]
+    N = A.shape[1]
+    ipiv = np.empty(min(M,N), dtype = f_lapack.l_int_dtype)
+
+    f_lapack.sgetrf_(&M, &N, <float *>A.data, &M,
+                     <l_int *>ipiv.data, &info)
+
+    assert(info >= 0, "Argument error in sgetrf")
+
+    return (A, ipiv, info > 0 or M != N)
+
+def dgetrf(np.ndarray[np.float64_t, ndim=2] A):
+    cdef l_int M, N, info
+    cdef np.ndarray[l_int, ndim=1] ipiv
+
+    assert_fortran_mat(A)
+
+    M = A.shape[0]
+    N = A.shape[1]
+    ipiv = np.empty(min(M,N), dtype = f_lapack.l_int_dtype)
+
+    f_lapack.dgetrf_(&M, &N, <double *>A.data, &M,
+                     <l_int *>ipiv.data, &info)
+
+    assert(info >= 0, "Argument error in dgetrf")
+
+    return (A, ipiv, info > 0 or M != N)
+
+def cgetrf(np.ndarray[np.complex64_t, ndim=2] A):
+    cdef l_int M, N, info
+    cdef np.ndarray[l_int, ndim=1] ipiv
+
+    assert_fortran_mat(A)
+
+    M = A.shape[0]
+    N = A.shape[1]
+    ipiv = np.empty(min(M,N), dtype = f_lapack.l_int_dtype)
+
+    f_lapack.cgetrf_(&M, &N, <float complex *>A.data, &M,
+                     <l_int *>ipiv.data, &info)
+
+    assert(info >= 0, "Argument error in cgetrf")
+
+    return (A, ipiv, info > 0 or M != N)
+
+def zgetrf(np.ndarray[np.complex128_t, ndim=2] A):
+    cdef l_int M, N, info
+    cdef np.ndarray[l_int, ndim=1] ipiv
+
+    assert_fortran_mat(A)
+
+    M = A.shape[0]
+    N = A.shape[1]
+    ipiv = np.empty(min(M,N), dtype = f_lapack.l_int_dtype)
+
+    f_lapack.zgetrf_(&M, &N, <double complex *>A.data, &M,
+                     <l_int *>ipiv.data, &info)
+
+    assert(info >= 0, "Argument error in zgetrf")
+
+    return (A, ipiv, info > 0 or M != N)
+
+# Wrappers for xGETRS
+
+def sgetrs(np.ndarray[np.float32_t, ndim=2] LU,
+           np.ndarray[l_int, ndim=1] IPIV, B):
+    cdef l_int N, NRHS, info
+    cdef np.ndarray b
+
+    assert_fortran_mat(LU)
+
+    # again: workaround for 1x1-Fortran bug in numpy < v2.0
+    if (not isinstance(B, np.ndarray) or
+        (B.ndim == 2 and (B.shape[0] > 1 or B.shape[1] > 1) and
+         not B.flags["F_CONTIGUOUS"])):
+        raise ValueError("In dgetrs: B must be a Fortran ordered numpy array")
+
+    b = B
+    N = LU.shape[0]
+    if b.ndim == 1:
+        NRHS = 1
+    elif b.ndim == 2:
+        NRHS = B.shape[1]
+    else:
+        raise ValueError("In sgetrs: B must be a vector or matrix")
+
+    f_lapack.sgetrs_("N", &N, &NRHS, <float *>LU.data, &N,
+                     <l_int *>IPIV.data, <float *>b.data, &N,
+                     &info)
+
+    assert(info == 0, "Argument error in sgetrs")
+
+    return b
+
+def dgetrs(np.ndarray[np.float64_t, ndim=2] LU,
+           np.ndarray[l_int, ndim=1] IPIV, B):
+    cdef l_int N, NRHS, info
+    cdef np.ndarray b
+
+    assert_fortran_mat(LU)
+
+    # again: workaround for 1x1-Fortran bug in numpy < v2.0
+    if (not isinstance(B, np.ndarray) or
+        (B.ndim == 2 and (B.shape[0] > 1 or B.shape[1] > 1) and
+         not B.flags["F_CONTIGUOUS"])):
+        raise ValueError("In dgetrs: B must be a Fortran ordered numpy array")
+
+    b = B
+    N = LU.shape[0]
+    if b.ndim == 1:
+        NRHS = 1
+    elif b.ndim == 2:
+        NRHS = b.shape[1]
+    else:
+        raise ValueError("In dgetrs: B must be a vector or matrix")
+
+    f_lapack.dgetrs_("N", &N, &NRHS, <double *>LU.data, &N,
+                     <l_int *>IPIV.data, <double *>b.data, &N,
+                     &info)
+
+    assert(info == 0, "Argument error in dgetrs")
+
+    return b
+
+def cgetrs(np.ndarray[np.complex64_t, ndim=2] LU,
+           np.ndarray[l_int, ndim=1] IPIV, B):
+    cdef l_int N, NRHS, info
+    cdef np.ndarray b
+
+    assert_fortran_mat(LU)
+
+    # again: workaround for 1x1-Fortran bug in numpy < v2.0
+    if (not isinstance(B, np.ndarray) or
+        (B.ndim == 2 and (B.shape[0] > 1 or B.shape[1] > 1) and
+         not B.flags["F_CONTIGUOUS"])):
+        raise ValueError("In dgetrs: B must be a Fortran ordered numpy array")
+
+    b = B
+    N = LU.shape[0]
+    if b.ndim == 1:
+        NRHS = 1
+    elif b.ndim == 2:
+        NRHS = b.shape[1]
+    else:
+        raise ValueError("In cgetrs: B must be a vector or matrix")
+
+    f_lapack.cgetrs_("N", &N, &NRHS, <float complex *>LU.data, &N,
+                     <l_int *>IPIV.data, <float complex *>b.data, &N,
+                     &info)
+
+    assert(info == 0, "Argument error in cgetrs")
+
+    return b
+
+def zgetrs(np.ndarray[np.complex128_t, ndim=2] LU,
+           np.ndarray[l_int, ndim=1] IPIV, B):
+    cdef l_int N, NRHS, info
+    cdef np.ndarray b
+
+    assert_fortran_mat(LU)
+
+    # again: workaround for 1x1-Fortran bug in numpy < v2.0
+    if (not isinstance(B, np.ndarray) or
+        (B.ndim == 2 and (B.shape[0] > 1 or B.shape[1] > 1) and
+         not B.flags["F_CONTIGUOUS"])):
+        raise ValueError("In dgetrs: B must be a Fortran ordered numpy array")
+
+    b = B
+    N = LU.shape[0]
+    if b.ndim == 1:
+        NRHS = 1
+    elif b.ndim == 2:
+        NRHS = b.shape[1]
+    else:
+        raise ValueError("In zgetrs: B must be a vector or matrix")
+
+    f_lapack.zgetrs_("N", &N, &NRHS, <double complex *>LU.data, &N,
+                     <l_int *>IPIV.data, <double complex *>b.data, &N,
+                     &info)
+
+    assert(info == 0, "Argument error in zgetrs")
+
+    return b
+
+# Wrappers for xGECON
+
+def sgecon(np.ndarray[np.float32_t, ndim=2] LU,
+            float normA, char *norm = "1"):
+    cdef l_int N, info
+    cdef float rcond
+    cdef np.ndarray[np.float32_t, ndim=1] work
+    cdef np.ndarray[l_int, ndim=1] iwork
+
+    assert_fortran_mat(LU)
+
+    N = LU.shape[0]
+    work = np.empty(4*N, dtype = np.float32)
+    iwork = np.empty(N, dtype = f_lapack.l_int_dtype)
+
+    f_lapack.sgecon_(norm, &N, <float *>LU.data, &N, &normA,
+                     &rcond, <float *>work.data,
+                     <l_int *>iwork.data, &info)
+
+    assert(info == 0, "Argument error in sgecon")
+
+    return rcond
+
+def dgecon(np.ndarray[np.float64_t, ndim=2] LU,
+            double normA, char *norm = "1"):
+    cdef l_int N, info
+    cdef double rcond
+    cdef np.ndarray[np.float64_t, ndim=1] work
+    cdef np.ndarray[l_int, ndim=1] iwork
+
+    assert_fortran_mat(LU)
+
+    N = LU.shape[0]
+    work = np.empty(4*N, dtype = np.float64)
+    iwork = np.empty(N, dtype = f_lapack.l_int_dtype)
+
+    f_lapack.dgecon_(norm, &N, <double *>LU.data, &N, &normA,
+                     &rcond, <double *>work.data,
+                     <l_int *>iwork.data, &info)
+
+    assert(info == 0, "Argument error in dgecon")
+
+    return rcond
+
+def cgecon(np.ndarray[np.complex64_t, ndim=2] LU,
+            float normA, char *norm = "1"):
+    cdef l_int N, info
+    cdef float rcond
+    cdef np.ndarray[np.complex64_t, ndim=1] work
+    cdef np.ndarray[np.float32_t, ndim=1] rwork
+
+    assert_fortran_mat(LU)
+
+    N = LU.shape[0]
+    work = np.empty(2*N, dtype = np.complex64)
+    rwork = np.empty(2*N, dtype = np.float32)
+
+    f_lapack.cgecon_(norm, &N, <float complex *>LU.data, &N, &normA,
+                     &rcond, <float complex *>work.data,
+                     <float *>rwork.data, &info)
+
+    assert(info == 0, "Argument error in cgecon")
+
+    return rcond
+
+def zgecon(np.ndarray[np.complex128_t, ndim=2] LU,
+           double normA, char *norm = "1"):
+    cdef l_int N, info
+    cdef double rcond
+    cdef np.ndarray[np.complex128_t, ndim=1] work
+    cdef np.ndarray[np.float64_t, ndim=1] rwork
+
+    assert_fortran_mat(LU)
+
+    N = LU.shape[0]
+    work = np.empty(2*N, dtype = np.complex128)
+    rwork = np.empty(2*N, dtype = np.float64)
+
+    f_lapack.zgecon_(norm, &N, <double complex *>LU.data, &N, &normA,
+                     &rcond, <double complex *>work.data,
+                     <double *>rwork.data, &info)
+
+    assert(info == 0, "Argument error in zgecon")
+
+    return rcond
+
+# Wrappers for xGGEV
+
+# Helper function for xGGEV
+def ggev_postprocess(dtype, alphar, alphai, vl_r=None, vr_r=None):
+    # depending on whether the eigenvalues are purely real or complex,
+    # some post-processing of the eigenvalues and -vectors is necessary
+
+    indx = (alphai > 0.0).nonzero()[0]
+
+    if indx.size:
+        alpha = alphar + 1j * alphai
+
+        if vl_r != None:
+            vl = np.array(vl_r, dtype = dtype)
+            for i in indx:
+                vl.imag[:, i] = vl_r[:,i+1]
+                vl[:, i+1] = np.conj(vl[:, i])
+        else:
+            vl = None
+
+        if vr_r != None:
+            vr = np.array(vr_r, dtype = dtype)
+            for i in indx:
+                vr.imag[:, i] = vr_r[:,i+1]
+                vr[:, i+1] = np.conj(vr[:, i])
+        else:
+            vr = None
+    else:
+        alpha = alphar
+        vl = vl_r
+        vr = vr_r
+
+    return (alpha, vl, vr)
+
+
+def sggev(np.ndarray[np.float32_t, ndim=2] A,
+          np.ndarray[np.float32_t, ndim=2] B,
+          left=False, right=True):
+    cdef l_int N, info, lwork
+    cdef char *jobvl, *jobvr
+    cdef np.ndarray[np.float32_t, ndim=2] vl_r, vr_r
+    cdef float *vl_ptr, *vr_ptr, qwork
+    cdef np.ndarray[np.float32_t, ndim=1] work, alphar, alphai, beta
+
+    assert_fortran_mat(A, B)
+
+    N = A.shape[0]
+    alphar = np.empty(N, dtype = np.float32)
+    alphai = np.empty(N, dtype = np.float32)
+    beta = np.empty(N, dtype = np.float32)
+
+    if left:
+        vl_r = np.empty((N,N), dtype = np.float32, order='F')
+        vl_ptr = <float *>vl_r.data
+        jobvl = "V"
+    else:
+        vl_r = None
+        vl_ptr = NULL
+        jobvl = "N"
+
+    if right:
+        vr_r = np.empty((N,N), dtype = np.float32, order='F')
+        vr_ptr = <float *>vr_r.data
+        jobvr = "V"
+    else:
+        vr_r = None
+        vr_ptr = NULL
+        jobvr = "N"
+
+    # workspace query
+    lwork = -1
+
+    f_lapack.sggev_(jobvl, jobvr, &N, <float *>A.data, &N,
+                    <float *>B.data, &N,
+                    <float *>alphar.data, <float *> alphai.data,
+                    <float *>beta.data,
+                    vl_ptr, &N, vr_ptr, &N,
+                    &qwork, &lwork, &info)
+
+    assert(info == 0, "Argument error in sggev")
+
+    lwork = <l_int>qwork
+    work = np.empty(lwork, dtype = np.float32)
+
+    # Now the real calculation
+    f_lapack.sggev_(jobvl, jobvr, &N, <float *>A.data, &N,
+                    <float *>B.data, &N,
+                    <float *>alphar.data, <float *> alphai.data,
+                    <float *>beta.data,
+                    vl_ptr, &N, vr_ptr, &N,
+                    <float *>work.data, &lwork, &info)
+
+    if info > 0:
+        raise LinAlgError("QZ iteration failed to converge in sggev")
+
+    assert(info == 0, "Argument error in sggev")
+
+    alpha, vl, vr = ggev_postprocess(np.complex64, alphar, alphai, vl_r, vr_r)
+
+    return filter_args((True, True, left, right), (alpha, beta, vl, vr))
+
+
+def dggev(np.ndarray[np.float64_t, ndim=2] A,
+          np.ndarray[np.float64_t, ndim=2] B,
+          left=False, right=True):
+    cdef l_int N, info, lwork
+    cdef char *jobvl, *jobvr
+    cdef np.ndarray[np.float64_t, ndim=2] vl_r, vr_r
+    cdef double *vl_ptr, *vr_ptr, qwork
+    cdef np.ndarray[np.float64_t, ndim=1] work, alphar, alphai, beta
+
+    assert_fortran_mat(A, B)
+
+    N = A.shape[0]
+    alphar = np.empty(N, dtype = np.float64)
+    alphai = np.empty(N, dtype = np.float64)
+    beta = np.empty(N, dtype = np.float64)
+
+    if left:
+        vl_r = np.empty((N,N), dtype = np.float64, order='F')
+        vl_ptr = <double *>vl_r.data
+        jobvl = "V"
+    else:
+        vl_r = None
+        vl_ptr = NULL
+        jobvl = "N"
+
+    if right:
+        vr_r = np.empty((N,N), dtype = np.float64, order='F')
+        vr_ptr = <double *>vr_r.data
+        jobvr = "V"
+    else:
+        vr_r = None
+        vr_ptr = NULL
+        jobvr = "N"
+
+    # workspace query
+    lwork = -1
+
+    f_lapack.dggev_(jobvl, jobvr, &N, <double *>A.data, &N,
+                    <double *>B.data, &N,
+                    <double *>alphar.data, <double *> alphai.data,
+                    <double *>beta.data,
+                    vl_ptr, &N, vr_ptr, &N,
+                    &qwork, &lwork, &info)
+
+    assert(info == 0, "Argument error in dggev")
+
+    lwork = <l_int>qwork
+    work = np.empty(lwork, dtype = np.float64)
+
+    # Now the real calculation
+    f_lapack.dggev_(jobvl, jobvr, &N, <double *>A.data, &N,
+                    <double *>B.data, &N,
+                    <double *>alphar.data, <double *> alphai.data,
+                    <double *>beta.data,
+                    vl_ptr, &N, vr_ptr, &N,
+                    <double *>work.data, &lwork, &info)
+
+    if info > 0:
+        raise LinAlgError("QZ iteration failed to converge in dggev")
+
+    assert(info == 0, "Argument error in dggev")
+
+    alpha, vl, vr = ggev_postprocess(np.complex128, alphar, alphai, vl_r, vr_r)
+
+    return filter_args((True, True, left, right), (alpha, beta, vl, vr))
+
+
+def cggev(np.ndarray[np.complex64_t, ndim=2] A,
+          np.ndarray[np.complex64_t, ndim=2] B,
+          left=False, right=True):
+    cdef l_int N, info, lwork
+    cdef char *jobvl, *jobvr
+    cdef np.ndarray[np.complex64_t, ndim=2] vl, vr
+    cdef float complex *vl_ptr, *vr_ptr, qwork
+    cdef np.ndarray[np.complex64_t, ndim=1] work, alpha, beta
+    cdef np.ndarray[np.float32_t, ndim=1] rwork
+
+    assert_fortran_mat(A, B)
+
+    N = A.shape[0]
+    alpha = np.empty(N, dtype = np.complex64)
+    beta = np.empty(N, dtype = np.complex64)
+
+    if left:
+        vl = np.empty((N,N), dtype = np.complex64, order='F')
+        vl_ptr = <float complex *>vl.data
+        jobvl = "V"
+    else:
+        vl_ptr = NULL
+        jobvl = "N"
+
+    if right:
+        vr = np.empty((N,N), dtype = np.complex64, order='F')
+        vr_ptr = <float complex *>vr.data
+        jobvr = "V"
+    else:
+        vr_ptr = NULL
+        jobvr = "N"
+
+    rwork = np.empty(8 * N, dtype = np.float32)
+
+    # workspace query
+    lwork = -1
+    work = np.empty(1, dtype = np.complex64)
+
+    f_lapack.cggev_(jobvl, jobvr, &N, <float complex *>A.data, &N,
+                    <float complex *>B.data, &N,
+                    <float complex *>alpha.data, <float complex *>beta.data,
+                    vl_ptr, &N, vr_ptr, &N,
+                    &qwork, &lwork,
+                    <float *>rwork.data, &info)
+
+    assert(info == 0, "Argument error in cggev")
+
+    lwork = <l_int>qwork.real
+
+    work = np.empty(lwork, dtype = np.complex64)
+
+    # Now the real calculation
+    f_lapack.cggev_(jobvl, jobvr, &N, <float complex *>A.data, &N,
+                    <float complex *>B.data, &N,
+                    <float complex *>alpha.data, <float complex *>beta.data,
+                    vl_ptr, &N, vr_ptr, &N,
+                    <float complex *>work.data, &lwork,
+                    <float *>rwork.data, &info)
+
+    if info > 0:
+        raise LinAlgError("QZ iteration failed to converge in cggev")
+
+    assert(info == 0, "Argument error in cggev")
+
+    return filter_args((True, True, left, right), (alpha, beta, vl, vr))
+
+
+def zggev(np.ndarray[np.complex128_t, ndim=2] A,
+          np.ndarray[np.complex128_t, ndim=2] B,
+          left=False, right=True):
+    cdef l_int N, info, lwork
+    cdef char *jobvl, *jobvr
+    cdef np.ndarray[np.complex128_t, ndim=2] vl, vr
+    cdef double complex *vl_ptr, *vr_ptr, qwork
+    cdef np.ndarray[np.complex128_t, ndim=1] work, alpha, beta
+    cdef np.ndarray[np.float64_t, ndim=1] rwork
+
+    assert_fortran_mat(A, B)
+
+    N = A.shape[0]
+    alpha = np.empty(N, dtype = np.complex128)
+    beta = np.empty(N, dtype = np.complex128)
+
+    if left:
+        vl = np.empty((N,N), dtype = np.complex128, order='F')
+        vl_ptr = <double complex *>vl.data
+        jobvl = "V"
+    else:
+        vl_ptr = NULL
+        jobvl = "N"
+
+    if right:
+        vr = np.empty((N,N), dtype = np.complex128, order='F')
+        vr_ptr = <double complex *>vr.data
+        jobvr = "V"
+    else:
+        vr_ptr = NULL
+        jobvr = "N"
+
+    rwork = np.empty(8 * N, dtype = np.float64)
+
+    # workspace query
+    lwork = -1
+    work = np.empty(1, dtype = np.complex128)
+
+    f_lapack.zggev_(jobvl, jobvr, &N, <double complex *>A.data, &N,
+                    <double complex *>B.data, &N,
+                    <double complex *>alpha.data, <double complex *>beta.data,
+                    vl_ptr, &N, vr_ptr, &N,
+                    &qwork, &lwork,
+                    <double *>rwork.data, &info)
+
+    assert(info == 0, "Argument error in zggev")
+
+    lwork = <l_int>qwork.real
+    work = np.empty(lwork, dtype = np.complex128)
+
+    # Now the real calculation
+    f_lapack.zggev_(jobvl, jobvr, &N, <double complex *>A.data, &N,
+                    <double complex *>B.data, &N,
+                    <double complex *>alpha.data, <double complex *>beta.data,
+                    vl_ptr, &N, vr_ptr, &N,
+                    <double complex *>work.data, &lwork,
+                    <double *>rwork.data, &info)
+
+    if info > 0:
+        raise LinAlgError("QZ iteration failed to converge in zggev")
+
+    assert(info == 0, "Argument error in zggev")
+
+    return filter_args((True, True, left, right), (alpha, beta, vl, vr))
+
+
+# Wrapper for xGEES
+def sgees(np.ndarray[np.float32_t, ndim=2] A,
+          calc_q=True, calc_ev=True):
+    cdef l_int N, lwork, sdim, info
+    cdef char *jobvs
+    cdef float *vs_ptr, qwork
+    cdef np.ndarray[np.float32_t, ndim=2] vs
+    cdef np.ndarray[np.float32_t] wr, wi, work
+
+    assert_fortran_mat(A)
+
+    N = A.shape[0]
+    wr = np.empty(N, dtype = np.float32)
+    wi = np.empty(N, dtype = np.float32)
+
+    if calc_q:
+        vs = np.empty((N,N), dtype = np.float32, order='F')
+        vs_ptr = <float *>vs.data
+        jobvs = "V"
+    else:
+        vs_ptr = NULL
+        jobvs = "N"
+
+    # workspace query
+    lwork = -1
+    f_lapack.sgees_(jobvs, "N", NULL, &N, <float *>A.data, &N,
+                    &sdim, <float *>wr.data, <float *>wi.data, vs_ptr, &N,
+                    &qwork, &lwork, NULL, &info)
+
+    assert(info == 0, "Argument error in sgees")
+
+    lwork = <int>qwork
+    work = np.empty(lwork, dtype = np.float32)
+
+    # Now the real calculation
+    f_lapack.sgees_(jobvs, "N", NULL, &N, <float *>A.data, &N,
+                    &sdim, <float *>wr.data, <float *>wi.data, vs_ptr, &N,
+                    <float *>work.data, &lwork, NULL, &info)
+
+    if info > 0:
+        raise LinAlgError("QR iteration failed to converge in sgees")
+
+    assert(info == 0, "Argument error in sgees")
+
+    if wi.nonzero()[0].size:
+        w = wr + 1j * wi
+    else:
+        w = wr
+
+    return filter_args((True, calc_q, calc_ev), (A, vs, w))
+
+
+def dgees(np.ndarray[np.float64_t, ndim=2] A,
+          calc_q=True, calc_ev=True):
+    cdef l_int N, lwork, sdim, info
+    cdef char *jobvs
+    cdef double *vs_ptr, qwork
+    cdef np.ndarray[np.float64_t, ndim=2] vs
+    cdef np.ndarray[np.float64_t] wr, wi, work
+
+    assert_fortran_mat(A)
+
+    N = A.shape[0]
+    wr = np.empty(N, dtype = np.float64)
+    wi = np.empty(N, dtype = np.float64)
+
+    if calc_q:
+        vs = np.empty((N,N), dtype = np.float64, order='F')
+        vs_ptr = <double *>vs.data
+        jobvs = "V"
+    else:
+        vs_ptr = NULL
+        jobvs = "N"
+
+    # workspace query
+    lwork = -1
+    f_lapack.dgees_(jobvs, "N", NULL, &N, <double *>A.data, &N,
+                    &sdim, <double *>wr.data, <double *>wi.data, vs_ptr, &N,
+                    &qwork, &lwork, NULL, &info)
+
+    assert(info == 0, "Argument error in dgees")
+
+    lwork = <int>qwork
+    work = np.empty(lwork, dtype = np.float64)
+
+    # Now the real calculation
+    f_lapack.dgees_(jobvs, "N", NULL, &N, <double *>A.data, &N,
+                    &sdim, <double *>wr.data, <double *>wi.data, vs_ptr, &N,
+                    <double *>work.data, &lwork, NULL, &info)
+
+    if info > 0:
+        raise LinAlgError("QR iteration failed to converge in dgees")
+
+    assert(info == 0, "Argument error in dgees")
+
+    if wi.nonzero()[0].size:
+        w = wr + 1j * wi
+    else:
+        w = wr
+
+    return filter_args((True, calc_q, calc_ev), (A, vs, w))
+
+
+def cgees(np.ndarray[np.complex64_t, ndim=2] A,
+          calc_q=True, calc_ev=True):
+    cdef l_int N, lwork, sdim, info
+    cdef char *jobvs
+    cdef float complex *vs_ptr, qwork
+    cdef np.ndarray[np.complex64_t, ndim=2] vs
+    cdef np.ndarray[np.complex64_t] w, work
+    cdef np.ndarray[np.float32_t] rwork
+
+    assert_fortran_mat(A)
+
+    N = A.shape[0]
+    w = np.empty(N, dtype = np.complex64)
+    rwork = np.empty(N, dtype = np.float32)
+
+    if calc_q:
+        vs = np.empty((N,N), dtype = np.complex64, order='F')
+        vs_ptr = <float complex *>vs.data
+        jobvs = "V"
+    else:
+        vs_ptr = NULL
+        jobvs = "N"
+
+    # workspace query
+    lwork = -1
+    f_lapack.cgees_(jobvs, "N", NULL, &N, <float complex *>A.data, &N,
+                    &sdim, <float complex *>w.data, vs_ptr, &N,
+                    &qwork, &lwork, <float *>rwork.data, NULL, &info)
+
+    assert(info == 0, "Argument error in cgees")
+
+    lwork = <int>qwork.real
+    work = np.empty(lwork, dtype = np.complex64)
+
+    # Now the real calculation
+    f_lapack.cgees_(jobvs, "N", NULL, &N, <float complex *>A.data, &N,
+                    &sdim, <float complex *>w.data, vs_ptr, &N,
+                    <float complex *>work.data, &lwork,
+                    <float *>rwork.data, NULL, &info)
+
+    if info > 0:
+        raise LinAlgError("QR iteration failed to converge in cgees")
+
+    assert(info == 0, "Argument error in cgees")
+
+    return filter_args((True, calc_q, calc_ev), (A, vs, w))
+
+
+def zgees(np.ndarray[np.complex128_t, ndim=2] A,
+          calc_q=True, calc_ev=True):
+    cdef l_int N, lwork, sdim, info
+    cdef char *jobvs
+    cdef double complex *vs_ptr, qwork
+    cdef np.ndarray[np.complex128_t, ndim=2] vs
+    cdef np.ndarray[np.complex128_t] w, work
+    cdef np.ndarray[np.float64_t] rwork
+
+    assert_fortran_mat(A)
+
+    N = A.shape[0]
+    w = np.empty(N, dtype = np.complex128)
+    rwork = np.empty(N, dtype = np.float64)
+
+    if calc_q:
+        vs = np.empty((N,N), dtype = np.complex128, order='F')
+        vs_ptr = <double complex *>vs.data
+        jobvs = "V"
+    else:
+        vs_ptr = NULL
+        jobvs = "N"
+
+    # workspace query
+    lwork = -1
+    f_lapack.zgees_(jobvs, "N", NULL, &N, <double complex *>A.data, &N,
+                    &sdim, <double complex *>w.data, vs_ptr, &N,
+                    &qwork, &lwork, <double *>rwork.data, NULL, &info)
+
+    assert(info == 0, "Argument error in zgees")
+
+    lwork = <int>qwork.real
+    work = np.empty(lwork, dtype = np.complex128)
+
+    # Now the real calculation
+    f_lapack.zgees_(jobvs, "N", NULL, &N, <double complex *>A.data, &N,
+                    &sdim, <double complex *>w.data, vs_ptr, &N,
+                    <double complex *>work.data, &lwork,
+                    <double *>rwork.data, NULL, &info)
+
+    if info > 0:
+        raise LinAlgError("QR iteration failed to converge in zgees")
+
+    assert(info == 0, "Argument error in zgees")
+
+    return filter_args((True, calc_q, calc_ev), (A, vs, w))
+
+# Wrapper for xTRSEN
+def strsen(np.ndarray[l_logical] select,
+           np.ndarray[np.float32_t, ndim=2] T,
+           np.ndarray[np.float32_t, ndim=2] Q=None,
+           calc_ev=True):
+    cdef l_int N, M, lwork, liwork, qiwork, info
+    cdef char *compq
+    cdef float qwork, *q_ptr
+    cdef np.ndarray[np.float32_t] wr, wi, work
+    cdef np.ndarray[l_int] iwork
+
+    assert_fortran_mat(T, Q)
+
+    N = T.shape[0]
+    wr = np.empty(N, dtype = np.float32)
+    wi = np.empty(N, dtype = np.float32)
+
+    if Q is not None:
+        compq = "V"
+        q_ptr = <float *>Q.data
+    else:
+        compq = "N"
+        q_ptr = NULL
+
+    # workspace query
+    lwork = liwork = -1
+    f_lapack.strsen_("N", compq, <l_logical *>select.data,
+                     &N, <float *>T.data, &N, q_ptr, &N,
+                     <float *>wr.data, <float *>wi.data, &M, NULL, NULL,
+                     &qwork, &lwork, &qiwork, &liwork, &info)
+
+    assert(info == 0, "Argument erroŕ in strsen")
+
+    lwork = <int>qwork
+    work = np.empty(lwork, dtype = np.float32)
+    liwork = qiwork
+    iwork = np.empty(liwork, dtype = f_lapack.l_int_dtype)
+
+    # Now the real calculation
+    f_lapack.strsen_("N", compq, <l_logical *>select.data,
+                     &N, <float *>T.data, &N, q_ptr, &N,
+                     <float *>wr.data, <float *>wi.data, &M, NULL, NULL,
+                     <float *>work.data, &lwork,
+                     <int *>iwork.data, &liwork, &info)
+
+    assert(info == 0, "Argument erroŕ in strsen")
+
+    if wi.nonzero()[0].size:
+        w = wr + 1j * wi
+    else:
+        w = wr
+
+    return filter_args((True, Q is not None, calc_ev), (T, Q, w))
+
+
+def dtrsen(np.ndarray[l_logical] select,
+           np.ndarray[np.float64_t, ndim=2] T,
+           np.ndarray[np.float64_t, ndim=2] Q=None,
+           calc_ev=True):
+    cdef l_int N, M, lwork, liwork, qiwork, info
+    cdef char *compq
+    cdef double qwork, *q_ptr
+    cdef np.ndarray[np.float64_t] wr, wi, work
+    cdef np.ndarray[l_int] iwork
+
+    assert_fortran_mat(T, Q)
+
+    N = T.shape[0]
+    wr = np.empty(N, dtype = np.float64)
+    wi = np.empty(N, dtype = np.float64)
+
+    if Q is not None:
+        compq = "V"
+        q_ptr = <double *>Q.data
+    else:
+        compq = "N"
+        q_ptr = NULL
+
+    # workspace query
+    lwork = liwork = -1
+    f_lapack.dtrsen_("N", compq, <l_logical *>select.data,
+                     &N, <double *>T.data, &N, q_ptr, &N,
+                     <double *>wr.data, <double *>wi.data, &M, NULL, NULL,
+                     &qwork, &lwork, &qiwork, &liwork, &info)
+
+    assert(info == 0, "Argument erroŕ in dtrsen")
+
+    lwork = <int>qwork
+    work = np.empty(lwork, dtype = np.float64)
+    liwork = qiwork
+    iwork = np.empty(liwork, dtype = f_lapack.l_int_dtype)
+
+    # Now the real calculation
+    f_lapack.dtrsen_("N", compq, <l_logical *>select.data,
+                     &N, <double *>T.data, &N, q_ptr, &N,
+                     <double *>wr.data, <double *>wi.data, &M, NULL, NULL,
+                     <double *>work.data, &lwork,
+                     <int *>iwork.data, &liwork, &info)
+
+    assert(info == 0, "Argument erroŕ in dtrsen")
+
+    if wi.nonzero()[0].size:
+        w = wr + 1j * wi
+    else:
+        w = wr
+
+    return filter_args((True, Q is not None, calc_ev), (T, Q, w))
+
+
+def ctrsen(np.ndarray[l_logical] select,
+           np.ndarray[np.complex64_t, ndim=2] T,
+           np.ndarray[np.complex64_t, ndim=2] Q=None,
+           calc_ev=True):
+    cdef l_int N, M, lwork, info
+    cdef char *compq
+    cdef float complex qwork, *q_ptr
+    cdef np.ndarray[np.complex64_t] w, work
+
+    assert_fortran_mat(T, Q)
+
+    N = T.shape[0]
+    w = np.empty(N, dtype = np.complex64)
+
+    if Q is not None:
+        compq = "V"
+        q_ptr = <float complex *>Q.data
+    else:
+        compq = "N"
+        q_ptr = NULL
+
+    # workspace query
+    lwork = -1
+    f_lapack.ctrsen_("N", compq, <l_logical *>select.data,
+                     &N, <float complex *>T.data, &N, q_ptr, &N,
+                     <float complex *>w.data, &M, NULL, NULL,
+                     &qwork, &lwork, &info)
+
+    assert(info == 0, "Argument erroŕ in ctrsen")
+
+    lwork = <int>qwork.real
+    work = np.empty(lwork, dtype = np.complex64)
+
+    # Now the real calculation
+    f_lapack.ctrsen_("N", compq, <l_logical *>select.data,
+                     &N, <float complex *>T.data, &N, q_ptr, &N,
+                     <float complex *>w.data, &M, NULL, NULL,
+                     <float complex *>work.data, &lwork, &info)
+
+    assert(info == 0, "Argument erroŕ in ctrsen")
+
+    return filter_args((True, Q is not None, calc_ev), (T, Q, w))
+
+
+def ztrsen(np.ndarray[l_logical] select,
+           np.ndarray[np.complex128_t, ndim=2] T,
+           np.ndarray[np.complex128_t, ndim=2] Q=None,
+           calc_ev=True):
+    cdef l_int N, MM, M, lwork, info
+    cdef char *compq
+    cdef double complex qwork, *q_ptr
+    cdef np.ndarray[np.complex128_t] w, work
+
+    assert_fortran_mat(T, Q)
+
+    N = T.shape[0]
+    w = np.empty(N, dtype = np.complex128)
+
+    if Q is not None:
+        compq = "V"
+        q_ptr = <double complex *>Q.data
+    else:
+        compq = "N"
+        q_ptr = NULL
+
+    # workspace query
+    lwork = -1
+    f_lapack.ztrsen_("N", compq, <l_logical *>select.data,
+                     &N, <double complex *>T.data, &N, q_ptr, &N,
+                     <double complex *>w.data, &M, NULL, NULL,
+                     &qwork, &lwork, &info)
+
+    assert(info == 0, "Argument erroŕ in ztrsen")
+
+    lwork = <int>qwork.real
+    work = np.empty(lwork, dtype = np.complex128)
+
+    # Now the real calculation
+    f_lapack.ztrsen_("N", compq, <l_logical *>select.data,
+                     &N, <double complex *>T.data, &N, q_ptr, &N,
+                     <double complex *>w.data, &M, NULL, NULL,
+                     <double complex *>work.data, &lwork, &info)
+
+    assert(info == 0, "Argument erroŕ in ztrsen")
+
+    return filter_args((True, Q is not None, calc_ev), (T, Q, w))
+
+
+# Helper function for xTREVC and xTGEVC
+def txevc_postprocess(dtype, T, vreal, np.ndarray[l_logical] select):
+    cdef int N, M, i, m, indx
+
+    N = T.shape[0]
+    if select is None:
+        select = np.ones(N, dtype = f_lapack.l_logical_dtype)
+    selindx = select.nonzero()[0]
+    M = selindx.size
+
+    v = np.empty((N, M), dtype = dtype, order='F')
+
+    indx = 0
+    for m in xrange(M):
+        k = selindx[m]
+
+        if k < N-1 and T[k+1,k]:
+            # we have the situation of a 2x2 block, and
+            # the eigenvalue with the positive imaginary part desired
+            v[:, m] = vreal[:, indx] + 1j * vreal[:, indx + 1]
+
+            # Check if the eigenvalue with negative real part is also
+            # selected, if it is, we need the same entries in vr
+            if not select[k+1]:
+                indx += 2
+        elif k > 0 and T[k,k-1]:
+            # we have the situation of a 2x2 block, and
+            # the eigenvalue with the negative imaginary part desired
+            v[:, m] = vreal[:, indx] - 1j * vreal[:, indx + 1]
+
+            indx += 2
+        else:
+            # real eigenvalue
+            v[:, m] = vreal[:, indx]
+
+            indx += 1
+    return v
+
+
+# Wrappers for xTREVC
+def strevc(np.ndarray[np.float32_t, ndim=2] T,
+           np.ndarray[np.float32_t, ndim=2] Q=None,
+           np.ndarray[l_logical] select=None,
+           left=False, right=True):
+    cdef l_int N, info, M, MM
+    cdef char *side, *howmny
+    cdef np.ndarray[np.float32_t, ndim=2] vl_r, vr_r
+    cdef float *vl_r_ptr, *vr_r_ptr
+    cdef np.ndarray[l_logical] select_cpy
+    cdef l_logical *select_ptr
+    cdef np.ndarray[np.float32_t] work
+
+    assert_fortran_mat(T, Q)
+
+    N = T.shape[0]
+    work = np.empty(4*N, dtype = np.float32)
+
+    if left and right:
+        side = "B"
+    elif left:
+        side = "L"
+    elif right:
+        side = "R"
+    else:
+        return
+
+    if select is not None:
+        howmny = "S"
+        MM = select.nonzero()[0].size
+        # Correct for possible additional storage if a single complex
+        # eigenvalue is selected.
+        # For that: Figure out the positions of the 2x2 blocks.
+        cmplxindx = np.diagonal(T, -1).nonzero()[0]
+        for i in cmplxindx:
+            if bool(select[i]) != bool(select[i+1]):
+                MM += 1
+
+        # Select is overwritten in strevc.
+        select_cpy = np.array(select, dtype = f_lapack.l_logical_dtype,
+                              order = 'F')
+        select_ptr = <l_logical *>select_cpy.data
+    else:
+        MM = N
+        select_ptr = NULL
+        if Q is not None:
+            howmny = "B"
+        else:
+            howmny = "A"
+
+    if left:
+        if Q is not None and select is None:
+            vl_r = np.asfortranarray(Q.copy())
+        else:
+            vl_r = np.empty((N, MM), dtype = np.float32, order='F')
+        vl_r_ptr = <float *>vl_r.data
+    else:
+        vl_r_ptr = NULL
+
+    if right:
+        if Q is not None and select is None:
+            vr_r = np.asfortranarray(Q.copy())
+        else:
+            vr_r = np.empty((N, MM), dtype = np.float32, order='F')
+        vr_r_ptr = <float *>vr_r.data
+    else:
+        vr_r_ptr = NULL
+
+    f_lapack.strevc_(side, howmny, select_ptr,
+                     &N, <float *>T.data, &N,
+                     vl_r_ptr, &N, vr_r_ptr, &N, &MM, &M,
+                     <float *>work.data, &info)
+
+    assert(info == 0, "Argument error in strevc")
+    assert(MM == M, "Unexpected number of eigenvectors returned in strevc")
+
+    if select is not None and Q is not None:
+        if left:
+            vl_r = np.asfortranarray(np.dot(Q, vl_r))
+        if right:
+            vr_r = np.asfortranarray(np.dot(Q, vr_r))
+
+    # If there are complex eigenvalues, we need to postprocess the
+    # eigenvectors.
+    if np.diagonal(T, -1).nonzero()[0].size:
+        if left:
+            vl = txevc_postprocess(np.complex64, T, vl_r, select)
+        if right:
+            vr = txevc_postprocess(np.complex64, T, vr_r, select)
+    else:
+        if left:
+            vl = vl_r
+        if right:
+            vr = vr_r
+
+    if left and right:
+        return (vl, vr)
+    elif left:
+        return vl
+    else:
+        return vr
+
+
+def dtrevc(np.ndarray[np.float64_t, ndim=2] T,
+           np.ndarray[np.float64_t, ndim=2] Q=None,
+           np.ndarray[l_logical] select=None,
+           left=False, right=True):
+    cdef l_int N, info, M, MM
+    cdef char *side, *howmny
+    cdef np.ndarray[np.float64_t, ndim=2] vl_r, vr_r
+    cdef double *vl_r_ptr, *vr_r_ptr
+    cdef np.ndarray[l_logical] select_cpy
+    cdef l_logical *select_ptr
+    cdef np.ndarray[np.float64_t] work
+
+    assert_fortran_mat(T, Q)
+
+    N = T.shape[0]
+    work = np.empty(4*N, dtype = np.float64)
+
+    if left and right:
+        side = "B"
+    elif left:
+        side = "L"
+    elif right:
+        side = "R"
+    else:
+        return
+
+    if select is not None:
+        howmny = "S"
+        MM = select.nonzero()[0].size
+        # Correct for possible additional storage if a single complex
+        # eigenvalue is selected.
+        # For that: Figure out the positions of the 2x2 blocks.
+        cmplxindx = np.diagonal(T, -1).nonzero()[0]
+        for i in cmplxindx:
+            if bool(select[i]) != bool(select[i+1]):
+                MM += 1
+
+        # Select is overwritten in dtrevc.
+        select_cpy = np.array(select, dtype = f_lapack.l_logical_dtype,
+                              order = 'F')
+        select_ptr = <l_logical *>select_cpy.data
+    else:
+        MM = N
+        select_ptr = NULL
+        if Q is not None:
+            howmny = "B"
+        else:
+            howmny = "A"
+
+    if left:
+        if Q is not None and select is None:
+            vl_r = np.asfortranarray(Q.copy())
+        else:
+            vl_r = np.empty((N, MM), dtype = np.float64, order='F')
+        vl_r_ptr = <double *>vl_r.data
+    else:
+        vl_r_ptr = NULL
+
+    if right:
+        if Q is not None and select is None:
+            vr_r = np.asfortranarray(Q.copy())
+        else:
+            vr_r = np.empty((N, MM), dtype = np.float64, order='F')
+        vr_r_ptr = <double *>vr_r.data
+    else:
+        vr_r_ptr = NULL
+
+    f_lapack.dtrevc_(side, howmny, select_ptr,
+                     &N, <double *>T.data, &N,
+                     vl_r_ptr, &N, vr_r_ptr, &N, &MM, &M,
+                     <double *>work.data, &info)
+
+    assert(info == 0, "Argument error in dtrevc")
+    assert(MM == M, "Unexpected number of eigenvectors returned in dtrevc")
+
+    if select is not None and Q is not None:
+        if left:
+            vl_r = np.asfortranarray(np.dot(Q, vl_r))
+        if right:
+            vr_r = np.asfortranarray(np.dot(Q, vr_r))
+
+    # If there are complex eigenvalues, we need to postprocess the eigenvectors
+    if np.diagonal(T, -1).nonzero()[0].size:
+        if left:
+            vl = txevc_postprocess(np.complex128, T, vl_r, select)
+        if right:
+            vr = txevc_postprocess(np.complex128, T, vr_r, select)
+    else:
+        if left:
+            vl = vl_r
+        if right:
+            vr = vr_r
+
+    if left and right:
+        return (vl, vr)
+    elif left:
+        return vl
+    else:
+        return vr
+
+
+def ctrevc(np.ndarray[np.complex64_t, ndim=2] T,
+           np.ndarray[np.complex64_t, ndim=2] Q=None,
+           np.ndarray[l_logical] select=None,
+           left=False, right=True):
+    cdef l_int N, info, M, MM
+    cdef char *side, *howmny
+    cdef np.ndarray[np.complex64_t, ndim=2] vl, vr
+    cdef float complex *vl_ptr, *vr_ptr
+    cdef l_logical *select_ptr
+    cdef np.ndarray[np.complex64_t] work
+    cdef np.ndarray[np.float32_t] rwork
+
+    assert_fortran_mat(T, Q)
+
+    N = T.shape[0]
+    work = np.empty(2*N, dtype = np.complex64)
+    rwork = np.empty(N, dtype = np.float32)
+
+    if left and right:
+        side = "B"
+    elif left:
+        side = "L"
+    elif right:
+        side = "R"
+    else:
+        return
+
+    if select is not None:
+        howmny = "S"
+        MM = select.nonzero()[0].size
+        select_ptr = <l_logical *>select.data
+    else:
+        MM = N
+        select_ptr = NULL
+        if Q is not None:
+            howmny = "B"
+        else:
+            howmny = "A"
+
+    if left:
+        if Q is not None and select is None:
+            vl = np.asfortranarray(Q.copy())
+        else:
+            vl = np.empty((N, MM), dtype = np.complex64, order='F')
+        vl_ptr = <float complex *>vl.data
+    else:
+        vl_ptr = NULL
+
+    if right:
+        if Q is not None and select is None:
+            vr = np.asfortranarray(Q.copy())
+        else:
+            vr = np.empty((N, MM), dtype = np.complex64, order='F')
+        vr_ptr = <float complex *>vr.data
+    else:
+        vr_ptr = NULL
+
+    f_lapack.ctrevc_(side, howmny, select_ptr,
+                     &N, <float complex *>T.data, &N,
+                     vl_ptr, &N, vr_ptr, &N, &MM, &M,
+                     <float complex *>work.data, <float *>rwork.data, &info)
+
+    assert(info == 0, "Argument error in ctrevc")
+    assert(MM == M, "Unexpected number of eigenvectors returned in ctrevc")
+
+    if select is not None and Q is not None:
+        if left:
+            vl = np.asfortranarray(np.dot(Q, vl))
+        if right:
+            vr = np.asfortranarray(np.dot(Q, vr))
+
+    if left and right:
+        return (vl, vr)
+    elif left:
+        return vl
+    else:
+        return vr
+
+
+def ztrevc(np.ndarray[np.complex128_t, ndim=2] T,
+           np.ndarray[np.complex128_t, ndim=2] Q=None,
+           np.ndarray[l_logical] select=None,
+           left=False, right=True):
+    cdef l_int N, info, M, MM
+    cdef char *side, *howmny
+    cdef np.ndarray[np.complex128_t, ndim=2] vl, vr
+    cdef double complex *vl_ptr, *vr_ptr
+    cdef l_logical *select_ptr
+    cdef np.ndarray[np.complex128_t] work
+    cdef np.ndarray[np.float64_t] rwork
+
+    assert_fortran_mat(T, Q)
+
+    N = T.shape[0]
+    work = np.empty(2*N, dtype = np.complex128)
+    rwork = np.empty(N, dtype = np.float64)
+
+    if left and right:
+        side = "B"
+    elif left:
+        side = "L"
+    elif right:
+        side = "R"
+    else:
+        return
+
+    if select is not None:
+        howmny = "S"
+        MM = select.nonzero()[0].size
+        select_ptr = <l_logical *>select.data
+    else:
+        MM = N
+        select_ptr = NULL
+        if Q is not None:
+            howmny = "B"
+        else:
+            howmny = "A"
+
+    if left:
+        if Q is not None and select is None:
+            vl = np.asfortranarray(Q.copy())
+        else:
+            vl = np.empty((N, MM), dtype = np.complex128, order='F')
+        vl_ptr = <double complex *>vl.data
+    else:
+        vl_ptr = NULL
+
+    if right:
+        if Q is not None and select is None:
+            vr = np.asfortranarray(Q.copy())
+        else:
+            vr = np.empty((N, MM), dtype = np.complex128, order='F')
+        vr_ptr = <double complex *>vr.data
+    else:
+        vr_ptr = NULL
+
+    f_lapack.ztrevc_(side, howmny, select_ptr,
+                     &N, <double complex *>T.data, &N,
+                     vl_ptr, &N, vr_ptr, &N, &MM, &M,
+                     <double complex *>work.data, <double *>rwork.data, &info)
+
+    assert(info == 0, "Argument error in ztrevc")
+    assert(MM == M, "Unexpected number of eigenvectors returned in ztrevc")
+
+    if select is not None and Q is not None:
+        if left:
+            vl = np.asfortranarray(np.dot(Q, vl))
+        if right:
+            vr = np.asfortranarray(np.dot(Q, vr))
+
+    if left and right:
+        return (vl, vr)
+    elif left:
+        return vl
+    else:
+        return vr
+
+
+# wrappers for xGGES
+def sgges(np.ndarray[np.float32_t, ndim=2] A,
+          np.ndarray[np.float32_t, ndim=2] B,
+          calc_q=True, calc_z=True, calc_ev=True):
+    cdef l_int N, lwork, sdim, info
+    cdef char *jobvsl, *jobvsr
+    cdef float *vsl_ptr, *vsr_ptr, qwork
+    cdef np.ndarray[np.float32_t, ndim=2] vsl, vsr
+    cdef np.ndarray[np.float32_t] alphar, alphai, beta, work
+
+    assert_fortran_mat(A, B)
+
+    N = A.shape[0]
+    alphar = np.empty(N, dtype = np.float32)
+    alphai = np.empty(N, dtype = np.float32)
+    beta = np.empty(N, dtype = np.float32)
+
+    if calc_q:
+        vsl = np.empty((N,N), dtype = np.float32, order='F')
+        vsl_ptr = <float *>vsl.data
+        jobvsl = "V"
+    else:
+        vsl = None
+        vsl_ptr = NULL
+        jobvsl = "N"
+
+    if calc_z:
+        vsr = np.empty((N,N), dtype = np.float32, order='F')
+        vsr_ptr = <float *>vsr.data
+        jobvsr = "V"
+    else:
+        vsr = None
+        vsr_ptr = NULL
+        jobvsr = "N"
+
+    # workspace query
+    lwork = -1
+    f_lapack.sgges_(jobvsl, jobvsr, "N", NULL,
+                    &N, <float *>A.data, &N,
+                    <float *>B.data, &N, &sdim,
+                    <float *>alphar.data, <float *>alphai.data,
+                    <float *>beta.data,
+                    vsl_ptr, &N, vsr_ptr, &N,
+                    &qwork, &lwork, NULL, &info)
+
+    assert(info == 0, "Argument error in zgees")
+
+    lwork = <int>qwork
+    work = np.empty(lwork, dtype = np.float32)
+
+    # Now the real calculation
+    f_lapack.sgges_(jobvsl, jobvsr, "N", NULL,
+                    &N, <float *>A.data, &N,
+                    <float *>B.data, &N, &sdim,
+                    <float *>alphar.data, <float *>alphai.data,
+                    <float *>beta.data,
+                    vsl_ptr, &N, vsr_ptr, &N,
+                    <float *>work.data, &lwork, NULL, &info)
+
+    if info > 0:
+        raise LinAlgError("QZ iteration failed to converge in sgges")
+
+    assert(info == 0, "Argument error in zgees")
+
+    if alphai.nonzero()[0].size:
+        alpha = alphar + 1j * alphai
+    else:
+        alpha = alphar
+
+    return filter_args((True, True, calc_q, calc_z, calc_ev, calc_ev),
+                       (A, B, vsl, vsr, alpha, beta))
+
+
+def dgges(np.ndarray[np.float64_t, ndim=2] A,
+          np.ndarray[np.float64_t, ndim=2] B,
+          calc_q=True, calc_z=True, calc_ev=True):
+    cdef l_int N, lwork, sdim, info
+    cdef char *jobvsl, *jobvsr
+    cdef double *vsl_ptr, *vsr_ptr, qwork
+    cdef np.ndarray[np.float64_t, ndim=2] vsl, vsr
+    cdef np.ndarray[np.float64_t] alphar, alphai, beta, work
+
+    assert_fortran_mat(A, B)
+
+    N = A.shape[0]
+    alphar = np.empty(N, dtype = np.float64)
+    alphai = np.empty(N, dtype = np.float64)
+    beta = np.empty(N, dtype = np.float64)
+
+    if calc_q:
+        vsl = np.empty((N,N), dtype = np.float64, order='F')
+        vsl_ptr = <double *>vsl.data
+        jobvsl = "V"
+    else:
+        vsl = None
+        vsl_ptr = NULL
+        jobvsl = "N"
+
+    if calc_z:
+        vsr = np.empty((N,N), dtype = np.float64, order='F')
+        vsr_ptr = <double *>vsr.data
+        jobvsr = "V"
+    else:
+        vsr = None
+        vsr_ptr = NULL
+        jobvsr = "N"
+
+    # workspace query
+    lwork = -1
+    f_lapack.dgges_(jobvsl, jobvsr, "N", NULL,
+                    &N, <double *>A.data, &N,
+                    <double *>B.data, &N, &sdim,
+                    <double *>alphar.data, <double *>alphai.data,
+                    <double *>beta.data,
+                    vsl_ptr, &N, vsr_ptr, &N,
+                    &qwork, &lwork, NULL, &info)
+
+    assert(info == 0, "Argument error in zgees")
+
+    lwork = <int>qwork
+    work = np.empty(lwork, dtype = np.float64)
+
+    # Now the real calculation
+    f_lapack.dgges_(jobvsl, jobvsr, "N", NULL,
+                    &N, <double *>A.data, &N,
+                    <double *>B.data, &N, &sdim,
+                    <double *>alphar.data, <double *>alphai.data,
+                    <double *>beta.data,
+                    vsl_ptr, &N, vsr_ptr, &N,
+                    <double *>work.data, &lwork, NULL, &info)
+
+    if info > 0:
+        raise LinAlgError("QZ iteration failed to converge in dgges")
+
+    assert(info == 0, "Argument error in zgees")
+
+    if alphai.nonzero()[0].size:
+        alpha = alphar + 1j * alphai
+    else:
+        alpha = alphar
+
+    return filter_args((True, True, calc_q, calc_z, calc_ev, calc_ev),
+                       (A, B, vsl, vsr, alpha, beta))
+
+
+def cgges(np.ndarray[np.complex64_t, ndim=2] A,
+          np.ndarray[np.complex64_t, ndim=2] B,
+          calc_q=True, calc_z=True, calc_ev=True):
+    cdef l_int N, lwork, sdim, info
+    cdef char *jobvsl, *jobvsr
+    cdef float complex *vsl_ptr, *vsr_ptr, qwork
+    cdef np.ndarray[np.complex64_t, ndim=2] vsl, vsr
+    cdef np.ndarray[np.complex64_t] alpha, beta, work
+    cdef np.ndarray[np.float32_t] rwork
+
+    assert_fortran_mat(A, B)
+
+    N = A.shape[0]
+    alpha = np.empty(N, dtype = np.complex64)
+    beta = np.empty(N, dtype = np.complex64)
+    rwork = np.empty(8*N, dtype = np.float32)
+
+    if calc_q:
+        vsl = np.empty((N,N), dtype = np.complex64, order='F')
+        vsl_ptr = <float complex *>vsl.data
+        jobvsl = "V"
+    else:
+        vsl = None
+        vsl_ptr = NULL
+        jobvsl = "N"
+
+    if calc_z:
+        vsr = np.empty((N,N), dtype = np.complex64, order='F')
+        vsr_ptr = <float complex *>vsr.data
+        jobvsr = "V"
+    else:
+        vsr = None
+        vsr_ptr = NULL
+        jobvsr = "N"
+
+    # workspace query
+    lwork = -1
+    f_lapack.cgges_(jobvsl, jobvsr, "N", NULL,
+                    &N, <float complex *>A.data, &N,
+                    <float complex *>B.data, &N, &sdim,
+                    <float complex *>alpha.data, <float complex *>beta.data,
+                    vsl_ptr, &N, vsr_ptr, &N,
+                    &qwork, &lwork, <float *>rwork.data, NULL, &info)
+
+    assert(info == 0, "Argument error in zgees")
+
+    lwork = <int>qwork.real
+    work = np.empty(lwork, dtype = np.complex64)
+
+    # Now the real calculation
+    f_lapack.cgges_(jobvsl, jobvsr, "N", NULL,
+                    &N, <float complex *>A.data, &N,
+                    <float complex *>B.data, &N, &sdim,
+                    <float complex *>alpha.data, <float complex *>beta.data,
+                    vsl_ptr, &N, vsr_ptr, &N,
+                    <float complex *>work.data, &lwork,
+                    <float *>rwork.data, NULL, &info)
+
+    if info > 0:
+        raise LinAlgError("QZ iteration failed to converge in cgges")
+
+    assert(info == 0, "Argument error in zgees")
+
+    return filter_args((True, True, calc_q, calc_z, calc_ev, calc_ev),
+                       (A, B, vsl, vsr, alpha, beta))
+
+
+def zgges(np.ndarray[np.complex128_t, ndim=2] A,
+          np.ndarray[np.complex128_t, ndim=2] B,
+          calc_q=True, calc_z=True, calc_ev=True):
+    cdef l_int N, lwork, sdim, info
+    cdef char *jobvsl, *jobvsr
+    cdef double complex *vsl_ptr, *vsr_ptr, qwork
+    cdef np.ndarray[np.complex128_t, ndim=2] vsl, vsr
+    cdef np.ndarray[np.complex128_t] alpha, beta, work
+    cdef np.ndarray[np.float64_t] rwork
+
+    assert_fortran_mat(A, B)
+
+    N = A.shape[0]
+    alpha = np.empty(N, dtype = np.complex128)
+    beta = np.empty(N, dtype = np.complex128)
+    rwork = np.empty(8*N, dtype = np.float64)
+
+    if calc_q:
+        vsl = np.empty((N,N), dtype = np.complex128, order='F')
+        vsl_ptr = <double complex *>vsl.data
+        jobvsl = "V"
+    else:
+        vsl = None
+        vsl_ptr = NULL
+        jobvsl = "N"
+
+    if calc_z:
+        vsr = np.empty((N,N), dtype = np.complex128, order='F')
+        vsr_ptr = <double complex *>vsr.data
+        jobvsr = "V"
+    else:
+        vsr = None
+        vsr_ptr = NULL
+        jobvsr = "N"
+
+    # workspace query
+    lwork = -1
+    f_lapack.zgges_(jobvsl, jobvsr, "N", NULL,
+                    &N, <double complex *>A.data, &N,
+                    <double complex *>B.data, &N, &sdim,
+                    <double complex *>alpha.data, <double complex *>beta.data,
+                    vsl_ptr, &N, vsr_ptr, &N,
+                    &qwork, &lwork, <double *>rwork.data, NULL, &info)
+
+    assert(info == 0, "Argument error in zgees")
+
+    lwork = <int>qwork.real
+    work = np.empty(lwork, dtype = np.complex128)
+
+    # Now the real calculation
+    f_lapack.zgges_(jobvsl, jobvsr, "N", NULL,
+                    &N, <double complex *>A.data, &N,
+                    <double complex *>B.data, &N, &sdim,
+                    <double complex *>alpha.data, <double complex *>beta.data,
+                    vsl_ptr, &N, vsr_ptr, &N,
+                    <double complex *>work.data, &lwork,
+                    <double *>rwork.data, NULL, &info)
+
+    if info > 0:
+        raise LinAlgError("QZ iteration failed to converge in zgges")
+
+    assert(info == 0, "Argument error in zgees")
+
+    return filter_args((True, True, calc_q, calc_z, calc_ev, calc_ev),
+                       (A, B, vsl, vsr, alpha, beta))
+
+
+# wrappers for xTGSEN
+def stgsen(np.ndarray[l_logical] select,
+           np.ndarray[np.float32_t, ndim=2] S,
+           np.ndarray[np.float32_t, ndim=2] T,
+           np.ndarray[np.float32_t, ndim=2] Q=None,
+           np.ndarray[np.float32_t, ndim=2] Z=None,
+           calc_ev=True):
+    cdef l_int N, M, lwork, liwork, qiwork, info, ijob
+    cdef l_logical wantq, wantz
+    cdef float qwork, *q_ptr, *z_ptr
+    cdef np.ndarray[np.float32_t] alphar, alphai, beta, work
+    cdef np.ndarray[l_int] iwork
+
+    assert_fortran_mat(S, T, Q, Z)
+
+    N = S.shape[0]
+    alphar = np.empty(N, dtype = np.float32)
+    alphai = np.empty(N, dtype = np.float32)
+    beta = np.empty(N, dtype = np.float32)
+    ijob = 0
+
+    if Q is not None:
+        wantq = 1
+        q_ptr = <float *>Q.data
+    else:
+        wantq = 0
+        q_ptr = NULL
+
+    if Z is not None:
+        wantz = 1
+        z_ptr = <float *>Z.data
+    else:
+        wantz = 0
+        z_ptr = NULL
+
+    # workspace query
+    lwork = -1
+    liwork = -1
+    f_lapack.stgsen_(&ijob, &wantq, &wantz, <l_logical *>select.data,
+                     &N, <float *>S.data, &N,
+                     <float *>T.data, &N,
+                     <float *>alphar.data, <float *>alphai.data,
+                     <float *>beta.data,
+                     q_ptr, &N, z_ptr, &N, &M, NULL, NULL, NULL,
+                     &qwork, &lwork, &qiwork, &liwork, &info)
+
+    assert(info == 0, "Argument erroŕ in stgsen")
+
+    lwork = <int>qwork
+    work = np.empty(lwork, dtype = np.float32)
+    liwork = qiwork
+    iwork = np.empty(liwork, dtype = int_dtype)
+
+    # Now the real calculation
+    f_lapack.stgsen_(&ijob, &wantq, &wantz, <l_logical *>select.data,
+                     &N, <float *>S.data, &N,
+                     <float *>T.data, &N,
+                     <float *>alphar.data, <float *>alphai.data,
+                     <float *>beta.data,
+                     q_ptr, &N, z_ptr, &N, &M, NULL, NULL, NULL,
+                     <float *>work.data, &lwork,
+                     <l_int *>iwork.data, &liwork, &info)
+
+    if info > 0:
+        raise LinAlgError("Reordering failed; problem is very ill-conditioned")
+
+    assert(info == 0, "Argument erroŕ in stgsen")
+
+    if alphai.nonzero()[0].size:
+        alpha = alphar + 1j * alphai
+    else:
+        alpha = alphar
+
+    return filter_args((True, True, Q is not None, Z is not None,
+                        calc_ev, calc_ev),
+                       (S, T, Q, Z, alpha, beta))
+
+
+def dtgsen(np.ndarray[l_logical] select,
+           np.ndarray[np.float64_t, ndim=2] S,
+           np.ndarray[np.float64_t, ndim=2] T,
+           np.ndarray[np.float64_t, ndim=2] Q=None,
+           np.ndarray[np.float64_t, ndim=2] Z=None,
+           calc_ev=True):
+    cdef l_int N, M, lwork, liwork, qiwork, info, ijob
+    cdef l_logical wantq, wantz
+    cdef double qwork, *q_ptr, *z_ptr
+    cdef np.ndarray[np.float64_t] alphar, alphai, beta, work
+    cdef np.ndarray[l_int] iwork
+
+    assert_fortran_mat(S, T, Q, Z)
+
+    N = S.shape[0]
+    alphar = np.empty(N, dtype = np.float64)
+    alphai = np.empty(N, dtype = np.float64)
+    beta = np.empty(N, dtype = np.float64)
+    ijob = 0
+
+    if Q is not None:
+        wantq = 1
+        q_ptr = <double *>Q.data
+    else:
+        wantq = 0
+        q_ptr = NULL
+
+    if Z is not None:
+        wantz = 1
+        z_ptr = <double *>Z.data
+    else:
+        wantz = 0
+        z_ptr = NULL
+
+    # workspace query
+    lwork = -1
+    liwork = -1
+    f_lapack.dtgsen_(&ijob, &wantq, &wantz, <l_logical *>select.data,
+                     &N, <double *>S.data, &N,
+                     <double *>T.data, &N,
+                     <double *>alphar.data, <double *>alphai.data,
+                     <double *>beta.data,
+                     q_ptr, &N, z_ptr, &N, &M, NULL, NULL, NULL,
+                     &qwork, &lwork, &qiwork, &liwork, &info)
+
+    assert(info == 0, "Argument erroŕ in dtgsen")
+
+    lwork = <int>qwork
+    work = np.empty(lwork, dtype = np.float64)
+    liwork = qiwork
+    iwork = np.empty(liwork, dtype = int_dtype)
+
+    # Now the real calculation
+    f_lapack.dtgsen_(&ijob, &wantq, &wantz, <l_logical *>select.data,
+                     &N, <double *>S.data, &N,
+                     <double *>T.data, &N,
+                     <double *>alphar.data, <double *>alphai.data,
+                     <double *>beta.data,
+                     q_ptr, &N, z_ptr, &N, &M, NULL, NULL, NULL,
+                     <double *>work.data, &lwork,
+                     <l_int *>iwork.data, &liwork, &info)
+
+    if info > 0:
+        raise LinAlgError("Reordering failed; problem is very ill-conditioned")
+
+    assert(info == 0, "Argument erroŕ in dtgsen")
+
+    if alphai.nonzero()[0].size:
+        alpha = alphar + 1j * alphai
+    else:
+        alpha = alphar
+
+    return filter_args((True, True, Q is not None, Z is not None,
+                        calc_ev, calc_ev),
+                       (S, T, Q, Z, alpha, beta))
+
+
+def ctgsen(np.ndarray[l_logical] select,
+           np.ndarray[np.complex64_t, ndim=2] S,
+           np.ndarray[np.complex64_t, ndim=2] T,
+           np.ndarray[np.complex64_t, ndim=2] Q=None,
+           np.ndarray[np.complex64_t, ndim=2] Z=None,
+           calc_ev=True):
+    cdef l_int N, M, lwork, liwork, qiwork, info, ijob
+    cdef l_logical wantq, wantz
+    cdef float complex qwork, *q_ptr, *z_ptr
+    cdef np.ndarray[np.complex64_t] alpha, beta, work
+    cdef np.ndarray[l_int] iwork
+
+    assert_fortran_mat(S, T, Q, Z)
+
+    N = S.shape[0]
+    alpha = np.empty(N, dtype = np.complex64)
+    beta = np.empty(N, dtype = np.complex64)
+    ijob = 0
+
+    if Q is not None:
+        wantq = 1
+        q_ptr = <float complex *>Q.data
+    else:
+        wantq = 0
+        q_ptr = NULL
+
+    if Z is not None:
+        wantz = 1
+        z_ptr = <float complex *>Z.data
+    else:
+        wantz = 0
+        z_ptr = NULL
+
+    # workspace query
+    lwork = -1
+    liwork = -1
+    f_lapack.ctgsen_(&ijob, &wantq, &wantz, <l_logical *>select.data,
+                     &N, <float complex *>S.data, &N,
+                     <float complex *>T.data, &N,
+                     <float complex *>alpha.data, <float complex *>beta.data,
+                     q_ptr, &N, z_ptr, &N, &M, NULL, NULL, NULL,
+                     &qwork, &lwork, &qiwork, &liwork, &info)
+
+    assert(info == 0, "Argument erroŕ in ctgsen")
+
+    lwork = <int>qwork.real
+    work = np.empty(lwork, dtype = np.complex64)
+    liwork = qiwork
+    iwork = np.empty(liwork, dtype = int_dtype)
+
+    # Now the real calculation
+    f_lapack.ctgsen_(&ijob, &wantq, &wantz, <l_logical *>select.data,
+                     &N, <float complex *>S.data, &N,
+                     <float complex *>T.data, &N,
+                     <float complex *>alpha.data, <float complex *>beta.data,
+                     q_ptr, &N, z_ptr, &N, &M, NULL, NULL, NULL,
+                     <float complex *>work.data, &lwork,
+                     <l_int *>iwork.data, &liwork, &info)
+
+    if info > 0:
+        raise LinAlgError("Reordering failed; problem is very ill-conditioned")
+
+    assert(info == 0, "Argument erroŕ in ctgsen")
+
+    return filter_args((True, True, Q is not None, Z is not None,
+                        calc_ev, calc_ev),
+                       (S, T, Q, Z, alpha, beta))
+
+
+def ztgsen(np.ndarray[l_logical] select,
+           np.ndarray[np.complex128_t, ndim=2] S,
+           np.ndarray[np.complex128_t, ndim=2] T,
+           np.ndarray[np.complex128_t, ndim=2] Q=None,
+           np.ndarray[np.complex128_t, ndim=2] Z=None,
+           calc_ev=True):
+    cdef l_int N, M, lwork, liwork, qiwork, info, ijob
+    cdef l_logical wantq, wantz
+    cdef double complex qwork, *q_ptr, *z_ptr
+    cdef np.ndarray[np.complex128_t] alpha, beta, work
+    cdef np.ndarray[l_int] iwork
+
+    assert_fortran_mat(S, T, Q, Z)
+
+    N = S.shape[0]
+    alpha = np.empty(N, dtype = np.complex128)
+    beta = np.empty(N, dtype = np.complex128)
+    ijob = 0
+
+    if Q is not None:
+        wantq = 1
+        q_ptr = <double complex *>Q.data
+    else:
+        wantq = 0
+        q_ptr = NULL
+
+    if Z is not None:
+        wantz = 1
+        z_ptr = <double complex *>Z.data
+    else:
+        wantz = 0
+        z_ptr = NULL
+
+    # workspace query
+    lwork = -1
+    liwork = -1
+    f_lapack.ztgsen_(&ijob, &wantq, &wantz, <l_logical *>select.data,
+                     &N, <double complex *>S.data, &N,
+                     <double complex *>T.data, &N,
+                     <double complex *>alpha.data, <double complex *>beta.data,
+                     q_ptr, &N, z_ptr, &N, &M, NULL, NULL, NULL,
+                     &qwork, &lwork, &qiwork, &liwork, &info)
+
+    assert(info == 0, "Argument erroŕ in ztgsen")
+
+    lwork = <int>qwork.real
+    work = np.empty(lwork, dtype = np.complex128)
+    liwork = qiwork
+    iwork = np.empty(liwork, dtype = int_dtype)
+
+    # Now the real calculation
+    f_lapack.ztgsen_(&ijob, &wantq, &wantz, <l_logical *>select.data,
+                     &N, <double complex *>S.data, &N,
+                     <double complex *>T.data, &N,
+                     <double complex *>alpha.data, <double complex *>beta.data,
+                     q_ptr, &N, z_ptr, &N, &M, NULL, NULL, NULL,
+                     <double complex *>work.data, &lwork,
+                     <l_int *>iwork.data, &liwork, &info)
+
+    if info > 0:
+        raise LinAlgError("Reordering failed; problem is very ill-conditioned")
+
+    assert(info == 0, "Argument erroŕ in ztgsen")
+
+    return filter_args((True, True, Q is not None, Z is not None,
+                        calc_ev, calc_ev),
+                       (S, T, Q, Z, alpha, beta))
+
+
+# xTGEVC
+def stgevc(np.ndarray[np.float32_t, ndim=2] S,
+           np.ndarray[np.float32_t, ndim=2] T,
+           np.ndarray[np.float32_t, ndim=2] Q=None,
+           np.ndarray[np.float32_t, ndim=2] Z=None,
+           np.ndarray[l_logical] select=None,
+           left=False, right=True):
+    cdef l_int N, info, M, MM
+    cdef char *side, *howmny
+    cdef np.ndarray[np.float32_t, ndim=2] vl_r, vr_r
+    cdef float *vl_r_ptr, *vr_r_ptr
+    cdef np.ndarray[l_logical] select_cpy
+    cdef l_logical *select_ptr
+    cdef np.ndarray[np.float32_t] work
+
+    assert_fortran_mat(S, T, Q, Z)
+
+    N = S.shape[0]
+    work = np.empty(6*N, dtype = np.float32)
+
+    if left and right:
+        side = "B"
+    elif left:
+        side = "L"
+    elif right:
+        side = "R"
+    else:
+        return
+
+    backtr = False
+
+    if select is not None:
+        howmny = "S"
+        MM = select.nonzero()[0].size
+        # Correct for possible additional storage if a single complex
+        # eigenvalue is selected.
+        # For that: Figure out the positions of the 2x2 blocks.
+        cmplxindx = np.diagonal(S, -1).nonzero()[0]
+        for i in cmplxindx:
+            if bool(select[i]) != bool(select[i+1]):
+                MM += 1
+
+        # select is overwritten in stgevc
+        select_cpy = np.array(select, dtype = f_lapack.l_logical_dtype,
+                              order = 'F')
+        select_ptr = <l_logical *>select_cpy.data
+    else:
+        MM = N
+        select_ptr = NULL
+        if ((left and right and Q is not None and Z is not None) or
+            (left and not right and Q is not None) or
+            (right and not left and Z is not None)):
+            howmny = "B"
+            backtr = True
+        else:
+            howmny = "A"
+
+    if left:
+        if backtr:
+            vl_r = Q
+        else:
+            vl_r = np.empty((N, MM), dtype = np.float32, order='F')
+        vl_r_ptr = <float *>vl_r.data
+    else:
+        vl_r_ptr = NULL
+
+    if right:
+        if backtr:
+            vr_r = Z
+        else:
+            vr_r = np.empty((N, MM), dtype = np.float32, order='F')
+        vr_r_ptr = <float *>vr_r.data
+    else:
+        vr_r_ptr = NULL
+
+    f_lapack.stgevc_(side, howmny, select_ptr,
+                     &N, <float *>S.data, &N,
+                     <float *>T.data, &N,
+                     vl_r_ptr, &N, vr_r_ptr, &N, &MM, &M,
+                     <float *>work.data, &info)
+
+    assert(info == 0, "Argument error in stgevc")
+    assert(MM == M, "Unexpected number of eigenvectors returned in stgevc")
+
+    if not backtr:
+        if left:
+            vl_r = np.asfortranarray(np.dot(Q, vl_r))
+        if right:
+            vr_r = np.asfortranarray(np.dot(Z, vr_r))
+
+    # If there are complex eigenvalues, we need to postprocess the eigenvectors
+    if np.diagonal(S, -1).nonzero()[0].size:
+        if left:
+            vl = txevc_postprocess(np.complex64, S, vl_r, select)
+        if right:
+            vr = txevc_postprocess(np.complex64, S, vr_r, select)
+    else:
+        if left:
+            vl = vl_r
+        if right:
+            vr = vr_r
+
+    if left and right:
+        return (vl, vr)
+    elif left:
+        return vl
+    else:
+        return vr
+
+
+def dtgevc(np.ndarray[np.float64_t, ndim=2] S,
+           np.ndarray[np.float64_t, ndim=2] T,
+           np.ndarray[np.float64_t, ndim=2] Q=None,
+           np.ndarray[np.float64_t, ndim=2] Z=None,
+           np.ndarray[l_logical] select=None,
+           left=False, right=True):
+    cdef l_int N, info, M, MM
+    cdef char *side, *howmny
+    cdef np.ndarray[np.float64_t, ndim=2] vl_r, vr_r
+    cdef double *vl_r_ptr, *vr_r_ptr
+    cdef np.ndarray[l_logical] select_cpy
+    cdef l_logical *select_ptr
+    cdef np.ndarray[np.float64_t] work
+
+    assert_fortran_mat(S, T, Q, Z)
+
+    N = S.shape[0]
+    work = np.empty(6*N, dtype = np.float64)
+
+    if left and right:
+        side = "B"
+    elif left:
+        side = "L"
+    elif right:
+        side = "R"
+    else:
+        return
+
+    backtr = False
+
+    if select is not None:
+        howmny = "S"
+        MM = select.nonzero()[0].size
+        # Correct for possible additional storage if a single complex
+        # eigenvalue is selected.
+        # For that: Figure out the positions of the 2x2 blocks.
+        cmplxindx = np.diagonal(S, -1).nonzero()[0]
+        for i in cmplxindx:
+            if bool(select[i]) != bool(select[i+1]):
+                MM += 1
+
+        # select is overwritten in dtgevc
+        select_cpy = np.array(select, dtype = f_lapack.l_logical_dtype,
+                              order = 'F')
+        select_ptr = <l_logical *>select_cpy.data
+    else:
+        MM = N
+        select_ptr = NULL
+        if ((left and right and Q is not None and Z is not None) or
+            (left and not right and Q is not None) or
+            (right and not left and Z is not None)):
+            howmny = "B"
+            backtr = True
+        else:
+            howmny = "A"
+
+    if left:
+        if backtr:
+            vl_r = Q
+        else:
+            vl_r = np.empty((N, MM), dtype = np.float64, order='F')
+        vl_r_ptr = <double *>vl_r.data
+    else:
+        vl_r_ptr = NULL
+
+    if right:
+        if backtr:
+            vr_r = Z
+        else:
+            vr_r = np.empty((N, MM), dtype = np.float64, order='F')
+        vr_r_ptr = <double *>vr_r.data
+    else:
+        vr_r_ptr = NULL
+
+    f_lapack.dtgevc_(side, howmny, select_ptr,
+                     &N, <double *>S.data, &N,
+                     <double *>T.data, &N,
+                     vl_r_ptr, &N, vr_r_ptr, &N, &MM, &M,
+                     <double *>work.data, &info)
+
+    assert(info == 0, "Argument error in dtgevc")
+    assert(MM == M, "Unexpected number of eigenvectors returned in dtgevc")
+
+    if not backtr:
+        if left:
+            vl_r = np.asfortranarray(np.dot(Q, vl_r))
+        if right:
+            vr_r = np.asfortranarray(np.dot(Z, vr_r))
+
+    # If there are complex eigenvalues, we need to postprocess the
+    # eigenvectors.
+    if np.diagonal(S, -1).nonzero()[0].size:
+        if left:
+            vl = txevc_postprocess(np.complex128, S, vl_r, select)
+        if right:
+            vr = txevc_postprocess(np.complex128, S, vr_r, select)
+    else:
+        if left:
+            vl = vl_r
+        if right:
+            vr = vr_r
+
+    if left and right:
+        return (vl, vr)
+    elif left:
+        return vl
+    else:
+        return vr
+
+
+def ctgevc(np.ndarray[np.complex64_t, ndim=2] S,
+           np.ndarray[np.complex64_t, ndim=2] T,
+           np.ndarray[np.complex64_t, ndim=2] Q=None,
+           np.ndarray[np.complex64_t, ndim=2] Z=None,
+           np.ndarray[l_logical] select=None,
+           left=False, right=True):
+    cdef l_int N, info, M, MM
+    cdef char *side, *howmny
+    cdef np.ndarray[np.complex64_t, ndim=2] vl, vr
+    cdef float complex *vl_ptr, *vr_ptr
+    cdef l_logical *select_ptr
+    cdef np.ndarray[np.complex64_t] work
+    cdef np.ndarray[np.float32_t] rwork
+
+    assert_fortran_mat(S, T, Q, Z)
+
+    N = S.shape[0]
+    work = np.empty(2*N, dtype = np.complex64)
+    rwork = np.empty(2*N, dtype = np.float32)
+
+    if left and right:
+        side = "B"
+    elif left:
+        side = "L"
+    elif right:
+        side = "R"
+    else:
+        return
+
+    backtr = False
+
+    if select is not None:
+        howmny = "S"
+        MM = select.nonzero()[0].size
+        select_ptr = <l_logical *>select.data
+    else:
+        MM = N
+        select_ptr = NULL
+        if ((left and right and Q is not None and Z is not None) or
+            (left and not right and Q is not None) or
+            (right and not left and Z is not None)):
+            howmny = "B"
+            backtr = True
+        else:
+            howmny = "A"
+
+    if left:
+        if backtr:
+            vl = Q
+        else:
+            vl = np.empty((N, MM), dtype = np.complex64, order='F')
+        vl_ptr = <float complex *>vl.data
+    else:
+        vl_ptr = NULL
+
+    if right:
+        if backtr:
+            vr = Z
+        else:
+            vr = np.empty((N, MM), dtype = np.complex64, order='F')
+        vr_ptr = <float complex *>vr.data
+    else:
+        vr_ptr = NULL
+
+    f_lapack.ctgevc_(side, howmny, select_ptr,
+                     &N, <float complex *>S.data, &N,
+                     <float complex *>T.data, &N,
+                     vl_ptr, &N, vr_ptr, &N, &MM, &M,
+                     <float complex *>work.data, <float *>rwork.data, &info)
+
+    assert(info == 0, "Argument error in ctgevc")
+    assert(MM == M, "Unexpected number of eigenvectors returned in ctgevc")
+
+    if not backtr:
+        if left:
+            vl = np.asfortranarray(np.dot(Q, vl))
+        if right:
+            vr = np.asfortranarray(np.dot(Z, vr))
+
+    if left and right:
+        return (vl, vr)
+    elif left:
+        return vl
+    else:
+        return vr
+
+
+def ztgevc(np.ndarray[np.complex128_t, ndim=2] S,
+           np.ndarray[np.complex128_t, ndim=2] T,
+           np.ndarray[np.complex128_t, ndim=2] Q=None,
+           np.ndarray[np.complex128_t, ndim=2] Z=None,
+           np.ndarray[l_logical] select=None,
+           left=False, right=True):
+    cdef l_int N, info, M, MM
+    cdef char *side, *howmny
+    cdef np.ndarray[np.complex128_t, ndim=2] vl, vr
+    cdef double complex *vl_ptr, *vr_ptr
+    cdef l_logical *select_ptr
+    cdef np.ndarray[np.complex128_t] work
+    cdef np.ndarray[np.float64_t] rwork
+
+    assert_fortran_mat(S, T, Q, Z)
+
+    N = S.shape[0]
+    work = np.empty(2*N, dtype = np.complex128)
+    rwork = np.empty(2*N, dtype = np.float64)
+
+    if left and right:
+        side = "B"
+    elif left:
+        side = "L"
+    elif right:
+        side = "R"
+    else:
+        return
+
+    backtr = False
+
+    if select is not None:
+        howmny = "S"
+        MM = select.nonzero()[0].size
+        select_ptr = <l_logical *>select.data
+    else:
+        MM = N
+        select_ptr = NULL
+        if ((left and right and Q is not None and Z is not None) or
+            (left and not right and Q is not None) or
+            (right and not left and Z is not None)):
+            howmny = "B"
+            backtr = True
+        else:
+            howmny = "A"
+
+    if left:
+        if backtr:
+            vl = Q
+        else:
+            vl = np.empty((N, MM), dtype = np.complex128, order='F')
+        vl_ptr = <double complex *>vl.data
+    else:
+        vl_ptr = NULL
+
+    if right:
+        if backtr:
+            vr = Z
+        else:
+            vr = np.empty((N, MM), dtype = np.complex128, order='F')
+        vr_ptr = <double complex *>vr.data
+    else:
+        vr_ptr = NULL
+
+    f_lapack.ztgevc_(side, howmny, select_ptr,
+                     &N, <double complex *>S.data, &N,
+                     <double complex *>T.data, &N,
+                     vl_ptr, &N, vr_ptr, &N, &MM, &M,
+                     <double complex *>work.data, <double *>rwork.data, &info)
+
+    assert(info == 0, "Argument error in ztgevc")
+    assert(MM == M, "Unexpected number of eigenvectors returned in ztgevc")
+
+    if not backtr:
+        if left:
+            vl = np.asfortranarray(np.dot(Q, vl))
+        if right:
+            vr = np.asfortranarray(np.dot(Z, vr))
+
+    if left and right:
+        return (vl, vr)
+    elif left:
+        return vl
+    else:
+        return vr
+
+
+def prepare_for_lapack(overwrite, *args):
+    """Convert arrays to Fortran format.
+
+    This function takes a number of array objects in `args` and converts them
+    to a format that can be directly passed to a Fortran function (Fortran
+    contiguous numpy array). If the arrays have different data type, they
+    converted arrays are cast to a common compatible data type (one of numpy's
+    `float32`, `float64`, `complex64`, `complex128` data types).
+
+    If `overwrite` is ``False``, an numpy array that would already be in the
+    correct format (Fortran contiguous, right data type) is neverthelessed
+    copied. (Hence, overwrite = True does not imply that acting on the
+    converted array in the return values will overwrite the original array in
+    all cases -- it does only so if the original array was already in the
+    correct format. The conversions require copying. In fact, that's the same
+    behavior as in scipy, it's just not explicitly stated there)
+
+    If an argument is ``None``, it is just passed through and not used to
+    determine the proper LAPACK type.
+
+    `prepare_for_lapack` returns a character indicating the proper LAPACK data
+    type ('s', 'd', 'c', 'z') and a list of properly converted arrays.
+    """
+
+    # Make sure we have numpy arrays
+    mats = [None]*len(args)
+    for i in xrange(len(args)):
+        if args[i] is not None:
+            arr = np.asanyarray(args[i])
+            if not np.issubdtype(arr.dtype, np.number):
+                raise ValueError("Argument cannot be interpreted "
+                                 "as a numeric array")
+
+            mats[i] = (arr, arr is not args[i] or overwrite)
+        else:
+            mats[i] = (None, True)
+
+    # First figure out common dtype
+    # Note: The return type of common_type is guaranteed to be a floating point
+    #       kind.
+    dtype = np.common_type(*[arr for arr, ovwrt in mats if arr is not None])
+
+    if dtype == np.float32:
+        lapacktype = 's'
+    elif dtype == np.float64:
+        lapacktype = 'd'
+    elif dtype == np.complex64:
+        lapacktype = 'c'
+    elif dtype == np.complex128:
+        lapacktype = 'z'
+    else:
+        raise AssertionError("Unexpected data type from common_type")
+
+    ret = [ lapacktype ]
+    for npmat, ovwrt in mats:
+        # Now make sure that the array is contiguous, and copy if necessary.
+        if npmat is not None:
+            if npmat.ndim == 2:
+                if not npmat.flags["F_CONTIGUOUS"]:
+                    npmat = np.asfortranarray(npmat, dtype = dtype)
+                elif npmat.dtype != dtype:
+                    npmat = npmat.astype(dtype)
+                elif not ovwrt:
+                    # ugly here: copy makes always C-array, no way to tell it
+                    # to make a Fortran array.
+                    npmat = np.asfortranarray(npmat.copy())
+            elif npmat.ndim == 1:
+                if not npmat.flags["C_CONTIGUOUS"]:
+                    npmat = np.ascontiguousarray(npmat, dtype = dtype)
+                elif npmat.dtype != dtype:
+                    npmat = npmat.astype(dtype)
+                elif not ovwrt:
+                    npmat = np.asfortranarray(npmat.copy())
+            else:
+                raise ValueError("Dimensionality of array is not 1 or 2")
+
+        ret.append(npmat)
+
+    return tuple(ret)
diff --git a/kwant/linalg/tests/test_linalg.py b/kwant/linalg/tests/test_linalg.py
new file mode 100644
index 0000000000000000000000000000000000000000..19febb638987e8ba12df0295c645bae035c91425
--- /dev/null
+++ b/kwant/linalg/tests/test_linalg.py
@@ -0,0 +1,427 @@
+from kwant.linalg import lu_factor, lu_solve, rcond_from_lu, gen_eig, schur, \
+    convert_r2c_schur, order_schur, evecs_from_schur, gen_schur, \
+    convert_r2c_gen_schur, order_gen_schur, evecs_from_gen_schur
+from nose.tools import assert_equal, assert_true
+import numpy as np
+
+class _Random:
+    def __init__(self):
+        self._x = 13
+
+    def _set_seed(self, seed):
+        self._x = seed
+
+    def _randf(self):
+        #a very bad random number generator returning
+        #number between -1 and +1
+        #Just for making some matrices, and being sure that they
+        #are the same on any architecture
+        m = 2**16
+        a = 11929
+        c = 36491
+
+        self._x = (a * self._x + c) % m
+
+        return (float(self._x)/m-0.5)*2
+
+    def _randi(self):
+        #a very bad random number generator returning
+        #number between 0 and 20
+        #Just for making some matrices, and being sure that they
+        #are the same on any architecture
+        m = 2**16
+        a = 11929
+        c = 36491
+
+        self._x = (a * self._x + c) % m
+
+        return self._x % 21
+
+    def randmat(self, n, m, dtype):
+        mat = np.empty((n, m), dtype = dtype)
+
+        if issubclass(dtype, np.complexfloating):
+            for i in xrange(n):
+                for j in xrange(m):
+                    mat[i,j] = self._randf() + 1j * self._randf()
+        elif issubclass(dtype, np.floating):
+            for i in xrange(n):
+                for j in xrange(m):
+                    mat[i,j] = self._randf()
+        elif issubclass(dtype, np.integer):
+            for i in xrange(n):
+                for j in xrange(m):
+                    mat[i,j] = self._randi()
+
+        return mat
+
+    def randvec(self, n, dtype):
+        vec = np.empty(n, dtype = dtype)
+
+        if issubclass(dtype, np.complexfloating):
+            for i in xrange(n):
+                vec[i] = self._randf() + 1j * self._randf()
+        elif issubclass(dtype, np.floating):
+            for i in xrange(n):
+                vec[i] = self._randf()
+        elif issubclass(dtype, np.integer):
+            for i in xrange(n):
+                vec[i] = self._randi()
+
+        return vec
+
+#Improved version of assert_arrays_almost_equal that
+#uses the dtype to set the precision
+#(The default precision of assert_arrays_almost_equal is sometimes
+# too small for single-precision comparisions)
+def assert_array_almost_equal(dtype, a, b):
+    if dtype == np.float32 or dtype == np.complex64:
+        prec = 5
+    else:
+        prec = 10
+
+    np.testing.assert_array_almost_equal(a, b, decimal=prec)
+
+def test_gen_eig():
+    def _test_gen_eig(dtype):
+        rand = _Random()
+        a = rand.randmat(4, 4, dtype)
+        b = rand.randmat(4, 4, dtype)
+
+        (alpha, beta, vl, vr) = gen_eig(a, b, True, True)
+
+        assert_array_almost_equal(dtype, np.dot(np.dot(a, vr), beta),
+                                  np.dot(np.dot(b, vr), alpha))
+        assert_array_almost_equal(dtype,
+                                  np.dot(beta, np.dot(np.conj(vl.T), a)),
+                                  np.dot(alpha, np.dot(np.conj(vl.T), b)))
+
+    _test_gen_eig(np.float32)
+    _test_gen_eig(np.float64)
+    _test_gen_eig(np.complex64)
+    _test_gen_eig(np.complex128)
+    #int should be propagated to float64
+    _test_gen_eig(np.int32)
+
+def test_lu():
+    def _test_lu(dtype):
+        rand = _Random()
+        a = rand.randmat(4, 4, dtype)
+        bmat = rand.randmat(4, 4, dtype)
+        bvec = rand.randvec(4, dtype)
+
+        lu = lu_factor(a)
+        xmat = lu_solve(lu, bmat)
+        xvec = lu_solve(lu, bvec)
+
+        assert_array_almost_equal(dtype, np.dot(a, xmat), bmat)
+        assert_array_almost_equal(dtype, np.dot(a, xvec), bvec)
+
+    _test_lu(np.float32)
+    _test_lu(np.float64)
+    _test_lu(np.complex64)
+    _test_lu(np.complex128)
+    #int should be propagated to float64
+    _test_lu(np.int32)
+
+def test_rcond_from_lu():
+    def _test_rcond_from_lu(dtype):
+        rand = _Random()
+        a = rand.randmat(10, 10, dtype)
+
+        norm1_a = np.linalg.norm(a, 1)
+        normI_a = np.linalg.norm(a, np.inf)
+
+        lu = lu_factor(a)
+
+        rcond1 = rcond_from_lu(lu, norm1_a, '1')
+        rcondI = rcond_from_lu(lu, normI_a, 'I')
+
+        err1 = abs(rcond1 -
+                   1/(norm1_a * np.linalg.norm(np.linalg.inv(a), 1)))
+        errI = abs(rcondI -
+                   1/(normI_a * np.linalg.norm(np.linalg.inv(a), np.inf)))
+
+        #rcond_from_lu returns an estimate for the reciprocal
+        #condition number only; hence we shouldn't be too strict about
+        #the assertions here
+        #Note: in my experience the estimate is excellent for somewhat
+        #larger matrices
+        assert_true(err1/rcond1 < 0.1)
+        assert_true(errI/rcondI < 0.1)
+
+    _test_rcond_from_lu(np.float32)
+    _test_rcond_from_lu(np.float64)
+    _test_rcond_from_lu(np.complex64)
+    _test_rcond_from_lu(np.complex128)
+    #int should be propagated to float64
+    _test_rcond_from_lu(np.int32)
+
+def test_schur():
+    def _test_schur(dtype):
+        rand = _Random()
+        a = rand.randmat(5, 5, dtype)
+
+        t, q, ev = schur(a)
+
+        assert_array_almost_equal(dtype, np.dot(np.dot(q, t), np.conj(q.T)), a)
+
+    _test_schur(np.float32)
+    _test_schur(np.float64)
+    _test_schur(np.complex64)
+    _test_schur(np.complex128)
+    #int should be propagated to float64
+    _test_schur(np.int32)
+
+def test_convert_r2c_schur():
+    def _test_convert_r2c_schur(dtype):
+        rand = _Random()
+        a = rand.randmat(10, 10, dtype)
+
+        t, q, ev = schur(a)
+        t2, q2 = convert_r2c_schur(t, q)
+
+        assert_array_almost_equal(dtype, np.dot(np.dot(q, t), np.conj(q.T)), a)
+        assert_array_almost_equal(dtype, np.dot(np.dot(q2, t2), np.conj(q2.T)),
+                                  a)
+
+    _test_convert_r2c_schur(np.float32)
+    _test_convert_r2c_schur(np.float64)
+    #in the complex case the function should actually just copy
+    _test_convert_r2c_schur(np.complex64)
+    _test_convert_r2c_schur(np.complex128)
+    #int should be propagated to float64
+    _test_convert_r2c_schur(np.int32)
+
+def test_order_schur():
+    def _test_order_schur(dtype):
+        rand = _Random()
+        a = rand.randmat(10, 10, dtype)
+
+        t, q, ev = schur(a)
+
+        t2, q2, ev2 = order_schur(lambda i: i>2 and i<7, t, q)
+
+        assert_array_almost_equal(dtype, np.dot(np.dot(q, t), np.conj(q.T)), a)
+        assert_array_almost_equal(dtype, np.dot(np.dot(q2, t2), np.conj(q2.T)),
+                                  a)
+        assert_array_almost_equal(dtype, np.sort(ev), np.sort(ev2))
+        assert_array_almost_equal(dtype, np.sort(ev[3:7]), np.sort(ev2[:4]))
+
+        sel = [False, False, 0, True, True, True, 1, False, False, False]
+
+        t3, q3, ev3 = order_schur(sel, t, q)
+        assert_array_almost_equal(dtype, np.dot(np.dot(q3, t3), np.conj(q3.T)),
+                                  a)
+        assert_array_almost_equal(dtype, t2, t3)
+        assert_array_almost_equal(dtype, q2, q3)
+
+    _test_order_schur(np.float32)
+    _test_order_schur(np.float64)
+    _test_order_schur(np.complex64)
+    _test_order_schur(np.complex128)
+    #int should be propagated to float64
+    _test_order_schur(np.int32)
+
+def test_evecs_from_schur():
+    def _test_evecs_from_schur(dtype):
+        rand = _Random()
+        a = rand.randmat(5, 5, dtype)
+
+        t, q, ev = schur(a)
+
+        vl, vr = evecs_from_schur(t, q, select=None, left=True, right=True)
+
+        assert_array_almost_equal(dtype, np.dot(vr, np.dot(np.diag(ev),
+                                                    np.linalg.inv(vr))), a)
+        assert_array_almost_equal(dtype, np.dot(np.linalg.inv(np.conj(vl.T)),
+                                         np.dot(np.diag(ev), np.conj(vl.T))),
+                                  a)
+
+        select = np.array([True, True, False, False, False], dtype=bool)
+
+        vl, vr = evecs_from_schur(t, q, select, left=True, right=True)
+
+        assert_equal(vr.shape[1], 2)
+        assert_equal(vl.shape[1], 2)
+        assert_array_almost_equal(dtype, np.dot(a, vr),
+                                  np.dot(vr, np.diag(ev[select])))
+        assert_array_almost_equal(dtype, np.dot(vl.T.conj(), a),
+                                  np.dot(np.diag(ev[select]), vl.T.conj()))
+
+        vl, vr = evecs_from_schur(t, q, lambda i: i<2, left=True, right=True)
+
+        assert_equal(vr.shape[1], 2)
+        assert_equal(vl.shape[1], 2)
+        assert_array_almost_equal(dtype, np.dot(a, vr),
+                                  np.dot(vr, np.diag(ev[select])))
+        assert_array_almost_equal(dtype, np.dot(vl.T.conj(), a),
+                                  np.dot(np.diag(ev[select]), vl.T.conj()))
+
+    _test_evecs_from_schur(np.float32)
+    _test_evecs_from_schur(np.float64)
+    _test_evecs_from_schur(np.complex64)
+    _test_evecs_from_schur(np.complex128)
+    #int should be propagated to float64
+    _test_evecs_from_schur(np.int32)
+
+def test_gen_schur():
+    def _test_gen_schur(dtype):
+        rand = _Random()
+        a = rand.randmat(5, 5, dtype)
+        b = rand.randmat(5, 5, dtype)
+
+        s, t, q, z, alpha, beta = gen_schur(a, b)
+
+        assert_array_almost_equal(dtype, np.dot(np.dot(q, s), z.T.conj()), a)
+        assert_array_almost_equal(dtype, np.dot(np.dot(q, t), z.T.conj()), b)
+
+    _test_gen_schur(np.float32)
+    _test_gen_schur(np.float64)
+    _test_gen_schur(np.complex64)
+    _test_gen_schur(np.complex128)
+    #int should be propagated to float64
+    _test_gen_schur(np.int32)
+
+def test_convert_r2c_gen_schur():
+    def _test_convert_r2c_gen_schur(dtype):
+        rand = _Random()
+        a = rand.randmat(10, 10, dtype)
+        b = rand.randmat(10, 10, dtype)
+
+        s, t, q, z, alpha, beta = gen_schur(a, b)
+        s2, t2, q2, z2 = convert_r2c_gen_schur(s, t, q, z)
+
+        assert_array_almost_equal(dtype, np.dot(np.dot(q, s), z.T.conj()), a)
+        assert_array_almost_equal(dtype, np.dot(np.dot(q, t), z.T.conj()), b)
+        assert_array_almost_equal(dtype, np.dot(np.dot(q2, s2), z2.T.conj()),
+                                  a)
+        assert_array_almost_equal(dtype, np.dot(np.dot(q2, t2), z2.T.conj()),
+                                  b)
+
+    _test_convert_r2c_gen_schur(np.float32)
+    _test_convert_r2c_gen_schur(np.float64)
+    #in the complex case the function should actually just copy
+    _test_convert_r2c_gen_schur(np.complex64)
+    _test_convert_r2c_gen_schur(np.complex128)
+    #int should be propagated to float64
+    _test_convert_r2c_gen_schur(np.int32)
+
+def test_order_gen_schur():
+    def _test_order_gen_schur(dtype):
+        rand = _Random()
+        a = rand.randmat(10, 10, dtype)
+        b = rand.randmat(10, 10, dtype)
+
+        s, t, q, z, alpha, beta = gen_schur(a, b)
+
+        s2, t2, q2, z2, alpha2, beta2 = order_gen_schur(lambda i: i>2 and i<7,
+                                                        s, t, q, z)
+
+        assert_array_almost_equal(dtype, np.dot(np.dot(q, s), z.T.conj()), a)
+        assert_array_almost_equal(dtype, np.dot(np.dot(q, t), z.T.conj()), b)
+        assert_array_almost_equal(dtype, np.dot(np.dot(q2, s2), z2.T.conj()),
+                                  a)
+        assert_array_almost_equal(dtype, np.dot(np.dot(q2, t2), z2.T.conj()),
+                                  b)
+
+        #Sorting here is a bit tricky: For real matrices we expect
+        #for complex conjugated pairs identical real parts - however
+        #that seems messed up (only an error on the order of machine precision)
+        #in the division. The solution here is to sort and compare the real
+        #and imaginary parts separately. The only error that would not be
+        #catched in this comparison is if the real and imaginary parts would
+        #be assembled differently in the two arrays - an error that is highly
+        #unlikely.
+        assert_array_almost_equal(dtype, np.sort((alpha/beta).real),
+                                  np.sort((alpha2/beta2).real))
+        assert_array_almost_equal(dtype, np.sort((alpha/beta).imag),
+                                  np.sort((alpha2/beta2).imag))
+        assert_array_almost_equal(dtype, np.sort((alpha[3:7]/beta[3:7]).real),
+                                  np.sort((alpha2[:4]/beta2[:4]).real))
+        assert_array_almost_equal(dtype, np.sort((alpha[3:7]/beta[3:7]).imag),
+                                  np.sort((alpha2[:4]/beta2[:4]).imag))
+
+        sel = [False, False, 0, True, True, True, 1, False, False, False]
+
+        s3, t3, q3, z3, alpha3, beta3 = order_gen_schur(sel, s, t, q, z)
+        assert_array_almost_equal(dtype, np.dot(np.dot(q3, s3), z3.T.conj()),
+                                  a)
+        assert_array_almost_equal(dtype, np.dot(np.dot(q3, t3), z3.T.conj()),
+                                  b)
+        assert_array_almost_equal(dtype, s2, s3)
+        assert_array_almost_equal(dtype, t2, t3)
+        assert_array_almost_equal(dtype, q2, q3)
+        assert_array_almost_equal(dtype, z2, z3)
+
+    _test_order_gen_schur(np.float32)
+    _test_order_gen_schur(np.float64)
+    _test_order_gen_schur(np.complex64)
+    _test_order_gen_schur(np.complex128)
+    #int should be propagated to float64
+    _test_order_gen_schur(np.int32)
+
+
+def test_evecs_from_gen_schur():
+    def _test_evecs_from_gen_schur(dtype):
+        rand = _Random()
+        a = rand.randmat(5, 5, dtype)
+        b = rand.randmat(5, 5, dtype)
+
+        s, t, q, z, alpha, beta = gen_schur(a, b)
+
+        vl, vr = evecs_from_gen_schur(s, t, q, z , select=None,
+                                      left=True, right=True)
+
+        assert_array_almost_equal(dtype, np.dot(a, np.dot(vr, np.diag(beta))),
+                                  np.dot(b, np.dot(vr, np.diag(alpha))))
+        assert_array_almost_equal(dtype,
+                                  np.dot(np.dot(np.diag(beta), vl.T.conj()),
+                                         a),
+                                  np.dot(np.dot(np.diag(alpha), vl.T.conj()),
+                                         b))
+
+        select = np.array([True, True, False, False, False], dtype=bool)
+
+        vl, vr = evecs_from_gen_schur(s, t, q, z, select,
+                                      left=True, right=True)
+
+        assert_equal(vr.shape[1], 2)
+        assert_equal(vl.shape[1], 2)
+        assert_array_almost_equal(dtype,
+                                  np.dot(a, np.dot(vr,
+                                                   np.diag(beta[select]))),
+                                  np.dot(b, np.dot(vr,
+                                                   np.diag(alpha[select]))))
+        assert_array_almost_equal(dtype,
+                                  np.dot(np.dot(np.diag(beta[select]),
+                                                vl.T.conj()),
+                                         a),
+                                  np.dot(np.dot(np.diag(alpha[select]),
+                                                vl.T.conj()),
+                                         b))
+
+        vl, vr = evecs_from_gen_schur(s, t, q, z, lambda i: i<2, left=True,
+                                      right=True)
+
+        assert_equal(vr.shape[1], 2)
+        assert_equal(vl.shape[1], 2)
+        assert_array_almost_equal(dtype,
+                                  np.dot(a, np.dot(vr,
+                                                   np.diag(beta[select]))),
+                                  np.dot(b, np.dot(vr,
+                                                   np.diag(alpha[select]))))
+        assert_array_almost_equal(dtype,
+                                  np.dot(np.dot(np.diag(beta[select]),
+                                                vl.T.conj()),
+                                         a),
+                                  np.dot(np.dot(np.diag(alpha[select]),
+                                                vl.T.conj()),
+                                         b))
+
+    _test_evecs_from_gen_schur(np.float32)
+    _test_evecs_from_gen_schur(np.float64)
+    _test_evecs_from_gen_schur(np.complex64)
+    _test_evecs_from_gen_schur(np.complex128)
+    #int should be propagated to float64
+    _test_evecs_from_gen_schur(np.int32)
diff --git a/kwant/physics/__init__.py b/kwant/physics/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0953be2f2a1c032754a52b555e20e7f238a511a8
--- /dev/null
+++ b/kwant/physics/__init__.py
@@ -0,0 +1,8 @@
+"""Physics-related algorithms"""
+
+# Merge the public interface of all submodules.
+__all__ = []
+for module in ['selfenergy']:
+    exec 'from . import {0}'.format(module)
+    exec 'from .{0} import *'.format(module)
+    exec '__all__.extend({0}.__all__)'.format(module)
diff --git a/kwant/physics/selfenergy.py b/kwant/physics/selfenergy.py
new file mode 100644
index 0000000000000000000000000000000000000000..40ea74c3d67b76e4427a3e936d8e0d118063e47b
--- /dev/null
+++ b/kwant/physics/selfenergy.py
@@ -0,0 +1,739 @@
+from __future__ import division
+from math import sin, cos, sqrt, pi, copysign
+import numpy as np
+import numpy.linalg as npl
+import scipy.linalg as la
+import kwant.linalg as kla
+
+dot = np.dot
+
+__all__ = [ 'self_energy', 'modes' ]
+
+def setup_linsys(h_onslice, h_hop, tol=1e6):
+    """
+    Make an eigenvalue problem for eigenvectors of translation operator.
+
+    Parameters
+    ----------
+    h_onslice : numpy array with shape (n, n)
+        Hamiltonian of a single lead slice.
+    h_hop : numpy array with shape (n, m), m <= n
+        Hopping Hamiltonian from the slice to the next one.
+
+    Returns
+    -------
+    linsys : matrix or tuple
+        if the hopping is nonsingular, a single matrix defining an eigenvalue
+        problem is returned, othewise a tuple of two matrices defining a
+        generalized eigenvalue problem together with additional information is
+        returned.
+
+    Notes
+    -----
+    The lead problem with degenerate hopping is rather complicated, and it is
+    described in kwant/doc/other/lead_modes.pdf.
+    """
+    n = h_onslice.shape[0]
+    m = h_hop.shape[1]
+
+    eps = np.finfo(np.common_type(h_onslice, h_hop)).eps
+
+    # First check if the hopping matrix has eigenvalues close to 0.
+    u, s, vh = la.svd(h_hop)
+
+    assert m == vh.shape[1], "Corrupt output of svd."
+
+    # Count the number of singular values close to zero.
+    # (Close to zero is defined here as |x| < eps * tol * s[0] , where
+    #  s[0] is the largest singular value.)
+    n_nonsing = np.sum(s > eps * tol * s[0])
+
+    if n_nonsing == n:
+        # The hopping matrix is well-conditioned and can be safely inverted.
+        sol = kla.lu_factor(h_hop)
+
+        A = np.empty((2*n, 2*n), dtype=np.common_type(h_onslice, h_hop))
+
+        A[0: n, 0: n] = kla.lu_solve(sol, -h_onslice)
+        A[0: n, n: 2*n] = kla.lu_solve(sol, -h_hop.T.conj())
+        A[n: 2*n, 0: n] = np.identity(n)
+        A[n: 2*n, n: 2*n] = 0
+
+        return A
+    else:
+        # The hopping matrix has eigenvalues close to 0 - those
+        # need to be eliminated.
+
+        # Recast the svd of h_hop = u s v^dagger such that
+        # u, v are matrices with shape n x n_nonsing.
+        u = u[:, : n_nonsing]
+        s = s[: n_nonsing]
+        # pad v with zeros if necessary
+        v = np.zeros((n, n_nonsing), dtype=vh.dtype)
+        v[:vh.shape[1], :] = vh[:n_nonsing, :].T.conj()
+
+        # Eliminating the zero eigenvalues requires inverting the
+        # on-site Hamiltonian, possibly including a self-energy-like term.
+        # The self-energy-like term stabilizes the inversion, but the most
+        # stable choice is inherently complex. This can be disadvantageous
+        # if the Hamiltonian is real - as staying in real arithmetics can be
+        # significantly faster.
+        # The strategy here is to add a complex self-energy-like term
+        # always if the original Hamiltonian is complex, and check for
+        # invertibility first if it is real
+
+        gamma = None
+
+        if issubclass(np.common_type(h_onslice, h_hop), np.floating):
+
+            # Check if stabilization is needed.
+            h = h_onslice
+
+            sol = kla.lu_factor(h)
+            rcond = kla.rcond_from_lu(sol, npl.norm(h, 1))
+
+            if rcond > eps * tol:
+                gamma = 0
+
+        if gamma is None:
+            # Matrices are complex or need self-energy-like term  to be
+            # stabilized.
+
+            # Normalize such that the maximum entry in the
+            # self-energy-like term has a value comparable to the
+            # maximum entry in h_onslice.
+
+            temp = dot(u, u.T.conj()) + dot(v, v.T.conj())
+
+            max_h = np.amax(np.abs(h_onslice))
+            max_temp = np.amax(np.abs(temp))
+
+            gamma = max_h/max_temp * 1j
+
+            h = h_onslice + gamma * temp
+
+            sol = kla.lu_factor(h)
+            rcond = kla.rcond_from_lu(sol, npl.norm(h, 1))
+
+            # If the condition number of the stabilized h is
+            # still bad, there is nothing we can do.
+            if rcond < eps * tol:
+                raise RuntimeError("Flat band encountered at the requested "
+                                   "energy, result is badly defined.")
+
+        # Function that can extract the full wave function psi from
+        # the projected one (v^dagger psi lambda^-1, u^dagger psi).
+
+        def extract_wf(psi, lmbdainv):
+            return kla.lu_solve(sol,
+                                gamma * dot(v, psi[: n_nonsing]) +
+                                gamma * dot(u, psi[n_nonsing :] * lmbdainv) -
+                                dot(u * s, psi[: n_nonsing] * lmbdainv) -
+                                dot(v * s, psi[n_nonsing :]))
+
+        # Project a full wave function back.
+
+        def project_wf(psi, lmbdainv):
+            return np.asarray(np.bmat([[dot(v.T.conj(), psi * lmbdainv)],
+                                       [dot(u.T.conj(), psi)]]))
+
+        # Setup the generalized eigenvalue problem.
+
+        A = np.empty((2 * n_nonsing, 2 * n_nonsing), np.common_type(h, h_hop))
+        B = np.empty((2 * n_nonsing, 2 * n_nonsing), np.common_type(h, h_hop))
+
+        A[: n_nonsing, : n_nonsing] = -np.eye(n_nonsing)
+
+        B[n_nonsing: 2 * n_nonsing,
+          n_nonsing: 2 * n_nonsing] = np.eye(n_nonsing)
+
+        temp = kla.lu_solve(sol, v)
+        temp2 = dot(u.T.conj(), temp)
+        A[n_nonsing: 2 * n_nonsing, : n_nonsing] = gamma * temp2
+        A[n_nonsing: 2 * n_nonsing, n_nonsing: 2 * n_nonsing] = - temp2 * s
+        temp2 = dot(v.T.conj(), temp)
+        A[: n_nonsing, : n_nonsing] += gamma * temp2
+        A[: n_nonsing, n_nonsing: 2 * n_nonsing] = - temp2 * s
+
+        temp = kla.lu_solve(sol, u)
+        temp2 = dot(u.T.conj(), temp)
+        B[n_nonsing:2*n_nonsing, :n_nonsing] = temp2 * s
+        B[n_nonsing:2*n_nonsing, n_nonsing:2*n_nonsing] -= gamma * temp2
+        temp2 = dot(v.T.conj(), temp)
+        B[:n_nonsing, :n_nonsing] = temp2 * s
+        B[:n_nonsing, n_nonsing:2*n_nonsing] = - gamma * temp2
+
+        # Solving a generalized eigenproblem is about twice as expensive
+        # as solving a regular eigenvalue problem.
+        # Computing the LU factorization is negligible compared to both
+        # (approximately 1/30th of a regular eigenvalue problem).
+        # Because of this, it makes sense to try to reduce
+        # the generalized eigenvalue problem to a regular one, provided
+        # the matrix B can be safely inverted.
+
+        lu_b = kla.lu_factor(B)
+        rcond = kla.rcond_from_lu(lu_b, npl.norm(B, 1))
+
+        # I put a more stringent condition here - errors can accumulate
+        # from here to the eigenvalue calculation later.
+        if rcond > eps * tol**2:
+            return (kla.lu_solve(lu_b, A), (u, s, v[:m,:]),
+                    (extract_wf, project_wf))
+        else:
+            return (A, B, (u, s, v[: m]), (extract_wf, project_wf))
+
+
+def split_degenerate(evs, tol=1e6):
+    """
+    Find sets of approximately degenerate list elements on a unit circle.
+
+    Given a list of eigenvalues on the unit circle, return a list containing
+    tuples of indices of eigenvalues that are numerically degenerate. Two
+    eigenvalues ev[i] and ev[j] are considered to be numerically degenerate if
+    abs(ev[i] - ev[j]) < eps * tol, where eps is the machine precision.
+
+    Example
+    -------
+    >>> split_degenerate(np.array([1,-1,1,1], dtype=complex))
+    [(1,), (0, 2, 3)].
+    """
+    eps = np.finfo(evs.dtype).eps
+
+    n = evs.size
+    evlist = []
+
+    # Figure out if there are degenerate eigenvalues.
+    # For this, sort according to k, which is i*log(ev) (ev is exp(-ik)).
+    k = np.log(evs).imag
+    sortindx = np.argsort(k)
+    evs_sorted = evs[sortindx]
+
+    # Note that we sorted eigenvalues on the unit circle, cutting
+    # the unit circle at -1. We thus must search for degeneracies also
+    # across this cut
+
+    start = 0
+    while (start - 1 > -n and
+           abs(evs_sorted[start - 1] - evs_sorted[start]) < eps * tol):
+        start = start - 1
+
+    stop = n + start
+
+    while start < stop:
+        deglist = [sortindx[start]]
+        while (start + 1 < stop and
+               abs(evs_sorted[start] - evs_sorted[start + 1]) < eps * tol):
+            start += 1
+            deglist.append(sortindx[start])
+
+        evlist.append(tuple(deglist))
+        start += 1
+
+    return evlist
+
+
+def make_proper_modes(lmbdainv, psi, h_hop, extract=None,
+                      project=None, tol=1e6):
+    """
+    Determine the velocities and direction of the propagating eigenmodes.
+
+    Special care is taken of the case of degenerate k-values, where the
+    numerically computed modes are typically a superposition of the real
+    modes. In this case, also the proper (orthogonal) modes are computed.
+    """
+
+    vel_eps = np.finfo(np.common_type(psi, h_hop)).eps * tol
+
+    # h_hop is either the full hopping matrix, or the singular
+    # values vector of the svd.
+    if h_hop.ndim == 2:
+        n = h_hop.shape[0]
+        m = h_hop.shape[1]
+    else:
+        n = h_hop.size
+
+    nmodes = psi.shape[1]
+
+    if nmodes == 0:
+        raise ValueError('Empty mode array.')
+
+    # Array for the velocities.
+    v = np.empty(nmodes, dtype=float)
+
+    # Mark the right-going modes.
+    rightselect = np.zeros(nmodes, dtype=bool)
+
+    n_left = n_right = 0
+    crossing = False
+
+    indxclust = split_degenerate(lmbdainv)
+
+    for indx in indxclust:
+        if len(indx) > 1:
+            # Several degenerate propagating modes. In this case, the computed
+            # eigenvectors do not orthogonalize the velocity
+            # operator, i.e. they do not have a proper velocity.
+
+            indx = np.array(indx)
+
+            # If there is a degenerate eigenvalue with several different
+            # eigenvectors, the numerical routines return some arbitrary
+            # overlap of the real, physical solutions. In order
+            # to figure out the correct wave function, we need to
+            # have the full, not the projected wave functions
+            # (at least to our current knowledge).
+
+            if extract is not None:
+                full_psi = extract(psi[:, indx], lmbdainv[indx])
+            else:
+                full_psi = psi[: n, indx]
+
+            # Finding the true modes is done in two steps:
+
+            # 1. The true transversal modes should be orthogonal to
+            # each other, as they share the same Bloch momentum (note
+            # that transversal modes with different Bloch momenta k1
+            # and k2 need not be orthogonal, the full modes are
+            # orthogonal because of the longitudinal dependence
+            # e^{i k1 x} and e^{i k2 x}).
+            # The modes are therefore orthogonalized:
+
+            # Note: Here's a workaround for the fact that the interface
+            # to qr changed from scipy 0.8.0 to 0.9.0
+            try:
+                full_psi = la.qr(full_psi, econ=True, mode='qr')[0]
+            except TypeError:
+                full_psi = la.qr(full_psi, mode='economic')[0]
+
+            if project:
+                psi[:, indx] = project(full_psi, lmbdainv[indx])
+            else:
+                psi[: n, indx] = full_psi * lmbdainv[indx]
+                psi[n: 2*n, indx] = full_psi
+
+            # 2. Moving infinitesimally away from the degeneracy
+            # point, the modes should diagonalize the velocity
+            # operator (i.e. when they are non-degenerate any more)
+            # The modes are therefore rotated properly such that the
+            # diagonalize the velocity operator.
+            # Note that step 2. does not give a unique result if there are
+            # two modes with the same velocity, or if the modes stay
+            # degenerate even for a range of Bloch momenta (and hence
+            # must have the same velocity). However, this does not matter,
+            # as we are happy with any superposition in this case.
+
+            if h_hop.ndim == 2:
+                vel_op = -1j * dot(psi[n :, indx].T.conj(),
+                                     dot(h_hop, psi[: m, indx]))
+            else:
+                vel_op = -1j * dot(psi[n :, indx].T.conj() * h_hop,
+                                     psi[: n, indx])
+
+            vel_op = vel_op + vel_op.T.conj()
+
+            vel_vals, rot = la.eigh(vel_op)
+
+            # If the eigenvectors were purely real up to this stage,
+            # will typically become complex after the rotation.
+            if psi.dtype != np.common_type(psi, rot):
+                psi = psi.astype(np.common_type(psi, rot))
+
+            psi[:, indx] = dot(psi[:, indx], rot)
+
+            v[indx] = vel_vals
+
+            # For some of the self-energy methods it matters
+            # whether the degeneracy is a crossing with velocities
+            # of different sign
+            if not ((vel_vals > 0).all() or (vel_vals < 0).all()):
+                crossing = True
+
+            for (vel, k) in zip(vel_vals, indx):
+                if vel > vel_eps:
+                    n_right += 1
+                    rightselect[k] = True
+                elif vel < -vel_eps:
+                    n_left += 1
+                else:
+                    raise RuntimeError("Found a mode with zero or close to "
+                                       "zero velocity.")
+        else:
+            # A single, unique propagating mode
+            k = indx[0]
+
+            if h_hop.ndim == 2:
+                v[k] = 2 * dot(dot(psi[n: 2*n, k: k + 1].T.conj(), h_hop),
+                               psi[: m, k: k+1]).imag
+            else:
+                v[k] = 2 * dot(psi[n: 2*n, k: k + 1].T.conj() * h_hop,
+                               psi[0: n, k: k + 1]).imag
+
+            if v[k] > vel_eps:
+                rightselect[k] = True
+                n_right += 1
+            elif v[k] < -vel_eps:
+                n_left += 1
+            else:
+                raise RuntimeError("Found a mode with zero or close to "
+                                   "zero velocity.")
+
+    if n_left != n_right:
+        raise RuntimeError("Numbers of left- and right-propagating "
+                           "modes differ.")
+
+    return psi, v, rightselect, crossing
+
+
+def unified_eigenproblem(h_onslice, h_hop, tol):
+    """A helper routine for general() and modes(), that wraps eigenproblems.
+
+    This routine wraps the different types of eigenproblems that can arise
+    in a unfied way.
+
+    Returns
+    -------
+    ev : numpy array
+        an array of eigenvalues (can contain NaNs and Infs, but those
+        are not accessed in `general()` and `modes()`) The number of
+        eigenvalues is given by twice the number of nonzero singular values of
+        `h_hop` (i.e. `2*h_onslice.shape[0]` if `h_hop` is invertible).
+    select : numpy array
+        index array of right-decaying modes.
+    propselect : numpy array
+        index array of propagating modes (both left and right).
+    vec_gen(select) : function
+        a function that computes the eigenvectors chosen by the array select.
+    ord_schur(select) : function
+        a function that computes the unitary matrix (corresponding to the right
+        eigenvector space) of the (general) Schur decomposition reordered such
+        that the eigenvalues chosen by the array select are in the top left
+        block.
+    u, v, w :
+        if the hopping is singular, the svd of the hopping matrix, otherwise
+        they all the three values are None.
+    extract, project : functions
+        functions to extract the full wave function from the projected wave
+        functions, and project it back. Both are equal to None if the hopping
+        is invertible.
+    """
+
+    eps = np.finfo(np.common_type(h_onslice, h_hop)).eps
+
+    linsys = setup_linsys(h_onslice, h_hop)
+
+    if isinstance(linsys, tuple):
+        # In the singular case, it depends on the details of the system
+        # whether one needs to solve a regular or a generalized
+        # eigenproblem.
+
+        assert len(linsys) == 3 or len(linsys) == 4, \
+            "Corrupt lead eigenproblem data."
+
+        if len(linsys) == 3:
+            t, z, ev = kla.schur(linsys[0])
+
+            # Right-decaying modes.
+            select = np.abs(ev) > 1 + eps * tol
+            # Propagating modes.
+            propselect = np.abs(np.abs(ev) - 1) < eps * tol
+
+            u, w, v = linsys[1]
+            extract, project = linsys[2]
+
+            vec_gen = lambda x: kla.evecs_from_schur(t, z, select=x)
+            ord_schur = lambda x: kla.order_schur(x, t, z, calc_ev=False)[1]
+
+        else:
+            s, t, z, alpha, beta = kla.gen_schur(linsys[0], linsys[1],
+                                                 calc_q=False)
+
+            # Right-decaying modes.
+            select = np.abs(alpha) > (1 + eps * tol) * np.abs(beta)
+            # Propagating modes.
+            propselect = (np.abs(np.abs(alpha) - np.abs(beta)) <
+                          eps * tol * np.abs(beta))
+
+            invalid_warning_setting = np.seterr(invalid='ignore')['invalid']
+            ev = alpha/beta
+            np.seterr(invalid=invalid_warning_setting)
+            # Note: the division is OK here, as we later only access
+            #       eigenvalues close to the unit circle
+
+            u, w, v = linsys[2]
+            extract, project = linsys[3]
+
+            vec_gen = lambda x: kla.evecs_from_gen_schur(s, t, z=z, select=x)
+            ord_schur = lambda x: kla.order_schur(x, s, t,
+                                                  z=z, calc_ev=False)[2]
+    else:
+        # Hopping matrix can be safely inverted -> regular eigenproblem can be
+        # used. This also means, that the hopping matrix is n x n square.
+
+        t, z, ev = kla.schur(linsys)
+
+        # Right-decaying modes.
+        select = np.abs(ev) > 1 + eps * tol
+        # Propagating modes.
+        propselect = np.abs(np.abs(ev) - 1) < eps * tol
+
+        # Signal that we are in the regular case.
+        u = v = w = None
+        extract = project = None
+
+        vec_gen = lambda x: kla.evecs_from_schur(t, z, select=x)
+        ord_schur = lambda x: kla.order_schur(x, t, z, calc_ev=False)[1]
+
+    return ev, select, propselect, vec_gen, ord_schur,\
+        u, w, v, extract, project
+
+
+def self_energy(h_onslice, h_hop, tol=1e6):
+    """
+    Compute the self-energy generated by a lead.
+
+    The lead is described by the unit-cell
+    Hamiltonian h_onslice and the hopping matrix h_hop.
+
+    Parameters
+    ----------
+    h_onslice : numpy array, real or complex, shape (N,N) The unit cell
+        Hamiltonian of the lead slice.
+    h_hop : numpy array, real or complex, shape (N,M)
+        the hopping matrix from a lead slice to the one on which self-energy
+        has to be calculated (and any other hopping in the same direction).
+
+    Returns
+    -------
+    Sigma : numpy array, real or complex, shape (M,M)
+        The computed self-energy. Note that even if `h_onslice` and `h_hop`
+        are both real, `Sigma` will typically be complex. (More precisely, if
+        there is a propagating mode, `Sigma` will definitely be complex.)
+
+    Notes
+    -----
+    This function uses the most stable and efficient algorithm for calculating
+    self-energy, described in kwant/doc/other/lead_modes.pdf
+    """
+
+    m = h_hop.shape[1]
+
+    if (h_onslice.shape[0] != h_onslice.shape[1] or
+        h_onslice.shape[0] != h_hop.shape[0]):
+        raise ValueError("Incompatible matrix sizes for h_onslice and h_hop.")
+
+    #defer most of the calculation to a helper routine (also used by modes)
+    ev, select, propselect, vec_gen, ord_schur,\
+        u, w, v, extract, project = unified_eigenproblem(h_onslice, h_hop, tol)
+
+    if w is not None:
+        n = w.size
+        h_hop = w
+    else:
+        n = h_onslice.shape[0]
+
+    # Compute the propagating eigenvectors, if they present.
+    nprop = np.sum(propselect)
+
+    if nprop > 0:
+        prop_vecs = vec_gen(propselect)
+
+        prop_vecs, vel, rselect, crossing = \
+            make_proper_modes(ev[propselect], prop_vecs, h_hop,
+                              extract, project)
+    else:
+        # Without propagating modes, the Schur methods certainly work.
+        crossing = False
+
+    if crossing:
+        # Schur decomposition method does not work in this case, we need to
+        # compute all the eigenvectors.
+
+        # We already have the propagating ones, now we just need the
+        # evanescent ones in addition.
+
+        if nprop > 0:
+            vecs = np.empty((2*n, n),
+                            dtype=np.common_type(ev, prop_vecs))
+        else:
+            vecs = np.empty((2*n, n), dtype=ev.dtype)
+        # Note: rationale for the dtype: only if all the eigenvalues are real,
+        #       (which can only happen if the original eigenproblem was
+        #       real) and all propagating modes are real, the matrix of
+        #       eigenvectors will be real, too.
+
+        if nprop > 0:
+            nrmodes = np.sum(rselect)
+            vecs[:, : nrmodes] = prop_vecs[:, rselect]
+        else:
+            nrmodes = 0
+
+        vecs[:, nrmodes:] = vec_gen(select)
+
+        if v is not None:
+            return dot(v * w, dot(vecs[n :], dot(npl.inv(vecs[: n]),
+                                                 v.T.conj())))
+        else:
+            return dot(h_hop.T.conj(), dot(vecs[n :], npl.inv(vecs[: n])))
+    else:
+        # Reorder all the right-going eigenmodes to the top left part of
+        # the Schur decomposition.
+
+        if nprop > 0:
+            select[propselect] = rselect
+
+        z = ord_schur(select)
+
+        if v is not None:
+            return dot(v * w, dot(z[n :, : n], dot(npl.inv(z[: n, : n]),
+                                                   v.T.conj())))
+        else:
+            return dot(h_hop.T.conj(), dot(z[n:, : n], npl.inv(z[: n, : n])))
+
+def modes(h_onslice, h_hop, tol=1e6):
+    """
+    Compute the eigendecomposition of a translation operator of a lead.
+
+    Parameters
+    ----------
+    h_onslice : numpy array, real or complex, shape (N,N) The unit cell
+        Hamiltonian of the lead slice.
+    h_hop : numpy array, real or complex, shape (N,M)
+        the hopping matrix from a lead slice to the one on which self-energy
+        has to be calculated (and any other hopping in the same direction).
+
+    Returns
+    -------
+    vecs : numpy matrix
+        the matrix of eigenvectors of translation operator.
+    vecslmbdainv : numpy matrix
+        the matrix of eigenvectors multiplied with their corresponding inverse
+        eigenvalue.
+    nrmodes : integer
+        number of propagating modes in either direction.
+    (u, s, v) : singular value decomposition of the hopping matrix.
+        If `h_hop` is invertible, a single None is returned instead.
+
+    Notes
+    -----
+    Only propagating modes and modes decaying away from the system
+    are returned.
+
+    If `h_hop` is invertible, the full transverse wave functions are returned.
+    If it is singular, the projections (u^dagger psi, v^dagger psi lambda^-1)
+    are returned.
+
+    First `nrmodes` are incoming, second `nrmodes` are reflected, the rest are
+    evanescent.
+
+    Propagating modes with the same lambda are orthogonalized. All the
+    propagating modes are normalized by current.
+
+    This function uses the most stable and efficient algorithm for calculating
+    self-energy, described in kwant/doc/other/lead_modes.pdf
+    """
+
+    m = h_hop.shape[1]
+
+    if (h_onslice.shape[0] != h_onslice.shape[1] or
+        h_onslice.shape[0] != h_hop.shape[0]):
+        raise ValueError("Incompatible matrix sizes for h_onslice and h_hop.")
+
+    # Defer most of the calculation to a helper routine.
+    ev, evanselect, propselect, vec_gen, ord_schur, \
+        u, s, v, extract, project = unified_eigenproblem(h_onslice, h_hop, tol)
+
+    if s is not None:
+        n = s.size
+    else:
+        n = h_onslice.shape[0]
+
+    nprop = np.sum(propselect)
+    evan_vecs = vec_gen(evanselect)
+
+    if nprop > 0:
+        # Compute the propagating eigenvectors.
+        prop_vecs = vec_gen(propselect)
+
+        # Compute their velocity, and, if necessary, rotate them
+        if s is not None:
+            prop_vecs, vel, rprop, crossing = \
+                make_proper_modes(ev[propselect], prop_vecs, s,
+                                  extract, project)
+        else:
+            prop_vecs, vel, rprop, crossing = \
+                make_proper_modes(ev[propselect], prop_vecs, h_hop)
+
+        # Normalize propagating eigenvectors by velocities.
+        prop_vecs /= np.sqrt(np.abs(vel))
+
+        # Fix phase factor - make maximum of transverse wave function real
+        # TODO (Anton): Take care of multiple maxima when normalizing.
+        maxnode = prop_vecs[n + np.argmax(np.abs(prop_vecs[n:, :]), axis=0),
+                            np.arange(prop_vecs.shape[1])]
+        maxnode /= np.abs(maxnode)
+        prop_vecs /= maxnode
+
+        lprop = np.logical_not(rprop)
+        nrmodes = np.sum(rprop)
+        vecs = np.c_[prop_vecs[n:, lprop], prop_vecs[n:, rprop],
+                     evan_vecs[n:]]
+        vecslmbdainv = np.c_[prop_vecs[: n, lprop], prop_vecs[: n, rprop],
+                             evan_vecs[: n]]
+
+    else:
+        vecs = evan_vecs[n:]
+        vecslmbdainv = evan_vecs[: n]
+        nrmodes = 0
+
+    if s is not None:
+        return vecs, vecslmbdainv, nrmodes, (u, s, v)
+    else:
+        return vecs, vecslmbdainv, nrmodes, None
+
+
+def square_self_energy(width, hopping, potential, fermi_energy):
+    """
+    Calculate analytically the self energy for a square lattice.
+
+    The lattice is assumed to have a single orbital per site and
+    nearest-neighbor hopping.
+
+    Parameters
+    ----------
+    width : integer
+        width of the lattice
+    """
+
+    # Following appendix C of M. Wimmer's diploma thesis:
+    # http://www.physik.uni-regensburg.de/forschung/\
+    # richter/richter/media/research/publications2004/wimmer-Diplomarbeit.pdf
+
+    # p labels transversal modes.  i and j label the sites of a slice.
+
+    # Precalculate the transverse wave function.
+    psi_p_i = np.empty((width, width))
+    factor = pi / (width + 1)
+    prefactor = sqrt(2 / (width + 1))
+    for p in xrange(width):
+        for i in xrange(width):
+            psi_p_i[p, i] = prefactor * sin(factor * (p + 1) * (i + 1))
+
+    # Precalculate the integrals of the longitudinal wave functions.
+    def f(q):
+        if abs(q) <= 2:
+            return q/2 - 1j * sqrt(1 - (q/2)**2)
+        else:
+            return q/2 - copysign(sqrt((q/2)**2 - 1), q)
+    f_p = np.empty((width,), dtype=complex)
+    for p in xrange(width):
+        e = 2 * hopping * (1 - cos(factor * (p + 1)))
+        q = (fermi_energy - potential - e) / hopping - 2
+        f_p[p] = f(q)
+
+    # Put everything together into the self energy and return it.
+    result = np.empty((width, width), dtype=complex)
+    for i in xrange(width):
+        for j in xrange(width):
+            result[i, j] = hopping * \
+                (psi_p_i[:, i] * psi_p_i[:, j].conj() * f_p).sum()
+    return result
diff --git a/kwant/physics/tests/test_selfenergy.py b/kwant/physics/tests/test_selfenergy.py
new file mode 100644
index 0000000000000000000000000000000000000000..326beb635b3a533624f5f37135ac572bcc609845
--- /dev/null
+++ b/kwant/physics/tests/test_selfenergy.py
@@ -0,0 +1,234 @@
+from __future__ import division
+import numpy as np
+from numpy.testing import assert_almost_equal
+import kwant.physics.selfenergy as se
+
+def test_analytic_numeric():
+    w = 5                       # width
+    t = 0.5                     # hopping element
+    v = 2                       # potential
+    e = 3.3                     # Fermi energy
+
+    h_hop = -t * np.identity(w)
+    h_onslice = ((v + 4 * t - e)
+                 * np.identity(w))
+    h_onslice.flat[1 :: w + 1] = -t
+    h_onslice.flat[w :: w + 1] = -t
+
+    assert_almost_equal(se.square_self_energy(w, t, v, e),
+                        se.self_energy(h_onslice, h_hop))
+
+def test_regular_fully_degenerate():
+    """This testcase features an invertible hopping matrix,
+    and bands that are fully degenerate.
+
+    This case can still be treated with the Schur technique."""
+
+    w = 5                       # width
+    t = 0.5                     # hopping element
+    v = 2                       # potential
+    e = 3.3                     # Fermi energy
+
+    h_hop_s = -t * np.identity(w)
+    h_onslice_s = ((v + 4 * t - e)
+                 * np.identity(w))
+    h_onslice_s.flat[1 :: w + 1] = -t
+    h_onslice_s.flat[w :: w + 1] = -t
+
+    h_hop = np.zeros((2*w, 2*w))
+    h_hop[0:w, 0:w] = h_hop_s
+    h_hop[w:2*w, w:2*w] = h_hop_s
+
+    h_onslice = np.zeros((2*w, 2*w))
+    h_onslice[0:w, 0:w] = h_onslice_s
+    h_onslice[w:2*w, w:2*w] = h_onslice_s
+
+    g = np.zeros((2*w, 2*w), dtype=complex)
+    g[0:w, 0:w] = se.square_self_energy(w, t, v, e)
+    g[w:2*w, w:2*w] = se.square_self_energy(w, t, v, e)
+
+    assert_almost_equal(g,
+                        se.self_energy(h_onslice, h_hop))
+
+def test_regular_degenerate_with_crossing():
+    """This is a testcase with invertible hopping matrices,
+    and degenerate k-values with a crossing such that one
+    mode has a positive velocity, and one a negative velocity
+
+    For this case the fall-back technique must be used.
+    """
+
+    w = 5                       # width
+    t = 0.5                     # hopping element
+    v = 2                       # potential
+    e = 3.3                     # Fermi energy
+
+    h_hop_s = -t * np.identity(w)
+    h_onslice_s = ((v + 4 * t - e)
+                 * np.identity(w))
+    h_onslice_s.flat[1 :: w + 1] = -t
+    h_onslice_s.flat[w :: w + 1] = -t
+
+    h_hop = np.zeros((2*w, 2*w))
+    h_hop[0:w, 0:w] = h_hop_s
+    h_hop[w:2*w, w:2*w] = -h_hop_s
+
+    h_onslice = np.zeros((2*w, 2*w))
+    h_onslice[0:w, 0:w] = h_onslice_s
+    h_onslice[w:2*w, w:2*w] = -h_onslice_s
+
+    g = np.zeros((2*w, 2*w), dtype=complex)
+    g[0:w, 0:w] = se.square_self_energy(w, t, v, e)
+    g[w:2*w, w:2*w] = -np.conj(se.square_self_energy(w, t, v, e))
+
+    assert_almost_equal(g,
+                        se.self_energy(h_onslice, h_hop))
+
+def test_singular():
+    """This testcase features a rectangular (and hence singular)
+     hopping matrix without degeneracies.
+
+    This case can be treated with the Schur technique."""
+
+    w = 5                       # width
+    t = 0.5                     # hopping element
+    v = 2                       # potential
+    e = 3.3                     # Fermi energy
+
+    h_hop_s = -t * np.identity(w)
+    h_onslice_s = ((v + 4 * t - e)
+                 * np.identity(w))
+    h_onslice_s.flat[1 :: w + 1] = -t
+    h_onslice_s.flat[w :: w + 1] = -t
+
+    h_hop = np.zeros((2*w, w))
+    h_hop[w:2*w, 0:w] = h_hop_s
+
+    h_onslice = np.zeros((2*w, 2*w))
+    h_onslice[0:w, 0:w] = h_onslice_s
+    h_onslice[0:w, w:2*w] = h_hop_s
+    h_onslice[w:2*w, 0:w] = h_hop_s
+    h_onslice[w:2*w, w:2*w] = h_onslice_s
+
+    assert_almost_equal(se.square_self_energy(w, t, v, e),
+                        se.self_energy(h_onslice, h_hop))
+
+def test_singular_but_square():
+    """This testcase features a singular, square hopping matrices
+    without degeneracies.
+
+    This case can be treated with the Schur technique."""
+
+    w = 5                       # width
+    t = 0.5                     # hopping element
+    v = 2                       # potential
+    e = 3.3                     # Fermi energy
+
+    h_hop_s = -t * np.identity(w)
+    h_onslice_s = ((v + 4 * t - e)
+                 * np.identity(w))
+    h_onslice_s.flat[1 :: w + 1] = -t
+    h_onslice_s.flat[w :: w + 1] = -t
+
+    h_hop = np.zeros((2*w, 2*w))
+    h_hop[w:2*w, 0:w] = h_hop_s
+
+    h_onslice = np.zeros((2*w, 2*w))
+    h_onslice[0:w, 0:w] = h_onslice_s
+    h_onslice[0:w, w:2*w] = h_hop_s
+    h_onslice[w:2*w, 0:w] = h_hop_s
+    h_onslice[w:2*w, w:2*w] = h_onslice_s
+
+    g = np.zeros((2*w, 2*w), dtype=complex)
+    g[0:w, 0:w] = se.square_self_energy(w, t, v, e)
+
+    assert_almost_equal(g,
+                        se.self_energy(h_onslice, h_hop))
+
+def test_singular_fully_degenerate():
+    """This testcase features a rectangular (and hence singular)
+     hopping matrix with complete degeneracy.
+
+    This case can still be treated with the Schur technique."""
+
+    w = 5                       # width
+    t = 0.5                     # hopping element
+    v = 2                       # potential
+    e = 3.3                     # Fermi energy
+
+    h_hop_s = -t * np.identity(w)
+    h_onslice_s = ((v + 4 * t - e)
+                 * np.identity(w))
+    h_onslice_s.flat[1 :: w + 1] = -t
+    h_onslice_s.flat[w :: w + 1] = -t
+
+    h_hop = np.zeros((4*w, 2*w))
+    h_hop[2*w:3*w, 0:w] = h_hop_s
+    h_hop[3*w:4*w, w:2*w] = h_hop_s
+
+    h_onslice = np.zeros((4*w, 4*w))
+    h_onslice[0:w, 0:w] = h_onslice_s
+    h_onslice[0:w, 2*w:3*w] = h_hop_s
+    h_onslice[w:2*w, w:2*w] = h_onslice_s
+    h_onslice[w:2*w, 3*w:4*w] = h_hop_s
+    h_onslice[2*w:3*w, 0:w] = h_hop_s
+    h_onslice[2*w:3*w, 2*w:3*w] = h_onslice_s
+    h_onslice[3*w:4*w, w:2*w] = h_hop_s
+    h_onslice[3*w:4*w, 3*w:4*w] = h_onslice_s
+
+    g = np.zeros((2*w, 2*w), dtype=complex)
+    g[0:w, 0:w] = se.square_self_energy(w, t, v, e)
+    g[w:2*w, w:2*w] = se.square_self_energy(w, t, v, e)
+
+    assert_almost_equal(g,
+                        se.self_energy(h_onslice, h_hop))
+
+def test_singular_degenerate_with_crossing():
+    """This testcase features a rectangular (and hence singular)
+     hopping matrix with degeneracy k-values including a crossing
+     with velocities of opposite sign.
+
+    This case must be treated with the fall-back technique."""
+
+    w = 5                       # width
+    t = 0.5                     # hopping element
+    v = 2                       # potential
+    e = 3.3                     # Fermi energy
+
+    h_hop_s = -t * np.identity(w)
+    h_onslice_s = ((v + 4 * t - e)
+                 * np.identity(w))
+    h_onslice_s.flat[1 :: w + 1] = -t
+    h_onslice_s.flat[w :: w + 1] = -t
+
+    h_hop = np.zeros((4*w, 2*w))
+    h_hop[2*w:3*w, 0:w] = h_hop_s
+    h_hop[3*w:4*w, w:2*w] = -h_hop_s
+
+    h_onslice = np.zeros((4*w, 4*w))
+    h_onslice[0:w, 0:w] = h_onslice_s
+    h_onslice[0:w, 2*w:3*w] = h_hop_s
+    h_onslice[w:2*w, w:2*w] = -h_onslice_s
+    h_onslice[w:2*w, 3*w:4*w] = -h_hop_s
+    h_onslice[2*w:3*w, 0:w] = h_hop_s
+    h_onslice[2*w:3*w, 2*w:3*w] = h_onslice_s
+    h_onslice[3*w:4*w, w:2*w] = -h_hop_s
+    h_onslice[3*w:4*w, 3*w:4*w] = -h_onslice_s
+
+    g = np.zeros((2*w, 2*w), dtype=complex)
+    g[0:w, 0:w] = se.square_self_energy(w, t, v, e)
+    g[w:2*w, w:2*w] = -np.conj(se.square_self_energy(w, t, v, e))
+
+    assert_almost_equal(g,
+                        se.self_energy(h_onslice, h_hop))
+
+def test_modes():
+    h, t = .3, .7
+    vecs, vecslinv, nrpop, svd = se.modes(np.mat(h), np.mat(t))
+    l = (np.sqrt(h**2 - 4 * t**2 + 0j) - h) / (2 * t)
+    current = np.sqrt(4 * t**2 - h**2)
+    assert nrpop == 1
+    assert svd is None
+    np.testing.assert_almost_equal(vecs, [2 * [1/np.sqrt(current)]])
+    np.testing.assert_almost_equal(vecslinv,
+                                   vecs * np.array([1/l, 1/l.conj()]))
diff --git a/kwant/plotter.py b/kwant/plotter.py
new file mode 100644
index 0000000000000000000000000000000000000000..5339cd4960bb44e1798a73174e6d1cd3c3a40ffe
--- /dev/null
+++ b/kwant/plotter.py
@@ -0,0 +1,873 @@
+"""kwant.plotter docstring"""
+
+from math import sqrt, pi, sin, cos, tan
+import warnings
+import cairo
+try:
+    import Image
+    defaultname = None
+    has_pil = True
+except:
+    defaultname = "plot.pdf"
+    has_pil = False
+
+import kwant
+
+__all__ = ['plot', 'Circle', 'Polygon', 'Line', 'Color', 'LineStyle',
+           'black', 'white', 'red', 'green', 'blue']
+
+class Color(object):
+    """RGBA color.
+
+    Standard Color object that can be used to specify colors in
+    `plot`.
+
+    When creating the Color object, the color is specified in an RGBA scheme,
+    i.e. by specifying the red (r), green (g) and blue (b) components
+    of the color and optionally an alpha channel controlling the transparancy.
+
+    Parameters
+    ----------
+    r, g, b : float in the range [0, 1]
+        specifies the values of the red, green and blue components of the color
+    alpha : float in the range [0, 1], optional
+        specifies the transparancy, with alpha=0 completely transparent and
+        alpha=1 completely opaque (not transparent).
+        Defaults to 1 (opaque).
+
+    Examples
+    --------
+    The color black is specified using
+
+    >>> black = Color(0, 0, 0)
+
+    and white using
+
+    >>> white = Color(1, 1, 1)
+
+    By default, a color is completely opaque (not transparent). Using the
+    optional parameter alpha one can specify transparancy. For example,
+
+    >>> black_transp = Color(0, 0, 0, alpha=0.5)
+
+    is black with 50% transparancy.
+
+    """
+    def __init__(self, r, g, b, alpha=1.0):
+        for val in (r, g, b, alpha):
+            if val < 0 or val > 1:
+                raise ValueError("r, g, b, and alpha must be in "
+                                 "the range [0,1]")
+        self.r = r
+        self.g = g
+        self.b = b
+        self.alpha = alpha
+
+    def _set_color_cairo(self, ctx, fading=None):
+        if fading is not None:
+            ctx.set_source_rgba(self.r + fading[1] * (fading[0].r - self.r),
+                                self.g + fading[1] * (fading[0].g - self.g),
+                                self.b + fading[1] * (fading[0].b - self.b),
+                                self.alpha + fading[1] *
+                                (fading[0].alpha - self.alpha))
+        else:
+            ctx.set_source_rgba(self.r, self.g, self.b, self.alpha)
+
+black = Color(0, 0, 0)
+white = Color(1, 1, 1)
+red = Color(1, 0, 0)
+green = Color(0, 1, 0)
+blue = Color(0, 0, 1)
+
+# TODO: possibly add dashed, etc.
+class LineStyle(object):
+    """Object for describing a line style. Can be used as a parameter in the
+    class `Line`.
+
+    Right now, the LineStyle object only allows to specify the line cap (i.e.
+    the shape of the end of the line). In the future might include dashing,
+    etc.
+
+    Parameters
+    ----------
+    lcap : { 'butt', 'round', 'square'}, optional
+        Specifies the shape of the end of the line:
+
+        'butt'
+           End of the line is rectangular and ends exactly at the end point.
+        'round'
+           End of the line is rounded, as if a half-circle is drawn around the
+           end point.
+        'square'
+           End of the line is rectangular, but protrudes beyond the end point,
+           as if a square was drawn centered at the end point.
+
+        Defaults to 'butt'.
+    """
+    def __init__(self, lcap="butt"):
+        if lcap == "butt":
+            self.lcap = cairo.LINE_CAP_BUTT
+        elif lcap == "round":
+            self.lcap = cairo.LINE_CAP_ROUND
+        elif lcap == "square":
+            self.lcap = cairo.LINE_CAP_SQUARE
+        else:
+            raise ValueError("Unknown line cap style "+lcap)
+
+    def _set_props_cairo(self, ctx, reflen):
+        ctx.set_line_cap(self.lcap)
+
+class Line(object):
+    """Draws a straight line between the two sites connected by a hopping.
+
+    Standard object that can be used to specify how to draw
+    a line representing a hopping in `plot`.
+
+    Parameters
+    ----------
+    lw : float
+        line width relative to the reference length (see `plot`)
+    lcol : object realizing the "color functionality" (see `plot`)
+        line color
+    lsty : a LineStyle object
+        line style
+    """
+    def __init__(self, lw, lcol=black, lsty=LineStyle()):
+        self.lw = lw
+        self.lcol = lcol
+        self.lsty = lsty
+
+    def _draw_cairo(self, ctx, pos1, pos2, reflen, fading=None):
+        ctx.new_path()
+        if self.lw > 0 and self.lcol is not None and self.lsty is not None:
+            ctx.set_line_width(self.lw * reflen)
+            self.lcol._set_color_cairo(ctx, fading=fading)
+            self.lsty._set_props_cairo(ctx, reflen)
+            ctx.move_to(pos1[0], pos1[1])
+            ctx.line_to(pos2[0], pos2[1])
+            ctx.stroke()
+
+class Circle(object):
+    """Draw circle with (relative) radius r centered at a site.
+
+    Standard symbol object that can be used with `plot`.
+    Sizes are always given in terms of the reference length
+    of `plot`.
+
+    Parameters
+    ----------
+    r : float
+       Radius of the circle
+    fcol : color_like object or None, optional
+       Fill color. If None, the circle is not filled. Defaults to black.
+    lw : float, optional
+       Line width of the outline. If 0, no outline is drawn.
+       Defaults to 0.1.
+    lcol : color_like object or None, optional
+       Color of the outline. If None, no outline is drawn. Defaults to None.
+    lsty : `LineStyle` object
+       Line style of the outline. Defaults to LineStyle().
+    """
+    def __init__(self, r, fcol=black, lw=0.1, lcol=None, lsty=LineStyle()):
+        self.r = r
+        self.fcol = fcol
+        self.lw = lw
+        self.lcol= lcol
+        self.lsty = lsty
+
+    def _draw_cairo(self, ctx, pos, reflen, fading=None):
+        ctx.new_path()
+
+        if self.fcol is not None:
+            self.fcol._set_color_cairo(ctx, fading=fading)
+            ctx.arc(pos[0], pos[1], self.r * reflen, 0, 2*pi)
+            ctx.fill()
+
+        if self.lw > 0 and self.lcol is not None and self.lsty is not None:
+            ctx.set_line_width(self.lw * reflen)
+            self.lcol._set_color_cairo(ctx, fading=fading)
+            self.lsty._set_props_cairo(ctx, reflen)
+            ctx.arc(pos[0], pos[1], self.r * reflen, 0, 2*pi)
+            ctx.stroke()
+
+class Polygon(object):
+    """Draw a regular n-sided polygon centered at a site.
+
+    Standard symbol object that can be used with `plot`.
+    Sizes are always given in terms of the reference length
+    of `plot`.
+
+    The size of the polygon can be specifed in one of two ways:
+     - either by specifying the side length `a`
+     - or by demanding that the area of the polygon is equal to a circle
+       with radius `size`
+
+    Parameters
+    ----------
+    n : int
+        Number of sides (i.e. `n=3` is a triangle, `n=4` a square, etc.)
+    a, size : float, exactly one must be given
+        The size of the polygon, either specified by the side length `a`
+        or the radius `size` of a circle of equal area.
+    angle : float, optional
+        Rotate the polygon counter-clockwise by `angle` (specified
+        in radians. Defaults to 0.
+    fcol : color_like object or None, optional
+        Fill color. If None, the polygon is not filled. Defaults to black.
+    lw : float, optional
+        Line width of the outline. If 0, no outline is drawn.
+        Defaults to 0.1.
+    lcol : color_like object or None, optional
+        Color of the outline. If None, no outline is drawn. Defaults to None.
+    lsty : `LineStyle` object
+        Line style of the outline. Defaults to LineStyle().
+    """
+    def __init__(self, n, a=None, size=None,
+                 angle=0, fcol=black, lw=0.1, lcol=None, lsty=LineStyle()):
+        if ((a is None and size is None) or
+            (a is not None and size is not None)):
+            raise ValueError("Either sidelength or equivalent circle radius "
+                             "must be specified")
+
+        self.n = n
+        if a is None:
+            # make are of triangle equal to circle of radius size
+            a = sqrt(4 * tan(pi / n) / n * pi) * size
+        # note: self.rc is the radius of the circumscribed circle
+        self.rc = a / (2 * sin(pi / n))
+        self.angle = angle
+        self.fcol = fcol
+        self.lw = lw
+        self.lcol = lcol
+        self.lsty = lsty
+
+    def _draw_cairo_poly(self, ctx, pos, reflen):
+        ctx.move_to(pos[0] + sin(self.angle) * self.rc * reflen,
+                    pos[1] + cos(self.angle) * self.rc * reflen)
+        for i in xrange(1, self.n):
+            phi = i * 2 * pi / self.n
+            ctx.line_to(pos[0] + sin(self.angle + phi) * self.rc * reflen,
+                        pos[1] + cos(self.angle + phi) * self.rc * reflen)
+        ctx.close_path()
+
+    def _draw_cairo(self, ctx, pos, reflen, fading=None):
+        ctx.new_path()
+
+        if self.fcol is not None:
+            self.fcol._set_color_cairo(ctx, fading=fading)
+            self._draw_cairo_poly(ctx, pos, reflen)
+            ctx.fill()
+
+        if self.lw > 0 and self.lcol is not None and self.lsty is not None:
+            ctx.set_line_width(self.lw * reflen)
+            self.lcol._set_color_cairo(ctx, fading=fading)
+            self.lsty._set_props_cairo(ctx, reflen)
+            self._draw_cairo_poly(ctx, pos, reflen)
+            ctx.stroke()
+
+
+def plot(system, filename=defaultname, fmt=None, a=None,
+         width=600, height=None, border=0.1, bcol=white, pos=None,
+         symbols=Circle(r=0.3), lines=Line(lw=0.1),
+         lead_symbols=-1, lead_lines=-1,
+         lead_fading=[0.6, 0.85]):
+    """Plot two-dimensional systems (or two-dimensional representations
+    of a system).
+
+    `plot` can be used to plot both unfinalized systems derived from
+    kwant.builder.Builder, or the corresponding finalized systems.
+
+    The output of `plot` is highly modifyable, as it does not perform
+    any drawing itself, but instead lets objects passed by the user
+    (or as default parameters) do the actual drawing work. `plot`
+    itself does figure out the range of positions occupied by the
+    sites, as well as the smallest distance between two sites which
+    then serves as a reference length, unless the user specifies
+    explicitely a reference length. This reference length is then
+    used so that the sizes of symbols or lines are always given relative
+    to that reference length. This is particularly advantageous for
+    regular lattices, as it makes it easy to specify the area covered
+    by symbols, etc.
+
+    The objects that determine `plot`'s behavior are symbol_like
+    (symbols representing sites), line_like (lines representing
+    hoppings) and color_like (representing colors). The notes below
+    explain in detail how to implement custom classes. In most cases
+    it is enough to use the predefined standard objects:
+
+    - for symbol_like: `Circle` and `Polygon`
+    - for line_like: `Line`
+    - for color_like: `Color`.
+
+    `plot` draws both system sites, as well as sites corresponding to the
+    leads. For the leads, several copies of the lead unit cell are plotted
+    (per default 2), and they are gradually faded towards the background
+    color (at least in the default behavior).
+
+    Parameters
+    ----------
+    system : (un)finalized system
+        System to plot. Either an unfinalized Builder
+        (instance of `kwant.builder.Builder`)
+        or a finalized builder (instance of
+        `kwant.builder.FiniteSystem`).
+    filename : string or None, optional
+        Name of the file the plot should be written to. The format
+        of the file can be determined from the suffix (see `fmt`).
+        If None, the plot is output on the screen [provided that the
+        Python Image Library (PIL) is installed]. Default is
+        None if the PIL is installed, and "plot.pdf" otherwise.
+    fmt : {"pdf", "ps", "eps", "svg", "png", "jpg", None}, optional
+        Format of the output file, if `filename` is not None. If
+        `fmt` is None, the format is determined from the suffix of the
+        `filename`. Defaults to None.
+    a : float, optional
+        Reference length. If None, the reference length is determined
+        as the smallest nonzero distance between sites. Defaults to None.
+    width, height : float or None, optional
+        Width and height of the output picture. In units of
+        "pt" for the vector graphics formats (pdf, ps, eps, svg)
+        and in pixels for the bitmap formats (png, jpg, and output to screen).
+        For the bitmap formats, `width` and `height` are rounded to the nearest
+        integer. One of `width` and `height` may be None (but not both
+        simultaneously). In this case, the unspecified size is chosen to
+        fit with the aspect ratio of the plot. If both are specified, the plot
+        is centered on the canvas (possibly with increasing the blank borders).
+        `width` defaults to 600, and `height` to None.
+    border : float, optional
+        Size of the blank border around the plot, relative to the
+        total size. Defaults to 0.1.
+    bcol : color_like, optional
+        Background color. Defaults to white.
+
+        (If the plot is saved in a vector graphics format, `white`
+        actually corresponds to no background.  This is a bit hacky
+        maybe [fading to bcol e.g. still makes a white symbol, not a
+        transparant symbol], but then again there is no reason for
+        having a white box behind everything)
+    pos : callable or None, optional
+        When passed a site should return its (2D) position as a numpy array of
+        length 2. If None, the method pos() of the site is used.
+        Defaults to None.
+    symbols : {symbol_like, callable, dict, None}, optional
+        Object responsible for drawing the symbols correspodning to sites.
+        Either must be a single symbol_like object (the same symbol is drawn
+        for every site, regardless of site group), a callable that
+        returns a symbol_like object when passed a site, a dictionary
+        with site groups as keys and symbol_like as values
+        (allowing to specify different symbols for different site groups),
+        or None (in which case no symbols are drawn). Instead of
+        a symbol_like object the callable or the dict may also return None
+        corresponding to no symbol. Defaults to ``Circle(r=0.3)``.
+
+        The standard symbols available are `Circle` and
+        `Polygon`.
+    lines : {line_like, callable, dict, None}, optional
+        Object responsible for drawing the lines representing the
+        hoppings between sites. Either a single line_like object
+        (the same type of line is drawn for all hoppings), a callable
+        that returns a line_like object when passed two sites,
+        a dictionary with tuples of two site groups as keys and
+        line_like objects as values (allowing to specify different
+        line styles for different hoppings; note that if the hopping
+        (a, b) is specified, (b, a) needs not be included in the dictionary),
+        or None (in which case no hoppings are drawn). Instead of
+        a line_like object the callable or the dict may also return None
+        corresponding to no line. Defaults to ``Line(lw=0.1)``.
+
+        The standard line available is `Line`.
+    lead_symbols : {symbol_like, callable, dict, -1, None}, optional
+        Symbols to be drawn for the sites in the leads. The special
+        value -1 indicates that `symbols` (which is used for system sites)
+        should be used also for the leads. The other possible values are
+        as for the system `symbols`.
+        Defaults to -1.
+    lead_lines : {line_like, callable, dict, -1, None}, optional
+        Lines to be drawn for the hoppings in the leads. The special
+        value -1 indicates that `lines` (which is used for system hoppings)
+        should be used also for the leads. The other possible values are
+        as for the system `lines`.
+        Defaults to -1.
+    lead_fading : list, optional
+        The number of entries in the list determines the number of
+        lead unit cells that are plotted. The unit cell `i` is then
+        faded by the ratio ``lead_fading[i]`` towards the
+        background color `bcol`. Here ``lead_fading[i]==0`` implies no fading
+        (i.e. the original symbols and lines),
+        whereas ``lead_fading[i]==1`` corresponds to the background color.
+
+    Notes
+    -----
+
+    `plot` knows three different legitimate classes representing
+    symbols (symbol_like), lines (line_like), and colors (color_like).
+    In order to serve as a legitimate object for these,
+    a class has to implement certain methods. In particular these
+    are
+
+    - symbol_like: objects representing symbols for sites::
+
+         _draw_cairo(ctx, pos, reflen[, fading])
+
+      which draws the symbol onto the cairo context `ctx`
+      at the position `pos` (passed as a numpy array of length 2).
+      `reflen` is the reference length, allowing the symbol to use
+      relative sizes. (Note though that `pos` is in **absolute** cairo
+      coordinates).
+
+      If the symbol should also be used to draw leads, `_draw_cairo`
+      should also take the optional parameter `fading` wich is a tuple
+      `(fadecol, percent)` where `fadecol` is the color towards which
+      the symbol should be faded, and `percent` is a number between 0
+      and 1 indicating the amount of fading, with `percent=0` no
+      fading, and `percent=1` fully faded to `fadecol`. Note that
+      while "fading" usually will imply color fading, this is not
+      required by plot. Anything conceivable is legitimate.
+
+      The module :mod:`plot` provides two standard symbol classes:
+      `Circle` and `Polygon`.
+
+    - line_like: objects representing lines for hoppings::
+
+         _draw_cairo(ctx, pos1, pos2, reflen[, fading])
+
+      which draws the something (typically a line of some sort) onto
+      the cairo context `ctx` connecting the position `pos1` and
+      `pos2` (passed as a numpy arrays of length 2).  `reflen` is the
+      reference length, allowing the line to use relative sizes. (Note
+      though that `pos1` and `pos2` are in **absolute** cairo
+      coordinates).
+
+      If the line should also be used to draw leads, `_draw_cairo`
+      should also take the optional parameter `fading` wich is a tuple
+      `(fadecol, percent)` where `fadecol` is the color towards which
+      the symbol should be faded, and `percent` is a number between 0
+      and 1 indicating the amount of fading, with `percent=0` no
+      fading, and `percent=1` fully faded to `fadecol`. Note that
+      while "fading" usually will imply color fading, this is not
+      required by plot. Anything conceivable is legitimate.
+
+      The module :mod:`plot` provides one standard line class: `Line`.
+
+    - color_like: for objects representing colors::
+
+         def _set_color_cairo(ctx[, fading]):
+
+      which sets the current color of the cairo context `ctx`.
+
+      If the color is passed to an object that requires fading in
+      order to be applicable for the representation of leads,
+      it must also take the optional parameter 'fading' wich is a tuple
+      `(fadecol, percent)` where `fadecol` is the color towards which
+      the symbol should be faded, and `percent` is a number between 0
+      and 1 indicating the amount of fading, with `percent=0` no
+      fading, and `percent=1` fully faded to `fadecol`. Note that
+      while "fading" usually will imply color fading, this is not
+      required by plot. Anything conceivable is legitimate.
+
+      The module :mod:`plot` provides one standard color class:
+      `Color`. In addition, a few common colors are predefined
+      as instances of `Color`:`black`, `white`, `red`, `green`,
+      and `blue`.
+    """
+
+    def iterate_lead_sites_builder(system, lead_copies):
+        for lead in system.leads:
+            if not isinstance(lead, kwant.builder.BuilderLead):
+                continue
+            sym = lead.builder.symmetry
+            shift = sym.which(lead.neighbors[0]) + 1
+
+            for i in xrange(lead_copies):
+                for site in lead.builder.sites():
+                    yield sym.act(shift + i, site), i
+
+    def iterate_lead_hoppings_builder(system, lead_copies):
+        for lead in system.leads:
+            if not isinstance(lead, kwant.builder.BuilderLead):
+                continue
+            sym = lead.builder.symmetry
+            shift = sym.which(lead.neighbors[0]) + 1
+
+            for i in xrange(lead_copies):
+                for site1, site2 in lead.builder.hoppings():
+                    shift1 = sym.which(site1)[0]
+                    shift2 = sym.which(site2)[0]
+                    if shift1 >= shift2:
+                        yield (sym.act(shift + i, site1),
+                               sym.act(shift + i, site2),
+                               i + shift1, i  + shift2)
+                    else:
+                        # Note: this makes sure that hoppings beyond the unit
+                        #       cell are always ordered such that they are into
+                        #       the previous slice
+                        yield (sym.act(shift + i - 1, site1),
+                               sym.act(shift + i - 1, site2),
+                               i - 1 + shift1, i - 1 + shift2)
+
+    def iterate_system_sites_builder(system):
+        for site in system.sites():
+            yield site
+
+    def iterate_system_hoppings_builder(system):
+        for hopping in system.hoppings():
+            yield hopping
+
+    def iterate_lead_sites_llsys(system, lead_copies):
+        for ilead in xrange(len(system.leads)):
+            lead = system.leads[ilead]
+            sym = lead.symmetry
+            shift = sym.which(system.site(system.lead_neighbor_seqs[ilead][0]))
+            shift += 1
+
+            for i in xrange(lead_copies):
+                for isite in xrange(lead.slice_size):
+                        yield sym.act(shift + i, lead.site(isite)), i
+
+    def iterate_lead_hoppings_llsys(system, lead_copies):
+        for ilead in xrange(len(system.leads)):
+            lead = system.leads[ilead]
+            sym = lead.symmetry
+            shift = sym.which(system.site(system.lead_neighbor_seqs[ilead][0]))
+            shift += 1
+
+            for i in xrange(lead_copies):
+                for isite in xrange(lead.slice_size):
+                    for jsite in lead.graph.out_neighbors(isite):
+                        # Note: unlike in builder, it is guaranteed
+                        #       in the finalized system that hoppings
+                        #       beyond the unit cell are in the previous
+                        #       "slice"
+                        if jsite < lead.slice_size:
+                            yield (sym.act(shift + i, lead.site(isite)),
+                                   sym.act(shift + i, lead.site(jsite)),
+                                   i, i)
+                        else:
+                            jsite -= lead.slice_size
+                            yield (sym.act(shift + i, lead.site(isite)),
+                                   sym.act(shift + i - 1, lead.site(jsite)),
+                                   i, i - 1)
+
+
+    def iterate_system_sites_llsys(system):
+        for i in xrange(system.graph.num_nodes):
+            yield system.site(i)
+
+    def iterate_system_hoppings_llsys(system):
+        for i in xrange(system.graph.num_nodes):
+            site1 = system.site(i)
+            for j in system.graph.out_neighbors(i):
+                # Only yield half of the hoppings (as builder does)
+                if i < j:
+                    yield (site1, system.site(j))
+
+
+    def iterate_all_sites(system, lead_copies=0):
+        for site in iterate_system_sites(system):
+            yield site
+
+        for site, ucindx in iterate_lead_sites(system, lead_copies):
+            yield site
+
+    def iterate_all_hoppings(system, lead_copies=0):
+        for site1, site2 in iterate_system_hoppings(system):
+            yield site1, site2
+
+        for site1, site2, i1, i2 in iterate_lead_hoppings(system, lead_copies):
+            yield site1, site2
+
+    if isinstance(system, kwant.builder.Builder):
+        iterate_system_sites = iterate_system_sites_builder
+        iterate_system_hoppings = iterate_system_hoppings_builder
+        iterate_lead_sites = iterate_lead_sites_builder
+        iterate_lead_hoppings = iterate_lead_hoppings_builder
+    elif isinstance(system, kwant.builder.FiniteSystem):
+        iterate_system_sites = iterate_system_sites_llsys
+        iterate_system_hoppings = iterate_system_hoppings_llsys
+        iterate_lead_sites = iterate_lead_sites_llsys
+        iterate_lead_hoppings = iterate_lead_hoppings_llsys
+    else:
+        raise ValueError("Plotting not suported for given system")
+
+    if width is None and height is None:
+        raise ValueError("One of width and height must be not None")
+
+    if a is None:
+        dist = 0
+    else:
+        if a > 0:
+            dist = a
+        else:
+            raise ValueError("The distance a must be >0")
+
+    if pos is None:
+        pos = lambda site: site.pos
+
+    if fmt is None and filename is not None:
+        # Try to figure out the format from the filename
+        fmt = filename.split(".")[-1].lower()
+    elif fmt is not None and filename is None:
+        raise ValueError("If fmt is specified, filename must be given, too")
+
+    if fmt not in [None, "pdf", "ps", "eps", "svg", "png", "jpg"]:
+        raise ValueError("Unknwon format " + fmt)
+
+    # Those two need the PIL
+    if fmt in [None, "jpg"] and not has_pil:
+        raise ValueError("The requested functionality requires the "
+                         "Python Image Library (PIL)")
+
+    # symbols and lines may be constant, functions or dicts
+    # Here they are wrapped with a function
+
+    if hasattr(symbols, "__call__"):
+        fsymbols = symbols
+    elif hasattr(symbols, "__getitem__"):
+        fsymbols = lambda x : symbols[x]
+    else:
+        fsymbols = lambda x : symbols
+
+    if hasattr(lines, "__call__"):
+        flines = lines
+    elif hasattr(lines, "__getitem__"):
+        flines = lambda x, y : lines[x,y] if (x,y) in lines else lines[y,x]
+    else:
+        flines = lambda x, y : lines
+
+    if lead_symbols == -1:
+        flsymbols = fsymbols
+    elif hasattr(lead_symbols, "__call__"):
+        flsymbols = lead_symbols
+    elif hasattr(lead_symbols, "__getitem__"):
+        flsymbols = lambda x : lead_symbols[x]
+    else:
+        flsymbols = lambda x : lead_symbols
+
+    if lead_lines == -1:
+        fllines = flines
+    elif hasattr(lead_lines, "__call__"):
+        fllines = lead_lines
+    elif hasattr(lines, "__getitem__"):
+        fllines = lambda x, y : (lead_lines[x,y]
+                                 if (x,y) in lead_lines else lead_lines[y,x])
+    else:
+        fllines = lambda x, y : lead_lines
+
+
+    #Figure out the extent of the system
+    nsites = 0
+    first = True
+    for site in iterate_all_sites(system, len(lead_fading)):
+        sitepos = pos(site)
+        nsites += 1
+
+        if len(sitepos) != 2:
+            raise RuntimeError("Only 2 dimensions are supported by plot")
+
+        if first:
+            minx = maxx = sitepos[0]
+            miny = maxy = sitepos[1]
+            first = False
+        else:
+            minx = min(sitepos[0], minx)
+            maxx = max(sitepos[0], maxx)
+            miny = min(sitepos[1], miny)
+            maxy = max(sitepos[1], maxy)
+
+    if nsites == 0:
+        warnings.warn("Empty system. No output generated");
+        return
+
+    rangex = (maxx - minx) / (1 - 2 * border)
+    rangey = (maxy - miny) / (1 - 2 * border)
+
+    # If the user gave no typical distance between sites, we need to figure it
+    # out ourselves
+    # (Note: it is enough to consider one copy of the lead unit cell for
+    #        figuring out distances, because of the translational symmetry)
+    if a is None:
+        first = True
+        for site1, site2 in iterate_all_hoppings(system, lead_copies=1):
+
+            # TODO: can I assume always numpy?
+            sitedist = sqrt((pos(site1)[0] - pos(site2)[0])**2 +
+                            (pos(site1)[1] - pos(site2)[1])**2)
+
+            if sitedist > 0:
+                if first:
+                    dist = sitedist
+                    first = False
+                else:
+                    dist = min(dist, sitedist)
+
+        # If there were no hoppings, then we can only find the distance
+        # by checking the distance between all sites (potentially slow)
+        if dist == 0:
+            warnings.warn("Finding the typical distance automatically"
+                          "may be slow!")
+
+            first = True
+            # TODO: hm, in this way I will check distances always twice
+            # in principle, it would be enoughto go through all sites
+            # site2 *after* site1
+            # it's only a factor 2 in speed, so not clear if it makes
+            # sense to change it
+            for site1 in iterate_all_sites(system, lead_copies=1):
+                for site2 in iterate_all_sites(system, lead_copies=1):
+
+                    # TODO: can I assume always numpy?
+                    sitedist = sqrt((pos(site1)[0] - pos(site2)[0])**2 +
+                                    (pos(site1)[1] - pos(site2)[1])**2)
+
+                    if sitedist > 0:
+                        if first:
+                            dist = sitedist
+                            first = False
+                        else:
+                            dist = min(dist, sitedist)
+
+        # If dist ist still 0, all sites sit at the same spot
+        # In this case I can just use any value for dist
+        # (rangex and rangey will also be 0 then)
+        if dist == 0:
+            dist = 1
+
+    # Use the typical distance, if one of the ranges is 0
+    # (e.g. in a one-dimensional system)
+
+    if rangex == 0:
+        rangex = dist / (1 - 2 * border)
+    if rangey == 0:
+        rangey = dist / (1 - 2 * border)
+
+    # Compare with the desired dimensions of the plot
+
+    if height is None:
+        height = width * rangey / rangex
+    elif width is None:
+        width = height * rangex / rangey
+    else:
+        # both width and height specified
+        # check in which direction to expand the border
+        if width/height > rangex / rangey:
+            rangex = rangey * width / height
+        else:
+            rangey = rangex * height / width
+
+    # Setup cairo
+    if fmt == "pdf":
+        surface = cairo.PDFSurface(filename, width, height)
+    elif fmt == "ps":
+        surface = cairo.PSSurface(filename, width, height)
+    elif fmt == "eps":
+        surface = cairo.PSSurface(filename, width, height)
+        surface.set_eps(True)
+    elif fmt == "svg":
+        surface = cairo.SVGSurface(filename, width, height)
+    elif fmt == "png" or fmt == "jpg" or fmt is None:
+        surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
+                                     int(round(width)), int(round(height)))
+
+    ctx = cairo.Context(surface)
+
+    # The default background in the image surface is black
+    if fmt == "png" or fmt == "jpg" or fmt is None:
+        bcol._set_color_cairo(ctx)
+        ctx.rectangle(0, 0, int(round(width)), int(round(height)))
+        ctx.fill()
+    elif bcol is not white:
+        # only draw a background rectangle if background color is not white
+        bcol._set_color_cairo(ctx)
+        ctx.rectangle(0, 0, width, height)
+        ctx.fill()
+
+    # Setup the coordinate transformation
+
+    # Note: Cairo uses a coordinate system
+    #  ---> x     positioned in the left upper corner
+    #  |          of the screen.
+    #  v y
+    #
+    # Instead, we use a mathematical coordinate system.
+
+    # TODO: figure out, if file sizes are smaller without transformation
+    #       i. e. if we do the transformation ourselves
+    scrminx = width * 0.5 * (rangex - (maxx - minx)) / rangex
+    scrminy = height * 0.5 * (rangey - (maxy - miny)) / rangey
+
+    ctx.translate(scrminx, height - scrminy)
+    ctx.scale(width/rangex, -height/rangey)
+    ctx.translate(-minx, -miny)
+
+    # Now draw the system!
+
+    # The lines for the hoppings
+
+    for site1, site2 in iterate_system_hoppings(system):
+        line = flines(site1.group, site2.group)
+
+        if line is not None:
+            line._draw_cairo(ctx, pos(site1), pos(site2), dist)
+
+    for site1, site2, ucindx1, ucindx2 in \
+            iterate_lead_hoppings(system, len(lead_fading)):
+        if ucindx1 == ucindx2:
+            line = fllines(site1.group, site2.group)
+
+            if line is not None:
+                line._draw_cairo(ctx, pos(site1), pos(site2), dist,
+                                 fading=(bcol, lead_fading[ucindx1]))
+        else:
+            if ucindx1 > -1:
+                line = fllines(site1.group, site2.group)
+                if line is not None:
+                    line._draw_cairo(ctx, pos(site1), (pos(site1)+pos(site2))/2,
+                                     dist, fading=(bcol, lead_fading[ucindx1]))
+            else:
+                #one end of the line is in the system
+                line = flines(site1.group, site2.group)
+                if line is not None:
+                    line._draw_cairo(ctx, pos(site1), (pos(site1)+pos(site2))/2,
+                                     dist)
+
+            if ucindx2 > -1:
+                line = fllines(site2.group, site1.group)
+                if line is not None:
+                    line._draw_cairo(ctx, pos(site2), (pos(site1)+pos(site2))/2,
+                                     dist, fading=(bcol, lead_fading[ucindx2]))
+            else:
+                #one end of the line is in the system
+                line = flines(site2.group, site1.group)
+                if line is not None:
+                    line._draw_cairo(ctx, pos(site2), (pos(site1)+pos(site2))/2,
+                                     dist)
+    # the symbols for the sites
+
+    for site in iterate_system_sites(system):
+        symbol = fsymbols(site.group)
+
+        if symbol is not None:
+            symbol._draw_cairo(ctx, pos(site), dist)
+
+    for site, ucindx in iterate_lead_sites(system,
+                                           lead_copies=len(lead_fading)):
+        symbol = flsymbols(site.group)
+
+        if symbol is not None:
+            symbol._draw_cairo(ctx, pos(site), dist,
+                               fading=(bcol, lead_fading[ucindx]))
+
+
+    # Show or save the picture, if necessary (depends on format)
+    if fmt == None:
+        im = Image.frombuffer("RGBA",
+                              (surface.get_width(), surface.get_height()),
+                              surface.get_data(), "raw", "BGRA", 0, 1)
+        im.show()
+    elif fmt == "png":
+        surface.write_to_png(filename)
+    elif fmt == "jpg":
+        im = Image.frombuffer("RGBA",
+                              (surface.get_width(), surface.get_height()),
+                              surface.get_data(), "raw", "BGRA", 0, 1)
+        im.save(filename, "JPG")
diff --git a/kwant/run.py b/kwant/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..9074b76f9bfd8994f2a63439a3dbfec3aa532cb3
--- /dev/null
+++ b/kwant/run.py
@@ -0,0 +1,67 @@
+"""Support for running functions from the command line"""
+
+from __future__ import division
+import os, struct, sys
+import numpy, scipy
+from .version import version
+__all__ = ['randomize', 'exec_argv']
+
+numpy_version = numpy.version.version
+if not numpy.version.release:
+    numpy_version +=  '-non-release'
+
+scipy_version = scipy.version.version
+if not scipy.version.release:
+    scipy_version +=  '-non-release'
+
+def randomize():
+    """Seed numpy's random generator according to RNG_SEED environment
+    variable.
+
+    If RNG_SEED is undefined or has the value "random", the seed is read from
+    /dev/urandom.  Otherwise, the value of RNG_SEED (which may be the decimal
+    representation of an 8-byte signed integer) is used as seed.
+
+    """
+    try:
+        seed = os.environ['RNG_SEED']
+    except KeyError:
+        seed = 'random'
+    if seed == 'random':
+        f = open('/dev/urandom')
+        seed = struct.unpack('Q', f.read(8))[0]
+        f.close()
+    else:
+        seed = int(seed)
+
+    assert seed >= 0 and seed < 1<<64
+    seed_lo = int((seed & 0xffffffff) - (1<<31))
+    seed_hi = int((seed >> 32) - (1<<31))
+    numpy.random.seed((seed_lo, seed_hi))
+
+    return seed
+
+def exec_argv(vars):
+    """Execute command line arguments as python statements.
+
+    First, the versions of kwant, scipy and numpy are reported on stdout.
+    numpy's random number generator is initialized by `run.randomize()` and the
+    seed reported.
+
+    Then each command line argument is executed as a python statement within
+    the environment specified by `vars`.  Most of the time `vars` should be set
+    to the return value of `globals()`.
+
+    """
+
+    if len(sys.argv) == 1:
+        help('__main__')
+        return
+
+    seed = randomize()
+    print '#### kwant %s, scipy %s, numpy %s' % \
+        (version, scipy_version, numpy_version)
+    print "#### numpy random seed: %d" % seed
+    for statement in sys.argv[1:]:
+        print "## %s" % statement
+        exec statement in vars
diff --git a/kwant/solvers/__init__.py b/kwant/solvers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e545b9a23fff417dee97296f21587b2de8348ce7
--- /dev/null
+++ b/kwant/solvers/__init__.py
@@ -0,0 +1,4 @@
+"""This package contains solvers in individual sub-packages.
+
+please explicitly import the sub-packages you need.
+"""
diff --git a/kwant/solvers/sparse.py b/kwant/solvers/sparse.py
new file mode 100644
index 0000000000000000000000000000000000000000..559d17c958ea16537086be8f795e5aa055245b9e
--- /dev/null
+++ b/kwant/solvers/sparse.py
@@ -0,0 +1,313 @@
+__all__ = [ 'make_linear_sys' , 'solve', 'BlockResult']
+
+from functools import reduce
+from collections import namedtuple
+import numpy as np
+import scipy.linalg as la
+import scipy.sparse as sp
+import scipy.sparse.linalg as spl
+from kwant import physics, system
+
+def make_linear_sys(sys, out_leads, in_leads, energy=0,
+                    force_realspace=False):
+    """
+    Make a sparse linear system of equations defining a scattering problem.
+
+    Parameters
+    ----------
+    sys : kwant.system.FiniteSystem
+        low level system, containing the leads and the Hamiltonian of a
+        scattering region.
+    energy : number
+        excitation energy at which to solve the scattering problem.
+    in_leads : list of integers
+        numbers of leads in which current or wave function is injected.
+    out_leads : list of integers
+        numbers of leads where current or wave function is exctracted
+    force_realspace : bool
+        calculate Green's function between the outermost lead
+        sites, instead of lead modes. This is almost always
+        more computationally expensive and less stable.
+
+    Returns
+    -------
+    (h_sys, rhs, keep_vars, num_modes) : tuple of inhomogeneous data
+        `h_sys` is a numpy.sparse.csc_matrix, containing the right hand side
+        of the system of equations, `rhs` is the list of matrices with the
+        left hand side, `keep_vars` is a list with numbers of variables in the
+        solution that have to be stored (typically a small part of the
+        complete solution). Finally, `num_modes` is a list with number of
+        propagating modes or lattice points in each lead.
+
+    Notes
+    -----
+    Both incomding and outgoing leads can be defined via either self-energy,
+    or a low-level translationally invariant system.
+    The system of equations that is created is described in
+    kwant/doc/other/linear_system.pdf
+    """
+    if not sys.lead_neighbor_seqs:
+        raise ValueError('System contains no leads.')
+    h_sys = sys.hamiltonian_submatrix(sparse=True).tocsc()
+    h_sys = h_sys - energy * sp.identity(h_sys.shape[0], format='csc')
+
+    norb, num_nodes = sys.num_orbitals, sys.graph.num_nodes
+    norb_arr = np.array([norb(i) for i in xrange(num_nodes)], int)
+    offsets = np.zeros(norb_arr.shape[0] + 1, int)
+    offsets[1 :] = np.cumsum(norb_arr)
+
+    # Process the leads, generate the eigenvector matrices and lambda vectors.
+    # Then create blocks of the linear system and add them step by step.
+    keep_vars = []
+    rhs = []
+    num_modes = []
+    for leadnum, lead_neighbors in enumerate(sys.lead_neighbor_seqs):
+        lead = sys.leads[leadnum]
+        if isinstance(lead, system.InfiniteSystem) and not force_realspace:
+            h = lead.slice_hamiltonian()
+            h -= energy * np.identity(h.shape[0])
+            v = lead.inter_slice_hopping()
+
+            u, ulinv, nprop, svd = physics.modes(h, v)
+
+            num_modes.append(nprop)
+
+            if leadnum in out_leads:
+                keep_vars.append(range(h_sys.shape[0], h_sys.shape[0] + nprop))
+
+            u_out, ulinv_out = u[:, nprop:], ulinv[:, nprop:]
+            u_in, ulinv_in = u[:, : nprop], ulinv[:, : nprop]
+
+            # Construct a matrix of 1's that translates the
+            # inter-slice hopping to a proper hopping
+            # from the system to the lead.
+            neighbors = np.r_[tuple(np.arange(offsets[i], offsets[i + 1])
+                                    for i in lead_neighbors)]
+            coords = np.r_[[np.arange(neighbors.size)], [neighbors]]
+            tmp = sp.csc_matrix((np.ones(neighbors.size), coords),
+                                (neighbors.size, h_sys.shape[0]))
+
+            if svd is not None:
+                v_sp = sp.csc_matrix(svd[2].T.conj()) * tmp
+                vdaguout_sp = tmp.T * sp.csr_matrix(np.dot(svd[2] * svd[1],
+                                                           u_out))
+                lead_mat = - ulinv_out
+            else:
+                v_sp = sp.csc_matrix(v) * tmp
+                vdaguout_sp = tmp.T * sp.csr_matrix(np.dot(v.T.conj(), u_out))
+                lead_mat = - np.dot(v, ulinv_out)
+
+            h_sys = sp.bmat([[h_sys, vdaguout_sp], [v_sp, lead_mat]])
+
+            if nprop > 0 and leadnum in in_leads:
+                if svd:
+                    vdaguin_sp = tmp.T * sp.csr_matrix(-np.dot(svd[2] * svd[1],
+                                                               u_in))
+                    lead_mat_in = ulinv_in
+                else:
+                    vdaguin_sp = tmp.T * sp.csr_matrix(-np.dot(v.T.conj(),
+                                                               u_in))
+                    lead_mat_in = np.dot(v, ulinv_in)
+
+                rhs.append(sp.bmat([[vdaguin_sp], [lead_mat_in]]))
+            else:
+                rhs.append(np.zeros((0, 0)))
+        else:
+            sigma = lead.self_energy(energy)
+            num_modes.append(sigma)
+            indices = np.r_[tuple(range(offsets[i], offsets[i + 1]) for i in
+                                 lead_neighbors)]
+            assert sigma.shape == 2 * indices.shape
+            y, x = np.meshgrid(indices, indices)
+            sig_sparse = sp.coo_matrix((sigma.flat, [x.flat, y.flat]),
+                                       h_sys.shape)
+            h_sys = h_sys + sig_sparse # __iadd__ is not implemented in v0.7
+            if leadnum in out_leads:
+                keep_vars.append(list(indices))
+            if leadnum in in_leads:
+                l = indices.shape[0]
+                rhs.append(sp.coo_matrix((-np.ones(l), [indices,
+                                                        np.arange(l)])))
+
+    return h_sys, rhs, keep_vars, num_modes
+
+
+def solve_linsys(a, b, keep_vars=None):
+    """
+    Solve matrix system of equations a x = b with sparse input.
+
+    Parameters
+    ----------
+    a : a scipy.sparse.csc_matrix sparse matrix
+    b : a list of matrices.
+        Sizes of these matrices may be smaller than needed, the missing
+        entries at the end are padded with zeros.
+    keep_vars : list of lists of integers
+        a list of numbers of variables to keep in the solution
+
+    Returns
+    -------
+    output : a numpy matrix
+        solution to the system of equations.
+
+    Notes
+    -----
+    This function is largely a wrapper to a scipy.sparse.linalg.factorized.
+    """
+    a = sp.csc_matrix(a)
+    if keep_vars == None:
+        keep_vars = [range(a.shape[1])]
+    slv = spl.factorized(a)
+    keeptot = sum(keep_vars, [])
+    sols = []
+    for mat in b:
+        if mat.shape[1] != 0:
+            mat = sp.csr_matrix(mat)
+            for j in xrange(mat.shape[1]):
+                vec = np.zeros(a.shape[0], complex)
+                vec[: mat.shape[0]] = mat[:, j].todense().flatten()
+                sols.append(slv(vec)[keeptot])
+    return np.mat(sols).T
+
+
+def solve(sys, energy=0, out_leads=None, in_leads=None,
+          force_realspace=False):
+    """
+    Calculate a Green's function of a system.
+
+    Parameters
+    ----------
+    sys : `kwant.system.FiniteSystem`
+        low level system, containing the leads and the Hamiltonian of a
+        scattering region.
+    energy : number
+        excitation energy at which to solve the scattering problem.
+    in_leads : list of integers
+        numbers of leads in which current or wave function is injected.
+    out_leads : list of integers
+        numbers of leads where current or wave function is exctracted
+    force_realspace : bool
+        calculate Green's function between the outermost lead
+        sites, instead of lead modes. This is almost always
+        more computationally expensive and less stable.
+
+    Returns
+    -------
+    output: `BlockResult`
+        see notes below and `BlockResult` docstring for more information about
+        the output format.
+
+    Notes
+    -----
+    Both in_leads and out_leads should be sorted and should only contain
+    unique entries.
+
+    Returns the Green's function elements between in_leads and out_leads. If
+    the leads are defined as a self-energy, the result is just the real
+    space retarded Green's function between from in_leads to out_leads. If the
+    leads are defined as tight-binding systems, then Green's function from
+    incoming to outgoing modes is returned. Additionally a list containing
+    numbers of modes in each lead is returned. Sum of these numbers equals to
+    the size of the returned Green's function subblock. The Green's function
+    elements between incoming and outgoing modes form the scattering matrix of
+    the system. If some leads are defined via self-energy, and some as
+    tight-binding systems, result has Green's function's elements between modes
+    and sites.
+
+    Alternatively, if force_realspace=True is used, G^R is returned
+    always in real space, however this option is more computationally
+    expensive and can be less stable.
+    """
+    n = len(sys.lead_neighbor_seqs)
+    if in_leads is None:
+        in_leads = range(n)
+    if out_leads is None:
+        out_leads = range(n)
+    if sorted(in_leads) != in_leads or sorted(out_leads) != out_leads or \
+        len(set(in_leads)) != len(in_leads) or \
+        len(set(out_leads)) != len(out_leads):
+        raise ValueError('Lead lists must be sorted and with unique entries.')
+    if len(in_leads) == 0 or len(out_leads) == 0:
+        raise ValueError('No output is requested.')
+    linsys = make_linear_sys(sys, out_leads, in_leads, energy,
+                             force_realspace)
+    out_modes = [len(i) for i in linsys[2]]
+    in_modes = [i.shape[1] for i in linsys[1]]
+    result = BlockResult(solve_linsys(*linsys[: -1]), linsys[3])
+    result.in_leads = in_leads
+    result.out_leads = out_leads
+    return result
+
+
+class BlockResult(namedtuple('BlockResultTuple', ['data', 'num_modes'])):
+    """
+    Solution of a transport problem, subblock of retarded Green's function.
+
+    This class is derived from ``namedtuple('BlockResultTuple', ['data',
+    'num_modes'])``. In addition to direct access to `data` and `num_modes`,
+    this class also supports a higher level interface via its methods.
+
+    Instance Variables
+    ------------------
+    data : numpy matrix
+        a matrix containing all the requested matrix elements of Green's
+        function.
+    num_modes : list of integers
+        a list of numbers of modes (or sites if real space lead representation
+        is used) in each lead.
+    """
+    def block_coords(self, lead_out, lead_in):
+        """
+        Return slices corresponding to the block from lead_in to lead_out.
+        """
+        lead_out = self.out_leads.index(lead_out)
+        lead_in = self.in_leads.index(lead_in)
+        if not hasattr(self, '_sizes'):
+            sizes = []
+            for i in self.num_modes:
+                if np.isscalar(i):
+                    sizes.append(i)
+                else:
+                    sizes.append(i.shape[0])
+            self._sizes = np.array(sizes)
+            self._in_offsets = np.zeros(len(self.in_leads) + 1, int)
+            self._in_offsets[1 :] = np.cumsum(self._sizes[self.in_leads])
+            self._out_offsets = np.zeros(len(self.out_leads) + 1, int)
+            self._out_offsets[1 :] = np.cumsum(self._sizes[self.out_leads])
+        return slice(self._out_offsets[lead_out],
+                     self._out_offsets[lead_out + 1]), \
+               slice(self._in_offsets[lead_in], self._in_offsets[lead_in + 1])
+
+    def submatrix(self, lead_out, lead_in):
+        """Return the matrix elements from lead_in to lead_out."""
+        return self.data[self.block_coords(lead_out, lead_in)]
+
+    def _a_ttdagger_a_inv(self, lead_out, lead_in):
+        gf = np.asmatrix(self.submatrix(lead_out, lead_in))
+        if np.isscalar(self.num_modes[lead_out]):
+            gamma_out = np.asmatrix(np.identity(self.num_modes[lead_out]))
+        else:
+            gamma_out = np.matrix(self.num_modes[lead_out], dtype=complex)
+            gamma_out -= gamma_out.H
+            gamma_out *= 1j
+        if np.isscalar(self.num_modes[lead_in]):
+            gamma_in = np.asmatrix(np.identity(self.num_modes[lead_in]))
+        else:
+            gamma_in = np.matrix(self.num_modes[lead_in], dtype=complex)
+            gamma_in -= gamma_in.H
+            gamma_in *= 1j
+        return gamma_out * gf * gamma_in * gf.H
+
+    def transmission(self, lead_out, lead_in):
+        """Return transmission from lead_in to lead_out."""
+        if np.isscalar(self.num_modes[lead_out]) and \
+           np.isscalar(self.num_modes[lead_in]):
+            return la.norm(self.submatrix(lead_out, lead_in))**2
+        else:
+            return np.trace(self._a_ttdagger_a_inv(lead_out, lead_in)).real
+
+    def noise(self, lead_out, lead_in):
+        """Return shot noise from lead_in to lead_out."""
+        ttdag = self._a_ttdagger_a_inv(lead_out, lead_in)
+        ttdag -= ttdag * ttdag
+        return np.trace(ttdag).real
diff --git a/kwant/solvers/tests/test_sparse.py b/kwant/solvers/tests/test_sparse.py
new file mode 100644
index 0000000000000000000000000000000000000000..adcc793ef4976bf4e5c111255654c2910b777aa8
--- /dev/null
+++ b/kwant/solvers/tests/test_sparse.py
@@ -0,0 +1,257 @@
+from __future__ import division
+import numpy as np
+from nose.tools import assert_raises
+from numpy.testing import assert_equal, assert_almost_equal
+import kwant
+
+# The solver has to provide full scattering matrix and labels with lead numbers
+# of each mode of the output.
+from kwant.solvers.sparse import solve
+
+n = 5
+chain = kwant.lattice.Chain()
+square = kwant.lattice.Square()
+
+# Test output sanity: that an error is raised if no output is requested,
+# and that solving for a subblock of a scattering matrix is the same as taking
+# a subblock of the full scattering matrix.
+def test_output():
+    np.random.seed(3)
+    system = kwant.Builder()
+    left_lead = kwant.Builder(kwant.TranslationalSymmetry([(-1,)]))
+    right_lead = kwant.Builder(kwant.TranslationalSymmetry([(1,)]))
+    for b, site in [(system, chain(0)), (system, chain(1)),
+                 (left_lead, chain(0)), (right_lead, chain(0))]:
+        h = np.asmatrix(np.random.rand(n, n) + 1j * np.random.rand(n, n))
+        h += h.H
+        b[site] = h
+    for b, hopp in [(system, (chain(0), chain(1))),
+                    (left_lead, (chain(0), chain(1))),
+                    (right_lead, (chain(0), chain(1)))]:
+        b[hopp] = 10 * np.random.rand(n, n) + 1j * np.random.rand(n, n)
+    system.attach_lead(left_lead)
+    system.attach_lead(right_lead)
+    fsys = system.finalized()
+
+    result1 = solve(fsys)
+    s, modes1 = result1
+    assert s.shape == 2 * (sum(modes1),)
+    s1 = np.asmatrix(result1.submatrix(1, 0))
+    s2, modes2 = solve(fsys, 0, [1], [0])
+    assert s2.shape == (modes2[1], modes2[0])
+    assert_almost_equal(s1, s2)
+    assert_almost_equal(s.H * s, np.identity(s.shape[0]))
+    assert_raises(ValueError, solve, fsys, 0, [])
+
+
+# Test that a system with one lead has unitary scattering matrix.
+def test_one_lead():
+    np.random.seed(3)
+    system = kwant.Builder()
+    lead = kwant.Builder(kwant.TranslationalSymmetry([(-1,)]))
+    for b, site in [(system, chain(0)), (system, chain(1)), (system, chain(2)),
+                    (lead, chain(0))]:
+        h = np.asmatrix(np.random.rand(n, n) + 1j * np.random.rand(n, n))
+        h += h.H
+        b[site] = h
+    for b, hopp in [(system, (chain(0), chain(1))),
+                    (system, (chain(1), chain(2))),
+                    (lead, (chain(0), chain(1)))]:
+        b[hopp] = 10 * np.random.rand(n, n) + 1j * np.random.rand(n, n)
+    system.attach_lead(lead)
+    fsys = system.finalized()
+
+    s = np.asmatrix(solve(fsys)[0])
+    assert_almost_equal(s.H * s, np.identity(s.shape[0]))
+
+
+# Test that a translationally invariant system with two leads has only
+# transmission and that transmission does not mix modes.
+def test_two_equal_leads():
+    def check_fsys():
+        s, leads = solve(fsys)[: 2]
+        assert_almost_equal(s.H * s, np.identity(s.shape[0]))
+        n_modes = leads[0]
+        assert leads[1] == n_modes
+        assert_almost_equal(s[: n_modes, : n_modes], 0)
+        t_elements = np.sort(np.abs(np.asarray(s[n_modes :, : n_modes])),
+                             axis=None)
+        t_el_should_be = n_modes * (n_modes - 1) * [0] + n_modes * [1]
+        assert_almost_equal(t_elements, t_el_should_be)
+
+    np.random.seed(11)
+    system = kwant.Builder()
+    lead = kwant.Builder(kwant.TranslationalSymmetry([(1,)]))
+    h = np.asmatrix(np.random.rand(n, n) + 1j * np.random.rand(n, n))
+    h += h.H
+    h *= 0.8
+    t = 4 * np.asmatrix(np.random.rand(n, n) + 4j * np.random.rand(n, n))
+    lead[chain(0)] = system[chain(0)] = h
+    lead[chain(0), chain(1)] = t
+    system.attach_lead(lead)
+    system.attach_lead(lead.reversed())
+    fsys = system.finalized()
+    check_fsys()
+
+    # Test the same, but with a larger scattering region.
+    system = kwant.Builder()
+    system[[chain(0), chain(1)]] = h
+    system[chain(0), chain(1)] = t
+    system.attach_lead(lead)
+    system.attach_lead(lead.reversed())
+    fsys = system.finalized()
+    check_fsys()
+
+
+# Test a more complicated graph with non-singular hopping.
+def test_graph_system():
+    np.random.seed(11)
+    system = kwant.Builder()
+    lead = kwant.Builder(kwant.TranslationalSymmetry([(-1, 0)]))
+    lead.default_site_group = system.default_site_group = square
+
+    h = np.asmatrix(np.random.rand(n, n) + 1j * np.random.rand(n, n))
+    h += h.H
+    h *= 0.8
+    t = 4 * np.asmatrix(np.random.rand(n, n) + 4j * np.random.rand(n, n))
+    t1 = 4 * np.asmatrix(np.random.rand(n, n) + 4j * np.random.rand(n, n))
+    lead[0, 0] = system[[(0, 0), (1, 0)]] = h
+    lead[0, 1] = system[[(0, 1), (1, 1)]] = 4 * h
+    for builder in [system, lead]:
+        builder[(0, 0), (1, 0)] = t
+        builder[(0, 1), (1, 0)] = t1
+        builder[(0, 1), (1, 1)] = 1.1j * t1
+    system.attach_lead(lead)
+    system.attach_lead(lead.reversed())
+    fsys = system.finalized()
+
+    s, leads = solve(fsys)[: 2]
+    assert_almost_equal(s.H * s, np.identity(s.shape[0]))
+    n_modes = leads[0]
+    assert_equal(leads[1], n_modes)
+    assert_almost_equal(s[: n_modes, : n_modes], 0)
+    t_elements = np.sort(np.abs(np.asarray(s[n_modes :, : n_modes])),
+                         axis=None)
+    t_el_should_be = n_modes * (n_modes - 1) * [0] + n_modes * [1]
+    assert_almost_equal(t_elements, t_el_should_be)
+
+
+# Test a system with singular hopping.
+def test_singular_graph_system():
+    np.random.seed(11)
+
+    system = kwant.Builder()
+    lead = kwant.Builder(kwant.TranslationalSymmetry([(-1, 0)]))
+    lead.default_site_group = system.default_site_group = square
+    h = np.asmatrix(np.random.rand(n, n) + 1j * np.random.rand(n, n))
+    h += h.H
+    h *= 0.8
+    t = 4 * np.asmatrix(np.random.rand(n, n) + 4j * np.random.rand(n, n))
+    t1 = 4 * np.asmatrix(np.random.rand(n, n) + 4j * np.random.rand(n, n))
+    lead[0, 0] = system[[(0, 0), (1, 0)]] = h
+    lead[0, 1] = system[[(0, 1), (1, 1)]] = 4 * h
+    for builder in [system, lead]:
+        builder[(0, 0), (1, 0)] = t
+        builder[(0, 1), (1, 0)] = t1
+    system.attach_lead(lead)
+    system.attach_lead(lead.reversed())
+    fsys = system.finalized()
+
+    s, leads = solve(fsys)[: 2]
+    assert_almost_equal(s.H * s, np.identity(s.shape[0]))
+    n_modes = leads[0]
+    assert leads[1] == n_modes
+    assert_almost_equal(s[: n_modes, : n_modes], 0)
+    t_elements = np.sort(np.abs(np.asarray(s[n_modes :, : n_modes])),
+                         axis=None)
+    t_el_should_be = n_modes * (n_modes - 1) * [0] + n_modes * [1]
+    assert_almost_equal(t_elements, t_el_should_be)
+
+
+# This test features inside the onslice Hamiltonian a hopping matrix with more
+# zero eigenvalues than the lead hopping matrix. The previous version of the
+# sparse solver failed here.
+
+def test_tricky_singular_hopping():
+    system = kwant.Builder()
+    lead = kwant.Builder(kwant.TranslationalSymmetry([(4, 0)]))
+    lead.default_site_group = system.default_site_group = square
+
+    neighbors = []
+    for i in xrange(n):
+        site = square(-1, i)
+        neighbors.append(site)
+        system[site] = 0
+        for j in xrange(4):
+            lead[j, i] = 0
+    for i in xrange(n-1):
+        system[(-1, i), (-1, i+1)] = -1
+        for j in xrange(4):
+            lead[(j, i), (j, i+1)] = -1
+    for i in xrange(n):
+        for j in xrange(4):
+            lead[(j, i), (j+1, i)] = -1
+    del lead[(1, 0), (2, 0)]
+
+    system.leads.append(kwant.builder.BuilderLead(lead, neighbors))
+    fsys = system.finalized()
+
+    s = np.asmatrix(solve(fsys, -1.3)[0])
+    assert_almost_equal(s.H * s, np.identity(s.shape[0]))
+
+
+# Test equivalence between self-energy and scattering matrix representations.
+# Also check that transmission and noise work.
+
+def test_self_energy():
+    class LeadWithOnlySelfEnergy(object):
+        def __init__(self, lead):
+            self.lead = lead
+
+        def self_energy(self, energy):
+            return self.lead.self_energy(energy)
+
+    np.random.seed(4)
+    system = kwant.Builder()
+    left_lead = kwant.Builder(kwant.TranslationalSymmetry([(-1,)]))
+    right_lead = kwant.Builder(kwant.TranslationalSymmetry([(1,)]))
+    for b, site in [(system, chain(0)), (system, chain(1)),
+                 (left_lead, chain(0)), (right_lead, chain(0))]:
+        h = np.asmatrix(np.random.rand(n, n) + 1j * np.random.rand(n, n))
+        h += h.H
+        b[site] = h
+    for b, hopp in [(system, (chain(0), chain(1))),
+                    (left_lead, (chain(0), chain(1))),
+                    (right_lead, (chain(0), chain(1)))]:
+        b[hopp] = 10 * np.random.rand(n, n) + 1j * np.random.rand(n, n)
+    system.attach_lead(left_lead)
+    system.attach_lead(right_lead)
+    fsys = system.finalized()
+
+    t = solve(fsys, 0, [1], [0]).data
+    eig_should_be = np.linalg.eigvals(t * t.H)
+    n_eig = len(eig_should_be)
+
+    fsys.leads[1] = LeadWithOnlySelfEnergy(fsys.leads[1])
+    sol = solve(fsys, 0, [1], [0])
+    ttdagnew = sol._a_ttdagger_a_inv(1, 0)
+    eig_are = np.linalg.eigvals(ttdagnew)
+    t_should_be = np.sum(eig_are)
+    noise_should_be = np.sum(eig_are * (1 - eig_are))
+    assert_almost_equal(eig_are.imag, 0)
+    assert_almost_equal(np.sort(eig_are.real)[-n_eig :],
+                        np.sort(eig_should_be.real))
+    assert_almost_equal(t_should_be, sol.transmission(1, 0))
+    assert_almost_equal(noise_should_be, sol.noise(1, 0))
+
+    fsys.leads[0] = LeadWithOnlySelfEnergy(fsys.leads[0])
+    sol = solve(fsys, 0, [1], [0])
+    ttdagnew = sol._a_ttdagger_a_inv(1, 0)
+    eig_are = np.linalg.eigvals(ttdagnew)
+    t_should_be = np.sum(eig_are)
+    noise_should_be = np.sum(eig_are * (1 - eig_are))
+    assert_almost_equal(eig_are.imag, 0)
+    assert_almost_equal(np.sort(eig_are.real)[-n_eig :],
+                        np.sort(eig_should_be.real))
+    assert_almost_equal(t_should_be, sol.transmission(1, 0))
+    assert_almost_equal(noise_should_be, sol.noise(1, 0))
diff --git a/kwant/system.py b/kwant/system.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed2d50df027771a1b745c0769c00e08e0cec737b
--- /dev/null
+++ b/kwant/system.py
@@ -0,0 +1,281 @@
+"""Low-level interface of tight binding systems"""
+
+from __future__ import division
+__all__ = ['System', 'FiniteSystem', 'InfiniteSystem']
+
+import abc, math
+import numpy as np
+from scipy import sparse as sp
+from itertools import chain
+from kwant import physics
+
+class System(object):
+    """Abstract general low-level system.
+
+    Instance Variables
+    ------------------
+    graph : kwant.graph.CGraph
+        The system graph.
+
+    Notes
+    -----
+    The sites of the system are indexed by integers ranging from 0 to
+    ``self.graph.num_nodes - 1``.
+
+    Optionally, a class derived from `System` can provide a method `pos` which
+    is assumed to return the real space position of a site given its index.
+    """
+    __metaclass__ = abc.ABCMeta
+
+    def num_orbitals(self, site):
+        """Return the number of orbitals of a site.
+
+        This is an inefficient general implementation.  It should be
+        overridden, if a more efficient way to calculate is available.
+        """
+        ham = self.hamiltonian(site, site)
+        return 1 if np.isscalar(ham) else ham.shape[0]
+
+    @abc.abstractmethod
+    def hamiltonian(self, i, j):
+        """Return the hamiltonian matrix element for sites `i` and `j`.
+
+        If ``i == j``, return the on-site Hamiltonian of site `i`.
+
+        if ``i != j``, return the hopping between site `i` and `j`.
+        """
+        pass
+
+    def hamiltonian_submatrix(self, b_sites=None, a_sites=None, sparse=False):
+        """Return a submatrix of the system Hamiltonian.
+
+        The returned submatrix contains all the Hamiltonian matrix elements
+        from `a_sites` to `b_sites`.  If no value for a_sites or `b_sites` is
+        provided, the default is to take all sites of the system in the order
+        in which they appear. If sparse is set to `True`, a
+        `scipy.sparse.coo_matrix` is returned, otherwise a dense one.
+        """
+        def create_sparse():
+            # Calculate the data size.
+            num_entries = 0
+            for n_i, i in enumerate(a_sites):
+                for j in chain((i,), gr.out_neighbors(i)):
+                    if j in b_coord:
+                        n_j = b_coord[j]
+                        num_entries += b_norb[n_j] * a_norb[n_i]
+
+            ij = np.empty((2, num_entries), dtype=int)
+            data = np.empty(num_entries, dtype=complex)
+
+            offset = 0
+            for n_i, i in enumerate(a_sites):
+                for j in chain((i,), gr.out_neighbors(i)):
+                    if j in b_coord:
+                        n_j = b_coord[j]
+                        h = ham(j, i)
+                        # The shape check is here to prevent data corruption.
+                        shape = (1, 1) if np.isscalar(h) else h.shape
+                        if shape != (b_norb[n_j], a_norb[n_i]):
+                            raise ValueError('matrix element '
+                                             'dimensions mismatch')
+                        if np.isscalar(h):
+                            data[offset] = h
+                            ij[0, offset] = n_j
+                            ij[1, offset] = n_i
+                            offset += 1
+                        else:
+                            h = np.ravel(h, order='F')
+                            coord = slice(offset, offset + h.size)
+                            data[coord] = h
+                            jtmp = np.arange(b_norb[n_j]) + b_off[n_j]
+                            itmp = np.arange(a_norb[n_i]) + a_off[n_i]
+                            jtmp, itmp = np.meshgrid(jtmp, itmp)
+                            ij[0, coord] = jtmp.ravel()
+                            ij[1, coord] = itmp.ravel()
+                            offset += h.shape[0]
+            return sp.coo_matrix((data, ij), shape=result_shape)
+
+        def create_dense():
+            # Shape checks of arrays are performed by numpy upon subblock
+            # assignment.
+            h_sub = np.zeros(result_shape, dtype='complex')
+            for n_i, i in enumerate(a_sites):
+                for j in chain((i,), gr.out_neighbors(i)):
+                    if j in b_coord:
+                        n_j = b_coord[j]
+                        try:
+                            h_sub[b_off[n_j] : b_off[n_j + 1],
+                                    a_off[n_i] : a_off[n_i + 1]] = ham(j, i)
+                        except ValueError:
+                            raise ValueError('matrix element '
+                                             'dimensions mismatch')
+            return h_sub
+
+        gr = self.graph
+        ham = self.hamiltonian
+        n = self.graph.num_nodes
+        if a_sites is None:
+            a_sites = xrange(n)
+        if b_sites is None:
+            b_sites = xrange(n)
+        if not all(site < n for site in a_sites) or \
+           not all(site < n for site in b_sites):
+            raise KeyError('site number out of range')
+        a_norb = np.array([self.num_orbitals(i) for i in a_sites], int)
+        a_off = np.zeros(a_norb.shape[0] + 1, int)
+        a_off[1 :] = np.cumsum(a_norb)
+
+        b_norb = np.array([self.num_orbitals(i) for i in b_sites], int)
+        b_off = np.zeros(b_norb.shape[0] + 1, int)
+        b_off[1 :] = np.cumsum(b_norb)
+        # Instead of doing a double loop over a_sites and b_sites it is more
+        # efficient to check if neighbors of a_sites are in b_sites.
+        b_coord = dict((i[1], i[0]) for i in enumerate(b_sites))
+        result_shape = (b_off[-1],  a_off[-1])
+
+        if sparse:
+            return create_sparse()
+        else:
+            return create_dense()
+
+
+class FiniteSystem(System):
+    """Abstract finite low-level system, possibly with leads.
+
+    Instance Variables
+    ------------------
+    leads : sequence of lead objects
+        Each lead object has to provide at least a method ``self_energy(energy)``.
+    lead_neighbor_seqs : sequence of sequences of integers
+        Each sub-sequence contains the indices of the system sites to which the
+        lead is connected.
+
+    Notes
+    -----
+    The length of `leads` must be equal to the length of `lead_neighbor_seqs`.
+
+    For lead ``n``, the method leads[n].self_energy must return a square matrix
+    whose size is ``sum(self.num_orbitals(neighbor) for neighbor in
+    self.lead_neighbor_seqs[n])``.
+
+    Often, the elements of `leads` will be instances of `InfiniteSystem`.  If
+    this is the case for lead ``n``, the sites ``lead_neighbor_seqs[n]`` match
+    the first ``len(lead_neighbor_seqs[n])`` sites of the InfiniteSystem.
+    """
+    __metaclass__ = abc.ABCMeta
+
+
+class InfiniteSystem(System):
+    """
+    Abstract infinite low-level system.
+
+    An infinite system consists of an infinite series of identical slices.
+    Adjacent slices are connected by identical inter-slice hoppings.
+
+    Instance Variables
+    ------------------
+    slice_size : integer
+        The number of sites in a single slice of the system.
+
+    Notes
+    -----
+    The system graph of an infinite systems contains a single slice, as well as
+    the part of the previous slice which is connected to it.  The first
+    `slice_size` sites form one complete single slice.  The remaining `N` sites
+    of the graph (`N` equals ``graph.num_nodes - slice_size``) belong to the
+    previous slice.  They are included so that hoppings between slices can be
+    represented.  The N sites of the previous slice correspond to the first `N`
+    sites of the fully included slice.  When an InfiniteSystem is used as a
+    lead, `N` acts also as the number of neighbors to which it must be
+    connected.
+
+    The drawing shows three slices of an infinite system.  Each slice consists
+    of three sites.  Numbers denote sites which are included into the system
+    graph.  Stars denote sites which are not included.  Hoppings are included
+    in the graph if and only if they occur between two sites which are part of
+    the graph::
+
+            * 2 *
+        ... | | | ...
+            * 0 3
+            |/|/|
+            *-1-4
+
+        <-- order of slices
+
+    The numbering of sites in the drawing is one of the two valid ones for that
+    infinite system.  The other scheme has the numbers of site 0 and 1
+    exchanged, as well as of site 3 and 4.
+    """
+    __metaclass__ = abc.ABCMeta
+
+    def slice_hamiltonian(self):
+        """Hamiltonian of a single slice of the infinite system."""
+        slice_sites = xrange(self.slice_size)
+        return self.hamiltonian_submatrix(slice_sites, slice_sites)
+
+    def inter_slice_hopping(self):
+        """Hopping Hamiltonian between two slices of the infinite system."""
+        slice_size = self.slice_size
+        slice_sites = xrange(self.slice_size)
+        neighbor_sites = xrange(self.slice_size, self.graph.num_nodes)
+        return self.hamiltonian_submatrix(slice_sites, neighbor_sites)
+
+    def self_energy(self, energy):
+        """Return self-energy of a lead.
+
+        The returned matrix has the shape (n, n), where n is
+        ``sum(self.num_orbitals(i) for i in range(self.slice_size))``.
+        """
+        ham = self.slice_hamiltonian()
+        shape = ham.shape
+        assert len(shape) == 2
+        assert shape[0] == shape[1]
+        # Subtract energy from the diagonal.
+        ham.flat[::ham.shape[0] + 1] -= energy
+        return physics.self_energy(ham, self.inter_slice_hopping())
+
+    def num_orbitals(self, site):
+        """Return the number of orbitals of a site.
+
+        This is an inefficient general implementation.  It should be
+        overridden, if a more efficient way to calculate is available.
+        """
+        if site >= self.slice_size:
+            site -= self.slice_size
+        ham = self.hamiltonian(site, site)
+        return 1 if np.isscalar(ham) else ham.shape[0]
+
+    @property
+    def energies(self):
+        """
+        A callable object which returns the energies at wave vector `k`
+
+        Because the value of this property is a callable object, it can be used
+        as if it were a method:
+
+        >>> for k in arange(-pi, pi, 0.1):
+        >>>     for e in infsys.energies(k):
+        >>>         print k, e
+
+        But it is more efficient to evaluate the property only once:
+
+        >>> energies = infsys.energies
+        >>> for k in arange(-pi, pi, 0.1):
+        >>>     for e in energies(k):
+        >>>         print k, e
+        """
+        result = Energies()
+        result.ham = self.slice_hamiltonian()
+        hop = self.inter_slice_hopping()
+        result.hop = np.empty(result.ham.shape, dtype=complex)
+        result.hop[:, : hop.shape[1]] = hop
+        result.hop[:, hop.shape[1] :] = 0
+        return result
+
+
+class Energies():
+    def __call__(self, k):
+        mat = self.hop * complex(math.cos(k), math.sin(k))
+        mat += mat.conjugate().transpose() + self.ham
+        return np.sort(np.linalg.eigvalsh(mat))
diff --git a/kwant/tests/test_builder.py b/kwant/tests/test_builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..0430e4cc135635d8b83f290be706ead5f1765445
--- /dev/null
+++ b/kwant/tests/test_builder.py
@@ -0,0 +1,484 @@
+from __future__ import division
+from random import Random
+from StringIO import StringIO
+from nose.tools import assert_raises, assert_not_equal
+from numpy.testing import assert_equal
+import numpy as np
+import kwant
+from kwant import builder
+
+
+def test_graph():
+    graph = builder.Graph()
+    assert not graph
+
+    a = 'a'
+    graph.setitem_tail(a, 'node a')
+    graph.setitem_edge(('a', 'a'), 0)
+    graph.setitem_edge(('a', 'b'), 1)
+    assert graph.has_tail('a')
+    assert not graph.has_tail('b')
+    assert graph.getkey_tail('a') is a
+    assert_raises(KeyError, graph.getkey_tail, 'b')
+    assert_raises(KeyError, graph.setitem_edge, ('b', 'a'), 1)
+    graph.setitem_tail('b', 'node b')
+    graph.setitem_tail('c', 'node c')
+    graph.setitem_edge(('b', 'c'), 2)
+    graph.setitem_edge(('c', 'a'), 3)
+    graph.setitem_edge(('a', 'c'), 4)
+    graph.setitem_edge(('a', 'b'), graph.getitem_edge(('a', 'b')) - 1)
+    graph.setitem_edge(('b', 'a'), -1)
+    assert_equal(graph.pop_edge(('c', 'a')), 3)
+    graph.delitem_edge(('a', 'c'))
+    graph.setitem_edge(('b', 'c'), 2) # Overwrite with same value
+    edges_should_be = [('a', 'a'), ('a', 'b'), ('b', 'c'), ('b', 'a')]
+    edges_should_be.sort()
+
+    assert graph
+    assert_equal(graph.getitem_tail('b'), 'node b')
+    assert_raises(KeyError, graph.getitem_tail, 'x')
+    assert_equal(graph.getitem_edge(('a', 'b')), 0)
+    assert_equal(graph.getitem_edge(('b', 'c')), 2)
+    assert_raises(KeyError, graph.getitem_edge, ('c', 'a'))
+    assert_raises(KeyError, graph.getitem_edge, ('x', 'z'))
+
+    edges = list(graph.edges())
+    edges.sort()
+    assert_equal(edges, edges_should_be)
+    for edge in edges_should_be:
+        assert graph.has_edge(edge)
+    assert not graph.has_edge(('x', 'y'))
+
+    assert graph.has_tail('a')
+    assert not graph.has_tail('x')
+
+
+def test_site_groups():
+    pgid = builder.pgid_of_group
+
+    sys = builder.Builder()
+    assert_equal(sys._group_by_pgid, {})
+    sg = builder.SimpleSiteGroup()
+    osg = builder.SimpleSiteGroup()
+
+    assert_raises(KeyError, sys.__setitem__, (0, ), 7)
+    sys[sg(0)] = 7
+    assert_equal(sys[sg(0)], 7)
+    assert_raises(KeyError, sys.__getitem__, (0, ))
+
+    sys.default_site_group = sg
+    sys[1,] = 123
+    assert_equal(sys[1,], 123)
+    assert_equal(sys[sg(1)], 123)
+    assert_raises(KeyError, sys.__getitem__, osg(1))
+
+    assert_equal(sys._group_by_pgid, {pgid(sg) : sg})
+    sys[osg(1)] = 321
+    assert_equal(sys._group_by_pgid, {pgid(sg) : sg, pgid(osg) : osg})
+    assert_equal(sys[osg(1)], 321)
+
+    assert_equal(sg(-5).shifted((-2,), osg), osg(-7))
+
+
+def test_sequence_of_sites():
+    sg = builder.SimpleSiteGroup()
+    sites = [sg(1, 2, 3), sg('aa'), sg(12, 'bb')]
+    assert_equal(sites, list(builder.SequenceOfSites(sites)))
+
+
+def test_construction_and_indexing():
+    sites = [(0, 0), (0, 1), (1, 0)]
+    hoppings = [((0, 0), (0, 1)),
+                ((0, 1), (1, 0)),
+                ((1, 0), (0, 0))]
+    sys = builder.Builder()
+    sys.default_site_group = sg = builder.SimpleSiteGroup()
+    t, V = 1.0j, 0.0
+    sys[sites] = V
+    sys[sites[0]] = V
+    sys[hoppings] = t
+    sys[hoppings[0]] = t
+    assert_raises(KeyError, sys.__setitem__, ((0, 1), (7, 8)), t)
+    assert_raises(KeyError, sys.__setitem__, ((12, 14), (0, 1)), t)
+
+    assert (123, 5) not in sys
+    assert ((0, 0), (123, 1)) not in sys
+    assert ((7, 8), (0, 0)) not in sys
+    for site in sites:
+        assert site in sys
+        assert_equal(sys[site], V)
+    for hop in hoppings:
+        rev_hop = hop[1], hop[0]
+        assert hop in sys
+        assert rev_hop in sys
+        assert_equal(sys[hop], t)
+        assert_equal(sys[rev_hop], t.conjugate())
+
+    assert_equal(sys.degree((0, 0)), 2)
+    assert_equal(sorted((s.group,) + s.tag for s in sys.neighbors((0, 0))),
+                 sorted([(sg, 0, 1), (sg, 1, 0)]))
+
+    del sys[hoppings]
+    assert_equal(list(sys.hoppings()), [])
+    sys[hoppings] = t
+
+    del sys[0, 0]
+    assert_equal(sorted((s.group,) + s.tag for s in sys.sites()),
+                 sorted([(sg, 0, 1), (sg, 1, 0)]))
+    assert_equal(list(((a.group,) + a.tag, (sys.group,) + sys.tag)
+                      for a, sys in sys.hoppings()),
+                 [((sg, 0, 1), (sg, 1, 0))])
+
+    assert_equal(list(sys.site_value_pairs()),
+                 [(site, sys[site]) for site in sys.sites()])
+    assert_equal(list(sys.hopping_value_pairs()),
+                 [(hopping, sys[hopping]) for hopping in sys.hoppings()])
+
+
+def test_hermitian_conjugation():
+    def f(i, j):
+        if j[0] == i[0] + 1:
+            return np.array([[1, 2j], [3 + 1j, 4j]])
+        else:
+            raise ValueError
+
+    sys = builder.Builder()
+    sys.default_site_group = builder.SimpleSiteGroup()
+    sys[0,] = sys[1,] = np.identity(2)
+
+    sys[(0,), (1,)] = f
+    assert sys[(0,), (1,)] is f
+    assert isinstance(sys[(1,), (0,)], builder.HermConjOfFunc)
+    assert_equal(sys[(1,), (0,)]((1,), (0,)),
+                 sys[(0,), (1,)]((0,), (1,)).conjugate().transpose())
+    sys[(0,), (1,)] = sys[(1,), (0,)]
+    assert isinstance(sys[(0,), (1,)], builder.HermConjOfFunc)
+    assert sys[(1,), (0,)] is f
+
+
+def test_value_equality_and_identity():
+    m = np.array([[1, 2], [3j, 4j]])
+    sys = builder.Builder()
+    sys.default_site_group = builder.SimpleSiteGroup()
+
+    sys[0,] = m
+    sys[1,] = m
+    assert sys[1,] is m
+
+    sys[(0,), (1,)] = m
+    assert_equal(sys[(1,), (0,)], m.transpose().conjugate())
+    assert sys[(0,), (1,)] is m
+
+    sys[(1,), (0,)] = m
+    assert_equal(sys[(0,), (1,)], m.transpose().conjugate())
+    assert sys[(1,), (0,)] is m
+
+
+def random_onsite_hamiltonian(rng):
+    return 2 * rng.random() - 1
+
+def random_hopping_integral(rng):
+    return complex(2 * rng.random() - 1, 2 * rng.random() - 1)
+
+def check_onsite(fsys, sites, subset=False, check_values=True):
+    freq = {}
+    for node in xrange(fsys.graph.num_nodes):
+        site = fsys.site(node).tag
+        freq[site] = freq.get(site, 0) + 1
+        if check_values and site in sites:
+            assert fsys.onsite_hamiltonians[node] is sites[site]
+    if not subset:
+        # Check that all sites of `fsys` are in `sites`.
+        for site, n in freq.iteritems():
+            assert site in sites
+    # Check that all sites of `sites` are in `fsys`.
+    for site in sites:
+        assert_equal(freq[site], 1)
+
+def check_hoppings(fsys, hops):
+    assert_equal(fsys.graph.num_edges, 2 * len(hops))
+    for edge_id, edge in enumerate(fsys.graph):
+        tail, head = edge
+        tail = fsys.site(tail).tag
+        head = fsys.site(head).tag
+        value = fsys.hoppings[edge_id]
+        if value is builder.other:
+            assert (head, tail) in hops
+        else:
+            assert (tail, head) in hops
+            assert value is hops[tail, head]
+
+def test_finalization():
+    """Test the finalization of finite and infinite systems.
+
+    In order to exactly verify the finalization, low-level features of the
+    build module are used directly.  This is not the way one would use a
+    finalized system in normal code.
+    """
+    def set_sites(dest):
+        while len(dest) < n_sites:
+            site = rng.randrange(size), rng.randrange(size)
+            if site not in dest:
+                dest[site] = random_onsite_hamiltonian(rng)
+
+    def set_hops(dest, sites):
+        while len(dest) < n_hops:
+            a, b = rng.sample(sites, 2)
+            if (a, b) not in dest and (b, a) not in dest:
+                dest[a, b] = random_hopping_integral(rng)
+
+    rng = Random(123)
+    size = 20
+    n_sites = 120
+    n_hops = 500
+
+    # Make scattering region blueprint.
+    sr_sites = {}
+    set_sites(sr_sites)
+    sr_hops = {}
+    set_hops(sr_hops, sr_sites)
+
+    # Make lead blueprint.
+    possible_neighbors = rng.sample(list(sr_sites), n_sites // 2)
+    lead_sites = {}
+    for pn in possible_neighbors:
+        lead_sites[pn] = random_hopping_integral(rng)
+    set_sites(lead_sites)
+    lead_hops = {}        # Hoppings within a single lead unit cell
+    set_hops(lead_hops, lead_sites)
+    lead_sites_list = list(lead_sites)
+    neighbors = set()
+    for i in xrange(n_hops):
+        while True:
+            a = rng.choice(lead_sites_list)
+            b = rng.choice(possible_neighbors)
+            neighbors.add(b)
+            b = b[0] - size, b[1]
+            if rng.randrange(2):
+                a, b = b, a
+            if (a, b) not in lead_hops and (b, a) not in lead_hops:
+                break
+        lead_hops[a, b] = random_hopping_integral(rng)
+    neighbors = sorted(neighbors)
+
+    # Build scattering region from blueprint and test it.
+    sys = builder.Builder()
+    sys.default_site_group = sg = kwant.make_lattice(np.identity(2))
+    for site, value in sr_sites.iteritems():
+        sys[site] = value
+    for hop, value in sr_hops.iteritems():
+        sys[hop] = value
+    fsys = sys.finalized()
+    check_onsite(fsys, sr_sites)
+    check_hoppings(fsys, sr_hops)
+
+    # Build lead from blueprint and test it.
+    lead = builder.Builder(kwant.TranslationalSymmetry([(size, 0)]))
+    lead.default_site_group = sg
+    for site, value in lead_sites.iteritems():
+        shift = rng.randrange(-5, 6) * size
+        site = site[0] + shift, site[1]
+        lead[site] = value
+    for (a, b), value in lead_hops.iteritems():
+        shift = rng.randrange(-5, 6) * size
+        a = a[0] + shift, a[1]
+        b = b[0] + shift, b[1]
+        lead[a, b] = value
+    flead = lead.finalized()
+    all_sites = list(lead_sites)
+    all_sites.extend((x - size, y) for (x, y) in neighbors)
+    check_onsite(flead, all_sites, check_values=False)
+    check_onsite(flead, lead_sites, subset=True)
+    check_hoppings(flead, lead_hops)
+
+    # Attach lead to system.
+    sys.leads.append(builder.BuilderLead(
+            lead, (builder.Site(sg, n) for n in neighbors)))
+    fsys = sys.finalized()
+    assert_equal(len(fsys.lead_neighbor_seqs), 1)
+    assert_equal([fsys.site(i).tag for i in fsys.lead_neighbor_seqs[0]],
+                 neighbors)
+
+    # Add a hopping to the lead which couples two next-nearest slices and check
+    # whether this leads to an error.
+    a = rng.choice(lead_sites_list)
+    b = rng.choice(possible_neighbors)
+    b = b[0] + 2 * size, b[1]
+    lead[a, b] = random_hopping_integral(rng)
+    assert_raises(ValueError, lead.finalized)
+
+
+def test_hamiltonian_evaluation():
+    def f_onsite(site):
+        return site.tag[0]
+
+    def f_hopping(a, b):
+        a, b = a.tag, b.tag
+        return complex(a[0] + b[0], a[1] - b[1])
+
+    tags = [(0, 0), (1, 1), (2, 2), (3, 3)]
+    edges = [(0, 1), (0, 2), (0, 3), (1, 2)]
+
+    sys = builder.Builder()
+    sys.default_site_group = sg = builder.SimpleSiteGroup()
+    sites = [sg(*tag) for tag in tags]
+    sys[tags] = f_onsite
+    sys[((tags[i], tags[j]) for (i, j) in edges)] = f_hopping
+    fsys = sys.finalized()
+
+    assert_equal(fsys.graph.num_nodes, len(tags))
+    assert_equal(fsys.graph.num_edges, 2 * len(edges))
+
+    for i in range(len(tags)):
+        site = fsys.site(i)
+        assert site in sites
+        assert_equal(fsys.hamiltonian(i, i),
+                     sys[site](site))
+
+    for t, h in fsys.graph:
+        tsite = fsys.site(t)
+        hsite = fsys.site(h)
+        assert_equal(fsys.hamiltonian(t, h),
+                     sys[tsite, hsite](tsite, hsite))
+
+
+def test_dangling():
+    def make_system():
+        #        1
+        #       / \
+        #    3-0---2-4-5  6-7  8
+        sys = builder.Builder()
+        sys.default_site_group = builder.SimpleSiteGroup()
+        sys[((i,) for i in range(9))] = None
+        sys[[((0,), (1,)), ((1,), (2,)), ((2,), (0,))]] = None
+        sys[[((0,), (3,)), ((2,), (4,)), ((4,), (5,))]] = None
+        sys[(6,), (7,)] = None
+        return sys
+
+    sys0 = make_system()
+    assert_equal(sorted(site.tag for site in sys0.dangling()),
+                 sorted([(3,), (5,), (6,), (7,), (8,)]))
+    sys0.eradicate_dangling()
+
+    sys1 = make_system()
+    while True:
+        dangling = list(sys1.dangling())
+        if not dangling: break
+        del sys1[dangling]
+
+    assert_equal(sorted(site.tag for site in sys0.sites()),
+                 sorted([(0,), (1,), (2,)]))
+    assert_equal(sorted(site.tag for site in sys0.sites()),
+                 sorted(site.tag for site in sys1.sites()))
+
+
+def test_builder_with_symmetry():
+    g = kwant.make_lattice(np.identity(3))
+    sym = kwant.TranslationalSymmetry([(0, 0, 3), (0, 2, 0)])
+    bob = builder.Builder(sym)
+    bob.default_site_group = g
+
+    t, V = 1.0j, 0.0
+    hoppings = [((5, 0, 0), (0, 5, 0)),
+                ((0, 5, 0), (0, 0, 5)),
+                ((0, 0, 5), (5, 0, 0)),
+                ((0, 3, 0), (0, 0, 5)),
+                ((0, 7, -6), (5, 6, -6))]
+    hoppings_fd = [((5, 0, 0), (0, 5, 0)),
+                   ((0, 1, 0), (0, -4, 5)),
+                   ((0, 0, 2), (5, 0, -3)),
+                   ((0, 1, 0), (0, -2, 5)),
+                   ((0, 1, 0), (5, 0, 0))]
+
+    bob[(a for a, b in hoppings)] = V
+    bob[hoppings] = t
+
+    assert_equal(sorted(site.tag for site in bob.sites()),
+                 sorted(set(a for a, b in hoppings_fd)))
+    for sites in hoppings_fd:
+        for site in sites:
+            assert site in bob
+            assert_equal(bob[site], V)
+
+    assert_equal(sorted((a.tag, b.tag) for a, b in bob.hoppings()),
+                 sorted(hoppings_fd))
+    for hop in hoppings_fd:
+        rhop = hop[1], hop[0]
+        assert hop in bob
+        assert rhop in bob
+        assert_equal(bob[hop], t)
+        assert_equal(bob[rhop], t.conjugate())
+
+    del bob[(0, 6, -4), (0, 11, -9)]
+    assert ((0, 1, 0), (0, -4, 5)) not in bob
+
+    del bob[0, 3, -3]
+    assert_equal(list((a.tag, b.tag) for a, b in bob.hoppings()),
+                 [((0, 0, 2), (5, 0, -3))])
+
+
+class VerySimpleSymmetry(builder.Symmetry):
+    def __init__(self, period):
+        self.period = period
+
+    @property
+    def num_directions(self):
+        return 1
+
+    def which(self, site):
+        return (site.tag[0] // self.period,)
+
+    def act(self, element, a, b=None):
+        delta = (self.period * element[0],) + (len(a.tag) - 1) * (0,)
+        if b is None:
+            return a.shifted(delta)
+        else:
+            return a.shifted(delta), b.shifted(delta)
+
+def test_attach_lead():
+    gr = builder.SimpleSiteGroup()
+
+    sys = builder.Builder()
+    sys.default_site_group = gr
+    sys[(1,)] = 0
+    lead0 = builder.Builder(VerySimpleSymmetry(-2))
+    assert_raises(ValueError, sys.attach_lead, lead0)
+    lead0.default_site_group = gr
+    lead0[(0,)] = lead0[(1,)] = 1
+    lead0[(0,), (1,)] = lead0[(0,), (2,)] = 1
+    assert_raises(ValueError, sys.attach_lead, lead0)
+
+    sys = builder.Builder()
+    sys.default_site_group = gr
+    sys[(1,)] = 0
+    sys[(0,)] = 1
+    assert_raises(ValueError, sys.attach_lead, lead0, gr(5))
+
+    sys = builder.Builder()
+    sys.default_site_group = gr
+    sys[(1,)] = 0
+    sys[(0,)] = 1
+    sys.attach_lead(lead0)
+    assert_equal(len(list(sys.sites())), 3)
+    assert_equal(set(sys.leads[0].neighbors), set([gr(-1), gr(0)]))
+    sys[(-10,)] = sys[(-11,)] = 0
+    sys.attach_lead(lead0)
+    assert_equal(set(sys.leads[1].neighbors), set([gr(-10), gr(-11)]))
+    assert_equal(len(list(sys.sites())), 5)
+    sys.attach_lead(lead0, gr(-5))
+    assert_equal(set(sys.leads[0].neighbors), set([gr(-1), gr(0)]))
+
+
+def test_neighbors_not_in_single_domain():
+    sr = builder.Builder()
+    lead = builder.Builder(VerySimpleSymmetry(-1))
+    lat = builder.SimpleSiteGroup()
+    sr.default_site_group = lead.default_site_group = lat
+    sr[((x, y) for x in range(3) for y in range(3) if x >= y)] = 0
+    sr[sr.possible_hoppings((1, 0), lat, lat)] = 1
+    sr[sr.possible_hoppings((0, 1), lat, lat)] = 1
+    lead[((0, y) for y in range(3))] = 0
+    lead[(((0, y), (1, y)) for y in range(3))] = 1
+    lead[(((0, y), (0, y + 1)) for y in range(2))] = 1
+    sr.leads.append(builder.BuilderLead(lead, [lat(i, i) for i in range(3)]))
+    assert_raises(ValueError, sr.finalized)
diff --git a/kwant/tests/test_lattice.py b/kwant/tests/test_lattice.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d4c11084052fc0cb6558ba406dbbd4114ccb77b
--- /dev/null
+++ b/kwant/tests/test_lattice.py
@@ -0,0 +1,124 @@
+from __future__ import division
+from math import sqrt
+import numpy as np
+from nose.tools import assert_raises, assert_not_equal
+from numpy.testing import assert_equal
+from kwant import lattice, builder
+
+
+def test_make_lattice():
+    for lat in (lattice.make_lattice(((1, 0), (0.5, 0.5))),
+                lattice.make_lattice(((1, 0), (0.5, sqrt(3)/2)),
+                                     ((0, 0), (0, 1/sqrt(3))))):
+        for sl in lat.sublattices:
+            tag = (-5, 33)
+            site = sl(*tag)
+            assert_equal(tag, sl.closest(site.pos))
+
+
+def test_pack_unpack():
+    for dim in [1, 2, 3, 5, 10, 99]:
+        group = lattice.make_lattice(np.identity(dim))
+        group_by_pgid = {builder.pgid_of_group(group) : group}
+        tag = tuple(xrange(dim))
+        site = group(*tag)
+        psite = site.packed()
+        same_site = builder.unpack(psite, group_by_pgid)
+        assert_equal(same_site, site)
+
+
+def test_shape():
+    def in_circle(pos):
+        return pos[0]**2 + pos[1]**2 < 3
+
+    lat = lattice.make_lattice(((1, 0), (0.5, sqrt(3)/2)),
+                               ((0, 0), (0, 1/sqrt(3))))
+    sites = set(lat.shape(in_circle, (0, 0)))
+    sites_alt = set()
+    sl0, sl1 = lat.sublattices
+    for x in xrange(-2, 3):
+        for y in xrange(-2, 3):
+            tag = (x, y)
+            for site in (sl0(*tag), sl1(*tag)):
+                if in_circle(site.pos):
+                    sites_alt.add(site)
+    assert_equal(sites, sites_alt)
+    assert_raises(ValueError, lat.shape(in_circle, (10, 10)).next)
+
+
+def test_translational_symmetry():
+    ts = lattice.TranslationalSymmetry
+    g2 = lattice.make_lattice(np.identity(2))
+    g3 = lattice.make_lattice(np.identity(3))
+
+    sym = ts([(0, 0, 4), (0, 5, 0), (0, 0, 2)])
+    assert_raises(ValueError, sym.add_site_group, g3)
+    sym = ts([(3.3, 0)])
+    assert_raises(ValueError, sym.add_site_group, g2)
+
+    # Test lattices with dimension smaller than dimension of space.
+
+    g2in3 = lattice.make_lattice([[4, 4, 0], [4, -4, 0]])
+    sym = ts([(8, 0, 0)])
+    sym.add_site_group(g2in3)
+    sym = ts([(8, 0, 1)])
+    assert_raises(ValueError, sym.add_site_group, g2in3)
+
+    # Test automatic fill-in of transverse vectors.
+    sym = ts([(1, 2)])
+    sym.add_site_group(g2)
+    assert_not_equal(sym.site_group_data[g2][2], 0)
+    sym = ts([(1, 0, 2), (3, 0, 2)])
+    sym.add_site_group(g3)
+    assert_not_equal(sym.site_group_data[g3][2], 0)
+
+    transl_vecs = np.array([[10, 0], [7, 7]], dtype=int)
+    sym = ts(transl_vecs)
+    assert_equal(sym.num_directions, 2)
+    sym2 = ts(transl_vecs[: 1, :])
+    sym2.add_site_group(g2, transl_vecs[1:, :])
+    for site in [g2(0, 0), g2(4, 0), g2(2, 1), g2(5, 5), g2(15, 6)]:
+        assert sym.in_fd(site)
+        assert sym2.in_fd(site)
+        assert_equal(sym.which(site), (0, 0))
+        assert_equal(sym2.which(site), (0,))
+        for v in [(1, 0), (0, 1), (-1, 0), (0, -1), (5, 10), (-111, 573)]:
+            site2 = site.shifted(np.dot(v, transl_vecs))
+            assert not sym.in_fd(site2)
+            assert (v[0] != 0) != sym2.in_fd(site2)
+            assert_equal(sym.to_fd(site2), site)
+            assert (v[1] == 0) == (sym2.to_fd(site2) == site)
+            assert_equal(sym.which(site2), v)
+            assert_equal(sym2.which(site2), v[:1])
+
+            for hop in [(0, 0), (100, 0), (0, 5), (-2134, 3213)]:
+                assert_equal(sym.to_fd(site2, site2.shifted(hop)),
+                             (site, site.shifted(hop)))
+
+
+def test_translational_symmetry_reversed():
+    def assert_equal_symmetry(a, b):
+        np.testing.assert_array_almost_equal(a.periods, b.periods)
+        for i in a.site_group_data:
+            assert i in b.site_group_data
+            data = zip(b.site_group_data[i], a.site_group_data[i])
+            for j in data:
+                assert np.array_equal(j[0], j[1])
+
+    params = [([(3,)], None),
+              ([(0, -1)], None),
+              ([(1, 1)], None),
+              ([(1, 1)], [(-1, 0)]),
+              ([(3, 1, 1), (1, 4, 1), (1, 1, 5)], []),
+              ([(3, 1, 1), (1, 4, 1)], [(1, 1, 5)]),
+              ([(3, 1, 1)], [(1, 4, 1), (1, 1, 5)])]
+    for periods, other_vectors in params:
+        sym = lattice.TranslationalSymmetry(periods)
+        gr = lattice.make_lattice(np.identity(len(periods[0])))
+        sym.add_site_group(gr, other_vectors)
+        rsym = sym.reversed()
+        assert_equal_symmetry(sym, rsym.reversed())
+        rperiods = -np.array(periods, dtype=int)
+        rsym2 = lattice.TranslationalSymmetry(rperiods)
+        rsym2.add_site_group(gr, other_vectors)
+        assert_equal_symmetry(rsym, rsym2)
diff --git a/kwant/tests/test_plotter.py b/kwant/tests/test_plotter.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ca508424a8f0c4d951a975dee0692373ac2cb6b
--- /dev/null
+++ b/kwant/tests/test_plotter.py
@@ -0,0 +1,63 @@
+import tempfile, os
+import numpy as np
+import kwant
+from kwant import plotter
+
+lat = kwant.lattice.Square()
+
+def make_ribbon(width, dir, E, t):
+    b = kwant.Builder(kwant.TranslationalSymmetry([(dir, 0)]))
+
+    # Add sites to the builder.
+    for y in xrange(width):
+        b[lat(0, y)] = E
+
+    # Add hoppings to the builder.
+    for y in xrange(width):
+        b[lat(0, y), lat(1, y)] = t
+        if y+1 < width:
+            b[lat(0, y), lat(0, y+1)] = t
+
+    return b
+
+
+def make_rectangle(length, width, E, t):
+    b = kwant.Builder()
+
+    # Add sites to the builder.
+    for x in xrange(length):
+        for y in xrange(width):
+            b[lat(x, y)] = E
+
+    # Add hoppings to the builder.
+    for x in xrange(length):
+        for y in xrange(width):
+            if x+1 < length:
+                b[lat(x, y), lat(x+1, y)] = t
+            if y+1 < width:
+                b[lat(x, y), lat(x, y+1)] = t
+
+    return b
+
+def test_plot():
+    E = 4.0
+    t = -1.0
+    length = 5
+    width = 5
+
+    b = make_rectangle(length, width, E, t)
+    b.attach_lead(make_ribbon(width, -1, E, t))
+    b.attach_lead(make_ribbon(width, 1, E, t))
+
+    directory = tempfile.mkdtemp()
+    filename = os.path.join(directory, "test.pdf")
+
+    kwant.plot(b.finalized(), filename=filename,
+               symbols=plotter.Circle(r=0.25, fcol=plotter.red),
+               lines=plotter.Line(lw=0.1, lcol=plotter.red),
+               lead_symbols=plotter.Circle(r=0.25, fcol=plotter.black),
+               lead_lines=plotter.Line(lw=0.1, lcol=plotter.black),
+               lead_fading=[0, 0.2, 0.4, 0.6, 0.8])
+
+    os.unlink(filename)
+    os.rmdir(directory)
diff --git a/kwant/tests/test_system.py b/kwant/tests/test_system.py
new file mode 100644
index 0000000000000000000000000000000000000000..abff175c2cd6e8ea91c1654fc169fcd88c421dd9
--- /dev/null
+++ b/kwant/tests/test_system.py
@@ -0,0 +1,65 @@
+import numpy as np
+from scipy import sparse
+from nose.tools import assert_raises, assert_almost_equal
+import kwant
+
+def test_hamiltonian_submatrix():
+    sys = kwant.Builder()
+    sys.default_site_group = kwant.lattice.Chain()
+    for i in xrange(3):
+        sys[(i,)] = 0.5 * i
+    for i in xrange(2):
+        sys[(i,), (i + 1,)] = 1j * (i + 1)
+
+    sys2 = sys.finalized()
+    mat = sys2.hamiltonian_submatrix()
+    assert mat.shape == (3, 3)
+    # Sorting is required due to unknown compression order of builder.
+    perm = np.argsort(sys2.onsite_hamiltonians)
+    mat_should_be = np.mat('0 1j 0; -1j 0.5 2j; 0 -2j 1')
+
+    mat = mat[perm, :]
+    mat = mat[:, perm]
+    np.testing.assert_array_equal(mat, mat_should_be)
+
+    mat = sys2.hamiltonian_submatrix(sparse=True)
+    assert sparse.isspmatrix_coo(mat)
+    mat = mat.todense()
+    mat = mat[perm, :]
+    mat = mat[:, perm]
+    np.testing.assert_array_equal(mat, mat_should_be)
+
+    mat = sys2.hamiltonian_submatrix(perm[[0, 1]], perm[[2]])
+    np.testing.assert_array_equal(mat, mat_should_be[: 2, 2])
+
+    mat = sys2.hamiltonian_submatrix(perm[[0, 1]], perm[[2]], sparse=True)
+    mat = mat.todense()
+    np.testing.assert_array_equal(mat, mat_should_be[: 2, 2])
+
+    # Test for correct treatment of matrix input.
+    sys = kwant.Builder()
+    sys.default_site_group = kwant.lattice.Chain()
+    sys[(0,)] = np.mat('0 1j; -1j 0')
+    sys[(1,)] = np.mat('1')
+    sys[(2,)] = np.mat('2')
+    sys[(1,), (0,)] = np.mat('1 2j')
+    sys[(2,), (1,)] = np.mat('3j')
+    sys2 = sys.finalized()
+    mat_dense = sys2.hamiltonian_submatrix()
+    mat_sp = sys2.hamiltonian_submatrix(sparse=True).todense()
+    np.testing.assert_array_equal(mat_sp, mat_dense)
+
+    # Test for shape errors.
+    sys[(0,), (2,)] = np.mat('1 2')
+    sys2 = sys.finalized()
+    assert_raises(ValueError, sys2.hamiltonian_submatrix)
+    assert_raises(ValueError, sys2.hamiltonian_submatrix, None, None, True)
+
+def test_energies():
+    sys = kwant.Builder(kwant.TranslationalSymmetry([(-1, 0)]))
+    sys.default_site_group = kwant.lattice.Square()
+    sys[[(0, 0), (0, 1)]] = complex(0)
+    sys[[((0, 0), (0, 1)),
+         ((0, 0), (1, 0))]] = complex(0, 1)
+    for e in sys.finalized().energies(0):
+        assert_almost_equal(abs(e), 1)
diff --git a/kwant/version.py b/kwant/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..92ff8d09e0967aab3fedd043a51ec9d3fcb1891b
--- /dev/null
+++ b/kwant/version.py
@@ -0,0 +1,35 @@
+import subprocess, os
+
+__all__ = ['version']
+
+# When changing this function, remember to also change its twin in ../setup.py.
+def get_version_from_git():
+    kwant_dir = os.path.dirname(os.path.abspath(__file__))
+    try:
+        p = subprocess.Popen(['git', 'describe'], cwd=kwant_dir,
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    except OSError:
+        return
+
+    if p.wait() != 0:
+        return
+    version = p.communicate()[0].strip()
+
+    if version[0] == 'v':
+        version = version[1:]
+
+    try:
+        p = subprocess.Popen(['git', 'diff', '--quiet'], cwd=kwant_dir)
+    except OSError:
+        version += '-confused'  # This should never happen.
+    else:
+        if p.wait() == 1:
+            version += '-dirty'
+    return version
+
+version = get_version_from_git()
+if version is None:
+    try:
+        from _static_version import version
+    except:
+        version = "unknown"
diff --git a/setup.py b/setup.py
new file mode 100755
index 0000000000000000000000000000000000000000..3ff1a1e427307c82248e46dd288cbb65a12682e1
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+
+import sys, subprocess, os
+from distutils.core import setup
+from distutils.extension import Extension
+import numpy as np
+
+
+# This is an exact copy of the function from kwant/version.py.  We can't import
+# it here (because kwant is not yet built when this scipt is run), so we just
+# include a copy.
+def get_version_from_git():
+    kwant_dir = os.path.dirname(os.path.abspath(__file__))
+    try:
+        p = subprocess.Popen(['git', 'describe'], cwd=kwant_dir,
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    except OSError:
+        return
+
+    if p.wait() != 0:
+        return
+    version = p.communicate()[0].strip()
+
+    if version[0] == 'v':
+        version = version[1:]
+
+    try:
+        p = subprocess.Popen(['git', 'diff', '--quiet'], cwd=kwant_dir)
+    except OSError:
+        version += '-confused'  # This should never happen.
+    else:
+        if p.wait() == 1:
+            version += '-dirty'
+    return version
+
+try:
+    from Cython.Distutils import build_ext
+except ImportError:
+    use_cython = False
+else:
+    use_cython = True
+
+def get_static_version():
+    try:
+        with open('kwant/_static_version.py') as f:
+            contents = f.read()
+            assert contents[:11] == "version = '"
+            assert contents[-2:] == "'\n"
+            return contents[11:-2]
+    except:
+        return None
+
+git_version = get_version_from_git()
+static_version = get_static_version()
+if git_version is not None:
+    version = git_version
+    if static_version != git_version:
+        with open('kwant/_static_version.py', 'w') as f:
+            f.write("version = '%s'\n" % version)
+elif static_version is not None:
+    version = static_version
+else:
+    version = 'unknown'
+
+# List of tuples (args, keywords) to be passed to Extension, possibly after
+# replacing ".pyx" with ".c" if Cython is not to be used.
+extensions = [ # (["kwant.graph.scotch", ["kwant/graph/scotch.pyx"]],
+               #  {"libraries" : ["scotch", "scotcherr"]}),
+               (["kwant.graph.core", ["kwant/graph/core.pyx"]], {}),
+               (["kwant.graph.utils", ["kwant/graph/utils.pyx"]], {}),
+               (["kwant.graph.slicer", ["kwant/graph/slicer.pyx",
+                                        "kwant/graph/c_slicer/partitioner.cc",
+                                        "kwant/graph/c_slicer/slicer.cc"]],
+                {}),
+               (["kwant.linalg.lapack", ["kwant/linalg/lapack.pyx"]],
+                {"libraries" : ["lapack", "blas"]}) ]
+
+cmdclass = {}
+ext_modules = []
+include_dirs = [np.get_include()]
+
+for args, keywords in extensions:
+    if not use_cython:
+        if 'language' in keywords:
+            if keywords['language'] == 'c':
+                ext = '.c'
+            elif keywords['language'] == 'c++':
+                ext = '.cpp'
+            else:
+                print >>sys.stderr, 'Unknown language'
+                exit(1)
+        else:
+            ext = '.c'
+        args[1] = [s.replace('.pyx', ext) for s in args[1]]
+    ext_modules.append(Extension(*args, **keywords))
+if use_cython:
+    cmdclass.update({'build_ext': build_ext})
+
+setup(name='kwant',
+      version=version,
+      author='A. R. Akhmerov, C. W. Groth, X. Waintal, M. Wimmer',
+      author_email='cwg@falma.de',
+      description="A package for numerical quantum transport calculations.",
+      license="not to be distributed",
+      packages=["kwant", "kwant.graph", "kwant.linalg", "kwant.physics",
+                "kwant.solvers"],
+      cmdclass=cmdclass,
+      ext_modules=ext_modules,
+      include_dirs = include_dirs)