From 4eb6f2b11f131de27b55f45b5cd8992882193aff Mon Sep 17 00:00:00 2001
From: Joseph Weston <joseph@weston.cloud>
Date: Tue, 27 Feb 2018 14:03:26 +0100
Subject: [PATCH] remove unused functionality from the 'graph' package

This can always be resurrected from Git if it is really needed,
but at the moment it's just 3000 lines that aren't used.
---
 doc/source/reference/kwant.graph.rst |   11 -
 kwant/graph/__init__.py              |    2 +-
 kwant/graph/c_slicer.pxd             |   20 -
 kwant/graph/c_slicer/bucket_list.h   |  300 -----
 kwant/graph/c_slicer/graphwrap.h     |  101 --
 kwant/graph/c_slicer/partitioner.cc  | 1695 --------------------------
 kwant/graph/c_slicer/partitioner.h   |  191 ---
 kwant/graph/c_slicer/slicer.cc       |   65 -
 kwant/graph/c_slicer/slicer.h        |   24 -
 kwant/graph/dissection.py            |   80 --
 kwant/graph/slicer.pyx               |   60 -
 kwant/graph/tests/test_dissection.py |   43 -
 kwant/graph/tests/test_slicer.py     |   63 -
 kwant/graph/tests/test_utils.py      |  127 --
 kwant/graph/utils.pyx                |  282 -----
 setup.py                             |   14 -
 16 files changed, 1 insertion(+), 3077 deletions(-)
 delete mode 100644 kwant/graph/c_slicer.pxd
 delete mode 100644 kwant/graph/c_slicer/bucket_list.h
 delete mode 100644 kwant/graph/c_slicer/graphwrap.h
 delete mode 100644 kwant/graph/c_slicer/partitioner.cc
 delete mode 100644 kwant/graph/c_slicer/partitioner.h
 delete mode 100644 kwant/graph/c_slicer/slicer.cc
 delete mode 100644 kwant/graph/c_slicer/slicer.h
 delete mode 100644 kwant/graph/dissection.py
 delete mode 100644 kwant/graph/slicer.pyx
 delete mode 100644 kwant/graph/tests/test_dissection.py
 delete mode 100644 kwant/graph/tests/test_slicer.py
 delete mode 100644 kwant/graph/tests/test_utils.py
 delete mode 100644 kwant/graph/utils.pyx

diff --git a/doc/source/reference/kwant.graph.rst b/doc/source/reference/kwant.graph.rst
index 33025e42..6b65b3d2 100644
--- a/doc/source/reference/kwant.graph.rst
+++ b/doc/source/reference/kwant.graph.rst
@@ -76,17 +76,6 @@ Graph types
    Graph
    CGraph
 
-Graph algorithms
-----------------
-.. autosummary::
-   :toctree: generated/
-
-   slice
-   make_undirected
-   remove_duplicates
-   induced_subgraph
-   print_graph
-
 Other
 -----
 +----------------+------------------------------------------+
diff --git a/kwant/graph/__init__.py b/kwant/graph/__init__.py
index 29057770..950ded75 100644
--- a/kwant/graph/__init__.py
+++ b/kwant/graph/__init__.py
@@ -10,7 +10,7 @@
 
 # Merge the public interface of all submodules.
 __all__ = []
-for module in ['core', 'defs', 'slicer', 'utils']:
+for module in ['core', 'defs']:
     exec('from . import {0}'.format(module))
     exec('from .{0} import *'.format(module))
     exec('__all__.extend({0}.__all__)'.format(module))
diff --git a/kwant/graph/c_slicer.pxd b/kwant/graph/c_slicer.pxd
deleted file mode 100644
index d43128e6..00000000
--- a/kwant/graph/c_slicer.pxd
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2011-2013 Kwant authors.
-#
-# This file is part of Kwant.  It is subject to the license terms in the file
-# LICENSE.rst found in the top-level directory of this distribution and at
-# http://kwant-project.org/license.  A list of Kwant authors can be found in
-# the file AUTHORS.rst at the top-level directory of this distribution and at
-# http://kwant-project.org/authors.
-
-from .defs cimport gint
-
-cdef extern from "c_slicer/slicer.h":
-    struct Slicing:
-        int nslices
-        int *slice_ptr
-        int *slices
-
-    Slicing *slice(gint, gint *, gint *, gint, gint *,
-                   gint, gint *)
-
-    void freeSlicing(Slicing *)
diff --git a/kwant/graph/c_slicer/bucket_list.h b/kwant/graph/c_slicer/bucket_list.h
deleted file mode 100644
index c95f2428..00000000
--- a/kwant/graph/c_slicer/bucket_list.h
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright 2011-2013 Kwant authors.
-//
-// This file is part of Kwant.  It is subject to the license terms in the file
-// LICENSE.rst found in the top-level directory of this distribution and at
-// http://kwant-project.org/license.  A list of Kwant authors can be found in
-// the file AUTHORS.rst at the top-level directory of this distribution and at
-// http://kwant-project.org/authors.
-
-#ifndef BUCKET_LIST_H
-#define BUCKET_LIST_H
-
-#include <vector>
-#include <list>
-#include <utility>
-#include <cstdlib>
-
-using std::vector;
-using std::list;
-using std::pair;
-using std::make_pair;
-
-class bucket_list
-{
- private:
-  vector<list<int> > bucket;
-  int max_value;
-  int lower_bound, upper_bound;
-
-  vector<list<int>::iterator > &reference_list;
-  const vector<int> &index_list;
-
- public:
-  bucket_list(int _lower_bound, int _upper_bound,
-              vector<list<int>::iterator > &_reference_list,
-              const vector<int> &_index_list) :
-    bucket(_upper_bound-_lower_bound+2, list<int>(0)),
-    max_value(_lower_bound-1),
-    lower_bound(_lower_bound), upper_bound(_upper_bound),
-    reference_list(_reference_list), index_list(_index_list)
-  {
-    //note that the vector bucket also contains an entry
-    //for lower_bound-1 !
-    //and we fill it with the default variable "-1"
-    //so that the past the end bucket is never empty.
-    //That makes the algorithms simpler
-    bucket[0].push_back(-1);
-  }
-
-  inline bool empty() const
-  {
-    return max_value<lower_bound;
-  }
-
-  inline int front() const
-  {
-    return bucket[max_value - lower_bound + 1].front();
-  }
-
-  inline void pop_front()
-  {
-    if(!empty()) {
-      bucket[max_value - lower_bound + 1].pop_front();
-
-      if(bucket[max_value - lower_bound + 1].empty()) {
-        while(bucket[--max_value - lower_bound + 1].empty())
-          ;
-      }
-    }
-  }
-
-  inline int max_key() const
-  {
-    return max_value;
-  }
-
-  inline void push_back(int _key, int _data)
-  {
-    reference_list[index_list[_data]]=bucket[_key - lower_bound + 1].
-      insert(bucket[_key - lower_bound + 1].end(), _data);
-
-    if(_key > max_value) max_value=_key;
-  }
-
-  inline void push_front(int _key, int _data)
-  {
-    reference_list[index_list[_data]]=bucket[_key - lower_bound + 1].
-      insert(bucket[_key - lower_bound + 1].begin(), _data);
-
-    if(_key > max_value) max_value=_key;
-  }
-
-  inline void push_randomly(int _key, int _data)
-  {
-    //do a push_back() or a push_front with equal
-    //probability
-    if(rand()%2) {
-      push_front(_key, _data);
-    }
-    else {
-      push_back(_key, _data);
-    }
-  }
-
-  inline void rearrange_back(int _old_key, int _new_key, int _data)
-  {
-    bucket[_old_key - lower_bound +1].
-      erase(reference_list[index_list[_data]]);
-
-    reference_list[index_list[_data]]=bucket[_new_key - lower_bound + 1].
-      insert(bucket[_new_key - lower_bound + 1].end(), _data);
-
-    if(_new_key > max_value) {
-      max_value=_new_key;
-    }
-    else if(bucket[max_value - lower_bound + 1].empty()) {
-      while(bucket[--max_value - lower_bound + 1].empty())
-        ;
-    }
-  }
-
-  inline void rearrange_front(int _old_key, int _new_key, int _data)
-  {
-    bucket[_old_key - lower_bound +1].
-      erase(reference_list[index_list[_data]]);
-
-    reference_list[index_list[_data]]=bucket[_new_key - lower_bound + 1].
-      insert(bucket[_new_key - lower_bound + 1].begin(), _data);
-
-    if(_new_key > max_value) {
-      max_value=_new_key;
-    }
-    else if(bucket[max_value - lower_bound + 1].empty()) {
-      while(bucket[--max_value - lower_bound + 1].empty())
-        ;
-    }
-  }
-
-  inline void rearrange_randomly(int _old_key, int _new_key, int _data)
-  {
-    if(rand()%2)
-      rearrange_back(_old_key, _new_key, _data);
-    else
-      rearrange_front(_old_key, _new_key, _data);
-  }
-
-  inline void remove(int _key, int _data)
-  {
-    bucket[_key - lower_bound +1].
-      erase(reference_list[index_list[_data]]);
-
-    if(bucket[max_value - lower_bound + 1].empty()) {
-      while(bucket[--max_value - lower_bound + 1].empty())
-        ;
-    }
-  }
-
- private:
-  inline list<int>::iterator manual_push_back(int _key, int _data)
-  {
-    if(_key > max_value) max_value=_key;
-
-    return bucket[_key - lower_bound + 1].
-      insert(bucket[_key - lower_bound + 1].end(), _data);
-  }
-
-  friend class double_bucket_list;
-};
-
-//-------------------
-
-class double_bucket_list
-{
- private:
-  vector<bucket_list> bucket;
-
-  int max_value;
-  int lower_bound, upper_bound;
-
-  vector<list<int>::iterator > &reference_list;
-  const vector<int> &index_list;
-
- public:
-  double_bucket_list(int _lower_bound1, int _upper_bound1,
-                     int _lower_bound2, int _upper_bound2,
-                     vector<list<int>::iterator > &_reference_list,
-                     const vector<int> &_index_list) :
-    bucket(_upper_bound1-_lower_bound1+2,
-           bucket_list(_lower_bound2, _upper_bound2,
-           _reference_list, _index_list)),
-    max_value(_lower_bound1-1),
-    lower_bound(_lower_bound1), upper_bound(_upper_bound1),
-    reference_list(_reference_list), index_list(_index_list)
-  {
-    //note that the vector bucket also contains an entry
-    //for lower_bound-1 !
-    //and we push a default entry into the past the
-    //end bucket of the corresponding bucket_list
-    bucket[0].manual_push_back(_lower_bound2, -1);
-  }
-
-  inline bool empty()
-  {
-    return max_value < lower_bound;
-  }
-
-  inline int front() const
-  {
-    return bucket[max_value- lower_bound + 1].front();
-  }
-
-  inline void pop_front()
-  {
-    if(!empty()) {
-      bucket[max_value - lower_bound + 1].pop_front();
-
-      if(bucket[max_value - lower_bound + 1].empty()) {
-        while(bucket[--max_value - lower_bound + 1].empty())
-          ;
-      }
-    }
-  }
-
-  inline pair<int,int> max_key() const
-  {
-    return make_pair(max_value, bucket[max_value - lower_bound + 1].max_key());
-  }
-
-  inline void push_back(int _key1, int _key2, int _data)
-  {
-    bucket[_key1 - lower_bound + 1].push_back(_key2, _data);
-
-    if(_key1 > max_value) max_value=_key1;
-  }
-
-  inline void push_front(int _key1, int _key2, int _data)
-  {
-    bucket[_key1 - lower_bound + 1].push_front(_key2, _data);
-
-    if(_key1 > max_value) max_value=_key1;
-  }
-
-  inline void push_randomly(int _key1, int _key2, int _data)
-  {
-    if(rand()%2)
-      push_back(_key1, _key2, _data);
-    else
-      push_front(_key1, _key2, _data);
-  }
-
-  inline void rearrange_back(int _old_key1, int _old_key2,
-                             int _new_key1, int _new_key2, int _data)
-  {
-    if(_old_key1 == _new_key1) {
-      bucket[_old_key1 - lower_bound +1].rearrange_back(_old_key2, _new_key2, _data);
-    }
-    else {
-      bucket[_old_key1 - lower_bound +1].remove(_old_key2, _data);
-      bucket[_new_key1 - lower_bound +1].push_back(_new_key2, _data);
-
-      if(_new_key1 > max_value) {
-        max_value=_new_key1;
-      }
-      else if(bucket[max_value - lower_bound + 1].empty()) {
-        while(bucket[--max_value - lower_bound + 1].empty())
-          ;
-      }
-    }
-  }
-
-  inline void rearrange_front(int _old_key1, int _old_key2,
-                              int _new_key1, int _new_key2, int _data)
-  {
-    if(_old_key1 == _new_key1) {
-      bucket[_old_key1 - lower_bound +1].rearrange_front(_old_key2, _new_key2, _data);
-    }
-    else {
-      bucket[_old_key1 - lower_bound +1].remove(_old_key2, _data);
-      bucket[_new_key1 - lower_bound +1].push_front(_new_key2, _data);
-
-      if(_new_key1 > max_value) {
-        max_value=_new_key1;
-      }
-      else if(bucket[max_value - lower_bound + 1].empty()) {
-        while(bucket[--max_value - lower_bound + 1].empty())
-          ;
-      }
-    }
-  }
-
-  inline void rearrange_randomly(int _old_key1, int _old_key2,
-                                 int _new_key1, int _new_key2, int _data)
-  {
-    if(rand()%2)
-      rearrange_back(_old_key1, _old_key2, _new_key1, _new_key2, _data);
-    else
-      rearrange_front(_old_key1, _old_key2, _new_key1, _new_key2, _data);
-  }
-};
-
-#endif
diff --git a/kwant/graph/c_slicer/graphwrap.h b/kwant/graph/c_slicer/graphwrap.h
deleted file mode 100644
index 67442b02..00000000
--- a/kwant/graph/c_slicer/graphwrap.h
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2011-2013 Kwant authors.
-//
-// This file is part of Kwant.  It is subject to the license terms in the file
-// LICENSE.rst found in the top-level directory of this distribution and at
-// http://kwant-project.org/license.  A list of Kwant authors can be found in
-// the file AUTHORS.rst at the top-level directory of this distribution and at
-// http://kwant-project.org/authors.
-
-//-*-C++-*-
-#ifndef _GRAPH_WRAPPER_H
-#define _GRAPH_WRAPPER_H
-
-#include <iostream>
-#include <vector>
-#include <deque>
-#include <cmath>
-
-class GraphWrapper
-{
-public:
-  //Some helper classes
-
-  //functor to compare the degree of vertices
-  class DegreeComparator
-  {
-  private:
-    const GraphWrapper &graph;
-
-  public:
-    DegreeComparator( const GraphWrapper &_graph ) : graph(_graph)
-    {
-    }
-
-    bool operator()( int _vertex1, int _vertex2 )
-    {
-      return graph.getEdges(_vertex1).size() < graph.getEdges(_vertex2).size();
-    }
-  };
-
-  template<typename T>
-  class VectorProxy {
-    T *begin_it, *end_it;
-
-  public:
-    VectorProxy(T *_begin_it, T *_end_it) :
-      begin_it(_begin_it), end_it(_end_it)
-    {}
-
-    T *begin() const
-    {
-      return begin_it;
-    }
-
-    T *end() const
-    {
-      return end_it;
-    }
-
-    size_t size() const
-    {
-      return end_it-begin_it;
-    }
-  };
-
-
-protected:
-  //data structure to hold graph in compressed form
-  int *vertex_ptr;
-  int *edges;
-
-  int vertex_num;
-
-public:
-  GraphWrapper(int _vnum, int *_vertex_ptr, int *_edges) :
-    vertex_ptr(_vertex_ptr), edges(_edges), vertex_num(_vnum)
-  {
-  }
-
-public:
-   //information about the graph
-
-   //! number of vertices in the graph
-   inline int size () const
-   {
-     return vertex_num;
-   }
-   //!Get the total number of edges in the graph
-   inline int edgeSize() const
-   {
-     return vertex_ptr[vertex_num];
-   }
-
-  //! functions for accessing the edge structure
-  inline VectorProxy<int> getEdges( int _vertex ) const
-  {
-    return VectorProxy<int>(edges+vertex_ptr[_vertex],
-			    edges+vertex_ptr[_vertex+1]);
-  }
-};
-
-#endif
diff --git a/kwant/graph/c_slicer/partitioner.cc b/kwant/graph/c_slicer/partitioner.cc
deleted file mode 100644
index ea74bddb..00000000
--- a/kwant/graph/c_slicer/partitioner.cc
+++ /dev/null
@@ -1,1695 +0,0 @@
-// Copyright 2011-2013 Kwant authors.
-//
-// This file is part of Kwant.  It is subject to the license terms in the file
-// LICENSE.rst found in the top-level directory of this distribution and at
-// http://kwant-project.org/license.  A list of Kwant authors can be found in
-// the file AUTHORS.rst at the top-level directory of this distribution and at
-// http://kwant-project.org/authors.
-
-#include "partitioner.h"
-
-//----------------------------------------------------
-//Functions that do the bisection
-//----------------------------------------------------
-
-void Partitioner::bisectFirst(std::vector<int> &_left, std::vector<int> &_right,
-                              double _tolerance,
-                              int _min_opt_passes,
-                              int _max_opt_passes)
-{
-#if DEBUG_LEVEL>=3
-  cout << "bisectFirst" << endl;
-#endif
-
-  //First, determine the total number of slices
-  int slices=0;
-
-  if(_left.size() && _right.size()) {
-    //a starting block is defined on the left and on the right
-    std::deque<int> vertex_stack(_left.begin(), _left.end());
-    std::deque<int> rank_stack(_left.size(), 1);
-
-    std::vector<bool> locked( parts[0].size(), false);
-    std::vector<bool> rightside( parts[0].size(), false);
-
-    std::vector<int>::const_iterator vtx=_right.begin();
-    while(vtx!=_right.end()) {
-      rightside[*vtx]=true;
-
-      vtx++;
-    }
-
-    bool done=false;
-
-    int current_rank=0;
-
-    //mark all the border vertices as visited
-    std::deque<int>::const_iterator border_vertex = vertex_stack.begin();
-    while(border_vertex != vertex_stack.end()) {
-      locked[sliceIndex[*border_vertex]]=true;
-
-      border_vertex++;
-    }
-
-    while(vertex_stack.size() && !done ) {
-      int vertex = vertex_stack.front();
-      int rank   = rank_stack.front();
-
-      vertex_stack.pop_front();
-      rank_stack.pop_front();
-
-      //Have we reached a new slice?
-      if(rank > current_rank) {
-        slices++;
-        current_rank=rank;
-      }
-
-      //visit the edges
-      int *edge=graph.getEdges(vertex).begin();
-      while(edge != graph.getEdges(vertex).end()) {
-        //Is the node inside the central slice or already at the right
-        //border?
-        if(rightside[*edge]) {
-          done=true;
-          break;
-        }
-
-        if(!locked[sliceIndex[*edge]]) {
-          vertex_stack.push_back(*edge);
-          rank_stack.push_back(rank+1);
-
-          locked[sliceIndex[*edge]]=true;
-        }
-
-        edge++;
-      }
-    }
-
-    slices++;
-
-#if DEBUG_LEVEL>=3
-    cout << "slices: " << slices << endl;
-#endif
-  }
-  else if(_left.size() || _right.size()) {
-    //TODO, not implemented yet
-  }
-  else {
-    int left_border_vertex, right_border_vertex;
-
-    //TODO, not implemented yet
-    //slices=findPseudoDiameter(graph, left_border_vertex, right_border_vertex);
-
-#if DEBUG_LEVEL>=3
-    cout << "slices: " << slices << endl;
-#endif
-
-    _left.push_back(left_border_vertex);
-    _right.push_back(right_border_vertex);
-  }
-
-#if DEBUG_LEVEL>=3
-  cout << "BTD: Total number of slices = " << slices << endl;
-#endif
-
-  //Return if there is only one slice
-  if(slices<2) return;
-
-  //Find the maximum number of edges (needed or Fiduccia-Mattheyses)
-  size_t max_edges=0;
-
-  for(size_t ivertex=0; ivertex < parts[0].size(); ivertex++) {
-    if(graph.getEdges(parts[0][ivertex]).size() > max_edges) {
-      max_edges=graph.getEdges(parts[0][ivertex]).size();
-    }
-  }
-
-  //insert enough space for the parts arrays
-  parts.insert(parts.begin()+1, slices-1, std::vector<int>(0));
-
-  //now do the bisection
-  bisect(0, slices,
-         _left, _right,
-         max_edges, _tolerance,
-         _min_opt_passes, _max_opt_passes);
-}
-
-//-------------
-
-void Partitioner::bisect(int _part, int _part_slices,
-                         vector<int> &_left_border,
-                         vector<int> &_right_border,
-                         int _max_edges, double _tolerance,
-                         int _min_opt_passes, int _max_opt_passes)
-{
-  std::vector<bool> permanently_locked(parts[_part].size(),false);
-  std::vector<bool> locked(parts[_part].size(),false);
-
-  //the BFS searches start from the border vertices
-  std::deque<int> left_stack(_left_border.begin(), _left_border.end()),
-    right_stack(_right_border.begin(), _right_border.end());
-  std::deque<int> left_rank(_left_border.size(), 1),
-    right_rank(_right_border.size(), 1);
-
-  //The number of slices is already given
-
-  int slices=_part_slices;
-
-#if DEBUG_LEVEL>=3
-  cout << "bisect: Total number of slices = " << slices << endl;
-#endif
-
-  //Return if there is only one slice
-  //(should not happen)
-  if(slices<2) return;
-
-  //determine the sizes of the two parts in slices
-  int left_slices=slices/2;
-  int right_slices=slices-left_slices;
-
-  //calculate the projected slice sizes
-  int left_size=left_slices*parts[_part].size()/slices;
-  int right_size=parts[_part].size()-left_size;
-
-  int real_left_size=0, real_right_size=0;
-
-  //compute the new indexes
-  //according to the index of the first slice in the part
-  int left_index=_part;
-  int right_index=left_index+left_slices;
-
-#if DEBUG_LEVEL >= 3
-  cout << "New indices " << left_index << " " << right_index << endl;
-  cout << "New sizes " << left_size << " " << right_size << endl;
-#endif
-
-  //Now do a breadth first search from both sides
-
-  //mark all the border vertices as locked and ineligible
-  std::deque<int>::const_iterator border_vertex;
-
-  border_vertex=left_stack.begin();
-  while(border_vertex != left_stack.end()) {
-    locked[sliceIndex[*border_vertex]]=true;
-    permanently_locked[sliceIndex[*border_vertex]]=true;
-
-    border_vertex++;
-  }
-
-  border_vertex=right_stack.begin();
-  while(border_vertex != right_stack.end()) {
-    locked[sliceIndex[*border_vertex]]=true;
-    permanently_locked[sliceIndex[*border_vertex]]=true;
-
-    border_vertex++;
-  }
-
-  //  cout << "Found border" << endl;
-
-  int max_rank=1;
-
-  while(left_stack.size() || right_stack.size()) {
-
-    //first from right
-    if(max_rank>right_slices) {
-      break;
-    }
-
-    while(right_stack.size()) {
-      int vertex=right_stack.front();
-      int rank=right_rank.front();
-
-      //stop if we have exceeded the desired rank
-      if(rank>max_rank) {
-        break;
-      }
-
-      right_stack.pop_front();
-      right_rank.pop_front();
-
-      inSlice[vertex]=right_index;
-      real_right_size++;
-
-      int *edge=graph.getEdges(vertex).begin();
-      while(edge != graph.getEdges(vertex).end()) {
-
-        if(inSlice[*edge]==_part &&
-           !locked[sliceIndex[*edge]]) {
-
-          right_stack.push_back(*edge);
-          right_rank.push_back(rank+1);
-
-          locked[sliceIndex[*edge]]=true;
-          if(rank < right_slices) {
-            permanently_locked[sliceIndex[*edge]]=true;
-          }
-        }
-
-        edge++;
-      }
-    }
-
-    //then from left
-    if(max_rank>left_slices) {
-      break;
-    }
-
-    while(left_stack.size()) {
-      int vertex=left_stack.front();
-      int rank=left_rank.front();
-
-      //stop if we have exceeded the desired rank
-      if(rank>max_rank) {
-        break;
-      }
-
-      left_stack.pop_front();
-      left_rank.pop_front();
-
-      inSlice[vertex]=left_index;
-      real_left_size++;
-
-      int *edge=graph.getEdges(vertex).begin();
-      while(edge != graph.getEdges(vertex).end()) {
-
-        if(inSlice[*edge]==_part &&
-           !locked[sliceIndex[*edge]]) {
-
-          left_stack.push_back(*edge);
-          left_rank.push_back(rank+1);
-
-          locked[sliceIndex[*edge]]=true;
-
-          if(rank < left_slices) {
-            permanently_locked[sliceIndex[*edge]]=true;
-          }
-        }
-
-        edge++;
-      }
-    }
-
-    //next slices
-    max_rank++;
-  }
-
-  distribute(left_stack, left_rank,
-             left_index, left_size, real_left_size,
-             right_stack, right_rank,
-             right_index, right_size, real_right_size,
-             _part, locked, max_rank);
-
-  //Now optimize the structure according to Fiduccia-Mattheyses
-  for(int i=0; i<_max_opt_passes; i++) {
-    locked=permanently_locked;
-
-    int result=optimize(left_index, left_size, right_index, right_size,
-                        locked, _max_edges,
-                        static_cast<int>(_tolerance*parts[left_index].size()));
-
-    if(!result ||
-       (i >= _min_opt_passes && result == 1) ) {
-      break;
-    }
-  }
-
-  //create new part arrays
-  {
-    std::vector<int> left_part, right_part;
-
-    for(size_t ivertex=0; ivertex < parts[left_index].size(); ivertex++) {
-      int vertex=parts[left_index][ivertex];
-
-      if(inSlice[vertex]==left_index) {
-        left_part.push_back(vertex);
-      }
-      else {
-        right_part.push_back(vertex);
-      }
-    }
-
-    parts[left_index].swap(left_part);
-    parts[right_index].swap(right_part);
-  }
-
-  //Now update the sliceIndex
-  for(size_t ivertex=0; ivertex < parts[left_index].size(); ivertex++) {
-    sliceIndex[parts[left_index][ivertex]]=ivertex;
-  }
-  for(size_t ivertex=0; ivertex < parts[right_index].size(); ivertex++) {
-    sliceIndex[parts[right_index][ivertex]]=ivertex;
-  }
-
-  //find the internal borders
-  vector<int> internal_left_border, internal_right_border;
-
-  for(size_t ivertex=0; ivertex < parts[left_index].size(); ivertex++) {
-    int vertex=parts[left_index][ivertex];
-
-    int *edge=graph.getEdges(vertex).begin();
-    while(edge != graph.getEdges(vertex).end()) {
-
-      if(inSlice[*edge]==right_index) {
-        internal_left_border.push_back(vertex);
-
-        break;
-      }
-
-      edge++;
-    }
-  }
-
-  for(size_t ivertex=0; ivertex < parts[right_index].size(); ivertex++) {
-    int vertex=parts[right_index][ivertex];
-
-    int *edge=graph.getEdges(vertex).begin();
-    while(edge != graph.getEdges(vertex).end()) {
-
-      if(inSlice[*edge]==left_index) {
-        internal_right_border.push_back(vertex);
-
-        break;
-      }
-
-      edge++;
-    }
-  }
-
-
-  /*  //debug
-      ostringstream convert;
-      convert << "part" << counter++ << ".eps";
-      write2DToEPS(convert.str().c_str());
-
-      cout << "Test " <<counter << " " << parts[left_index].size() << " " << parts[right_index].size() << endl;
-
-  */
-  //Recursively refine the bisection
-  if(left_slices>1) {
-    bisect(left_index, left_slices,
-           _left_border, internal_left_border,
-           _max_edges, _tolerance,
-           _min_opt_passes, _max_opt_passes);
-  }
-  if(right_slices>1) {
-    bisect(right_index, right_slices,
-           internal_right_border, _right_border,
-           _max_edges, _tolerance,
-           _min_opt_passes, _max_opt_passes);
-  }
-}
-
-//-------------------------------------------------------
-//The initial distributions
-//-------------------------------------------------------
-
-void Partitioner::
-NaturalBalanced_distribute(std::deque<int> &_left_stack, std::deque<int> &_left_rank,
-                           int _left_index, int _left_size, int &_real_left_size,
-                           std::deque<int> &_right_stack, std::deque<int> &_right_rank,
-                           int _right_index, int _right_size, int &_real_right_size,
-                           int _part, std::vector<bool> &_locked,
-                           int _current_rank)
-{
-  int max_rank=_current_rank;
-
-  while(_left_stack.size() || _right_stack.size()) {
-
-    //first from right
-    while(_right_stack.size()) {
-      int vertex=_right_stack.front();
-      int rank=_right_rank.front();
-
-      //stop if we have exceeded the desired rank
-      if(rank>max_rank) {
-        break;
-      }
-
-      _right_stack.pop_front();
-      _right_rank.pop_front();
-
-      //Check if we have already exceeded the size of this
-      //part of the bisection
-      if(_real_right_size >= _right_size) {
-        inSlice[vertex]=_left_index;
-        _real_left_size++;
-      }
-      else {
-        inSlice[vertex]=_right_index;
-        _real_right_size++;
-      }
-
-      int *edge=graph.getEdges(vertex).begin();
-      while(edge != graph.getEdges(vertex).end()) {
-
-        if(inSlice[*edge]==_part &&
-           !_locked[sliceIndex[*edge]]) {
-
-          _right_stack.push_back(*edge);
-          _right_rank.push_back(rank+1);
-
-          _locked[sliceIndex[*edge]]=true;
-        }
-
-        edge++;
-      }
-    }
-
-    //then from left
-    while(_left_stack.size()) {
-      int vertex=_left_stack.front();
-      int rank=_left_rank.front();
-
-      //stop if we have exceeded the desired rank
-      if(rank>max_rank) {
-        break;
-      }
-
-      _left_stack.pop_front();
-      _left_rank.pop_front();
-
-      //Check if we have already exceeded the size of this
-      //part of the bisection
-      if(_real_left_size >= _left_size) {
-        inSlice[vertex]=_right_index;
-        _real_right_size++;
-      }
-      else {
-        inSlice[vertex]=_left_index;
-        _real_left_size++;
-      }
-
-      int *edge=graph.getEdges(vertex).begin();
-      while(edge != graph.getEdges(vertex).end()) {
-
-        if(inSlice[*edge]==_part &&
-           !_locked[sliceIndex[*edge]]) {
-
-          _left_stack.push_back(*edge);
-          _left_rank.push_back(rank+1);
-
-          _locked[sliceIndex[*edge]]=true;
-
-        }
-
-        edge++;
-      }
-    }
-
-    //next slices
-    max_rank++;
-  }
-
-}
-
-void Partitioner::
-NaturalUnbalanced_distribute(std::deque<int> &_left_stack, std::deque<int> &_left_rank,
-                             int _left_index, int _left_size, int &_real_left_size,
-                             std::deque<int> &_right_stack, std::deque<int> &_right_rank,
-                             int _right_index, int _right_size, int &_real_right_size,
-                             int _part, std::vector<bool> &_locked,
-                             int _current_rank)
-{
-  int max_rank=_current_rank;
-
-  while(_left_stack.size() || _right_stack.size()) {
-
-    //first from right
-    while(_right_stack.size()) {
-      int vertex=_right_stack.front();
-      int rank=_right_rank.front();
-
-      //stop if we have exceeded the desired rank
-      if(rank>max_rank) {
-        break;
-      }
-
-      _right_stack.pop_front();
-      _right_rank.pop_front();
-
-      inSlice[vertex]=_right_index;
-      _real_right_size++;
-
-      int *edge=graph.getEdges(vertex).begin();
-      while(edge != graph.getEdges(vertex).end()) {
-
-        if(inSlice[*edge]==_part &&
-           !_locked[sliceIndex[*edge]]) {
-
-          _right_stack.push_back(*edge);
-          _right_rank.push_back(rank+1);
-
-          _locked[sliceIndex[*edge]]=true;
-        }
-
-        edge++;
-      }
-    }
-
-    //then from left
-    while(_left_stack.size()) {
-      int vertex=_left_stack.front();
-      int rank=_left_rank.front();
-
-      //stop if we have exceeded the desired rank
-      if(rank>max_rank) {
-        break;
-      }
-
-      _left_stack.pop_front();
-      _left_rank.pop_front();
-
-      inSlice[vertex]=_left_index;
-      _real_left_size++;
-
-      int *edge=graph.getEdges(vertex).begin();
-      while(edge != graph.getEdges(vertex).end()) {
-
-        if(inSlice[*edge]==_part &&
-           !_locked[sliceIndex[*edge]]) {
-
-          _left_stack.push_back(*edge);
-          _left_rank.push_back(rank+1);
-
-          _locked[sliceIndex[*edge]]=true;
-
-        }
-
-        edge++;
-      }
-    }
-
-    //next slices
-    max_rank++;
-  }
-
-}
-
-void Partitioner::
-Random_distribute(std::deque<int> &_left_stack, std::deque<int> &_left_rank,
-                  int _left_index, int _left_size, int &_real_left_size,
-                  std::deque<int> &_right_stack, std::deque<int> &_right_rank,
-                  int _right_index, int _right_size, int &_real_right_size,
-                  int _part, std::vector<bool> &_locked,
-                  int _current_rank)
-{
-  int max_rank=_current_rank;
-
-  while(_left_stack.size() || _right_stack.size()) {
-
-    //first from right
-    while(_right_stack.size()) {
-      int vertex=_right_stack.front();
-      int rank=_right_rank.front();
-
-      //stop if we have exceeded the desired rank
-      if(rank>max_rank) {
-        break;
-      }
-
-      _right_stack.pop_front();
-      _right_rank.pop_front();
-
-      if(rand()%2) {
-        inSlice[vertex]=_right_index;
-        _real_right_size++;
-      }
-      else {
-        inSlice[vertex]=_left_index;
-        _real_left_size++;
-      }
-
-      int *edge=graph.getEdges(vertex).begin();
-      while(edge != graph.getEdges(vertex).end()) {
-
-        if(inSlice[*edge]==_part &&
-           !_locked[sliceIndex[*edge]]) {
-
-          _right_stack.push_back(*edge);
-          _right_rank.push_back(rank+1);
-
-          _locked[sliceIndex[*edge]]=true;
-        }
-
-        edge++;
-      }
-    }
-
-    //then from left
-    while(_left_stack.size()) {
-      int vertex=_left_stack.front();
-      int rank=_left_rank.front();
-
-      //stop if we have exceeded the desired rank
-      if(rank>max_rank) {
-        break;
-      }
-
-      _left_stack.pop_front();
-      _left_rank.pop_front();
-
-      if(rand()%2) {
-        inSlice[vertex]=_left_index;
-        _real_left_size++;
-      }
-      else {
-        inSlice[vertex]=_right_index;
-        _real_right_size++;
-      }
-
-      int *edge=graph.getEdges(vertex).begin();
-      while(edge != graph.getEdges(vertex).end()) {
-
-        if(inSlice[*edge]==_part &&
-           !_locked[sliceIndex[*edge]]) {
-
-          _left_stack.push_back(*edge);
-          _left_rank.push_back(rank+1);
-
-          _locked[sliceIndex[*edge]]=true;
-
-        }
-
-        edge++;
-      }
-    }
-
-    //next slices
-    max_rank++;
-  }
-}
-
-//-------------------------------------------------------
-//Optimizers
-//-------------------------------------------------------
-
-int Partitioner::
-FM_MinCut_optimize(int _left_index, int _left_size,
-                   int _right_index, int _right_size,
-                   std::vector<bool> &_locked, int _pmax, int _tolerance)
-{
-  std::vector<int> gain(_left_size+_right_size, 0);
-
-  std::vector<list<int>::iterator> vertex_list(_left_size+_right_size);
-  std::vector<bucket_list> bucket(2, bucket_list(-_pmax, _pmax,
-                                                 vertex_list, sliceIndex));
-
-  std::vector<int> change_buffer;
-  int size[2];
-  int part_index[2];
-
-  part_index[0]=_left_index;
-  part_index[1]=_right_index;
-
-  //relative sizes and tolerances
-
-  size[0] = -_left_size;
-  size[1] = -_right_size;
-
-  int tolerance=std::max(1, _tolerance);
-  //In order for Fiduccia-Mattheyses to work,
-  //we may at least allow an imbalance of 1 vertex
-
-#if DEBUG_LEVEL>=3
-  cout << _left_size << " " << _right_size << " " << parts[_left_index].size() << endl;
-#endif
-
-  //Initialize gain values
-
-  //first left part
-  for(size_t ivertex=0; ivertex < parts[_left_index].size(); ivertex++) {
-    int index, part_index;
-    int vertex=parts[_left_index][ivertex];
-
-    if(inSlice[vertex] == _left_index) {
-      part_index=_left_index;
-      index=0;
-    }
-    else {
-      part_index=_right_index;
-      index=1;
-    }
-
-    //Update the partition size
-    size[index]++;
-
-    //For all the free vertices we go through
-
-    //all the edges to compute the gain
-    if(!_locked[sliceIndex[vertex]]) {
-      int *edge=graph.getEdges(vertex).begin();
-
-      while(edge != graph.getEdges(vertex).end()) {
-        if(inSlice[*edge]!=part_index) {
-          gain[sliceIndex[vertex]]++;
-        }
-        else {
-          gain[sliceIndex[vertex]]--;
-        }
-
-        edge++;
-      }
-
-      //Finally add the vertex to the bucket list
-      bucket[index].push_back(gain[sliceIndex[vertex]], vertex);
-    }
-  }
-
-#if DEBUG_LEVEL>=3
-  cout << size[0] << " " << size[1] << endl;
-#endif
-
-  //characteristics of the best partition
-  int relative_gain=0;
-
-  int old_size=abs(size[0]);
-  //int old_gain not needed, -> relative gain!
-
-  int best_gain=0;
-  int best_size=abs(size[0]);
-
-  int best_move=0;
-
-  while(true) {
-    //Choose the cell that should be moved
-    int from, to;
-
-    int cur_gain[2];
-
-    cur_gain[0]=bucket[0].max_key();
-    cur_gain[1]=bucket[1].max_key();
-
-    //Only allow moves that improve or maintain balance:
-    //check if partition is imbalanced or could be imbalanced
-    //at the next move
-    //(Note: it doesn't matter whether we compare size[0] or
-    // size[1] with tolerance, since size[0]+size[1]=const
-    if(abs(size[0])>=tolerance) {
-      if(size[0] > size[1]) {
-        cur_gain[1]=-_pmax-1;
-      }
-      else {
-        cur_gain[0]=-_pmax-1;
-      }
-    }
-
-    //Choose the cell with the largest gain
-    //(Note: moves that could cause imbalance
-    // are prevented by the previous checks)
-    if(cur_gain[0] != cur_gain[1]) {
-      from=(cur_gain[0] > cur_gain[1]) ? 0 : 1;
-      to=(cur_gain[0] > cur_gain[1]) ? 1 : 0;
-    }
-    //if the gains are equal, check if no further
-    //moves are possible, otherwise choose
-    //the move that improves balance, if it doesn't matter
-    //take the left slice
-    else {
-      if(cur_gain[0] > -_pmax-1) {
-        from=(size[0] >= size[1]) ? 0 : 1;
-        to=(size[0] >= size[1]) ? 1 : 0;
-      }
-      else {
-        //no further moves are possible
-
-        break;
-      }
-    }
-
-    //Remove the vertex and adjust partition size
-    int vertex=bucket[from].front();
-    bucket[from].pop_front();
-
-    _locked[sliceIndex[vertex]]=true;
-    inSlice[vertex]=part_index[to];
-
-    size[from]--;
-    size[to]++;
-
-    relative_gain+=cur_gain[from];
-    //    relative_gain+=gain[sliceIndex[vertex]];
-
-    change_buffer.push_back(vertex);
-
-    //cout << "Moving vertex " << vertex
-    //    << " with gain " << cur_gain[from] << endl;
-
-    //update gains and adjust bucket structure
-    int *edge=graph.getEdges(vertex).begin();
-    while(edge != graph.getEdges(vertex).end()) {
-
-      if(inSlice[*edge]==part_index[from]) {
-        if(!_locked[sliceIndex[*edge]]) {
-          int old_gain=gain[sliceIndex[*edge]];
-
-          gain[sliceIndex[*edge]]+=2;
-
-          bucket[from].rearrange_back(old_gain, old_gain+2, *edge);
-        }
-      }
-      else if(inSlice[*edge]==part_index[to]) {
-        if(!_locked[sliceIndex[*edge]]) {
-          int old_gain=gain[sliceIndex[*edge]];
-
-          gain[sliceIndex[*edge]]-=2;
-
-          bucket[to].rearrange_back(old_gain, old_gain-2, *edge);
-        }
-      }
-      edge++;
-    }
-
-
-    //Have we found a better partition
-    if(relative_gain > best_gain ||
-       (relative_gain == best_gain && best_size > abs(size[0]))) {
-      best_gain=relative_gain;
-      best_size=abs(size[0]);
-
-      best_move=change_buffer.size();
-    }
-
-  }
-
-  //Undo all the changes that did not improve the
-  //bisection any more:
-  for(size_t ivertex=best_move; ivertex < change_buffer.size(); ivertex++) {
-    int vertex=change_buffer[ivertex];
-
-    if(inSlice[vertex] == _left_index) {
-      inSlice[vertex]=_right_index;
-    }
-    else {
-      inSlice[vertex]=_left_index;
-    }
-  }
-
-  if(best_move == 0) {
-    return 0; //no improvements possible
-  }
-  else {
-    if(best_gain > 0 ||
-       (best_gain == 0 && best_size < old_size) ){
-      return 2; //definite improvement
-    }
-    else {
-      return 1; //found a partition with the same properties as before
-                //(needed to get out of local minima)
-    }
-  }
-}
-
-
-//----------------
-
-int Partitioner::
-FM_MinNetCut_optimize(int _left_index, int _left_size,
-                      int _right_index, int _right_size,
-                      std::vector<bool> &_locked, int _pmax, int _tolerance)
-{
-#if DEBUG_LEVEL>=3
-  cout << "In FM MinNetCut" << endl;
-#endif
-
-  std::vector<int> net_gain(_left_size+_right_size, 0);
-  std::vector<int> edge_gain(_left_size+_right_size, 0);
-
-  std::vector<int> net_distribution[2];
-
-  std::vector<list<int>::iterator> vertex_list(_left_size+_right_size);
-  std::vector<bucket_list> bucket(2, bucket_list(-_pmax, _pmax,
-                                                 vertex_list, sliceIndex));
-
-  std::vector<int> change_buffer;
-  int size[2];
-  int part_index[2];
-
-  net_distribution[0].resize(_left_size+_right_size);
-  net_distribution[1].resize(_left_size+_right_size);
-
-  part_index[0]=_left_index;
-  part_index[1]=_right_index;
-
-  //relative sizes and tolerances
-
-  size[0] = -_left_size;
-  size[1] = -_right_size;
-
-  int tolerance=std::max(1, _tolerance);
-  //In order for Fiduccia-Mattheyses to work,
-  //we may at least allow an imbalance of 1 vertex
-
-#if DEBUG_LEVEL>=3
-  cout << _left_size << " " << _right_size << " " << parts[_left_index].size() << endl;
-#endif
-
-  //Initialize the distribution of the nets
-  for(size_t ivertex=0; ivertex < parts[_left_index].size(); ivertex++) {
-    int vertex=parts[_left_index][ivertex];
-
-    net_distribution[0][sliceIndex[vertex]]=0;
-    net_distribution[1][sliceIndex[vertex]]=0;
-
-    //the net includes the node itself ...
-    if(inSlice[vertex] == _left_index) {
-      net_distribution[0][sliceIndex[vertex]]++;
-    }
-    else {
-      net_distribution[1][sliceIndex[vertex]]++;
-    }
-
-    //and it's nearest neighbours
-    int *edge=graph.getEdges(vertex).begin();
-
-    while(edge != graph.getEdges(vertex).end()) {
-
-      if(inSlice[*edge] <= _left_index) {
-        net_distribution[0][sliceIndex[vertex]]++;
-      }
-      else {
-        net_distribution[1][sliceIndex[vertex]]++;
-      }
-
-      edge++;
-    }
-  }
-
-  //Initialize gain values
-  for(size_t ivertex=0; ivertex < parts[_left_index].size(); ivertex++) {
-    int index, part_index;
-    int vertex=parts[_left_index][ivertex];
-    int from,to;
-
-    if(inSlice[vertex] == _left_index) {
-      part_index=_left_index;
-      index=0;
-      from=0;to=1;
-    }
-    else {
-      part_index=_right_index;
-      index=1;
-      from=1;to=0;
-    }
-
-    //Update the partition size
-    size[index]++;
-
-    //For all the free vertices we go through
-
-    //all the edges to compute the gain
-    if(!_locked[sliceIndex[vertex]]) {
-      int *edge=graph.getEdges(vertex).begin();
-
-      while(edge != graph.getEdges(vertex).end()) {
-
-        //Update the gain with regard to cut edges
-        if(inSlice[*edge]!=part_index) {
-          edge_gain[sliceIndex[vertex]]++;
-        }
-        else {
-          edge_gain[sliceIndex[vertex]]--;      }
-
-        //and with regard to cut nets
-        if(net_distribution[from][sliceIndex[*edge]] == 1) {
-          net_gain[sliceIndex[vertex]]++;
-        }
-        else if(net_distribution[to][sliceIndex[*edge]] == 0) {
-          net_gain[sliceIndex[vertex]]--;
-        }
-
-        edge++;
-      }
-
-      //Finally add the vertex to the bucket list
-      bucket[index].push_randomly(net_gain[sliceIndex[vertex]], vertex);
-
-    }
-  }
-
-#if DEBUG_LEVEL>=3
-  cout << size[0] << " " << size[1] << endl;
-#endif
-
-  //characteristics of the best partition
-  int relative_gain=0;
-  int relative_edge_gain=0;
-
-  int old_size=abs(size[0]);
-  //int old_gains not needed -> relative gains!
-
-  int best_gain=0;
-  int best_edge_gain=0;
-  int best_size=abs(size[0]);
-
-  int best_move=0;
-
-  while(true) {
-    //Choose the cell that should be moved
-    int from, to;
-
-    int cur_gain[2];
-
-    cur_gain[0]=bucket[0].max_key();
-    cur_gain[1]=bucket[1].max_key();
-
-    //Only allow moves that improve or maintain balance:
-    //check if partition is imbalanced or could be imbalanced
-    //at the next move
-    //(Note: it doesn't matter whether we compare size[0] or
-    // size[1] with tolerance, since size[0]+size[1]=const
-    if(abs(size[0])>=tolerance) {
-      if(size[0] > size[1]) {
-        cur_gain[1]=-_pmax-1;
-      }
-      else {
-        cur_gain[0]=-_pmax-1;
-      }
-    }
-
-    //Choose the cell with the largest gain
-    //(Note: moves that could cause imbalance
-    // are prevented by the previous checks)
-    if(cur_gain[0] != cur_gain[1]) {
-      from=(cur_gain[0] > cur_gain[1]) ? 0 : 1;
-      to=(cur_gain[0] > cur_gain[1]) ? 1 : 0;
-    }
-    //if the gains are equal, check if no further
-    //moves are possible, otherwise choose
-    //the move that improves balance, if it doesn't matter
-    //take the left slice
-    else {
-      if(cur_gain[0] > -_pmax-1) {
-        from=(size[0] >= size[1]) ? 0 : 1;
-        to=(size[0] >= size[1]) ? 1 : 0;
-      }
-      else {
-        //no further moves are possible
-
-        break;
-      }
-    }
-
-    //Remove the vertex and adjust partition size
-    int vertex=bucket[from].front();
-    bucket[from].pop_front();
-
-    _locked[sliceIndex[vertex]]=true;
-    inSlice[vertex]=part_index[to];
-
-    size[from]--;
-    size[to]++;
-
-    relative_gain+=cur_gain[from];
-    relative_edge_gain+=edge_gain[sliceIndex[vertex]];
-    change_buffer.push_back(vertex);
-
-    //update gains and adjust bucket structure
-    int *edge=graph.getEdges(vertex).begin();
-    while(edge != graph.getEdges(vertex).end()) {
-
-      //----------------
-      //update net gains
-      //----------------
-
-      if(net_distribution[to][sliceIndex[*edge]] == 0) {
-        if(!_locked[sliceIndex[*edge]]) {
-          int old_gain=net_gain[sliceIndex[*edge]]++;
-
-          //Note: all the vertices are in the to part
-          bucket[from].rearrange_randomly(old_gain, old_gain+1, *edge);
-        }
-
-        int *edgedge=graph.getEdges(*edge).begin();
-        while(edgedge != graph.getEdges(*edge).end()) {
-          if(inSlice[*edgedge]!=_left_index &&
-             inSlice[*edgedge]!=_right_index) {
-            edgedge++;
-            continue;
-          }
-
-          if(!_locked[sliceIndex[*edgedge]]) {
-            int old_gain=net_gain[sliceIndex[*edgedge]]++;
-
-            //Note: all the vertices are in the from part
-            bucket[from].rearrange_randomly(old_gain, old_gain+1, *edgedge);
-          }
-          edgedge++;
-        }
-      }
-      else if(net_distribution[to][sliceIndex[*edge]] == 1) {
-        if(inSlice[*edge]==part_index[to] && !_locked[sliceIndex[*edge]]) {
-          int old_gain=net_gain[sliceIndex[*edge]]--;
-
-          //Note: all the vertices are in the to part
-          bucket[to].rearrange_randomly(old_gain, old_gain-1, *edge);
-
-        }
-        int *edgedge=graph.getEdges(*edge).begin();
-        while(edgedge != graph.getEdges(*edge).end()) {
-          if(inSlice[*edgedge]!=_left_index &&
-             inSlice[*edgedge]!=_right_index) {
-            edgedge++;
-            continue;
-          }
-
-          if(inSlice[*edgedge]==part_index[to] && !_locked[sliceIndex[*edgedge]]) {
-            int old_gain=net_gain[sliceIndex[*edgedge]]--;
-
-            bucket[to].rearrange_randomly(old_gain, old_gain-1, *edgedge);
-
-            break; //there is only one vertex in the to part
-            //(well, it's two after the move, nut the moved vertex is locked)
-          }
-
-          edgedge++;
-        }
-      }
-
-      net_distribution[from][sliceIndex[*edge]]--;
-      net_distribution[to][sliceIndex[*edge]]++;
-
-      if(net_distribution[from][sliceIndex[*edge]] == 0) {
-        if(!_locked[sliceIndex[*edge]]) {
-          int old_gain=net_gain[sliceIndex[*edge]]--;
-
-          //Note: all the vertices are in the to part
-          bucket[to].rearrange_randomly(old_gain, old_gain-1, *edge);
-        }
-
-        int *edgedge=graph.getEdges(*edge).begin();
-        while(edgedge != graph.getEdges(*edge).end()) {
-          if(inSlice[*edgedge]!=_left_index &&
-             inSlice[*edgedge]!=_right_index) {
-            edgedge++;
-            continue;
-          }
-
-          if(!_locked[sliceIndex[*edgedge]]) {
-            int old_gain=net_gain[sliceIndex[*edgedge]]--;
-
-            bucket[to].rearrange_randomly(old_gain, old_gain-1, *edgedge);
-          }
-          edgedge++;
-        }
-
-      }
-      else if(net_distribution[from][sliceIndex[*edge]] == 1) {
-        if(inSlice[*edge]==part_index[from] && !_locked[sliceIndex[*edge]]) {
-          int old_gain=net_gain[sliceIndex[*edge]]++;
-
-          bucket[from].rearrange_randomly(old_gain, old_gain+1, *edge);
-        }
-
-        int *edgedge=graph.getEdges(*edge).begin();
-        while(edgedge != graph.getEdges(*edge).end()) {
-          if(inSlice[*edgedge]!=_left_index &&
-             inSlice[*edgedge]!=_right_index) {
-            edgedge++;
-            continue;
-          }
-
-          if(inSlice[*edgedge]==part_index[from] && !_locked[sliceIndex[*edgedge]]) {
-            int old_gain=net_gain[sliceIndex[*edgedge]]++;
-
-            bucket[from].rearrange_randomly(old_gain, old_gain+1, *edgedge);
-            break; //there is only one vertex in the from part
-            //(the other one has been moved)
-          }
-
-          edgedge++;
-        }
-      }
-
-      //-----------------
-      //update edge gains
-      //-----------------
-
-      if(inSlice[*edge]==part_index[from]) {
-        if(!_locked[sliceIndex[*edge]]) {
-          edge_gain[sliceIndex[*edge]]+=2;
-        }
-      }
-      else if(inSlice[*edge]==part_index[to]) {
-        if(!_locked[sliceIndex[*edge]]) {
-          edge_gain[sliceIndex[*edge]]-=2;
-        }
-      }
-
-      edge++;
-    }
-
-    //Have we found a better partition
-    if(relative_gain > best_gain ||
-       (relative_gain == best_gain && best_size >= abs(size[0]) ) ||
-       (relative_gain == best_gain && best_size == abs(size[0]) && relative_edge_gain>=best_edge_gain) ) {
-      best_gain=relative_gain;
-      best_edge_gain=relative_edge_gain;
-      best_size=abs(size[0]);
-
-      best_move=change_buffer.size();
-    }
-
-  }
-
-  //Undo all the changes that did not improve the
-  //bisection any more:
-  for(size_t ivertex=best_move; ivertex < change_buffer.size(); ivertex++) {
-    int vertex=change_buffer[ivertex];
-
-    if(inSlice[vertex] == _left_index) {
-      inSlice[vertex]=_right_index;
-    }
-    else {
-      inSlice[vertex]=_left_index;
-    }
-  }
-
-  if(best_move == 0) {
-    return 0; //no improvements possible
-  }
-  else {
-    if(best_gain > 0 ||
-       (best_gain == 0 && best_size < old_size) ||
-       (best_gain == 0 && best_size == old_size && best_edge_gain > 0) ){
-      return 2; //definite improvement
-    }
-    else {
-      return 1; //found a partition with the same properties as before
-                //(needed to get out of local minima)
-    }
-  }
-}
-
-//----------------
-
-int Partitioner::
-FM_MinNetCutMinCut_optimize(int _left_index, int _left_size,
-                            int _right_index, int _right_size,
-                            std::vector<bool> &_locked, int _pmax, int _tolerance)
-{
-#if DEBUG_LEVEL>=3
-  cout << "In FM MinNetCut_MinCut" << endl;
-#endif
-
-  std::vector<int> net_gain(_left_size+_right_size, 0);
-  std::vector<int> edge_gain(_left_size+_right_size, 0);
-
-  std::vector<int> net_distribution[2];
-
-  std::vector<list<int>::iterator> vertex_list(_left_size+_right_size);
-  std::vector<double_bucket_list> bucket(2, double_bucket_list(-_pmax, _pmax,
-                                                               -_pmax, _pmax,
-                                                               vertex_list, sliceIndex));
-
-  std::vector<int> change_buffer;
-  int size[2];
-  int part_index[2];
-
-  net_distribution[0].resize(_left_size+_right_size);
-  net_distribution[1].resize(_left_size+_right_size);
-
-  part_index[0]=_left_index;
-  part_index[1]=_right_index;
-
-  //relative sizes and tolerances
-
-  size[0] = -_left_size;
-  size[1] = -_right_size;
-
-  int tolerance=std::max(1, _tolerance);
-  //In order for Fiduccia-Mattheyses to work,
-  //we may at least allow an imbalance of 1 vertex
-
-#if DEBUG_LEVEL>=3
-  cout << _left_size << " " << _right_size << " " << parts[_left_index].size() << endl;
-#endif
-
-  //Initialize the distribution of the nets
-  for(size_t ivertex=0; ivertex < parts[_left_index].size(); ivertex++) {
-    int vertex=parts[_left_index][ivertex];
-
-    net_distribution[0][sliceIndex[vertex]]=0;
-    net_distribution[1][sliceIndex[vertex]]=0;
-
-    //the net includes the node itself ...
-    if(inSlice[vertex] == _left_index) {
-      net_distribution[0][sliceIndex[vertex]]++;
-    }
-    else {
-      net_distribution[1][sliceIndex[vertex]]++;
-    }
-
-    //and it's nearest neighbours
-    int *edge=graph.getEdges(vertex).begin();
-
-    while(edge != graph.getEdges(vertex).end()) {
-
-      if(inSlice[*edge] <= _left_index) {
-        net_distribution[0][sliceIndex[vertex]]++;
-      }
-      else {
-        net_distribution[1][sliceIndex[vertex]]++;
-      }
-
-      edge++;
-    }
-  }
-
-  //Initialize gain values
-  for(size_t ivertex=0; ivertex < parts[_left_index].size(); ivertex++) {
-    int index, part_index;
-    int vertex=parts[_left_index][ivertex];
-    int from,to;
-
-    if(inSlice[vertex] == _left_index) {
-      part_index=_left_index;
-      index=0;
-      from=0;to=1;
-    }
-    else {
-      part_index=_right_index;
-      index=1;
-      from=1;to=0;
-    }
-
-    //Update the partition size
-    size[index]++;
-
-    //For all the free vertices we go through
-
-    //all the edges to compute the gain
-    if(!_locked[sliceIndex[vertex]]) {
-      int *edge=graph.getEdges(vertex).begin();
-
-      while(edge != graph.getEdges(vertex).end()) {
-
-        //Update the gain with regard to cut edges
-        if(inSlice[*edge]!=part_index) {
-          edge_gain[sliceIndex[vertex]]++;
-        }
-        else {
-          edge_gain[sliceIndex[vertex]]--;
-        }
-
-        //and with regard to cut nets
-        if(net_distribution[from][sliceIndex[*edge]] == 1) {
-          net_gain[sliceIndex[vertex]]++;
-        }
-        else if(net_distribution[to][sliceIndex[*edge]] == 0) {
-          net_gain[sliceIndex[vertex]]--;
-        }
-
-        edge++;
-      }
-
-      //Finally add the vertex to the bucket list
-      bucket[index].push_randomly(net_gain[sliceIndex[vertex]],
-                                  edge_gain[sliceIndex[vertex]], vertex);
-    }
-  }
-
-#if DEBUG_LEVEL>=3
-  cout << "deviation: " << size[0] << " " << size[1] << endl;
-#endif
-
-  //characteristics of the best partition
-  int relative_gain=0;
-  int relative_edge_gain=0;
-
-  int old_size=abs(size[0]);
-  //int old_gains not needed -> relative gains!
-
-  int best_gain=0;
-  int best_edge_gain=0;
-  int best_size=abs(size[0]);
-
-  int best_move=0;
-
-  while(true) {
-    //Choose the cell that should be moved
-    int from, to;
-
-    int cur_net_gain[2];
-
-    cur_net_gain[0]=bucket[0].max_key().first;
-    cur_net_gain[1]=bucket[1].max_key().first;
-
-    //Only allow moves that improve or maintain balance:
-    //check if partition is imbalanced or could be imbalanced
-    //at the next move
-    //(Note: it doesn't matter whether we compare size[0] or
-    // size[1] with tolerance, since size[0]+size[1]=const
-    if(abs(size[0])>=tolerance) {
-      if(size[0] > size[1]) {
-        cur_net_gain[1]=-_pmax-1;
-      }
-      else {
-        cur_net_gain[0]=-_pmax-1;
-      }
-    }
-
-    //Choose the cell with the largest gain
-    //(Note: moves that could cause imbalance
-    // are prevented by the previous checks)
-    if(cur_net_gain[0] != cur_net_gain[1]) {
-      from=(cur_net_gain[0] > cur_net_gain[1]) ? 0 : 1;
-      to=(cur_net_gain[0] > cur_net_gain[1]) ? 1 : 0;
-    }
-    //if the gains are equal, check if no further
-    //moves are possible, otherwise choose
-    //the move that improves balance, if it doesn't matter
-    //take the left slice
-    else {
-      if(cur_net_gain[0] > -_pmax-1) {
-        from=(size[0] >= size[1]) ? 0 : 1;
-        to=(size[0] >= size[1]) ? 1 : 0;
-      }
-      else {
-        //no further moves are possible
-
-        break;
-      }
-    }
-
-    //Remove the vertex and adjust partition size
-    int vertex=bucket[from].front();
-    bucket[from].pop_front();
-
-    _locked[sliceIndex[vertex]]=true;
-    inSlice[vertex]=part_index[to];
-
-    size[from]--;
-    size[to]++;
-
-    relative_gain+=cur_net_gain[from];
-    relative_edge_gain+=bucket[from].max_key().second;
-
-    change_buffer.push_back(vertex);
-
-    //update gains and adjust bucket structure
-    int *edge=graph.getEdges(vertex).begin();
-    while(edge != graph.getEdges(vertex).end()) {
-
-      //-----------------
-      //update edge gains
-      //-----------------
-      int old_edge_gain=edge_gain[sliceIndex[*edge]];
-
-      if(inSlice[*edge]==part_index[from]) {
-        if(!_locked[sliceIndex[*edge]]) {
-          edge_gain[sliceIndex[*edge]]+=2;
-
-          bucket[from].rearrange_randomly(net_gain[sliceIndex[*edge]], old_edge_gain,
-                                          net_gain[sliceIndex[*edge]], old_edge_gain+2,
-                                          *edge);
-        }
-      }
-      else if(inSlice[*edge]==part_index[to]) {
-        if(!_locked[sliceIndex[*edge]]) {
-          edge_gain[sliceIndex[*edge]]-=2;
-
-          bucket[to].rearrange_randomly(net_gain[sliceIndex[*edge]], old_edge_gain,
-                                        net_gain[sliceIndex[*edge]], old_edge_gain-2,
-                                        *edge);
-        }
-      }
-
-      //----------------
-      //update net gains
-      //----------------
-
-      if(net_distribution[to][sliceIndex[*edge]] == 0) {
-        if(!_locked[sliceIndex[*edge]]) {
-
-          int old_gain=net_gain[sliceIndex[*edge]]++;
-
-          //Note: all the vertices are in the from part
-          bucket[from].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edge]],
-                                          old_gain+1, edge_gain[sliceIndex[*edge]],
-                                          *edge);
-        }
-
-        int *edgedge=graph.getEdges(*edge).begin();
-        while(edgedge != graph.getEdges(*edge).end()) {
-          if(inSlice[*edgedge]!=_left_index &&
-             inSlice[*edgedge]!=_right_index) {
-            edgedge++;
-            continue;
-          }
-
-          if(!_locked[sliceIndex[*edgedge]]) {
-            int old_gain=net_gain[sliceIndex[*edgedge]]++;
-
-            //Note: all the vertices are in the from part
-            bucket[from].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edgedge]],
-                                            old_gain+1, edge_gain[sliceIndex[*edgedge]],
-                                            *edgedge);
-          }
-          edgedge++;
-        }
-      }
-      else if(net_distribution[to][sliceIndex[*edge]] == 1) {
-        if(inSlice[*edge]==part_index[to] && !_locked[sliceIndex[*edge]]) {
-          int old_gain=net_gain[sliceIndex[*edge]]--;
-
-          //Note: all the vertices are in the to part
-          bucket[to].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edge]],
-                                        old_gain-1, edge_gain[sliceIndex[*edge]],
-                                        *edge);
-
-        }
-        int *edgedge=graph.getEdges(*edge).begin();
-        while(edgedge != graph.getEdges(*edge).end()) {
-          if(inSlice[*edgedge]!=_left_index &&
-             inSlice[*edgedge]!=_right_index) {
-            edgedge++;
-            continue;
-          }
-
-          if(inSlice[*edgedge]==part_index[to] && !_locked[sliceIndex[*edgedge]]) {
-            int old_gain=net_gain[sliceIndex[*edgedge]]--;
-
-            bucket[to].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edgedge]],
-                                          old_gain-1, edge_gain[sliceIndex[*edgedge]],
-                                          *edgedge);
-
-            break; //there is only one vertex in the to part
-            //(well, it's two after the move, nut the moved vertex is locked)
-          }
-
-          edgedge++;
-        }
-      }
-
-      net_distribution[from][sliceIndex[*edge]]--;
-      net_distribution[to][sliceIndex[*edge]]++;
-
-      if(net_distribution[from][sliceIndex[*edge]] == 0) {
-        if(!_locked[sliceIndex[*edge]]) {
-          int old_gain=net_gain[sliceIndex[*edge]]--;
-
-          //Note: all the vertices are in the to part
-          bucket[to].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edge]],
-                                        old_gain-1, edge_gain[sliceIndex[*edge]],
-                                        *edge);
-        }
-
-        int *edgedge=graph.getEdges(*edge).begin();
-        while(edgedge != graph.getEdges(*edge).end()) {
-          if(inSlice[*edgedge]!=_left_index &&
-             inSlice[*edgedge]!=_right_index) {
-            edgedge++;
-            continue;
-          }
-          if(!_locked[sliceIndex[*edgedge]]) {
-            int old_gain=net_gain[sliceIndex[*edgedge]]--;
-
-            bucket[to].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edgedge]],
-                                          old_gain-1, edge_gain[sliceIndex[*edgedge]],
-                                          *edgedge);
-          }
-          edgedge++;
-        }
-
-      }
-      else if(net_distribution[from][sliceIndex[*edge]] == 1) {
-        if(inSlice[*edge]==part_index[from] && !_locked[sliceIndex[*edge]]) {
-          int old_gain=net_gain[sliceIndex[*edge]]++;
-
-          bucket[from].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edge]],
-                                          old_gain+1, edge_gain[sliceIndex[*edge]],
-                                          *edge);
-        }
-
-        int *edgedge=graph.getEdges(*edge).begin();
-        while(edgedge != graph.getEdges(*edge).end()) {
-          if(inSlice[*edgedge]!=_left_index &&
-             inSlice[*edgedge]!=_right_index) {
-            edgedge++;
-            continue;
-          }
-
-          if(inSlice[*edgedge]==part_index[from] && !_locked[sliceIndex[*edgedge]]) {
-            int old_gain=net_gain[sliceIndex[*edgedge]]++;
-
-            bucket[from].rearrange_randomly(old_gain, edge_gain[sliceIndex[*edgedge]],
-                                            old_gain+1, edge_gain[sliceIndex[*edgedge]],
-                                            *edgedge);
-            break; //there is only one vertex in the from part
-            //(the other one has been moved)
-          }
-
-          edgedge++;
-        }
-      }
-
-      edge++;
-    }
-
-    //Have we found a better partition
-    if(relative_gain > best_gain ||
-       (relative_gain == best_gain && relative_edge_gain >= best_edge_gain) ||
-       (relative_gain == best_gain && relative_edge_gain==best_edge_gain && best_size >= abs(size[0]))) {
-      best_gain=relative_gain;
-      best_edge_gain=relative_edge_gain;
-      best_size=abs(size[0]);
-
-      best_move=change_buffer.size();
-    }
-
-  }
-
-#if DEBUG_LEVEL>2
-  cout << "best_move: " << best_move << endl;
-  cout << "best gain: " << best_gain << endl;
-  cout << "best size: " << best_size << endl;
-#endif
-
-  //Undo all the changes that did not improve the
-  //bisection any more:
-  for(size_t ivertex=best_move; ivertex < change_buffer.size(); ivertex++) {
-    int vertex=change_buffer[ivertex];
-
-    if(inSlice[vertex] == _left_index) {
-      inSlice[vertex]=_right_index;
-    }
-    else {
-      inSlice[vertex]=_left_index;
-    }
-  }
-
-  if(best_move == 0) {
-    return 0; //no improvements possible
-  }
-  else {
-    if(best_gain > 0 ||
-       (best_gain == 0 && best_size < old_size) ||
-       (best_gain == 0 && best_size == old_size && best_edge_gain > 0) ) {
-      return 2; //definite improvement
-    }
-    else {
-      return 1; //found a partition with the same properties as before
-                //(needed to get out of local minima)
-    }
-  }
-}
diff --git a/kwant/graph/c_slicer/partitioner.h b/kwant/graph/c_slicer/partitioner.h
deleted file mode 100644
index f8cd3a20..00000000
--- a/kwant/graph/c_slicer/partitioner.h
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2011-2013 Kwant authors.
-//
-// This file is part of Kwant.  It is subject to the license terms in the file
-// LICENSE.rst found in the top-level directory of this distribution and at
-// http://kwant-project.org/license.  A list of Kwant authors can be found in
-// the file AUTHORS.rst at the top-level directory of this distribution and at
-// http://kwant-project.org/authors.
-
-//-*-C++-*-
-#ifndef _BLOCK_TRIDIAGONAL_PARTITIONER_H
-#define _BLOCK_TRIDIAGONAL_PARTITIONER_H
-
-#include <vector>
-#include <deque>
-#include <cmath>
-#include <algorithm>
-
-#include "graphwrap.h"
-#include "bucket_list.h"
-//#include "graphalgorithms.h"
-
-/** @short the first graph-partitioner
- ** *******************************************/
-class Partitioner
-{
-public:
-  enum Distributors {
-    Distribute_Natural_Unbalanced,
-    Distribute_Natural_Balanced,
-    Distribute_Randomly };
-
-  enum Optimizers {
-    Optimize_No,
-    Optimize_FM_MinCut,
-    Optimize_FM_MinNetCut,
-    Optimize_FM_MinNetCutMinCut } ;
-
-public:
-  const GraphWrapper &graph;
-
-  vector<vector<int> > parts;
-
-  vector<int> inSlice;
-  vector<int> sliceIndex;
-
-  enum Distributors distributor;
-  enum Optimizers optimizer;
-
-public:
-  /** @param _graph           the underlying grid
-   ** @param _tolerance
-   ** @param _min_opt_passes
-   ** @param _max_opt_passes
-   ** @param _distributor
-   ** @param _optimizer
-   ** @param _mode            default is TwoBlocks, OneBlock is not implemented yet
-   ** Default behaviour: TwoBlocks, left lead = 1, right lead = 2
-   ** **************************************************************/
-  template<typename Grid>
-  Partitioner( const Grid  &_graph,
-	       std::vector<int> &_left,
-	       std::vector<int> &_right,
-	       double       _tolerance = 0.01,
-	       int          _min_opt_passes = 10,
-	       int          _max_opt_passes = 10,
-	       Distributors _distributor = Distribute_Natural_Balanced,
-	       Optimizers   _optimizer   = Optimize_FM_MinNetCutMinCut) :
-    graph(_graph), parts(1, vector<int>(0)),
-    inSlice(_graph.size(),-1), sliceIndex(_graph.size(),0),
-    distributor(_distributor), optimizer(_optimizer)
-  {
-    parts[0].reserve(_graph.size());
-
-    for(int i=0; i<graph.size(); i++) {
-      parts[0].push_back(i);
-
-      inSlice[i]=0;
-      sliceIndex[i]=parts[0].size()-1;
-    }
-
-    bisectFirst(_left, _right, _tolerance, _min_opt_passes, _max_opt_passes);
-  }
-
-private:
-  /** @short first bisection? **/
-  void bisectFirst( std::vector<int> &, std::vector<int> &, double _tolerance,
-		    int _min_opt_passes, int _max_opt_passes);
-
-  void bisect(int _part, int _part_slices,
-	      std::vector<int> &, std::vector<int> &,
-	      int _max_edges, double _tolerance,
-	      int _min_opt_passes, int _max_opt_passes);
-
-  //interface function that chooses between the different Optimizers
-  inline int optimize(int _left_index, int _left_size,
-		      int _right_index, int _right_size,
-		      std::vector<bool> &_locked, int _pmax, int _tolerance)
-  {
-    if(optimizer == Optimize_FM_MinCut) {
-      return FM_MinCut_optimize(_left_index, _left_size,
-				_right_index, _right_size,
-				_locked, _pmax, _tolerance);
-    }
-    else if(optimizer == Optimize_FM_MinNetCut) {
-      return FM_MinNetCut_optimize(_left_index, _left_size,
-				   _right_index, _right_size,
-				   _locked, _pmax, _tolerance);
-    }
-    else if(optimizer == Optimize_FM_MinNetCutMinCut) {
-      return FM_MinNetCutMinCut_optimize(_left_index, _left_size,
-					 _right_index, _right_size,
-					 _locked, _pmax, _tolerance);
-    }
-    else {
-      return 0;
-    }
-  }
-
-  //
-  inline void distribute(std::deque<int> &_left_stack, std::deque<int> &_left_rank,
-			 int _left_index, int _left_size, int &_real_left_size,
-			 std::deque<int> &_right_stack, std::deque<int> &_right_rank,
-			 int _right_index, int _right_size, int &_real_right_size,
-			 int _part, std::vector<bool> &_locked,
-			 int _current_rank)
-  {
-    if(distributor == Distribute_Natural_Balanced) {
-      NaturalBalanced_distribute(_left_stack, _left_rank,
-				 _left_index, _left_size, _real_left_size,
-				 _right_stack, _right_rank,
-				 _right_index, _right_size, _real_right_size,
-				 _part, _locked, _current_rank);
-    }
-    else if(distributor == Distribute_Natural_Unbalanced) {
-      NaturalUnbalanced_distribute(_left_stack, _left_rank,
-				   _left_index, _left_size, _real_left_size,
-				   _right_stack, _right_rank,
-				   _right_index, _right_size, _real_right_size,
-				   _part, _locked, _current_rank);
-    }
-    else if(distributor == Distribute_Randomly) {
-      Random_distribute(_left_stack, _left_rank,
-			_left_index, _left_size, _real_left_size,
-			_right_stack, _right_rank,
-			_right_index, _right_size, _real_right_size,
-			_part, _locked, _current_rank);
-    }
-  }
-
-
-  //The inital distribution
-  void NaturalBalanced_distribute(std::deque<int> &_left_stack, std::deque<int> &_left_rank,
-				  int _left_index, int _left_size, int &_real_left_size,
-				  std::deque<int> &_right_stack, std::deque<int> &_right_rank,
-				  int _right_index, int _right_size, int &_real_right_size,
-				  int _part,
-				  std::vector<bool> &_locked,
-				  int _current_rank);
-
-  void NaturalUnbalanced_distribute(std::deque<int> &_left_stack, std::deque<int> &_left_rank,
-				    int _left_index, int _left_size, int &_real_left_size,
-				    std::deque<int> &_right_stack, std::deque<int> &_right_rank,
-				    int _right_index, int _right_size, int &_real_right_size,
-				    int _part,
-				    std::vector<bool> &_locked,
-				    int _current_rank);
-
-  void Random_distribute(std::deque<int> &_left_stack, std::deque<int> &_left_rank,
-			 int _left_index, int _left_size, int &_real_left_size,
-			 std::deque<int> &_right_stack, std::deque<int> &_right_rank,
-			 int _right_index, int _right_size, int &_real_right_size,
-			 int _part,
-			 std::vector<bool> &_locked,
-			 int _current_rank);
-
-  //The different optimzers
-  int FM_MinCut_optimize(int _left_index, int _left_size,
-			 int _right_index, int _right_size,
-			 std::vector<bool> &_locked, int _pmax, int _tolerance);
-
-  int FM_MinNetCut_optimize(int _left_index, int _left_size,
-			    int _right_index, int _right_size,
-			    std::vector<bool> &_locked, int _pmax, int _tolerance);
-
-  int FM_MinNetCutMinCut_optimize(int _left_index, int _left_size,
-				  int _right_index, int _right_size,
-				  std::vector<bool> &_locked, int _pmax, int _tolerance);
-
-};
-
-#endif
diff --git a/kwant/graph/c_slicer/slicer.cc b/kwant/graph/c_slicer/slicer.cc
deleted file mode 100644
index 7b190d1a..00000000
--- a/kwant/graph/c_slicer/slicer.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2011-2013 Kwant authors.
-//
-// This file is part of Kwant.  It is subject to the license terms in the file
-// LICENSE.rst found in the top-level directory of this distribution and at
-// http://kwant-project.org/license.  A list of Kwant authors can be found in
-// the file AUTHORS.rst at the top-level directory of this distribution and at
-// http://kwant-project.org/authors.
-
-#include <algorithm>
-#include <exception>
-
-#include "graphwrap.h"
-#include "partitioner.h"
-
-#include "slicer.h"
-
-extern "C"
-Slicing *slice(int _node_num,
-               int *_vertex_ptr,
-               int *_edges,
-               int _left_num, int *_left,
-               int _right_num, int *_right)
-{
-  GraphWrapper graph(_node_num, _vertex_ptr,
-                     _edges);
-
-  vector<int> left(_left, _left+_left_num),
-    right(_right, _right+_right_num);
-
-  Partitioner parts(graph, left, right);
-
-  Slicing *slicing;
-
-  try {
-    slicing=new Slicing;
-
-    slicing->nslices=parts.parts.size();
-    slicing->slice_ptr=new int[parts.parts.size()+1];
-    slicing->slices=new int[graph.size()];
-  }
-  catch(std::bad_alloc &ba) {
-    return NULL;
-  }
-
-  slicing->slice_ptr[0]=0;
-  for(size_t i=0; i<parts.parts.size(); i++) {
-    std::copy(parts.parts[i].begin(),
-              parts.parts[i].end(),
-              slicing->slices+slicing->slice_ptr[i]);
-    slicing->slice_ptr[i+1]=slicing->slice_ptr[i]+
-      parts.parts[i].size();
-  }
-
-  return slicing;
-}
-
-extern "C"
-void freeSlicing(Slicing *_slicing)
-{
-  if(_slicing) {
-    delete [] _slicing->slices;
-    delete [] _slicing->slice_ptr;
-    delete _slicing;
-  }
-}
diff --git a/kwant/graph/c_slicer/slicer.h b/kwant/graph/c_slicer/slicer.h
deleted file mode 100644
index e9c42281..00000000
--- a/kwant/graph/c_slicer/slicer.h
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2011-2013 Kwant authors.
-//
-// This file is part of Kwant.  It is subject to the license terms in the file
-// LICENSE.rst found in the top-level directory of this distribution and at
-// http://kwant-project.org/license.  A list of Kwant authors can be found in
-// the file AUTHORS.rst at the top-level directory of this distribution and at
-// http://kwant-project.org/authors.
-
-struct Slicing
-{
-  int nslices;
-  int *slice_ptr, *slices;
-};
-
-#ifdef __cplusplus
-extern "C"
-#endif
-struct Slicing *slice(int, int *, int *, int, int *,
-                      int, int *);
-
-#ifdef __cplusplus
-extern "C"
-#endif
-void freeSlicing(struct Slicing *);
diff --git a/kwant/graph/dissection.py b/kwant/graph/dissection.py
deleted file mode 100644
index 4c048fb4..00000000
--- a/kwant/graph/dissection.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2011-2013 Kwant authors.
-#
-# This file is part of Kwant.  It is subject to the license terms in the file
-# LICENSE.rst found in the top-level directory of this distribution and at
-# http://kwant-project.org/license.  A list of Kwant authors can be found in
-# the file AUTHORS.rst at the top-level directory of this distribution and at
-# http://kwant-project.org/authors.
-
-"""Routines to compute nested dissections of graphs"""
-
-__all__ = ['edge_dissection']
-
-import numpy as np
-from . import core, utils, scotch
-from .defs import gint_dtype
-
-
-def edge_dissection(gr, minimum_size, is_undirected=False):
-    """Returns a nested dissection tree (represented as a nested number of
-    tuples) for the graph gr, based on edge separators.
-
-    minimum_size indicates the smallest size of a part for which the algorithm
-    should still try to dissect the part.
-
-    If the graph gr is already undirected, setting is_undirected=True avoids
-    the work of making the graph undirected.
-    """
-
-    if isinstance(gr, core.Graph):
-        grc = gr.compressed()
-    elif isinstance(gr, core.CGraph):
-        grc = gr
-    else:
-        raise ValueError('edge_dissection expects a Graph or CGraph!')
-
-    # Unless the graph is undirected from the very beginning, make it
-    # undirected.
-    if not is_undirected:
-        grc = utils.make_undirected(grc)
-
-    return edge_bisection(grc, np.arange(grc.num_nodes, dtype=gint_dtype),
-                          minimum_size)
-
-
-def edge_bisection(grc, nodeids, minimum_size):
-    """This function returns a nested dissection tree (represented as a nested
-    number of tuples) for an undirected graph represented as a CGraph and ids
-    (that go into the tree) given in nodeids.  minimum_size indicates the
-    smallest size of a part for which the algorithm should still try to dissect
-    the part.
-    """
-    parts = scotch.bisect(grc)
-
-    # Count the number of nodes in parts 0 or 1.
-    size2 = np.sum(parts)
-    size1 = grc.num_nodes - size2
-
-    # If the size of one of the parts is zero, we can't further dissect.
-    if size1 == 0 or size2 == 0:
-        return nodeids.tolist()
-
-    # Now extract all nodes that are in part 0.
-    sub_nodeids = nodeids[parts == 0]
-
-    if size1 > minimum_size:
-        subgr = utils.induced_subgraph(grc, parts == 0)
-        left = edge_bisection(subgr, sub_nodeids, minimum_size)
-    else:
-        left = sub_nodeids.tolist()
-
-    # Now extract all nodes that are in part 1.
-    sub_nodeids = nodeids[parts == 1]
-
-    if size2 > minimum_size:
-        subgr = utils.induced_subgraph(grc, parts == 1)
-        right = edge_bisection(subgr, sub_nodeids, minimum_size)
-    else:
-        right = sub_nodeids.tolist()
-
-    return left, right
diff --git a/kwant/graph/slicer.pyx b/kwant/graph/slicer.pyx
deleted file mode 100644
index 07458361..00000000
--- a/kwant/graph/slicer.pyx
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2011-2013 Kwant authors.
-#
-# This file is part of Kwant.  It is subject to the license terms in the file
-# LICENSE.rst found in the top-level directory of this distribution and at
-# http://kwant-project.org/license.  A list of Kwant authors can be found in
-# the file AUTHORS.rst at the top-level directory of this distribution and at
-# http://kwant-project.org/authors.
-
-import numpy as np
-cimport numpy as np
-cimport cython
-from libc.string cimport memcpy
-from .defs cimport gint
-from .defs import gint_dtype
-from .core cimport CGraph
-
-from . cimport c_slicer
-
-__all__ = ['slice']
-
-@cython.boundscheck(False)
-def slice(CGraph graph, left, right):
-    """
-    TODO: write me.
-    """
-    cdef np.ndarray[gint, ndim=1] leftarr, rightarr, slc
-    cdef c_slicer.Slicing *slicing
-    cdef int i, slc_size
-
-    leftarr = np.unique(np.array(left, dtype=gint_dtype))
-    rightarr = np.unique(np.array(right, dtype=gint_dtype))
-
-    if leftarr.ndim != 1:
-        raise ValueError("Left cannot be interpreted as a 1D array.")
-
-    if rightarr.ndim != 1:
-        raise ValueError("Right cannot be interpreted as a 1D array.")
-
-    if leftarr.size == 0 or rightarr.size == 0:
-        raise ValueError("Empty boundary arrays are not supported yet.")
-
-    # slicing only possible if there is no overlap between
-    # left and right slices
-    if np.intersect1d(rightarr, leftarr, assume_unique=True).size:
-        return [tuple(range(graph.num_nodes))]
-
-    slicing = c_slicer.slice(graph.num_nodes,
-                             graph.heads_idxs,
-                             graph.heads,
-                             leftarr.size, <gint *>leftarr.data,
-                             rightarr.size, <gint *>rightarr.data)
-    slices = []
-    for i in range(slicing.nslices):
-        slc_size = slicing.slice_ptr[i+1] - slicing.slice_ptr[i]
-        slc = np.empty(slc_size, dtype=gint_dtype)
-        memcpy(slc.data, slicing.slices + slicing.slice_ptr[i],
-               sizeof(gint) * slc_size)
-        slices.append(slc)
-    c_slicer.freeSlicing(slicing)
-    return slices
diff --git a/kwant/graph/tests/test_dissection.py b/kwant/graph/tests/test_dissection.py
deleted file mode 100644
index 1eaaee4d..00000000
--- a/kwant/graph/tests/test_dissection.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2011-2013 Kwant authors.
-#
-# This file is part of Kwant.  It is subject to the license terms in the file
-# LICENSE.rst found in the top-level directory of this distribution and at
-# http://kwant-project.org/license.  A list of Kwant authors can be found in
-# the file AUTHORS.rst at the top-level directory of this distribution and at
-# http://kwant-project.org/authors.
-
-import numpy as np
-# from kwant.graph import Graph, dissection
-# from kwant.graph.dissection import edge_dissection
-
-def _DISABLED_test_edge_dissection():
-    # REMARK: This test is somewhat limited in the sense that it can only test
-    #         for the general sanity of the output, not if the disection is
-    #         really good (balanced, etc.).  Sanity is checked by making sure
-    #         that every node is included exactly once in the tree.
-    size = 5
-    graph = Graph()
-
-    for i in range(size - 1):
-        offset = i * size
-        for j in range(size - 1):
-            graph.add_edge(offset + j, offset + j + 1)
-            graph.add_edge(offset + j + 1, offset + j)
-        if i > 0:
-            for j in range(size):
-                graph.add_edge(offset + j, offset + j - size)
-                graph.add_edge(offset + j - size, offset + j)
-    g = graph.compressed()
-
-    tree = edge_dissection(g, 1)
-    found = np.zeros(g.num_nodes, dtype = int)
-
-    def parse_tree(entry):
-        if type(entry) is tuple:
-            parse_tree(entry[0])
-            parse_tree(entry[1])
-        else:
-            found[entry] += 1
-
-    parse_tree(tree)
-    assert (found == 1).all()
diff --git a/kwant/graph/tests/test_slicer.py b/kwant/graph/tests/test_slicer.py
deleted file mode 100644
index af3d3b0b..00000000
--- a/kwant/graph/tests/test_slicer.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright 2011-2013 Kwant authors.
-#
-# This file is part of Kwant.  It is subject to the license terms in the file
-# LICENSE.rst found in the top-level directory of this distribution and at
-# http://kwant-project.org/license.  A list of Kwant authors can be found in
-# the file AUTHORS.rst at the top-level directory of this distribution and at
-# http://kwant-project.org/authors.
-
-import kwant
-from kwant.graph import slicer
-
-def assert_sanity(graph, slices):
-    # Slices must comprise all of the graph.
-    slclist = [slices[j][i] for j in range(len(slices))
-               for i in range(len(slices[j]))]
-    slclist.sort()
-    assert slclist == [i for i in range(graph.num_nodes)]
-
-    # Nodes may only have neighbors in neighboring slices.
-    for j in range(len(slices)):
-        for node in slices[j]:
-            for neigh in graph.out_neighbors(node):
-                if j > 0 and j < len(slices) - 1:
-                    assert (neigh in slices[j] or
-                            neigh in slices[j+1] or
-                            neigh in slices[j-1])
-                elif j == 0:
-                    assert (neigh in slices[j] or
-                            neigh in slices[j+1])
-                else:
-                    assert (neigh in slices[j] or
-                            neigh in slices[j-1])
-
-
-def test_rectangle():
-    w = 5
-
-    for l in [1, 2, 5, 10]:
-        syst = kwant.Builder()
-        lead = kwant.Builder(kwant.TranslationalSymmetry((-1, 0)))
-        lat = kwant.lattice.square()
-        lead[(lat(0, i) for i in range(w))] = 0
-        syst[(lat(j, i) for j in range(l) for i in range(w))] = 0
-        for s in [lead, syst]:
-            for kind in [kwant.builder.HoppingKind((1, 0), lat),
-                         kwant.builder.HoppingKind((0, 1), lat)]:
-                s[kind] = -1
-        syst.attach_lead(lead)
-        syst.attach_lead(lead.reversed())
-        fsyst = syst.finalized()
-
-        slices = slicer.slice(fsyst.graph,
-                              fsyst.lead_interfaces[0],
-                              fsyst.lead_interfaces[1])
-
-        # In the rectangle case, the slicing is very constricted and
-        # we know that all slices must have the same shape.
-        assert len(slices) == l
-
-        for j in range(l):
-            assert len(slices[j]) == w
-
-        assert_sanity(fsyst.graph, slices)
diff --git a/kwant/graph/tests/test_utils.py b/kwant/graph/tests/test_utils.py
deleted file mode 100644
index e6d5f02a..00000000
--- a/kwant/graph/tests/test_utils.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright 2011-2013 Kwant authors.
-#
-# This file is part of Kwant.  It is subject to the license terms in the file
-# LICENSE.rst found in the top-level directory of this distribution and at
-# http://kwant-project.org/license.  A list of Kwant authors can be found in
-# the file AUTHORS.rst at the top-level directory of this distribution and at
-# http://kwant-project.org/authors.
-
-import numpy as np
-from kwant.graph import Graph
-from kwant.graph.utils import (make_undirected, remove_duplicates,
-                               induced_subgraph)
-from kwant.graph.defs import gint_dtype
-
-def test_make_undirected():
-    graph = Graph(True)
-    graph.add_edge(0, 1)
-    graph.add_edge(1, 0)
-    graph.add_edge(1, 2)
-    graph.add_edge(2, -1)
-    g = graph.compressed()
-
-    # First, test with no duplicates removed,
-    g2 = make_undirected(g, remove_dups=False)
-
-    assert g2.num_nodes == g.num_nodes
-    assert g2.num_edges == 6
-    assert g2.has_edge(0, 1)
-    assert g2.has_edge(1, 0)
-    assert g2.has_edge(1, 2)
-    assert g2.has_edge(2, 1)
-
-    # then with duplicates removed,
-    g2 = make_undirected(g, remove_dups=True)
-
-    assert g2.num_nodes == g.num_nodes
-    assert g2.num_edges == 4
-    assert g2.has_edge(0, 1)
-    assert g2.has_edge(1, 0)
-    assert g2.has_edge(1, 2)
-    assert g2.has_edge(2, 1)
-
-    # and finally with weights.
-    g2, edge_w2 = make_undirected(g, remove_dups=True, calc_weights=True)
-
-    assert g2.num_nodes == g.num_nodes
-    assert g2.num_edges == 4
-    assert g2.has_edge(0, 1)
-    assert g2.has_edge(1, 0)
-    assert g2.has_edge(1, 2)
-    assert g2.has_edge(2, 1)
-    assert edge_w2[g2.first_edge_id(0,1)] == 2
-    assert edge_w2[g2.first_edge_id(1,0)] == 2
-    assert edge_w2[g2.first_edge_id(1,2)] == 1
-    assert edge_w2[g2.first_edge_id(2,1)] == 1
-
-def test_remove_duplicates():
-    graph = Graph()
-    graph.add_edge(0, 1)
-    graph.add_edge(0, 1)
-    graph.add_edge(1, 2)
-
-    # First test without edge weights,
-    g = graph.compressed()
-    remove_duplicates(g)
-    assert g.num_edges == 2
-    assert g.has_edge(0, 1)
-    assert g.has_edge(1, 2)
-
-    # then with edge weights.
-    g = graph.compressed()
-    edge_w = np.array([1,1,1], dtype=gint_dtype)
-    remove_duplicates(g, edge_w)
-    assert g.num_edges == 2
-    assert g.has_edge(0, 1)
-    assert g.has_edge(1, 2)
-    assert edge_w[g.first_edge_id(0,1)] == 2
-    assert edge_w[g.first_edge_id(1,2)] == 1
-
-
-def test_induced_subgraph():
-    num_nodes = 6
-
-    graph = Graph()
-    for i in range(num_nodes - 1):
-        graph.add_edge(i, i + 1)
-    graph.add_edge(1, 0)
-    g = graph.compressed()
-
-    # First test select array,
-    select = np.array([True, True, True, False, False, True])
-    g2 = induced_subgraph(g, select)
-    assert g2.num_nodes == 4
-    assert g2.num_edges == 3
-    assert g2.has_edge(0, 1)
-    assert g2.has_edge(1, 0)
-    assert g2.has_edge(1, 2)
-
-    # then test select function.
-    g2 = induced_subgraph(g, lambda i: select[i])
-    assert g2.num_nodes == 4
-    assert g2.num_edges == 3
-    assert g2.has_edge(0, 1)
-    assert g2.has_edge(1, 0)
-    assert g2.has_edge(1, 2)
-
-    # Now the same with edge weights.
-    edge_w = np.arange(g.num_edges, dtype=gint_dtype)
-    g2, edge_w2 = induced_subgraph(g, select, edge_w)
-    assert g2.num_nodes == 4
-    assert g2.num_edges == 3
-    assert g2.has_edge(0, 1)
-    assert g2.has_edge(1, 0)
-    assert g2.has_edge(1, 2)
-    assert edge_w[g.first_edge_id(0,1)] == edge_w2[g2.first_edge_id(0,1)]
-    assert edge_w[g.first_edge_id(1,0)] == edge_w2[g2.first_edge_id(1,0)]
-    assert edge_w[g.first_edge_id(1,2)] == edge_w2[g2.first_edge_id(1,2)]
-
-    g2, edge_w2 = induced_subgraph(g, lambda i: select[i], edge_w)
-    assert g2.num_nodes == 4
-    assert g2.num_edges == 3
-    assert g2.has_edge(0, 1)
-    assert g2.has_edge(1, 0)
-    assert g2.has_edge(1, 2)
-    assert edge_w[g.first_edge_id(0,1)] == edge_w2[g2.first_edge_id(0,1)]
-    assert edge_w[g.first_edge_id(1,0)] == edge_w2[g2.first_edge_id(1,0)]
-    assert edge_w[g.first_edge_id(1,2)] == edge_w2[g2.first_edge_id(1,2)]
diff --git a/kwant/graph/utils.pyx b/kwant/graph/utils.pyx
deleted file mode 100644
index 33ffff6f..00000000
--- a/kwant/graph/utils.pyx
+++ /dev/null
@@ -1,282 +0,0 @@
-# Copyright 2011-2013 Kwant authors.
-#
-# This file is part of Kwant.  It is subject to the license terms in the file
-# LICENSE.rst found in the top-level directory of this distribution and at
-# http://kwant-project.org/license.  A list of Kwant authors can be found in
-# the file AUTHORS.rst at the top-level directory of this distribution and at
-# http://kwant-project.org/authors.
-
-"""Utilities to modify compressed graphs"""
-
-__all__ = ['make_undirected', 'remove_duplicates', 'induced_subgraph',
-           'print_graph']
-
-import numpy as np
-cimport numpy as np
-from libc.string cimport memset
-cimport cython
-from cpython cimport array
-import array
-from .defs cimport gint
-from .defs import gint_dtype
-from .core cimport CGraph, CGraph_malloc
-from .core import CGraph, CGraph_malloc
-
-@cython.boundscheck(False)
-def make_undirected(CGraph gr, remove_dups=True, calc_weights=False):
-    """undirected_graph(gr) expects a CGraph gr as input, which is interpreted
-    as a directed graph, and returns a CGraph that is explicitely undirected,
-    i.e. for every edge (i,j) there is also the edge (j,i). In the process, the
-    function also removes all 'dangling' links, i.e. edges to or from
-    negative node numbers.
-
-    If remove_dups == True (default value is True), any duplicates of edges
-    will be removed (this applies to the case where there are multiple edges
-    (i,j), not to having (i,j) and (j,i)).
-
-    The effect of the duplicate edges can be retained if calc_weights == True
-    (default value is False), in which case a weight array is returned
-    containing the multiplicity of the edges after the graph has been made
-    undirected.
-
-    As a (somewhat drastic but illustrative) example, if make_undirected is
-    applied to a undirected graph, it will return the same graph again
-    (possibly with the order of edges changed) and a weight array with 2
-    everywhere.  (Of course, in this case one does not need to call
-    make_undirected ...)
-
-    make_undirected() will always return a one-way graph, regardless of
-    whether the input was a two-way graph or not (NOTE: This
-    restriction could be lifted, if necessary). In addition, the
-    original edge_ids are lost -- the resulting graph will have
-    edge_ids that are not related to the original ones. (NOTE: there
-    certainly is a relation, but as long as no-one needs it it remains
-    unspecified)
-    """
-
-    cdef gint i, j, p
-
-    # The undirected graph will have twice as many edges than the directed one
-    # (duplicates will be deleted afterwards).
-    cdef CGraph_malloc ret = CGraph_malloc(False, False, gr.num_nodes,
-                                           gr.heads_idxs[gr.num_nodes] * 2,
-                                           0, 0)
-
-    # In the following we build up the Graph directly in compressed format by
-    # adding for every edge (i,j) [with i,j>=0] also the edge (j,i). Taking
-    # care of possible doubling is done in a second step later.
-
-    # Initialize the new index array:
-    # First, compute a histogram of edges.
-    memset(ret.heads_idxs, 0, (ret.num_nodes + 1) * sizeof(gint))
-
-    # This is using Christoph's trick of building up the graph without
-    # additional buffer array.
-    cdef gint *buffer = ret.heads_idxs + 1
-
-    for i in range(gr.num_nodes):
-        for p in range(gr.heads_idxs[i], gr.heads_idxs[i+1]):
-            if gr.heads[p] >= 0:
-                buffer[i] += 1
-                buffer[gr.heads[p]] += 1
-
-    cdef gint s = 0
-    for i in range(ret.num_nodes):
-        s += buffer[i]
-        buffer[i] = s - buffer[i]
-
-    for i in range(gr.num_nodes):
-        for p in range(gr.heads_idxs[i], gr.heads_idxs[i+1]):
-            j = gr.heads[p]
-            if j >= 0:
-                ret.heads[buffer[i]] = j
-                buffer[i] += 1
-                ret.heads[buffer[j]] = i
-                buffer[j] += 1
-
-    ret.num_edges = ret.heads_idxs[ret.num_nodes]
-
-    # Now remove duplicates if desired.
-    cdef np.ndarray[gint, ndim=1] weights
-
-    if calc_weights:
-        weights = np.empty(ret.heads_idxs[ret.num_nodes], dtype=gint_dtype)
-        weights[:] = 1
-
-    if remove_dups:
-        if calc_weights:
-            remove_duplicates(ret, weights)
-        else:
-            remove_duplicates(ret)
-
-    if calc_weights:
-        return ret, weights
-    else:
-        return ret
-
-
-@cython.boundscheck(False)
-def remove_duplicates(CGraph gr, np.ndarray[gint, ndim=1] edge_weights=None):
-    """Remove duplicate edges in the CGraph gr (this applies to the case where
-    there are multiple edges (i,j), not to having (i,j) and (j,i)). This
-    function modifes the graph in place.
-
-    If edge_weights is provided, edge_weights is modified such that the new
-    edge weights are the sum of the old edge weights if there are duplicate
-    edges.
-
-    This function only works on simple graphs (not two-way graphs), and
-    it does not work on graphs which have a relation between the edge number
-    (given by the order the edges are added) and the edge_id (given by the
-    order the edges appear in the graph), see the documentation of CGraph.
-    (Both restrictions could be lifted if necessary.) Furthermore, the
-    function does not support negative node numbers, i.e. dangling links
-    (the concept of being duplicate is more complicated there.)
-    """
-    cdef gint i, j , p, q, nnz
-    cdef np.ndarray[gint, ndim=1] w
-
-    if gr.twoway:
-        raise RuntimeError("remove_duplicates does not support two-way "
-                           "graphs")
-
-    if gr.edge_ids_by_edge_nr:
-        raise RuntimeError("remove_duplicates does not support graphs with "
-                           "a relation between the edge number and the edge "
-                           "id")
-
-    # The array w will hold the position of head j in the heads array.
-    w = np.empty(gr.num_nodes, dtype=gint_dtype)
-    w[:]=-1
-
-    nnz=0
-
-    for i in range(gr.num_nodes):
-        q = nnz
-
-        for p in range(gr.heads_idxs[i], gr.heads_idxs[i+1]):
-            j = gr.heads[p]
-
-            # Check if we have found a previous entry (i,j).  (In this case w
-            # will have an index larger than the indices of all previous edges
-            # with tails < i, as stored in q.)
-            if w[j] >= q:
-                # entry is a duplicate
-                if edge_weights is not None:
-                    edge_weights[w[j]] += edge_weights[p]
-            else:
-                w[j] = nnz
-                gr.heads[nnz] = j
-                nnz += 1
-
-        # Fix the index array.
-        gr.heads_idxs[i] = q
-
-    # Finally the new number of nonzeros
-    gr.heads_idxs[gr.num_nodes] = nnz
-    gr.num_edges = nnz
-
-    # Release memory that is not needed any more.
-    array.resize(gr._heads, nnz)
-    gr.heads = <gint *>gr._heads.data.as_ints
-    if not gr.heads:
-        raise MemoryError
-
-    if edge_weights is not None:
-        edge_weights.resize(nnz, refcheck=False)
-
-@cython.boundscheck(False)
-def induced_subgraph(CGraph gr, select,
-                     np.ndarray[gint, ndim=1] edge_weights=None):
-    """Return a subgraph of the CGraph gr by picking all nodes
-    [0:gr.num_nodes] for which select is True. select can be either a
-    NumPy array, or a function that takes the node number as
-    input. This function returns a CGraph as well.
-
-    The nodes in the new graph are again numbered sequentially from 0
-    to num_nodes-1, where num_nodes is the number of nodes in the
-    subgraph. The numbering is done such that the ordering of the node
-    numbers in the original and the subgraph are preserved (i.e.
-    if nodes n1 and n2 are both in the subgraph, and
-    original node number of n1 < original node number of n2,
-    then also subgraph node number n1 < subgraph node number n2).
-
-    If edge_weights is provided, the function also returns the edge
-    weights for the subgraph which are simply a subset of the original
-    weights.
-
-    This function returns a simple graph, regardless of whether the
-    input was a two-way graph or not (NOTE: This restriction could be
-    lifted, if necessary). Also, the resulting edge_ids are not
-    related to the original ones in any way (NOTE: There certainly is
-    a relation, but as long no-one needs it, we do not specify
-    it). Also, negative nodes are discarded (NOTE: this restriction
-    can also be lifted).
-    """
-
-    cdef np.ndarray[gint, ndim=1] indextab
-    cdef CGraph_malloc subgr
-    cdef np.ndarray[gint, ndim=1] sub_edge_weights = None
-    cdef gint sub_num_nodes, sub_num_edges
-    cdef gint i, iedge, edge_count
-
-    # First figure out the new number of nodes.
-    sub_num_nodes = 0
-    indextab = np.empty(gr.num_nodes, dtype=gint_dtype)
-    indextab[:] = -1
-
-    # Pre-evaluating the select functions seems to be more than twice as fast
-    # as calling select() repeatedly in the loop.  The thing is that one cannot
-    # type ndarray as bool in Cython (yet) [Taking bint results in a strange
-    # crash].  It would be possible to cast it into a cython type using
-    # .astype(), but this didn't seem to make any relevant speed difference.
-    if isinstance(select, np.ndarray):
-        selecttab = select
-    else:
-        selecttab = select(np.arange(gr.num_nodes, dtype=gint_dtype))
-
-    for i in range(gr.num_nodes):
-        if selecttab[i]:
-            indextab[i] = sub_num_nodes
-            sub_num_nodes += 1
-
-    # Now count the number of new edges.
-    sub_num_edges = 0
-
-    for i in range(gr.num_nodes):
-        if indextab[i] > -1:
-            for iedge in range(gr.heads_idxs[i], gr.heads_idxs[i + 1]):
-                if indextab[gr.heads[iedge]] > -1:
-                    sub_num_edges += 1
-
-    # Allocate the new graph.
-    subgr = CGraph_malloc(False, False, sub_num_nodes, sub_num_edges, 0, 0)
-
-    if edge_weights is not None:
-        sub_edge_weights = np.empty(sub_num_edges, dtype=gint_dtype)
-
-    # Now fill the new edge array.
-    edge_count = 0
-
-    for i in range(gr.num_nodes):
-        if indextab[i]>-1:
-            subgr.heads_idxs[indextab[i]] = edge_count
-            for iedge in range(gr.heads_idxs[i], gr.heads_idxs[i+1]):
-                if indextab[gr.heads[iedge]] > -1:
-                    subgr.heads[edge_count] = indextab[gr.heads[iedge]]
-                    if edge_weights is not None:
-                        sub_edge_weights[edge_count] = edge_weights[iedge]
-                    edge_count += 1
-    subgr.heads_idxs[sub_num_nodes] = edge_count
-
-    subgr.num_edges = edge_count
-
-    if edge_weights is not None:
-        return subgr, sub_edge_weights
-    else:
-        return subgr
-
-
-def print_graph(gr):
-    for i in range(gr.num_nodes):
-        print(i, "->", *gr.out_neighbors(i))
diff --git a/setup.py b/setup.py
index 96b01265..e592bf59 100755
--- a/setup.py
+++ b/setup.py
@@ -533,20 +533,6 @@ def main():
          dict(sources=['kwant/graph/core.pyx'],
               depends=['kwant/graph/core.pxd', 'kwant/graph/defs.h',
                        'kwant/graph/defs.pxd'])),
-        ('kwant.graph.utils',
-         dict(sources=['kwant/graph/utils.pyx'],
-              depends=['kwant/graph/defs.h', 'kwant/graph/defs.pxd',
-                       'kwant/graph/core.pxd'])),
-        ('kwant.graph.slicer',
-         dict(sources=['kwant/graph/slicer.pyx',
-                       'kwant/graph/c_slicer/partitioner.cc',
-                       'kwant/graph/c_slicer/slicer.cc'],
-              depends=['kwant/graph/defs.h', 'kwant/graph/defs.pxd',
-                       'kwant/graph/core.pxd', 'kwant/graph/c_slicer.pxd',
-                       'kwant/graph/c_slicer/bucket_list.h',
-                       'kwant/graph/c_slicer/graphwrap.h',
-                       'kwant/graph/c_slicer/partitioner.h',
-                       'kwant/graph/c_slicer/slicer.h'])),
         ('kwant.linalg.lapack',
          dict(sources=['kwant/linalg/lapack.pyx'])),
         ('kwant.linalg._mumps',
-- 
GitLab