Skip to content
Snippets Groups Projects
Commit 1ecd0a32 authored by Kostas Vilkelis's avatar Kostas Vilkelis :flamingo:
Browse files

merge from main

parent c106a831
No related branches found
No related tags found
1 merge request!8Builder fixes
......@@ -2,13 +2,15 @@
import numpy as np
import pytest
from pymf.model import Model
from pymf.solvers import solver
from pymf.tb import utils
from pymf.tb.tb import add_tb
from pymf.tests.test_graphene import compute_gap
from meanfi.tests.test_graphene import compute_gap
from meanfi import (
Model,
solver,
guess_tb,
add_tb,
)
repeat_number = 10
repeat_number = 3
# %%
......@@ -31,9 +33,9 @@ def gap_relation_hubbard(Us, nk, nk_dense, tol=1e-3):
gaps = []
for U in Us:
h_int = {
(0,): U * np.kron(np.ones((2, 2)), np.eye(2)),
(0,): U * np.kron(np.eye(2), np.ones((2, 2))),
}
guess = utils.generate_guess(frozenset(h_int), len(list(h_0.values())[0]))
guess = guess_tb(frozenset(h_int), len(list(h_0.values())[0]))
full_model = Model(h_0, h_int, filling=2)
mf_sol = solver(full_model, guess, nk=nk)
_gap = compute_gap(add_tb(h_0, mf_sol), fermi_energy=0, nk=nk_dense)
......@@ -47,5 +49,5 @@ def gap_relation_hubbard(Us, nk, nk_dense, tol=1e-3):
def test_gap_hubbard(seed):
    """Test the gap prediction for the Hubbard model.

    Parameters
    ----------
    seed : int
        Seed for numpy's RNG so the run is reproducible.
    """
    np.random.seed(seed)
    # Merge artifact removed: the pre-merge values (Us over 0.5..5, nk=30,
    # tol=1e-2) were dead assignments immediately shadowed by these.
    Us = np.linspace(8, 10, 15, endpoint=True)
    gap_relation_hubbard(Us, nk=20, nk_dense=100, tol=1e-1)
# %%
import pytest
import numpy as np
from pymf.params.rparams import rparams_to_tb, tb_to_rparams
from pymf.tb.tb import compare_dicts
from pymf.tb.utils import generate_guess
from meanfi.params.rparams import rparams_to_tb, tb_to_rparams
from meanfi.tb.tb import compare_dicts
from meanfi import guess_tb
repeat_number = 10
......@@ -16,7 +16,7 @@ vectors = ((0, 0), (1, 0), (-1, 0), (0, 1), (0, -1), (1, -1), (-1, 1), (1, 1), (
def test_parametrisation(seed):
    """Test the parametrisation round-trip of the tight-binding model.

    Generates a random mean-field guess, converts it to real parameters and
    back, and checks the result matches the original guess.

    Parameters
    ----------
    seed : int
        Seed for numpy's RNG so the run is reproducible.
    """
    np.random.seed(seed)
    # Merge artifact removed: the stale `generate_guess` call is no longer
    # imported after the pymf -> meanfi rename; `guess_tb` replaces it.
    mf_guess = guess_tb(vectors, ndof)
    mf_params = tb_to_rparams(mf_guess)
    mf_new = rparams_to_tb(mf_params, vectors, ndof)
    compare_dicts(mf_guess, mf_new)
......@@ -5,8 +5,8 @@ import numpy as np
import pytest
from scipy.fftpack import ifftn
from pymf.tb.tb import compare_dicts
from pymf.tb.transforms import ifftn_to_tb, tb_to_khamvector
from meanfi.tb.tb import compare_dicts
from meanfi.tb.transforms import ifftn_to_tb, tb_to_kgrid
repeat_number = 10
......@@ -24,6 +24,6 @@ def test_fourier(seed):
keys = [np.arange(-max_order + 1, max_order) for i in range(ndim)]
keys = it.product(*keys)
h_0 = {key: (np.random.rand(matrix_size, matrix_size) - 0.5) * 2 for key in keys}
kham = tb_to_khamvector(h_0, nk=nk)
kham = tb_to_kgrid(h_0, nk=nk)
tb_new = ifftn_to_tb(ifftn(kham, axes=np.arange(ndim)))
compare_dicts(h_0, tb_new)
......@@ -2,10 +2,9 @@
import numpy as np
import pytest
from pymf.model import Model
from pymf.solvers import solver
from pymf.tb import utils
from pymf.tb.tb import add_tb, compare_dicts
from meanfi.tb import utils
from meanfi.tb.tb import compare_dicts
from meanfi import Model, solver, guess_tb, add_tb, fermi_energy
# %%
repeat_number = 10
......@@ -21,16 +20,16 @@ def test_zero_hint(seed):
dim = np.random.randint(0, 3)
ndof = np.random.randint(2, 10)
filling = np.random.randint(1, ndof)
random_hopping_vecs = utils.generate_vectors(cutoff, dim)
random_hopping_vecs = utils.generate_tb_keys(cutoff, dim)
zero_key = tuple([0] * dim)
h_0_random = utils.generate_guess(random_hopping_vecs, ndof, scale=1)
h_int_only_phases = utils.generate_guess(random_hopping_vecs, ndof, scale=0)
guess = utils.generate_guess(random_hopping_vecs, ndof, scale=1)
h_0_random = guess_tb(random_hopping_vecs, ndof, scale=1)
h_int_only_phases = guess_tb(random_hopping_vecs, ndof, scale=0)
guess = guess_tb(random_hopping_vecs, ndof, scale=1)
model = Model(h_0_random, h_int_only_phases, filling=filling)
mf_sol = solver(model, guess, nk=40, optimizer_kwargs={"M": 0, "f_tol": 1e-10})
h_fermi = utils.calculate_fermi_energy(mf_sol, filling=filling, nk=20)
h_fermi = fermi_energy(mf_sol, filling=filling, nk=20)
mf_sol[zero_key] -= h_fermi * np.eye(mf_sol[zero_key].shape[0])
compare_dicts(add_tb(mf_sol, h_0_random), h_0_random, atol=1e-10)
import nox
@nox.session(venv_backend="mamba")
@nox.parametrize(
    "python,numpy,scipy,kwant",
    [
        ("3.10", "=1.23", "=1.9", "=1.4"),
        ("3.11", "=1.24", "=1.10", "=1.4"),
        ("3.12", ">=1.26", ">=1.13", ">=1.4"),
    ],
    ids=["minimal", "mid", "latest"],
)
def tests(session, numpy, scipy, kwant):
    """Run the test suite against pinned numpy/scipy/kwant combinations."""
    # Pinned scientific stack for this parametrized environment.
    pins = [f"numpy{numpy}", f"scipy{scipy}", f"kwant{kwant}"]
    # Test tooling installed alongside the pins from conda-forge.
    tooling = ["packaging==22.0", "pytest-cov", "pytest-randomly", "pytest-repeat"]
    session.run("mamba", "install", "-y", *pins, *tooling, "-c", "conda-forge")
    session.install(".")
    session.run("pip", "install", "ruff", "pytest-ruff")
    session.run("pytest", "--ruff", "-x")
......@@ -5,9 +5,9 @@ import memray
import numpy as np
from pyinstrument import Profiler
from pymf.kwant_helper import kwant_examples, utils
from pymf.model import Model
from pymf.tb.utils import generate_guess
from meanfi.kwant_helper import kwant_examples, utils
from meanfi.model import Model
from meanfi.tb.utils import guess_tb
# %%
graphene_builder, int_builder = kwant_examples.graphene_extended_hubbard()
......@@ -19,7 +19,7 @@ nk = 600
h_int = utils.builder_to_tb(int_builder, params)
h_0 = utils.builder_to_tb(graphene_builder)
norbs = len(list(h_0.values())[0])
guess = generate_guess(frozenset(h_int), norbs)
guess = guess_tb(frozenset(h_int), norbs)
model = Model(h_0, h_int, filling)
......
from pymf.params.param_transforms import (
complex_to_real,
flat_to_tb,
real_to_complex,
tb_to_flat,
)
def tb_to_rparams(tb):
    """Convert a mean-field tight-binding model to a set of real parameters.

    Parameters
    ----------
    tb : dict
        Mean-field tight-binding model.

    Returns
    -------
    dict
        Real parameters.
    """
    flat = tb_to_flat(tb)
    return complex_to_real(flat)  # placeholder for now
def rparams_to_tb(r_params, key_list, size):
    """Extract mean-field tight-binding model from a set of real parameters.

    Parameters
    ----------
    r_params : dict
        Real parameters.
    key_list : list
        List of the keys of the mean-field tight-binding model, meaning all the
        hoppings.
    size : tuple
        Shape of the mean-field tight-binding model.

    Returns
    -------
    dict
        Mean-field tight-binding model.
    """
    complex_flat = real_to_complex(r_params)
    shape = (len(key_list), size, size)
    return flat_to_tb(complex_flat, shape, key_list)
from functools import partial
import numpy as np
import scipy
from pymf.params.rparams import rparams_to_tb, tb_to_rparams
from pymf.tb.tb import add_tb
from pymf.tb.utils import calculate_fermi_energy
def cost(mf_param, Model, nk=100):
    """Define the cost function for fixed point iteration.

    The cost function is the difference between the input mean-field real space
    parametrisation and a new mean-field.

    Parameters
    ----------
    mf_param : numpy.array
        The mean-field real space parametrisation.
    Model : Model
        The model object.
    nk : int, optional
        The number of k-points to use in the grid. The default is 100.

    Returns
    -------
    numpy.array
        Difference between the updated and the input real parametrisation;
        zero at a self-consistent fixed point.
    """
    ndof = Model._size
    # Round-trip through the tight-binding representation to apply one
    # mean-field iteration.
    tb_in = rparams_to_tb(mf_param, list(Model.h_int), ndof)
    tb_out = Model.mfield(tb_in, nk=nk)
    return tb_to_rparams(tb_out) - mf_param
def solver(
    Model, mf_guess, nk=100, optimizer=scipy.optimize.anderson, optimizer_kwargs=None
):
    """Solve the mean-field self-consistent equation.

    Parameters
    ----------
    Model : Model
        The model object.
    mf_guess : numpy.array
        The initial guess for the mean-field tight-binding model.
    nk : int, optional
        The number of k-points to use in the grid. The default is 100. In the
        0-dimensional case, this parameter is ignored.
    optimizer : scipy.optimize, optional
        The optimizer to use to solve for fixed-points. The default is
        scipy.optimize.anderson.
    optimizer_kwargs : dict, optional
        The keyword arguments to pass to the optimizer. The default is None,
        treated as an empty dict.

    Returns
    -------
    result : numpy.array
        The mean-field tight-binding model, shifted so that the Fermi level
        of h_0 + result sits at zero.
    """
    # None-sentinel instead of a mutable `{}` default: a shared default dict
    # would persist mutations across calls.
    if optimizer_kwargs is None:
        optimizer_kwargs = {}
    shape = Model._size
    mf_params = tb_to_rparams(mf_guess)
    f = partial(cost, Model=Model, nk=nk)
    result = rparams_to_tb(
        optimizer(f, mf_params, **optimizer_kwargs), list(Model.h_int), shape
    )
    # Subtract the Fermi energy on the on-site term so the returned mean field
    # leaves the total Hamiltonian at zero chemical potential.
    fermi = calculate_fermi_energy(add_tb(Model.h_0, result), Model.filling, nk=nk)
    return add_tb(result, {Model._local_key: -fermi * np.eye(Model._size)})
# %%
import numpy as np
from pymf.solvers import solver
from pymf.tb import utils
from pymf.model import Model
from pymf.tb.tb import add_tb, scale_tb
from pymf import mf
from pymf import observables
import pytest
# %%
def total_energy(ham_tb, rho_tb):
    """Return the real part of the expectation value of ham_tb in rho_tb."""
    expectation = observables.expectation_value(rho_tb, ham_tb)
    return np.real(expectation)
# %%
U0 = 1
filling = 2
nk = 100
repeat_number = 10
hopp = np.kron(np.array([[0, 1], [0, 0]]), np.eye(2))
h_0 = {(0,): hopp + hopp.T.conj(), (1,): hopp, (-1,): hopp.T.conj()}
h_int_U0 = {
(0,): U0 * np.kron(np.eye(2), np.ones((2, 2))),
}
# %%
@np.vectorize
def mf_rescaled(alpha, mf0):
    # Total energy of the trial state obtained by scaling the mean field mf0
    # by alpha. The density matrix comes from h_0 + alpha * mf0, while the
    # energy is evaluated with h_0 + sign(alpha) * mf0 — presumably so only
    # the direction of the mean field (not its magnitude) enters the energy
    # functional; confirm against the mexican-hat derivation.
    # Uses module globals h_0, filling, nk and the project `mf` helper.
    hamiltonian = add_tb(h_0, scale_tb(mf0, alpha))
    rho, _ = mf.construct_density_matrix(hamiltonian, filling=filling, nk=nk)
    hamiltonian = add_tb(h_0, scale_tb(mf0, np.sign(alpha)))
    return total_energy(hamiltonian, rho)
@pytest.mark.parametrize("seed", range(repeat_number))
def test_mexican_hat(seed):
    """The solver's ground state has lower energy than any rescaled solution."""
    np.random.seed(seed)
    # RNG call order matters for reproducibility: guess first, then alphas.
    ndof = len(h_int_U0[(0,)])
    guess = utils.generate_guess(frozenset(h_int_U0), ndof)
    _model = Model(h_0, h_int_U0, filling=filling)
    mf_sol_groundstate = solver(
        _model, mf_guess=guess, nk=nk, optimizer_kwargs={"M": 0}
    )
    alphas = np.random.uniform(0, 50, 100)
    alphas = np.where(alphas == 1, 0, alphas)
    ground_energy = mf_rescaled(np.array([1]), mf0=mf_sol_groundstate)
    rescaled_energies = mf_rescaled(alphas, mf0=mf_sol_groundstate)
    assert np.all(rescaled_energies > ground_energy)
......@@ -3,43 +3,44 @@ requires = ["hatchling", "hatch-vcs"]
build-backend = "hatchling.build"
[project]
name = "pymf"
name = "meanfi"
dynamic = ["version"]
authors = [
{name="pymf developers"},
{name="MeanFi developers"},
]
description = "Package to perform self-consistent mean-field calculations on tight-binding systems"
readme = "README.md"
requires-python = ">=3.9"
requires-python = ">=3.10"
classifiers = [
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
]
dependencies = [
"numpy>=1.23",
"scipy>=1.8",
"scipy>=1.9",
"kwant>=1.4",
"packaging>=22.0", # For version parsing
]
[tool.hatch.version]
source = "vcs"
[tool.hatch.build.hooks.vcs]
version-file = "pymf/_version.py"
version-file = "meanfi/_version.py"
[project.urls]
"Documentation" = "https://kwant-scf.readthedocs.io/en/latest/"
"Repository" = "https://gitlab.kwant-project.org/qt/kwant-scf"
"Bug Tracker" = "https://gitlab.kwant-project.org/qt/kwant-scf/-/issues"
"Documentation" = "https://meanfi.readthedocs.io/en/latest/"
"Repository" = "https://gitlab.kwant-project.org/qt/meanfi"
"Bug Tracker" = "https://gitlab.kwant-project.org/qt/meanfi/-/issues"
[tool.hatch.build.targets.wheel]
packages = ["pymf"]
packages = ["meanfi"]
[tool.hatch.build.targets.sdist]
include = [
"pymf",
"meanfi",
"README.md",
"LICENSE",
"pyproject.toml",
......@@ -47,5 +48,5 @@ include = [
]
[tool.codespell]
skip = "*.ipynb,"
ignore-words-list = "multline,"
skip = "*.ipynb"
ignore-words-list = "multline, ket, bra, braket, nwo"
[pytest]
minversion = 7.0
addopts = --cov-config=.coveragerc --verbose --junitxml=junit.xml --cov=pymf
addopts = --cov-config=.coveragerc --verbose --junitxml=junit.xml --cov=meanfi
--cov-report term --cov-report html --cov-report xml --ruff
testpaths = pymf
testpaths = meanfi
required_plugins = pytest-randomly pytest-cov pytest-ruff pytest-repeat
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment