Skip to content
Snippets Groups Projects
Commit 478e1236 authored by Antonio Manesco's avatar Antonio Manesco
Browse files

some progress, not there yet

parent 642416c0
Branches
Tags
1 merge request!3create solvers and interface modules
......@@ -2,10 +2,9 @@ from scipy.ndimage import convolve
import numpy as np
import codes.utils as utils
from functools import partial
from scipy.optimize import anderson
from scipy.optimize import anderson, minimize
def mean_field_F(vals, vecs, E_F):
def density_matrix(vals, vecs, E_F):
"""
Returns the mean field F_ij(k) = <psi_i(k)|psi_j(k)> for all k-points and
eigenvectors below the Fermi level.
......@@ -37,19 +36,19 @@ def mean_field_F(vals, vecs, E_F):
occ_vecs_flat = np.transpose(occ_vecs_flat, axes=[0, 2, 1])
# inner products between eigenvectors
F_ij = np.einsum("kie,kje->kij", occ_vecs_flat, occ_vecs_flat.conj())
rho_ij = np.einsum("kie,kje->kij", occ_vecs_flat, occ_vecs_flat.conj())
reshape_order = [nk for i in range(dim)]
reshape_order.extend([norbs, norbs])
F = F_ij.reshape(*reshape_order)
rho = rho_ij.reshape(*reshape_order)
else:
unocc_vals = vals > E_F
occ_vecs = vecs
occ_vecs[:, unocc_vals] = 0
# Outter products between eigenvectors
F = occ_vecs @ occ_vecs.T.conj()
rho = occ_vecs @ occ_vecs.T.conj()
return F
return rho
def convolution(M1, M2):
......@@ -82,7 +81,7 @@ def convolution(M1, M2):
return V_output
def compute_mf(rho, H_int):
    """
    Compute the mean-field correction within the self-consistent loop.

    Parameters
    ----------
    rho : nd-array
        Density matrix.  Either `(norbs, norbs)` for a zero-dimensional
        model, or `(nk, ..., nk, norbs, norbs)` on a k-grid (one leading
        axis of length `nk` per spatial dimension).
    H_int : nd-array
        Interaction Hamiltonian with the same layout as `rho`.

    Returns
    -------
    mf : nd-array
        Mean-field correction with the same format as `H_int`.
    """
    nk = rho.shape[0]
    # Number of leading k-axes; the last two axes are orbital indices.
    dim = rho.ndim - 2
    if dim > 0:
        # On-site (k-independent) part of the interaction.
        H0_int = H_int[(0,) * dim]
        # Average orbital occupations over the whole k-grid.
        local_density = np.diag(np.average(rho, axis=tuple(range(dim))))
        # Exchange (Fock) term: k-grid convolution, normalized by grid size.
        exchange_mf = convolution(rho, H_int) * nk ** (-dim)
        # Direct (Hartree) term: diagonal potential from the local density.
        direct_mf = np.diag(np.einsum("i,ij->j", local_density, H0_int))
    else:
        local_density = np.diag(rho)
        exchange_mf = rho * H_int
        direct_mf = np.diag(np.einsum("i,ij->j", local_density, H_int))
    return direct_mf - exchange_mf
def total_energy(h, rho):
    """Return the total energy Tr[h @ rho], summed over all k-points."""
    per_k_traces = np.trace(h @ rho, axis1=-1, axis2=-2)
    return per_k_traces.sum()
class Model:
    """Tight-binding model with interactions, evaluated on a k-grid."""

    def __init__(self, tb_model, int_model=None, Vk=None, guess=None):
        """
        Parameters
        ----------
        tb_model : dict
            Non-interacting tight-binding model: keys are hopping vectors,
            values are hopping matrices.
        int_model : dict, optional
            Interaction model in the same format as `tb_model`; if given it
            takes precedence over `Vk`.
        Vk : callable, optional
            Interaction Hamiltonian as a function of k, used when
            `int_model` is not provided.
        guess : dict, optional
            Initial guess for the mean-field correction.
        """
        self.tb_model = tb_model
        self.hk = utils.model2hk(tb_model=tb_model)
        if int_model is not None:
            self.int_model = int_model
            self.Vk = utils.model2hk(tb_model=int_model)
        else:
            self.Vk = Vk
        # Spatial dimension and number of orbitals are inferred from the
        # first hopping entry.
        self.dim = len(next(iter(tb_model.keys())))
        self.ndof = len(next(iter(tb_model.values())))
        self.guess = guess

    def random_guess(self, vectors):
        """Generate a random mean-field guess on the given hopping vectors."""
        self.guess = utils.generate_guess(
            vectors=vectors,
            ndof=self.ndof,
            scale=1,
        )

    def kgrid_evaluation(self, nk):
        """Evaluate all Hamiltonians on a grid of `nk` points per dimension."""
        self.hamiltonians_0, self.ks = utils.kgrid_hamiltonian(
            nk=nk,
            hk=self.hk,
            dim=self.dim,
            return_ks=True,
        )
        self.H_int = utils.kgrid_hamiltonian(nk=nk, hk=self.Vk, dim=self.dim)
        self.mf_k = utils.kgrid_hamiltonian(
            nk=nk,
            hk=utils.model2hk(self.guess),
            dim=self.dim,
        )

    def flatten_mf(self):
        """Pack `mf_k` into a flat complex vector for the optimizer."""
        flat = self.mf_k.flatten()
        # First half holds the real parts, second half the imaginary parts
        # — the inverse of `reshape_mf`.
        return flat[: len(flat) // 2] + 1j * flat[len(flat) // 2 :]

    def reshape_mf(self, mf_flat):
        """Unpack a flat complex vector back to the k-grid array shape."""
        mf_flat = np.concatenate((np.real(mf_flat), np.imag(mf_flat)))
        return mf_flat.reshape(*self.hamiltonians_0.shape)
def updated_matrices(mf_k, model):
    """
    Perform one step of the self-consistent loop.

    Parameters
    ----------
    mf_k : nd-array
        Current mean-field correction on the k-grid.
    model : Model
        Model providing `hamiltonians_0`, `filling` and `H_int`.

    Returns
    -------
    rho : nd-array
        Density matrix of the current mean-field Hamiltonian.
    mf : nd-array
        Updated mean-field correction, shifted by the Fermi energy.
    """
    # Diagonalize the current mean-field Hamiltonian.
    hamiltonians = model.hamiltonians_0 + mf_k
    vals, vecs = np.linalg.eigh(hamiltonians)
    # Re-orthonormalize the eigenvectors — TODO(review): confirm this is
    # needed after `eigh` (likely targets degenerate subspaces).
    vecs = np.linalg.qr(vecs)[0]
    E_F = utils.get_fermi_energy(vals, model.filling)
    rho = density_matrix(vals=vals, vecs=vecs, E_F=E_F)
    mf = compute_mf(rho=rho, H_int=model.H_int) - E_F * np.eye(
        model.hamiltonians_0.shape[-1]
    )
    return rho, mf
def default_cost(mf, model):
    """
    Default cost function for the self-consistent root finder.

    Updates `model.rho`, `model.mf_k` and `model.energy` as side effects,
    then returns whichever of three residuals is currently largest: the
    commutator [h, rho], the change of the mean field, or the
    anti-Hermitian part of `mf`.  All three vanish at self-consistency.
    """
    model.rho, model.mf_k = updated_matrices(mf_k=mf, model=model)
    # Hoisted: the updated Hamiltonian is needed both for the energy and
    # for the commutator residual.
    h = model.hamiltonians_0 + model.mf_k
    model.energy = total_energy(h=h, rho=model.rho)
    commutator = h @ model.rho - model.rho @ h
    delta_mf = model.mf_k - mf
    n_herm = (mf - np.moveaxis(mf, -1, -2).conj()) / 2
    quantities = np.array([commutator, delta_mf, n_herm])
    # Return the residual with the largest Frobenius norm.
    idx_max = np.argmax(np.linalg.norm(quantities.reshape(3, -1), axis=-1))
    return quantities[idx_max]
def find_groundstate_ham(
    model,
    cutoff_Vk,
    filling,
    nk=10,
    cost_function=default_cost,
    optimizer=anderson,
    optimizer_kwargs=None,
):
    """
    Self-consistent loop to find the groundstate Hamiltonian.

    Parameters
    ----------
    model : Model
        Model holding the tight-binding and interaction Hamiltonians.
    cutoff_Vk : int
        Cutoff used to generate real-space vectors for the interaction.
    filling : float
        Number of electrons per unit cell.
    nk : int
        Number of k-points per spatial dimension.
    cost_function : callable
        Residual function whose root is the self-consistent mean field.
    optimizer : callable
        Root-finding routine; `scipy.optimize.anderson` by default.
    optimizer_kwargs : dict or None
        Extra keyword arguments forwarded to `optimizer`.  (Default is
        `None`, not `{}`, to avoid a shared mutable default.)

    Returns
    -------
    scf_model : dict
        Tight-binding model of the Hartree-Fock solution.

    Raises
    ------
    RuntimeError
        If the converged mean field is not Hermitian within tolerance.
    """
    optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs
    model.nk = nk
    model.filling = filling
    vectors = utils.generate_vectors(cutoff_Vk, model.dim)
    model.vectors = [*vectors, *model.tb_model.keys()]
    if model.guess is None:
        model.random_guess(model.vectors)
    model.kgrid_evaluation(nk=nk)
    fun = partial(cost_function, model=model)
    mf_k = optimizer(fun, model.mf_k, **optimizer_kwargs)
    # Validate Hermiticity with an explicit raise: an `assert` would be
    # silently stripped under `python -O`.
    anti_hermitian = (mf_k - np.moveaxis(mf_k, -1, -2).conj()) / 2
    if not np.allclose(anti_hermitian, 0, atol=1e-5):
        raise RuntimeError("Mean-field solution is not Hermitian.")
    # Project onto the Hermitian part to remove numerical noise.
    mf_k = (mf_k + np.moveaxis(mf_k, -1, -2).conj()) / 2
    return utils.hk2tb_model(model.hamiltonians_0 + mf_k, model.vectors, model.ks)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment