diff --git a/docs/source/index.md b/docs/source/index.md
index 5bc8c5593f3f720812dc7723dc692b85c172db73..342f8acf3cc35799a66a79dc9977586ae2a02133 100644
--- a/docs/source/index.md
+++ b/docs/source/index.md
@@ -33,6 +33,15 @@
 graphene_example.md
 
 mf_notes.md
 algorithm.md
+```
+
+```{toctree}
+:hidden:
+:maxdepth: 1
+:caption: Documentation
+
+mf_notes.md
+algorithm.md
 documentation/pymf.md
 ```
diff --git a/pymf/tests/test_graphene.py b/pymf/tests/test_graphene.py
index 90e1c1aecd2fdc22dcba7824da0bca036b5d82ba..42a77503f60c1218d59c189645691165227ecd8c 100644
--- a/pymf/tests/test_graphene.py
+++ b/pymf/tests/test_graphene.py
@@ -37,7 +37,7 @@ def compute_gap(tb, fermi_energy=0, nk=100):
     return np.abs(emin - emax)
 
 
-repeat_number = 10
+repeat_number = 5
 # %%
 graphene_builder, int_builder = kwant_examples.graphene_extended_hubbard()
 h_0 = utils.builder_to_tb(graphene_builder)
@@ -77,9 +77,7 @@ def gap_prediction(U, V):
     guess = generate_guess(frozenset(h_int), len(list(h_0.values())[0]))
     model = Model(h_0, h_int, filling)
 
-    mf_sol = solver(
-        model, guess, nk=nk, optimizer_kwargs={"verbose": True, "M": 0, "f_tol": 1e-8}
-    )
+    mf_sol = solver(model, guess, nk=nk, optimizer_kwargs={"M": 0, "f_tol": 1e-8})
     gap = compute_gap(add_tb(h_0, mf_sol), nk=200)
 
     # Check if the gap is predicted correctly
diff --git a/pymf/tests/test_hat.py b/pymf/tests/test_hat.py
index 48cf0110a8c603fa7ba39267ba39e7925c555cd0..620c6729a8c6bd5d0a400c4a94c5282a37f2b5c2 100644
--- a/pymf/tests/test_hat.py
+++ b/pymf/tests/test_hat.py
@@ -12,6 +12,8 @@ from pymf import (
     construct_density_matrix,
 )
 
+from pymf.tb.utils import generate_tb_keys
+
 
 # %%
 def total_energy(ham_tb, rho_tb):
@@ -21,37 +23,43 @@ def total_energy(ham_tb, rho_tb):
 # %%
 U0 = 1
 filling = 2
-nk = 100
-repeat_number = 10
-
-hopp = np.kron(np.array([[0, 1], [0, 0]]), np.eye(2))
-h_0 = {(0,): hopp + hopp.T.conj(), (1,): hopp, (-1,): hopp.T.conj()}
-h_int_U0 = {
-    (0,): U0 * np.kron(np.eye(2), np.ones((2, 2))),
-}
+nk = 10
+repeat_number = 3
+ndof = 4
+cutoff = 1
 
 
 # %%
 @np.vectorize
-def mf_rescaled(alpha, mf0):
-    hamiltonian = add_tb(h_0, scale_tb(mf0, alpha))
+def mf_rescaled(alpha, mf0, h0):
+    hamiltonian = add_tb(h0, scale_tb(mf0, alpha))
     rho, _ = construct_density_matrix(hamiltonian, filling=filling, nk=nk)
-    hamiltonian = add_tb(h_0, scale_tb(mf0, np.sign(alpha)))
+    hamiltonian = add_tb(h0, scale_tb(mf0, np.sign(alpha)))
     return total_energy(hamiltonian, rho)
 
 
 @pytest.mark.parametrize("seed", range(repeat_number))
 def test_mexican_hat(seed):
     np.random.seed(seed)
-    guess = generate_guess(frozenset(h_int_U0), len(h_int_U0[(0,)]))
-    _model = Model(h_0, h_int_U0, filling=filling)
-    mf_sol_groundstate = solver(
-        _model, mf_guess=guess, nk=nk, optimizer_kwargs={"M": 0}
-    )
-
-    alphas = np.random.uniform(0, 50, 100)
-    alphas = np.where(alphas == 1, 0, alphas)
-    assert np.all(
-        mf_rescaled(alphas, mf0=mf_sol_groundstate)
-        > mf_rescaled(np.array([1]), mf0=mf_sol_groundstate)
-    )
+    h0s = []
+    h_ints = []
+    for ndim in np.arange(4):
+        keys = generate_tb_keys(cutoff, ndim)
+        h0s.append(generate_guess(keys, ndof))
+        h_int = generate_guess(keys, ndof)
+        h_int[keys[len(keys) // 2]] += U0
+        h_ints.append(h_int)
+
+    for h0, h_int in zip(h0s, h_ints):
+        guess = generate_guess(frozenset(h_int), ndof)
+        _model = Model(h0, h_int, filling=filling)
+        mf_sol_groundstate = solver(
+            _model, mf_guess=guess, nk=nk, optimizer_kwargs={"M": 0}
+        )
+
+        alphas = np.random.uniform(0, 50, 100)
+        alphas = np.where(alphas == 1, 0, alphas)
+        assert np.all(
+            mf_rescaled(alphas, mf0=mf_sol_groundstate, h0=h0)
+            > mf_rescaled(np.array([1]), mf0=mf_sol_groundstate, h0=h0)
+        )
diff --git a/pymf/tests/test_hubbard.py b/pymf/tests/test_hubbard.py
index 9007d995909c750cb007d7673913b7651201d786..3647c28a5412c9b02b3a7004b75ff905812eef8c 100644
--- a/pymf/tests/test_hubbard.py
+++ b/pymf/tests/test_hubbard.py
@@ -10,5 +10,5 @@ from pymf import (
     add_tb,
 )
 
-repeat_number = 10
+repeat_number = 3
 # %%
@@ -33,7 +33,7 @@ def gap_relation_hubbard(Us, nk, nk_dense, tol=1e-3):
     gaps = []
     for U in Us:
         h_int = {
-            (0,): U * np.kron(np.ones((2, 2)), np.eye(2)),
+            (0,): U * np.kron(np.eye(2), np.ones((2, 2))),
         }
         guess = generate_guess(frozenset(h_int), len(list(h_0.values())[0]))
         full_model = Model(h_0, h_int, filling=2)
@@ -49,5 +49,5 @@ def gap_relation_hubbard(Us, nk, nk_dense, tol=1e-3):
 def test_gap_hubbard(seed):
     """Test the gap prediction for the Hubbard model."""
     np.random.seed(seed)
-    Us = np.linspace(0.5, 5, 50, endpoint=True)
-    gap_relation_hubbard(Us, nk=30, nk_dense=100, tol=1e-2)
+    Us = np.linspace(8, 10, 15, endpoint=True)
+    gap_relation_hubbard(Us, nk=20, nk_dense=100, tol=1e-1)