Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions moldga/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,7 @@ def __init__(self):
self.previous_sc_path: str = "./"
self.use_lambda_correction: bool = False
self.restrict_chi_phys: bool = False
self.anderson_prev_res: float | None = None


class EliashbergConfig:
Expand Down Expand Up @@ -148,6 +149,8 @@ def __init__(self):
self.mu: float = 0.0
self.n: float = 0.0
self.n_bands: int = 1
self.nd_bands: int = 1
self.np_bands: int = 0
self.occ: np.ndarray = np.ndarray(0)
self.occ_k: np.ndarray = np.ndarray(0)
self.occ_dmft: np.ndarray = np.ndarray(0)
Expand Down
94 changes: 87 additions & 7 deletions moldga/dga_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,9 +31,13 @@ def uniquify_path(path: str = None):
return path


def load_from_w2dyn_file_and_update_config() -> tuple[GreensFunction, SelfEnergy, LocalFourPoint, LocalFourPoint]:
def load_from_w2dyn_file_and_update_config(
combine_two_atoms_to_one_obj: bool = False,
) -> tuple[GreensFunction, SelfEnergy, LocalFourPoint, LocalFourPoint]:
"""
Loads data from the w2dyn file and updates the config file.
If combine_two_atoms_to_one_obj is True, we are doubling the orbital space and putting the data from the
inequivalent atoms into the diagonal blocks after we average over them.
"""
file = w2dyn_aux.W2dynFile(fname=str(os.path.join(config.dmft.input_path, config.dmft.fname_1p)))

Expand All @@ -51,36 +55,106 @@ def load_from_w2dyn_file_and_update_config() -> tuple[GreensFunction, SelfEnergy
config.lattice.interaction.vpp = file.get_vpp()

config.sys.mu = file.get_mu()
config.sys.n_bands = file.get_nd() + file.get_np()
config.sys.nd_bands = file.get_nd()
config.sys.np_bands = file.get_np()
config.sys.n_bands = config.sys.nd_bands + config.sys.np_bands
config.sys.n = file.get_totdens()
config.sys.occ_dmft = 2 * np.mean(file.get_rho1(), axis=(1, 3))

if combine_two_atoms_to_one_obj:
config.sys.n_bands *= 2
config.sys.nd_bands *= 2
config.sys.np_bands *= 2
config.sys.n *= 2

config.sys.occ_dmft = np.zeros((config.sys.n_bands, config.sys.n_bands))
rho_1_mean = 0.5 * (np.mean(file.get_rho1(), axis=(1, 3)) + np.mean(file.get_rho1(ineq=2), axis=(1, 3)))
config.sys.occ_dmft[0:2, 0:2] = 2 * rho_1_mean
config.sys.occ_dmft[2:4, 2:4] = 2 * rho_1_mean

if config.sys.n == 0:
config.sys.n = 2 * np.sum(config.sys.occ_dmft)

file2 = w2dyn_aux.W2dynG4iwFile(fname=str(os.path.join(config.dmft.input_path, config.dmft.fname_2p)))
g2_dens = LocalFourPoint(file2.read_g2_full_multiband(config.sys.n_bands, name="dens"), channel=SpinChannel.DENS)
g2_magn = LocalFourPoint(file2.read_g2_full_multiband(config.sys.n_bands, name="magn"), channel=SpinChannel.MAGN)
g2_dens = LocalFourPoint(
file2.read_g2_full_multiband(file.get_nd() + file.get_np(), name="dens"), channel=SpinChannel.DENS
)
g2_magn = LocalFourPoint(
file2.read_g2_full_multiband(file.get_nd() + file.get_np(), name="magn"), channel=SpinChannel.MAGN
)

if combine_two_atoms_to_one_obj:
g2_dens_2 = LocalFourPoint(
file2.read_g2_full_multiband(file.get_nd() + file.get_np(), ineq=2, name="dens"), channel=SpinChannel.DENS
)
g2_magn_2 = LocalFourPoint(
file2.read_g2_full_multiband(file.get_nd() + file.get_np(), ineq=2, name="magn"), channel=SpinChannel.MAGN
)

def construct_large_g2(g2_1: "LocalFourPoint", g2_2: "LocalFourPoint") -> "LocalFourPoint":
    """
    Average the two inequivalent atoms' G2 data and embed the mean into the
    diagonal orbital blocks of a doubled orbital space. Reuses g2_1 as the
    output container and returns it; the off-diagonal blocks stay zero.
    """
    g2_mean = 0.5 * (g2_1.mat + g2_2.mat)
    del g2_2
    nb = g2_mean.shape[0]  # number of bands per atom; output doubles this
    # Allocate with g2_mean's dtype: a plain np.zeros would be float64 and
    # silently discard the imaginary part of the (complex) G2 data on assignment.
    g2_1.mat = np.zeros((2 * nb, 2 * nb, 2 * nb, 2 * nb, *g2_mean.shape[4:]), dtype=g2_mean.dtype)
    g2_1.mat[0:nb, 0:nb, 0:nb, 0:nb] = g2_mean
    g2_1.mat[nb:, nb:, nb:, nb:] = g2_mean
    del g2_mean
    return g2_1

g2_dens = construct_large_g2(g2_dens, g2_dens_2)
g2_magn = construct_large_g2(g2_magn, g2_magn_2)

file2.close()

update_frequency_boxes(g2_dens.niw, g2_dens.niv)

def extend_orbital(arr: np.ndarray) -> np.ndarray:
return np.einsum("i...,ij->ij...", arr, np.eye(config.sys.n_bands))
return np.einsum("i...,ij->ij...", arr, np.eye(arr.shape[0]))

giw_spin_mean = np.mean(file.get_giw(), axis=1) # [band,spin,niv]
niv_dmft = giw_spin_mean.shape[-1] // 2
niv_cut = config.box.niw_core + config.box.niv_full + 10
giw_spin_mean = giw_spin_mean[..., niv_dmft - niv_cut : niv_dmft + niv_cut]
g_dmft = GreensFunction(extend_orbital(giw_spin_mean))

if combine_two_atoms_to_one_obj:
giw_spin_mean_2 = np.mean(file.get_giw(ineq=2), axis=1)[..., niv_dmft - niv_cut : niv_dmft + niv_cut]
giw_spin_mean = 0.5 * (giw_spin_mean + giw_spin_mean_2)
del giw_spin_mean_2
giw_spin_mean_large = np.zeros((2 * giw_spin_mean.shape[0], *giw_spin_mean.shape[1:]))
giw_spin_mean_large[0:2] = giw_spin_mean
giw_spin_mean_large[2:4] = giw_spin_mean
g_dmft = GreensFunction(extend_orbital(giw_spin_mean_large))
del giw_spin_mean_large

siw_spin_mean = np.mean(file.get_siw(), axis=1) # [band,spin,niv]
siw_spin_mean = extend_orbital(siw_spin_mean)[None, None, None, ...]
siw_dc_spin_mean = np.mean(file.get_dc(), axis=-1) # [band,spin]
siw_dc_spin_mean = extend_orbital(siw_dc_spin_mean)[None, None, None, ..., None]
siw_spin_mean = siw_spin_mean[..., niv_dmft - niv_cut : niv_dmft + niv_cut]
sigma_dmft = SelfEnergy(siw_spin_mean, estimate_niv_core=True) + siw_dc_spin_mean

if combine_two_atoms_to_one_obj:
siw_spin_mean_2 = np.mean(file.get_siw(ineq=2), axis=1)
siw_spin_mean_2 = extend_orbital(siw_spin_mean_2)[None, None, None, ...]
siw_dc_spin_mean_2 = np.mean(file.get_dc(ineq=2), axis=-1)
siw_dc_spin_mean_2 = extend_orbital(siw_dc_spin_mean_2)[None, None, None, ..., None]
siw_spin_mean_2 = siw_spin_mean_2[..., niv_dmft - niv_cut : niv_dmft + niv_cut]
siw_spin_mean = 0.5 * (siw_spin_mean + siw_spin_mean_2)
del siw_spin_mean_2
siw_dc_spin_mean = 0.5 * (siw_dc_spin_mean + siw_dc_spin_mean_2)
del siw_dc_spin_mean_2

siw_spin_mean_large = np.zeros(
(1, 1, 1, 2 * siw_spin_mean.shape[3], 2 * siw_spin_mean.shape[3], siw_spin_mean.shape[-1])
)
siw_spin_mean_large[:, :, :, 0:2, 0:2, ...] = siw_spin_mean
siw_spin_mean_large[:, :, :, 2:4, 2:4, ...] = siw_spin_mean
siw_dc_spin_mean_large = np.zeros_like(siw_spin_mean_large)
siw_dc_spin_mean_large[:, :, :, 0:2, 0:2, ...] = siw_dc_spin_mean
siw_dc_spin_mean_large[:, :, :, 2:4, 2:4, ...] = siw_dc_spin_mean
sigma_dmft = SelfEnergy(siw_spin_mean_large, estimate_niv_core=True) + siw_dc_spin_mean_large
del siw_spin_mean_large, siw_dc_spin_mean_large

del giw_spin_mean, siw_spin_mean, siw_dc_spin_mean

file.close()
Expand Down Expand Up @@ -198,11 +272,17 @@ def set_hamiltonian(er_type: str, er_input: str | list, int_type: str, int_input
if int_type == "one_band_from_dmft" or int_type == "" or int_type is None:
return ham.single_band_interaction(config.lattice.interaction.udd)
elif int_type == "kanamori_from_dmft":
return ham.kanamori_interaction(
config.sys.n_bands,
return ham.kanamori_interaction_dp(
config.sys.nd_bands,
config.sys.np_bands,
config.lattice.interaction.udd,
config.lattice.interaction.upp,
config.lattice.interaction.udp,
config.lattice.interaction.jdd,
config.lattice.interaction.jpp,
config.lattice.interaction.jdp,
config.lattice.interaction.vdd,
config.lattice.interaction.vpp,
)
elif int_type == "custom":
if not isinstance(int_input, str):
Expand Down
73 changes: 64 additions & 9 deletions moldga/hamiltonian.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,26 +107,81 @@ def interaction_orbital_diagonal(self, u: float, n_bands: int = 1) -> "Hamiltoni

return self._add_interaction_term(interaction_elements)

def kanamori_interaction_d(self, n_bands: int, udd: float, jdd: float, vdd: float = None) -> "Hamiltonian":
    """
    Adds the Kanamori interaction terms ONLY for d orbitals to the Hamiltonian.
    The interaction terms are defined by the Hubbard 'udd' (U),
    the exchange interaction 'jdd' (J) and the pair hopping 'vdd' (V or sometimes U').
    'vdd' is an optional parameter; if left empty, it is set to V = U - 2J.
    """
    # Thin wrapper: delegate to the general d+p routine with no p bands.
    return self.kanamori_interaction_dp(nd_bands=n_bands, udd=udd, jdd=jdd, vdd=vdd)

def kanamori_interaction_p(self, n_bands: int, upp: float, jpp: float, vpp: float = None) -> "Hamiltonian":
    """
    Adds the Kanamori interaction terms ONLY for p orbitals to the Hamiltonian.
    The interaction terms are defined by the Hubbard 'upp' (U),
    the exchange interaction 'jpp' (J) and the pair hopping 'vpp' (V or sometimes U').
    'vpp' is an optional parameter, if left empty, it is set to V=U-2J.
    """
    return self.kanamori_interaction_dp(np_bands=n_bands, upp=upp, jpp=jpp, vpp=vpp)

def kanamori_interaction_dp(
    self,
    nd_bands: int = 0,
    np_bands: int = 0,
    udd: float = 0.0,
    upp: float = 0.0,
    udp: float = 0.0,
    jdd: float = 0.0,
    jpp: float = 0.0,
    jdp: float = 0.0,
    vdd: float = None,
    vpp: float = None,
) -> "Hamiltonian":
    """
    Adds the full Kanamori interaction terms for d and p orbitals to the Hamiltonian.

    The d bands occupy the first 'nd_bands' orbital indices, the p bands the
    remaining 'np_bands'. For each orbital pair the Hubbard interaction U, the
    exchange interaction J and the pair interaction V (sometimes U') are taken
    from the d-d, p-p or d-p parameter set. 'vdd' and 'vpp' are optional
    parameters; if left empty, they are set via the Kanamori relation V = U - 2J.
    For mixed d-p pairs, 'udp' plays the role of V_dp (there is no U_dp, since
    U only enters the orbital-diagonal U_llll terms).
    """

    # Default V = U - 2J (Kanamori relation)
    if vdd is None:
        vdd = udd - 2 * jdd
    if vpp is None:
        vpp = upp - 2 * jpp

    def orb_type(i: int) -> str:
        return "d" if i < nd_bands else "p"  # d bands come first

    def get_params(o1: int, o2: int) -> tuple[float, float, float]:
        # Return correct (U, J, V) depending on orbital types of o1, o2
        t1, t2 = orb_type(o1), orb_type(o2)

        if t1 == "d" and t2 == "d":
            return udd, jdd, vdd
        elif t1 == "p" and t2 == "p":
            return upp, jpp, vpp
        else:
            return 0, jdp, udp  # udp is used as "Vdp", since there is no Udp possible that fits U_llll

    r_loc = [0, 0, 0]  # purely local interaction (zero lattice vector)
    n_tot = nd_bands + np_bands

    interaction_elements = []

    for a, b, c, d in it.product(range(n_tot), repeat=4):
        bands = [a + 1, b + 1, c + 1, d + 1]  # 1-based band indices

        # choose parameters based on (a,b) pair (equivalently (c,d))
        u, j, v = get_params(a, b)

        if a == b == c == d:  # U_{llll}
            interaction_elements.append(InteractionElement(r_loc, bands, u))
        elif (a == d and b == c) or (a == b and c == d):  # U_{lmml}, U_{llmm}
            interaction_elements.append(InteractionElement(r_loc, bands, j))
        elif a == c and b == d:  # U_{lmlm}
            interaction_elements.append(InteractionElement(r_loc, bands, v))

    return self._add_interaction_term(interaction_elements)

Expand Down
97 changes: 91 additions & 6 deletions moldga/nonlocal_sde.py
Original file line number Diff line number Diff line change
Expand Up @@ -496,9 +496,7 @@ def read_last_n_sigmas_from_files(n: int, output_path: str = "./", previous_sc_p
last_iterations = sorted(
[(int(match.group(1)), f) for f in files if (match := re.search(r"sigma_dga_iteration_(\d+)\.npy$", f))],
key=lambda x: x[0],
reverse=True,
)
last_iterations = last_iterations[:n] if len(last_iterations) >= n else last_iterations
)[-n:]
return [np.load(file) for _, file in last_iterations]


Expand Down Expand Up @@ -536,7 +534,7 @@ def calculate_self_energy_q(
sigma_old, starting_iter = get_starting_sigma(config.self_consistency.previous_sc_path, sigma_dmft)
if starting_iter > 0:
logger.info(
f"Using previous calculation and starting the self-consistency loop at iteration {starting_iter+1}."
f"Using previous calculation and starting the self-consistency loop at iteration {starting_iter + 1}."
)

sigma_old = sigma_old.cut_niv(config.box.niw_core + config.box.niv_full + 10)
Expand Down Expand Up @@ -637,7 +635,7 @@ def calculate_self_energy_q(
config.sys.mu = update_mu(
old_mu, config.sys.n, giwk_full.ek, sigma_new.mat, config.sys.beta, sigma_new.fit_smom()[0]
)
# linear mixing of mu to ensure more stable mu convergence

config.sys.mu = (
config.self_consistency.mixing * config.sys.mu + (1 - config.self_consistency.mixing) * old_mu
)
Expand Down Expand Up @@ -771,9 +769,96 @@ def get_result(idx: int):
sigma_new.mat[..., niv - niv_core : niv + niv_core] = get_proposal(-1).reshape(shape) + update.reshape(shape)

logger.info(
f"Pulay mixing applied with {n_hist} previous iterations and a mixing parameter of {config.self_consistency.mixing}."
f"Pulay mixing applied with {n_hist} previous iterations and "
f"a mixing parameter of {config.self_consistency.mixing}."
)

return sigma_new
if (
config.self_consistency.mixing_strategy == "anderson"
and current_iter > n_hist
and config.self_consistency.save_iter
and config.output.save_quantities
):
last_sigmas = read_last_n_sigmas_from_files(
n_hist, config.output.output_path, config.self_consistency.previous_sc_path
)

niv = sigma_new.current_shape[-1] // 2
niv_core = config.box.niv_core
sl = slice(niv - niv_core, niv + niv_core)

sigma_dmft_stacked = np.tile(sigma_dmft.mat, (config.lattice.k_grid.nk_tot, 1, 1, 1))

last_proposals = [sigma_dmft_stacked] + last_sigmas # [dmft, s1, ..., s_{n-1}]
last_results = last_sigmas + [sigma_new.mat] # [s1, s2, ..., s_new]
last_proposals = [s[..., sl] for s in last_proposals]
last_results = [s[..., sl] for s in last_results]

shape = last_results[-1].shape
n_total = int(np.prod(shape))
flat = lambda x: x.reshape(-1)

# Current residual f_n = F(x_n) - x_n
f_curr = flat(last_results[-1]) - flat(last_proposals[-1])
f_vec = np.concatenate([f_curr.real, f_curr.imag])
norm_f = np.linalg.norm(f_vec)

# Build dX and dF matrices (n_hist columns)
# dX[:,i] = x_{n-i} - x_{n-i-1} (proposal differences)
# dF[:,i] = f_{n-i} - f_{n-i-1} (residual differences)
dx_cols = []
df_cols = []
for i in range(n_hist):
dx = flat(last_proposals[-1 - i]) - flat(last_proposals[-2 - i])
dx_cols.append(np.concatenate([dx.real, dx.imag]))

df_i = flat(last_results[-1 - i]) - flat(last_proposals[-1 - i])
df_im1 = flat(last_results[-2 - i]) - flat(last_proposals[-2 - i])
df = df_i - df_im1
df_cols.append(np.concatenate([df.real, df.imag]))

dx_matrix = np.column_stack(dx_cols) # (2*n_total, n_hist)
df_matrix = np.column_stack(df_cols) # (2*n_total, n_hist)

# Anderson: solve min ||f_curr - dF @ c||
try:
u, s, vh = np.linalg.svd(df_matrix, full_matrices=False)

s_max = s[0] if len(s) > 0 else 1.0
cutoff = 1e-5 * s_max
mask = s > cutoff

if not np.any(mask):
raise np.linalg.LinAlgError("All singular values below threshold.")

s_reg = s[mask] / (s[mask] ** 2 + cutoff**2)
coeffs = vh[mask].T @ (s_reg * (u[:, mask].T @ f_vec))

except np.linalg.LinAlgError:
logger.warning("Anderson SVD failed — falling back to linear mixing.")
return alpha * sigma_new + (1 - alpha) * sigma_old

# Undamped Anderson proposal: x_n + f_n - (dX + dF) @ c
x_n = flat(last_proposals[-1])
x_anderson = np.concatenate([x_n.real, x_n.imag]) + f_vec - (dx_matrix + df_matrix) @ coeffs
x_anderson = x_anderson[:n_total] + 1j * x_anderson[n_total:]

# Damp between old proposal and Anderson proposal
x_n_complex = x_n
candidate = (1 - alpha) * x_n_complex + alpha * x_anderson.reshape(-1)

# Safety clamp
update = candidate - x_n_complex
norm_u = np.linalg.norm(update)
if norm_f > 0 and norm_u > 3.0 * norm_f:
candidate = x_n_complex + update * (3.0 * norm_f / norm_u)
logger.warning(f"Anderson step clamped (norm_u={norm_u:.3e}, norm_f={norm_f:.3e}).")

sigma_new.mat[..., sl] = candidate.reshape(shape)

logger.info(f"Anderson acceleration applied (m={n_hist}, alpha={alpha:.3f}, norm_f={norm_f:.3e}).")

return sigma_new

sigma_new = config.self_consistency.mixing * sigma_new + (1 - config.self_consistency.mixing) * sigma_old
Expand Down
Loading
Loading