diff --git a/ggce/_tests/petsc/test_dispersion.py b/ggce/_tests/petsc/test_dispersion.py new file mode 100644 index 0000000..4a9f329 --- /dev/null +++ b/ggce/_tests/petsc/test_dispersion.py @@ -0,0 +1,317 @@ +import pytest +import numpy as np + +from ggce import Model, System, MassSolverMUMPS +from ggce.utils.utils import process_dispersion + +mpi4py_imported = False +try: + from mpi4py import MPI + + mpi4py_imported = True +except ImportError: + pass + +petsc_imported = False +try: + from ggce.executors.petsc4py.solvers import MassSolverMUMPS + + petsc_imported = True +except ImportError: + pass + +ATOL = 1.0e-4 + +H_disp = { + "disp": np.array( + [ + -2.13357591, + -2.11002857, + -2.04152528, + -1.93577551, + -1.81115636, + -1.70275774, + -1.63772221, + -1.60489404, + -1.58675713, + -1.57505631, + -1.56689429, + -1.56031997, + -1.55514015, + -1.55080771, + -1.54719531, + -1.54424225, + -1.54177714, + -1.54015774, + -1.53918069, + -1.53886704 + ], + ), + "lftm": np.array( + [ + 0.05002659, + 0.05002811, + 0.05004279, + 0.05009543, + 0.05026857, + 0.05079536, + 0.05166395, + 0.05223035, + 0.0526713 , + 0.05322507, + 0.05320764, + 0.05365926, + 0.05371972, + 0.0538172 , + 0.05391855, + 0.05401722, + 0.05443246, + 0.05444023, + 0.05444205, + 0.05441458 + ], + ), + "model_params": dict( + hopping=1.0, phonon_max_per_site=None, temperature=0.0 + ), + "model_add_params": dict( + coupling_type="Holstein", + phonon_frequency=0.5, + phonon_extent=2, + phonon_number=2, + phonon_extent_tfd=2, + phonon_number_tfd=2, + dimensionless_coupling_strength=0.2, + ), + "w0": -2.5, + "eta": 0.05, + "kgrid": np.linspace(0, np.pi, 20), +} + + +Ht_disp = { + "disp": np.array( + [ + -2.27032010, + -2.24710519, + -2.17884402, + -2.07004391, + -1.92889763, + -1.76884135, + -1.60981514, + -1.47473396, + -1.37678952, + -1.31205636, + -1.26949333, + -1.24038372, + -1.21935644, + -1.20362145, + -1.19155228, + -1.18237100, + -1.17545341, + -1.17069893, + -1.16788926, + -1.16691047 + ], + ), + "lftm": np.array( + [ + 0.05003133, + 0.05003679, + 0.05006176, + 0.05001756, + 0.05003599, + 0.0500623 , + 0.05011613, + 0.05022575, + 0.0503448 , + 0.05048927, + 0.05063826, + 0.05064263, + 0.0507317 , + 0.0507624 , + 0.05082355, + 0.05077456, + 0.05085719, + 0.05081329, + 0.05077591, + 0.05085204 + ], + ), + "model_params": dict( + hopping=1.0, phonon_max_per_site=None, temperature=0.1 + ), + "model_add_params": dict( + coupling_type="Holstein", + phonon_frequency=1.0, + phonon_extent=2, + phonon_number=2, + phonon_extent_tfd=2, + phonon_number_tfd=2, + dimensionless_coupling_strength=0.3, + ), + "w0": -2.5, + "eta": 0.05, + "kgrid": np.linspace(0, np.pi, 20), +} + +P_disp = { + "disp": np.array( + [ + -2.05221829, + -2.02962257, + -1.96366601, + -1.86098486, + -1.73851190, + -1.63596423, + -1.58668088, + -1.56751988, + -1.55763702, + -1.55055595, + -1.54450427, + -1.53879446, + -1.53337647, + -1.52834307, + -1.52374767, + -1.51939479, + -1.51433297, + -1.12053083, + -1.12487661, + -1.12842012 + ], + ), + "lftm": np.array( + [ + 0.05000364, + 0.05000494, + 0.05001221, + 0.05003807, + 0.05021303, + 0.05112328, + 0.05284144, + 0.05389852, + 0.05450736, + 0.05511493, + 0.05560135, + 0.05633197, + 0.05737958, + 0.05891222, + 0.06126965, + 0.06522925, + 0.07291950, + 0.20392106, + 0.18979730, + 0.17680745 + ], + ), + "model_params": dict( + hopping=1.0, phonon_max_per_site=None, temperature=0.0 + ), + "model_add_params": dict( + coupling_type="Peierls", + phonon_frequency=0.5, + phonon_extent=4, + phonon_number=3, + 
phonon_extent_tfd=2, + phonon_number_tfd=2, + dimensionless_coupling_strength=0.2, + ), + "w0": -2.5, + "eta": 0.05, + "kgrid": np.linspace(0, np.pi, 20), +} + +Pt_disp = { + "disp": np.array( + [ + -2.03860534, + -2.01424822, + -1.94216566, + -1.82544492, + -1.66981736, + -1.48539322, + -1.29253027, + -1.13655662, + -1.06103682, + -1.03309347, + -1.02002084, + -1.01205506, + -1.00665565, + -1.00202791, + -0.99794541, + -0.99394416, + -0.98742742, + -0.08718754, + -0.08316406, + -0.08190966 + ], + ), + "lftm": np.array( + [ + 0.05000570, + 0.05000590, + 0.05000530, + 0.05000756, + 0.05001251, + 0.05002866, + 0.05011567, + 0.05076352, + 0.05263697, + 0.05473189, + 0.05657045, + 0.05836157, + 0.05977918, + 0.06213563, + 0.06537793, + 0.07031304, + 0.08309610, + 0.39414916, + 0.39360121, + 0.38676333 + ], + ), + "model_params": dict( + hopping=1.0, phonon_max_per_site=None, temperature=0.1 + ), + "model_add_params": dict( + coupling_type="Peierls", + phonon_frequency=1.0, + phonon_extent=2, + phonon_number=2, + phonon_extent_tfd=2, + phonon_number_tfd=2, + dimensionless_coupling_strength=0.1, + ), + "w0": -2.5, + "eta": 0.05, + "kgrid": np.linspace(0, np.pi, 20), +} + + +@pytest.mark.skipif(not petsc_imported, reason="PETSc not installed") +@pytest.mark.skipif(not mpi4py_imported, reason="mpi4py not installed") +@pytest.mark.parametrize( + "p", + [H_disp, P_disp, + Ht_disp, Pt_disp], +) +def test_dispersion_finiteT(p): + disp = np.array(p["disp"]) + lftm = np.array(p["lftm"]) + model = Model.from_parameters(**p["model_params"]) + model.add_(**p["model_add_params"]) + + kgrid = p["kgrid"] + w0 = p["w0"] + eta = p["eta"] + + COMM = MPI.COMM_WORLD + executor_petsc = MassSolverMUMPS(System(model), mpi_comm=COMM) + results = executor_petsc.dispersion(kgrid, w0, eta=eta, nmax=1000, \ + eta_step_div=10, peak_routine="scipy", next_k_offset_factor=2) + dispersion, lifetimes = process_dispersion(results) + + assert np.allclose(disp, dispersion, atol=ATOL) + + assert np.allclose(lftm, lifetimes, atol=ATOL) + diff --git a/ggce/_tests/test_dispersion.py b/ggce/_tests/test_dispersion.py new file mode 100644 index 0000000..87cb4d0 --- /dev/null +++ b/ggce/_tests/test_dispersion.py @@ -0,0 +1,307 @@ +import pytest +import numpy as np + +from ggce import Model, System, SparseSolver, DenseSolver, MassSolverMUMPS +from ggce.utils.utils import process_dispersion + +ATOL = 1.0e-4 + +H_disp = { + "disp": np.array( + [ + -2.13357591, + -2.11002857, + -2.04152528, + -1.93577551, + -1.81115636, + -1.70275774, + -1.63772221, + -1.60489404, + -1.58675713, + -1.57505631, + -1.56689429, + -1.56031997, + -1.55514015, + -1.55080771, + -1.54719531, + -1.54424225, + -1.54177714, + -1.54015774, + -1.53918069, + -1.53886704 + ], + ), + "lftm": np.array( + [ + 0.05002659, + 0.05002811, + 0.05004279, + 0.05009543, + 0.05026857, + 0.05079536, + 0.05166395, + 0.05223035, + 0.0526713 , + 0.05322507, + 0.05320764, + 0.05365926, + 0.05371972, + 0.0538172 , + 0.05391855, + 0.05401722, + 0.05443246, + 0.05444023, + 0.05444205, + 0.05441458 + ], + ), + "model_params": dict( + hopping=1.0, phonon_max_per_site=None, temperature=0.0 + ), + "model_add_params": dict( + coupling_type="Holstein", + phonon_frequency=0.5, + phonon_extent=2, + phonon_number=2, + phonon_extent_tfd=2, + phonon_number_tfd=2, + dimensionless_coupling_strength=0.2, + ), + "w0": -2.5, + "eta": 0.05, + "kgrid": np.linspace(0, np.pi, 20), +} + + +Ht_disp = { + "disp": np.array( + [ + -2.27032010, + -2.24710519, + -2.17884402, + -2.07004391, + -1.92889763, + -1.76884135, 
+ -1.60981514, + -1.47473396, + -1.37678952, + -1.31205636, + -1.26949333, + -1.24038372, + -1.21935644, + -1.20362145, + -1.19155228, + -1.18237100, + -1.17545341, + -1.17069893, + -1.16788926, + -1.16691047 + ], + ), + "lftm": np.array( + [ + 0.05003133, + 0.05003679, + 0.05006176, + 0.05001756, + 0.05003599, + 0.0500623 , + 0.05011613, + 0.05022575, + 0.0503448 , + 0.05048927, + 0.05063826, + 0.05064263, + 0.0507317 , + 0.0507624 , + 0.05082355, + 0.05077456, + 0.05085719, + 0.05081329, + 0.05077591, + 0.05085204 + ], + ), + "model_params": dict( + hopping=1.0, phonon_max_per_site=None, temperature=0.1 + ), + "model_add_params": dict( + coupling_type="Holstein", + phonon_frequency=1.0, + phonon_extent=2, + phonon_number=2, + phonon_extent_tfd=2, + phonon_number_tfd=2, + dimensionless_coupling_strength=0.3, + ), + "w0": -2.5, + "eta": 0.05, + "kgrid": np.linspace(0, np.pi, 20), +} + +P_disp = { + "disp": np.array( + [ + -2.05221829, + -2.02962257, + -1.96366601, + -1.86098486, + -1.73851190, + -1.63596423, + -1.58668088, + -1.56751988, + -1.55763702, + -1.55055595, + -1.54450427, + -1.53879446, + -1.53337647, + -1.52834307, + -1.52374767, + -1.51939479, + -1.51433297, + -1.12053083, + -1.12487661, + -1.12842012 + ], + ), + "lftm": np.array( + [ + 0.05000364, + 0.05000494, + 0.05001221, + 0.05003807, + 0.05021303, + 0.05112328, + 0.05284144, + 0.05389852, + 0.05450736, + 0.05511493, + 0.05560135, + 0.05633197, + 0.05737958, + 0.05891222, + 0.06126965, + 0.06522925, + 0.07291950, + 0.20392106, + 0.18979730, + 0.17680745 + ], + ), + "model_params": dict( + hopping=1.0, phonon_max_per_site=None, temperature=0.0 + ), + "model_add_params": dict( + coupling_type="Peierls", + phonon_frequency=0.5, + phonon_extent=4, + phonon_number=3, + phonon_extent_tfd=2, + phonon_number_tfd=2, + dimensionless_coupling_strength=0.2, + ), + "w0": -2.5, + "eta": 0.05, + "kgrid": np.linspace(0, np.pi, 20), +} + + +Pt_disp = { + "disp": np.array( + [ + -2.03860534, + -2.01424822, + -1.94216566, + -1.82544492, + -1.66981736, + -1.48539322, + -1.29253027, + -1.13655662, + -1.06103682, + -1.03309347, + -1.02002084, + -1.01205506, + -1.00665565, + -1.00202791, + -0.99794541, + -0.99394416, + -0.98742742, + -0.08718754, + -0.08316406, + -0.08190966 + ], + ), + "lftm": np.array( + [ + 0.05000570, + 0.05000590, + 0.05000530, + 0.05000756, + 0.05001251, + 0.05002866, + 0.05011567, + 0.05076352, + 0.05263697, + 0.05473189, + 0.05657045, + 0.05836157, + 0.05977918, + 0.06213563, + 0.06537793, + 0.07031304, + 0.08309610, + 0.39414916, + 0.39360121, + 0.38676333 + ], + ), + "model_params": dict( + hopping=1.0, phonon_max_per_site=None, temperature=0.1 + ), + "model_add_params": dict( + coupling_type="Peierls", + phonon_frequency=1.0, + phonon_extent=2, + phonon_number=2, + phonon_extent_tfd=2, + phonon_number_tfd=2, + dimensionless_coupling_strength=0.1, + ), + "w0": -2.5, + "eta": 0.05, + "kgrid": np.linspace(0, np.pi, 20), +} + + +@pytest.mark.parametrize( + "p", + [H_disp, P_disp, + Ht_disp, Pt_disp], +) +def test_dispersion_finiteT(p): + disp = np.array(p["disp"]) + lftm = np.array(p["lftm"]) + model = Model.from_parameters(**p["model_params"]) + model.add_(**p["model_add_params"]) + + kgrid = p["kgrid"] + w0 = p["w0"] + eta = p["eta"] + + executor_sparse = SparseSolver(System(model)) + results = executor_sparse.dispersion(kgrid, w0, eta=eta, nmax=1000, \ + eta_step_div=10, peak_routine="scipy", next_k_offset_factor=2) + dispersion, lifetimes = process_dispersion(results) + + assert np.allclose(disp, dispersion, 
atol=ATOL)
+
+    assert np.allclose(lftm, lifetimes, atol=ATOL)
+
+    executor_dense = DenseSolver(System(model))
+    results = executor_dense.dispersion(kgrid, w0, eta=eta, nmax=1000,
+        eta_step_div=10, peak_routine="scipy", next_k_offset_factor=2)
+    dispersion, lifetimes = process_dispersion(results)
+
+    assert np.allclose(disp, dispersion, atol=ATOL)
+
+    assert np.allclose(lftm, lifetimes, atol=ATOL)
\ No newline at end of file
diff --git a/ggce/executors/petsc4py/base.py b/ggce/executors/petsc4py/base.py
index 75fc880..473602b 100644
--- a/ggce/executors/petsc4py/base.py
+++ b/ggce/executors/petsc4py/base.py
@@ -10,7 +10,10 @@
 from ggce.logger import logger
 from ggce.utils.physics import G0_k_omega
-from ggce.utils.utils import chunk_jobs, padded_kw, float_to_list
+from ggce.utils.utils import chunk_jobs, padded_kw, \
+    float_to_list, peak_location_and_weight, \
+    peak_location_and_weight_wstep, \
+    peak_location_and_weight_scipy
 from ggce.executors.solvers import Solver
 
 
 BYTES_TO_GB = 1073741274
@@ -682,3 +685,149 @@ def _get_matr_size(matr_dir):
 
     matrsize = max(row_ind) + 1
     return matrsize
+
+    def dispersion(
+        self, kgrid, w0, eta, eta_div=3.0, eta_step_div=5.0, incl_w_pts=10,
+        next_k_offset_factor=1.5, nmax=1000, peak_routine="change_eta",
+        **solve_kwargs
+    ):
+        """Computes the dispersion of the peak closest to the provided w0 by
+        assuming that the peak is Lorentzian in nature. This allows us to
+        take two points, each at a different value of the broadening, eta,
+        and compute the location of the Lorentzian (ground state energy) and
+        quasi-particle weight exactly, at least in principle. As stated, we
+        rely on the assumption that the peak is Lorentzian, which is only
+        true in some cases (e.g. the polaron).
+
+        This method works as follows: (1) An initial guess for the peak
+        location of the first entry in kgrid is provided (w0). (2) The
+        location of the peak is found by slowly increasing w in increments
+        of eta / eta_step_div until the first time the value of A decreases
+        from the previous one. (3) The location is found (as is the weight)
+        by computing A using a second broadening given by eta / eta_div.
+        (4) This value is logged in results, and the algorithm moves to the
+        next k-point. The new initial guess for the next peak location is
+        given by the found location of the previous k-point minus
+        eta * next_k_offset_factor.
+
+        UPDATE: The method can now be run using the PETSc "ParallelSparse"
+        protocol. It is parallel in that, for a single (k, w) point, the
+        matrix is distributed across different tasks; however, it is
+        "serial" in that it still works its way through one (k, w) point at
+        a time. If you try to call this using ParallelDenseExecutor you will
+        get a NotImplementedError.
+
+        Parameters
+        ----------
+        kgrid : list
+            A list of the k-points to calculate.
+        w0 : float
+            The initial guess for the peak location for the first k-point
+            only.
+        eta : float
+            The broadening parameter.
+        eta_div : float, optional
+            Used for the computation of the second A value (the default is
+            3.0, a good empirical value).
+        eta_step_div : float, optional
+            Defines the step in frequency space as eta / eta_step_div (the
+            default is 5.0).
+        next_k_offset_factor : float, optional
+            Defines how far back from the found peak location to start the
+            algorithm at the next k-point. The next start location is given
+            by the found location minus eta * next_k_offset_factor (the
+            default is 1.5).
+        nmax : int, optional
+            The maximum number of steps to take in w before gracefully
+            erroring out and returning the previously found values (the
+            default is 1000).
+        incl_w_pts : int, optional
+            The number of most recent w-points included in the Lorentzian
+            fit when peak_routine="scipy" (the default is 10).
+        peak_routine : {"change_eta", "change_w", "scipy"}, optional
+            Selects how the peak location and weight are extracted once a
+            local maximum is found (the default is "change_eta").
+
+        Returns
+        -------
+        list
+            List of dictionaries, one per k-point, each of which contains
+            six keys: the k-value at which the calculation was run ('k'),
+            lists of the sampled w-values and spectrum values ('w' and 'A'),
+            the ground state energy and quasi-particle weight
+            ('ground_state' and 'weight'), and the estimated peak lifetime
+            ('lifetime').
+        """
+
+        results = []
+        w_val = w0
+        nk = len(kgrid)
+        for ii, k_val in enumerate(kgrid):
+
+            current_n_w = 0
+            reference = 0.0
+
+            results.append({
+                'k': k_val,
+                'w': [],
+                'A': [],
+                'ground_state': None,
+                'weight': None,
+                'lifetime': None
+            })
+
+            while True:
+
+                if current_n_w > nmax:
+                    logger.error("Exceeded maximum omega points")
+                    return results
+
+                G, _ = self.solve(k_val, w_val, eta)
+                A = -G.imag / np.pi
+                results[ii]['w'].append(w_val)
+                results[ii]['A'].append(A)
+
+                # Check whether we've found a local maximum
+                if reference < A:
+
+                    # Not a maximum yet: step forward in w and keep going
+                    reference = A
+
+                    current_n_w += 1
+                    w_val += eta / eta_step_div
+                    continue
+
+                # Found a maximum: refine its location, weight and lifetime
+                if peak_routine == "change_eta":
+                    eta_prime = eta / eta_div
+                    G2, _ = self.solve(k_val, w_val, eta_prime)
+                    A2 = -G2.imag / np.pi
+                    loc, weight = peak_location_and_weight(
+                        w_val, A, A2, eta, eta_prime)
+                    lifetime = eta
+                elif peak_routine == "change_w":
+                    w_val_prime = w_val - 2.0 * eta / eta_step_div
+                    G2, _ = self.solve(k_val, w_val_prime, eta)
+                    A2 = -G2.imag / np.pi
+                    loc, weight = peak_location_and_weight_wstep(
+                        w_val, w_val_prime, A, A2, eta)
+                    lifetime = eta
+                elif peak_routine == "scipy":
+                    assert len(results[ii]["w"]) >= incl_w_pts, (
+                        "The number of w points solved for is smaller than "
+                        "required for a scipy fit.\nRestart the calculation "
+                        "at a smaller w0, or, if this happens halfway "
+                        "through the dispersion search, try increasing "
+                        "next_k_offset_factor."
+                    )
+                    wrange = results[ii]["w"][-incl_w_pts:]
+                    Arange = results[ii]["A"][-incl_w_pts:]
+                    fit_params, error = peak_location_and_weight_scipy(
+                        wrange, Arange, eta)
+                    loc, weight, lifetime = fit_params
+
+                results[ii]['ground_state'] = loc
+                results[ii]['weight'] = weight
+                results[ii]['lifetime'] = lifetime
+                w_val = loc - eta * next_k_offset_factor
+                logger.info(
+                    f"For k ({ii:03}/{nk:03}) = {k_val:.02f}: GS={loc:.08f}, "
+                    f"wt={weight:.02e}, lifetime={lifetime:.04f}"
+                )
+                break
+
+        return results
diff --git a/ggce/executors/solvers.py b/ggce/executors/solvers.py
index c602b7a..787c062 100644
--- a/ggce/executors/solvers.py
+++ b/ggce/executors/solvers.py
@@ -9,6 +9,10 @@
 from scipy.sparse.linalg import spsolve
 from tqdm import tqdm
 
+from ggce.utils.utils import eff_mass_fit, \
+    peak_location_and_weight, \
+    peak_location_and_weight_scipy, \
+    peak_location_and_weight_wstep
 from ggce.logger import logger, disable_logger
 from ggce.utils.utils import float_to_list, chunk_jobs
 from ggce.engine.system import System
@@ -189,6 +193,151 @@ def greens_function(self, k, w, eta, pbar=False):
 
         return np.array(s).reshape(len(k), len(w))
 
+    def dispersion(
+        self, kgrid, w0, eta, eta_div=3.0, eta_step_div=5.0, incl_w_pts=10,
+        next_k_offset_factor=1.5, nmax=1000, peak_routine="change_eta",
+        **solve_kwargs
+    ):
+        """Computes the dispersion of the peak closest to the provided w0 by
+        assuming that the peak is Lorentzian in nature. This allows us to
+        take two points, each at a different value of the broadening, eta,
+        and compute the location of the Lorentzian (ground state energy) and
+        quasi-particle weight exactly, at least in principle. As stated, we
+        rely on the assumption that the peak is Lorentzian, which is only
+        true in some cases (e.g. the polaron).
+
+        This method works as follows: (1) An initial guess for the peak
+        location of the first entry in kgrid is provided (w0). (2) The
+        location of the peak is found by slowly increasing w in increments
+        of eta / eta_step_div until the first time the value of A decreases
+        from the previous one. (3) The location is found (as is the weight)
+        by computing A using a second broadening given by eta / eta_div.
+        (4) This value is logged in results, and the algorithm moves to the
+        next k-point. The new initial guess for the next peak location is
+        given by the found location of the previous k-point minus
+        eta * next_k_offset_factor.
+
+        UPDATE: The method can now be run using the PETSc "ParallelSparse"
+        protocol. It is parallel in that, for a single (k, w) point, the
+        matrix is distributed across different tasks; however, it is
+        "serial" in that it still works its way through one (k, w) point at
+        a time. If you try to call this using ParallelDenseExecutor you will
+        get a NotImplementedError.
+
+        Parameters
+        ----------
+        kgrid : list
+            A list of the k-points to calculate.
+        w0 : float
+            The initial guess for the peak location for the first k-point
+            only.
+        eta : float
+            The broadening parameter.
+        eta_div : float, optional
+            Used for the computation of the second A value (the default is
+            3.0, a good empirical value).
+        eta_step_div : float, optional
+            Defines the step in frequency space as eta / eta_step_div (the
+            default is 5.0).
+        next_k_offset_factor : float, optional
+            Defines how far back from the found peak location to start the
+            algorithm at the next k-point. The next start location is given
+            by the found location minus eta * next_k_offset_factor (the
+            default is 1.5).
+        nmax : int, optional
+            The maximum number of steps to take in w before gracefully
+            erroring out and returning the previously found values (the
+            default is 1000).
+        incl_w_pts : int, optional
+            The number of most recent w-points included in the Lorentzian
+            fit when peak_routine="scipy" (the default is 10).
+        peak_routine : {"change_eta", "change_w", "scipy"}, optional
+            Selects how the peak location and weight are extracted once a
+            local maximum is found (the default is "change_eta").
+
+        Returns
+        -------
+        list
+            List of dictionaries, one per k-point, each of which contains
+            six keys: the k-value at which the calculation was run ('k'),
+            lists of the sampled w-values and spectrum values ('w' and 'A'),
+            the ground state energy and quasi-particle weight
+            ('ground_state' and 'weight'), and the estimated peak lifetime
+            ('lifetime').
+        """
+
+        results = []
+        w_val = w0
+        nk = len(kgrid)
+        for ii, k_val in enumerate(kgrid):
+
+            current_n_w = 0
+            reference = 0.0
+
+            results.append({
+                'k': k_val,
+                'w': [],
+                'A': [],
+                'ground_state': None,
+                'weight': None,
+                'lifetime': None
+            })
+
+            while True:
+
+                if current_n_w > nmax:
+                    logger.error("Exceeded maximum omega points")
+                    return results
+
+                G = self.solve(k_val, w_val, eta)
+                A = -G.imag / np.pi
+                results[ii]['w'].append(w_val)
+                results[ii]['A'].append(A)
+
+                # Check whether we've found a local maximum
+                if reference < A:
+
+                    # Not a maximum yet: step forward in w and keep going
+                    reference = A
+
+                    current_n_w += 1
+                    w_val += eta / eta_step_div
+                    continue
+
+                # Found a maximum: refine its location, weight and lifetime
+                if peak_routine == "change_eta":
+                    eta_prime = eta / eta_div
+                    G2 = self.solve(k_val, w_val, eta_prime)
+                    A2 = -G2.imag / np.pi
+                    loc, weight = peak_location_and_weight(
+                        w_val, A, A2, eta, eta_prime)
+                    lifetime = eta
+                elif peak_routine == "change_w":
+                    w_val_prime = w_val - 2.0 * eta / eta_step_div
+                    G2 = self.solve(k_val, w_val_prime, eta)
+                    A2 = -G2.imag / np.pi
+                    loc, weight = peak_location_and_weight_wstep(
+                        w_val, w_val_prime, A, A2, eta)
+                    lifetime = eta
+                elif peak_routine == "scipy":
+                    assert len(results[ii]["w"]) >= incl_w_pts, (
+                        "The number of w points solved for is smaller than "
+                        "required for a scipy fit.\nRestart the calculation "
+                        "at a smaller w0, or, if this happens halfway "
+                        "through the dispersion search, try increasing "
+                        "next_k_offset_factor."
+                    )
+                    wrange = results[ii]["w"][-incl_w_pts:]
+                    Arange = results[ii]["A"][-incl_w_pts:]
+                    fit_params, error = peak_location_and_weight_scipy(
+                        wrange, Arange, eta)
+                    loc, weight, lifetime = fit_params
+
+                results[ii]['ground_state'] = loc
+                results[ii]['weight'] = weight
+                results[ii]['lifetime'] = lifetime
+                w_val = loc - eta * next_k_offset_factor
+                logger.info(
+                    f"For k ({ii:03}/{nk:03}) = {k_val:.02f}: GS={loc:.08f}, "
+                    f"wt={weight:.02e}, lifetime={lifetime:.04f}"
+                )
+                break
+
+        return results
 
 class SparseSolver(BasicSolver):
     """A sparse, serial solver for the Green's function. Useful for when the
diff --git a/ggce/utils/utils.py b/ggce/utils/utils.py
index 5a143f1..a0d9649 100644
--- a/ggce/utils/utils.py
+++ b/ggce/utils/utils.py
@@ -192,3 +192,36 @@ def peak_location_and_weight_scipy(wrange, Arange, eta):
 
 def lorentzian(w, loc, scale, eta):
     return scale * (eta / np.pi) / ((w - loc) ** 2 + eta**2)
+
+
+def quadratic_band(k, m_star, offset):
+    """Quadratic band approximation, E(k) = k^2 / (2 m*) - offset."""
+
+    return k**2 / (2.0 * m_star) - offset
+
+
+def process_dispersion(dispersion_results):
+    """Helper that processes the output of the dispersion() method of a
+    given solver and returns two 1d arrays, one entry per k-point: the
+    ground state energy and the ground state lifetime."""
+
+    dispersion = np.array(
+        [res["ground_state"] for res in dispersion_results]
+    )
+    lifetimes = np.array(
+        [res["lifetime"] for res in dispersion_results]
+    )
+    return dispersion, lifetimes
+
+
+def eff_mass_fit(kgrid, energy_k):
+    """Helper that fits a quadratic band to a polaron dispersion near k = 0
+    and extracts the effective mass, normalized by a reference bare-band
+    mass."""
+
+    best_fit_pars, covmat = curve_fit(
+        quadratic_band, kgrid, energy_k, p0=[0.5, energy_k[0]]
+    )
+    # Normalize by the bare-band reference mass (approximately 1/(2t) for t = 1)
+    mstar = best_fit_pars[0] / 0.5041061511616247
+    return mstar
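
Usage sketch (for reference, not part of the patch): a minimal example of the
new dispersion workflow, mirroring the serial test above. The model parameters
are the Holstein values used in the tests; the five-point window passed to
eff_mass_fit is an illustrative choice, since the quadratic fit is only
meaningful for the first few k-points near k = 0.

    import numpy as np

    from ggce import Model, System, SparseSolver
    from ggce.utils.utils import process_dispersion, eff_mass_fit

    model = Model.from_parameters(
        hopping=1.0, phonon_max_per_site=None, temperature=0.0
    )
    model.add_(
        coupling_type="Holstein",
        phonon_frequency=0.5,
        phonon_extent=2,
        phonon_number=2,
        phonon_extent_tfd=2,
        phonon_number_tfd=2,
        dimensionless_coupling_strength=0.2,
    )

    kgrid = np.linspace(0, np.pi, 20)
    solver = SparseSolver(System(model))

    # Track the lowest-energy (polaron) peak across the k-grid, starting the
    # search at w0 = -2.5 and fitting a Lorentzian to the last 10 w-points.
    results = solver.dispersion(
        kgrid, w0=-2.5, eta=0.05, nmax=1000, eta_step_div=10,
        peak_routine="scipy", next_k_offset_factor=2,
    )

    # Collapse the per-k dictionaries into E(k) and lifetime arrays.
    energy, lifetimes = process_dispersion(results)

    # Effective mass (relative to the bare band) from a quadratic fit near k = 0.
    m_star = eff_mass_fit(kgrid[:5], energy[:5])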