diff --git a/SimPEG/__init__.py b/SimPEG/__init__.py index 18cf134a19..ff20906fb9 100644 --- a/SimPEG/__init__.py +++ b/SimPEG/__init__.py @@ -76,6 +76,8 @@ maps.LinearMap maps.IdentityMap maps.InjectActiveCells + maps.InjectActiveFaces + maps.InjectActiveEdges maps.MuRelative maps.LogMap maps.ParametricBlock diff --git a/SimPEG/base/__init__.py b/SimPEG/base/__init__.py index d7425ebd92..49a2a81b06 100644 --- a/SimPEG/base/__init__.py +++ b/SimPEG/base/__init__.py @@ -2,5 +2,8 @@ BasePDESimulation, BaseElectricalPDESimulation, BaseMagneticPDESimulation, + BaseFaceEdgeElectricalPDESimulation, with_property_mass_matrices, + with_surface_property_mass_matrices, + with_line_property_mass_matrices, ) diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index a213d922ad..1d1764e3fd 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -1,6 +1,6 @@ import numpy as np import scipy.sparse as sp -from discretize.utils import Zero, TensorType +from discretize.utils import Zero, TensorType, sdinv from ..simulation import BaseSimulation from .. import props from scipy.constants import mu_0 @@ -409,6 +409,279 @@ def _clear_on_prop_update(self): return decorator +def with_surface_property_mass_matrices(property_name): + """ + This decorator will automatically populate all of the surface property mass matrices. + + Given the property "prop", this will add properties and functions to the class + representing all of the possible mass matrix operations on the mesh. + + For a given property, "prop", they will be named: + + * MeProp + * MePropDeriv + * MePropI + * MePropIDeriv + * MfProp + * MfPropDeriv + * MfPropI + * MfPropIDeriv + """ + + def decorator(cls): + arg = property_name.lower() + arg = arg[0].upper() + arg[1:] + + @property + def Mf_prop(self): + """ + Face property inner product surface matrix. 
+ """ + stash_name = f"__Mf_{arg}" + if getattr(self, stash_name, None) is None: + prop = getattr(self, arg.lower()) + M_prop = self.mesh.get_face_inner_product_surface(model=prop) + setattr(self, stash_name, M_prop) + return getattr(self, stash_name) + + setattr(cls, f"_Mf{arg}", Mf_prop) + + @property + def Me_prop(self): + """ + Edge property inner product surface matrix. + """ + stash_name = f"__Me_{arg}" + if getattr(self, stash_name, None) is None: + prop = getattr(self, arg.lower()) + M_prop = self.mesh.get_edge_inner_product_surface(model=prop) + setattr(self, stash_name, M_prop) + return getattr(self, stash_name) + + setattr(cls, f"_Me{arg}", Me_prop) + + @property + def MfI_prop(self): + """ + Face property inner product inverse matrix. + """ + stash_name = f"__MfI_{arg}" + if getattr(self, stash_name, None) is None: + prop = getattr(self, arg.lower()) + M_prop = self.mesh.get_face_inner_product_surface( + model=prop, invert_matrix=True + ) + setattr(self, stash_name, M_prop) + return getattr(self, stash_name) + + setattr(cls, f"_Mf{arg}I", MfI_prop) + + @property + def MeI_prop(self): + """ + Edge property inner product inverse matrix. + """ + stash_name = f"__MeI_{arg}" + if getattr(self, stash_name, None) is None: + prop = getattr(self, arg.lower()) + M_prop = self.mesh.get_edge_inner_product_surface( + model=prop, invert_matrix=True + ) + setattr(self, stash_name, M_prop) + return getattr(self, stash_name) + + setattr(cls, f"_Me{arg}I", MeI_prop) + + def MfDeriv_prop(self, u, v=None, adjoint=False): + """ + Derivative of `MfProperty` with respect to the model. 
+ """ + if getattr(self, f"{arg.lower()}Map") is None: + return Zero() + if isinstance(u, Zero) or isinstance(v, Zero): + return Zero() + stash_name = f"__Mf_{arg}_deriv" + if getattr(self, stash_name, None) is None: + M_prop_deriv = self.mesh.get_face_inner_product_surface_deriv( + np.ones(self.mesh.n_faces) + )(np.ones(self.mesh.n_faces)) * getattr(self, f"{arg.lower()}Deriv") + setattr(self, stash_name, M_prop_deriv) + return __inner_mat_mul_op( + getattr(self, stash_name), u, v=v, adjoint=adjoint + ) + + setattr(cls, f"_Mf{arg}Deriv", MfDeriv_prop) + + def MeDeriv_prop(self, u, v=None, adjoint=False): + """ + Derivative of `MeProperty` with respect to the model. + """ + if getattr(self, f"{arg.lower()}Map") is None: + return Zero() + if isinstance(u, Zero) or isinstance(v, Zero): + return Zero() + stash_name = f"__Me_{arg}_deriv" + if getattr(self, stash_name, None) is None: + M_prop_deriv = self.mesh.get_edge_inner_product_surface_deriv( + np.ones(self.mesh.n_faces) + )(np.ones(self.mesh.n_edges)) * getattr(self, f"{arg.lower()}Deriv") + setattr(self, stash_name, M_prop_deriv) + return __inner_mat_mul_op( + getattr(self, stash_name), u, v=v, adjoint=adjoint + ) + + setattr(cls, f"_Me{arg}Deriv", MeDeriv_prop) + + def MfIDeriv_prop(self, u, v=None, adjoint=False): + """I + Derivative of `MfPropertyI` with respect to the model. + """ + if getattr(self, f"{arg.lower()}Map") is None: + return Zero() + if isinstance(u, Zero) or isinstance(v, Zero): + return Zero() + + MI_prop = getattr(self, f"_Mf{arg}I") + u = MI_prop @ (MI_prop @ -u) + M_prop_deriv = getattr(self, f"_Mf{arg}Deriv") + return M_prop_deriv(u, v, adjoint=adjoint) + + setattr(cls, f"_Mf{arg}IDeriv", MfIDeriv_prop) + + def MeIDeriv_prop(self, u, v=None, adjoint=False): + """ + Derivative of `MePropertyI` with respect to the model. 
+ """ + if getattr(self, f"{arg.lower()}Map") is None: + return Zero() + if isinstance(u, Zero) or isinstance(v, Zero): + return Zero() + + MI_prop = getattr(self, f"_Me{arg}I") + u = MI_prop @ (MI_prop @ -u) + M_prop_deriv = getattr(self, f"_Me{arg}Deriv") + return M_prop_deriv(u, v, adjoint=adjoint) + + setattr(cls, f"_Me{arg}IDeriv", MeIDeriv_prop) + + @property + def _clear_on_prop_update(self): + items = [ + f"__Mf_{arg}", + f"__Me_{arg}", + f"__MfI_{arg}", + f"__MeI_{arg}", + f"__Mf_{arg}_deriv", + f"__Me_{arg}_deriv", + ] + return items + + setattr(cls, f"_clear_on_{arg.lower()}_update", _clear_on_prop_update) + return cls + + return decorator + + +def with_line_property_mass_matrices(property_name): + """ + This decorator will automatically populate all of the line property mass matrices. + + Given the property "prop", this will add properties and functions to the class + representing all of the possible mass matrix operations on the mesh. + + For a given property, "prop", they will be named: + + * MeProp + * MePropDeriv + * MePropI + * MePropIDeriv + """ + + def decorator(cls): + arg = property_name.lower() + arg = arg[0].upper() + arg[1:] + + @property + def Me_prop(self): + """ + Edge property inner product line matrix. + """ + stash_name = f"__Me_{arg}" + if getattr(self, stash_name, None) is None: + prop = getattr(self, arg.lower()) + M_prop = self.mesh.get_edge_inner_product_line(model=prop) + setattr(self, stash_name, M_prop) + return getattr(self, stash_name) + + setattr(cls, f"_Me{arg}", Me_prop) + + @property + def MeI_prop(self): + """ + Edge property inner product inverse matrix. 
+ """ + stash_name = f"__MeI_{arg}" + if getattr(self, stash_name, None) is None: + prop = getattr(self, arg.lower()) + M_prop = self.mesh.get_edge_inner_product_line( + model=prop, invert_matrix=True + ) + setattr(self, stash_name, M_prop) + return getattr(self, stash_name) + + setattr(cls, f"_Me{arg}I", MeI_prop) + + def MeDeriv_prop(self, u, v=None, adjoint=False): + """ + Derivative of `MeProperty` with respect to the model. + """ + if getattr(self, f"{arg.lower()}Map") is None: + return Zero() + if isinstance(u, Zero) or isinstance(v, Zero): + return Zero() + stash_name = f"__Me_{arg}_deriv" + if getattr(self, stash_name, None) is None: + M_prop_deriv = self.mesh.get_edge_inner_product_line_deriv( + np.ones(self.mesh.n_edges) + )(np.ones(self.mesh.n_edges)) * getattr(self, f"{arg.lower()}Deriv") + setattr(self, stash_name, M_prop_deriv) + return __inner_mat_mul_op( + getattr(self, stash_name), u, v=v, adjoint=adjoint + ) + + setattr(cls, f"_Me{arg}Deriv", MeDeriv_prop) + + def MeIDeriv_prop(self, u, v=None, adjoint=False): + """ + Derivative of `MePropertyI` with respect to the model. 
+ """ + if getattr(self, f"{arg.lower()}Map") is None: + return Zero() + if isinstance(u, Zero) or isinstance(v, Zero): + return Zero() + + MI_prop = getattr(self, f"_Me{arg}I") + u = MI_prop @ (MI_prop @ -u) + M_prop_deriv = getattr(self, f"_Me{arg}Deriv") + return M_prop_deriv(u, v, adjoint=adjoint) + + setattr(cls, f"_Me{arg}IDeriv", MeIDeriv_prop) + + @property + def _clear_on_prop_update(self): + items = [ + f"__Me_{arg}", + f"__MeI_{arg}", + f"__Me_{arg}_deriv", + ] + return items + + setattr(cls, f"_clear_on_{arg.lower()}_update", _clear_on_prop_update) + return cls + + return decorator + + class BasePDESimulation(BaseSimulation): @property def Vol(self): @@ -554,3 +827,135 @@ def deleteTheseOnModelUpdate(self): if self.muMap is not None or self.muiMap is not None: toDelete = toDelete + self._clear_on_mu_update + self._clear_on_mui_update return toDelete + + +@with_surface_property_mass_matrices("tau") +@with_line_property_mass_matrices("kappa") +class BaseFaceEdgeElectricalPDESimulation(BaseElectricalPDESimulation): + tau, tauMap, tauDeriv = props.Invertible( + "Electrical conductivity times thickness (S); i.e. 
conductance", + ) + kappa, kappaMap, kappaDeriv = props.Invertible( + "Electrical conductivity times cross-sectional area (Sm)", + ) + + def __init__( + self, + mesh, + sigma=1e-8, + sigmaMap=None, + rho=None, + rhoMap=None, + tau=0.0, + tauMap=None, + kappa=0.0, + kappaMap=None, + **kwargs, + ): + super().__init__(mesh=mesh, **kwargs) + self.sigma = sigma + self.rho = rho + self.sigmaMap = sigmaMap + self.rhoMap = rhoMap + self.tau = tau + self.tauMap = tauMap + self.kappa = kappa + self.kappaMap = kappaMap + + def __setattr__(self, name, value): + super().__setattr__(name, value) + if name in ["sigma", "rho", "tau", "kappa"]: + mat_list = ( + self._clear_on_sigma_update + + self._clear_on_rho_update + + self._clear_on_tau_update + + self._clear_on_kappa_update + + ["__MeSigmaTauKappa", "__MeSigmaTauKappaI"] + ) + for mat in mat_list: + if hasattr(self, mat): + delattr(self, mat) + + @property + def _MeSigmaTauKappa(self): + if getattr(self, "__MeSigmaTauKappa", None) is None: + M_prop = self.MeSigma + self._MeTau + self._MeKappa + setattr(self, "__MeSigmaTauKappa", M_prop) # noqa: B010 + return getattr(self, "__MeSigmaTauKappa") # noqa: B009 + + @property + def _MeSigmaTauKappaI(self): + if getattr(self, "__MeSigmaTauKappaI", None) is None: + M_prop = sdinv(self.MeSigma + self._MeTau + self._MeKappa) + setattr(self, "__MeSigmaTauKappaI", M_prop) # noqa: B010 + return getattr(self, "__MeSigmaTauKappaI") # noqa: B009 + + def _MeSigmaTauKappaDeriv_sigma(self, u, v=None, adjoint=False): + """Only derivative wrt to sigma""" + return self.MeSigmaDeriv(u, v, adjoint) + + def _MeSigmaTauKappaDeriv_tau(self, u, v=None, adjoint=False): + """Only derivative wrt tau""" + return self._MeTauDeriv(u, v, adjoint) + + def _MeSigmaTauKappaDeriv_kappa(self, u, v=None, adjoint=False): + """Only derivative wrt to kappa""" + return self._MeKappaDeriv(u, v, adjoint) + + def _MeSigmaTauKappaDeriv(self, u, v=None, adjoint=False): + """Only derivative wrt to kappa""" + return ( + 
self.MeSigmaDeriv(u, v, adjoint) + + self._MeTauDeriv(u, v, adjoint) + + self._MeKappaDeriv(u, v, adjoint) + ) + + def _MeSigmaTauKappaIDeriv_sigma(self, u, v=None, adjoint=False): + """Only derivative wrt to tau""" + MI_prop = self._MeSigmaTauKappaI + u = MI_prop @ (MI_prop @ -u) + return self.MeSigmaDeriv(u, v, adjoint) + + def _MeSigmaTauKappaIDeriv_tau(self, u, v=None, adjoint=False): + """Only derivative wrt to tau""" + MI_prop = self._MeSigmaTauKappaI + u = MI_prop @ (MI_prop @ -u) + return self._MeTauDeriv(u, v, adjoint) + + def _MeSigmaTauKappaIDeriv_kappa(self, u, v=None, adjoint=False): + """Only derivative wrt to tau""" + MI_prop = self._MeSigmaTauKappaI + u = MI_prop @ (MI_prop @ -u) + return self._MeKappaDeriv(u, v, adjoint) + + def _MeSigmaTauKappaIDeriv(self, u, v=None, adjoint=False): + """Only derivative wrt to kappa""" + MI_prop = self._MeSigmaTauKappaI + u = MI_prop @ (MI_prop @ -u) + return ( + self.MeSigmaDeriv(u, v, adjoint) + + self._MeTauDeriv(u, v, adjoint) + + self._MeKappaDeriv(u, v, adjoint) + ) + + @property + def deleteTheseOnModelUpdate(self): + """ + items to be deleted if the model for cell, face or edge conductivity is updated + """ + toDelete = super().deleteTheseOnModelUpdate + if ( + self.sigmaMap is not None + or self.rhoMap is not None + or self.tauMap is not None + or self.kappaMap is not None + ): + toDelete = ( + toDelete + + self._clear_on_sigma_update + + self._clear_on_rho_update + + self._clear_on_tau_update + + self._clear_on_kappa_update + + ["__MeSigmaTauKappa", "__MeSigmaTauKappaI"] + ) + return toDelete diff --git a/SimPEG/electromagnetics/base_1d.py b/SimPEG/electromagnetics/base_1d.py index f1c85f44e1..f62eaff47f 100644 --- a/SimPEG/electromagnetics/base_1d.py +++ b/SimPEG/electromagnetics/base_1d.py @@ -357,7 +357,12 @@ def _compute_hankel_coefficients(self): Is = [] n_w_past = 0 i_count = 0 - for src in survey.source_list: + # Note: coefficients are needed to be updated if we are + # inverting for the source 
height. + if self.hMap is not None: + hvec = self.h # source height above topo + + for i_src, src in enumerate(survey.source_list): # doing the check for source type by checking its name # to avoid importing and checking "isinstance" class_name = type(src).__name__ @@ -365,11 +370,11 @@ def _compute_hankel_coefficients(self): is_mag_dipole = class_name == "MagDipole" is_wire_loop = class_name == "LineCurrent" - if is_circular_loop: - if np.any(src.orientation[:-1] != 0.0): - raise ValueError("Can only simulate horizontal circular loops") + if is_circular_loop and np.any(src.orientation[:-1] != 0.0): + raise ValueError("Can only simulate horizontal circular loops") + if self.hMap is not None: - h = 0 # source height above topo + h = hvec[i_src] else: h = src.location[2] - self.topo[-1] @@ -573,6 +578,7 @@ def deleteTheseOnModelUpdate(self): toDelete += ["_J", "_gtgdiag"] return toDelete + # TODO: need to revisit this: def depth_of_investigation_christiansen_2012(self, std, thres_hold=0.8): pred = self.survey._pred.copy() delta_d = std * np.log(abs(self.survey.dobs)) diff --git a/SimPEG/electromagnetics/base_1d_stitched.py b/SimPEG/electromagnetics/base_1d_stitched.py new file mode 100644 index 0000000000..bbd43b676e --- /dev/null +++ b/SimPEG/electromagnetics/base_1d_stitched.py @@ -0,0 +1,499 @@ +from scipy.constants import mu_0 +import numpy as np +from ..simulation import BaseSimulation +from .. import props +from .. import utils +from ..utils.code_utils import ( + validate_integer, + validate_ndarray_with_shape, + validate_type, +) + +############################################################################### +# # +# BaseStitchedEM1DSimulation # +# # +############################################################################### + +__all__ = ["BaseStitchedEM1DSimulation"] + + +class BaseStitchedEM1DSimulation(BaseSimulation): + """ + Base class for the stitched 1D simulation. This simulation models the EM + response for a set of 1D EM soundings. 
+ """ + + _formulation = "1D" + + # Properties for electrical conductivity/resistivity + sigma, sigmaMap, sigmaDeriv = props.Invertible( + "Electrical conductivity at infinite frequency (S/m)" + ) + + eta = props.PhysicalProperty("Intrinsic chargeability (V/V), 0 <= eta < 1") + tau = props.PhysicalProperty("Time constant for Cole-Cole model (s)") + c = props.PhysicalProperty("Frequency Dependency for Cole-Cole model, 0 < c < 1") + + # Properties for magnetic susceptibility + mu, muMap, muDeriv = props.Invertible( + "Magnetic permeability at infinite frequency (SI)" + ) + chi = props.PhysicalProperty( + "DC magnetic susceptibility for viscous remanent magnetization contribution (SI)" + ) + tau1 = props.PhysicalProperty( + "Lower bound for log-uniform distribution of time-relaxation constants for viscous remanent magnetization (s)" + ) + tau2 = props.PhysicalProperty( + "Upper bound for log-uniform distribution of time-relaxation constants for viscous remanent magnetization (s)" + ) + + # Additional properties + h, hMap, hDeriv = props.Invertible("Receiver Height (m), h > 0") + + thicknesses, thicknessesMap, thicknessesDeriv = props.Invertible( + "layer thicknesses (m)" + ) + + def __init__( + self, + sigma=None, + sigmaMap=None, + thicknesses=None, + thicknessesMap=None, + mu=mu_0, + muMap=None, + h=None, + hMap=None, + eta=None, + tau=None, + c=None, + dchi=None, + tau1=None, + tau2=None, + fix_Jmatrix=False, + topo=None, + parallel=False, + n_cpu=None, + **kwargs, + ): + super().__init__(mesh=None, **kwargs) + self.sigma = sigma + self.sigmaMap = sigmaMap + self.mu = mu + self.muMap = muMap + self.h = h + self.hMap = hMap + if thicknesses is None: + thicknesses = np.array([]) + self.thicknesses = thicknesses + self.thicknessesMap = thicknessesMap + self.eta = eta + self.tau = tau + self.c = c + self.dchi = dchi + self.tau1 = tau1 + self.tau2 = tau2 + self.fix_Jmatrix = fix_Jmatrix + self.topo = topo + if self.topo is None: + self.set_null_topography() + + 
self.parallel = parallel + self.n_cpu = n_cpu + + if self.parallel: + if self.verbose: + print(">> Use multiprocessing for parallelization") + if self.n_cpu is None: + self.n_cpu = multiprocessing.cpu_count() + print((">> n_cpu: %i") % (self.n_cpu)) + else: + if self.verbose: + print(">> Serial version is used") + + @property + def fix_Jmatrix(self): + """Whether to fix the sensitivity matrix. + + Returns + ------- + bool + """ + return self._fix_Jmatrix + + @fix_Jmatrix.setter + def fix_Jmatrix(self, value): + self._fix_Jmatrix = validate_type("fix_Jmatrix", value, bool) + + @property + def topo(self): + """Topography. + + Returns + ------- + numpy.ndarray of float + """ + return self._topo + + @topo.setter + def topo(self, value): + self._topo = validate_ndarray_with_shape("topo", value, shape=("*", 3)) + + @property + def parallel(self): + """ + Run the computation as a parallel process. + + Returns + ------- + bool + """ + return self._parallel + + @parallel.setter + def parallel(self, value): + self._parallel = validate_type("parallel", value, bool) + + @property + def n_cpu(self): + """Number of cpus + + Returns + ------- + int + """ + return self._n_cpu + + @n_cpu.setter + def n_cpu(self, value): + self._n_cpu = validate_integer("n_cpu", value, min_val=1) + + @property + def invert_height(self): + if self.hMap is None: + return False + else: + return True + + @property + def halfspace_switch(self): + """True = halfspace, False = layered Earth""" + if (self.thicknesses is None) | (len(self.thicknesses) == 0): + return True + else: + return False + + @property + def n_layer(self): + if self.thicknesses is None: + return 1 + else: + return len(self.thicknesses) + 1 + + @property + def n_sounding(self): + return len(self.survey.source_location_by_sounding) + + @property + def data_index(self): + return self.survey.data_index + + # ------------- For physical properties ------------- # + @property + def sigma_matrix(self): + if getattr(self, "_sigma_matrix", None) 
is None: + # Ordering: first z then x + self._sigma_matrix = self.sigma.reshape((self.n_sounding, self.n_layer)) + return self._sigma_matrix + + @property + def thickness_matrix(self): + if getattr(self, "_thickness_matrix", None) is None: + # Ordering: first z then x + if len(self.thicknesses) == int(self.n_sounding * (self.n_layer - 1)): + self._thickness_matrix = self.thicknesses.reshape( + (self.n_sounding, self.n_layer - 1) + ) + else: + self._thickness_matrix = np.tile(self.thicknesses, (self.n_sounding, 1)) + return self._thickness_matrix + + @property + def eta_matrix(self): + if getattr(self, "_eta_matrix", None) is None: + # Ordering: first z then x + if self.eta is None: + self._eta_matrix = np.zeros( + (self.n_sounding, self.n_layer), dtype=float, order="C" + ) + else: + self._eta_matrix = self.eta.reshape((self.n_sounding, self.n_layer)) + return self._eta_matrix + + @property + def tau_matrix(self): + if getattr(self, "_tau_matrix", None) is None: + # Ordering: first z then x + if self.tau is None: + self._tau_matrix = 1e-3 * np.ones( + (self.n_sounding, self.n_layer), dtype=float, order="C" + ) + else: + self._tau_matrix = self.tau.reshape((self.n_sounding, self.n_layer)) + return self._tau_matrix + + @property + def c_matrix(self): + if getattr(self, "_c_matrix", None) is None: + # Ordering: first z then x + if self.c is None: + self._c_matrix = np.ones( + (self.n_sounding, self.n_layer), dtype=float, order="C" + ) + else: + self._c_matrix = self.c.reshape((self.n_sounding, self.n_layer)) + return self._c_matrix + + @property + def chi_matrix(self): + if getattr(self, "_chi_matrix", None) is None: + # Ordering: first z then x + if self.chi is None: + self._chi_matrix = np.zeros( + (self.n_sounding, self.n_layer), dtype=float, order="C" + ) + else: + self._chi_matrix = self.chi.reshape((self.n_sounding, self.n_layer)) + return self._chi_matrix + + @property + def dchi_matrix(self): + if getattr(self, "_dchi_matrix", None) is None: + # Ordering: first 
z then x + if self.dchi is None: + self._dchi_matrix = np.zeros( + (self.n_sounding, self.n_layer), dtype=float, order="C" + ) + else: + self._dchi_matrix = self.dchi.reshape((self.n_sounding, self.n_layer)) + return self._dchi_matrix + + @property + def tau1_matrix(self): + if getattr(self, "_tau1_matrix", None) is None: + # Ordering: first z then x + if self.tau1 is None: + self._tau1_matrix = 1e-10 * np.ones( + (self.n_sounding, self.n_layer), dtype=float, order="C" + ) + else: + self._tau1_matrix = self.tau1.reshape((self.n_sounding, self.n_layer)) + return self._tau1_matrix + + @property + def tau2_matrix(self): + if getattr(self, "_tau2_matrix", None) is None: + # Ordering: first z then x + if self.tau2 is None: + self._tau2_matrix = 100.0 * np.ones( + (self.n_sounding, self.n_layer), dtype=float, order="C" + ) + else: + self._tau2_matrix = self.tau2.reshape((self.n_sounding, self.n_layer)) + return self._tau2_matrix + + @property + def JtJ_sigma(self): + return self._JtJ_sigma + + def JtJ_height(self): + return self._JtJ_height + + @property + def h_vector(self): + if self.hMap is None: + h = self.source_locations_for_sounding[:, 2] - self.topo[:, 2] + return h + else: + return self.h + + # ------------- Etcetra .... 
------------- # + @property + def IJLayers(self): + if getattr(self, "_IJLayers", None) is None: + # Ordering: first z then x + self._IJLayers = self.set_ij_n_layer() + return self._IJLayers + + @property + def IJHeight(self): + if getattr(self, "_IJHeight", None) is None: + # Ordering: first z then x + self._IJHeight = self.set_ij_n_layer(n_layer=1) + return self._IJHeight + + # ------------- For physics ------------- # + + def get_uniq_soundings(self): + self._sounding_types_uniq, self._ind_sounding_uniq = np.unique( + self.survey._sounding_types, return_index=True + ) + + def input_args(self, i_sounding, output_type="forward"): + output = ( + self.survey.get_sources_by_sounding_number(i_sounding), + self.topo[i_sounding, :], + self.thickness_matrix[i_sounding, :], + self.sigma_matrix[i_sounding, :], + self.eta_matrix[i_sounding, :], + self.tau_matrix[i_sounding, :], + self.c_matrix[i_sounding, :], + self.chi_matrix[i_sounding, :], + self.dchi_matrix[i_sounding, :], + self.tau1_matrix[i_sounding, :], + self.tau2_matrix[i_sounding, :], + self.h_vector[i_sounding], + output_type, + ) + return output + + def fields(self, m): + if self.verbose: + print("Compute fields") + + return self.forward(m) + + def dpred(self, m, f=None): + """ + Return predicted data. + Predicted data, (`_pred`) are computed when + self.fields is called. 
+ """ + if f is None: + f = self.fields(m) + + return f + + @property + def sounding_number(self): + self._sounding_number = [ + key for key in self.survey.source_location_by_sounding.keys() + ] + return self._sounding_number + + @property + def n_chunk(self): + self._n_chunk = len(self.sounding_number_chunks) + return self._n_chunk + + @property + def source_locations_for_sounding(self): + if getattr(self, "_source_locations_for_sounding", None) is None: + self._source_locations_for_sounding = np.vstack( + [ + self.survey._source_location_by_sounding[ii][0] + for ii in range(self.n_sounding) + ] + ) + return self._source_locations_for_sounding + + def set_null_topography(self): + self.topo = self.source_locations_for_sounding.copy() + self.topo[:, 2] = 0.0 + + def set_ij_n_layer(self, n_layer=None): + """ + Compute (I, J) indicies to form sparse sensitivity matrix + This will be used in GlobalEM1DSimulation when after sensitivity matrix + for each sounding is computed + """ + I = [] + J = [] + shift_for_J = 0 + shift_for_I = 0 + if n_layer is None: + m = self.n_layer + else: + m = n_layer + + for i_sounding in range(self.n_sounding): + n = self.survey.vnD_by_sounding[i_sounding] + J_temp = np.tile(np.arange(m), (n, 1)) + shift_for_J + I_temp = ( + np.tile(np.arange(n), (1, m)).reshape((n, m), order="F") + shift_for_I + ) + J.append(utils.mkvc(J_temp)) + I.append(utils.mkvc(I_temp)) + shift_for_J += m + shift_for_I = I_temp[-1, -1] + 1 + J = np.hstack(J).astype(int) + I = np.hstack(I).astype(int) + return (I, J) + + def set_ij_height(self): + """ + Compute (I, J) indicies to form sparse sensitivity matrix + This will be used in GlobalEM1DSimulation when after sensitivity matrix + for each sounding is computed + """ + J = [] + I = np.arange(self.survey.nD) + for i_sounding in range(self.n_sounding): + n = self.survey.vnD_by_sounding[i_sounding] + J.append(np.ones(n) * i_sounding) + J = np.hstack(J).astype(int) + return (I, J) + + def Jvec(self, m, v, f=None): + 
J_sigma = self.getJ_sigma(m) + Jv = J_sigma @ (self.sigmaDeriv @ v) + if self.hMap is not None: + J_height = self.getJ_height(m) + Jv += J_height @ (self.hDeriv @ v) + return Jv + + def Jtvec(self, m, v, f=None): + J_sigma = self.getJ_sigma(m) + Jtv = self.sigmaDeriv.T @ (J_sigma.T @ v) + if self.hMap is not None: + J_height = self.getJ_height(m) + Jtv += self.hDeriv.T @ (J_height.T @ v) + return Jtv + + # Revisit this + def getJtJdiag(self, m, W=None, threshold=1e-8): + """ + Compute diagonal component of JtJ or + trace of sensitivity matrix (J) + """ + if getattr(self, "_gtgdiag", None) is None: + J_sigma = self.getJ_sigma(m) + J_matrix = J_sigma @ (self.sigmaDeriv) + + if self.hMap is not None: + J_height = self.getJ_height(m) + J_matrix += J_height * self.hDeriv + + if W is None: + W = utils.speye(J_matrix.shape[0]) + J_matrix = W * J_matrix + gtgdiag = (J_matrix.T * J_matrix).diagonal() + gtgdiag /= gtgdiag.max() + gtgdiag += threshold + self._gtgdiag = gtgdiag + return self._gtgdiag + + @property + def deleteTheseOnModelUpdate(self): + toDelete = super().deleteTheseOnModelUpdate + if self.fix_Jmatrix is False: + toDelete += [ + "_sigma_matrix", + "_J", + "_Jmatrix_sigma", + "_Jmatrix_height", + "_gtg_diag", + ] + return toDelete diff --git a/SimPEG/electromagnetics/frequency_domain/__init__.py b/SimPEG/electromagnetics/frequency_domain/__init__.py index 3dad3cde28..b6bf19bf60 100644 --- a/SimPEG/electromagnetics/frequency_domain/__init__.py +++ b/SimPEG/electromagnetics/frequency_domain/__init__.py @@ -16,6 +16,8 @@ Simulation3DMagneticFluxDensity Simulation3DCurrentDensity Simulation3DMagneticField + Simulation3DElectricFieldFaceEdgeConductivity + Simulation3DMagneticFluxDensityFaceEdgeConductivity Receivers @@ -60,6 +62,8 @@ Fields3DMagneticFluxDensity Fields3DCurrentDensity Fields3DMagneticField + Fields3DElectricFieldFaceEdgeConductivity + Fields3DMagneticFluxDensityFaceEdgeConductivity Base Classes ============ @@ -81,13 +85,18 @@ 
Simulation3DMagneticFluxDensity, Simulation3DCurrentDensity, Simulation3DMagneticField, + Simulation3DElectricFieldFaceEdgeConductivity, + Simulation3DMagneticFluxDensityFaceEdgeConductivity, ) from .simulation_1d import Simulation1DLayered +from .simulation_1d_stitched import Simulation1DLayeredStitched from .fields import ( Fields3DElectricField, Fields3DMagneticFluxDensity, Fields3DCurrentDensity, Fields3DMagneticField, + Fields3DElectricFieldFaceEdgeConductivity, + Fields3DMagneticFluxDensityFaceEdgeConductivity, ) from . import sources as Src diff --git a/SimPEG/electromagnetics/frequency_domain/fields.py b/SimPEG/electromagnetics/frequency_domain/fields.py index 06900f5dc3..8bce30eea6 100644 --- a/SimPEG/electromagnetics/frequency_domain/fields.py +++ b/SimPEG/electromagnetics/frequency_domain/fields.py @@ -622,6 +622,94 @@ def _charge_density(self, eSolution, source_list): ) / self.mesh.cell_volumes[:, None] +class Fields3DElectricFieldFaceEdgeConductivity(Fields3DElectricField): + r""" + Fields object for Simulation3DElectricFieldFaceEdgeConductivity. + + In this case, the discrete Ohm's law relationship accounts for volume, face + and edge currents. So: + + .. 
math:: + \mathbf{M_e \, J} = \left ( \mathbf{M_{e\sigma} + M_{e\tau} + + M_{e\kappa}} \right ) \mathbf{e} + + :param discretize.base.BaseMesh mesh: mesh + :param SimPEG.electromagnetics.frequency_domain.SurveyFDEM.Survey survey: survey + """ + + def startup(self): + self._edgeCurl = self.simulation.mesh.edge_curl + self._aveE2CCV = self.simulation.mesh.aveE2CCV + self._aveF2CCV = self.simulation.mesh.aveF2CCV + self._nC = self.simulation.mesh.nC + self.__MeSigmaTauKappa = self.simulation._MeSigmaTauKappa + self.__MeSigmaTauKappaDeriv = self.simulation._MeSigmaTauKappaDeriv + self._MfMui = self.simulation.MfMui + self._MfMuiDeriv = self.simulation.MfMuiDeriv + self._MeI = self.simulation.MeI + self._MfI = self.simulation.MfI + + def _j(self, eSolution, source_list): + """ + Current density from eSolution + + :param numpy.ndarray eSolution: field we solved for + :param list source_list: list of sources + :rtype: numpy.ndarray + :return: current density + """ + return self._MeI * (self.__MeSigmaTauKappa * self._e(eSolution, source_list)) + + def _jDeriv_u(self, src, du_dm_v, adjoint=False): + """ + Derivative of the current density with respect to the thing we solved + for + + :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source + :param numpy.ndarray du_dm_v: vector to take product with + :param bool adjoint: adjoint? + :rtype: numpy.ndarray + :return: product of the derivative of the current density with respect + to the field we solved for with a vector + """ + if adjoint: + return self._eDeriv_u( + src, self.__MeSigmaTauKappa.T * (self._MeI.T * du_dm_v), adjoint=adjoint + ) + return self._MeI * ( + self.__MeSigmaTauKappa * (self._eDeriv_u(src, du_dm_v, adjoint=adjoint)) + ) + + def _jDeriv_m(self, src, v, adjoint=False): + """ + Derivative of the current density with respect to the inversion model. 
+ + This includes derivatives for volume, face and/or edge conductivities + depending on whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. + + :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source + :param numpy.ndarray v: vector to take product with + :param bool adjoint: adjoint? + :rtype: numpy.ndarray + :return: product of the current density derivative with respect to the + inversion model with a vector + """ + e = self[src, "e"] + + if adjoint: + return ( + self.__MeSigmaTauKappaDeriv(e, (self._MeI.T * v), adjoint=adjoint) + + self._eDeriv_m(src, (self._MeI.T * v), adjoint=adjoint) + ) + src.jPrimaryDeriv(self.simulation, v, adjoint) + return ( + self._MeI + * ( + self._eDeriv_m(src, v, adjoint=adjoint) + + self.__MeSigmaTauKappaDeriv(e, v, adjoint=adjoint) + ) + ) + src.jPrimaryDeriv(self.simulation, v, adjoint) + + class Fields3DMagneticFluxDensity(FieldsFDEM): """ Fields object for Simulation3DMagneticFluxDensity. @@ -952,6 +1040,138 @@ def _charge_density(self, bSolution, source_list): ) / self.mesh.cell_volumes[:, None] +class Fields3DMagneticFluxDensityFaceEdgeConductivity(Fields3DMagneticFluxDensity): + r""" + Fields object for Simulation3DMagneticFluxDensityFaceEdgeConductivity. + + In this case, the discrete Ohm's law relationship accounts for volume, face + and edge currents. So: + + .. 
math:: + \mathbf{M_e \, J} = \left ( \mathbf{M_{e\sigma} + M_{e\tau} + + M_{e\kappa}} \right ) \mathbf{e} + + :param discretize.base.BaseMesh mesh: mesh + :param SimPEG.electromagnetics.frequency_domain.SurveyFDEM.Survey survey: survey + """ + + def startup(self): + self._edgeCurl = self.simulation.mesh.edge_curl + self._MfMui = self.simulation.MfMui + self._MfMuiDeriv = self.simulation.MfMuiDeriv + self.__MeSigmaTauKappa = self.simulation._MeSigmaTauKappa + self.__MeSigmaTauKappaI = self.simulation._MeSigmaTauKappaI + self.__MeSigmaTauKappaDeriv = self.simulation._MeSigmaTauKappaDeriv + self.__MeSigmaTauKappaIDeriv = self.simulation._MeSigmaTauKappaIDeriv + self._Me = self.simulation.Me + self._aveF2CCV = self.simulation.mesh.aveF2CCV + self._aveE2CCV = self.simulation.mesh.aveE2CCV + self._sigma = self.simulation.sigma + self._mui = self.simulation.mui + self._nC = self.simulation.mesh.nC + self._MeI = self.simulation.MeI + self._MfI = self.simulation.MfI + + def _eSecondary(self, bSolution, source_list): + """ + Secondary electric field from bSolution + + :param numpy.ndarray bSolution: field we solved for + :param list source_list: list of sources + :rtype: numpy.ndarray + :return: secondary electric field + """ + + e = self._edgeCurl.T * (self._MfMui * bSolution) + for i, src in enumerate(source_list): + s_e = src.s_e(self.simulation) + e[:, i] = e[:, i] + -s_e + + if self.simulation.permittivity is not None: + MeyhatI = ( + self.simulation._get_edge_admittivity_property_matrix( + src.frequency, invert_matrix=True + ) + + self.__MeTau + + self.__MeKappa + ) + e[:, i] = MeyhatI * e[:, i] + + if self.simulation.permittivity is None: + return self.__MeSigmaTauKappaI * e + else: + return e + + def _eDeriv_u(self, src, du_dm_v, adjoint=False): + """ + Derivative of the electric field with respect to the thing we solved + for + + :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source + :param numpy.ndarray v: vector to take product with + 
:param bool adjoint: adjoint? + :rtype: numpy.ndarray + :return: product of the derivative of the electric field with respect + to the field we solved for with a vector + """ + + if not adjoint: + return self.__MeSigmaTauKappaI * ( + self._edgeCurl.T * (self._MfMui * du_dm_v) + ) + return self._MfMui.T * (self._edgeCurl * (self.__MeSigmaTauKappaI.T * du_dm_v)) + + def _eDeriv_m(self, src, v, adjoint=False): + bSolution = mkvc(self[src, "bSolution"]) + s_e = src.s_e(self.simulation) + + w = -s_e + self._edgeCurl.T * (self._MfMui * bSolution) + + if adjoint: + s_eDeriv = src.s_eDeriv( + self.simulation, self.__MeSigmaTauKappaI.T * v, adjoint + ) + return ( + self.__MeSigmaTauKappaIDeriv(w, v, adjoint) + + self._MfMuiDeriv( + bSolution, self._edgeCurl * (self.__MeSigmaTauKappaI.T * v), adjoint + ) + - s_eDeriv + + src.ePrimaryDeriv(self.simulation, v, adjoint) + ) + s_eDeriv = src.s_eDeriv(self.simulation, v, adjoint) + return ( + self.__MeSigmaTauKappaIDeriv(w, v) + + self.__MeSigmaTauKappaI + * (self._edgeCurl.T * self._MfMuiDeriv(bSolution, v)) + - self.__MeSigmaTauKappaI * s_eDeriv + + src.ePrimaryDeriv(self.simulation, v, adjoint) + ) + + def _j(self, bSolution, source_list): + """ + Secondary current density from bSolution + + :param numpy.ndarray bSolution: field we solved for + :param list source_list: list of sources + :rtype: numpy.ndarray + :return: primary current density + """ + + if self.simulation.permittivity is None: + j = self._edgeCurl.T * (self._MfMui * bSolution) + + for i, src in enumerate(source_list): + s_e = src.s_e(self.simulation) + j[:, i] = j[:, i] - s_e + + return self._MeI * j + else: + return self._MeI * ( + self.__MeSigmaTauKappa * self._e(bSolution, source_list) + ) + + class Fields3DCurrentDensity(FieldsFDEM): """ Fields object for Simulation3DCurrentDensity. 
diff --git a/SimPEG/electromagnetics/frequency_domain/simulation.py b/SimPEG/electromagnetics/frequency_domain/simulation.py index 50b138cf9e..5ab3c57f63 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation.py @@ -4,14 +4,17 @@ from ... import props from ...data import Data -from ...utils import mkvc, validate_type +from ...utils import mkvc, validate_type, sdinv +from ...base import BaseFaceEdgeElectricalPDESimulation from ..base import BaseEMSimulation from ..utils import omega from .survey import Survey from .fields import ( FieldsFDEM, Fields3DElectricField, + Fields3DElectricFieldFaceEdgeConductivity, Fields3DMagneticFluxDensity, + Fields3DMagneticFluxDensityFaceEdgeConductivity, Fields3DMagneticField, Fields3DCurrentDensity, ) @@ -426,6 +429,108 @@ def getRHSDeriv(self, freq, src, v, adjoint=False): ) * s_eDeriv(v) +class Simulation3DElectricFieldFaceEdgeConductivity( + Simulation3DElectricField, BaseFaceEdgeElectricalPDESimulation +): + r""" + By eliminating the magnetic flux density using + + .. math :: + + \mathbf{b} = \frac{1}{i \omega}\left(-\mathbf{C} \mathbf{e} + + \mathbf{s_m}\right) + + + we can write Maxwell's equations as a second order system in + :math:`\mathbf{e}` only: + + .. math :: + + \left(\mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f} \mathbf{C} + + i \omega \left ( \mathbf{M^e_{\sigma} + M^e_\tau + M^e_\kappa} \right ) + \right) \mathbf{e} = \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f}\mathbf{s_m} + - i\omega\mathbf{M^e}\mathbf{s_e} + + which we solve for :math:`\mathbf{e}`. + + :param discretize.base.BaseMesh mesh: mesh + """ + + _solutionType = "eSolution" + _formulation = "EB" + fieldsPair = Fields3DElectricFieldFaceEdgeConductivity + + def getA(self, freq): + r""" + System matrix + + ..
math :: + + \mathbf{A} = \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f} \mathbf{C} + + i \omega \left ( \mathbf{M^e_{\sigma}} + \mathbf{M^e_{\tau}} + + \mathbf{M^e_{\kappa}} \right) + + :param float freq: Frequency + :rtype: scipy.sparse.csr_matrix + :return: A + """ + MfMui = self.MfMui + C = self.mesh.edge_curl + + if self.permittivity is None: + MeSigmaTauKappa = self._MeSigmaTauKappa + A = C.T.tocsr() * MfMui * C + 1j * omega(freq) * MeSigmaTauKappa + else: + Meyhat = ( + self._get_edge_admittivity_property_matrix(freq) + + self._MeTau + + self._MeKappa + ) + A = C.T.tocsr() * MfMui * C + 1j * omega(freq) * Meyhat + + return A + + def getADeriv_sigma(self, freq, u, v, adjoint=False): + r""" + Product of the derivative of our system matrix with respect to the + electrical properties within the model and a vector. This includes + derivatives for volume, face and/or edge conductivities depending on + whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. + + :param float freq: frequency + :param numpy.ndarray u: solution vector (nE,) + :param numpy.ndarray v: vector to take product with (nP,) or (nD,) for + adjoint + :param bool adjoint: adjoint? + :rtype: numpy.ndarray + :return: derivative of the system matrix times a vector (nP,) or + adjoint (nD,) + """ + + dMe_dsigma_v = self._MeSigmaTauKappaDeriv(u, v, adjoint) + return 1j * omega(freq) * dMe_dsigma_v + + def getADeriv(self, freq, u, v, adjoint=False): + r""" + Product of the derivative of our system matrix with respect to the + model and a vector. + + :param float freq: frequency + :param numpy.ndarray u: solution vector (nE,) + :param numpy.ndarray v: vector to take product with (nP,) or (nD,) for + adjoint + :param bool adjoint: adjoint?
+ :rtype: numpy.ndarray + :return: derivative of the system matrix times a vector (nP,) or + adjoint (nD,) + """ + return ( + self.getADeriv_sigma(freq, u, v, adjoint) + + self.getADeriv_mui(freq, u, v, adjoint) + # + self.getADeriv_permittivity(freq, u, v, adjoint) + ) + + class Simulation3DMagneticFluxDensity(BaseFDEMSimulation): r""" We eliminate :math:`\mathbf{e}` using @@ -608,6 +713,188 @@ def getRHSDeriv(self, freq, src, v, adjoint=False): return RHSderiv + SrcDeriv +class Simulation3DMagneticFluxDensityFaceEdgeConductivity( + Simulation3DMagneticFluxDensity, BaseFaceEdgeElectricalPDESimulation +): + r""" + We eliminate :math:`\mathbf{e}` using + + .. math :: + + \mathbf{e} = \left ( \mathbf{M^e_{\sigma}} + \mathbf{M^e_{\tau}} + + \mathbf{M^e_{\kappa}}\right )^{-1} \left(\mathbf{C}^{\top} + \mathbf{M_{\mu^{-1}}^f} \mathbf{b} - \mathbf{s_e}\right) + + and solve for :math:`\mathbf{b}` using: + + .. math :: + + \left(\mathbf{C} \left ( \mathbf{M^e_{\sigma}} + \mathbf{M^e_{\tau}} + + \mathbf{M^e_{\kappa}}\right )^{-1} \mathbf{C}^{\top} + \mathbf{M_{\mu^{-1}}^f} + i \omega \right)\mathbf{b} = \mathbf{s_m} + + \left ( \mathbf{M^e_{\sigma}} + \mathbf{M^e_{\tau}} + + \mathbf{M^e_{\kappa}}\right )^{-1} \mathbf{M^e}\mathbf{s_e} + + .. note :: + The inverse problem will not work with full anisotropy + + :param discretize.base.BaseMesh mesh: mesh + """ + + fieldsPair = Fields3DMagneticFluxDensityFaceEdgeConductivity + + def getA(self, freq): + r""" + System matrix + + .. 
math :: + + \mathbf{A} = \mathbf{C} \left ( \mathbf{M^e_{\sigma}} + + \mathbf{M^e_{\tau}} + \mathbf{M^e_{\kappa}}\right )^{-1} + \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f} + i \omega + + :param float freq: Frequency + :rtype: scipy.sparse.csr_matrix + :return: A + """ + + MfMui = self.MfMui + C = self.mesh.edge_curl + iomega = 1j * omega(freq) * sp.eye(self.mesh.nF) + + if self.permittivity is None: + MeSigmaTauKappaI = self._MeSigmaTauKappaI + A = C * (MeSigmaTauKappaI * (C.T.tocsr() * MfMui)) + iomega + else: + MeyhatI = self._get_edge_admittivity_property_matrix( + freq, invert_matrix=True + ) + A = C * (MeyhatI * (C.T.tocsr() * MfMui)) + iomega + + if self._makeASymmetric: + return MfMui.T.tocsr() * A + return A + + def getADeriv_sigma(self, freq, u, v, adjoint=False): + r""" + Product of the derivative of our system matrix with respect to the + model and a vector. + + This includes derivatives for volume, face and/or edge conductivities + depending on whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. + + :param float freq: frequency + :param numpy.ndarray u: solution vector (nF,) + :param numpy.ndarray v: vector to take product with (nP,) or (nD,) for + adjoint + :param bool adjoint: adjoint?
+ :rtype: numpy.ndarray + :return: derivative of the system matrix times a vector (nP,) or + adjoint (nD,) + """ + + MfMui = self.MfMui + C = self.mesh.edge_curl + MeSigmaTauKappaIDeriv = self._MeSigmaTauKappaIDeriv + vec = C.T * (MfMui * u) + + if adjoint: + return MeSigmaTauKappaIDeriv(vec, C.T * v, adjoint) + return C * MeSigmaTauKappaIDeriv(vec, v, adjoint) + + def getADeriv_mui(self, freq, u, v, adjoint=False): + MfMuiDeriv = self.MfMuiDeriv(u) + MeSigmaTauKappaI = self._MeSigmaTauKappaI + C = self.mesh.edge_curl + + if adjoint: + return MfMuiDeriv.T * (C * (MeSigmaTauKappaI.T * (C.T * v))) + return C * (MeSigmaTauKappaI * (C.T * (MfMuiDeriv * v))) + + def getADeriv(self, freq, u, v, adjoint=False): + if adjoint is True and self._makeASymmetric: + v = self.MfMui * v + + ADeriv = self.getADeriv_sigma(freq, u, v, adjoint) + self.getADeriv_mui( + freq, u, v, adjoint + ) + + if adjoint is False and self._makeASymmetric: + return self.MfMui.T * ADeriv + + return ADeriv + + def getRHS(self, freq): + r""" + Right hand side for the system + + .. 
math :: + + \mathbf{RHS} = \mathbf{s_m} + + \mathbf{M^e_{\sigma}}^{-1}\mathbf{s_e} + + :param float freq: Frequency + :rtype: numpy.ndarray + :return: RHS (nE, nSrc) + """ + + s_m, s_e = self.getSourceTerm(freq) + C = self.mesh.edge_curl + + if self.permittivity is None: + MeSigmaTauKappaI = self._MeSigmaTauKappaI + RHS = s_m + C * (MeSigmaTauKappaI * s_e) + else: + MeyhatI = sdinv( + self._get_edge_admittivity_property_matrix(freq, invert_matrix=False) + + self._MeTau + + self._MeKappa + ) + RHS = s_m + C * (MeyhatI * s_e) + + if self._makeASymmetric is True: + MfMui = self.MfMui + return MfMui.T * RHS + + return RHS + + def getRHSDeriv(self, freq, src, v, adjoint=False): + """ + Derivative of the right hand side with respect to the model + + :param float freq: frequency + :param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM src: FDEM source + :param numpy.ndarray v: vector to take product with + :param bool adjoint: adjoint? + :rtype: numpy.ndarray + :return: product of rhs deriv with a vector + """ + + C = self.mesh.edge_curl + s_m, s_e = src.eval(self) + MfMui = self.MfMui + + if self._makeASymmetric and adjoint: + v = self.MfMui * v + + # MeSigmaIDeriv = self.MeSigmaIDeriv(s_e) + s_mDeriv, s_eDeriv = src.evalDeriv(self, adjoint=adjoint) + + if not adjoint: + # RHSderiv = C * (MeSigmaIDeriv * v) + RHSderiv = C * self._MeSigmaTauKappaIDeriv(s_e, v, adjoint) + SrcDeriv = s_mDeriv(v) + C * (self._MeSigmaTauKappaI * s_eDeriv(v)) + elif adjoint: + # RHSderiv = MeSigmaIDeriv.T * (C.T * v) + RHSderiv = self._MeSigmaTauKappaIDeriv(s_e, C.T * v, adjoint) + SrcDeriv = s_mDeriv(v) + s_eDeriv(self._MeSigmaTauKappaI.T * (C.T * v)) + + if self._makeASymmetric is True and not adjoint: + return MfMui.T * (SrcDeriv + RHSderiv) + + return RHSderiv + SrcDeriv + + ############################################################################### # H-J Formulation # ############################################################################### diff --git 
a/SimPEG/electromagnetics/frequency_domain/simulation_1d.py b/SimPEG/electromagnetics/frequency_domain/simulation_1d.py index fc9200b146..902ce90c61 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation_1d.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation_1d.py @@ -104,10 +104,10 @@ def fields(self, m): receiver and outputs it as a list. Used for computing response or sensitivities. """ - self._compute_coefficients() - self.model = m + self._compute_coefficients() + C0s = self._C0s C1s = self._C1s W = self._W @@ -149,28 +149,10 @@ def getJ(self, m, f=None): # Grab a copy C0s_dh = C0s.copy() C1s_dh = C1s.copy() - h_vec = self.h - i = 0 - for i_src, src in enumerate(self.survey.source_list): - class_name = type(src).__name__ - is_wire_loop = class_name == "LineCurrent" - - h = h_vec[i_src] - if is_wire_loop: - n_quad_points = src.n_segments * self.n_points_per_path - nD = sum( - rx.locations.shape[0] * n_quad_points - for rx in src.receiver_list - ) - else: - nD = sum(rx.locations.shape[0] for rx in src.receiver_list) - ip1 = i + nD - v = np.exp(-lambs[i:ip1] * h) - C0s_dh[i:ip1] *= v * -lambs[i:ip1] - C1s_dh[i:ip1] *= v * -lambs[i:ip1] - i = ip1 - # J will be n_d * n_src (each source has it's own h)... + # It seems to be the 2 * lambs to be multiplied, but had to drop factor of 2 + C0s_dh *= -lambs + C1s_dh *= -lambs rTE = rTE_forward(frequencies, unique_lambs, sig, mu, self.thicknesses) rTE = rTE[i_freq] rTE = np.take_along_axis(rTE, inv_lambs, axis=1) @@ -181,7 +163,7 @@ def getJ(self, m, f=None): # need to re-arange v_dh as it's currently (n_data x 1) # however it already contains all the relevant information... # just need to map it from the rx index to the source index associated.. 
- v_dh = np.zeros((self.survey.nSrc, v_dh_temp.shape[0])) + v_dh = np.zeros((self.survey.nSrc, v_dh_temp.shape[0]), dtype=complex) i = 0 for i_src, src in enumerate(self.survey.source_list): diff --git a/SimPEG/electromagnetics/frequency_domain/simulation_1d_stitched.py b/SimPEG/electromagnetics/frequency_domain/simulation_1d_stitched.py new file mode 100644 index 0000000000..928aa568ba --- /dev/null +++ b/SimPEG/electromagnetics/frequency_domain/simulation_1d_stitched.py @@ -0,0 +1,182 @@ +import numpy as np +from scipy import sparse as sp +from ... import utils +from ..base_1d_stitched import BaseStitchedEM1DSimulation +from .simulation_1d import Simulation1DLayered +from .survey import Survey +from ... import maps +from multiprocessing import Pool + + +def run_simulation_frequency_domain(args): + """ + This method simulates the EM response or computes the sensitivities for + a single sounding. The method allows for parallelization of + the stitched 1D problem. + :param src: a EM1DFM source object + :param topo: Topographic location (x, y, z) + :param np.array thicknesses: np.array(N-1,) layer thicknesses for a single sounding + :param np.array sigma: np.array(N,) layer conductivities for a single sounding + :param np.array eta: np.array(N,) intrinsic chargeabilities for a single sounding + :param np.array tau: np.array(N,) Cole-Cole time constant for a single sounding + :param np.array c: np.array(N,) Cole-Cole frequency distribution constant for a single sounding + :param np.array chi: np.array(N,) magnetic susceptibility for a single sounding + :param np.array dchi: np.array(N,) DC susceptibility for magnetic viscosity for a single sounding + :param np.array tau1: np.array(N,) lower time-relaxation constant for magnetic viscosity for a single sounding + :param np.array tau2: np.array(N,) upper time-relaxation constant for magnetic viscosity for a single sounding + :param float h: source height for a single sounding + :param string output_type: "response", 
"sensitivity_sigma", "sensitivity_height" + :param bool invert_height: boolean switch for inverting for source height + :return: response or sensitivities + """ + + ( + source_list, + topo, + thicknesses, + sigma, + eta, + tau, + c, + chi, + dchi, + tau1, + tau2, + h, + output_type, + ) = args + + n_layer = len(thicknesses) + 1 + local_survey = Survey(source_list) + n_src = len(source_list) + wires = maps.Wires(("sigma", n_layer), ("h", n_src)) + sigma_map = wires.sigma + h_map = wires.h + + sim = Simulation1DLayered( + survey=local_survey, + thicknesses=thicknesses, + sigmaMap=sigma_map, + hMap=h_map, + eta=eta, + tau=tau, + c=c, + topo=topo, + hankel_filter="key_101_2009", + ) + + model = np.r_[sigma, h * np.ones(n_src)] + if output_type == "sensitivity": + J = sim.getJ(model) + J["dh"] = J["dh"].sum(axis=1) + return J + else: + em_response = sim.dpred(model) + return em_response + + +####################################################################### +# STITCHED 1D SIMULATION CLASS AND GLOBAL FUNCTIONS +####################################################################### + + +class Simulation1DLayeredStitched(BaseStitchedEM1DSimulation): + _simulation_type = "frequency" + + def forward(self, m): + self.model = m + + if self.verbose: + print(">> Compute response") + + run_simulation = run_simulation_frequency_domain + + if self.parallel: + if self.verbose: + print("parallel") + # This assumes the same # of layers for each of sounding + # if self.n_sounding_for_chunk is None: + pool = Pool(self.n_cpu) + result = pool.map( + run_simulation, + [ + self.input_args(i, output_type="forward") + for i in range(self.n_sounding) + ], + ) + + pool.close() + pool.join() + else: + result = [ + run_simulation(self.input_args(i, output_type="forward")) + for i in range(self.n_sounding) + ] + + return np.hstack(result) + + def getJ(self, m): + """ + Compute d F / d sigma + """ + self.model = m + if getattr(self, "_J", None) is None: + if self.verbose: + print(">> Compute 
J") + + # if self._coefficients_set is False: + # self.get_coefficients() + + run_simulation = run_simulation_frequency_domain + + if self.parallel: + if self.verbose: + print(">> Start pooling") + + pool = Pool(self.n_cpu) + + # Deprecate this for now, but revisit later + # It is an idea of chunking for parallelization + # if self.n_sounding_for_chunk is None: + self._J = pool.map( + run_simulation, + [ + self.input_args(i, output_type="sensitivity") + for i in range(self.n_sounding) + ], + ) + + if self.verbose: + print(">> End pooling and form J matrix") + + else: + self._J = [ + run_simulation(self.input_args(i, output_type="sensitivity")) + for i in range(self.n_sounding) + ] + return self._J + + def getJ_sigma(self, m): + """ + Compute d F / d sigma + """ + if getattr(self, "_Jmatrix_sigma", None) is None: + J = self.getJ(m) + self._Jmatrix_sigma = np.hstack( + [utils.mkvc(J[i]["ds"]) for i in range(self.n_sounding)] + ) + self._Jmatrix_sigma = sp.coo_matrix( + (self._Jmatrix_sigma, self.IJLayers), dtype=float + ).tocsr() + return self._Jmatrix_sigma + + def getJ_height(self, m): + if getattr(self, "_Jmatrix_height", None) is None: + J = self.getJ(m) + self._Jmatrix_height = np.hstack( + [utils.mkvc(J[i]["dh"]) for i in range(self.n_sounding)] + ) + self._Jmatrix_height = sp.coo_matrix( + (self._Jmatrix_height, self.IJHeight), dtype=float + ).tocsr() + return self._Jmatrix_height diff --git a/SimPEG/electromagnetics/frequency_domain/sources.py b/SimPEG/electromagnetics/frequency_domain/sources.py index bc54e0fdf4..61fac07c95 100644 --- a/SimPEG/electromagnetics/frequency_domain/sources.py +++ b/SimPEG/electromagnetics/frequency_domain/sources.py @@ -39,9 +39,10 @@ class BaseFDEMSrc(BaseEMSrc): _hPrimary = None _jPrimary = None - def __init__(self, receiver_list, frequency, location=None, **kwargs): + def __init__(self, receiver_list, frequency, location=None, i_sounding=0, **kwargs): super().__init__(receiver_list=receiver_list, location=location, **kwargs) 
self.frequency = frequency + self.i_sounding = i_sounding @property def frequency(self): @@ -59,6 +60,20 @@ def frequency(self, freq): freq = validate_float("frequency", freq, min_val=0.0) self._frequency = freq + @property + def i_sounding(self): + """Sounding number for the source + + Returns + ------- + int + """ + return self._i_sounding + + @i_sounding.setter + def i_sounding(self, value): + self._i_sounding = validate_integer("i_sounding", value, min_val=0) + def bPrimary(self, simulation): """Compute primary magnetic flux density diff --git a/SimPEG/electromagnetics/frequency_domain/survey.py b/SimPEG/electromagnetics/frequency_domain/survey.py index 4df3a90f05..9fe0858c11 100644 --- a/SimPEG/electromagnetics/frequency_domain/survey.py +++ b/SimPEG/electromagnetics/frequency_domain/survey.py @@ -16,13 +16,22 @@ def __init__(self, source_list, **kwargs): super(Survey, self).__init__(source_list, **kwargs) _frequency_dict = {} + _source_location = {} + _source_location_by_sounding = {} for src in self.source_list: if src.frequency not in _frequency_dict: _frequency_dict[src.frequency] = [] _frequency_dict[src.frequency] += [src] + if src.i_sounding not in _source_location: + _source_location[src.i_sounding] = [] + _source_location_by_sounding[src.i_sounding] = [] + _source_location[src.i_sounding] += [src] + _source_location_by_sounding[src.i_sounding] += [src.location] self._frequency_dict = _frequency_dict self._frequencies = sorted([f for f in self._frequency_dict]) + self._source_location = _source_location + self._source_location_by_sounding = _source_location_by_sounding @property def source_list(self): @@ -97,3 +106,34 @@ def get_sources_by_frequency(self, frequency): frequency in self._frequency_dict ), "The requested frequency is not in this survey." 
return self._frequency_dict[frequency] + + @property + def source_location_by_sounding(self) -> dict: + """ + Source locations in the survey as a dictionary + """ + return self._source_location_by_sounding + + def get_sources_by_sounding_number(self, i_sounding): + """ + Returns the sources associated with a specific source location. + :param int i_sounding: source location number + :rtype: dictionary + :return: sources at the specified source location + """ + assert ( + i_sounding in self._source_location + ), "The requested sounding is not in this survey." + return self._source_location[i_sounding] + + @property + def vnD_by_sounding(self) -> dict: + if getattr(self, "_vnD_by_sounding", None) is None: + self._vnD_by_sounding = {} + for i_sounding in self.source_location_by_sounding: + source_list = self.get_sources_by_sounding_number(i_sounding) + nD = 0 + for src in source_list: + nD += src.nD + self._vnD_by_sounding[i_sounding] = nD + return self._vnD_by_sounding diff --git a/SimPEG/electromagnetics/time_domain/__init__.py b/SimPEG/electromagnetics/time_domain/__init__.py index dcf8dde9a8..f4b57dbd05 100644 --- a/SimPEG/electromagnetics/time_domain/__init__.py +++ b/SimPEG/electromagnetics/time_domain/__init__.py @@ -13,7 +13,9 @@ Simulation1DLayered Simulation3DMagneticFluxDensity + Simulation3DMagneticFluxDensityFaceEdgeConductivity Simulation3DElectricField + Simulation3DElectricFieldFaceEdgeConductivity Simulation3DMagneticField Simulation3DCurrentDensity @@ -70,7 +72,9 @@ :toctree: generated/ Fields3DMagneticFluxDensity + Fields3DMagneticFluxDensityFaceEdgeConductivity Fields3DElectricField + Fields3DElectricFieldFaceEdgeConductivity Fields3DMagneticField Fields3DCurrentDensity @@ -91,14 +95,19 @@ """ from .simulation import ( Simulation3DMagneticFluxDensity, + Simulation3DMagneticFluxDensityFaceEdgeConductivity, Simulation3DElectricField, + Simulation3DElectricFieldFaceEdgeConductivity, Simulation3DMagneticField, Simulation3DCurrentDensity, ) from .simulation_1d import
Simulation1DLayered +from .simulation_1d_stitched import Simulation1DLayeredStitched from .fields import ( Fields3DMagneticFluxDensity, + Fields3DMagneticFluxDensityFaceEdgeConductivity, Fields3DElectricField, + Fields3DElectricFieldFaceEdgeConductivity, Fields3DMagneticField, Fields3DCurrentDensity, ) diff --git a/SimPEG/electromagnetics/time_domain/fields.py b/SimPEG/electromagnetics/time_domain/fields.py index 384432c736..c7ea27400a 100644 --- a/SimPEG/electromagnetics/time_domain/fields.py +++ b/SimPEG/electromagnetics/time_domain/fields.py @@ -272,6 +272,88 @@ def _dhdtDeriv_m(self, tInd, src, v, adjoint=False): return self.simulation.MfI * (self._MfMui * self._dbdtDeriv_m(tInd, src, v)) +class Fields3DMagneticFluxDensityFaceEdgeConductivity(Fields3DMagneticFluxDensity): + """Field Storage for a TDEM simulation.""" + + # knownFields = {"bSolution": "F"} + # aliasFields = { + # "b": ["bSolution", "F", "_b"], + # "h": ["bSolution", "F", "_h"], + # "e": ["bSolution", "E", "_e"], + # "j": ["bSolution", "E", "_j"], + # "dbdt": ["bSolution", "F", "_dbdt"], + # "dhdt": ["bSolution", "F", "_dhdt"], + # } + + def startup(self): + self._times = self.simulation.times + self.__MeSigmaTauKappa = self.simulation._MeSigmaTauKappa + self.__MeSigmaTauKappaI = self.simulation._MeSigmaTauKappaI + self.__MeSigmaTauKappaDeriv = self.simulation._MeSigmaTauKappaDeriv + self.__MeSigmaTauKappaIDeriv = self.simulation._MeSigmaTauKappaIDeriv + self._edgeCurl = self.simulation.mesh.edge_curl + self._MfMui = self.simulation.MfMui + self._timeMesh = self.simulation.time_mesh + + def _e(self, bSolution, source_list, tInd): + e = self.__MeSigmaTauKappaI * (self._edgeCurl.T * (self._MfMui * bSolution)) + for i, src in enumerate(source_list): + s_e = src.s_e(self.simulation, self._times[tInd]) + e[:, i] = e[:, i] - self.__MeSigmaTauKappaI * s_e + return e + + def _eDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): + if adjoint is True: + return self._MfMui.T * ( + self._edgeCurl * 
(self.__MeSigmaTauKappaI.T * dun_dm_v) + ) + return self.__MeSigmaTauKappaI * (self._edgeCurl.T * (self._MfMui * dun_dm_v)) + + def _eDeriv_m(self, tInd, src, v, adjoint=False): + _, s_e = src.eval(self.simulation, self._times[tInd]) + bSolution = self[[src], "bSolution", tInd].flatten() + + _, s_eDeriv = src.evalDeriv(self._times[tInd], self, adjoint=adjoint) + + if adjoint is True: + return self.__MeSigmaTauKappaIDeriv( + -s_e + self._edgeCurl.T * (self._MfMui * bSolution), v, adjoint + ) - s_eDeriv(self.__MeSigmaTauKappaI.T * v) + + return self.__MeSigmaTauKappaIDeriv( + -s_e + self._edgeCurl.T * (self._MfMui * bSolution), v, adjoint + ) - self.__MeSigmaTauKappaI * s_eDeriv(v) + + def _j(self, hSolution, source_list, tInd): + return self.simulation.MeI * ( + self.__MeSigmaTauKappa * self._e(hSolution, source_list, tInd) + ) + + def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): + if adjoint: + return self._eDeriv_u( + tInd, + src, + self.__MeSigmaTauKappa.T * (self.simulation.MeI.T * dun_dm_v), + adjoint=True, + ) + return self.simulation.MeI * ( + self.__MeSigmaTauKappa * self._eDeriv_u(tInd, src, dun_dm_v) + ) + + def _jDeriv_m(self, tInd, src, v, adjoint=False): + e = self[src, "e", tInd] + if adjoint: + w = self.simulation.MeI.T * v + return self.__MeSigmaTauKappaDeriv(e).T * w + self._eDeriv_m( + tInd, src, self.__MeSigmaTauKappa.T * w, adjoint=True + ) + return self.simulation.MeI * ( + self.__MeSigmaTauKappaDeriv(e) * v + + self.__MeSigmaTauKappa * self._eDeriv_m(tInd, src, v) + ) + + class Fields3DElectricField(FieldsTDEM): """Fancy Field Storage for a TDEM simulation.""" @@ -391,6 +473,46 @@ def _dhdtDeriv_m(self, tInd, src, v, adjoint=False): return self.simulation.MfI * (self._MfMui * self._dbdtDeriv_m(tInd, src, v)) +class Fields3DElectricFieldFaceEdgeConductivity(Fields3DElectricField): + """Fancy Field Storage for a TDEM simulation.""" + + def startup(self): + self._times = self.simulation.times + self.__MeSigmaTauKappa = 
self.simulation._MeSigmaTauKappa + self.__MeSigmaTauKappaDeriv = self.simulation._MeSigmaTauKappaDeriv + self._edgeCurl = self.simulation.mesh.edge_curl + self._MfMui = self.simulation.MfMui + + def _j(self, eSolution, source_list, tInd): + return self.simulation.MeI * ( + self.__MeSigmaTauKappa * self._e(eSolution, source_list, tInd) + ) + + def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): + if adjoint: + return self._eDeriv_u( + tInd, + src, + (self.__MeSigmaTauKappa).T * (self.simulation.MeI.T * dun_dm_v), + adjoint=True, + ) + return self.simulation.MeI * ( + self.__MeSigmaTauKappa * self._eDeriv_u(tInd, src, dun_dm_v) + ) + + def _jDeriv_m(self, tInd, src, v, adjoint=False): + e = self[src, "e", tInd] + if adjoint: + w = self.simulation.MeI.T * v + return self.__MeSigmaTauKappaDeriv(e).T * w + self._eDeriv_m( + tInd, src, self.__MeSigmaTauKappa.T * w, adjoint=True + ) + return self.simulation.MeI * ( + self.__MeSigmaTauKappaDeriv(e) * v + + self.__MeSigmaTauKappa * self._eDeriv_m(tInd, src, v) + ) + + class Fields3DMagneticField(FieldsTDEM): """Fancy Field Storage for a TDEM simulation.""" diff --git a/SimPEG/electromagnetics/time_domain/receivers.py b/SimPEG/electromagnetics/time_domain/receivers.py index 3179c527af..f8dd65b4c1 100644 --- a/SimPEG/electromagnetics/time_domain/receivers.py +++ b/SimPEG/electromagnetics/time_domain/receivers.py @@ -1,6 +1,6 @@ import scipy.sparse as sp -from ...utils import mkvc, validate_type, validate_direction +from ...utils import mkvc, validate_type, validate_direction, validate_float from discretize.utils import Zero from ...survey import BaseTimeRx import warnings @@ -25,6 +25,10 @@ def __init__( times, orientation="z", use_source_receiver_offset=False, + bw_cutoff_frequency=3e5, + bw_power=0.0, + lp_cutoff_frequency=2.1e5, + lp_power=0.0, **kwargs ): proj = kwargs.pop("projComp", None) @@ -45,6 +49,11 @@ def __init__( self.orientation = orientation self.use_source_receiver_offset = use_source_receiver_offset + 
self.bw_cutoff_frequency = bw_cutoff_frequency + self.bw_power = bw_power + self.lp_cutoff_frequency = lp_cutoff_frequency + self.lp_power = lp_power + super().__init__(locations=locations, times=times, **kwargs) @property @@ -84,6 +93,70 @@ def use_source_receiver_offset(self, val): "use_source_receiver_offset", val, bool ) + @property + def bw_cutoff_frequency(self): + """Butter worth low pass filter + + Returns + ------- + numpy.ndarray + Butter worth low pass filter + """ + return self._bw_cutoff_frequency + + @bw_cutoff_frequency.setter + def bw_cutoff_frequency(self, var): + self._bw_cutoff_frequency = validate_float( + "bw_cutoff_frequency", var, min_val=0.0 + ) + + @property + def lp_cutoff_frequency(self): + """Low pass filter + + Returns + ------- + numpy.ndarray + Low pass filter + """ + return self._lp_cutoff_frequency + + @lp_cutoff_frequency.setter + def lp_cutoff_frequency(self, var): + self._lp_cutoff_frequency = validate_float( + "lp_cutoff_frequency", var, min_val=0.0 + ) + + @property + def bw_power(self): + """Butter worth low pass filter + + Returns + ------- + numpy.ndarray + Butter worth low pass filter + """ + return self._bw_power + + @bw_power.setter + def bw_power(self, var): + self._bw_power = validate_float("bw_power", var, min_val=0.0, max_val=2) + + @property + def lp_power(self): + """Low pass filter + + Returns + ------- + numpy.ndarray + Low pass filter + """ + return self._lp_power + + @lp_power.setter + def lp_power(self, var): + self._lp_power = validate_float("lp_power", var, min_val=0.0, max_val=0.99999) + def getSpatialP(self, mesh, f): """Get spatial projection matrix from mesh to receivers. 
diff --git a/SimPEG/electromagnetics/time_domain/simulation.py b/SimPEG/electromagnetics/time_domain/simulation.py index 09ae43a85e..71d7c42d0d 100644 --- a/SimPEG/electromagnetics/time_domain/simulation.py +++ b/SimPEG/electromagnetics/time_domain/simulation.py @@ -4,11 +4,14 @@ from ...data import Data from ...simulation import BaseTimeSimulation from ...utils import mkvc, sdiag, speye, Zero, validate_type, validate_float +from ...base import BaseFaceEdgeElectricalPDESimulation from ..base import BaseEMSimulation from .survey import Survey from .fields import ( Fields3DMagneticFluxDensity, + Fields3DMagneticFluxDensityFaceEdgeConductivity, Fields3DElectricField, + Fields3DElectricFieldFaceEdgeConductivity, Fields3DMagneticField, Fields3DCurrentDensity, FieldsDerivativesEB, @@ -962,6 +965,341 @@ def getAdcDeriv(self, u, v, adjoint=False): # self.Adcinv.clean() +# ------------------------------- Simulation3DElectricField ------------------------------- # +class Simulation3DMagneticFluxDensityFaceEdgeConductivity( + Simulation3DMagneticFluxDensity, BaseFaceEdgeElectricalPDESimulation +): + r""" + Starting from the quasi-static E-B formulation of Maxwell's equations + (semi-discretized) + + .. math:: + + \mathbf{C} \mathbf{e} + \frac{\partial \mathbf{b}}{\partial t} = + \mathbf{s_m} \\ + \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f} \mathbf{b} - + \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right ) + \mathbf{e} = \mathbf{s_e} + + + where :math:`\mathbf{s_e}` is an integrated quantity, we eliminate + :math:`\mathbf{e}` using + + .. math:: + + \mathbf{e} = \mathbf{M_{\sigma}^e}^{-1} \mathbf{C}^{\top} + \mathbf{M_{\mu^{-1}}^f} \mathbf{b} - + \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} \mathbf{s_e} + + + to obtain a second order semi-discretized system in :math:`\mathbf{b}` + + .. 
math:: + + \mathbf{C} \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} + \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f} \mathbf{b} + + \frac{\partial \mathbf{b}}{\partial t} = \mathbf{C} + \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} \mathbf{s_e} + \mathbf{s_m} + + + and moving everything except the time derivative to the rhs gives + + .. math:: + \frac{\partial \mathbf{b}}{\partial t} = + -\mathbf{C} \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} \mathbf{C}^{\top} + \mathbf{M_{\mu^{-1}}^f} \mathbf{b} + + \mathbf{C} \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} \mathbf{s_e} + \mathbf{s_m} + + For the time discretization, we use backward euler. To solve for the + :math:`n+1` th time step, we have + + .. math:: + + \frac{\mathbf{b}^{n+1} - \mathbf{b}^{n}}{\mathbf{dt}} = + -\mathbf{C} \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} \mathbf{C}^{\top} + \mathbf{M_{\mu^{-1}}^f} \mathbf{b}^{n+1} + + \mathbf{C} \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} \mathbf{s_e}^{n+1} + + \mathbf{s_m}^{n+1} + + + re-arranging to put :math:`\mathbf{b}^{n+1}` on the left hand side gives + + .. math:: + + (\mathbf{I} + \mathbf{dt} \mathbf{C} \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} + \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f}) \mathbf{b}^{n+1} = + \mathbf{b}^{n} + \mathbf{dt}(\mathbf{C} \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} + \mathbf{s_e}^{n+1} + \mathbf{s_m}^{n+1}) + + """ + + fieldsPair = Fields3DMagneticFluxDensityFaceEdgeConductivity #: A SimPEG.EM.TDEM.Fields3DMagneticFluxDensity object + + def getAdiag(self, tInd): + r""" + System matrix at a given time index + + .. 
math:: + + (\mathbf{I} + \mathbf{dt} \mathbf{C} \left ( + \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} + \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f}) + + """ + assert tInd >= 0 and tInd < self.nT + + dt = self.time_steps[tInd] + C = self.mesh.edge_curl + MeSigmaTauKappaI = self._MeSigmaTauKappaI + MfMui = self.MfMui + I = speye(self.mesh.n_faces) + + A = 1.0 / dt * I + (C * (MeSigmaTauKappaI * (C.T.tocsr() * MfMui))) + + if self._makeASymmetric is True: + return MfMui.T.tocsr() * A + return A + + def getAdiagDeriv(self, tInd, u, v, adjoint=False): + r""" + Product of the derivative of our system matrix with respect to the + electrical properties within the model and a vector. This includes + derivatives for volume, face and/or edge conductivities depending on + whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. + + :param float tInd: time step index + :param numpy.ndarray u: solution vector (nF,) + :param numpy.ndarray v: vector to take prodct with (nP,) or (nD,) for + adjoint + :param bool adjoint: adjoint? + :rtype: numpy.ndarray + :return: derivative of the system matrix times a vector (nP,) or + adjoint (nD,) + """ + C = self.mesh.edge_curl + MfMui = self.MfMui + + u = C.T * (MfMui * u) + + if adjoint: + if self._makeASymmetric is True: + v = MfMui * v + return self._MeSigmaTauKappaIDeriv(u, C.T * v, adjoint) + + ADeriv = C * self._MeSigmaTauKappaIDeriv(u, v, adjoint) + + if self._makeASymmetric is True: + return MfMui.T * ADeriv + return ADeriv + + def getRHS(self, tInd): + """ + Assemble the RHS + """ + C = self.mesh.edge_curl + MeSigmaTauKappaI = self._MeSigmaTauKappaI + MfMui = self.MfMui + + s_m, s_e = self.getSourceTerm(tInd) + + rhs = C * (MeSigmaTauKappaI * s_e) + s_m + if self._makeASymmetric is True: + return MfMui.T * rhs + return rhs + + def getRHSDeriv(self, tInd, src, v, adjoint=False): + """ + Derivative of the RHS. 
This includes + derivatives for volume, face and/or edge conductivities depending on + whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. + """ + + C = self.mesh.edge_curl + MeSigmaTauKappaI = self._MeSigmaTauKappaI + + _, s_e = src.eval(self, self.times[tInd]) + s_mDeriv, s_eDeriv = src.evalDeriv(self, self.times[tInd], adjoint=adjoint) + + if adjoint: + if self._makeASymmetric is True: + v = self.MfMui * v + if isinstance(s_e, Zero): + MeSigmaTauKappaIDerivT_v = Zero() + else: + MeSigmaTauKappaIDerivT_v = self._MeSigmaTauKappaIDeriv( + s_e, C.T * v, adjoint + ) + + RHSDeriv = ( + MeSigmaTauKappaIDerivT_v + + s_eDeriv(MeSigmaTauKappaI.T * (C.T * v)) + + s_mDeriv(v) + ) + + return RHSDeriv + + if isinstance(s_e, Zero): + MeSigmaTauKappaIDeriv_v = Zero() + else: + MeSigmaTauKappaIDeriv_v = self._MeSigmaTauKappaIDeriv(s_e, v, adjoint) + + RHSDeriv = ( + C * MeSigmaTauKappaIDeriv_v + + C * MeSigmaTauKappaI * s_eDeriv(v) + + s_mDeriv(v) + ) + + if self._makeASymmetric is True: + return self.MfMui.T * RHSDeriv + return RHSDeriv + + +# ------------------------------- Simulation3DElectricField ------------------------------- # +class Simulation3DElectricFieldFaceEdgeConductivity( + Simulation3DElectricField, BaseFaceEdgeElectricalPDESimulation +): + r""" + Solve the EB-formulation of Maxwell's equations for the electric field, e. + Takes into account volume, face and edge conductivities. + + Starting with + + .. math:: + + \nabla \times \mathbf{e} + \frac{\partial \mathbf{b}}{\partial t} = \mathbf{s_m} \ + \nabla \times \mu^{-1} \mathbf{b} - \sigma \mathbf{e} = \mathbf{s_e} + + + we eliminate :math:`\frac{\partial b}{\partial t}` using + + .. math:: + + \frac{\partial \mathbf{b}}{\partial t} = - \nabla \times \mathbf{e} + \mathbf{s_m} + + + taking the time-derivative of Ampere's law, we see + + .. 
math:: + + \frac{\partial}{\partial t}\left( \nabla \times \mu^{-1} \mathbf{b} - \sigma \mathbf{e} \right) = \frac{\partial \mathbf{s_e}}{\partial t} \ + \nabla \times \mu^{-1} \frac{\partial \mathbf{b}}{\partial t} - \sigma \frac{\partial\mathbf{e}}{\partial t} = \frac{\partial \mathbf{s_e}}{\partial t} + + + which gives us + + .. math:: + \nabla \times \mu^{-1} \nabla \times \mathbf{e} + \sigma \frac{\partial\mathbf{e}}{\partial t} = \nabla \times \mu^{-1} \mathbf{s_m} + \frac{\partial \mathbf{s_e}}{\partial t} + + """ + + fieldsPair = Fields3DElectricFieldFaceEdgeConductivity + + def getAdiag(self, tInd): + """ + Diagonal of the system matrix at a given time index. This includes + derivatives for volume, face and/or edge conductivities depending on + whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. + """ + assert tInd >= 0 and tInd < self.nT + + dt = self.time_steps[tInd] + C = self.mesh.edge_curl + MfMui = self.MfMui + MeSigmaTauKappa = self._MeSigmaTauKappa + + return C.T.tocsr() * (MfMui * C) + 1.0 / dt * MeSigmaTauKappa + + def getAdiagDeriv(self, tInd, u, v, adjoint=False): + r""" + Product of the derivative of diagonal system matrix with respect to the + electrical properties within the model and a vector. This includes + derivatives for volume, face and/or edge conductivities depending on + whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. + + :param float tInd: time step index + :param numpy.ndarray u: solution vector (nF,) + :param numpy.ndarray v: vector to take prodct with (nP,) or (nD,) for + adjoint + :param bool adjoint: adjoint? 
+ :rtype: numpy.ndarray + :return: derivative of the system matrix times a vector (nP,) or + adjoint (nD,) + """ + assert tInd >= 0 and tInd < self.nT + + dt = self.time_steps[tInd] + + if adjoint: + return 1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) + + return 1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) + + def getAsubdiag(self, tInd): + """ + Matrix below the diagonal + """ + assert tInd >= 0 and tInd < self.nT + + dt = self.time_steps[tInd] + + MeSigmaTauKappa = self._MeSigmaTauKappa + + return -1.0 / dt * MeSigmaTauKappa + + def getAsubdiagDeriv(self, tInd, u, v, adjoint=False): + r""" + Product of the derivative of off-diagonal system matrix with respect to the + electrical properties within the model and a vector. This includes + derivatives for volume, face and/or edge conductivities depending on + whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. + + :param float tInd: time step index + :param numpy.ndarray u: solution vector (nF,) + :param numpy.ndarray v: vector to take prodct with (nP,) or (nD,) for + adjoint + :param bool adjoint: adjoint? + :rtype: numpy.ndarray + :return: derivative of the system matrix times a vector (nP,) or + adjoint (nD,) + """ + dt = self.time_steps[tInd] + + if adjoint: + return -1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) + + return -1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) + + def getAdc(self): + MeSigmaTauKappa = self._MeSigmaTauKappa + + Grad = self.mesh.nodal_gradient + Adc = Grad.T.tocsr() * MeSigmaTauKappa * Grad + # Handling Null space of A + Adc[0, 0] = Adc[0, 0] + 1.0 + return Adc + + def getAdcDeriv(self, u, v, adjoint=False): + r""" + Product of the derivative of the DC system matrix with respect to the + electrical properties within the model and a vector. This includes + derivatives for volume, face and/or edge conductivities depending on + whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. 
+ + :param numpy.ndarray u: solution vector (nF,) + :param numpy.ndarray v: vector to take prodct with (nP,) or (nD,) for + adjoint + :param bool adjoint: adjoint? + :rtype: numpy.ndarray + :return: derivative of the system matrix times a vector (nP,) or + adjoint (nD,) + """ + Grad = self.mesh.nodal_gradient + if not adjoint: + return Grad.T * self._MeSigmaTauKappaDeriv(-u, v, adjoint) + else: + return self._MeSigmaTauKappaDeriv(-u, Grad * v, adjoint) + + ############################################################################### # # # H-J Formulation # diff --git a/SimPEG/electromagnetics/time_domain/simulation_1d.py b/SimPEG/electromagnetics/time_domain/simulation_1d.py index 83568857e3..2659cd1e38 100644 --- a/SimPEG/electromagnetics/time_domain/simulation_1d.py +++ b/SimPEG/electromagnetics/time_domain/simulation_1d.py @@ -11,6 +11,7 @@ from scipy.constants import mu_0 from scipy.interpolate import InterpolatedUnivariateSpline as iuSpline from scipy.special import roots_legendre +from scipy import signal from empymod import filters from empymod.transform import get_dlf_points @@ -78,6 +79,7 @@ def get_coefficients(self): self._inv_lambs, self._C0s, self._C1s, + self._W, ) def _set_coefficients(self, coefficients): @@ -88,6 +90,7 @@ def _set_coefficients(self, coefficients): self._inv_lambs = coefficients[4] self._C0s = coefficients[5] self._C1s = coefficients[6] + self._W = coefficients[7] self._coefficients_set = True return @@ -218,10 +221,10 @@ def fields(self, m): receiver and outputs it as a list. Used for computing response or sensitivities. 
""" - self._compute_coefficients() - self.model = m + self._compute_coefficients() + C0s = self._C0s C1s = self._C1s @@ -261,17 +264,23 @@ def getJ(self, m, f=None): # Grab a copy C0s_dh = C0s.copy() C1s_dh = C1s.copy() - h_vec = self.h - i = 0 - for i_src, src in enumerate(self.survey.source_list): - h = h_vec[i_src] - nD = sum(rx.locations.shape[0] for rx in src.receiver_list) - ip1 = i + nD - v = np.exp(-lambs[i:ip1] * h) - C0s_dh[i:ip1] *= v * -lambs[i:ip1] - C1s_dh[i:ip1] *= v * -lambs[i:ip1] - i = ip1 - # J will be n_d * n_src (each source has it's own h)... + # h_vec = self.h + # i = 0 + # for i_src, src in enumerate(self.survey.source_list): + # rx = src.receiver_list[0] + # h = h_vec[i_src] + # # if rx.use_source_receiver_offset: + # # dz = rx.locations[:, 2] + # # else: + # # dz = rx.locations[:, 2] - src.location[2] + # nD = sum(rx.locations.shape[0] for rx in src.receiver_list) + # ip1 = i + nD + # C0s_dh[i:ip1] *= 2 * -lambs[i:ip1] + # C1s_dh[i:ip1] *= 2 * -lambs[i:ip1] + # i = ip1 + # # J will be n_d * n_src (each source has it's own h)... + C0s_dh *= 2 * -lambs + C1s_dh *= 2 * -lambs rTE = rTE_forward(frequencies, unique_lambs, sig, mu, self.thicknesses) rTE = rTE[:, inv_lambs] @@ -285,7 +294,7 @@ def getJ(self, m, f=None): # need to re-arange v_dh as it's currently (n_data x n_freqs) # however it already contains all the relevant information... # just need to map it from the rx index to the source index associated.. - v_dh = np.zeros((self.survey.nSrc, *v_dh_temp.shape)) + v_dh = np.zeros((self.survey.nSrc, *v_dh_temp.shape), dtype=complex) i = 0 for i_src, src in enumerate(self.survey.source_list): @@ -353,13 +362,25 @@ def _project_to_data(self, v): v_slice = v[np.arange(i, i_p1)] # this should order it as location changing faster than time # i.e. 
loc_1 t_1, loc_2 t_1, loc1 t2, loc2 t2 + + frequencies = self._frequencies + w = 2 * np.pi * frequencies + wc_lp = 2 * np.pi * rx.lp_cutoff_frequency + h_lp = (1 + 1j * w / wc_lp) ** (-rx.lp_power) # low pass filter + wc_bw = 2 * np.pi * rx.bw_cutoff_frequency + numer, denom = signal.butter(rx.bw_power, wc_bw, "low", analog=True) + _, h_bw = signal.freqs(numer, denom, worN=w) + h = h_lp * h_bw + if v.ndim == 3: + v_slice *= h[None, :, None] if isinstance(rx, (PointMagneticFluxDensity, PointMagneticField)): d = np.einsum("ij,...jk->...ik", As[i_A], v_slice.imag) else: d = np.einsum("ij,...jk->...ik", As[i_A], v_slice.real) out[i_dat:i_datp1] = d.reshape((-1, v.shape[-1]), order="F") else: + v_slice *= h[None, :] if isinstance(rx, (PointMagneticFluxDensity, PointMagneticField)): d = np.einsum("ij,...j->...i", As[i_A], v_slice.imag) else: diff --git a/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py b/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py new file mode 100644 index 0000000000..9f05cc0811 --- /dev/null +++ b/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py @@ -0,0 +1,208 @@ +import numpy as np +from scipy import sparse as sp +from ... import utils +from ..base_1d_stitched import BaseStitchedEM1DSimulation +from .simulation_1d import Simulation1DLayered +from .survey import Survey +from ... import maps +from multiprocessing import Pool + + +def run_simulation_time_domain(args): + import os + + os.environ["MKL_NUM_THREADS"] = "1" + """ + This method simulates the EM response or computes the sensitivities for + a single sounding. The method allows for parallelization of + the stitched 1D problem. 
+ :param src: a EM1DTM source object + :param topo: Topographic location (x, y, z) + :param np.array thicknesses: np.array(N-1,) layer thicknesses for a single sounding + :param np.array sigma: np.array(N,) layer conductivities for a single sounding + :param np.array eta: np.array(N,) intrinsic chargeabilities for a single sounding + :param np.array tau: np.array(N,) Cole-Cole time constant for a single sounding + :param np.array c: np.array(N,) Cole-Cole frequency distribution constant for a single sounding + :param np.array chi: np.array(N,) magnetic susceptibility for a single sounding + :param np.array dchi: np.array(N,) DC susceptibility for magnetic viscosity for a single sounding + :param np.array tau1: np.array(N,) lower time-relaxation constant for magnetic viscosity for a single sounding + :param np.array tau2: np.array(N,) upper time-relaxation constant for magnetic viscosity for a single sounding + :param float h: source height for a single sounding + :param string output_type: "response", "sensitivity_sigma", "sensitivity_height" + :param bool invert_height: boolean switch for inverting for source height + :return: response or sensitivities + """ + + ( + source_list, + topo, + thicknesses, + sigma, + eta, + tau, + c, + chi, + dchi, + tau1, + tau2, + h, + output_type, + # return_projection, + # coefficients + ) = args + + n_layer = len(thicknesses) + 1 + n_src = len(source_list) + + local_survey = Survey(source_list) + wires = maps.Wires(("sigma", n_layer), ("h", n_src)) + sigma_map = wires.sigma + h_map = wires.h + + sim = Simulation1DLayered( + survey=local_survey, + thicknesses=thicknesses, + sigmaMap=sigma_map, + hMap=h_map, + eta=eta, + tau=tau, + c=c, + topo=topo, + hankel_filter="key_101_2009", + ) + model = np.r_[sigma, h * np.ones(n_src)] + if output_type == "sensitivity": + J = sim.getJ(model) + # we assumed the tx heights in a sounding is fixed + J["dh"] = J["dh"].sum(axis=1) + return J + else: + em_response = sim.dpred(model) + return 
em_response + + +####################################################################### +# STITCHED 1D SIMULATION CLASS AND GLOBAL FUNCTIONS +####################################################################### + + +class Simulation1DLayeredStitched(BaseStitchedEM1DSimulation): + _simulation_type = "time" + + def get_coefficients(self): + run_simulation = run_simulation_time_domain + + if self.verbose: + print(">> Calculate coefficients") + if self.parallel: + pool = Pool(self.n_cpu) + self._coefficients = pool.map( + run_simulation, + [self.input_args_for_coeff(i) for i in range(self.n_sounding)], + ) + self._coefficients_set = True + pool.close() + pool.join() + else: + self._coefficients = [ + run_simulation(self.input_args_for_coeff(i)) + for i in range(self.n_sounding) + ] + + def forward(self, m): + self.model = m + + if self.verbose: + print(">> Compute response") + + # Set flat topo at zero + # if self.topo is None: + + run_simulation = run_simulation_time_domain + + if self.parallel: + if self.verbose: + print("parallel") + # This assumes the same # of layers for each of sounding + # if self.n_sounding_for_chunk is None: + pool = Pool(self.n_cpu) + result = pool.map( + run_simulation, + [ + self.input_args(i, output_type="forward") + for i in range(self.n_sounding) + ], + ) + + pool.close() + pool.join() + else: + result = [ + run_simulation(self.input_args(i, output_type="forward")) + for i in range(self.n_sounding) + ] + + return np.hstack(result) + + def getJ(self, m): + """ + Compute d F / d sigma + """ + self.model = m + if getattr(self, "_J", None) is None: + if self.verbose: + print(">> Compute J") + + run_simulation = run_simulation_time_domain + + if self.parallel: + if self.verbose: + print(">> Start pooling") + + pool = Pool(self.n_cpu) + + # Deprecate this for now, but revisit later + # It is an idea of chunking for parallelization + # if self.n_sounding_for_chunk is None: + self._J = pool.map( + run_simulation, + [ + self.input_args(i, 
output_type="sensitivity") + for i in range(self.n_sounding) + ], + ) + + if self.verbose: + print(">> End pooling and form J matrix") + + else: + self._J = [ + run_simulation(self.input_args(i, output_type="sensitivity")) + for i in range(self.n_sounding) + ] + return self._J + + def getJ_sigma(self, m): + """ + Compute d F / d sigma + """ + if getattr(self, "_Jmatrix_sigma", None) is None: + J = self.getJ(m) + self._Jmatrix_sigma = np.hstack( + [utils.mkvc(J[i]["ds"]) for i in range(self.n_sounding)] + ) + self._Jmatrix_sigma = sp.coo_matrix( + (self._Jmatrix_sigma, self.IJLayers), dtype=float + ).tocsr() + return self._Jmatrix_sigma + + def getJ_height(self, m): + if getattr(self, "_Jmatrix_height", None) is None: + J = self.getJ(m) + self._Jmatrix_height = np.hstack( + [utils.mkvc(J[i]["dh"]) for i in range(self.n_sounding)] + ) + self._Jmatrix_height = sp.coo_matrix( + (self._Jmatrix_height, self.IJHeight), dtype=float + ).tocsr() + return self._Jmatrix_height diff --git a/SimPEG/electromagnetics/time_domain/sources.py b/SimPEG/electromagnetics/time_domain/sources.py index fa37081259..1a77bf8650 100644 --- a/SimPEG/electromagnetics/time_domain/sources.py +++ b/SimPEG/electromagnetics/time_domain/sources.py @@ -3,6 +3,7 @@ import numpy as np from geoana.em.static import CircularLoopWholeSpace, MagneticDipoleWholeSpace from scipy.constants import mu_0 +from discretize import SimplexMesh from ...utils import Zero, sdiag from ...utils.code_utils import ( @@ -1037,6 +1038,7 @@ def __init__( location=None, waveform=None, srcType=None, + i_sounding=0, **kwargs, ): if waveform is None: @@ -1049,6 +1051,8 @@ def __init__( if srcType is not None: self.srcType = srcType + self.i_sounding = i_sounding + @property def waveform(self): """Current waveform for the source @@ -1079,6 +1083,20 @@ def srcType(self): def srcType(self, var): self._srcType = validate_string("srcType", var, ["inductive", "galvanic"]) + @property + def i_sounding(self): + """Sounding number for the 
source + + Returns + ------- + int + """ + return self._i_sounding + + @i_sounding.setter + def i_sounding(self, value): + self._i_sounding = validate_integer("i_sounding", value, min_val=0) + def bInitial(self, simulation): """Return initial B-field (``Zero`` for ``BaseTDEMSrc`` class) @@ -1310,25 +1328,39 @@ def _srcFct(self, obsLoc, coordinates="cartesian"): def _aSrc(self, simulation): coordinates = "cartesian" - if simulation._formulation == "EB": - gridX = simulation.mesh.gridEx - gridY = simulation.mesh.gridEy - gridZ = simulation.mesh.gridEz - elif simulation._formulation == "HJ": - gridX = simulation.mesh.gridFx - gridY = simulation.mesh.gridFy - gridZ = simulation.mesh.gridFz - - if simulation.mesh._meshType == "CYL": - coordinates = "cylindrical" - if simulation.mesh.is_symmetric: - return self._srcFct(gridY)[:, 1] - - ax = self._srcFct(gridX, coordinates)[:, 0] - ay = self._srcFct(gridY, coordinates)[:, 1] - az = self._srcFct(gridZ, coordinates)[:, 2] - a = np.concatenate((ax, ay, az)) + if isinstance(simulation.mesh, SimplexMesh): + if simulation._formulation == "EB": + edges = simulation.mesh.edges + edge_tangents = simulation.mesh.edge_tangents + axyz = self._srcFct(edges, coordinates) + a = np.sum(axyz * edge_tangents, axis=1) + else: + faces = simulation.mesh.faces + face_normals = simulation.mesh.face_normals + axyz = self._srcFct(faces, coordinates) + a = np.sum(axyz * face_normals, axis=1) + + else: + if simulation._formulation == "EB": + gridX = simulation.mesh.gridEx + gridY = simulation.mesh.gridEy + gridZ = simulation.mesh.gridEz + + elif simulation._formulation == "HJ": + gridX = simulation.mesh.gridFx + gridY = simulation.mesh.gridFy + gridZ = simulation.mesh.gridFz + + if simulation.mesh._meshType == "CYL": + coordinates = "cylindrical" + if simulation.mesh.is_symmetric: + return self._srcFct(gridY)[:, 1] + + ax = self._srcFct(gridX, coordinates)[:, 0] + ay = self._srcFct(gridY, coordinates)[:, 1] + az = self._srcFct(gridZ, 
coordinates)[:, 2] + a = np.concatenate((ax, ay, az)) return a diff --git a/SimPEG/electromagnetics/time_domain/survey.py b/SimPEG/electromagnetics/time_domain/survey.py index e8798d8048..929813d662 100644 --- a/SimPEG/electromagnetics/time_domain/survey.py +++ b/SimPEG/electromagnetics/time_domain/survey.py @@ -20,6 +20,19 @@ class Survey(BaseSurvey): def __init__(self, source_list, **kwargs): super(Survey, self).__init__(source_list, **kwargs) + _source_location = {} + _source_location_by_sounding = {} + + for src in source_list: + if src.i_sounding not in _source_location: + _source_location[src.i_sounding] = [] + _source_location_by_sounding[src.i_sounding] = [] + _source_location[src.i_sounding] += [src] + _source_location_by_sounding[src.i_sounding] += [src.location] + + self._source_location = _source_location + self._source_location_by_sounding = _source_location_by_sounding + @property def source_list(self): """List of TDEM sources associated with the survey @@ -36,3 +49,34 @@ def source_list(self, new_list): self._source_list = validate_list_of_types( "source_list", new_list, BaseTDEMSrc, ensure_unique=True ) + + @property + def source_location_by_sounding(self): + """ + Source location in the survey as a dictionary + """ + return self._source_location_by_sounding + + def get_sources_by_sounding_number(self, i_sounding): + """ + Returns the sources associated with a specific source location. + :param float i_sounding: source location number + :rtype: dictionary + :return: sources at the sepcified source location + """ + assert ( + i_sounding in self._source_location + ), "The requested sounding is not in this survey." 
+ return self._source_location[i_sounding] + + @property + def vnD_by_sounding(self): + if getattr(self, "_vnD_by_sounding", None) is None: + self._vnD_by_sounding = {} + for i_sounding in self.source_location_by_sounding: + source_list = self.get_sources_by_sounding_number(i_sounding) + nD = 0 + for src in source_list: + nD += src.nD + self._vnD_by_sounding[i_sounding] = nD + return self._vnD_by_sounding diff --git a/SimPEG/electromagnetics/utils/em1d_utils.py b/SimPEG/electromagnetics/utils/em1d_utils.py index 21a08dbd6a..1d023f5af9 100644 --- a/SimPEG/electromagnetics/utils/em1d_utils.py +++ b/SimPEG/electromagnetics/utils/em1d_utils.py @@ -1,8 +1,22 @@ import numpy as np from geoana.em.fdem.base import skin_depth from geoana.em.tdem import diffusion_distance +import matplotlib.pyplot as plt from SimPEG import utils +from discretize import TensorMesh + +from SimPEG.utils.code_utils import ( + validate_ndarray_with_shape, +) + +from scipy.spatial import cKDTree as kdtree +import scipy.sparse as sp +from matplotlib.colors import LogNorm + + +def set_mesh_1d(hz): + return TensorMesh([hz], x0=[0]) def get_vertical_discretization(n_layer, minimum_dz, geomtric_factor): @@ -219,3 +233,428 @@ def LogUniform(f, chi_inf=0.05, del_chi=0.05, tau1=1e-5, tau2=1e-2): return chi_inf + del_chi * ( 1 - np.log((1 + 1j * w * tau2) / (1 + 1j * w * tau1)) / np.log(tau2 / tau1) ) + + +############################################################# +# PLOTTING RESTIVITY MODEL +############################################################# + + +class Stitched1DModel: + def __init__( + self, + topography=None, + physical_property=None, + line=None, + time_stamp=None, + thicknesses=None, + **kwargs + ): + super().__init__(**kwargs) + + self.topography = topography + self.physical_property = physical_property + self.line = line + self.time_stamp = time_stamp + self.thicknesses = thicknesses + + @property + def topography(self): + """Topography + + Returns + ------- + (n_sounding, n_dim) 
np.ndarray + Topography. + """ + return self._topography + + @topography.setter + def topography(self, locs): + self._topography = validate_ndarray_with_shape( + "topography", locs, shape=("*", "*"), dtype=float + ) + + @property + def physical_property(self): + """Physical property + + Returns + ------- + (n_sounding x n_layer,) np.ndarray + physical_property. + """ + return self._physical_property + + @physical_property.setter + def physical_property(self, values): + self._physical_property = validate_ndarray_with_shape( + "physical_property", values, shape=("*"), dtype=float + ) + + @property + def line(self): + """Line number + + Returns + ------- + (n_sounding,) np.ndarray + line. + """ + return self._line + + @line.setter + def line(self, values): + self._line = validate_ndarray_with_shape("line", values, shape=("*"), dtype=int) + + @property + def timestamp(self): + """Time stamp + + Returns + ------- + (n_sounding,) np.ndarray + timestamp. + """ + return self._timestamp + + @timestamp.setter + def timestamp(self, values): + self._timestamp = validate_ndarray_with_shape( + "timestamp", values, shape=("*"), dtype=float + ) + + @property + def thicknesses(self): + """Layer thicknesses + + Returns + ------- + (n_sounding,) np.ndarray + thicknesses. 
+ """ + return self._thicknesses + + @thicknesses.setter + def thicknesses(self, values): + self._thicknesses = validate_ndarray_with_shape( + "thicknesses", values, shape=("*"), dtype=float + ) + + @property + def n_layer(self): + return len(self.hz) + + @property + def hz(self): + if getattr(self, "_hz", None) is None: + self._hz = np.r_[self.thicknesses, self.thicknesses[-1]] + return self._hz + + @property + def n_sounding(self): + if getattr(self, "_n_sounding", None) is None: + self._n_sounding = self.topography.shape[0] + return self._n_sounding + + @property + def unique_line(self): + if getattr(self, "_unique_line", None) is None: + if self.line is None: + raise Exception("line information is required!") + self._unique_line = np.unique(self.line) + return self._unique_line + + @property + def xyz(self): + if getattr(self, "_xyz", None) is None: + xyz = np.empty((self.n_layer, self.topography.shape[0], 3), order="F") + for i_xy in range(self.topography.shape[0]): + z = -self.mesh_1d.vectorCCx + self.topography[i_xy, 2] + x = np.ones_like(z) * self.topography[i_xy, 0] + y = np.ones_like(z) * self.topography[i_xy, 1] + xyz[:, i_xy, :] = np.c_[x, y, z] + self._xyz = xyz + return self._xyz + + @property + def mesh_1d(self): + if getattr(self, "_mesh_1d", None) is None: + if self.thicknesses is None: + raise Exception("thicknesses information is required!") + self._mesh_1d = set_mesh_1d(np.r_[self.hz[: self.n_layer]]) + return self._mesh_1d + + @property + def mesh_3d(self): + if getattr(self, "_mesh_3d", None) is None: + if self.mesh_3d is None: + raise Exception("Run get_mesh_3d!") + return self._mesh_3d + + @property + def physical_property_matrix(self): + if getattr(self, "_physical_property_matrix", None) is None: + if self.physical_property is None: + raise Exception("physical_property information is required!") + self._physical_property_matrix = self.physical_property.reshape( + (self.n_layer, self.n_sounding), order="F" + ) + return 
self._physical_property_matrix + + @property + def depth_matrix(self): + if getattr(self, "_depth_matrix", None) is None: + if self.hz.size == self.n_layer: + depth = np.cumsum(np.r_[0, self.hz]) + self._depth_matrix = np.tile(depth, (self.n_sounding, 1)).T + else: + self._depth_matrix = np.hstack( + ( + np.zeros((self.n_sounding, 1)), + np.cumsum( + self.hz.reshape((self.n_sounding, self.n_layer)), axis=1 + ), + ) + ).T + return self._depth_matrix + + @property + def distance(self): + if getattr(self, "_distance", None) is None: + self._distance = np.zeros(self.n_sounding, dtype=float) + for line_tmp in self.unique_line: + ind_line = self.line == line_tmp + xy_line = self.topography[ind_line, :2] + distance_line = np.r_[ + 0, np.cumsum(np.sqrt((np.diff(xy_line, axis=0) ** 2).sum(axis=1))) + ] + self._distance[ind_line] = distance_line + return self._distance + + def plot_section( + self, + i_layer=0, + i_line=0, + x_axis="x", + plot_type="contour", + physical_property=None, + clim=None, + ax=None, + cmap="viridis", + ncontour=20, + scale="log", + show_colorbar=True, + aspect=1, + zlim=None, + dx=20.0, + invert_xaxis=False, + alpha=0.7, + pcolorOpts={}, + ): + ind_line = self.line == self.unique_line[i_line] + if physical_property is not None: + physical_property_matrix = physical_property.reshape( + (self.n_layer, self.n_sounding), order="F" + ) + else: + physical_property_matrix = self.physical_property_matrix + + if x_axis.lower() == "y": + x_ind = 1 + xlabel = "Northing (m)" + elif x_axis.lower() == "x": + x_ind = 0 + xlabel = "Easting (m)" + elif x_axis.lower() == "distance": + xlabel = "Distance (m)" + + if ax is None: + plt.figure(figsize=(15, 10)) + ax = plt.subplot(111) + + if clim is None: + vmin = np.percentile(physical_property_matrix, 5) + vmax = np.percentile(physical_property_matrix, 95) + else: + vmin, vmax = clim + + if scale == "log": + norm = LogNorm(vmin=vmin, vmax=vmax) + vmin = None + vmax = None + else: + norm = None + + ind_line = 
np.arange(ind_line.size)[ind_line] + + for i in ind_line: + inds_temp = [i] + if x_axis == "distance": + x_tmp = self.distance[i] + else: + x_tmp = self.topography[i, x_ind] + + topo_temp = np.c_[x_tmp - dx, x_tmp + dx] + out = ax.pcolormesh( + topo_temp, + -self.depth_matrix[:, i] + self.topography[i, 2], + physical_property_matrix[:, inds_temp], + cmap=cmap, + alpha=alpha, + vmin=vmin, + vmax=vmax, + norm=norm, + shading="auto", + **pcolorOpts + ) + + if show_colorbar: + cb = plt.colorbar(out, ax=ax, fraction=0.01) + cb.set_label("Conductivity (S/m)") + + ax.set_aspect(aspect) + ax.set_xlabel(xlabel) + ax.set_ylabel("Elevation (m)") + if zlim is not None: + ax.set_ylim(zlim) + + if x_axis == "distance": + xlim = ( + self.distance[ind_line].min() - dx, + self.distance[ind_line].max() + dx, + ) + else: + xlim = ( + self.topography[ind_line, x_ind].min() - dx, + self.topography[ind_line, x_ind].max() + dx, + ) + if invert_xaxis: + ax.set_xlim(xlim[1], xlim[0]) + else: + ax.set_xlim(xlim) + + plt.tight_layout() + + if show_colorbar: + return out, ax, cb + else: + return out, ax + return (ax,) + + def get_3d_mesh( + self, + dx=None, + dy=None, + dz=None, + npad_x=0, + npad_y=0, + npad_z=0, + core_z_length=None, + nx=100, + ny=100, + ): + xmin, xmax = self.topography[:, 0].min(), self.topography[:, 0].max() + ymin, ymax = self.topography[:, 1].min(), self.topography[:, 1].max() + zmin, zmax = self.topography[:, 2].min(), self.topography[:, 2].max() + zmin -= self.mesh_1d.vectorNx.max() + + lx = xmax - xmin + ly = ymax - ymin + lz = zmax - zmin + + if dx is None: + dx = lx / nx + print((">> dx:%.1e") % (dx)) + if dy is None: + dy = ly / ny + print((">> dy:%.1e") % (dy)) + if dz is None: + dz = np.median(self.mesh_1d.hx) + + nx = int(np.floor(lx / dx)) + ny = int(np.floor(ly / dy)) + nz = int(np.floor(lz / dz)) + + if nx * ny * nz > 1e6: + warnings.warn( + ("Size of the mesh (%i) will greater than 1e6") % (nx * ny * nz) + ) + hx = [(dx, npad_x, -1.2), (dx, nx), (dx, 
npad_x, -1.2)] + hy = [(dy, npad_y, -1.2), (dy, ny), (dy, npad_y, -1.2)] + hz = [(dz, npad_z, -1.2), (dz, nz)] + + zmin = self.topography[:, 2].max() - utils.meshTensor(hz).sum() + self._mesh_3d = TensorMesh([hx, hy, hz], x0=[xmin, ymin, zmin]) + + return self.mesh_3d + + @property + def P(self): + if getattr(self, "_P", None) is None: + raise Exception("Run get_interpolation_matrix first!") + return self._P + + def get_interpolation_matrix(self, npts=20, epsilon=None): + tree_2d = kdtree(self.topography[:, :2]) + xy = utils.ndgrid(self.mesh_3d.vectorCCx, self.mesh_3d.vectorCCy) + + distance, inds = tree_2d.query(xy, k=npts) + if epsilon is None: + epsilon = np.min([self.mesh_3d.hx.min(), self.mesh_3d.hy.min()]) + + w = 1.0 / (distance + epsilon) ** 2 + w = utils.sdiag(1.0 / np.sum(w, axis=1)) * (w) + I = utils.mkvc(np.arange(inds.shape[0]).reshape([-1, 1]).repeat(npts, axis=1)) + J = utils.mkvc(inds) + + self._P = sp.coo_matrix( + (utils.mkvc(w), (I, J)), shape=(inds.shape[0], self.topography.shape[0]) + ) + + mesh_1d = TensorMesh([np.r_[self.hz[:-1], 1e20]]) + + z = self.P * self.topography[:, 2] + + self._actinds = utils.surface2ind_topo(self.mesh_3d, np.c_[xy, z]) + + Z = np.empty(self.mesh_3d.vnC, dtype=float, order="F") + Z = self.mesh_3d.gridCC[:, 2].reshape( + (self.mesh_3d.nCx * self.mesh_3d.nCy, self.mesh_3d.nCz), order="F" + ) + ACTIND = self._actinds.reshape( + (self.mesh_3d.nCx * self.mesh_3d.nCy, self.mesh_3d.nCz), order="F" + ) + + self._Pz = [] + + # This part can be cythonized or parallelized + for i_xy in range(self.mesh_3d.nCx * self.mesh_3d.nCy): + actind_temp = ACTIND[i_xy, :] + z_temp = -(Z[i_xy, :] - z[i_xy]) + self._Pz.append(mesh_1d.getInterpolationMat(z_temp[actind_temp])) + + def interpolate_from_1d_to_3d(self, physical_property_1d): + physical_property_2d = self.P * ( + physical_property_1d.reshape((self.n_layer, self.n_sounding), order="F").T + ) + physical_property_3d = ( + np.ones( + (self.mesh_3d.nCx * self.mesh_3d.nCy, 
self.mesh_3d.nCz), + order="C", + dtype=float, + ) + * np.nan + ) + + ACTIND = self._actinds.reshape( + (self.mesh_3d.nCx * self.mesh_3d.nCy, self.mesh_3d.nCz), order="F" + ) + + for i_xy in range(self.mesh_3d.nCx * self.mesh_3d.nCy): + actind_temp = ACTIND[i_xy, :] + physical_property_3d[i_xy, actind_temp] = ( + self._Pz[i_xy] * physical_property_2d[i_xy, :] + ) + + return physical_property_3d diff --git a/SimPEG/electromagnetics/utils/testing_utils.py b/SimPEG/electromagnetics/utils/testing_utils.py index 3315c71061..a1818856e0 100644 --- a/SimPEG/electromagnetics/utils/testing_utils.py +++ b/SimPEG/electromagnetics/utils/testing_utils.py @@ -138,6 +138,130 @@ def getFDEMProblem(fdemType, comp, SrcList, freq, useMu=False, verbose=False): return prb +def getFDEMProblem_FaceEdgeConductivity( + fdemType, comp, SrcList, freq, useMu=False, verbose=False +): + cs = 10.0 + ncx, ncy, ncz = 0, 0, 0 + npad = 8 + hx = [(cs, npad, -1.3), (cs, ncx), (cs, npad, 1.3)] + hy = [(cs, npad, -1.3), (cs, ncy), (cs, npad, 1.3)] + hz = [(cs, npad, -1.3), (cs, ncz), (cs, npad, 1.3)] + mesh = TensorMesh([hx, hy, hz], ["C", "C", "C"]) + + if useMu is True: + wire_map = maps.Wires( + ("log_sigma", mesh.nC), + ("log_tau", mesh.nF), + ("log_kappa", mesh.nE), + ("mu", mesh.nC), + ) + else: + wire_map = maps.Wires( + ("log_sigma", mesh.nC), ("log_tau", mesh.nF), ("log_kappa", mesh.nE) + ) + + sigma_map = maps.ExpMap(nP=mesh.nC) * wire_map.log_sigma + tau_map = maps.ExpMap(nP=mesh.nF) * wire_map.log_tau + kappa_map = maps.ExpMap(nP=mesh.nE) * wire_map.log_kappa + if useMu: + mu_map = maps.IdentityMap(nP=mesh.nC) * wire_map.mu + else: + mu_map = None + + x = ( + np.array( + [np.linspace(-5.0 * cs, -2.0 * cs, 3), np.linspace(5.0 * cs, 2.0 * cs, 3)] + ) + + cs / 4.0 + ) # don't sample right by the source, slightly off alignment from either staggered grid + XYZ = utils.ndgrid(x, x, np.linspace(-2.0 * cs, 2.0 * cs, 5)) + Rx0 = getattr(fdem.Rx, "Point" + comp[0]) + if comp[-1] == "r": + real_or_imag 
= "real" + elif comp[-1] == "i": + real_or_imag = "imag" + rx0 = Rx0(XYZ, comp[1], real_or_imag) + + Src = [] + + for SrcType in SrcList: + if SrcType == "MagDipole": + Src.append( + fdem.Src.MagDipole([rx0], frequency=freq, location=np.r_[0.0, 0.0, 0.0]) + ) + elif SrcType == "MagDipole_Bfield": + Src.append( + fdem.Src.MagDipole_Bfield( + [rx0], frequency=freq, location=np.r_[0.0, 0.0, 0.0] + ) + ) + elif SrcType == "CircularLoop": + Src.append( + fdem.Src.CircularLoop( + [rx0], frequency=freq, location=np.r_[0.0, 0.0, 0.0] + ) + ) + elif SrcType == "LineCurrent": + Src.append( + fdem.Src.LineCurrent( + [rx0], + frequency=freq, + location=np.array([[0.0, 0.0, 0.0], [20.0, 0.0, 0.0]]), + ) + ) + elif SrcType == "RawVec": + S_m = np.zeros(mesh.nF) + S_e = np.zeros(mesh.nE) + S_m[ + mesh.closest_points_index([0.0, 0.0, 0.0], "Fz") + np.sum(mesh.vnF[:1]) + ] = 1e-3 + S_e[ + mesh.closest_points_index([0.0, 0.0, 0.0], "Ez") + np.sum(mesh.vnE[:1]) + ] = 1e-3 + Src.append( + fdem.Src.RawVec([rx0], freq, S_m, mesh.get_edge_inner_product() * S_e) + ) + + if verbose: + print(" Fetching {0!s} problem".format((fdemType))) + + if fdemType == "e": + survey = fdem.Survey(Src) + prb = fdem.Simulation3DElectricFieldFaceEdgeConductivity( + mesh, + survey=survey, + sigmaMap=sigma_map, + tauMap=tau_map, + kappaMap=kappa_map, + muMap=mu_map, + ) + + elif fdemType == "b": + survey = fdem.Survey(Src) + prb = fdem.Simulation3DMagneticFluxDensityFaceEdgeConductivity( + mesh, + survey=survey, + sigmaMap=sigma_map, + tauMap=tau_map, + kappaMap=kappa_map, + muMap=mu_map, + ) + + else: + raise NotImplementedError("NO SIMULATION FOR H OR J FORMULATION") + + try: + from pymatsolver import Pardiso + + prb.solver = Pardiso + except ImportError: + prb.solver = SolverLU + # prb.solver_opts = dict(check_accuracy=True) + + return prb + + def crossCheckTest( SrcList, fdemType1, @@ -147,11 +271,18 @@ def crossCheckTest( useMu=False, TOL=1e-5, verbose=False, + sigma_only=True, ): def l2norm(r): return 
np.sqrt(r.dot(r)) - prb1 = getFDEMProblem(fdemType1, comp, SrcList, freq, useMu, verbose) + if sigma_only: + prb1 = getFDEMProblem(fdemType1, comp, SrcList, freq, useMu, verbose) + else: + prb1 = getFDEMProblem_FaceEdgeConductivity( + fdemType1, comp, SrcList, freq, useMu, verbose + ) + mesh = prb1.mesh print( "Cross Checking Forward: {0!s}, {1!s} formulations - {2!s}".format( @@ -160,23 +291,38 @@ def l2norm(r): ) logsig = np.log(np.ones(mesh.nC) * CONDUCTIVITY) + logtau = np.log(np.ones(mesh.nF) * CONDUCTIVITY * np.min(mesh.h[0])) + logkappa = np.log(np.ones(mesh.nE) * CONDUCTIVITY * np.min(mesh.h[0]) ** 2) mu = np.ones(mesh.nC) * MU if addrandoms is True: logsig += np.random.randn(mesh.nC) * np.log(CONDUCTIVITY) * 1e-1 + logtau += np.random.randn(mesh.nF) * np.log(CONDUCTIVITY) * 1e-1 + logkappa += np.random.randn(mesh.nE) * np.log(CONDUCTIVITY) * 1e-1 mu += np.random.randn(mesh.nC) * MU * 1e-1 - if useMu is True: - m = np.r_[logsig, mu] + if sigma_only: + if useMu: + m = np.r_[logsig, mu] + else: + m = logsig else: - m = logsig + if useMu: + m = np.r_[logsig, logtau, logkappa, mu] + else: + m = np.r_[logsig, logtau, logkappa] d1 = prb1.dpred(m) if verbose: print(" Problem 1 solved") - prb2 = getFDEMProblem(fdemType2, comp, SrcList, freq, useMu, verbose) + if sigma_only: + prb2 = getFDEMProblem(fdemType2, comp, SrcList, freq, useMu, verbose) + else: + prb2 = getFDEMProblem_FaceEdgeConductivity( + fdemType2, comp, SrcList, freq, useMu, verbose + ) d2 = prb2.dpred(m) diff --git a/SimPEG/maps.py b/SimPEG/maps.py index 5cc526b0a0..88cf2073f6 100644 --- a/SimPEG/maps.py +++ b/SimPEG/maps.py @@ -3315,6 +3315,354 @@ def deriv(self, m, v=None): return self.P +class InjectActiveFaces(IdentityMap): + r"""Map active faces model to all faces of a mesh. + + The ``InjectActiveFaces`` class is used to define the mapping when + the model consists of diagnostic property values defined on a set of active + mesh faces; e.g. faces below topography, z-faces only. 
For a discrete set of + model parameters :math:`\mathbf{m}` defined on a set of active + faces, the mapping :math:`\mathbf{u}(\mathbf{m})` is defined as: + + .. math:: + \mathbf{u}(\mathbf{m}) = \mathbf{Pm} + \mathbf{d}\, m_\perp + + where :math:`\mathbf{P}` is a (*nF* , *nP*) projection matrix from + active faces to all mesh faces, and :math:`\mathbf{d}` is a + (*nF* , 1) matrix that projects the inactive faces value + :math:`m_\perp` to all mesh faces. + + Parameters + ---------- + mesh : discretize.BaseMesh + A discretize mesh + indActive : numpy.ndarray + Active faces array. Can be a boolean ``numpy.ndarray`` of length *mesh.nF* + or a ``numpy.ndarray`` of ``int`` containing the indices of the active faces. + valInactive : float or numpy.ndarray + The physical property value assigned to all inactive faces in the mesh + + """ + + def __init__(self, mesh, indActive=None, valInactive=0.0, nF=None): + self.mesh = mesh + self.nF = nF or mesh.nF + + self._indActive = validate_active_indices("indActive", indActive, self.nF) + self._nP = np.sum(self.indActive) + + self.P = sp.eye(self.nF, format="csr")[:, self.indActive] + + self.valInactive = valInactive + + @property + def valInactive(self): + """The physical property value assigned to all inactive faces in the mesh. 
+ + Returns + ------- + numpy.ndarray + """ + return self._valInactive + + @valInactive.setter + def valInactive(self, value): + n_inactive = self.nF - self.nP + try: + value = validate_float("valInactive", value) + value = np.full(n_inactive, value) + except Exception: + pass + value = validate_ndarray_with_shape("valInactive", value, shape=(n_inactive,)) + + self._valInactive = np.zeros(self.nF, dtype=float) + self._valInactive[~self.indActive] = value + + @property + def indActive(self): + """ + + Returns + ------- + numpy.ndarray of bool + + """ + return self._indActive + + @property + def shape(self): + """Dimensions of the mapping + + Returns + ------- + tuple of int + Where *nP* is the number of active faces and *nF* is + number of faces in the mesh, **shape** returns a + tuple (*nF* , *nP*). + """ + return (self.nF, self.nP) + + @property + def nP(self): + """Number of parameters the model acts on. + + Returns + ------- + int + Number of parameters the model acts on; i.e. the number of active faces. + """ + return int(self.indActive.sum()) + + def _transform(self, m): + if m.ndim > 1: + return self.P * m + self.valInactive[:, None] + return self.P * m + self.valInactive + + def inverse(self, u): + r"""Recover the model parameters (active faces) from a set of physical + property values defined on the entire mesh. + + For a discrete set of model parameters :math:`\mathbf{m}` defined + on a set of active faces, the mapping :math:`\mathbf{u}(\mathbf{m})` + is defined as: + + .. math:: + \mathbf{u}(\mathbf{m}) = \mathbf{Pm} + \mathbf{d} \,m_\perp + + where :math:`\mathbf{P}` is a (*nF* , *nP*) projection matrix from + active faces to all mesh faces, and :math:`\mathbf{d}` is a + (*nR* , 1) matrix that projects the inactive face value + :math:`m_\perp` to all mesh faces. + + The inverse mapping is given by: + + .. 
math:: + \mathbf{m}(\mathbf{u}) = \mathbf{P^T u} + + Parameters + ---------- + u : (mesh.nF) numpy.ndarray + A vector which contains physical property values for all + mesh faces. + """ + return self.P.T * u + + def deriv(self, m, v=None): + r"""Derivative of the mapping with respect to the input parameters. + + For a discrete set of model parameters :math:`\mathbf{m}` defined + on a set of active faces, the mapping :math:`\mathbf{u}(\mathbf{m})` + is defined as: + + .. math:: + \mathbf{u}(\mathbf{m}) = \mathbf{Pm} + \mathbf{d} \, m_\perp + + where :math:`\mathbf{P}` is a (*nF* , *nP*) projection matrix from + active faces to all mesh faces, and :math:`\mathbf{d}` is a + (*nF* , 1) matrix that projects the inactive face value + :math:`m_\perp` to all inactive mesh faces. + + the **deriv** method returns the derivative of :math:`\mathbf{u}` with respect + to the model parameters; i.e.: + + .. math:: + \frac{\partial \mathbf{u}}{\partial \mathbf{m}} = \mathbf{P} + + Note that in this case, **deriv** simply returns a sparse projection matrix. + + Parameters + ---------- + m : (nP) numpy.ndarray + A vector representing a set of model parameters. + v : (nP) numpy.ndarray + If not ``None``, the method returns the derivative times the vector *v*. + + Returns + ------- + scipy.sparse.csr_matrix + Derivative of the mapping with respect to the model parameters. If the + input argument *v* is not ``None``, the method returns the derivative times + the vector *v*. + """ + if v is not None: + return self.P * v + return self.P + + +class InjectActiveEdges(IdentityMap): + r"""Map active edges model to all edges of a mesh. + + The ``InjectActiveEdges`` class is used to define the mapping when + the model consists of diagnostic property values defined on a set of active + mesh edges; e.g. edges below topography, z-edges only. 
For a discrete set of + model parameters :math:`\mathbf{m}` defined on a set of active + edges, the mapping :math:`\mathbf{u}(\mathbf{m})` is defined as: + + .. math:: + \mathbf{u}(\mathbf{m}) = \mathbf{Pm} + \mathbf{d}\, m_\perp + + where :math:`\mathbf{P}` is a (*nE* , *nP*) projection matrix from + active edges to all mesh edges, and :math:`\mathbf{d}` is a + (*nE* , 1) matrix that projects the inactive edges value + :math:`m_\perp` to all mesh edges. + + Parameters + ---------- + mesh : discretize.BaseMesh + A discretize mesh + indActive : numpy.ndarray + Active edges array. Can be a boolean ``numpy.ndarray`` of length *mesh.nE* + or a ``numpy.ndarray`` of ``int`` containing the indices of the active edges. + valInactive : float or numpy.ndarray + The physical property value assigned to all inactive edges in the mesh. + + """ + + def __init__(self, mesh, indActive=None, valInactive=0.0, nE=None): + self.mesh = mesh + self.nE = nE or mesh.nE + + self._indActive = validate_active_indices("indActive", indActive, self.nE) + self._nP = np.sum(self.indActive) + + self.P = sp.eye(self.nE, format="csr")[:, self.indActive] + + self.valInactive = valInactive + + @property + def valInactive(self): + """The physical property value assigned to all inactive edges in the mesh. + + Returns + ------- + numpy.ndarray + """ + return self._valInactive + + @valInactive.setter + def valInactive(self, value): + n_inactive = self.nE - self.nP + try: + value = validate_float("valInactive", value) + value = np.full(n_inactive, value) + except Exception: + pass + value = validate_ndarray_with_shape("valInactive", value, shape=(n_inactive,)) + + self._valInactive = np.zeros(self.nE, dtype=float) + self._valInactive[~self.indActive] = value + + @property + def indActive(self): + """ + + Returns + ------- + numpy.ndarray of bool. 
+ + """ + return self._indActive + + @property + def shape(self): + """Dimensions of the mapping + + Returns + ------- + tuple of int + Where *nP* is the number of active edges and *nE* is + number of edges in the mesh, **shape** returns a + tuple (*nE* , *nP*). + """ + return (self.nE, self.nP) + + @property + def nP(self): + """Number of parameters the model acts on. + + Returns + ------- + int + Number of parameters the model acts on; i.e. the number of active edges. + """ + return int(self.indActive.sum()) + + def _transform(self, m): + if m.ndim > 1: + return self.P * m + self.valInactive[:, None] + return self.P * m + self.valInactive + + def inverse(self, u): + r"""Recover the model parameters (active edges) from a set of physical + property values defined on the entire mesh. + + For a discrete set of model parameters :math:`\mathbf{m}` defined + on a set of active edges, the mapping :math:`\mathbf{u}(\mathbf{m})` + is defined as: + + .. math:: + \mathbf{u}(\mathbf{m}) = \mathbf{Pm} + \mathbf{d} \,m_\perp + + where :math:`\mathbf{P}` is a (*nE* , *nP*) projection matrix from + active edges to all mesh edges, and :math:`\mathbf{d}` is a + (*nE* , 1) matrix that projects the inactive edge value + :math:`m_\perp` to all mesh edges. + + The inverse mapping is given by: + + .. math:: + \mathbf{m}(\mathbf{u}) = \mathbf{P^T u} + + Parameters + ---------- + u : (mesh.nE) numpy.ndarray + A vector which contains physical property values for all + mesh edges. + """ + return self.P.T * u + + def deriv(self, m, v=None): + r"""Derivative of the mapping with respect to the input parameters. + + For a discrete set of model parameters :math:`\mathbf{m}` defined + on a set of active edges, the mapping :math:`\mathbf{u}(\mathbf{m})` + is defined as: + + .. 
math:: + \mathbf{u}(\mathbf{m}) = \mathbf{Pm} + \mathbf{d} \, m_\perp + + where :math:`\mathbf{P}` is a (*nE* , *nP*) projection matrix from + active edges to all mesh edges, and :math:`\mathbf{d}` is a + (*nF* , 1) matrix that projects the inactive edge value + :math:`m_\perp` to all mesh edges. + + the **deriv** method returns the derivative of :math:`\mathbf{u}` with respect + to the model parameters; i.e.: + + .. math:: + \frac{\partial \mathbf{u}}{\partial \mathbf{m}} = \mathbf{P} + + Note that in this case, **deriv** simply returns a sparse projection matrix. + + Parameters + ---------- + m : (nP) numpy.ndarray + A vector representing a set of model parameters. + v : (nP) numpy.ndarray + If not ``None``, the method returns the derivative times the vector *v*. + + Returns + ------- + scipy.sparse.csr_matrix + Derivative of the mapping with respect to the model parameters. If the + input argument *v* is not ``None``, the method returns the derivative times + the vector *v*. + """ + if v is not None: + return self.P * v + return self.P + + ############################################################################### # # # Parametric Maps # diff --git a/SimPEG/optimization.py b/SimPEG/optimization.py index dbbc23ef21..b666d4bb03 100644 --- a/SimPEG/optimization.py +++ b/SimPEG/optimization.py @@ -241,16 +241,16 @@ class Minimize(object): name = "General Optimization Algorithm" #: The name of the optimization algorithm maxIter = 20 #: Maximum number of iterations - maxIterLS = 10 #: Maximum number of iterations for the line-search + maxIterLS = 20 #: Maximum number of iterations for the line-search maxStep = np.inf #: Maximum step possible, used in scaling before the line-search. LSreduction = 1e-4 #: Expected decrease in the line-search LScurvature = ( 0.9 #: Expected decrease of the slope for line search Wolfe Curvature criteria ) LSshorten = 0.5 #: Line-search step is shortened by this amount each time. 
- tolF = 1e-1 #: Tolerance on function value decrease - tolX = 1e-1 #: Tolerance on norm(x) movement - tolG = 1e-1 #: Tolerance on gradient norm + tolF = 1e-2 #: Tolerance on function value decrease + tolX = 1e-2 #: Tolerance on norm(x) movement + tolG = 1e-2 #: Tolerance on gradient norm eps = 1e-5 #: Small value stopNextIteration = False #: Stops the optimization program nicely. @@ -763,8 +763,8 @@ def _doEndIterationRemember(self, *args): class ProjectedGradient(Minimize, Remember): name = "Projected Gradient" - maxIterCG = 5 - tolCG = 1e-1 + maxIterCG = 100 + tolCG = 1e-4 lower = -np.inf upper = np.inf @@ -1043,8 +1043,8 @@ def __init__(self, **kwargs): name = "Inexact Gauss Newton" - maxIterCG = 5 - tolCG = 1e-1 + maxIterCG = 100 + tolCG = 1e-4 @property def approxHinv(self): @@ -1184,8 +1184,8 @@ def __init__(self, **kwargs): name = "Projected GNCG" - maxIterCG = 5 - tolCG = 1e-1 + maxIterCG = 100 + tolCG = 1e-4 cg_count = 0 stepOffBoundsFact = 1e-2 # perturbation of the inactive set off the bounds stepActiveset = True diff --git a/SimPEG/potential_fields/base.py b/SimPEG/potential_fields/base.py index 753a80f8fd..6e13b41932 100644 --- a/SimPEG/potential_fields/base.py +++ b/SimPEG/potential_fields/base.py @@ -1,4 +1,5 @@ import os +import warnings from multiprocessing.pool import Pool import discretize @@ -359,6 +360,12 @@ def get_dist_wgt(mesh, receiver_locations, actv, R, R0): wr : (n_cell) numpy.ndarray Distance weighting model; 0 for all inactive cells """ + warnings.warn( + "The get_dist_wgt function has been deprecated, please import " + "SimPEG.utils.distance_weighting. 
This will be removed in SimPEG 0.22.0", + FutureWarning, + stacklevel=2, + ) # Find non-zero cells if actv.dtype == "bool": inds = ( diff --git a/SimPEG/potential_fields/magnetics/_numba_functions.py b/SimPEG/potential_fields/magnetics/_numba_functions.py new file mode 100644 index 0000000000..71dbad9100 --- /dev/null +++ b/SimPEG/potential_fields/magnetics/_numba_functions.py @@ -0,0 +1,525 @@ +""" +Numba functions for magnetic simulation of rectangular prisms +""" +import numpy as np + +try: + import choclo +except ImportError: + # Define dummy jit decorator + def jit(*args, **kwargs): + return lambda f: f + + choclo = None +else: + from numba import jit, prange + + +def _sensitivity_mag( + receivers, + nodes, + sensitivity_matrix, + cell_nodes, + regional_field, + kernel_x, + kernel_y, + kernel_z, + constant_factor, + scalar_model, +): + """ + Fill the sensitivity matrix for single mag component + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_sensitivity_mag = jit(nopython=True, parallel=True)(_sensitivity_mag) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + sensitivity_matrix : (n_receivers, n_active_nodes) array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_x, kernel_y, kernel_z : callable + Kernels used to compute the desired magnetic component. For example, + for computing bx we need to use ``kernel_x=kernel_ee``, + ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``. 
+ constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the sensitivity matrix is build to work with scalar models + (susceptibilities). + If False, the sensitivity matrix is build to work with vector models + (effective susceptibilities). + + Notes + ----- + For computing the ``bx`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_ee, kernel_y=kernel_en, kernel_z=kernel_eu + + + For computing the ``by`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_en, kernel_y=kernel_nn, kernel_z=kernel_nu + + For computing the ``bz`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_eu, kernel_y=kernel_nu, kernel_z=kernel_uu + + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kx, ky, kz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kx[j] = kernel_x(dx, dy, dz, distance) + ky[j] = kernel_y(dx, dy, dz, distance) + kz[j] = kernel_z(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + ux = _kernels_in_nodes_to_cell(kx, 
nodes_indices) + uy = _kernels_in_nodes_to_cell(ky, nodes_indices) + uz = _kernels_in_nodes_to_cell(kz, nodes_indices) + if scalar_model: + sensitivity_matrix[i, k] = ( + constant_factor + * regional_field_amplitude + * (ux * fx + uy * fy + uz * fz) + ) + else: + sensitivity_matrix[i, k] = ( + constant_factor * regional_field_amplitude * ux + ) + sensitivity_matrix[i, k + n_cells] = ( + constant_factor * regional_field_amplitude * uy + ) + sensitivity_matrix[i, k + 2 * n_cells] = ( + constant_factor * regional_field_amplitude * uz + ) + + +def _sensitivity_tmi( + receivers, + nodes, + sensitivity_matrix, + cell_nodes, + regional_field, + constant_factor, + scalar_model, +): + """ + Fill the sensitivity matrix for TMI + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_sensitivity_tmi = jit(nopython=True, parallel=True)(_sensitivity_tmi) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + sensitivity_matrix : array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + The array should have a shape of ``(n_receivers, n_active_nodes)`` + if ``scalar_model`` is True. + The array should have a shape of ``(n_receivers, 3 * n_active_nodes)`` + if ``scalar_model`` is False. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the sensitivity matrix is build to work with scalar models + (susceptibilities). 
+ If False, the sensitivity matrix is build to work with vector models + (effective susceptibilities). + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) + kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) + kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) + kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) + kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) + kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + # Fill the sensitivity matrix element(s) that correspond to the + # current active cell + if 
scalar_model: + sensitivity_matrix[i, k] = ( + constant_factor + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + else: + sensitivity_matrix[i, k] = ( + constant_factor * regional_field_amplitude * bx + ) + sensitivity_matrix[i, k + n_cells] = ( + constant_factor * regional_field_amplitude * by + ) + sensitivity_matrix[i, k + 2 * n_cells] = ( + constant_factor * regional_field_amplitude * bz + ) + + +def _forward_mag( + receivers, + nodes, + model, + fields, + cell_nodes, + regional_field, + kernel_x, + kernel_y, + kernel_z, + constant_factor, + scalar_model, +): + """ + Forward model single magnetic component + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_forward = jit(nopython=True, parallel=True)(_forward_mag) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + model : (n_active_cells) or (3 * n_active_cells) array + Array containing the susceptibilities (scalar) or effective + susceptibilities (vector) of the active cells in the mesh, in SI + units. + Susceptibilities are expected if ``scalar_model`` is True, + and the array should have ``n_active_cells`` elements. + Effective susceptibilities are expected if ``scalar_model`` is False, + and the array should have ``3 * n_active_cells`` elements. + fields : (n_receivers) array + Array full of zeros where the magnetic component on each receiver will + be stored. This could be a preallocated array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). 
+ kernel_x, kernel_y, kernel_z : callable + Kernels used to compute the desired magnetic component. For example, + for computing bx we need to use ``kernel_x=kernel_ee``, + ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the forward will be computing assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the forward will be computing assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + + Notes + ----- + For computing the ``bx`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_ee, kernel_y=kernel_en, kernel_z=kernel_eu + + + For computing the ``by`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_en, kernel_y=kernel_nn, kernel_z=kernel_nu + + For computing the ``bz`` component of the magnetic field we need to use the + following kernels: + + .. 
code:: + + kernel_x=kernel_eu, kernel_y=kernel_nu, kernel_z=kernel_uu + + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kx, ky, kz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kx[j] = kernel_x(dx, dy, dz, distance) + ky[j] = kernel_y(dx, dy, dz, distance) + kz[j] = kernel_z(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + ux = _kernels_in_nodes_to_cell(kx, nodes_indices) + uy = _kernels_in_nodes_to_cell(ky, nodes_indices) + uz = _kernels_in_nodes_to_cell(kz, nodes_indices) + if scalar_model: + fields[i] += ( + constant_factor + * model[k] + * regional_field_amplitude + * (ux * fx + uy * fy + uz * fz) + ) + else: + fields[i] += ( + constant_factor + * regional_field_amplitude + * ( + ux * model[k] + + uy * model[k + n_cells] + + uz * model[k + 2 * n_cells] + ) + ) + + +def _forward_tmi( + receivers, + nodes, + model, + fields, + cell_nodes, + regional_field, + constant_factor, + scalar_model, +): + """ + Forward model the TMI + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_forward = jit(nopython=True, parallel=True)(_forward_tmi) + + Parameters + ---------- + receivers : (n_receivers, 3) 
array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + model : (n_active_cells) or (3 * n_active_cells) + Array with the susceptibility (scalar model) or the effective + susceptibility (vector model) of each active cell in the mesh. + If the model is scalar, the ``model`` array should have + ``n_active_cells`` elements and ``scalar_model`` should be True. + If the model is vector, the ``model`` array should have + ``3 * n_active_cells`` elements and ``scalar_model`` should be False. + fields : (n_receivers) array + Array full of zeros where the TMI on each receiver will be stored. This + could be a preallocated array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the sensitivity matrix is build to work with scalar models + (susceptibilities). + If False, the sensitivity matrix is build to work with vector models + (effective susceptibilities). 
+ + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) + kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) + kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) + kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) + kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) + kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + if scalar_model: + fields[i] += ( + constant_factor + * model[k] + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + else: + fields[i] += ( + constant_factor + * 
regional_field_amplitude + * ( + bx * model[k] + + by * model[k + n_cells] + + bz * model[k + 2 * n_cells] + ) + ) + + +@jit(nopython=True) +def _kernels_in_nodes_to_cell(kernels, nodes_indices): + """ + Evaluate integral on a given cell from evaluation of kernels on nodes + + Parameters + ---------- + kernels : (n_active_nodes,) array + Array with kernel values on each one of the nodes in the mesh. + nodes_indices : (8,) array of int + Indices of the nodes for the current cell in "F" order (x changes + faster than y, and y faster than z). + + Returns + ------- + float + """ + result = ( + -kernels[nodes_indices[0]] + + kernels[nodes_indices[1]] + + kernels[nodes_indices[2]] + - kernels[nodes_indices[3]] + + kernels[nodes_indices[4]] + - kernels[nodes_indices[5]] + - kernels[nodes_indices[6]] + + kernels[nodes_indices[7]] + ) + return result + + +_sensitivity_tmi_serial = jit(nopython=True, parallel=False)(_sensitivity_tmi) +_sensitivity_tmi_parallel = jit(nopython=True, parallel=True)(_sensitivity_tmi) +_forward_tmi_serial = jit(nopython=True, parallel=False)(_forward_tmi) +_forward_tmi_parallel = jit(nopython=True, parallel=True)(_forward_tmi) +_forward_mag_serial = jit(nopython=True, parallel=False)(_forward_mag) +_forward_mag_parallel = jit(nopython=True, parallel=True)(_forward_mag) +_sensitivity_mag_serial = jit(nopython=True, parallel=False)(_sensitivity_mag) +_sensitivity_mag_parallel = jit(nopython=True, parallel=True)(_sensitivity_mag) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 11fdfb5a70..2cc2fd47b1 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -20,6 +20,28 @@ from .analytics import CongruousMagBC from .survey import Survey +import discretize + +from ._numba_functions import ( + choclo, + _sensitivity_tmi_parallel, + _sensitivity_tmi_serial, + _sensitivity_mag_parallel, + _sensitivity_mag_serial, + 
_forward_tmi_parallel, + _forward_tmi_serial, + _forward_mag_parallel, + _forward_mag_serial, +) + +if choclo is not None: + CHOCLO_SUPPORTED_COMPONENTS = {"tmi", "bx", "by", "bz"} + CHOCLO_KERNELS = { + "bx": (choclo.prism.kernel_ee, choclo.prism.kernel_en, choclo.prism.kernel_eu), + "by": (choclo.prism.kernel_en, choclo.prism.kernel_nn, choclo.prism.kernel_nu), + "bz": (choclo.prism.kernel_eu, choclo.prism.kernel_nu, choclo.prism.kernel_uu), + } + class Simulation3DIntegral(BasePFSimulation): """ @@ -36,7 +58,9 @@ def __init__( chiMap=None, model_type="scalar", is_amplitude_data=False, - **kwargs + engine="geoana", + choclo_parallel=True, + **kwargs, ): self.model_type = model_type super().__init__(mesh, **kwargs) @@ -48,6 +72,18 @@ def __init__( self._gtg_diagonal = None self.is_amplitude_data = is_amplitude_data self.modelMap = self.chiMap + self.engine = engine + if self.engine == "choclo": + if choclo_parallel: + self._sensitivity_tmi = _sensitivity_tmi_parallel + self._sensitivity_mag = _sensitivity_mag_parallel + self._forward_tmi = _forward_tmi_parallel + self._forward_mag = _forward_mag_parallel + else: + self._sensitivity_tmi = _sensitivity_tmi_serial + self._sensitivity_mag = _sensitivity_mag_serial + self._forward_tmi = _forward_tmi_serial + self._forward_mag = _forward_mag_serial @property def model_type(self): @@ -103,7 +139,10 @@ def fields(self, model): self.model = model # model = self.chiMap * model if self.store_sensitivities == "forward_only": - fields = mkvc(self.linear_operator()) + if self.engine == "choclo": + fields = self._forward(self.chi) + else: + fields = mkvc(self.linear_operator()) else: fields = np.asarray( self.G @ self.chi.astype(self.sensitivity_dtype, copy=False) @@ -117,7 +156,10 @@ def fields(self, model): @property def G(self): if getattr(self, "_G", None) is None: - self._G = self.linear_operator() + if self.engine == "choclo": + self._G = self._sensitivity_matrix() + else: + self._G = self.linear_operator() return self._G 
@@ -494,6 +536,214 @@ def deleteTheseOnModelUpdate(self): deletes = deletes + ["_gtg_diagonal", "_ampDeriv"] return deletes + def _forward(self, model): + """ + Forward model the fields of active cells in the mesh on receivers. + + Parameters + ---------- + model : (n_active_cells) or (3 * n_active_cells) array + Array containing the susceptibilities (scalar) or effective + susceptibilities (vector) of the active cells in the mesh, in SI + units. + Susceptibilities are expected if ``model_type`` is ``"scalar"``, + and the array should have ``n_active_cells`` elements. + Effective susceptibilities are expected if ``model_type`` is + ``"vector"``, and the array should have ``3 * n_active_cells`` + elements. + + Returns + ------- + (nD, ) array + Always return a ``np.float64`` array. + """ + # Gather active nodes and the indices of the nodes for each active cell + active_nodes, active_cell_nodes = self._get_active_nodes() + # Get regional field + regional_field = self.survey.source_field.b0 + # Allocate fields array + fields = np.zeros(self.survey.nD, dtype=self.sensitivity_dtype) + # Define the constant factor + constant_factor = 1 / 4 / np.pi + # Start computing the fields + index_offset = 0 + scalar_model = self.model_type == "scalar" + for components, receivers in self._get_components_and_receivers(): + if not CHOCLO_SUPPORTED_COMPONENTS.issuperset(components): + raise NotImplementedError( + f"Other components besides {CHOCLO_SUPPORTED_COMPONENTS} " + "aren't implemented yet." 
+ ) + n_components = len(components) + n_rows = n_components * receivers.shape[0] + for i, component in enumerate(components): + vector_slice = slice( + index_offset + i, index_offset + n_rows, n_components + ) + if component == "tmi": + self._forward_tmi( + receivers, + active_nodes, + model, + fields[vector_slice], + active_cell_nodes, + regional_field, + constant_factor, + scalar_model, + ) + else: + kernel_x, kernel_y, kernel_z = CHOCLO_KERNELS[component] + self._forward_mag( + receivers, + active_nodes, + model, + fields[vector_slice], + active_cell_nodes, + regional_field, + kernel_x, + kernel_y, + kernel_z, + constant_factor, + scalar_model, + ) + index_offset += n_rows + return fields + + def _sensitivity_matrix(self): + """ + Compute the sensitivity matrix G + + Returns + ------- + (nD, n_active_cells) array + """ + # Gather active nodes and the indices of the nodes for each active cell + active_nodes, active_cell_nodes = self._get_active_nodes() + # Get regional field + regional_field = self.survey.source_field.b0 + # Allocate sensitivity matrix + if self.model_type == "scalar": + n_columns = self.nC + else: + n_columns = 3 * self.nC + shape = (self.survey.nD, n_columns) + sensitivity_matrix = np.empty(shape, dtype=self.sensitivity_dtype) + # Define the constant factor + constant_factor = 1 / 4 / np.pi + # Start filling the sensitivity matrix + index_offset = 0 + scalar_model = self.model_type == "scalar" + for components, receivers in self._get_components_and_receivers(): + if not CHOCLO_SUPPORTED_COMPONENTS.issuperset(components): + raise NotImplementedError( + f"Other components besides {CHOCLO_SUPPORTED_COMPONENTS} " + "aren't implemented yet." 
+ ) + n_components = len(components) + n_rows = n_components * receivers.shape[0] + for i, component in enumerate(components): + matrix_slice = slice( + index_offset + i, index_offset + n_rows, n_components + ) + if component == "tmi": + self._sensitivity_tmi( + receivers, + active_nodes, + sensitivity_matrix[matrix_slice, :], + active_cell_nodes, + regional_field, + constant_factor, + scalar_model, + ) + else: + kernel_x, kernel_y, kernel_z = CHOCLO_KERNELS[component] + self._sensitivity_mag( + receivers, + active_nodes, + sensitivity_matrix[matrix_slice, :], + active_cell_nodes, + regional_field, + kernel_x, + kernel_y, + kernel_z, + constant_factor, + scalar_model, + ) + index_offset += n_rows + return sensitivity_matrix + + def _get_cell_nodes(self): + """ + Return indices of nodes for each cell in the mesh. + """ + if isinstance(self.mesh, discretize.TreeMesh): + cell_nodes = self.mesh.cell_nodes + elif isinstance(self.mesh, discretize.TensorMesh): + cell_nodes = self._get_tensormesh_cell_nodes() + else: + raise TypeError(f"Invalid mesh of type {self.mesh.__class__.__name__}.") + return cell_nodes + + def _get_tensormesh_cell_nodes(self): + """ + Quick implmentation of ``cell_nodes`` for a ``TensorMesh``. + + This method should be removed after ``TensorMesh.cell_nodes`` is added + in discretize. 
+ """ + inds = np.arange(self.mesh.n_nodes).reshape(self.mesh.shape_nodes, order="F") + cell_nodes = [ + inds[:-1, :-1, :-1].reshape(-1, order="F"), + inds[1:, :-1, :-1].reshape(-1, order="F"), + inds[:-1, 1:, :-1].reshape(-1, order="F"), + inds[1:, 1:, :-1].reshape(-1, order="F"), + inds[:-1, :-1, 1:].reshape(-1, order="F"), + inds[1:, :-1, 1:].reshape(-1, order="F"), + inds[:-1, 1:, 1:].reshape(-1, order="F"), + inds[1:, 1:, 1:].reshape(-1, order="F"), + ] + cell_nodes = np.stack(cell_nodes, axis=-1) + return cell_nodes + + def _get_active_nodes(self): + """ + Return locations of nodes only for active cells + + Also return an array containing the indices of the "active nodes" for + each active cell in the mesh + """ + # Get all nodes in the mesh + if isinstance(self.mesh, discretize.TreeMesh): + nodes = self.mesh.total_nodes + elif isinstance(self.mesh, discretize.TensorMesh): + nodes = self.mesh.nodes + else: + raise TypeError(f"Invalid mesh of type {self.mesh.__class__.__name__}.") + # Get original cell_nodes but only for active cells + cell_nodes = self._get_cell_nodes() + # If all cells in the mesh are active, return nodes and cell_nodes + if self.nC == self.mesh.n_cells: + return nodes, cell_nodes + # Keep only the cell_nodes for active cells + cell_nodes = cell_nodes[self.ind_active] + # Get the unique indices of the nodes that belong to every active cell + # (these indices correspond to the original `nodes` array) + unique_nodes, active_cell_nodes = np.unique(cell_nodes, return_inverse=True) + # Select only the nodes that belong to the active cells (active nodes) + active_nodes = nodes[unique_nodes] + # Reshape indices of active cells for each active cell in the mesh + active_cell_nodes = active_cell_nodes.reshape(cell_nodes.shape) + return active_nodes, active_cell_nodes + + def _get_components_and_receivers(self): + """Generator for receiver locations and their field components.""" + if not hasattr(self.survey, "source_field"): + raise AttributeError( + 
f"The survey '{self.survey}' has no 'source_field' attribute." + ) + for receiver_object in self.survey.source_field.receiver_list: + yield receiver_object.components, receiver_object.locations + class SimulationEquivalentSourceLayer( BaseEquivalentSourceLayerSimulation, Simulation3DIntegral diff --git a/SimPEG/regularization/__init__.py b/SimPEG/regularization/__init__.py index 5d1a7910ac..984c537509 100644 --- a/SimPEG/regularization/__init__.py +++ b/SimPEG/regularization/__init__.py @@ -149,25 +149,26 @@ from ..utils.code_utils import deprecate_class from .base import ( BaseRegularization, - WeightedLeastSquares, BaseSimilarityMeasure, Smallness, SmoothnessFirstOrder, SmoothnessSecondOrder, + WeightedLeastSquares, ) -from .regularization_mesh import RegularizationMesh -from .sparse import BaseSparse, SparseSmallness, SparseSmoothness, Sparse -from .pgi import PGIsmallness, PGI -from .cross_gradient import CrossGradient from .correspondence import LinearCorrespondence +from .cross_gradient import CrossGradient from .jtv import JointTotalVariation +from .pgi import PGI, PGIsmallness +from .regularization_mesh import RegularizationMesh +from .regularization_mesh_lateral import LCRegularizationMesh +from .sparse import BaseSparse, Sparse, SparseSmallness, SparseSmoothness from .vector import ( + AmplitudeSmallness, + AmplitudeSmoothnessFirstOrder, + BaseAmplitude, BaseVectorRegularization, CrossReferenceRegularization, - BaseAmplitude, VectorAmplitude, - AmplitudeSmallness, - AmplitudeSmoothnessFirstOrder, ) diff --git a/SimPEG/regularization/base.py b/SimPEG/regularization/base.py index 656c1a2572..22cbe2632c 100644 --- a/SimPEG/regularization/base.py +++ b/SimPEG/regularization/base.py @@ -8,6 +8,7 @@ from ..objective_function import BaseObjectiveFunction, ComboObjectiveFunction from .. 
import utils from .regularization_mesh import RegularizationMesh +from .regularization_mesh_lateral import LCRegularizationMesh from SimPEG.utils.code_utils import deprecate_property, validate_ndarray_with_shape @@ -882,20 +883,24 @@ def __init__( self, mesh, orientation="x", reference_model_in_smooth=False, **kwargs ): self.reference_model_in_smooth = reference_model_in_smooth + if isinstance(mesh, LCRegularizationMesh): + if orientation not in ["r", "z"]: + raise ValueError("Orientation must be 'r' or 'z'") + else: + if orientation not in ["x", "y", "z"]: + raise ValueError("Orientation must be 'x', 'y' or 'z'") - if orientation not in ["x", "y", "z"]: - raise ValueError("Orientation must be 'x', 'y' or 'z'") + if orientation == "y" and mesh.dim < 2: + raise ValueError( + "Mesh must have at least 2 dimensions to regularize along the " + "y-direction." + ) + elif orientation == "z" and mesh.dim < 3: + raise ValueError( + "Mesh must have at least 3 dimensions to regularize along the " + "z-direction" + ) - if orientation == "y" and mesh.dim < 2: - raise ValueError( - "Mesh must have at least 2 dimensions to regularize along the " - "y-direction." 
- ) - elif orientation == "z" and mesh.dim < 3: - raise ValueError( - "Mesh must have at least 3 dimensions to regularize along the " - "z-direction" - ) self._orientation = orientation super().__init__(mesh=mesh, **kwargs) diff --git a/SimPEG/regularization/laterally_constrained.py b/SimPEG/regularization/laterally_constrained.py new file mode 100644 index 0000000000..b23f391a4e --- /dev/null +++ b/SimPEG/regularization/laterally_constrained.py @@ -0,0 +1,142 @@ +import numpy as np +from .sparse import SparseSmoothness, SparseSmallness, Sparse +from .regularization_mesh_lateral import LCRegularizationMesh + + +class LaterallyConstrainedSmallness(SparseSmallness): + """ + Duplicate of SparseSmallness Class + """ + + +class LaterallyConstrainedSmoothness(SparseSmoothness): + """ + Modification of SparseSmoothness Class + for addressing radial and vertical gradients of model parameters, + which is a 1D vertical resistivity profile at each of lateral locations. + """ + + def __init__(self, mesh, orientation="r", gradient_type="total", **kwargs): + if "gradientType" in kwargs: + self.gradientType = kwargs.pop("gradientType") + else: + self.gradient_type = gradient_type + super().__init__(mesh=mesh, orientation=orientation, **kwargs) + + +class LaterallyConstrained(Sparse): + """ + This regularization function is designed to regularize model parameters + connected with a 2D simplex mesh and 1D vertical mesh. + Motivating example is a stitched inversion of the electromagnetic data. + In such a case, a model is a 1D vertical conductivity (or resistivity) profile + at each sounding location. Each profile has the same number of layers. + The 2D simplex mesh connects resistivity values of each layer in lateral dimensions + while the 1D vertical mesh connects resistivity values along the vertical profile. + This LaterallyConstrained class is designed in a way that can handle sparse norm inversion. + And that is the reason why it inherits the Sparse Class. 
+ + """ + + def __init__( + self, + mesh, + active_cells=None, + active_edges=None, + alpha_r=None, + length_scale_r=None, + norms=None, + gradient_type="total", + irls_scaled=True, + irls_threshold=1e-8, + objfcts=None, + **kwargs, + ): + if not isinstance(mesh, LCRegularizationMesh): + mesh = LCRegularizationMesh(mesh) + + if not isinstance(mesh, LCRegularizationMesh): + TypeError( + f"'regularization_mesh' must be of type {LCRegularizationMesh}. " + f"Value of type {type(mesh)} provided." + ) + self._regularization_mesh = mesh + if active_cells is not None: + self._regularization_mesh.active_cells = active_cells + if active_edges is not None: + self._regularization_mesh.active_edges = active_edges + + if alpha_r is not None: + if length_scale_r is not None: + raise ValueError( + "Attempted to set both alpha_r and length_scale_r at the same time. Please " + "use only one of them" + ) + self.alpha_r = alpha_r + else: + self.length_scale_r = length_scale_r + + if objfcts is None: + objfcts = [ + SparseSmallness(mesh=self.regularization_mesh), + SparseSmoothness(mesh=self.regularization_mesh, orientation="r"), + SparseSmoothness(mesh=self.regularization_mesh, orientation="z"), + ] + + super().__init__( + self.regularization_mesh, + objfcts=objfcts, + **kwargs, + ) + + @property + def alpha_r(self): + """Multiplier constant for first-order smoothness along x. + + Returns + ------- + float + Multiplier constant for first-order smoothness along x. + """ + return self._alpha_r + + @alpha_r.setter + def alpha_r(self, value): + try: + value = float(value) + except (ValueError, TypeError): + raise TypeError(f"alpha_r must be a real number, saw type{type(value)}") + if value < 0: + raise ValueError(f"alpha_r must be non-negative, not {value}") + self._alpha_r = value + + @property + def length_scale_r(self): + r"""Multiplier constant for smoothness along x relative to base scale length. + + Where the :math:`\Delta h` defines the base length scale (i.e. 
minimum cell dimension), + and :math:`\alpha_r` defines the multiplier constant for first-order smoothness along x, + the length-scale is given by: + + .. math:: + L_x = \bigg ( \frac{\alpha_r}{\Delta h} \bigg )^{1/2} + + Returns + ------- + float + Multiplier constant for smoothness along x relative to base scale length. + """ + return np.sqrt(self.alpha_r) / self.regularization_mesh.base_length + + @length_scale_r.setter + def length_scale_r(self, value: float): + if value is None: + value = 1.0 + try: + value = float(value) + except (TypeError, ValueError): + raise TypeError( + f"length_scale_r must be a real number, saw type{type(value)}" + ) + print("Set alpha_s") + self.alpha_r = (value * self.regularization_mesh.base_length) ** 2 diff --git a/SimPEG/regularization/regularization_mesh.py b/SimPEG/regularization/regularization_mesh.py index c07f92504a..a16dd8eaab 100755 --- a/SimPEG/regularization/regularization_mesh.py +++ b/SimPEG/regularization/regularization_mesh.py @@ -111,7 +111,7 @@ def vol(self) -> np.ndarray: return self._vol @property - def nC(self) -> int: + def n_cells(self) -> int: """Number of active cells. Returns @@ -121,7 +121,9 @@ def nC(self) -> int: """ if self.active_cells is not None: return int(self.active_cells.sum()) - return self.mesh.nC + return self.mesh.n_cells + + nC = n_cells @property def dim(self) -> int: diff --git a/SimPEG/regularization/regularization_mesh_lateral.py b/SimPEG/regularization/regularization_mesh_lateral.py new file mode 100644 index 0000000000..d68e47acec --- /dev/null +++ b/SimPEG/regularization/regularization_mesh_lateral.py @@ -0,0 +1,386 @@ +import numpy as np +import scipy.sparse as sp +from SimPEG.utils.code_utils import validate_active_indices + +from .. 
import utils +from .regularization_mesh import RegularizationMesh + + +class LCRegularizationMesh(RegularizationMesh): + """ + **LCRegularization Mesh** + + :param list mesh: lit including two discretize meshes + :param numpy.ndarray active_cells: bool array, size nC, that is True where we have active cells. Used to reduce the operators so we regularize only on active cells + :param numpy.ndarray active_edges: bool array, size nE, that is True where we have active edges. Used to reduce the operators so we regularize only on active edges + + """ + + _active_edges = None + + def __init__(self, mesh, active_cells=None, active_edges=None, **kwargs): + self.mesh_radial = mesh[0] + self.mesh_vertical = mesh[1] + self.active_edges = active_edges + utils.setKwargs(self, **kwargs) + + @property + def active_cells(self) -> np.ndarray: + """Active cells on the regularization mesh. + + A boolean array defining the cells in the regularization mesh that are active + (i.e. updated) throughout the inversion. The values of inactive cells + remain equal to their starting model values. + + Returns + ------- + (n_cells, ) array of bool + + Notes + ----- + If the property is set using a ``numpy.ndarray`` of ``int``, the setter interprets the + array as representing the indices of the active cells. When called however, the quantity + will have been internally converted to a boolean array. + """ + return self._active_cells + + @active_cells.setter + def active_cells(self, values: np.ndarray): + if getattr(self, "_active_cells", None) is not None and not all( + self._active_cells == values + ): + raise AttributeError( + "The RegulatizationMesh already has an 'active_cells' property set." 
+ ) + if values is not None: + values = validate_active_indices("values", values, self.nC) + # Ensure any cached operators created when + # active_cells was None are deleted + self._vol = None + self._Pac = None + self._Paer = None + self._Pafz = None + self._h_gridded_r = None + self._h_gridded_z = None + self._cell_gradient_z = None + self._aveCC2Fz = None + self._aveFz2CC = None + self._active_cells = values + + @property + def active_edges(self) -> np.ndarray: + return self._active_edges + + @active_edges.setter + def active_edges(self, values: np.ndarray): + if getattr(self, "_active_edges", None) is not None and not all( + self._active_edges == values + ): + raise AttributeError( + "The RegulatizationMesh already has an 'active_edges' property set." + ) + if values is not None: + self._aveCC2Fr = None + self._cell_gradient_r = None + self._aveFr2CC = None + + self._active_edges = values + + @property + def vol(self) -> np.ndarray: + # Assume a unit area for the radial points) + # We could use the average of cells to nodes + self._vol = ( + np.ones(self.n_nodes, dtype=float)[:, None] * self.mesh_vertical.h[0] + ).flatten() + return self._vol[self.active_cells].flatten() + + @property + def h_gridded_r(self) -> np.ndarray: + """ + Length of cells in the raidal direction + + """ + if getattr(self, "_h_gridded_r", None) is None: + # assume a unit length scale in radial direction + n = self.nz * self.n_nodes + self._h_gridded_r = np.ones(n) + return self._h_gridded_r + + @property + def h_gridded_z(self) -> np.ndarray: + """ + Length of cells in the vertical direction + + """ + if getattr(self, "_h_gridded_z", None) is None: + self._h_gridded_z = np.tile(self.mesh_vertical.h[0], self.n_nodes).flatten() + return self._h_gridded_z + + @property + def base_length(self) -> float: + """Smallest dimension (i.e. edge length) for smallest cell in the mesh. + + Returns + ------- + float + Smallest dimension (i.e. edge length) for smallest cell in the mesh. 
+ """ + if getattr(self, "_base_length", None) is None: + self._base_length = self.mesh_vertical.h[0].min() + return self._base_length + + @property + def dim(self) -> int: + """Dimension of regularization mesh. + + Returns + ------- + {2} + Dimension of the regularization mesh. + """ + return 2 + + @property + def cell_gradient(self) -> sp.csr_matrix: + """Cell gradient operator (cell centers to faces). + + Built from :py:property:`~discretize.operators.differential_operators.DiffOperators.cell_gradient`. + + Returns + ------- + (n_faces, n_cells) scipy.sparse.csr_matrix + Cell gradient operator (cell centers to faces). + """ + return sp.vstack([self.cell_gradient_r, self.cell_gradient_z]) + + @property + def nodal_gradient_stencil(self) -> sp.csr_matrix: + ind_ptr = 2 * np.arange(self.mesh_radial.n_edges + 1) + col_inds = self.mesh_radial._edges.reshape(-1) + Aijs = ( + np.ones(self.mesh_radial.n_edges, dtype=float)[:, None] * [-1, 1] + ).reshape(-1) + + return sp.csr_matrix( + (Aijs, col_inds, ind_ptr), shape=(self.mesh_radial.n_edges, self.n_nodes) + ) + + @property + def cell_gradient_r(self) -> sp.csr_matrix: + """ + Nodal gradient in radial direction + + """ + if getattr(self, "_cell_gradient_r", None) is None: + grad = self.nodal_gradient_stencil + self._cell_gradient_r = ( + self.Paer.T * sp.kron(grad, utils.speye(self.nz)) * self.Pac + ) + return self._cell_gradient_r + + @property + def aveCC2Fr(self) -> sp.csr_matrix: + """ + Average of cells in the radial direction + + """ + if getattr(self, "_aveCC2Fr", None) is None: + ave = self.mesh_radial.average_node_to_edge + self._aveCC2Fr = self.Paer.T * sp.kron(ave, utils.speye(self.nz)) * self.Pac + return self._aveCC2Fr + + @property + def cell_distances_r(self) -> np.ndarray: + """Cell center distance array along the r-direction. + + Returns + ------- + (n_active_faces_r, ) numpy.ndarray + Cell center distance array along the r-direction. 
+ """ + if getattr(self, "_cell_distances_r", None) is None: + Ave = self.aveCC2Fr + self._cell_distances_r = Ave * (self.Pac.T * self.h_gridded_r) + return self._cell_distances_r + + @property + def cell_gradient_z(self) -> sp.csr_matrix: + """ + Cell gradeint in vertical direction + + """ + if getattr(self, "_cell_gradient_z", None) is None: + grad = self.mesh_vertical.stencil_cell_gradient + self._cell_gradient_z = ( + self.Pafz.T * sp.kron(utils.speye(self.n_nodes), grad) * self.Pac + ) + return self._cell_gradient_z + + @property + def aveCC2Fz(self) -> sp.csr_matrix: + """ + Average of cells in the vertical direction + + """ + if getattr(self, "_aveCC2Fz", None) is None: + ave = self.mesh_vertical.average_cell_to_face + self._aveCC2Fz = ( + self.Pafz.T * sp.kron(utils.speye(self.n_nodes), ave) * self.Pac + ) + return self._aveCC2Fz + + @property + def cell_distances_z(self) -> np.ndarray: + """Cell center distance array along the r-direction. + + Returns + ------- + (n_active_faces_z, ) numpy.ndarray + Cell center distance array along the r-direction. 
+ """ + if getattr(self, "_cell_distances_z", None) is None: + Ave = self.aveCC2Fr + self._cell_distances_z = Ave * (self.Pac.T * self.h_gridded_z) + return self._cell_distances_z + + @property + def nz(self) -> int: + """ + Number of cells of the 1D vertical mesh + """ + if getattr(self, "_nz", None) is None: + self._nz = self.mesh_vertical.n_cells + return self._nz + + @property + def nFz(self) -> int: + """ + Number of faces in the vertical direction + """ + if getattr(self, "_nFz", None) is None: + self._nFz = self.mesh_vertical.n_faces * self.n_nodes + return self._nFz + + @property + def nE(self) -> int: + """ + Number of edges in the radial direction + """ + if getattr(self, "_nE", None) is None: + self._nE = self.nz * self.n_edges + return self._nE + + @property + def nC(self) -> int: + """ + reduced number of cells + + :rtype: int + :return: number of cells being regularized + """ + if self.active_cells is not None: + return int(self.active_cells.sum()) + return self.nz * self.n_nodes + + @property + def n_nodes(self) -> int: + """ + Number of nodes of the 2D simplex mesh + """ + if getattr(self, "_n_nodes", None) is None: + self._n_nodes = self.mesh_radial.n_nodes + return self._n_nodes + + @property + def n_edges(self) -> int: + """ + Number of edges of the 2D simplex mesh + """ + if getattr(self, "_n_edges", None) is None: + self._n_edges = self.mesh_radial.n_edges + return self._n_edges + + @property + def Pafz(self): + """ + projection matrix that takes from the reduced space of active z-faces + to full modelling space (ie. 
nFz x nactive_cells_Fz ) + + :rtype: scipy.sparse.csr_matrix + :return: active face-x projection matrix + """ + if getattr(self, "_Pafz", None) is None: + if self.active_cells is None: + self._Pafz = utils.speye(self.nFz) + else: + ave = self.mesh_vertical.average_face_to_cell + aveFz2CC = sp.kron(utils.speye(self.n_nodes), ave) + active_cells_Fz = aveFz2CC.T * self.active_cells >= 1 + self._Pafz = utils.speye(self.nFz)[:, active_cells_Fz] + return self._Pafz + + @property + def Pac(self): + """ + projection matrix that takes from the reduced space of active cells to + full modelling space (ie. nC x nactive_cells) + + :rtype: scipy.sparse.csr_matrix + :return: active cell projection matrix + """ + if getattr(self, "_Pac", None) is None: + if self.active_cells is None: + self._Pac = utils.speye(self.nz * self.n_nodes) + else: + self._Pac = utils.speye(self.nz * self.n_nodes)[:, self.active_cells] + return self._Pac + + @property + def Paer(self): + """ + projection matrix that takes from the reduced space of active edges + to full modelling space (ie. 
nE x nactive_cells_E )
+
+        :rtype: scipy.sparse.csr_matrix
+        :return: active edge projection matrix
+        """
+        if getattr(self, "_Paer", None) is None:
+            if self.active_edges is None:
+                self._Paer = utils.speye(self.nE)
+            else:
+                ave = self.mesh_vertical.average_face_to_cell
+                self._Paer = utils.speye(self.nE)[:, self.active_edges]
+        return self._Paer
+
+    @property
+    def aveFz2CC(self):
+        """
+        averaging from active z-faces to active cell centers
+
+        :rtype: scipy.sparse.csr_matrix
+        :return: averaging from active z-faces to active cell centers
+        """
+        if getattr(self, "_aveFz2CC", None) is None:
+            ave = self.mesh_vertical.average_face_to_cell
+            self._aveFz2CC = (
+                self.Pac.T * sp.kron(utils.speye(self.n_nodes), ave) * self.Pafz
+            )
+        return self._aveFz2CC
+
+    @property
+    def aveFr2CC(self):
+        """
+        averaging from active radial faces (2D-mesh edges) to active cell centers
+
+        :rtype: scipy.sparse.csr_matrix
+        :return: averaging from active radial faces to active cell centers
+        """
+
+        if getattr(self, "_aveFr2CC", None) is None:
+            ave = self.mesh_radial.average_node_to_edge.T
+            self._aveFr2CC = self.Pac.T * sp.kron(ave, utils.speye(self.nz)) * self.Paer
+        return self._aveFr2CC
+
+
+LCRegularizationMesh.__module__ = "SimPEG.regularization"
diff --git a/SimPEG/regularization/rotated.py b/SimPEG/regularization/rotated.py
new file mode 100644
index 0000000000..1eaf3166a2
--- /dev/null
+++ b/SimPEG/regularization/rotated.py
@@ -0,0 +1,527 @@
+from typing import Literal
+
+import numpy as np
+import scipy.sparse as sp
+from discretize import TensorMesh, TreeMesh
+from discretize.base import BaseMesh
+from scipy.interpolate import NearestNDInterpolator
+
+from ..utils.code_utils import (
+    validate_float,
+    validate_ndarray_with_shape,
+    validate_type,
+)
+from ..utils.mat_utils import coterminal
+from . import BaseRegularization, RegularizationMesh, Sparse, SparseSmallness
+
+
+class SmoothnessFullGradient(BaseRegularization):
+    r"""Measures the gradient of a model using optionally anisotropic weighting.
+
+    This regularizer measures the first order smoothness in a mesh ambivalent way
+    by observing that the N-d smoothness operator can be represented as an
+    inner product with an arbitrarily anisotropic weight.
+
+    By default it assumes uniform weighting in each dimension, which works
+    for most ``discretize`` mesh types.
+
+    Parameters
+    ----------
+    mesh : discretize.BaseMesh
+        The mesh object to use for regularization. The mesh should either have
+        a `cell_gradient` or a `stencil_cell_gradient` defined.
+    alphas : (mesh.dim,) or (mesh.n_cells, mesh.dim) array_like of float, optional.
+        The weights of the regularization for each axis. This can be defined for each cell
+        in the mesh. Default is uniform weights equal to the smallest edge length squared.
+    reg_dirs : (mesh.dim, mesh.dim) or (mesh.n_cells, mesh.dim, mesh.dim) array_like of float
+        Matrix or list of matrices whose columns represent the regularization directions.
+        Each matrix should be orthonormal. Default is Identity.
+    ortho_check : bool, optional
+        Whether to check `reg_dirs` for orthogonality.
+    kwargs :
+        Keyword arguments passed to the parent class ``BaseRegularization``.
+
+    Examples
+    --------
+    Construct a 2D measure with uniform smoothing in each direction.
+
+    >>> from discretize import TensorMesh
+    >>> from SimPEG.regularization import SmoothnessFullGradient
+    >>> mesh = TensorMesh([32, 32])
+    >>> reg = SmoothnessFullGradient(mesh)
+
+    We can instead create a measure that smooths twice as much in the 1st dimension
+    as it does in the second dimension.
+    >>> reg = SmoothnessFullGradient(mesh, [2, 1])
+
+    The `alphas` parameter can also be independent for each cell. Here we make the cells
+    whose x2 coordinate is lower than 0.5 smooth twice as much in the first dimension;
+    otherwise the smoothing is uniform.
+    >>> alphas = np.ones((mesh.n_cells, mesh.dim))
+    >>> alphas[mesh.cell_centers[:, 1] < 0.5] = [2, 1]
+    >>> reg = SmoothnessFullGradient(mesh, alphas)
+
+    We can also rotate the axis in which we want to preferentially smooth. Say we want to
+    smooth twice as much along the +x1,+x2 diagonal as we do along the -x1,+x2 diagonal,
+    effectively rotating our smoothing 45 degrees. Note that the columns of the matrix
+    represent the directional vectors (not the rows).
+    >>> sqrt2 = np.sqrt(2) / 2
+    >>> reg_dirs = np.array([
+    ...     [sqrt2, -sqrt2],
+    ...     [sqrt2, sqrt2],
+    ... ])
+    >>> reg = SmoothnessFullGradient(mesh, alphas, reg_dirs=reg_dirs)
+
+    Notes
+    -----
+    The regularization object is the discretized form of the continuous regularization
+
+    .. math::
+        f(m) = \int_V \nabla m \cdot \mathbf{a} \nabla m \hspace{5pt} \partial V
+
+    The tensor quantity `a` is used to represent the potential preferential directions of
+    regularization. `a` must be symmetric positive semi-definite with an eigendecomposition of:
+
+    .. math::
+        \mathbf{a} = \mathbf{Q}\mathbf{L}\mathbf{Q}^{-1}
+
+    `Q` is then the regularization directions ``reg_dirs``, and `L` represents the weighting
+    along each direction, with ``alphas`` along its diagonal. These are multiplied to form the
+    anisotropic alpha used for rotated gradients.
+ """ + + _multiplier_pair = "alpha_x" + + def __init__( + self, + mesh, + alphas=None, + reg_dirs=None, + ortho_check=True, + norm=2, + irls_scaled=True, + irls_threshold=1e-8, + reference_model_in_smooth=False, + **kwargs, + ): + self.reference_model_in_smooth = reference_model_in_smooth + + if mesh.dim < 2: + raise TypeError("Mesh must have dimension higher than 1") + super().__init__(mesh=mesh, **kwargs) + + self.norm = norm + self.irls_threshold = irls_threshold + self.irls_scaled = irls_scaled + + if alphas is None: + edge_length = np.min(mesh.edge_lengths) + alphas = edge_length**2 * np.ones(mesh.dim) + alphas = validate_ndarray_with_shape( + "alphas", + alphas, + shape=[(mesh.dim,), ("*", mesh.dim)], + dtype=float, + ) + n_active_cells = self.regularization_mesh.n_cells + if len(alphas.shape) == 1: + alphas = np.tile(alphas, (mesh.n_cells, 1)) + if alphas.shape[0] != mesh.n_cells: + # check if I need to expand from active cells to all cells (needed for discretize) + if alphas.shape[0] == n_active_cells and self.active_cells is not None: + alpha_temp = np.zeros((mesh.n_cells, mesh.dim)) + alpha_temp[self.active_cells] = alphas + alphas = alpha_temp + else: + raise IndexError( + f"`alphas` first dimension, {alphas.shape[0]}, must be either number " + f"of active cells {mesh.n_cells}, or the number of mesh cells {mesh.n_cells}. 
" + ) + if np.any(alphas < 0): + raise ValueError("`alpha` must be non-negative") + anis_alpha = alphas + + if reg_dirs is not None: + reg_dirs = validate_ndarray_with_shape( + "reg_dirs", + reg_dirs, + shape=[(mesh.dim, mesh.dim), ("*", mesh.dim, mesh.dim)], + dtype=float, + ) + if reg_dirs.shape == (mesh.dim, mesh.dim): + reg_dirs = np.tile(reg_dirs, (mesh.n_cells, 1, 1)) + if reg_dirs.shape[0] != mesh.n_cells: + # check if I need to expand from active cells to all cells (needed for discretize) + if ( + reg_dirs.shape[0] == n_active_cells + and self.active_cells is not None + ): + reg_dirs_temp = np.zeros((mesh.n_cells, mesh.dim, mesh.dim)) + reg_dirs_temp[self.active_cells] = reg_dirs + reg_dirs = reg_dirs_temp + else: + raise IndexError( + f"`reg_dirs` first dimension, {reg_dirs.shape[0]}, must be either number " + f"of active cells {mesh.n_cells}, or the number of mesh cells {mesh.n_cells}. " + ) + # check orthogonality? + if ortho_check: + eye = np.eye(mesh.dim) + for i, M in enumerate(reg_dirs): + if not np.allclose(eye, M @ M.T): + raise ValueError(f"Matrix {i} is not orthonormal") + # create a stack of matrices of dir @ alphas @ dir.T + anis_alpha = np.einsum("ink,ik,imk->inm", reg_dirs, anis_alpha, reg_dirs) + # Then select the upper diagonal components for input to discretize + if mesh.dim == 2: + anis_alpha = np.stack( + ( + anis_alpha[..., 0, 0], + anis_alpha[..., 1, 1], + anis_alpha[..., 0, 1], + ), + axis=-1, + ) + elif mesh.dim == 3: + anis_alpha = np.stack( + ( + anis_alpha[..., 0, 0], + anis_alpha[..., 1, 1], + anis_alpha[..., 2, 2], + anis_alpha[..., 0, 1], + anis_alpha[..., 0, 2], + anis_alpha[..., 1, 2], + ), + axis=-1, + ) + self._anis_alpha = anis_alpha + + @property + def reference_model_in_smooth(self) -> bool: + """ + whether to include reference model in gradient or not + + :return: True or False + """ + return self._reference_model_in_smooth + + @reference_model_in_smooth.setter + def reference_model_in_smooth(self, value: bool): + if 
not isinstance(value, bool): + raise TypeError( + f"'reference_model_in_smooth must be of type 'bool'. Value of type {type(value)} provided." + ) + self._reference_model_in_smooth = value + + def _delta_m(self, m): + if self.reference_model is None or not self.reference_model_in_smooth: + return m + return m - self.reference_model + + def f_m(self, m): + dfm_dl = self.cell_gradient @ (self.mapping * self._delta_m(m)) + + if self.units is not None and self.units.lower() == "radian": + return coterminal(dfm_dl * self._cell_distances) / self._cell_distances + return dfm_dl + + def f_m_deriv(self, m): + return self.cell_gradient @ self.mapping.deriv(self._delta_m(m)) + + # overwrite the call, deriv, and deriv2... + def __call__(self, m): + M_f = self.W + r = self.f_m(m) + return 0.5 * r @ M_f @ r + + def deriv(self, m): + m_d = self.f_m_deriv(m) + M_f = self.W + r = self.f_m(m) + return m_d.T @ (M_f @ r) + + def deriv2(self, m, v=None): + m_d = self.f_m_deriv(m) + M_f = self.W + if v is None: + return m_d.T @ (M_f @ m_d) + + return m_d.T @ (M_f @ (m_d @ v)) + + @property + def cell_gradient(self): + """The (approximate) cell gradient operator + + Returns + ------- + scipy.sparse.csr_matrix + """ + if getattr(self, "_cell_gradient", None) is None: + mesh = self.regularization_mesh.mesh + try: + cell_gradient = mesh.cell_gradient + except AttributeError: + a = mesh.face_areas + v = mesh.average_cell_to_face @ mesh.cell_volumes + cell_gradient = sp.diags(a / v) @ mesh.stencil_cell_gradient + + v = np.ones(mesh.n_cells) + # Turn off cell_gradient at boundary faces + if self.active_cells is not None: + v[~self.active_cells] = 0 + + dv = cell_gradient @ v + P = sp.diags((np.abs(dv) <= 1e-16).astype(int)) + cell_gradient = P @ cell_gradient + if self.active_cells is not None: + cell_gradient = cell_gradient[:, self.active_cells] + self._cell_gradient = cell_gradient + return self._cell_gradient + + @property + def W(self): + """The inner product operator using rotated 
coordinates + + Returns + ------- + scipy.sparse.csr_matrix + """ + if getattr(self, "_W", None) is None: + mesh = self.regularization_mesh.mesh + cell_weights = np.ones(len(mesh)) + for values in self._weights.values(): + # project values to full mesh + # dirty fix of original PR + projection = NearestNDInterpolator( + mesh.cell_centers[self.active_cells], values + ) + proj_values = projection(mesh.cell_centers) + cell_weights *= proj_values + reg_model = self._anis_alpha * cell_weights[:, None] + # turn off measure in inactive cells + if self.active_cells is not None: + reg_model[~self.active_cells] = 0.0 + + self._W = mesh.get_face_inner_product(reg_model) + return self._W + + def update_weights(self, m): + f_m = self.f_m(m) + irls_weights = self.get_lp_weights(f_m) + irls_weights = self.regularization_mesh.mesh.average_face_to_cell @ irls_weights + self.set_weights(irls=irls_weights[self.active_cells]) + + def get_lp_weights(self, f_m): + lp_scale = np.ones_like(f_m) + if self.irls_scaled: + # Scale on l2-norm gradient: f_m.max() + l2_max = np.ones_like(f_m) * np.abs(f_m).max() + # Compute theoretical maximum gradients for p < 1 + l2_max[self.norm < 1] = self.irls_threshold / np.sqrt( + 1.0 - self.norm[self.norm < 1] + ) + lp_values = l2_max / (l2_max**2.0 + self.irls_threshold**2.0) ** ( + 1.0 - self.norm / 2.0 + ) + lp_scale[lp_values != 0] = np.abs(f_m).max() / lp_values[lp_values != 0] + + return lp_scale / (f_m**2.0 + self.irls_threshold**2.0) ** ( + 1.0 - self.norm / 2.0 + ) + + @property + def irls_scaled(self) -> bool: + """Scale IRLS weights. + + When ``True``, scaling is applied when computing IRLS weights. + The scaling acts to preserve the balance between the data misfit and the components of + the regularization based on the derivative of the l2-norm measure. And it assists the + convergence by ensuring the model does not deviate + aggressively from the global 2-norm solution during the first few IRLS iterations. 
+ For a comprehensive description, see the documentation for :py:meth:`get_lp_weights` . + + Returns + ------- + bool + Whether to scale IRLS weights. + """ + return self._irls_scaled + + @irls_scaled.setter + def irls_scaled(self, value: bool): + self._irls_scaled = validate_type("irls_scaled", value, bool, cast=False) + + @property + def irls_threshold(self): + r"""Stability constant for computing IRLS weights. + + Returns + ------- + float + Stability constant for computing IRLS weights. + """ + return self._irls_threshold + + @irls_threshold.setter + def irls_threshold(self, value): + self._irls_threshold = validate_float( + "irls_threshold", value, min_val=0.0, inclusive_min=False + ) + + @property + def norm(self): + r"""Norm for the sparse regularization. + + Returns + ------- + None, float, (n_cells, ) numpy.ndarray + Norm for the sparse regularization. If ``None``, a 2-norm is used. + A float within the interval [0,2] represents a constant norm applied for all cells. + A ``numpy.ndarray`` object, where each entry is used to apply a different norm to each cell in the mesh. + """ + return self._norm + + @norm.setter + def norm(self, value: float | np.ndarray | None): + if value is None: + value = np.ones(self.cell_gradient.shape[0]) * 2.0 + else: + value = np.ones(self.cell_gradient.shape[0]) * value + if np.any(value < 0) or np.any(value > 2): + raise ValueError( + "Value provided for 'norm' should be in the interval [0, 2]" + ) + self._norm = value + + @property + def units(self) -> str | None: + """Units for the model parameters. + + Some regularization classes behave differently depending on the units; e.g. 'radian'. + + Returns + ------- + str + Units for the model parameters. + """ + return self._units + + @units.setter + def units(self, units: str | None): + if units is not None and not isinstance(units, str): + raise TypeError( + f"'units' must be None or type str. Value of type {type(units)} provided." 
+            )
+        self._units = units
+
+    @property
+    def _cell_distances(self) -> np.ndarray:
+        """
+        cell size average on faces
+
+        :return: np.ndarray
+        """
+        cell_distances = self.cell_gradient.max(axis=1).toarray().ravel()
+        cell_distances[cell_distances == 0] = 1
+        cell_distances = cell_distances ** (-1)
+
+        return cell_distances
+
+
+class RotatedSparse(Sparse):
+    """
+    Class that wraps the rotated gradients in a ComboObjectiveFunction similar to Sparse.
+    """
+
+    def __init__(
+        self,
+        mesh: TensorMesh | TreeMesh,
+        reg_dirs: np.ndarray,
+        alphas_rot: tuple[float, float, float],
+        active_cells: np.ndarray | None = None,
+        norms: list[float] = [2.0, 2.0],
+        gradient_type: Literal["components", "total"] = "total",
+        irls_scaled: bool = True,
+        irls_threshold: float = 1e-8,
+        objfcts: list[BaseRegularization] | None = None,
+        **kwargs,
+    ):
+        """
+        Class to wrap rotated gradient into a ComboObjective Function
+
+        :param mesh: mesh
+        :param reg_dirs: rotation matrix
+        :param alphas_rot: alphas for rotated gradients
+        :param active_cells: active cells, defaults to None
+        :param norms: norms, defaults to [2, 2]
+        :param gradient_type: gradient_type, defaults to "total"
+        :param irls_scaled: irls_scaled, defaults to True
+        :param irls_threshold: irls_threshold, defaults to 1e-8
+        :param objfcts: objfcts, defaults to None
+        """
+        if not isinstance(mesh, RegularizationMesh):
+            mesh = RegularizationMesh(mesh)
+
+        if not isinstance(mesh, RegularizationMesh):
+            raise TypeError(
+                f"'regularization_mesh' must be of type {RegularizationMesh} or {BaseMesh}. "
+                f"Value of type {type(mesh)} provided."
+ ) + self._regularization_mesh = mesh + if active_cells is not None: + self._regularization_mesh.active_cells = active_cells + + if objfcts is None: + objfcts = [ + SparseSmallness(mesh=self.regularization_mesh), + SmoothnessFullGradient( + mesh=self.regularization_mesh.mesh, + active_cells=active_cells, + reg_dirs=reg_dirs, + alphas=alphas_rot, + norm=norms[1], + irls_scaled=irls_scaled, + irls_threshold=irls_threshold, + ), + ] + + super().__init__( + self.regularization_mesh, + objfcts=objfcts, + active_cells=active_cells, + gradient_type=gradient_type, + norms=norms[:2], + irls_scaled=irls_scaled, + irls_threshold=irls_threshold, + **kwargs, + ) + + @property + def alpha_y(self): + """Multiplier constant for first-order smoothness along y. + + Returns + ------- + float + Multiplier constant for first-order smoothness along y. + """ + return self._alpha_y + + @alpha_y.setter + def alpha_y(self, value): + self._alpha_y = None + + @property + def alpha_z(self): + """Multiplier constant for first-order smoothness along z. + + Returns + ------- + float + Multiplier constant for first-order smoothness along z. + """ + return self._alpha_z + + @alpha_z.setter + def alpha_z(self, value): + self._alpha_z = None diff --git a/SimPEG/utils/__init__.py b/SimPEG/utils/__init__.py index 49e3e6193a..5edf2565c9 100644 --- a/SimPEG/utils/__init__.py +++ b/SimPEG/utils/__init__.py @@ -145,25 +145,29 @@ """ from discretize.utils.interpolation_utils import interpolation_matrix +from . 
import io_utils, model_builder, solver_utils from .code_utils import ( - mem_profile_class, + Report, + as_array_n_by_dim, + call_hooks, + check_stoppers, + dependent_property, + deprecate_class, + deprecate_function, + deprecate_method, + deprecate_module, + deprecate_property, hook, - set_kwargs, - print_titles, + mem_profile_class, + print_done, print_line, - check_stoppers, print_stoppers, - print_done, - call_hooks, - deprecate_property, - deprecate_module, - deprecate_method, - deprecate_function, - deprecate_class, - dependent_property, - as_array_n_by_dim, + print_titles, requires, - Report, + set_kwargs, + validate_active_indices, + validate_callable, + validate_direction, validate_float, validate_integer, validate_list_of_types, @@ -171,70 +175,60 @@ validate_ndarray_with_shape, validate_string, validate_type, - validate_callable, - validate_direction, - validate_active_indices, ) - +from .coord_utils import rotate_points_from_normals, rotation_matrix_from_normals +from .counter_utils import Counter, count, timeIt +from .curv_utils import ( + example_curvilinear_grid, + face_info, + index_cube, + volume_tetrahedron, +) +from .io_utils import download from .mat_utils import ( + Identity, + TensorType, + Zero, + av, + av_extrap, + cartesian2spherical, + coterminal, + ddx, + define_plane_from_points, + eigenvalue_by_power_iteration, + estimate_diagonal, + get_subarray, + ind2sub, + inverse_2x2_block_diagonal, + inverse_3x3_block_diagonal, + inverse_property_tensor, + kron3, + make_property_tensor, mkvc, + ndgrid, sdiag, sdinv, speye, - kron3, + spherical2cartesian, spzeros, - ddx, - av, - av_extrap, - ndgrid, - ind2sub, sub2ind, - get_subarray, - inverse_3x3_block_diagonal, - inverse_2x2_block_diagonal, - TensorType, - make_property_tensor, - inverse_property_tensor, - estimate_diagonal, - Zero, - Identity, unique_rows, - eigenvalue_by_power_iteration, - cartesian2spherical, - spherical2cartesian, - coterminal, - define_plane_from_points, ) from .mesh_utils 
import ( - unpack_widths, closest_points_index, extract_core_mesh, surface2inds, + unpack_widths, ) -from .curv_utils import ( - volume_tetrahedron, - index_cube, - face_info, - example_curvilinear_grid, -) -from .counter_utils import Counter, count, timeIt -from . import model_builder -from . import solver_utils -from . import io_utils -from .coord_utils import ( - rotation_matrix_from_normals, - rotate_points_from_normals, -) -from .model_utils import surface2ind_topo, depth_weighting -from .plot_utils import plot2Ddata, plotLayer, plot_1d_layer_model -from .io_utils import download +from .model_utils import depth_weighting, distance_weighting, surface2ind_topo from .pgi_utils import ( GaussianMixture, - WeightedGaussianMixture, - GaussianMixtureWithPrior, GaussianMixtureWithNonlinearRelationships, GaussianMixtureWithNonlinearRelationshipsWithPrior, + GaussianMixtureWithPrior, + WeightedGaussianMixture, ) +from .plot_utils import plot2Ddata, plot_1d_layer_model, plotLayer # Deprecated imports interpmat = deprecate_function( @@ -242,39 +236,27 @@ ) from .code_utils import ( + asArray_N_x_Dim, + callHooks, + checkStoppers, + dependentProperty, memProfileWrapper, - setKwargs, - printTitles, + printDone, printLine, - checkStoppers, printStoppers, - printDone, - callHooks, - dependentProperty, - asArray_N_x_Dim, + printTitles, + setKwargs, ) +from .coord_utils import rotatePointsFromNormals, rotationMatrixFromNormals +from .curv_utils import exampleLrmGrid, faceInfo, indexCube, volTetra from .mat_utils import ( - sdInv, + diagEst, getSubArray, - inv3X3BlockDiagonal, inv2X2BlockDiagonal, - makePropertyTensor, + inv3X3BlockDiagonal, invPropertyTensor, - diagEst, + makePropertyTensor, + sdInv, uniqueRows, ) -from .mesh_utils import ( - meshTensor, - closestPoints, - ExtractCoreMesh, -) -from .curv_utils import ( - volTetra, - faceInfo, - indexCube, - exampleLrmGrid, -) -from .coord_utils import ( - rotatePointsFromNormals, - rotationMatrixFromNormals, -) +from 
.mesh_utils import ExtractCoreMesh, closestPoints, meshTensor diff --git a/SimPEG/utils/mat_utils.py b/SimPEG/utils/mat_utils.py index 5d128d9970..c7b3d07fcb 100644 --- a/SimPEG/utils/mat_utils.py +++ b/SimPEG/utils/mat_utils.py @@ -396,7 +396,7 @@ def coterminal(theta): \theta = 2\pi N + \gamma and *N* is an integer, the function returns the value of :math:`\gamma`. - The coterminal angle :math:`\gamma` is within the range :math:`[-\pi , \pi]`. + The coterminal angle :math:`\gamma` is within the range :math:`[-\pi , \pi)`. Parameters ---------- @@ -409,12 +409,8 @@ def coterminal(theta): Coterminal angles """ - sub = theta[np.abs(theta) >= np.pi] - sub = -np.sign(sub) * (2 * np.pi - np.abs(sub)) - - theta[np.abs(theta) >= np.pi] = sub - - return theta + coterminal = (theta + np.pi) % (2 * np.pi) - np.pi + return coterminal def define_plane_from_points(xyz1, xyz2, xyz3): diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index 8c6d19b1ab..4b67f584e8 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -1,10 +1,28 @@ -from .mat_utils import mkvc +import warnings +from typing import Literal, Optional + +import discretize import numpy as np -from scipy.interpolate import griddata -from scipy.spatial import cKDTree import scipy.sparse as sp from discretize.utils import active_from_xyz -import warnings +from scipy.interpolate import griddata +from scipy.spatial import cKDTree +from scipy.spatial.distance import cdist + +from .mat_utils import mkvc + +try: + import numba + from numba import njit, prange +except ImportError: + numba = None + + # Define dummy njit decorator + def njit(*args, **kwargs): + return lambda f: f + + # Define dummy prange function + prange = range def surface2ind_topo(mesh, topo, gridLoc="CC", method="nearest", fill_value=np.nan): @@ -195,3 +213,181 @@ def depth_weighting( wz = wz[active_cells] return wz / np.nanmax(wz) + + +@njit(parallel=True) +def _distance_weighting_numba( + cell_centers: 
np.ndarray, + cell_volumes: np.ndarray, + reference_locs: np.ndarray, + threshold: float, + exponent: float = 2.0, +) -> np.ndarray: + r""" + distance weighting kernel in numba. + + If numba is not installed, this will work as a regular for loop. + + Parameters + ---------- + cell_centers : np.ndarray + cell centers of the mesh. + cell_volumes : np.ndarray + cell volumes of the mesh. + reference_locs : float or (n, ndim) numpy.ndarray + Reference location for the distance weighting. + It can be a ``float``, which value is the component for + the reference location. + Or it can be a 2d array, with multiple reference locations, where each + row should contain the coordinates of a single location point in the + following order: _x_, _y_, _z_ (for 3D meshes) or _x_, _z_ (for 2D + meshes). + The coordinate of the reference location, usually the receiver locations + exponent : float, optional + Exponent parameter for distance weighting. + The exponent should match the natural decay power of the potential + field. For example, for gravity acceleration, set it to 2; for magnetic + fields, to 3. + threshold : float or None, optional + Threshold parameters used in the distance weighting. + If ``None``, it will be set to half of the smallest cell width. + + Returns + ------- + (n_active) numpy.ndarray + Normalized distance weights for the mesh at every active cell as + a 1d-array. 
+ """ + + distance_weights = np.zeros(len(cell_centers)) + n_reference_locs = len(reference_locs) + for i in prange(n_reference_locs): + rl = reference_locs[i] + dst_wgt = ( + np.sqrt(((cell_centers - rl) ** 2).sum(axis=1)) + threshold + ) ** exponent + dst_wgt = (cell_volumes / dst_wgt) ** 2 + distance_weights += dst_wgt + + distance_weights = distance_weights**0.5 + distance_weights /= cell_volumes + distance_weights /= np.nanmax(distance_weights) + + return distance_weights + + +def distance_weighting( + mesh: discretize.base.BaseMesh, + reference_locs: np.ndarray, + active_cells: Optional[np.ndarray] = None, + exponent: float = 2.0, + threshold: Optional[float] = None, + engine: Literal["loop", "cdist"] = "loop", + cdist_opts: Optional[dict] = None, +): + r""" + Construct diagonal elements of a distance weighting matrix + + Builds the model weights following the distance weighting strategy, a method + to generate weights based on the distance between mesh cell centers and some + reference location(s). + Use these weights in regularizations to counteract the natural decay of + potential field data with distance. + + Parameters + ---------- + mesh : discretize.base.BaseMesh + Discretized model space. + reference_locs : float or (n, ndim) numpy.ndarray + Reference location for the distance weighting. + It can be a ``float``, which value is the component for + the reference location. + Or it can be a 2d array, with multiple reference locations, where each + row should contain the coordinates of a single location point in the + following order: _x_, _y_, _z_ (for 3D meshes) or _x_, _z_ (for 2D + meshes). + The coordinate of the reference location, usually the receiver locations + active_cells : (mesh.n_cells) numpy.ndarray of bool, optional + Index vector for the active cells on the mesh. + If ``None``, every cell will be assumed to be active. + exponent : float, optional + Exponent parameter for distance weighting. 
+ The exponent should match the natural decay power of the potential + field. For example, for gravity acceleration, set it to 2; for magnetic + fields, to 3. + threshold : float or None, optional + Threshold parameters used in the distance weighting. + If ``None``, it will be set to half of the smallest cell width. + engine: str, 'loop' or 'cdist' + pick between a `scipy.spatial.distance.cdist` computation (memory intensive) or `for` loop implementation, + parallelized with numba if available. Default to 'loop'. + cdist_opts: dct, optional + Only valid with `engine=='cdist'`. Options to pass to scipy.spatial.distance.cdist. Default to None. + + Returns + ------- + (n_active) numpy.ndarray + Normalized distance weights for the mesh at every active cell as + a 1d-array. + """ + + active_cells = ( + np.ones(mesh.n_cells, dtype=bool) if active_cells is None else active_cells + ) + + # Default threshold value + if threshold is None: + threshold = 0.5 * mesh.h_gridded.min() + + reference_locs = np.asarray(reference_locs) + + cell_centers = mesh.cell_centers[active_cells] + cell_volumes = mesh.cell_volumes[active_cells] + + # address 1D case + if mesh.dim == 1: + cell_centers = cell_centers.reshape(-1, 1) + reference_locs = reference_locs.reshape(-1, 1) + + if engine == "loop": + if numba is None: + warnings.warn( + "numba is not installed. 'loop' computations might be slower.", + stacklevel=2, + ) + if cdist_opts is not None: + warnings.warn( + f"`cdist_opts` is only valid with `engine=='cdist'`, currently {engine=}", + stacklevel=2, + ) + distance_weights = _distance_weighting_numba( + cell_centers, + cell_volumes, + reference_locs, + exponent=exponent, + threshold=threshold, + ) + + elif engine == "cdist": + warnings.warn( + "scipy.spatial.distance.cdist computations can be memory intensive. 
Consider switching to `engine='loop'` " + "if you run into memory overflow issues", + stacklevel=2, + ) + cdist_opts = cdist_opts or dict() + distance = cdist(cell_centers, reference_locs, **cdist_opts) + + distance_weights = ( + (cell_volumes.reshape(-1, 1) / ((distance + threshold) ** exponent)) ** 2 + ).sum(axis=1) + + distance_weights = distance_weights**0.5 + distance_weights /= cell_volumes + distance_weights /= np.nanmax(distance_weights) + + else: + raise ValueError( + f"engine should be either 'cdist' or 'loop', instead {engine=}" + ) + + return distance_weights diff --git a/tests/base/test_cross_gradient.py b/tests/base/regularizations/test_cross_gradient.py similarity index 100% rename from tests/base/test_cross_gradient.py rename to tests/base/regularizations/test_cross_gradient.py diff --git a/tests/base/regularizations/test_full_gradient.py b/tests/base/regularizations/test_full_gradient.py new file mode 100644 index 0000000000..9f12d4f43f --- /dev/null +++ b/tests/base/regularizations/test_full_gradient.py @@ -0,0 +1,23 @@ +from discretize.tests import OrderTest +import numpy as np +import matplotlib.pyplot as plt +from SimPEG.regularization import SmoothnessFullGradient + + +class RegOrderTest(OrderTest): + meshTypes = ["uniformTensorMesh", "uniformTree"] + meshSizes = [4, 8, 16, 32] + meshDimension = 2 + + def getError(self): + true_val = 59.2176264065362 / 2 + x = self.M.cell_centers[:, 0] + y = self.M.cell_centers[:, 1] + # a function that is zero at edge with zero derivative + f_cc = (1 - np.cos(2 * x * np.pi)) * (1 - np.cos(2 * y * np.pi)) + + reg = SmoothnessFullGradient(self.M, alphas=[1, 1]) + return reg(f_cc) - true_val + + def test_orderWeakCellGradIntegral(self): + self.orderTest() diff --git a/tests/base/test_jtv.py b/tests/base/regularizations/test_jtv.py similarity index 100% rename from tests/base/test_jtv.py rename to tests/base/regularizations/test_jtv.py diff --git a/tests/base/test_pgi_regularization.py 
b/tests/base/regularizations/test_pgi_regularization.py similarity index 100% rename from tests/base/test_pgi_regularization.py rename to tests/base/regularizations/test_pgi_regularization.py diff --git a/tests/base/test_regularization.py b/tests/base/regularizations/test_regularization.py similarity index 99% rename from tests/base/test_regularization.py rename to tests/base/regularizations/test_regularization.py index 82abaca799..d32a0c0ce6 100644 --- a/tests/base/test_regularization.py +++ b/tests/base/regularizations/test_regularization.py @@ -1,20 +1,19 @@ -import numpy as np +import inspect import unittest +import discretize +import numpy as np import pytest -import inspect -import discretize from SimPEG import maps, objective_function, regularization, utils +from SimPEG.objective_function import ComboObjectiveFunction from SimPEG.regularization import ( BaseRegularization, - WeightedLeastSquares, Smallness, SmoothnessFirstOrder, SmoothnessSecondOrder, + WeightedLeastSquares, ) -from SimPEG.objective_function import ComboObjectiveFunction - TOL = 1e-7 testReg = True diff --git a/tests/base/test_maps.py b/tests/base/test_maps.py index 9f6c8aaec3..ad6362c84d 100644 --- a/tests/base/test_maps.py +++ b/tests/base/test_maps.py @@ -25,6 +25,8 @@ "ComboMap", "ActiveCells", "InjectActiveCells", + "InjectActiveFaces", + "InjectActiveEdges", "LogMap", "LinearMap", "ReciprocalMap", @@ -52,6 +54,8 @@ "ComboMap", "ActiveCells", "InjectActiveCells", + "InjectActiveFaces", + "InjectActiveEdges", "LogMap", "LinearMap", "ReciprocalMap", @@ -694,6 +698,14 @@ def test_linearity(): mesh3, mesh3.cell_centers[:, -1] < 0.75, ), + maps.InjectActiveFaces( + mesh3, + mesh3.faces[:, -1] < 0.75, + ), + maps.InjectActiveEdges( + mesh3, + mesh3.edges[:, -1] < 0.75, + ), maps.TileMap( mesh_tree, mesh_tree.cell_centers[:, -1] < 0.75, diff --git a/tests/base/test_mass_matrices.py b/tests/base/test_mass_matrices.py index 7dd1d82ebc..7bbb657fac 100644 --- a/tests/base/test_mass_matrices.py +++ 
b/tests/base/test_mass_matrices.py @@ -1,4 +1,9 @@ -from SimPEG.base import with_property_mass_matrices, BasePDESimulation +from SimPEG.base import ( + with_property_mass_matrices, + with_surface_property_mass_matrices, + with_line_property_mass_matrices, + BasePDESimulation, +) from SimPEG import props, maps import unittest import discretize @@ -13,19 +18,40 @@ # define a very simple class... @with_property_mass_matrices("sigma") @with_property_mass_matrices("mu") +@with_surface_property_mass_matrices("tau") +@with_line_property_mass_matrices("kappa") class SimpleSim(BasePDESimulation): sigma, sigmaMap, sigmaDeriv = props.Invertible("Electrical conductivity (S/m)") - + rho, rhoMap, rhoDeriv = props.Invertible("Electrical conductivity (S/m)") + props.Reciprocal(sigma, rho) mu, muMap, muDeriv = props.Invertible("Magnetic Permeability") + tau, tauMap, tauDeriv = props.Invertible("Face conductivity, conductance (S)") + kappa, kappaMap, kappaDeriv = props.Invertible( + "Edge conductivity, conductivity times area (Sm)" + ) def __init__( - self, mesh, survey=None, sigma=None, sigmaMap=None, mu=mu_0, muMap=None + self, + mesh, + survey=None, + sigma=None, + sigmaMap=None, + mu=mu_0, + muMap=None, + tau=None, + tauMap=None, + kappa=None, + kappaMap=None, ): super().__init__(mesh=mesh, survey=survey) self.sigma = sigma self.mu = mu + self.tau = tau + self.kappa = kappa self.sigmaMap = sigmaMap self.muMap = muMap + self.tauMap = tauMap + self.kappaMap = kappaMap @property def deleteTheseOnModelUpdate(self): @@ -35,6 +61,10 @@ def deleteTheseOnModelUpdate(self): toDelete = super().deleteTheseOnModelUpdate if self.sigmaMap is not None or self.rhoMap is not None: toDelete = toDelete + self._clear_on_sigma_update + if self.tauMap is not None: + toDelete = toDelete + self._clear_on_tau_update + if self.kappaMap is not None: + toDelete = toDelete + self._clear_on_kappa_update return toDelete @@ -787,6 +817,604 @@ def test_MfI_adjoint(self): np.testing.assert_allclose(yJv, vJty) 
+class TestSimSurfaceProperties(unittest.TestCase): + def setUp(self): + self.mesh = discretize.TensorMesh([5, 6, 7]) + + self.sim = SimpleSim(self.mesh, tauMap=maps.ExpMap()) + self.start_mod = np.log(1e-2 * np.ones(self.mesh.n_faces)) + np.random.randn( + self.mesh.n_faces + ) + + def test_zero_returns(self): + n_f = self.mesh.n_faces + n_e = self.mesh.n_edges + sim = self.sim + + v = np.random.rand(n_f) + u_f = np.random.rand(n_f) + u_e = np.random.rand(n_e) + + # Test zero return on u passed as Zero + assert sim._MfTauDeriv(Zero(), v).__class__ == Zero + assert sim._MeTauDeriv(Zero(), v).__class__ == Zero + assert sim._MfTauIDeriv(Zero(), v).__class__ == Zero + assert sim._MeTauIDeriv(Zero(), v).__class__ == Zero + + # Test zero return on v as Zero + assert sim._MfTauDeriv(u_f, Zero()).__class__ == Zero + assert sim._MeTauDeriv(u_e, Zero()).__class__ == Zero + assert sim._MfTauIDeriv(u_f, Zero()).__class__ == Zero + assert sim._MeTauIDeriv(u_e, Zero()).__class__ == Zero + + def test_forward_expected_shapes(self): + sim = self.sim + sim.model = self.start_mod + + n_f = self.mesh.n_faces + # n_c = self.mesh.n_cells + # if U.shape (n_f, ) + u = np.random.rand(n_f) + v = np.random.randn(n_f) + u2 = np.random.rand(n_f, 2) + v2 = np.random.randn(n_f, 4) + + # These cases should all return an array of shape (n_f, ) + # if V.shape (n_c, ) + out = sim._MfTauDeriv(u, v) + assert out.shape == (n_f,) + out = sim._MfTauDeriv(u, v[:, None]) + assert out.shape == (n_f,) + out = sim._MfTauDeriv(u[:, None], v) + assert out.shape == (n_f,) + out = sim._MfTauDeriv(u[:, None], v[:, None]) + assert out.shape == (n_f,) + + # now check passing multiple V's + out = sim._MfTauDeriv(u, v2) + assert out.shape == (n_f, 4) + out = sim._MfTauDeriv(u[:, None], v2) + assert out.shape == (n_f, 4) + + # also ensure it properly broadcasted the operation.... 
+ out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i] = sim._MfTauDeriv(u[:, None], v2[:, i]) + np.testing.assert_equal(out, out_2) + + # now check for multiple source polarizations + out = sim._MfTauDeriv(u2, v) + assert out.shape == (n_f, 2) + out = sim._MfTauDeriv(u2, v[:, None]) + assert out.shape == (n_f, 2) + + # and with multiple RHS + out = sim._MfTauDeriv(u2, v2) + assert out.shape == (n_f, v2.shape[1], 2) + + # and test broadcasting here... + out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i, :] = sim._MfTauDeriv(u2, v2[:, i]) + np.testing.assert_equal(out, out_2) + + # test None as v + UM = sim._MfTauDeriv(u) + np.testing.assert_allclose(UM @ v, sim._MfTauDeriv(u, v)) + + UM = sim._MfTauDeriv(u2) + np.testing.assert_allclose( + UM @ v, sim._MfTauDeriv(u2, v).reshape(-1, order="F") + ) + + def test_adjoint_expected_shapes(self): + sim = self.sim + sim.model = self.start_mod + + n_f = self.mesh.n_faces + # n_c = self.mesh.n_cells + + u = np.random.rand(n_f) + v = np.random.randn(n_f) + v2 = np.random.randn(n_f, 4) + u2 = np.random.rand(n_f, 2) + v2_2 = np.random.randn(n_f, 2) + v3 = np.random.rand(n_f, 4, 2) + + # These cases should all return an array of shape (n_c, ) + # if V.shape (n_f, ) + out = sim._MfTauDeriv(u, v, adjoint=True) + assert out.shape == (n_f,) + out = sim._MfTauDeriv(u, v[:, None], adjoint=True) + assert out.shape == (n_f,) + out = sim._MfTauDeriv(u[:, None], v, adjoint=True) + assert out.shape == (n_f,) + out = sim._MfTauDeriv(u[:, None], v[:, None], adjoint=True) + assert out.shape == (n_f,) + + # now check passing multiple V's + out = sim._MfTauDeriv(u, v2, adjoint=True) + assert out.shape == (n_f, 4) + out = sim._MfTauDeriv(u[:, None], v2, adjoint=True) + assert out.shape == (n_f, 4) + + # also ensure it properly broadcasted the operation.... 
+ out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i] = sim._MfTauDeriv(u, v2[:, i], adjoint=True) + np.testing.assert_equal(out, out_2) + + # now check for multiple source polarizations + out = sim._MfTauDeriv(u2, v2_2, adjoint=True) + assert out.shape == (n_f,) + out = sim._MfTauDeriv(u2, v2_2, adjoint=True) + assert out.shape == (n_f,) + + # and with multiple RHS + out = sim._MfTauDeriv(u2, v3, adjoint=True) + assert out.shape == (n_f, v3.shape[1]) + + # and test broadcasting here... + out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i] = sim._MfTauDeriv(u2, v3[:, i, :], adjoint=True) + np.testing.assert_equal(out, out_2) + + # test None as v + UMT = sim._MfTauDeriv(u, adjoint=True) + np.testing.assert_allclose(UMT @ v, sim._MfTauDeriv(u, v, adjoint=True)) + + UMT = sim._MfTauDeriv(u2, adjoint=True) + np.testing.assert_allclose( + UMT @ v2_2.reshape(-1, order="F"), sim._MfTauDeriv(u2, v2_2, adjoint=True) + ) + + def test_adjoint_opp_shapes(self): + sim = self.sim + sim.model = self.start_mod + + n_f = self.mesh.n_faces + # n_c = self.mesh.n_cells + + u = np.random.rand(n_f) + u2 = np.random.rand(n_f, 2) + + y = np.random.rand(n_f) + y2 = np.random.rand(n_f, 4) + + v = np.random.randn(n_f) + v2 = np.random.randn(n_f, 4) + v2_2 = np.random.randn(n_f, 2) + v3 = np.random.rand(n_f, 4, 2) + + # u1, y1 -> v1 + vJy = v @ sim._MfTauDeriv(u, y) + yJtv = y @ sim._MfTauDeriv(u, v, adjoint=True) + np.testing.assert_allclose(vJy, yJtv) + + # u1, y2 -> v2 + vJy = np.sum(v2 * sim._MfTauDeriv(u, y2)) + yJtv = np.sum(y2 * sim._MfTauDeriv(u, v2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y1 -> v2_2 + vJy = np.sum(v2_2 * sim._MfTauDeriv(u2, y)) + yJtv = np.sum(y * sim._MfTauDeriv(u2, v2_2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y2 -> v3 + vJy = np.sum(v3 * sim._MfTauDeriv(u2, y2)) + yJtv = np.sum(y2 * sim._MfTauDeriv(u2, v3, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # Also test 
Inverse opp, just to be sure... + # u1, y1 -> v1 + vJy = v @ sim._MfTauIDeriv(u, y) + yJtv = y @ sim._MfTauIDeriv(u, v, adjoint=True) + np.testing.assert_allclose(vJy, yJtv) + + # u1, y2 -> v2 + vJy = np.sum(v2 * sim._MfTauIDeriv(u, y2)) + yJtv = np.sum(y2 * sim._MfTauIDeriv(u, v2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y1 -> v2_2 + vJy = np.sum(v2_2 * sim._MfTauIDeriv(u2, y)) + yJtv = np.sum(y * sim._MfTauIDeriv(u2, v2_2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y2 -> v3 + vJy = np.sum(v3 * sim._MfTauIDeriv(u2, y2)) + yJtv = np.sum(y2 * sim._MfTauIDeriv(u2, v3, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + def test_Me_deriv(self): + u = np.random.randn(self.mesh.n_edges) + sim = self.sim + x0 = self.start_mod + + def f(x): + sim.model = x + d = sim._MeTau @ u + + def Jvec(v): + sim.model = x0 + return sim._MeTauDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + + def test_Mf_deriv(self): + u = np.random.randn(self.mesh.n_faces) + sim = self.sim + x0 = self.start_mod + + def f(x): + sim.model = x + d = sim._MfTau @ u + + def Jvec(v): + sim.model = x0 + return sim._MfTauDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + + def test_MeI_deriv(self): + u = np.random.randn(self.mesh.n_edges) + sim = self.sim + x0 = self.start_mod + + def f(x): + sim.model = x + d = sim._MeTauI @ u + + def Jvec(v): + sim.model = x0 + return sim._MeTauIDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + + def test_MfI_deriv(self): + u = np.random.randn(self.mesh.n_faces) + sim = self.sim + x0 = self.start_mod + + def f(x): + sim.model = x + d = sim._MfTauI @ u + + def Jvec(v): + sim.model = x0 + return sim._MfTauIDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + + def test_Me_adjoint(self): + n_items = self.mesh.n_edges + u = np.random.randn(n_items) + sim = 
self.sim + sim.model = self.start_mod + + v = np.random.randn(self.mesh.n_faces) + y = np.random.randn(n_items) + + yJv = y @ sim._MeTauDeriv(u, v) + vJty = v @ sim._MeTauDeriv(u, y, adjoint=True) + np.testing.assert_allclose(yJv, vJty) + + def test_Mf_adjoint(self): + n_items = self.mesh.n_faces + u = np.random.randn(n_items) + sim = self.sim + sim.model = self.start_mod + + v = np.random.randn(self.mesh.n_faces) + y = np.random.randn(n_items) + + yJv = y @ sim._MfTauDeriv(u, v) + vJty = v @ sim._MfTauDeriv(u, y, adjoint=True) + np.testing.assert_allclose(yJv, vJty) + + def test_MeI_adjoint(self): + n_items = self.mesh.n_edges + u = np.random.randn(n_items) + sim = self.sim + sim.model = self.start_mod + + v = np.random.randn(self.mesh.n_faces) + y = np.random.randn(n_items) + + yJv = y @ sim._MeTauIDeriv(u, v) + vJty = v @ sim._MeTauIDeriv(u, y, adjoint=True) + np.testing.assert_allclose(yJv, vJty) + + def test_MfI_adjoint(self): + n_items = self.mesh.n_faces + u = np.random.randn(n_items) + sim = self.sim + sim.model = self.start_mod + + v = np.random.randn(self.mesh.n_faces) + y = np.random.randn(n_items) + + yJv = y @ sim._MfTauIDeriv(u, v) + vJty = v @ sim._MfTauIDeriv(u, y, adjoint=True) + np.testing.assert_allclose(yJv, vJty) + + +class TestSimEdgeProperties(unittest.TestCase): + def setUp(self): + self.mesh = discretize.TensorMesh([5, 6, 7]) + + self.sim = SimpleSim(self.mesh, kappaMap=maps.ExpMap()) + self.start_mod = np.log(1e-2 * np.ones(self.mesh.n_edges)) + np.random.randn( + self.mesh.n_edges + ) + + def test_zero_returns(self): + n_e = self.mesh.n_edges + sim = self.sim + + v = np.random.rand(n_e) + u_e = np.random.rand(n_e) + + # Test zero return on u passed as Zero + assert sim._MeKappaDeriv(Zero(), v).__class__ == Zero + assert sim._MeKappaIDeriv(Zero(), v).__class__ == Zero + + # Test zero return on v as Zero + assert sim._MeKappaDeriv(u_e, Zero()).__class__ == Zero + assert sim._MeKappaIDeriv(u_e, Zero()).__class__ == Zero + + def 
test_forward_expected_shapes(self): + sim = self.sim + sim.model = self.start_mod + + n_e = self.mesh.n_edges + # n_c = self.mesh.n_cells + # if U.shape (n_f, ) + u = np.random.rand(n_e) + v = np.random.randn(n_e) + u2 = np.random.rand(n_e, 2) + v2 = np.random.randn(n_e, 4) + + # These cases should all return an array of shape (n_f, ) + # if V.shape (n_c, ) + out = sim._MeKappaDeriv(u, v) + assert out.shape == (n_e,) + out = sim._MeKappaDeriv(u, v[:, None]) + assert out.shape == (n_e,) + out = sim._MeKappaDeriv(u[:, None], v) + assert out.shape == (n_e,) + out = sim._MeKappaDeriv(u[:, None], v[:, None]) + assert out.shape == (n_e,) + + # now check passing multiple V's + out = sim._MeKappaDeriv(u, v2) + assert out.shape == (n_e, 4) + out = sim._MeKappaDeriv(u[:, None], v2) + assert out.shape == (n_e, 4) + + # also ensure it properly broadcasted the operation.... + out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i] = sim._MeKappaDeriv(u[:, None], v2[:, i]) + np.testing.assert_equal(out, out_2) + + # now check for multiple source polarizations + out = sim._MeKappaDeriv(u2, v) + assert out.shape == (n_e, 2) + out = sim._MeKappaDeriv(u2, v[:, None]) + assert out.shape == (n_e, 2) + + # and with multiple RHS + out = sim._MeKappaDeriv(u2, v2) + assert out.shape == (n_e, v2.shape[1], 2) + + # and test broadcasting here... 
+ out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i, :] = sim._MeKappaDeriv(u2, v2[:, i]) + np.testing.assert_equal(out, out_2) + + # test None as v + UM = sim._MeKappaDeriv(u) + np.testing.assert_allclose(UM @ v, sim._MeKappaDeriv(u, v)) + + UM = sim._MeKappaDeriv(u2) + np.testing.assert_allclose( + UM @ v, sim._MeKappaDeriv(u2, v).reshape(-1, order="F") + ) + + def test_adjoint_expected_shapes(self): + sim = self.sim + sim.model = self.start_mod + + n_e = self.mesh.n_edges + # n_c = self.mesh.n_cells + + u = np.random.rand(n_e) + v = np.random.randn(n_e) + v2 = np.random.randn(n_e, 4) + u2 = np.random.rand(n_e, 2) + v2_2 = np.random.randn(n_e, 2) + v3 = np.random.rand(n_e, 4, 2) + + # These cases should all return an array of shape (n_c, ) + # if V.shape (n_f, ) + out = sim._MeKappaDeriv(u, v, adjoint=True) + assert out.shape == (n_e,) + out = sim._MeKappaDeriv(u, v[:, None], adjoint=True) + assert out.shape == (n_e,) + out = sim._MeKappaDeriv(u[:, None], v, adjoint=True) + assert out.shape == (n_e,) + out = sim._MeKappaDeriv(u[:, None], v[:, None], adjoint=True) + assert out.shape == (n_e,) + + # now check passing multiple V's + out = sim._MeKappaDeriv(u, v2, adjoint=True) + assert out.shape == (n_e, 4) + out = sim._MeKappaDeriv(u[:, None], v2, adjoint=True) + assert out.shape == (n_e, 4) + + # also ensure it properly broadcasted the operation.... + out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i] = sim._MeKappaDeriv(u, v2[:, i], adjoint=True) + np.testing.assert_equal(out, out_2) + + # now check for multiple source polarizations + out = sim._MeKappaDeriv(u2, v2_2, adjoint=True) + assert out.shape == (n_e,) + out = sim._MeKappaDeriv(u2, v2_2, adjoint=True) + assert out.shape == (n_e,) + + # and with multiple RHS + out = sim._MeKappaDeriv(u2, v3, adjoint=True) + assert out.shape == (n_e, v3.shape[1]) + + # and test broadcasting here... 
+ out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i] = sim._MeKappaDeriv(u2, v3[:, i, :], adjoint=True) + np.testing.assert_equal(out, out_2) + + # test None as v + UMT = sim._MeKappaDeriv(u, adjoint=True) + np.testing.assert_allclose(UMT @ v, sim._MeKappaDeriv(u, v, adjoint=True)) + + UMT = sim._MeKappaDeriv(u2, adjoint=True) + np.testing.assert_allclose( + UMT @ v2_2.reshape(-1, order="F"), sim._MeKappaDeriv(u2, v2_2, adjoint=True) + ) + + def test_adjoint_opp_shapes(self): + sim = self.sim + sim.model = self.start_mod + + n_e = self.mesh.n_edges + # n_c = self.mesh.n_cells + + u = np.random.rand(n_e) + u2 = np.random.rand(n_e, 2) + + y = np.random.rand(n_e) + y2 = np.random.rand(n_e, 4) + + v = np.random.randn(n_e) + v2 = np.random.randn(n_e, 4) + v2_2 = np.random.randn(n_e, 2) + v3 = np.random.rand(n_e, 4, 2) + + # u1, y1 -> v1 + vJy = v @ sim._MeKappaDeriv(u, y) + yJtv = y @ sim._MeKappaDeriv(u, v, adjoint=True) + np.testing.assert_allclose(vJy, yJtv) + + # u1, y2 -> v2 + vJy = np.sum(v2 * sim._MeKappaDeriv(u, y2)) + yJtv = np.sum(y2 * sim._MeKappaDeriv(u, v2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y1 -> v2_2 + vJy = np.sum(v2_2 * sim._MeKappaDeriv(u2, y)) + yJtv = np.sum(y * sim._MeKappaDeriv(u2, v2_2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y2 -> v3 + vJy = np.sum(v3 * sim._MeKappaDeriv(u2, y2)) + yJtv = np.sum(y2 * sim._MeKappaDeriv(u2, v3, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # Also test Inverse opp, just to be sure... 
+ # u1, y1 -> v1 + vJy = v @ sim._MeKappaIDeriv(u, y) + yJtv = y @ sim._MeKappaIDeriv(u, v, adjoint=True) + np.testing.assert_allclose(vJy, yJtv) + + # u1, y2 -> v2 + vJy = np.sum(v2 * sim._MeKappaIDeriv(u, y2)) + yJtv = np.sum(y2 * sim._MeKappaIDeriv(u, v2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y1 -> v2_2 + vJy = np.sum(v2_2 * sim._MeKappaIDeriv(u2, y)) + yJtv = np.sum(y * sim._MeKappaIDeriv(u2, v2_2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y2 -> v3 + vJy = np.sum(v3 * sim._MeKappaIDeriv(u2, y2)) + yJtv = np.sum(y2 * sim._MeKappaIDeriv(u2, v3, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + def test_Me_deriv(self): + u = np.random.randn(self.mesh.n_edges) + sim = self.sim + x0 = self.start_mod + + def f(x): + sim.model = x + d = sim._MeKappa @ u + + def Jvec(v): + sim.model = x0 + return sim._MeKappaDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + + def test_MeI_deriv(self): + u = np.random.randn(self.mesh.n_edges) + sim = self.sim + x0 = self.start_mod + + def f(x): + sim.model = x + d = sim._MeKappaI @ u + + def Jvec(v): + sim.model = x0 + return sim._MeKappaIDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + + def test_Me_adjoint(self): + n_items = self.mesh.n_edges + u = np.random.randn(n_items) + sim = self.sim + sim.model = self.start_mod + + v = np.random.randn(self.mesh.n_edges) + y = np.random.randn(n_items) + + yJv = y @ sim._MeKappaDeriv(u, v) + vJty = v @ sim._MeKappaDeriv(u, y, adjoint=True) + np.testing.assert_allclose(yJv, vJty) + + def test_MeI_adjoint(self): + n_items = self.mesh.n_edges + u = np.random.randn(n_items) + sim = self.sim + sim.model = self.start_mod + + v = np.random.randn(self.mesh.n_edges) + y = np.random.randn(n_items) + + yJv = y @ sim._MeKappaIDeriv(u, v) + vJty = v @ sim._MeKappaIDeriv(u, y, adjoint=True) + np.testing.assert_allclose(yJv, vJty) + + def test_bad_derivative_stash(): 
mesh = discretize.TensorMesh([5, 6, 7]) sim = SimpleSim(mesh, sigmaMap=maps.ExpMap()) @@ -806,3 +1434,7 @@ def test_bad_derivative_stash(): with pytest.raises(TypeError): sim.MeSigmaDeriv(u, v) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/base/test_model_utils.py b/tests/base/test_model_utils.py index d7d57f6d80..9b85dfad3e 100644 --- a/tests/base/test_model_utils.py +++ b/tests/base/test_model_utils.py @@ -1,12 +1,9 @@ import unittest import numpy as np - from discretize import TensorMesh -from SimPEG import ( - utils, -) +from SimPEG import utils class DepthWeightingTest(unittest.TestCase): @@ -72,5 +69,81 @@ def test_depth_weighting_2D(self): np.testing.assert_allclose(wz, wz2) +class DistancehWeightingTest(unittest.TestCase): + def test_distance_weighting_3D(self): + # Mesh + dh = 5.0 + hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] + hy = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] + hz = [(dh, 15)] + mesh = TensorMesh([hx, hy, hz], "CCN") + + actv = np.random.randint(0, 2, mesh.n_cells) == 1 + + reference_locs = ( + np.random.rand(1000, 3) * (mesh.nodes.max(axis=0) - mesh.nodes.min(axis=0)) + + mesh.origin + ) + + # distance weighting + wz_numpy = utils.distance_weighting( + mesh, reference_locs, active_cells=actv, exponent=3, engine="cdist" + ) + wz_numba = utils.distance_weighting( + mesh, reference_locs, active_cells=actv, exponent=3, engine="loop" + ) + np.testing.assert_allclose(wz_numpy, wz_numba) + + with self.assertRaises(ValueError): + utils.distance_weighting( + mesh, reference_locs, active_cells=actv, exponent=3, engine="test" + ) + + def test_distance_weighting_2D(self): + # Mesh + dh = 5.0 + hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] + hz = [(dh, 15)] + mesh = TensorMesh([hx, hz], "CN") + + actv = np.random.randint(0, 2, mesh.n_cells) == 1 + + reference_locs = ( + np.random.rand(1000, 2) * (mesh.nodes.max(axis=0) - mesh.nodes.min(axis=0)) + + mesh.origin + ) + + # distance weighting + wz_numpy = utils.distance_weighting( + 
mesh, reference_locs, active_cells=actv, exponent=3, engine="cdist" + ) + wz_numba = utils.distance_weighting( + mesh, reference_locs, active_cells=actv, exponent=3, engine="loop" + ) + np.testing.assert_allclose(wz_numpy, wz_numba) + + def test_distance_weighting_1D(self): + # Mesh + dh = 5.0 + hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] + mesh = TensorMesh([hx], "C") + + actv = np.random.randint(0, 2, mesh.n_cells) == 1 + + reference_locs = ( + np.random.rand(1000, 1) * (mesh.nodes.max(axis=0) - mesh.nodes.min(axis=0)) + + mesh.origin + ) + + # distance weighting + wz_numpy = utils.distance_weighting( + mesh, reference_locs, active_cells=actv, exponent=3, engine="cdist" + ) + wz_numba = utils.distance_weighting( + mesh, reference_locs, active_cells=actv, exponent=3, engine="loop" + ) + np.testing.assert_allclose(wz_numpy, wz_numba) + + if __name__ == "__main__": unittest.main() diff --git a/tests/base/test_utils.py b/tests/base/test_utils.py index 541ed1642b..5952b755fc 100644 --- a/tests/base/test_utils.py +++ b/tests/base/test_utils.py @@ -1,4 +1,5 @@ import unittest +import pytest import numpy as np import scipy.sparse as sp import os @@ -22,6 +23,7 @@ Counter, download, surface2ind_topo, + coterminal, ) import discretize @@ -342,5 +344,35 @@ def test_downloads(self): shutil.rmtree(os.path.expanduser("./test_url")) +class TestCoterminalAngle: + """ + Tests for the coterminal function + """ + + @pytest.mark.parametrize( + "coterminal_angle", + (1 / 4 * np.pi, 3 / 4 * np.pi, -3 / 4 * np.pi, -1 / 4 * np.pi), + ids=("pi/4", "3/4 pi", "-3/4 pi", "-pi/4"), + ) + def test_angles_in_quadrants(self, coterminal_angle): + """ + Test coterminal for angles in each quadrant + """ + angles = np.array([2 * n * np.pi + coterminal_angle for n in range(-3, 4)]) + np.testing.assert_allclose(coterminal(angles), coterminal_angle) + + @pytest.mark.parametrize( + "coterminal_angle", + (0, np.pi / 2, -np.pi, -np.pi / 2), + ids=("0", "pi/2", "-pi", "-pi/2"), + ) + def 
test_right_angles(self, coterminal_angle): + """ + Test coterminal for right angles + """ + angles = np.array([2 * n * np.pi + coterminal_angle for n in range(-3, 4)]) + np.testing.assert_allclose(coterminal(angles), coterminal_angle) + + if __name__ == "__main__": unittest.main() diff --git a/tests/em/em1d/test_EM1D_FD_jac_layers.py b/tests/em/em1d/test_EM1D_FD_jac_layers.py index 83c78f9758..20c331f295 100644 --- a/tests/em/em1d/test_EM1D_FD_jac_layers.py +++ b/tests/em/em1d/test_EM1D_FD_jac_layers.py @@ -148,7 +148,7 @@ def test_EM1DFDJtvec_Layers(self): np.log(np.ones(self.nlayers) * sigma_half), np.log(np.ones(self.nlayers) * 1.5 * mu_half), np.log(self.thicknesses) * 0.9, - np.log(0.5 * self.height), + np.log(self.height) * 1.5, ] resp_ini = self.sim.dpred(m_ini) dr = resp_ini - dobs @@ -307,7 +307,7 @@ def test_EM1DFDJtvec_Layers(self): np.log(np.ones(self.nlayers) * sigma_half), np.log(np.ones(self.nlayers) * 1.5 * mu_half), np.log(self.thicknesses) * 0.9, - np.log(0.5 * self.height), + np.log(self.height) * 1.5, ] resp_ini = self.sim.dpred(m_ini) dr = resp_ini - dobs diff --git a/tests/em/em1d/test_EM1D_TD_general_jac_layers.py b/tests/em/em1d/test_EM1D_TD_general_jac_layers.py index dd90a32a72..513600b080 100644 --- a/tests/em/em1d/test_EM1D_TD_general_jac_layers.py +++ b/tests/em/em1d/test_EM1D_TD_general_jac_layers.py @@ -23,7 +23,7 @@ def setUp(self): start_time=-0.01, peak_time=-0.005, off_time=0.0 ) - # Receiver list + # Receiver list # Define receivers at each location.
b_receiver = tdem.receivers.PointMagneticFluxDensity( diff --git a/tests/em/em1d/test_EM1D_TD_off_fwd.py b/tests/em/em1d/test_EM1D_TD_off_fwd.py index 78b45f61ab..24acd91504 100644 --- a/tests/em/em1d/test_EM1D_TD_off_fwd.py +++ b/tests/em/em1d/test_EM1D_TD_off_fwd.py @@ -91,7 +91,7 @@ def test_line_current_failures(self): rx_locs, times, orientation="z", use_source_receiver_offset=False ) src = tdem.sources.LineCurrent([rx], tx_locs) - survey = tdem.Survey(src) + survey = tdem.Survey([src]) with self.assertRaises(ValueError): tdem.Simulation1DLayered(survey) @@ -103,7 +103,7 @@ def test_line_current_failures(self): [2.5, 2.5, 0], ] src = tdem.sources.LineCurrent([rx], tx_locs) - survey = tdem.Survey(src) + survey = tdem.Survey([src]) tdem.Simulation1DLayered(survey) assert src.n_segments == 4 diff --git a/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py b/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py new file mode 100644 index 0000000000..0663fc3cc6 --- /dev/null +++ b/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py @@ -0,0 +1,142 @@ +from __future__ import print_function +import unittest +import numpy as np +import SimPEG.electromagnetics.frequency_domain as fdem +from SimPEG import maps, tests +from discretize import TensorMesh + +np.random.seed(41) + + +class STITCHED_EM1D_FD_Jacobian_Test_MagDipole(unittest.TestCase): + def setUp(self, parallel=False): + dz = 1 + geometric_factor = 1.1 + n_layer = 20 + thicknesses = dz * geometric_factor ** np.arange(n_layer - 1) + + frequencies = np.array([900, 7200, 56000], dtype=float) + n_sounding = 50 + dx = 20.0 + hx = np.ones(n_sounding) * dx + hz = np.r_[thicknesses, thicknesses[-1]] + + mesh = TensorMesh([hx, hz], x0="00") + + x = mesh.cell_centers_x + y = np.zeros_like(x) + z = np.ones_like(x) * 30.0 + receiver_locations = np.c_[x + 8.0, y, z] + source_locations = np.c_[x, y, z] + topo = np.c_[x, y, z - 30.0].astype(float) + + source_list = [] + + for i_sounding in range(0, n_sounding): + source_location = 
mkvc(source_locations[i_sounding, :]) + receiver_location = mkvc(receiver_locations[i_sounding, :]) + receiver_list = [] + receiver_list.append( + fdem.receivers.PointMagneticFieldSecondary( + receiver_location, orientation="z", component="both" + ) + ) + + for frequency in frequencies: + src = fdem.sources.MagDipole( + receiver_list, + frequency, + source_location, + orientation="z", + i_sounding=i_sounding, + ) + source_list.append(src) + + survey = fdem.Survey(source_list) + wires = maps.Wires(("sigma", n_layer * n_sounding), ("h", n_sounding)) + sigmaMap = maps.ExpMap(nP=n_layer * n_sounding) * wires.sigma + hMap = maps.ExpMap(nP=n_sounding) * wires.h + + simulation = fdem.Simulation1DLayeredStitched( + survey=survey, + thicknesses=thicknesses, + sigmaMap=sigmaMap, + hMap=hMap, + topo=topo, + parallel=parallel, + n_cpu=2, + verbose=False, + ) + self.sim = simulation + self.mesh = mesh + + def test_EM1DFDJvec_Layers(self): + # Conductivity + inds = self.mesh.cell_centers[:, 1] < 25 + inds_1 = self.mesh.cell_centers[:, 1] < 50 + sigma = np.ones(self.mesh.n_cells) * 1.0 / 100.0 + sigma[inds_1] = 1.0 / 10.0 + sigma[inds] = 1.0 / 50.0 + sigma_em1d = sigma.reshape(self.mesh.vnC, order="F").flatten() + m_stitched = np.r_[ + np.log(sigma_em1d), np.ones(self.sim.n_sounding) * np.log(30.0) + ] + + def fwdfun(m): + resp = self.sim.dpred(m) + return resp + + def jacfun(m, dm): + Jvec = self.sim.Jvec(m, dm) + return Jvec + + def derChk(m): + return [fwdfun(m), lambda mx: jacfun(m, mx)] + + dm = m_stitched * 0.5 + + passed = tests.check_derivative( + derChk, m_stitched, num=4, dx=dm, plotIt=False, eps=1e-15 + ) + self.assertTrue(passed) + if passed: + print("STITCHED EM1DFM MagDipole Jvec test works") + + def test_EM1DFDJtvec_Layers(self): + # Conductivity + inds = self.mesh.cell_centers[:, 1] < 25 + inds_1 = self.mesh.cell_centers[:, 1] < 50 + sigma = np.ones(self.mesh.n_cells) * 1.0 / 100.0 + sigma[inds_1] = 1.0 / 10.0 + sigma[inds] = 1.0 / 50.0 + sigma_em1d = 
sigma.reshape(self.mesh.vnC, order="F").flatten() + m_stitched = np.r_[ + np.log(sigma_em1d), np.ones(self.sim.n_sounding) * np.log(30.0) + ] + + dobs = self.sim.dpred(m_stitched) + + m_ini = np.r_[ + np.log(1.0 / 100.0) * np.ones(self.mesh.n_cells), + np.ones(self.sim.n_sounding) * np.log(30.0) * 1.5, + ] + resp_ini = self.sim.dpred(m_ini) + dr = resp_ini - dobs + + def misfit(m, dobs): + dpred = self.sim.dpred(m) + misfit = 0.5 * np.linalg.norm(dpred - dobs) ** 2 + dmisfit = self.sim.Jtvec(m, dr) + return misfit, dmisfit + + def derChk(m): + return misfit(m, dobs) + + passed = tests.check_derivative(derChk, m_ini, num=4, plotIt=False, eps=1e-27) + self.assertTrue(passed) + if passed: + print("STITCHED EM1DFM MagDipole Jtvec test works") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py b/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py new file mode 100644 index 0000000000..8a93fba6e0 --- /dev/null +++ b/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py @@ -0,0 +1,186 @@ +from __future__ import print_function +import unittest +import numpy as np +import SimPEG.electromagnetics.time_domain as tdem +from SimPEG import maps, tests +from discretize import TensorMesh +from pymatsolver import PardisoSolver + +np.random.seed(41) + + +class STITCHED_EM1D_TD_Jacobian_Test_MagDipole(unittest.TestCase): + def setUp(self, parallel=False): + times_hm = np.logspace(-6, -3, 31) + times_lm = np.logspace(-5, -2, 31) + + # Waveforms + waveform_hm = tdem.sources.TriangularWaveform( + start_time=-0.01, peak_time=-0.005, off_time=0.0 + ) + waveform_lm = tdem.sources.TriangularWaveform( + start_time=-0.01, peak_time=-0.0001, off_time=0.0 + ) + + dz = 1 + geometric_factor = 1.1 + n_layer = 20 + thicknesses = dz * geometric_factor ** np.arange(n_layer - 1) + n_layer = 20 + + n_sounding = 5 + dx = 20.0 + hx = np.ones(n_sounding) * dx + hz = np.r_[thicknesses, thicknesses[-1]] + mesh = TensorMesh([hx, hz], x0="00") + inds = 
mesh.cell_centers[:, 1] < 25 + inds_1 = mesh.cell_centers[:, 1] < 50 + sigma = np.ones(mesh.nC) * 1.0 / 100.0 + sigma[inds_1] = 1.0 / 10.0 + sigma[inds] = 1.0 / 50.0 + + x = mesh.cell_centers_x + y = np.zeros_like(x) + z = np.ones_like(x) * 30.0 + source_locations = np.c_[x, y, z] + source_orientation = "z" + + receiver_offset_r = 13.25 + receiver_offset_z = 2.0 + + receiver_locations = np.c_[ + x + receiver_offset_r, + np.zeros(n_sounding), + 30.0 * np.ones(n_sounding) + receiver_offset_z, + ] + receiver_orientation = "z" # "x", "y" or "z" + + topo = np.c_[x, y, z - 30.0].astype(float) + + source_list = [] + + for i_sounding in range(0, n_sounding): + source_location = source_locations[i_sounding, :] + receiver_location = receiver_locations[i_sounding, :] + + # Receiver list + + # Define receivers at each location. + dbzdt_receiver_hm = tdem.receivers.PointMagneticFluxTimeDerivative( + receiver_location, times_hm, receiver_orientation + ) + dbzdt_receiver_lm = tdem.receivers.PointMagneticFluxTimeDerivative( + receiver_location, times_lm, receiver_orientation + ) + # Make a list containing all receivers even if just one + + # Must define the transmitter properties and associated receivers + source_list.append( + tdem.sources.MagDipole( + [dbzdt_receiver_hm], + location=source_location, + waveform=waveform_hm, + orientation=source_orientation, + i_sounding=i_sounding, + ) + ) + + source_list.append( + tdem.sources.MagDipole( + [dbzdt_receiver_lm], + location=source_location, + waveform=waveform_lm, + orientation=source_orientation, + i_sounding=i_sounding, + ) + ) + survey = tdem.Survey(source_list) + wires = maps.Wires(("sigma", n_layer * n_sounding), ("h", n_sounding)) + sigmaMap = maps.ExpMap(nP=n_layer * n_sounding) * wires.sigma + hMap = maps.ExpMap(nP=n_sounding) * wires.h + + simulation = tdem.Simulation1DLayeredStitched( + survey=survey, + thicknesses=thicknesses, + sigmaMap=sigmaMap, + hMap=hMap, + topo=topo, + parallel=False, + n_cpu=2, + verbose=False, + 
solver=PardisoSolver, + ) + + self.sim = simulation + self.mesh = mesh + + def test_EM1TDJvec_Layers(self): + # Conductivity + inds = self.mesh.cell_centers[:, 1] < 25 + inds_1 = self.mesh.cell_centers[:, 1] < 50 + sigma = np.ones(self.mesh.n_cells) * 1.0 / 100.0 + sigma[inds_1] = 1.0 / 10.0 + sigma[inds] = 1.0 / 50.0 + sigma_em1d = sigma.reshape(self.mesh.vnC, order="F").flatten() + m_stitched = np.r_[ + np.log(sigma_em1d), np.ones(self.sim.n_sounding) * np.log(30.0) + ] + + def fwdfun(m): + resp = self.sim.dpred(m) + return resp + + def jacfun(m, dm): + Jvec = self.sim.Jvec(m, dm) + return Jvec + + def derChk(m): + return [fwdfun(m), lambda mx: jacfun(m, mx)] + + dm = m_stitched * 0.5 + + passed = tests.check_derivative( + derChk, m_stitched, num=4, dx=dm, plotIt=False, eps=1e-15 + ) + self.assertTrue(passed) + if passed: + print("STITCHED EM1DFM MagDipole Jvec test works") + + def test_EM1TDJtvec_Layers(self): + # Conductivity + inds = self.mesh.cell_centers[:, 1] < 25 + inds_1 = self.mesh.cell_centers[:, 1] < 50 + sigma = np.ones(self.mesh.n_cells) * 1.0 / 100.0 + sigma[inds_1] = 1.0 / 10.0 + sigma[inds] = 1.0 / 50.0 + sigma_em1d = sigma.reshape(self.mesh.vnC, order="F").flatten() + m_stitched = np.r_[ + np.log(sigma_em1d), np.ones(self.sim.n_sounding) * np.log(30.0) + ] + + dobs = self.sim.dpred(m_stitched) + + m_ini = np.r_[ + np.log(1.0 / 100.0) * np.ones(self.mesh.n_cells), + np.ones(self.sim.n_sounding) * np.log(30.0) * 1.5, + ] + resp_ini = self.sim.dpred(m_ini) + dr = resp_ini - dobs + + def misfit(m, dobs): + dpred = self.sim.dpred(m) + misfit = 0.5 * np.linalg.norm(dpred - dobs) ** 2 + dmisfit = self.sim.Jtvec(m, dr) + return misfit, dmisfit + + def derChk(m): + return misfit(m, dobs) + + passed = tests.check_derivative(derChk, m_ini, num=4, plotIt=False, eps=1e-27) + self.assertTrue(passed) + if passed: + print("STITCHED EM1DFM MagDipole Jtvec test works") + + +if __name__ == "__main__": + unittest.main() diff --git 
a/tests/em/fdem/forward/test_FDEM_analytic_edge_face_conductivities.py b/tests/em/fdem/forward/test_FDEM_analytic_edge_face_conductivities.py new file mode 100644 index 0000000000..641cc442bc --- /dev/null +++ b/tests/em/fdem/forward/test_FDEM_analytic_edge_face_conductivities.py @@ -0,0 +1,246 @@ +import unittest + +import discretize +import numpy as np +from scipy.constants import mu_0 +from SimPEG import maps +from SimPEG.electromagnetics import frequency_domain as fdem + + +def analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="CYL", + formulation="ElectricField", + rx_type="MagneticFluxDensity", + orientation="Z", + bounds=None, + plotIt=False, +): + # Some static parameters + loop_radius = np.pi**-0.5 + receiver_location = np.c_[12.0, 0.0, 1.0] + source_location = np.r_[0.0, 0.0, 1.0] + frequencies = np.logspace(2, 3, 2) + + layer_depth = 40.0 + layer_thickness = 0.1 + layer_conductivity = 100 + background_conductivity = 2.5e-3 + + tau = layer_thickness * layer_conductivity + + # 1D LAYER MODEL + thicknesses = np.array([layer_depth - layer_thickness / 2, layer_thickness]) + n_layer = len(thicknesses) + 1 + + sigma_1d = background_conductivity * np.ones(n_layer) + sigma_1d[1] = layer_conductivity + + sigma_map_1d = maps.IdentityMap(nP=n_layer) + + # 3D LAYER MODEL + if mesh_type == "CYL": + hr = [(2.0, 120), (2.0, 25, 1.3)] + hz = [(2.0, 25, -1.3), (2.0, 200), (2.0, 25, 1.3)] + + mesh = discretize.CylindricalMesh([hr, 1, hz], x0="00C") + + ind = np.where(mesh.h[2] == np.min(mesh.h[2]))[0] + ind = ind[int(len(ind) / 2)] + + mesh.origin = mesh.origin - np.r_[0.0, 0.0, mesh.nodes_z[ind] - 24] + + elif mesh_type == "TREE": + dh = 2.5 # base cell width + dom_width = 8000.0 # domain width + nbc = 2 ** int( + np.round(np.log(dom_width / dh) / np.log(2.0)) + ) # num. 
base cells + + h = [(dh, nbc)] + mesh = discretize.TreeMesh([h, h, h], x0="CCC") + mesh.refine_points( + np.reshape(source_location, (1, 3)), + level=-1, + padding_cells_by_level=[8, 4, 4, 4], + finalize=False, + ) + x0s = np.vstack([ii * np.c_[-60, -60, -60] for ii in range(1, 5)]) + x1s = np.vstack([ii * np.c_[60, 60, 10] for ii in range(1, 5)]) + + mesh.refine_box(x0s, x1s, levels=[-2, -3, -4, -5], finalize=False) + mesh.finalize() + + sigma_3d = 1e-8 * np.ones(mesh.nC) + sigma_3d[mesh.cell_centers[:, -1] < 0.0] = background_conductivity + + tau_3d = np.zeros(mesh.nF) + tau_3d[np.isclose(mesh.faces[:, -1], -layer_depth)] = tau + tau_map = maps.IdentityMap(nP=mesh.n_faces) + + # DEFINE SURVEY + rx_list = [ + getattr(fdem.receivers, "Point{}Secondary".format(rx_type))( + receiver_location, component="real", orientation=orientation + ), + getattr(fdem.receivers, "Point{}Secondary".format(rx_type))( + receiver_location, component="imag", orientation=orientation + ), + ] + + # 1D SURVEY AND SIMULATION + src_1d = [ + fdem.sources.MagDipole( + rx_list, f, location=np.r_[0.0, 0.0, 1.0], orientation=orientation + ) + for f in frequencies + ] + survey_1d = fdem.Survey(src_1d) + + sim_1d = fdem.Simulation1DLayered( + survey=survey_1d, + thicknesses=thicknesses, + sigmaMap=sigma_map_1d, + ) + + # 3D SURVEY AND SIMULATION + if mesh_type == "CYL": + src_3d = [ + fdem.sources.CircularLoop( + rx_list, + f, + radius=loop_radius, + location=source_location, + ) + for f in frequencies + ] + else: + src_3d = [ + fdem.sources.MagDipole( + rx_list, + f, + location=source_location, + orientation=orientation, + ) + for f in frequencies + ] + + survey_3d = fdem.Survey(src_3d) + + # DEFINE THE SIMULATIONS + if formulation == "MagneticFluxDensity": + sim_3d = fdem.simulation.Simulation3DMagneticFluxDensityFaceEdgeConductivity( + mesh=mesh, survey=survey_3d, sigma=sigma_3d, tauMap=tau_map + ) + else: + sim_3d = fdem.simulation.Simulation3DElectricFieldFaceEdgeConductivity( + mesh=mesh, 
survey=survey_3d, sigma=sigma_3d, tauMap=tau_map + ) + + # COMPUTE SOLUTIONS + analytic_solution = mu_0 * sim_1d.dpred(sigma_1d) # ALWAYS RETURNS H-FIELD + numeric_solution = sim_3d.dpred(tau_3d) + + # print(analytic_solution) + # print(numeric_solution) + + diff = np.linalg.norm( + np.abs(numeric_solution - analytic_solution) + ) / np.linalg.norm(np.abs(analytic_solution)) + + print( + " |bz_ana| = {ana} |bz_num| = {num} |bz_ana-bz_num| = {diff}".format( + ana=np.linalg.norm(analytic_solution), + num=np.linalg.norm(numeric_solution), + diff=np.linalg.norm(analytic_solution - numeric_solution), + ) + ) + print("Difference: {}".format(diff)) + + return diff + + +class LayerConductanceTests(unittest.TestCase): + # Compares analytic 1D layered Earth solution to a plate of equivalent + # conductance. + + def test_tree_Bform_magdipole_b_x(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="TREE", + formulation="MagneticFluxDensity", + rx_type="MagneticFluxDensity", + orientation="X", + bounds=None, + plotIt=False, + ) + < 0.04 + ) + + def test_tree_Bform_magdipole_b_z(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="TREE", + formulation="MagneticFluxDensity", + rx_type="MagneticFluxDensity", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.04 + ) + + def test_cyl_Bform_loop_b_z(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="CYL", + formulation="MagneticFluxDensity", + rx_type="MagneticFluxDensity", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.01 + ) + + def test_tree_Eform_magdipole_b_x(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="TREE", + formulation="ElectricField", + rx_type="MagneticFluxDensity", + orientation="X", + bounds=None, + plotIt=False, + ) + < 0.04 + ) + + def test_tree_Eform_magdipole_b_z(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + 
mesh_type="TREE", + formulation="ElectricField", + rx_type="MagneticFluxDensity", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.04 + ) + + def test_cyl_Eform_loop_b_z(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="CYL", + formulation="ElectricField", + rx_type="MagneticFluxDensity", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.01 + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/em/fdem/inverse/adjoint/test_FDEM_adjointEB.py b/tests/em/fdem/inverse/adjoint/test_FDEM_adjointEB.py index 814cefebaa..889b959d81 100644 --- a/tests/em/fdem/inverse/adjoint/test_FDEM_adjointEB.py +++ b/tests/em/fdem/inverse/adjoint/test_FDEM_adjointEB.py @@ -1,7 +1,10 @@ import unittest import numpy as np from scipy.constants import mu_0 -from SimPEG.electromagnetics.utils.testing_utils import getFDEMProblem +from SimPEG.electromagnetics.utils.testing_utils import ( + getFDEMProblem, + getFDEMProblem_FaceEdgeConductivity, +) testE = True testB = True @@ -18,12 +21,17 @@ SrcList = ["RawVec", "MagDipole"] # or 'MAgDipole_Bfield', 'CircularLoop', 'RawVec' -def adjointTest(fdemType, comp): - prb = getFDEMProblem(fdemType, comp, SrcList, freq) +def adjointTest(fdemType, comp, sigma_only=True): + if sigma_only: + prb = getFDEMProblem(fdemType, comp, SrcList, freq) + else: + prb = getFDEMProblem_FaceEdgeConductivity(fdemType, comp, SrcList, freq) # prb.solverOpts = dict(check_accuracy=True) print("Adjoint {0!s} formulation - {1!s}".format(fdemType, comp)) - m = np.log(np.ones(prb.sigmaMap.nP) * CONDUCTIVITY) + m = np.log( + np.ones(prb.sigmaMap.nP) * CONDUCTIVITY + ) # works for sigma_only and sigma, tau, kappa mu = np.ones(prb.mesh.nC) * MU if addrandoms is True: @@ -36,7 +44,7 @@ def adjointTest(fdemType, comp): u = prb.fields(m) v = np.random.rand(survey.nD) - w = np.random.rand(prb.mesh.nC) + w = np.random.rand(prb.sigmaMap.nP) # works for sigma_only and sigma, tau, kappa vJw = v.dot(prb.Jvec(m, w, 
u)) wJtv = w.dot(prb.Jtvec(m, v, u)) @@ -47,7 +55,7 @@ def adjointTest(fdemType, comp): class FDEM_AdjointTests(unittest.TestCase): if testE: - + # SIGMA ONLY def test_Jtvec_adjointTest_exr_Eform(self): self.assertTrue(adjointTest("e", ["ElectricField", "x", "r"])) @@ -120,6 +128,79 @@ def test_Jtvec_adjointTest_hyi_Eform(self): def test_Jtvec_adjointTest_hzi_Eform(self): self.assertTrue(adjointTest("e", ["MagneticField", "z", "i"])) + # FACE EDGE CONDUCTIVITY + def test_Jtvec_adjointTest_exr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["ElectricField", "x", "r"], False)) + + def test_Jtvec_adjointTest_eyr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["ElectricField", "y", "r"], False)) + + def test_Jtvec_adjointTest_ezr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["ElectricField", "z", "r"], False)) + + def test_Jtvec_adjointTest_exi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["ElectricField", "x", "i"], False)) + + def test_Jtvec_adjointTest_eyi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["ElectricField", "y", "i"], False)) + + def test_Jtvec_adjointTest_ezi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["ElectricField", "z", "i"], False)) + + def test_Jtvec_adjointTest_bxr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "x", "r"], False)) + + def test_Jtvec_adjointTest_byr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "y", "r"], False)) + + def test_Jtvec_adjointTest_bzr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "z", "r"], False)) + + def test_Jtvec_adjointTest_bxi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "x", "i"], False)) + + def test_Jtvec_adjointTest_byi_Eform_FaceEdgeConductivity(self): + 
self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "y", "i"], False)) + + def test_Jtvec_adjointTest_bzi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "z", "i"], False)) + + def test_Jtvec_adjointTest_jxr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["CurrentDensity", "x", "r"], False)) + + def test_Jtvec_adjointTest_jyr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["CurrentDensity", "y", "r"], False)) + + def test_Jtvec_adjointTest_jzr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["CurrentDensity", "z", "r"], False)) + + def test_Jtvec_adjointTest_jxi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["CurrentDensity", "x", "i"], False)) + + def test_Jtvec_adjointTest_jyi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["CurrentDensity", "y", "i"], False)) + + def test_Jtvec_adjointTest_jzi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["CurrentDensity", "z", "i"], False)) + + def test_Jtvec_adjointTest_hxr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticField", "x", "r"], False)) + + def test_Jtvec_adjointTest_hyr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticField", "y", "r"], False)) + + def test_Jtvec_adjointTest_hzr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticField", "z", "r"], False)) + + def test_Jtvec_adjointTest_hxi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticField", "x", "i"], False)) + + def test_Jtvec_adjointTest_hyi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticField", "y", "i"], False)) + + def test_Jtvec_adjointTest_hzi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticField", "z", "i"], False)) + if testB: def test_Jtvec_adjointTest_exr_Bform(self): @@ -193,3 +274,80 @@ 
def test_Jtvec_adjointTest_hyi_Bform(self): def test_Jtvec_adjointTest_hzi_Bform(self): self.assertTrue(adjointTest("b", ["MagneticField", "z", "i"])) + + # FACE EDGE CONDUCTIVITY + def test_Jtvec_adjointTest_exr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["ElectricField", "x", "r"], False)) + + def test_Jtvec_adjointTest_eyr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["ElectricField", "y", "r"], False)) + + def test_Jtvec_adjointTest_ezr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["ElectricField", "z", "r"], False)) + + def test_Jtvec_adjointTest_exi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["ElectricField", "x", "i"], False)) + + def test_Jtvec_adjointTest_eyi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["ElectricField", "y", "i"], False)) + + def test_Jtvec_adjointTest_ezi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["ElectricField", "z", "i"], False)) + + def test_Jtvec_adjointTest_bxr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "x", "r"], False)) + + def test_Jtvec_adjointTest_byr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "y", "r"], False)) + + def test_Jtvec_adjointTest_bzr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "z", "r"], False)) + + def test_Jtvec_adjointTest_bxi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "x", "i"], False)) + + def test_Jtvec_adjointTest_byi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "y", "i"], False)) + + def test_Jtvec_adjointTest_bzi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "z", "i"], False)) + + def test_Jtvec_adjointTest_jxr_Bform_FaceEdgeConductivity(self): + 
self.assertTrue(adjointTest("b", ["CurrentDensity", "x", "r"], False)) + + def test_Jtvec_adjointTest_jyr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["CurrentDensity", "y", "r"], False)) + + def test_Jtvec_adjointTest_jzr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["CurrentDensity", "z", "r"], False)) + + def test_Jtvec_adjointTest_jxi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["CurrentDensity", "x", "i"], False)) + + def test_Jtvec_adjointTest_jyi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["CurrentDensity", "y", "i"], False)) + + def test_Jtvec_adjointTest_jzi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["CurrentDensity", "z", "i"], False)) + + def test_Jtvec_adjointTest_hxr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticField", "x", "r"], False)) + + def test_Jtvec_adjointTest_hyr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticField", "y", "r"], False)) + + def test_Jtvec_adjointTest_hzr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticField", "z", "r"], False)) + + def test_Jtvec_adjointTest_hxi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticField", "x", "i"], False)) + + def test_Jtvec_adjointTest_hyi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticField", "y", "i"], False)) + + def test_Jtvec_adjointTest_hzi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticField", "z", "i"], False)) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/em/fdem/inverse/derivs/test_FDEM_derivs.py b/tests/em/fdem/inverse/derivs/test_FDEM_derivs.py index dc02014c59..996352d2d0 100644 --- a/tests/em/fdem/inverse/derivs/test_FDEM_derivs.py +++ b/tests/em/fdem/inverse/derivs/test_FDEM_derivs.py @@ -2,7 +2,10 @@ import numpy as np from SimPEG import tests 
from scipy.constants import mu_0 -from SimPEG.electromagnetics.utils.testing_utils import getFDEMProblem +from SimPEG.electromagnetics.utils.testing_utils import ( + getFDEMProblem, + getFDEMProblem_FaceEdgeConductivity, +) testE = False testB = True @@ -27,12 +30,16 @@ ] # or 'MAgDipole_Bfield', 'CircularLoop', 'RawVec' -def derivTest(fdemType, comp, src): - prb = getFDEMProblem(fdemType, comp, SrcType, freq) +def derivTest(fdemType, comp, src, sigma_only=True): + if sigma_only: + prb = getFDEMProblem(fdemType, comp, SrcType, freq) + else: + prb = getFDEMProblem_FaceEdgeConductivity(fdemType, comp, SrcType, freq) # prb.solverOpts = dict(check_accuracy=True) print(f"{fdemType} formulation {src} - {comp}") - x0 = np.log(np.ones(prb.sigmaMap.nP) * CONDUCTIVITY) + + x0 = np.log(np.ones(prb.sigmaMap.nP) * CONDUCTIVITY) # should work # mu = np.log(np.ones(prb.mesh.nC)*MU) if addrandoms is True: @@ -82,6 +89,58 @@ def test_Jvec_h_Eform(self): derivTest("e", ["MagneticField", orientation, comp], src) ) + def test_Jvec_e_Eform_FaceEdgeConductivity(self): + for src in SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest( + "e", + ["ElectricField", orientation, comp], + src, + sigma_only=False, + ) + ) + + def test_Jvec_b_Eform_FaceEdgeConductivity(self): + for src in SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest( + "e", + ["MagneticFluxDensity", orientation, comp], + src, + sigma_only=False, + ) + ) + + def test_Jvec_j_Eform_FaceEdgeConductivity(self): + for src in SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest( + "e", + ["CurrentDensity", orientation, comp], + src, + sigma_only=False, + ) + ) + + def test_Jvec_h_Eform_FaceEdgeConductivity(self): + for src in SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest( + "e", + ["MagneticField", orientation, comp], + 
src, + sigma_only=False, + ) + ) + if testB: def test_Jvec_e_Bform(self): @@ -118,6 +177,58 @@ def test_Jvec_h_Bform(self): derivTest("b", ["MagneticField", orientation, comp], src) ) + def test_Jvec_e_Bform_FaceEdgeConductivity(self): + for src in SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest( + "b", + ["ElectricField", orientation, comp], + src, + sigma_only=False, + ) + ) + + def test_Jvec_b_Bform_FaceEdgeConductivity(self): + for src in SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest( + "b", + ["MagneticFluxDensity", orientation, comp], + src, + sigma_only=False, + ) + ) + + def test_Jvec_j_Bform_FaceEdgeConductivity(self): + for src in SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest( + "b", + ["CurrentDensity", orientation, comp], + src, + sigma_only=False, + ) + ) + + def test_Jvec_h_Bform_FaceEdgeConductivity(self): + for src in SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest( + "b", + ["MagneticField", orientation, comp], + src, + sigma_only=False, + ) + ) + if testJ: def test_Jvec_e_Jform(self): @@ -189,3 +300,7 @@ def test_Jvec_h_Hform(self): self.assertTrue( derivTest("h", ["MagneticField", orientation, comp], src) ) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/em/tdem/test_TDEM_DerivAdjoint.py b/tests/em/tdem/test_TDEM_DerivAdjoint.py index 1820044ca1..70abb10996 100644 --- a/tests/em/tdem/test_TDEM_DerivAdjoint.py +++ b/tests/em/tdem/test_TDEM_DerivAdjoint.py @@ -33,7 +33,8 @@ def get_mesh(): ) -def get_mapping(mesh): +def get_sigma_mapping(mesh): + # H AND J FORMULATIONS UNSTABLE WITHOUT SURJECT VERTICAL 1D active = mesh.cell_centers_z < 0.0 activeMap = maps.InjectActiveCells( mesh, active, np.log(1e-8), nC=mesh.shape_cells[2] @@ -41,9 +42,55 @@ def get_mapping(mesh): return maps.ExpMap(mesh) * 
maps.SurjectVertical1D(mesh) * activeMap -def get_prob(mesh, mapping, formulation, **kwargs): +def get_wire_mappings(mesh): + # active cells, faces + edges + active_cells = mesh.cell_centers[:, -1] < 0.0 + active_faces = mesh.faces[:, -1] < 0.0 + active_edges = mesh.edges[:, -1] < 0.0 + n_active_cells = np.sum(active_cells) + n_active_faces = np.sum(active_faces) + n_active_edges = np.sum(active_edges) + + # wire map + wire_map = maps.Wires( + ("log_sigma", n_active_cells), + ("log_tau", n_active_faces), + ("log_kappa", n_active_edges), + ) + + sigma_map = ( + maps.InjectActiveCells(mesh, active_cells, 1e-8) + * maps.ExpMap(nP=n_active_cells) + * wire_map.log_sigma + ) + tau_map = ( + maps.InjectActiveFaces(mesh, active_faces, 0) + * maps.ExpMap(nP=n_active_faces) + * wire_map.log_tau + ) + kappa_map = ( + maps.InjectActiveEdges(mesh, active_edges, 0) + * maps.ExpMap(nP=n_active_edges) + * wire_map.log_kappa + ) + + return sigma_map, tau_map, kappa_map + + +def get_prob(mesh, formulation, sigma_map, **kwargs): prb = getattr(tdem, "Simulation3D{}".format(formulation))( - mesh, sigmaMap=mapping, **kwargs + mesh, sigmaMap=sigma_map, **kwargs + ) + prb.time_steps = [(1e-05, 10), (5e-05, 10), (2.5e-4, 10)] + prb.solver = Solver + return prb + + +def get_face_edge_prob( + mesh, formulation, sigma_map=None, tau_map=None, kappa_map=None, **kwargs +): + prb = getattr(tdem, "Simulation3D{}".format(formulation))( + mesh, sigmaMap=sigma_map, tauMap=tau_map, kappaMap=kappa_map, **kwargs ) prb.time_steps = [(1e-05, 10), (5e-05, 10), (2.5e-4, 10)] prb.solver = Solver @@ -64,12 +111,42 @@ class Base_DerivAdjoint_Test(unittest.TestCase): def setUpClass(self): # create a prob where we will store the fields mesh = get_mesh() - mapping = get_mapping(mesh) self.survey = get_survey() - self.prob = get_prob(mesh, mapping, self.formulation, survey=self.survey) - self.m = np.log(1e-1) * np.ones(self.prob.sigmaMap.nP) + 1e-3 * np.random.randn( - self.prob.sigmaMap.nP - ) + + if 
"FaceEdgeConductivity" in self.formulation: + # sigma_map = get_sigma_mapping(mesh) + # self.prob = get_face_edge_prob(mesh, self.formulation, sigma_map=sigma_map, survey=self.survey) + # self.m = np.log(1e-1) * np.ones(self.prob.sigmaMap.nP) + 1e-3 * np.random.randn(self.prob.sigmaMap.nP) + + active_cells = mesh.cell_centers[:, -1] < 0.0 + active_faces = mesh.faces[:, -1] < 0.0 + active_edges = mesh.edges[:, -1] < 0.0 + + sigma_map, tau_map, kappa_map = get_wire_mappings(mesh) + self.prob = get_face_edge_prob( + mesh, + self.formulation, + sigma_map=sigma_map, + tau_map=tau_map, + kappa_map=kappa_map, + survey=self.survey, + ) + self.m = np.r_[ + np.log(1e-1) * np.ones(np.sum(active_cells)) + + 1e-3 * np.random.randn(np.sum(active_cells)), + np.log(10 * 1e-1) * np.ones(np.sum(active_faces)) + + 1e-3 * np.random.randn(np.sum(active_faces)), + np.log(100 * 1e-1) * np.ones(np.sum(active_edges)) + + 1e-3 * np.random.randn(np.sum(active_edges)), + ] + + else: + sigma_map = get_sigma_mapping(mesh) + self.prob = get_prob(mesh, self.formulation, sigma_map, survey=self.survey) + self.m = np.log(1e-1) * np.ones( + self.prob.sigmaMap.nP + ) + 1e-3 * np.random.randn(self.prob.sigmaMap.nP) + print("Solving Fields for problem {}".format(self.formulation)) t = time.time() self.fields = self.prob.fields(self.m) @@ -78,9 +155,25 @@ def setUpClass(self): # create a prob where will be re-computing fields at each jvec # iteration mesh = get_mesh() - mapping = get_mapping(mesh) self.surveyfwd = get_survey() - self.probfwd = get_prob(mesh, mapping, self.formulation, survey=self.surveyfwd) + if "FaceEdgeConductivity" in self.formulation: + # sigma_map = get_sigma_mapping(mesh) + # self.probfwd = get_face_edge_prob(mesh, self.formulation, sigma_map=sigma_map, survey=self.surveyfwd) + + sigma_map, tau_map, kappa_map = get_wire_mappings(mesh) + self.probfwd = get_face_edge_prob( + mesh, + self.formulation, + sigma_map=sigma_map, + tau_map=tau_map, + kappa_map=kappa_map, + 
survey=self.surveyfwd, + ) + else: + sigma_map = get_sigma_mapping(mesh) + self.probfwd = get_prob( + mesh, self.formulation, sigma_map, survey=self.surveyfwd + ) def get_rx(self, rxcomp): rxOffset = 15.0 @@ -176,6 +269,52 @@ def test_eDeriv_u_adjoint(self): class DerivAdjoint_E(Base_DerivAdjoint_Test): formulation = "ElectricField" + if testDeriv: + + def test_Jvec_e_dbxdt(self): + self.JvecTest("MagneticFluxTimeDerivativex") + + def test_Jvec_e_dbzdt(self): + self.JvecTest("MagneticFluxTimeDerivativez") + + def test_Jvec_e_ey(self): + self.JvecTest("ElectricFieldy") + + def test_Jvec_e_dhxdt(self): + self.JvecTest("MagneticFieldTimeDerivativex") + + def test_Jvec_e_dhzdt(self): + self.JvecTest("MagneticFieldTimeDerivativez") + + def test_Jvec_e_jy(self): + self.JvecTest("CurrentDensityy") + + if testAdjoint: + + def test_Jvec_adjoint_e_dbdtx(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") + + def test_Jvec_adjoint_e_dbdtz(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") + + def test_Jvec_adjoint_e_ey(self): + self.JvecVsJtvecTest("ElectricFieldy") + + def test_Jvec_adjoint_e_dhdtx(self): + self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") + + def test_Jvec_adjoint_e_dhdtz(self): + self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") + + def test_Jvec_adjoint_e_jy(self): + self.JvecVsJtvecTest("CurrentDensityy") + + pass + + +class DerivAdjoint_E_FaceEdgeConductivity(Base_DerivAdjoint_Test): + formulation = "ElectricFieldFaceEdgeConductivity" + if testDeriv: def test_Jvec_e_dbxdt(self): @@ -282,6 +421,74 @@ def test_Jvec_adjoint_b_jy(self): self.JvecVsJtvecTest("CurrentDensityy") +class DerivAdjoint_B_FaceEdgeConductivity(Base_DerivAdjoint_Test): + formulation = "MagneticFluxDensityFaceEdgeConductivity" + + if testDeriv: + + def test_Jvec_b_bx(self): + self.JvecTest("MagneticFluxDensityx") + + def test_Jvec_b_bz(self): + self.JvecTest("MagneticFluxDensityz") + + def test_Jvec_b_dbdtx(self): + self.JvecTest("MagneticFluxTimeDerivativex") 
+ + def test_Jvec_b_dbdtz(self): + self.JvecTest("MagneticFluxTimeDerivativez") + + def test_Jvec_b_ey(self): + self.JvecTest("ElectricFieldy") + + def test_Jvec_b_hx(self): + self.JvecTest("MagneticFieldx") + + def test_Jvec_b_hz(self): + self.JvecTest("MagneticFieldz") + + def test_Jvec_b_dhdtx(self): + self.JvecTest("MagneticFieldTimeDerivativex") + + def test_Jvec_b_dhdtz(self): + self.JvecTest("MagneticFieldTimeDerivativez") + + def test_Jvec_b_jy(self): + self.JvecTest("CurrentDensityy") + + if testAdjoint: + + def test_Jvec_adjoint_b_bx(self): + self.JvecVsJtvecTest("MagneticFluxDensityx") + + def test_Jvec_adjoint_b_bz(self): + self.JvecVsJtvecTest("MagneticFluxDensityz") + + def test_Jvec_adjoint_b_dbdtx(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") + + def test_Jvec_adjoint_b_dbdtz(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") + + def test_Jvec_adjoint_b_ey(self): + self.JvecVsJtvecTest("ElectricFieldy") + + def test_Jvec_adjoint_b_hx(self): + self.JvecVsJtvecTest("MagneticFieldx") + + def test_Jvec_adjoint_b_hz(self): + self.JvecVsJtvecTest("MagneticFieldz") + + def test_Jvec_adjoint_b_dhdtx(self): + self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") + + def test_Jvec_adjoint_b_dhdtz(self): + self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") + + def test_Jvec_adjoint_b_jy(self): + self.JvecVsJtvecTest("CurrentDensityy") + + class DerivAdjoint_H(Base_DerivAdjoint_Test): formulation = "MagneticField" @@ -392,3 +599,7 @@ def test_Jvec_adjoint_j_dbdtx(self): def test_Jvec_adjoint_j_dbdtz(self): self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/em/tdem/test_TDEM_forward_Analytic.py b/tests/em/tdem/test_TDEM_forward_Analytic.py index 9594e3de86..6227c06cf5 100644 --- a/tests/em/tdem/test_TDEM_forward_Analytic.py +++ b/tests/em/tdem/test_TDEM_forward_Analytic.py @@ -311,6 +311,168 @@ def analytic_halfspace_mag_dipole_comparison( return log10diff +def 
analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="CYL", + rx_type="MagneticFluxTimeDerivative", + orientation="Z", + bounds=None, + plotIt=False, +): + # Some static parameters + PHI = np.linspace(0, 2 * np.pi, 21) + loop_radius = np.pi**-0.5 + receiver_location = np.c_[40.0, 0.0, 1.0] + source_location = np.r_[0.0, 0.0, 1.0] + + if orientation == "X": + source_nodes = np.c_[ + np.zeros_like(PHI), + loop_radius * np.cos(PHI), + 1.0 + loop_radius * np.sin(PHI), + ] + elif orientation == "Z": + source_nodes = np.c_[ + loop_radius * np.cos(PHI), loop_radius * np.sin(PHI), np.ones_like(PHI) + ] + + layer_depth = 24.0 + layer_thickness = 0.1 + layer_conductivity = 10.0 + background_conductivity = 2.5e-3 + + tau = layer_thickness * layer_conductivity + + if bounds is None: + bounds = [1e-5, 1e-3] + + # 1D LAYER MODEL + thicknesses = np.array([layer_depth - layer_thickness / 2, layer_thickness]) + n_layer = len(thicknesses) + 1 + + sigma_1d = background_conductivity * np.ones(n_layer) + sigma_1d[1] = layer_conductivity + + sigma_map_1d = maps.IdentityMap(nP=n_layer) + + # 3D LAYER MODEL + if mesh_type == "CYL": + cs, ncx, ncz, npad = 4.0, 40, 20, 20 + hx = [(cs, ncx), (cs, npad, 1.3)] + hz = [(cs, npad, -1.3), (cs, ncz), (cs, npad, 1.3)] + mesh = discretize.CylindricalMesh([hx, 1, hz], "00C") + + elif mesh_type == "TENSOR": + cs, nc, npad = 8.0, 14, 8 + hx = [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)] + hy = [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)] + hz = [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)] + mesh = discretize.TensorMesh([hx, hy, hz], "CCC") + + sigma_3d = 1e-8 * np.ones(mesh.nC) + sigma_3d[mesh.cell_centers[:, -1] < 0.0] = background_conductivity + + tau_3d = np.zeros(mesh.nF) + tau_3d[np.isclose(mesh.faces[:, -1], -layer_depth)] = tau + tau_map = maps.IdentityMap(nP=mesh.n_faces) + + # DEFINE SURVEY + times = np.logspace(-5, -4, 21) + rx = getattr(tdem.receivers, "Point{}".format(rx_type))( + receiver_location, times, 
orientation=orientation + ) + + # 1D SURVEY AND SIMULATION + src_1d = tdem.sources.MagDipole( + [rx], + location=np.r_[0.0, 0.0, 1.0], + orientation=orientation, + waveform=tdem.sources.StepOffWaveform(), + ) + survey_1d = tdem.Survey([src_1d]) + + sim_1d = tdem.Simulation1DLayered( + survey=survey_1d, + thicknesses=thicknesses, + sigmaMap=sigma_map_1d, + ) + + # 3D SURVEY AND SIMULATION + if mesh_type == "CYL": + src_3d = tdem.sources.CircularLoop( + [rx], + radius=loop_radius, + location=source_location, + waveform=tdem.sources.StepOffWaveform(), + ) + else: + if rx_type == "MagneticFluxDensity": + src_3d = tdem.sources.MagDipole( + [rx], + location=source_location, + orientation=orientation, + waveform=tdem.sources.StepOffWaveform(), + ) + else: + src_3d = tdem.sources.LineCurrent( + [rx], location=source_nodes, waveform=tdem.sources.StepOffWaveform() + ) + + survey_3d = tdem.Survey([src_3d]) + + # DEFINE THE SIMULATIONS + if rx_type == "MagneticFluxDensity": + sim_3d = tdem.simulation.Simulation3DMagneticFluxDensityFaceEdgeConductivity( + mesh=mesh, survey=survey_3d, sigma=sigma_3d, tauMap=tau_map + ) + else: + sim_3d = tdem.simulation.Simulation3DElectricFieldFaceEdgeConductivity( + mesh=mesh, survey=survey_3d, sigma=sigma_3d, tauMap=tau_map + ) + + sim_3d.time_steps = [ + (1e-06, 40), + (5e-06, 40), + (1e-05, 40), + (5e-05, 40), + (0.0001, 40), + (0.0005, 40), + ] + + # COMPUTE SOLUTIONS + analytic_solution = sim_1d.dpred(sigma_1d) + numeric_solution = sim_3d.dpred(tau_3d) + + ind = np.logical_and(rx.times > bounds[0], rx.times < bounds[1]) + log10diff = np.linalg.norm( + np.log10(np.abs(numeric_solution[ind])) + - np.log10(np.abs(analytic_solution[ind])) + ) / np.linalg.norm(np.log10(np.abs(analytic_solution[ind]))) + + print( + " |bz_ana| = {ana} |bz_num| = {num} |bz_ana-bz_num| = {diff}".format( + ana=np.linalg.norm(analytic_solution), + num=np.linalg.norm(numeric_solution), + diff=np.linalg.norm(analytic_solution - numeric_solution), + ) + ) + 
print("Difference: {}".format(log10diff)) + + if plotIt is True: + plt.loglog( + rx.times[numeric_solution > 0], + numeric_solution[numeric_solution > 0], + "r", + rx.times[numeric_solution < 0], + -numeric_solution[numeric_solution < 0], + "r--", + ) + plt.loglog(rx.times, abs(analytic_solution), "b*") + plt.title("{} Mesh, {}, {}-Component".format(mesh_type, rx_type, orientation)) + plt.show() + + return log10diff + + ########################################################### # ANALYTIC WHOLESPACE TESTS FOR MAG AND ELECTRIC DIPOLES ########################################################### @@ -604,3 +766,84 @@ def test_analytic_m3_CYL_0m_CircularLoop(self): ) < 0.15 ) + + +class LayerConductanceTests(unittest.TestCase): + # Compares analytic 1D layered Earth solution to a plate of equivalent + # conductance. + + def test_tensor_magdipole_b_x(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="TENSOR", + rx_type="MagneticFluxDensity", + orientation="X", + bounds=None, + plotIt=False, + ) + < 0.01 + ) + + def test_tensor_magdipole_b_z(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="TENSOR", + rx_type="MagneticFluxDensity", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.01 + ) + + def test_cyl_magdipole_b_z(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="CYL", + rx_type="MagneticFluxDensity", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.01 + ) + + def test_tensor_linecurrent_dbdt_x(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="TENSOR", + rx_type="MagneticFluxTimeDerivative", + orientation="X", + bounds=None, + plotIt=False, + ) + < 0.01 + ) + + def test_tensor_linecurrent_dbdt_z(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="TENSOR", + rx_type="MagneticFluxTimeDerivative", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.01 + 
) + + def test_cyl_circularloop_dbdt_z(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="CYL", + rx_type="MagneticFluxTimeDerivative", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.01 + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/pf/test_forward_Mag_Linear.py b/tests/pf/test_forward_Mag_Linear.py index 662556b93e..0e866e520d 100644 --- a/tests/pf/test_forward_Mag_Linear.py +++ b/tests/pf/test_forward_Mag_Linear.py @@ -1,7 +1,8 @@ -import unittest +from typing import List, Tuple import discretize import numpy as np +import pytest from geoana.em.static import MagneticPrism from scipy.constants import mu_0 @@ -9,73 +10,213 @@ from SimPEG.potential_fields import magnetics as mag -def test_ana_mag_forward(): - nx = 5 - ny = 5 - - H0 = (50000.0, 60.0, 250.0) - b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) - chi1 = 0.01 - chi2 = 0.02 +@pytest.fixture +def mag_mesh() -> discretize.TensorMesh: + """ + a small tensor mesh for testing magnetic simulations + Returns + ------- + discretize.TensorMesh + the tensor mesh for testing + """ # Define a mesh cs = 0.2 hxind = [(cs, 41)] hyind = [(cs, 41)] hzind = [(cs, 41)] mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") + return mesh - # create a model of two blocks, 1 inside the other - block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) - block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) - - def get_block_inds(grid, block): - return np.where( - (grid[:, 0] > block[0, 0]) - & (grid[:, 0] < block[0, 1]) - & (grid[:, 1] > block[1, 0]) - & (grid[:, 1] < block[1, 1]) - & (grid[:, 2] > block[2, 0]) - & (grid[:, 2] < block[2, 1]) - ) - block1_inds = get_block_inds(mesh.cell_centers, block1) - block2_inds = get_block_inds(mesh.cell_centers, block2) +@pytest.fixture +def two_blocks() -> Tuple[np.ndarray, np.ndarray]: + """ + The parameters defining two blocks - model = np.zeros(mesh.n_cells) - model[block1_inds] = chi1 - 
model[block2_inds] = chi2 + Returns + ------- + Tuple[np.ndarray, np.ndarray] + Tuple of (3, 2) arrays of (xmin, xmax), (ymin, ymax), (zmin, zmax) dimensions of each block + """ + block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) + block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) + return block1, block2 - active_cells = model != 0.0 - model_reduced = model[active_cells] - # Create reduced identity map for Linear Problem - idenMap = maps.IdentityMap(nP=int(sum(active_cells))) +@pytest.fixture +def receiver_locations() -> np.ndarray: + """ + a grid of receivers for testing + Returns + ------- + np.ndarray + (n, 3) array of receiver locations + """ # Create plane of observations + nx, ny = 5, 5 xr = np.linspace(-20, 20, nx) yr = np.linspace(-20, 20, ny) X, Y = np.meshgrid(xr, yr) Z = np.ones_like(X) * 3.0 - locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] - components = ["bx", "by", "bz", "tmi"] + return np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] - rxLoc = mag.Point(locXyz, components=components) - srcField = mag.UniformBackgroundField( - [rxLoc], amplitude=H0[0], inclination=H0[1], declination=H0[2] + +@pytest.fixture +def inducing_field() -> Tuple[Tuple[float, float, float], Tuple[float, float, float]]: + """ + inducing field two ways-- (amplitude, inclination , declination) and (b_x, b_y, b_z) + + Returns + ------- + Tuple[Tuple[float, float, float], Tuple[float, float, float]] + (amplitude, inclination, declination), (b_x, b_y, b_z) + """ + H0 = (50000.0, 60.0, 250.0) + b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) + return H0, b0 + + +def get_block_inds(grid: np.ndarray, block: np.ndarray) -> np.ndarray: + """ + get the indices for a block + + Parameters + ---------- + grid : np.ndarray + (n, 3) array of xyz locations + block : np.ndarray + (3, 2) array of (xmin, xmax), (ymin, ymax), (zmin, zmax) dimensions of the block + + Returns + ------- + np.ndarray + boolean array of indices corresponding to the block + """ + + 
return np.where( + (grid[:, 0] > block[0, 0]) + & (grid[:, 0] < block[0, 1]) + & (grid[:, 1] > block[1, 0]) + & (grid[:, 1] < block[1, 1]) + & (grid[:, 2] > block[2, 0]) + & (grid[:, 2] < block[2, 1]) ) - survey = mag.Survey(srcField) - # Creat reduced identity map for Linear Problem - idenMap = maps.IdentityMap(nP=int(sum(active_cells))) +def create_block_model( + mesh: discretize.TensorMesh, + blocks: Tuple[np.ndarray, ...], + block_params: Tuple[np.ndarray, ...], +) -> Tuple[np.ndarray, np.ndarray]: + """ + Create a magnetic model from a sequence of blocks + + Parameters + ---------- + mesh : discretize.TensorMesh + TensorMesh object to put the model on + blocks : Tuple[np.ndarray, ...] + Tuple of block definitions (each element is (3, 2) array of (xmin, xmax), (ymin, ymax), (zmin, zmax) + dimensions of the block) + block_params : Tuple[np.ndarray, ...] + Tuple of parameters to assign for each block. Must be the same length as ``blocks``. + + Returns + ------- + Tuple[np.ndarray, np.ndarray] + Tuple of the magnetic model and active_cells (a boolean array) + + Raises + ------ + ValueError + if ``blocks`` and ``block_params`` have incompatible dimensions + """ + if len(blocks) != len(block_params): + raise ValueError( + "'blocks' and 'block_params' must have the same number of elements" + ) + model = np.zeros((mesh.n_cells, np.atleast_1d(block_params[0]).shape[0])) + for block, params in zip(blocks, block_params): + block_ind = get_block_inds(mesh.cell_centers, block) + model[block_ind] = params + active_cells = np.any(np.abs(model) > 0, axis=1) + return model.squeeze(), active_cells + + +def create_mag_survey( + components: List[str], + receiver_locations: np.ndarray, + inducing_field_params: Tuple[float, float, float], +) -> mag.Survey: + """ + create a magnetic Survey + + Parameters + ---------- + components : List[str] + List of components to model + receiver_locations : np.ndarray + (n, 3) array of xyz receiver locations + inducing_field_params : Tuple[float, 
float, float] + amplitude, inclination, and declination of the inducing field + + Returns + ------- + mag.Survey + a magnetic Survey instance + """ + + receivers = mag.Point(receiver_locations, components=components) + source_field = mag.UniformBackgroundField([receivers], *inducing_field_params) + return mag.Survey(source_field) + + +@pytest.mark.parametrize( + "engine,parallel_kwargs", + [ + ("geoana", {"n_processes": None}), + ("geoana", {"n_processes": 1}), + ("choclo", {"choclo_parallel": False}), + ("choclo", {"choclo_parallel": True}), + ], + ids=["geoana_serial", "geoana_parallel", "choclo_serial", "choclo_parallel"], +) +@pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) +def test_ana_mag_forward( + engine, + parallel_kwargs, + store_sensitivities, + tmp_path, + mag_mesh, + two_blocks, + receiver_locations, + inducing_field, +): + inducing_field_params, b0 = inducing_field + + chi1 = 0.01 + chi2 = 0.02 + model, active_cells = create_block_model(mag_mesh, two_blocks, [chi1, chi2]) + model_reduced = model[active_cells] + # Create reduced identity map for Linear Problem + identity_map = maps.IdentityMap(nP=int(sum(active_cells))) + + survey = create_mag_survey( + components=["bx", "by", "bz", "tmi"], + receiver_locations=receiver_locations, + inducing_field_params=inducing_field_params, + ) sim = mag.Simulation3DIntegral( - mesh, + mag_mesh, survey=survey, - chiMap=idenMap, + chiMap=identity_map, ind_active=active_cells, - store_sensitivities="forward_only", - n_processes=None, + sensitivity_path=str(tmp_path / f"{engine}"), + store_sensitivities=store_sensitivities, + engine=engine, + **parallel_kwargs, ) data = sim.dpred(model_reduced) @@ -84,25 +225,251 @@ def get_block_inds(grid, block): d_z = data[2::4] d_t = data[3::4] - tmi = sim.tmi_projection - d_t2 = d_x * tmi[0] + d_y * tmi[1] + d_z * tmi[2] - np.testing.assert_allclose(d_t, d_t2) # double check internal projection - # Compute analytical response from magnetic prism + 
block1, block2 = two_blocks prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], chi1 * b0 / mu_0) prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -chi1 * b0 / mu_0) prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], chi2 * b0 / mu_0) d = ( - prism_1.magnetic_flux_density(locXyz) - + prism_2.magnetic_flux_density(locXyz) - + prism_3.magnetic_flux_density(locXyz) + prism_1.magnetic_flux_density(receiver_locations) + + prism_2.magnetic_flux_density(receiver_locations) + + prism_3.magnetic_flux_density(receiver_locations) + ) + + # TMI projection + tmi = sim.tmi_projection + d_t2 = d_x * tmi[0] + d_y * tmi[1] + d_z * tmi[2] + + # Check results + rtol, atol = 1e-7, 1e-6 + np.testing.assert_allclose( + d_t, d_t2, rtol=rtol, atol=atol + ) # double check internal projection + np.testing.assert_allclose(d_x, d[:, 0], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_y, d[:, 1], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_z, d[:, 2], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_t, d @ tmi, rtol=rtol, atol=atol) + + +@pytest.mark.parametrize( + "engine, parallel_kwargs", + [ + ("geoana", {"n_processes": None}), + ("geoana", {"n_processes": 1}), + ("choclo", {"choclo_parallel": False}), + ("choclo", {"choclo_parallel": True}), + ], + ids=["geoana_serial", "geoana_parallel", "choclo_serial", "choclo_parallel"], +) +@pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) +def test_ana_mag_grad_forward( + engine, + parallel_kwargs, + store_sensitivities, + tmp_path, + mag_mesh, + two_blocks, + receiver_locations, + inducing_field, +): + inducing_field_params, b0 = inducing_field + + chi1 = 0.01 + chi2 = 0.02 + model, active_cells = create_block_model(mag_mesh, two_blocks, [chi1, chi2]) + model_reduced = model[active_cells] + # Create reduced identity map for Linear Problem + identity_map = maps.IdentityMap(nP=int(sum(active_cells))) + + survey = create_mag_survey( + components=["bxx", "bxy", "bxz", "byy", "byz", "bzz"], + 
receiver_locations=receiver_locations, + inducing_field_params=inducing_field_params, ) + sim = mag.Simulation3DIntegral( + mag_mesh, + survey=survey, + chiMap=identity_map, + ind_active=active_cells, + sensitivity_path=str(tmp_path / f"{engine}"), + store_sensitivities=store_sensitivities, + engine=engine, + **parallel_kwargs, + ) + if engine == "choclo": + # gradient simulation not implemented for choclo yet + with pytest.raises(NotImplementedError): + data = sim.dpred(model_reduced) + else: + data = sim.dpred(model_reduced) + d_xx = data[0::6] + d_xy = data[1::6] + d_xz = data[2::6] + d_yy = data[3::6] + d_yz = data[4::6] + d_zz = data[5::6] + + # Compute analytical response from magnetic prism + block1, block2 = two_blocks + prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], chi1 * b0 / mu_0) + prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -chi1 * b0 / mu_0) + prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], chi2 * b0 / mu_0) + + d = ( + prism_1.magnetic_field_gradient(receiver_locations) + + prism_2.magnetic_field_gradient(receiver_locations) + + prism_3.magnetic_field_gradient(receiver_locations) + ) * mu_0 + + # Check results + rtol, atol = 1e-7, 1e-6 + np.testing.assert_allclose(d_xx, d[..., 0, 0], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_xy, d[..., 0, 1], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_xz, d[..., 0, 2], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_yy, d[..., 1, 1], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_yz, d[..., 1, 2], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_zz, d[..., 2, 2], rtol=rtol, atol=atol) + + +@pytest.mark.parametrize( + "engine, parallel_kwargs", + [ + ("geoana", {"n_processes": None}), + ("geoana", {"n_processes": 1}), + ("choclo", {"choclo_parallel": False}), + ("choclo", {"choclo_parallel": True}), + ], + ids=["geoana_serial", "geoana_parallel", "choclo_serial", "choclo_parallel"], +) +@pytest.mark.parametrize("store_sensitivities", ("ram", "disk", 
"forward_only")) +def test_ana_mag_vec_forward( + engine, + parallel_kwargs, + store_sensitivities, + tmp_path, + mag_mesh, + two_blocks, + receiver_locations, + inducing_field, +): + inducing_field_params, b0 = inducing_field + M1 = (utils.mat_utils.dip_azimuth2cartesian(45, -40) * 0.05).squeeze() + M2 = (utils.mat_utils.dip_azimuth2cartesian(120, 32) * 0.1).squeeze() + + model, active_cells = create_block_model(mag_mesh, two_blocks, [M1, M2]) + model_reduced = model[active_cells].reshape(-1, order="F") + # Create reduced identity map for Linear Problem + identity_map = maps.IdentityMap(nP=int(sum(active_cells)) * 3) - np.testing.assert_allclose(d_x, d[:, 0]) - np.testing.assert_allclose(d_y, d[:, 1]) - np.testing.assert_allclose(d_z, d[:, 2]) - np.testing.assert_allclose(d_t, d @ tmi) + survey = create_mag_survey( + components=["bx", "by", "bz", "tmi"], + receiver_locations=receiver_locations, + inducing_field_params=inducing_field_params, + ) + + sim = mag.Simulation3DIntegral( + mag_mesh, + survey=survey, + chiMap=identity_map, + ind_active=active_cells, + sensitivity_path=str(tmp_path / f"{engine}"), + store_sensitivities=store_sensitivities, + model_type="vector", + engine=engine, + **parallel_kwargs, + ) + + data = sim.dpred(model_reduced).reshape(-1, 4) + + # Compute analytical response from magnetic prism + block1, block2 = two_blocks + prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], M1 * np.linalg.norm(b0) / mu_0) + prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -M1 * np.linalg.norm(b0) / mu_0) + prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], M2 * np.linalg.norm(b0) / mu_0) + + d = ( + prism_1.magnetic_flux_density(receiver_locations) + + prism_2.magnetic_flux_density(receiver_locations) + + prism_3.magnetic_flux_density(receiver_locations) + ) + tmi = sim.tmi_projection + + # Check results + rtol, atol = 9e-6, 3e-7 + np.testing.assert_allclose(data[:, 0], d[:, 0], rtol=rtol, atol=atol) + np.testing.assert_allclose(data[:, 1], d[:, 1], 
rtol=rtol, atol=atol) + np.testing.assert_allclose(data[:, 2], d[:, 2], rtol=rtol, atol=atol) + np.testing.assert_allclose(data[:, 3], d @ tmi, rtol=rtol, atol=atol) + + +@pytest.mark.parametrize( + "engine, parallel_kwargs", + [ + ("geoana", {"n_processes": None}), + ("geoana", {"n_processes": 1}), + ("choclo", {"choclo_parallel": False}), + ("choclo", {"choclo_parallel": True}), + ], + ids=["geoana_serial", "geoana_parallel", "choclo_serial", "choclo_parallel"], +) +@pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) +def test_ana_mag_amp_forward( + engine, + parallel_kwargs, + store_sensitivities, + tmp_path, + mag_mesh, + two_blocks, + receiver_locations, + inducing_field, +): + inducing_field_params, b0 = inducing_field + M1 = (utils.mat_utils.dip_azimuth2cartesian(45, -40) * 0.05).squeeze() + M2 = (utils.mat_utils.dip_azimuth2cartesian(120, 32) * 0.1).squeeze() + + model, active_cells = create_block_model(mag_mesh, two_blocks, [M1, M2]) + model_reduced = model[active_cells].reshape(-1, order="F") + # Create reduced identity map for Linear Problem + identity_map = maps.IdentityMap(nP=int(sum(active_cells)) * 3) + + survey = create_mag_survey( + components=["bx", "by", "bz"], + receiver_locations=receiver_locations, + inducing_field_params=inducing_field_params, + ) + + sim = mag.Simulation3DIntegral( + mag_mesh, + survey=survey, + chiMap=identity_map, + ind_active=active_cells, + sensitivity_path=str(tmp_path / f"{engine}"), + store_sensitivities=store_sensitivities, + model_type="vector", + is_amplitude_data=True, + engine=engine, + **parallel_kwargs, + ) + + data = sim.dpred(model_reduced) + + # Compute analytical response from magnetic prism + block1, block2 = two_blocks + prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], M1 * np.linalg.norm(b0) / mu_0) + prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -M1 * np.linalg.norm(b0) / mu_0) + prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], M2 * np.linalg.norm(b0) / mu_0) + + 
d = ( + prism_1.magnetic_flux_density(receiver_locations) + + prism_2.magnetic_flux_density(receiver_locations) + + prism_3.magnetic_flux_density(receiver_locations) + ) + d_amp = np.linalg.norm(d, axis=1) + + # Check results + rtol, atol = 1e-7, 1e-6 + np.testing.assert_allclose(data, d_amp, rtol=rtol, atol=atol) def test_ana_mag_tmi_grad_forward(): @@ -213,277 +580,3 @@ def get_block_inds(grid, block): atol=1.0, rtol=1e-1, ) - - -def test_ana_mag_grad_forward(): - nx = 5 - ny = 5 - - H0 = (50000.0, 60.0, 250.0) - b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) - chi1 = 0.01 - chi2 = 0.02 - - # Define a mesh - cs = 0.2 - hxind = [(cs, 41)] - hyind = [(cs, 41)] - hzind = [(cs, 41)] - mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") - - # create a model of two blocks, 1 inside the other - block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) - block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) - - def get_block_inds(grid, block): - return np.where( - (grid[:, 0] > block[0, 0]) - & (grid[:, 0] < block[0, 1]) - & (grid[:, 1] > block[1, 0]) - & (grid[:, 1] < block[1, 1]) - & (grid[:, 2] > block[2, 0]) - & (grid[:, 2] < block[2, 1]) - ) - - block1_inds = get_block_inds(mesh.cell_centers, block1) - block2_inds = get_block_inds(mesh.cell_centers, block2) - - model = np.zeros(mesh.n_cells) - model[block1_inds] = chi1 - model[block2_inds] = chi2 - - active_cells = model != 0.0 - model_reduced = model[active_cells] - - # Create reduced identity map for Linear Problem - idenMap = maps.IdentityMap(nP=int(sum(active_cells))) - - # Create plane of observations - xr = np.linspace(-20, 20, nx) - yr = np.linspace(-20, 20, ny) - X, Y = np.meshgrid(xr, yr) - Z = np.ones_like(X) * 3.0 - locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] - components = ["bxx", "bxy", "bxz", "byy", "byz", "bzz"] - - rxLoc = mag.Point(locXyz, components=components) - srcField = mag.UniformBackgroundField( - [rxLoc], amplitude=H0[0], inclination=H0[1], declination=H0[2] - 
) - survey = mag.Survey(srcField) - - # Creat reduced identity map for Linear Problem - idenMap = maps.IdentityMap(nP=int(sum(active_cells))) - - sim = mag.Simulation3DIntegral( - mesh, - survey=survey, - chiMap=idenMap, - ind_active=active_cells, - store_sensitivities="forward_only", - n_processes=None, - ) - - data = sim.dpred(model_reduced) - d_xx = data[0::6] - d_xy = data[1::6] - d_xz = data[2::6] - d_yy = data[3::6] - d_yz = data[4::6] - d_zz = data[5::6] - - # Compute analytical response from magnetic prism - prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], chi1 * b0 / mu_0) - prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -chi1 * b0 / mu_0) - prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], chi2 * b0 / mu_0) - - d = ( - prism_1.magnetic_field_gradient(locXyz) - + prism_2.magnetic_field_gradient(locXyz) - + prism_3.magnetic_field_gradient(locXyz) - ) * mu_0 - - np.testing.assert_allclose(d_xx, d[..., 0, 0], rtol=1e-10, atol=1e-12) - np.testing.assert_allclose(d_xy, d[..., 0, 1], rtol=1e-10, atol=1e-12) - np.testing.assert_allclose(d_xz, d[..., 0, 2], rtol=1e-10, atol=1e-12) - np.testing.assert_allclose(d_yy, d[..., 1, 1], rtol=1e-10, atol=1e-12) - np.testing.assert_allclose(d_yz, d[..., 1, 2], rtol=1e-10, atol=1e-12) - np.testing.assert_allclose(d_zz, d[..., 2, 2], rtol=1e-10, atol=1e-12) - - -def test_ana_mag_vec_forward(): - nx = 5 - ny = 5 - - H0 = (50000.0, 60.0, 250.0) - b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) - - M1 = utils.mat_utils.dip_azimuth2cartesian(45, -40) * 0.05 - M2 = utils.mat_utils.dip_azimuth2cartesian(120, 32) * 0.1 - - # Define a mesh - cs = 0.2 - hxind = [(cs, 41)] - hyind = [(cs, 41)] - hzind = [(cs, 41)] - mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") - - # create a model of two blocks, 1 inside the other - block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) - block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) - - def get_block_inds(grid, block): - return np.where( - (grid[:, 0] > 
block[0, 0]) - & (grid[:, 0] < block[0, 1]) - & (grid[:, 1] > block[1, 0]) - & (grid[:, 1] < block[1, 1]) - & (grid[:, 2] > block[2, 0]) - & (grid[:, 2] < block[2, 1]) - ) - - block1_inds = get_block_inds(mesh.cell_centers, block1) - block2_inds = get_block_inds(mesh.cell_centers, block2) - - model = np.zeros((mesh.n_cells, 3)) - model[block1_inds] = M1 - model[block2_inds] = M2 - - active_cells = np.any(model != 0.0, axis=1) - model_reduced = model[active_cells].reshape(-1, order="F") - - # Create plane of observations - xr = np.linspace(-20, 20, nx) - yr = np.linspace(-20, 20, ny) - X, Y = np.meshgrid(xr, yr) - Z = np.ones_like(X) * 3.0 - locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] - components = ["bx", "by", "bz", "tmi"] - - rxLoc = mag.Point(locXyz, components=components) - srcField = mag.UniformBackgroundField( - [rxLoc], amplitude=H0[0], inclination=H0[1], declination=H0[2] - ) - survey = mag.Survey(srcField) - - # Create reduced identity map for Linear Problem - idenMap = maps.IdentityMap(nP=int(sum(active_cells)) * 3) - - sim = mag.Simulation3DIntegral( - mesh, - survey=survey, - chiMap=idenMap, - ind_active=active_cells, - store_sensitivities="forward_only", - model_type="vector", - n_processes=None, - ) - - data = sim.dpred(model_reduced).reshape(-1, 4) - - # Compute analytical response from magnetic prism - prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], M1 * np.linalg.norm(b0) / mu_0) - prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -M1 * np.linalg.norm(b0) / mu_0) - prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], M2 * np.linalg.norm(b0) / mu_0) - - d = ( - prism_1.magnetic_flux_density(locXyz) - + prism_2.magnetic_flux_density(locXyz) - + prism_3.magnetic_flux_density(locXyz) - ) - tmi = sim.tmi_projection - - np.testing.assert_allclose(data[:, 0], d[:, 0]) - np.testing.assert_allclose(data[:, 1], d[:, 1]) - np.testing.assert_allclose(data[:, 2], d[:, 2]) - np.testing.assert_allclose(data[:, 3], d @ tmi) - - -def 
test_ana_mag_amp_forward(): - nx = 5 - ny = 5 - - H0 = (50000.0, 60.0, 250.0) - b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) - - M1 = utils.mat_utils.dip_azimuth2cartesian(45, -40) * 0.05 - M2 = utils.mat_utils.dip_azimuth2cartesian(120, 32) * 0.1 - - # Define a mesh - cs = 0.2 - hxind = [(cs, 41)] - hyind = [(cs, 41)] - hzind = [(cs, 41)] - mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") - - # create a model of two blocks, 1 inside the other - block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) - block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) - - def get_block_inds(grid, block): - return np.where( - (grid[:, 0] > block[0, 0]) - & (grid[:, 0] < block[0, 1]) - & (grid[:, 1] > block[1, 0]) - & (grid[:, 1] < block[1, 1]) - & (grid[:, 2] > block[2, 0]) - & (grid[:, 2] < block[2, 1]) - ) - - block1_inds = get_block_inds(mesh.cell_centers, block1) - block2_inds = get_block_inds(mesh.cell_centers, block2) - - model = np.zeros((mesh.n_cells, 3)) - model[block1_inds] = M1 - model[block2_inds] = M2 - - active_cells = np.any(model != 0.0, axis=1) - model_reduced = model[active_cells].reshape(-1, order="F") - - # Create plane of observations - xr = np.linspace(-20, 20, nx) - yr = np.linspace(-20, 20, ny) - X, Y = np.meshgrid(xr, yr) - Z = np.ones_like(X) * 3.0 - locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] - components = ["bx", "by", "bz"] - - rxLoc = mag.Point(locXyz, components=components) - srcField = mag.UniformBackgroundField( - [rxLoc], amplitude=H0[0], inclination=H0[1], declination=H0[2] - ) - survey = mag.Survey(srcField) - - # Create reduced identity map for Linear Problem - idenMap = maps.IdentityMap(nP=int(sum(active_cells)) * 3) - - sim = mag.Simulation3DIntegral( - mesh, - survey=survey, - chiMap=idenMap, - ind_active=active_cells, - store_sensitivities="forward_only", - model_type="vector", - is_amplitude_data=True, - n_processes=None, - ) - - data = sim.dpred(model_reduced) - - # Compute analytical response from 
magnetic prism - prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], M1 * np.linalg.norm(b0) / mu_0) - prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -M1 * np.linalg.norm(b0) / mu_0) - prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], M2 * np.linalg.norm(b0) / mu_0) - - d = ( - prism_1.magnetic_flux_density(locXyz) - + prism_2.magnetic_flux_density(locXyz) - + prism_3.magnetic_flux_density(locXyz) - ) - d_amp = np.linalg.norm(d, axis=1) - - np.testing.assert_allclose(data, d_amp) - - -if __name__ == "__main__": - unittest.main() diff --git a/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py b/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py new file mode 100644 index 0000000000..02d6b18727 --- /dev/null +++ b/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py @@ -0,0 +1,576 @@ +""" +Compare weighting strategy with Inversion of surface Gravity Anomaly Data +========================================================================= + +Here we invert gravity anomaly data to recover a density contrast model. We formulate the inverse problem as an iteratively +re-weighted least-squares (IRLS) optimization problem. For this tutorial, we +focus on the following: + + - Setting regularization weights + - Defining the survey from xyz formatted data + - Generating a mesh based on survey geometry + - Including surface topography + - Defining the inverse problem (data misfit, regularization, optimization) + - Specifying directives for the inversion + - Setting sparse and blocky norms + - Plotting the recovered model and data misfit + +Although we consider gravity anomaly data in this tutorial, the same approach +can be used to invert gradiometry and other types of geophysical data. 
+""" + +######################################################################### +# Import modules +# -------------- +# + +import os +import tarfile + +import matplotlib as mpl +import matplotlib.pyplot as plt +import numpy as np +from discretize import TensorMesh +from discretize.utils import active_from_xyz + +from SimPEG import ( + data, + data_misfit, + directives, + inverse_problem, + inversion, + maps, + optimization, + regularization, + utils, +) +from SimPEG.potential_fields import gravity +from SimPEG.utils import model_builder, plot2Ddata + +# sphinx_gallery_thumbnail_number = 3 + +############################################# +# Define File Names +# ----------------- +# +# File paths for assets we are loading. To set up the inversion, we require +# topography and field observations. The true model defined on the whole mesh +# is loaded to compare with the inversion result. These files are stored as a +# tar-file on our google cloud bucket: +# "https://storage.googleapis.com/simpeg/doc-assets/gravity.tar.gz" +# + +# storage bucket where we have the data +data_source = "https://storage.googleapis.com/simpeg/doc-assets/gravity.tar.gz" + +# download the data +downloaded_data = utils.download(data_source, overwrite=True) + +# unzip the tarfile +tar = tarfile.open(downloaded_data, "r") +tar.extractall() +tar.close() + +# path to the directory containing our data +dir_path = downloaded_data.split(".")[0] + os.path.sep + +# files to work with +topo_filename = dir_path + "gravity_topo.txt" +data_filename = dir_path + "gravity_data.obs" + + +############################################# +# Load Data and Plot +# ------------------ +# +# Here we load and plot synthetic gravity anomaly data. Topography is generally +# defined as an (N, 3) array. Gravity data is generally defined with 4 columns: +# x, y, z and data. 
+# + +# Load topography +xyz_topo = np.loadtxt(str(topo_filename)) + +# Load field data +dobs = np.loadtxt(str(data_filename)) + +# Define receiver locations and observed data +receiver_locations = dobs[:, 0:3] +dobs = dobs[:, -1] + +# Plot +mpl.rcParams.update({"font.size": 12}) +fig = plt.figure(figsize=(7, 5)) + +ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.85]) +plot2Ddata( + receiver_locations, + dobs, + ax=ax1, + contourOpts={"cmap": "bwr"}, + shade=True, + nx=20, + ny=20, + dataloc=True, +) +ax1.set_title("Gravity Anomaly") +ax1.set_xlabel("x (m)") +ax1.set_ylabel("y (m)") + +ax2 = fig.add_axes([0.8, 0.1, 0.03, 0.85]) +norm = mpl.colors.Normalize(vmin=-np.max(np.abs(dobs)), vmax=np.max(np.abs(dobs))) +cbar = mpl.colorbar.ColorbarBase( + ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr, format="%.1e" +) +cbar.set_label("$mGal$", rotation=270, labelpad=15, size=12) + +plt.show() + +############################################# +# Assign Uncertainties +# -------------------- +# +# Inversion with SimPEG requires that we define the standard deviation of our data. +# This represents our estimate of the noise in our data. For a gravity inversion, +# a constant floor value is generally applied to all data. For this tutorial, +# the standard deviation on each datum will be 1% of the maximum observed +# gravity anomaly value. +# + +maximum_anomaly = np.max(np.abs(dobs)) + +uncertainties = 0.01 * maximum_anomaly * np.ones(np.shape(dobs)) + +############################################# +# Defining the Survey +# ------------------- +# +# Here, we define the survey that will be used for this tutorial. Gravity +# surveys are simple to create. The user only needs an (N, 3) array to define +# the xyz locations of the observation locations. From this, the user can +# define the receivers and the source field. +# + +# Define the receivers. The data consists of vertical gravity anomaly measurements. +# The set of receivers must be defined as a list. 
+receiver_list = gravity.receivers.Point(receiver_locations, components="gz") + +receiver_list = [receiver_list] + +# Define the source field +source_field = gravity.sources.SourceField(receiver_list=receiver_list) + +# Define the survey +survey = gravity.survey.Survey(source_field) + +############################################# +# Defining the Data +# ----------------- +# +# Here is where we define the data that is inverted. The data is defined by +# the survey, the observation values and the standard deviation. +# + +data_object = data.Data(survey, dobs=dobs, standard_deviation=uncertainties) + + +############################################# +# Defining a Tensor Mesh +# ---------------------- +# +# Here, we create the tensor mesh that will be used to invert gravity anomaly +# data. If desired, we could define an OcTree mesh. +# + +dh = 5.0 +hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] +hy = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] +hz = [(dh, 5, -1.3), (dh, 15)] +mesh = TensorMesh([hx, hy, hz], "CCN") + +######################################################## +# Starting/Reference Model and Mapping on Tensor Mesh +# --------------------------------------------------- +# +# Here, we create starting and/or reference models for the inversion as +# well as the mapping from the model space to the active cells. Starting and +# reference models can be a constant background value or contain a-priori +# structures. 
+# + +# Find the indices of the active cells in forward model (ones below surface) +ind_active = active_from_xyz(mesh, xyz_topo) + +# Define mapping from model to active cells +nC = int(ind_active.sum()) +model_map = maps.IdentityMap(nP=nC) # model consists of a value for each active cell + +# Define and plot starting model +starting_model = np.zeros(nC) + + +############################################## +# Define the Physics and data misfit +# ---------------------------------- +# +# Here, we define the physics of the gravity problem by using the simulation +# class. +# + +simulation = gravity.simulation.Simulation3DIntegral( + survey=survey, mesh=mesh, rhoMap=model_map, ind_active=ind_active +) + +# Define the data misfit. Here the data misfit is the L2 norm of the weighted +# residual between the observed data and the data predicted for a given model. +# Within the data misfit, the residual between predicted and observed data are +# normalized by the data's standard deviation. +dmis = data_misfit.L2DataMisfit(data=data_object, simulation=simulation) + + +####################################################################### +# Running the Depth Weighted inversion +# ------------------------------------ +# +# Here we define the directives, weights, regularization, and optimization +# for a depth-weighted inversion +# + +# inversion directives +# Defining a starting value for the trade-off parameter (beta) between the data +# misfit and the regularization. +starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e0) + +# Defines the directives for the IRLS regularization. This includes setting +# the cooling schedule for the trade-off parameter. +update_IRLS = directives.Update_IRLS( + f_min_change=1e-4, + max_irls_iterations=30, + coolEpsFact=1.5, + beta_tol=1e-2, +) + +# Options for outputting recovered models and predicted data for each beta. 
save_iteration = directives.SaveOutputEveryIteration(save_txt=False)

# Updating the preconditioner if it is model dependent.
update_jacobi = directives.UpdatePreconditioner()

# The directives are defined as a list
directives_list = [
    update_IRLS,
    starting_beta,
    save_iteration,
    update_jacobi,
]

# Define the regularization (model objective function) with depth weighting.
reg_dpth = regularization.Sparse(mesh, active_cells=ind_active, mapping=model_map)
reg_dpth.norms = [0, 2, 2, 2]
depth_weights = utils.depth_weighting(
    mesh, receiver_locations, active_cells=ind_active, exponent=2
)
reg_dpth.set_weights(depth_weights=depth_weights)

# Define how the optimization problem is solved. Here we will use a projected
# Gauss-Newton approach that employs the conjugate gradient solver.
opt = optimization.ProjectedGNCG(
    maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3
)

# Here we define the inverse problem that is to be solved
inv_prob = inverse_problem.BaseInvProblem(dmis, reg_dpth, opt)

# Here we combine the inverse problem and the set of directives
inv = inversion.BaseInversion(inv_prob, directives_list)

# Run inversion
recovered_model_dpth = inv.run(starting_model)

#######################################################################
# Running the Distance Weighted inversion
# ---------------------------------------
#
# Here we define the directives, weights, regularization, and optimization
# for a distance-weighted inversion
#

# inversion directives
# Defining a starting value for the trade-off parameter (beta) between the data
# misfit and the regularization.
starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e0)

# Defines the directives for the IRLS regularization. This includes setting
# the cooling schedule for the trade-off parameter.
update_IRLS = directives.Update_IRLS(
    f_min_change=1e-4,
    max_irls_iterations=30,
    coolEpsFact=1.5,
    beta_tol=1e-2,
)

# Options for outputting recovered models and predicted data for each beta.
save_iteration = directives.SaveOutputEveryIteration(save_txt=False)

# Updating the preconditioner if it is model dependent.
update_jacobi = directives.UpdatePreconditioner()

# The directives are defined as a list
directives_list = [
    update_IRLS,
    starting_beta,
    save_iteration,
    update_jacobi,
]

# Define the regularization (model objective function) with distance weighting.
reg_dist = regularization.Sparse(mesh, active_cells=ind_active, mapping=model_map)
reg_dist.norms = [0, 2, 2, 2]
distance_weights = utils.distance_weighting(
    mesh, receiver_locations, active_cells=ind_active, exponent=2
)
reg_dist.set_weights(distance_weights=distance_weights)

# Define how the optimization problem is solved. Here we will use a projected
# Gauss-Newton approach that employs the conjugate gradient solver.
opt = optimization.ProjectedGNCG(
    maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3
)

# Here we define the inverse problem that is to be solved
inv_prob = inverse_problem.BaseInvProblem(dmis, reg_dist, opt)

# Here we combine the inverse problem and the set of directives
inv = inversion.BaseInversion(inv_prob, directives_list)

# Run inversion
recovered_model_dist = inv.run(starting_model)

#######################################################################
# Running the Sensitivity Weighted inversion
# ------------------------------------------
#
# Here we define the directives, weights, regularization, and optimization
# for a sensitivity weighted inversion
#

# inversion directives
# Defining a starting value for the trade-off parameter (beta) between the data
# misfit and the regularization.
starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e0)

# Defines the directives for the IRLS regularization. This includes setting
# the cooling schedule for the trade-off parameter.
update_IRLS = directives.Update_IRLS(
    f_min_change=1e-4,
    max_irls_iterations=30,
    coolEpsFact=1.5,
    beta_tol=1e-2,
)

# Options for outputting recovered models and predicted data for each beta.
save_iteration = directives.SaveOutputEveryIteration(save_txt=False)

# Updating the preconditioner if it is model dependent.
update_jacobi = directives.UpdatePreconditioner()

# Add sensitivity weights (computed once, before the first iteration)
sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False)

# The directives are defined as a list
directives_list = [
    update_IRLS,
    sensitivity_weights,
    starting_beta,
    save_iteration,
    update_jacobi,
]

# Define the regularization (model objective function) for sensitivity weighting.
# No explicit cell weights are set here; the UpdateSensitivityWeights directive
# attaches them.
reg_sensw = regularization.Sparse(mesh, active_cells=ind_active, mapping=model_map)
reg_sensw.norms = [0, 2, 2, 2]

# Define how the optimization problem is solved. Here we will use a projected
# Gauss-Newton approach that employs the conjugate gradient solver.
opt = optimization.ProjectedGNCG(
    maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3
)

# Here we define the inverse problem that is to be solved
inv_prob = inverse_problem.BaseInvProblem(dmis, reg_sensw, opt)

# Here we combine the inverse problem and the set of directives
inv = inversion.BaseInversion(inv_prob, directives_list)

# Run inversion
recovered_model_sensw = inv.run(starting_model)

############################################################
# Recreate True Model
# -------------------
#

# Define density contrast values for each unit in g/cc
background_density = 0.0
block_density = -0.2
sphere_density = 0.2

# Define model. Models in SimPEG are vector arrays.
true_model = background_density * np.ones(nC)

# You could find the indices of specific cells within the model and change their
# value to add structures.
ind_block = (
    (mesh.gridCC[ind_active, 0] > -50.0)
    & (mesh.gridCC[ind_active, 0] < -20.0)
    & (mesh.gridCC[ind_active, 1] > -15.0)
    & (mesh.gridCC[ind_active, 1] < 15.0)
    & (mesh.gridCC[ind_active, 2] > -50.0)
    & (mesh.gridCC[ind_active, 2] < -30.0)
)
true_model[ind_block] = block_density

# You can also use SimPEG utilities to add structures to the model more concisely
ind_sphere = model_builder.get_indices_sphere(
    np.r_[35.0, 0.0, -40.0], 15.0, mesh.gridCC
)
ind_sphere = ind_sphere[ind_active]
true_model[ind_sphere] = sphere_density


############################################################
# Plotting True Model and Recovered Models
# ----------------------------------------
#

# Plot Models
fig, ax = plt.subplots(2, 2, figsize=(20, 10), sharex=True, sharey=True)
ax = ax.flatten()
plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
cmap = "coolwarm"
slice_y_loc = 0.0

# FIX: the original referenced ``norm`` before it was assigned (its first
# assignment was below, for the depth-weighting panel), which raises a
# NameError unless ``norm`` happens to be defined earlier in the script.
# Define a symmetric norm from the true model for the first panel.
vmax = np.abs(true_model).max()
norm = mpl.colors.TwoSlopeNorm(vcenter=0, vmin=-vmax, vmax=vmax)
mm = mesh.plot_slice(
    plotting_map * true_model,
    normal="Y",
    ax=ax[0],
    grid=False,
    slice_loc=slice_y_loc,
    pcolor_opts={"cmap": cmap, "norm": norm},
)
ax[0].set_title(f"True model slice at y = {slice_y_loc} m")
plt.colorbar(mm[0], label="$g/cm^3$", ax=ax[0])

# plot depth weighting result
vmax = np.abs(recovered_model_dpth).max()
norm = mpl.colors.TwoSlopeNorm(vcenter=0, vmin=-vmax, vmax=vmax)
mm = mesh.plot_slice(
    plotting_map * recovered_model_dpth,
    normal="Y",
    ax=ax[1],
    grid=False,
    slice_loc=slice_y_loc,
    pcolor_opts={"cmap": cmap, "norm": norm},
)
ax[1].set_title(f"Depth weighting Model slice at y = {slice_y_loc} m")
plt.colorbar(mm[0], label="$g/cm^3$", ax=ax[1])

# plot distance weighting result
vmax = np.abs(recovered_model_dist).max()
norm = mpl.colors.TwoSlopeNorm(vcenter=0, vmin=-vmax, vmax=vmax)
mm = mesh.plot_slice(
    plotting_map * recovered_model_dist,
    normal="Y",
    ax=ax[2],
    grid=False,
    slice_loc=slice_y_loc,
    pcolor_opts={"cmap": cmap, "norm": norm},
)
ax[2].set_title(f"Distance weighting Model slice at y = {slice_y_loc} m")
plt.colorbar(mm[0], label="$g/cm^3$", ax=ax[2])

# plot sensitivity weighting result
vmax = np.abs(recovered_model_sensw).max()
norm = mpl.colors.TwoSlopeNorm(vcenter=0, vmin=-vmax, vmax=vmax)
mm = mesh.plot_slice(
    plotting_map * recovered_model_sensw,
    normal="Y",
    ax=ax[3],
    grid=False,
    slice_loc=slice_y_loc,
    pcolor_opts={"cmap": cmap, "norm": norm},
)
ax[3].set_title(f"Sensitivity weighting Model slice at y = {slice_y_loc} m")
plt.colorbar(mm[0], label="$g/cm^3$", ax=ax[3])

# shared plotting
plotting_map = maps.InjectActiveCells(mesh, ind_active, 0.0)
# FIX: the original compared the y coordinate itself against the *minimum
# absolute distance* (``cell_centers[:, 1] == np.abs(... - slice_y_loc).min()``),
# which selects the wrong cells (or none). Select the cells whose distance to
# the slice location equals the minimum distance instead.
slice_y_dist = np.abs(mesh.cell_centers[:, 1] - slice_y_loc)
slice_y_ind = slice_y_dist == slice_y_dist.min()
for axx in ax:
    utils.plot2Ddata(
        mesh.cell_centers[slice_y_ind][:, [0, 2]],
        (plotting_map * true_model)[slice_y_ind],
        contourOpts={"alpha": 0},
        level=True,
        ncontour=2,
        levelOpts={"colors": "grey", "linewidths": 2, "linestyles": "--"},
        method="nearest",
        ax=axx,
    )
    axx.set_aspect(1)

plt.tight_layout()

############################################################
# Visualize weights
# -----------------
#
# Plot Weights
fig, ax = plt.subplots(1, 3, figsize=(20, 4), sharex=True, sharey=True)
plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
cmap = "magma"
slice_y_loc = 0.0

# plot depth weights
mm = mesh.plot_slice(
    plotting_map * np.log10(depth_weights),
    normal="Y",
    ax=ax[0],
    grid=False,
    slice_loc=slice_y_loc,
    pcolor_opts={"cmap": cmap},
)
ax[0].set_title(f"log10(depth weights) slice at y = {slice_y_loc} m")
plt.colorbar(mm[0], label="log10(depth weights)", ax=ax[0])

# plot distance weights
mm = mesh.plot_slice(
    plotting_map * np.log10(distance_weights),
    normal="Y",
    ax=ax[1],
    grid=False,
    slice_loc=slice_y_loc,
    pcolor_opts={"cmap": cmap},
)
ax[1].set_title(f"log10(distance weights) slice at y = {slice_y_loc} m")
plt.colorbar(mm[0], label="log10(distance weights)", ax=ax[1])

# plot sensitivity weights
mm = mesh.plot_slice(
    plotting_map * np.log10(reg_sensw.objfcts[0].get_weights(key="sensitivity")),
    normal="Y",
    ax=ax[2],
    grid=False,
    slice_loc=slice_y_loc,
    pcolor_opts={"cmap": cmap},
)
ax[2].set_title(f"log10(sensitivity weights) slice at y = {slice_y_loc} m")
plt.colorbar(mm[0], label="log10(sensitivity weights)", ax=ax[2])

# shared plotting
for axx in ax:
    axx.set_aspect(1)

plt.tight_layout()