From 479243486ce5181239082971f88d0b842e4d3a81 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Wed, 2 Nov 2022 08:27:43 -0700 Subject: [PATCH 001/164] fix for anisotropy models, beginings of test --- SimPEG/base/pde_simulation.py | 6 ++-- tests/base/test_mass_matrices.py | 55 ++++++++++++++++++++++++++++++-- 2 files changed, 56 insertions(+), 5 deletions(-) diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index aefd73edf4..5bee52f6ab 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -232,8 +232,9 @@ def MfDeriv_prop(self, u, v=None, adjoint=False): return Zero() stash_name = f"_Mf_{arg}_deriv" if getattr(self, stash_name, None) is None: + prop = getattr(self, arg.lower()) M_prop_deriv = self.mesh.get_face_inner_product_deriv( - np.ones(self.mesh.n_cells) + model=prop )(np.ones(self.mesh.n_faces)) * getattr(self, f"{arg.lower()}Deriv") setattr(self, stash_name, M_prop_deriv) return __inner_mat_mul_op( @@ -252,8 +253,9 @@ def MeDeriv_prop(self, u, v=None, adjoint=False): return Zero() stash_name = f"_Me_{arg}_deriv" if getattr(self, stash_name, None) is None: + prop = getattr(self, arg.lower()) M_prop_deriv = self.mesh.get_edge_inner_product_deriv( - np.ones(self.mesh.n_cells) + prop )(np.ones(self.mesh.n_edges)) * getattr(self, f"{arg.lower()}Deriv") setattr(self, stash_name, M_prop_deriv) return __inner_mat_mul_op( diff --git a/tests/base/test_mass_matrices.py b/tests/base/test_mass_matrices.py index 396a75cd29..f112007756 100644 --- a/tests/base/test_mass_matrices.py +++ b/tests/base/test_mass_matrices.py @@ -41,9 +41,24 @@ def setUp(self): self.mesh = discretize.TensorMesh([5, 6, 7]) self.sim = SimpleSim(self.mesh, sigmaMap=maps.ExpMap()) - self.start_mod = np.log(1e-2 * np.ones(self.mesh.n_cells)) + np.random.randn( - self.mesh.n_cells - ) + n_cells = self.mesh.n_cells + self.start_mod = np.log(np.full(n_cells, 1e-2)) + np.random.randn(n_cells) + self.start_diag_mod = np.r_[ + np.log(np.full(n_cells, 
1e-2)), + np.log(np.full(n_cells, 2e-2)), + np.log(np.full(n_cells, 3e-2)) + ] + np.random.randn(3 * n_cells) + + self.sim_full_aniso = SimpleSim(self.mesh, sigmaMap=maps.IdentityMap()) + + self.start_full_mod = np.r_[ + np.full(n_cells, 1), + np.full(n_cells, 2), + np.full(n_cells, 3), + np.full(n_cells, -1), + np.full(n_cells, 1), + np.full(n_cells, -2), + ] def test_zero_returns(self): n_c = self.mesh.n_cells @@ -352,6 +367,40 @@ def Jvec(v): assert check_derivative(f, x0=x0, num=3, plotIt=False) + def test_Me_diagonal_anisotropy_deriv(self): + u = np.random.randn(self.mesh.n_edges) + sim = self.sim + x0 = self.start_diag_mod + + def f(x): + sim.model = x + d = sim.MeSigma @ u + + def Jvec(v): + sim.model = x0 + return sim.MeSigmaDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + + def test_Me_full_anisotropy_deriv(self): + u = np.random.randn(self.mesh.n_edges) + sim = self.sim_full_aniso + x0 = self.start_full_mod + + def f(x): + sim.model = x + d = sim.MeSigma @ u + + def Jvec(v): + sim.model = x0 + return sim.MeSigmaDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + def test_Mf_deriv(self): u = np.random.randn(self.mesh.n_faces) sim = self.sim From 2c21b0f6f77e6278df4415bf159d3b936489de7f Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Wed, 2 Nov 2022 10:38:46 -0700 Subject: [PATCH 002/164] add a full gradient regularizer. 
--- SimPEG/regularization/__init__.py | 1 + SimPEG/regularization/rotated.py | 59 +++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 SimPEG/regularization/rotated.py diff --git a/SimPEG/regularization/__init__.py b/SimPEG/regularization/__init__.py index 3bbadef098..1459e68af0 100644 --- a/SimPEG/regularization/__init__.py +++ b/SimPEG/regularization/__init__.py @@ -166,6 +166,7 @@ from .cross_gradient import CrossGradient from .correspondence import LinearCorrespondence from .jtv import JointTotalVariation +from .rotated import SmoothnessFullGradient @deprecate_class(removal_version="0.19.0", future_warn=True) diff --git a/SimPEG/regularization/rotated.py b/SimPEG/regularization/rotated.py new file mode 100644 index 0000000000..be0b98be48 --- /dev/null +++ b/SimPEG/regularization/rotated.py @@ -0,0 +1,59 @@ +from .base import BaseRegularization +import numpy as np +import scipy.sparse as sp + + +class SmoothnessFullGradient(BaseRegularization): + def __init__(self, mesh, anisotropic_regularization=None, **kwargs): + super().__init__(mesh=mesh, **kwargs) + + if anisotropic_regularization is None: + anisotropic_regularization = np.ones((len(mesh), mesh.dim)) + self._anis_reg = anisotropic_regularization + + # overwrite the call, deriv, and deriv2... 
+ def __call__(self, m): + r = self.D @ (self.mapping * (self._delta_m(m))) + return 0.5 * r @ self.W @ r + + def deriv(self, m): + mD = self.mapping.deriv(self._delta_m(m)) + r = self.D @ (self.mapping * (self._delta_m(m))) + return mD.T * (self.D.T @ (self.W * r)) + + def deriv2(self, m, v=None): + mDv = self.mapping.deriv(self._delta_m(m), v) + if v is None: + return mDv.T * (self.D.T @ self.W @ self.D) * mDv + + return mDv.T * (self.D.T @ (self.W @ (self.D * mDv))) + + @property + def D(self): + if getattr(self, "_D", None) is None: + mesh = self.regularization_mesh.mesh + # Turn off cell_gradient at boundary faces + bf = mesh.project_face_to_boundary_face.indices + v = np.ones(mesh.n_faces) + v[bf] = 0.0 + P = sp.diags(v) + try: + cell_gradient = mesh.cell_gradient + except AttributeError: + a = mesh.face_areas + v = mesh.average_cell_to_face @ mesh.cell_volumes + cell_gradient = sp.diags(a / v) @ mesh.stencil_cell_gradient + self._D = P @ cell_gradient + return self._D + + @property + def W(self): + if getattr(self, "_W", None) is None: + mesh = self.regularization_mesh.mesh + cell_weights = np.ones(len(mesh)) + for values in self._weights.values(): + cell_weights *= values + reg_model = self._anis_reg * cell_weights[:, None] + + self._W = mesh.get_face_inner_product(reg_model) + return self._W From ee70f5e393dba4ba84b8ec8abf79e49034c85be2 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Thu, 19 Jan 2023 15:38:03 -0800 Subject: [PATCH 003/164] Use vectors as orientation input Also adds more documentation with examples --- SimPEG/regularization/rotated.py | 196 ++++++++++++++++++++++++++++--- 1 file changed, 179 insertions(+), 17 deletions(-) diff --git a/SimPEG/regularization/rotated.py b/SimPEG/regularization/rotated.py index be0b98be48..21f75feae8 100644 --- a/SimPEG/regularization/rotated.py +++ b/SimPEG/regularization/rotated.py @@ -1,36 +1,191 @@ from .base import BaseRegularization import numpy as np import scipy.sparse as sp +from 
..utils.code_utils import validate_ndarray_with_shape class SmoothnessFullGradient(BaseRegularization): - def __init__(self, mesh, anisotropic_regularization=None, **kwargs): + r"""Measures the gradient of a model using optionally anisotropic weighting. + + This regularizer measures the first order smoothness in a mesh ambivalent way + by observing that the N-d smoothness operator can be represented as an + inner product with an arbitrarily anisotropic weight. + + By default it assumes uniform weighting in each dimension, which works + for most ``discretize`` mesh types. + + Parameters + ---------- + mesh : discretize.BaseMesh + The mesh object to use for regularization. The mesh should either have + a `cell_gradient` or a `stencil_cell_gradient` defined. + alphas : (mesh.dim,) or (mesh.n_cells, mesh.dim) array_like of float, optional. + The weights of the regularization for each axis. This can be defined for each cell + in the mesh. Default is uniform weights equal to the smallest edge length squared. + reg_dirs : (mesh.dim, mesh.dim) or (mesh.n_cells, mesh.dim, mesh.dim) array_like of float + The direction of the regularization axes. Each matrix should be orthonormal. + Default is Identity. + **kwargs + Keyword arguments passed to the parent class ``BaseRegularization``. + + Examples + -------- + Construct of 2D measure with uniform smoothing in each direction. + + >>> from discretize import TensorMesh + >>> from SimPEG.regularization import SmoothnessFullGradient + >>> mesh = TensorMesh([32, 32]) + >>> reg = SmoothnessFullGradient(mesh) + + We can instead create a measure that smooths twice as much in the 1st dimension + than it does in the second dimension. + >>> reg = SmoothnessFullGradient(mesh, [2, 1]) + + The `alphas` parameter can also be indepenant for each cell. Here we set all cells + lower than 0.5 in the x2 to twice as much in the first dimension + otherwise it is uniform smoothing. 
+ >>> alphas = np.ones((mesh.n_cells, mesh.dim)) + >>> alphas[mesh.cell_centers[:, 1] < 0.5] = [2, 1] + >>> reg = SmoothnessFullGradient(mesh, alphas) + + We can also rotate the axis in which we want to preferentially smooth. Say we want to + smooth twice as much along the +x1,+x2 diagonal as we do along the -x1,+x2 diagonal, + effectively rotating our smoothing 45 degrees. Note that we must provide orthonormal + vectors. + >>> sqrt2 = np.sqrt(2) + >>> reg_dirs = [ + ... [sqrt2, sqrt2], + ... [-sqrt2, sqrt2], + ... ] + >>> reg = SmoothnessFullGradient(mesh, alphas, reg_dirs=reg_dirs) + + Notes + ----- + The regularization object is the discretized form of the continuous regularization + + ..math: + f(m) = \int_V \nabla m \cdot \mathbf{a} \nabla m \hspace{5pt} \partial V + + The tensor quantity `a` is used to represent the potential preferential directions of + regularization. `a` must be symmetric positive semi-definite with an eigendecomposition of: + + ..math: + \mathbf{a} = \mathbf{Q}\mathbf{L}\mathbf{Q}^{-1} + + `Q` is then the regularization directions `reg_dirs`, and `L` is the represents the weighting + along each direction, with `alphas` along its diagonal. These are multiplied to form the + anisotropic alpha used for rotated gradients. 
+ """ + + def __init__(self, mesh, alphas=None, reg_dirs=None, **kwargs): + if mesh.dim < 2: + raise TypeError("Mesh must have dimension higher than 1") super().__init__(mesh=mesh, **kwargs) - if anisotropic_regularization is None: - anisotropic_regularization = np.ones((len(mesh), mesh.dim)) - self._anis_reg = anisotropic_regularization + if alphas is None: + edge_length = np.min(mesh.edge_lengths) + alphas = edge_length ** 2 * np.ones(mesh.dim) + alphas = validate_ndarray_with_shape( + "alphas", + alphas, + shape=[(mesh.dim,), ("*", mesh.dim)], + dtype=float, + ) + n_cells = self.regularization_mesh.n_cells + if len(alphas.shape) == 1: + alphas = np.tile(alphas, (n_cells, 1)) + if alphas.shape[0] != n_cells: + if alphas.shape[0] == mesh.n_cells: + alphas = alphas[self.active_cells] + else: + raise IndexError( + f"`alphas` first dimension, {alphas.shape[0]}, must be either number " + f"of active cells {n_cells}, or the number of mesh cells {mesh.n_cells}. " + ) + if np.any(alphas < 0): + raise ValueError("`alpha` must be non-negative") + anis_alpha = alphas + + if reg_dirs is not None: + reg_dirs = validate_ndarray_with_shape( + "reg_dirs", + reg_dirs, + shape=[(mesh.dim, mesh.dim), ("*", mesh.dim, mesh.dim)], + dtype=float, + ) + if reg_dirs.shape == (mesh.dim, mesh.dim): + reg_dirs = np.tile(reg_dirs, (n_cells, 1, 1)) + if reg_dirs.shape[0] != n_cells: + if reg_dirs.shape[0] == mesh.n_cells: + reg_dirs = reg_dirs[self.active_cells] + else: + raise IndexError( + f"`reg_dirs` first dimension, {reg_dirs.shape[0]}, must be either number " + f"of active cells {n_cells}, or the number of mesh cells {mesh.n_cells}. " + ) + # check orthogonality? 
+ eye = np.eye(mesh.dim) + for i, M in enumerate(reg_dirs): + if not np.allclose(eye, reg_dirs.T @ reg_dirs): + raise ValueError(f"Matrix {i} is not orthonormal") + # create a stack of matrices of dir.T @ alphas @ dir + anis_alpha = np.einsum("ikn,ik,ikm->inm", reg_dirs, anis_alpha, reg_dirs) + # Then select the upper diagonal components for input to discretize + if mesh.dim == 2: + anis_alpha = np.stack( + ( + anis_alpha[..., 0, 0], + anis_alpha[..., 1, 1], + anis_alpha[..., 0, 1], + ), + axis=-1, + ) + elif mesh.dim == 3: + anis_alpha = np.stack( + ( + anis_alpha[..., 0, 0], + anis_alpha[..., 1, 1], + anis_alpha[..., 2, 2], + anis_alpha[..., 0, 1], + anis_alpha[..., 0, 2], + anis_alpha[..., 1, 2], + ), + axis=-1, + ) + self._anis_alpha = anis_alpha # overwrite the call, deriv, and deriv2... def __call__(self, m): - r = self.D @ (self.mapping * (self._delta_m(m))) - return 0.5 * r @ self.W @ r + G = self.cell_gradient + M_f = self.W + r = G @ (self.mapping * (self._delta_m(m))) + return 0.5 * r @ M_f @ r def deriv(self, m): - mD = self.mapping.deriv(self._delta_m(m)) - r = self.D @ (self.mapping * (self._delta_m(m))) - return mD.T * (self.D.T @ (self.W * r)) + m_d = self.mapping.deriv(self._delta_m(m)) + G = self.cell_gradient + M_f = self.W + r = G @ (self.mapping * (self._delta_m(m))) + return m_d.T * (G.T @ (M_f @ r)) def deriv2(self, m, v=None): - mDv = self.mapping.deriv(self._delta_m(m), v) + m_d_v = self.mapping.deriv(self._delta_m(m), v) + G = self.cell_gradient + M_f = self.W if v is None: - return mDv.T * (self.D.T @ self.W @ self.D) * mDv + return m_d_v.T @ (G.T @ M_f @ G) @ m_d_v - return mDv.T * (self.D.T @ (self.W @ (self.D * mDv))) + return m_d_v.T @ (G.T @ (M_f @ (G @ m_d_v))) @property - def D(self): - if getattr(self, "_D", None) is None: + def cell_gradient(self): + """The (approximate) cell gradient operator + + Returns + ------- + scipy.sparse.csr_matrix + """ + if getattr(self, "_cell_gradient", None) is None: mesh = 
self.regularization_mesh.mesh # Turn off cell_gradient at boundary faces bf = mesh.project_face_to_boundary_face.indices @@ -43,17 +198,24 @@ def D(self): a = mesh.face_areas v = mesh.average_cell_to_face @ mesh.cell_volumes cell_gradient = sp.diags(a / v) @ mesh.stencil_cell_gradient - self._D = P @ cell_gradient - return self._D + self._cell_gradient = P @ cell_gradient + return self._cell_gradient @property def W(self): + """The inner product operator using rotated coordinates + + Returns + ------- + scipy.sparse.csr_matrix + """ if getattr(self, "_W", None) is None: mesh = self.regularization_mesh.mesh cell_weights = np.ones(len(mesh)) for values in self._weights.values(): cell_weights *= values - reg_model = self._anis_reg * cell_weights[:, None] + reg_model = self._anis_alpha * cell_weights[:, None] + reg_model[~self.active_cells] = 0.0 self._W = mesh.get_face_inner_product(reg_model) return self._W From 598f15acdd52e25d264d439e685788eeeb582da7 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Thu, 19 Jan 2023 15:39:01 -0800 Subject: [PATCH 004/164] deprecate reg_mesh.nC replace with n_cells --- SimPEG/regularization/base.py | 14 +++++++------- SimPEG/regularization/correspondence.py | 2 +- SimPEG/regularization/pgi.py | 10 +++++----- SimPEG/regularization/regularization_mesh.py | 14 ++++++++++---- tests/base/test_regularization.py | 2 +- 5 files changed, 24 insertions(+), 18 deletions(-) diff --git a/SimPEG/regularization/base.py b/SimPEG/regularization/base.py index 0aa54e3a93..4b35d6b9b7 100644 --- a/SimPEG/regularization/base.py +++ b/SimPEG/regularization/base.py @@ -163,9 +163,9 @@ def _weights_shapes(self) -> tuple[int] | str: """ if ( getattr(self, "_regularization_mesh", None) is not None - and self.regularization_mesh.nC != "*" + and self.regularization_mesh.n_cells != "*" ): - return (self.regularization_mesh.nC,) + return (self.regularization_mesh.n_cells,) if getattr(self, "_mapping", None) is not None and self.mapping.shape != "*": return 
(self.mapping.shape[0],) @@ -291,7 +291,7 @@ def _nC_residual(self) -> int: return self.mapping.shape[1] if nC != "*" and nC is not None: - return self.regularization_mesh.nC + return self.regularization_mesh.n_cells return self._weights_shapes[0] @@ -546,7 +546,7 @@ def W(self): ) weights = 1.0 for values in self._weights.values(): - if values.shape[0] == self.regularization_mesh.nC: + if values.shape[0] == self.regularization_mesh.n_cells: values = average_cell_2_face * values weights *= values self._W = utils.sdiag(weights ** 0.5) @@ -965,9 +965,9 @@ def nP(self): return self.mapping.nP elif ( getattr(self, "_regularization_mesh", None) is not None - and self.regularization_mesh.nC != "*" + and self.regularization_mesh.n_cells != "*" ): - return self.regularization_mesh.nC + return self.regularization_mesh.n_cells else: return "*" @@ -983,7 +983,7 @@ def _nC_residual(self): if mapping is not None and mapping.shape[1] != "*": return self.mapping.shape[1] elif nC != "*" and nC is not None: - return self.regularization_mesh.nC + return self.regularization_mesh.n_cells else: return self.nP diff --git a/SimPEG/regularization/correspondence.py b/SimPEG/regularization/correspondence.py index 37d9354b5e..9be5e5920b 100644 --- a/SimPEG/regularization/correspondence.py +++ b/SimPEG/regularization/correspondence.py @@ -115,7 +115,7 @@ def deriv2(self, model, v=None): p2 = k2 * k1 * v1 + k2 ** 2 * v2 return np.r_[p1, p2] else: - n = self.regularization_mesh.nC + n = self.regularization_mesh.n_cells A = utils.sdiag(np.ones(n) * (k1 ** 2)) B = utils.sdiag(np.ones(n) * (k2 ** 2)) C = utils.sdiag(np.ones(n) * (k1 * k2)) diff --git a/SimPEG/regularization/pgi.py b/SimPEG/regularization/pgi.py index 3a06c67fc7..3b74d11d0e 100644 --- a/SimPEG/regularization/pgi.py +++ b/SimPEG/regularization/pgi.py @@ -96,7 +96,7 @@ def set_weights(self, **weights): for key, values in weights.items(): values = validate_ndarray_with_shape("weights", values, dtype=float) - if values.shape[0] == 
self.regularization_mesh.nC: + if values.shape[0] == self.regularization_mesh.n_cells: values = np.tile(values, len(self.wiresmap.maps)) values = validate_ndarray_with_shape( @@ -153,7 +153,7 @@ def non_linear_relationships(self, value: bool): @property def wiresmap(self): if getattr(self, "_wiresmap", None) is None: - self._wiresmap = Wires(("m", self.regularization_mesh.nC)) + self._wiresmap = Wires(("m", self.regularization_mesh.n_cells)) return self._wiresmap @wiresmap.setter @@ -172,7 +172,7 @@ def wiresmap(self, wires): def maplist(self): if getattr(self, "_maplist", None) is None: self._maplist = [ - IdentityMap(nP=self.regularization_mesh.nC) + IdentityMap(nP=self.regularization_mesh.n_cells) for maps in self.wiresmap.maps ] return self._maplist @@ -788,14 +788,14 @@ def compute_quasi_geology_model(self): @property def wiresmap(self): if getattr(self, "_wiresmap", None) is None: - self._wiresmap = Wires(("m", self.regularization_mesh.nC)) + self._wiresmap = Wires(("m", self.regularization_mesh.n_cells)) return self._wiresmap @property def maplist(self): if getattr(self, "_maplist", None) is None: self._maplist = [ - IdentityMap(nP=self.regularization_mesh.nC) + IdentityMap(nP=self.regularization_mesh.n_cells) for maps in self.wiresmap.maps ] return self._maplist diff --git a/SimPEG/regularization/regularization_mesh.py b/SimPEG/regularization/regularization_mesh.py index 12003abd14..65189cdafc 100755 --- a/SimPEG/regularization/regularization_mesh.py +++ b/SimPEG/regularization/regularization_mesh.py @@ -117,13 +117,19 @@ def vol(self) -> np.ndarray: return self._vol @property - def nC(self) -> int: - """ - Number of cells being regularized. + def n_cells(self) -> int: + """Number of cells being regularized. 
+ Returns + ------- + int """ if self.active_cells is not None: return int(self.active_cells.sum()) - return self.mesh.nC + return self.mesh.n_cells + + nC = deprecate_property( + n_cells, old_name="nC", new_name="n_cells", removal_version="0.19.0" + ) @property def dim(self) -> int: diff --git a/tests/base/test_regularization.py b/tests/base/test_regularization.py index df712def75..0ac662593d 100644 --- a/tests/base/test_regularization.py +++ b/tests/base/test_regularization.py @@ -164,7 +164,7 @@ def test_property_mirroring(self): active_cells = mesh.gridCC[:, 2] < 0.6 reg = getattr(regularization, regType)(mesh, active_cells=active_cells) - self.assertTrue(reg.nP == reg.regularization_mesh.nC) + self.assertTrue(reg.nP == reg.regularization_mesh.n_cells) [ self.assertTrue(np.all(fct.active_cells == active_cells)) From 9002374aeb3944de600f3da977265a556c87622f Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Fri, 20 Jan 2023 10:26:42 -0800 Subject: [PATCH 005/164] full gradient updates --- SimPEG/regularization/rotated.py | 72 ++++++++++++++++++++------------ 1 file changed, 46 insertions(+), 26 deletions(-) diff --git a/SimPEG/regularization/rotated.py b/SimPEG/regularization/rotated.py index 21f75feae8..66c07216c4 100644 --- a/SimPEG/regularization/rotated.py +++ b/SimPEG/regularization/rotated.py @@ -23,8 +23,10 @@ class SmoothnessFullGradient(BaseRegularization): The weights of the regularization for each axis. This can be defined for each cell in the mesh. Default is uniform weights equal to the smallest edge length squared. reg_dirs : (mesh.dim, mesh.dim) or (mesh.n_cells, mesh.dim, mesh.dim) array_like of float - The direction of the regularization axes. Each matrix should be orthonormal. - Default is Identity. + Matrix or list of matrices whose columns represent the regularization directions. + Each matrix should be orthonormal. Default is Identity. + ortho_check : bool, optional + Whether to check `reg_dirs` for orthogonality. 
**kwargs Keyword arguments passed to the parent class ``BaseRegularization``. @@ -51,11 +53,11 @@ class SmoothnessFullGradient(BaseRegularization): We can also rotate the axis in which we want to preferentially smooth. Say we want to smooth twice as much along the +x1,+x2 diagonal as we do along the -x1,+x2 diagonal, effectively rotating our smoothing 45 degrees. Note that we must provide orthonormal - vectors. + vectors, and the columns of the matrix represent the vectors (not the rows). >>> sqrt2 = np.sqrt(2) >>> reg_dirs = [ + ... [sqrt2, -sqrt2], ... [sqrt2, sqrt2], - ... [-sqrt2, sqrt2], ... ] >>> reg = SmoothnessFullGradient(mesh, alphas, reg_dirs=reg_dirs) @@ -77,7 +79,7 @@ class SmoothnessFullGradient(BaseRegularization): anisotropic alpha used for rotated gradients. """ - def __init__(self, mesh, alphas=None, reg_dirs=None, **kwargs): + def __init__(self, mesh, alphas=None, reg_dirs=None, ortho_check=True, **kwargs): if mesh.dim < 2: raise TypeError("Mesh must have dimension higher than 1") super().__init__(mesh=mesh, **kwargs) @@ -91,12 +93,15 @@ def __init__(self, mesh, alphas=None, reg_dirs=None, **kwargs): shape=[(mesh.dim,), ("*", mesh.dim)], dtype=float, ) - n_cells = self.regularization_mesh.n_cells + n_active_cells = self.regularization_mesh.n_cells if len(alphas.shape) == 1: - alphas = np.tile(alphas, (n_cells, 1)) - if alphas.shape[0] != n_cells: - if alphas.shape[0] == mesh.n_cells: - alphas = alphas[self.active_cells] + alphas = np.tile(alphas, (mesh.n_cells, 1)) + if alphas.shape[0] != mesh.n_cells: + # check if I need to expand from active cells to all cells (needed for discretize) + if alphas.shape[0] == n_active_cells and self.active_cells is not None: + alpha_temp = np.zeros((mesh.n_cells, mesh.dim)) + alpha_temp[self.active_cells] = alphas + alphas = alpha_temp else: raise IndexError( f"`alphas` first dimension, {alphas.shape[0]}, must be either number " @@ -114,22 +119,29 @@ def __init__(self, mesh, alphas=None, reg_dirs=None, **kwargs): 
dtype=float, ) if reg_dirs.shape == (mesh.dim, mesh.dim): - reg_dirs = np.tile(reg_dirs, (n_cells, 1, 1)) - if reg_dirs.shape[0] != n_cells: - if reg_dirs.shape[0] == mesh.n_cells: - reg_dirs = reg_dirs[self.active_cells] + reg_dirs = np.tile(reg_dirs, (mesh.n_cells, 1, 1)) + if reg_dirs.shape[0] != mesh.n_cells: + # check if I need to expand from active cells to all cells (needed for discretize) + if ( + reg_dirs.shape[0] == n_active_cells + and self.active_cells is not None + ): + reg_dirs_temp = np.zeros((mesh.n_cells, mesh.dim, mesh.dim)) + reg_dirs_temp[self.active_cells] = reg_dirs + reg_dirs = reg_dirs_temp else: raise IndexError( f"`reg_dirs` first dimension, {reg_dirs.shape[0]}, must be either number " f"of active cells {n_cells}, or the number of mesh cells {mesh.n_cells}. " ) # check orthogonality? - eye = np.eye(mesh.dim) - for i, M in enumerate(reg_dirs): - if not np.allclose(eye, reg_dirs.T @ reg_dirs): - raise ValueError(f"Matrix {i} is not orthonormal") + if ortho_check: + eye = np.eye(mesh.dim) + for i, M in enumerate(reg_dirs): + if not np.allclose(eye, M @ M.T): + raise ValueError(f"Matrix {i} is not orthonormal") # create a stack of matrices of dir.T @ alphas @ dir - anis_alpha = np.einsum("ikn,ik,ikm->inm", reg_dirs, anis_alpha, reg_dirs) + anis_alpha = np.einsum("ink,ik,imk->inm", reg_dirs, anis_alpha, reg_dirs) # Then select the upper diagonal components for input to discretize if mesh.dim == 2: anis_alpha = np.stack( @@ -187,18 +199,24 @@ def cell_gradient(self): """ if getattr(self, "_cell_gradient", None) is None: mesh = self.regularization_mesh.mesh - # Turn off cell_gradient at boundary faces - bf = mesh.project_face_to_boundary_face.indices - v = np.ones(mesh.n_faces) - v[bf] = 0.0 - P = sp.diags(v) try: cell_gradient = mesh.cell_gradient except AttributeError: a = mesh.face_areas v = mesh.average_cell_to_face @ mesh.cell_volumes cell_gradient = sp.diags(a / v) @ mesh.stencil_cell_gradient - self._cell_gradient = P @ cell_gradient + + v 
= np.ones(mesh.n_cells) + # Turn off cell_gradient at boundary faces + if self.active_cells is not None: + v[~self.active_cells] = 0 + + dv = cell_gradient @ v + P = sp.diags((np.abs(dv) <= 1e-16).astype(int)) + cell_gradient = P @ cell_gradient + if self.active_cells is not None: + cell_gradient = cell_gradient[:, self.active_cells] + self._cell_gradient = cell_gradient return self._cell_gradient @property @@ -215,7 +233,9 @@ def W(self): for values in self._weights.values(): cell_weights *= values reg_model = self._anis_alpha * cell_weights[:, None] - reg_model[~self.active_cells] = 0.0 + # turn off measure in inactive cells + if self.active_cells is not None: + reg_model[~self.active_cells] = 0.0 self._W = mesh.get_face_inner_product(reg_model) return self._W From 53d1ec6f81e3638e85098a14dec62578c862bfec Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Thu, 2 Feb 2023 09:32:48 -0800 Subject: [PATCH 006/164] formatting --- SimPEG/regularization/rotated.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/regularization/rotated.py b/SimPEG/regularization/rotated.py index 66c07216c4..8b06820232 100644 --- a/SimPEG/regularization/rotated.py +++ b/SimPEG/regularization/rotated.py @@ -86,7 +86,7 @@ def __init__(self, mesh, alphas=None, reg_dirs=None, ortho_check=True, **kwargs) if alphas is None: edge_length = np.min(mesh.edge_lengths) - alphas = edge_length ** 2 * np.ones(mesh.dim) + alphas = edge_length**2 * np.ones(mesh.dim) alphas = validate_ndarray_with_shape( "alphas", alphas, From e662b99fb66e533c20adcf0b0fda20cbcdddcbe0 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Tue, 28 Mar 2023 10:37:18 -0700 Subject: [PATCH 007/164] undo changes here, just alias n_cells -> nC --- SimPEG/regularization/base.py | 14 +++++++------- SimPEG/regularization/correspondence.py | 2 +- SimPEG/regularization/pgi.py | 10 +++++----- SimPEG/regularization/regularization_mesh.py | 4 +--- 4 files changed, 14 insertions(+), 16 deletions(-) diff --git 
a/SimPEG/regularization/base.py b/SimPEG/regularization/base.py index 8f12f32e38..263fc64f8e 100644 --- a/SimPEG/regularization/base.py +++ b/SimPEG/regularization/base.py @@ -162,9 +162,9 @@ def _weights_shapes(self) -> tuple[int] | str: """ if ( getattr(self, "_regularization_mesh", None) is not None - and self.regularization_mesh.n_cells != "*" + and self.regularization_mesh.nC != "*" ): - return (self.regularization_mesh.n_cells,) + return (self.regularization_mesh.nC,) if getattr(self, "_mapping", None) is not None and self.mapping.shape != "*": return (self.mapping.shape[0],) @@ -290,7 +290,7 @@ def _nC_residual(self) -> int: return self.mapping.shape[1] if nC != "*" and nC is not None: - return self.regularization_mesh.n_cells + return self.regularization_mesh.nC return self._weights_shapes[0] @@ -545,7 +545,7 @@ def W(self): ) weights = 1.0 for values in self._weights.values(): - if values.shape[0] == self.regularization_mesh.n_cells: + if values.shape[0] == self.regularization_mesh.nC: values = average_cell_2_face * values weights *= values self._W = utils.sdiag(weights**0.5) @@ -1012,9 +1012,9 @@ def nP(self): return self.mapping.nP elif ( getattr(self, "_regularization_mesh", None) is not None - and self.regularization_mesh.n_cells != "*" + and self.regularization_mesh.nC != "*" ): - return self.regularization_mesh.n_cells + return self.regularization_mesh.nC else: return "*" @@ -1030,7 +1030,7 @@ def _nC_residual(self): if mapping is not None and mapping.shape[1] != "*": return self.mapping.shape[1] elif nC != "*" and nC is not None: - return self.regularization_mesh.n_cells + return self.regularization_mesh.nC else: return self.nP diff --git a/SimPEG/regularization/correspondence.py b/SimPEG/regularization/correspondence.py index b990152ad5..e94c0efe82 100644 --- a/SimPEG/regularization/correspondence.py +++ b/SimPEG/regularization/correspondence.py @@ -115,7 +115,7 @@ def deriv2(self, model, v=None): p2 = k2 * k1 * v1 + k2**2 * v2 return np.r_[p1, p2] 
else: - n = self.regularization_mesh.n_cells + n = self.regularization_mesh.nC A = utils.sdiag(np.ones(n) * (k1**2)) B = utils.sdiag(np.ones(n) * (k2**2)) C = utils.sdiag(np.ones(n) * (k1 * k2)) diff --git a/SimPEG/regularization/pgi.py b/SimPEG/regularization/pgi.py index 8b424295d8..2e8c313688 100644 --- a/SimPEG/regularization/pgi.py +++ b/SimPEG/regularization/pgi.py @@ -95,7 +95,7 @@ def set_weights(self, **weights): for key, values in weights.items(): values = validate_ndarray_with_shape("weights", values, dtype=float) - if values.shape[0] == self.regularization_mesh.n_cells: + if values.shape[0] == self.regularization_mesh.nC: values = np.tile(values, len(self.wiresmap.maps)) values = validate_ndarray_with_shape( @@ -152,7 +152,7 @@ def non_linear_relationships(self, value: bool): @property def wiresmap(self): if getattr(self, "_wiresmap", None) is None: - self._wiresmap = Wires(("m", self.regularization_mesh.n_cells)) + self._wiresmap = Wires(("m", self.regularization_mesh.nC)) return self._wiresmap @wiresmap.setter @@ -171,7 +171,7 @@ def wiresmap(self, wires): def maplist(self): if getattr(self, "_maplist", None) is None: self._maplist = [ - IdentityMap(nP=self.regularization_mesh.n_cells) + IdentityMap(nP=self.regularization_mesh.nC) for maps in self.wiresmap.maps ] return self._maplist @@ -782,14 +782,14 @@ def compute_quasi_geology_model(self): @property def wiresmap(self): if getattr(self, "_wiresmap", None) is None: - self._wiresmap = Wires(("m", self.regularization_mesh.n_cells)) + self._wiresmap = Wires(("m", self.regularization_mesh.nC)) return self._wiresmap @property def maplist(self): if getattr(self, "_maplist", None) is None: self._maplist = [ - IdentityMap(nP=self.regularization_mesh.n_cells) + IdentityMap(nP=self.regularization_mesh.nC) for maps in self.wiresmap.maps ] return self._maplist diff --git a/SimPEG/regularization/regularization_mesh.py b/SimPEG/regularization/regularization_mesh.py index f4a550d713..756ce82731 100755 --- 
a/SimPEG/regularization/regularization_mesh.py +++ b/SimPEG/regularization/regularization_mesh.py @@ -100,9 +100,7 @@ def n_cells(self) -> int: return int(self.active_cells.sum()) return self.mesh.n_cells - nC = deprecate_property( - n_cells, old_name="nC", new_name="n_cells", removal_version="0.19.0" - ) + nC = n_cells @property def dim(self) -> int: From 2af55da816d22b6965f72dcbdf0f211695711831 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Tue, 28 Mar 2023 16:42:32 -0700 Subject: [PATCH 008/164] move regularization tests into there own folder --- tests/base/{ => regularizations}/test_cross_gradient.py | 0 tests/base/{ => regularizations}/test_jtv.py | 0 tests/base/{ => regularizations}/test_pgi_regularization.py | 0 tests/base/{ => regularizations}/test_regularization.py | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename tests/base/{ => regularizations}/test_cross_gradient.py (100%) rename tests/base/{ => regularizations}/test_jtv.py (100%) rename tests/base/{ => regularizations}/test_pgi_regularization.py (100%) rename tests/base/{ => regularizations}/test_regularization.py (100%) diff --git a/tests/base/test_cross_gradient.py b/tests/base/regularizations/test_cross_gradient.py similarity index 100% rename from tests/base/test_cross_gradient.py rename to tests/base/regularizations/test_cross_gradient.py diff --git a/tests/base/test_jtv.py b/tests/base/regularizations/test_jtv.py similarity index 100% rename from tests/base/test_jtv.py rename to tests/base/regularizations/test_jtv.py diff --git a/tests/base/test_pgi_regularization.py b/tests/base/regularizations/test_pgi_regularization.py similarity index 100% rename from tests/base/test_pgi_regularization.py rename to tests/base/regularizations/test_pgi_regularization.py diff --git a/tests/base/test_regularization.py b/tests/base/regularizations/test_regularization.py similarity index 100% rename from tests/base/test_regularization.py rename to tests/base/regularizations/test_regularization.py 
From 56425a2629c7cadd62d5880d9f17358123a5c7f9 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Wed, 29 Mar 2023 09:35:47 -0700 Subject: [PATCH 009/164] add tests --- SimPEG/regularization/rotated.py | 14 +++++------ .../regularizations/test_full_gradient.py | 25 +++++++++++++++++++ .../regularizations/test_regularization.py | 1 + 3 files changed, 33 insertions(+), 7 deletions(-) create mode 100644 tests/base/regularizations/test_full_gradient.py diff --git a/SimPEG/regularization/rotated.py b/SimPEG/regularization/rotated.py index 8b06820232..70289be22d 100644 --- a/SimPEG/regularization/rotated.py +++ b/SimPEG/regularization/rotated.py @@ -52,13 +52,13 @@ class SmoothnessFullGradient(BaseRegularization): We can also rotate the axis in which we want to preferentially smooth. Say we want to smooth twice as much along the +x1,+x2 diagonal as we do along the -x1,+x2 diagonal, - effectively rotating our smoothing 45 degrees. Note that we must provide orthonormal - vectors, and the columns of the matrix represent the vectors (not the rows). + effectively rotating our smoothing 45 degrees. Note and the columns of the matrix + represent the directional vectors (not the rows). >>> sqrt2 = np.sqrt(2) - >>> reg_dirs = [ + >>> reg_dirs = np.array([ ... [sqrt2, -sqrt2], ... [sqrt2, sqrt2], - ... ] + ... ]) >>> reg = SmoothnessFullGradient(mesh, alphas, reg_dirs=reg_dirs) Notes @@ -74,8 +74,8 @@ class SmoothnessFullGradient(BaseRegularization): ..math: \mathbf{a} = \mathbf{Q}\mathbf{L}\mathbf{Q}^{-1} - `Q` is then the regularization directions `reg_dirs`, and `L` is the represents the weighting - along each direction, with `alphas` along its diagonal. These are multiplied to form the + `Q` is then the regularization directions ``reg_dirs``, and `L` is represents the weighting + along each direction, with ``alphas`` along its diagonal. These are multiplied to form the anisotropic alpha used for rotated gradients. 
""" @@ -140,7 +140,7 @@ def __init__(self, mesh, alphas=None, reg_dirs=None, ortho_check=True, **kwargs) for i, M in enumerate(reg_dirs): if not np.allclose(eye, M @ M.T): raise ValueError(f"Matrix {i} is not orthonormal") - # create a stack of matrices of dir.T @ alphas @ dir + # create a stack of matrices of dir @ alphas @ dir.T anis_alpha = np.einsum("ink,ik,imk->inm", reg_dirs, anis_alpha, reg_dirs) # Then select the upper diagonal components for input to discretize if mesh.dim == 2: diff --git a/tests/base/regularizations/test_full_gradient.py b/tests/base/regularizations/test_full_gradient.py new file mode 100644 index 0000000000..f2e4da35c4 --- /dev/null +++ b/tests/base/regularizations/test_full_gradient.py @@ -0,0 +1,25 @@ +from discretize import TensorMesh +from discretize.tests import OrderTest +import sympy as sym +import numpy as np +import matplotlib.pyplot as plt +from SimPEG.regularization import SmoothnessFullGradient + + +class RegOrderTest(OrderTest): + meshTypes = ["uniformTensorMesh", "uniformTree"] + meshSizes = [4, 8, 16, 32] + meshDimension = 2 + + def getError(self): + true_val = 59.2176264065362 / 2 + x = self.M.cell_centers[:, 0] + y = self.M.cell_centers[:, 1] + # a function that is zero at edge with zero derivative + f_cc = (1 - np.cos(2 * x * np.pi)) * (1 - np.cos(2 * y * np.pi)) + + reg = SmoothnessFullGradient(self.M, alphas=[1, 1]) + return reg(f_cc) - true_val + + def test_orderWeakCellGradIntegral(self): + self.orderTest() diff --git a/tests/base/regularizations/test_regularization.py b/tests/base/regularizations/test_regularization.py index 5b6081caf5..259de7a2ad 100644 --- a/tests/base/regularizations/test_regularization.py +++ b/tests/base/regularizations/test_regularization.py @@ -27,6 +27,7 @@ "CrossGradient", "LinearCorrespondence", "JointTotalVariation", + "SmoothnessFullGradient", ] From f0aac5daf425b5883530acffd3755144e22e6667 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Fri, 31 Mar 2023 17:11:38 -0700 Subject: 
[PATCH 010/164] remove unused imports --- tests/base/regularizations/test_full_gradient.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/base/regularizations/test_full_gradient.py b/tests/base/regularizations/test_full_gradient.py index f2e4da35c4..9f12d4f43f 100644 --- a/tests/base/regularizations/test_full_gradient.py +++ b/tests/base/regularizations/test_full_gradient.py @@ -1,6 +1,4 @@ -from discretize import TensorMesh from discretize.tests import OrderTest -import sympy as sym import numpy as np import matplotlib.pyplot as plt from SimPEG.regularization import SmoothnessFullGradient From 3e4f72aaa17c36f064a43d193d17a6ec03c24457 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Thu, 15 Jun 2023 09:08:04 -0700 Subject: [PATCH 011/164] add ability to work with a function input to the inner matmul operation --- SimPEG/base/pde_simulation.py | 114 ++++++++++++++++++++++++---------- 1 file changed, 81 insertions(+), 33 deletions(-) diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index c62c5c89ec..336b270c26 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -1,6 +1,6 @@ import numpy as np import scipy.sparse as sp -from discretize.utils import Zero +from discretize.utils import Zero, TensorType from ..simulation import BaseSimulation from .. 
import props from scipy.constants import mu_0 @@ -8,34 +8,63 @@ def __inner_mat_mul_op(M, u, v=None, adjoint=False): u = np.squeeze(u) - if v is not None: - if v.ndim > 1: - v = np.squeeze(v) - if u.ndim > 1: - # u has multiple fields - if v.ndim == 1: - v = v[:, None] - else: + if sp.issparse(M): + if v is not None: if v.ndim > 1: - u = u[:, None] - if v.ndim > 2: - u = u[:, None, :] - if adjoint: - if u.ndim > 1 and u.shape[-1] > 1: - return M.T * (u * v).sum(axis=-1) - return M.T * (u * v) - if u.ndim > 1 and u.shape[1] > 1: - return np.squeeze(u[:, None, :] * (M * v)[:, :, None]) - return u * (M * v) + v = np.squeeze(v) + if u.ndim > 1: + # u has multiple fields + if v.ndim == 1: + v = v[:, None] + else: + if v.ndim > 1: + u = u[:, None] + if v.ndim > 2: + u = u[:, None, :] + if adjoint: + if u.ndim > 1 and u.shape[-1] > 1: + return M.T * (u * v).sum(axis=-1) + return M.T * (u * v) + if u.ndim > 1 and u.shape[1] > 1: + return np.squeeze(u[:, None, :] * (M * v)[:, :, None]) + return u * (M * v) + else: + if u.ndim > 1: + UM = sp.vstack([sp.diags(u[:, i]) @ M for i in range(u.shape[1])]) + else: + U = sp.diags(u, format="csr") + UM = U @ M + if adjoint: + return UM.T + return UM else: + # assume it was a tuple of M_func, prop_deriv + M_func, prop_deriv = M if u.ndim > 1: - UM = sp.vstack([sp.diags(u[:, i]) @ M for i in range(u.shape[1])]) + Mu = [M_func(u[:, i]) for i in range(u.shape[1])] + if v is None: + Mu = sp.vstack([M @ prop_deriv for M in Mu]) + if adjoint: + Mu = Mu.T + return Mu + elif v.ndim > 1: + v = np.squeeze(v) + if adjoint: + return sum([prop_deriv.T @ (M.T @ v) for M in Mu]) + v = prop_deriv @ v + return np.stack([M @ pv for M in Mu], axis=-1) else: - U = sp.diags(u, format="csr") - UM = U @ M - if adjoint: - return UM.T - return UM + Mu = M_func(u) + if v is None: + Mu = Mu @ prop_deriv + if adjoint: + Mu = M.T + return Mu + elif v.ndim > 1: + v = np.squeeze(v) + if adjoint: + return prop_deriv.T @ (Mu.T @ v) + return Mu @ (prop_deriv @ v) def 
with_property_mass_matrices(property_name): @@ -233,10 +262,20 @@ def MfDeriv_prop(self, u, v=None, adjoint=False): stash_name = f"_Mf_{arg}_deriv" if getattr(self, stash_name, None) is None: prop = getattr(self, arg.lower()) - M_prop_deriv = self.mesh.get_face_inner_product_deriv( - model=prop - )(np.ones(self.mesh.n_faces)) * getattr(self, f"{arg.lower()}Deriv") - setattr(self, stash_name, M_prop_deriv) + t_type = TensorType(self.mesh, prop) + + M_func = self.mesh.get_face_inner_product_deriv(model=prop) + prop_deriv = getattr(self, f"{arg.lower()}Deriv") + if t_type < 3 and self.mesh._meshType.lower() in ( + "cyl", + "tensor", + "tree", + ): + M_prop_deriv = M_func(np.ones(self.mesh.n_faces)) @ prop_deriv + setattr(self, stash_name, M_prop_deriv) + else: + setattr(self, stash_name, (M_func, prop_deriv)) + return __inner_mat_mul_op( getattr(self, stash_name), u, v=v, adjoint=adjoint ) @@ -254,10 +293,19 @@ def MeDeriv_prop(self, u, v=None, adjoint=False): stash_name = f"_Me_{arg}_deriv" if getattr(self, stash_name, None) is None: prop = getattr(self, arg.lower()) - M_prop_deriv = self.mesh.get_edge_inner_product_deriv( - prop - )(np.ones(self.mesh.n_edges)) * getattr(self, f"{arg.lower()}Deriv") - setattr(self, stash_name, M_prop_deriv) + t_type = TensorType(self.mesh, prop) + + M_func = self.mesh.get_edge_inner_product_deriv(model=prop) + prop_deriv = getattr(self, f"{arg.lower()}Deriv") + if t_type < 3 and self.mesh._meshType.lower() in ( + "cyl", + "tensor", + "tree", + ): + M_prop_deriv = M_func(np.ones(self.mesh.n_edges)) @ prop_deriv + setattr(self, stash_name, M_prop_deriv) + else: + setattr(self, stash_name, (M_func, prop_deriv)) return __inner_mat_mul_op( getattr(self, stash_name), u, v=v, adjoint=adjoint ) From e6ef78c2b53a10bb098c796311c14dd5010b8f17 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Thu, 15 Jun 2023 09:24:54 -0700 Subject: [PATCH 012/164] variable name --- SimPEG/base/pde_simulation.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index 336b270c26..ad8690fd81 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -51,7 +51,7 @@ def __inner_mat_mul_op(M, u, v=None, adjoint=False): v = np.squeeze(v) if adjoint: return sum([prop_deriv.T @ (M.T @ v) for M in Mu]) - v = prop_deriv @ v + pv = prop_deriv @ v return np.stack([M @ pv for M in Mu], axis=-1) else: Mu = M_func(u) From 824193b5757fbcefa750cde62736cabd60187ea0 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Thu, 15 Jun 2023 11:02:41 -0700 Subject: [PATCH 013/164] add tests, and fix implementation --- SimPEG/base/pde_simulation.py | 8 +- tests/base/test_mass_matrices.py | 198 ++++++++++++++++++++++++++++++- 2 files changed, 201 insertions(+), 5 deletions(-) diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index 336b270c26..9594aba51c 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -50,15 +50,17 @@ def __inner_mat_mul_op(M, u, v=None, adjoint=False): elif v.ndim > 1: v = np.squeeze(v) if adjoint: - return sum([prop_deriv.T @ (M.T @ v) for M in Mu]) - v = prop_deriv @ v + return sum( + [prop_deriv.T @ (Mu[i].T @ v[..., i]) for i in range(u.shape[1])] + ) + pv = prop_deriv @ v return np.stack([M @ pv for M in Mu], axis=-1) else: Mu = M_func(u) if v is None: Mu = Mu @ prop_deriv if adjoint: - Mu = M.T + Mu = Mu.T return Mu elif v.ndim > 1: v = np.squeeze(v) diff --git a/tests/base/test_mass_matrices.py b/tests/base/test_mass_matrices.py index f112007756..b4da712e29 100644 --- a/tests/base/test_mass_matrices.py +++ b/tests/base/test_mass_matrices.py @@ -46,7 +46,7 @@ def setUp(self): self.start_diag_mod = np.r_[ np.log(np.full(n_cells, 1e-2)), np.log(np.full(n_cells, 2e-2)), - np.log(np.full(n_cells, 3e-2)) + np.log(np.full(n_cells, 3e-2)), ] + np.random.randn(3 * n_cells) self.sim_full_aniso = SimpleSim(self.mesh, sigmaMap=maps.IdentityMap()) @@ -195,6 +195,66 @@ def 
test_forward_expected_shapes(self): UM @ v, sim.MfSigmaDeriv(u2, v).reshape(-1, order="F") ) + def test_forward_anis_expected_shapes(self): + sim = self.sim + sim.model = self.start_full_mod + + n_f = self.mesh.n_faces + n_p = sim.model.size + # if U.shape (*, ) + u = np.random.rand(n_f) + v = np.random.randn(n_p) + u2 = np.random.rand(n_f, 2) + v2 = np.random.randn(n_p, 4) + + # These cases should all return an array of shape (n_f, ) + # if V.shape (*, ) + out = sim.MfSigmaDeriv(u, v) + assert out.shape == (n_f,) + out = sim.MfSigmaDeriv(u, v[:, None]) + assert out.shape == (n_f,) + out = sim.MfSigmaDeriv(u[:, None], v) + assert out.shape == (n_f,) + out = sim.MfSigmaDeriv(u[:, None], v[:, None]) + assert out.shape == (n_f,) + + # now check passing multiple V's + out = sim.MfSigmaDeriv(u, v2) + assert out.shape == (n_f, 4) + out = sim.MfSigmaDeriv(u[:, None], v2) + assert out.shape == (n_f, 4) + + # also ensure it properly broadcasted the operation.... + out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i] = sim.MfSigmaDeriv(u[:, None], v2[:, i]) + np.testing.assert_equal(out, out_2) + + # now check for multiple source polarizations + out = sim.MfSigmaDeriv(u2, v) + assert out.shape == (n_f, 2) + out = sim.MfSigmaDeriv(u2, v[:, None]) + assert out.shape == (n_f, 2) + + # and with multiple RHS + out = sim.MfSigmaDeriv(u2, v2) + assert out.shape == (n_f, v2.shape[1], 2) + + # and test broadcasting here... 
+ out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i, :] = sim.MfSigmaDeriv(u2, v2[:, i]) + np.testing.assert_equal(out, out_2) + + # test None as v + UM = sim.MfSigmaDeriv(u) + np.testing.assert_allclose(UM @ v, sim.MfSigmaDeriv(u, v)) + + UM = sim.MfSigmaDeriv(u2) + np.testing.assert_allclose( + UM @ v, sim.MfSigmaDeriv(u2, v).reshape(-1, order="F") + ) + def test_adjoint_expected_shapes(self): sim = self.sim sim.model = self.start_mod @@ -257,7 +317,69 @@ def test_adjoint_expected_shapes(self): UMT @ v2_2.reshape(-1, order="F"), sim.MfSigmaDeriv(u2, v2_2, adjoint=True) ) - def test_adjoint_opp_shapes(self): + def test_adjoint_anis_expected_shapes(self): + sim = self.sim + sim.model = self.start_full_mod + + n_f = self.mesh.n_faces + n_p = sim.model.size + + u = np.random.rand(n_f) + v = np.random.randn(n_f) + v2 = np.random.randn(n_f, 4) + u2 = np.random.rand(n_f, 2) + v2_2 = np.random.randn(n_f, 2) + v3 = np.random.rand(n_f, 4, 2) + + # These cases should all return an array of shape (n_c, ) + # if V.shape (n_f, ) + out = sim.MfSigmaDeriv(u, v, adjoint=True) + assert out.shape == (n_p,) + out = sim.MfSigmaDeriv(u, v[:, None], adjoint=True) + assert out.shape == (n_p,) + out = sim.MfSigmaDeriv(u[:, None], v, adjoint=True) + assert out.shape == (n_p,) + out = sim.MfSigmaDeriv(u[:, None], v[:, None], adjoint=True) + assert out.shape == (n_p,) + + # now check passing multiple V's + out = sim.MfSigmaDeriv(u, v2, adjoint=True) + assert out.shape == (n_p, 4) + out = sim.MfSigmaDeriv(u[:, None], v2, adjoint=True) + assert out.shape == (n_p, 4) + + # also ensure it properly broadcasted the operation.... 
+ out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i] = sim.MfSigmaDeriv(u, v2[:, i], adjoint=True) + np.testing.assert_equal(out, out_2) + + # now check for multiple source polarizations + out = sim.MfSigmaDeriv(u2, v2_2, adjoint=True) + assert out.shape == (n_p,) + out = sim.MfSigmaDeriv(u2, v2_2, adjoint=True) + assert out.shape == (n_p,) + + # and with multiple RHS + out = sim.MfSigmaDeriv(u2, v3, adjoint=True) + assert out.shape == (n_p, v3.shape[1]) + + # and test broadcasting here... + out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i] = sim.MfSigmaDeriv(u2, v3[:, i, :], adjoint=True) + np.testing.assert_equal(out, out_2) + + # test None as v + UMT = sim.MfSigmaDeriv(u, adjoint=True) + np.testing.assert_allclose(UMT @ v, sim.MfSigmaDeriv(u, v, adjoint=True)) + + UMT = sim.MfSigmaDeriv(u2, adjoint=True) + np.testing.assert_allclose( + UMT @ v2_2.reshape(-1, order="F"), sim.MfSigmaDeriv(u2, v2_2, adjoint=True) + ) + + def test_adjoint_opp(self): sim = self.sim sim.model = self.start_mod @@ -316,6 +438,44 @@ def test_adjoint_opp_shapes(self): yJtv = np.sum(y2 * sim.MfSigmaIDeriv(u2, v3, adjoint=True)) np.testing.assert_allclose(vJy, yJtv) + def test_anis_adjoint_opp(self): + sim = self.sim + sim.model = self.start_full_mod + + n_f = self.mesh.n_faces + n_p = sim.model.size + + u = np.random.rand(n_f) + u2 = np.random.rand(n_f, 2) + + y = np.random.rand(n_p) + y2 = np.random.rand(n_p, 4) + + v = np.random.randn(n_f) + v2 = np.random.randn(n_f, 4) + v2_2 = np.random.randn(n_f, 2) + v3 = np.random.rand(n_f, 4, 2) + + # u1, y1 -> v1 + vJy = v @ sim.MfSigmaDeriv(u, y) + yJtv = y @ sim.MfSigmaDeriv(u, v, adjoint=True) + np.testing.assert_allclose(vJy, yJtv) + + # u1, y2 -> v2 + vJy = np.sum(v2 * sim.MfSigmaDeriv(u, y2)) + yJtv = np.sum(y2 * sim.MfSigmaDeriv(u, v2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y1 -> v2_2 + vJy = np.sum(v2_2 * sim.MfSigmaDeriv(u2, y)) + yJtv = np.sum(y * sim.MfSigmaDeriv(u2, 
v2_2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y2 -> v3 + vJy = np.sum(v3 * sim.MfSigmaDeriv(u2, y2)) + yJtv = np.sum(y2 * sim.MfSigmaDeriv(u2, v3, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + def test_Mcc_deriv(self): u = np.random.randn(self.mesh.n_cells) sim = self.sim @@ -418,6 +578,40 @@ def Jvec(v): assert check_derivative(f, x0=x0, num=3, plotIt=False) + def test_Mf_diagonal_anisotropy_deriv(self): + u = np.random.randn(self.mesh.n_faces) + sim = self.sim + x0 = self.start_diag_mod + + def f(x): + sim.model = x + d = sim.MfSigma @ u + + def Jvec(v): + sim.model = x0 + return sim.MfSigmaDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + + def test_Mf_full_anisotropy_deriv(self): + u = np.random.randn(self.mesh.n_faces) + sim = self.sim_full_aniso + x0 = self.start_full_mod + + def f(x): + sim.model = x + d = sim.MfSigma @ u + + def Jvec(v): + sim.model = x0 + return sim.MfSigmaDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + def test_MccI_deriv(self): u = np.random.randn(self.mesh.n_cells) sim = self.sim From a46fdd2b68ca3bc84581fda033d292b0fd976009 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Fri, 16 Jun 2023 11:27:35 -0700 Subject: [PATCH 014/164] fix call for vector `v` --- SimPEG/regularization/rotated.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/SimPEG/regularization/rotated.py b/SimPEG/regularization/rotated.py index 70289be22d..5b5afad63e 100644 --- a/SimPEG/regularization/rotated.py +++ b/SimPEG/regularization/rotated.py @@ -181,13 +181,13 @@ def deriv(self, m): return m_d.T * (G.T @ (M_f @ r)) def deriv2(self, m, v=None): - m_d_v = self.mapping.deriv(self._delta_m(m), v) + m_d = self.mapping.deriv(self._delta_m(m)) G = self.cell_gradient M_f = self.W if v is None: - return m_d_v.T @ (G.T @ M_f @ G) @ m_d_v + return m_d.T @ (G.T @ M_f @ G) @ m_d - return m_d_v.T @ (G.T @ (M_f @ (G @ m_d_v))) + 
return m_d.T @ (G.T @ (M_f @ (G @ (m_d @ v)))) @property def cell_gradient(self): From 679a0411df93a491bd5f605c207dd3edde40f398 Mon Sep 17 00:00:00 2001 From: dccowan Date: Thu, 6 Jul 2023 13:59:53 -0700 Subject: [PATCH 015/164] Create face and edge property mass matrices. Add tests --- SimPEG/base/__init__.py | 2 + SimPEG/base/pde_simulation.py | 312 +++++++++++++++ tests/base/test_mass_matrices.py | 636 ++++++++++++++++++++++++++++++- 3 files changed, 947 insertions(+), 3 deletions(-) diff --git a/SimPEG/base/__init__.py b/SimPEG/base/__init__.py index d7425ebd92..c2cb75dcae 100644 --- a/SimPEG/base/__init__.py +++ b/SimPEG/base/__init__.py @@ -3,4 +3,6 @@ BaseElectricalPDESimulation, BaseMagneticPDESimulation, with_property_mass_matrices, + with_surface_property_mass_matrices, + with_line_property_mass_matrices, ) diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index a38ad4a467..74d897bb46 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -350,6 +350,279 @@ def _clear_on_prop_update(self): return decorator +def with_surface_property_mass_matrices(property_name): + """ + This decorator will automatically populate all of the surface property mass matrices. + + Given the property "prop", this will add properties and functions to the class + representing all of the possible mass matrix operations on the mesh. + + For a given property, "prop", they will be named: + + * MeProp + * MePropDeriv + * MePropI + * MePropIDeriv + * MfProp + * MfPropDeriv + * MfPropI + * MfPropIDeriv + """ + + def decorator(cls): + arg = property_name.lower() + arg = arg[0].upper() + arg[1:] + + @property + def Mf_prop(self): + """ + Face property inner product surface matrix. 
+ """ + stash_name = f"__Mf_{arg}" + if getattr(self, stash_name, None) is None: + prop = getattr(self, arg.lower()) + M_prop = self.mesh.get_face_inner_product_surface(model=prop) + setattr(self, stash_name, M_prop) + return getattr(self, stash_name) + + setattr(cls, f"_Mf{arg}", Mf_prop) + + @property + def Me_prop(self): + """ + Edge property inner product surface matrix. + """ + stash_name = f"__Me_{arg}" + if getattr(self, stash_name, None) is None: + prop = getattr(self, arg.lower()) + M_prop = self.mesh.get_edge_inner_product_surface(model=prop) + setattr(self, stash_name, M_prop) + return getattr(self, stash_name) + + setattr(cls, f"_Me{arg}", Me_prop) + + @property + def MfI_prop(self): + """ + Face property inner product inverse matrix. + """ + stash_name = f"__MfI_{arg}" + if getattr(self, stash_name, None) is None: + prop = getattr(self, arg.lower()) + M_prop = self.mesh.get_face_inner_product_surface( + model=prop, invert_matrix=True + ) + setattr(self, stash_name, M_prop) + return getattr(self, stash_name) + + setattr(cls, f"_Mf{arg}I", MfI_prop) + + @property + def MeI_prop(self): + """ + Edge property inner product inverse matrix. + """ + stash_name = f"__MeI_{arg}" + if getattr(self, stash_name, None) is None: + prop = getattr(self, arg.lower()) + M_prop = self.mesh.get_edge_inner_product_surface( + model=prop, invert_matrix=True + ) + setattr(self, stash_name, M_prop) + return getattr(self, stash_name) + + setattr(cls, f"_Me{arg}I", MeI_prop) + + def MfDeriv_prop(self, u, v=None, adjoint=False): + """ + Derivative of `MfProperty` with respect to the model. 
+ """ + if getattr(self, f"{arg.lower()}Map") is None: + return Zero() + if isinstance(u, Zero) or isinstance(v, Zero): + return Zero() + stash_name = f"__Mf_{arg}_deriv" + if getattr(self, stash_name, None) is None: + M_prop_deriv = self.mesh.get_face_inner_product_surface_deriv( + np.ones(self.mesh.n_faces) + )(np.ones(self.mesh.n_faces)) * getattr(self, f"{arg.lower()}Deriv") + setattr(self, stash_name, M_prop_deriv) + return __inner_mat_mul_op( + getattr(self, stash_name), u, v=v, adjoint=adjoint + ) + + setattr(cls, f"_Mf{arg}Deriv", MfDeriv_prop) + + def MeDeriv_prop(self, u, v=None, adjoint=False): + """ + Derivative of `MeProperty` with respect to the model. + """ + if getattr(self, f"{arg.lower()}Map") is None: + return Zero() + if isinstance(u, Zero) or isinstance(v, Zero): + return Zero() + stash_name = f"__Me_{arg}_deriv" + if getattr(self, stash_name, None) is None: + M_prop_deriv = self.mesh.get_edge_inner_product_surface_deriv( + np.ones(self.mesh.n_faces) + )(np.ones(self.mesh.n_edges)) * getattr(self, f"{arg.lower()}Deriv") + setattr(self, stash_name, M_prop_deriv) + return __inner_mat_mul_op( + getattr(self, stash_name), u, v=v, adjoint=adjoint + ) + + setattr(cls, f"_Me{arg}Deriv", MeDeriv_prop) + + def MfIDeriv_prop(self, u, v=None, adjoint=False): + """I + Derivative of `MfPropertyI` with respect to the model. + """ + if getattr(self, f"{arg.lower()}Map") is None: + return Zero() + if isinstance(u, Zero) or isinstance(v, Zero): + return Zero() + + MI_prop = getattr(self, f"_Mf{arg}I") + u = MI_prop @ (MI_prop @ -u) + M_prop_deriv = getattr(self, f"_Mf{arg}Deriv") + return M_prop_deriv(u, v, adjoint=adjoint) + + setattr(cls, f"_Mf{arg}IDeriv", MfIDeriv_prop) + + def MeIDeriv_prop(self, u, v=None, adjoint=False): + """ + Derivative of `MePropertyI` with respect to the model. 
+ """ + if getattr(self, f"{arg.lower()}Map") is None: + return Zero() + if isinstance(u, Zero) or isinstance(v, Zero): + return Zero() + + MI_prop = getattr(self, f"_Me{arg}I") + u = MI_prop @ (MI_prop @ -u) + M_prop_deriv = getattr(self, f"_Me{arg}Deriv") + return M_prop_deriv(u, v, adjoint=adjoint) + + setattr(cls, f"_Me{arg}IDeriv", MeIDeriv_prop) + + @property + def _clear_on_prop_update(self): + items = [ + f"__Mf_{arg}", + f"__Me_{arg}", + f"__MfI_{arg}", + f"__MeI_{arg}", + f"__Mf_{arg}_deriv", + f"__Me_{arg}_deriv", + ] + return items + + setattr(cls, f"_clear_on_{arg.lower()}_update", _clear_on_prop_update) + return cls + + return decorator + + +def with_line_property_mass_matrices(property_name): + """ + This decorator will automatically populate all of the line property mass matrices. + + Given the property "prop", this will add properties and functions to the class + representing all of the possible mass matrix operations on the mesh. + + For a given property, "prop", they will be named: + + * MeProp + * MePropDeriv + * MePropI + * MePropIDeriv + """ + + def decorator(cls): + arg = property_name.lower() + arg = arg[0].upper() + arg[1:] + + @property + def Me_prop(self): + """ + Edge property inner product line matrix. + """ + stash_name = f"__Me_{arg}" + if getattr(self, stash_name, None) is None: + prop = getattr(self, arg.lower()) + M_prop = self.mesh.get_edge_inner_product_line(model=prop) + setattr(self, stash_name, M_prop) + return getattr(self, stash_name) + + setattr(cls, f"_Me{arg}", Me_prop) + + @property + def MeI_prop(self): + """ + Edge property inner product inverse matrix. 
+ """ + stash_name = f"__MeI_{arg}" + if getattr(self, stash_name, None) is None: + prop = getattr(self, arg.lower()) + M_prop = self.mesh.get_edge_inner_product_line( + model=prop, invert_matrix=True + ) + setattr(self, stash_name, M_prop) + return getattr(self, stash_name) + + setattr(cls, f"_Me{arg}I", MeI_prop) + + def MeDeriv_prop(self, u, v=None, adjoint=False): + """ + Derivative of `MeProperty` with respect to the model. + """ + if getattr(self, f"{arg.lower()}Map") is None: + return Zero() + if isinstance(u, Zero) or isinstance(v, Zero): + return Zero() + stash_name = f"__Me_{arg}_deriv" + if getattr(self, stash_name, None) is None: + M_prop_deriv = self.mesh.get_edge_inner_product_line_deriv( + np.ones(self.mesh.n_edges) + )(np.ones(self.mesh.n_edges)) * getattr(self, f"{arg.lower()}Deriv") + setattr(self, stash_name, M_prop_deriv) + return __inner_mat_mul_op( + getattr(self, stash_name), u, v=v, adjoint=adjoint + ) + + setattr(cls, f"_Me{arg}Deriv", MeDeriv_prop) + + def MeIDeriv_prop(self, u, v=None, adjoint=False): + """ + Derivative of `MePropertyI` with respect to the model. 
+ """ + if getattr(self, f"{arg.lower()}Map") is None: + return Zero() + if isinstance(u, Zero) or isinstance(v, Zero): + return Zero() + + MI_prop = getattr(self, f"_Me{arg}I") + u = MI_prop @ (MI_prop @ -u) + M_prop_deriv = getattr(self, f"_Me{arg}Deriv") + return M_prop_deriv(u, v, adjoint=adjoint) + + setattr(cls, f"_Me{arg}IDeriv", MeIDeriv_prop) + + @property + def _clear_on_prop_update(self): + items = [ + f"__Me_{arg}", + f"__MeI_{arg}", + f"__Me_{arg}_deriv", + ] + return items + + setattr(cls, f"_clear_on_{arg.lower()}_update", _clear_on_prop_update) + return cls + + return decorator + + class BasePDESimulation(BaseSimulation): @property def Vol(self): @@ -495,3 +768,42 @@ def deleteTheseOnModelUpdate(self): if self.muMap is not None or self.muiMap is not None: toDelete = toDelete + self._clear_on_mu_update + self._clear_on_mui_update return toDelete + + +@with_surface_property_mass_matrices("tau") +@with_line_property_mass_matrices("lambda") +class BaseConductancePDESimulation(BasePDESimulation): + tau, tauMap, tauDeriv = props.Invertible( + "Electrical Conductance (S)", + ) + kappa, kappaMap, kappaDeriv = props.Invertible( + "Electrical Resistance per meter (Ohm/m)", + ) + + def __init__( + self, mesh, tau=None, tauMap=None, kappa=None, kappaMap=None, **kwargs + ): + super().__init__(mesh=mesh, **kwargs) + self.tau = tau + self.kappa = kappa + self.tauMap = tauMap + self.kappaMap = kappaMap + + def __setattr__(self, name, value): + super().__setattr__(name, value) + if name in ["tau", "kappa"]: + for mat in self._clear_on_tau_update + self._clear_on_kappa_update: + if hasattr(self, mat): + delattr(self, mat) + + @property + def deleteTheseOnModelUpdate(self): + """ + items to be deleted if the model for conductance or resistance per meter is updated + """ + toDelete = super().deleteTheseOnModelUpdate + if self.tauMap is not None or self.kappaMap is not None: + toDelete = ( + toDelete + self._clear_on_tau_update + self._clear_on_kappa_update + ) + return 
toDelete diff --git a/tests/base/test_mass_matrices.py b/tests/base/test_mass_matrices.py index 396a75cd29..44463f24b6 100644 --- a/tests/base/test_mass_matrices.py +++ b/tests/base/test_mass_matrices.py @@ -1,4 +1,9 @@ -from SimPEG.base import with_property_mass_matrices, BasePDESimulation +from SimPEG.base import ( + with_property_mass_matrices, + with_surface_property_mass_matrices, + with_line_property_mass_matrices, + BasePDESimulation, +) from SimPEG import props, maps import unittest import discretize @@ -11,19 +16,38 @@ # define a very simple class... @with_property_mass_matrices("sigma") @with_property_mass_matrices("mu") +@with_surface_property_mass_matrices("tau") +@with_line_property_mass_matrices("kappa") class SimpleSim(BasePDESimulation): sigma, sigmaMap, sigmaDeriv = props.Invertible("Electrical conductivity (S/m)") - + rho, rhoMap, rhoDeriv = props.Invertible("Electrical conductivity (S/m)") + props.Reciprocal(sigma, rho) mu, muMap, muDeriv = props.Invertible("Magnetic Permeability") + tau, tauMap, tauDeriv = props.Invertible("Conductance (S)") + kappa, kappaMap, kappaDeriv = props.Invertible("Resistance per meter (Ohm/m)") def __init__( - self, mesh, survey=None, sigma=None, sigmaMap=None, mu=mu_0, muMap=None + self, + mesh, + survey=None, + sigma=None, + sigmaMap=None, + mu=mu_0, + muMap=None, + tau=None, + tauMap=None, + kappa=None, + kappaMap=None, ): super().__init__(mesh=mesh, survey=survey) self.sigma = sigma self.mu = mu + self.tau = tau + self.kappa = kappa self.sigmaMap = sigmaMap self.muMap = muMap + self.tauMap = tauMap + self.kappaMap = kappaMap @property def deleteTheseOnModelUpdate(self): @@ -33,6 +57,10 @@ def deleteTheseOnModelUpdate(self): toDelete = super().deleteTheseOnModelUpdate if self.sigmaMap is not None or self.rhoMap is not None: toDelete = toDelete + self._clear_on_sigma_update + if self.tauMap is not None: + toDelete = toDelete + self._clear_on_tau_update + if self.kappaMap is not None: + toDelete = toDelete + 
self._clear_on_kappa_update return toDelete @@ -540,3 +568,605 @@ def test_MfI_adjoint(self): yJv = y @ sim.MfSigmaIDeriv(u, v) vJty = v @ sim.MfSigmaIDeriv(u, y, adjoint=True) np.testing.assert_allclose(yJv, vJty) + + +class TestSimSurfaceProperties(unittest.TestCase): + def setUp(self): + self.mesh = discretize.TensorMesh([5, 6, 7]) + + self.sim = SimpleSim(self.mesh, tauMap=maps.ExpMap()) + self.start_mod = np.log(1e-2 * np.ones(self.mesh.n_faces)) + np.random.randn( + self.mesh.n_faces + ) + + def test_zero_returns(self): + n_f = self.mesh.n_faces + n_e = self.mesh.n_edges + sim = self.sim + + v = np.random.rand(n_f) + u_f = np.random.rand(n_f) + u_e = np.random.rand(n_e) + + # Test zero return on u passed as Zero + assert sim._MfTauDeriv(Zero(), v).__class__ == Zero + assert sim._MeTauDeriv(Zero(), v).__class__ == Zero + assert sim._MfTauIDeriv(Zero(), v).__class__ == Zero + assert sim._MeTauIDeriv(Zero(), v).__class__ == Zero + + # Test zero return on v as Zero + assert sim._MfTauDeriv(u_f, Zero()).__class__ == Zero + assert sim._MeTauDeriv(u_e, Zero()).__class__ == Zero + assert sim._MfTauIDeriv(u_f, Zero()).__class__ == Zero + assert sim._MeTauIDeriv(u_e, Zero()).__class__ == Zero + + def test_forward_expected_shapes(self): + sim = self.sim + sim.model = self.start_mod + + n_f = self.mesh.n_faces + # n_c = self.mesh.n_cells + # if U.shape (n_f, ) + u = np.random.rand(n_f) + v = np.random.randn(n_f) + u2 = np.random.rand(n_f, 2) + v2 = np.random.randn(n_f, 4) + + # These cases should all return an array of shape (n_f, ) + # if V.shape (n_c, ) + out = sim._MfTauDeriv(u, v) + assert out.shape == (n_f,) + out = sim._MfTauDeriv(u, v[:, None]) + assert out.shape == (n_f,) + out = sim._MfTauDeriv(u[:, None], v) + assert out.shape == (n_f,) + out = sim._MfTauDeriv(u[:, None], v[:, None]) + assert out.shape == (n_f,) + + # now check passing multiple V's + out = sim._MfTauDeriv(u, v2) + assert out.shape == (n_f, 4) + out = sim._MfTauDeriv(u[:, None], v2) + assert 
out.shape == (n_f, 4) + + # also ensure it properly broadcasted the operation.... + out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i] = sim._MfTauDeriv(u[:, None], v2[:, i]) + np.testing.assert_equal(out, out_2) + + # now check for multiple source polarizations + out = sim._MfTauDeriv(u2, v) + assert out.shape == (n_f, 2) + out = sim._MfTauDeriv(u2, v[:, None]) + assert out.shape == (n_f, 2) + + # and with multiple RHS + out = sim._MfTauDeriv(u2, v2) + assert out.shape == (n_f, v2.shape[1], 2) + + # and test broadcasting here... + out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i, :] = sim._MfTauDeriv(u2, v2[:, i]) + np.testing.assert_equal(out, out_2) + + # test None as v + UM = sim._MfTauDeriv(u) + np.testing.assert_allclose(UM @ v, sim._MfTauDeriv(u, v)) + + UM = sim._MfTauDeriv(u2) + np.testing.assert_allclose( + UM @ v, sim._MfTauDeriv(u2, v).reshape(-1, order="F") + ) + + def test_adjoint_expected_shapes(self): + sim = self.sim + sim.model = self.start_mod + + n_f = self.mesh.n_faces + # n_c = self.mesh.n_cells + + u = np.random.rand(n_f) + v = np.random.randn(n_f) + v2 = np.random.randn(n_f, 4) + u2 = np.random.rand(n_f, 2) + v2_2 = np.random.randn(n_f, 2) + v3 = np.random.rand(n_f, 4, 2) + + # These cases should all return an array of shape (n_c, ) + # if V.shape (n_f, ) + out = sim._MfTauDeriv(u, v, adjoint=True) + assert out.shape == (n_f,) + out = sim._MfTauDeriv(u, v[:, None], adjoint=True) + assert out.shape == (n_f,) + out = sim._MfTauDeriv(u[:, None], v, adjoint=True) + assert out.shape == (n_f,) + out = sim._MfTauDeriv(u[:, None], v[:, None], adjoint=True) + assert out.shape == (n_f,) + + # now check passing multiple V's + out = sim._MfTauDeriv(u, v2, adjoint=True) + assert out.shape == (n_f, 4) + out = sim._MfTauDeriv(u[:, None], v2, adjoint=True) + assert out.shape == (n_f, 4) + + # also ensure it properly broadcasted the operation.... 
+ out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i] = sim._MfTauDeriv(u, v2[:, i], adjoint=True) + np.testing.assert_equal(out, out_2) + + # now check for multiple source polarizations + out = sim._MfTauDeriv(u2, v2_2, adjoint=True) + assert out.shape == (n_f,) + out = sim._MfTauDeriv(u2, v2_2, adjoint=True) + assert out.shape == (n_f,) + + # and with multiple RHS + out = sim._MfTauDeriv(u2, v3, adjoint=True) + assert out.shape == (n_f, v3.shape[1]) + + # and test broadcasting here... + out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i] = sim._MfTauDeriv(u2, v3[:, i, :], adjoint=True) + np.testing.assert_equal(out, out_2) + + # test None as v + UMT = sim._MfTauDeriv(u, adjoint=True) + np.testing.assert_allclose(UMT @ v, sim._MfTauDeriv(u, v, adjoint=True)) + + UMT = sim._MfTauDeriv(u2, adjoint=True) + np.testing.assert_allclose( + UMT @ v2_2.reshape(-1, order="F"), sim._MfTauDeriv(u2, v2_2, adjoint=True) + ) + + def test_adjoint_opp_shapes(self): + sim = self.sim + sim.model = self.start_mod + + n_f = self.mesh.n_faces + # n_c = self.mesh.n_cells + + u = np.random.rand(n_f) + u2 = np.random.rand(n_f, 2) + + y = np.random.rand(n_f) + y2 = np.random.rand(n_f, 4) + + v = np.random.randn(n_f) + v2 = np.random.randn(n_f, 4) + v2_2 = np.random.randn(n_f, 2) + v3 = np.random.rand(n_f, 4, 2) + + # u1, y1 -> v1 + vJy = v @ sim._MfTauDeriv(u, y) + yJtv = y @ sim._MfTauDeriv(u, v, adjoint=True) + np.testing.assert_allclose(vJy, yJtv) + + # u1, y2 -> v2 + vJy = np.sum(v2 * sim._MfTauDeriv(u, y2)) + yJtv = np.sum(y2 * sim._MfTauDeriv(u, v2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y1 -> v2_2 + vJy = np.sum(v2_2 * sim._MfTauDeriv(u2, y)) + yJtv = np.sum(y * sim._MfTauDeriv(u2, v2_2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y2 -> v3 + vJy = np.sum(v3 * sim._MfTauDeriv(u2, y2)) + yJtv = np.sum(y2 * sim._MfTauDeriv(u2, v3, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # Also test 
Inverse opp, just to be sure... + # u1, y1 -> v1 + vJy = v @ sim._MfTauIDeriv(u, y) + yJtv = y @ sim._MfTauIDeriv(u, v, adjoint=True) + np.testing.assert_allclose(vJy, yJtv) + + # u1, y2 -> v2 + vJy = np.sum(v2 * sim._MfTauIDeriv(u, y2)) + yJtv = np.sum(y2 * sim._MfTauIDeriv(u, v2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y1 -> v2_2 + vJy = np.sum(v2_2 * sim._MfTauIDeriv(u2, y)) + yJtv = np.sum(y * sim._MfTauIDeriv(u2, v2_2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y2 -> v3 + vJy = np.sum(v3 * sim._MfTauIDeriv(u2, y2)) + yJtv = np.sum(y2 * sim._MfTauIDeriv(u2, v3, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + def test_Me_deriv(self): + u = np.random.randn(self.mesh.n_edges) + sim = self.sim + x0 = self.start_mod + + def f(x): + sim.model = x + d = sim._MeTau @ u + + def Jvec(v): + sim.model = x0 + return sim._MeTauDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + + def test_Mf_deriv(self): + u = np.random.randn(self.mesh.n_faces) + sim = self.sim + x0 = self.start_mod + + def f(x): + sim.model = x + d = sim._MfTau @ u + + def Jvec(v): + sim.model = x0 + return sim._MfTauDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + + def test_MeI_deriv(self): + u = np.random.randn(self.mesh.n_edges) + sim = self.sim + x0 = self.start_mod + + def f(x): + sim.model = x + d = sim._MeTauI @ u + + def Jvec(v): + sim.model = x0 + return sim._MeTauIDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + + def test_MfI_deriv(self): + u = np.random.randn(self.mesh.n_faces) + sim = self.sim + x0 = self.start_mod + + def f(x): + sim.model = x + d = sim._MfTauI @ u + + def Jvec(v): + sim.model = x0 + return sim._MfTauIDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + + def test_Me_adjoint(self): + n_items = self.mesh.n_edges + u = np.random.randn(n_items) + sim = 
self.sim + sim.model = self.start_mod + + v = np.random.randn(self.mesh.n_faces) + y = np.random.randn(n_items) + + yJv = y @ sim._MeTauDeriv(u, v) + vJty = v @ sim._MeTauDeriv(u, y, adjoint=True) + np.testing.assert_allclose(yJv, vJty) + + def test_Mf_adjoint(self): + n_items = self.mesh.n_faces + u = np.random.randn(n_items) + sim = self.sim + sim.model = self.start_mod + + v = np.random.randn(self.mesh.n_faces) + y = np.random.randn(n_items) + + yJv = y @ sim._MfTauDeriv(u, v) + vJty = v @ sim._MfTauDeriv(u, y, adjoint=True) + np.testing.assert_allclose(yJv, vJty) + + def test_MeI_adjoint(self): + n_items = self.mesh.n_edges + u = np.random.randn(n_items) + sim = self.sim + sim.model = self.start_mod + + v = np.random.randn(self.mesh.n_faces) + y = np.random.randn(n_items) + + yJv = y @ sim._MeTauIDeriv(u, v) + vJty = v @ sim._MeTauIDeriv(u, y, adjoint=True) + np.testing.assert_allclose(yJv, vJty) + + def test_MfI_adjoint(self): + n_items = self.mesh.n_faces + u = np.random.randn(n_items) + sim = self.sim + sim.model = self.start_mod + + v = np.random.randn(self.mesh.n_faces) + y = np.random.randn(n_items) + + yJv = y @ sim._MfTauIDeriv(u, v) + vJty = v @ sim._MfTauIDeriv(u, y, adjoint=True) + np.testing.assert_allclose(yJv, vJty) + + +class TestSimEdgeProperties(unittest.TestCase): + def setUp(self): + self.mesh = discretize.TensorMesh([5, 6, 7]) + + self.sim = SimpleSim(self.mesh, kappaMap=maps.ExpMap()) + self.start_mod = np.log(1e-2 * np.ones(self.mesh.n_edges)) + np.random.randn( + self.mesh.n_edges + ) + + def test_zero_returns(self): + n_e = self.mesh.n_edges + sim = self.sim + + v = np.random.rand(n_e) + u_e = np.random.rand(n_e) + + # Test zero return on u passed as Zero + assert sim._MeKappaDeriv(Zero(), v).__class__ == Zero + assert sim._MeKappaIDeriv(Zero(), v).__class__ == Zero + + # Test zero return on v as Zero + assert sim._MeKappaDeriv(u_e, Zero()).__class__ == Zero + assert sim._MeKappaIDeriv(u_e, Zero()).__class__ == Zero + + def 
test_forward_expected_shapes(self): + sim = self.sim + sim.model = self.start_mod + + n_e = self.mesh.n_edges + # n_c = self.mesh.n_cells + # if U.shape (n_f, ) + u = np.random.rand(n_e) + v = np.random.randn(n_e) + u2 = np.random.rand(n_e, 2) + v2 = np.random.randn(n_e, 4) + + # These cases should all return an array of shape (n_f, ) + # if V.shape (n_c, ) + out = sim._MeKappaDeriv(u, v) + assert out.shape == (n_e,) + out = sim._MeKappaDeriv(u, v[:, None]) + assert out.shape == (n_e,) + out = sim._MeKappaDeriv(u[:, None], v) + assert out.shape == (n_e,) + out = sim._MeKappaDeriv(u[:, None], v[:, None]) + assert out.shape == (n_e,) + + # now check passing multiple V's + out = sim._MeKappaDeriv(u, v2) + assert out.shape == (n_e, 4) + out = sim._MeKappaDeriv(u[:, None], v2) + assert out.shape == (n_e, 4) + + # also ensure it properly broadcasted the operation.... + out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i] = sim._MeKappaDeriv(u[:, None], v2[:, i]) + np.testing.assert_equal(out, out_2) + + # now check for multiple source polarizations + out = sim._MeKappaDeriv(u2, v) + assert out.shape == (n_e, 2) + out = sim._MeKappaDeriv(u2, v[:, None]) + assert out.shape == (n_e, 2) + + # and with multiple RHS + out = sim._MeKappaDeriv(u2, v2) + assert out.shape == (n_e, v2.shape[1], 2) + + # and test broadcasting here... 
+ out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i, :] = sim._MeKappaDeriv(u2, v2[:, i]) + np.testing.assert_equal(out, out_2) + + # test None as v + UM = sim._MeKappaDeriv(u) + np.testing.assert_allclose(UM @ v, sim._MeKappaDeriv(u, v)) + + UM = sim._MeKappaDeriv(u2) + np.testing.assert_allclose( + UM @ v, sim._MeKappaDeriv(u2, v).reshape(-1, order="F") + ) + + def test_adjoint_expected_shapes(self): + sim = self.sim + sim.model = self.start_mod + + n_e = self.mesh.n_edges + # n_c = self.mesh.n_cells + + u = np.random.rand(n_e) + v = np.random.randn(n_e) + v2 = np.random.randn(n_e, 4) + u2 = np.random.rand(n_e, 2) + v2_2 = np.random.randn(n_e, 2) + v3 = np.random.rand(n_e, 4, 2) + + # These cases should all return an array of shape (n_c, ) + # if V.shape (n_f, ) + out = sim._MeKappaDeriv(u, v, adjoint=True) + assert out.shape == (n_e,) + out = sim._MeKappaDeriv(u, v[:, None], adjoint=True) + assert out.shape == (n_e,) + out = sim._MeKappaDeriv(u[:, None], v, adjoint=True) + assert out.shape == (n_e,) + out = sim._MeKappaDeriv(u[:, None], v[:, None], adjoint=True) + assert out.shape == (n_e,) + + # now check passing multiple V's + out = sim._MeKappaDeriv(u, v2, adjoint=True) + assert out.shape == (n_e, 4) + out = sim._MeKappaDeriv(u[:, None], v2, adjoint=True) + assert out.shape == (n_e, 4) + + # also ensure it properly broadcasted the operation.... + out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i] = sim._MeKappaDeriv(u, v2[:, i], adjoint=True) + np.testing.assert_equal(out, out_2) + + # now check for multiple source polarizations + out = sim._MeKappaDeriv(u2, v2_2, adjoint=True) + assert out.shape == (n_e,) + out = sim._MeKappaDeriv(u2, v2_2, adjoint=True) + assert out.shape == (n_e,) + + # and with multiple RHS + out = sim._MeKappaDeriv(u2, v3, adjoint=True) + assert out.shape == (n_e, v3.shape[1]) + + # and test broadcasting here... 
+ out_2 = np.empty_like(out) + for i in range(v2.shape[1]): + out_2[:, i] = sim._MeKappaDeriv(u2, v3[:, i, :], adjoint=True) + np.testing.assert_equal(out, out_2) + + # test None as v + UMT = sim._MeKappaDeriv(u, adjoint=True) + np.testing.assert_allclose(UMT @ v, sim._MeKappaDeriv(u, v, adjoint=True)) + + UMT = sim._MeKappaDeriv(u2, adjoint=True) + np.testing.assert_allclose( + UMT @ v2_2.reshape(-1, order="F"), sim._MeKappaDeriv(u2, v2_2, adjoint=True) + ) + + def test_adjoint_opp_shapes(self): + sim = self.sim + sim.model = self.start_mod + + n_e = self.mesh.n_edges + # n_c = self.mesh.n_cells + + u = np.random.rand(n_e) + u2 = np.random.rand(n_e, 2) + + y = np.random.rand(n_e) + y2 = np.random.rand(n_e, 4) + + v = np.random.randn(n_e) + v2 = np.random.randn(n_e, 4) + v2_2 = np.random.randn(n_e, 2) + v3 = np.random.rand(n_e, 4, 2) + + # u1, y1 -> v1 + vJy = v @ sim._MeKappaDeriv(u, y) + yJtv = y @ sim._MeKappaDeriv(u, v, adjoint=True) + np.testing.assert_allclose(vJy, yJtv) + + # u1, y2 -> v2 + vJy = np.sum(v2 * sim._MeKappaDeriv(u, y2)) + yJtv = np.sum(y2 * sim._MeKappaDeriv(u, v2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y1 -> v2_2 + vJy = np.sum(v2_2 * sim._MeKappaDeriv(u2, y)) + yJtv = np.sum(y * sim._MeKappaDeriv(u2, v2_2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y2 -> v3 + vJy = np.sum(v3 * sim._MeKappaDeriv(u2, y2)) + yJtv = np.sum(y2 * sim._MeKappaDeriv(u2, v3, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # Also test Inverse opp, just to be sure... 
+ # u1, y1 -> v1 + vJy = v @ sim._MeKappaIDeriv(u, y) + yJtv = y @ sim._MeKappaIDeriv(u, v, adjoint=True) + np.testing.assert_allclose(vJy, yJtv) + + # u1, y2 -> v2 + vJy = np.sum(v2 * sim._MeKappaIDeriv(u, y2)) + yJtv = np.sum(y2 * sim._MeKappaIDeriv(u, v2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y1 -> v2_2 + vJy = np.sum(v2_2 * sim._MeKappaIDeriv(u2, y)) + yJtv = np.sum(y * sim._MeKappaIDeriv(u2, v2_2, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + # u2, y2 -> v3 + vJy = np.sum(v3 * sim._MeKappaIDeriv(u2, y2)) + yJtv = np.sum(y2 * sim._MeKappaIDeriv(u2, v3, adjoint=True)) + np.testing.assert_allclose(vJy, yJtv) + + def test_Me_deriv(self): + u = np.random.randn(self.mesh.n_edges) + sim = self.sim + x0 = self.start_mod + + def f(x): + sim.model = x + d = sim._MeKappa @ u + + def Jvec(v): + sim.model = x0 + return sim._MeKappaDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + + def test_MeI_deriv(self): + u = np.random.randn(self.mesh.n_edges) + sim = self.sim + x0 = self.start_mod + + def f(x): + sim.model = x + d = sim._MeKappaI @ u + + def Jvec(v): + sim.model = x0 + return sim._MeKappaIDeriv(u, v) + + return d, Jvec + + assert check_derivative(f, x0=x0, num=3, plotIt=False) + + def test_Me_adjoint(self): + n_items = self.mesh.n_edges + u = np.random.randn(n_items) + sim = self.sim + sim.model = self.start_mod + + v = np.random.randn(self.mesh.n_edges) + y = np.random.randn(n_items) + + yJv = y @ sim._MeKappaDeriv(u, v) + vJty = v @ sim._MeKappaDeriv(u, y, adjoint=True) + np.testing.assert_allclose(yJv, vJty) + + def test_MeI_adjoint(self): + n_items = self.mesh.n_edges + u = np.random.randn(n_items) + sim = self.sim + sim.model = self.start_mod + + v = np.random.randn(self.mesh.n_edges) + y = np.random.randn(n_items) + + yJv = y @ sim._MeKappaIDeriv(u, v) + vJty = v @ sim._MeKappaIDeriv(u, y, adjoint=True) + np.testing.assert_allclose(yJv, vJty) + + +if __name__ == "__main__": + 
unittest.main() From 7643a41249951056b32cd23ae404156a725345f8 Mon Sep 17 00:00:00 2001 From: dccowan Date: Fri, 7 Jul 2023 14:12:25 -0700 Subject: [PATCH 016/164] Add conductance to simulation and added analytic 1d layered earth tests. --- SimPEG/base/__init__.py | 1 + SimPEG/base/pde_simulation.py | 4 +- .../electromagnetics/time_domain/__init__.py | 2 + SimPEG/electromagnetics/time_domain/fields.py | 55 ++++++ .../time_domain/simulation.py | 92 +++++++++ tests/em/tdem/test_TDEM_forward_Analytic.py | 186 ++++++++++++++++++ 6 files changed, 338 insertions(+), 2 deletions(-) diff --git a/SimPEG/base/__init__.py b/SimPEG/base/__init__.py index c2cb75dcae..bc2cbaf8f3 100644 --- a/SimPEG/base/__init__.py +++ b/SimPEG/base/__init__.py @@ -2,6 +2,7 @@ BasePDESimulation, BaseElectricalPDESimulation, BaseMagneticPDESimulation, + BaseConductancePDESimulation, with_property_mass_matrices, with_surface_property_mass_matrices, with_line_property_mass_matrices, diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index 74d897bb46..34f1131889 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -771,7 +771,7 @@ def deleteTheseOnModelUpdate(self): @with_surface_property_mass_matrices("tau") -@with_line_property_mass_matrices("lambda") +@with_line_property_mass_matrices("kappa") class BaseConductancePDESimulation(BasePDESimulation): tau, tauMap, tauDeriv = props.Invertible( "Electrical Conductance (S)", @@ -781,7 +781,7 @@ class BaseConductancePDESimulation(BasePDESimulation): ) def __init__( - self, mesh, tau=None, tauMap=None, kappa=None, kappaMap=None, **kwargs + self, mesh, tau=None, tauMap=None, kappa=0., kappaMap=None, **kwargs ): super().__init__(mesh=mesh, **kwargs) self.tau = tau diff --git a/SimPEG/electromagnetics/time_domain/__init__.py b/SimPEG/electromagnetics/time_domain/__init__.py index dcf8dde9a8..4363b1aced 100644 --- a/SimPEG/electromagnetics/time_domain/__init__.py +++ 
b/SimPEG/electromagnetics/time_domain/__init__.py @@ -92,6 +92,7 @@ from .simulation import ( Simulation3DMagneticFluxDensity, Simulation3DElectricField, + Simulation3DElectricFieldConductance, Simulation3DMagneticField, Simulation3DCurrentDensity, ) @@ -99,6 +100,7 @@ from .fields import ( Fields3DMagneticFluxDensity, Fields3DElectricField, + Fields3DElectricFieldConductance, Fields3DMagneticField, Fields3DCurrentDensity, ) diff --git a/SimPEG/electromagnetics/time_domain/fields.py b/SimPEG/electromagnetics/time_domain/fields.py index 384432c736..afb832a46f 100644 --- a/SimPEG/electromagnetics/time_domain/fields.py +++ b/SimPEG/electromagnetics/time_domain/fields.py @@ -391,6 +391,61 @@ def _dhdtDeriv_m(self, tInd, src, v, adjoint=False): return self.simulation.MfI * (self._MfMui * self._dbdtDeriv_m(tInd, src, v)) +class Fields3DElectricFieldConductance(Fields3DElectricField): + """Fancy Field Storage for a TDEM simulation.""" + + def startup(self): + self._times = self.simulation.times + self._MeSigma = self.simulation.MeSigma + self._MeSigmaI = self.simulation.MeSigmaI + self._MeSigmaDeriv = self.simulation.MeSigmaDeriv + self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv + self._edgeCurl = self.simulation.mesh.edge_curl + self._MfMui = self.simulation.MfMui + self.__MeTau = self.simulation._MeTau + # self.__MeTauI = self.simulation._MeTauI + self.__MeTauDeriv = self.simulation._MeTauDeriv + # self.__MeTauIDeriv = self.simulation._MeTauIDeriv + self.__MeKappa = self.simulation._MeKappa + # self.__MeKappaI = self.simulation._MeKappaI + + def _j(self, eSolution, source_list, tInd): + return self.simulation.MeI * ( + (self._MeSigma + self.__MeTau + self.__MeKappa) + * self._e(eSolution, source_list, tInd) + ) + + def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): + if adjoint: + return self._eDeriv_u( + tInd, + src, + (self._MeSigma + self.__MeTau + self.__MeKappa).T + * (self.simulation.MeI.T * dun_dm_v), + adjoint=True, + ) + return self.simulation.MeI * ( 
+ (self._MeSigma + self.__MeTau + self.__MeKappa) + * self._eDeriv_u(tInd, src, dun_dm_v) + ) + + def _jDeriv_m(self, tInd, src, v, adjoint=False): + e = self[src, "e", tInd] + if adjoint: + w = self.simulation.MeI.T * v + return self.__MeTauDeriv(e).T * w + self._eDeriv_m( + tInd, + src, + (self._MeSigma + self.__MeTau + self.__MeKappa).T * w, + adjoint=True, + ) + return self.simulation.MeI * ( + self._MeTauDeriv(e) * v + + (self._MeSigma + self.__MeTau + self.__MeKappa) + * self._eDeriv_m(tInd, src, v) + ) + + class Fields3DMagneticField(FieldsTDEM): """Fancy Field Storage for a TDEM simulation.""" diff --git a/SimPEG/electromagnetics/time_domain/simulation.py b/SimPEG/electromagnetics/time_domain/simulation.py index 09ae43a85e..75a981c365 100644 --- a/SimPEG/electromagnetics/time_domain/simulation.py +++ b/SimPEG/electromagnetics/time_domain/simulation.py @@ -4,11 +4,13 @@ from ...data import Data from ...simulation import BaseTimeSimulation from ...utils import mkvc, sdiag, speye, Zero, validate_type, validate_float +from ...base import BaseConductancePDESimulation from ..base import BaseEMSimulation from .survey import Survey from .fields import ( Fields3DMagneticFluxDensity, Fields3DElectricField, + Fields3DElectricFieldConductance, Fields3DMagneticField, Fields3DCurrentDensity, FieldsDerivativesEB, @@ -962,6 +964,96 @@ def getAdcDeriv(self, u, v, adjoint=False): # self.Adcinv.clean() + +# ------------------------------- Simulation3DElectricField ------------------------------- # +class Simulation3DElectricFieldConductance(Simulation3DElectricField, BaseConductancePDESimulation): + + fieldsPair = Fields3DElectricFieldConductance + + def __init__(self, mesh, survey=None, dt_threshold=1e-8, **kwargs): + super().__init__(mesh=mesh, survey=survey, **kwargs) + + if self.sigmaMap is not None: + raise NotImplementedError( + "Conductivity (sigma) is not an invertible property for the " + "Simulation3DElectricFieldConductance class. 
The mapping for the " + "invertible property is 'tauMap'." + ) + + if self.kappaMap is not None: + raise NotImplementedError( + "Resistance per unit length (kappa) is not an invertible property, yet." + ) + + + + def getAdiag(self, tInd): + """ + Diagonal of the system matrix at a given time index + """ + assert tInd >= 0 and tInd < self.nT + + dt = self.time_steps[tInd] + C = self.mesh.edge_curl + MfMui = self.MfMui + MeSigma = self.MeSigma + self._MeTau + self._MeKappa + + return C.T.tocsr() * (MfMui * C) + 1.0 / dt * MeSigma + + def getAdiagDeriv(self, tInd, u, v, adjoint=False): + """ + Deriv of ADiag with respect to conductance + """ + assert tInd >= 0 and tInd < self.nT + + dt = self.time_steps[tInd] + + if adjoint: + return 1.0 / dt * self._MeTauDeriv(u, v, adjoint) + + return 1.0 / dt * self._MeTauDeriv(u, v, adjoint) + + def getAsubdiag(self, tInd): + """ + Matrix below the diagonal + """ + assert tInd >= 0 and tInd < self.nT + + dt = self.time_steps[tInd] + + MeSigma = self.MeSigma + self._MeTau + self._MeKappa + + return -1.0 / dt * MeSigma + + def getAsubdiagDeriv(self, tInd, u, v, adjoint=False): + """ + Derivative of the matrix below the diagonal with respect to conductance + """ + dt = self.time_steps[tInd] + + if adjoint: + return -1.0 / dt * self._MeTauDeriv(u, v, adjoint) + + return -1.0 / dt * self._MeTauDeriv(u, v, adjoint) + + def getAdc(self): + + MeSigma = self.MeSigma + self._MeTau + self._MeKappa + + Grad = self.mesh.nodal_gradient + Adc = Grad.T.tocsr() * MeSigma * Grad + # Handling Null space of A + Adc[0, 0] = Adc[0, 0] + 1.0 + return Adc + + def getAdcDeriv(self, u, v, adjoint=False): + Grad = self.mesh.nodal_gradient + if not adjoint: + return Grad.T * self._MeTauDeriv(-u, v, adjoint) + else: + return self._MeTauDeriv(-u, Grad * v, adjoint) + + ############################################################################### # # # H-J Formulation # diff --git a/tests/em/tdem/test_TDEM_forward_Analytic.py 
b/tests/em/tdem/test_TDEM_forward_Analytic.py index 9594e3de86..878eb923bb 100644 --- a/tests/em/tdem/test_TDEM_forward_Analytic.py +++ b/tests/em/tdem/test_TDEM_forward_Analytic.py @@ -311,6 +311,149 @@ def analytic_halfspace_mag_dipole_comparison( return log10diff +def analytic_layer_small_loop_conuductance_comparison( + mesh_type="CYL", + rx_type="MagneticFluxTimeDerivative", + orientation="Z", + bounds=None, + plotIt=False, +): + # Some static parameters + PHI = np.linspace(0, 2 * np.pi, 21) + loop_radius = np.pi**-0.5 + if orientation == "X": + source_nodes = np.c_[ + np.zeros_like(PHI), + loop_radius * np.cos(PHI), + 1.0 + loop_radius * np.sin(PHI), + ] + elif orientation == "Z": + source_nodes = np.c_[ + loop_radius * np.cos(PHI), loop_radius * np.sin(PHI), np.ones_like(PHI) + ] + receiver_location = np.c_[40.0, 0.0, 1.0] + + layer_depth = 24.0 + layer_thickness = 0.1 + layer_conductivity = 10.0 + background_conductivity = 2.5e-3 + + tau = layer_thickness * layer_conductivity + + if bounds is None: + bounds = [1e-5, 1e-3] + + # 1D LAYER MODEL + thicknesses = np.array([layer_depth - layer_thickness / 2, layer_thickness]) + n_layer = len(thicknesses) + 1 + + sigma_1d = background_conductivity * np.ones(n_layer) + sigma_1d[1] = layer_conductivity + + sigma_map_1d = maps.IdentityMap(nP=n_layer) + + # 3D LAYER MODEL + if mesh_type == "CYL": + cs, ncx, ncz, npad = 4.0, 40, 20, 20 + hx = [(cs, ncx), (cs, npad, 1.3)] + hz = [(cs, npad, -1.3), (cs, ncz), (cs, npad, 1.3)] + mesh = discretize.CylindricalMesh([hx, 1, hz], "00C") + + elif mesh_type == "TENSOR": + cs, nc, npad = 8.0, 14, 8 + hx = [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)] + hy = [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)] + hz = [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)] + mesh = discretize.TensorMesh([hx, hy, hz], "CCC") + + sigma_3d = 1e-8 * np.ones(mesh.nC) + sigma_3d[mesh.cell_centers[:, -1] < 0.0] = background_conductivity + + tau_3d = np.zeros(mesh.nF) + tau_3d[np.isclose(mesh.faces[:, -1], 
-layer_depth)] = tau + tau_map = maps.IdentityMap(nP=mesh.n_faces) + + # DEFINE SURVEY + times = np.logspace(-5, -4, 21) + rx = getattr(tdem.receivers, "Point{}".format(rx_type))( + receiver_location, times, orientation=orientation + ) + + src_1d = tdem.sources.MagDipole( + [rx], + location=np.r_[0.0, 0.0, 1.0], + orientation=orientation, + waveform=tdem.sources.StepOffWaveform(), + ) + + if mesh_type == "CYL": + src_3d = tdem.sources.CircularLoop( + [rx], + radius=loop_radius, + location=np.c_[0.0, 0.0, 1.0], + waveform=tdem.sources.StepOffWaveform(), + ) + else: + src_3d = tdem.sources.LineCurrent( + [rx], location=source_nodes, waveform=tdem.sources.StepOffWaveform() + ) + + survey_1d = tdem.Survey([src_1d]) + survey_3d = tdem.Survey([src_3d]) + + # DEFINE THE SIMULATIONS + sim_1d = tdem.Simulation1DLayered( + survey=survey_1d, + thicknesses=thicknesses, + sigmaMap=sigma_map_1d, + ) + + sim_3d = tdem.simulation.Simulation3DElectricFieldConductance( + mesh=mesh, survey=survey_3d, sigma=sigma_3d, tauMap=tau_map + ) + sim_3d.time_steps = [ + (1e-06, 40), + (5e-06, 40), + (1e-05, 40), + (5e-05, 40), + (0.0001, 40), + (0.0005, 40), + ] + + # COMPUTE SOLUTIONS + analytic_solution = sim_1d.dpred(sigma_1d) + numeric_solution = sim_3d.dpred(tau_3d) + + ind = np.logical_and(rx.times > bounds[0], rx.times < bounds[1]) + log10diff = np.linalg.norm( + np.log10(np.abs(numeric_solution[ind])) + - np.log10(np.abs(analytic_solution[ind])) + ) / np.linalg.norm(np.log10(np.abs(analytic_solution[ind]))) + + print( + " |bz_ana| = {ana} |bz_num| = {num} |bz_ana-bz_num| = {diff}".format( + ana=np.linalg.norm(analytic_solution), + num=np.linalg.norm(numeric_solution), + diff=np.linalg.norm(analytic_solution - numeric_solution), + ) + ) + print("Difference: {}".format(log10diff)) + + if plotIt is True: + plt.loglog( + rx.times[numeric_solution > 0], + numeric_solution[numeric_solution > 0], + "r", + rx.times[numeric_solution < 0], + -numeric_solution[numeric_solution < 0], + "r--", + ) 
+ plt.loglog(rx.times, abs(analytic_solution), "b*") + plt.show() + + return log10diff + + ########################################################### # ANALYTIC WHOLESPACE TESTS FOR MAG AND ELECTRIC DIPOLES ########################################################### @@ -604,3 +747,46 @@ def test_analytic_m3_CYL_0m_CircularLoop(self): ) < 0.15 ) + + +class LayerConductanceTests(unittest.TestCase): + # WORKING + def test_tensor_linecurrent_dbdt_x(self): + assert ( + analytic_layer_small_loop_conuductance_comparison( + mesh_type="TENSOR", + rx_type="MagneticFluxTimeDerivative", + orientation="X", + bounds=None, + plotIt=False, + ) + < 0.01 + ) + + def test_tensor_linecurrent_dbdt_z(self): + assert ( + analytic_layer_small_loop_conuductance_comparison( + mesh_type="TENSOR", + rx_type="MagneticFluxTimeDerivative", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.01 + ) + + def test_cyle_linecurrent_dbdt_z(self): + assert ( + analytic_layer_small_loop_conuductance_comparison( + mesh_type="CYL", + rx_type="MagneticFluxTimeDerivative", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.01 + ) + + +if __name__ == "__main__": + unittest.main() From 4f19202e0caa155942a2646fa905b7ec4a8ee08d Mon Sep 17 00:00:00 2001 From: dccowan Date: Mon, 10 Jul 2023 16:11:40 -0700 Subject: [PATCH 017/164] add derivative and adjoint tests --- SimPEG/__init__.py | 1 + SimPEG/base/pde_simulation.py | 106 +++++++- .../electromagnetics/time_domain/__init__.py | 6 + SimPEG/electromagnetics/time_domain/fields.py | 157 ++++++++++++ .../time_domain/simulation.py | 242 ++++++++++++++++-- SimPEG/maps.py | 173 +++++++++++++ tests/em/tdem/test_TDEM_DerivAdjoint.py | 159 +++++++++++- tests/em/tdem/test_TDEM_forward_Analytic.py | 94 +++++-- 8 files changed, 885 insertions(+), 53 deletions(-) diff --git a/SimPEG/__init__.py b/SimPEG/__init__.py index 4f390f6886..e00e25203d 100644 --- a/SimPEG/__init__.py +++ b/SimPEG/__init__.py @@ -76,6 +76,7 @@ maps.LinearMap maps.IdentityMap 
maps.InjectActiveCells + maps.InjectActiveFaces maps.MuRelative maps.LogMap maps.ParametricBlock diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index 34f1131889..adbeef0eaa 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -1,6 +1,6 @@ import numpy as np import scipy.sparse as sp -from discretize.utils import Zero +from discretize.utils import Zero, sdinv from ..simulation import BaseSimulation from .. import props from scipy.constants import mu_0 @@ -772,38 +772,130 @@ def deleteTheseOnModelUpdate(self): @with_surface_property_mass_matrices("tau") @with_line_property_mass_matrices("kappa") -class BaseConductancePDESimulation(BasePDESimulation): +@with_line_property_mass_matrices("kappai") +class BaseConductancePDESimulation(BaseElectricalPDESimulation): tau, tauMap, tauDeriv = props.Invertible( "Electrical Conductance (S)", ) kappa, kappaMap, kappaDeriv = props.Invertible( + "Electrical Conductance integrated over length (Sm)", + ) + kappai, kappaiMap, kappaiDeriv = props.Invertible( "Electrical Resistance per meter (Ohm/m)", ) + props.Reciprocal(kappa, kappai) def __init__( - self, mesh, tau=None, tauMap=None, kappa=0., kappaMap=None, **kwargs + self, + mesh, + sigma=1e-8, + sigmaMap=None, + rho=None, + rhoMap=None, + tau=None, + tauMap=None, + kappa=0., + kappaMap=None, + kappai=None, + kappaiMap=None, + **kwargs ): super().__init__(mesh=mesh, **kwargs) + self.sigma = sigma + self.rho = rho + self.sigmaMap = sigmaMap + self.rhoMap = rhoMap self.tau = tau self.kappa = kappa + self.kappai = kappai self.tauMap = tauMap self.kappaMap = kappaMap + self.kappaiMap = kappaiMap def __setattr__(self, name, value): super().__setattr__(name, value) - if name in ["tau", "kappa"]: - for mat in self._clear_on_tau_update + self._clear_on_kappa_update: + if name in ["sigma", "rho", "tau", "kappa", "kappai"]: + mat_list = ( + self._clear_on_sigma_update + + self._clear_on_rho_update + + self._clear_on_tau_update + + 
self._clear_on_kappa_update + + self._clear_on_kappai_update + + [ + "__MeSigmaTauKappa", + "__MeSigmaTauKappaI", + "__MeSigmaTauKappaDeriv", + # "__MeSigmaTauKappaIDeriv" + ] + ) + for mat in mat_list: if hasattr(self, mat): delattr(self, mat) + @property + def _MeSigmaTauKappa(self): + if getattr(self, "__MeSigmaTauKappa", None) is None: + M_prop = self.MeSigma + self._MeTau + self._MeKappa + setattr(self, "__MeSigmaTauKappa", M_prop) + return getattr(self, "__MeSigmaTauKappa") + + @property + def _MeSigmaTauKappaI(self): + if getattr(self, "__MeSigmaTauKappaI", None) is None: + M_prop = sdinv(self.MeSigma + self._MeTau + self._MeKappa) + setattr(self, "__MeSigmaTauKappaI", M_prop) + return getattr(self, "__MeSigmaTauKappaI") + + def _MeSigmaTauKappaDeriv(self, u, v=None, adjoint=False): + """Only derivative wrt to tau at the moment""" + return self._MeTauDeriv(u, v, adjoint) + + + + # if getattr(self, "__MeSigmaTauKappaDeriv", None) is None: + # M_prop_deriv = getattr(self, "__Me_tau_deriv") + # setattr( + # self, "__MeSigmaTauKappaDeriv", __inner_mat_mul_op( + # M_prop_deriv, u, v=v, adjoint=adjoint + # ) + # ) + # return getattr(self, "__MeSigmaTauKappaDeriv") + + def _MeSigmaTauKappaIDeriv(self, u, v=None, adjoint=False): + """Only derivative wrt to tau at the moment""" + if getattr(self, "tauMap") is None: + return Zero() + if isinstance(u, Zero) or isinstance(v, Zero): + return Zero() + + MI_prop = self._MeSigmaTauKappaI + u = MI_prop @ (MI_prop @ -u) + return self._MeTauDeriv(u, v, adjoint) + + + # M_prop_deriv = getattr(self, "__Me_tau_deriv") + # return M_prop_deriv(u, v, adjoint=adjoint) + + @property def deleteTheseOnModelUpdate(self): """ items to be deleted if the model for conductance or resistance per meter is updated """ toDelete = super().deleteTheseOnModelUpdate - if self.tauMap is not None or self.kappaMap is not None: + if self.tauMap is not None or self.kappaMap is not None or self.kappaiMap is not None: toDelete = ( - toDelete + 
self._clear_on_tau_update + self._clear_on_kappa_update + toDelete + + self._clear_on_sigma_update + + self._clear_on_rho_update + + self._clear_on_tau_update + + self._clear_on_kappa_update + + self._clear_on_kappai_update + + [ + "__MeSigmaTauKappa", + "__MeSigmaTauKappaI", + "__MeSigmaTauKappaDeriv", + # "__MeSigmaTauKappaIDeriv" + ] ) return toDelete diff --git a/SimPEG/electromagnetics/time_domain/__init__.py b/SimPEG/electromagnetics/time_domain/__init__.py index 4363b1aced..4074242423 100644 --- a/SimPEG/electromagnetics/time_domain/__init__.py +++ b/SimPEG/electromagnetics/time_domain/__init__.py @@ -13,7 +13,9 @@ Simulation1DLayered Simulation3DMagneticFluxDensity + Simulation3DMagneticFluxDensityConductance Simulation3DElectricField + Simulation3DElectricFieldConductance Simulation3DMagneticField Simulation3DCurrentDensity @@ -70,7 +72,9 @@ :toctree: generated/ Fields3DMagneticFluxDensity + Fields3DMagneticFluxDensityConductance Fields3DElectricField + Fields3DElectricFieldConductance Fields3DMagneticField Fields3DCurrentDensity @@ -91,6 +95,7 @@ """ from .simulation import ( Simulation3DMagneticFluxDensity, + Simulation3DMagneticFluxDensityConductance, Simulation3DElectricField, Simulation3DElectricFieldConductance, Simulation3DMagneticField, @@ -99,6 +104,7 @@ from .simulation_1d import Simulation1DLayered from .fields import ( Fields3DMagneticFluxDensity, + Fields3DMagneticFluxDensityConductance, Fields3DElectricField, Fields3DElectricFieldConductance, Fields3DMagneticField, diff --git a/SimPEG/electromagnetics/time_domain/fields.py b/SimPEG/electromagnetics/time_domain/fields.py index afb832a46f..30ea119c58 100644 --- a/SimPEG/electromagnetics/time_domain/fields.py +++ b/SimPEG/electromagnetics/time_domain/fields.py @@ -272,6 +272,163 @@ def _dhdtDeriv_m(self, tInd, src, v, adjoint=False): return self.simulation.MfI * (self._MfMui * self._dbdtDeriv_m(tInd, src, v)) +class Fields3DMagneticFluxDensityConductance(Fields3DMagneticFluxDensity): + """Field 
Storage for a TDEM simulation.""" + + knownFields = {"bSolution": "F"} + aliasFields = { + "b": ["bSolution", "F", "_b"], + "h": ["bSolution", "F", "_h"], + "e": ["bSolution", "E", "_e"], + "j": ["bSolution", "E", "_j"], + "dbdt": ["bSolution", "F", "_dbdt"], + "dhdt": ["bSolution", "F", "_dhdt"], + } + + def startup(self): + self._times = self.simulation.times + self._MeSigma = self.simulation.MeSigma + self._MeSigmaI = self.simulation.MeSigmaI + self._MeSigmaDeriv = self.simulation.MeSigmaDeriv + self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv + self._edgeCurl = self.simulation.mesh.edge_curl + self._MfMui = self.simulation.MfMui + self._timeMesh = self.simulation.time_mesh + + def _TLoc(self, fieldType): + return "N" + + def _b(self, bSolution, source_list, tInd): + return bSolution + + def _bDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): + return dun_dm_v + + def _bDeriv_m(self, tInd, src, v, adjoint=False): + return Zero() + + def _dbdt(self, bSolution, source_list, tInd): + # self._timeMesh.face_divergence + dbdt = -self._edgeCurl * self._e(bSolution, source_list, tInd) + for i, src in enumerate(source_list): + s_m = src.s_m(self.simulation, self._times[tInd]) + dbdt[:, i] = dbdt[:, i] + s_m + return dbdt + + def _dbdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): + if adjoint is True: + return -self._eDeriv_u(tInd, src, self._edgeCurl.T * dun_dm_v, adjoint) + return -(self._edgeCurl * self._eDeriv_u(tInd, src, dun_dm_v)) + + def _dbdtDeriv_m(self, tInd, src, v, adjoint=False): + if adjoint is True: + return -(self._eDeriv_m(tInd, src, self._edgeCurl.T * v, adjoint)) + return -( + self._edgeCurl * self._eDeriv_m(tInd, src, v) + ) # + src.s_mDeriv() assuming src doesn't have deriv for now + + def _e(self, bSolution, source_list, tInd): + e = self._MeSigmaI * (self._edgeCurl.T * (self._MfMui * bSolution)) + for i, src in enumerate(source_list): + s_e = src.s_e(self.simulation, self._times[tInd]) + e[:, i] = e[:, i] - self._MeSigmaI * s_e + return e 
+ + def _eDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): + if adjoint is True: + return self._MfMui.T * (self._edgeCurl * (self._MeSigmaI.T * dun_dm_v)) + return self._MeSigmaI * (self._edgeCurl.T * (self._MfMui * dun_dm_v)) + + def _eDeriv_m(self, tInd, src, v, adjoint=False): + _, s_e = src.eval(self.simulation, self._times[tInd]) + bSolution = self[[src], "bSolution", tInd].flatten() + + _, s_eDeriv = src.evalDeriv(self._times[tInd], self, adjoint=adjoint) + + if adjoint is True: + return self._MeSigmaIDeriv( + -s_e + self._edgeCurl.T * (self._MfMui * bSolution), v, adjoint + ) - s_eDeriv(self._MeSigmaI.T * v) + + return self._MeSigmaIDeriv( + -s_e + self._edgeCurl.T * (self._MfMui * bSolution), v, adjoint + ) - self._MeSigmaI * s_eDeriv(v) + + def _j(self, hSolution, source_list, tInd): + return self.simulation.MeI * ( + self._MeSigma * self._e(hSolution, source_list, tInd) + ) + + def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): + if adjoint: + return self._eDeriv_u( + tInd, + src, + self._MeSigma.T * (self.simulation.MeI.T * dun_dm_v), + adjoint=True, + ) + return self.simulation.MeI * ( + self._MeSigma * self._eDeriv_u(tInd, src, dun_dm_v) + ) + + def _jDeriv_m(self, tInd, src, v, adjoint=False): + e = self[src, "e", tInd] + if adjoint: + w = self.simulation.MeI.T * v + return self._MeSigmaDeriv(e).T * w + self._eDeriv_m( + tInd, src, self._MeSigma.T * w, adjoint=True + ) + return self.simulation.MeI * ( + self._MeSigmaDeriv(e) * v + self._MeSigma * self._eDeriv_m(tInd, src, v) + ) + + def _h(self, hSolution, source_list, tInd): + return self.simulation.MfI * ( + self._MfMui * self._b(hSolution, source_list, tInd) + ) + + def _hDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): + if adjoint: + return self._bDeriv_u( + tInd, + src, + self._MfMui.T * (self.simulation.MfI.T * dun_dm_v), + adjoint=True, + ) + return self.simulation.MfI * (self._MfMui * self._bDeriv_u(tInd, src, dun_dm_v)) + + def _hDeriv_m(self, tInd, src, v, adjoint=False): + if 
adjoint: + return self._bDeriv_m( + tInd, src, self._MfMui.T * (self.simulation.MfI.T * v), adjoint=True + ) + return self.simulation.MfI * (self._MfMui * self._bDeriv_m(tInd, src, v)) + + def _dhdt(self, hSolution, source_list, tInd): + return self.simulation.MfI * ( + self._MfMui * self._dbdt(hSolution, source_list, tInd) + ) + + def _dhdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): + if adjoint: + return self._dbdtDeriv_u( + tInd, + src, + self._MfMui.T * (self.simulation.MfI.T * dun_dm_v), + adjoint=True, + ) + return self.simulation.MfI * ( + self._MfMui * self._dbdtDeriv_u(tInd, src, dun_dm_v) + ) + + def _dhdtDeriv_m(self, tInd, src, v, adjoint=False): + if adjoint: + return self._dbdtDeriv_m( + tInd, src, self._MfMui.T * (self.simulation.MfI.T * v), adjoint=True + ) + return self.simulation.MfI * (self._MfMui * self._dbdtDeriv_m(tInd, src, v)) + + class Fields3DElectricField(FieldsTDEM): """Fancy Field Storage for a TDEM simulation.""" diff --git a/SimPEG/electromagnetics/time_domain/simulation.py b/SimPEG/electromagnetics/time_domain/simulation.py index 75a981c365..6edc83da22 100644 --- a/SimPEG/electromagnetics/time_domain/simulation.py +++ b/SimPEG/electromagnetics/time_domain/simulation.py @@ -3,12 +3,13 @@ from ...data import Data from ...simulation import BaseTimeSimulation -from ...utils import mkvc, sdiag, speye, Zero, validate_type, validate_float +from ...utils import mkvc, sdiag, sdinv, speye, Zero, validate_type, validate_float from ...base import BaseConductancePDESimulation from ..base import BaseEMSimulation from .survey import Survey from .fields import ( Fields3DMagneticFluxDensity, + Fields3DMagneticFluxDensityConductance, Fields3DElectricField, Fields3DElectricFieldConductance, Fields3DMagneticField, @@ -964,6 +965,207 @@ def getAdcDeriv(self, u, v, adjoint=False): # self.Adcinv.clean() +# ------------------------------- Simulation3DElectricField ------------------------------- # +class 
Simulation3DMagneticFluxDensityConductance(Simulation3DMagneticFluxDensity, BaseConductancePDESimulation): + r""" + Starting from the quasi-static E-B formulation of Maxwell's equations + (semi-discretized) + + .. math:: + + \mathbf{C} \mathbf{e} + \frac{\partial \mathbf{b}}{\partial t} = + \mathbf{s_m} \\ + \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f} \mathbf{b} - + \mathbf{M_{\sigma}^e} \mathbf{e} = \mathbf{s_e} + + + where :math:`\mathbf{s_e}` is an integrated quantity, we eliminate + :math:`\mathbf{e}` using + + .. math:: + + \mathbf{e} = \mathbf{M_{\sigma}^e}^{-1} \mathbf{C}^{\top} + \mathbf{M_{\mu^{-1}}^f} \mathbf{b} - + \mathbf{M_{\sigma}^e}^{-1} \mathbf{s_e} + + + to obtain a second order semi-discretized system in :math:`\mathbf{b}` + + .. math:: + + \mathbf{C} \mathbf{M_{\sigma}^e}^{-1} \mathbf{C}^{\top} + \mathbf{M_{\mu^{-1}}^f} \mathbf{b} + + \frac{\partial \mathbf{b}}{\partial t} = + \mathbf{C} \mathbf{M_{\sigma}^e}^{-1} \mathbf{s_e} + \mathbf{s_m} + + + and moving everything except the time derivative to the rhs gives + + .. math:: + \frac{\partial \mathbf{b}}{\partial t} = + -\mathbf{C} \mathbf{M_{\sigma}^e}^{-1} \mathbf{C}^{\top} + \mathbf{M_{\mu^{-1}}^f} \mathbf{b} + + \mathbf{C} \mathbf{M_{\sigma}^e}^{-1} \mathbf{s_e} + \mathbf{s_m} + + For the time discretization, we use backward euler. To solve for the + :math:`n+1` th time step, we have + + .. math:: + + \frac{\mathbf{b}^{n+1} - \mathbf{b}^{n}}{\mathbf{dt}} = + -\mathbf{C} \mathbf{M_{\sigma}^e}^{-1} \mathbf{C}^{\top} + \mathbf{M_{\mu^{-1}}^f} \mathbf{b}^{n+1} + + \mathbf{C} \mathbf{M_{\sigma}^e}^{-1} \mathbf{s_e}^{n+1} + + \mathbf{s_m}^{n+1} + + + re-arranging to put :math:`\mathbf{b}^{n+1}` on the left hand side gives + + .. 
math:: + + (\mathbf{I} + \mathbf{dt} \mathbf{C} \mathbf{M_{\sigma}^e}^{-1} + \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f}) \mathbf{b}^{n+1} = + \mathbf{b}^{n} + \mathbf{dt}(\mathbf{C} \mathbf{M_{\sigma}^e}^{-1} + \mathbf{s_e}^{n+1} + \mathbf{s_m}^{n+1}) + + """ + + fieldsPair = Fields3DMagneticFluxDensityConductance #: A SimPEG.EM.TDEM.Fields3DMagneticFluxDensity object + + def __init__(self, mesh, survey=None, dt_threshold=1e-8, **kwargs): + super().__init__(mesh=mesh, survey=survey, **kwargs) + + if self.sigmaMap is not None or self.rhoMap is not None: + raise NotImplementedError( + "Conductivity (sigma) and resistivity (rho) are not invertible properties for the " + "Simulation3DMagneticFluxDensityConductance class. The mapping for the " + "invertible property is 'tauMap'." + ) + + if self.kappaMap is not None: + raise NotImplementedError( + "Conductance times length (kappa) is not an invertible property, yet." + ) + + if self.kappaiMap is not None: + raise NotImplementedError( + "Resistance per unit length (kappai) is not an invertible property, yet." + ) + + def getAdiag(self, tInd): + r""" + System matrix at a given time index + + .. 
math:: + + (\mathbf{I} + \mathbf{dt} \mathbf{C} \mathbf{M_{\sigma}^e}^{-1} + \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f}) + + """ + assert tInd >= 0 and tInd < self.nT + + dt = self.time_steps[tInd] + C = self.mesh.edge_curl + # MeSigmaTauKappaI = sdinv(self.MeSigma + self._MeTau + self._MeKappa) + MeSigmaTauKappaI = self._MeSigmaTauKappaI + MfMui = self.MfMui + I = speye(self.mesh.n_faces) + + A = 1.0 / dt * I + (C * (MeSigmaTauKappaI * (C.T.tocsr() * MfMui))) + + if self._makeASymmetric is True: + return MfMui.T.tocsr() * A + return A + + def getAdiagDeriv(self, tInd, u, v, adjoint=False): + """ + Derivative of ADiag + """ + C = self.mesh.edge_curl + + # def MeSigmaIDeriv(x): + # return self.MeSigmaIDeriv(x) + + MfMui = self.MfMui + + if adjoint: + if self._makeASymmetric is True: + v = MfMui * v + return self._MeSigmaTauKappaIDeriv(C.T * (MfMui * u), C.T * v, adjoint) + + ADeriv = C * (self._MeSigmaTauKappaIDeriv(C.T * (MfMui * u), v, adjoint)) + + if self._makeASymmetric is True: + return MfMui.T * ADeriv + return ADeriv + + def getAsubdiag(self, tInd): + """ + Matrix below the diagonal + """ + + dt = self.time_steps[tInd] + MfMui = self.MfMui + Asubdiag = -1.0 / dt * sp.eye(self.mesh.n_faces) + + if self._makeASymmetric is True: + return MfMui.T * Asubdiag + + return Asubdiag + + def getAsubdiagDeriv(self, tInd, u, v, adjoint=False): + return Zero() * v + + def getRHS(self, tInd): + """ + Assemble the RHS + """ + C = self.mesh.edge_curl + # MeSigmaTauKappaI = sdinv(self.MeSigma + self._MeTau + self._MeKappa) + MeSigmaTauKappaI = self._MeSigmaTauKappaI + MfMui = self.MfMui + + s_m, s_e = self.getSourceTerm(tInd) + + rhs = C * (MeSigmaTauKappaI * s_e) + s_m + if self._makeASymmetric is True: + return MfMui.T * rhs + return rhs + + def getRHSDeriv(self, tInd, src, v, adjoint=False): + """ + Derivative of the RHS + """ + + C = self.mesh.edge_curl + MeSigmaTauKappaI = self._MeSigmaTauKappaI + + _, s_e = src.eval(self, self.times[tInd]) + s_mDeriv, s_eDeriv = 
src.evalDeriv(self, self.times[tInd], adjoint=adjoint) + + if adjoint: + if self._makeASymmetric is True: + v = self.MfMui * v + if isinstance(s_e, Zero): + MeSigmaTauKappaIDerivT_v = Zero() + else: + MeSigmaTauKappaIDerivT_v = self._MeSigmaTauKappaIDeriv(s_e, C.T * v, adjoint) + + RHSDeriv = MeSigmaTauKappaIDerivT_v + s_eDeriv(MeSigmaTauKappaI.T * (C.T * v)) + s_mDeriv(v) + + return RHSDeriv + + if isinstance(s_e, Zero): + MeSigmaTauKappaIDeriv_v = Zero() + else: + MeSigmaTauKappaIDeriv_v = self._MeSigmaTauKappaIDeriv(s_e, v, adjoint) + + RHSDeriv = C * MeSigmaTauKappaIDeriv_v + C * MeSigmaTauKappaI * s_eDeriv(v) + s_mDeriv(v) + + if self._makeASymmetric is True: + return self.MfMui.T * RHSDeriv + return RHSDeriv + # ------------------------------- Simulation3DElectricField ------------------------------- # class Simulation3DElectricFieldConductance(Simulation3DElectricField, BaseConductancePDESimulation): @@ -973,19 +1175,22 @@ class Simulation3DElectricFieldConductance(Simulation3DElectricField, BaseConduc def __init__(self, mesh, survey=None, dt_threshold=1e-8, **kwargs): super().__init__(mesh=mesh, survey=survey, **kwargs) - if self.sigmaMap is not None: + if self.sigmaMap is not None or self.rhoMap is not None: raise NotImplementedError( - "Conductivity (sigma) is not an invertible property for the " + "Conductivity (sigma) and resistivity (rho) are not invertible properties for the " "Simulation3DElectricFieldConductance class. The mapping for the " "invertible property is 'tauMap'." ) if self.kappaMap is not None: raise NotImplementedError( - "Resistance per unit length (kappa) is not an invertible property, yet." + "Conductance times length (kappa) is not an invertible property, yet." ) - + if self.kappaiMap is not None: + raise NotImplementedError( + "Resistance per unit length (kappai) is not an invertible property, yet." 
+ ) def getAdiag(self, tInd): """ @@ -996,9 +1201,10 @@ def getAdiag(self, tInd): dt = self.time_steps[tInd] C = self.mesh.edge_curl MfMui = self.MfMui - MeSigma = self.MeSigma + self._MeTau + self._MeKappa + # MeSigmaTauKappa = self.MeSigma + self._MeTau + self._MeKappa + MeSigmaTauKappa = self._MeSigmaTauKappa - return C.T.tocsr() * (MfMui * C) + 1.0 / dt * MeSigma + return C.T.tocsr() * (MfMui * C) + 1.0 / dt * MeSigmaTauKappa def getAdiagDeriv(self, tInd, u, v, adjoint=False): """ @@ -1009,9 +1215,9 @@ def getAdiagDeriv(self, tInd, u, v, adjoint=False): dt = self.time_steps[tInd] if adjoint: - return 1.0 / dt * self._MeTauDeriv(u, v, adjoint) + return 1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) - return 1.0 / dt * self._MeTauDeriv(u, v, adjoint) + return 1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) def getAsubdiag(self, tInd): """ @@ -1021,9 +1227,10 @@ def getAsubdiag(self, tInd): dt = self.time_steps[tInd] - MeSigma = self.MeSigma + self._MeTau + self._MeKappa + # MeSigmaTauKappa = self.MeSigma + self._MeTau + self._MeKappa + MeSigmaTauKappa = self._MeSigmaTauKappa - return -1.0 / dt * MeSigma + return -1.0 / dt * MeSigmaTauKappa def getAsubdiagDeriv(self, tInd, u, v, adjoint=False): """ @@ -1032,16 +1239,17 @@ def getAsubdiagDeriv(self, tInd, u, v, adjoint=False): dt = self.time_steps[tInd] if adjoint: - return -1.0 / dt * self._MeTauDeriv(u, v, adjoint) + return -1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) - return -1.0 / dt * self._MeTauDeriv(u, v, adjoint) + return -1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) def getAdc(self): - MeSigma = self.MeSigma + self._MeTau + self._MeKappa + # MeSigmaTauKappa = self.MeSigma + self._MeTau + self._MeKappa + MeSigmaTauKappa = self._MeSigmaTauKappa Grad = self.mesh.nodal_gradient - Adc = Grad.T.tocsr() * MeSigma * Grad + Adc = Grad.T.tocsr() * MeSigmaTauKappa * Grad # Handling Null space of A Adc[0, 0] = Adc[0, 0] + 1.0 return Adc @@ -1049,9 +1257,9 @@ def getAdc(self): def 
getAdcDeriv(self, u, v, adjoint=False): Grad = self.mesh.nodal_gradient if not adjoint: - return Grad.T * self._MeTauDeriv(-u, v, adjoint) + return Grad.T * self._MeSigmaTauKappaDeriv(-u, v, adjoint) else: - return self._MeTauDeriv(-u, Grad * v, adjoint) + return self._MeSigmaTauKappaDeriv(-u, Grad * v, adjoint) ############################################################################### diff --git a/SimPEG/maps.py b/SimPEG/maps.py index 5cc526b0a0..2f5c73d3ac 100644 --- a/SimPEG/maps.py +++ b/SimPEG/maps.py @@ -3314,6 +3314,179 @@ def deriv(self, m, v=None): return self.P * v return self.P +class InjectActiveFaces(IdentityMap): + r"""Map active faces model to all faces of a mesh. + + The ``InjectActiveFaces`` class is used to define the mapping when + the model consists of diagnostic property values defined on a set of active + mesh faces; e.g. faces below topography, z-faces only. For a discrete set of + model parameters :math:`\mathbf{m}` defined on a set of active + faces, the mapping :math:`\mathbf{u}(\mathbf{m})` is defined as: + + .. math:: + \mathbf{u}(\mathbf{m}) = \mathbf{Pm} + \mathbf{d}\, m_\perp + + where :math:`\mathbf{P}` is a (*nF* , *nP*) projection matrix from + active faces to all mesh faces, and :math:`\mathbf{d}` is a + (*nF* , 1) matrix that projects the inactive faces value + :math:`m_\perp` to all inactive mesh cells. + + Parameters + ---------- + mesh : discretize.BaseMesh + A discretize mesh + indActive : numpy.ndarray + Active faces array. Can be a boolean ``numpy.ndarray`` of length *mesh.nF* + or a ``numpy.ndarray`` of ``int`` containing the indices of the active faces. 
+ valInactive : float or numpy.ndarray + The physical property value assigned to all inactive faces in the mesh + + """ + + def __init__(self, mesh, indActive=None, valInactive=0.0, nF=None): + self.mesh = mesh + self.nF = nF or mesh.nF + + self._indActive = validate_active_indices("indActive", indActive, self.nF) + self._nP = np.sum(self.indActive) + + self.P = sp.eye(self.nF, format="csr")[:, self.indActive] + + self.valInactive = valInactive + + @property + def valInactive(self): + """The physical property value assigned to all inactive faces in the mesh. + + Returns + ------- + numpy.ndarray + """ + return self._valInactive + + @valInactive.setter + def valInactive(self, value): + n_inactive = self.nF - self.nP + try: + value = validate_float("valInactive", value) + value = np.full(n_inactive, value) + except Exception: + pass + value = validate_ndarray_with_shape("valInactive", value, shape=(n_inactive,)) + + self._valInactive = np.zeros(self.nF, dtype=float) + self._valInactive[~self.indActive] = value + + @property + def indActive(self): + """ + + Returns + ------- + numpy.ndarray of bool + + """ + return self._indActive + + @property + def shape(self): + """Dimensions of the mapping + + Returns + ------- + tuple of int + Where *nP* is the number of active faces and *nF* is + number of faces in the mesh, **shape** returns a + tuple (*nF* , *nP*). + """ + return (self.nF, self.nP) + + @property + def nP(self): + """Number of parameters the model acts on. + + Returns + ------- + int + Number of parameters the model acts on; i.e. the number of active cells + """ + return int(self.indActive.sum()) + + def _transform(self, m): + if m.ndim > 1: + return self.P * m + self.valInactive[:, None] + return self.P * m + self.valInactive + + def inverse(self, u): + r"""Recover the model parameters (active faces) from a set of physical + property values defined on the entire mesh. 
+ + For a discrete set of model parameters :math:`\mathbf{m}` defined + on a set of active faces, the mapping :math:`\mathbf{u}(\mathbf{m})` + is defined as: + + .. math:: + \mathbf{u}(\mathbf{m}) = \mathbf{Pm} + \mathbf{d} \,m_\perp + + where :math:`\mathbf{P}` is a (*nF* , *nP*) projection matrix from + active faces to all mesh faces, and :math:`\mathbf{d}` is a + (*nC* , 1) matrix that projects the inactive cell value + :math:`m_\perp` to all inactive mesh cells. + + The inverse mapping is given by: + + .. math:: + \mathbf{m}(\mathbf{u}) = \mathbf{P^T u} + + Parameters + ---------- + u : (mesh.nF) numpy.ndarray + A vector which contains physical property values for all + mesh faces. + """ + return self.P.T * u + + def deriv(self, m, v=None): + r"""Derivative of the mapping with respect to the input parameters. + + For a discrete set of model parameters :math:`\mathbf{m}` defined + on a set of active facees, the mapping :math:`\mathbf{u}(\mathbf{m})` + is defined as: + + .. math:: + \mathbf{u}(\mathbf{m}) = \mathbf{Pm} + \mathbf{d} \, m_\perp + + where :math:`\mathbf{P}` is a (*nF* , *nP*) projection matrix from + active faces to all mesh faces, and :math:`\mathbf{d}` is a + (*nF* , 1) matrix that projects the inactive face value + :math:`m_\perp` to all inactive mesh faces. + + the **deriv** method returns the derivative of :math:`\mathbf{u}` with respect + to the model parameters; i.e.: + + .. math:: + \frac{\partial \mathbf{u}}{\partial \mathbf{m}} = \mathbf{P} + + Note that in this case, **deriv** simply returns a sparse projection matrix. + + Parameters + ---------- + m : (nP) numpy.ndarray + A vector representing a set of model parameters + v : (nP) numpy.ndarray + If not ``None``, the method returns the derivative times the vector *v* + + Returns + ------- + scipy.sparse.csr_matrix + Derivative of the mapping with respect to the model parameters. If the + input argument *v* is not ``None``, the method returns the derivative times + the vector *v*. 
+ """ + if v is not None: + return self.P * v + return self.P + ############################################################################### # # diff --git a/tests/em/tdem/test_TDEM_DerivAdjoint.py b/tests/em/tdem/test_TDEM_DerivAdjoint.py index 1820044ca1..36419b0e39 100644 --- a/tests/em/tdem/test_TDEM_DerivAdjoint.py +++ b/tests/em/tdem/test_TDEM_DerivAdjoint.py @@ -40,11 +40,22 @@ def get_mapping(mesh): ) return maps.ExpMap(mesh) * maps.SurjectVertical1D(mesh) * activeMap +def get_face_mapping(mesh): + active = mesh.faces[:, -1] < 0.0 + activeMap = maps.InjectActiveFaces( + mesh, active, 0. + ) + return activeMap * maps.ExpMap(nP=np.sum(active)) def get_prob(mesh, mapping, formulation, **kwargs): - prb = getattr(tdem, "Simulation3D{}".format(formulation))( - mesh, sigmaMap=mapping, **kwargs - ) + if "Conductance" in formulation: + prb = getattr(tdem, "Simulation3D{}".format(formulation))( + mesh, tauMap=mapping, **kwargs + ) + else: + prb = getattr(tdem, "Simulation3D{}".format(formulation))( + mesh, sigmaMap=mapping, **kwargs + ) prb.time_steps = [(1e-05, 10), (5e-05, 10), (2.5e-4, 10)] prb.solver = Solver return prb @@ -64,12 +75,23 @@ class Base_DerivAdjoint_Test(unittest.TestCase): def setUpClass(self): # create a prob where we will store the fields mesh = get_mesh() - mapping = get_mapping(mesh) + + if "Conductance" in self.formulation: + mapping = get_face_mapping(mesh) + else: + mapping = get_mapping(mesh) self.survey = get_survey() self.prob = get_prob(mesh, mapping, self.formulation, survey=self.survey) - self.m = np.log(1e-1) * np.ones(self.prob.sigmaMap.nP) + 1e-3 * np.random.randn( - self.prob.sigmaMap.nP - ) + + if "Conductance" in self.formulation: + self.m = np.log(1e-1) * np.ones(self.prob.tauMap.nP) + 1e-3 * np.random.randn( + self.prob.tauMap.nP + ) + else: + self.m = np.log(1e-1) * np.ones(self.prob.sigmaMap.nP) + 1e-3 * np.random.randn( + self.prob.sigmaMap.nP + ) + print("Solving Fields for problem {}".format(self.formulation)) t = 
time.time() self.fields = self.prob.fields(self.m) @@ -78,7 +100,10 @@ def setUpClass(self): # create a prob where will be re-computing fields at each jvec # iteration mesh = get_mesh() - mapping = get_mapping(mesh) + if "Conductance" in self.formulation: + mapping = get_face_mapping(mesh) + else: + mapping = get_mapping(mesh) self.surveyfwd = get_survey() self.probfwd = get_prob(mesh, mapping, self.formulation, survey=self.surveyfwd) @@ -125,8 +150,11 @@ def JvecVsJtvecTest(self, rxcomp): print( "\nAdjoint Testing Jvec, Jtvec prob {}, {}".format(self.formulation, rxcomp) ) - - m = np.random.rand(self.prob.sigmaMap.nP) + + if "Conductance" in self.formulation: + m = np.random.rand(self.prob.tauMap.nP) + else: + m = np.random.rand(self.prob.sigmaMap.nP) d = np.random.randn(self.prob.survey.nD) V1 = d.dot(self.prob.Jvec(self.m, m, f=self.fields)) V2 = m.dot(self.prob.Jtvec(self.m, d, f=self.fields)) @@ -216,6 +244,49 @@ def test_Jvec_adjoint_e_dhdtz(self): def test_Jvec_adjoint_e_jy(self): self.JvecVsJtvecTest("CurrentDensityy") +class DerivAdjoint_E_Conductance(Base_DerivAdjoint_Test): + formulation = "ElectricFieldConductance" + + if testDeriv: + + def test_Jvec_e_dbxdt(self): + self.JvecTest("MagneticFluxTimeDerivativex") + + def test_Jvec_e_dbzdt(self): + self.JvecTest("MagneticFluxTimeDerivativez") + + def test_Jvec_e_ey(self): + self.JvecTest("ElectricFieldy") + + def test_Jvec_e_dhxdt(self): + self.JvecTest("MagneticFieldTimeDerivativex") + + def test_Jvec_e_dhzdt(self): + self.JvecTest("MagneticFieldTimeDerivativez") + + # def test_Jvec_e_jy(self): + # self.JvecTest("CurrentDensityy") + + if testAdjoint: + + def test_Jvec_adjoint_e_dbdtx(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") + + def test_Jvec_adjoint_e_dbdtz(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") + + def test_Jvec_adjoint_e_ey(self): + self.JvecVsJtvecTest("ElectricFieldy") + + def test_Jvec_adjoint_e_dhdtx(self): + 
self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") + + def test_Jvec_adjoint_e_dhdtz(self): + self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") + + # def test_Jvec_adjoint_e_jy(self): + # self.JvecVsJtvecTest("CurrentDensityy") + class DerivAdjoint_B(Base_DerivAdjoint_Test): formulation = "MagneticFluxDensity" @@ -281,6 +352,70 @@ def test_Jvec_adjoint_b_dhdtz(self): def test_Jvec_adjoint_b_jy(self): self.JvecVsJtvecTest("CurrentDensityy") +class DerivAdjoint_B_Conductance(Base_DerivAdjoint_Test): + formulation = "MagneticFluxDensityConductance" + + if testDeriv: + + def test_Jvec_b_bx(self): + self.JvecTest("MagneticFluxDensityx") + + def test_Jvec_b_bz(self): + self.JvecTest("MagneticFluxDensityz") + + def test_Jvec_b_dbdtx(self): + self.JvecTest("MagneticFluxTimeDerivativex") + + def test_Jvec_b_dbdtz(self): + self.JvecTest("MagneticFluxTimeDerivativez") + + def test_Jvec_b_hx(self): + self.JvecTest("MagneticFieldx") + + def test_Jvec_b_hz(self): + self.JvecTest("MagneticFieldz") + + def test_Jvec_b_dhdtx(self): + self.JvecTest("MagneticFieldTimeDerivativex") + + def test_Jvec_b_dhdtz(self): + self.JvecTest("MagneticFieldTimeDerivativez") + + # def test_Jvec_b_jy(self): + # self.JvecTest("CurrentDensityy") + + if testAdjoint: + + def test_Jvec_adjoint_b_bx(self): + self.JvecVsJtvecTest("MagneticFluxDensityx") + + def test_Jvec_adjoint_b_bz(self): + self.JvecVsJtvecTest("MagneticFluxDensityz") + + def test_Jvec_adjoint_b_dbdtx(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") + + def test_Jvec_adjoint_b_dbdtz(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") + + def test_Jvec_adjoint_b_ey(self): + self.JvecVsJtvecTest("ElectricFieldy") + + def test_Jvec_adjoint_b_hx(self): + self.JvecVsJtvecTest("MagneticFieldx") + + def test_Jvec_adjoint_b_hz(self): + self.JvecVsJtvecTest("MagneticFieldz") + + def test_Jvec_adjoint_b_dhdtx(self): + self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") + + def test_Jvec_adjoint_b_dhdtz(self): + 
self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") + + # def test_Jvec_adjoint_b_jy(self): + # self.JvecVsJtvecTest("CurrentDensityy") + class DerivAdjoint_H(Base_DerivAdjoint_Test): formulation = "MagneticField" @@ -392,3 +527,7 @@ def test_Jvec_adjoint_j_dbdtx(self): def test_Jvec_adjoint_j_dbdtz(self): self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") + + +if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/tests/em/tdem/test_TDEM_forward_Analytic.py b/tests/em/tdem/test_TDEM_forward_Analytic.py index 878eb923bb..87d8dc01eb 100644 --- a/tests/em/tdem/test_TDEM_forward_Analytic.py +++ b/tests/em/tdem/test_TDEM_forward_Analytic.py @@ -311,7 +311,7 @@ def analytic_halfspace_mag_dipole_comparison( return log10diff -def analytic_layer_small_loop_conuductance_comparison( +def analytic_layer_small_loop_conductance_comparison( mesh_type="CYL", rx_type="MagneticFluxTimeDerivative", orientation="Z", @@ -321,6 +321,9 @@ def analytic_layer_small_loop_conuductance_comparison( # Some static parameters PHI = np.linspace(0, 2 * np.pi, 21) loop_radius = np.pi**-0.5 + receiver_location = np.c_[40.0, 0.0, 1.0] + source_location = np.r_[0.0, 0.0, 1.0] + if orientation == "X": source_nodes = np.c_[ np.zeros_like(PHI), @@ -331,7 +334,6 @@ def analytic_layer_small_loop_conuductance_comparison( source_nodes = np.c_[ loop_radius * np.cos(PHI), loop_radius * np.sin(PHI), np.ones_like(PHI) ] - receiver_location = np.c_[40.0, 0.0, 1.0] layer_depth = 24.0 layer_thickness = 0.1 @@ -379,38 +381,54 @@ def analytic_layer_small_loop_conuductance_comparison( receiver_location, times, orientation=orientation ) + # 1D SURVEY AND SIMULATION src_1d = tdem.sources.MagDipole( [rx], location=np.r_[0.0, 0.0, 1.0], orientation=orientation, waveform=tdem.sources.StepOffWaveform(), ) + survey_1d = tdem.Survey([src_1d]) + + sim_1d = tdem.Simulation1DLayered( + survey=survey_1d, + thicknesses=thicknesses, + sigmaMap=sigma_map_1d, + ) + # 3D SURVEY AND SIMULATION if 
mesh_type == "CYL": src_3d = tdem.sources.CircularLoop( [rx], radius=loop_radius, - location=np.c_[0.0, 0.0, 1.0], + location=source_location, waveform=tdem.sources.StepOffWaveform(), ) else: - src_3d = tdem.sources.LineCurrent( - [rx], location=source_nodes, waveform=tdem.sources.StepOffWaveform() - ) + if rx_type == "MagneticFluxDensity": + src_3d = tdem.sources.MagDipole( + [rx], + location=source_location, + orientation=orientation, + waveform=tdem.sources.StepOffWaveform(), + ) + else: + src_3d = tdem.sources.LineCurrent( + [rx], location=source_nodes, waveform=tdem.sources.StepOffWaveform() + ) - survey_1d = tdem.Survey([src_1d]) survey_3d = tdem.Survey([src_3d]) # DEFINE THE SIMULATIONS - sim_1d = tdem.Simulation1DLayered( - survey=survey_1d, - thicknesses=thicknesses, - sigmaMap=sigma_map_1d, - ) + if rx_type == "MagneticFluxDensity": + sim_3d = tdem.simulation.Simulation3DMagneticFluxDensityConductance( + mesh=mesh, survey=survey_3d, sigma=sigma_3d, tauMap=tau_map + ) + else: + sim_3d = tdem.simulation.Simulation3DElectricFieldConductance( + mesh=mesh, survey=survey_3d, sigma=sigma_3d, tauMap=tau_map + ) - sim_3d = tdem.simulation.Simulation3DElectricFieldConductance( - mesh=mesh, survey=survey_3d, sigma=sigma_3d, tauMap=tau_map - ) sim_3d.time_steps = [ (1e-06, 40), (5e-06, 40), @@ -449,6 +467,7 @@ def analytic_layer_small_loop_conuductance_comparison( "r--", ) plt.loglog(rx.times, abs(analytic_solution), "b*") + plt.title("{} Mesh, {}, {}-Component".format(mesh_type, rx_type, orientation)) plt.show() return log10diff @@ -751,9 +770,46 @@ def test_analytic_m3_CYL_0m_CircularLoop(self): class LayerConductanceTests(unittest.TestCase): # WORKING + + def test_tensor_magdipole_b_x(self): + assert ( + analytic_layer_small_loop_conductance_comparison( + mesh_type="TENSOR", + rx_type="MagneticFluxDensity", + orientation="X", + bounds=None, + plotIt=False, + ) + < 0.01 + ) + + def test_tensor_magdipole_b_z(self): + assert ( + 
analytic_layer_small_loop_conductance_comparison( + mesh_type="TENSOR", + rx_type="MagneticFluxDensity", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.01 + ) + + def test_cyl_magdipole_b_z(self): + assert ( + analytic_layer_small_loop_conductance_comparison( + mesh_type="CYL", + rx_type="MagneticFluxDensity", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.01 + ) + def test_tensor_linecurrent_dbdt_x(self): assert ( - analytic_layer_small_loop_conuductance_comparison( + analytic_layer_small_loop_conductance_comparison( mesh_type="TENSOR", rx_type="MagneticFluxTimeDerivative", orientation="X", @@ -765,7 +821,7 @@ def test_tensor_linecurrent_dbdt_x(self): def test_tensor_linecurrent_dbdt_z(self): assert ( - analytic_layer_small_loop_conuductance_comparison( + analytic_layer_small_loop_conductance_comparison( mesh_type="TENSOR", rx_type="MagneticFluxTimeDerivative", orientation="Z", @@ -775,9 +831,9 @@ def test_tensor_linecurrent_dbdt_z(self): < 0.01 ) - def test_cyle_linecurrent_dbdt_z(self): + def test_cyl_circularloop_dbdt_z(self): assert ( - analytic_layer_small_loop_conuductance_comparison( + analytic_layer_small_loop_conductance_comparison( mesh_type="CYL", rx_type="MagneticFluxTimeDerivative", orientation="Z", From 3280c7f81587bb684ff6f5eaa2f1dbdfbfe015ec Mon Sep 17 00:00:00 2001 From: dccowan Date: Thu, 13 Jul 2023 09:54:06 -0700 Subject: [PATCH 018/164] adjoint --- SimPEG/base/pde_simulation.py | 29 ++--------------------------- 1 file changed, 2 insertions(+), 27 deletions(-) diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index adbeef0eaa..5a103ebccb 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -821,12 +821,7 @@ def __setattr__(self, name, value): self._clear_on_tau_update + self._clear_on_kappa_update + self._clear_on_kappai_update + - [ - "__MeSigmaTauKappa", - "__MeSigmaTauKappaI", - "__MeSigmaTauKappaDeriv", - # "__MeSigmaTauKappaIDeriv" - ] + ["__MeSigmaTauKappa", 
"__MeSigmaTauKappaI"] ) for mat in mat_list: if hasattr(self, mat): @@ -850,17 +845,6 @@ def _MeSigmaTauKappaDeriv(self, u, v=None, adjoint=False): """Only derivative wrt to tau at the moment""" return self._MeTauDeriv(u, v, adjoint) - - - # if getattr(self, "__MeSigmaTauKappaDeriv", None) is None: - # M_prop_deriv = getattr(self, "__Me_tau_deriv") - # setattr( - # self, "__MeSigmaTauKappaDeriv", __inner_mat_mul_op( - # M_prop_deriv, u, v=v, adjoint=adjoint - # ) - # ) - # return getattr(self, "__MeSigmaTauKappaDeriv") - def _MeSigmaTauKappaIDeriv(self, u, v=None, adjoint=False): """Only derivative wrt to tau at the moment""" if getattr(self, "tauMap") is None: @@ -873,10 +857,6 @@ def _MeSigmaTauKappaIDeriv(self, u, v=None, adjoint=False): return self._MeTauDeriv(u, v, adjoint) - # M_prop_deriv = getattr(self, "__Me_tau_deriv") - # return M_prop_deriv(u, v, adjoint=adjoint) - - @property def deleteTheseOnModelUpdate(self): """ @@ -891,11 +871,6 @@ def deleteTheseOnModelUpdate(self): self._clear_on_tau_update + self._clear_on_kappa_update + self._clear_on_kappai_update + - [ - "__MeSigmaTauKappa", - "__MeSigmaTauKappaI", - "__MeSigmaTauKappaDeriv", - # "__MeSigmaTauKappaIDeriv" - ] + ["__MeSigmaTauKappa", "__MeSigmaTauKappaI"] ) return toDelete From 3eec638d2be735e11e26dd5aba547acc87a8b926 Mon Sep 17 00:00:00 2001 From: dccowan Date: Tue, 18 Jul 2023 08:35:24 -0700 Subject: [PATCH 019/164] formatting --- SimPEG/base/pde_simulation.py | 37 ++++++++++--------- .../time_domain/simulation.py | 26 +++++++++---- SimPEG/maps.py | 1 + 3 files changed, 40 insertions(+), 24 deletions(-) diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index 17eaaff340..111a16a9c3 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -853,11 +853,11 @@ def __init__( rhoMap=None, tau=None, tauMap=None, - kappa=0., + kappa=0.0, kappaMap=None, kappai=None, kappaiMap=None, - **kwargs + **kwargs, ): super().__init__(mesh=mesh, **kwargs) 
self.sigma = sigma @@ -875,12 +875,12 @@ def __setattr__(self, name, value): super().__setattr__(name, value) if name in ["sigma", "rho", "tau", "kappa", "kappai"]: mat_list = ( - self._clear_on_sigma_update + - self._clear_on_rho_update + - self._clear_on_tau_update + - self._clear_on_kappa_update + - self._clear_on_kappai_update + - ["__MeSigmaTauKappa", "__MeSigmaTauKappaI"] + self._clear_on_sigma_update + + self._clear_on_rho_update + + self._clear_on_tau_update + + self._clear_on_kappa_update + + self._clear_on_kappai_update + + ["__MeSigmaTauKappa", "__MeSigmaTauKappaI"] ) for mat in mat_list: if hasattr(self, mat): @@ -915,21 +915,24 @@ def _MeSigmaTauKappaIDeriv(self, u, v=None, adjoint=False): u = MI_prop @ (MI_prop @ -u) return self._MeTauDeriv(u, v, adjoint) - @property def deleteTheseOnModelUpdate(self): """ items to be deleted if the model for conductance or resistance per meter is updated """ toDelete = super().deleteTheseOnModelUpdate - if self.tauMap is not None or self.kappaMap is not None or self.kappaiMap is not None: + if ( + self.tauMap is not None + or self.kappaMap is not None + or self.kappaiMap is not None + ): toDelete = ( - toDelete + - self._clear_on_sigma_update + - self._clear_on_rho_update + - self._clear_on_tau_update + - self._clear_on_kappa_update + - self._clear_on_kappai_update + - ["__MeSigmaTauKappa", "__MeSigmaTauKappaI"] + toDelete + + self._clear_on_sigma_update + + self._clear_on_rho_update + + self._clear_on_tau_update + + self._clear_on_kappa_update + + self._clear_on_kappai_update + + ["__MeSigmaTauKappa", "__MeSigmaTauKappaI"] ) return toDelete diff --git a/SimPEG/electromagnetics/time_domain/simulation.py b/SimPEG/electromagnetics/time_domain/simulation.py index 6edc83da22..ed15a05998 100644 --- a/SimPEG/electromagnetics/time_domain/simulation.py +++ b/SimPEG/electromagnetics/time_domain/simulation.py @@ -966,7 +966,9 @@ def getAdcDeriv(self, u, v, adjoint=False): # ------------------------------- 
Simulation3DElectricField ------------------------------- # -class Simulation3DMagneticFluxDensityConductance(Simulation3DMagneticFluxDensity, BaseConductancePDESimulation): +class Simulation3DMagneticFluxDensityConductance( + Simulation3DMagneticFluxDensity, BaseConductancePDESimulation +): r""" Starting from the quasi-static E-B formulation of Maxwell's equations (semi-discretized) @@ -1149,9 +1151,15 @@ def getRHSDeriv(self, tInd, src, v, adjoint=False): if isinstance(s_e, Zero): MeSigmaTauKappaIDerivT_v = Zero() else: - MeSigmaTauKappaIDerivT_v = self._MeSigmaTauKappaIDeriv(s_e, C.T * v, adjoint) + MeSigmaTauKappaIDerivT_v = self._MeSigmaTauKappaIDeriv( + s_e, C.T * v, adjoint + ) - RHSDeriv = MeSigmaTauKappaIDerivT_v + s_eDeriv(MeSigmaTauKappaI.T * (C.T * v)) + s_mDeriv(v) + RHSDeriv = ( + MeSigmaTauKappaIDerivT_v + + s_eDeriv(MeSigmaTauKappaI.T * (C.T * v)) + + s_mDeriv(v) + ) return RHSDeriv @@ -1160,7 +1168,11 @@ def getRHSDeriv(self, tInd, src, v, adjoint=False): else: MeSigmaTauKappaIDeriv_v = self._MeSigmaTauKappaIDeriv(s_e, v, adjoint) - RHSDeriv = C * MeSigmaTauKappaIDeriv_v + C * MeSigmaTauKappaI * s_eDeriv(v) + s_mDeriv(v) + RHSDeriv = ( + C * MeSigmaTauKappaIDeriv_v + + C * MeSigmaTauKappaI * s_eDeriv(v) + + s_mDeriv(v) + ) if self._makeASymmetric is True: return self.MfMui.T * RHSDeriv @@ -1168,8 +1180,9 @@ def getRHSDeriv(self, tInd, src, v, adjoint=False): # ------------------------------- Simulation3DElectricField ------------------------------- # -class Simulation3DElectricFieldConductance(Simulation3DElectricField, BaseConductancePDESimulation): - +class Simulation3DElectricFieldConductance( + Simulation3DElectricField, BaseConductancePDESimulation +): fieldsPair = Fields3DElectricFieldConductance def __init__(self, mesh, survey=None, dt_threshold=1e-8, **kwargs): @@ -1244,7 +1257,6 @@ def getAsubdiagDeriv(self, tInd, u, v, adjoint=False): return -1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) def getAdc(self): - # MeSigmaTauKappa = 
self.MeSigma + self._MeTau + self._MeKappa MeSigmaTauKappa = self._MeSigmaTauKappa diff --git a/SimPEG/maps.py b/SimPEG/maps.py index 2f5c73d3ac..90cbf1c54d 100644 --- a/SimPEG/maps.py +++ b/SimPEG/maps.py @@ -3314,6 +3314,7 @@ def deriv(self, m, v=None): return self.P * v return self.P + class InjectActiveFaces(IdentityMap): r"""Map active faces model to all faces of a mesh. From 3aef7212996f8b71a8ffc4e56717da59da932048 Mon Sep 17 00:00:00 2001 From: dccowan Date: Mon, 21 Aug 2023 13:53:28 -0700 Subject: [PATCH 020/164] preliminary TEM tetrahedral --- SimPEG/electromagnetics/time_domain/fields.py | 6 +-- .../time_domain/simulation.py | 8 +-- .../electromagnetics/time_domain/sources.py | 51 ++++++++++++------- 3 files changed, 40 insertions(+), 25 deletions(-) diff --git a/SimPEG/electromagnetics/time_domain/fields.py b/SimPEG/electromagnetics/time_domain/fields.py index 30ea119c58..d1c0dc41c2 100644 --- a/SimPEG/electromagnetics/time_domain/fields.py +++ b/SimPEG/electromagnetics/time_domain/fields.py @@ -288,9 +288,9 @@ class Fields3DMagneticFluxDensityConductance(Fields3DMagneticFluxDensity): def startup(self): self._times = self.simulation.times self._MeSigma = self.simulation.MeSigma - self._MeSigmaI = self.simulation.MeSigmaI + # self._MeSigmaI = self.simulation.MeSigmaI self._MeSigmaDeriv = self.simulation.MeSigmaDeriv - self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv + # self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv self._edgeCurl = self.simulation.mesh.edge_curl self._MfMui = self.simulation.MfMui self._timeMesh = self.simulation.time_mesh @@ -554,7 +554,7 @@ class Fields3DElectricFieldConductance(Fields3DElectricField): def startup(self): self._times = self.simulation.times self._MeSigma = self.simulation.MeSigma - self._MeSigmaI = self.simulation.MeSigmaI + # self._MeSigmaI = self.simulation.MeSigmaI self._MeSigmaDeriv = self.simulation.MeSigmaDeriv self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv self._edgeCurl = 
self.simulation.mesh.edge_curl diff --git a/SimPEG/electromagnetics/time_domain/simulation.py b/SimPEG/electromagnetics/time_domain/simulation.py index ed15a05998..8194bf179d 100644 --- a/SimPEG/electromagnetics/time_domain/simulation.py +++ b/SimPEG/electromagnetics/time_domain/simulation.py @@ -1068,8 +1068,8 @@ def getAdiag(self, tInd): dt = self.time_steps[tInd] C = self.mesh.edge_curl - # MeSigmaTauKappaI = sdinv(self.MeSigma + self._MeTau + self._MeKappa) - MeSigmaTauKappaI = self._MeSigmaTauKappaI + MeSigmaTauKappaI = sdinv(self.MeSigma + self._MeTau + self._MeKappa) + # MeSigmaTauKappaI = self._MeSigmaTauKappaI MfMui = self.MfMui I = speye(self.mesh.n_faces) @@ -1123,8 +1123,8 @@ def getRHS(self, tInd): Assemble the RHS """ C = self.mesh.edge_curl - # MeSigmaTauKappaI = sdinv(self.MeSigma + self._MeTau + self._MeKappa) - MeSigmaTauKappaI = self._MeSigmaTauKappaI + MeSigmaTauKappaI = sdinv(self.MeSigma + self._MeTau + self._MeKappa) + # MeSigmaTauKappaI = self._MeSigmaTauKappaI MfMui = self.MfMui s_m, s_e = self.getSourceTerm(tInd) diff --git a/SimPEG/electromagnetics/time_domain/sources.py b/SimPEG/electromagnetics/time_domain/sources.py index fa37081259..06c90f66ee 100644 --- a/SimPEG/electromagnetics/time_domain/sources.py +++ b/SimPEG/electromagnetics/time_domain/sources.py @@ -3,6 +3,7 @@ import numpy as np from geoana.em.static import CircularLoopWholeSpace, MagneticDipoleWholeSpace from scipy.constants import mu_0 +from discretize import SimplexMesh from ...utils import Zero, sdiag from ...utils.code_utils import ( @@ -1310,25 +1311,39 @@ def _srcFct(self, obsLoc, coordinates="cartesian"): def _aSrc(self, simulation): coordinates = "cartesian" - if simulation._formulation == "EB": - gridX = simulation.mesh.gridEx - gridY = simulation.mesh.gridEy - gridZ = simulation.mesh.gridEz + + if isinstance(simulation.mesh, SimplexMesh): + if simulation._formulation == "EB": + edges = simulation.mesh.edges + edge_tangents = simulation.mesh.edge_tangents + axyz = 
self._srcFct(edges, coordinates) + a = np.sum(axyz*edge_tangents, axis=1) + else: + faces = simulation.mesh.faces + face_normals = simulation.mesh.face_normals + axyz = self._srcFct(faces, coordinates) + a = np.sum(axyz*face_normals, axis=1) - elif simulation._formulation == "HJ": - gridX = simulation.mesh.gridFx - gridY = simulation.mesh.gridFy - gridZ = simulation.mesh.gridFz - - if simulation.mesh._meshType == "CYL": - coordinates = "cylindrical" - if simulation.mesh.is_symmetric: - return self._srcFct(gridY)[:, 1] - - ax = self._srcFct(gridX, coordinates)[:, 0] - ay = self._srcFct(gridY, coordinates)[:, 1] - az = self._srcFct(gridZ, coordinates)[:, 2] - a = np.concatenate((ax, ay, az)) + else: + if simulation._formulation == "EB": + gridX = simulation.mesh.gridEx + gridY = simulation.mesh.gridEy + gridZ = simulation.mesh.gridEz + + elif simulation._formulation == "HJ": + gridX = simulation.mesh.gridFx + gridY = simulation.mesh.gridFy + gridZ = simulation.mesh.gridFz + + if simulation.mesh._meshType == "CYL": + coordinates = "cylindrical" + if simulation.mesh.is_symmetric: + return self._srcFct(gridY)[:, 1] + + ax = self._srcFct(gridX, coordinates)[:, 0] + ay = self._srcFct(gridY, coordinates)[:, 1] + az = self._srcFct(gridZ, coordinates)[:, 2] + a = np.concatenate((ax, ay, az)) return a From 73f3809d3c18d11bfc4243b2e684ba5c39272ae4 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 14 Sep 2023 16:50:33 -0700 Subject: [PATCH 021/164] Add Choclo as alternative engine in gravity simulation --- SimPEG/potential_fields/gravity/simulation.py | 445 +++++++++++++++++- SimPEG/utils/code_utils.py | 1 + environment_test.yml | 2 + tests/utils/test_report.py | 1 + 4 files changed, 443 insertions(+), 6 deletions(-) diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index c5f134e402..90b08a47bc 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -1,4 
+1,7 @@ +import os +import warnings import numpy as np +import discretize import scipy.constants as constants from geoana.kernels import prism_fz, prism_fzx, prism_fzy, prism_fzz from scipy.constants import G as NewtG @@ -9,6 +12,169 @@ from ...base import BasePDESimulation from ..base import BaseEquivalentSourceLayerSimulation, BasePFSimulation +try: + import choclo +except ImportError: + # Define dummy jit decorator + def jit(*args, **kwargs): + return lambda f: f + + choclo = None +else: + from numba import jit, prange + + @jit(nopython=True) + def kernel_uv(easting, northing, upward, radius): + """Kernel for Guv gradiometry component.""" + result = 0.5 * ( + choclo.prism.kernel_nn(easting, northing, upward, radius) + - choclo.prism.kernel_ee(easting, northing, upward, radius) + ) + return result + + CHOCLO_KERNELS = { + "gx": choclo.prism.kernel_e, + "gy": choclo.prism.kernel_n, + "gz": choclo.prism.kernel_u, + "gxx": choclo.prism.kernel_ee, + "gyy": choclo.prism.kernel_nn, + "gzz": choclo.prism.kernel_uu, + "gxy": choclo.prism.kernel_en, + "gxz": choclo.prism.kernel_eu, + "gyz": choclo.prism.kernel_nu, + "guv": kernel_uv, + } + + +def _forward_gravity( + receivers, + nodes, + densities, + fields, + cell_nodes, + kernel_func, + constant_factor, +): + """ + Forward model the gravity field of active cells on receivers + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + densities : (n_active_cells) + Array with densities of each active cell in the mesh. + fields : (n_receivers) array + Array full of zeros where the gravity fields on each receiver will be + stored. This could be a preallocated array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. 
+ kernel_func : callable + Kernel function that will be evaluated on each node of the mesh. Choose + one of the kernel functions in ``choclo.prism``. + constant_factor : float + Constant factor that will be used to multiply each element of the + ``fields`` array. + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vector for kernels evaluated on mesh nodes + kernels = np.empty(n_nodes) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kernels[j] = kernel_func(dx, dy, dz, distance) + # Compute fields from the kernel values + for k in range(n_cells): + fields[i] += ( + constant_factor + * densities[k] + * ( + -kernels[cell_nodes[k, 0]] + + kernels[cell_nodes[k, 1]] + + kernels[cell_nodes[k, 2]] + - kernels[cell_nodes[k, 3]] + + kernels[cell_nodes[k, 4]] + - kernels[cell_nodes[k, 5]] + - kernels[cell_nodes[k, 6]] + + kernels[cell_nodes[k, 7]] + ) + ) + + +def _fill_sensitivity_matrix( + receivers, + nodes, + sensitivity_matrix, + cell_nodes, + kernel_func, + constant_factor, +): + """ + Fill the sensitivity matrix + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + sensitivity_matrix : (n_receivers, n_active_nodes) array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + kernel_func : callable + Kernel function that will be evaluated on each node of the mesh. 
Choose + one of the kernel functions in ``choclo.prism``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + + Notes + ----- + The conversion factor is applied here to each row of the sensitivity matrix + because it's more efficient than doing it afterwards: it would require to + index the rows that corresponds to each component. + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vector for kernels evaluated on mesh nodes + kernels = np.empty(n_nodes) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kernels[j] = kernel_func(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + sensitivity_matrix[i, k] = np.float32( + constant_factor + * ( + -kernels[cell_nodes[k, 0]] + + kernels[cell_nodes[k, 1]] + + kernels[cell_nodes[k, 2]] + - kernels[cell_nodes[k, 3]] + + kernels[cell_nodes[k, 4]] + - kernels[cell_nodes[k, 5]] + - kernels[cell_nodes[k, 6]] + + kernels[cell_nodes[k, 7]] + ) + ) + class Simulation3DIntegral(BasePFSimulation): """ @@ -27,27 +193,131 @@ class Simulation3DIntegral(BasePFSimulation): Gradient components ("gxx", "gyy", "gzz", "gxy", "gxz", "gyz") are returned in Eotvos (:math:`10^{-9} s^{-2}`). + + Parameters + ---------- + mesh : discretize.TreeMesh or discretize.TensorMesh + Mesh use to run the gravity simulation. + survey : SimPEG.potential_fields.gravity.Survey + Gravity survey with information of the receivers. + ind_active : (n_cells) array, optional + Array that indicates which cells in ``mesh`` are active cells. + rho : array (optional) + Density array for the active cells in the mesh. 
+ rhoMap : Mapping (optional) + Model mapping. + sensitivity_dtype : numpy.dtype, optional + Data type that will be used to build the sensitivity matrix. + store_sensitivities : str + Options for storing sensitivity matrix. There are 3 options + + - 'ram': sensitivities are stored in the computer's RAM + - 'disk': sensitivities are written to a directory + - 'forward_only': you intend only do perform a forward simulation and + sensitivities do not need to be stored + + sensitivity_path : str, optional + Path to store the sensitivity matrix if ``store_sensitivities`` is set + to ``"disk"``. Default to "./sensitivities". + engine : str, optional + Choose which engine should be used to run the forward model: + ``"geoana"`` or "``choclo``". + choclo_parallel : bool, optional + If True, the simulation will run in parallel. If False, it will + run in serial. If ``engine`` is not ``"choclo"`` this argument will be + ignored. """ rho, rhoMap, rhoDeriv = props.Invertible("Density") - def __init__(self, mesh, rho=None, rhoMap=None, **kwargs): + def __init__( + self, + mesh, + rho=None, + rhoMap=None, + engine="geoana", + choclo_parallel=True, + **kwargs, + ): super().__init__(mesh, **kwargs) self.rho = rho self.rhoMap = rhoMap self._G = None self._gtg_diagonal = None self.modelMap = self.rhoMap + self.choclo_parallel = choclo_parallel + self.engine = engine + self._sanity_checks_engine(kwargs) + # Define jit functions + if self.engine == "choclo": + self._fill_sensitivity_matrix = jit( + nopython=True, parallel=choclo_parallel + )(_fill_sensitivity_matrix) + self._forward_gravity = jit(nopython=True, parallel=choclo_parallel)( + _forward_gravity + ) + + def _sanity_checks_engine(self, kwargs): + """ + Sanity checks for the engine parameter. + + Needs the kwargs passed to the __init__ method to raise some warnings. + """ + if self.engine not in ("choclo", "geoana"): + raise ValueError( + f"Invalid engine '{self.engine}'. Choose from 'geoana' or 'choclo'." 
+ ) + if self.engine == "choclo" and choclo is None: + raise ImportError( + "The choclo package couldn't be found." + "Running a gravity simulation with 'engine=\"choclo\"' needs " + "choclo to be installed." + "\nTry installing choclo with:" + "\n pip install choclo" + "\nor:" + "\n conda install choclo" + ) + # Warn if n_processes has been passed + if self.engine == "choclo" and "n_processes" in kwargs: + warnings.warn( + "The 'n_processes' will be ignored when selecting 'choclo' as the " + "engine in the gravity simulation.", + stacklevel=1, + ) + # Sanity checks for sensitivity_path when using choclo and storing in disk + if self.engine == "choclo" and self.store_sensitivities == "disk": + if os.path.isdir(self.sensitivity_path): + raise ValueError( + f"The passed sensitivity_path '{self.sensitivity_path}' is " + "a directory. " + "When using 'choclo' as the engine, 'senstivity_path' " + "should be the path to a new or existing file." + ) def fields(self, m): - self.model = m + """ + Forward model the gravity field of the mesh on the receivers in the survey + Parameters + ---------- + m : (n_active_cells,) array + Array with values for the model. + + Returns + ------- + (nD,) array + Gravity fields generated by the given model on every receiver + location. 
+ """ + self.model = m if self.store_sensitivities == "forward_only": # Compute the linear operation without forming the full dense G - fields = mkvc(self.linear_operator()) + if self.engine == "choclo": + fields = self._forward(self.rho) + else: + fields = mkvc(self.linear_operator()) else: fields = self.G @ (self.rho).astype(self.sensitivity_dtype, copy=False) - return np.asarray(fields) def getJtJdiag(self, m, W=None, f=None): @@ -95,8 +365,10 @@ def G(self): Gravity forward operator """ if getattr(self, "_G", None) is None: - self._G = self.linear_operator() - + if self.engine == "choclo": + self._G = self._sensitivity_matrix() + else: + self._G = self.linear_operator() return self._G @property @@ -205,6 +477,167 @@ def evaluate_integral(self, receiver_location, components): ] ) + def _forward(self, densities): + """ + Forward model the fields of active cells in the mesh on receivers. + + Parameters + ---------- + densities : (n_active_cells) array + Array containing the densities of the active cells in the mesh, in + g/cc. + + Returns + ------- + (nD,) array + Always return a ``np.float64`` array. 
+ """ + # Gather active nodes and the indices of the nodes for each active cell + active_nodes, active_cell_nodes = self._get_active_nodes() + # Allocate fields array + fields = np.zeros(self.survey.nD, dtype=np.float32) + # Start filling the sensitivity matrix + index_offset = 0 + for components, receivers in self._get_components_and_receivers(): + n_components = len(components) + n_elements = n_components * receivers.shape[0] + for i, component in enumerate(components): + kernel_func = CHOCLO_KERNELS[component] + conversion_factor = self._get_conversion_factor(component) + vector_slice = slice( + index_offset + i, index_offset + n_elements, n_components + ) + self._forward_gravity( + receivers, + active_nodes, + densities, + fields[vector_slice], + active_cell_nodes, + kernel_func, + constants.G * conversion_factor, + ) + index_offset += n_elements + return fields + + def _sensitivity_matrix(self): + """ + Compute the sensitivity matrix G + + Returns + ------- + (nD, n_active_cells) array + """ + # Gather active nodes and the indices of the nodes for each active cell + active_nodes, active_cell_nodes = self._get_active_nodes() + # Allocate sensitivity matrix + shape = (self.survey.nD, self.nC) + if self.store_sensitivities == "disk": + sensitivity_matrix = np.memmap( + self.sensitivity_path, + shape=shape, + dtype=self.sensitivity_dtype, + order="C", # it's more efficient to write in row major + mode="w+", + ) + else: + sensitivity_matrix = np.empty(shape, dtype=self.sensitivity_dtype) + # Start filling the sensitivity matrix + index_offset = 0 + for components, receivers in self._get_components_and_receivers(): + n_components = len(components) + n_rows = n_components * receivers.shape[0] + for i, component in enumerate(components): + kernel_func = CHOCLO_KERNELS[component] + conversion_factor = self._get_conversion_factor(component) + matrix_slice = slice( + index_offset + i, index_offset + n_rows, n_components + ) + self._fill_sensitivity_matrix( + receivers, + 
active_nodes, + sensitivity_matrix[matrix_slice, :], + active_cell_nodes, + kernel_func, + constants.G * conversion_factor, + ) + index_offset += n_rows + return sensitivity_matrix + + def _get_cell_nodes(self): + """ + Return indices of nodes for each cell in the mesh. + """ + if isinstance(self.mesh, discretize.TreeMesh): + cell_nodes = self.mesh.cell_nodes + elif isinstance(self.mesh, discretize.TensorMesh): + cell_nodes = self._get_tensormesh_cell_nodes() + else: + raise TypeError(f"Invalid mesh of type {self.mesh.__class__.__name__}.") + return cell_nodes + + def _get_tensormesh_cell_nodes(self): + """Dumb implementation of cell_nodes for a TensorMesh""" + inds = np.arange(self.mesh.n_nodes).reshape(self.mesh.shape_nodes, order="F") + cell_nodes = [ + inds[:-1, :-1, :-1].reshape(-1, order="F"), + inds[1:, :-1, :-1].reshape(-1, order="F"), + inds[:-1, 1:, :-1].reshape(-1, order="F"), + inds[1:, 1:, :-1].reshape(-1, order="F"), + inds[:-1, :-1, 1:].reshape(-1, order="F"), + inds[1:, :-1, 1:].reshape(-1, order="F"), + inds[:-1, 1:, 1:].reshape(-1, order="F"), + inds[1:, 1:, 1:].reshape(-1, order="F"), + ] + cell_nodes = np.stack(cell_nodes, axis=-1) + return cell_nodes + + def _get_active_nodes(self): + """ + Return locations of nodes only for active cells + + Also return an array containing the indices of the "active nodes" for + each active cell in the mesh + """ + # Get all nodes in the mesh + if isinstance(self.mesh, discretize.TreeMesh): + nodes = self.mesh.total_nodes + elif isinstance(self.mesh, discretize.TensorMesh): + nodes = self.mesh.nodes + else: + raise TypeError(f"Invalid mesh of type {self.mesh.__class__.__name__}.") + # Get original cell_nodes but only for active cells + cell_nodes = self._get_cell_nodes() + # If all cells in the mesh are active, return nodes and cell_nodes + if self.nC == self.mesh.n_cells: + return nodes, cell_nodes + # Keep only the cell_nodes for active cells + cell_nodes = cell_nodes[self.ind_active] + # Get the unique 
indices of the nodes that belong to every active cell + # (these indices correspond to the original `nodes` array) + unique_nodes, active_cell_nodes = np.unique(cell_nodes, return_inverse=True) + # Select only the nodes that belong to the active cells (active nodes) + active_nodes = nodes[unique_nodes] + # Reshape indices of active cells for each active cell in the mesh + active_cell_nodes = active_cell_nodes.reshape(cell_nodes.shape) + return active_nodes, active_cell_nodes + + def _get_components_and_receivers(self): + """Generator for receiver locations and their field components.""" + for receiver_object in self.survey.source_field.receiver_list: + yield receiver_object.components, receiver_object.locations + + def _get_conversion_factor(self, component): + """ + Return conversion factor for the given component + """ + if component in ("gx", "gy", "gz"): + conversion_factor = 1e8 + elif component in ("gxx", "gyy", "gzz", "gxy", "gxz", "gyz", "guv"): + conversion_factor = 1e12 + else: + raise ValueError(f"Invalid component '{component}'.") + return conversion_factor + class SimulationEquivalentSourceLayer( BaseEquivalentSourceLayerSimulation, Simulation3DIntegral diff --git a/SimPEG/utils/code_utils.py b/SimPEG/utils/code_utils.py index b0c5623a4c..87e4c1bede 100644 --- a/SimPEG/utils/code_utils.py +++ b/SimPEG/utils/code_utils.py @@ -491,6 +491,7 @@ def __init__(self, add_pckg=None, ncol=3, text_width=80, sort=False): "vtk", "utm", "memory_profiler", + "choclo", ] super().__init__( diff --git a/environment_test.yml b/environment_test.yml index b658b30427..06d6ff8db6 100644 --- a/environment_test.yml +++ b/environment_test.yml @@ -36,6 +36,8 @@ dependencies: - pyvista - pip - python-kaleido + # Optional dependencies + - choclo # Linters and code style - pre-commit - black==23.1.0 diff --git a/tests/utils/test_report.py b/tests/utils/test_report.py index e9ebe09bb3..9d5091545e 100644 --- a/tests/utils/test_report.py +++ b/tests/utils/test_report.py @@ -34,6 +34,7 
@@ def test_version_defaults(self): "vtk", "utm", "memory_profiler", + "choclo", ], ncol=3, text_width=80, From a320069d8607e6c0df9c6bc0294fddb6f1a4ac96 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 14 Sep 2023 16:51:13 -0700 Subject: [PATCH 022/164] Fix typo --- SimPEG/simulation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/simulation.py b/SimPEG/simulation.py index bd45647c62..aece3dbc65 100644 --- a/SimPEG/simulation.py +++ b/SimPEG/simulation.py @@ -101,7 +101,7 @@ def counter(self, value): @property def sensitivity_path(self): - """Path to store the sensitivty. + """Path to store the sensitivity. Returns ------- From 3c9a8c410ca38dbc3b870c7027ebdec90b2116ce Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Fri, 15 Sep 2023 09:46:02 -0700 Subject: [PATCH 023/164] Improve docstrings --- SimPEG/potential_fields/gravity/simulation.py | 35 ++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index 90b08a47bc..79dee58054 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -58,6 +58,17 @@ def _forward_gravity( """ Forward model the gravity field of active cells on receivers + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_forward_gravity = jit(nopython=True, parallel=True)(_forward_gravity) + jit_forward_gravity( + receivers, nodes, densities, fields, cell_nodes, kernel_func, const_factor + ) + Parameters ---------- receivers : (n_receivers, 3) array @@ -78,6 +89,12 @@ def _forward_gravity( constant_factor : float Constant factor that will be used to multiply each element of the ``fields`` array. 
+ + Notes + ----- + The conversion factor is applied here to each element of fields because + it's more efficient than doing it afterwards: it would require to + index the elements that corresponds to each component. """ n_receivers = receivers.shape[0] n_nodes = nodes.shape[0] @@ -121,6 +138,17 @@ def _fill_sensitivity_matrix( """ Fill the sensitivity matrix + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_sensitivity = jit(nopython=True, parallel=True)(_forward_sensitivity_matrix) + jit_sensitivity( + receivers, nodes, densities, fields, cell_nodes, kernel_func, const_factor + ) + Parameters ---------- receivers : (n_receivers, 3) array @@ -576,7 +604,12 @@ def _get_cell_nodes(self): return cell_nodes def _get_tensormesh_cell_nodes(self): - """Dumb implementation of cell_nodes for a TensorMesh""" + """ + Quick implmentation of ``cell_nodes`` for a ``TensorMesh``. + + This method should be removed after ``TensorMesh.cell_nodes`` is added + in discretize. 
+ """ inds = np.arange(self.mesh.n_nodes).reshape(self.mesh.shape_nodes, order="F") cell_nodes = [ inds[:-1, :-1, :-1].reshape(-1, order="F"), From d7ece2e692631fb21894350e47a209e9c85963b6 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Fri, 15 Sep 2023 09:46:14 -0700 Subject: [PATCH 024/164] Raise error if survey has not source_field attribute --- SimPEG/potential_fields/gravity/simulation.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index 79dee58054..4aa3d2bd37 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -656,6 +656,10 @@ def _get_active_nodes(self): def _get_components_and_receivers(self): """Generator for receiver locations and their field components.""" + if not hasattr(self.survey, "source_field"): + raise AttributeError( + f"The survey '{self.survey}' has no 'source_field' attribute." + ) for receiver_object in self.survey.source_field.receiver_list: yield receiver_object.components, receiver_object.locations From 3f3bc0f5fb4ac1f6d195648bc377431516ed7dcb Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Fri, 15 Sep 2023 16:13:41 -0700 Subject: [PATCH 025/164] Allow numba to automatically cast matrix elements Don't force values to be np.float32, Numba will cast them automatically when trying to write to the preallocated matrix. 
--- SimPEG/potential_fields/gravity/simulation.py | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index 4aa3d2bd37..81b8e0222b 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -189,18 +189,15 @@ def _fill_sensitivity_matrix( kernels[j] = kernel_func(dx, dy, dz, distance) # Compute sensitivity matrix elements from the kernel values for k in range(n_cells): - sensitivity_matrix[i, k] = np.float32( - constant_factor - * ( - -kernels[cell_nodes[k, 0]] - + kernels[cell_nodes[k, 1]] - + kernels[cell_nodes[k, 2]] - - kernels[cell_nodes[k, 3]] - + kernels[cell_nodes[k, 4]] - - kernels[cell_nodes[k, 5]] - - kernels[cell_nodes[k, 6]] - + kernels[cell_nodes[k, 7]] - ) + sensitivity_matrix[i, k] = constant_factor * ( + -kernels[cell_nodes[k, 0]] + + kernels[cell_nodes[k, 1]] + + kernels[cell_nodes[k, 2]] + - kernels[cell_nodes[k, 3]] + + kernels[cell_nodes[k, 4]] + - kernels[cell_nodes[k, 5]] + - kernels[cell_nodes[k, 6]] + + kernels[cell_nodes[k, 7]] ) From 8cbcda450e86639d73d6600f5d2e6820274c307b Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Fri, 15 Sep 2023 16:14:44 -0700 Subject: [PATCH 026/164] Define decorated functions as global private ones When defining the decorated functions as attributes of the class, then every new simulation is created, they need to be recompiled. 
--- SimPEG/potential_fields/gravity/simulation.py | 23 ++++++++++++++----- 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index 81b8e0222b..5111539ad5 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -201,6 +201,17 @@ def _fill_sensitivity_matrix( ) +# Define decorated versions of these functions +_fill_sensitivity_matrix_parallel = jit(nopython=True, parallel=True)( + _fill_sensitivity_matrix +) +_fill_sensitivity_matrix_serial = jit(nopython=True, parallel=False)( + _fill_sensitivity_matrix +) +_forward_gravity_parallel = jit(nopython=True, parallel=True)(_forward_gravity) +_forward_gravity_serial = jit(nopython=True, parallel=False)(_forward_gravity) + + class Simulation3DIntegral(BasePFSimulation): """ Gravity simulation in integral form. @@ -275,12 +286,12 @@ def __init__( self._sanity_checks_engine(kwargs) # Define jit functions if self.engine == "choclo": - self._fill_sensitivity_matrix = jit( - nopython=True, parallel=choclo_parallel - )(_fill_sensitivity_matrix) - self._forward_gravity = jit(nopython=True, parallel=choclo_parallel)( - _forward_gravity - ) + if choclo_parallel: + self._fill_sensitivity_matrix = _fill_sensitivity_matrix_parallel + self._forward_gravity = _forward_gravity_parallel + else: + self._fill_sensitivity_matrix = _fill_sensitivity_matrix_serial + self._forward_gravity = _forward_gravity_serial def _sanity_checks_engine(self, kwargs): """ From c4f4702bbc26fa0e1daf632888be887eb293dec1 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Fri, 15 Sep 2023 16:48:16 -0700 Subject: [PATCH 027/164] Start adding tests for the new features --- tests/pf/test_forward_Grav_Linear.py | 186 +++++++++++++++++++++++++++ 1 file changed, 186 insertions(+) diff --git a/tests/pf/test_forward_Grav_Linear.py b/tests/pf/test_forward_Grav_Linear.py index 286e8ba6db..e6d4cf5eaa 100644 --- 
a/tests/pf/test_forward_Grav_Linear.py +++ b/tests/pf/test_forward_Grav_Linear.py @@ -8,6 +8,192 @@ import os +class TestsGravitySimulation: + """ + Test gravity simulation. + """ + + @pytest.fixture + def blocks(self): + """Synthetic blocks to build the sample model.""" + block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) + block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) + rho1 = 1.0 + rho2 = 2.0 + return (block1, block2), (rho1, rho2) + + @pytest.fixture + def mesh(self): + """Sample mesh and density array.""" + # Define a mesh + cs = 0.2 + hxind, hyind, hzind = tuple([(cs, 41)] for _ in range(3)) + mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") + return mesh + + @pytest.fixture + def density_and_active_cells(self, mesh, blocks): + """Sample density and active_cells arrays for the sample mesh.""" + # create a model of two blocks, 1 inside the other + (block1, block2), (rho1, rho2) = blocks + block1_inds = self.get_block_inds(mesh.cell_centers, block1) + block2_inds = self.get_block_inds(mesh.cell_centers, block2) + # Define densities for each block + model = np.zeros(mesh.n_cells) + model[block1_inds] = rho1 + model[block2_inds] = rho2 + # Define active cells and reduce model + active_cells = model != 0.0 + model_reduced = model[active_cells] + return model_reduced, active_cells + + def get_block_inds(self, grid, block): + return np.where( + (grid[:, 0] > block[0, 0]) + & (grid[:, 0] < block[0, 1]) + & (grid[:, 1] > block[1, 0]) + & (grid[:, 1] < block[1, 1]) + & (grid[:, 2] > block[2, 0]) + & (grid[:, 2] < block[2, 1]) + ) + + @pytest.fixture + def receivers_locations(self): + nx = 5 + ny = 5 + # Create plane of observations + xr = np.linspace(-20, 20, nx) + yr = np.linspace(-20, 20, ny) + x, y = np.meshgrid(xr, yr) + z = np.ones_like(x) * 3.0 + receivers_locations = np.vstack([a.ravel() for a in (x, y, z)]).T + return receivers_locations + + def get_analytic_solution(self, blocks, survey): + """Compute analytical response from 
dense prism.""" + (block1, block2), (rho1, rho2) = blocks + # Build prisms (convert densities from g/cc to kg/m3) + prisms = [ + Prism(block1[:, 0], block1[:, 1], rho1 * 1000), + Prism(block2[:, 0], block2[:, 1], -rho1 * 1000), + Prism(block2[:, 0], block2[:, 1], rho2 * 1000), + ] + # Forward model the prisms + components = survey.source_field.receiver_list[0].components + receivers_locations = survey.source_field.receiver_list[0].locations + if "gx" in components or "gy" in components or "gz" in components: + fields = sum( + prism.gravitational_field(receivers_locations) for prism in prisms + ) + fields *= 1e5 # convert to mGal from m/s^2 + else: + fields = sum( + prism.gravitational_gradient(receivers_locations) for prism in prisms + ) + fields *= 1e9 # convert to Eotvos from 1/s^2 + return fields + + @pytest.mark.parametrize("engine", ("geoana", "choclo")) + @pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) + def test_accelerations_vs_analytic( + self, + engine, + store_sensitivities, + tmp_path, + blocks, + mesh, + density_and_active_cells, + receivers_locations, + ): + """ + Test gravity acceleration components against analytic solutions of prisms. 
+ """ + components = ["gx", "gy", "gz"] + # Unpack fixtures + density, active_cells = density_and_active_cells + # Create survey + receivers = gravity.Point(receivers_locations, components=components) + sources = gravity.SourceField([receivers]) + survey = gravity.Survey(sources) + # Create reduced identity map for Linear Problem + idenMap = maps.IdentityMap(nP=int(sum(active_cells))) + # Define sensitivity_path + sensitivity_path = tmp_path + if engine == "choclo": + sensitivity_path /= "sensitivity_choclo" + # Create simulation + sim = gravity.Simulation3DIntegral( + mesh, + survey=survey, + rhoMap=idenMap, + ind_active=active_cells, + store_sensitivities=store_sensitivities, + engine=engine, + sensitivity_path=str(sensitivity_path), + sensitivity_dtype=np.float64, + ) + data = sim.dpred(density) + g_x, g_y, g_z = data[0::3], data[1::3], data[2::3] + solution = self.get_analytic_solution(blocks, survey) + # Check results + rtol, atol = 1e-9, 1e-6 + np.testing.assert_allclose(g_x, solution[:, 0], rtol=rtol, atol=atol) + np.testing.assert_allclose(g_y, solution[:, 1], rtol=rtol, atol=atol) + np.testing.assert_allclose(g_z, solution[:, 2], rtol=rtol, atol=atol) + + @pytest.mark.parametrize("engine", ("geoana", "choclo")) + @pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) + def test_tensor_vs_analytic( + self, + engine, + store_sensitivities, + tmp_path, + blocks, + mesh, + density_and_active_cells, + receivers_locations, + ): + """ + Test tensor components against analytic solutions of prisms. 
+ """ + components = ["gxx", "gxy", "gxz", "gyy", "gyz", "gzz"] + # Unpack fixtures + density, active_cells = density_and_active_cells + # Create survey + receivers = gravity.Point(receivers_locations, components=components) + sources = gravity.SourceField([receivers]) + survey = gravity.Survey(sources) + # Create reduced identity map for Linear Problem + idenMap = maps.IdentityMap(nP=int(sum(active_cells))) + # Define sensitivity_path + sensitivity_path = tmp_path + if engine == "choclo": + sensitivity_path /= "sensitivity_choclo" + # Create simulation + sim = gravity.Simulation3DIntegral( + mesh, + survey=survey, + rhoMap=idenMap, + ind_active=active_cells, + store_sensitivities=store_sensitivities, + engine=engine, + sensitivity_path=str(sensitivity_path), + sensitivity_dtype=np.float64, + ) + data = sim.dpred(density) + g_xx, g_xy, g_xz = data[0::6], data[1::6], data[2::6] + g_yy, g_yz, g_zz = data[3::6], data[4::6], data[5::6] + solution = self.get_analytic_solution(blocks, survey) + # Check results + rtol, atol = 1e-10, 1.2e-6 + np.testing.assert_allclose(g_xx, solution[..., 0, 0], rtol=rtol, atol=atol) + np.testing.assert_allclose(g_xy, solution[..., 0, 1], rtol=rtol, atol=atol) + np.testing.assert_allclose(g_xz, solution[..., 0, 2], rtol=rtol, atol=atol) + np.testing.assert_allclose(g_yy, solution[..., 1, 1], rtol=rtol, atol=atol) + np.testing.assert_allclose(g_yz, solution[..., 1, 2], rtol=rtol, atol=atol) + np.testing.assert_allclose(g_zz, solution[..., 2, 2], rtol=rtol, atol=atol) + + def test_ana_grav_forward(tmp_path): nx = 5 ny = 5 From 0b064b24879b53de88abd4cb1b31a92202547b33 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Mon, 18 Sep 2023 10:15:22 -0700 Subject: [PATCH 028/164] Add test for guv --- tests/pf/test_forward_Grav_Linear.py | 50 +++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/tests/pf/test_forward_Grav_Linear.py b/tests/pf/test_forward_Grav_Linear.py index e6d4cf5eaa..76af3c9d28 100644 --- 
a/tests/pf/test_forward_Grav_Linear.py +++ b/tests/pf/test_forward_Grav_Linear.py @@ -185,7 +185,7 @@ def test_tensor_vs_analytic( g_yy, g_yz, g_zz = data[3::6], data[4::6], data[5::6] solution = self.get_analytic_solution(blocks, survey) # Check results - rtol, atol = 1e-10, 1.2e-6 + rtol, atol = 2e-6, 1e-6 np.testing.assert_allclose(g_xx, solution[..., 0, 0], rtol=rtol, atol=atol) np.testing.assert_allclose(g_xy, solution[..., 0, 1], rtol=rtol, atol=atol) np.testing.assert_allclose(g_xz, solution[..., 0, 2], rtol=rtol, atol=atol) @@ -193,6 +193,54 @@ def test_tensor_vs_analytic( np.testing.assert_allclose(g_yz, solution[..., 1, 2], rtol=rtol, atol=atol) np.testing.assert_allclose(g_zz, solution[..., 2, 2], rtol=rtol, atol=atol) + @pytest.mark.parametrize("engine", ("geoana", "choclo")) + @pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) + def test_guv_vs_analytic( + self, + engine, + store_sensitivities, + tmp_path, + blocks, + mesh, + density_and_active_cells, + receivers_locations, + ): + """ + Test guv tensor component against analytic solutions of prisms. 
+ """ + components = ["guv"] + # Unpack fixtures + density, active_cells = density_and_active_cells + # Create survey + receivers = gravity.Point(receivers_locations, components=components) + sources = gravity.SourceField([receivers]) + survey = gravity.Survey(sources) + # Create reduced identity map for Linear Problem + idenMap = maps.IdentityMap(nP=int(sum(active_cells))) + # Define sensitivity_path + sensitivity_path = tmp_path + if engine == "choclo": + sensitivity_path /= "sensitivity_choclo" + # Create simulation + sim = gravity.Simulation3DIntegral( + mesh, + survey=survey, + rhoMap=idenMap, + ind_active=active_cells, + store_sensitivities=store_sensitivities, + engine=engine, + sensitivity_path=str(sensitivity_path), + sensitivity_dtype=np.float64, + ) + g_uv = sim.dpred(density) + solution = self.get_analytic_solution(blocks, survey) + g_xx_solution = solution[..., 0, 0] + g_yy_solution = solution[..., 1, 1] + g_uv_solution = 0.5 * (g_yy_solution - g_xx_solution) + # Check results + rtol, atol = 2e-6, 1e-6 + np.testing.assert_allclose(g_uv, g_uv_solution, rtol=rtol, atol=atol) + def test_ana_grav_forward(tmp_path): nx = 5 From 9cab149374799bc4420caec3e3c6b8ca5dde843e Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Mon, 18 Sep 2023 10:16:28 -0700 Subject: [PATCH 029/164] Test gravity inversion with both engines Move the test for gravity inversions to pure pytest. Use parametrization to run the same test using geoana and choclo. Add a `random_seed` to `make_synthetic_data` to avoid tests failing randomly. 
--- SimPEG/simulation.py | 12 +- tests/pf/test_grav_inversion_linear.py | 222 ++++++++++++------------- 2 files changed, 113 insertions(+), 121 deletions(-) diff --git a/SimPEG/simulation.py b/SimPEG/simulation.py index aece3dbc65..722b8eaa92 100644 --- a/SimPEG/simulation.py +++ b/SimPEG/simulation.py @@ -305,7 +305,14 @@ def residual(self, m, dobs, f=None): return mkvc(self.dpred(m, f=f) - dobs) def make_synthetic_data( - self, m, relative_error=0.05, noise_floor=0.0, f=None, add_noise=False, **kwargs + self, + m, + relative_error=0.05, + noise_floor=0.0, + f=None, + add_noise=False, + random_seed=None, + **kwargs, ): """ Make synthetic data given a model, and a standard deviation. @@ -328,7 +335,8 @@ def make_synthetic_data( if add_noise is True: std = np.sqrt((relative_error * np.abs(dclean)) ** 2 + noise_floor**2) - noise = std * np.random.randn(*dclean.shape) + random_num_generator = np.random.default_rng(seed=random_seed) + noise = random_num_generator.normal(loc=0, scale=std, size=dclean.shape) dobs = dclean + noise else: dobs = dclean diff --git a/tests/pf/test_grav_inversion_linear.py b/tests/pf/test_grav_inversion_linear.py index 733cec9bfe..ab9eafe7f1 100644 --- a/tests/pf/test_grav_inversion_linear.py +++ b/tests/pf/test_grav_inversion_linear.py @@ -1,5 +1,4 @@ -import shutil -import unittest +import pytest import numpy as np import discretize @@ -17,121 +16,106 @@ from SimPEG.potential_fields import gravity -class GravInvLinProblemTest(unittest.TestCase): - def setUp(self): - # Create a self.mesh - dx = 5.0 - hxind = [(dx, 5, -1.3), (dx, 5), (dx, 5, 1.3)] - hyind = [(dx, 5, -1.3), (dx, 5), (dx, 5, 1.3)] - hzind = [(dx, 5, -1.3), (dx, 6)] - self.mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") - - # Get index of the center - midx = int(self.mesh.shape_cells[0] / 2) - midy = int(self.mesh.shape_cells[1] / 2) - - # Lets create a simple Gaussian topo and set the active cells - [xx, yy] = np.meshgrid(self.mesh.nodes_x, self.mesh.nodes_y) - zz = 
-np.exp((xx**2 + yy**2) / 75**2) + self.mesh.nodes_z[-1] - - # Go from topo to actv cells - topo = np.c_[utils.mkvc(xx), utils.mkvc(yy), utils.mkvc(zz)] - actv = active_from_xyz(self.mesh, topo, "N") - - # Create active map to go from reduce space to full - self.actvMap = maps.InjectActiveCells(self.mesh, actv, -100) - nC = int(actv.sum()) - - # Create and array of observation points - xr = np.linspace(-20.0, 20.0, 20) - yr = np.linspace(-20.0, 20.0, 20) - X, Y = np.meshgrid(xr, yr) - - # Move the observation points 5m above the topo - Z = -np.exp((X**2 + Y**2) / 75**2) + self.mesh.nodes_z[-1] + 5.0 - - # Create a MAGsurvey - locXYZ = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] - rxLoc = gravity.Point(locXYZ) - srcField = gravity.SourceField([rxLoc]) - survey = gravity.Survey(srcField) - - # We can now create a density model and generate data - # Here a simple block in half-space - model = np.zeros( - ( - self.mesh.shape_cells[0], - self.mesh.shape_cells[1], - self.mesh.shape_cells[2], - ) +@pytest.mark.parametrize("engine", ("geoana", "choclo")) +def test_gravity_inversion_linear(engine): + """Test gravity inversion.""" + # Create a mesh + dx = 5.0 + hxind = [(dx, 5, -1.3), (dx, 5), (dx, 5, 1.3)] + hyind = [(dx, 5, -1.3), (dx, 5), (dx, 5, 1.3)] + hzind = [(dx, 5, -1.3), (dx, 6)] + mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") + + # Get index of the center + midx = int(mesh.shape_cells[0] / 2) + midy = int(mesh.shape_cells[1] / 2) + + # Lets create a simple Gaussian topo and set the active cells + [xx, yy] = np.meshgrid(mesh.nodes_x, mesh.nodes_y) + zz = -np.exp((xx**2 + yy**2) / 75**2) + mesh.nodes_z[-1] + + # Go from topo to actv cells + topo = np.c_[utils.mkvc(xx), utils.mkvc(yy), utils.mkvc(zz)] + actv = active_from_xyz(mesh, topo, "N") + nC = int(actv.sum()) + + # Create and array of observation points + xr = np.linspace(-20.0, 20.0, 20) + yr = np.linspace(-20.0, 20.0, 20) + X, Y = np.meshgrid(xr, yr) + + # Move the observation points 
5m above the topo + Z = -np.exp((X**2 + Y**2) / 75**2) + mesh.nodes_z[-1] + 5.0 + + # Create a MAGsurvey + locXYZ = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] + rxLoc = gravity.Point(locXYZ) + srcField = gravity.SourceField([rxLoc]) + survey = gravity.Survey(srcField) + + # We can now create a density model and generate data + # Here a simple block in half-space + model = np.zeros( + ( + mesh.shape_cells[0], + mesh.shape_cells[1], + mesh.shape_cells[2], ) - model[(midx - 2) : (midx + 2), (midy - 2) : (midy + 2), -6:-2] = 0.5 - model = utils.mkvc(model) - self.model = model[actv] - - # Create reduced identity map - idenMap = maps.IdentityMap(nP=nC) - - # Create the forward model operator - sim = gravity.Simulation3DIntegral( - self.mesh, - survey=survey, - rhoMap=idenMap, - ind_active=actv, - store_sensitivities="ram", - n_processes=None, - ) - - # Compute linear forward operator and compute some data - data = sim.make_synthetic_data( - self.model, relative_error=0.0, noise_floor=0.0005, add_noise=True - ) - - # Create a regularization - reg = regularization.Sparse(self.mesh, active_cells=actv, mapping=idenMap) - reg.norms = [0, 0, 0, 0] - reg.gradientType = "components" - - # Data misfit function - dmis = data_misfit.L2DataMisfit(simulation=sim, data=data) - - # Add directives to the inversion - opt = optimization.ProjectedGNCG( - maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3 - ) - invProb = inverse_problem.BaseInvProblem(dmis, reg, opt) - - # Here is where the norms are applied - starting_beta = directives.BetaEstimateMaxDerivative(10.0) - IRLS = directives.Update_IRLS() - update_Jacobi = directives.UpdatePreconditioner() - sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) - self.inv = inversion.BaseInversion( - invProb, - directiveList=[IRLS, sensitivity_weights, starting_beta, update_Jacobi], - ) - self.sim = sim - - def test_grav_inverse(self): - # Run the inversion - mrec = 
self.inv.run(self.model) - residual = np.linalg.norm(mrec - self.model) / np.linalg.norm(self.model) - - # import matplotlib.pyplot as plt - # plt.figure() - # ax = plt.subplot(1, 2, 1) - # self.mesh.plot_slice(self.actvMap*mrec, ax=ax, clim=(0, 0.5), normal="Y") - # ax = plt.subplot(1, 2, 2) - # self.mesh.plot_slice(self.actvMap*self.model, ax=ax, clim=(0, 0.5), normal="Y") - # plt.show() - - self.assertTrue(residual < 0.05) - - def tearDown(self): - # Clean up the working directory - if self.sim.store_sensitivities == "disk": - shutil.rmtree(self.sim.sensitivity_path) - - -if __name__ == "__main__": - unittest.main() + ) + model[(midx - 2) : (midx + 2), (midy - 2) : (midy + 2), -6:-2] = 0.5 + model = utils.mkvc(model) + model = model[actv] + + # Create reduced identity map + idenMap = maps.IdentityMap(nP=nC) + + # Create the forward model operator + kwargs = dict() + if engine == "geoana": + kwargs["n_processes"] = None + sim = gravity.Simulation3DIntegral( + mesh, + survey=survey, + rhoMap=idenMap, + ind_active=actv, + store_sensitivities="ram", + engine=engine, + **kwargs, + ) + + # Compute linear forward operator and compute some data + data = sim.make_synthetic_data( + model, relative_error=0.0, noise_floor=0.0005, add_noise=True, random_seed=2 + ) + + # Create a regularization + reg = regularization.Sparse(mesh, active_cells=actv, mapping=idenMap) + reg.norms = [0, 0, 0, 0] + reg.gradient_type = "components" + + # Data misfit function + dmis = data_misfit.L2DataMisfit(simulation=sim, data=data) + + # Add directives to the inversion + opt = optimization.ProjectedGNCG( + maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3 + ) + invProb = inverse_problem.BaseInvProblem(dmis, reg, opt) + + # Here is where the norms are applied + starting_beta = directives.BetaEstimateMaxDerivative(10.0) + IRLS = directives.Update_IRLS() + update_Jacobi = directives.UpdatePreconditioner() + sensitivity_weights = 
directives.UpdateSensitivityWeights(every_iteration=False) + inv = inversion.BaseInversion( + invProb, + directiveList=[IRLS, sensitivity_weights, starting_beta, update_Jacobi], + ) + sim = sim + + # Run the inversion + mrec = inv.run(model) + residual = np.linalg.norm(mrec - model) / np.linalg.norm(model) + + # Assert result + assert np.all(residual < 0.05) From 627f03a7ce30e286aa82e312d81fd6141fd3f7cb Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 19 Sep 2023 11:45:36 -0700 Subject: [PATCH 030/164] Move conversion factor to a private function --- SimPEG/potential_fields/gravity/simulation.py | 29 ++++++++++--------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index 5111539ad5..ef217aeadb 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -212,6 +212,19 @@ def _fill_sensitivity_matrix( _forward_gravity_serial = jit(nopython=True, parallel=False)(_forward_gravity) +def _get_conversion_factor(component): + """ + Return conversion factor for the given component + """ + if component in ("gx", "gy", "gz"): + conversion_factor = 1e8 + elif component in ("gxx", "gyy", "gzz", "gxy", "gxz", "gyz", "guv"): + conversion_factor = 1e12 + else: + raise ValueError(f"Invalid component '{component}'.") + return conversion_factor + + class Simulation3DIntegral(BasePFSimulation): """ Gravity simulation in integral form. 
@@ -539,7 +552,7 @@ def _forward(self, densities): n_elements = n_components * receivers.shape[0] for i, component in enumerate(components): kernel_func = CHOCLO_KERNELS[component] - conversion_factor = self._get_conversion_factor(component) + conversion_factor = _get_conversion_factor(component) vector_slice = slice( index_offset + i, index_offset + n_elements, n_components ) @@ -584,7 +597,7 @@ def _sensitivity_matrix(self): n_rows = n_components * receivers.shape[0] for i, component in enumerate(components): kernel_func = CHOCLO_KERNELS[component] - conversion_factor = self._get_conversion_factor(component) + conversion_factor = _get_conversion_factor(component) matrix_slice = slice( index_offset + i, index_offset + n_rows, n_components ) @@ -671,18 +684,6 @@ def _get_components_and_receivers(self): for receiver_object in self.survey.source_field.receiver_list: yield receiver_object.components, receiver_object.locations - def _get_conversion_factor(self, component): - """ - Return conversion factor for the given component - """ - if component in ("gx", "gy", "gz"): - conversion_factor = 1e8 - elif component in ("gxx", "gyy", "gzz", "gxy", "gxz", "gyz", "guv"): - conversion_factor = 1e12 - else: - raise ValueError(f"Invalid component '{component}'.") - return conversion_factor - class SimulationEquivalentSourceLayer( BaseEquivalentSourceLayerSimulation, Simulation3DIntegral From 67bea68b88e5cdf170a0aa6df149303306349956 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 19 Sep 2023 11:47:30 -0700 Subject: [PATCH 031/164] Specify type of warning --- SimPEG/potential_fields/gravity/simulation.py | 1 + 1 file changed, 1 insertion(+) diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index ef217aeadb..96068ff9c3 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -331,6 +331,7 @@ def _sanity_checks_engine(self, kwargs): warnings.warn( "The 
'n_processes' will be ignored when selecting 'choclo' as the " "engine in the gravity simulation.", + UserWarning, stacklevel=1, ) # Sanity checks for sensitivity_path when using choclo and storing in disk From 81480002fb753f161802d8922a3f29d504c8c4dd Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 19 Sep 2023 11:48:18 -0700 Subject: [PATCH 032/164] Extend tests Add tests for sanity checks of the engine and some other attributes. Add tests for running the forward in parallel and serial. Add test for conversion factor. --- tests/pf/test_forward_Grav_Linear.py | 184 ++++++++++++++++++++++++--- 1 file changed, 167 insertions(+), 17 deletions(-) diff --git a/tests/pf/test_forward_Grav_Linear.py b/tests/pf/test_forward_Grav_Linear.py index 76af3c9d28..512f9b4efe 100644 --- a/tests/pf/test_forward_Grav_Linear.py +++ b/tests/pf/test_forward_Grav_Linear.py @@ -1,4 +1,5 @@ import unittest +from unittest.mock import patch import pytest import discretize from SimPEG import maps @@ -24,8 +25,8 @@ def blocks(self): @pytest.fixture def mesh(self): - """Sample mesh and density array.""" - # Define a mesh + """Sample mesh.""" + # Define a Tensor mesh cs = 0.2 hxind, hyind, hzind = tuple([(cs, 41)] for _ in range(3)) mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") @@ -93,11 +94,16 @@ def get_analytic_solution(self, blocks, survey): fields *= 1e9 # convert to Eotvos from 1/s^2 return fields - @pytest.mark.parametrize("engine", ("geoana", "choclo")) + @pytest.mark.parametrize( + "engine, parallelism", + [("geoana", None), ("geoana", 1), ("choclo", False), ("choclo", True)], + ids=["geoana_serial", "geoana_parallel", "choclo_serial", "choclo_parallel"], + ) @pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) def test_accelerations_vs_analytic( self, engine, + parallelism, store_sensitivities, tmp_path, blocks, @@ -117,11 +123,13 @@ def test_accelerations_vs_analytic( survey = gravity.Survey(sources) # Create reduced identity map for 
Linear Problem idenMap = maps.IdentityMap(nP=int(sum(active_cells))) - # Define sensitivity_path - sensitivity_path = tmp_path - if engine == "choclo": - sensitivity_path /= "sensitivity_choclo" # Create simulation + if engine == "choclo": + sensitivity_path = tmp_path / "sensitivity_choclo" + kwargs = dict(choclo_parallel=parallelism) + else: + sensitivity_path = tmp_path + kwargs = dict(n_processes=parallelism) sim = gravity.Simulation3DIntegral( mesh, survey=survey, @@ -131,6 +139,7 @@ def test_accelerations_vs_analytic( engine=engine, sensitivity_path=str(sensitivity_path), sensitivity_dtype=np.float64, + **kwargs, ) data = sim.dpred(density) g_x, g_y, g_z = data[0::3], data[1::3], data[2::3] @@ -141,11 +150,16 @@ def test_accelerations_vs_analytic( np.testing.assert_allclose(g_y, solution[:, 1], rtol=rtol, atol=atol) np.testing.assert_allclose(g_z, solution[:, 2], rtol=rtol, atol=atol) - @pytest.mark.parametrize("engine", ("geoana", "choclo")) + @pytest.mark.parametrize( + "engine, parallelism", + [("geoana", None), ("geoana", 1), ("choclo", False), ("choclo", True)], + ids=["geoana_serial", "geoana_parallel", "choclo_serial", "choclo_parallel"], + ) @pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) def test_tensor_vs_analytic( self, engine, + parallelism, store_sensitivities, tmp_path, blocks, @@ -165,11 +179,13 @@ def test_tensor_vs_analytic( survey = gravity.Survey(sources) # Create reduced identity map for Linear Problem idenMap = maps.IdentityMap(nP=int(sum(active_cells))) - # Define sensitivity_path - sensitivity_path = tmp_path - if engine == "choclo": - sensitivity_path /= "sensitivity_choclo" # Create simulation + if engine == "choclo": + sensitivity_path = tmp_path / "sensitivity_choclo" + kwargs = dict(choclo_parallel=parallelism) + else: + sensitivity_path = tmp_path + kwargs = dict(n_processes=parallelism) sim = gravity.Simulation3DIntegral( mesh, survey=survey, @@ -179,6 +195,7 @@ def test_tensor_vs_analytic( 
engine=engine, sensitivity_path=str(sensitivity_path), sensitivity_dtype=np.float64, + **kwargs, ) data = sim.dpred(density) g_xx, g_xy, g_xz = data[0::6], data[1::6], data[2::6] @@ -193,11 +210,16 @@ def test_tensor_vs_analytic( np.testing.assert_allclose(g_yz, solution[..., 1, 2], rtol=rtol, atol=atol) np.testing.assert_allclose(g_zz, solution[..., 2, 2], rtol=rtol, atol=atol) - @pytest.mark.parametrize("engine", ("geoana", "choclo")) + @pytest.mark.parametrize( + "engine, parallelism", + [("geoana", 1), ("geoana", None), ("choclo", False), ("choclo", True)], + ids=["geoana_serial", "geoana_parallel", "choclo_serial", "choclo_parallel"], + ) @pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) def test_guv_vs_analytic( self, engine, + parallelism, store_sensitivities, tmp_path, blocks, @@ -217,11 +239,13 @@ def test_guv_vs_analytic( survey = gravity.Survey(sources) # Create reduced identity map for Linear Problem idenMap = maps.IdentityMap(nP=int(sum(active_cells))) - # Define sensitivity_path - sensitivity_path = tmp_path - if engine == "choclo": - sensitivity_path /= "sensitivity_choclo" # Create simulation + if engine == "choclo": + sensitivity_path = tmp_path / "sensitivity_choclo" + kwargs = dict(choclo_parallel=parallelism) + else: + sensitivity_path = tmp_path + kwargs = dict(n_processes=parallelism) sim = gravity.Simulation3DIntegral( mesh, survey=survey, @@ -231,6 +255,7 @@ def test_guv_vs_analytic( engine=engine, sensitivity_path=str(sensitivity_path), sensitivity_dtype=np.float64, + **kwargs, ) g_uv = sim.dpred(density) solution = self.get_analytic_solution(blocks, survey) @@ -241,6 +266,131 @@ def test_guv_vs_analytic( rtol, atol = 2e-6, 1e-6 np.testing.assert_allclose(g_uv, g_uv_solution, rtol=rtol, atol=atol) + def test_invalid_engine(self, mesh, density_and_active_cells, receivers_locations): + """Test if error is raised after invalid engine.""" + # Unpack fixtures + _, active_cells = density_and_active_cells + # Create 
survey + receivers = gravity.Point(receivers_locations, components="gz") + sources = gravity.SourceField([receivers]) + survey = gravity.Survey(sources) + # Create reduced identity map for Linear Problem + idenMap = maps.IdentityMap(nP=int(sum(active_cells))) + # Check if error is raised after an invalid engine is passed + engine = "invalid engine" + with pytest.raises(ValueError, match=f"Invalid engine '{engine}'"): + gravity.Simulation3DIntegral( + mesh, + survey=survey, + rhoMap=idenMap, + ind_active=active_cells, + engine=engine, + ) + + def test_choclo_and_n_proceesses( + self, mesh, density_and_active_cells, receivers_locations + ): + """Check if warning is raised after passing n_processes with choclo engine.""" + # Unpack fixtures + _, active_cells = density_and_active_cells + # Create survey + receivers = gravity.Point(receivers_locations, components="gz") + sources = gravity.SourceField([receivers]) + survey = gravity.Survey(sources) + # Create reduced identity map for Linear Problem + idenMap = maps.IdentityMap(nP=int(sum(active_cells))) + # Check if warning is raised + msg = "The 'n_processes' will be ignored when selecting 'choclo'" + with pytest.warns(UserWarning, match=msg): + gravity.Simulation3DIntegral( + mesh, + survey=survey, + rhoMap=idenMap, + ind_active=active_cells, + engine="choclo", + n_processes=2, + ) + + def test_choclo_and_sensitivity_path_as_dir( + self, mesh, density_and_active_cells, receivers_locations, tmp_path + ): + """ + Check if error is raised when sensitivity_path is a dir with choclo engine. 
+ """ + # Unpack fixtures + _, active_cells = density_and_active_cells + # Create survey + receivers = gravity.Point(receivers_locations, components="gz") + sources = gravity.SourceField([receivers]) + survey = gravity.Survey(sources) + # Create reduced identity map for Linear Problem + idenMap = maps.IdentityMap(nP=int(sum(active_cells))) + # Create a sensitivity_path directory + sensitivity_path = tmp_path / "sensitivity_dummy" + sensitivity_path.mkdir() + # Check if error is raised + msg = f"The passed sensitivity_path '{str(sensitivity_path)}' is a directory" + with pytest.raises(ValueError, match=msg): + gravity.Simulation3DIntegral( + mesh, + survey=survey, + rhoMap=idenMap, + ind_active=active_cells, + store_sensitivities="disk", + sensitivity_path=str(sensitivity_path), + engine="choclo", + ) + + @patch("SimPEG.potential_fields.gravity.simulation.choclo", None) + def test_choclo_missing(self, mesh, density_and_active_cells, receivers_locations): + """ + Check if error is raised when choclo is missing and chosen as engine. + """ + # Unpack fixtures + _, active_cells = density_and_active_cells + # Create survey + receivers = gravity.Point(receivers_locations, components="gz") + sources = gravity.SourceField([receivers]) + survey = gravity.Survey(sources) + # Create reduced identity map for Linear Problem + idenMap = maps.IdentityMap(nP=int(sum(active_cells))) + # Check if error is raised + msg = "The choclo package couldn't be found." 
+ with pytest.raises(ImportError, match=msg): + gravity.Simulation3DIntegral( + mesh, + survey=survey, + rhoMap=idenMap, + ind_active=active_cells, + engine="choclo", + ) + + +class TestConversionFactor: + """Test _get_conversion_factor function.""" + + @pytest.mark.parametrize( + "component", + ("gx", "gy", "gz", "gxx", "gyy", "gzz", "gxy", "gxz", "gyz", "guv"), + ) + def test_conversion_factor(self, component): + """ + Test _get_conversion_factor function with valid components + """ + conversion_factor = gravity.simulation._get_conversion_factor(component) + if len(component) == 2: + assert conversion_factor == 1e5 * 1e3 # SI to mGal and g/cc to kg/m3 + else: + assert conversion_factor == 1e9 * 1e3 # SI to Eotvos and g/cc to kg/m3 + + def test_invalid_conversion_factor(self): + """ + Test invalid conversion factor _get_conversion_factor function + """ + component = "invalid-component" + with pytest.raises(ValueError, match=f"Invalid component '{component}'"): + gravity.simulation._get_conversion_factor(component) + def test_ana_grav_forward(tmp_path): nx = 5 From 634d3a1efaa87fc0064978077138def3a978fa29 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 19 Sep 2023 12:19:13 -0700 Subject: [PATCH 033/164] Use sensitivity_dtype for the fields array in _forward --- SimPEG/potential_fields/gravity/simulation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index 96068ff9c3..0e095227c3 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -545,7 +545,7 @@ def _forward(self, densities): # Gather active nodes and the indices of the nodes for each active cell active_nodes, active_cell_nodes = self._get_active_nodes() # Allocate fields array - fields = np.zeros(self.survey.nD, dtype=np.float32) + fields = np.zeros(self.survey.nD, dtype=self.sensitivity_dtype) # Start filling the sensitivity 
matrix index_offset = 0 for components, receivers in self._get_components_and_receivers(): From 51e1616ff406c35367f166484796303c3df441b6 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 19 Sep 2023 12:20:35 -0700 Subject: [PATCH 034/164] Parametrize fixture to test with Tensor and Tree meshes --- tests/pf/test_forward_Grav_Linear.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/tests/pf/test_forward_Grav_Linear.py b/tests/pf/test_forward_Grav_Linear.py index 512f9b4efe..c51193b414 100644 --- a/tests/pf/test_forward_Grav_Linear.py +++ b/tests/pf/test_forward_Grav_Linear.py @@ -17,19 +17,25 @@ class TestsGravitySimulation: @pytest.fixture def blocks(self): """Synthetic blocks to build the sample model.""" - block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) - block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) + block1 = np.array([[-1.6, 1.6], [-1.6, 1.6], [-1.6, 1.6]]) + block2 = np.array([[-0.8, 0.8], [-0.8, 0.8], [-0.8, 0.8]]) rho1 = 1.0 rho2 = 2.0 return (block1, block2), (rho1, rho2) - @pytest.fixture - def mesh(self): + @pytest.fixture(params=("tensormesh", "treemesh")) + def mesh(self, blocks, request): """Sample mesh.""" - # Define a Tensor mesh cs = 0.2 - hxind, hyind, hzind = tuple([(cs, 41)] for _ in range(3)) - mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") + (block1, _), _ = blocks + if request.param == "tensormesh": + hxind, hyind, hzind = tuple([(cs, 42)] for _ in range(3)) + mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") + else: + h = cs * np.ones(64) + mesh = discretize.TreeMesh([h, h, h], origin="CCC") + x0, x1 = block1[:, 0], block1[:, 1] + mesh.refine_box(x0, x1, levels=9) return mesh @pytest.fixture From 5a733deb687a5e1218eb02d90e6d22c74f96d6ac Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 19 Sep 2023 12:52:14 -0700 Subject: [PATCH 035/164] Simplify some tests --- tests/pf/test_forward_Grav_Linear.py | 30 +++++++++++----------------- 1 file 
changed, 12 insertions(+), 18 deletions(-) diff --git a/tests/pf/test_forward_Grav_Linear.py b/tests/pf/test_forward_Grav_Linear.py index c51193b414..f36250a0e8 100644 --- a/tests/pf/test_forward_Grav_Linear.py +++ b/tests/pf/test_forward_Grav_Linear.py @@ -272,16 +272,15 @@ def test_guv_vs_analytic( rtol, atol = 2e-6, 1e-6 np.testing.assert_allclose(g_uv, g_uv_solution, rtol=rtol, atol=atol) - def test_invalid_engine(self, mesh, density_and_active_cells, receivers_locations): + def test_invalid_engine(self, mesh, receivers_locations): """Test if error is raised after invalid engine.""" - # Unpack fixtures - _, active_cells = density_and_active_cells # Create survey receivers = gravity.Point(receivers_locations, components="gz") sources = gravity.SourceField([receivers]) survey = gravity.Survey(sources) # Create reduced identity map for Linear Problem - idenMap = maps.IdentityMap(nP=int(sum(active_cells))) + active_cells = np.ones(mesh.n_cells, dtype=bool) + idenMap = maps.IdentityMap(nP=mesh.n_cells) # Check if error is raised after an invalid engine is passed engine = "invalid engine" with pytest.raises(ValueError, match=f"Invalid engine '{engine}'"): @@ -293,18 +292,15 @@ def test_invalid_engine(self, mesh, density_and_active_cells, receivers_location engine=engine, ) - def test_choclo_and_n_proceesses( - self, mesh, density_and_active_cells, receivers_locations - ): + def test_choclo_and_n_proceesses(self, mesh, receivers_locations): """Check if warning is raised after passing n_processes with choclo engine.""" - # Unpack fixtures - _, active_cells = density_and_active_cells # Create survey receivers = gravity.Point(receivers_locations, components="gz") sources = gravity.SourceField([receivers]) survey = gravity.Survey(sources) # Create reduced identity map for Linear Problem - idenMap = maps.IdentityMap(nP=int(sum(active_cells))) + active_cells = np.ones(mesh.n_cells, dtype=bool) + idenMap = maps.IdentityMap(nP=mesh.n_cells) # Check if warning is raised msg = 
"The 'n_processes' will be ignored when selecting 'choclo'" with pytest.warns(UserWarning, match=msg): @@ -318,19 +314,18 @@ def test_choclo_and_n_proceesses( ) def test_choclo_and_sensitivity_path_as_dir( - self, mesh, density_and_active_cells, receivers_locations, tmp_path + self, mesh, receivers_locations, tmp_path ): """ Check if error is raised when sensitivity_path is a dir with choclo engine. """ - # Unpack fixtures - _, active_cells = density_and_active_cells # Create survey receivers = gravity.Point(receivers_locations, components="gz") sources = gravity.SourceField([receivers]) survey = gravity.Survey(sources) # Create reduced identity map for Linear Problem - idenMap = maps.IdentityMap(nP=int(sum(active_cells))) + active_cells = np.ones(mesh.n_cells, dtype=bool) + idenMap = maps.IdentityMap(nP=mesh.n_cells) # Create a sensitivity_path directory sensitivity_path = tmp_path / "sensitivity_dummy" sensitivity_path.mkdir() @@ -348,18 +343,17 @@ def test_choclo_and_sensitivity_path_as_dir( ) @patch("SimPEG.potential_fields.gravity.simulation.choclo", None) - def test_choclo_missing(self, mesh, density_and_active_cells, receivers_locations): + def test_choclo_missing(self, mesh, receivers_locations): """ Check if error is raised when choclo is missing and chosen as engine. """ - # Unpack fixtures - _, active_cells = density_and_active_cells # Create survey receivers = gravity.Point(receivers_locations, components="gz") sources = gravity.SourceField([receivers]) survey = gravity.Survey(sources) # Create reduced identity map for Linear Problem - idenMap = maps.IdentityMap(nP=int(sum(active_cells))) + active_cells = np.ones(mesh.n_cells, dtype=bool) + idenMap = maps.IdentityMap(nP=mesh.n_cells) # Check if error is raised msg = "The choclo package couldn't be found." 
with pytest.raises(ImportError, match=msg): From 3c0c85f36e133aac982c2b80dddf6598d909cfbc Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 19 Sep 2023 12:57:21 -0700 Subject: [PATCH 036/164] Add tests for sensitivity_dtype --- tests/pf/test_forward_Grav_Linear.py | 69 ++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/tests/pf/test_forward_Grav_Linear.py b/tests/pf/test_forward_Grav_Linear.py index f36250a0e8..cf4c85556e 100644 --- a/tests/pf/test_forward_Grav_Linear.py +++ b/tests/pf/test_forward_Grav_Linear.py @@ -38,6 +38,11 @@ def mesh(self, blocks, request): mesh.refine_box(x0, x1, levels=9) return mesh + @pytest.fixture + def simple_mesh(self): + """Simpler sample mesh, just to use it as a placeholder in some tests.""" + return discretize.TensorMesh([5, 5, 5], "CCC") + @pytest.fixture def density_and_active_cells(self, mesh, blocks): """Sample density and active_cells arrays for the sample mesh.""" @@ -272,6 +277,70 @@ def test_guv_vs_analytic( rtol, atol = 2e-6, 1e-6 np.testing.assert_allclose(g_uv, g_uv_solution, rtol=rtol, atol=atol) + @pytest.mark.parametrize("engine", ("choclo", "geoana")) + @pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) + def test_sensitivity_dtype( + self, + engine, + store_sensitivities, + simple_mesh, + receivers_locations, + tmp_path, + ): + """Test sensitivity_dtype.""" + # Create survey + receivers = gravity.Point(receivers_locations, components="gz") + sources = gravity.SourceField([receivers]) + survey = gravity.Survey(sources) + # Create reduced identity map for Linear Problem + active_cells = np.ones(simple_mesh.n_cells, dtype=bool) + idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) + # Create simulation + sensitivity_path = tmp_path + if engine == "choclo": + sensitivity_path /= "dummy" + simulation = gravity.Simulation3DIntegral( + simple_mesh, + survey=survey, + rhoMap=idenMap, + ind_active=active_cells, + engine=engine, + 
store_sensitivities=store_sensitivities, + sensitivity_path=str(sensitivity_path), + ) + # sensitivity_dtype should be float64 when running forward only, + # but float32 in other cases + if store_sensitivities == "forward_only": + assert simulation.sensitivity_dtype is np.float64 + else: + assert simulation.sensitivity_dtype is np.float32 + + @pytest.mark.parametrize("invalid_dtype", (float, np.float16)) + def test_invalid_sensitivity_dtype_assignment( + self, simple_mesh, receivers_locations, invalid_dtype + ): + """ + Test invalid sensitivity_dtype assignment + """ + # Create survey + receivers = gravity.Point(receivers_locations, components="gz") + sources = gravity.SourceField([receivers]) + survey = gravity.Survey(sources) + # Create reduced identity map for Linear Problem + active_cells = np.ones(simple_mesh.n_cells, dtype=bool) + idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) + # Create simulation + simulation = gravity.Simulation3DIntegral( + simple_mesh, + survey=survey, + rhoMap=idenMap, + ind_active=active_cells, + ) + # Check if error is raised + msg = "sensitivity_dtype must be either np.float32 or np.float64." 
+ with pytest.raises(TypeError, match=msg): + simulation.sensitivity_dtype = invalid_dtype + def test_invalid_engine(self, mesh, receivers_locations): """Test if error is raised after invalid engine.""" # Create survey From 6d29ed7ecef6b32f9c7e87548296d2867a7cf11d Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 19 Sep 2023 13:00:47 -0700 Subject: [PATCH 037/164] Simplify some tests with the simple_mesh fixture --- tests/pf/test_forward_Grav_Linear.py | 32 ++++++++++++++-------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/pf/test_forward_Grav_Linear.py b/tests/pf/test_forward_Grav_Linear.py index cf4c85556e..e3de52e0de 100644 --- a/tests/pf/test_forward_Grav_Linear.py +++ b/tests/pf/test_forward_Grav_Linear.py @@ -341,40 +341,40 @@ def test_invalid_sensitivity_dtype_assignment( with pytest.raises(TypeError, match=msg): simulation.sensitivity_dtype = invalid_dtype - def test_invalid_engine(self, mesh, receivers_locations): + def test_invalid_engine(self, simple_mesh, receivers_locations): """Test if error is raised after invalid engine.""" # Create survey receivers = gravity.Point(receivers_locations, components="gz") sources = gravity.SourceField([receivers]) survey = gravity.Survey(sources) # Create reduced identity map for Linear Problem - active_cells = np.ones(mesh.n_cells, dtype=bool) - idenMap = maps.IdentityMap(nP=mesh.n_cells) + active_cells = np.ones(simple_mesh.n_cells, dtype=bool) + idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) # Check if error is raised after an invalid engine is passed engine = "invalid engine" with pytest.raises(ValueError, match=f"Invalid engine '{engine}'"): gravity.Simulation3DIntegral( - mesh, + simple_mesh, survey=survey, rhoMap=idenMap, ind_active=active_cells, engine=engine, ) - def test_choclo_and_n_proceesses(self, mesh, receivers_locations): + def test_choclo_and_n_proceesses(self, simple_mesh, receivers_locations): """Check if warning is raised after passing n_processes with 
choclo engine.""" # Create survey receivers = gravity.Point(receivers_locations, components="gz") sources = gravity.SourceField([receivers]) survey = gravity.Survey(sources) # Create reduced identity map for Linear Problem - active_cells = np.ones(mesh.n_cells, dtype=bool) - idenMap = maps.IdentityMap(nP=mesh.n_cells) + active_cells = np.ones(simple_mesh.n_cells, dtype=bool) + idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) # Check if warning is raised msg = "The 'n_processes' will be ignored when selecting 'choclo'" with pytest.warns(UserWarning, match=msg): gravity.Simulation3DIntegral( - mesh, + simple_mesh, survey=survey, rhoMap=idenMap, ind_active=active_cells, @@ -383,7 +383,7 @@ def test_choclo_and_n_proceesses(self, mesh, receivers_locations): ) def test_choclo_and_sensitivity_path_as_dir( - self, mesh, receivers_locations, tmp_path + self, simple_mesh, receivers_locations, tmp_path ): """ Check if error is raised when sensitivity_path is a dir with choclo engine. @@ -393,8 +393,8 @@ def test_choclo_and_sensitivity_path_as_dir( sources = gravity.SourceField([receivers]) survey = gravity.Survey(sources) # Create reduced identity map for Linear Problem - active_cells = np.ones(mesh.n_cells, dtype=bool) - idenMap = maps.IdentityMap(nP=mesh.n_cells) + active_cells = np.ones(simple_mesh.n_cells, dtype=bool) + idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) # Create a sensitivity_path directory sensitivity_path = tmp_path / "sensitivity_dummy" sensitivity_path.mkdir() @@ -402,7 +402,7 @@ def test_choclo_and_sensitivity_path_as_dir( msg = f"The passed sensitivity_path '{str(sensitivity_path)}' is a directory" with pytest.raises(ValueError, match=msg): gravity.Simulation3DIntegral( - mesh, + simple_mesh, survey=survey, rhoMap=idenMap, ind_active=active_cells, @@ -412,7 +412,7 @@ def test_choclo_and_sensitivity_path_as_dir( ) @patch("SimPEG.potential_fields.gravity.simulation.choclo", None) - def test_choclo_missing(self, mesh, receivers_locations): + def 
test_choclo_missing(self, simple_mesh, receivers_locations): """ Check if error is raised when choclo is missing and chosen as engine. """ @@ -421,13 +421,13 @@ def test_choclo_missing(self, mesh, receivers_locations): sources = gravity.SourceField([receivers]) survey = gravity.Survey(sources) # Create reduced identity map for Linear Problem - active_cells = np.ones(mesh.n_cells, dtype=bool) - idenMap = maps.IdentityMap(nP=mesh.n_cells) + active_cells = np.ones(simple_mesh.n_cells, dtype=bool) + idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) # Check if error is raised msg = "The choclo package couldn't be found." with pytest.raises(ImportError, match=msg): gravity.Simulation3DIntegral( - mesh, + simple_mesh, survey=survey, rhoMap=idenMap, ind_active=active_cells, From 85e678a434f4ddb95c635451e7b32d2a0f655c8e Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 19 Sep 2023 13:05:36 -0700 Subject: [PATCH 038/164] Remove old tests for the simulation --- tests/pf/test_forward_Grav_Linear.py | 186 --------------------------- 1 file changed, 186 deletions(-) diff --git a/tests/pf/test_forward_Grav_Linear.py b/tests/pf/test_forward_Grav_Linear.py index e3de52e0de..badfe827b8 100644 --- a/tests/pf/test_forward_Grav_Linear.py +++ b/tests/pf/test_forward_Grav_Linear.py @@ -1,4 +1,3 @@ -import unittest from unittest.mock import patch import pytest import discretize @@ -6,7 +5,6 @@ from SimPEG.potential_fields import gravity from geoana.gravity import Prism import numpy as np -import os class TestsGravitySimulation: @@ -459,187 +457,3 @@ def test_invalid_conversion_factor(self): component = "invalid-component" with pytest.raises(ValueError, match=f"Invalid component '{component}'"): gravity.simulation._get_conversion_factor(component) - - -def test_ana_grav_forward(tmp_path): - nx = 5 - ny = 5 - - # Define a mesh - cs = 0.2 - hxind = [(cs, 41)] - hyind = [(cs, 41)] - hzind = [(cs, 41)] - mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") - - # create a model of 
two blocks, 1 inside the other - block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) - block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) - - def get_block_inds(grid, block): - return np.where( - (grid[:, 0] > block[0, 0]) - & (grid[:, 0] < block[0, 1]) - & (grid[:, 1] > block[1, 0]) - & (grid[:, 1] < block[1, 1]) - & (grid[:, 2] > block[2, 0]) - & (grid[:, 2] < block[2, 1]) - ) - - block1_inds = get_block_inds(mesh.cell_centers, block1) - block2_inds = get_block_inds(mesh.cell_centers, block2) - - rho1 = 1.0 - rho2 = 2.0 - - model = np.zeros(mesh.n_cells) - model[block1_inds] = rho1 - model[block2_inds] = rho2 - - active_cells = model != 0.0 - model_reduced = model[active_cells] - - # Create reduced identity map for Linear Pproblem - idenMap = maps.IdentityMap(nP=int(sum(active_cells))) - - # Create plane of observations - xr = np.linspace(-20, 20, nx) - yr = np.linspace(-20, 20, ny) - X, Y = np.meshgrid(xr, yr) - Z = np.ones_like(X) * 3.0 - locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] - - receivers = gravity.Point(locXyz, components=["gx", "gy", "gz"]) - sources = gravity.SourceField([receivers]) - survey = gravity.Survey(sources) - - sim = gravity.Simulation3DIntegral( - mesh, - survey=survey, - rhoMap=idenMap, - ind_active=active_cells, - store_sensitivities="disk", - sensitivity_path=str(tmp_path) + os.sep, - ) - - with pytest.raises(TypeError): - sim.sensitivity_dtype = float - - assert sim.sensitivity_dtype is np.float32 - - data = sim.dpred(model_reduced) - d_x = data[0::3] - d_y = data[1::3] - d_z = data[2::3] - - # Compute analytical response from dense prism - prism_1 = Prism(block1[:, 0], block1[:, 1], rho1 * 1000) # g/cc to kg/m**3 - prism_2 = Prism(block2[:, 0], block2[:, 1], -rho1 * 1000) - prism_3 = Prism(block2[:, 0], block2[:, 1], rho2 * 1000) - - d = ( - prism_1.gravitational_field(locXyz) - + prism_2.gravitational_field(locXyz) - + prism_3.gravitational_field(locXyz) - ) * 1e5 # convert to mGal from m/s^2 - d = 
d.astype(sim.sensitivity_dtype) - np.testing.assert_allclose(d_x, d[:, 0], rtol=1e-9, atol=1e-6) - np.testing.assert_allclose(d_y, d[:, 1], rtol=1e-9, atol=1e-6) - np.testing.assert_allclose(d_z, d[:, 2], rtol=1e-9, atol=1e-6) - - -def test_ana_gg_forward(): - nx = 5 - ny = 5 - - # Define a mesh - cs = 0.2 - hxind = [(cs, 41)] - hyind = [(cs, 41)] - hzind = [(cs, 41)] - mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") - - # create a model of two blocks, 1 inside the other - block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) - block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) - - def get_block_inds(grid, block): - return np.where( - (grid[:, 0] > block[0, 0]) - & (grid[:, 0] < block[0, 1]) - & (grid[:, 1] > block[1, 0]) - & (grid[:, 1] < block[1, 1]) - & (grid[:, 2] > block[2, 0]) - & (grid[:, 2] < block[2, 1]) - ) - - block1_inds = get_block_inds(mesh.cell_centers, block1) - block2_inds = get_block_inds(mesh.cell_centers, block2) - - rho1 = 1.0 - rho2 = 2.0 - - model = np.zeros(mesh.n_cells) - model[block1_inds] = rho1 - model[block2_inds] = rho2 - - active_cells = model != 0.0 - model_reduced = model[active_cells] - - # Create reduced identity map for Linear Pproblem - idenMap = maps.IdentityMap(nP=int(sum(active_cells))) - - # Create plane of observations - xr = np.linspace(-20, 20, nx) - yr = np.linspace(-20, 20, ny) - X, Y = np.meshgrid(xr, yr) - Z = np.ones_like(X) * 3.0 - locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] - - receivers = gravity.Point( - locXyz, components=["gxx", "gxy", "gxz", "gyy", "gyz", "gzz"] - ) - sources = gravity.SourceField([receivers]) - survey = gravity.Survey(sources) - - sim = gravity.Simulation3DIntegral( - mesh, - survey=survey, - rhoMap=idenMap, - ind_active=active_cells, - store_sensitivities="forward_only", - n_processes=None, - ) - - # forward only should default to np.float64 - assert sim.sensitivity_dtype is np.float64 - - data = sim.dpred(model_reduced) - d_xx = data[0::6] - d_xy = 
data[1::6] - d_xz = data[2::6] - d_yy = data[3::6] - d_yz = data[4::6] - d_zz = data[5::6] - - # Compute analytical response from dense prism - prism_1 = Prism(block1[:, 0], block1[:, 1], rho1 * 1000) # g/cc to kg/m**3 - prism_2 = Prism(block2[:, 0], block2[:, 1], -rho1 * 1000) - prism_3 = Prism(block2[:, 0], block2[:, 1], rho2 * 1000) - - d = ( - prism_1.gravitational_gradient(locXyz) - + prism_2.gravitational_gradient(locXyz) - + prism_3.gravitational_gradient(locXyz) - ) * 1e9 # convert to Eotvos from 1/s^2 - - np.testing.assert_allclose(d_xx, d[..., 0, 0], rtol=1e-10, atol=1e-12) - np.testing.assert_allclose(d_xy, d[..., 0, 1], rtol=1e-10, atol=1e-12) - np.testing.assert_allclose(d_xz, d[..., 0, 2], rtol=1e-10, atol=1e-12) - np.testing.assert_allclose(d_yy, d[..., 1, 1], rtol=1e-10, atol=1e-12) - np.testing.assert_allclose(d_yz, d[..., 1, 2], rtol=1e-10, atol=1e-12) - np.testing.assert_allclose(d_zz, d[..., 2, 2], rtol=1e-10, atol=1e-12) - - -if __name__ == "__main__": - unittest.main() From c7e74adcfe848f42f7f5a13eda663e97a1517418 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 20 Sep 2023 09:39:58 -0700 Subject: [PATCH 039/164] Remove trivial assignment in refactorized test --- tests/pf/test_grav_inversion_linear.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/pf/test_grav_inversion_linear.py b/tests/pf/test_grav_inversion_linear.py index ab9eafe7f1..c67a8ba474 100644 --- a/tests/pf/test_grav_inversion_linear.py +++ b/tests/pf/test_grav_inversion_linear.py @@ -111,7 +111,6 @@ def test_gravity_inversion_linear(engine): invProb, directiveList=[IRLS, sensitivity_weights, starting_beta, update_Jacobi], ) - sim = sim # Run the inversion mrec = inv.run(model) From dfec5d8225f543eab5af778d3a8917c9cac618b7 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 20 Sep 2023 10:10:24 -0700 Subject: [PATCH 040/164] Revert make_synthetic_data method Remove the `random_seed` argument added to the `make_synthetic_data` method. 
The change was moved to a separate PR to check the reason why some tests were failing in CI. --- SimPEG/simulation.py | 12 ++---------- tests/pf/test_grav_inversion_linear.py | 2 +- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/SimPEG/simulation.py b/SimPEG/simulation.py index 722b8eaa92..aece3dbc65 100644 --- a/SimPEG/simulation.py +++ b/SimPEG/simulation.py @@ -305,14 +305,7 @@ def residual(self, m, dobs, f=None): return mkvc(self.dpred(m, f=f) - dobs) def make_synthetic_data( - self, - m, - relative_error=0.05, - noise_floor=0.0, - f=None, - add_noise=False, - random_seed=None, - **kwargs, + self, m, relative_error=0.05, noise_floor=0.0, f=None, add_noise=False, **kwargs ): """ Make synthetic data given a model, and a standard deviation. @@ -335,8 +328,7 @@ def make_synthetic_data( if add_noise is True: std = np.sqrt((relative_error * np.abs(dclean)) ** 2 + noise_floor**2) - random_num_generator = np.random.default_rng(seed=random_seed) - noise = random_num_generator.normal(loc=0, scale=std, size=dclean.shape) + noise = std * np.random.randn(*dclean.shape) dobs = dclean + noise else: dobs = dclean diff --git a/tests/pf/test_grav_inversion_linear.py b/tests/pf/test_grav_inversion_linear.py index c67a8ba474..573502bf61 100644 --- a/tests/pf/test_grav_inversion_linear.py +++ b/tests/pf/test_grav_inversion_linear.py @@ -85,7 +85,7 @@ def test_gravity_inversion_linear(engine): # Compute linear forward operator and compute some data data = sim.make_synthetic_data( - model, relative_error=0.0, noise_floor=0.0005, add_noise=True, random_seed=2 + model, relative_error=0.0, noise_floor=0.0005, add_noise=True ) # Create a regularization From 72ca7556ef97e608ff4ef9441cbc46ee8618669f Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 20 Sep 2023 10:22:32 -0700 Subject: [PATCH 041/164] Revert "Revert make_synthetic_data method" This reverts commit dfec5d8225f543eab5af778d3a8917c9cac618b7. 
--- SimPEG/simulation.py | 12 ++++++++++-- tests/pf/test_grav_inversion_linear.py | 2 +- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/SimPEG/simulation.py b/SimPEG/simulation.py index aece3dbc65..722b8eaa92 100644 --- a/SimPEG/simulation.py +++ b/SimPEG/simulation.py @@ -305,7 +305,14 @@ def residual(self, m, dobs, f=None): return mkvc(self.dpred(m, f=f) - dobs) def make_synthetic_data( - self, m, relative_error=0.05, noise_floor=0.0, f=None, add_noise=False, **kwargs + self, + m, + relative_error=0.05, + noise_floor=0.0, + f=None, + add_noise=False, + random_seed=None, + **kwargs, ): """ Make synthetic data given a model, and a standard deviation. @@ -328,7 +335,8 @@ def make_synthetic_data( if add_noise is True: std = np.sqrt((relative_error * np.abs(dclean)) ** 2 + noise_floor**2) - noise = std * np.random.randn(*dclean.shape) + random_num_generator = np.random.default_rng(seed=random_seed) + noise = random_num_generator.normal(loc=0, scale=std, size=dclean.shape) dobs = dclean + noise else: dobs = dclean diff --git a/tests/pf/test_grav_inversion_linear.py b/tests/pf/test_grav_inversion_linear.py index 573502bf61..c67a8ba474 100644 --- a/tests/pf/test_grav_inversion_linear.py +++ b/tests/pf/test_grav_inversion_linear.py @@ -85,7 +85,7 @@ def test_gravity_inversion_linear(engine): # Compute linear forward operator and compute some data data = sim.make_synthetic_data( - model, relative_error=0.0, noise_floor=0.0005, add_noise=True + model, relative_error=0.0, noise_floor=0.0005, add_noise=True, random_seed=2 ) # Create a regularization From 4874a284062f0ac6d73cc2a0f44313b73a42bfff Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 20 Sep 2023 10:26:34 -0700 Subject: [PATCH 042/164] SHA triaxial magnetic forward modelling --- .../potential_fields/magnetics/receivers.py | 3 + .../potential_fields/magnetics/simulation.py | 100 +++++++++++++----- 2 files changed, 77 insertions(+), 26 deletions(-) diff --git 
a/SimPEG/potential_fields/magnetics/receivers.py b/SimPEG/potential_fields/magnetics/receivers.py index 8a290959c1..e9f22ea631 100644 --- a/SimPEG/potential_fields/magnetics/receivers.py +++ b/SimPEG/potential_fields/magnetics/receivers.py @@ -51,6 +51,9 @@ def __init__(self, locations, components="tmi", **kwargs): "by", "bz", "tmi", + "tmi_x", + "tmi_y", + "tmi_z", ], ) self.components = components diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 8d926d5e4a..d1e837f886 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -1,27 +1,24 @@ import numpy as np import scipy.sparse as sp -from scipy.constants import mu_0 - -from SimPEG import utils -from ..base import BasePFSimulation, BaseEquivalentSourceLayerSimulation -from ...base import BaseMagneticPDESimulation -from .survey import Survey -from .analytics import CongruousMagBC - -from SimPEG import Solver -from SimPEG import props - -from SimPEG.utils import mkvc, mat_utils, sdiag -from SimPEG.utils.code_utils import validate_string, deprecate_property, validate_type from geoana.kernels import ( - prism_fzz, - prism_fzx, - prism_fzy, - prism_fzzz, prism_fxxy, prism_fxxz, prism_fxyz, + prism_fzx, + prism_fzy, + prism_fzz, + prism_fzzz, ) +from scipy.constants import mu_0 + +from SimPEG import Solver, props, utils +from SimPEG.utils import mat_utils, mkvc, sdiag +from SimPEG.utils.code_utils import deprecate_property, validate_string, validate_type + +from ...base import BaseMagneticPDESimulation +from ..base import BaseEquivalentSourceLayerSimulation, BasePFSimulation +from .analytics import CongruousMagBC +from .survey import Survey class Simulation3DIntegral(BasePFSimulation): @@ -283,33 +280,33 @@ def evaluate_integral(self, receiver_location, components): # # inside an active cell. 
# node_evals["gzz"] = -node_evals["gxx"] - node_evals["gyy"] - if "bxx" in components: + if "bxx" in components or "tmi_x" in components: node_evals["gxxx"] = prism_fzzz(dy, dz, dx) node_evals["gxxy"] = prism_fxxy(dx, dy, dz) node_evals["gxxz"] = prism_fxxz(dx, dy, dz) - if "bxy" in components: + if any(s in components for s in ["bxy", "tmi_x", "tmi_y"]): if "gxxy" not in node_evals: node_evals["gxxy"] = prism_fxxy(dx, dy, dz) node_evals["gyyx"] = prism_fxxz(dy, dz, dx) node_evals["gxyz"] = prism_fxyz(dx, dy, dz) - if "bxz" in components: + if any(s in components for s in ["bxz", "tmi_x", "tmi_z"]): if "gxxz" not in node_evals: node_evals["gxxz"] = prism_fxxz(dx, dy, dz) if "gxyz" not in node_evals: node_evals["gxyz"] = prism_fxyz(dx, dy, dz) node_evals["gzzx"] = prism_fxxy(dz, dx, dy) - if "byy" in components: + if any(s in components for s in ["byy", "tmi_y"]): if "gyyx" not in node_evals: node_evals["gyyx"] = prism_fxxz(dy, dz, dx) node_evals["gyyy"] = prism_fzzz(dz, dx, dy) node_evals["gyyz"] = prism_fxxy(dy, dz, dx) - if "byz" in components: + if any(s in components for s in ["byz", "tmi_y", "tmi_z"]): if "gxyz" not in node_evals: node_evals["gxyz"] = prism_fxyz(dx, dy, dz) if "gyyz" not in node_evals: node_evals["gyyz"] = prism_fxxy(dy, dz, dx) node_evals["gzzy"] = prism_fxxz(dz, dx, dy) - if "bzz" in components: + if any(s in components for s in ["bzz", "tmi_z"]): if "gzzx" not in node_evals: node_evals["gzzx"] = prism_fxxy(dz, dx, dy) if "gzzy" not in node_evals: @@ -355,6 +352,57 @@ def evaluate_integral(self, receiver_location, components): + tmi[1] * node_evals["gyz"] + tmi[2] * node_evals["gzz"] ) + elif component == "tmi_x": + tmi = self.tmi_projection + vals_x = ( + tmi[0] * node_evals["gxxx"] + + tmi[1] * node_evals["gxxy"] + + tmi[2] * node_evals["gxxz"] + ) + vals_y = ( + tmi[0] * node_evals["gxxy"] + + tmi[1] * node_evals["gyyx"] + + tmi[2] * node_evals["gxyz"] + ) + vals_z = ( + tmi[0] * node_evals["gxxz"] + + tmi[1] * node_evals["gxyz"] + + 
tmi[2] * node_evals["gzzx"] + ) + elif component == "tmi_y": + tmi = self.tmi_projection + vals_x = ( + tmi[0] * node_evals["gxxy"] + + tmi[1] * node_evals["gyyx"] + + tmi[2] * node_evals["gxyz"] + ) + vals_y = ( + tmi[0] * node_evals["gyyx"] + + tmi[1] * node_evals["gyyy"] + + tmi[2] * node_evals["gyyz"] + ) + vals_z = ( + tmi[0] * node_evals["gxyz"] + + tmi[1] * node_evals["gyyz"] + + tmi[2] * node_evals["gzzy"] + ) + elif component == "tmi_z": + tmi = self.tmi_projection + vals_x = ( + tmi[0] * node_evals["gxxz"] + + tmi[1] * node_evals["gxyz"] + + tmi[2] * node_evals["gzzx"] + ) + vals_y = ( + tmi[0] * node_evals["gxyz"] + + tmi[1] * node_evals["gyyz"] + + tmi[2] * node_evals["gzzy"] + ) + vals_z = ( + tmi[0] * node_evals["gzzx"] + + tmi[1] * node_evals["gzzy"] + + tmi[2] * node_evals["gzzz"] + ) elif component == "bxx": vals_x = node_evals["gxxx"] vals_y = node_evals["gxxy"] @@ -949,11 +997,11 @@ def MagneticsDiffSecondaryInv(mesh, model, data, **kwargs): """ from SimPEG import ( - optimization, - regularization, directives, - objective_function, inversion, + objective_function, + optimization, + regularization, ) prob = Simulation3DDifferential(mesh, survey=data, mu=model) From 80c8affd307b3022f52f67813df1ec28d5960d82 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 20 Sep 2023 16:17:59 -0700 Subject: [PATCH 043/164] docs --- SimPEG/potential_fields/magnetics/receivers.py | 3 +++ SimPEG/potential_fields/magnetics/simulation.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/SimPEG/potential_fields/magnetics/receivers.py b/SimPEG/potential_fields/magnetics/receivers.py index e9f22ea631..6f885ad635 100644 --- a/SimPEG/potential_fields/magnetics/receivers.py +++ b/SimPEG/potential_fields/magnetics/receivers.py @@ -23,6 +23,9 @@ class Point(survey.BaseRx): - "byy" --> y-derivative of the y-component - "byz" --> z-derivative of the y-component (and visa versa) - "bzz" --> z-derivative of the z-component + - "tmi_x"--> x-derivative of the total 
magnetic intensity data + - "tmi_y"--> y-derivative of the total magnetic intensity data + - "tmi_z"--> z-derivative of the total magnetic intensity data Notes ----- diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index d1e837f886..67a5a4194b 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -243,7 +243,7 @@ def evaluate_integral(self, receiver_location, components): components: list[str] List of magnetic components chosen from: - 'bx', 'by', 'bz', 'bxx', 'bxy', 'bxz', 'byy', 'byz', 'bzz' + 'tmi', 'bx', 'by', 'bz', 'bxx', 'bxy', 'bxz', 'byy', 'byz', 'bzz', 'tmi_x', 'tmi_y', 'tmi_z' OUTPUT: Tx = [Txx Txy Txz] From de69e0369b2699dc623860a22636dd82dfafbd3c Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 20 Sep 2023 17:12:50 -0700 Subject: [PATCH 044/164] add tests --- tests/pf/test_forward_Mag_Linear.py | 128 +++++++++++++++++++++++++--- 1 file changed, 115 insertions(+), 13 deletions(-) diff --git a/tests/pf/test_forward_Mag_Linear.py b/tests/pf/test_forward_Mag_Linear.py index 3f412651db..74c1480380 100644 --- a/tests/pf/test_forward_Mag_Linear.py +++ b/tests/pf/test_forward_Mag_Linear.py @@ -1,10 +1,12 @@ import unittest + import discretize -from SimPEG import utils, maps -from SimPEG.potential_fields import magnetics as mag +import numpy as np from geoana.em.static import MagneticPrism from scipy.constants import mu_0 -import numpy as np + +from SimPEG import maps, utils +from SimPEG.potential_fields import magnetics as mag def test_ana_mag_forward(): @@ -47,7 +49,7 @@ def get_block_inds(grid, block): active_cells = model != 0.0 model_reduced = model[active_cells] - # Create reduced identity map for Linear Pproblem + # Create reduced identity map for Linear Problem idenMap = maps.IdentityMap(nP=int(sum(active_cells))) # Create plane of observations @@ -59,10 +61,12 @@ def get_block_inds(grid, block): components = ["bx", "by", "bz", 
"tmi"] rxLoc = mag.Point(locXyz, components=components) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + [rxLoc], amplitude=H0[0], inclination=H0[1], declination=H0[2] + ) survey = mag.Survey(srcField) - # Creat reduced identity map for Linear Pproblem + # Creat reduced identity map for Linear Problem idenMap = maps.IdentityMap(nP=int(sum(active_cells))) sim = mag.Simulation3DIntegral( @@ -101,6 +105,98 @@ def get_block_inds(grid, block): np.testing.assert_allclose(d_t, d @ tmi) +def test_ana_mag_tmi_grad_forward(): + nx = 5 + ny = 5 + + H0 = (50000.0, 60.0, 250.0) + b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) + chi1 = 0.01 + chi2 = 0.02 + + # Define a mesh + cs = 0.2 + hxind = [(cs, 41)] + hyind = [(cs, 41)] + hzind = [(cs, 41)] + mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") + + # create a model of two blocks, 1 inside the other + block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) + block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) + + def get_block_inds(grid, block): + return np.where( + (grid[:, 0] > block[0, 0]) + & (grid[:, 0] < block[0, 1]) + & (grid[:, 1] > block[1, 0]) + & (grid[:, 1] < block[1, 1]) + & (grid[:, 2] > block[2, 0]) + & (grid[:, 2] < block[2, 1]) + ) + + block1_inds = get_block_inds(mesh.cell_centers, block1) + block2_inds = get_block_inds(mesh.cell_centers, block2) + + model = np.zeros(mesh.n_cells) + model[block1_inds] = chi1 + model[block2_inds] = chi2 + + active_cells = model != 0.0 + model_reduced = model[active_cells] + + # Create reduced identity map for Linear Problem + idenMap = maps.IdentityMap(nP=int(sum(active_cells))) + + # Create plane of observations + xr = np.linspace(-20, 20, nx) + yr = np.linspace(-20, 20, ny) + X, Y = np.meshgrid(xr, yr) + Z = np.ones_like(X) * 3.0 + locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] + components = ["tmi_x", "tmi_y", "tmi_z"] + + rxLoc = mag.Point(locXyz, components=components) + srcField = 
mag.UniformBackgroundField( + [rxLoc], amplitude=H0[0], inclination=H0[1], declination=H0[2] + ) + survey = mag.Survey(srcField) + + # Creat reduced identity map for Linear Problem + idenMap = maps.IdentityMap(nP=int(sum(active_cells))) + + sim = mag.Simulation3DIntegral( + mesh, + survey=survey, + chiMap=idenMap, + ind_active=active_cells, + store_sensitivities="forward_only", + n_processes=None, + ) + + data = sim.dpred(model_reduced) + d_x = data[0::3] + d_y = data[1::3] + d_z = data[2::3] + + # Compute analytical response from magnetic prism + prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], chi1 * b0 / mu_0) + prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -chi1 * b0 / mu_0) + prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], chi2 * b0 / mu_0) + + d = ( + prism_1.magnetic_field_gradient(locXyz) + + prism_2.magnetic_field_gradient(locXyz) + + prism_3.magnetic_field_gradient(locXyz) + ) * mu_0 + tmi_x = (d[:, 0, 0] * b0[0] + d[:, 0, 1] * b0[1] + d[:, 0, 2] * b0[2]) / H0[0] + tmi_y = (d[:, 1, 0] * b0[0] + d[:, 1, 1] * b0[1] + d[:, 1, 2] * b0[2]) / H0[0] + tmi_z = (d[:, 2, 0] * b0[0] + d[:, 2, 1] * b0[1] + d[:, 2, 2] * b0[2]) / H0[0] + np.testing.assert_allclose(d_x, tmi_x, rtol=1e-10, atol=1e-12) + np.testing.assert_allclose(d_y, tmi_y, rtol=1e-10, atol=1e-12) + np.testing.assert_allclose(d_z, tmi_z, rtol=1e-10, atol=1e-12) + + def test_ana_mag_grad_forward(): nx = 5 ny = 5 @@ -141,7 +237,7 @@ def get_block_inds(grid, block): active_cells = model != 0.0 model_reduced = model[active_cells] - # Create reduced identity map for Linear Pproblem + # Create reduced identity map for Linear Problem idenMap = maps.IdentityMap(nP=int(sum(active_cells))) # Create plane of observations @@ -153,10 +249,12 @@ def get_block_inds(grid, block): components = ["bxx", "bxy", "bxz", "byy", "byz", "bzz"] rxLoc = mag.Point(locXyz, components=components) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + [rxLoc], amplitude=H0[0], 
inclination=H0[1], declination=H0[2] + ) survey = mag.Survey(srcField) - # Creat reduced identity map for Linear Pproblem + # Creat reduced identity map for Linear Problem idenMap = maps.IdentityMap(nP=int(sum(active_cells))) sim = mag.Simulation3DIntegral( @@ -245,10 +343,12 @@ def get_block_inds(grid, block): components = ["bx", "by", "bz", "tmi"] rxLoc = mag.Point(locXyz, components=components) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + [rxLoc], amplitude=H0[0], inclination=H0[1], declination=H0[2] + ) survey = mag.Survey(srcField) - # Create reduced identity map for Linear Pproblem + # Create reduced identity map for Linear Problem idenMap = maps.IdentityMap(nP=int(sum(active_cells)) * 3) sim = mag.Simulation3DIntegral( @@ -331,10 +431,12 @@ def get_block_inds(grid, block): components = ["bx", "by", "bz"] rxLoc = mag.Point(locXyz, components=components) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + [rxLoc], amplitude=H0[0], inclination=H0[1], declination=H0[2] + ) survey = mag.Survey(srcField) - # Create reduced identity map for Linear Pproblem + # Create reduced identity map for Linear Problem idenMap = maps.IdentityMap(nP=int(sum(active_cells)) * 3) sim = mag.Simulation3DIntegral( From 280d4009240537f35e242e35dd7cfaf68a7bba1d Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 26 Sep 2023 16:42:11 -0700 Subject: [PATCH 045/164] Start implementing mag simulation with Choclo --- .../potential_fields/magnetics/simulation.py | 299 +++++++++++++++++- 1 file changed, 297 insertions(+), 2 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 8d926d5e4a..45e0d2457a 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -23,6 +23,184 @@ prism_fxyz, ) +import discretize + +try: + import choclo +except ImportError: + # Define 
dummy jit decorator + def jit(*args, **kwargs): + return lambda f: f + + choclo = None +else: + from numba import jit, prange + + +def _fill_sensitivity_tmi_scalar( + receivers, + nodes, + sensitivity_matrix, + cell_nodes, + regional_field, + constant_factor, +): + """ + Fill the sensitivity matrix for TMI and scalar data (susceptibility only) + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_sensitivity = jit(nopython=True, parallel=True)( + _fill_sensitivity_matrix_tmi_scalar + ) + jit_sensitivity( + receivers, nodes, densities, fields, cell_nodes, bkg_field, const_factor + ) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + sensitivity_matrix : (n_receivers, n_active_nodes) array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + + Notes + ----- + The conversion factor is applied here to each row of the sensitivity matrix + because it's more efficient than doing it afterwards: it would require to + index the rows that corresponds to each component. 
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx = np.empty(n_nodes) + kyy = np.empty(n_nodes) + kzz = np.empty(n_nodes) + kxy = np.empty(n_nodes) + kxz = np.empty(n_nodes) + kyz = np.empty(n_nodes) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) + kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) + kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) + kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) + kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) + kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + node_index_0 = cell_nodes[k, 0] + node_index_1 = cell_nodes[k, 1] + node_index_2 = cell_nodes[k, 2] + node_index_3 = cell_nodes[k, 3] + node_index_4 = cell_nodes[k, 4] + node_index_5 = cell_nodes[k, 5] + node_index_6 = cell_nodes[k, 6] + node_index_7 = cell_nodes[k, 7] + uxx = ( + -kxx[node_index_0] + + kxx[node_index_1] + + kxx[node_index_2] + - kxx[node_index_3] + + kxx[node_index_4] + - kxx[node_index_5] + - kxx[node_index_6] + + kxx[node_index_7] + ) + uyy = ( + -kyy[node_index_0] + + kyy[node_index_1] + + kyy[node_index_2] + - kyy[node_index_3] + + kyy[node_index_4] + - kyy[node_index_5] + - kyy[node_index_6] + + kyy[node_index_7] + ) + uzz = ( + -kzz[node_index_0] + + kzz[node_index_1] + + kzz[node_index_2] + - kzz[node_index_3] + + kzz[node_index_4] + - 
kzz[node_index_5] + - kzz[node_index_6] + + kzz[node_index_7] + ) + uxy = ( + -kxy[node_index_0] + + kxy[node_index_1] + + kxy[node_index_2] + - kxy[node_index_3] + + kxy[node_index_4] + - kxy[node_index_5] + - kxy[node_index_6] + + kxy[node_index_7] + ) + uxz = ( + -kxz[node_index_0] + + kxz[node_index_1] + + kxz[node_index_2] + - kxz[node_index_3] + + kxz[node_index_4] + - kxz[node_index_5] + - kxz[node_index_6] + + kxz[node_index_7] + ) + uyz = ( + -kyz[node_index_0] + + kyz[node_index_1] + + kyz[node_index_2] + - kyz[node_index_3] + + kyz[node_index_4] + - kyz[node_index_5] + - kyz[node_index_6] + + kyz[node_index_7] + ) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + sensitivity_matrix[i, k] = ( + constant_factor + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + + +_fill_sensitivity_tmi_scalar_serial = jit(nopython=True, parallel=False)( + _fill_sensitivity_tmi_scalar +) +_fill_sensitivity_tmi_scalar_parallel = jit(nopython=True, parallel=True)( + _fill_sensitivity_tmi_scalar +) + class Simulation3DIntegral(BasePFSimulation): """ @@ -39,7 +217,9 @@ def __init__( chiMap=None, model_type="scalar", is_amplitude_data=False, - **kwargs + engine="geoana", + choclo_parallel=True, + **kwargs, ): self.model_type = model_type super().__init__(mesh, **kwargs) @@ -51,6 +231,14 @@ def __init__( self._gtg_diagonal = None self.is_amplitude_data = is_amplitude_data self.modelMap = self.chiMap + self.engine = engine + if self.engine == "choclo": + if choclo_parallel: + self._fill_sensitivity_tmi_scalar = ( + _fill_sensitivity_tmi_scalar_parallel + ) + else: + self._fill_sensitivity_tmi_scalar = _fill_sensitivity_tmi_scalar_serial @property def model_type(self): @@ -120,7 +308,10 @@ def fields(self, model): @property def G(self): if getattr(self, "_G", None) is None: - self._G = self.linear_operator() + if self.engine == "choclo": + self._G = self._sensitivity_matrix() + else: + self._G = 
self.linear_operator() return self._G @@ -446,6 +637,110 @@ def deleteTheseOnModelUpdate(self): deletes = deletes + ["_gtg_diagonal", "_ampDeriv"] return deletes + def _sensitivity_matrix(self): + """ + Compute the sensitivity matrix G + + Returns + ------- + (nD, n_active_cells) array + """ + # Gather active nodes and the indices of the nodes for each active cell + active_nodes, active_cell_nodes = self._get_active_nodes() + # Get regional field + regional_field = self.survey.source_field.b0 + # Allocate sensitivity matrix + shape = (self.survey.nD, self.nC) + sensitivity_matrix = np.empty(shape, dtype=self.sensitivity_dtype) + # Start filling the sensitivity matrix + for components, receivers in self._get_components_and_receivers(): + if components != ["tmi"]: + raise NotImplementedError( + "Other components besides 'tmi' aren't implemented yet." + ) + conversion_factor = mu_0 / 4 / np.pi + self._fill_sensitivity_tmi_scalar( + receivers, + active_nodes, + sensitivity_matrix, + active_cell_nodes, + regional_field, + conversion_factor, + ) + return sensitivity_matrix + + def _get_cell_nodes(self): + """ + Return indices of nodes for each cell in the mesh. + """ + if isinstance(self.mesh, discretize.TreeMesh): + cell_nodes = self.mesh.cell_nodes + elif isinstance(self.mesh, discretize.TensorMesh): + cell_nodes = self._get_tensormesh_cell_nodes() + else: + raise TypeError(f"Invalid mesh of type {self.mesh.__class__.__name__}.") + return cell_nodes + + def _get_tensormesh_cell_nodes(self): + """ + Quick implmentation of ``cell_nodes`` for a ``TensorMesh``. + + This method should be removed after ``TensorMesh.cell_nodes`` is added + in discretize. 
+ """ + inds = np.arange(self.mesh.n_nodes).reshape(self.mesh.shape_nodes, order="F") + cell_nodes = [ + inds[:-1, :-1, :-1].reshape(-1, order="F"), + inds[1:, :-1, :-1].reshape(-1, order="F"), + inds[:-1, 1:, :-1].reshape(-1, order="F"), + inds[1:, 1:, :-1].reshape(-1, order="F"), + inds[:-1, :-1, 1:].reshape(-1, order="F"), + inds[1:, :-1, 1:].reshape(-1, order="F"), + inds[:-1, 1:, 1:].reshape(-1, order="F"), + inds[1:, 1:, 1:].reshape(-1, order="F"), + ] + cell_nodes = np.stack(cell_nodes, axis=-1) + return cell_nodes + + def _get_active_nodes(self): + """ + Return locations of nodes only for active cells + + Also return an array containing the indices of the "active nodes" for + each active cell in the mesh + """ + # Get all nodes in the mesh + if isinstance(self.mesh, discretize.TreeMesh): + nodes = self.mesh.total_nodes + elif isinstance(self.mesh, discretize.TensorMesh): + nodes = self.mesh.nodes + else: + raise TypeError(f"Invalid mesh of type {self.mesh.__class__.__name__}.") + # Get original cell_nodes but only for active cells + cell_nodes = self._get_cell_nodes() + # If all cells in the mesh are active, return nodes and cell_nodes + if self.nC == self.mesh.n_cells: + return nodes, cell_nodes + # Keep only the cell_nodes for active cells + cell_nodes = cell_nodes[self.ind_active] + # Get the unique indices of the nodes that belong to every active cell + # (these indices correspond to the original `nodes` array) + unique_nodes, active_cell_nodes = np.unique(cell_nodes, return_inverse=True) + # Select only the nodes that belong to the active cells (active nodes) + active_nodes = nodes[unique_nodes] + # Reshape indices of active cells for each active cell in the mesh + active_cell_nodes = active_cell_nodes.reshape(cell_nodes.shape) + return active_nodes, active_cell_nodes + + def _get_components_and_receivers(self): + """Generator for receiver locations and their field components.""" + if not hasattr(self.survey, "source_field"): + raise AttributeError( + 
f"The survey '{self.survey}' has no 'source_field' attribute." + ) + for receiver_object in self.survey.source_field.receiver_list: + yield receiver_object.components, receiver_object.locations + class SimulationEquivalentSourceLayer( BaseEquivalentSourceLayerSimulation, Simulation3DIntegral From c0c0788d51277a3a401d6fdc4d055bea43d96ae8 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 27 Sep 2023 10:24:15 -0700 Subject: [PATCH 046/164] Run black after solving conflicts --- tests/pf/test_grav_inversion_linear.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pf/test_grav_inversion_linear.py b/tests/pf/test_grav_inversion_linear.py index cf3aa07191..c67a8ba474 100644 --- a/tests/pf/test_grav_inversion_linear.py +++ b/tests/pf/test_grav_inversion_linear.py @@ -117,4 +117,4 @@ def test_gravity_inversion_linear(engine): residual = np.linalg.norm(mrec - model) / np.linalg.norm(model) # Assert result - assert np.all(residual < 0.05) \ No newline at end of file + assert np.all(residual < 0.05) From fd01333a0e1aa0e22a67b4d0f6a8b2513a31f678 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 27 Sep 2023 11:34:37 -0700 Subject: [PATCH 047/164] Fix conversion factor --- SimPEG/potential_fields/magnetics/simulation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 45e0d2457a..2613be501f 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -658,7 +658,7 @@ def _sensitivity_matrix(self): raise NotImplementedError( "Other components besides 'tmi' aren't implemented yet." 
) - conversion_factor = mu_0 / 4 / np.pi + conversion_factor = 1 / 4 / np.pi self._fill_sensitivity_tmi_scalar( receivers, active_nodes, From e6ebeda7d8602c2b58facc8932e13125d9eca549 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 27 Sep 2023 11:47:47 -0700 Subject: [PATCH 048/164] Simplify the sensitivity matrix function Move repeated task of computing the integral from the kernels evaluates on the nodes to each cell to its own function. --- .../potential_fields/magnetics/simulation.py | 115 +++++++----------- 1 file changed, 41 insertions(+), 74 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 2613be501f..f48298bc38 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -97,12 +97,10 @@ def _fill_sensitivity_tmi_scalar( # Evaluate kernel function on each node, for each receiver location for i in prange(n_receivers): # Allocate vectors for kernels evaluated on mesh nodes - kxx = np.empty(n_nodes) - kyy = np.empty(n_nodes) - kzz = np.empty(n_nodes) - kxy = np.empty(n_nodes) - kxz = np.empty(n_nodes) - kyz = np.empty(n_nodes) + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) for j in range(n_nodes): dx = nodes[j, 0] - receivers[i, 0] dy = nodes[j, 1] - receivers[i, 1] @@ -116,74 +114,13 @@ def _fill_sensitivity_tmi_scalar( kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) # Compute sensitivity matrix elements from the kernel values for k in range(n_cells): - node_index_0 = cell_nodes[k, 0] - node_index_1 = cell_nodes[k, 1] - node_index_2 = cell_nodes[k, 2] - node_index_3 = cell_nodes[k, 3] - node_index_4 = cell_nodes[k, 4] - node_index_5 = cell_nodes[k, 5] - node_index_6 = cell_nodes[k, 6] - 
node_index_7 = cell_nodes[k, 7] - uxx = ( - -kxx[node_index_0] - + kxx[node_index_1] - + kxx[node_index_2] - - kxx[node_index_3] - + kxx[node_index_4] - - kxx[node_index_5] - - kxx[node_index_6] - + kxx[node_index_7] - ) - uyy = ( - -kyy[node_index_0] - + kyy[node_index_1] - + kyy[node_index_2] - - kyy[node_index_3] - + kyy[node_index_4] - - kyy[node_index_5] - - kyy[node_index_6] - + kyy[node_index_7] - ) - uzz = ( - -kzz[node_index_0] - + kzz[node_index_1] - + kzz[node_index_2] - - kzz[node_index_3] - + kzz[node_index_4] - - kzz[node_index_5] - - kzz[node_index_6] - + kzz[node_index_7] - ) - uxy = ( - -kxy[node_index_0] - + kxy[node_index_1] - + kxy[node_index_2] - - kxy[node_index_3] - + kxy[node_index_4] - - kxy[node_index_5] - - kxy[node_index_6] - + kxy[node_index_7] - ) - uxz = ( - -kxz[node_index_0] - + kxz[node_index_1] - + kxz[node_index_2] - - kxz[node_index_3] - + kxz[node_index_4] - - kxz[node_index_5] - - kxz[node_index_6] - + kxz[node_index_7] - ) - uyz = ( - -kyz[node_index_0] - + kyz[node_index_1] - + kyz[node_index_2] - - kyz[node_index_3] - + kyz[node_index_4] - - kyz[node_index_5] - - kyz[node_index_6] - + kyz[node_index_7] - ) + nodes_indices = cell_nodes[k, :] + uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) bx = uxx * fx + uxy * fy + uxz * fz by = uxy * fx + uyy * fy + uyz * fz bz = uxz * fx + uyz * fy + uzz * fz @@ -194,6 +131,36 @@ def _fill_sensitivity_tmi_scalar( ) +@jit(nopython=True) +def _kernels_in_nodes_to_cell(kernels, nodes_indices): + """ + Evaluate integral on a given cell from evaluation of kernels on nodes + + Parameters + ---------- + kernels : (n_active_nodes,) array + Array with kernel values on each one of the nodes in the mesh. 
+ nodes_indices : (8,) array of int + Indices of the nodes for the current cell in "F" order (x changes + faster than y, and y faster than z). + + Returns + ------- + float + """ + result = ( + -kernels[nodes_indices[0]] + + kernels[nodes_indices[1]] + + kernels[nodes_indices[2]] + - kernels[nodes_indices[3]] + + kernels[nodes_indices[4]] + - kernels[nodes_indices[5]] + - kernels[nodes_indices[6]] + + kernels[nodes_indices[7]] + ) + return result + + _fill_sensitivity_tmi_scalar_serial = jit(nopython=True, parallel=False)( _fill_sensitivity_tmi_scalar ) From 70c4853164ec71bc0593c64eb02fbc4c297b1941 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 27 Sep 2023 11:55:12 -0700 Subject: [PATCH 049/164] Fix example in docstring --- SimPEG/potential_fields/magnetics/simulation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index f48298bc38..fce32411eb 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -58,7 +58,7 @@ def _fill_sensitivity_tmi_scalar( _fill_sensitivity_matrix_tmi_scalar ) jit_sensitivity( - receivers, nodes, densities, fields, cell_nodes, bkg_field, const_factor + receivers, nodes, matrix, cell_nodes, regional_field, constant_factor ) Parameters From 994e6edbb048985d7ce6dd237d5c6cebd0735537 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 27 Sep 2023 12:25:48 -0700 Subject: [PATCH 050/164] Add forward function for tmi with susceptibilities --- .../potential_fields/magnetics/simulation.py | 144 +++++++++++++++++- 1 file changed, 143 insertions(+), 1 deletion(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index fce32411eb..457f3d3378 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -131,6 +131,102 @@ def 
_fill_sensitivity_tmi_scalar( ) +def _forward_tmi_scalar( + receivers, + nodes, + susceptibilities, + fields, + cell_nodes, + regional_field, + constant_factor, +): + """ + Forward model the TMI with scalar data (susceptibility only) + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_forward = jit(nopython=True, parallel=True)(_forward_tmi_scalar) + jit_forward( + receivers, nodes, mag_sus, fields, cell_nodes, regional_field, const_factor + ) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + susceptibilities : (n_active_cells) + Array with the susceptibility of each active cell in the mesh. + fields : (n_receivers) array + Array full of zeros where the TMI on each receiver will be stored. This + could be a preallocated array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + + Notes + ----- + The conversion factor is applied here to each row of the sensitivity matrix + because it's more efficient than doing it afterwards: it would require to + index the rows that corresponds to each component. 
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) + kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) + kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) + kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) + kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) + kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + fields[i] += ( + constant_factor + * susceptibilities[k] + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + + @jit(nopython=True) def _kernels_in_nodes_to_cell(kernels, 
nodes_indices): """ @@ -167,6 +263,8 @@ def _kernels_in_nodes_to_cell(kernels, nodes_indices): _fill_sensitivity_tmi_scalar_parallel = jit(nopython=True, parallel=True)( _fill_sensitivity_tmi_scalar ) +_forward_tmi_scalar_serial = jit(nopython=True, parallel=False)(_forward_tmi_scalar) +_forward_tmi_scalar_parallel = jit(nopython=True, parallel=True)(_forward_tmi_scalar) class Simulation3DIntegral(BasePFSimulation): @@ -204,8 +302,10 @@ def __init__( self._fill_sensitivity_tmi_scalar = ( _fill_sensitivity_tmi_scalar_parallel ) + self._forward_tmi_scalar = _forward_tmi_scalar_parallel else: self._fill_sensitivity_tmi_scalar = _fill_sensitivity_tmi_scalar_serial + self._forward_tmi_scalar = _forward_tmi_scalar_serial @property def model_type(self): @@ -261,7 +361,10 @@ def fields(self, model): self.model = model # model = self.chiMap * model if self.store_sensitivities == "forward_only": - fields = mkvc(self.linear_operator()) + if self.engine == "choclo": + fields = self._forward(self.chi) + else: + fields = mkvc(self.linear_operator()) else: fields = np.asarray( self.G @ self.chi.astype(self.sensitivity_dtype, copy=False) @@ -604,6 +707,45 @@ def deleteTheseOnModelUpdate(self): deletes = deletes + ["_gtg_diagonal", "_ampDeriv"] return deletes + def _forward(self, susceptibilities): + """ + Forward model the fields of active cells in the mesh on receivers. + + Parameters + ---------- + susceptibilities : (n_active_cells) array + Array containing the susceptibilities of the active cells in the + mesh, in SI units. + + Returns + ------- + (nD, ) array + Always return a ``np.float64`` array. 
+ """ + # Gather active nodes and the indices of the nodes for each active cell + active_nodes, active_cell_nodes = self._get_active_nodes() + # Get regional field + regional_field = self.survey.source_field.b0 + # Allocate fields array + fields = np.zeros(self.survey.nD, dtype=self.sensitivity_dtype) + # Start filling the sensitivity matrix + for components, receivers in self._get_components_and_receivers(): + if components != ["tmi"]: + raise NotImplementedError( + "Other components besides 'tmi' aren't implemented yet." + ) + constant_factor = 1 / 4 / np.pi + self._forward_tmi_scalar( + receivers, + active_nodes, + susceptibilities, + fields, + active_cell_nodes, + regional_field, + constant_factor, + ) + return fields + def _sensitivity_matrix(self): """ Compute the sensitivity matrix G From fc636d40244ad802b5e2aa0bf6d03c156cd7565f Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 12 Oct 2023 17:12:06 -0700 Subject: [PATCH 051/164] Set n_processes to None if passed and using choclo --- SimPEG/potential_fields/gravity/simulation.py | 2 ++ tests/pf/test_forward_Grav_Linear.py | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index 0e095227c3..2cf5e99ada 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -311,6 +311,7 @@ def _sanity_checks_engine(self, kwargs): Sanity checks for the engine parameter. Needs the kwargs passed to the __init__ method to raise some warnings. + Will set n_processes to None if it's present in kwargs. 
""" if self.engine not in ("choclo", "geoana"): raise ValueError( @@ -334,6 +335,7 @@ def _sanity_checks_engine(self, kwargs): UserWarning, stacklevel=1, ) + self.n_processes = None # Sanity checks for sensitivity_path when using choclo and storing in disk if self.engine == "choclo" and self.store_sensitivities == "disk": if os.path.isdir(self.sensitivity_path): diff --git a/tests/pf/test_forward_Grav_Linear.py b/tests/pf/test_forward_Grav_Linear.py index badfe827b8..db9388efa0 100644 --- a/tests/pf/test_forward_Grav_Linear.py +++ b/tests/pf/test_forward_Grav_Linear.py @@ -371,7 +371,7 @@ def test_choclo_and_n_proceesses(self, simple_mesh, receivers_locations): # Check if warning is raised msg = "The 'n_processes' will be ignored when selecting 'choclo'" with pytest.warns(UserWarning, match=msg): - gravity.Simulation3DIntegral( + simulation = gravity.Simulation3DIntegral( simple_mesh, survey=survey, rhoMap=idenMap, @@ -379,6 +379,8 @@ def test_choclo_and_n_proceesses(self, simple_mesh, receivers_locations): engine="choclo", n_processes=2, ) + # Check if n_processes was overwritten and set to None + assert simulation.n_processes is None def test_choclo_and_sensitivity_path_as_dir( self, simple_mesh, receivers_locations, tmp_path From 25a980cb0360f06d34f6f8e6e22eb75f9c91b446 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 19 Oct 2023 09:04:49 -0700 Subject: [PATCH 052/164] Draft tmi with vector data --- .../potential_fields/magnetics/simulation.py | 272 ++++++++++++++++-- 1 file changed, 251 insertions(+), 21 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 457f3d3378..104aaa1a39 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -131,6 +131,99 @@ def _fill_sensitivity_tmi_scalar( ) +def _fill_sensitivity_tmi_vector( + receivers, + nodes, + sensitivity_matrix, + cell_nodes, + regional_field, + constant_factor, 
+): + """ + Fill the sensitivity matrix for TMI and vector data (effective susceptibility) + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_sensitivity = jit(nopython=True, parallel=True)( + _fill_sensitivity_matrix_tmi_vector + ) + jit_sensitivity( + receivers, nodes, matrix, cell_nodes, regional_field, constant_factor + ) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + sensitivity_matrix : (n_receivers, 3 * n_active_nodes) array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + The number of columns is three times the number of active nodes since + the vector model has ``3 * n_active_nodes`` elements: three components + for each active cell. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + + Notes + ----- + The conversion factor is applied here to each row of the sensitivity matrix + because it's more efficient than doing it afterwards: it would require to + index the rows that corresponds to each component. 
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) + kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) + kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) + kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) + kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) + kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + # Fill the three consecutive elements in the sensitivity matrix + # that correspond to the current active cell + sensitivity_matrix[i, 3 * k] = constant_factor * bx + sensitivity_matrix[i, 3 * k + 1] = constant_factor * by + sensitivity_matrix[i, 3 * k + 2] = constant_factor * bz + + def _forward_tmi_scalar( receivers, nodes, @@ -227,6 +320,100 @@ def 
_forward_tmi_scalar( ) +def _forward_tmi_vector( + receivers, + nodes, + effective_susceptibilities, + fields, + cell_nodes, + regional_field, + constant_factor, +): + """ + Forward model the TMI with vector data (effective susceptibility) + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_forward = jit(nopython=True, parallel=True)(_forward_tmi_vector) + jit_forward( + receivers, nodes, effective_sus, fields, cell_nodes, regional_field, const_factor + ) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + effective_susceptibilities : (3 * n_active_cells) + Array with the effective susceptibility vector components of each + active cell in the mesh. + The three components of the effective susceptibility vector for each cell + should be adjacent. + fields : (n_receivers) array + Array full of zeros where the TMI on each receiver will be stored. This + could be a preallocated array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + + Notes + ----- + The conversion factor is applied here to each row of the sensitivity matrix + because it's more efficient than doing it afterwards: it would require to + index the rows that corresponds to each component. 
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) + kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) + kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) + kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) + kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) + kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + fields[i] += constant_factor * ( + bx * effective_susceptibilities[3 * k] + + by * effective_susceptibilities[3 * k + 1] + + bz * effective_susceptibilities[3 * k + 2] + ) + + @jit(nopython=True) def _kernels_in_nodes_to_cell(kernels, nodes_indices): """ @@ -263,8 +450,16 @@ def _kernels_in_nodes_to_cell(kernels, nodes_indices): 
_fill_sensitivity_tmi_scalar_parallel = jit(nopython=True, parallel=True)( _fill_sensitivity_tmi_scalar ) +_fill_sensitivity_tmi_vector_serial = jit(nopython=True, parallel=False)( + _fill_sensitivity_tmi_vector +) +_fill_sensitivity_tmi_vector_parallel = jit(nopython=True, parallel=True)( + _fill_sensitivity_tmi_vector +) _forward_tmi_scalar_serial = jit(nopython=True, parallel=False)(_forward_tmi_scalar) _forward_tmi_scalar_parallel = jit(nopython=True, parallel=True)(_forward_tmi_scalar) +_forward_tmi_vector_serial = jit(nopython=True, parallel=False)(_forward_tmi_vector) +_forward_tmi_vector_parallel = jit(nopython=True, parallel=True)(_forward_tmi_vector) class Simulation3DIntegral(BasePFSimulation): @@ -302,10 +497,16 @@ def __init__( self._fill_sensitivity_tmi_scalar = ( _fill_sensitivity_tmi_scalar_parallel ) + self._fill_sensitivity_tmi_vector = ( + _fill_sensitivity_tmi_vector_parallel + ) self._forward_tmi_scalar = _forward_tmi_scalar_parallel + self._forward_tmi_vector = _forward_tmi_vector_parallel else: self._fill_sensitivity_tmi_scalar = _fill_sensitivity_tmi_scalar_serial + self._fill_sensitivity_tmi_vector = _fill_sensitivity_tmi_vector_serial self._forward_tmi_scalar = _forward_tmi_scalar_serial + self._forward_tmi_vector = _forward_tmi_vector_serial @property def model_type(self): @@ -713,9 +914,14 @@ def _forward(self, susceptibilities): Parameters ---------- - susceptibilities : (n_active_cells) array - Array containing the susceptibilities of the active cells in the - mesh, in SI units. + susceptibilities : (n_active_cells) or (3 * n_active_cells) array + Array containing the susceptibilities, or effective + susceptibilities of the active cells in the mesh, in SI units. + Susceptibilities are expected if ``model_type`` is ``"scalar"``, + and the array should have ``n_active_cells`` elements. + Effective susceptibilities are expected if ``model_type`` is + ``"vector"``, and the array should have ``3 * n_active_cells`` + elements. 
Returns ------- @@ -735,15 +941,26 @@ def _forward(self, susceptibilities): "Other components besides 'tmi' aren't implemented yet." ) constant_factor = 1 / 4 / np.pi - self._forward_tmi_scalar( - receivers, - active_nodes, - susceptibilities, - fields, - active_cell_nodes, - regional_field, - constant_factor, - ) + if self.model_type == "scalar": + self._forward_tmi_scalar( + receivers, + active_nodes, + susceptibilities, + fields, + active_cell_nodes, + regional_field, + constant_factor, + ) + else: + self._forward_tmi_vector( + receivers, + active_nodes, + susceptibilities, + fields, + active_cell_nodes, + regional_field, + constant_factor, + ) return fields def _sensitivity_matrix(self): @@ -759,7 +976,10 @@ def _sensitivity_matrix(self): # Get regional field regional_field = self.survey.source_field.b0 # Allocate sensitivity matrix - shape = (self.survey.nD, self.nC) + if self.model_type == "scalar": + shape = (self.survey.nD, self.nC) + else: + shape = (self.survey.nD, 3 * self.nC) sensitivity_matrix = np.empty(shape, dtype=self.sensitivity_dtype) # Start filling the sensitivity matrix for components, receivers in self._get_components_and_receivers(): @@ -768,14 +988,24 @@ def _sensitivity_matrix(self): "Other components besides 'tmi' aren't implemented yet." 
) conversion_factor = 1 / 4 / np.pi - self._fill_sensitivity_tmi_scalar( - receivers, - active_nodes, - sensitivity_matrix, - active_cell_nodes, - regional_field, - conversion_factor, - ) + if self.model_type == "scalar": + self._fill_sensitivity_tmi_scalar( + receivers, + active_nodes, + sensitivity_matrix, + active_cell_nodes, + regional_field, + conversion_factor, + ) + else: + self._fill_sensitivity_tmi_vector( + receivers, + active_nodes, + sensitivity_matrix, + active_cell_nodes, + regional_field, + conversion_factor, + ) return sensitivity_matrix def _get_cell_nodes(self): From 98ed22e7a6465d9d0c2c2757647792fb6a537478 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 19 Oct 2023 09:25:18 -0700 Subject: [PATCH 053/164] Fix order of sensitivity matrix elements The effective susceptibility model is organized with all of the x components first, then all the y components and then all the z components. So for each cell, the sensitivity matrix elements that must be filled are not adjacent, but separated by `n_cells` elements. 
--- SimPEG/potential_fields/magnetics/simulation.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 104aaa1a39..9d25bcf12c 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -217,11 +217,11 @@ def _fill_sensitivity_tmi_vector( bx = uxx * fx + uxy * fy + uxz * fz by = uxy * fx + uyy * fy + uyz * fz bz = uxz * fx + uyz * fy + uzz * fz - # Fill the three consecutive elements in the sensitivity matrix - # that correspond to the current active cell - sensitivity_matrix[i, 3 * k] = constant_factor * bx - sensitivity_matrix[i, 3 * k + 1] = constant_factor * by - sensitivity_matrix[i, 3 * k + 2] = constant_factor * bz + # Fill the sensitivity matrix elements that correspond to the + # current active cell + sensitivity_matrix[i, k] = constant_factor * bx + sensitivity_matrix[i, k + n_cells] = constant_factor * by + sensitivity_matrix[i, k + 2 * n_cells] = constant_factor * bz def _forward_tmi_scalar( @@ -408,9 +408,9 @@ def _forward_tmi_vector( by = uxy * fx + uyy * fy + uyz * fz bz = uxz * fx + uyz * fy + uzz * fz fields[i] += constant_factor * ( - bx * effective_susceptibilities[3 * k] - + by * effective_susceptibilities[3 * k + 1] - + bz * effective_susceptibilities[3 * k + 2] + bx * effective_susceptibilities[k] + + by * effective_susceptibilities[k + n_cells] + + bz * effective_susceptibilities[k + 2 * n_cells] ) From ff8abb02fd5b86e8acf79fdb6e8c070b7d14a385 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 19 Oct 2023 09:42:21 -0700 Subject: [PATCH 054/164] Update docstring after fixing the order of expected model --- SimPEG/potential_fields/magnetics/simulation.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 9d25bcf12c..33f52c12ee 
100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -352,8 +352,9 @@ def _forward_tmi_vector( effective_susceptibilities : (3 * n_active_cells) Array with the effective susceptibility vector components of each active cell in the mesh. - The three components of the effective susceptibility vector for each cell - should be adjacent. + The order of the components should be the following: all x components + for every cell, then all y components for every cell and then all + z components for every cell. fields : (n_receivers) array Array full of zeros where the TMI on each receiver will be stored. This could be a preallocated array or a slice of it. From 58357e864b5f94f107bfede5a21a2a6c4b4ec0b5 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 19 Oct 2023 15:41:18 -0700 Subject: [PATCH 055/164] Remove notes from some docstrings --- .../potential_fields/magnetics/simulation.py | 24 ------------------- 1 file changed, 24 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 33f52c12ee..20f76a542b 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -79,12 +79,6 @@ def _fill_sensitivity_tmi_scalar( constant_factor : float Constant factor that will be used to multiply each element of the sensitivity matrix. - - Notes - ----- - The conversion factor is applied here to each row of the sensitivity matrix - because it's more efficient than doing it afterwards: it would require to - index the rows that corresponds to each component. """ n_receivers = receivers.shape[0] n_nodes = nodes.shape[0] @@ -176,12 +170,6 @@ def _fill_sensitivity_tmi_vector( constant_factor : float Constant factor that will be used to multiply each element of the sensitivity matrix. 
- - Notes - ----- - The conversion factor is applied here to each row of the sensitivity matrix - because it's more efficient than doing it afterwards: it would require to - index the rows that corresponds to each component. """ n_receivers = receivers.shape[0] n_nodes = nodes.shape[0] @@ -267,12 +255,6 @@ def _forward_tmi_scalar( constant_factor : float Constant factor that will be used to multiply each element of the sensitivity matrix. - - Notes - ----- - The conversion factor is applied here to each row of the sensitivity matrix - because it's more efficient than doing it afterwards: it would require to - index the rows that corresponds to each component. """ n_receivers = receivers.shape[0] n_nodes = nodes.shape[0] @@ -367,12 +349,6 @@ def _forward_tmi_vector( constant_factor : float Constant factor that will be used to multiply each element of the sensitivity matrix. - - Notes - ----- - The conversion factor is applied here to each row of the sensitivity matrix - because it's more efficient than doing it afterwards: it would require to - index the rows that corresponds to each component. 
""" n_receivers = receivers.shape[0] n_nodes = nodes.shape[0] From 7b76263b34cd1a701fc3fbf2247dec1b645ac509 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 19 Oct 2023 15:41:39 -0700 Subject: [PATCH 056/164] Simplify definition of the shape of sens matrix --- SimPEG/potential_fields/magnetics/simulation.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 20f76a542b..4e8d24fd57 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -954,9 +954,10 @@ def _sensitivity_matrix(self): regional_field = self.survey.source_field.b0 # Allocate sensitivity matrix if self.model_type == "scalar": - shape = (self.survey.nD, self.nC) + n_columns = self.nC else: - shape = (self.survey.nD, 3 * self.nC) + n_columns = 3 * self.nC + shape = (self.survey.nD, n_columns) sensitivity_matrix = np.empty(shape, dtype=self.sensitivity_dtype) # Start filling the sensitivity matrix for components, receivers in self._get_components_and_receivers(): From d744b31db28e9a912d08e2b9b7dd20ce693409ba Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 19 Oct 2023 15:42:36 -0700 Subject: [PATCH 057/164] Start drafting magnetic field for components --- .../potential_fields/magnetics/simulation.py | 168 +++++++++++++++--- 1 file changed, 148 insertions(+), 20 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 4e8d24fd57..38982e749d 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -36,6 +36,98 @@ def jit(*args, **kwargs): else: from numba import jit, prange + CHOCLO_SUPPORTED_COMPONENTS = {"tmi", "bx", "by", "bz"} + CHOCLO_KERNELS = { + "bx": (choclo.prism.kernel_ee, choclo.prism.kernel_en, choclo.prism.kernel_eu), + "by": (choclo.prism.kernel_en, 
choclo.prism.kernel_nn, choclo.prism.kernel_nu), + "bz": (choclo.prism.kernel_eu, choclo.prism.kernel_nu, choclo.prism.kernel_uu), + } + + +def _fill_sensitivity_mag_scalar( + receivers, + nodes, + sensitivity_matrix, + cell_nodes, + regional_field, + kernel_x, + kernel_y, + kernel_z, + constant_factor, +): + """ + Fill the sensitivity matrix for single mag component and scalar data + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_sensitivity = jit(nopython=True, parallel=True)( + _fill_sensitivity_matrix_scalar + ) + jit_sensitivity( + receivers, nodes, matrix, cell_nodes, regional_field, constant_factor + ) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + sensitivity_matrix : (n_receivers, n_active_nodes) array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_x, kernel_y, kernel_z : callable + Kernels used to compute the desired magnetic component. For example, + for computing bx we need to use ``kernel_x=kernel_ee``, + ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. 
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kx, ky, kz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kx[j] = kernel_x(dx, dy, dz, distance) + ky[j] = kernel_y(dx, dy, dz, distance) + kz[j] = kernel_z(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + ux = _kernels_in_nodes_to_cell(kx, nodes_indices) + uy = _kernels_in_nodes_to_cell(ky, nodes_indices) + uz = _kernels_in_nodes_to_cell(kz, nodes_indices) + sensitivity_matrix[i, k] = ( + constant_factor + * regional_field_amplitude + * (ux * fx + uy * fy + uz * fz) + ) + def _fill_sensitivity_tmi_scalar( receivers, @@ -427,6 +519,12 @@ def _kernels_in_nodes_to_cell(kernels, nodes_indices): _fill_sensitivity_tmi_scalar_parallel = jit(nopython=True, parallel=True)( _fill_sensitivity_tmi_scalar ) +_fill_sensitivity_mag_scalar_serial = jit(nopython=True, parallel=False)( + _fill_sensitivity_mag_scalar +) +_fill_sensitivity_mag_scalar_parallel = jit(nopython=True, parallel=True)( + _fill_sensitivity_mag_scalar +) _fill_sensitivity_tmi_vector_serial = jit(nopython=True, parallel=False)( _fill_sensitivity_tmi_vector ) @@ -477,11 +575,15 @@ def __init__( self._fill_sensitivity_tmi_vector = ( 
_fill_sensitivity_tmi_vector_parallel ) + self._fill_sensitivity_mag_scalar = ( + _fill_sensitivity_mag_scalar_parallel + ) self._forward_tmi_scalar = _forward_tmi_scalar_parallel self._forward_tmi_vector = _forward_tmi_vector_parallel else: self._fill_sensitivity_tmi_scalar = _fill_sensitivity_tmi_scalar_serial self._fill_sensitivity_tmi_vector = _fill_sensitivity_tmi_vector_serial + self._fill_sensitivity_mag_scalar = _fill_sensitivity_mag_scalar_serial self._forward_tmi_scalar = _forward_tmi_scalar_serial self._forward_tmi_vector = _forward_tmi_vector_serial @@ -959,31 +1061,57 @@ def _sensitivity_matrix(self): n_columns = 3 * self.nC shape = (self.survey.nD, n_columns) sensitivity_matrix = np.empty(shape, dtype=self.sensitivity_dtype) + # Define the constant factor + constant_factor = 1 / 4 / np.pi # Start filling the sensitivity matrix + index_offset = 0 for components, receivers in self._get_components_and_receivers(): - if components != ["tmi"]: + if not CHOCLO_SUPPORTED_COMPONENTS.issuperset(components): raise NotImplementedError( - "Other components besides 'tmi' aren't implemented yet." + f"Other components besides {CHOCLO_SUPPORTED_COMPONENTS} " + "aren't implemented yet." 
) - conversion_factor = 1 / 4 / np.pi - if self.model_type == "scalar": - self._fill_sensitivity_tmi_scalar( - receivers, - active_nodes, - sensitivity_matrix, - active_cell_nodes, - regional_field, - conversion_factor, - ) - else: - self._fill_sensitivity_tmi_vector( - receivers, - active_nodes, - sensitivity_matrix, - active_cell_nodes, - regional_field, - conversion_factor, + n_components = len(components) + n_rows = n_components * receivers.shape[0] + for i, component in enumerate(components): + matrix_slice = slice( + index_offset + i, index_offset + n_rows, n_components ) + if self.model_type == "scalar": + if component == "tmi": + self._fill_sensitivity_tmi_scalar( + receivers, + active_nodes, + sensitivity_matrix[matrix_slice, :], + active_cell_nodes, + regional_field, + constant_factor, + ) + else: + kernel_x, kernel_y, kernel_z = CHOCLO_KERNELS[component] + self._fill_sensitivity_mag_scalar( + receivers, + active_nodes, + sensitivity_matrix[matrix_slice, :], + active_cell_nodes, + regional_field, + kernel_x, + kernel_y, + kernel_z, + constant_factor, + ) + else: + if component != "tmi": + raise NotImplementedError() + self._fill_sensitivity_tmi_vector( + receivers, + active_nodes, + sensitivity_matrix[matrix_slice, :], + active_cell_nodes, + regional_field, + constant_factor, + ) + index_offset += n_rows return sensitivity_matrix def _get_cell_nodes(self): From b0022c184880773f3232fd34cff1fd29943fa696 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 19 Oct 2023 15:49:34 -0700 Subject: [PATCH 058/164] Move all numba functions to their own file Clean up the simulation.py file by moving all Numba functions to their own private file. 
--- .../magnetics/_numba_functions.py | 508 +++++++++++++++++ .../potential_fields/magnetics/simulation.py | 510 +----------------- 2 files changed, 520 insertions(+), 498 deletions(-) create mode 100644 SimPEG/potential_fields/magnetics/_numba_functions.py diff --git a/SimPEG/potential_fields/magnetics/_numba_functions.py b/SimPEG/potential_fields/magnetics/_numba_functions.py new file mode 100644 index 0000000000..ac9c3df84e --- /dev/null +++ b/SimPEG/potential_fields/magnetics/_numba_functions.py @@ -0,0 +1,508 @@ +""" +Numba functions for magnetic simulation of rectangular prisms +""" +import numpy as np + +try: + import choclo +except ImportError: + # Define dummy jit decorator + def jit(*args, **kwargs): + return lambda f: f + + choclo = None +else: + from numba import jit, prange + + +def _fill_sensitivity_mag_scalar( + receivers, + nodes, + sensitivity_matrix, + cell_nodes, + regional_field, + kernel_x, + kernel_y, + kernel_z, + constant_factor, +): + """ + Fill the sensitivity matrix for single mag component and scalar data + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_sensitivity = jit(nopython=True, parallel=True)( + _fill_sensitivity_matrix_scalar + ) + jit_sensitivity( + receivers, nodes, matrix, cell_nodes, regional_field, constant_factor + ) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + sensitivity_matrix : (n_receivers, n_active_nodes) array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. 
+ regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_x, kernel_y, kernel_z : callable + Kernels used to compute the desired magnetic component. For example, + for computing bx we need to use ``kernel_x=kernel_ee``, + ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kx, ky, kz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kx[j] = kernel_x(dx, dy, dz, distance) + ky[j] = kernel_y(dx, dy, dz, distance) + kz[j] = kernel_z(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + ux = _kernels_in_nodes_to_cell(kx, nodes_indices) + uy = _kernels_in_nodes_to_cell(ky, nodes_indices) + uz = _kernels_in_nodes_to_cell(kz, nodes_indices) + sensitivity_matrix[i, k] = ( + constant_factor + * regional_field_amplitude + * (ux * fx + uy * fy + uz * fz) + ) + + +def _fill_sensitivity_tmi_scalar( + receivers, + nodes, + sensitivity_matrix, + cell_nodes, + regional_field, + constant_factor, +): + """ + Fill the sensitivity matrix for TMI and 
scalar data (susceptibility only) + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_sensitivity = jit(nopython=True, parallel=True)( + _fill_sensitivity_matrix_tmi_scalar + ) + jit_sensitivity( + receivers, nodes, matrix, cell_nodes, regional_field, constant_factor + ) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + sensitivity_matrix : (n_receivers, n_active_nodes) array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. 
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) + kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) + kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) + kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) + kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) + kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + sensitivity_matrix[i, k] = ( + constant_factor + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + + +def _fill_sensitivity_tmi_vector( + receivers, + nodes, + sensitivity_matrix, + 
cell_nodes, + regional_field, + constant_factor, +): + """ + Fill the sensitivity matrix for TMI and vector data (effective susceptibility) + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_sensitivity = jit(nopython=True, parallel=True)( + _fill_sensitivity_matrix_tmi_vector + ) + jit_sensitivity( + receivers, nodes, matrix, cell_nodes, regional_field, constant_factor + ) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + sensitivity_matrix : (n_receivers, 3 * n_active_nodes) array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + The number of columns is three times the number of active nodes since + the vector model has ``3 * n_active_nodes`` elements: three components + for each active cell. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. 
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) + kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) + kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) + kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) + kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) + kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + # Fill the sensitivity matrix elements that correspond to the + # current active cell + sensitivity_matrix[i, k] = constant_factor * bx + sensitivity_matrix[i, k + n_cells] = constant_factor * by + sensitivity_matrix[i, k + 2 * n_cells] = constant_factor * bz + + +def _forward_tmi_scalar( + receivers, + nodes, + susceptibilities, + fields, + cell_nodes, + 
regional_field, + constant_factor, +): + """ + Forward model the TMI with scalar data (susceptibility only) + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_forward = jit(nopython=True, parallel=True)(_forward_tmi_scalar) + jit_forward( + receivers, nodes, mag_sus, fields, cell_nodes, regional_field, const_factor + ) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + susceptibilities : (n_active_cells) + Array with the susceptibility of each active cell in the mesh. + fields : (n_receivers) array + Array full of zeros where the TMI on each receiver will be stored. This + could be a preallocated array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. 
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) + kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) + kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) + kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) + kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) + kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + fields[i] += ( + constant_factor + * susceptibilities[k] + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + + +def _forward_tmi_vector( + receivers, + nodes, + 
effective_susceptibilities, + fields, + cell_nodes, + regional_field, + constant_factor, +): + """ + Forward model the TMI with vector data (effective susceptibility) + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_forward = jit(nopython=True, parallel=True)(_forward_tmi_vector) + jit_forward( + receivers, nodes, effective_sus, fields, cell_nodes, regional_field, const_factor + ) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + effective_susceptibilities : (3 * n_active_cells) + Array with the effective susceptibility vector components of each + active cell in the mesh. + The order of the components should be the following: all x components + for every cell, then all y components for every cell and then all + z components for every cell. + fields : (n_receivers) array + Array full of zeros where the TMI on each receiver will be stored. This + could be a preallocated array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. 
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) + kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) + kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) + kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) + kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) + kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + fields[i] += constant_factor * ( + bx * effective_susceptibilities[k] + + by * effective_susceptibilities[k + n_cells] + + bz * effective_susceptibilities[k + 2 * n_cells] + ) + + +@jit(nopython=True) +def _kernels_in_nodes_to_cell(kernels, nodes_indices): + """ + Evaluate integral on a given cell from evaluation of kernels on nodes + + Parameters + 
---------- + kernels : (n_active_nodes,) array + Array with kernel values on each one of the nodes in the mesh. + nodes_indices : (8,) array of int + Indices of the nodes for the current cell in "F" order (x changes + faster than y, and y faster than z). + + Returns + ------- + float + """ + result = ( + -kernels[nodes_indices[0]] + + kernels[nodes_indices[1]] + + kernels[nodes_indices[2]] + - kernels[nodes_indices[3]] + + kernels[nodes_indices[4]] + - kernels[nodes_indices[5]] + - kernels[nodes_indices[6]] + + kernels[nodes_indices[7]] + ) + return result + + +_fill_sensitivity_tmi_scalar_serial = jit(nopython=True, parallel=False)( + _fill_sensitivity_tmi_scalar +) +_fill_sensitivity_tmi_scalar_parallel = jit(nopython=True, parallel=True)( + _fill_sensitivity_tmi_scalar +) +_fill_sensitivity_mag_scalar_serial = jit(nopython=True, parallel=False)( + _fill_sensitivity_mag_scalar +) +_fill_sensitivity_mag_scalar_parallel = jit(nopython=True, parallel=True)( + _fill_sensitivity_mag_scalar +) +_fill_sensitivity_tmi_vector_serial = jit(nopython=True, parallel=False)( + _fill_sensitivity_tmi_vector +) +_fill_sensitivity_tmi_vector_parallel = jit(nopython=True, parallel=True)( + _fill_sensitivity_tmi_vector +) +_forward_tmi_scalar_serial = jit(nopython=True, parallel=False)(_forward_tmi_scalar) +_forward_tmi_scalar_parallel = jit(nopython=True, parallel=True)(_forward_tmi_scalar) +_forward_tmi_vector_serial = jit(nopython=True, parallel=False)(_forward_tmi_vector) +_forward_tmi_vector_parallel = jit(nopython=True, parallel=True)(_forward_tmi_vector) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 38982e749d..9366e1c4e9 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -28,13 +28,20 @@ try: import choclo except ImportError: - # Define dummy jit decorator - def jit(*args, **kwargs): - return lambda f: f - choclo = None else: - from numba 
import jit, prange + from ._numba_functions import ( + _fill_sensitivity_tmi_scalar_parallel, + _fill_sensitivity_tmi_vector_parallel, + _fill_sensitivity_mag_scalar_parallel, + _forward_tmi_scalar_parallel, + _forward_tmi_vector_parallel, + _fill_sensitivity_tmi_scalar_serial, + _fill_sensitivity_tmi_vector_serial, + _fill_sensitivity_mag_scalar_serial, + _forward_tmi_scalar_serial, + _forward_tmi_vector_serial, + ) CHOCLO_SUPPORTED_COMPONENTS = {"tmi", "bx", "by", "bz"} CHOCLO_KERNELS = { @@ -44,499 +51,6 @@ def jit(*args, **kwargs): } -def _fill_sensitivity_mag_scalar( - receivers, - nodes, - sensitivity_matrix, - cell_nodes, - regional_field, - kernel_x, - kernel_y, - kernel_z, - constant_factor, -): - """ - Fill the sensitivity matrix for single mag component and scalar data - - This function should be used with a `numba.jit` decorator, for example: - - ..code:: - - from numba import jit - - jit_sensitivity = jit(nopython=True, parallel=True)( - _fill_sensitivity_matrix_scalar - ) - jit_sensitivity( - receivers, nodes, matrix, cell_nodes, regional_field, constant_factor - ) - - Parameters - ---------- - receivers : (n_receivers, 3) array - Array with the locations of the receivers - nodes : (n_active_nodes, 3) array - Array with the location of the mesh nodes. - sensitivity_matrix : (n_receivers, n_active_nodes) array - Empty 2d array where the sensitivity matrix elements will be filled. - This could be a preallocated empty array or a slice of it. - cell_nodes : (n_active_cells, 8) array - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - kernel_x, kernel_y, kernel_z : callable - Kernels used to compute the desired magnetic component. For example, - for computing bx we need to use ``kernel_x=kernel_ee``, - ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``. 
- constant_factor : float - Constant factor that will be used to multiply each element of the - sensitivity matrix. - """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - fx, fy, fz = regional_field - regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) - fx /= regional_field_amplitude - fy /= regional_field_amplitude - fz /= regional_field_amplitude - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vectors for kernels evaluated on mesh nodes - kx, ky, kz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - # Allocate small vector for the nodes indices for a given cell - nodes_indices = np.empty(8, dtype=cell_nodes.dtype) - for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kx[j] = kernel_x(dx, dy, dz, distance) - ky[j] = kernel_y(dx, dy, dz, distance) - kz[j] = kernel_z(dx, dy, dz, distance) - # Compute sensitivity matrix elements from the kernel values - for k in range(n_cells): - nodes_indices = cell_nodes[k, :] - ux = _kernels_in_nodes_to_cell(kx, nodes_indices) - uy = _kernels_in_nodes_to_cell(ky, nodes_indices) - uz = _kernels_in_nodes_to_cell(kz, nodes_indices) - sensitivity_matrix[i, k] = ( - constant_factor - * regional_field_amplitude - * (ux * fx + uy * fy + uz * fz) - ) - - -def _fill_sensitivity_tmi_scalar( - receivers, - nodes, - sensitivity_matrix, - cell_nodes, - regional_field, - constant_factor, -): - """ - Fill the sensitivity matrix for TMI and scalar data (susceptibility only) - - This function should be used with a `numba.jit` decorator, for example: - - ..code:: - - from numba import jit - - jit_sensitivity = jit(nopython=True, parallel=True)( - _fill_sensitivity_matrix_tmi_scalar - ) - jit_sensitivity( - receivers, nodes, matrix, cell_nodes, regional_field, constant_factor - ) - - Parameters 
- ---------- - receivers : (n_receivers, 3) array - Array with the locations of the receivers - nodes : (n_active_nodes, 3) array - Array with the location of the mesh nodes. - sensitivity_matrix : (n_receivers, n_active_nodes) array - Empty 2d array where the sensitivity matrix elements will be filled. - This could be a preallocated empty array or a slice of it. - cell_nodes : (n_active_cells, 8) array - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - constant_factor : float - Constant factor that will be used to multiply each element of the - sensitivity matrix. - """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - fx, fy, fz = regional_field - regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) - fx /= regional_field_amplitude - fy /= regional_field_amplitude - fz /= regional_field_amplitude - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vectors for kernels evaluated on mesh nodes - kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - # Allocate small vector for the nodes indices for a given cell - nodes_indices = np.empty(8, dtype=cell_nodes.dtype) - for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) - kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) - kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) - kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) - kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) - kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) - 
# Compute sensitivity matrix elements from the kernel values - for k in range(n_cells): - nodes_indices = cell_nodes[k, :] - uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) - uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) - uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) - uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) - uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) - uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) - bx = uxx * fx + uxy * fy + uxz * fz - by = uxy * fx + uyy * fy + uyz * fz - bz = uxz * fx + uyz * fy + uzz * fz - sensitivity_matrix[i, k] = ( - constant_factor - * regional_field_amplitude - * (bx * fx + by * fy + bz * fz) - ) - - -def _fill_sensitivity_tmi_vector( - receivers, - nodes, - sensitivity_matrix, - cell_nodes, - regional_field, - constant_factor, -): - """ - Fill the sensitivity matrix for TMI and vector data (effective susceptibility) - - This function should be used with a `numba.jit` decorator, for example: - - ..code:: - - from numba import jit - - jit_sensitivity = jit(nopython=True, parallel=True)( - _fill_sensitivity_matrix_tmi_vector - ) - jit_sensitivity( - receivers, nodes, matrix, cell_nodes, regional_field, constant_factor - ) - - Parameters - ---------- - receivers : (n_receivers, 3) array - Array with the locations of the receivers - nodes : (n_active_nodes, 3) array - Array with the location of the mesh nodes. - sensitivity_matrix : (n_receivers, 3 * n_active_nodes) array - Empty 2d array where the sensitivity matrix elements will be filled. - This could be a preallocated empty array or a slice of it. - The number of columns is three times the number of active nodes since - the vector model has ``3 * n_active_nodes`` elements: three components - for each active cell. - cell_nodes : (n_active_cells, 8) array - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. 
- regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - constant_factor : float - Constant factor that will be used to multiply each element of the - sensitivity matrix. - """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - fx, fy, fz = regional_field - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vectors for kernels evaluated on mesh nodes - kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - # Allocate small vector for the nodes indices for a given cell - nodes_indices = np.empty(8, dtype=cell_nodes.dtype) - for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) - kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) - kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) - kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) - kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) - kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) - # Compute sensitivity matrix elements from the kernel values - for k in range(n_cells): - nodes_indices = cell_nodes[k, :] - uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) - uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) - uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) - uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) - uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) - uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) - bx = uxx * fx + uxy * fy + uxz * fz - by = uxy * fx + uyy * fy + uyz * fz - bz = uxz * fx + uyz * fy + uzz * fz - # Fill the sensitivity matrix elements that correspond to the - # current active cell - 
sensitivity_matrix[i, k] = constant_factor * bx - sensitivity_matrix[i, k + n_cells] = constant_factor * by - sensitivity_matrix[i, k + 2 * n_cells] = constant_factor * bz - - -def _forward_tmi_scalar( - receivers, - nodes, - susceptibilities, - fields, - cell_nodes, - regional_field, - constant_factor, -): - """ - Forward model the TMI with scalar data (susceptibility only) - - This function should be used with a `numba.jit` decorator, for example: - - ..code:: - - from numba import jit - - jit_forward = jit(nopython=True, parallel=True)(_forward_tmi_scalar) - jit_forward( - receivers, nodes, mag_sus, fields, cell_nodes, regional_field, const_factor - ) - - Parameters - ---------- - receivers : (n_receivers, 3) array - Array with the locations of the receivers - nodes : (n_active_nodes, 3) array - Array with the location of the mesh nodes. - susceptibilities : (n_active_cells) - Array with the susceptibility of each active cell in the mesh. - fields : (n_receivers) array - Array full of zeros where the TMI on each receiver will be stored. This - could be a preallocated array or a slice of it. - cell_nodes : (n_active_cells, 8) array - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - constant_factor : float - Constant factor that will be used to multiply each element of the - sensitivity matrix. 
- """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - fx, fy, fz = regional_field - regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) - fx /= regional_field_amplitude - fy /= regional_field_amplitude - fz /= regional_field_amplitude - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vectors for kernels evaluated on mesh nodes - kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - # Allocate small vector for the nodes indices for a given cell - nodes_indices = np.empty(8, dtype=cell_nodes.dtype) - for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) - kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) - kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) - kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) - kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) - kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) - # Compute sensitivity matrix elements from the kernel values - for k in range(n_cells): - nodes_indices = cell_nodes[k, :] - uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) - uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) - uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) - uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) - uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) - uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) - bx = uxx * fx + uxy * fy + uxz * fz - by = uxy * fx + uyy * fy + uyz * fz - bz = uxz * fx + uyz * fy + uzz * fz - fields[i] += ( - constant_factor - * susceptibilities[k] - * regional_field_amplitude - * (bx * fx + by * fy + bz * fz) - ) - - -def _forward_tmi_vector( - receivers, - nodes, - 
effective_susceptibilities, - fields, - cell_nodes, - regional_field, - constant_factor, -): - """ - Forward model the TMI with vector data (effective susceptibility) - - This function should be used with a `numba.jit` decorator, for example: - - ..code:: - - from numba import jit - - jit_forward = jit(nopython=True, parallel=True)(_forward_tmi_vector) - jit_forward( - receivers, nodes, effective_sus, fields, cell_nodes, regional_field, const_factor - ) - - Parameters - ---------- - receivers : (n_receivers, 3) array - Array with the locations of the receivers - nodes : (n_active_nodes, 3) array - Array with the location of the mesh nodes. - effective_susceptibilities : (3 * n_active_cells) - Array with the effective susceptibility vector components of each - active cell in the mesh. - The order of the components should be the following: all x components - for every cell, then all y components for every cell and then all - z components for every cell. - fields : (n_receivers) array - Array full of zeros where the TMI on each receiver will be stored. This - could be a preallocated array or a slice of it. - cell_nodes : (n_active_cells, 8) array - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - constant_factor : float - Constant factor that will be used to multiply each element of the - sensitivity matrix. 
- """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - fx, fy, fz = regional_field - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vectors for kernels evaluated on mesh nodes - kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - # Allocate small vector for the nodes indices for a given cell - nodes_indices = np.empty(8, dtype=cell_nodes.dtype) - for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) - kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) - kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) - kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) - kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) - kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) - # Compute sensitivity matrix elements from the kernel values - for k in range(n_cells): - nodes_indices = cell_nodes[k, :] - uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) - uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) - uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) - uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) - uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) - uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) - bx = uxx * fx + uxy * fy + uxz * fz - by = uxy * fx + uyy * fy + uyz * fz - bz = uxz * fx + uyz * fy + uzz * fz - fields[i] += constant_factor * ( - bx * effective_susceptibilities[k] - + by * effective_susceptibilities[k + n_cells] - + bz * effective_susceptibilities[k + 2 * n_cells] - ) - - -@jit(nopython=True) -def _kernels_in_nodes_to_cell(kernels, nodes_indices): - """ - Evaluate integral on a given cell from evaluation of kernels on nodes - - Parameters - 
---------- - kernels : (n_active_nodes,) array - Array with kernel values on each one of the nodes in the mesh. - nodes_indices : (8,) array of int - Indices of the nodes for the current cell in "F" order (x changes - faster than y, and y faster than z). - - Returns - ------- - float - """ - result = ( - -kernels[nodes_indices[0]] - + kernels[nodes_indices[1]] - + kernels[nodes_indices[2]] - - kernels[nodes_indices[3]] - + kernels[nodes_indices[4]] - - kernels[nodes_indices[5]] - - kernels[nodes_indices[6]] - + kernels[nodes_indices[7]] - ) - return result - - -_fill_sensitivity_tmi_scalar_serial = jit(nopython=True, parallel=False)( - _fill_sensitivity_tmi_scalar -) -_fill_sensitivity_tmi_scalar_parallel = jit(nopython=True, parallel=True)( - _fill_sensitivity_tmi_scalar -) -_fill_sensitivity_mag_scalar_serial = jit(nopython=True, parallel=False)( - _fill_sensitivity_mag_scalar -) -_fill_sensitivity_mag_scalar_parallel = jit(nopython=True, parallel=True)( - _fill_sensitivity_mag_scalar -) -_fill_sensitivity_tmi_vector_serial = jit(nopython=True, parallel=False)( - _fill_sensitivity_tmi_vector -) -_fill_sensitivity_tmi_vector_parallel = jit(nopython=True, parallel=True)( - _fill_sensitivity_tmi_vector -) -_forward_tmi_scalar_serial = jit(nopython=True, parallel=False)(_forward_tmi_scalar) -_forward_tmi_scalar_parallel = jit(nopython=True, parallel=True)(_forward_tmi_scalar) -_forward_tmi_vector_serial = jit(nopython=True, parallel=False)(_forward_tmi_vector) -_forward_tmi_vector_parallel = jit(nopython=True, parallel=True)(_forward_tmi_vector) - - class Simulation3DIntegral(BasePFSimulation): """ magnetic simulation in integral form. 
From e4de97626375f3afe9b63847fc8a70a1352c7cc9 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 19 Oct 2023 15:56:15 -0700 Subject: [PATCH 059/164] Simplify names of numba functions --- .../magnetics/_numba_functions.py | 24 ++++++------- .../potential_fields/magnetics/simulation.py | 36 ++++++++----------- 2 files changed, 27 insertions(+), 33 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/_numba_functions.py b/SimPEG/potential_fields/magnetics/_numba_functions.py index ac9c3df84e..7b2204cc5a 100644 --- a/SimPEG/potential_fields/magnetics/_numba_functions.py +++ b/SimPEG/potential_fields/magnetics/_numba_functions.py @@ -15,7 +15,7 @@ def jit(*args, **kwargs): from numba import jit, prange -def _fill_sensitivity_mag_scalar( +def _sensitivity_mag_scalar( receivers, nodes, sensitivity_matrix, @@ -100,7 +100,7 @@ def _fill_sensitivity_mag_scalar( ) -def _fill_sensitivity_tmi_scalar( +def _sensitivity_tmi_scalar( receivers, nodes, sensitivity_matrix, @@ -484,22 +484,22 @@ def _kernels_in_nodes_to_cell(kernels, nodes_indices): return result -_fill_sensitivity_tmi_scalar_serial = jit(nopython=True, parallel=False)( - _fill_sensitivity_tmi_scalar +_sensitivity_tmi_scalar_serial = jit(nopython=True, parallel=False)( + _sensitivity_tmi_scalar ) -_fill_sensitivity_tmi_scalar_parallel = jit(nopython=True, parallel=True)( - _fill_sensitivity_tmi_scalar +_sensitivity_tmi_scalar_parallel = jit(nopython=True, parallel=True)( + _sensitivity_tmi_scalar ) -_fill_sensitivity_mag_scalar_serial = jit(nopython=True, parallel=False)( - _fill_sensitivity_mag_scalar +_sensitivity_mag_scalar_serial = jit(nopython=True, parallel=False)( + _sensitivity_mag_scalar ) -_fill_sensitivity_mag_scalar_parallel = jit(nopython=True, parallel=True)( - _fill_sensitivity_mag_scalar +_sensitivity_mag_scalar_parallel = jit(nopython=True, parallel=True)( + _sensitivity_mag_scalar ) -_fill_sensitivity_tmi_vector_serial = jit(nopython=True, parallel=False)( +_sensitivity_tmi_vector_serial 
= jit(nopython=True, parallel=False)( _fill_sensitivity_tmi_vector ) -_fill_sensitivity_tmi_vector_parallel = jit(nopython=True, parallel=True)( +_sensitivity_tmi_vector_parallel = jit(nopython=True, parallel=True)( _fill_sensitivity_tmi_vector ) _forward_tmi_scalar_serial = jit(nopython=True, parallel=False)(_forward_tmi_scalar) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 9366e1c4e9..94165a1ff2 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -31,14 +31,14 @@ choclo = None else: from ._numba_functions import ( - _fill_sensitivity_tmi_scalar_parallel, - _fill_sensitivity_tmi_vector_parallel, - _fill_sensitivity_mag_scalar_parallel, + _sensitivity_tmi_scalar_parallel, + _sensitivity_tmi_vector_parallel, + _sensitivity_mag_scalar_parallel, _forward_tmi_scalar_parallel, _forward_tmi_vector_parallel, - _fill_sensitivity_tmi_scalar_serial, - _fill_sensitivity_tmi_vector_serial, - _fill_sensitivity_mag_scalar_serial, + _sensitivity_tmi_scalar_serial, + _sensitivity_tmi_vector_serial, + _sensitivity_mag_scalar_serial, _forward_tmi_scalar_serial, _forward_tmi_vector_serial, ) @@ -83,21 +83,15 @@ def __init__( self.engine = engine if self.engine == "choclo": if choclo_parallel: - self._fill_sensitivity_tmi_scalar = ( - _fill_sensitivity_tmi_scalar_parallel - ) - self._fill_sensitivity_tmi_vector = ( - _fill_sensitivity_tmi_vector_parallel - ) - self._fill_sensitivity_mag_scalar = ( - _fill_sensitivity_mag_scalar_parallel - ) + self._sensitivity_tmi_scalar = _sensitivity_tmi_scalar_parallel + self._sensitivity_tmi_vector = _sensitivity_tmi_vector_parallel + self._sensitivity_mag_scalar = _sensitivity_mag_scalar_parallel self._forward_tmi_scalar = _forward_tmi_scalar_parallel self._forward_tmi_vector = _forward_tmi_vector_parallel else: - self._fill_sensitivity_tmi_scalar = _fill_sensitivity_tmi_scalar_serial - 
self._fill_sensitivity_tmi_vector = _fill_sensitivity_tmi_vector_serial - self._fill_sensitivity_mag_scalar = _fill_sensitivity_mag_scalar_serial + self._sensitivity_tmi_scalar = _sensitivity_tmi_scalar_serial + self._sensitivity_tmi_vector = _sensitivity_tmi_vector_serial + self._sensitivity_mag_scalar = _sensitivity_mag_scalar_serial self._forward_tmi_scalar = _forward_tmi_scalar_serial self._forward_tmi_vector = _forward_tmi_vector_serial @@ -593,7 +587,7 @@ def _sensitivity_matrix(self): ) if self.model_type == "scalar": if component == "tmi": - self._fill_sensitivity_tmi_scalar( + self._sensitivity_tmi_scalar( receivers, active_nodes, sensitivity_matrix[matrix_slice, :], @@ -603,7 +597,7 @@ def _sensitivity_matrix(self): ) else: kernel_x, kernel_y, kernel_z = CHOCLO_KERNELS[component] - self._fill_sensitivity_mag_scalar( + self._sensitivity_mag_scalar( receivers, active_nodes, sensitivity_matrix[matrix_slice, :], @@ -617,7 +611,7 @@ def _sensitivity_matrix(self): else: if component != "tmi": raise NotImplementedError() - self._fill_sensitivity_tmi_vector( + self._sensitivity_tmi_vector( receivers, active_nodes, sensitivity_matrix[matrix_slice, :], From f24f544a82a265282710776f9e31ca046558ca1f Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Fri, 20 Oct 2023 15:42:25 -0700 Subject: [PATCH 060/164] Fix docstring --- SimPEG/potential_fields/magnetics/_numba_functions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/_numba_functions.py b/SimPEG/potential_fields/magnetics/_numba_functions.py index 7b2204cc5a..6ae9bc5967 100644 --- a/SimPEG/potential_fields/magnetics/_numba_functions.py +++ b/SimPEG/potential_fields/magnetics/_numba_functions.py @@ -36,7 +36,7 @@ def _sensitivity_mag_scalar( from numba import jit jit_sensitivity = jit(nopython=True, parallel=True)( - _fill_sensitivity_matrix_scalar + _sensitivity_matrix_scalar ) jit_sensitivity( receivers, nodes, matrix, cell_nodes, regional_field, 
constant_factor @@ -118,7 +118,7 @@ def _sensitivity_tmi_scalar( from numba import jit jit_sensitivity = jit(nopython=True, parallel=True)( - _fill_sensitivity_matrix_tmi_scalar + _sensitivity_matrix_tmi_scalar ) jit_sensitivity( receivers, nodes, matrix, cell_nodes, regional_field, constant_factor From 38dd741c3286d7e230f70926e559c415b656b664 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Fri, 20 Oct 2023 16:34:40 -0700 Subject: [PATCH 061/164] Merge the sens matrix tmi functions into single one Merge the `_sensitivity_tmi_scalar` and `_sensitivity_tmi_vector` functions into a single one that has an extra argument: `scalar_model` through which we can tell it to build the sensitivity matrix for a scalar model or for a vector model. --- .../magnetics/_numba_functions.py | 138 ++++-------------- .../potential_fields/magnetics/simulation.py | 55 +++---- 2 files changed, 50 insertions(+), 143 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/_numba_functions.py b/SimPEG/potential_fields/magnetics/_numba_functions.py index 6ae9bc5967..45ee231599 100644 --- a/SimPEG/potential_fields/magnetics/_numba_functions.py +++ b/SimPEG/potential_fields/magnetics/_numba_functions.py @@ -100,16 +100,17 @@ def _sensitivity_mag_scalar( ) -def _sensitivity_tmi_scalar( +def _sensitivity_tmi( receivers, nodes, sensitivity_matrix, cell_nodes, regional_field, constant_factor, + scalar_model, ): """ - Fill the sensitivity matrix for TMI and scalar data (susceptibility only) + Fill the sensitivity matrix for TMI This function should be used with a `numba.jit` decorator, for example: @@ -117,10 +118,10 @@ def _sensitivity_tmi_scalar( from numba import jit - jit_sensitivity = jit(nopython=True, parallel=True)( - _sensitivity_matrix_tmi_scalar + jit_sensitivity_tmi = jit(nopython=True, parallel=True)( + _sensitivity_tmi ) - jit_sensitivity( + jit_sensitivity_tmi( receivers, nodes, matrix, cell_nodes, regional_field, constant_factor ) @@ -130,9 +131,13 @@ def 
_sensitivity_tmi_scalar( Array with the locations of the receivers nodes : (n_active_nodes, 3) array Array with the location of the mesh nodes. - sensitivity_matrix : (n_receivers, n_active_nodes) array + sensitivity_matrix : array Empty 2d array where the sensitivity matrix elements will be filled. This could be a preallocated empty array or a slice of it. + The array should have a shape of ``(n_receivers, n_active_nodes)`` + if ``scalar_model`` is True. + The array should have a shape of ``(n_receivers, 3 * n_active_nodes)`` + if ``scalar_model`` is False. cell_nodes : (n_active_cells, 8) array Array of integers, where each row contains the indices of the nodes for each active cell in the mesh. @@ -142,6 +147,11 @@ def _sensitivity_tmi_scalar( constant_factor : float Constant factor that will be used to multiply each element of the sensitivity matrix. + scalar_model : bool + If True, the sensitivity matrix is build to work with scalar models + (susceptibilities). + If False, the sensitivity matrix is build to work with vector models + (effective susceptibilities). 
""" n_receivers = receivers.shape[0] n_nodes = nodes.shape[0] @@ -181,98 +191,18 @@ def _sensitivity_tmi_scalar( bx = uxx * fx + uxy * fy + uxz * fz by = uxy * fx + uyy * fy + uyz * fz bz = uxz * fx + uyz * fy + uzz * fz - sensitivity_matrix[i, k] = ( - constant_factor - * regional_field_amplitude - * (bx * fx + by * fy + bz * fz) - ) - - -def _fill_sensitivity_tmi_vector( - receivers, - nodes, - sensitivity_matrix, - cell_nodes, - regional_field, - constant_factor, -): - """ - Fill the sensitivity matrix for TMI and vector data (effective susceptibility) - - This function should be used with a `numba.jit` decorator, for example: - - ..code:: - - from numba import jit - - jit_sensitivity = jit(nopython=True, parallel=True)( - _fill_sensitivity_matrix_tmi_vector - ) - jit_sensitivity( - receivers, nodes, matrix, cell_nodes, regional_field, constant_factor - ) - - Parameters - ---------- - receivers : (n_receivers, 3) array - Array with the locations of the receivers - nodes : (n_active_nodes, 3) array - Array with the location of the mesh nodes. - sensitivity_matrix : (n_receivers, 3 * n_active_nodes) array - Empty 2d array where the sensitivity matrix elements will be filled. - This could be a preallocated empty array or a slice of it. - The number of columns is three times the number of active nodes since - the vector model has ``3 * n_active_nodes`` elements: three components - for each active cell. - cell_nodes : (n_active_cells, 8) array - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - constant_factor : float - Constant factor that will be used to multiply each element of the - sensitivity matrix. 
- """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - fx, fy, fz = regional_field - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vectors for kernels evaluated on mesh nodes - kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - # Allocate small vector for the nodes indices for a given cell - nodes_indices = np.empty(8, dtype=cell_nodes.dtype) - for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) - kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) - kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) - kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) - kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) - kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) - # Compute sensitivity matrix elements from the kernel values - for k in range(n_cells): - nodes_indices = cell_nodes[k, :] - uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) - uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) - uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) - uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) - uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) - uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) - bx = uxx * fx + uxy * fy + uxz * fz - by = uxy * fx + uyy * fy + uyz * fz - bz = uxz * fx + uyz * fy + uzz * fz - # Fill the sensitivity matrix elements that correspond to the + # Fill the sensitivity matrix element(s) that correspond to the # current active cell - sensitivity_matrix[i, k] = constant_factor * bx - sensitivity_matrix[i, k + n_cells] = constant_factor * by - sensitivity_matrix[i, k + 2 * n_cells] = constant_factor * bz + if scalar_model: + 
sensitivity_matrix[i, k] = ( + constant_factor + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + else: + sensitivity_matrix[i, k] = constant_factor * bx + sensitivity_matrix[i, k + n_cells] = constant_factor * by + sensitivity_matrix[i, k + 2 * n_cells] = constant_factor * bz def _forward_tmi_scalar( @@ -484,24 +414,14 @@ def _kernels_in_nodes_to_cell(kernels, nodes_indices): return result -_sensitivity_tmi_scalar_serial = jit(nopython=True, parallel=False)( - _sensitivity_tmi_scalar -) -_sensitivity_tmi_scalar_parallel = jit(nopython=True, parallel=True)( - _sensitivity_tmi_scalar -) +_sensitivity_tmi_serial = jit(nopython=True, parallel=False)(_sensitivity_tmi) +_sensitivity_tmi_parallel = jit(nopython=True, parallel=True)(_sensitivity_tmi) _sensitivity_mag_scalar_serial = jit(nopython=True, parallel=False)( _sensitivity_mag_scalar ) _sensitivity_mag_scalar_parallel = jit(nopython=True, parallel=True)( _sensitivity_mag_scalar ) -_sensitivity_tmi_vector_serial = jit(nopython=True, parallel=False)( - _fill_sensitivity_tmi_vector -) -_sensitivity_tmi_vector_parallel = jit(nopython=True, parallel=True)( - _fill_sensitivity_tmi_vector -) _forward_tmi_scalar_serial = jit(nopython=True, parallel=False)(_forward_tmi_scalar) _forward_tmi_scalar_parallel = jit(nopython=True, parallel=True)(_forward_tmi_scalar) _forward_tmi_vector_serial = jit(nopython=True, parallel=False)(_forward_tmi_vector) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 94165a1ff2..eea1ce0240 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -31,13 +31,11 @@ choclo = None else: from ._numba_functions import ( - _sensitivity_tmi_scalar_parallel, - _sensitivity_tmi_vector_parallel, + _sensitivity_tmi_parallel, + _sensitivity_tmi_serial, _sensitivity_mag_scalar_parallel, _forward_tmi_scalar_parallel, _forward_tmi_vector_parallel, - 
_sensitivity_tmi_scalar_serial, - _sensitivity_tmi_vector_serial, _sensitivity_mag_scalar_serial, _forward_tmi_scalar_serial, _forward_tmi_vector_serial, @@ -83,14 +81,12 @@ def __init__( self.engine = engine if self.engine == "choclo": if choclo_parallel: - self._sensitivity_tmi_scalar = _sensitivity_tmi_scalar_parallel - self._sensitivity_tmi_vector = _sensitivity_tmi_vector_parallel + self._sensitivity_tmi = _sensitivity_tmi_parallel self._sensitivity_mag_scalar = _sensitivity_mag_scalar_parallel self._forward_tmi_scalar = _forward_tmi_scalar_parallel self._forward_tmi_vector = _forward_tmi_vector_parallel else: - self._sensitivity_tmi_scalar = _sensitivity_tmi_scalar_serial - self._sensitivity_tmi_vector = _sensitivity_tmi_vector_serial + self._sensitivity_tmi = _sensitivity_tmi_serial self._sensitivity_mag_scalar = _sensitivity_mag_scalar_serial self._forward_tmi_scalar = _forward_tmi_scalar_serial self._forward_tmi_vector = _forward_tmi_vector_serial @@ -582,41 +578,32 @@ def _sensitivity_matrix(self): n_components = len(components) n_rows = n_components * receivers.shape[0] for i, component in enumerate(components): + if component != "tmi" and self.model_type == "vector": + raise NotImplementedError() matrix_slice = slice( index_offset + i, index_offset + n_rows, n_components ) - if self.model_type == "scalar": - if component == "tmi": - self._sensitivity_tmi_scalar( - receivers, - active_nodes, - sensitivity_matrix[matrix_slice, :], - active_cell_nodes, - regional_field, - constant_factor, - ) - else: - kernel_x, kernel_y, kernel_z = CHOCLO_KERNELS[component] - self._sensitivity_mag_scalar( - receivers, - active_nodes, - sensitivity_matrix[matrix_slice, :], - active_cell_nodes, - regional_field, - kernel_x, - kernel_y, - kernel_z, - constant_factor, - ) + if component == "tmi": + self._sensitivity_tmi( + receivers, + active_nodes, + sensitivity_matrix[matrix_slice, :], + active_cell_nodes, + regional_field, + constant_factor, + scalar_model=(self.model_type 
== "scalar"), + ) else: - if component != "tmi": - raise NotImplementedError() - self._sensitivity_tmi_vector( + kernel_x, kernel_y, kernel_z = CHOCLO_KERNELS[component] + self._sensitivity_mag_scalar( receivers, active_nodes, sensitivity_matrix[matrix_slice, :], active_cell_nodes, regional_field, + kernel_x, + kernel_y, + kernel_z, constant_factor, ) index_offset += n_rows From 67fbe6723cdc05964e1ac0aafcbcb6286bf6155f Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Fri, 20 Oct 2023 16:40:31 -0700 Subject: [PATCH 062/164] Update docstring --- SimPEG/potential_fields/magnetics/_numba_functions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/potential_fields/magnetics/_numba_functions.py b/SimPEG/potential_fields/magnetics/_numba_functions.py index 45ee231599..13484f6577 100644 --- a/SimPEG/potential_fields/magnetics/_numba_functions.py +++ b/SimPEG/potential_fields/magnetics/_numba_functions.py @@ -39,7 +39,7 @@ def _sensitivity_mag_scalar( _sensitivity_matrix_scalar ) jit_sensitivity( - receivers, nodes, matrix, cell_nodes, regional_field, constant_factor + receivers, nodes, matrix, cell_nodes, regional_field, constant_factor, True ) Parameters From 5c387446293387254e4fc40210a1841cf68f5d89 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Fri, 20 Oct 2023 16:55:25 -0700 Subject: [PATCH 063/164] Fix sensitivity function Add the amplitude of the reference field --- .../potential_fields/magnetics/_numba_functions.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/_numba_functions.py b/SimPEG/potential_fields/magnetics/_numba_functions.py index 13484f6577..1c0a665bb5 100644 --- a/SimPEG/potential_fields/magnetics/_numba_functions.py +++ b/SimPEG/potential_fields/magnetics/_numba_functions.py @@ -200,9 +200,15 @@ def _sensitivity_tmi( * (bx * fx + by * fy + bz * fz) ) else: - sensitivity_matrix[i, k] = constant_factor * bx - sensitivity_matrix[i, k + n_cells] = 
constant_factor * by - sensitivity_matrix[i, k + 2 * n_cells] = constant_factor * bz + sensitivity_matrix[i, k] = ( + constant_factor * regional_field_amplitude * bx + ) + sensitivity_matrix[i, k + n_cells] = ( + constant_factor * regional_field_amplitude * by + ) + sensitivity_matrix[i, k + 2 * n_cells] = ( + constant_factor * regional_field_amplitude * bz + ) def _forward_tmi_scalar( From d881389ed9a4f4060b10a8780a4c32889831cc60 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Fri, 20 Oct 2023 16:59:31 -0700 Subject: [PATCH 064/164] Merge numba functions for forward tmi Merge the `_forward_tmi_scalar` and `_forward_tmi_vector` functions into a single `_forward_tmi` function. Add a `scalar_model` attribute for specifying if the passed model are susceptibilities (scalar) or effective susceptibilities (vector). --- .../magnetics/_numba_functions.py | 145 +++++------------- .../potential_fields/magnetics/simulation.py | 42 ++--- 2 files changed, 53 insertions(+), 134 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/_numba_functions.py b/SimPEG/potential_fields/magnetics/_numba_functions.py index 1c0a665bb5..7ece064421 100644 --- a/SimPEG/potential_fields/magnetics/_numba_functions.py +++ b/SimPEG/potential_fields/magnetics/_numba_functions.py @@ -211,17 +211,18 @@ def _sensitivity_tmi( ) -def _forward_tmi_scalar( +def _forward_tmi( receivers, nodes, - susceptibilities, + model, fields, cell_nodes, regional_field, constant_factor, + scalar_model, ): """ - Forward model the TMI with scalar data (susceptibility only) + Forward model the TMI This function should be used with a `numba.jit` decorator, for example: @@ -229,9 +230,9 @@ def _forward_tmi_scalar( from numba import jit - jit_forward = jit(nopython=True, parallel=True)(_forward_tmi_scalar) + jit_forward = jit(nopython=True, parallel=True)(_forward_tmi) jit_forward( - receivers, nodes, mag_sus, fields, cell_nodes, regional_field, const_factor + receivers, nodes, mag_sus, fields, cell_nodes, 
regional_field, const_factor, scalar_model=True ) Parameters @@ -240,8 +241,13 @@ def _forward_tmi_scalar( Array with the locations of the receivers nodes : (n_active_nodes, 3) array Array with the location of the mesh nodes. - susceptibilities : (n_active_cells) - Array with the susceptibility of each active cell in the mesh. + model : (n_active_cells) or (3 * n_active_cells) + Array with the susceptibility (scalar model) or the effective + susceptibility (vector model) of each active cell in the mesh. + If the model is scalar, the ``model`` array should have + ``n_active_cells`` elements and ``scalar_model`` should be True. + If the model is vector, the ``model`` array should have + ``3 * n_active_cells`` elements and ``scalar_model`` should be False. fields : (n_receivers) array Array full of zeros where the TMI on each receiver will be stored. This could be a preallocated array or a slice of it. @@ -254,6 +260,12 @@ def _forward_tmi_scalar( constant_factor : float Constant factor that will be used to multiply each element of the sensitivity matrix. + scalar_model : bool + If True, the sensitivity matrix is build to work with scalar models + (susceptibilities). + If False, the sensitivity matrix is build to work with vector models + (effective susceptibilities). 
+ """ n_receivers = receivers.shape[0] n_nodes = nodes.shape[0] @@ -293,101 +305,23 @@ def _forward_tmi_scalar( bx = uxx * fx + uxy * fy + uxz * fz by = uxy * fx + uyy * fy + uyz * fz bz = uxz * fx + uyz * fy + uzz * fz - fields[i] += ( - constant_factor - * susceptibilities[k] - * regional_field_amplitude - * (bx * fx + by * fy + bz * fz) - ) - - -def _forward_tmi_vector( - receivers, - nodes, - effective_susceptibilities, - fields, - cell_nodes, - regional_field, - constant_factor, -): - """ - Forward model the TMI with vector data (effective susceptibility) - - This function should be used with a `numba.jit` decorator, for example: - - ..code:: - - from numba import jit - - jit_forward = jit(nopython=True, parallel=True)(_forward_tmi_vector) - jit_forward( - receivers, nodes, effective_sus, fields, cell_nodes, regional_field, const_factor - ) - - Parameters - ---------- - receivers : (n_receivers, 3) array - Array with the locations of the receivers - nodes : (n_active_nodes, 3) array - Array with the location of the mesh nodes. - effective_susceptibilities : (3 * n_active_cells) - Array with the effective susceptibility vector components of each - active cell in the mesh. - The order of the components should be the following: all x components - for every cell, then all y components for every cell and then all - z components for every cell. - fields : (n_receivers) array - Array full of zeros where the TMI on each receiver will be stored. This - could be a preallocated array or a slice of it. - cell_nodes : (n_active_cells, 8) array - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - constant_factor : float - Constant factor that will be used to multiply each element of the - sensitivity matrix. 
- """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - fx, fy, fz = regional_field - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vectors for kernels evaluated on mesh nodes - kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - # Allocate small vector for the nodes indices for a given cell - nodes_indices = np.empty(8, dtype=cell_nodes.dtype) - for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) - kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) - kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) - kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) - kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) - kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) - # Compute sensitivity matrix elements from the kernel values - for k in range(n_cells): - nodes_indices = cell_nodes[k, :] - uxx = _kernels_in_nodes_to_cell(kxx, nodes_indices) - uyy = _kernels_in_nodes_to_cell(kyy, nodes_indices) - uzz = _kernels_in_nodes_to_cell(kzz, nodes_indices) - uxy = _kernels_in_nodes_to_cell(kxy, nodes_indices) - uxz = _kernels_in_nodes_to_cell(kxz, nodes_indices) - uyz = _kernels_in_nodes_to_cell(kyz, nodes_indices) - bx = uxx * fx + uxy * fy + uxz * fz - by = uxy * fx + uyy * fy + uyz * fz - bz = uxz * fx + uyz * fy + uzz * fz - fields[i] += constant_factor * ( - bx * effective_susceptibilities[k] - + by * effective_susceptibilities[k + n_cells] - + bz * effective_susceptibilities[k + 2 * n_cells] - ) + if scalar_model: + fields[i] += ( + constant_factor + * model[k] + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + else: + fields[i] += ( + constant_factor + * 
regional_field_amplitude + * ( + bx * model[k] + + by * model[k + n_cells] + + bz * model[k + 2 * n_cells] + ) + ) @jit(nopython=True) @@ -422,13 +356,12 @@ def _kernels_in_nodes_to_cell(kernels, nodes_indices): _sensitivity_tmi_serial = jit(nopython=True, parallel=False)(_sensitivity_tmi) _sensitivity_tmi_parallel = jit(nopython=True, parallel=True)(_sensitivity_tmi) +_forward_tmi_serial = jit(nopython=True, parallel=False)(_forward_tmi) +_forward_tmi_parallel = jit(nopython=True, parallel=True)(_forward_tmi) + _sensitivity_mag_scalar_serial = jit(nopython=True, parallel=False)( _sensitivity_mag_scalar ) _sensitivity_mag_scalar_parallel = jit(nopython=True, parallel=True)( _sensitivity_mag_scalar ) -_forward_tmi_scalar_serial = jit(nopython=True, parallel=False)(_forward_tmi_scalar) -_forward_tmi_scalar_parallel = jit(nopython=True, parallel=True)(_forward_tmi_scalar) -_forward_tmi_vector_serial = jit(nopython=True, parallel=False)(_forward_tmi_vector) -_forward_tmi_vector_parallel = jit(nopython=True, parallel=True)(_forward_tmi_vector) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index eea1ce0240..141f439d25 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -33,12 +33,10 @@ from ._numba_functions import ( _sensitivity_tmi_parallel, _sensitivity_tmi_serial, + _forward_tmi_parallel, + _forward_tmi_serial, _sensitivity_mag_scalar_parallel, - _forward_tmi_scalar_parallel, - _forward_tmi_vector_parallel, _sensitivity_mag_scalar_serial, - _forward_tmi_scalar_serial, - _forward_tmi_vector_serial, ) CHOCLO_SUPPORTED_COMPONENTS = {"tmi", "bx", "by", "bz"} @@ -82,14 +80,12 @@ def __init__( if self.engine == "choclo": if choclo_parallel: self._sensitivity_tmi = _sensitivity_tmi_parallel + self._forward_tmi = _forward_tmi_parallel self._sensitivity_mag_scalar = _sensitivity_mag_scalar_parallel - self._forward_tmi_scalar = 
_forward_tmi_scalar_parallel - self._forward_tmi_vector = _forward_tmi_vector_parallel else: self._sensitivity_tmi = _sensitivity_tmi_serial + self._forward_tmi = _forward_tmi_serial self._sensitivity_mag_scalar = _sensitivity_mag_scalar_serial - self._forward_tmi_scalar = _forward_tmi_scalar_serial - self._forward_tmi_vector = _forward_tmi_vector_serial @property def model_type(self): @@ -524,26 +520,16 @@ def _forward(self, susceptibilities): "Other components besides 'tmi' aren't implemented yet." ) constant_factor = 1 / 4 / np.pi - if self.model_type == "scalar": - self._forward_tmi_scalar( - receivers, - active_nodes, - susceptibilities, - fields, - active_cell_nodes, - regional_field, - constant_factor, - ) - else: - self._forward_tmi_vector( - receivers, - active_nodes, - susceptibilities, - fields, - active_cell_nodes, - regional_field, - constant_factor, - ) + self._forward_tmi( + receivers, + active_nodes, + susceptibilities, + fields, + active_cell_nodes, + regional_field, + constant_factor, + scalar_model=(self.model_type == "scalar"), + ) return fields def _sensitivity_matrix(self): From d7b04059953e724e32c5c8bec00304209ca22289 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Fri, 20 Oct 2023 17:02:48 -0700 Subject: [PATCH 065/164] Rename the argument for the _forward method --- SimPEG/potential_fields/magnetics/simulation.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 141f439d25..4fac69ae3f 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -487,15 +487,16 @@ def deleteTheseOnModelUpdate(self): deletes = deletes + ["_gtg_diagonal", "_ampDeriv"] return deletes - def _forward(self, susceptibilities): + def _forward(self, model): """ Forward model the fields of active cells in the mesh on receivers. 
Parameters ---------- - susceptibilities : (n_active_cells) or (3 * n_active_cells) array - Array containing the susceptibilities, or effective - susceptibilities of the active cells in the mesh, in SI units. + model : (n_active_cells) or (3 * n_active_cells) array + Array containing the susceptibilities (scalar) or effective + susceptibilities (vector) of the active cells in the mesh, in SI + units. Susceptibilities are expected if ``model_type`` is ``"scalar"``, and the array should have ``n_active_cells`` elements. Effective susceptibilities are expected if ``model_type`` is @@ -523,7 +524,7 @@ def _forward(self, susceptibilities): self._forward_tmi( receivers, active_nodes, - susceptibilities, + model, fields, active_cell_nodes, regional_field, From 5c0fd0e2521c8642b1cf5bae6507348d7474793f Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Fri, 20 Oct 2023 17:28:24 -0700 Subject: [PATCH 066/164] Implement forward of mag component with vector model Include this other forward into the same Numba function that was defined for mag components on scalar model and rename it to `_sensitivity_mag`. 
--- .../magnetics/_numba_functions.py | 46 ++++++++++++------- .../potential_fields/magnetics/simulation.py | 16 +++---- 2 files changed, 37 insertions(+), 25 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/_numba_functions.py b/SimPEG/potential_fields/magnetics/_numba_functions.py index 7ece064421..009a8537f9 100644 --- a/SimPEG/potential_fields/magnetics/_numba_functions.py +++ b/SimPEG/potential_fields/magnetics/_numba_functions.py @@ -15,7 +15,7 @@ def jit(*args, **kwargs): from numba import jit, prange -def _sensitivity_mag_scalar( +def _sensitivity_mag( receivers, nodes, sensitivity_matrix, @@ -25,9 +25,10 @@ def _sensitivity_mag_scalar( kernel_y, kernel_z, constant_factor, + scalar_model, ): """ - Fill the sensitivity matrix for single mag component and scalar data + Fill the sensitivity matrix for single mag component This function should be used with a `numba.jit` decorator, for example: @@ -35,10 +36,10 @@ def _sensitivity_mag_scalar( from numba import jit - jit_sensitivity = jit(nopython=True, parallel=True)( - _sensitivity_matrix_scalar + jit_sensitivity_mag = jit(nopython=True, parallel=True)( + _sensitivity_mag ) - jit_sensitivity( + jit_sensitivity_mag( receivers, nodes, matrix, cell_nodes, regional_field, constant_factor, True ) @@ -64,6 +65,11 @@ def _sensitivity_mag_scalar( constant_factor : float Constant factor that will be used to multiply each element of the sensitivity matrix. + scalar_model : bool + If True, the sensitivity matrix is build to work with scalar models + (susceptibilities). + If False, the sensitivity matrix is build to work with vector models + (effective susceptibilities). 
""" n_receivers = receivers.shape[0] n_nodes = nodes.shape[0] @@ -93,11 +99,22 @@ def _sensitivity_mag_scalar( ux = _kernels_in_nodes_to_cell(kx, nodes_indices) uy = _kernels_in_nodes_to_cell(ky, nodes_indices) uz = _kernels_in_nodes_to_cell(kz, nodes_indices) - sensitivity_matrix[i, k] = ( - constant_factor - * regional_field_amplitude - * (ux * fx + uy * fy + uz * fz) - ) + if scalar_model: + sensitivity_matrix[i, k] = ( + constant_factor + * regional_field_amplitude + * (ux * fx + uy * fy + uz * fz) + ) + else: + sensitivity_matrix[i, k] = ( + constant_factor * regional_field_amplitude * ux + ) + sensitivity_matrix[i, k + n_cells] = ( + constant_factor * regional_field_amplitude * uy + ) + sensitivity_matrix[i, k + 2 * n_cells] = ( + constant_factor * regional_field_amplitude * uz + ) def _sensitivity_tmi( @@ -358,10 +375,5 @@ def _kernels_in_nodes_to_cell(kernels, nodes_indices): _sensitivity_tmi_parallel = jit(nopython=True, parallel=True)(_sensitivity_tmi) _forward_tmi_serial = jit(nopython=True, parallel=False)(_forward_tmi) _forward_tmi_parallel = jit(nopython=True, parallel=True)(_forward_tmi) - -_sensitivity_mag_scalar_serial = jit(nopython=True, parallel=False)( - _sensitivity_mag_scalar -) -_sensitivity_mag_scalar_parallel = jit(nopython=True, parallel=True)( - _sensitivity_mag_scalar -) +_sensitivity_mag_serial = jit(nopython=True, parallel=False)(_sensitivity_mag) +_sensitivity_mag_parallel = jit(nopython=True, parallel=True)(_sensitivity_mag) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 4fac69ae3f..5f9502f6ee 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -35,8 +35,8 @@ _sensitivity_tmi_serial, _forward_tmi_parallel, _forward_tmi_serial, - _sensitivity_mag_scalar_parallel, - _sensitivity_mag_scalar_serial, + _sensitivity_mag_parallel, + _sensitivity_mag_serial, ) CHOCLO_SUPPORTED_COMPONENTS = {"tmi", "bx", 
"by", "bz"} @@ -81,11 +81,11 @@ def __init__( if choclo_parallel: self._sensitivity_tmi = _sensitivity_tmi_parallel self._forward_tmi = _forward_tmi_parallel - self._sensitivity_mag_scalar = _sensitivity_mag_scalar_parallel + self._sensitivity_mag = _sensitivity_mag_parallel else: self._sensitivity_tmi = _sensitivity_tmi_serial self._forward_tmi = _forward_tmi_serial - self._sensitivity_mag_scalar = _sensitivity_mag_scalar_serial + self._sensitivity_mag = _sensitivity_mag_serial @property def model_type(self): @@ -556,6 +556,7 @@ def _sensitivity_matrix(self): constant_factor = 1 / 4 / np.pi # Start filling the sensitivity matrix index_offset = 0 + scalar_model = self.model_type == "scalar" for components, receivers in self._get_components_and_receivers(): if not CHOCLO_SUPPORTED_COMPONENTS.issuperset(components): raise NotImplementedError( @@ -565,8 +566,6 @@ def _sensitivity_matrix(self): n_components = len(components) n_rows = n_components * receivers.shape[0] for i, component in enumerate(components): - if component != "tmi" and self.model_type == "vector": - raise NotImplementedError() matrix_slice = slice( index_offset + i, index_offset + n_rows, n_components ) @@ -578,11 +577,11 @@ def _sensitivity_matrix(self): active_cell_nodes, regional_field, constant_factor, - scalar_model=(self.model_type == "scalar"), + scalar_model, ) else: kernel_x, kernel_y, kernel_z = CHOCLO_KERNELS[component] - self._sensitivity_mag_scalar( + self._sensitivity_mag( receivers, active_nodes, sensitivity_matrix[matrix_slice, :], @@ -592,6 +591,7 @@ def _sensitivity_matrix(self): kernel_y, kernel_z, constant_factor, + scalar_model, ) index_offset += n_rows return sensitivity_matrix From 68e5d45e75c8d73e1d46632d7da39ae42d7c414e Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Mon, 23 Oct 2023 14:40:07 -0700 Subject: [PATCH 067/164] Simplify imports in magnetics/simulation.py --- .../potential_fields/magnetics/simulation.py | 23 ++++++++----------- 1 file changed, 10 insertions(+), 
13 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 5f9502f6ee..e6a6801caf 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -25,20 +25,17 @@ import discretize -try: - import choclo -except ImportError: - choclo = None -else: - from ._numba_functions import ( - _sensitivity_tmi_parallel, - _sensitivity_tmi_serial, - _forward_tmi_parallel, - _forward_tmi_serial, - _sensitivity_mag_parallel, - _sensitivity_mag_serial, - ) +from ._numba_functions import ( + choclo, + _sensitivity_tmi_parallel, + _sensitivity_tmi_serial, + _forward_tmi_parallel, + _forward_tmi_serial, + _sensitivity_mag_parallel, + _sensitivity_mag_serial, +) +if choclo is not None: CHOCLO_SUPPORTED_COMPONENTS = {"tmi", "bx", "by", "bz"} CHOCLO_KERNELS = { "bx": (choclo.prism.kernel_ee, choclo.prism.kernel_en, choclo.prism.kernel_eu), From dc8e6d8afe92f21742d64e687e1fe46925c035f8 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Mon, 23 Oct 2023 14:53:10 -0700 Subject: [PATCH 068/164] Move numba functions to their own private file Also move the try statement for importing choclo an numba to declutter the `simulation.py` file. Fix some wording and typos in docstrings. --- .../gravity/_numba_functions.py | 177 ++++++++++++++++ SimPEG/potential_fields/gravity/simulation.py | 192 ++---------------- 2 files changed, 190 insertions(+), 179 deletions(-) create mode 100644 SimPEG/potential_fields/gravity/_numba_functions.py diff --git a/SimPEG/potential_fields/gravity/_numba_functions.py b/SimPEG/potential_fields/gravity/_numba_functions.py new file mode 100644 index 0000000000..040a7308fb --- /dev/null +++ b/SimPEG/potential_fields/gravity/_numba_functions.py @@ -0,0 +1,177 @@ +""" +Numba functions for gravity simulation using Choclo. 
+""" +import numpy as np + +try: + import choclo +except ImportError: + # Define dummy jit decorator + def jit(*args, **kwargs): + return lambda f: f + + choclo = None +else: + from numba import jit, prange + + +def _forward_gravity( + receivers, + nodes, + densities, + fields, + cell_nodes, + kernel_func, + constant_factor, +): + """ + Forward model the gravity field of active cells on receivers + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_forward_gravity = jit(nopython=True, parallel=True)(_forward_gravity) + jit_forward_gravity( + receivers, nodes, densities, fields, cell_nodes, kernel_func, const_factor + ) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + densities : (n_active_cells) + Array with densities of each active cell in the mesh. + fields : (n_receivers) array + Array full of zeros where the gravity fields on each receiver will be + stored. This could be a preallocated array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + kernel_func : callable + Kernel function that will be evaluated on each node of the mesh. Choose + one of the kernel functions in ``choclo.prism``. + constant_factor : float + Constant factor that will be used to multiply each element of the + ``fields`` array. + + Notes + ----- + The constant factor is applied here to each element of fields because + it's more efficient than doing it afterwards: it would require to + index the elements that corresponds to each component. 
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vector for kernels evaluated on mesh nodes + kernels = np.empty(n_nodes) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kernels[j] = kernel_func(dx, dy, dz, distance) + # Compute fields from the kernel values + for k in range(n_cells): + fields[i] += ( + constant_factor + * densities[k] + * ( + -kernels[cell_nodes[k, 0]] + + kernels[cell_nodes[k, 1]] + + kernels[cell_nodes[k, 2]] + - kernels[cell_nodes[k, 3]] + + kernels[cell_nodes[k, 4]] + - kernels[cell_nodes[k, 5]] + - kernels[cell_nodes[k, 6]] + + kernels[cell_nodes[k, 7]] + ) + ) + + +def _sensitivity_gravity( + receivers, + nodes, + sensitivity_matrix, + cell_nodes, + kernel_func, + constant_factor, +): + """ + Fill the sensitivity matrix + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_sensitivity = jit(nopython=True, parallel=True)(_sensitivity_gravity) + jit_sensitivity( + receivers, nodes, densities, fields, cell_nodes, kernel_func, const_factor + ) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + sensitivity_matrix : (n_receivers, n_active_nodes) array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + kernel_func : callable + Kernel function that will be evaluated on each node of the mesh. 
Choose + one of the kernel functions in ``choclo.prism``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + + Notes + ----- + The constant factor is applied here to each row of the sensitivity matrix + because it's more efficient than doing it afterwards: it would require to + index the rows that corresponds to each component. + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vector for kernels evaluated on mesh nodes + kernels = np.empty(n_nodes) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kernels[j] = kernel_func(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + sensitivity_matrix[i, k] = constant_factor * ( + -kernels[cell_nodes[k, 0]] + + kernels[cell_nodes[k, 1]] + + kernels[cell_nodes[k, 2]] + - kernels[cell_nodes[k, 3]] + + kernels[cell_nodes[k, 4]] + - kernels[cell_nodes[k, 5]] + - kernels[cell_nodes[k, 6]] + + kernels[cell_nodes[k, 7]] + ) + + +# Define decorated versions of these functions +_sensitivity_gravity_parallel = jit(nopython=True, parallel=True)(_sensitivity_gravity) +_sensitivity_gravity_serial = jit(nopython=True, parallel=False)(_sensitivity_gravity) +_forward_gravity_parallel = jit(nopython=True, parallel=True)(_forward_gravity) +_forward_gravity_serial = jit(nopython=True, parallel=False)(_forward_gravity) diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index 2cf5e99ada..2618e1607c 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -12,16 +12,16 @@ from ...base import BasePDESimulation from 
..base import BaseEquivalentSourceLayerSimulation, BasePFSimulation -try: - import choclo -except ImportError: - # Define dummy jit decorator - def jit(*args, **kwargs): - return lambda f: f +from ._numba_functions import ( + choclo, + _sensitivity_gravity_serial, + _sensitivity_gravity_parallel, + _forward_gravity_serial, + _forward_gravity_parallel, +) - choclo = None -else: - from numba import jit, prange +if choclo is not None: + from numba import jit @jit(nopython=True) def kernel_uv(easting, northing, upward, radius): @@ -46,172 +46,6 @@ def kernel_uv(easting, northing, upward, radius): } -def _forward_gravity( - receivers, - nodes, - densities, - fields, - cell_nodes, - kernel_func, - constant_factor, -): - """ - Forward model the gravity field of active cells on receivers - - This function should be used with a `numba.jit` decorator, for example: - - ..code:: - - from numba import jit - - jit_forward_gravity = jit(nopython=True, parallel=True)(_forward_gravity) - jit_forward_gravity( - receivers, nodes, densities, fields, cell_nodes, kernel_func, const_factor - ) - - Parameters - ---------- - receivers : (n_receivers, 3) array - Array with the locations of the receivers - nodes : (n_active_nodes, 3) array - Array with the location of the mesh nodes. - densities : (n_active_cells) - Array with densities of each active cell in the mesh. - fields : (n_receivers) array - Array full of zeros where the gravity fields on each receiver will be - stored. This could be a preallocated array or a slice of it. - cell_nodes : (n_active_cells, 8) array - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. - kernel_func : callable - Kernel function that will be evaluated on each node of the mesh. Choose - one of the kernel functions in ``choclo.prism``. - constant_factor : float - Constant factor that will be used to multiply each element of the - ``fields`` array. 
- - Notes - ----- - The conversion factor is applied here to each element of fields because - it's more efficient than doing it afterwards: it would require to - index the elements that corresponds to each component. - """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vector for kernels evaluated on mesh nodes - kernels = np.empty(n_nodes) - for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kernels[j] = kernel_func(dx, dy, dz, distance) - # Compute fields from the kernel values - for k in range(n_cells): - fields[i] += ( - constant_factor - * densities[k] - * ( - -kernels[cell_nodes[k, 0]] - + kernels[cell_nodes[k, 1]] - + kernels[cell_nodes[k, 2]] - - kernels[cell_nodes[k, 3]] - + kernels[cell_nodes[k, 4]] - - kernels[cell_nodes[k, 5]] - - kernels[cell_nodes[k, 6]] - + kernels[cell_nodes[k, 7]] - ) - ) - - -def _fill_sensitivity_matrix( - receivers, - nodes, - sensitivity_matrix, - cell_nodes, - kernel_func, - constant_factor, -): - """ - Fill the sensitivity matrix - - This function should be used with a `numba.jit` decorator, for example: - - ..code:: - - from numba import jit - - jit_sensitivity = jit(nopython=True, parallel=True)(_forward_sensitivity_matrix) - jit_sensitivity( - receivers, nodes, densities, fields, cell_nodes, kernel_func, const_factor - ) - - Parameters - ---------- - receivers : (n_receivers, 3) array - Array with the locations of the receivers - nodes : (n_active_nodes, 3) array - Array with the location of the mesh nodes. - sensitivity_matrix : (n_receivers, n_active_nodes) array - Empty 2d array where the sensitivity matrix elements will be filled. - This could be a preallocated empty array or a slice of it. 
- cell_nodes : (n_active_cells, 8) array - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. - kernel_func : callable - Kernel function that will be evaluated on each node of the mesh. Choose - one of the kernel functions in ``choclo.prism``. - constant_factor : float - Constant factor that will be used to multiply each element of the - sensitivity matrix. - - Notes - ----- - The conversion factor is applied here to each row of the sensitivity matrix - because it's more efficient than doing it afterwards: it would require to - index the rows that corresponds to each component. - """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vector for kernels evaluated on mesh nodes - kernels = np.empty(n_nodes) - for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kernels[j] = kernel_func(dx, dy, dz, distance) - # Compute sensitivity matrix elements from the kernel values - for k in range(n_cells): - sensitivity_matrix[i, k] = constant_factor * ( - -kernels[cell_nodes[k, 0]] - + kernels[cell_nodes[k, 1]] - + kernels[cell_nodes[k, 2]] - - kernels[cell_nodes[k, 3]] - + kernels[cell_nodes[k, 4]] - - kernels[cell_nodes[k, 5]] - - kernels[cell_nodes[k, 6]] - + kernels[cell_nodes[k, 7]] - ) - - -# Define decorated versions of these functions -_fill_sensitivity_matrix_parallel = jit(nopython=True, parallel=True)( - _fill_sensitivity_matrix -) -_fill_sensitivity_matrix_serial = jit(nopython=True, parallel=False)( - _fill_sensitivity_matrix -) -_forward_gravity_parallel = jit(nopython=True, parallel=True)(_forward_gravity) -_forward_gravity_serial = jit(nopython=True, parallel=False)(_forward_gravity) - - def _get_conversion_factor(component): 
""" Return conversion factor for the given component @@ -300,10 +134,10 @@ def __init__( # Define jit functions if self.engine == "choclo": if choclo_parallel: - self._fill_sensitivity_matrix = _fill_sensitivity_matrix_parallel + self._sensitivity_gravity = _sensitivity_gravity_parallel self._forward_gravity = _forward_gravity_parallel else: - self._fill_sensitivity_matrix = _fill_sensitivity_matrix_serial + self._sensitivity_gravity = _sensitivity_gravity_serial self._forward_gravity = _forward_gravity_serial def _sanity_checks_engine(self, kwargs): @@ -604,7 +438,7 @@ def _sensitivity_matrix(self): matrix_slice = slice( index_offset + i, index_offset + n_rows, n_components ) - self._fill_sensitivity_matrix( + self._sensitivity_gravity( receivers, active_nodes, sensitivity_matrix[matrix_slice, :], @@ -629,7 +463,7 @@ def _get_cell_nodes(self): def _get_tensormesh_cell_nodes(self): """ - Quick implmentation of ``cell_nodes`` for a ``TensorMesh``. + Quick implementation of ``cell_nodes`` for a ``TensorMesh``. This method should be removed after ``TensorMesh.cell_nodes`` is added in discretize. 
From 5dd32eaa015ab524464f21f9fcf30508e189920c Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Mon, 23 Oct 2023 15:37:41 -0700 Subject: [PATCH 069/164] Implement forward for single magnetic component --- .../magnetics/_numba_functions.py | 109 ++++++++++++++++++ .../potential_fields/magnetics/simulation.py | 67 ++++++++--- 2 files changed, 158 insertions(+), 18 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/_numba_functions.py b/SimPEG/potential_fields/magnetics/_numba_functions.py index 009a8537f9..287773339a 100644 --- a/SimPEG/potential_fields/magnetics/_numba_functions.py +++ b/SimPEG/potential_fields/magnetics/_numba_functions.py @@ -228,6 +228,113 @@ def _sensitivity_tmi( ) +def _forward_mag( + receivers, + nodes, + model, + fields, + cell_nodes, + regional_field, + kernel_x, + kernel_y, + kernel_z, + constant_factor, + scalar_model, +): + """ + Forward model single magnetic component + + This function should be used with a `numba.jit` decorator, for example: + + ..code:: + + from numba import jit + + jit_forward = jit(nopython=True, parallel=True)(_forward_mag) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + model : (n_active_cells) or (3 * n_active_cells) array + Array containing the susceptibilities (scalar) or effective + susceptibilities (vector) of the active cells in the mesh, in SI + units. + Susceptibilities are expected if ``scalar_model`` is True, + and the array should have ``n_active_cells`` elements. + Effective susceptibilities are expected if ``scalar_model`` is False, + and the array should have ``3 * n_active_cells`` elements. + fields : (n_receivers) array + Array full of zeros where the magnetic component on each receiver will + be stored. This could be a preallocated array or a slice of it. 
+ cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_x, kernel_y, kernel_z : callable + Kernels used to compute the desired magnetic component. For example, + for computing bx we need to use ``kernel_x=kernel_ee``, + ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the forward will be computing assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the forward will be computing assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kx, ky, kz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kx[j] = kernel_x(dx, dy, dz, distance) + ky[j] = kernel_y(dx, dy, dz, distance) + kz[j] = kernel_z(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + ux = 
_kernels_in_nodes_to_cell(kx, nodes_indices) + uy = _kernels_in_nodes_to_cell(ky, nodes_indices) + uz = _kernels_in_nodes_to_cell(kz, nodes_indices) + if scalar_model: + fields[i] = ( + constant_factor + * model[k] + * regional_field_amplitude + * (ux * fx + uy * fy + uz * fz) + ) + else: + fields[i] += ( + constant_factor + * regional_field_amplitude + * ( + ux * model[k] + + uy * model[k + n_cells] + + uz * model[k + 2 * n_cells] + ) + ) + + def _forward_tmi( receivers, nodes, @@ -375,5 +482,7 @@ def _kernels_in_nodes_to_cell(kernels, nodes_indices): _sensitivity_tmi_parallel = jit(nopython=True, parallel=True)(_sensitivity_tmi) _forward_tmi_serial = jit(nopython=True, parallel=False)(_forward_tmi) _forward_tmi_parallel = jit(nopython=True, parallel=True)(_forward_tmi) +_forward_mag_serial = jit(nopython=True, parallel=False)(_forward_mag) +_forward_mag_parallel = jit(nopython=True, parallel=True)(_forward_mag) _sensitivity_mag_serial = jit(nopython=True, parallel=False)(_sensitivity_mag) _sensitivity_mag_parallel = jit(nopython=True, parallel=True)(_sensitivity_mag) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index e6a6801caf..7a36b4800d 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -29,10 +29,12 @@ choclo, _sensitivity_tmi_parallel, _sensitivity_tmi_serial, - _forward_tmi_parallel, - _forward_tmi_serial, _sensitivity_mag_parallel, _sensitivity_mag_serial, + _forward_tmi_parallel, + _forward_tmi_serial, + _forward_mag_parallel, + _forward_mag_serial, ) if choclo is not None: @@ -77,12 +79,14 @@ def __init__( if self.engine == "choclo": if choclo_parallel: self._sensitivity_tmi = _sensitivity_tmi_parallel - self._forward_tmi = _forward_tmi_parallel self._sensitivity_mag = _sensitivity_mag_parallel + self._forward_tmi = _forward_tmi_parallel + self._forward_mag = _forward_mag_parallel else: self._sensitivity_tmi = 
_sensitivity_tmi_serial - self._forward_tmi = _forward_tmi_serial self._sensitivity_mag = _sensitivity_mag_serial + self._forward_tmi = _forward_tmi_serial + self._forward_mag = _forward_mag_serial @property def model_type(self): @@ -511,23 +515,50 @@ def _forward(self, model): regional_field = self.survey.source_field.b0 # Allocate fields array fields = np.zeros(self.survey.nD, dtype=self.sensitivity_dtype) - # Start filling the sensitivity matrix + # Define the constant factor + constant_factor = 1 / 4 / np.pi + # Start computing the fields + index_offset = 0 + scalar_model = self.model_type == "scalar" for components, receivers in self._get_components_and_receivers(): - if components != ["tmi"]: + if not CHOCLO_SUPPORTED_COMPONENTS.issuperset(components): raise NotImplementedError( - "Other components besides 'tmi' aren't implemented yet." + f"Other components besides {CHOCLO_SUPPORTED_COMPONENTS} " + "aren't implemented yet." ) - constant_factor = 1 / 4 / np.pi - self._forward_tmi( - receivers, - active_nodes, - model, - fields, - active_cell_nodes, - regional_field, - constant_factor, - scalar_model=(self.model_type == "scalar"), - ) + n_components = len(components) + n_rows = n_components * receivers.shape[0] + for i, component in enumerate(components): + vector_slice = slice( + index_offset + i, index_offset + n_rows, n_components + ) + if component == "tmi": + self._forward_tmi( + receivers, + active_nodes, + model, + fields[vector_slice], + active_cell_nodes, + regional_field, + constant_factor, + scalar_model, + ) + else: + kernel_x, kernel_y, kernel_z = CHOCLO_KERNELS[component] + self._forward_mag( + receivers, + active_nodes, + model, + fields[vector_slice], + active_cell_nodes, + regional_field, + kernel_x, + kernel_y, + kernel_z, + constant_factor, + scalar_model, + ) + index_offset += n_rows return fields def _sensitivity_matrix(self): From 2fa6dbb8e4654ca9e26e1c5d3c94fa9c691536fb Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Mon, 23 Oct 2023 
15:38:21 -0700 Subject: [PATCH 070/164] Simplify docstring examples for numba functions --- .../magnetics/_numba_functions.py | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/_numba_functions.py b/SimPEG/potential_fields/magnetics/_numba_functions.py index 287773339a..9c68f33644 100644 --- a/SimPEG/potential_fields/magnetics/_numba_functions.py +++ b/SimPEG/potential_fields/magnetics/_numba_functions.py @@ -36,12 +36,7 @@ def _sensitivity_mag( from numba import jit - jit_sensitivity_mag = jit(nopython=True, parallel=True)( - _sensitivity_mag - ) - jit_sensitivity_mag( - receivers, nodes, matrix, cell_nodes, regional_field, constant_factor, True - ) + jit_sensitivity_mag = jit(nopython=True, parallel=True)(_sensitivity_mag) Parameters ---------- @@ -135,12 +130,7 @@ def _sensitivity_tmi( from numba import jit - jit_sensitivity_tmi = jit(nopython=True, parallel=True)( - _sensitivity_tmi - ) - jit_sensitivity_tmi( - receivers, nodes, matrix, cell_nodes, regional_field, constant_factor - ) + jit_sensitivity_tmi = jit(nopython=True, parallel=True)(_sensitivity_tmi) Parameters ---------- @@ -355,9 +345,6 @@ def _forward_tmi( from numba import jit jit_forward = jit(nopython=True, parallel=True)(_forward_tmi) - jit_forward( - receivers, nodes, mag_sus, fields, cell_nodes, regional_field, const_factor, scalar_model=True - ) Parameters ---------- From 654e150a45b4892f5e935c25ec0a19fc3529c7d1 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Mon, 23 Oct 2023 15:41:58 -0700 Subject: [PATCH 071/164] Document which kernels to use for each mag component --- .../magnetics/_numba_functions.py | 50 +++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/SimPEG/potential_fields/magnetics/_numba_functions.py b/SimPEG/potential_fields/magnetics/_numba_functions.py index 9c68f33644..9dc6a299e7 100644 --- a/SimPEG/potential_fields/magnetics/_numba_functions.py +++ 
b/SimPEG/potential_fields/magnetics/_numba_functions.py @@ -65,6 +65,31 @@ def _sensitivity_mag( (susceptibilities). If False, the sensitivity matrix is build to work with vector models (effective susceptibilities). + + Notes + ----- + For computing the ``bx`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_ee, kernel_y=kernel_en, kernel_z=kernel_eu + + + For computing the ``by`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_en, kernel_y=kernel_nn, kernel_z=kernel_nu + + For computing the ``bz`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_eu, kernel_y=kernel_nu, kernel_z=kernel_uu + """ n_receivers = receivers.shape[0] n_nodes = nodes.shape[0] @@ -277,6 +302,31 @@ def _forward_mag( susceptibilities (scalar model) for each active cell. If False, the forward will be computing assuming that the ``model`` has effective susceptibilities (vector model) for each active cell. + + Notes + ----- + For computing the ``bx`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_ee, kernel_y=kernel_en, kernel_z=kernel_eu + + + For computing the ``by`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_en, kernel_y=kernel_nn, kernel_z=kernel_nu + + For computing the ``bz`` component of the magnetic field we need to use the + following kernels: + + .. 
code:: + + kernel_x=kernel_eu, kernel_y=kernel_nu, kernel_z=kernel_uu + """ n_receivers = receivers.shape[0] n_nodes = nodes.shape[0] From a4210fe5139ba96c8267437353e687a3f8ee0ab6 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 24 Oct 2023 12:11:11 -0700 Subject: [PATCH 072/164] Fix comment in _forward --- SimPEG/potential_fields/gravity/simulation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index 2618e1607c..6d69048c2b 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -382,7 +382,7 @@ def _forward(self, densities): active_nodes, active_cell_nodes = self._get_active_nodes() # Allocate fields array fields = np.zeros(self.survey.nD, dtype=self.sensitivity_dtype) - # Start filling the sensitivity matrix + # Compute fields index_offset = 0 for components, receivers in self._get_components_and_receivers(): n_components = len(components) From d53b2b5b6f1bb314d0f6f51141b2d457adc301c8 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 24 Oct 2023 14:43:26 -0700 Subject: [PATCH 073/164] Create private function to avoid code duplication --- .../gravity/_numba_functions.py | 52 ++++++++++++------- 1 file changed, 33 insertions(+), 19 deletions(-) diff --git a/SimPEG/potential_fields/gravity/_numba_functions.py b/SimPEG/potential_fields/gravity/_numba_functions.py index 040a7308fb..a9a2acd735 100644 --- a/SimPEG/potential_fields/gravity/_numba_functions.py +++ b/SimPEG/potential_fields/gravity/_numba_functions.py @@ -83,16 +83,7 @@ def _forward_gravity( fields[i] += ( constant_factor * densities[k] - * ( - -kernels[cell_nodes[k, 0]] - + kernels[cell_nodes[k, 1]] - + kernels[cell_nodes[k, 2]] - - kernels[cell_nodes[k, 3]] - + kernels[cell_nodes[k, 4]] - - kernels[cell_nodes[k, 5]] - - kernels[cell_nodes[k, 6]] - + kernels[cell_nodes[k, 7]] - ) + * _kernels_in_nodes_to_cell(kernels, 
cell_nodes[k, :]) ) @@ -158,18 +149,41 @@ def _sensitivity_gravity( kernels[j] = kernel_func(dx, dy, dz, distance) # Compute sensitivity matrix elements from the kernel values for k in range(n_cells): - sensitivity_matrix[i, k] = constant_factor * ( - -kernels[cell_nodes[k, 0]] - + kernels[cell_nodes[k, 1]] - + kernels[cell_nodes[k, 2]] - - kernels[cell_nodes[k, 3]] - + kernels[cell_nodes[k, 4]] - - kernels[cell_nodes[k, 5]] - - kernels[cell_nodes[k, 6]] - + kernels[cell_nodes[k, 7]] + sensitivity_matrix[i, k] = constant_factor * _kernels_in_nodes_to_cell( + kernels, cell_nodes[k, :] ) +@jit(nopython=True) +def _kernels_in_nodes_to_cell(kernels, nodes_indices): + """ + Evaluate integral on a given cell from evaluation of kernels on nodes + + Parameters + ---------- + kernels : (n_active_nodes,) array + Array with kernel values on each one of the nodes in the mesh. + nodes_indices : (8,) array of int + Indices of the nodes for the current cell in "F" order (x changes + faster than y, and y faster than z). 
+ + Returns + ------- + float + """ + result = ( + -kernels[nodes_indices[0]] + + kernels[nodes_indices[1]] + + kernels[nodes_indices[2]] + - kernels[nodes_indices[3]] + + kernels[nodes_indices[4]] + - kernels[nodes_indices[5]] + - kernels[nodes_indices[6]] + + kernels[nodes_indices[7]] + ) + return result + + # Define decorated versions of these functions _sensitivity_gravity_parallel = jit(nopython=True, parallel=True)(_sensitivity_gravity) _sensitivity_gravity_serial = jit(nopython=True, parallel=False)(_sensitivity_gravity) From 2b80d405b80fcbb70201ae7fc60b77e43b1ebfdb Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 24 Oct 2023 15:04:20 -0700 Subject: [PATCH 074/164] Ask nodes indices as ints instead of arrays --- .../gravity/_numba_functions.py | 52 ++++++++++++++----- 1 file changed, 40 insertions(+), 12 deletions(-) diff --git a/SimPEG/potential_fields/gravity/_numba_functions.py b/SimPEG/potential_fields/gravity/_numba_functions.py index a9a2acd735..508a40b178 100644 --- a/SimPEG/potential_fields/gravity/_numba_functions.py +++ b/SimPEG/potential_fields/gravity/_numba_functions.py @@ -83,7 +83,17 @@ def _forward_gravity( fields[i] += ( constant_factor * densities[k] - * _kernels_in_nodes_to_cell(kernels, cell_nodes[k, :]) + * _kernels_in_nodes_to_cell( + kernels, + cell_nodes[k, 0], + cell_nodes[k, 1], + cell_nodes[k, 2], + cell_nodes[k, 3], + cell_nodes[k, 4], + cell_nodes[k, 5], + cell_nodes[k, 6], + cell_nodes[k, 7], + ) ) @@ -150,12 +160,30 @@ def _sensitivity_gravity( # Compute sensitivity matrix elements from the kernel values for k in range(n_cells): sensitivity_matrix[i, k] = constant_factor * _kernels_in_nodes_to_cell( - kernels, cell_nodes[k, :] + kernels, + cell_nodes[k, 0], + cell_nodes[k, 1], + cell_nodes[k, 2], + cell_nodes[k, 3], + cell_nodes[k, 4], + cell_nodes[k, 5], + cell_nodes[k, 6], + cell_nodes[k, 7], ) @jit(nopython=True) -def _kernels_in_nodes_to_cell(kernels, nodes_indices): +def _kernels_in_nodes_to_cell( + kernels, + 
nodes_indices_0, + nodes_indices_1, + nodes_indices_2, + nodes_indices_3, + nodes_indices_4, + nodes_indices_5, + nodes_indices_6, + nodes_indices_7, +): """ Evaluate integral on a given cell from evaluation of kernels on nodes @@ -163,7 +191,7 @@ def _kernels_in_nodes_to_cell(kernels, nodes_indices): ---------- kernels : (n_active_nodes,) array Array with kernel values on each one of the nodes in the mesh. - nodes_indices : (8,) array of int + nodes_indices : ints Indices of the nodes for the current cell in "F" order (x changes faster than y, and y faster than z). @@ -172,14 +200,14 @@ def _kernels_in_nodes_to_cell(kernels, nodes_indices): float """ result = ( - -kernels[nodes_indices[0]] - + kernels[nodes_indices[1]] - + kernels[nodes_indices[2]] - - kernels[nodes_indices[3]] - + kernels[nodes_indices[4]] - - kernels[nodes_indices[5]] - - kernels[nodes_indices[6]] - + kernels[nodes_indices[7]] + -kernels[nodes_indices_0] + + kernels[nodes_indices_1] + + kernels[nodes_indices_2] + - kernels[nodes_indices_3] + + kernels[nodes_indices_4] + - kernels[nodes_indices_5] + - kernels[nodes_indices_6] + + kernels[nodes_indices_7] ) return result From fd6c8ec2ac37bec4a921b001b84ffa968563c6ec Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 24 Oct 2023 15:04:56 -0700 Subject: [PATCH 075/164] Add private function to evaluate kernel Write a private function that evaluates a given kernel function for a single a node and a receiver to reduce code duplication. 
--- .../gravity/_numba_functions.py | 57 +++++++++++++++---- 1 file changed, 47 insertions(+), 10 deletions(-) diff --git a/SimPEG/potential_fields/gravity/_numba_functions.py b/SimPEG/potential_fields/gravity/_numba_functions.py index 508a40b178..077171dbae 100644 --- a/SimPEG/potential_fields/gravity/_numba_functions.py +++ b/SimPEG/potential_fields/gravity/_numba_functions.py @@ -73,11 +73,15 @@ def _forward_gravity( # Allocate vector for kernels evaluated on mesh nodes kernels = np.empty(n_nodes) for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kernels[j] = kernel_func(dx, dy, dz, distance) + kernels[j] = _evaluate_kernel( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + nodes[j, 0], + nodes[j, 1], + nodes[j, 2], + kernel_func, + ) # Compute fields from the kernel values for k in range(n_cells): fields[i] += ( @@ -152,11 +156,15 @@ def _sensitivity_gravity( # Allocate vector for kernels evaluated on mesh nodes kernels = np.empty(n_nodes) for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kernels[j] = kernel_func(dx, dy, dz, distance) + kernels[j] = _evaluate_kernel( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + nodes[j, 0], + nodes[j, 1], + nodes[j, 2], + kernel_func, + ) # Compute sensitivity matrix elements from the kernel values for k in range(n_cells): sensitivity_matrix[i, k] = constant_factor * _kernels_in_nodes_to_cell( @@ -172,6 +180,35 @@ def _sensitivity_gravity( ) +@jit(nopython=True) +def _evaluate_kernel( + receiver_x, receiver_y, receiver_z, node_x, node_y, node_z, kernel_func +): + """ + Evaluate a kernel function for a single node and receiver + + Parameters + ---------- + receiver_x, receiver_y, receiver_z : floats + Coordinates of the receiver. 
+ node_x, node_y, node_z : floats + Coordinates of the node. + kernel_func : callable + Kernel function that should be evaluated. For example, use one of the + kernel functions in ``choclo.prism``. + + Returns + ------- + float + Kernel evaluated on the given node and receiver. + """ + dx = node_x - receiver_x + dy = node_y - receiver_y + dz = node_z - receiver_z + distance = np.sqrt(dx**2 + dy**2 + dz**2) + return kernel_func(dx, dy, dz, distance) + + @jit(nopython=True) def _kernels_in_nodes_to_cell( kernels, From 7694b096d1c077ab4f6f0cfe022c2f2e9f3343d7 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 24 Oct 2023 15:28:38 -0700 Subject: [PATCH 076/164] Remove uninformative lines in docstrings --- SimPEG/potential_fields/gravity/_numba_functions.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/SimPEG/potential_fields/gravity/_numba_functions.py b/SimPEG/potential_fields/gravity/_numba_functions.py index 077171dbae..644071bd33 100644 --- a/SimPEG/potential_fields/gravity/_numba_functions.py +++ b/SimPEG/potential_fields/gravity/_numba_functions.py @@ -34,9 +34,6 @@ def _forward_gravity( from numba import jit jit_forward_gravity = jit(nopython=True, parallel=True)(_forward_gravity) - jit_forward_gravity( - receivers, nodes, densities, fields, cell_nodes, kernel_func, const_factor - ) Parameters ---------- @@ -119,9 +116,6 @@ def _sensitivity_gravity( from numba import jit jit_sensitivity = jit(nopython=True, parallel=True)(_sensitivity_gravity) - jit_sensitivity( - receivers, nodes, densities, fields, cell_nodes, kernel_func, const_factor - ) Parameters ---------- From a8e51487b7b3a18a93eeff1c2dafc04ba399482b Mon Sep 17 00:00:00 2001 From: dccowan Date: Wed, 25 Oct 2023 11:18:54 -0700 Subject: [PATCH 077/164] Add inject active faces and active edges mappings --- SimPEG/__init__.py | 1 + SimPEG/maps.py | 187 ++++++++++++++++++++++++++++++++++++++-- tests/base/test_maps.py | 12 +++ 3 files changed, 193 insertions(+), 7 deletions(-) diff --git 
a/SimPEG/__init__.py b/SimPEG/__init__.py index d185e6559b..ff20906fb9 100644 --- a/SimPEG/__init__.py +++ b/SimPEG/__init__.py @@ -77,6 +77,7 @@ maps.IdentityMap maps.InjectActiveCells maps.InjectActiveFaces + maps.InjectActiveEdges maps.MuRelative maps.LogMap maps.ParametricBlock diff --git a/SimPEG/maps.py b/SimPEG/maps.py index 90cbf1c54d..0429270da6 100644 --- a/SimPEG/maps.py +++ b/SimPEG/maps.py @@ -3330,7 +3330,7 @@ class InjectActiveFaces(IdentityMap): where :math:`\mathbf{P}` is a (*nF* , *nP*) projection matrix from active faces to all mesh faces, and :math:`\mathbf{d}` is a (*nF* , 1) matrix that projects the inactive faces value - :math:`m_\perp` to all inactive mesh cells. + :math:`m_\perp` to all mesh faces. Parameters ---------- @@ -3409,7 +3409,7 @@ def nP(self): Returns ------- int - Number of parameters the model acts on; i.e. the number of active cells + Number of parameters the model acts on; i.e. the number of active faces. """ return int(self.indActive.sum()) @@ -3431,8 +3431,8 @@ def inverse(self, u): where :math:`\mathbf{P}` is a (*nF* , *nP*) projection matrix from active faces to all mesh faces, and :math:`\mathbf{d}` is a - (*nC* , 1) matrix that projects the inactive cell value - :math:`m_\perp` to all inactive mesh cells. + (*nR* , 1) matrix that projects the inactive face value + :math:`m_\perp` to all mesh faces. The inverse mapping is given by: @@ -3451,7 +3451,7 @@ def deriv(self, m, v=None): r"""Derivative of the mapping with respect to the input parameters. For a discrete set of model parameters :math:`\mathbf{m}` defined - on a set of active facees, the mapping :math:`\mathbf{u}(\mathbf{m})` + on a set of active faces, the mapping :math:`\mathbf{u}(\mathbf{m})` is defined as: .. math:: @@ -3473,9 +3473,182 @@ def deriv(self, m, v=None): Parameters ---------- m : (nP) numpy.ndarray - A vector representing a set of model parameters + A vector representing a set of model parameters. 
v : (nP) numpy.ndarray - If not ``None``, the method returns the derivative times the vector *v* + If not ``None``, the method returns the derivative times the vector *v*. + + Returns + ------- + scipy.sparse.csr_matrix + Derivative of the mapping with respect to the model parameters. If the + input argument *v* is not ``None``, the method returns the derivative times + the vector *v*. + """ + if v is not None: + return self.P * v + return self.P + +class InjectActiveEdges(IdentityMap): + r"""Map active edges model to all edges of a mesh. + + The ``InjectActiveEdges`` class is used to define the mapping when + the model consists of diagnostic property values defined on a set of active + mesh edges; e.g. edges below topography, z-edges only. For a discrete set of + model parameters :math:`\mathbf{m}` defined on a set of active + edges, the mapping :math:`\mathbf{u}(\mathbf{m})` is defined as: + + .. math:: + \mathbf{u}(\mathbf{m}) = \mathbf{Pm} + \mathbf{d}\, m_\perp + + where :math:`\mathbf{P}` is a (*nE* , *nP*) projection matrix from + active edges to all mesh edges, and :math:`\mathbf{d}` is a + (*nE* , 1) matrix that projects the inactive edges value + :math:`m_\perp` to all mesh edges. + + Parameters + ---------- + mesh : discretize.BaseMesh + A discretize mesh + indActive : numpy.ndarray + Active edges array. Can be a boolean ``numpy.ndarray`` of length *mesh.nE* + or a ``numpy.ndarray`` of ``int`` containing the indices of the active edges. + valInactive : float or numpy.ndarray + The physical property value assigned to all inactive edges in the mesh. 
+ + """ + + def __init__(self, mesh, indActive=None, valInactive=0.0, nF=None): + self.mesh = mesh + self.nF = nF or mesh.nF + + self._indActive = validate_active_indices("indActive", indActive, self.nF) + self._nP = np.sum(self.indActive) + + self.P = sp.eye(self.nF, format="csr")[:, self.indActive] + + self.valInactive = valInactive + + @property + def valInactive(self): + """The physical property value assigned to all inactive edges in the mesh. + + Returns + ------- + numpy.ndarray + """ + return self._valInactive + + @valInactive.setter + def valInactive(self, value): + n_inactive = self.nF - self.nP + try: + value = validate_float("valInactive", value) + value = np.full(n_inactive, value) + except Exception: + pass + value = validate_ndarray_with_shape("valInactive", value, shape=(n_inactive,)) + + self._valInactive = np.zeros(self.nF, dtype=float) + self._valInactive[~self.indActive] = value + + @property + def indActive(self): + """ + + Returns + ------- + numpy.ndarray of bool. + + """ + return self._indActive + + @property + def shape(self): + """Dimensions of the mapping + + Returns + ------- + tuple of int + Where *nP* is the number of active edges and *nE* is + number of edges in the mesh, **shape** returns a + tuple (*nE* , *nP*). + """ + return (self.nF, self.nP) + + @property + def nP(self): + """Number of parameters the model acts on. + + Returns + ------- + int + Number of parameters the model acts on; i.e. the number of active edges. + """ + return int(self.indActive.sum()) + + def _transform(self, m): + if m.ndim > 1: + return self.P * m + self.valInactive[:, None] + return self.P * m + self.valInactive + + def inverse(self, u): + r"""Recover the model parameters (active edges) from a set of physical + property values defined on the entire mesh. + + For a discrete set of model parameters :math:`\mathbf{m}` defined + on a set of active edges, the mapping :math:`\mathbf{u}(\mathbf{m})` + is defined as: + + .. 
math:: + \mathbf{u}(\mathbf{m}) = \mathbf{Pm} + \mathbf{d} \,m_\perp + + where :math:`\mathbf{P}` is a (*nE* , *nP*) projection matrix from + active edges to all mesh edges, and :math:`\mathbf{d}` is a + (*nE* , 1) matrix that projects the inactive edge value + :math:`m_\perp` to all mesh edges. + + The inverse mapping is given by: + + .. math:: + \mathbf{m}(\mathbf{u}) = \mathbf{P^T u} + + Parameters + ---------- + u : (mesh.nE) numpy.ndarray + A vector which contains physical property values for all + mesh edges. + """ + return self.P.T * u + + def deriv(self, m, v=None): + r"""Derivative of the mapping with respect to the input parameters. + + For a discrete set of model parameters :math:`\mathbf{m}` defined + on a set of active edges, the mapping :math:`\mathbf{u}(\mathbf{m})` + is defined as: + + .. math:: + \mathbf{u}(\mathbf{m}) = \mathbf{Pm} + \mathbf{d} \, m_\perp + + where :math:`\mathbf{P}` is a (*nE* , *nP*) projection matrix from + active edges to all mesh edges, and :math:`\mathbf{d}` is a + (*nF* , 1) matrix that projects the inactive edge value + :math:`m_\perp` to all mesh edges. + + the **deriv** method returns the derivative of :math:`\mathbf{u}` with respect + to the model parameters; i.e.: + + .. math:: + \frac{\partial \mathbf{u}}{\partial \mathbf{m}} = \mathbf{P} + + Note that in this case, **deriv** simply returns a sparse projection matrix. + + Parameters + ---------- + m : (nP) numpy.ndarray + A vector representing a set of model parameters. + v : (nP) numpy.ndarray + If not ``None``, the method returns the derivative times the vector *v*. 
Returns ------- diff --git a/tests/base/test_maps.py b/tests/base/test_maps.py index 9f6c8aaec3..ad6362c84d 100644 --- a/tests/base/test_maps.py +++ b/tests/base/test_maps.py @@ -25,6 +25,8 @@ "ComboMap", "ActiveCells", "InjectActiveCells", + "InjectActiveFaces", + "InjectActiveEdges", "LogMap", "LinearMap", "ReciprocalMap", @@ -52,6 +54,8 @@ "ComboMap", "ActiveCells", "InjectActiveCells", + "InjectActiveFaces", + "InjectActiveEdges", "LogMap", "LinearMap", "ReciprocalMap", @@ -694,6 +698,14 @@ def test_linearity(): mesh3, mesh3.cell_centers[:, -1] < 0.75, ), + maps.InjectActiveFaces( + mesh3, + mesh3.faces[:, -1] < 0.75, + ), + maps.InjectActiveEdges( + mesh3, + mesh3.edges[:, -1] < 0.75, + ), maps.TileMap( mesh_tree, mesh_tree.cell_centers[:, -1] < 0.75, From 3326b8e88825714d8c694bf8806c96482ed07d96 Mon Sep 17 00:00:00 2001 From: dccowan Date: Wed, 25 Oct 2023 20:45:44 -0700 Subject: [PATCH 078/164] First pass at all necessary pieces for FDEM and TDEM forward modeling with face and edge conductivities --- SimPEG/base/__init__.py | 2 +- SimPEG/base/pde_simulation.py | 55 ++-- .../frequency_domain/fields.py | 140 ++++++++++ .../frequency_domain/simulation.py | 261 ++++++++++++++++++ SimPEG/electromagnetics/time_domain/fields.py | 225 +++++++-------- .../time_domain/simulation.py | 259 +++++++++++------ 6 files changed, 707 insertions(+), 235 deletions(-) diff --git a/SimPEG/base/__init__.py b/SimPEG/base/__init__.py index bc2cbaf8f3..49a2a81b06 100644 --- a/SimPEG/base/__init__.py +++ b/SimPEG/base/__init__.py @@ -2,7 +2,7 @@ BasePDESimulation, BaseElectricalPDESimulation, BaseMagneticPDESimulation, - BaseConductancePDESimulation, + BaseFaceEdgeElectricalPDESimulation, with_property_mass_matrices, with_surface_property_mass_matrices, with_line_property_mass_matrices, diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index 111a16a9c3..00aca4602b 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -831,18 +831,13 
@@ def deleteTheseOnModelUpdate(self): @with_surface_property_mass_matrices("tau") @with_line_property_mass_matrices("kappa") -@with_line_property_mass_matrices("kappai") -class BaseConductancePDESimulation(BaseElectricalPDESimulation): +class BaseFaceEdgeElectricalPDESimulation(BaseElectricalPDESimulation): tau, tauMap, tauDeriv = props.Invertible( - "Electrical Conductance (S)", + "Electrical conductivity times thickness (S); i.e. conductance", ) kappa, kappaMap, kappaDeriv = props.Invertible( - "Electrical Conductance integrated over length (Sm)", + "Electrical conductivity times cross-sectional area (Sm)", ) - kappai, kappaiMap, kappaiDeriv = props.Invertible( - "Electrical Resistance per meter (Ohm/m)", - ) - props.Reciprocal(kappa, kappai) def __init__( self, @@ -851,12 +846,10 @@ def __init__( sigmaMap=None, rho=None, rhoMap=None, - tau=None, + tau=0., tauMap=None, - kappa=0.0, + kappa=0., kappaMap=None, - kappai=None, - kappaiMap=None, **kwargs, ): super().__init__(mesh=mesh, **kwargs) @@ -865,21 +858,18 @@ def __init__( self.sigmaMap = sigmaMap self.rhoMap = rhoMap self.tau = tau - self.kappa = kappa - self.kappai = kappai self.tauMap = tauMap + self.kappa = kappa self.kappaMap = kappaMap - self.kappaiMap = kappaiMap def __setattr__(self, name, value): super().__setattr__(name, value) - if name in ["sigma", "rho", "tau", "kappa", "kappai"]: + if name in ["sigma", "rho", "tau", "kappa"]: mat_list = ( self._clear_on_sigma_update + self._clear_on_rho_update + self._clear_on_tau_update + self._clear_on_kappa_update - + self._clear_on_kappai_update + ["__MeSigmaTauKappa", "__MeSigmaTauKappaI"] ) for mat in mat_list: @@ -900,21 +890,36 @@ def _MeSigmaTauKappaI(self): setattr(self, "__MeSigmaTauKappaI", M_prop) return getattr(self, "__MeSigmaTauKappaI") - def _MeSigmaTauKappaDeriv(self, u, v=None, adjoint=False): - """Only derivative wrt to tau at the moment""" + def _MeSigmaTauKappaDeriv_sigma(self, u, v=None, adjoint=False): + """Only derivative wrt to sigma""" 
+ return self.MeSigmaDeriv(u, v, adjoint) + + def _MeSigmaTauKappaDeriv_tau(self, u, v=None, adjoint=False): + """Only derivative wrt tau""" return self._MeTauDeriv(u, v, adjoint) - def _MeSigmaTauKappaIDeriv(self, u, v=None, adjoint=False): - """Only derivative wrt to tau at the moment""" - if getattr(self, "tauMap") is None: - return Zero() - if isinstance(u, Zero) or isinstance(v, Zero): - return Zero() + def _MeSigmaTauKappaDeriv_kappa(self, u, v=None, adjoint=False): + """Only derivative wrt to kappa""" + return self._MeKappaDeriv(u, v, adjoint) + def _MeSigmaTauKappaIDeriv_sigma(self, u, v=None, adjoint=False): + """Only derivative wrt to tau""" + MI_prop = self._MeSigmaTauKappaI + u = MI_prop @ (MI_prop @ -u) + return self.MeSigmaDeriv(u, v, adjoint) + + def _MeSigmaTauKappaIDeriv_tau(self, u, v=None, adjoint=False): + """Only derivative wrt to tau""" MI_prop = self._MeSigmaTauKappaI u = MI_prop @ (MI_prop @ -u) return self._MeTauDeriv(u, v, adjoint) + def _MeSigmaTauKappaIDeriv_kappa(self, u, v=None, adjoint=False): + """Only derivative wrt to tau""" + MI_prop = self._MeSigmaTauKappaI + u = MI_prop @ (MI_prop @ -u) + return self._MeKappaDeriv(u, v, adjoint) + @property def deleteTheseOnModelUpdate(self): """ diff --git a/SimPEG/electromagnetics/frequency_domain/fields.py b/SimPEG/electromagnetics/frequency_domain/fields.py index 06900f5dc3..753bfab0c2 100644 --- a/SimPEG/electromagnetics/frequency_domain/fields.py +++ b/SimPEG/electromagnetics/frequency_domain/fields.py @@ -622,6 +622,45 @@ def _charge_density(self, eSolution, source_list): ) / self.mesh.cell_volumes[:, None] +class Fields3DElectricFieldEdgeFaceConductivity(Fields3DElectricField): + + def _j(self, eSolution, source_list): + """ + Current density from eSolution + + :param numpy.ndarray eSolution: field we solved for + :param list source_list: list of sources + :rtype: numpy.ndarray + :return: current density + """ + return self._MeI * (self.__MeSigmaTauKappa * self._e(eSolution, 
source_list)) + + def _jDeriv_u(self, src, du_dm_v, adjoint=False): + """ + Derivative of the current density with respect to the thing we solved + for + + :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source + :param numpy.ndarray du_dm_v: vector to take product with + :param bool adjoint: adjoint? + :rtype: numpy.ndarray + :return: product of the derivative of the current density with respect + to the field we solved for with a vector + """ + if adjoint: + return self._eDeriv_u( + src, self.__MeSigmaTauKappa.T * (self._MeI.T * du_dm_v), adjoint=adjoint + ) + return self._MeI * ( + self.__MeSigmaTauKappa * (self._eDeriv_u(src, du_dm_v, adjoint=adjoint)) + ) + + def _jDeriv_m(self, src, v, adjoint=False): + raise NotImplementedError ( + "derivative wrt to model not implemented." + ) + + class Fields3DMagneticFluxDensity(FieldsFDEM): """ Fields object for Simulation3DMagneticFluxDensity. @@ -952,6 +991,107 @@ def _charge_density(self, bSolution, source_list): ) / self.mesh.cell_volumes[:, None] +class Fields3DMagneticFluxDensityEdgeFaceConductivity(Fields3DMagneticFluxDensity): + """ + Fields object for Simulation3DMagneticFluxDensity. 
+ + :param discretize.base.BaseMesh mesh: mesh + :param SimPEG.electromagnetics.frequency_domain.SurveyFDEM.Survey survey: survey + """ + + def startup(self): + self._edgeCurl = self.simulation.mesh.edge_curl + self._MeSigma = self.simulation.MeSigma + self._MeSigmaI = self.simulation.MeSigmaI + self.__MeTau = self._MeTau + self.__MeKappa = self._MeKappa + self._MfMui = self.simulation.MfMui + self._MfMuiDeriv = self.simulation.MfMuiDeriv + self._MeSigmaDeriv = self.simulation.MeSigmaDeriv + self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv + self.__MeSigmaTauKappa = self._MeSigmaTauKappa + self.__MeSigmaTauKappaI = self._MeSigmaTauKappaI + self._Me = self.simulation.Me + self._aveF2CCV = self.simulation.mesh.aveF2CCV + self._aveE2CCV = self.simulation.mesh.aveE2CCV + self._sigma = self.simulation.sigma + self._mui = self.simulation.mui + self._nC = self.simulation.mesh.nC + self._MeI = self.simulation.MeI + self._MfI = self.simulation.MfI + + + def _eSecondary(self, bSolution, source_list): + """ + Secondary electric field from bSolution + + :param numpy.ndarray bSolution: field we solved for + :param list source_list: list of sources + :rtype: numpy.ndarray + :return: secondary electric field + """ + + e = self._edgeCurl.T * (self._MfMui * bSolution) + for i, src in enumerate(source_list): + s_e = src.s_e(self.simulation) + e[:, i] = e[:, i] + -s_e + + if self.simulation.permittivity is not None: + MeyhatI = self.simulation._get_edge_admittivity_property_matrix( + src.frequency, invert_matrix=True + ) + self.__MeTau + self.__MeKappa + e[:, i] = MeyhatI * e[:, i] + + if self.simulation.permittivity is None: + return self._MeSigmaTauKappaI * e + else: + return e + + def _eDeriv_u(self, src, du_dm_v, adjoint=False): + """ + Derivative of the electric field with respect to the thing we solved + for + + :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source + :param numpy.ndarray v: vector to take product with + :param bool adjoint: adjoint? 
+ :rtype: numpy.ndarray + :return: product of the derivative of the electric field with respect + to the field we solved for with a vector + """ + + if not adjoint: + return self.__MeSigmaTauKappaI * (self._edgeCurl.T * (self._MfMui * du_dm_v)) + return self._MfMui.T * (self._edgeCurl * (self.__MeSigmaTauKappaI.T * du_dm_v)) + + # NEED TO ADD THIS + def _eDeriv_m(self, src, v, adjoint=False): + raise NotImplementedError ( + "Derivative wrt model not implemented yet." + ) + + def _j(self, bSolution, source_list): + """ + Secondary current density from bSolution + + :param numpy.ndarray bSolution: field we solved for + :param list source_list: list of sources + :rtype: numpy.ndarray + :return: primary current density + """ + + if self.simulation.permittivity is None: + j = self._edgeCurl.T * (self._MfMui * bSolution) + + for i, src in enumerate(source_list): + s_e = src.s_e(self.simulation) + j[:, i] = j[:, i] - s_e + + return self._MeI * j + else: + return self._MeI * (self.__MeSigmaTauKappa * self._e(bSolution, source_list)) + + class Fields3DCurrentDensity(FieldsFDEM): """ Fields object for Simulation3DCurrentDensity. diff --git a/SimPEG/electromagnetics/frequency_domain/simulation.py b/SimPEG/electromagnetics/frequency_domain/simulation.py index 50b138cf9e..d4f101ce82 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation.py @@ -5,13 +5,16 @@ from ... 
import props from ...data import Data from ...utils import mkvc, validate_type +from ...base import BaseFaceEdgeElectricalPDESimulation from ..base import BaseEMSimulation from ..utils import omega from .survey import Survey from .fields import ( FieldsFDEM, Fields3DElectricField, + Fields3DElectricFieldFaceEdgeConductivity, Fields3DMagneticFluxDensity, + Fields3DMagneticFluxDensityFaceEdgeConductivity, Fields3DMagneticField, Fields3DCurrentDensity, ) @@ -426,6 +429,105 @@ def getRHSDeriv(self, freq, src, v, adjoint=False): ) * s_eDeriv(v) +class Simulation3DElectricFieldFaceEdgeConductivity( + Simulation3DElectricField, BaseFaceEdgeElectricalPDESimulation +): + fieldsPair = Fields3DElectricFieldFaceEdgeConductivity + + def __init__(self, mesh, survey=None, **kwargs): + super().__init__(mesh=mesh, survey=survey, **kwargs) + + if self.sigmaMap is not None or self.rhoMap is not None: + raise NotImplementedError( + "Conductivity (sigma) and resistivity (rho) are not invertible properties for the " + "Simulation3DElectricFieldFaceEdgeConductivity class. The mapping for the " + "invertible property is 'tauMap'." + ) + + if self.kappaMap is not None: + raise NotImplementedError( + "Conductance times length (kappa) is not an invertible property, yet." + ) + + if self.kappaiMap is not None: + raise NotImplementedError( + "Resistance per unit length (kappai) is not an invertible property, yet." + ) + + def getA(self, freq): + + MfMui = self.MfMui + C = self.mesh.edge_curl + + if self.permittivity is None: + MeSigmaTauKappa = self._MeSigmaTauKappa + A = C.T.tocsr() * MfMui * C + 1j * omega(freq) * MeSigmaTauKappa + else: + Meyhat = ( + self._get_edge_admittivity_property_matrix(freq) + + self._MeKappa + + self._MeTau + ) + A = C.T.tocsr() * MfMui * C + 1j * omega(freq) * Meyhat + + return A + + def getADeriv_tau(self, freq, u, v, adjoint=False): + r""" + Product of the derivative of our system matrix with respect to the + conductivity model and a vector + + .. 
math ::
+
+        \frac{\mathbf{A}(\mathbf{m}) \mathbf{v}}{d \mathbf{m}_{\tau}} =
+        i \omega \frac{d \mathbf{M^e_{\tau}}(\mathbf{u})\mathbf{v} }{d\mathbf{m}}
+
+        :param float freq: frequency
+        :param numpy.ndarray u: solution vector (nE,)
+        :param numpy.ndarray v: vector to take product with (nP,) or (nD,) for
+            adjoint
+        :param bool adjoint: adjoint?
+        :rtype: numpy.ndarray
+        :return: derivative of the system matrix times a vector (nP,) or
+            adjoint (nD,)
+        """
+
+        dMe_dtau_v = self._MeTauDeriv(u, v, adjoint)
+        return 1j * omega(freq) * dMe_dtau_v
+
+    def getADeriv_kappa(self, freq, u, v, adjoint=False):
+        r"""
+        Product of the derivative of our system matrix with respect to the
+        kappa (conductance times length) model and a vector
+
+        .. math ::
+
+        \frac{\mathbf{A}(\mathbf{m}) \mathbf{v}}{d \mathbf{m}_{\kappa}} =
+        i \omega \frac{d \mathbf{M^e_{\kappa}}(\mathbf{u})\mathbf{v} }{d\mathbf{m}}
+
+        :param float freq: frequency
+        :param numpy.ndarray u: solution vector (nE,)
+        :param numpy.ndarray v: vector to take product with (nP,) or (nD,) for
+            adjoint
+        :param bool adjoint: adjoint?
+ :rtype: numpy.ndarray + :return: derivative of the system matrix times a vector (nP,) or + adjoint (nD,) + """ + + dMe_dkappa_v = self._MeKappaDeriv(u, v, adjoint) + return 1j * omega(freq) * dMe_dkappa_v + + def getADeriv(self, freq, u, v, adjoint=False): + return ( + self.getADeriv_sigma(freq, u, v, adjoint) + + self.getADeriv_tau(freq, u, v, adjoint) + + self.getADeriv_kappa(freq, u, v, adjoint) + + self.getADeriv_mui(freq, u, v, adjoint) + # + self.getADeriv_permittivity(freq, u, v, adjoint) + ) + + class Simulation3DMagneticFluxDensity(BaseFDEMSimulation): r""" We eliminate :math:`\mathbf{e}` using @@ -607,6 +709,165 @@ def getRHSDeriv(self, freq, src, v, adjoint=False): return RHSderiv + SrcDeriv +class Simulation3DMagneticFluxDensity( + Simulation3DMagneticFluxDensity, BaseFaceEdgeElectricalPDESimulation +): + fieldsPair = Fields3DMagneticFluxDensityFaceEdgeConductivity + + def getA(self, freq): + r""" + System matrix + + .. math :: + + \mathbf{A} = \mathbf{C} \mathbf{M^e_{\sigma}}^{-1} + \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f} + i \omega + + :param float freq: Frequency + :rtype: scipy.sparse.csr_matrix + :return: A + """ + + MfMui = self.MfMui + C = self.mesh.edge_curl + iomega = 1j * omega(freq) * sp.eye(self.mesh.nF) + + if self.permittivity is None: + MeSigmaTauKappaI = self._MeSigmaTauKappaI + A = C * (MeSigmaTauKappaI * (C.T.tocsr() * MfMui)) + iomega + else: + MeyhatI = self._get_edge_admittivity_property_matrix( + freq, invert_matrix=True + ) + A = C * (MeyhatI * (C.T.tocsr() * MfMui)) + iomega + + if self._makeASymmetric: + return MfMui.T.tocsr() * A + return A + + def getADeriv_tau(self, freq, u, v, adjoint=False): + r""" + Product of the derivative of our system matrix with respect to the + model and a vector + + .. 
math :: + + \frac{\mathbf{A}(\mathbf{m}) \mathbf{v}}{d \mathbf{m}} = + \mathbf{C} \frac{\mathbf{M^e_{\sigma}} \mathbf{v}}{d\mathbf{m}} + + :param float freq: frequency + :param numpy.ndarray u: solution vector (nF,) + :param numpy.ndarray v: vector to take prodct with (nP,) or (nD,) for + adjoint + :param bool adjoint: adjoint? + :rtype: numpy.ndarray + :return: derivative of the system matrix times a vector (nP,) or + adjoint (nD,) + """ + + MfMui = self.MfMui + C = self.mesh.edge_curl + MeSigmaTauKappaIDeriv = self._MeSigmaTauKappaIDeriv + vec = C.T * (MfMui * u) + + if adjoint: + return MeSigmaTauKappaIDeriv(vec, C.T * v, adjoint) + return C * MeSigmaTauKappaIDeriv(vec, v, adjoint) + + # if adjoint: + # return MeSigmaIDeriv.T * (C.T * v) + # return C * (MeSigmaIDeriv * v) + + def getADeriv_mui(self, freq, u, v, adjoint=False): + MfMuiDeriv = self.MfMuiDeriv(u) + MeSigmaTauKappaI = self._MeSigmaTauKappaI + C = self.mesh.edge_curl + + if adjoint: + return MfMuiDeriv.T * (C * (MeSigmaTauKappaI.T * (C.T * v))) + return C * (MeSigmaTauKappaI * (C.T * (MfMuiDeriv * v))) + + def getADeriv(self, freq, u, v, adjoint=False): + if adjoint is True and self._makeASymmetric: + v = self.MfMui * v + + ADeriv = self.getADeriv_sigma(freq, u, v, adjoint) + self.getADeriv_mui( + freq, u, v, adjoint + ) + + if adjoint is False and self._makeASymmetric: + return self.MfMui.T * ADeriv + + return ADeriv + + def getRHS(self, freq): + r""" + Right hand side for the system + + .. 
math ::
+
+            \mathbf{RHS} = \mathbf{s_m} +
+            \mathbf{M^e_{\sigma}}^{-1}\mathbf{s_e}
+
+        :param float freq: Frequency
+        :rtype: numpy.ndarray
+        :return: RHS (nE, nSrc)
+        """
+
+        s_m, s_e = self.getSourceTerm(freq)
+        C = self.mesh.edge_curl
+
+        if self.permittivity is None:
+            MeSigmaTauKappaI = self._MeSigmaTauKappaI
+            RHS = s_m + C * (MeSigmaTauKappaI * s_e)
+        else:
+            MeyhatI = self._get_edge_admittivity_property_matrix(
+                freq, invert_matrix=True
+            )
+            RHS = s_m + C * (MeyhatI * s_e)
+
+        if self._makeASymmetric is True:
+            MfMui = self.MfMui
+            return MfMui.T * RHS
+
+        return RHS
+
+    def getRHSDeriv(self, freq, src, v, adjoint=False):
+        """
+        Derivative of the right hand side with respect to the model
+
+        :param float freq: frequency
+        :param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM src: FDEM source
+        :param numpy.ndarray v: vector to take product with
+        :param bool adjoint: adjoint?
+        :rtype: numpy.ndarray
+        :return: product of rhs deriv with a vector
+        """
+
+        C = self.mesh.edge_curl
+        s_m, s_e = src.eval(self)
+        MfMui = self.MfMui
+
+        if self._makeASymmetric and adjoint:
+            v = self.MfMui * v
+
+        # MeSigmaIDeriv = self.MeSigmaIDeriv(s_e)
+        s_mDeriv, s_eDeriv = src.evalDeriv(self, adjoint=adjoint)
+
+        if not adjoint:
+            # RHSderiv = C * (MeSigmaIDeriv * v)
+            RHSderiv = C * self.MeSigmaIDeriv(s_e, v, adjoint)
+            SrcDeriv = s_mDeriv(v) + C * (self.MeSigmaI * s_eDeriv(v))
+        elif adjoint:
+            # RHSderiv = MeSigmaIDeriv.T * (C.T * v)
+            RHSderiv = self.MeSigmaIDeriv(s_e, C.T * v, adjoint)
+            SrcDeriv = s_mDeriv(v) + s_eDeriv(self.MeSigmaI.T * (C.T * v))
+
+        if self._makeASymmetric is True and not adjoint:
+            return MfMui.T * (SrcDeriv + RHSderiv)
+
+        return RHSderiv + SrcDeriv
+
 ###############################################################################
 #                               H-J Formulation                               #
diff --git a/SimPEG/electromagnetics/time_domain/fields.py b/SimPEG/electromagnetics/time_domain/fields.py
index d1c0dc41c2..1aff43f8bf 100644
--- a/SimPEG/electromagnetics/time_domain/fields.py
+++ b/SimPEG/electromagnetics/time_domain/fields.py @@ -272,7 +272,7 @@ def _dhdtDeriv_m(self, tInd, src, v, adjoint=False): return self.simulation.MfI * (self._MfMui * self._dbdtDeriv_m(tInd, src, v)) -class Fields3DMagneticFluxDensityConductance(Fields3DMagneticFluxDensity): +class Fields3DMagneticFluxDensityFaceEdgeConductivity(Fields3DMagneticFluxDensity): """Field Storage for a TDEM simulation.""" knownFields = {"bSolution": "F"} @@ -287,76 +287,69 @@ class Fields3DMagneticFluxDensityConductance(Fields3DMagneticFluxDensity): def startup(self): self._times = self.simulation.times - self._MeSigma = self.simulation.MeSigma + # self._MeSigma = self.simulation.MeSigma # self._MeSigmaI = self.simulation.MeSigmaI - self._MeSigmaDeriv = self.simulation.MeSigmaDeriv + # self._MeSigmaDeriv = self.simulation.MeSigmaDeriv # self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv + self.__MeSigmaTauKappa = self.simulation._MeSigmaTauKappa + self.__MeSigmaTauKappaI = self.simulation._MeSigmaTauKappaI self._edgeCurl = self.simulation.mesh.edge_curl self._MfMui = self.simulation.MfMui self._timeMesh = self.simulation.time_mesh - def _TLoc(self, fieldType): - return "N" + # Inherited + # def _TLoc(self, fieldType): + # return "N" - def _b(self, bSolution, source_list, tInd): - return bSolution + # def _b(self, bSolution, source_list, tInd): + # return bSolution - def _bDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): - return dun_dm_v + # def _bDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): + # return dun_dm_v - def _bDeriv_m(self, tInd, src, v, adjoint=False): - return Zero() + # def _bDeriv_m(self, tInd, src, v, adjoint=False): + # return Zero() - def _dbdt(self, bSolution, source_list, tInd): - # self._timeMesh.face_divergence - dbdt = -self._edgeCurl * self._e(bSolution, source_list, tInd) - for i, src in enumerate(source_list): - s_m = src.s_m(self.simulation, self._times[tInd]) - dbdt[:, i] = dbdt[:, i] + s_m - return dbdt + # def _dbdt(self, bSolution, source_list, 
tInd): + # # self._timeMesh.face_divergence + # dbdt = -self._edgeCurl * self._e(bSolution, source_list, tInd) + # for i, src in enumerate(source_list): + # s_m = src.s_m(self.simulation, self._times[tInd]) + # dbdt[:, i] = dbdt[:, i] + s_m + # return dbdt - def _dbdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): - if adjoint is True: - return -self._eDeriv_u(tInd, src, self._edgeCurl.T * dun_dm_v, adjoint) - return -(self._edgeCurl * self._eDeriv_u(tInd, src, dun_dm_v)) + # def _dbdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): + # if adjoint is True: + # return -self._eDeriv_u(tInd, src, self._edgeCurl.T * dun_dm_v, adjoint) + # return -(self._edgeCurl * self._eDeriv_u(tInd, src, dun_dm_v)) - def _dbdtDeriv_m(self, tInd, src, v, adjoint=False): - if adjoint is True: - return -(self._eDeriv_m(tInd, src, self._edgeCurl.T * v, adjoint)) - return -( - self._edgeCurl * self._eDeriv_m(tInd, src, v) - ) # + src.s_mDeriv() assuming src doesn't have deriv for now + # def _dbdtDeriv_m(self, tInd, src, v, adjoint=False): + # if adjoint is True: + # return -(self._eDeriv_m(tInd, src, self._edgeCurl.T * v, adjoint)) + # return -( + # self._edgeCurl * self._eDeriv_m(tInd, src, v) + # ) # + src.s_mDeriv() assuming src doesn't have deriv for now def _e(self, bSolution, source_list, tInd): - e = self._MeSigmaI * (self._edgeCurl.T * (self._MfMui * bSolution)) + e = self.__MeSigmaTauKappaI * (self._edgeCurl.T * (self._MfMui * bSolution)) for i, src in enumerate(source_list): s_e = src.s_e(self.simulation, self._times[tInd]) - e[:, i] = e[:, i] - self._MeSigmaI * s_e + e[:, i] = e[:, i] - self.__MeSigmaTauKappaI * s_e return e def _eDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): if adjoint is True: - return self._MfMui.T * (self._edgeCurl * (self._MeSigmaI.T * dun_dm_v)) - return self._MeSigmaI * (self._edgeCurl.T * (self._MfMui * dun_dm_v)) + return self._MfMui.T * (self._edgeCurl * (self.__MeSigmaTauKappaI.T * dun_dm_v)) + return self.__MeSigmaTauKappaI * 
(self._edgeCurl.T * (self._MfMui * dun_dm_v)) def _eDeriv_m(self, tInd, src, v, adjoint=False): - _, s_e = src.eval(self.simulation, self._times[tInd]) - bSolution = self[[src], "bSolution", tInd].flatten() - - _, s_eDeriv = src.evalDeriv(self._times[tInd], self, adjoint=adjoint) - - if adjoint is True: - return self._MeSigmaIDeriv( - -s_e + self._edgeCurl.T * (self._MfMui * bSolution), v, adjoint - ) - s_eDeriv(self._MeSigmaI.T * v) - - return self._MeSigmaIDeriv( - -s_e + self._edgeCurl.T * (self._MfMui * bSolution), v, adjoint - ) - self._MeSigmaI * s_eDeriv(v) + raise NotImplementedError( + "Derivative of e-field wrt to model not implemented" + ) def _j(self, hSolution, source_list, tInd): return self.simulation.MeI * ( - self._MeSigma * self._e(hSolution, source_list, tInd) + self.__MeSigmaKappaTau * self._e(hSolution, source_list, tInd) ) def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): @@ -364,69 +357,59 @@ def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): return self._eDeriv_u( tInd, src, - self._MeSigma.T * (self.simulation.MeI.T * dun_dm_v), + self.__MeSigmaTauKappa.T * (self.simulation.MeI.T * dun_dm_v), adjoint=True, ) return self.simulation.MeI * ( - self._MeSigma * self._eDeriv_u(tInd, src, dun_dm_v) + self._MeSigmaTauKappaI * self._eDeriv_u(tInd, src, dun_dm_v) ) def _jDeriv_m(self, tInd, src, v, adjoint=False): - e = self[src, "e", tInd] - if adjoint: - w = self.simulation.MeI.T * v - return self._MeSigmaDeriv(e).T * w + self._eDeriv_m( - tInd, src, self._MeSigma.T * w, adjoint=True - ) - return self.simulation.MeI * ( - self._MeSigmaDeriv(e) * v + self._MeSigma * self._eDeriv_m(tInd, src, v) - ) - - def _h(self, hSolution, source_list, tInd): - return self.simulation.MfI * ( - self._MfMui * self._b(hSolution, source_list, tInd) - ) - - def _hDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): - if adjoint: - return self._bDeriv_u( - tInd, - src, - self._MfMui.T * (self.simulation.MfI.T * dun_dm_v), - adjoint=True, - ) - return 
self.simulation.MfI * (self._MfMui * self._bDeriv_u(tInd, src, dun_dm_v)) - - def _hDeriv_m(self, tInd, src, v, adjoint=False): - if adjoint: - return self._bDeriv_m( - tInd, src, self._MfMui.T * (self.simulation.MfI.T * v), adjoint=True - ) - return self.simulation.MfI * (self._MfMui * self._bDeriv_m(tInd, src, v)) - - def _dhdt(self, hSolution, source_list, tInd): - return self.simulation.MfI * ( - self._MfMui * self._dbdt(hSolution, source_list, tInd) - ) - - def _dhdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): - if adjoint: - return self._dbdtDeriv_u( - tInd, - src, - self._MfMui.T * (self.simulation.MfI.T * dun_dm_v), - adjoint=True, - ) - return self.simulation.MfI * ( - self._MfMui * self._dbdtDeriv_u(tInd, src, dun_dm_v) + raise NotImplementedError( + "Derivative of current density wrt to model not implemented" ) - def _dhdtDeriv_m(self, tInd, src, v, adjoint=False): - if adjoint: - return self._dbdtDeriv_m( - tInd, src, self._MfMui.T * (self.simulation.MfI.T * v), adjoint=True - ) - return self.simulation.MfI * (self._MfMui * self._dbdtDeriv_m(tInd, src, v)) + # # Inherited methods + # def _h(self, hSolution, source_list, tInd): + # return self.simulation.MfI * ( + # self._MfMui * self._b(hSolution, source_list, tInd) + # ) + + # def _hDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): + # if adjoint: + # return self._bDeriv_u( + # tInd, + # src, + # self._MfMui.T * (self.simulation.MfI.T * dun_dm_v), + # adjoint=True, + # ) + # return self.simulation.MfI * (self._MfMui * self._bDeriv_u(tInd, src, dun_dm_v)) + + # def _hDeriv_m(self, tInd, src, v, adjoint=False): + # if adjoint: + # return self._bDeriv_m( + # tInd, src, self._MfMui.T * (self.simulation.MfI.T * v), adjoint=True + # ) + # return self.simulation.MfI * (self._MfMui * self._bDeriv_m(tInd, src, v)) + + # def _dhdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): + # if adjoint: + # return self._dbdtDeriv_u( + # tInd, + # src, + # self._MfMui.T * (self.simulation.MfI.T * dun_dm_v), + # 
adjoint=True, + # ) + # return self.simulation.MfI * ( + # self._MfMui * self._dbdtDeriv_u(tInd, src, dun_dm_v) + # ) + + # def _dhdtDeriv_m(self, tInd, src, v, adjoint=False): + # if adjoint: + # return self._dbdtDeriv_m( + # tInd, src, self._MfMui.T * (self.simulation.MfI.T * v), adjoint=True + # ) + # return self.simulation.MfI * (self._MfMui * self._dbdtDeriv_m(tInd, src, v)) class Fields3DElectricField(FieldsTDEM): @@ -548,28 +531,27 @@ def _dhdtDeriv_m(self, tInd, src, v, adjoint=False): return self.simulation.MfI * (self._MfMui * self._dbdtDeriv_m(tInd, src, v)) -class Fields3DElectricFieldConductance(Fields3DElectricField): +class Fields3DElectricFieldFaceEdgeConductivity(Fields3DElectricField): """Fancy Field Storage for a TDEM simulation.""" def startup(self): self._times = self.simulation.times - self._MeSigma = self.simulation.MeSigma - # self._MeSigmaI = self.simulation.MeSigmaI - self._MeSigmaDeriv = self.simulation.MeSigmaDeriv - self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv + self.__MeSigmaTauKappa = self.simulation._MeSigmaTauKappa + self.__MeSigmaTauKappaI = self.simulation._MeSigmaTauKappaI + # self._MeSigmaDeriv = self.simulation.MeSigmaDeriv + # self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv self._edgeCurl = self.simulation.mesh.edge_curl self._MfMui = self.simulation.MfMui - self.__MeTau = self.simulation._MeTau + # self.__MeTau = self.simulation._MeTau # self.__MeTauI = self.simulation._MeTauI - self.__MeTauDeriv = self.simulation._MeTauDeriv + # self.__MeTauDeriv = self.simulation._MeTauDeriv # self.__MeTauIDeriv = self.simulation._MeTauIDeriv - self.__MeKappa = self.simulation._MeKappa + # self.__MeKappa = self.simulation._MeKappa # self.__MeKappaI = self.simulation._MeKappaI def _j(self, eSolution, source_list, tInd): return self.simulation.MeI * ( - (self._MeSigma + self.__MeTau + self.__MeKappa) - * self._e(eSolution, source_list, tInd) + self.__MeSigmaTauKappa * self._e(eSolution, source_list, tInd) ) def _jDeriv_u(self, 
tInd, src, dun_dm_v, adjoint=False): @@ -577,29 +559,18 @@ def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): return self._eDeriv_u( tInd, src, - (self._MeSigma + self.__MeTau + self.__MeKappa).T + (self.__MeSigmaTauKappa).T * (self.simulation.MeI.T * dun_dm_v), adjoint=True, ) return self.simulation.MeI * ( - (self._MeSigma + self.__MeTau + self.__MeKappa) - * self._eDeriv_u(tInd, src, dun_dm_v) + self.__MeSigmaTauKappa * self._eDeriv_u(tInd, src, dun_dm_v) ) + # NEED TO THINK ABOUT THIS def _jDeriv_m(self, tInd, src, v, adjoint=False): - e = self[src, "e", tInd] - if adjoint: - w = self.simulation.MeI.T * v - return self.__MeTauDeriv(e).T * w + self._eDeriv_m( - tInd, - src, - (self._MeSigma + self.__MeTau + self.__MeKappa).T * w, - adjoint=True, - ) - return self.simulation.MeI * ( - self._MeTauDeriv(e) * v - + (self._MeSigma + self.__MeTau + self.__MeKappa) - * self._eDeriv_m(tInd, src, v) + raise NotImplementedError( + "Derivative of current density wrt model not currently implemented." 
) diff --git a/SimPEG/electromagnetics/time_domain/simulation.py b/SimPEG/electromagnetics/time_domain/simulation.py index 8194bf179d..10cda94c5c 100644 --- a/SimPEG/electromagnetics/time_domain/simulation.py +++ b/SimPEG/electromagnetics/time_domain/simulation.py @@ -4,14 +4,14 @@ from ...data import Data from ...simulation import BaseTimeSimulation from ...utils import mkvc, sdiag, sdinv, speye, Zero, validate_type, validate_float -from ...base import BaseConductancePDESimulation +from ...base import BaseFaceEdgeElectricalPDESimulation from ..base import BaseEMSimulation from .survey import Survey from .fields import ( Fields3DMagneticFluxDensity, - Fields3DMagneticFluxDensityConductance, + Fields3DMagneticFluxDensityFaceEdgeConductivity, Fields3DElectricField, - Fields3DElectricFieldConductance, + Fields3DElectricFieldFaceEdgeConductivity, Fields3DMagneticField, Fields3DCurrentDensity, FieldsDerivativesEB, @@ -966,8 +966,8 @@ def getAdcDeriv(self, u, v, adjoint=False): # ------------------------------- Simulation3DElectricField ------------------------------- # -class Simulation3DMagneticFluxDensityConductance( - Simulation3DMagneticFluxDensity, BaseConductancePDESimulation +class Simulation3DMagneticFluxDensityFaceEdgeConductivity( + Simulation3DMagneticFluxDensity, BaseFaceEdgeElectricalPDESimulation ): r""" Starting from the quasi-static E-B formulation of Maxwell's equations @@ -1032,28 +1032,11 @@ class Simulation3DMagneticFluxDensityConductance( """ - fieldsPair = Fields3DMagneticFluxDensityConductance #: A SimPEG.EM.TDEM.Fields3DMagneticFluxDensity object + fieldsPair = Fields3DMagneticFluxDensityFaceEdgeConductivity #: A SimPEG.EM.TDEM.Fields3DMagneticFluxDensity object def __init__(self, mesh, survey=None, dt_threshold=1e-8, **kwargs): super().__init__(mesh=mesh, survey=survey, **kwargs) - if self.sigmaMap is not None or self.rhoMap is not None: - raise NotImplementedError( - "Conductivity (sigma) and resistivity (rho) are not invertible properties for 
the " - "Simulation3DMagneticFluxDensityConductance class. The mapping for the " - "invertible property is 'tauMap'." - ) - - if self.kappaMap is not None: - raise NotImplementedError( - "Conductance times length (kappa) is not an invertible property, yet." - ) - - if self.kappaiMap is not None: - raise NotImplementedError( - "Resistance per unit length (kappai) is not an invertible property, yet." - ) - def getAdiag(self, tInd): r""" System matrix at a given time index @@ -1068,8 +1051,7 @@ def getAdiag(self, tInd): dt = self.time_steps[tInd] C = self.mesh.edge_curl - MeSigmaTauKappaI = sdinv(self.MeSigma + self._MeTau + self._MeKappa) - # MeSigmaTauKappaI = self._MeSigmaTauKappaI + MeSigmaTauKappaI = self._MeSigmaTauKappaI MfMui = self.MfMui I = speye(self.mesh.n_faces) @@ -1079,9 +1061,9 @@ def getAdiag(self, tInd): return MfMui.T.tocsr() * A return A - def getAdiagDeriv(self, tInd, u, v, adjoint=False): + def getAdiagDeriv_tau(self, tInd, u, v, adjoint=False): """ - Derivative of ADiag + Derivative of ADiag wrt tau """ C = self.mesh.edge_curl @@ -1093,38 +1075,20 @@ def getAdiagDeriv(self, tInd, u, v, adjoint=False): if adjoint: if self._makeASymmetric is True: v = MfMui * v - return self._MeSigmaTauKappaIDeriv(C.T * (MfMui * u), C.T * v, adjoint) + return self._MeSigmaTauKappaIDeriv_tau(C.T * (MfMui * u), C.T * v, adjoint) - ADeriv = C * (self._MeSigmaTauKappaIDeriv(C.T * (MfMui * u), v, adjoint)) + ADeriv = C * (self._MeSigmaTauKappaIDeriv_tau(C.T * (MfMui * u), v, adjoint)) if self._makeASymmetric is True: return MfMui.T * ADeriv return ADeriv - def getAsubdiag(self, tInd): - """ - Matrix below the diagonal - """ - - dt = self.time_steps[tInd] - MfMui = self.MfMui - Asubdiag = -1.0 / dt * sp.eye(self.mesh.n_faces) - - if self._makeASymmetric is True: - return MfMui.T * Asubdiag - - return Asubdiag - - def getAsubdiagDeriv(self, tInd, u, v, adjoint=False): - return Zero() * v - def getRHS(self, tInd): """ Assemble the RHS """ C = self.mesh.edge_curl - 
MeSigmaTauKappaI = sdinv(self.MeSigma + self._MeTau + self._MeKappa) - # MeSigmaTauKappaI = self._MeSigmaTauKappaI + MeSigmaTauKappaI = self._MeSigmaTauKappaI MfMui = self.MfMui s_m, s_e = self.getSourceTerm(tInd) @@ -1134,7 +1098,7 @@ def getRHS(self, tInd): return MfMui.T * rhs return rhs - def getRHSDeriv(self, tInd, src, v, adjoint=False): + def getRHSDeriv_sigma(self, tInd, src, v, adjoint=False): """ Derivative of the RHS """ @@ -1151,7 +1115,7 @@ def getRHSDeriv(self, tInd, src, v, adjoint=False): if isinstance(s_e, Zero): MeSigmaTauKappaIDerivT_v = Zero() else: - MeSigmaTauKappaIDerivT_v = self._MeSigmaTauKappaIDeriv( + MeSigmaTauKappaIDerivT_v = self._MeSigmaTauKappaIDeriv_sigma( s_e, C.T * v, adjoint ) @@ -1166,7 +1130,7 @@ def getRHSDeriv(self, tInd, src, v, adjoint=False): if isinstance(s_e, Zero): MeSigmaTauKappaIDeriv_v = Zero() else: - MeSigmaTauKappaIDeriv_v = self._MeSigmaTauKappaIDeriv(s_e, v, adjoint) + MeSigmaTauKappaIDeriv_v = self._MeSigmaTauKappaIDeriv_sigma(s_e, v, adjoint) RHSDeriv = ( C * MeSigmaTauKappaIDeriv_v @@ -1178,33 +1142,104 @@ def getRHSDeriv(self, tInd, src, v, adjoint=False): return self.MfMui.T * RHSDeriv return RHSDeriv + def getRHSDeriv_tau(self, tInd, src, v, adjoint=False): + """ + Derivative of the RHS + """ -# ------------------------------- Simulation3DElectricField ------------------------------- # -class Simulation3DElectricFieldConductance( - Simulation3DElectricField, BaseConductancePDESimulation -): - fieldsPair = Fields3DElectricFieldConductance + C = self.mesh.edge_curl + MeSigmaTauKappaI = self._MeSigmaTauKappaI - def __init__(self, mesh, survey=None, dt_threshold=1e-8, **kwargs): - super().__init__(mesh=mesh, survey=survey, **kwargs) + _, s_e = src.eval(self, self.times[tInd]) + s_mDeriv, s_eDeriv = src.evalDeriv(self, self.times[tInd], adjoint=adjoint) - if self.sigmaMap is not None or self.rhoMap is not None: - raise NotImplementedError( - "Conductivity (sigma) and resistivity (rho) are not invertible 
properties for the " - "Simulation3DElectricFieldConductance class. The mapping for the " - "invertible property is 'tauMap'." - ) + if adjoint: + if self._makeASymmetric is True: + v = self.MfMui * v + if isinstance(s_e, Zero): + MeSigmaTauKappaIDerivT_v = Zero() + else: + MeSigmaTauKappaIDerivT_v = self._MeSigmaTauKappaIDeriv_tau( + s_e, C.T * v, adjoint + ) - if self.kappaMap is not None: - raise NotImplementedError( - "Conductance times length (kappa) is not an invertible property, yet." + RHSDeriv = ( + MeSigmaTauKappaIDerivT_v + + s_eDeriv(MeSigmaTauKappaI.T * (C.T * v)) + + s_mDeriv(v) ) - if self.kappaiMap is not None: - raise NotImplementedError( - "Resistance per unit length (kappai) is not an invertible property, yet." + return RHSDeriv + + if isinstance(s_e, Zero): + MeSigmaTauKappaIDeriv_v = Zero() + else: + MeSigmaTauKappaIDeriv_v = self._MeSigmaTauKappaIDeriv_tau(s_e, v, adjoint) + + RHSDeriv = ( + C * MeSigmaTauKappaIDeriv_v + + C * MeSigmaTauKappaI * s_eDeriv(v) + + s_mDeriv(v) + ) + + if self._makeASymmetric is True: + return self.MfMui.T * RHSDeriv + return RHSDeriv + + def getRHSDeriv_kappa(self, tInd, src, v, adjoint=False): + """ + Derivative of the RHS + """ + + C = self.mesh.edge_curl + MeSigmaTauKappaI = self._MeSigmaTauKappaI + + _, s_e = src.eval(self, self.times[tInd]) + s_mDeriv, s_eDeriv = src.evalDeriv(self, self.times[tInd], adjoint=adjoint) + + if adjoint: + if self._makeASymmetric is True: + v = self.MfMui * v + if isinstance(s_e, Zero): + MeSigmaTauKappaIDerivT_v = Zero() + else: + MeSigmaTauKappaIDerivT_v = self._MeSigmaTauKappaIDeriv_kappa( + s_e, C.T * v, adjoint + ) + + RHSDeriv = ( + MeSigmaTauKappaIDerivT_v + + s_eDeriv(MeSigmaTauKappaI.T * (C.T * v)) + + s_mDeriv(v) ) + return RHSDeriv + + if isinstance(s_e, Zero): + MeSigmaTauKappaIDeriv_v = Zero() + else: + MeSigmaTauKappaIDeriv_v = self._MeSigmaTauKappaIDeriv_kappa(s_e, v, adjoint) + + RHSDeriv = ( + C * MeSigmaTauKappaIDeriv_v + + C * MeSigmaTauKappaI * s_eDeriv(v) + + 
s_mDeriv(v) + ) + + if self._makeASymmetric is True: + return self.MfMui.T * RHSDeriv + return RHSDeriv + + +# ------------------------------- Simulation3DElectricField ------------------------------- # +class Simulation3DElectricFieldFaceEdgeConductivity( + Simulation3DElectricField, BaseFaceEdgeElectricalPDESimulation +): + fieldsPair = Fields3DElectricFieldFaceEdgeConductivity + + def __init__(self, mesh, survey=None, dt_threshold=1e-8, **kwargs): + super().__init__(mesh=mesh, survey=survey, **kwargs) + def getAdiag(self, tInd): """ Diagonal of the system matrix at a given time index @@ -1214,12 +1249,11 @@ def getAdiag(self, tInd): dt = self.time_steps[tInd] C = self.mesh.edge_curl MfMui = self.MfMui - # MeSigmaTauKappa = self.MeSigma + self._MeTau + self._MeKappa MeSigmaTauKappa = self._MeSigmaTauKappa return C.T.tocsr() * (MfMui * C) + 1.0 / dt * MeSigmaTauKappa - def getAdiagDeriv(self, tInd, u, v, adjoint=False): + def getAdiagDeriv_sigma(self, tInd, u, v, adjoint=False): """ Deriv of ADiag with respect to conductance """ @@ -1228,9 +1262,35 @@ def getAdiagDeriv(self, tInd, u, v, adjoint=False): dt = self.time_steps[tInd] if adjoint: - return 1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) + return 1.0 / dt * self._MeSigmaTauKappaDeriv_sigma(u, v, adjoint) - return 1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) + return 1.0 / dt * self._MeSigmaTauKappaDeriv_sigma(u, v, adjoint) + + def getAdiagDeriv_tau(self, tInd, u, v, adjoint=False): + """ + Deriv of ADiag with respect to conductance + """ + assert tInd >= 0 and tInd < self.nT + + dt = self.time_steps[tInd] + + if adjoint: + return 1.0 / dt * self._MeSigmaTauKappaDeriv_tau(u, v, adjoint) + + return 1.0 / dt * self._MeSigmaTauKappaDeriv_tau(u, v, adjoint) + + def getAdiagDeriv_kappa(self, tInd, u, v, adjoint=False): + """ + Deriv of ADiag with respect to conductance + """ + assert tInd >= 0 and tInd < self.nT + + dt = self.time_steps[tInd] + + if adjoint: + return 1.0 / dt * 
self._MeSigmaTauKappaDeriv_kappa(u, v, adjoint) + + return 1.0 / dt * self._MeSigmaTauKappaDeriv_kappa(u, v, adjoint) def getAsubdiag(self, tInd): """ @@ -1240,24 +1300,45 @@ def getAsubdiag(self, tInd): dt = self.time_steps[tInd] - # MeSigmaTauKappa = self.MeSigma + self._MeTau + self._MeKappa MeSigmaTauKappa = self._MeSigmaTauKappa return -1.0 / dt * MeSigmaTauKappa - def getAsubdiagDeriv(self, tInd, u, v, adjoint=False): + def getAsubdiagDeriv_sigma(self, tInd, u, v, adjoint=False): """ Derivative of the matrix below the diagonal with respect to conductance """ dt = self.time_steps[tInd] if adjoint: - return -1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) + return -1.0 / dt * self._MeSigmaTauKappaDeriv_sigma(u, v, adjoint) - return -1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) + return -1.0 / dt * self._MeSigmaTauKappaDeriv_sigma(u, v, adjoint) + + def getAsubdiagDeriv_tau(self, tInd, u, v, adjoint=False): + """ + Derivative of the matrix below the diagonal with respect to conductance + """ + dt = self.time_steps[tInd] + + if adjoint: + return -1.0 / dt * self._MeSigmaTauKappaDeriv_tau(u, v, adjoint) + + return -1.0 / dt * self._MeSigmaTauKappaDeriv_tau(u, v, adjoint) + + def getAsubdiagDeriv_kappa(self, tInd, u, v, adjoint=False): + """ + Derivative of the matrix below the diagonal with respect to conductance + """ + dt = self.time_steps[tInd] + + if adjoint: + return -1.0 / dt * self._MeSigmaTauKappaDeriv_kappa(u, v, adjoint) + + return -1.0 / dt * self._MeSigmaTauKappaDeriv_kappa(u, v, adjoint) def getAdc(self): - # MeSigmaTauKappa = self.MeSigma + self._MeTau + self._MeKappa + MeSigmaTauKappa = self._MeSigmaTauKappa Grad = self.mesh.nodal_gradient @@ -1266,12 +1347,26 @@ def getAdc(self): Adc[0, 0] = Adc[0, 0] + 1.0 return Adc - def getAdcDeriv(self, u, v, adjoint=False): + def getAdcDeriv_sigma(self, u, v, adjoint=False): + Grad = self.mesh.nodal_gradient + if not adjoint: + return Grad.T * self._MeSigmaTauKappaDeriv_sigma(-u, v, adjoint) + 
else: + return self._MeSigmaTauKappaDeriv_sigma(-u, Grad * v, adjoint) + + def getAdcDeriv_tau(self, u, v, adjoint=False): + Grad = self.mesh.nodal_gradient + if not adjoint: + return Grad.T * self._MeSigmaTauKappaDeriv_tau(-u, v, adjoint) + else: + return self._MeSigmaTauKappaDeriv_tau(-u, Grad * v, adjoint) + + def getAdcDeriv_kappa(self, u, v, adjoint=False): Grad = self.mesh.nodal_gradient if not adjoint: - return Grad.T * self._MeSigmaTauKappaDeriv(-u, v, adjoint) + return Grad.T * self._MeSigmaTauKappaDeriv_kappa(-u, v, adjoint) else: - return self._MeSigmaTauKappaDeriv(-u, Grad * v, adjoint) + return self._MeSigmaTauKappaDeriv_kappa(-u, Grad * v, adjoint) ############################################################################### From a5ee023aee5a8ce3a4d738cc78dcc5a81c8e9083 Mon Sep 17 00:00:00 2001 From: dccowan Date: Thu, 26 Oct 2023 15:58:08 -0700 Subject: [PATCH 079/164] Analytic conductance: TDEM passes, FDEM only for cyl mesh --- SimPEG/base/pde_simulation.py | 1 - .../frequency_domain/__init__.py | 8 + .../frequency_domain/fields.py | 10 +- .../frequency_domain/simulation.py | 111 ++++---- .../electromagnetics/time_domain/__init__.py | 12 +- SimPEG/electromagnetics/time_domain/fields.py | 103 +------ .../time_domain/simulation.py | 69 ----- ..._FDEM_analytic_edge_face_conductivities.py | 253 ++++++++++++++++++ tests/em/tdem/test_TDEM_forward_Analytic.py | 21 +- 9 files changed, 342 insertions(+), 246 deletions(-) create mode 100644 tests/em/fdem/forward/test_FDEM_analytic_edge_face_conductivities.py diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index 00aca4602b..0f2571be2b 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -937,7 +937,6 @@ def deleteTheseOnModelUpdate(self): + self._clear_on_rho_update + self._clear_on_tau_update + self._clear_on_kappa_update - + self._clear_on_kappai_update + ["__MeSigmaTauKappa", "__MeSigmaTauKappaI"] ) return toDelete diff --git 
a/SimPEG/electromagnetics/frequency_domain/__init__.py b/SimPEG/electromagnetics/frequency_domain/__init__.py index 3dad3cde28..48f3418138 100644 --- a/SimPEG/electromagnetics/frequency_domain/__init__.py +++ b/SimPEG/electromagnetics/frequency_domain/__init__.py @@ -16,6 +16,8 @@ Simulation3DMagneticFluxDensity Simulation3DCurrentDensity Simulation3DMagneticField + Simulation3DElectricFieldFaceEdgeConductivity + Simulation3DMagneticFluxDensityFaceEdgeConductivity Receivers @@ -60,6 +62,8 @@ Fields3DMagneticFluxDensity Fields3DCurrentDensity Fields3DMagneticField + Fields3DElectricFieldFaceEdgeConductivity + Fields3DMagneticFluxDensityFaceEdgeConductivity Base Classes ============ @@ -81,6 +85,8 @@ Simulation3DMagneticFluxDensity, Simulation3DCurrentDensity, Simulation3DMagneticField, + Simulation3DElectricFieldFaceEdgeConductivity, + Simulation3DMagneticFluxDensityFaceEdgeConductivity, ) from .simulation_1d import Simulation1DLayered from .fields import ( @@ -88,6 +94,8 @@ Fields3DMagneticFluxDensity, Fields3DCurrentDensity, Fields3DMagneticField, + Fields3DElectricFieldFaceEdgeConductivity, + Fields3DMagneticFluxDensityFaceEdgeConductivity, ) from . 
import sources as Src diff --git a/SimPEG/electromagnetics/frequency_domain/fields.py b/SimPEG/electromagnetics/frequency_domain/fields.py index 753bfab0c2..6661c10552 100644 --- a/SimPEG/electromagnetics/frequency_domain/fields.py +++ b/SimPEG/electromagnetics/frequency_domain/fields.py @@ -622,7 +622,7 @@ def _charge_density(self, eSolution, source_list): ) / self.mesh.cell_volumes[:, None] -class Fields3DElectricFieldEdgeFaceConductivity(Fields3DElectricField): +class Fields3DElectricFieldFaceEdgeConductivity(Fields3DElectricField): def _j(self, eSolution, source_list): """ @@ -991,7 +991,7 @@ def _charge_density(self, bSolution, source_list): ) / self.mesh.cell_volumes[:, None] -class Fields3DMagneticFluxDensityEdgeFaceConductivity(Fields3DMagneticFluxDensity): +class Fields3DMagneticFluxDensityFaceEdgeConductivity(Fields3DMagneticFluxDensity): """ Fields object for Simulation3DMagneticFluxDensity. @@ -1003,14 +1003,12 @@ def startup(self): self._edgeCurl = self.simulation.mesh.edge_curl self._MeSigma = self.simulation.MeSigma self._MeSigmaI = self.simulation.MeSigmaI - self.__MeTau = self._MeTau - self.__MeKappa = self._MeKappa self._MfMui = self.simulation.MfMui self._MfMuiDeriv = self.simulation.MfMuiDeriv self._MeSigmaDeriv = self.simulation.MeSigmaDeriv self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv - self.__MeSigmaTauKappa = self._MeSigmaTauKappa - self.__MeSigmaTauKappaI = self._MeSigmaTauKappaI + self.__MeSigmaTauKappa = self.simulation._MeSigmaTauKappa + self.__MeSigmaTauKappaI = self.simulation._MeSigmaTauKappaI self._Me = self.simulation.Me self._aveF2CCV = self.simulation.mesh.aveF2CCV self._aveE2CCV = self.simulation.mesh.aveE2CCV diff --git a/SimPEG/electromagnetics/frequency_domain/simulation.py b/SimPEG/electromagnetics/frequency_domain/simulation.py index d4f101ce82..3572137ca3 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation.py @@ -4,7 +4,7 @@ from ... 
import props from ...data import Data -from ...utils import mkvc, validate_type +from ...utils import mkvc, validate_type, sdinv from ...base import BaseFaceEdgeElectricalPDESimulation from ..base import BaseEMSimulation from ..utils import omega @@ -432,27 +432,10 @@ def getRHSDeriv(self, freq, src, v, adjoint=False): class Simulation3DElectricFieldFaceEdgeConductivity( Simulation3DElectricField, BaseFaceEdgeElectricalPDESimulation ): - fieldsPair = Fields3DElectricFieldFaceEdgeConductivity - - def __init__(self, mesh, survey=None, **kwargs): - super().__init__(mesh=mesh, survey=survey, **kwargs) - - if self.sigmaMap is not None or self.rhoMap is not None: - raise NotImplementedError( - "Conductivity (sigma) and resistivity (rho) are not invertible properties for the " - "Simulation3DElectricFieldFaceEdgeConductivity class. The mapping for the " - "invertible property is 'tauMap'." - ) - - if self.kappaMap is not None: - raise NotImplementedError( - "Conductance times length (kappa) is not an invertible property, yet." - ) - if self.kappaiMap is not None: - raise NotImplementedError( - "Resistance per unit length (kappai) is not an invertible property, yet." 
- ) + _solutionType = "eSolution" + _formulation = "EB" + fieldsPair = Fields3DElectricFieldFaceEdgeConductivity def getA(self, freq): @@ -463,11 +446,7 @@ def getA(self, freq): MeSigmaTauKappa = self._MeSigmaTauKappa A = C.T.tocsr() * MfMui * C + 1j * omega(freq) * MeSigmaTauKappa else: - Meyhat = ( - self._get_edge_admittivity_property_matrix(freq) + - self._MeKappa + - self._MeTau - ) + Meyhat = self._get_edge_admittivity_property_matrix(freq) + self._MeTau + self._MeKappa A = C.T.tocsr() * MfMui * C + 1j * omega(freq) * Meyhat return A @@ -709,7 +688,7 @@ def getRHSDeriv(self, freq, src, v, adjoint=False): return RHSderiv + SrcDeriv -class Simulation3DMagneticFluxDensity( +class Simulation3DMagneticFluxDensityFaceEdgeConductivity( Simulation3DMagneticFluxDensity, BaseFaceEdgeElectricalPDESimulation ): fieldsPair = Fields3DMagneticFluxDensityFaceEdgeConductivity @@ -819,10 +798,12 @@ def getRHS(self, freq): if self.permittivity is None: MeSigmaTauKappaI = self._MeSigmaTauKappaI - RHS = s_m + C * (MeSigmaI * s_e) + RHS = s_m + C * (MeSigmaTauKappaI * s_e) else: - MeyhatI = self._get_edge_admittivity_property_matrix( - freq, invert_matrix=True + MeyhatI = sdinv( + self._get_edge_admittivity_property_matrix( + freq, invert_matrix=False + ) + self._MeTau + self._MeKappa ) RHS = s_m + C * (MeyhatI * s_e) @@ -832,41 +813,41 @@ def getRHS(self, freq): return RHS - def getRHSDeriv(self, freq, src, v, adjoint=False): - """ - Derivative of the right hand side with respect to the model - - :param float freq: frequency - :param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM src: FDEM source - :param numpy.ndarray v: vector to take product with - :param bool adjoint: adjoint? 
- :rtype: numpy.ndarray - :return: product of rhs deriv with a vector - """ - - C = self.mesh.edge_curl - s_m, s_e = src.eval(self) - MfMui = self.MfMui - - if self._makeASymmetric and adjoint: - v = self.MfMui * v - - # MeSigmaIDeriv = self.MeSigmaIDeriv(s_e) - s_mDeriv, s_eDeriv = src.evalDeriv(self, adjoint=adjoint) - - if not adjoint: - # RHSderiv = C * (MeSigmaIDeriv * v) - RHSderiv = C * self.MeSigmaIDeriv(s_e, v, adjoint) - SrcDeriv = s_mDeriv(v) + C * (self.MeSigmaI * s_eDeriv(v)) - elif adjoint: - # RHSderiv = MeSigmaIDeriv.T * (C.T * v) - RHSderiv = self.MeSigmaIDeriv(s_e, C.T * v, adjoint) - SrcDeriv = s_mDeriv(v) + s_eDeriv(self.MeSigmaI.T * (C.T * v)) - - if self._makeASymmetric is True and not adjoint: - return MfMui.T * (SrcDeriv + RHSderiv) - - return RHSderiv + SrcDeriv + # def getRHSDeriv(self, freq, src, v, adjoint=False): + # """ + # Derivative of the right hand side with respect to the model + + # :param float freq: frequency + # :param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM src: FDEM source + # :param numpy.ndarray v: vector to take product with + # :param bool adjoint: adjoint? 
+ # :rtype: numpy.ndarray + # :return: product of rhs deriv with a vector + # """ + + # C = self.mesh.edge_curl + # s_m, s_e = src.eval(self) + # MfMui = self.MfMui + + # if self._makeASymmetric and adjoint: + # v = self.MfMui * v + + # # MeSigmaIDeriv = self.MeSigmaIDeriv(s_e) + # s_mDeriv, s_eDeriv = src.evalDeriv(self, adjoint=adjoint) + + # if not adjoint: + # # RHSderiv = C * (MeSigmaIDeriv * v) + # RHSderiv = C * self.MeSigmaIDeriv(s_e, v, adjoint) + # SrcDeriv = s_mDeriv(v) + C * (self.MeSigmaI * s_eDeriv(v)) + # elif adjoint: + # # RHSderiv = MeSigmaIDeriv.T * (C.T * v) + # RHSderiv = self.MeSigmaIDeriv(s_e, C.T * v, adjoint) + # SrcDeriv = s_mDeriv(v) + s_eDeriv(self.MeSigmaI.T * (C.T * v)) + + # if self._makeASymmetric is True and not adjoint: + # return MfMui.T * (SrcDeriv + RHSderiv) + + # return RHSderiv + SrcDeriv ############################################################################### diff --git a/SimPEG/electromagnetics/time_domain/__init__.py b/SimPEG/electromagnetics/time_domain/__init__.py index 4074242423..d04e73b75c 100644 --- a/SimPEG/electromagnetics/time_domain/__init__.py +++ b/SimPEG/electromagnetics/time_domain/__init__.py @@ -13,9 +13,9 @@ Simulation1DLayered Simulation3DMagneticFluxDensity - Simulation3DMagneticFluxDensityConductance + Simulation3DMagneticFluxDensityFaceEdgeConductivity Simulation3DElectricField - Simulation3DElectricFieldConductance + Simulation3DElectricFieldFaceEdgeConductivity Simulation3DMagneticField Simulation3DCurrentDensity @@ -95,18 +95,18 @@ """ from .simulation import ( Simulation3DMagneticFluxDensity, - Simulation3DMagneticFluxDensityConductance, + Simulation3DMagneticFluxDensityFaceEdgeConductivity, Simulation3DElectricField, - Simulation3DElectricFieldConductance, + Simulation3DElectricFieldFaceEdgeConductivity, Simulation3DMagneticField, Simulation3DCurrentDensity, ) from .simulation_1d import Simulation1DLayered from .fields import ( Fields3DMagneticFluxDensity, - 
Fields3DMagneticFluxDensityConductance, + Fields3DMagneticFluxDensityFaceEdgeConductivity, Fields3DElectricField, - Fields3DElectricFieldConductance, + Fields3DElectricFieldFaceEdgeConductivity, Fields3DMagneticField, Fields3DCurrentDensity, ) diff --git a/SimPEG/electromagnetics/time_domain/fields.py b/SimPEG/electromagnetics/time_domain/fields.py index 1aff43f8bf..7b99332ba1 100644 --- a/SimPEG/electromagnetics/time_domain/fields.py +++ b/SimPEG/electromagnetics/time_domain/fields.py @@ -275,61 +275,28 @@ def _dhdtDeriv_m(self, tInd, src, v, adjoint=False): class Fields3DMagneticFluxDensityFaceEdgeConductivity(Fields3DMagneticFluxDensity): """Field Storage for a TDEM simulation.""" - knownFields = {"bSolution": "F"} - aliasFields = { - "b": ["bSolution", "F", "_b"], - "h": ["bSolution", "F", "_h"], - "e": ["bSolution", "E", "_e"], - "j": ["bSolution", "E", "_j"], - "dbdt": ["bSolution", "F", "_dbdt"], - "dhdt": ["bSolution", "F", "_dhdt"], - } + # knownFields = {"bSolution": "F"} + # aliasFields = { + # "b": ["bSolution", "F", "_b"], + # "h": ["bSolution", "F", "_h"], + # "e": ["bSolution", "E", "_e"], + # "j": ["bSolution", "E", "_j"], + # "dbdt": ["bSolution", "F", "_dbdt"], + # "dhdt": ["bSolution", "F", "_dhdt"], + # } def startup(self): self._times = self.simulation.times - # self._MeSigma = self.simulation.MeSigma - # self._MeSigmaI = self.simulation.MeSigmaI - # self._MeSigmaDeriv = self.simulation.MeSigmaDeriv - # self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv + self._MeSigma = self.simulation.MeSigma + self._MeSigmaI = self.simulation.MeSigmaI + self._MeSigmaDeriv = self.simulation.MeSigmaDeriv + self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv self.__MeSigmaTauKappa = self.simulation._MeSigmaTauKappa self.__MeSigmaTauKappaI = self.simulation._MeSigmaTauKappaI self._edgeCurl = self.simulation.mesh.edge_curl self._MfMui = self.simulation.MfMui self._timeMesh = self.simulation.time_mesh - # Inherited - # def _TLoc(self, fieldType): - # return "N" - 
- # def _b(self, bSolution, source_list, tInd): - # return bSolution - - # def _bDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): - # return dun_dm_v - - # def _bDeriv_m(self, tInd, src, v, adjoint=False): - # return Zero() - - # def _dbdt(self, bSolution, source_list, tInd): - # # self._timeMesh.face_divergence - # dbdt = -self._edgeCurl * self._e(bSolution, source_list, tInd) - # for i, src in enumerate(source_list): - # s_m = src.s_m(self.simulation, self._times[tInd]) - # dbdt[:, i] = dbdt[:, i] + s_m - # return dbdt - - # def _dbdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): - # if adjoint is True: - # return -self._eDeriv_u(tInd, src, self._edgeCurl.T * dun_dm_v, adjoint) - # return -(self._edgeCurl * self._eDeriv_u(tInd, src, dun_dm_v)) - - # def _dbdtDeriv_m(self, tInd, src, v, adjoint=False): - # if adjoint is True: - # return -(self._eDeriv_m(tInd, src, self._edgeCurl.T * v, adjoint)) - # return -( - # self._edgeCurl * self._eDeriv_m(tInd, src, v) - # ) # + src.s_mDeriv() assuming src doesn't have deriv for now - def _e(self, bSolution, source_list, tInd): e = self.__MeSigmaTauKappaI * (self._edgeCurl.T * (self._MfMui * bSolution)) for i, src in enumerate(source_list): @@ -361,7 +328,7 @@ def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): adjoint=True, ) return self.simulation.MeI * ( - self._MeSigmaTauKappaI * self._eDeriv_u(tInd, src, dun_dm_v) + self.__MeSigmaTauKappaI * self._eDeriv_u(tInd, src, dun_dm_v) ) def _jDeriv_m(self, tInd, src, v, adjoint=False): @@ -369,48 +336,6 @@ def _jDeriv_m(self, tInd, src, v, adjoint=False): "Derivative of current density wrt to model not implemented" ) - # # Inherited methods - # def _h(self, hSolution, source_list, tInd): - # return self.simulation.MfI * ( - # self._MfMui * self._b(hSolution, source_list, tInd) - # ) - - # def _hDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): - # if adjoint: - # return self._bDeriv_u( - # tInd, - # src, - # self._MfMui.T * (self.simulation.MfI.T * dun_dm_v), - # 
adjoint=True, - # ) - # return self.simulation.MfI * (self._MfMui * self._bDeriv_u(tInd, src, dun_dm_v)) - - # def _hDeriv_m(self, tInd, src, v, adjoint=False): - # if adjoint: - # return self._bDeriv_m( - # tInd, src, self._MfMui.T * (self.simulation.MfI.T * v), adjoint=True - # ) - # return self.simulation.MfI * (self._MfMui * self._bDeriv_m(tInd, src, v)) - - # def _dhdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): - # if adjoint: - # return self._dbdtDeriv_u( - # tInd, - # src, - # self._MfMui.T * (self.simulation.MfI.T * dun_dm_v), - # adjoint=True, - # ) - # return self.simulation.MfI * ( - # self._MfMui * self._dbdtDeriv_u(tInd, src, dun_dm_v) - # ) - - # def _dhdtDeriv_m(self, tInd, src, v, adjoint=False): - # if adjoint: - # return self._dbdtDeriv_m( - # tInd, src, self._MfMui.T * (self.simulation.MfI.T * v), adjoint=True - # ) - # return self.simulation.MfI * (self._MfMui * self._dbdtDeriv_m(tInd, src, v)) - class Fields3DElectricField(FieldsTDEM): """Fancy Field Storage for a TDEM simulation.""" diff --git a/SimPEG/electromagnetics/time_domain/simulation.py b/SimPEG/electromagnetics/time_domain/simulation.py index 10cda94c5c..a521c71040 100644 --- a/SimPEG/electromagnetics/time_domain/simulation.py +++ b/SimPEG/electromagnetics/time_domain/simulation.py @@ -969,74 +969,8 @@ def getAdcDeriv(self, u, v, adjoint=False): class Simulation3DMagneticFluxDensityFaceEdgeConductivity( Simulation3DMagneticFluxDensity, BaseFaceEdgeElectricalPDESimulation ): - r""" - Starting from the quasi-static E-B formulation of Maxwell's equations - (semi-discretized) - - .. math:: - - \mathbf{C} \mathbf{e} + \frac{\partial \mathbf{b}}{\partial t} = - \mathbf{s_m} \\ - \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f} \mathbf{b} - - \mathbf{M_{\sigma}^e} \mathbf{e} = \mathbf{s_e} - - - where :math:`\mathbf{s_e}` is an integrated quantity, we eliminate - :math:`\mathbf{e}` using - - .. 
math:: - - \mathbf{e} = \mathbf{M_{\sigma}^e}^{-1} \mathbf{C}^{\top} - \mathbf{M_{\mu^{-1}}^f} \mathbf{b} - - \mathbf{M_{\sigma}^e}^{-1} \mathbf{s_e} - - - to obtain a second order semi-discretized system in :math:`\mathbf{b}` - - .. math:: - - \mathbf{C} \mathbf{M_{\sigma}^e}^{-1} \mathbf{C}^{\top} - \mathbf{M_{\mu^{-1}}^f} \mathbf{b} + - \frac{\partial \mathbf{b}}{\partial t} = - \mathbf{C} \mathbf{M_{\sigma}^e}^{-1} \mathbf{s_e} + \mathbf{s_m} - - - and moving everything except the time derivative to the rhs gives - - .. math:: - \frac{\partial \mathbf{b}}{\partial t} = - -\mathbf{C} \mathbf{M_{\sigma}^e}^{-1} \mathbf{C}^{\top} - \mathbf{M_{\mu^{-1}}^f} \mathbf{b} + - \mathbf{C} \mathbf{M_{\sigma}^e}^{-1} \mathbf{s_e} + \mathbf{s_m} - - For the time discretization, we use backward euler. To solve for the - :math:`n+1` th time step, we have - - .. math:: - - \frac{\mathbf{b}^{n+1} - \mathbf{b}^{n}}{\mathbf{dt}} = - -\mathbf{C} \mathbf{M_{\sigma}^e}^{-1} \mathbf{C}^{\top} - \mathbf{M_{\mu^{-1}}^f} \mathbf{b}^{n+1} + - \mathbf{C} \mathbf{M_{\sigma}^e}^{-1} \mathbf{s_e}^{n+1} + - \mathbf{s_m}^{n+1} - - - re-arranging to put :math:`\mathbf{b}^{n+1}` on the left hand side gives - - .. 
math:: - - (\mathbf{I} + \mathbf{dt} \mathbf{C} \mathbf{M_{\sigma}^e}^{-1} - \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f}) \mathbf{b}^{n+1} = - \mathbf{b}^{n} + \mathbf{dt}(\mathbf{C} \mathbf{M_{\sigma}^e}^{-1} - \mathbf{s_e}^{n+1} + \mathbf{s_m}^{n+1}) - - """ - fieldsPair = Fields3DMagneticFluxDensityFaceEdgeConductivity #: A SimPEG.EM.TDEM.Fields3DMagneticFluxDensity object - def __init__(self, mesh, survey=None, dt_threshold=1e-8, **kwargs): - super().__init__(mesh=mesh, survey=survey, **kwargs) - def getAdiag(self, tInd): r""" System matrix at a given time index @@ -1237,9 +1171,6 @@ class Simulation3DElectricFieldFaceEdgeConductivity( ): fieldsPair = Fields3DElectricFieldFaceEdgeConductivity - def __init__(self, mesh, survey=None, dt_threshold=1e-8, **kwargs): - super().__init__(mesh=mesh, survey=survey, **kwargs) - def getAdiag(self, tInd): """ Diagonal of the system matrix at a given time index diff --git a/tests/em/fdem/forward/test_FDEM_analytic_edge_face_conductivities.py b/tests/em/fdem/forward/test_FDEM_analytic_edge_face_conductivities.py new file mode 100644 index 0000000000..e48e4b9271 --- /dev/null +++ b/tests/em/fdem/forward/test_FDEM_analytic_edge_face_conductivities.py @@ -0,0 +1,253 @@ +import unittest + +import discretize +import matplotlib.pyplot as plt +import numpy as np +from pymatsolver import Pardiso as Solver +from scipy.constants import mu_0 +from SimPEG import maps +from SimPEG.electromagnetics import analytics +from SimPEG.electromagnetics import frequency_domain as fdem + + +def analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="CYL", + formulation="ElectricField", + rx_type="MagneticFluxDensity", + orientation="Z", + bounds=None, + plotIt=False, +): + # Some static parameters + PHI = np.linspace(0, 2 * np.pi, 21) + loop_radius = np.pi**-0.5 + receiver_location = np.c_[50.0, 0.0, 1.0] + source_location = np.r_[0.0, 0.0, 1.0] + + if orientation == "X": + source_nodes = np.c_[ + np.zeros_like(PHI), + loop_radius * 
np.cos(PHI), + 1.0 + loop_radius * np.sin(PHI), + ] + elif orientation == "Z": + source_nodes = np.c_[ + loop_radius * np.cos(PHI), loop_radius * np.sin(PHI), np.ones_like(PHI) + ] + + layer_depth = 24.0 + layer_thickness = 0.1 + layer_conductivity = 5e-3 + background_conductivity = 5e-3 + + tau = layer_thickness * layer_conductivity + + # if bounds is None: + # bounds = [1e-5, 1e-3] + + # 1D LAYER MODEL + thicknesses = np.array([layer_depth - layer_thickness / 2, layer_thickness]) + n_layer = len(thicknesses) + 1 + + sigma_1d = background_conductivity * np.ones(n_layer) + sigma_1d[1] = layer_conductivity + + sigma_map_1d = maps.IdentityMap(nP=n_layer) + + # 3D LAYER MODEL + if mesh_type == "CYL": + cs, ncx, ncz, npad = 4.0, 40, 20, 20 + hx = [(cs, ncx), (cs, npad, 1.3)] + hz = [(cs, npad, -1.3), (cs, ncz), (cs, npad, 1.3)] + mesh = discretize.CylindricalMesh([hx, 1, hz], "00C") + + elif mesh_type == "TENSOR": + cs, nc, npad = 8.0, 15, 10 + hx = [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)] + hy = [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)] + hz = [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)] + mesh = discretize.TensorMesh([hx, hy, hz], "CCC") + + sigma_3d = 1e-8 * np.ones(mesh.nC) + sigma_3d[mesh.cell_centers[:, -1] < 0.0] = background_conductivity + + tau_3d = np.zeros(mesh.nF) + # tau_3d[np.isclose(mesh.faces[:, -1], -layer_depth)] = tau + tau_map = maps.IdentityMap(nP=mesh.n_faces) + + # DEFINE SURVEY + frequencies = np.logspace(3, 4, 2) + rx_list = [ + getattr(fdem.receivers, "Point{}Secondary".format(rx_type))( + receiver_location, component="real", orientation=orientation + ), + getattr(fdem.receivers, "Point{}Secondary".format(rx_type))( + receiver_location, component="imag", orientation=orientation + ) + ] + + # 1D SURVEY AND SIMULATION + src_1d = [ + fdem.sources.MagDipole( + rx_list, + f, + location=np.r_[0.0, 0.0, 1.0], + orientation=orientation + ) for f in frequencies + ] + survey_1d = fdem.Survey(src_1d) + + sim_1d = fdem.Simulation1DLayered( + 
survey=survey_1d, + thicknesses=thicknesses, + sigmaMap=sigma_map_1d, + ) + + # 3D SURVEY AND SIMULATION + if mesh_type == "CYL": + src_3d = [ + fdem.sources.CircularLoop( + rx_list, + f, + radius=loop_radius, + location=source_location, + ) for f in frequencies + ] + else: + if formulation == "MagneticFluxDensity": + src_3d = [ + fdem.sources.MagDipole( + rx_list, + f, + location=source_location, + orientation=orientation, + ) for f in frequencies + ] + else: + src_3d = [ + fdem.sources.LineCurrent( + rx_list, f, location=source_nodes, + ) for f in frequencies + ] + + survey_3d = fdem.Survey(src_3d) + + # DEFINE THE SIMULATIONS + if formulation == "MagneticFluxDensity": + sim_3d = fdem.simulation.Simulation3DMagneticFluxDensityFaceEdgeConductivity( + mesh=mesh, survey=survey_3d, sigma=sigma_3d, tauMap=tau_map + ) + else: + sim_3d = fdem.simulation.Simulation3DElectricFieldFaceEdgeConductivity( + mesh=mesh, survey=survey_3d, sigma=sigma_3d, tauMap=tau_map + ) + + # COMPUTE SOLUTIONS + analytic_solution = mu_0 * sim_1d.dpred(sigma_1d) # ALWAYS RETURNS H-FIELD + numeric_solution = sim_3d.dpred(tau_3d) + + print(analytic_solution) + print(numeric_solution) + + diff = ( + np.linalg.norm(np.abs(numeric_solution - analytic_solution)) / + np.linalg.norm(np.abs(analytic_solution)) + ) + + print( + " |bz_ana| = {ana} |bz_num| = {num} |bz_ana-bz_num| = {diff}".format( + ana=np.linalg.norm(analytic_solution), + num=np.linalg.norm(numeric_solution), + diff=np.linalg.norm(analytic_solution - numeric_solution), + ) + ) + print("Difference: {}".format(diff)) + + return diff + + +class LayerConductanceTests(unittest.TestCase): + # Compares analytic 1D layered Earth solution to a plate of equivalent + # conductance. 
+ + # def test_tensor_magdipole_b_x(self): + # assert ( + # analytic_layer_small_loop_face_conductivity_comparison( + # mesh_type="TENSOR", + # formulation="MagneticFluxDensity", + # rx_type="MagneticFluxDensity", + # orientation="X", + # bounds=None, + # plotIt=False, + # ) + # < 0.01 + # ) + + # def test_tensor_magdipole_b_z(self): + # assert ( + # analytic_layer_small_loop_face_conductivity_comparison( + # mesh_type="TENSOR", + # formulation="MagneticFluxDensity", + # rx_type="MagneticFluxDensity", + # orientation="Z", + # bounds=None, + # plotIt=False, + # ) + # < 0.02 + # ) + + def test_cyl_magdipole_b_z(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="CYL", + formulation="MagneticFluxDensity", + rx_type="MagneticFluxDensity", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.01 + ) + + # def test_tensor_linecurrent_b_x(self): + # assert ( + # analytic_layer_small_loop_face_conductivity_comparison( + # mesh_type="TENSOR", + # formulation="ElectricField", + # rx_type="MagneticFluxDensity", + # orientation="X", + # bounds=None, + # plotIt=False, + # ) + # < 0.01 + # ) + + # def test_tensor_linecurrent_b_z(self): + # assert ( + # analytic_layer_small_loop_face_conductivity_comparison( + # mesh_type="TENSOR", + # formulation="ElectricField", + # rx_type="MagneticFluxDensity", + # orientation="Z", + # bounds=None, + # plotIt=False, + # ) + # < 0.01 + # ) + + def test_cyl_linecurrent_b_z(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="CYL", + formulation="ElectricField", + rx_type="MagneticFluxDensity", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.01 + ) + + +if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/tests/em/tdem/test_TDEM_forward_Analytic.py b/tests/em/tdem/test_TDEM_forward_Analytic.py index 87d8dc01eb..6227c06cf5 100644 --- a/tests/em/tdem/test_TDEM_forward_Analytic.py +++ 
b/tests/em/tdem/test_TDEM_forward_Analytic.py @@ -311,7 +311,7 @@ def analytic_halfspace_mag_dipole_comparison( return log10diff -def analytic_layer_small_loop_conductance_comparison( +def analytic_layer_small_loop_face_conductivity_comparison( mesh_type="CYL", rx_type="MagneticFluxTimeDerivative", orientation="Z", @@ -421,11 +421,11 @@ def analytic_layer_small_loop_conductance_comparison( # DEFINE THE SIMULATIONS if rx_type == "MagneticFluxDensity": - sim_3d = tdem.simulation.Simulation3DMagneticFluxDensityConductance( + sim_3d = tdem.simulation.Simulation3DMagneticFluxDensityFaceEdgeConductivity( mesh=mesh, survey=survey_3d, sigma=sigma_3d, tauMap=tau_map ) else: - sim_3d = tdem.simulation.Simulation3DElectricFieldConductance( + sim_3d = tdem.simulation.Simulation3DElectricFieldFaceEdgeConductivity( mesh=mesh, survey=survey_3d, sigma=sigma_3d, tauMap=tau_map ) @@ -769,11 +769,12 @@ def test_analytic_m3_CYL_0m_CircularLoop(self): class LayerConductanceTests(unittest.TestCase): - # WORKING + # Compares analytic 1D layered Earth solution to a plate of equivalent + # conductance. 
def test_tensor_magdipole_b_x(self): assert ( - analytic_layer_small_loop_conductance_comparison( + analytic_layer_small_loop_face_conductivity_comparison( mesh_type="TENSOR", rx_type="MagneticFluxDensity", orientation="X", @@ -785,7 +786,7 @@ def test_tensor_magdipole_b_x(self): def test_tensor_magdipole_b_z(self): assert ( - analytic_layer_small_loop_conductance_comparison( + analytic_layer_small_loop_face_conductivity_comparison( mesh_type="TENSOR", rx_type="MagneticFluxDensity", orientation="Z", @@ -797,7 +798,7 @@ def test_tensor_magdipole_b_z(self): def test_cyl_magdipole_b_z(self): assert ( - analytic_layer_small_loop_conductance_comparison( + analytic_layer_small_loop_face_conductivity_comparison( mesh_type="CYL", rx_type="MagneticFluxDensity", orientation="Z", @@ -809,7 +810,7 @@ def test_cyl_magdipole_b_z(self): def test_tensor_linecurrent_dbdt_x(self): assert ( - analytic_layer_small_loop_conductance_comparison( + analytic_layer_small_loop_face_conductivity_comparison( mesh_type="TENSOR", rx_type="MagneticFluxTimeDerivative", orientation="X", @@ -821,7 +822,7 @@ def test_tensor_linecurrent_dbdt_x(self): def test_tensor_linecurrent_dbdt_z(self): assert ( - analytic_layer_small_loop_conductance_comparison( + analytic_layer_small_loop_face_conductivity_comparison( mesh_type="TENSOR", rx_type="MagneticFluxTimeDerivative", orientation="Z", @@ -833,7 +834,7 @@ def test_tensor_linecurrent_dbdt_z(self): def test_cyl_circularloop_dbdt_z(self): assert ( - analytic_layer_small_loop_conductance_comparison( + analytic_layer_small_loop_face_conductivity_comparison( mesh_type="CYL", rx_type="MagneticFluxTimeDerivative", orientation="Z", From a66fd08145be72943d4f61c32fe677589dc02646 Mon Sep 17 00:00:00 2001 From: dccowan Date: Mon, 30 Oct 2023 13:03:18 -0700 Subject: [PATCH 080/164] Adjoint tests for face edge conductivity pass. Sigma only. 
--- SimPEG/base/pde_simulation.py | 25 +- SimPEG/electromagnetics/time_domain/fields.py | 57 ++- .../electromagnetics/time_domain/receivers.py | 2 +- .../time_domain/simulation.py | 75 ++++ SimPEG/maps.py | 14 +- tests/base/test_mass_matrices.py | 4 +- tests/em/tdem/test_TDEM_DerivAdjoint.py | 402 ++++++++++-------- 7 files changed, 360 insertions(+), 219 deletions(-) diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index 0f2571be2b..ec2da25896 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -902,6 +902,14 @@ def _MeSigmaTauKappaDeriv_kappa(self, u, v=None, adjoint=False): """Only derivative wrt to kappa""" return self._MeKappaDeriv(u, v, adjoint) + def _MeSigmaTauKappaDeriv(self, u, v=None, adjoint=False): + """Only derivative wrt to kappa""" + return ( + self.MeSigmaDeriv(u, v, adjoint) + + self._MeTauDeriv(u, v, adjoint) + + self._MeKappaDeriv(u, v, adjoint) + ) + def _MeSigmaTauKappaIDeriv_sigma(self, u, v=None, adjoint=False): """Only derivative wrt to tau""" MI_prop = self._MeSigmaTauKappaI @@ -920,16 +928,27 @@ def _MeSigmaTauKappaIDeriv_kappa(self, u, v=None, adjoint=False): u = MI_prop @ (MI_prop @ -u) return self._MeKappaDeriv(u, v, adjoint) + def _MeSigmaTauKappaIDeriv(self, u, v=None, adjoint=False): + """Only derivative wrt to kappa""" + MI_prop = self._MeSigmaTauKappaI + u = MI_prop @ (MI_prop @ -u) + return ( + self.MeSigmaDeriv(u, v, adjoint) + + self._MeTauDeriv(u, v, adjoint) + + self._MeKappaDeriv(u, v, adjoint) + ) + @property def deleteTheseOnModelUpdate(self): """ - items to be deleted if the model for conductance or resistance per meter is updated + items to be deleted if the model for cell, face or edge conductivity is updated """ toDelete = super().deleteTheseOnModelUpdate if ( - self.tauMap is not None + self.sigmaMap is not None + or self.rhoMap is not None + or self.tauMap is not None or self.kappaMap is not None - or self.kappaiMap is not None ): toDelete = ( toDelete diff 
--git a/SimPEG/electromagnetics/time_domain/fields.py b/SimPEG/electromagnetics/time_domain/fields.py index 7b99332ba1..210c618353 100644 --- a/SimPEG/electromagnetics/time_domain/fields.py +++ b/SimPEG/electromagnetics/time_domain/fields.py @@ -287,12 +287,10 @@ class Fields3DMagneticFluxDensityFaceEdgeConductivity(Fields3DMagneticFluxDensit def startup(self): self._times = self.simulation.times - self._MeSigma = self.simulation.MeSigma - self._MeSigmaI = self.simulation.MeSigmaI - self._MeSigmaDeriv = self.simulation.MeSigmaDeriv - self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv self.__MeSigmaTauKappa = self.simulation._MeSigmaTauKappa self.__MeSigmaTauKappaI = self.simulation._MeSigmaTauKappaI + self.__MeSigmaTauKappaDeriv = self.simulation._MeSigmaTauKappaDeriv + self.__MeSigmaTauKappaIDeriv = self.simulation._MeSigmaTauKappaIDeriv self._edgeCurl = self.simulation.mesh.edge_curl self._MfMui = self.simulation.MfMui self._timeMesh = self.simulation.time_mesh @@ -310,13 +308,23 @@ def _eDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): return self.__MeSigmaTauKappaI * (self._edgeCurl.T * (self._MfMui * dun_dm_v)) def _eDeriv_m(self, tInd, src, v, adjoint=False): - raise NotImplementedError( - "Derivative of e-field wrt to model not implemented" - ) + _, s_e = src.eval(self.simulation, self._times[tInd]) + bSolution = self[[src], "bSolution", tInd].flatten() + + _, s_eDeriv = src.evalDeriv(self._times[tInd], self, adjoint=adjoint) + + if adjoint is True: + return self.__MeSigmaTauKappaIDeriv( + -s_e + self._edgeCurl.T * (self._MfMui * bSolution), v, adjoint + ) - s_eDeriv(self.__MeSigmaTauKappaI.T * v) + + return self.__MeSigmaTauKappaIDeriv( + -s_e + self._edgeCurl.T * (self._MfMui * bSolution), v, adjoint + ) - self.__MeSigmaTauKappaI * s_eDeriv(v) def _j(self, hSolution, source_list, tInd): return self.simulation.MeI * ( - self.__MeSigmaKappaTau * self._e(hSolution, source_list, tInd) + self.__MeSigmaTauKappa * self._e(hSolution, source_list, tInd) ) def 
_jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): @@ -328,12 +336,18 @@ def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): adjoint=True, ) return self.simulation.MeI * ( - self.__MeSigmaTauKappaI * self._eDeriv_u(tInd, src, dun_dm_v) + self.__MeSigmaTauKappa * self._eDeriv_u(tInd, src, dun_dm_v) ) def _jDeriv_m(self, tInd, src, v, adjoint=False): - raise NotImplementedError( - "Derivative of current density wrt to model not implemented" + e = self[src, "e", tInd] + if adjoint: + w = self.simulation.MeI.T * v + return self.__MeSigmaTauKappaDeriv(e).T * w + self._eDeriv_m( + tInd, src, self.__MeSigmaTauKappa.T * w, adjoint=True + ) + return self.simulation.MeI * ( + self.__MeSigmaTauKappaDeriv(e) * v + self.__MeSigmaTauKappa * self._eDeriv_m(tInd, src, v) ) @@ -462,17 +476,9 @@ class Fields3DElectricFieldFaceEdgeConductivity(Fields3DElectricField): def startup(self): self._times = self.simulation.times self.__MeSigmaTauKappa = self.simulation._MeSigmaTauKappa - self.__MeSigmaTauKappaI = self.simulation._MeSigmaTauKappaI - # self._MeSigmaDeriv = self.simulation.MeSigmaDeriv - # self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv + self.__MeSigmaTauKappaDeriv = self.simulation._MeSigmaTauKappaDeriv self._edgeCurl = self.simulation.mesh.edge_curl self._MfMui = self.simulation.MfMui - # self.__MeTau = self.simulation._MeTau - # self.__MeTauI = self.simulation._MeTauI - # self.__MeTauDeriv = self.simulation._MeTauDeriv - # self.__MeTauIDeriv = self.simulation._MeTauIDeriv - # self.__MeKappa = self.simulation._MeKappa - # self.__MeKappaI = self.simulation._MeKappaI def _j(self, eSolution, source_list, tInd): return self.simulation.MeI * ( @@ -492,10 +498,15 @@ def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): self.__MeSigmaTauKappa * self._eDeriv_u(tInd, src, dun_dm_v) ) - # NEED TO THINK ABOUT THIS def _jDeriv_m(self, tInd, src, v, adjoint=False): - raise NotImplementedError( - "Derivative of current density wrt model not currently implemented." 
+ e = self[src, "e", tInd] + if adjoint: + w = self.simulation.MeI.T * v + return self.__MeSigmaTauKappaDeriv(e).T * w + self._eDeriv_m( + tInd, src, self.__MeSigmaTauKappa.T * w, adjoint=True + ) + return self.simulation.MeI * ( + self.__MeSigmaTauKappaDeriv(e) * v + self.__MeSigmaTauKappa * self._eDeriv_m(tInd, src, v) ) diff --git a/SimPEG/electromagnetics/time_domain/receivers.py b/SimPEG/electromagnetics/time_domain/receivers.py index 3179c527af..6c3734c46d 100644 --- a/SimPEG/electromagnetics/time_domain/receivers.py +++ b/SimPEG/electromagnetics/time_domain/receivers.py @@ -106,7 +106,7 @@ def getSpatialP(self, mesh, f): if strength != 0.0: P = P + strength * mesh.get_interpolation_matrix( self.locations, field + comp - ) + ) return P def getTimeP(self, time_mesh, f): diff --git a/SimPEG/electromagnetics/time_domain/simulation.py b/SimPEG/electromagnetics/time_domain/simulation.py index a521c71040..535610bc91 100644 --- a/SimPEG/electromagnetics/time_domain/simulation.py +++ b/SimPEG/electromagnetics/time_domain/simulation.py @@ -995,6 +995,28 @@ def getAdiag(self, tInd): return MfMui.T.tocsr() * A return A + def getAdiagDeriv_sigma(self, tInd, u, v, adjoint=False): + """ + Derivative of ADiag wrt tau + """ + C = self.mesh.edge_curl + + # def MeSigmaIDeriv(x): + # return self.MeSigmaIDeriv(x) + + MfMui = self.MfMui + + if adjoint: + if self._makeASymmetric is True: + v = MfMui * v + return self._MeSigmaTauKappaIDeriv_sigma(C.T * (MfMui * u), C.T * v, adjoint) + + ADeriv = C * (self._MeSigmaTauKappaIDeriv_sigma(C.T * (MfMui * u), v, adjoint)) + + if self._makeASymmetric is True: + return MfMui.T * ADeriv + return ADeriv + def getAdiagDeriv_tau(self, tInd, u, v, adjoint=False): """ Derivative of ADiag wrt tau @@ -1017,6 +1039,45 @@ def getAdiagDeriv_tau(self, tInd, u, v, adjoint=False): return MfMui.T * ADeriv return ADeriv + def getAdiagDeriv_kappa(self, tInd, u, v, adjoint=False): + """ + Derivative of ADiag wrt tau + """ + C = self.mesh.edge_curl + + # def 
MeSigmaIDeriv(x): + # return self.MeSigmaIDeriv(x) + + MfMui = self.MfMui + + if adjoint: + if self._makeASymmetric is True: + v = MfMui * v + return self._MeSigmaTauKappaIDeriv_kappa(C.T * (MfMui * u), C.T * v, adjoint) + + ADeriv = C * (self._MeSigmaTauKappaIDeriv_kappa(C.T * (MfMui * u), v, adjoint)) + + if self._makeASymmetric is True: + return MfMui.T * ADeriv + return ADeriv + + def getAdiagDeriv(self, tInd, u, v, adjoint=False): + C = self.mesh.edge_curl + MfMui = self.MfMui + + u = C.T * (MfMui * u) + + if adjoint: + if self._makeASymmetric is True: + v = MfMui * v + return self._MeSigmaTauKappaIDeriv(u, C.T * v, adjoint) + + ADeriv = C * self._MeSigmaTauKappaIDeriv(u, v, adjoint) + + if self._makeASymmetric is True: + return MfMui.T * ADeriv + return ADeriv + def getRHS(self, tInd): """ Assemble the RHS @@ -1223,6 +1284,13 @@ def getAdiagDeriv_kappa(self, tInd, u, v, adjoint=False): return 1.0 / dt * self._MeSigmaTauKappaDeriv_kappa(u, v, adjoint) + def getAdiagDeriv(self, tInd, u, v, adjoint=False): + return ( + self.getAdiagDeriv_sigma(tInd, u, v, adjoint) + + self.getAdiagDeriv_tau(tInd, u, v, adjoint) + + self.getAdiagDeriv_kappa(tInd, u, v, adjoint) + ) + def getAsubdiag(self, tInd): """ Matrix below the diagonal @@ -1298,6 +1366,13 @@ def getAdcDeriv_kappa(self, u, v, adjoint=False): return Grad.T * self._MeSigmaTauKappaDeriv_kappa(-u, v, adjoint) else: return self._MeSigmaTauKappaDeriv_kappa(-u, Grad * v, adjoint) + + def getAdcDeriv(self, u, v, adjoint=False): + return ( + self.getAdcDeriv_sigma(self, u, v, adjoint=False) + + self.getAdcDeriv_tau(self, u, v, adjoint=False) + + self.getAdcDeriv_kappa(self, u, v, adjoint=False) + ) ############################################################################### diff --git a/SimPEG/maps.py b/SimPEG/maps.py index 0429270da6..75354e4151 100644 --- a/SimPEG/maps.py +++ b/SimPEG/maps.py @@ -3517,14 +3517,14 @@ class InjectActiveEdges(IdentityMap): """ - def __init__(self, mesh, indActive=None, 
valInactive=0.0, nF=None): + def __init__(self, mesh, indActive=None, valInactive=0.0, nE=None): self.mesh = mesh - self.nF = nF or mesh.nF + self.nE = nE or mesh.nE - self._indActive = validate_active_indices("indActive", indActive, self.nF) + self._indActive = validate_active_indices("indActive", indActive, self.nE) self._nP = np.sum(self.indActive) - self.P = sp.eye(self.nF, format="csr")[:, self.indActive] + self.P = sp.eye(self.nE, format="csr")[:, self.indActive] self.valInactive = valInactive @@ -3540,7 +3540,7 @@ def valInactive(self): @valInactive.setter def valInactive(self, value): - n_inactive = self.nF - self.nP + n_inactive = self.nE - self.nP try: value = validate_float("valInactive", value) value = np.full(n_inactive, value) @@ -3548,7 +3548,7 @@ def valInactive(self, value): pass value = validate_ndarray_with_shape("valInactive", value, shape=(n_inactive,)) - self._valInactive = np.zeros(self.nF, dtype=float) + self._valInactive = np.zeros(self.nE, dtype=float) self._valInactive[~self.indActive] = value @property @@ -3573,7 +3573,7 @@ def shape(self): number of edges in the mesh, **shape** returns a tuple (*nE* , *nP*). 
""" - return (self.nF, self.nP) + return (self.nE, self.nP) @property def nP(self): diff --git a/tests/base/test_mass_matrices.py b/tests/base/test_mass_matrices.py index c4ccc906c0..4f9f8d760b 100644 --- a/tests/base/test_mass_matrices.py +++ b/tests/base/test_mass_matrices.py @@ -25,8 +25,8 @@ class SimpleSim(BasePDESimulation): rho, rhoMap, rhoDeriv = props.Invertible("Electrical conductivity (S/m)") props.Reciprocal(sigma, rho) mu, muMap, muDeriv = props.Invertible("Magnetic Permeability") - tau, tauMap, tauDeriv = props.Invertible("Conductance (S)") - kappa, kappaMap, kappaDeriv = props.Invertible("Resistance per meter (Ohm/m)") + tau, tauMap, tauDeriv = props.Invertible("Face conductivity, conductance (S)") + kappa, kappaMap, kappaDeriv = props.Invertible("Edge conductivity, conductivity times area (Sm)") def __init__( self, diff --git a/tests/em/tdem/test_TDEM_DerivAdjoint.py b/tests/em/tdem/test_TDEM_DerivAdjoint.py index 36419b0e39..715f0dba57 100644 --- a/tests/em/tdem/test_TDEM_DerivAdjoint.py +++ b/tests/em/tdem/test_TDEM_DerivAdjoint.py @@ -33,34 +33,73 @@ def get_mesh(): ) -def get_mapping(mesh): - active = mesh.cell_centers_z < 0.0 - activeMap = maps.InjectActiveCells( - mesh, active, np.log(1e-8), nC=mesh.shape_cells[2] +def get_sigma_mapping(mesh): + # active = mesh.cell_centers_z < 0.0 + # activeMap = maps.InjectActiveCells( + # mesh, active, np.log(1e-8), nC=mesh.shape_cells[2] + # ) + # return maps.ExpMap(mesh) * maps.SurjectVertical1D(mesh) * activeMap + active = mesh.cell_centers[:, -1] < 0.0 + activeMap = maps.InjectActiveCells(mesh, active, np.log(1e-8)) + return maps.ExpMap(mesh) * activeMap + +def get_tau_mapping(mesh): + active = mesh.cell_faces[:, -1] < 0.0 + activeMap = maps.InjectActiveCells(mesh, active, np.log(1e-8)) + return maps.ExpMap(mesh.nF) * activeMap + +def get_kappa_mapping(mesh): + active = mesh.cell_edges[:, -1] < 0.0 + activeMap = maps.InjectActiveCells(mesh, active, np.log(1e-8)) + return maps.ExpMap(mesh.nE) * activeMap 
+ +# def get_wire_mappings(mesh): + +# # active cells, faces + edges +# active_cells = mesh.cell_centers[:, -1] < 0.0 +# active_faces = mesh.faces[:, -1] < 0.0 +# active_edges = mesh.edges[:, -1] < 0.0 +# n_active_cells = np.sum(active_cells) +# n_active_faces = np.sum(active_faces) +# n_active_edges = np.sum(active_edges) + +# # wire map +# wire_map = maps.Wires( +# ("log_sigma", n_active_cells), +# ("log_tau", n_active_faces), +# ("log_kappa", n_active_edges) +# ) + +# sigma_map = maps.InjectActiveCells( +# mesh, active_cells, 1e-8 +# ) * maps.ExpMap(nP=n_active_cells) * wire_map.log_sigma +# tau_map = maps.InjectActiveFaces( +# mesh, active_faces, 0 +# ) * maps.ExpMap(nP=n_active_faces) * wire_map.log_tau +# kappa_map = maps.InjectActiveEdges( +# mesh, active_edges, 0 +# ) * maps.ExpMap(nP=n_active_edges) * wire_map.log_kappa + +# return sigma_map, tau_map, kappa_map + +def get_prob(mesh, formulation, sigma_map, **kwargs): + prb = getattr(tdem, "Simulation3D{}".format(formulation))( + mesh, sigmaMap=sigma_map, **kwargs ) - return maps.ExpMap(mesh) * maps.SurjectVertical1D(mesh) * activeMap + prb.time_steps = [(1e-05, 10), (5e-05, 10), (2.5e-4, 10)] + prb.solver = Solver + return prb -def get_face_mapping(mesh): - active = mesh.faces[:, -1] < 0.0 - activeMap = maps.InjectActiveFaces( - mesh, active, 0. 
+def get_face_edge_prob(mesh, formulation, sigma_map=None, tau_map=None, kappa_map=None, **kwargs): + prb = getattr(tdem, "Simulation3D{}".format(formulation))( + mesh, + sigmaMap=sigma_map, tauMap=tau_map, kappaMap=kappa_map, + **kwargs ) - return activeMap * maps.ExpMap(nP=np.sum(active)) - -def get_prob(mesh, mapping, formulation, **kwargs): - if "Conductance" in formulation: - prb = getattr(tdem, "Simulation3D{}".format(formulation))( - mesh, tauMap=mapping, **kwargs - ) - else: - prb = getattr(tdem, "Simulation3D{}".format(formulation))( - mesh, sigmaMap=mapping, **kwargs - ) prb.time_steps = [(1e-05, 10), (5e-05, 10), (2.5e-4, 10)] prb.solver = Solver return prb - def get_survey(): src1 = tdem.Src.MagDipole([], location=np.array([0.0, 0.0, 0.0])) src2 = tdem.Src.MagDipole([], location=np.array([0.0, 0.0, 8.0])) @@ -75,22 +114,17 @@ class Base_DerivAdjoint_Test(unittest.TestCase): def setUpClass(self): # create a prob where we will store the fields mesh = get_mesh() - - if "Conductance" in self.formulation: - mapping = get_face_mapping(mesh) - else: - mapping = get_mapping(mesh) self.survey = get_survey() - self.prob = get_prob(mesh, mapping, self.formulation, survey=self.survey) - if "Conductance" in self.formulation: - self.m = np.log(1e-1) * np.ones(self.prob.tauMap.nP) + 1e-3 * np.random.randn( - self.prob.tauMap.nP - ) + if "FaceEdgeConductivity" in self.formulation: + sigma_map = get_sigma_mapping(mesh) + self.prob = get_face_edge_prob(mesh, self.formulation, sigma_map=sigma_map, survey=self.survey) + self.m = np.log(1e-1) * np.ones(self.prob.sigmaMap.nP) #+ 1e-3 * np.random.randn(self.prob.sigmaMap.nP) + else: - self.m = np.log(1e-1) * np.ones(self.prob.sigmaMap.nP) + 1e-3 * np.random.randn( - self.prob.sigmaMap.nP - ) + sigma_map = get_sigma_mapping(mesh) + self.prob = get_prob(mesh, self.formulation, sigma_map, survey=self.survey) + self.m = np.log(1e-1) * np.ones(self.prob.sigmaMap.nP) #+ 1e-3 * np.random.randn(self.prob.sigmaMap.nP) print("Solving 
Fields for problem {}".format(self.formulation)) t = time.time() @@ -100,12 +134,13 @@ def setUpClass(self): # create a prob where will be re-computing fields at each jvec # iteration mesh = get_mesh() - if "Conductance" in self.formulation: - mapping = get_face_mapping(mesh) - else: - mapping = get_mapping(mesh) self.surveyfwd = get_survey() - self.probfwd = get_prob(mesh, mapping, self.formulation, survey=self.surveyfwd) + if "FaceEdgeConductivity" in self.formulation: + sigma_map = get_sigma_mapping(mesh) + self.probfwd = get_face_edge_prob(mesh, self.formulation, sigma_map=sigma_map, survey=self.surveyfwd) + else: + sigma_map = get_sigma_mapping(mesh) + self.probfwd = get_prob(mesh, self.formulation, sigma_map, survey=self.surveyfwd) def get_rx(self, rxcomp): rxOffset = 15.0 @@ -151,10 +186,7 @@ def JvecVsJtvecTest(self, rxcomp): "\nAdjoint Testing Jvec, Jtvec prob {}, {}".format(self.formulation, rxcomp) ) - if "Conductance" in self.formulation: - m = np.random.rand(self.prob.tauMap.nP) - else: - m = np.random.rand(self.prob.sigmaMap.nP) + m = np.random.rand(self.prob.sigmaMap.nP) d = np.random.randn(self.prob.survey.nD) V1 = d.dot(self.prob.Jvec(self.m, m, f=self.fields)) V2 = m.dot(self.prob.Jtvec(self.m, d, f=self.fields)) @@ -201,220 +233,224 @@ def test_eDeriv_u_adjoint(self): self.assertTrue(passed) -class DerivAdjoint_E(Base_DerivAdjoint_Test): - formulation = "ElectricField" +# class DerivAdjoint_E(Base_DerivAdjoint_Test): +# formulation = "ElectricField" - if testDeriv: +# if testDeriv: - def test_Jvec_e_dbxdt(self): - self.JvecTest("MagneticFluxTimeDerivativex") +# def test_Jvec_e_dbxdt(self): +# self.JvecTest("MagneticFluxTimeDerivativex") - def test_Jvec_e_dbzdt(self): - self.JvecTest("MagneticFluxTimeDerivativez") +# def test_Jvec_e_dbzdt(self): +# self.JvecTest("MagneticFluxTimeDerivativez") - def test_Jvec_e_ey(self): - self.JvecTest("ElectricFieldy") +# def test_Jvec_e_ey(self): +# self.JvecTest("ElectricFieldy") - def test_Jvec_e_dhxdt(self): 
- self.JvecTest("MagneticFieldTimeDerivativex") +# def test_Jvec_e_dhxdt(self): +# self.JvecTest("MagneticFieldTimeDerivativex") - def test_Jvec_e_dhzdt(self): - self.JvecTest("MagneticFieldTimeDerivativez") +# def test_Jvec_e_dhzdt(self): +# self.JvecTest("MagneticFieldTimeDerivativez") - def test_Jvec_e_jy(self): - self.JvecTest("CurrentDensityy") +# def test_Jvec_e_jy(self): +# self.JvecTest("CurrentDensityy") - if testAdjoint: +# if testAdjoint: - def test_Jvec_adjoint_e_dbdtx(self): - self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") +# def test_Jvec_adjoint_e_dbdtx(self): +# self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") - def test_Jvec_adjoint_e_dbdtz(self): - self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") +# def test_Jvec_adjoint_e_dbdtz(self): +# self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") - def test_Jvec_adjoint_e_ey(self): - self.JvecVsJtvecTest("ElectricFieldy") +# def test_Jvec_adjoint_e_ey(self): +# self.JvecVsJtvecTest("ElectricFieldy") - def test_Jvec_adjoint_e_dhdtx(self): - self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") +# def test_Jvec_adjoint_e_dhdtx(self): +# self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") - def test_Jvec_adjoint_e_dhdtz(self): - self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") +# def test_Jvec_adjoint_e_dhdtz(self): +# self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") - def test_Jvec_adjoint_e_jy(self): - self.JvecVsJtvecTest("CurrentDensityy") +# def test_Jvec_adjoint_e_jy(self): +# self.JvecVsJtvecTest("CurrentDensityy") +# pass -class DerivAdjoint_E_Conductance(Base_DerivAdjoint_Test): - formulation = "ElectricFieldConductance" +# class DerivAdjoint_E_FaceEdgeConductivity(Base_DerivAdjoint_Test): +# formulation = "ElectricFieldFaceEdgeConductivity" - if testDeriv: +# if testDeriv: - def test_Jvec_e_dbxdt(self): - self.JvecTest("MagneticFluxTimeDerivativex") +# def test_Jvec_e_dbxdt(self): +# self.JvecTest("MagneticFluxTimeDerivativex") - def test_Jvec_e_dbzdt(self): - 
self.JvecTest("MagneticFluxTimeDerivativez") +# def test_Jvec_e_dbzdt(self): +# self.JvecTest("MagneticFluxTimeDerivativez") - def test_Jvec_e_ey(self): - self.JvecTest("ElectricFieldy") +# def test_Jvec_e_ey(self): +# self.JvecTest("ElectricFieldy") - def test_Jvec_e_dhxdt(self): - self.JvecTest("MagneticFieldTimeDerivativex") +# def test_Jvec_e_dhxdt(self): +# self.JvecTest("MagneticFieldTimeDerivativex") - def test_Jvec_e_dhzdt(self): - self.JvecTest("MagneticFieldTimeDerivativez") +# def test_Jvec_e_dhzdt(self): +# self.JvecTest("MagneticFieldTimeDerivativez") - # def test_Jvec_e_jy(self): - # self.JvecTest("CurrentDensityy") +# def test_Jvec_e_jy(self): +# self.JvecTest("CurrentDensityy") - if testAdjoint: +# if testAdjoint: - def test_Jvec_adjoint_e_dbdtx(self): - self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") +# def test_Jvec_adjoint_e_dbdtx(self): +# self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") - def test_Jvec_adjoint_e_dbdtz(self): - self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") +# def test_Jvec_adjoint_e_dbdtz(self): +# self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") - def test_Jvec_adjoint_e_ey(self): - self.JvecVsJtvecTest("ElectricFieldy") +# def test_Jvec_adjoint_e_ey(self): +# self.JvecVsJtvecTest("ElectricFieldy") - def test_Jvec_adjoint_e_dhdtx(self): - self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") +# def test_Jvec_adjoint_e_dhdtx(self): +# self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") - def test_Jvec_adjoint_e_dhdtz(self): - self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") +# def test_Jvec_adjoint_e_dhdtz(self): +# self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") - # def test_Jvec_adjoint_e_jy(self): - # self.JvecVsJtvecTest("CurrentDensityy") +# def test_Jvec_adjoint_e_jy(self): +# self.JvecVsJtvecTest("CurrentDensityy") -class DerivAdjoint_B(Base_DerivAdjoint_Test): - formulation = "MagneticFluxDensity" +# class DerivAdjoint_B(Base_DerivAdjoint_Test): +# formulation = "MagneticFluxDensity" - if 
testDeriv: +# if testDeriv: - def test_Jvec_b_bx(self): - self.JvecTest("MagneticFluxDensityx") +# def test_Jvec_b_bx(self): +# self.JvecTest("MagneticFluxDensityx") - def test_Jvec_b_bz(self): - self.JvecTest("MagneticFluxDensityz") +# def test_Jvec_b_bz(self): +# self.JvecTest("MagneticFluxDensityz") - def test_Jvec_b_dbdtx(self): - self.JvecTest("MagneticFluxTimeDerivativex") +# def test_Jvec_b_dbdtx(self): +# self.JvecTest("MagneticFluxTimeDerivativex") - def test_Jvec_b_dbdtz(self): - self.JvecTest("MagneticFluxTimeDerivativez") +# def test_Jvec_b_dbdtz(self): +# self.JvecTest("MagneticFluxTimeDerivativez") - def test_Jvec_b_hx(self): - self.JvecTest("MagneticFieldx") +# def test_Jvec_b_hx(self): +# self.JvecTest("MagneticFieldx") - def test_Jvec_b_hz(self): - self.JvecTest("MagneticFieldz") +# def test_Jvec_b_hz(self): +# self.JvecTest("MagneticFieldz") - def test_Jvec_b_dhdtx(self): - self.JvecTest("MagneticFieldTimeDerivativex") +# def test_Jvec_b_dhdtx(self): +# self.JvecTest("MagneticFieldTimeDerivativex") - def test_Jvec_b_dhdtz(self): - self.JvecTest("MagneticFieldTimeDerivativez") +# def test_Jvec_b_dhdtz(self): +# self.JvecTest("MagneticFieldTimeDerivativez") - def test_Jvec_b_jy(self): - self.JvecTest("CurrentDensityy") +# def test_Jvec_b_jy(self): +# self.JvecTest("CurrentDensityy") - if testAdjoint: +# if testAdjoint: - def test_Jvec_adjoint_b_bx(self): - self.JvecVsJtvecTest("MagneticFluxDensityx") +# def test_Jvec_adjoint_b_bx(self): +# self.JvecVsJtvecTest("MagneticFluxDensityx") - def test_Jvec_adjoint_b_bz(self): - self.JvecVsJtvecTest("MagneticFluxDensityz") +# def test_Jvec_adjoint_b_bz(self): +# self.JvecVsJtvecTest("MagneticFluxDensityz") - def test_Jvec_adjoint_b_dbdtx(self): - self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") +# def test_Jvec_adjoint_b_dbdtx(self): +# self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") - def test_Jvec_adjoint_b_dbdtz(self): - self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") +# def 
test_Jvec_adjoint_b_dbdtz(self): +# self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") - def test_Jvec_adjoint_b_ey(self): - self.JvecVsJtvecTest("ElectricFieldy") +# def test_Jvec_adjoint_b_ey(self): +# self.JvecVsJtvecTest("ElectricFieldy") - def test_Jvec_adjoint_b_hx(self): - self.JvecVsJtvecTest("MagneticFieldx") +# def test_Jvec_adjoint_b_hx(self): +# self.JvecVsJtvecTest("MagneticFieldx") - def test_Jvec_adjoint_b_hz(self): - self.JvecVsJtvecTest("MagneticFieldz") +# def test_Jvec_adjoint_b_hz(self): +# self.JvecVsJtvecTest("MagneticFieldz") - def test_Jvec_adjoint_b_dhdtx(self): - self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") +# def test_Jvec_adjoint_b_dhdtx(self): +# self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") - def test_Jvec_adjoint_b_dhdtz(self): - self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") +# def test_Jvec_adjoint_b_dhdtz(self): +# self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") - def test_Jvec_adjoint_b_jy(self): - self.JvecVsJtvecTest("CurrentDensityy") +# def test_Jvec_adjoint_b_jy(self): +# self.JvecVsJtvecTest("CurrentDensityy") -class DerivAdjoint_B_Conductance(Base_DerivAdjoint_Test): - formulation = "MagneticFluxDensityConductance" +# class DerivAdjoint_B_FaceEdgeConductivity(Base_DerivAdjoint_Test): +# formulation = "MagneticFluxDensityFaceEdgeConductivity" - if testDeriv: +# if testDeriv: - def test_Jvec_b_bx(self): - self.JvecTest("MagneticFluxDensityx") +# def test_Jvec_b_bx(self): +# self.JvecTest("MagneticFluxDensityx") - def test_Jvec_b_bz(self): - self.JvecTest("MagneticFluxDensityz") +# def test_Jvec_b_bz(self): +# self.JvecTest("MagneticFluxDensityz") - def test_Jvec_b_dbdtx(self): - self.JvecTest("MagneticFluxTimeDerivativex") +# def test_Jvec_b_dbdtx(self): +# self.JvecTest("MagneticFluxTimeDerivativex") - def test_Jvec_b_dbdtz(self): - self.JvecTest("MagneticFluxTimeDerivativez") +# def test_Jvec_b_dbdtz(self): +# self.JvecTest("MagneticFluxTimeDerivativez") + +# def test_Jvec_b_ey(self): +# 
self.JvecTest("ElectricFieldy") - def test_Jvec_b_hx(self): - self.JvecTest("MagneticFieldx") +# def test_Jvec_b_hx(self): +# self.JvecTest("MagneticFieldx") - def test_Jvec_b_hz(self): - self.JvecTest("MagneticFieldz") +# def test_Jvec_b_hz(self): +# self.JvecTest("MagneticFieldz") - def test_Jvec_b_dhdtx(self): - self.JvecTest("MagneticFieldTimeDerivativex") +# def test_Jvec_b_dhdtx(self): +# self.JvecTest("MagneticFieldTimeDerivativex") - def test_Jvec_b_dhdtz(self): - self.JvecTest("MagneticFieldTimeDerivativez") +# def test_Jvec_b_dhdtz(self): +# self.JvecTest("MagneticFieldTimeDerivativez") - # def test_Jvec_b_jy(self): - # self.JvecTest("CurrentDensityy") +# def test_Jvec_b_jy(self): +# self.JvecTest("CurrentDensityy") - if testAdjoint: +# if testAdjoint: - def test_Jvec_adjoint_b_bx(self): - self.JvecVsJtvecTest("MagneticFluxDensityx") +# def test_Jvec_adjoint_b_bx(self): +# self.JvecVsJtvecTest("MagneticFluxDensityx") - def test_Jvec_adjoint_b_bz(self): - self.JvecVsJtvecTest("MagneticFluxDensityz") +# def test_Jvec_adjoint_b_bz(self): +# self.JvecVsJtvecTest("MagneticFluxDensityz") - def test_Jvec_adjoint_b_dbdtx(self): - self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") +# def test_Jvec_adjoint_b_dbdtx(self): +# self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") - def test_Jvec_adjoint_b_dbdtz(self): - self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") +# def test_Jvec_adjoint_b_dbdtz(self): +# self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") - def test_Jvec_adjoint_b_ey(self): - self.JvecVsJtvecTest("ElectricFieldy") +# def test_Jvec_adjoint_b_ey(self): +# self.JvecVsJtvecTest("ElectricFieldy") - def test_Jvec_adjoint_b_hx(self): - self.JvecVsJtvecTest("MagneticFieldx") +# def test_Jvec_adjoint_b_hx(self): +# self.JvecVsJtvecTest("MagneticFieldx") - def test_Jvec_adjoint_b_hz(self): - self.JvecVsJtvecTest("MagneticFieldz") +# def test_Jvec_adjoint_b_hz(self): +# self.JvecVsJtvecTest("MagneticFieldz") - def test_Jvec_adjoint_b_dhdtx(self): - 
self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") +# def test_Jvec_adjoint_b_dhdtx(self): +# self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") - def test_Jvec_adjoint_b_dhdtz(self): - self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") +# def test_Jvec_adjoint_b_dhdtz(self): +# self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") - # def test_Jvec_adjoint_b_jy(self): - # self.JvecVsJtvecTest("CurrentDensityy") +# def test_Jvec_adjoint_b_jy(self): +# self.JvecVsJtvecTest("CurrentDensityy") class DerivAdjoint_H(Base_DerivAdjoint_Test): From 933647a4019250e557f4a35146d45d70301fa167 Mon Sep 17 00:00:00 2001 From: dccowan Date: Mon, 30 Oct 2023 13:27:33 -0700 Subject: [PATCH 081/164] TDEM adjoint tests pass --- .../time_domain/simulation.py | 257 +----------- tests/em/tdem/test_TDEM_DerivAdjoint.py | 397 +++++++++--------- 2 files changed, 215 insertions(+), 439 deletions(-) diff --git a/SimPEG/electromagnetics/time_domain/simulation.py b/SimPEG/electromagnetics/time_domain/simulation.py index 535610bc91..513b7fcc91 100644 --- a/SimPEG/electromagnetics/time_domain/simulation.py +++ b/SimPEG/electromagnetics/time_domain/simulation.py @@ -995,72 +995,6 @@ def getAdiag(self, tInd): return MfMui.T.tocsr() * A return A - def getAdiagDeriv_sigma(self, tInd, u, v, adjoint=False): - """ - Derivative of ADiag wrt tau - """ - C = self.mesh.edge_curl - - # def MeSigmaIDeriv(x): - # return self.MeSigmaIDeriv(x) - - MfMui = self.MfMui - - if adjoint: - if self._makeASymmetric is True: - v = MfMui * v - return self._MeSigmaTauKappaIDeriv_sigma(C.T * (MfMui * u), C.T * v, adjoint) - - ADeriv = C * (self._MeSigmaTauKappaIDeriv_sigma(C.T * (MfMui * u), v, adjoint)) - - if self._makeASymmetric is True: - return MfMui.T * ADeriv - return ADeriv - - def getAdiagDeriv_tau(self, tInd, u, v, adjoint=False): - """ - Derivative of ADiag wrt tau - """ - C = self.mesh.edge_curl - - # def MeSigmaIDeriv(x): - # return self.MeSigmaIDeriv(x) - - MfMui = self.MfMui - - if adjoint: - if 
self._makeASymmetric is True: - v = MfMui * v - return self._MeSigmaTauKappaIDeriv_tau(C.T * (MfMui * u), C.T * v, adjoint) - - ADeriv = C * (self._MeSigmaTauKappaIDeriv_tau(C.T * (MfMui * u), v, adjoint)) - - if self._makeASymmetric is True: - return MfMui.T * ADeriv - return ADeriv - - def getAdiagDeriv_kappa(self, tInd, u, v, adjoint=False): - """ - Derivative of ADiag wrt tau - """ - C = self.mesh.edge_curl - - # def MeSigmaIDeriv(x): - # return self.MeSigmaIDeriv(x) - - MfMui = self.MfMui - - if adjoint: - if self._makeASymmetric is True: - v = MfMui * v - return self._MeSigmaTauKappaIDeriv_kappa(C.T * (MfMui * u), C.T * v, adjoint) - - ADeriv = C * (self._MeSigmaTauKappaIDeriv_kappa(C.T * (MfMui * u), v, adjoint)) - - if self._makeASymmetric is True: - return MfMui.T * ADeriv - return ADeriv - def getAdiagDeriv(self, tInd, u, v, adjoint=False): C = self.mesh.edge_curl MfMui = self.MfMui @@ -1093,95 +1027,7 @@ def getRHS(self, tInd): return MfMui.T * rhs return rhs - def getRHSDeriv_sigma(self, tInd, src, v, adjoint=False): - """ - Derivative of the RHS - """ - - C = self.mesh.edge_curl - MeSigmaTauKappaI = self._MeSigmaTauKappaI - - _, s_e = src.eval(self, self.times[tInd]) - s_mDeriv, s_eDeriv = src.evalDeriv(self, self.times[tInd], adjoint=adjoint) - - if adjoint: - if self._makeASymmetric is True: - v = self.MfMui * v - if isinstance(s_e, Zero): - MeSigmaTauKappaIDerivT_v = Zero() - else: - MeSigmaTauKappaIDerivT_v = self._MeSigmaTauKappaIDeriv_sigma( - s_e, C.T * v, adjoint - ) - - RHSDeriv = ( - MeSigmaTauKappaIDerivT_v - + s_eDeriv(MeSigmaTauKappaI.T * (C.T * v)) - + s_mDeriv(v) - ) - - return RHSDeriv - - if isinstance(s_e, Zero): - MeSigmaTauKappaIDeriv_v = Zero() - else: - MeSigmaTauKappaIDeriv_v = self._MeSigmaTauKappaIDeriv_sigma(s_e, v, adjoint) - - RHSDeriv = ( - C * MeSigmaTauKappaIDeriv_v - + C * MeSigmaTauKappaI * s_eDeriv(v) - + s_mDeriv(v) - ) - - if self._makeASymmetric is True: - return self.MfMui.T * RHSDeriv - return RHSDeriv - - def 
getRHSDeriv_tau(self, tInd, src, v, adjoint=False): - """ - Derivative of the RHS - """ - - C = self.mesh.edge_curl - MeSigmaTauKappaI = self._MeSigmaTauKappaI - - _, s_e = src.eval(self, self.times[tInd]) - s_mDeriv, s_eDeriv = src.evalDeriv(self, self.times[tInd], adjoint=adjoint) - - if adjoint: - if self._makeASymmetric is True: - v = self.MfMui * v - if isinstance(s_e, Zero): - MeSigmaTauKappaIDerivT_v = Zero() - else: - MeSigmaTauKappaIDerivT_v = self._MeSigmaTauKappaIDeriv_tau( - s_e, C.T * v, adjoint - ) - - RHSDeriv = ( - MeSigmaTauKappaIDerivT_v - + s_eDeriv(MeSigmaTauKappaI.T * (C.T * v)) - + s_mDeriv(v) - ) - - return RHSDeriv - - if isinstance(s_e, Zero): - MeSigmaTauKappaIDeriv_v = Zero() - else: - MeSigmaTauKappaIDeriv_v = self._MeSigmaTauKappaIDeriv_tau(s_e, v, adjoint) - - RHSDeriv = ( - C * MeSigmaTauKappaIDeriv_v - + C * MeSigmaTauKappaI * s_eDeriv(v) - + s_mDeriv(v) - ) - - if self._makeASymmetric is True: - return self.MfMui.T * RHSDeriv - return RHSDeriv - - def getRHSDeriv_kappa(self, tInd, src, v, adjoint=False): + def getRHSDeriv(self, tInd, src, v, adjoint=False): """ Derivative of the RHS """ @@ -1198,7 +1044,7 @@ def getRHSDeriv_kappa(self, tInd, src, v, adjoint=False): if isinstance(s_e, Zero): MeSigmaTauKappaIDerivT_v = Zero() else: - MeSigmaTauKappaIDerivT_v = self._MeSigmaTauKappaIDeriv_kappa( + MeSigmaTauKappaIDerivT_v = self._MeSigmaTauKappaIDeriv( s_e, C.T * v, adjoint ) @@ -1213,7 +1059,7 @@ def getRHSDeriv_kappa(self, tInd, src, v, adjoint=False): if isinstance(s_e, Zero): MeSigmaTauKappaIDeriv_v = Zero() else: - MeSigmaTauKappaIDeriv_v = self._MeSigmaTauKappaIDeriv_kappa(s_e, v, adjoint) + MeSigmaTauKappaIDeriv_v = self._MeSigmaTauKappaIDeriv(s_e, v, adjoint) RHSDeriv = ( C * MeSigmaTauKappaIDeriv_v @@ -1245,51 +1091,16 @@ def getAdiag(self, tInd): return C.T.tocsr() * (MfMui * C) + 1.0 / dt * MeSigmaTauKappa - def getAdiagDeriv_sigma(self, tInd, u, v, adjoint=False): - """ - Deriv of ADiag with respect to conductance - """ - 
assert tInd >= 0 and tInd < self.nT - - dt = self.time_steps[tInd] - - if adjoint: - return 1.0 / dt * self._MeSigmaTauKappaDeriv_sigma(u, v, adjoint) - - return 1.0 / dt * self._MeSigmaTauKappaDeriv_sigma(u, v, adjoint) - - def getAdiagDeriv_tau(self, tInd, u, v, adjoint=False): - """ - Deriv of ADiag with respect to conductance - """ - assert tInd >= 0 and tInd < self.nT - - dt = self.time_steps[tInd] - - if adjoint: - return 1.0 / dt * self._MeSigmaTauKappaDeriv_tau(u, v, adjoint) - - return 1.0 / dt * self._MeSigmaTauKappaDeriv_tau(u, v, adjoint) - - def getAdiagDeriv_kappa(self, tInd, u, v, adjoint=False): - """ - Deriv of ADiag with respect to conductance - """ + def getAdiagDeriv(self, tInd, u, v, adjoint=False): + assert tInd >= 0 and tInd < self.nT dt = self.time_steps[tInd] if adjoint: - return 1.0 / dt * self._MeSigmaTauKappaDeriv_kappa(u, v, adjoint) - - return 1.0 / dt * self._MeSigmaTauKappaDeriv_kappa(u, v, adjoint) + return 1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) - def getAdiagDeriv(self, tInd, u, v, adjoint=False): - return ( - self.getAdiagDeriv_sigma(tInd, u, v, adjoint) - + self.getAdiagDeriv_tau(tInd, u, v, adjoint) - + self.getAdiagDeriv_kappa(tInd, u, v, adjoint) - ) + return 1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) def getAsubdiag(self, tInd): """ @@ -1303,38 +1114,16 @@ def getAsubdiag(self, tInd): return -1.0 / dt * MeSigmaTauKappa - def getAsubdiagDeriv_sigma(self, tInd, u, v, adjoint=False): - """ - Derivative of the matrix below the diagonal with respect to conductance - """ - dt = self.time_steps[tInd] - - if adjoint: - return -1.0 / dt * self._MeSigmaTauKappaDeriv_sigma(u, v, adjoint) - - return -1.0 / dt * self._MeSigmaTauKappaDeriv_sigma(u, v, adjoint) - - def getAsubdiagDeriv_tau(self, tInd, u, v, adjoint=False): - """ - Derivative of the matrix below the diagonal with respect to conductance - """ - dt = self.time_steps[tInd] - - if adjoint: - return -1.0 / dt * self._MeSigmaTauKappaDeriv_tau(u, v, adjoint) 
- - return -1.0 / dt * self._MeSigmaTauKappaDeriv_tau(u, v, adjoint) - - def getAsubdiagDeriv_kappa(self, tInd, u, v, adjoint=False): + def getAsubdiagDeriv(self, tInd, u, v, adjoint=False): """ Derivative of the matrix below the diagonal with respect to conductance """ dt = self.time_steps[tInd] if adjoint: - return -1.0 / dt * self._MeSigmaTauKappaDeriv_kappa(u, v, adjoint) + return -1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) - return -1.0 / dt * self._MeSigmaTauKappaDeriv_kappa(u, v, adjoint) + return -1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) def getAdc(self): @@ -1346,33 +1135,13 @@ def getAdc(self): Adc[0, 0] = Adc[0, 0] + 1.0 return Adc - def getAdcDeriv_sigma(self, u, v, adjoint=False): - Grad = self.mesh.nodal_gradient - if not adjoint: - return Grad.T * self._MeSigmaTauKappaDeriv_sigma(-u, v, adjoint) - else: - return self._MeSigmaTauKappaDeriv_sigma(-u, Grad * v, adjoint) - - def getAdcDeriv_tau(self, u, v, adjoint=False): + def getAdcDeriv(self, u, v, adjoint=False): Grad = self.mesh.nodal_gradient if not adjoint: - return Grad.T * self._MeSigmaTauKappaDeriv_tau(-u, v, adjoint) + return Grad.T * self._MeSigmaTauKappaDeriv(-u, v, adjoint) else: - return self._MeSigmaTauKappaDeriv_tau(-u, Grad * v, adjoint) + return self._MeSigmaTauKappaDeriv(-u, Grad * v, adjoint) - def getAdcDeriv_kappa(self, u, v, adjoint=False): - Grad = self.mesh.nodal_gradient - if not adjoint: - return Grad.T * self._MeSigmaTauKappaDeriv_kappa(-u, v, adjoint) - else: - return self._MeSigmaTauKappaDeriv_kappa(-u, Grad * v, adjoint) - - def getAdcDeriv(self, u, v, adjoint=False): - return ( - self.getAdcDeriv_sigma(self, u, v, adjoint=False) + - self.getAdcDeriv_tau(self, u, v, adjoint=False) + - self.getAdcDeriv_kappa(self, u, v, adjoint=False) - ) ############################################################################### diff --git a/tests/em/tdem/test_TDEM_DerivAdjoint.py b/tests/em/tdem/test_TDEM_DerivAdjoint.py index 715f0dba57..fcb9299af3 100644 --- 
a/tests/em/tdem/test_TDEM_DerivAdjoint.py +++ b/tests/em/tdem/test_TDEM_DerivAdjoint.py @@ -34,53 +34,41 @@ def get_mesh(): def get_sigma_mapping(mesh): - # active = mesh.cell_centers_z < 0.0 - # activeMap = maps.InjectActiveCells( - # mesh, active, np.log(1e-8), nC=mesh.shape_cells[2] - # ) - # return maps.ExpMap(mesh) * maps.SurjectVertical1D(mesh) * activeMap - active = mesh.cell_centers[:, -1] < 0.0 - activeMap = maps.InjectActiveCells(mesh, active, np.log(1e-8)) - return maps.ExpMap(mesh) * activeMap - -def get_tau_mapping(mesh): - active = mesh.cell_faces[:, -1] < 0.0 - activeMap = maps.InjectActiveCells(mesh, active, np.log(1e-8)) - return maps.ExpMap(mesh.nF) * activeMap - -def get_kappa_mapping(mesh): - active = mesh.cell_edges[:, -1] < 0.0 - activeMap = maps.InjectActiveCells(mesh, active, np.log(1e-8)) - return maps.ExpMap(mesh.nE) * activeMap - -# def get_wire_mappings(mesh): - -# # active cells, faces + edges -# active_cells = mesh.cell_centers[:, -1] < 0.0 -# active_faces = mesh.faces[:, -1] < 0.0 -# active_edges = mesh.edges[:, -1] < 0.0 -# n_active_cells = np.sum(active_cells) -# n_active_faces = np.sum(active_faces) -# n_active_edges = np.sum(active_edges) + # H AND J FORMULATIONS UNSTABLE WITHOUT SURJECT VERTICAL 1D + active = mesh.cell_centers_z < 0.0 + activeMap = maps.InjectActiveCells( + mesh, active, np.log(1e-8), nC=mesh.shape_cells[2] + ) + return maps.ExpMap(mesh) * maps.SurjectVertical1D(mesh) * activeMap + +def get_wire_mappings(mesh): + + # active cells, faces + edges + active_cells = mesh.cell_centers[:, -1] < 0.0 + active_faces = mesh.faces[:, -1] < 0.0 + active_edges = mesh.edges[:, -1] < 0.0 + n_active_cells = np.sum(active_cells) + n_active_faces = np.sum(active_faces) + n_active_edges = np.sum(active_edges) -# # wire map -# wire_map = maps.Wires( -# ("log_sigma", n_active_cells), -# ("log_tau", n_active_faces), -# ("log_kappa", n_active_edges) -# ) - -# sigma_map = maps.InjectActiveCells( -# mesh, active_cells, 1e-8 -# ) * 
maps.ExpMap(nP=n_active_cells) * wire_map.log_sigma -# tau_map = maps.InjectActiveFaces( -# mesh, active_faces, 0 -# ) * maps.ExpMap(nP=n_active_faces) * wire_map.log_tau -# kappa_map = maps.InjectActiveEdges( -# mesh, active_edges, 0 -# ) * maps.ExpMap(nP=n_active_edges) * wire_map.log_kappa - -# return sigma_map, tau_map, kappa_map + # wire map + wire_map = maps.Wires( + ("log_sigma", n_active_cells), + ("log_tau", n_active_faces), + ("log_kappa", n_active_edges) + ) + + sigma_map = maps.InjectActiveCells( + mesh, active_cells, 1e-8 + ) * maps.ExpMap(nP=n_active_cells) * wire_map.log_sigma + tau_map = maps.InjectActiveFaces( + mesh, active_faces, 0 + ) * maps.ExpMap(nP=n_active_faces) * wire_map.log_tau + kappa_map = maps.InjectActiveEdges( + mesh, active_edges, 0 + ) * maps.ExpMap(nP=n_active_edges) * wire_map.log_kappa + + return sigma_map, tau_map, kappa_map def get_prob(mesh, formulation, sigma_map, **kwargs): prb = getattr(tdem, "Simulation3D{}".format(formulation))( @@ -117,14 +105,28 @@ def setUpClass(self): self.survey = get_survey() if "FaceEdgeConductivity" in self.formulation: - sigma_map = get_sigma_mapping(mesh) - self.prob = get_face_edge_prob(mesh, self.formulation, sigma_map=sigma_map, survey=self.survey) - self.m = np.log(1e-1) * np.ones(self.prob.sigmaMap.nP) #+ 1e-3 * np.random.randn(self.prob.sigmaMap.nP) + # sigma_map = get_sigma_mapping(mesh) + # self.prob = get_face_edge_prob(mesh, self.formulation, sigma_map=sigma_map, survey=self.survey) + # self.m = np.log(1e-1) * np.ones(self.prob.sigmaMap.nP) + 1e-3 * np.random.randn(self.prob.sigmaMap.nP) + + active_cells = mesh.cell_centers[:, -1] < 0.0 + active_faces = mesh.faces[:, -1] < 0.0 + active_edges = mesh.edges[:, -1] < 0.0 + + sigma_map, tau_map, kappa_map = get_wire_mappings(mesh) + self.prob = get_face_edge_prob( + mesh, self.formulation, sigma_map=sigma_map, tau_map=tau_map, kappa_map=kappa_map, survey=self.survey + ) + self.m = np.r_[ + np.log(1e-1) * np.ones(np.sum(active_cells)) + 
1e-3 * np.random.randn(np.sum(active_cells)), + np.log(10*1e-1) * np.ones(np.sum(active_faces)) + 1e-3 * np.random.randn(np.sum(active_faces)), + np.log(100*1e-1) * np.ones(np.sum(active_edges)) + 1e-3 * np.random.randn(np.sum(active_edges)) + ] else: sigma_map = get_sigma_mapping(mesh) self.prob = get_prob(mesh, self.formulation, sigma_map, survey=self.survey) - self.m = np.log(1e-1) * np.ones(self.prob.sigmaMap.nP) #+ 1e-3 * np.random.randn(self.prob.sigmaMap.nP) + self.m = np.log(1e-1) * np.ones(self.prob.sigmaMap.nP) + 1e-3 * np.random.randn(self.prob.sigmaMap.nP) print("Solving Fields for problem {}".format(self.formulation)) t = time.time() @@ -136,8 +138,13 @@ def setUpClass(self): mesh = get_mesh() self.surveyfwd = get_survey() if "FaceEdgeConductivity" in self.formulation: - sigma_map = get_sigma_mapping(mesh) - self.probfwd = get_face_edge_prob(mesh, self.formulation, sigma_map=sigma_map, survey=self.surveyfwd) + # sigma_map = get_sigma_mapping(mesh) + # self.probfwd = get_face_edge_prob(mesh, self.formulation, sigma_map=sigma_map, survey=self.surveyfwd) + + sigma_map, tau_map, kappa_map = get_wire_mappings(mesh) + self.probfwd = get_face_edge_prob( + mesh, self.formulation, sigma_map=sigma_map, tau_map=tau_map, kappa_map=kappa_map, survey=self.surveyfwd + ) else: sigma_map = get_sigma_mapping(mesh) self.probfwd = get_prob(mesh, self.formulation, sigma_map, survey=self.surveyfwd) @@ -233,224 +240,224 @@ def test_eDeriv_u_adjoint(self): self.assertTrue(passed) -# class DerivAdjoint_E(Base_DerivAdjoint_Test): -# formulation = "ElectricField" +class DerivAdjoint_E(Base_DerivAdjoint_Test): + formulation = "ElectricField" -# if testDeriv: + if testDeriv: -# def test_Jvec_e_dbxdt(self): -# self.JvecTest("MagneticFluxTimeDerivativex") + def test_Jvec_e_dbxdt(self): + self.JvecTest("MagneticFluxTimeDerivativex") -# def test_Jvec_e_dbzdt(self): -# self.JvecTest("MagneticFluxTimeDerivativez") + def test_Jvec_e_dbzdt(self): + 
self.JvecTest("MagneticFluxTimeDerivativez") -# def test_Jvec_e_ey(self): -# self.JvecTest("ElectricFieldy") + def test_Jvec_e_ey(self): + self.JvecTest("ElectricFieldy") -# def test_Jvec_e_dhxdt(self): -# self.JvecTest("MagneticFieldTimeDerivativex") + def test_Jvec_e_dhxdt(self): + self.JvecTest("MagneticFieldTimeDerivativex") -# def test_Jvec_e_dhzdt(self): -# self.JvecTest("MagneticFieldTimeDerivativez") + def test_Jvec_e_dhzdt(self): + self.JvecTest("MagneticFieldTimeDerivativez") -# def test_Jvec_e_jy(self): -# self.JvecTest("CurrentDensityy") + def test_Jvec_e_jy(self): + self.JvecTest("CurrentDensityy") -# if testAdjoint: + if testAdjoint: -# def test_Jvec_adjoint_e_dbdtx(self): -# self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") + def test_Jvec_adjoint_e_dbdtx(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") -# def test_Jvec_adjoint_e_dbdtz(self): -# self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") + def test_Jvec_adjoint_e_dbdtz(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") -# def test_Jvec_adjoint_e_ey(self): -# self.JvecVsJtvecTest("ElectricFieldy") + def test_Jvec_adjoint_e_ey(self): + self.JvecVsJtvecTest("ElectricFieldy") -# def test_Jvec_adjoint_e_dhdtx(self): -# self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") + def test_Jvec_adjoint_e_dhdtx(self): + self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") -# def test_Jvec_adjoint_e_dhdtz(self): -# self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") + def test_Jvec_adjoint_e_dhdtz(self): + self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") -# def test_Jvec_adjoint_e_jy(self): -# self.JvecVsJtvecTest("CurrentDensityy") -# pass + def test_Jvec_adjoint_e_jy(self): + self.JvecVsJtvecTest("CurrentDensityy") + pass -# class DerivAdjoint_E_FaceEdgeConductivity(Base_DerivAdjoint_Test): -# formulation = "ElectricFieldFaceEdgeConductivity" +class DerivAdjoint_E_FaceEdgeConductivity(Base_DerivAdjoint_Test): + formulation = "ElectricFieldFaceEdgeConductivity" -# if 
testDeriv: + if testDeriv: -# def test_Jvec_e_dbxdt(self): -# self.JvecTest("MagneticFluxTimeDerivativex") + def test_Jvec_e_dbxdt(self): + self.JvecTest("MagneticFluxTimeDerivativex") -# def test_Jvec_e_dbzdt(self): -# self.JvecTest("MagneticFluxTimeDerivativez") + def test_Jvec_e_dbzdt(self): + self.JvecTest("MagneticFluxTimeDerivativez") -# def test_Jvec_e_ey(self): -# self.JvecTest("ElectricFieldy") + def test_Jvec_e_ey(self): + self.JvecTest("ElectricFieldy") -# def test_Jvec_e_dhxdt(self): -# self.JvecTest("MagneticFieldTimeDerivativex") + def test_Jvec_e_dhxdt(self): + self.JvecTest("MagneticFieldTimeDerivativex") -# def test_Jvec_e_dhzdt(self): -# self.JvecTest("MagneticFieldTimeDerivativez") + def test_Jvec_e_dhzdt(self): + self.JvecTest("MagneticFieldTimeDerivativez") -# def test_Jvec_e_jy(self): -# self.JvecTest("CurrentDensityy") + def test_Jvec_e_jy(self): + self.JvecTest("CurrentDensityy") -# if testAdjoint: + if testAdjoint: -# def test_Jvec_adjoint_e_dbdtx(self): -# self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") + def test_Jvec_adjoint_e_dbdtx(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") -# def test_Jvec_adjoint_e_dbdtz(self): -# self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") + def test_Jvec_adjoint_e_dbdtz(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") -# def test_Jvec_adjoint_e_ey(self): -# self.JvecVsJtvecTest("ElectricFieldy") + def test_Jvec_adjoint_e_ey(self): + self.JvecVsJtvecTest("ElectricFieldy") -# def test_Jvec_adjoint_e_dhdtx(self): -# self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") + def test_Jvec_adjoint_e_dhdtx(self): + self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") -# def test_Jvec_adjoint_e_dhdtz(self): -# self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") + def test_Jvec_adjoint_e_dhdtz(self): + self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") -# def test_Jvec_adjoint_e_jy(self): -# self.JvecVsJtvecTest("CurrentDensityy") + def test_Jvec_adjoint_e_jy(self): + 
self.JvecVsJtvecTest("CurrentDensityy") -# class DerivAdjoint_B(Base_DerivAdjoint_Test): -# formulation = "MagneticFluxDensity" +class DerivAdjoint_B(Base_DerivAdjoint_Test): + formulation = "MagneticFluxDensity" -# if testDeriv: + if testDeriv: -# def test_Jvec_b_bx(self): -# self.JvecTest("MagneticFluxDensityx") + def test_Jvec_b_bx(self): + self.JvecTest("MagneticFluxDensityx") -# def test_Jvec_b_bz(self): -# self.JvecTest("MagneticFluxDensityz") + def test_Jvec_b_bz(self): + self.JvecTest("MagneticFluxDensityz") -# def test_Jvec_b_dbdtx(self): -# self.JvecTest("MagneticFluxTimeDerivativex") + def test_Jvec_b_dbdtx(self): + self.JvecTest("MagneticFluxTimeDerivativex") -# def test_Jvec_b_dbdtz(self): -# self.JvecTest("MagneticFluxTimeDerivativez") + def test_Jvec_b_dbdtz(self): + self.JvecTest("MagneticFluxTimeDerivativez") -# def test_Jvec_b_hx(self): -# self.JvecTest("MagneticFieldx") + def test_Jvec_b_hx(self): + self.JvecTest("MagneticFieldx") -# def test_Jvec_b_hz(self): -# self.JvecTest("MagneticFieldz") + def test_Jvec_b_hz(self): + self.JvecTest("MagneticFieldz") -# def test_Jvec_b_dhdtx(self): -# self.JvecTest("MagneticFieldTimeDerivativex") + def test_Jvec_b_dhdtx(self): + self.JvecTest("MagneticFieldTimeDerivativex") -# def test_Jvec_b_dhdtz(self): -# self.JvecTest("MagneticFieldTimeDerivativez") + def test_Jvec_b_dhdtz(self): + self.JvecTest("MagneticFieldTimeDerivativez") -# def test_Jvec_b_jy(self): -# self.JvecTest("CurrentDensityy") + def test_Jvec_b_jy(self): + self.JvecTest("CurrentDensityy") -# if testAdjoint: + if testAdjoint: -# def test_Jvec_adjoint_b_bx(self): -# self.JvecVsJtvecTest("MagneticFluxDensityx") + def test_Jvec_adjoint_b_bx(self): + self.JvecVsJtvecTest("MagneticFluxDensityx") -# def test_Jvec_adjoint_b_bz(self): -# self.JvecVsJtvecTest("MagneticFluxDensityz") + def test_Jvec_adjoint_b_bz(self): + self.JvecVsJtvecTest("MagneticFluxDensityz") -# def test_Jvec_adjoint_b_dbdtx(self): -# 
self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") + def test_Jvec_adjoint_b_dbdtx(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") -# def test_Jvec_adjoint_b_dbdtz(self): -# self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") + def test_Jvec_adjoint_b_dbdtz(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") -# def test_Jvec_adjoint_b_ey(self): -# self.JvecVsJtvecTest("ElectricFieldy") + def test_Jvec_adjoint_b_ey(self): + self.JvecVsJtvecTest("ElectricFieldy") -# def test_Jvec_adjoint_b_hx(self): -# self.JvecVsJtvecTest("MagneticFieldx") + def test_Jvec_adjoint_b_hx(self): + self.JvecVsJtvecTest("MagneticFieldx") -# def test_Jvec_adjoint_b_hz(self): -# self.JvecVsJtvecTest("MagneticFieldz") + def test_Jvec_adjoint_b_hz(self): + self.JvecVsJtvecTest("MagneticFieldz") -# def test_Jvec_adjoint_b_dhdtx(self): -# self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") + def test_Jvec_adjoint_b_dhdtx(self): + self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") -# def test_Jvec_adjoint_b_dhdtz(self): -# self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") + def test_Jvec_adjoint_b_dhdtz(self): + self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") -# def test_Jvec_adjoint_b_jy(self): -# self.JvecVsJtvecTest("CurrentDensityy") + def test_Jvec_adjoint_b_jy(self): + self.JvecVsJtvecTest("CurrentDensityy") -# class DerivAdjoint_B_FaceEdgeConductivity(Base_DerivAdjoint_Test): -# formulation = "MagneticFluxDensityFaceEdgeConductivity" +class DerivAdjoint_B_FaceEdgeConductivity(Base_DerivAdjoint_Test): + formulation = "MagneticFluxDensityFaceEdgeConductivity" -# if testDeriv: + if testDeriv: -# def test_Jvec_b_bx(self): -# self.JvecTest("MagneticFluxDensityx") + def test_Jvec_b_bx(self): + self.JvecTest("MagneticFluxDensityx") -# def test_Jvec_b_bz(self): -# self.JvecTest("MagneticFluxDensityz") + def test_Jvec_b_bz(self): + self.JvecTest("MagneticFluxDensityz") -# def test_Jvec_b_dbdtx(self): -# self.JvecTest("MagneticFluxTimeDerivativex") + def 
test_Jvec_b_dbdtx(self): + self.JvecTest("MagneticFluxTimeDerivativex") -# def test_Jvec_b_dbdtz(self): -# self.JvecTest("MagneticFluxTimeDerivativez") + def test_Jvec_b_dbdtz(self): + self.JvecTest("MagneticFluxTimeDerivativez") -# def test_Jvec_b_ey(self): -# self.JvecTest("ElectricFieldy") + def test_Jvec_b_ey(self): + self.JvecTest("ElectricFieldy") -# def test_Jvec_b_hx(self): -# self.JvecTest("MagneticFieldx") + def test_Jvec_b_hx(self): + self.JvecTest("MagneticFieldx") -# def test_Jvec_b_hz(self): -# self.JvecTest("MagneticFieldz") + def test_Jvec_b_hz(self): + self.JvecTest("MagneticFieldz") -# def test_Jvec_b_dhdtx(self): -# self.JvecTest("MagneticFieldTimeDerivativex") + def test_Jvec_b_dhdtx(self): + self.JvecTest("MagneticFieldTimeDerivativex") -# def test_Jvec_b_dhdtz(self): -# self.JvecTest("MagneticFieldTimeDerivativez") + def test_Jvec_b_dhdtz(self): + self.JvecTest("MagneticFieldTimeDerivativez") -# def test_Jvec_b_jy(self): -# self.JvecTest("CurrentDensityy") + def test_Jvec_b_jy(self): + self.JvecTest("CurrentDensityy") -# if testAdjoint: + if testAdjoint: -# def test_Jvec_adjoint_b_bx(self): -# self.JvecVsJtvecTest("MagneticFluxDensityx") + def test_Jvec_adjoint_b_bx(self): + self.JvecVsJtvecTest("MagneticFluxDensityx") -# def test_Jvec_adjoint_b_bz(self): -# self.JvecVsJtvecTest("MagneticFluxDensityz") + def test_Jvec_adjoint_b_bz(self): + self.JvecVsJtvecTest("MagneticFluxDensityz") -# def test_Jvec_adjoint_b_dbdtx(self): -# self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") + def test_Jvec_adjoint_b_dbdtx(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativex") -# def test_Jvec_adjoint_b_dbdtz(self): -# self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") + def test_Jvec_adjoint_b_dbdtz(self): + self.JvecVsJtvecTest("MagneticFluxTimeDerivativez") -# def test_Jvec_adjoint_b_ey(self): -# self.JvecVsJtvecTest("ElectricFieldy") + def test_Jvec_adjoint_b_ey(self): + self.JvecVsJtvecTest("ElectricFieldy") -# def test_Jvec_adjoint_b_hx(self): 
-# self.JvecVsJtvecTest("MagneticFieldx") + def test_Jvec_adjoint_b_hx(self): + self.JvecVsJtvecTest("MagneticFieldx") -# def test_Jvec_adjoint_b_hz(self): -# self.JvecVsJtvecTest("MagneticFieldz") + def test_Jvec_adjoint_b_hz(self): + self.JvecVsJtvecTest("MagneticFieldz") -# def test_Jvec_adjoint_b_dhdtx(self): -# self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") + def test_Jvec_adjoint_b_dhdtx(self): + self.JvecVsJtvecTest("MagneticFieldTimeDerivativex") -# def test_Jvec_adjoint_b_dhdtz(self): -# self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") + def test_Jvec_adjoint_b_dhdtz(self): + self.JvecVsJtvecTest("MagneticFieldTimeDerivativez") -# def test_Jvec_adjoint_b_jy(self): -# self.JvecVsJtvecTest("CurrentDensityy") + def test_Jvec_adjoint_b_jy(self): + self.JvecVsJtvecTest("CurrentDensityy") class DerivAdjoint_H(Base_DerivAdjoint_Test): From ed45788867eb2b0eacbfc3d0b2ea15dcdd73fb67 Mon Sep 17 00:00:00 2001 From: dccowan Date: Wed, 1 Nov 2023 12:59:25 -0700 Subject: [PATCH 082/164] add FDEM forward tests for face conductivity --- .../frequency_domain/simulation.py | 105 ++++------ ..._FDEM_analytic_edge_face_conductivities.py | 198 +++++++++--------- 2 files changed, 133 insertions(+), 170 deletions(-) diff --git a/SimPEG/electromagnetics/frequency_domain/simulation.py b/SimPEG/electromagnetics/frequency_domain/simulation.py index 3572137ca3..f581952161 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation.py @@ -451,7 +451,7 @@ def getA(self, freq): return A - def getADeriv_tau(self, freq, u, v, adjoint=False): + def getADeriv_sigma(self, freq, u, v, adjoint=False): r""" Product of the derivative of our system matrix with respect to the conductivity model and a vector @@ -471,37 +471,12 @@ def getADeriv_tau(self, freq, u, v, adjoint=False): adjoint (nD,) """ - dMe_dtau_v = self._MeTauDeriv(u, v, adjoint) + dMe_dtau_v = self._MeSigmaTauKappaDeriv(u, v, adjoint) return 1j * omega(freq) 
* dMe_dtau_v - def getADeriv_kappa(self, freq, u, v, adjoint=False): - r""" - Product of the derivative of our system matrix with respect to the - conductivity model and a vector - - .. math :: - - \frac{\mathbf{A}(\mathbf{m}) \mathbf{v}}{d \mathbf{m}_{\sigma}} = - i \omega \frac{d \mathbf{M^e_{\sigma}}(\mathbf{u})\mathbf{v} }{d\mathbf{m}} - - :param float freq: frequency - :param numpy.ndarray u: solution vector (nE,) - :param numpy.ndarray v: vector to take prodct with (nP,) or (nD,) for - adjoint - :param bool adjoint: adjoint? - :rtype: numpy.ndarray - :return: derivative of the system matrix times a vector (nP,) or - adjoint (nD,) - """ - - dMe_dkappa_v = self._MeKappaDeriv(u, v, adjoint) - return 1j * omega(freq) * dMe_dkappa_v - def getADeriv(self, freq, u, v, adjoint=False): return ( self.getADeriv_sigma(freq, u, v, adjoint) - + self.getADeriv_tau(freq, u, v, adjoint) - + self.getADeriv_kappa(freq, u, v, adjoint) + self.getADeriv_mui(freq, u, v, adjoint) # + self.getADeriv_permittivity(freq, u, v, adjoint) ) @@ -724,7 +699,7 @@ def getA(self, freq): return MfMui.T.tocsr() * A return A - def getADeriv_tau(self, freq, u, v, adjoint=False): + def getADeriv_sigma(self, freq, u, v, adjoint=False): r""" Product of the derivative of our system matrix with respect to the model and a vector @@ -753,10 +728,6 @@ def getADeriv_tau(self, freq, u, v, adjoint=False): return MeSigmaTauKappaIDeriv(vec, C.T * v, adjoint) return C * MeSigmaTauKappaIDeriv(vec, v, adjoint) - # if adjoint: - # return MeSigmaIDeriv.T * (C.T * v) - # return C * (MeSigmaIDeriv * v) - def getADeriv_mui(self, freq, u, v, adjoint=False): MfMuiDeriv = self.MfMuiDeriv(u) MeSigmaTauKappaI = self._MeSigmaTauKappaI @@ -813,41 +784,41 @@ def getRHS(self, freq): return RHS - # def getRHSDeriv(self, freq, src, v, adjoint=False): - # """ - # Derivative of the right hand side with respect to the model - - # :param float freq: frequency - # :param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM src: 
FDEM source - # :param numpy.ndarray v: vector to take product with - # :param bool adjoint: adjoint? - # :rtype: numpy.ndarray - # :return: product of rhs deriv with a vector - # """ - - # C = self.mesh.edge_curl - # s_m, s_e = src.eval(self) - # MfMui = self.MfMui - - # if self._makeASymmetric and adjoint: - # v = self.MfMui * v - - # # MeSigmaIDeriv = self.MeSigmaIDeriv(s_e) - # s_mDeriv, s_eDeriv = src.evalDeriv(self, adjoint=adjoint) - - # if not adjoint: - # # RHSderiv = C * (MeSigmaIDeriv * v) - # RHSderiv = C * self.MeSigmaIDeriv(s_e, v, adjoint) - # SrcDeriv = s_mDeriv(v) + C * (self.MeSigmaI * s_eDeriv(v)) - # elif adjoint: - # # RHSderiv = MeSigmaIDeriv.T * (C.T * v) - # RHSderiv = self.MeSigmaIDeriv(s_e, C.T * v, adjoint) - # SrcDeriv = s_mDeriv(v) + s_eDeriv(self.MeSigmaI.T * (C.T * v)) - - # if self._makeASymmetric is True and not adjoint: - # return MfMui.T * (SrcDeriv + RHSderiv) - - # return RHSderiv + SrcDeriv + def getRHSDeriv(self, freq, src, v, adjoint=False): + """ + Derivative of the right hand side with respect to the model + + :param float freq: frequency + :param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM src: FDEM source + :param numpy.ndarray v: vector to take product with + :param bool adjoint: adjoint? 
+ :rtype: numpy.ndarray + :return: product of rhs deriv with a vector + """ + + C = self.mesh.edge_curl + s_m, s_e = src.eval(self) + MfMui = self.MfMui + + if self._makeASymmetric and adjoint: + v = self.MfMui * v + + # MeSigmaIDeriv = self.MeSigmaIDeriv(s_e) + s_mDeriv, s_eDeriv = src.evalDeriv(self, adjoint=adjoint) + + if not adjoint: + # RHSderiv = C * (MeSigmaIDeriv * v) + RHSderiv = C * self._MeSigmaTauKappaIDeriv(s_e, v, adjoint) + SrcDeriv = s_mDeriv(v) + C * (self._MeSigmaTauKappaI * s_eDeriv(v)) + elif adjoint: + # RHSderiv = MeSigmaIDeriv.T * (C.T * v) + RHSderiv = self._MeSigmaTauKappaIDeriv(s_e, C.T * v, adjoint) + SrcDeriv = s_mDeriv(v) + s_eDeriv(self._MeSigmaTauKappaI.T * (C.T * v)) + + if self._makeASymmetric is True and not adjoint: + return MfMui.T * (SrcDeriv + RHSderiv) + + return RHSderiv + SrcDeriv ############################################################################### diff --git a/tests/em/fdem/forward/test_FDEM_analytic_edge_face_conductivities.py b/tests/em/fdem/forward/test_FDEM_analytic_edge_face_conductivities.py index e48e4b9271..0e5387c149 100644 --- a/tests/em/fdem/forward/test_FDEM_analytic_edge_face_conductivities.py +++ b/tests/em/fdem/forward/test_FDEM_analytic_edge_face_conductivities.py @@ -19,32 +19,18 @@ def analytic_layer_small_loop_face_conductivity_comparison( plotIt=False, ): # Some static parameters - PHI = np.linspace(0, 2 * np.pi, 21) loop_radius = np.pi**-0.5 - receiver_location = np.c_[50.0, 0.0, 1.0] + receiver_location = np.c_[12.0, 0.0, 1.0] source_location = np.r_[0.0, 0.0, 1.0] + frequencies = np.logspace(2, 3, 2) - if orientation == "X": - source_nodes = np.c_[ - np.zeros_like(PHI), - loop_radius * np.cos(PHI), - 1.0 + loop_radius * np.sin(PHI), - ] - elif orientation == "Z": - source_nodes = np.c_[ - loop_radius * np.cos(PHI), loop_radius * np.sin(PHI), np.ones_like(PHI) - ] - - layer_depth = 24.0 + layer_depth = 40.0 layer_thickness = 0.1 - layer_conductivity = 5e-3 - background_conductivity = 5e-3 + 
layer_conductivity = 100 + background_conductivity = 2.5e-3 tau = layer_thickness * layer_conductivity - # if bounds is None: - # bounds = [1e-5, 1e-3] - # 1D LAYER MODEL thicknesses = np.array([layer_depth - layer_thickness / 2, layer_thickness]) n_layer = len(thicknesses) + 1 @@ -56,27 +42,40 @@ def analytic_layer_small_loop_face_conductivity_comparison( # 3D LAYER MODEL if mesh_type == "CYL": - cs, ncx, ncz, npad = 4.0, 40, 20, 20 - hx = [(cs, ncx), (cs, npad, 1.3)] - hz = [(cs, npad, -1.3), (cs, ncz), (cs, npad, 1.3)] - mesh = discretize.CylindricalMesh([hx, 1, hz], "00C") - - elif mesh_type == "TENSOR": - cs, nc, npad = 8.0, 15, 10 - hx = [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)] - hy = [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)] - hz = [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)] - mesh = discretize.TensorMesh([hx, hy, hz], "CCC") + hr = [(2.0, 120), (2.0, 25, 1.3)] + hz = [(2.0, 25, -1.3), (2.0, 200), (2.0, 25, 1.3)] + + mesh = discretize.CylindricalMesh([hr, 1, hz], x0="00C") + + ind = np.where(mesh.h[2] == np.min(mesh.h[2]))[0] + ind = ind[int(len(ind)/2)] + + mesh.origin = mesh.origin - np.r_[0., 0., mesh.nodes_z[ind]-24] + + elif mesh_type == "TREE": + dh = 2.5 # base cell width + dom_width = 8000.0 # domain width + nbc = 2 ** int(np.round(np.log(dom_width / dh) / np.log(2.0))) # num. 
base cells + + h = [(dh, nbc)] + mesh = discretize.TreeMesh([h, h, h], x0="CCC") + mesh.refine_points( + np.reshape(source_location, (1, 3)), level=-1, padding_cells_by_level=[8, 4, 4, 4], finalize=False + ) + x0s = np.vstack([ii*np.c_[-60, -60, -60] for ii in range(1, 5)]) + x1s = np.vstack([ii*np.c_[60, 60, 10] for ii in range(1, 5)]) + + mesh.refine_box(x0s, x1s, levels=[-2, -3, -4, -5], finalize=False) + mesh.finalize() sigma_3d = 1e-8 * np.ones(mesh.nC) sigma_3d[mesh.cell_centers[:, -1] < 0.0] = background_conductivity tau_3d = np.zeros(mesh.nF) - # tau_3d[np.isclose(mesh.faces[:, -1], -layer_depth)] = tau + tau_3d[np.isclose(mesh.faces[:, -1], -layer_depth)] = tau tau_map = maps.IdentityMap(nP=mesh.n_faces) # DEFINE SURVEY - frequencies = np.logspace(3, 4, 2) rx_list = [ getattr(fdem.receivers, "Point{}Secondary".format(rx_type))( receiver_location, component="real", orientation=orientation @@ -114,21 +113,14 @@ def analytic_layer_small_loop_face_conductivity_comparison( ) for f in frequencies ] else: - if formulation == "MagneticFluxDensity": - src_3d = [ - fdem.sources.MagDipole( - rx_list, - f, - location=source_location, - orientation=orientation, - ) for f in frequencies - ] - else: - src_3d = [ - fdem.sources.LineCurrent( - rx_list, f, location=source_nodes, - ) for f in frequencies - ] + src_3d = [ + fdem.sources.MagDipole( + rx_list, + f, + location=source_location, + orientation=orientation, + ) for f in frequencies + ] survey_3d = fdem.Survey(src_3d) @@ -146,8 +138,8 @@ def analytic_layer_small_loop_face_conductivity_comparison( analytic_solution = mu_0 * sim_1d.dpred(sigma_1d) # ALWAYS RETURNS H-FIELD numeric_solution = sim_3d.dpred(tau_3d) - print(analytic_solution) - print(numeric_solution) + # print(analytic_solution) + # print(numeric_solution) diff = ( np.linalg.norm(np.abs(numeric_solution - analytic_solution)) / @@ -170,33 +162,33 @@ class LayerConductanceTests(unittest.TestCase): # Compares analytic 1D layered Earth solution to a plate of 
equivalent # conductance. - # def test_tensor_magdipole_b_x(self): - # assert ( - # analytic_layer_small_loop_face_conductivity_comparison( - # mesh_type="TENSOR", - # formulation="MagneticFluxDensity", - # rx_type="MagneticFluxDensity", - # orientation="X", - # bounds=None, - # plotIt=False, - # ) - # < 0.01 - # ) - - # def test_tensor_magdipole_b_z(self): - # assert ( - # analytic_layer_small_loop_face_conductivity_comparison( - # mesh_type="TENSOR", - # formulation="MagneticFluxDensity", - # rx_type="MagneticFluxDensity", - # orientation="Z", - # bounds=None, - # plotIt=False, - # ) - # < 0.02 - # ) - - def test_cyl_magdipole_b_z(self): + def test_tree_Bform_magdipole_b_x(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="TREE", + formulation="MagneticFluxDensity", + rx_type="MagneticFluxDensity", + orientation="X", + bounds=None, + plotIt=False, + ) + < 0.04 + ) + + def test_tree_Bform_magdipole_b_z(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="TREE", + formulation="MagneticFluxDensity", + rx_type="MagneticFluxDensity", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.04 + ) + + def test_cyl_Bform_loop_b_z(self): assert ( analytic_layer_small_loop_face_conductivity_comparison( mesh_type="CYL", @@ -209,33 +201,33 @@ def test_cyl_magdipole_b_z(self): < 0.01 ) - # def test_tensor_linecurrent_b_x(self): - # assert ( - # analytic_layer_small_loop_face_conductivity_comparison( - # mesh_type="TENSOR", - # formulation="ElectricField", - # rx_type="MagneticFluxDensity", - # orientation="X", - # bounds=None, - # plotIt=False, - # ) - # < 0.01 - # ) - - # def test_tensor_linecurrent_b_z(self): - # assert ( - # analytic_layer_small_loop_face_conductivity_comparison( - # mesh_type="TENSOR", - # formulation="ElectricField", - # rx_type="MagneticFluxDensity", - # orientation="Z", - # bounds=None, - # plotIt=False, - # ) - # < 0.01 - # ) - - def test_cyl_linecurrent_b_z(self): + def 
test_tree_Eform_magdipole_b_x(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="TREE", + formulation="ElectricField", + rx_type="MagneticFluxDensity", + orientation="X", + bounds=None, + plotIt=False, + ) + < 0.04 + ) + + def test_tree_Eform_magdipole_b_z(self): + assert ( + analytic_layer_small_loop_face_conductivity_comparison( + mesh_type="TREE", + formulation="ElectricField", + rx_type="MagneticFluxDensity", + orientation="Z", + bounds=None, + plotIt=False, + ) + < 0.04 + ) + + def test_cyl_Eform_loop_b_z(self): assert ( analytic_layer_small_loop_face_conductivity_comparison( mesh_type="CYL", From e013ec0b1bb2452fe6a6f420b702ee07d4657298 Mon Sep 17 00:00:00 2001 From: dccowan Date: Wed, 1 Nov 2023 16:12:12 -0700 Subject: [PATCH 083/164] Add deriv and adjoint tests for FDEM --- .../frequency_domain/fields.py | 66 +++++-- .../frequency_domain/simulation.py | 4 +- .../electromagnetics/utils/testing_utils.py | 146 +++++++++++++++- .../inverse/adjoint/test_FDEM_adjointEB.py | 164 +++++++++++++++++- .../fdem/inverse/derivs/test_FDEM_derivs.py | 84 ++++++++- 5 files changed, 437 insertions(+), 27 deletions(-) diff --git a/SimPEG/electromagnetics/frequency_domain/fields.py b/SimPEG/electromagnetics/frequency_domain/fields.py index 6661c10552..6e059f4bed 100644 --- a/SimPEG/electromagnetics/frequency_domain/fields.py +++ b/SimPEG/electromagnetics/frequency_domain/fields.py @@ -623,6 +623,18 @@ def _charge_density(self, eSolution, source_list): class Fields3DElectricFieldFaceEdgeConductivity(Fields3DElectricField): + + def startup(self): + self._edgeCurl = self.simulation.mesh.edge_curl + self._aveE2CCV = self.simulation.mesh.aveE2CCV + self._aveF2CCV = self.simulation.mesh.aveF2CCV + self._nC = self.simulation.mesh.nC + self.__MeSigmaTauKappa = self.simulation._MeSigmaTauKappa + self.__MeSigmaTauKappaDeriv = self.simulation._MeSigmaTauKappaDeriv + self._MfMui = self.simulation.MfMui + self._MfMuiDeriv = self.simulation.MfMuiDeriv + 
self._MeI = self.simulation.MeI + self._MfI = self.simulation.MfI def _j(self, eSolution, source_list): """ @@ -656,9 +668,21 @@ def _jDeriv_u(self, src, du_dm_v, adjoint=False): ) def _jDeriv_m(self, src, v, adjoint=False): - raise NotImplementedError ( - "derivative wrt to model not implemented." - ) + + e = self[src, "e"] + + if adjoint: + return ( + self.__MeSigmaTauKappaDeriv(e, (self._MeI.T * v), adjoint=adjoint) + + self._eDeriv_m(src, (self._MeI.T * v), adjoint=adjoint) + ) + src.jPrimaryDeriv(self.simulation, v, adjoint) + return ( + self._MeI + * ( + self._eDeriv_m(src, v, adjoint=adjoint) + + self.__MeSigmaTauKappaDeriv(e, v, adjoint=adjoint) + ) + ) + src.jPrimaryDeriv(self.simulation, v, adjoint) class Fields3DMagneticFluxDensity(FieldsFDEM): @@ -1001,14 +1025,16 @@ class Fields3DMagneticFluxDensityFaceEdgeConductivity(Fields3DMagneticFluxDensit def startup(self): self._edgeCurl = self.simulation.mesh.edge_curl - self._MeSigma = self.simulation.MeSigma - self._MeSigmaI = self.simulation.MeSigmaI + # self._MeSigma = self.simulation.MeSigma + # self._MeSigmaI = self.simulation.MeSigmaI self._MfMui = self.simulation.MfMui self._MfMuiDeriv = self.simulation.MfMuiDeriv - self._MeSigmaDeriv = self.simulation.MeSigmaDeriv - self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv + # self._MeSigmaDeriv = self.simulation.MeSigmaDeriv + # self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv self.__MeSigmaTauKappa = self.simulation._MeSigmaTauKappa self.__MeSigmaTauKappaI = self.simulation._MeSigmaTauKappaI + self.__MeSigmaTauKappaDeriv = self.simulation._MeSigmaTauKappaDeriv + self.__MeSigmaTauKappaIDeriv = self.simulation._MeSigmaTauKappaIDeriv self._Me = self.simulation.Me self._aveF2CCV = self.simulation.mesh.aveF2CCV self._aveE2CCV = self.simulation.mesh.aveE2CCV @@ -1041,7 +1067,7 @@ def _eSecondary(self, bSolution, source_list): e[:, i] = MeyhatI * e[:, i] if self.simulation.permittivity is None: - return self._MeSigmaTauKappaI * e + return 
self.__MeSigmaTauKappaI * e else: return e @@ -1062,10 +1088,28 @@ def _eDeriv_u(self, src, du_dm_v, adjoint=False): return self.__MeSigmaTauKappaI * (self._edgeCurl.T * (self._MfMui * du_dm_v)) return self._MfMui.T * (self._edgeCurl * (self.__MeSigmaTauKappaI.T * du_dm_v)) - # NEED TO ADD THIS def _eDeriv_m(self, src, v, adjoint=False): - raise NotImplementedError ( - "Derivative wrt model not implemented yet." + bSolution = mkvc(self[src, "bSolution"]) + s_e = src.s_e(self.simulation) + + w = -s_e + self._edgeCurl.T * (self._MfMui * bSolution) + + if adjoint: + s_eDeriv = src.s_eDeriv(self.simulation, self.__MeSigmaTauKappaI.T * v, adjoint) + return ( + self.__MeSigmaTauKappaIDeriv(w, v, adjoint) + + self._MfMuiDeriv( + bSolution, self._edgeCurl * (self.__MeSigmaTauKappaI.T * v), adjoint + ) + - s_eDeriv + + src.ePrimaryDeriv(self.simulation, v, adjoint) + ) + s_eDeriv = src.s_eDeriv(self.simulation, v, adjoint) + return ( + self.__MeSigmaTauKappaIDeriv(w, v) + + self.__MeSigmaTauKappaI * (self._edgeCurl.T * self._MfMuiDeriv(bSolution, v)) + - self.__MeSigmaTauKappaI * s_eDeriv + + src.ePrimaryDeriv(self.simulation, v, adjoint) ) def _j(self, bSolution, source_list): diff --git a/SimPEG/electromagnetics/frequency_domain/simulation.py b/SimPEG/electromagnetics/frequency_domain/simulation.py index f581952161..a0daac55cd 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation.py @@ -471,8 +471,8 @@ def getADeriv_sigma(self, freq, u, v, adjoint=False): adjoint (nD,) """ - dMe_dtau_v = self._MeSigmaTauKappaDeriv(u, v, adjoint) - return 1j * omega(freq) * dMe_dtau_v + dMe_dsigma_v = self._MeSigmaTauKappaDeriv(u, v, adjoint) + return 1j * omega(freq) * dMe_dsigma_v def getADeriv(self, freq, u, v, adjoint=False): return ( diff --git a/SimPEG/electromagnetics/utils/testing_utils.py b/SimPEG/electromagnetics/utils/testing_utils.py index 3315c71061..8302b3c4b0 100644 --- 
a/SimPEG/electromagnetics/utils/testing_utils.py +++ b/SimPEG/electromagnetics/utils/testing_utils.py @@ -138,6 +138,124 @@ def getFDEMProblem(fdemType, comp, SrcList, freq, useMu=False, verbose=False): return prb +def getFDEMProblem_FaceEdgeConductivity(fdemType, comp, SrcList, freq, useMu=False, verbose=False): + cs = 10.0 + ncx, ncy, ncz = 0, 0, 0 + npad = 8 + hx = [(cs, npad, -1.3), (cs, ncx), (cs, npad, 1.3)] + hy = [(cs, npad, -1.3), (cs, ncy), (cs, npad, 1.3)] + hz = [(cs, npad, -1.3), (cs, ncz), (cs, npad, 1.3)] + mesh = TensorMesh([hx, hy, hz], ["C", "C", "C"]) + + if useMu is True: + wire_map = maps.Wires( + ("log_sigma", mesh.nC), + ("log_tau", mesh.nF), + ("log_kappa", mesh.nE), + ("mu", mesh.nC) + ) + else: + wire_map = maps.Wires( + ("log_sigma", mesh.nC), + ("log_tau", mesh.nF), + ("log_kappa", mesh.nE) + ) + + sigma_map = maps.ExpMap(nP=mesh.nC) * wire_map.log_sigma + tau_map = maps.ExpMap(nP=mesh.nF) * wire_map.log_tau + kappa_map = maps.ExpMap(nP=mesh.nE) * wire_map.log_kappa + if useMu: + mu_map = maps.IdentityMap(nP=mesh.nC) * wire_map.mu + else: + mu_map = None + + x = ( + np.array( + [np.linspace(-5.0 * cs, -2.0 * cs, 3), np.linspace(5.0 * cs, 2.0 * cs, 3)] + ) + + cs / 4.0 + ) # don't sample right by the source, slightly off alignment from either staggered grid + XYZ = utils.ndgrid(x, x, np.linspace(-2.0 * cs, 2.0 * cs, 5)) + Rx0 = getattr(fdem.Rx, "Point" + comp[0]) + if comp[-1] == "r": + real_or_imag = "real" + elif comp[-1] == "i": + real_or_imag = "imag" + rx0 = Rx0(XYZ, comp[1], real_or_imag) + + Src = [] + + for SrcType in SrcList: + if SrcType == "MagDipole": + Src.append( + fdem.Src.MagDipole([rx0], frequency=freq, location=np.r_[0.0, 0.0, 0.0]) + ) + elif SrcType == "MagDipole_Bfield": + Src.append( + fdem.Src.MagDipole_Bfield( + [rx0], frequency=freq, location=np.r_[0.0, 0.0, 0.0] + ) + ) + elif SrcType == "CircularLoop": + Src.append( + fdem.Src.CircularLoop( + [rx0], frequency=freq, location=np.r_[0.0, 0.0, 0.0] + ) + ) + elif 
SrcType == "LineCurrent": + Src.append( + fdem.Src.LineCurrent( + [rx0], + frequency=freq, + location=np.array([[0.0, 0.0, 0.0], [20.0, 0.0, 0.0]]), + ) + ) + elif SrcType == "RawVec": + S_m = np.zeros(mesh.nF) + S_e = np.zeros(mesh.nE) + S_m[ + mesh.closest_points_index([0.0, 0.0, 0.0], "Fz") + + np.sum(mesh.vnF[:1]) + ] = 1e-3 + S_e[ + mesh.closest_points_index([0.0, 0.0, 0.0], "Ez") + + np.sum(mesh.vnE[:1]) + ] = 1e-3 + Src.append( + fdem.Src.RawVec( + [rx0], freq, S_m, mesh.get_edge_inner_product() * S_e + ) + ) + + if verbose: + print(" Fetching {0!s} problem".format((fdemType))) + + if fdemType == "e": + survey = fdem.Survey(Src) + prb = fdem.Simulation3DElectricFieldFaceEdgeConductivity( + mesh, survey=survey, sigmaMap=sigma_map, tauMap=tau_map, kappaMap=kappa_map, muMap=mu_map + ) + + elif fdemType == "b": + survey = fdem.Survey(Src) + prb = fdem.Simulation3DMagneticFluxDensityFaceEdgeConductivity( + mesh, survey=survey, sigmaMap=sigma_map, tauMap=tau_map, kappaMap=kappa_map, muMap=mu_map + ) + + else: + raise NotImplementedError("NO SIMULATION FOR H OR J FORMULATION") + + try: + from pymatsolver import Pardiso + + prb.solver = Pardiso + except ImportError: + prb.solver = SolverLU + # prb.solver_opts = dict(check_accuracy=True) + + return prb + + def crossCheckTest( SrcList, fdemType1, @@ -147,11 +265,16 @@ def crossCheckTest( useMu=False, TOL=1e-5, verbose=False, + sigma_only=True ): def l2norm(r): return np.sqrt(r.dot(r)) - prb1 = getFDEMProblem(fdemType1, comp, SrcList, freq, useMu, verbose) + if sigma_only: + prb1 = getFDEMProblem(fdemType1, comp, SrcList, freq, useMu, verbose) + else: + prb1 = getFDEMProblem_FaceEdgeConductivity(fdemType1, comp, SrcList, freq, useMu, verbose) + mesh = prb1.mesh print( "Cross Checking Forward: {0!s}, {1!s} formulations - {2!s}".format( @@ -160,23 +283,36 @@ def l2norm(r): ) logsig = np.log(np.ones(mesh.nC) * CONDUCTIVITY) + logtau = np.log(np.ones(mesh.nF) * CONDUCTIVITY * np.min(mesh.h[0])) + logkappa = 
np.log(np.ones(mesh.nE) * CONDUCTIVITY * np.min(mesh.h[0])**2) mu = np.ones(mesh.nC) * MU if addrandoms is True: logsig += np.random.randn(mesh.nC) * np.log(CONDUCTIVITY) * 1e-1 + logtau += np.random.randn(mesh.nF) * np.log(CONDUCTIVITY) * 1e-1 + logkappa += np.random.randn(mesh.nE) * np.log(CONDUCTIVITY) * 1e-1 mu += np.random.randn(mesh.nC) * MU * 1e-1 - if useMu is True: - m = np.r_[logsig, mu] + if sigma_only: + if useMu: + m = np.r_[logsig, mu] + else: + m = logsig else: - m = logsig + if useMu: + m = np.r_[logsig, logtau, logkappa, mu] + else: + m = np.r_[logsig, logtau, logkappa] d1 = prb1.dpred(m) if verbose: print(" Problem 1 solved") - prb2 = getFDEMProblem(fdemType2, comp, SrcList, freq, useMu, verbose) + if sigma_only: + prb2 = getFDEMProblem(fdemType2, comp, SrcList, freq, useMu, verbose) + else: + prb2 = getFDEMProblem_FaceEdgeConductivity(fdemType2, comp, SrcList, freq, useMu, verbose) d2 = prb2.dpred(m) diff --git a/tests/em/fdem/inverse/adjoint/test_FDEM_adjointEB.py b/tests/em/fdem/inverse/adjoint/test_FDEM_adjointEB.py index 814cefebaa..26134071b5 100644 --- a/tests/em/fdem/inverse/adjoint/test_FDEM_adjointEB.py +++ b/tests/em/fdem/inverse/adjoint/test_FDEM_adjointEB.py @@ -1,7 +1,7 @@ import unittest import numpy as np from scipy.constants import mu_0 -from SimPEG.electromagnetics.utils.testing_utils import getFDEMProblem +from SimPEG.electromagnetics.utils.testing_utils import getFDEMProblem, getFDEMProblem_FaceEdgeConductivity testE = True testB = True @@ -18,12 +18,16 @@ SrcList = ["RawVec", "MagDipole"] # or 'MAgDipole_Bfield', 'CircularLoop', 'RawVec' -def adjointTest(fdemType, comp): - prb = getFDEMProblem(fdemType, comp, SrcList, freq) +def adjointTest(fdemType, comp, sigma_only=True): + + if sigma_only: + prb = getFDEMProblem(fdemType, comp, SrcList, freq) + else: + prb = getFDEMProblem_FaceEdgeConductivity(fdemType, comp, SrcList, freq) # prb.solverOpts = dict(check_accuracy=True) print("Adjoint {0!s} formulation - 
{1!s}".format(fdemType, comp)) - m = np.log(np.ones(prb.sigmaMap.nP) * CONDUCTIVITY) + m = np.log(np.ones(prb.sigmaMap.nP) * CONDUCTIVITY) # works for sigma_only and sigma, tau, kappa mu = np.ones(prb.mesh.nC) * MU if addrandoms is True: @@ -36,7 +40,7 @@ def adjointTest(fdemType, comp): u = prb.fields(m) v = np.random.rand(survey.nD) - w = np.random.rand(prb.mesh.nC) + w = np.random.rand(prb.sigmaMap.nP) # works for sigma_only and sigma, tau, kappa vJw = v.dot(prb.Jvec(m, w, u)) wJtv = w.dot(prb.Jtvec(m, v, u)) @@ -48,6 +52,7 @@ def adjointTest(fdemType, comp): class FDEM_AdjointTests(unittest.TestCase): if testE: + # SIGMA ONLY def test_Jtvec_adjointTest_exr_Eform(self): self.assertTrue(adjointTest("e", ["ElectricField", "x", "r"])) @@ -119,6 +124,79 @@ def test_Jtvec_adjointTest_hyi_Eform(self): def test_Jtvec_adjointTest_hzi_Eform(self): self.assertTrue(adjointTest("e", ["MagneticField", "z", "i"])) + + # FACE EDGE CONDUCTIVITY + def test_Jtvec_adjointTest_exr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["ElectricField", "x", "r"], False)) + + def test_Jtvec_adjointTest_eyr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["ElectricField", "y", "r"], False)) + + def test_Jtvec_adjointTest_ezr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["ElectricField", "z", "r"], False)) + + def test_Jtvec_adjointTest_exi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["ElectricField", "x", "i"], False)) + + def test_Jtvec_adjointTest_eyi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["ElectricField", "y", "i"], False)) + + def test_Jtvec_adjointTest_ezi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["ElectricField", "z", "i"], False)) + + def test_Jtvec_adjointTest_bxr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "x", "r"], False)) + + def 
test_Jtvec_adjointTest_byr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "y", "r"], False)) + + def test_Jtvec_adjointTest_bzr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "z", "r"], False)) + + def test_Jtvec_adjointTest_bxi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "x", "i"], False)) + + def test_Jtvec_adjointTest_byi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "y", "i"], False)) + + def test_Jtvec_adjointTest_bzi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "z", "i"], False)) + + def test_Jtvec_adjointTest_jxr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["CurrentDensity", "x", "r"], False)) + + def test_Jtvec_adjointTest_jyr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["CurrentDensity", "y", "r"], False)) + + def test_Jtvec_adjointTest_jzr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["CurrentDensity", "z", "r"], False)) + + def test_Jtvec_adjointTest_jxi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["CurrentDensity", "x", "i"], False)) + + def test_Jtvec_adjointTest_jyi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["CurrentDensity", "y", "i"], False)) + + def test_Jtvec_adjointTest_jzi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["CurrentDensity", "z", "i"], False)) + + def test_Jtvec_adjointTest_hxr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticField", "x", "r"], False)) + + def test_Jtvec_adjointTest_hyr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticField", "y", "r"], False)) + + def test_Jtvec_adjointTest_hzr_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticField", "z", "r"], 
False)) + + def test_Jtvec_adjointTest_hxi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticField", "x", "i"], False)) + + def test_Jtvec_adjointTest_hyi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticField", "y", "i"], False)) + + def test_Jtvec_adjointTest_hzi_Eform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("e", ["MagneticField", "z", "i"], False)) if testB: @@ -193,3 +271,79 @@ def test_Jtvec_adjointTest_hyi_Bform(self): def test_Jtvec_adjointTest_hzi_Bform(self): self.assertTrue(adjointTest("b", ["MagneticField", "z", "i"])) + + # FACE EDGE CONDUCTIVITY + def test_Jtvec_adjointTest_exr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["ElectricField", "x", "r"], False)) + + def test_Jtvec_adjointTest_eyr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["ElectricField", "y", "r"], False)) + + def test_Jtvec_adjointTest_ezr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["ElectricField", "z", "r"], False)) + + def test_Jtvec_adjointTest_exi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["ElectricField", "x", "i"], False)) + + def test_Jtvec_adjointTest_eyi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["ElectricField", "y", "i"], False)) + + def test_Jtvec_adjointTest_ezi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["ElectricField", "z", "i"], False)) + + def test_Jtvec_adjointTest_bxr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "x", "r"], False)) + + def test_Jtvec_adjointTest_byr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "y", "r"], False)) + + def test_Jtvec_adjointTest_bzr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "z", "r"], False)) + + def test_Jtvec_adjointTest_bxi_Bform_FaceEdgeConductivity(self): + 
self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "x", "i"], False)) + + def test_Jtvec_adjointTest_byi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "y", "i"], False)) + + def test_Jtvec_adjointTest_bzi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "z", "i"], False)) + + def test_Jtvec_adjointTest_jxr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["CurrentDensity", "x", "r"], False)) + + def test_Jtvec_adjointTest_jyr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["CurrentDensity", "y", "r"], False)) + + def test_Jtvec_adjointTest_jzr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["CurrentDensity", "z", "r"], False)) + + def test_Jtvec_adjointTest_jxi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["CurrentDensity", "x", "i"], False)) + + def test_Jtvec_adjointTest_jyi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["CurrentDensity", "y", "i"], False)) + + def test_Jtvec_adjointTest_jzi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["CurrentDensity", "z", "i"], False)) + + def test_Jtvec_adjointTest_hxr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticField", "x", "r"], False)) + + def test_Jtvec_adjointTest_hyr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticField", "y", "r"], False)) + + def test_Jtvec_adjointTest_hzr_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticField", "z", "r"], False)) + + def test_Jtvec_adjointTest_hxi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticField", "x", "i"], False)) + + def test_Jtvec_adjointTest_hyi_Bform_FaceEdgeConductivity(self): + self.assertTrue(adjointTest("b", ["MagneticField", "y", "i"], False)) + + def test_Jtvec_adjointTest_hzi_Bform_FaceEdgeConductivity(self): + 
self.assertTrue(adjointTest("b", ["MagneticField", "z", "i"], False)) + +if __name__ == "__main__": + unittest.main() diff --git a/tests/em/fdem/inverse/derivs/test_FDEM_derivs.py b/tests/em/fdem/inverse/derivs/test_FDEM_derivs.py index dc02014c59..220f8eade4 100644 --- a/tests/em/fdem/inverse/derivs/test_FDEM_derivs.py +++ b/tests/em/fdem/inverse/derivs/test_FDEM_derivs.py @@ -2,7 +2,7 @@ import numpy as np from SimPEG import tests from scipy.constants import mu_0 -from SimPEG.electromagnetics.utils.testing_utils import getFDEMProblem +from SimPEG.electromagnetics.utils.testing_utils import getFDEMProblem, getFDEMProblem_FaceEdgeConductivity testE = False testB = True @@ -27,12 +27,17 @@ ] # or 'MAgDipole_Bfield', 'CircularLoop', 'RawVec' -def derivTest(fdemType, comp, src): - prb = getFDEMProblem(fdemType, comp, SrcType, freq) +def derivTest(fdemType, comp, src, sigma_only=True): + + if sigma_only: + prb = getFDEMProblem(fdemType, comp, SrcType, freq) + else: + prb = getFDEMProblem_FaceEdgeConductivity(fdemType, comp, SrcType, freq) # prb.solverOpts = dict(check_accuracy=True) print(f"{fdemType} formulation {src} - {comp}") - x0 = np.log(np.ones(prb.sigmaMap.nP) * CONDUCTIVITY) + + x0 = np.log(np.ones(prb.sigmaMap.nP) * CONDUCTIVITY) # should work # mu = np.log(np.ones(prb.mesh.nC)*MU) if addrandoms is True: @@ -82,6 +87,40 @@ def test_Jvec_h_Eform(self): derivTest("e", ["MagneticField", orientation, comp], src) ) + def test_Jvec_e_Eform_FaceEdgeConductivity(self): + for src in SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest("e", ["ElectricField", orientation, comp], src, sigma_only=False) + ) + + def test_Jvec_b_Eform_FaceEdgeConductivity(self): + for src in SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest( + "e", ["MagneticFluxDensity", orientation, comp], src, sigma_only=False + ) + ) + + def test_Jvec_j_Eform_FaceEdgeConductivity(self): + for src in 
SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest("e", ["CurrentDensity", orientation, comp], src, sigma_only=False) + ) + + def test_Jvec_h_Eform_FaceEdgeConductivity(self): + for src in SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest("e", ["MagneticField", orientation, comp], src, sigma_only=False) + ) + if testB: def test_Jvec_e_Bform(self): @@ -117,6 +156,40 @@ def test_Jvec_h_Bform(self): self.assertTrue( derivTest("b", ["MagneticField", orientation, comp], src) ) + + def test_Jvec_e_Bform_FaceEdgeConductivity(self): + for src in SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest("b", ["ElectricField", orientation, comp], src, sigma_only=False) + ) + + def test_Jvec_b_Bform_FaceEdgeConductivity(self): + for src in SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest( + "b", ["MagneticFluxDensity", orientation, comp], src, sigma_only=False + ) + ) + + def test_Jvec_j_Bform_FaceEdgeConductivity(self): + for src in SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest("b", ["CurrentDensity", orientation, comp], src, sigma_only=False) + ) + + def test_Jvec_h_Bform_FaceEdgeConductivity(self): + for src in SrcType: + for orientation in ["x", "y", "z"]: + for comp in ["r", "i"]: + self.assertTrue( + derivTest("b", ["MagneticField", orientation, comp], src, sigma_only=False) + ) if testJ: @@ -189,3 +262,6 @@ def test_Jvec_h_Hform(self): self.assertTrue( derivTest("h", ["MagneticField", orientation, comp], src) ) + +if __name__ == "__main__": + unittest.main() From 0d1f50b590384badf39c4445d15b3462d3d92ebb Mon Sep 17 00:00:00 2001 From: dccowan Date: Wed, 1 Nov 2023 17:28:02 -0700 Subject: [PATCH 084/164] black and flake8 --- SimPEG/base/pde_simulation.py | 4 +- .../frequency_domain/fields.py | 28 +++--- 
.../frequency_domain/simulation.py | 15 ++-- .../electromagnetics/time_domain/receivers.py | 2 +- .../time_domain/simulation.py | 9 +- .../electromagnetics/time_domain/sources.py | 12 +-- .../electromagnetics/utils/testing_utils.py | 48 ++++++---- SimPEG/maps.py | 1 + tests/base/test_mass_matrices.py | 8 +- ..._FDEM_analytic_edge_face_conductivities.py | 61 ++++++------- .../inverse/adjoint/test_FDEM_adjointEB.py | 18 ++-- .../fdem/inverse/derivs/test_FDEM_derivs.py | 67 +++++++++++--- tests/em/tdem/test_TDEM_DerivAdjoint.py | 87 ++++++++++++------- 13 files changed, 228 insertions(+), 132 deletions(-) diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index ec2da25896..4df2238de0 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -846,9 +846,9 @@ def __init__( sigmaMap=None, rho=None, rhoMap=None, - tau=0., + tau=0.0, tauMap=None, - kappa=0., + kappa=0.0, kappaMap=None, **kwargs, ): diff --git a/SimPEG/electromagnetics/frequency_domain/fields.py b/SimPEG/electromagnetics/frequency_domain/fields.py index 6e059f4bed..31552ac12d 100644 --- a/SimPEG/electromagnetics/frequency_domain/fields.py +++ b/SimPEG/electromagnetics/frequency_domain/fields.py @@ -623,7 +623,6 @@ def _charge_density(self, eSolution, source_list): class Fields3DElectricFieldFaceEdgeConductivity(Fields3DElectricField): - def startup(self): self._edgeCurl = self.simulation.mesh.edge_curl self._aveE2CCV = self.simulation.mesh.aveE2CCV @@ -668,7 +667,6 @@ def _jDeriv_u(self, src, du_dm_v, adjoint=False): ) def _jDeriv_m(self, src, v, adjoint=False): - e = self[src, "e"] if adjoint: @@ -1044,7 +1042,6 @@ def startup(self): self._MeI = self.simulation.MeI self._MfI = self.simulation.MfI - def _eSecondary(self, bSolution, source_list): """ Secondary electric field from bSolution @@ -1061,9 +1058,13 @@ def _eSecondary(self, bSolution, source_list): e[:, i] = e[:, i] + -s_e if self.simulation.permittivity is not None: - MeyhatI = 
self.simulation._get_edge_admittivity_property_matrix( - src.frequency, invert_matrix=True - ) + self.__MeTau + self.__MeKappa + MeyhatI = ( + self.simulation._get_edge_admittivity_property_matrix( + src.frequency, invert_matrix=True + ) + + self.__MeTau + + self.__MeKappa + ) e[:, i] = MeyhatI * e[:, i] if self.simulation.permittivity is None: @@ -1085,7 +1086,9 @@ def _eDeriv_u(self, src, du_dm_v, adjoint=False): """ if not adjoint: - return self.__MeSigmaTauKappaI * (self._edgeCurl.T * (self._MfMui * du_dm_v)) + return self.__MeSigmaTauKappaI * ( + self._edgeCurl.T * (self._MfMui * du_dm_v) + ) return self._MfMui.T * (self._edgeCurl * (self.__MeSigmaTauKappaI.T * du_dm_v)) def _eDeriv_m(self, src, v, adjoint=False): @@ -1095,7 +1098,9 @@ def _eDeriv_m(self, src, v, adjoint=False): w = -s_e + self._edgeCurl.T * (self._MfMui * bSolution) if adjoint: - s_eDeriv = src.s_eDeriv(self.simulation, self.__MeSigmaTauKappaI.T * v, adjoint) + s_eDeriv = src.s_eDeriv( + self.simulation, self.__MeSigmaTauKappaI.T * v, adjoint + ) return ( self.__MeSigmaTauKappaIDeriv(w, v, adjoint) + self._MfMuiDeriv( @@ -1107,7 +1112,8 @@ def _eDeriv_m(self, src, v, adjoint=False): s_eDeriv = src.s_eDeriv(self.simulation, v, adjoint) return ( self.__MeSigmaTauKappaIDeriv(w, v) - + self.__MeSigmaTauKappaI * (self._edgeCurl.T * self._MfMuiDeriv(bSolution, v)) + + self.__MeSigmaTauKappaI + * (self._edgeCurl.T * self._MfMuiDeriv(bSolution, v)) - self.__MeSigmaTauKappaI * s_eDeriv + src.ePrimaryDeriv(self.simulation, v, adjoint) ) @@ -1131,7 +1137,9 @@ def _j(self, bSolution, source_list): return self._MeI * j else: - return self._MeI * (self.__MeSigmaTauKappa * self._e(bSolution, source_list)) + return self._MeI * ( + self.__MeSigmaTauKappa * self._e(bSolution, source_list) + ) class Fields3DCurrentDensity(FieldsFDEM): diff --git a/SimPEG/electromagnetics/frequency_domain/simulation.py b/SimPEG/electromagnetics/frequency_domain/simulation.py index a0daac55cd..ead7c9c291 100644 --- 
a/SimPEG/electromagnetics/frequency_domain/simulation.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation.py @@ -432,13 +432,11 @@ def getRHSDeriv(self, freq, src, v, adjoint=False): class Simulation3DElectricFieldFaceEdgeConductivity( Simulation3DElectricField, BaseFaceEdgeElectricalPDESimulation ): - _solutionType = "eSolution" _formulation = "EB" fieldsPair = Fields3DElectricFieldFaceEdgeConductivity def getA(self, freq): - MfMui = self.MfMui C = self.mesh.edge_curl @@ -446,7 +444,11 @@ def getA(self, freq): MeSigmaTauKappa = self._MeSigmaTauKappa A = C.T.tocsr() * MfMui * C + 1j * omega(freq) * MeSigmaTauKappa else: - Meyhat = self._get_edge_admittivity_property_matrix(freq) + self._MeTau + self._MeKappa + Meyhat = ( + self._get_edge_admittivity_property_matrix(freq) + + self._MeTau + + self._MeKappa + ) A = C.T.tocsr() * MfMui * C + 1j * omega(freq) * Meyhat return A @@ -663,6 +665,7 @@ def getRHSDeriv(self, freq, src, v, adjoint=False): return RHSderiv + SrcDeriv + class Simulation3DMagneticFluxDensityFaceEdgeConductivity( Simulation3DMagneticFluxDensity, BaseFaceEdgeElectricalPDESimulation ): @@ -772,9 +775,9 @@ def getRHS(self, freq): RHS = s_m + C * (MeSigmaTauKappaI * s_e) else: MeyhatI = sdinv( - self._get_edge_admittivity_property_matrix( - freq, invert_matrix=False - ) + self._MeTau + self._MeKappa + self._get_edge_admittivity_property_matrix(freq, invert_matrix=False) + + self._MeTau + + self._MeKappa ) RHS = s_m + C * (MeyhatI * s_e) diff --git a/SimPEG/electromagnetics/time_domain/receivers.py b/SimPEG/electromagnetics/time_domain/receivers.py index 6c3734c46d..3179c527af 100644 --- a/SimPEG/electromagnetics/time_domain/receivers.py +++ b/SimPEG/electromagnetics/time_domain/receivers.py @@ -106,7 +106,7 @@ def getSpatialP(self, mesh, f): if strength != 0.0: P = P + strength * mesh.get_interpolation_matrix( self.locations, field + comp - ) + ) return P def getTimeP(self, time_mesh, f): diff --git 
a/SimPEG/electromagnetics/time_domain/simulation.py b/SimPEG/electromagnetics/time_domain/simulation.py index 513b7fcc91..b08ed3fad3 100644 --- a/SimPEG/electromagnetics/time_domain/simulation.py +++ b/SimPEG/electromagnetics/time_domain/simulation.py @@ -3,7 +3,7 @@ from ...data import Data from ...simulation import BaseTimeSimulation -from ...utils import mkvc, sdiag, sdinv, speye, Zero, validate_type, validate_float +from ...utils import mkvc, sdiag, speye, Zero, validate_type, validate_float from ...base import BaseFaceEdgeElectricalPDESimulation from ..base import BaseEMSimulation from .survey import Survey @@ -998,14 +998,14 @@ def getAdiag(self, tInd): def getAdiagDeriv(self, tInd, u, v, adjoint=False): C = self.mesh.edge_curl MfMui = self.MfMui - + u = C.T * (MfMui * u) if adjoint: if self._makeASymmetric is True: v = MfMui * v return self._MeSigmaTauKappaIDeriv(u, C.T * v, adjoint) - + ADeriv = C * self._MeSigmaTauKappaIDeriv(u, v, adjoint) if self._makeASymmetric is True: @@ -1092,7 +1092,6 @@ def getAdiag(self, tInd): return C.T.tocsr() * (MfMui * C) + 1.0 / dt * MeSigmaTauKappa def getAdiagDeriv(self, tInd, u, v, adjoint=False): - assert tInd >= 0 and tInd < self.nT dt = self.time_steps[tInd] @@ -1126,7 +1125,6 @@ def getAsubdiagDeriv(self, tInd, u, v, adjoint=False): return -1.0 / dt * self._MeSigmaTauKappaDeriv(u, v, adjoint) def getAdc(self): - MeSigmaTauKappa = self._MeSigmaTauKappa Grad = self.mesh.nodal_gradient @@ -1143,7 +1141,6 @@ def getAdcDeriv(self, u, v, adjoint=False): return self._MeSigmaTauKappaDeriv(-u, Grad * v, adjoint) - ############################################################################### # # # H-J Formulation # diff --git a/SimPEG/electromagnetics/time_domain/sources.py b/SimPEG/electromagnetics/time_domain/sources.py index 06c90f66ee..873c44f581 100644 --- a/SimPEG/electromagnetics/time_domain/sources.py +++ b/SimPEG/electromagnetics/time_domain/sources.py @@ -1311,35 +1311,35 @@ def _srcFct(self, obsLoc, 
coordinates="cartesian"): def _aSrc(self, simulation): coordinates = "cartesian" - + if isinstance(simulation.mesh, SimplexMesh): if simulation._formulation == "EB": edges = simulation.mesh.edges edge_tangents = simulation.mesh.edge_tangents axyz = self._srcFct(edges, coordinates) - a = np.sum(axyz*edge_tangents, axis=1) + a = np.sum(axyz * edge_tangents, axis=1) else: faces = simulation.mesh.faces face_normals = simulation.mesh.face_normals axyz = self._srcFct(faces, coordinates) - a = np.sum(axyz*face_normals, axis=1) + a = np.sum(axyz * face_normals, axis=1) else: if simulation._formulation == "EB": gridX = simulation.mesh.gridEx gridY = simulation.mesh.gridEy gridZ = simulation.mesh.gridEz - + elif simulation._formulation == "HJ": gridX = simulation.mesh.gridFx gridY = simulation.mesh.gridFy gridZ = simulation.mesh.gridFz - + if simulation.mesh._meshType == "CYL": coordinates = "cylindrical" if simulation.mesh.is_symmetric: return self._srcFct(gridY)[:, 1] - + ax = self._srcFct(gridX, coordinates)[:, 0] ay = self._srcFct(gridY, coordinates)[:, 1] az = self._srcFct(gridZ, coordinates)[:, 2] diff --git a/SimPEG/electromagnetics/utils/testing_utils.py b/SimPEG/electromagnetics/utils/testing_utils.py index 8302b3c4b0..a1818856e0 100644 --- a/SimPEG/electromagnetics/utils/testing_utils.py +++ b/SimPEG/electromagnetics/utils/testing_utils.py @@ -138,7 +138,9 @@ def getFDEMProblem(fdemType, comp, SrcList, freq, useMu=False, verbose=False): return prb -def getFDEMProblem_FaceEdgeConductivity(fdemType, comp, SrcList, freq, useMu=False, verbose=False): +def getFDEMProblem_FaceEdgeConductivity( + fdemType, comp, SrcList, freq, useMu=False, verbose=False +): cs = 10.0 ncx, ncy, ncz = 0, 0, 0 npad = 8 @@ -152,13 +154,11 @@ def getFDEMProblem_FaceEdgeConductivity(fdemType, comp, SrcList, freq, useMu=Fal ("log_sigma", mesh.nC), ("log_tau", mesh.nF), ("log_kappa", mesh.nE), - ("mu", mesh.nC) + ("mu", mesh.nC), ) else: wire_map = maps.Wires( - ("log_sigma", mesh.nC), - 
("log_tau", mesh.nF), - ("log_kappa", mesh.nE) + ("log_sigma", mesh.nC), ("log_tau", mesh.nF), ("log_kappa", mesh.nE) ) sigma_map = maps.ExpMap(nP=mesh.nC) * wire_map.log_sigma @@ -214,17 +214,13 @@ def getFDEMProblem_FaceEdgeConductivity(fdemType, comp, SrcList, freq, useMu=Fal S_m = np.zeros(mesh.nF) S_e = np.zeros(mesh.nE) S_m[ - mesh.closest_points_index([0.0, 0.0, 0.0], "Fz") - + np.sum(mesh.vnF[:1]) + mesh.closest_points_index([0.0, 0.0, 0.0], "Fz") + np.sum(mesh.vnF[:1]) ] = 1e-3 S_e[ - mesh.closest_points_index([0.0, 0.0, 0.0], "Ez") - + np.sum(mesh.vnE[:1]) + mesh.closest_points_index([0.0, 0.0, 0.0], "Ez") + np.sum(mesh.vnE[:1]) ] = 1e-3 Src.append( - fdem.Src.RawVec( - [rx0], freq, S_m, mesh.get_edge_inner_product() * S_e - ) + fdem.Src.RawVec([rx0], freq, S_m, mesh.get_edge_inner_product() * S_e) ) if verbose: @@ -233,13 +229,23 @@ def getFDEMProblem_FaceEdgeConductivity(fdemType, comp, SrcList, freq, useMu=Fal if fdemType == "e": survey = fdem.Survey(Src) prb = fdem.Simulation3DElectricFieldFaceEdgeConductivity( - mesh, survey=survey, sigmaMap=sigma_map, tauMap=tau_map, kappaMap=kappa_map, muMap=mu_map + mesh, + survey=survey, + sigmaMap=sigma_map, + tauMap=tau_map, + kappaMap=kappa_map, + muMap=mu_map, ) elif fdemType == "b": survey = fdem.Survey(Src) prb = fdem.Simulation3DMagneticFluxDensityFaceEdgeConductivity( - mesh, survey=survey, sigmaMap=sigma_map, tauMap=tau_map, kappaMap=kappa_map, muMap=mu_map + mesh, + survey=survey, + sigmaMap=sigma_map, + tauMap=tau_map, + kappaMap=kappa_map, + muMap=mu_map, ) else: @@ -265,7 +271,7 @@ def crossCheckTest( useMu=False, TOL=1e-5, verbose=False, - sigma_only=True + sigma_only=True, ): def l2norm(r): return np.sqrt(r.dot(r)) @@ -273,8 +279,10 @@ def l2norm(r): if sigma_only: prb1 = getFDEMProblem(fdemType1, comp, SrcList, freq, useMu, verbose) else: - prb1 = getFDEMProblem_FaceEdgeConductivity(fdemType1, comp, SrcList, freq, useMu, verbose) - + prb1 = getFDEMProblem_FaceEdgeConductivity( + fdemType1, comp, 
SrcList, freq, useMu, verbose + ) + mesh = prb1.mesh print( "Cross Checking Forward: {0!s}, {1!s} formulations - {2!s}".format( @@ -284,7 +292,7 @@ def l2norm(r): logsig = np.log(np.ones(mesh.nC) * CONDUCTIVITY) logtau = np.log(np.ones(mesh.nF) * CONDUCTIVITY * np.min(mesh.h[0])) - logkappa = np.log(np.ones(mesh.nE) * CONDUCTIVITY * np.min(mesh.h[0])**2) + logkappa = np.log(np.ones(mesh.nE) * CONDUCTIVITY * np.min(mesh.h[0]) ** 2) mu = np.ones(mesh.nC) * MU if addrandoms is True: @@ -312,7 +320,9 @@ def l2norm(r): if sigma_only: prb2 = getFDEMProblem(fdemType2, comp, SrcList, freq, useMu, verbose) else: - prb2 = getFDEMProblem_FaceEdgeConductivity(fdemType2, comp, SrcList, freq, useMu, verbose) + prb2 = getFDEMProblem_FaceEdgeConductivity( + fdemType2, comp, SrcList, freq, useMu, verbose + ) d2 = prb2.dpred(m) diff --git a/SimPEG/maps.py b/SimPEG/maps.py index 75354e4151..88cf2073f6 100644 --- a/SimPEG/maps.py +++ b/SimPEG/maps.py @@ -3488,6 +3488,7 @@ def deriv(self, m, v=None): return self.P * v return self.P + class InjectActiveEdges(IdentityMap): r"""Map active edges model to all edges of a mesh. 
diff --git a/tests/base/test_mass_matrices.py b/tests/base/test_mass_matrices.py index 4f9f8d760b..7bbb657fac 100644 --- a/tests/base/test_mass_matrices.py +++ b/tests/base/test_mass_matrices.py @@ -26,7 +26,9 @@ class SimpleSim(BasePDESimulation): props.Reciprocal(sigma, rho) mu, muMap, muDeriv = props.Invertible("Magnetic Permeability") tau, tauMap, tauDeriv = props.Invertible("Face conductivity, conductance (S)") - kappa, kappaMap, kappaDeriv = props.Invertible("Edge conductivity, conductivity times area (Sm)") + kappa, kappaMap, kappaDeriv = props.Invertible( + "Edge conductivity, conductivity times area (Sm)" + ) def __init__( self, @@ -1412,6 +1414,7 @@ def test_MeI_adjoint(self): vJty = v @ sim._MeKappaIDeriv(u, y, adjoint=True) np.testing.assert_allclose(yJv, vJty) + def test_bad_derivative_stash(): mesh = discretize.TensorMesh([5, 6, 7]) sim = SimpleSim(mesh, sigmaMap=maps.ExpMap()) @@ -1432,5 +1435,6 @@ def test_bad_derivative_stash(): with pytest.raises(TypeError): sim.MeSigmaDeriv(u, v) + if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/tests/em/fdem/forward/test_FDEM_analytic_edge_face_conductivities.py b/tests/em/fdem/forward/test_FDEM_analytic_edge_face_conductivities.py index 0e5387c149..641cc442bc 100644 --- a/tests/em/fdem/forward/test_FDEM_analytic_edge_face_conductivities.py +++ b/tests/em/fdem/forward/test_FDEM_analytic_edge_face_conductivities.py @@ -1,12 +1,9 @@ import unittest import discretize -import matplotlib.pyplot as plt import numpy as np -from pymatsolver import Pardiso as Solver from scipy.constants import mu_0 from SimPEG import maps -from SimPEG.electromagnetics import analytics from SimPEG.electromagnetics import frequency_domain as fdem @@ -44,27 +41,32 @@ def analytic_layer_small_loop_face_conductivity_comparison( if mesh_type == "CYL": hr = [(2.0, 120), (2.0, 25, 1.3)] hz = [(2.0, 25, -1.3), (2.0, 200), (2.0, 25, 1.3)] - + mesh = discretize.CylindricalMesh([hr, 1, hz], 
x0="00C") - + ind = np.where(mesh.h[2] == np.min(mesh.h[2]))[0] - ind = ind[int(len(ind)/2)] - - mesh.origin = mesh.origin - np.r_[0., 0., mesh.nodes_z[ind]-24] - + ind = ind[int(len(ind) / 2)] + + mesh.origin = mesh.origin - np.r_[0.0, 0.0, mesh.nodes_z[ind] - 24] + elif mesh_type == "TREE": dh = 2.5 # base cell width dom_width = 8000.0 # domain width - nbc = 2 ** int(np.round(np.log(dom_width / dh) / np.log(2.0))) # num. base cells - + nbc = 2 ** int( + np.round(np.log(dom_width / dh) / np.log(2.0)) + ) # num. base cells + h = [(dh, nbc)] mesh = discretize.TreeMesh([h, h, h], x0="CCC") mesh.refine_points( - np.reshape(source_location, (1, 3)), level=-1, padding_cells_by_level=[8, 4, 4, 4], finalize=False + np.reshape(source_location, (1, 3)), + level=-1, + padding_cells_by_level=[8, 4, 4, 4], + finalize=False, ) - x0s = np.vstack([ii*np.c_[-60, -60, -60] for ii in range(1, 5)]) - x1s = np.vstack([ii*np.c_[60, 60, 10] for ii in range(1, 5)]) - + x0s = np.vstack([ii * np.c_[-60, -60, -60] for ii in range(1, 5)]) + x1s = np.vstack([ii * np.c_[60, 60, 10] for ii in range(1, 5)]) + mesh.refine_box(x0s, x1s, levels=[-2, -3, -4, -5], finalize=False) mesh.finalize() @@ -82,17 +84,15 @@ def analytic_layer_small_loop_face_conductivity_comparison( ), getattr(fdem.receivers, "Point{}Secondary".format(rx_type))( receiver_location, component="imag", orientation=orientation - ) + ), ] # 1D SURVEY AND SIMULATION src_1d = [ fdem.sources.MagDipole( - rx_list, - f, - location=np.r_[0.0, 0.0, 1.0], - orientation=orientation - ) for f in frequencies + rx_list, f, location=np.r_[0.0, 0.0, 1.0], orientation=orientation + ) + for f in frequencies ] survey_1d = fdem.Survey(src_1d) @@ -110,7 +110,8 @@ def analytic_layer_small_loop_face_conductivity_comparison( f, radius=loop_radius, location=source_location, - ) for f in frequencies + ) + for f in frequencies ] else: src_3d = [ @@ -119,7 +120,8 @@ def analytic_layer_small_loop_face_conductivity_comparison( f, location=source_location, 
orientation=orientation, - ) for f in frequencies + ) + for f in frequencies ] survey_3d = fdem.Survey(src_3d) @@ -137,14 +139,13 @@ def analytic_layer_small_loop_face_conductivity_comparison( # COMPUTE SOLUTIONS analytic_solution = mu_0 * sim_1d.dpred(sigma_1d) # ALWAYS RETURNS H-FIELD numeric_solution = sim_3d.dpred(tau_3d) - + # print(analytic_solution) # print(numeric_solution) - diff = ( - np.linalg.norm(np.abs(numeric_solution - analytic_solution)) / - np.linalg.norm(np.abs(analytic_solution)) - ) + diff = np.linalg.norm( + np.abs(numeric_solution - analytic_solution) + ) / np.linalg.norm(np.abs(analytic_solution)) print( " |bz_ana| = {ana} |bz_num| = {num} |bz_ana-bz_num| = {diff}".format( @@ -154,7 +155,7 @@ def analytic_layer_small_loop_face_conductivity_comparison( ) ) print("Difference: {}".format(diff)) - + return diff @@ -242,4 +243,4 @@ def test_cyl_Eform_loop_b_z(self): if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/tests/em/fdem/inverse/adjoint/test_FDEM_adjointEB.py b/tests/em/fdem/inverse/adjoint/test_FDEM_adjointEB.py index 26134071b5..889b959d81 100644 --- a/tests/em/fdem/inverse/adjoint/test_FDEM_adjointEB.py +++ b/tests/em/fdem/inverse/adjoint/test_FDEM_adjointEB.py @@ -1,7 +1,10 @@ import unittest import numpy as np from scipy.constants import mu_0 -from SimPEG.electromagnetics.utils.testing_utils import getFDEMProblem, getFDEMProblem_FaceEdgeConductivity +from SimPEG.electromagnetics.utils.testing_utils import ( + getFDEMProblem, + getFDEMProblem_FaceEdgeConductivity, +) testE = True testB = True @@ -19,7 +22,6 @@ def adjointTest(fdemType, comp, sigma_only=True): - if sigma_only: prb = getFDEMProblem(fdemType, comp, SrcList, freq) else: @@ -27,7 +29,9 @@ def adjointTest(fdemType, comp, sigma_only=True): # prb.solverOpts = dict(check_accuracy=True) print("Adjoint {0!s} formulation - {1!s}".format(fdemType, comp)) - m = np.log(np.ones(prb.sigmaMap.nP) * CONDUCTIVITY) # works for 
sigma_only and sigma, tau, kappa + m = np.log( + np.ones(prb.sigmaMap.nP) * CONDUCTIVITY + ) # works for sigma_only and sigma, tau, kappa mu = np.ones(prb.mesh.nC) * MU if addrandoms is True: @@ -51,7 +55,6 @@ def adjointTest(fdemType, comp, sigma_only=True): class FDEM_AdjointTests(unittest.TestCase): if testE: - # SIGMA ONLY def test_Jtvec_adjointTest_exr_Eform(self): self.assertTrue(adjointTest("e", ["ElectricField", "x", "r"])) @@ -124,7 +127,7 @@ def test_Jtvec_adjointTest_hyi_Eform(self): def test_Jtvec_adjointTest_hzi_Eform(self): self.assertTrue(adjointTest("e", ["MagneticField", "z", "i"])) - + # FACE EDGE CONDUCTIVITY def test_Jtvec_adjointTest_exr_Eform_FaceEdgeConductivity(self): self.assertTrue(adjointTest("e", ["ElectricField", "x", "r"], False)) @@ -271,7 +274,7 @@ def test_Jtvec_adjointTest_hyi_Bform(self): def test_Jtvec_adjointTest_hzi_Bform(self): self.assertTrue(adjointTest("b", ["MagneticField", "z", "i"])) - + # FACE EDGE CONDUCTIVITY def test_Jtvec_adjointTest_exr_Bform_FaceEdgeConductivity(self): self.assertTrue(adjointTest("b", ["ElectricField", "x", "r"], False)) @@ -344,6 +347,7 @@ def test_Jtvec_adjointTest_hyi_Bform_FaceEdgeConductivity(self): def test_Jtvec_adjointTest_hzi_Bform_FaceEdgeConductivity(self): self.assertTrue(adjointTest("b", ["MagneticField", "z", "i"], False)) - + + if __name__ == "__main__": unittest.main() diff --git a/tests/em/fdem/inverse/derivs/test_FDEM_derivs.py b/tests/em/fdem/inverse/derivs/test_FDEM_derivs.py index 220f8eade4..996352d2d0 100644 --- a/tests/em/fdem/inverse/derivs/test_FDEM_derivs.py +++ b/tests/em/fdem/inverse/derivs/test_FDEM_derivs.py @@ -2,7 +2,10 @@ import numpy as np from SimPEG import tests from scipy.constants import mu_0 -from SimPEG.electromagnetics.utils.testing_utils import getFDEMProblem, getFDEMProblem_FaceEdgeConductivity +from SimPEG.electromagnetics.utils.testing_utils import ( + getFDEMProblem, + getFDEMProblem_FaceEdgeConductivity, +) testE = False testB = True @@ -28,7 +31,6 
@@ def derivTest(fdemType, comp, src, sigma_only=True): - if sigma_only: prb = getFDEMProblem(fdemType, comp, SrcType, freq) else: @@ -36,8 +38,8 @@ def derivTest(fdemType, comp, src, sigma_only=True): # prb.solverOpts = dict(check_accuracy=True) print(f"{fdemType} formulation {src} - {comp}") - - x0 = np.log(np.ones(prb.sigmaMap.nP) * CONDUCTIVITY) # should work + + x0 = np.log(np.ones(prb.sigmaMap.nP) * CONDUCTIVITY) # should work # mu = np.log(np.ones(prb.mesh.nC)*MU) if addrandoms is True: @@ -92,7 +94,12 @@ def test_Jvec_e_Eform_FaceEdgeConductivity(self): for orientation in ["x", "y", "z"]: for comp in ["r", "i"]: self.assertTrue( - derivTest("e", ["ElectricField", orientation, comp], src, sigma_only=False) + derivTest( + "e", + ["ElectricField", orientation, comp], + src, + sigma_only=False, + ) ) def test_Jvec_b_Eform_FaceEdgeConductivity(self): @@ -101,7 +108,10 @@ def test_Jvec_b_Eform_FaceEdgeConductivity(self): for comp in ["r", "i"]: self.assertTrue( derivTest( - "e", ["MagneticFluxDensity", orientation, comp], src, sigma_only=False + "e", + ["MagneticFluxDensity", orientation, comp], + src, + sigma_only=False, ) ) @@ -110,7 +120,12 @@ def test_Jvec_j_Eform_FaceEdgeConductivity(self): for orientation in ["x", "y", "z"]: for comp in ["r", "i"]: self.assertTrue( - derivTest("e", ["CurrentDensity", orientation, comp], src, sigma_only=False) + derivTest( + "e", + ["CurrentDensity", orientation, comp], + src, + sigma_only=False, + ) ) def test_Jvec_h_Eform_FaceEdgeConductivity(self): @@ -118,7 +133,12 @@ def test_Jvec_h_Eform_FaceEdgeConductivity(self): for orientation in ["x", "y", "z"]: for comp in ["r", "i"]: self.assertTrue( - derivTest("e", ["MagneticField", orientation, comp], src, sigma_only=False) + derivTest( + "e", + ["MagneticField", orientation, comp], + src, + sigma_only=False, + ) ) if testB: @@ -156,13 +176,18 @@ def test_Jvec_h_Bform(self): self.assertTrue( derivTest("b", ["MagneticField", orientation, comp], src) ) - + def 
test_Jvec_e_Bform_FaceEdgeConductivity(self): for src in SrcType: for orientation in ["x", "y", "z"]: for comp in ["r", "i"]: self.assertTrue( - derivTest("b", ["ElectricField", orientation, comp], src, sigma_only=False) + derivTest( + "b", + ["ElectricField", orientation, comp], + src, + sigma_only=False, + ) ) def test_Jvec_b_Bform_FaceEdgeConductivity(self): @@ -171,7 +196,10 @@ def test_Jvec_b_Bform_FaceEdgeConductivity(self): for comp in ["r", "i"]: self.assertTrue( derivTest( - "b", ["MagneticFluxDensity", orientation, comp], src, sigma_only=False + "b", + ["MagneticFluxDensity", orientation, comp], + src, + sigma_only=False, ) ) @@ -180,7 +208,12 @@ def test_Jvec_j_Bform_FaceEdgeConductivity(self): for orientation in ["x", "y", "z"]: for comp in ["r", "i"]: self.assertTrue( - derivTest("b", ["CurrentDensity", orientation, comp], src, sigma_only=False) + derivTest( + "b", + ["CurrentDensity", orientation, comp], + src, + sigma_only=False, + ) ) def test_Jvec_h_Bform_FaceEdgeConductivity(self): @@ -188,7 +221,12 @@ def test_Jvec_h_Bform_FaceEdgeConductivity(self): for orientation in ["x", "y", "z"]: for comp in ["r", "i"]: self.assertTrue( - derivTest("b", ["MagneticField", orientation, comp], src, sigma_only=False) + derivTest( + "b", + ["MagneticField", orientation, comp], + src, + sigma_only=False, + ) ) if testJ: @@ -262,6 +300,7 @@ def test_Jvec_h_Hform(self): self.assertTrue( derivTest("h", ["MagneticField", orientation, comp], src) ) - + + if __name__ == "__main__": unittest.main() diff --git a/tests/em/tdem/test_TDEM_DerivAdjoint.py b/tests/em/tdem/test_TDEM_DerivAdjoint.py index fcb9299af3..70abb10996 100644 --- a/tests/em/tdem/test_TDEM_DerivAdjoint.py +++ b/tests/em/tdem/test_TDEM_DerivAdjoint.py @@ -41,8 +41,8 @@ def get_sigma_mapping(mesh): ) return maps.ExpMap(mesh) * maps.SurjectVertical1D(mesh) * activeMap -def get_wire_mappings(mesh): +def get_wire_mappings(mesh): # active cells, faces + edges active_cells = mesh.cell_centers[:, -1] < 0.0 
active_faces = mesh.faces[:, -1] < 0.0 @@ -50,26 +50,33 @@ def get_wire_mappings(mesh): n_active_cells = np.sum(active_cells) n_active_faces = np.sum(active_faces) n_active_edges = np.sum(active_edges) - + # wire map wire_map = maps.Wires( ("log_sigma", n_active_cells), ("log_tau", n_active_faces), - ("log_kappa", n_active_edges) + ("log_kappa", n_active_edges), ) - sigma_map = maps.InjectActiveCells( - mesh, active_cells, 1e-8 - ) * maps.ExpMap(nP=n_active_cells) * wire_map.log_sigma - tau_map = maps.InjectActiveFaces( - mesh, active_faces, 0 - ) * maps.ExpMap(nP=n_active_faces) * wire_map.log_tau - kappa_map = maps.InjectActiveEdges( - mesh, active_edges, 0 - ) * maps.ExpMap(nP=n_active_edges) * wire_map.log_kappa + sigma_map = ( + maps.InjectActiveCells(mesh, active_cells, 1e-8) + * maps.ExpMap(nP=n_active_cells) + * wire_map.log_sigma + ) + tau_map = ( + maps.InjectActiveFaces(mesh, active_faces, 0) + * maps.ExpMap(nP=n_active_faces) + * wire_map.log_tau + ) + kappa_map = ( + maps.InjectActiveEdges(mesh, active_edges, 0) + * maps.ExpMap(nP=n_active_edges) + * wire_map.log_kappa + ) return sigma_map, tau_map, kappa_map + def get_prob(mesh, formulation, sigma_map, **kwargs): prb = getattr(tdem, "Simulation3D{}".format(formulation))( mesh, sigmaMap=sigma_map, **kwargs @@ -78,16 +85,18 @@ def get_prob(mesh, formulation, sigma_map, **kwargs): prb.solver = Solver return prb -def get_face_edge_prob(mesh, formulation, sigma_map=None, tau_map=None, kappa_map=None, **kwargs): + +def get_face_edge_prob( + mesh, formulation, sigma_map=None, tau_map=None, kappa_map=None, **kwargs +): prb = getattr(tdem, "Simulation3D{}".format(formulation))( - mesh, - sigmaMap=sigma_map, tauMap=tau_map, kappaMap=kappa_map, - **kwargs + mesh, sigmaMap=sigma_map, tauMap=tau_map, kappaMap=kappa_map, **kwargs ) prb.time_steps = [(1e-05, 10), (5e-05, 10), (2.5e-4, 10)] prb.solver = Solver return prb + def get_survey(): src1 = tdem.Src.MagDipole([], location=np.array([0.0, 0.0, 0.0])) src2 = 
tdem.Src.MagDipole([], location=np.array([0.0, 0.0, 8.0])) @@ -108,25 +117,35 @@ def setUpClass(self): # sigma_map = get_sigma_mapping(mesh) # self.prob = get_face_edge_prob(mesh, self.formulation, sigma_map=sigma_map, survey=self.survey) # self.m = np.log(1e-1) * np.ones(self.prob.sigmaMap.nP) + 1e-3 * np.random.randn(self.prob.sigmaMap.nP) - + active_cells = mesh.cell_centers[:, -1] < 0.0 active_faces = mesh.faces[:, -1] < 0.0 active_edges = mesh.edges[:, -1] < 0.0 - + sigma_map, tau_map, kappa_map = get_wire_mappings(mesh) self.prob = get_face_edge_prob( - mesh, self.formulation, sigma_map=sigma_map, tau_map=tau_map, kappa_map=kappa_map, survey=self.survey + mesh, + self.formulation, + sigma_map=sigma_map, + tau_map=tau_map, + kappa_map=kappa_map, + survey=self.survey, ) self.m = np.r_[ - np.log(1e-1) * np.ones(np.sum(active_cells)) + 1e-3 * np.random.randn(np.sum(active_cells)), - np.log(10*1e-1) * np.ones(np.sum(active_faces)) + 1e-3 * np.random.randn(np.sum(active_faces)), - np.log(100*1e-1) * np.ones(np.sum(active_edges)) + 1e-3 * np.random.randn(np.sum(active_edges)) + np.log(1e-1) * np.ones(np.sum(active_cells)) + + 1e-3 * np.random.randn(np.sum(active_cells)), + np.log(10 * 1e-1) * np.ones(np.sum(active_faces)) + + 1e-3 * np.random.randn(np.sum(active_faces)), + np.log(100 * 1e-1) * np.ones(np.sum(active_edges)) + + 1e-3 * np.random.randn(np.sum(active_edges)), ] else: sigma_map = get_sigma_mapping(mesh) self.prob = get_prob(mesh, self.formulation, sigma_map, survey=self.survey) - self.m = np.log(1e-1) * np.ones(self.prob.sigmaMap.nP) + 1e-3 * np.random.randn(self.prob.sigmaMap.nP) + self.m = np.log(1e-1) * np.ones( + self.prob.sigmaMap.nP + ) + 1e-3 * np.random.randn(self.prob.sigmaMap.nP) print("Solving Fields for problem {}".format(self.formulation)) t = time.time() @@ -140,14 +159,21 @@ def setUpClass(self): if "FaceEdgeConductivity" in self.formulation: # sigma_map = get_sigma_mapping(mesh) # self.probfwd = get_face_edge_prob(mesh, self.formulation, 
sigma_map=sigma_map, survey=self.surveyfwd) - + sigma_map, tau_map, kappa_map = get_wire_mappings(mesh) self.probfwd = get_face_edge_prob( - mesh, self.formulation, sigma_map=sigma_map, tau_map=tau_map, kappa_map=kappa_map, survey=self.surveyfwd + mesh, + self.formulation, + sigma_map=sigma_map, + tau_map=tau_map, + kappa_map=kappa_map, + survey=self.surveyfwd, ) else: sigma_map = get_sigma_mapping(mesh) - self.probfwd = get_prob(mesh, self.formulation, sigma_map, survey=self.surveyfwd) + self.probfwd = get_prob( + mesh, self.formulation, sigma_map, survey=self.surveyfwd + ) def get_rx(self, rxcomp): rxOffset = 15.0 @@ -192,7 +218,7 @@ def JvecVsJtvecTest(self, rxcomp): print( "\nAdjoint Testing Jvec, Jtvec prob {}, {}".format(self.formulation, rxcomp) ) - + m = np.random.rand(self.prob.sigmaMap.nP) d = np.random.randn(self.prob.survey.nD) V1 = d.dot(self.prob.Jvec(self.m, m, f=self.fields)) @@ -282,8 +308,10 @@ def test_Jvec_adjoint_e_dhdtz(self): def test_Jvec_adjoint_e_jy(self): self.JvecVsJtvecTest("CurrentDensityy") + pass + class DerivAdjoint_E_FaceEdgeConductivity(Base_DerivAdjoint_Test): formulation = "ElectricFieldFaceEdgeConductivity" @@ -392,6 +420,7 @@ def test_Jvec_adjoint_b_dhdtz(self): def test_Jvec_adjoint_b_jy(self): self.JvecVsJtvecTest("CurrentDensityy") + class DerivAdjoint_B_FaceEdgeConductivity(Base_DerivAdjoint_Test): formulation = "MagneticFluxDensityFaceEdgeConductivity" @@ -408,7 +437,7 @@ def test_Jvec_b_dbdtx(self): def test_Jvec_b_dbdtz(self): self.JvecTest("MagneticFluxTimeDerivativez") - + def test_Jvec_b_ey(self): self.JvecTest("ElectricFieldy") @@ -573,4 +602,4 @@ def test_Jvec_adjoint_j_dbdtz(self): if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() From 44b6b15bda57de88673e120446302d5152c3e877 Mon Sep 17 00:00:00 2001 From: dccowan Date: Wed, 1 Nov 2023 17:35:38 -0700 Subject: [PATCH 085/164] fix style checks --- SimPEG/base/pde_simulation.py | 8 ++++---- 
SimPEG/electromagnetics/time_domain/fields.py | 13 ++++++++----- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index 4df2238de0..0dff4a2781 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -880,15 +880,15 @@ def __setattr__(self, name, value): def _MeSigmaTauKappa(self): if getattr(self, "__MeSigmaTauKappa", None) is None: M_prop = self.MeSigma + self._MeTau + self._MeKappa - setattr(self, "__MeSigmaTauKappa", M_prop) - return getattr(self, "__MeSigmaTauKappa") + setattr(self, "__MeSigmaTauKappa", M_prop) # B010 + return getattr(self, "__MeSigmaTauKappa") # B09 @property def _MeSigmaTauKappaI(self): if getattr(self, "__MeSigmaTauKappaI", None) is None: M_prop = sdinv(self.MeSigma + self._MeTau + self._MeKappa) - setattr(self, "__MeSigmaTauKappaI", M_prop) - return getattr(self, "__MeSigmaTauKappaI") + setattr(self, "__MeSigmaTauKappaI", M_prop) # B010 + return getattr(self, "__MeSigmaTauKappaI") # B009 def _MeSigmaTauKappaDeriv_sigma(self, u, v=None, adjoint=False): """Only derivative wrt to sigma""" diff --git a/SimPEG/electromagnetics/time_domain/fields.py b/SimPEG/electromagnetics/time_domain/fields.py index 210c618353..c7ea27400a 100644 --- a/SimPEG/electromagnetics/time_domain/fields.py +++ b/SimPEG/electromagnetics/time_domain/fields.py @@ -304,7 +304,9 @@ def _e(self, bSolution, source_list, tInd): def _eDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): if adjoint is True: - return self._MfMui.T * (self._edgeCurl * (self.__MeSigmaTauKappaI.T * dun_dm_v)) + return self._MfMui.T * ( + self._edgeCurl * (self.__MeSigmaTauKappaI.T * dun_dm_v) + ) return self.__MeSigmaTauKappaI * (self._edgeCurl.T * (self._MfMui * dun_dm_v)) def _eDeriv_m(self, tInd, src, v, adjoint=False): @@ -347,7 +349,8 @@ def _jDeriv_m(self, tInd, src, v, adjoint=False): tInd, src, self.__MeSigmaTauKappa.T * w, adjoint=True ) return self.simulation.MeI * ( - 
self.__MeSigmaTauKappaDeriv(e) * v + self.__MeSigmaTauKappa * self._eDeriv_m(tInd, src, v) + self.__MeSigmaTauKappaDeriv(e) * v + + self.__MeSigmaTauKappa * self._eDeriv_m(tInd, src, v) ) @@ -490,8 +493,7 @@ def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): return self._eDeriv_u( tInd, src, - (self.__MeSigmaTauKappa).T - * (self.simulation.MeI.T * dun_dm_v), + (self.__MeSigmaTauKappa).T * (self.simulation.MeI.T * dun_dm_v), adjoint=True, ) return self.simulation.MeI * ( @@ -506,7 +508,8 @@ def _jDeriv_m(self, tInd, src, v, adjoint=False): tInd, src, self.__MeSigmaTauKappa.T * w, adjoint=True ) return self.simulation.MeI * ( - self.__MeSigmaTauKappaDeriv(e) * v + self.__MeSigmaTauKappa * self._eDeriv_m(tInd, src, v) + self.__MeSigmaTauKappaDeriv(e) * v + + self.__MeSigmaTauKappa * self._eDeriv_m(tInd, src, v) ) From 884838413bd02b6672fb8b278098ac5e90362402 Mon Sep 17 00:00:00 2001 From: dccowan Date: Wed, 1 Nov 2023 17:41:06 -0700 Subject: [PATCH 086/164] add noqa exception --- SimPEG/base/pde_simulation.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index 0dff4a2781..1d1764e3fd 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -880,15 +880,15 @@ def __setattr__(self, name, value): def _MeSigmaTauKappa(self): if getattr(self, "__MeSigmaTauKappa", None) is None: M_prop = self.MeSigma + self._MeTau + self._MeKappa - setattr(self, "__MeSigmaTauKappa", M_prop) # B010 - return getattr(self, "__MeSigmaTauKappa") # B09 + setattr(self, "__MeSigmaTauKappa", M_prop) # noqa: B010 + return getattr(self, "__MeSigmaTauKappa") # noqa: B009 @property def _MeSigmaTauKappaI(self): if getattr(self, "__MeSigmaTauKappaI", None) is None: M_prop = sdinv(self.MeSigma + self._MeTau + self._MeKappa) - setattr(self, "__MeSigmaTauKappaI", M_prop) # B010 - return getattr(self, "__MeSigmaTauKappaI") # B009 + setattr(self, "__MeSigmaTauKappaI", M_prop) # noqa: B010 + 
return getattr(self, "__MeSigmaTauKappaI") # noqa: B009 def _MeSigmaTauKappaDeriv_sigma(self, u, v=None, adjoint=False): """Only derivative wrt to sigma""" From 97d4789d0720a33914de762fe503ceef9296356e Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Fri, 3 Nov 2023 13:28:26 -0700 Subject: [PATCH 087/164] Replace private method for TensorMesh.cell_nodes Use the new `discretize.TensorMesh.cell_nodes` instead of the private method that was in the simulation. Increase the minimum version of discretize in `setup.py` and `environment_test.yml`. --- SimPEG/potential_fields/gravity/simulation.py | 28 ++----------------- environment_test.yml | 2 +- setup.py | 2 +- 3 files changed, 4 insertions(+), 28 deletions(-) diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index 6d69048c2b..2346f0ee9e 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -453,33 +453,9 @@ def _get_cell_nodes(self): """ Return indices of nodes for each cell in the mesh. """ - if isinstance(self.mesh, discretize.TreeMesh): - cell_nodes = self.mesh.cell_nodes - elif isinstance(self.mesh, discretize.TensorMesh): - cell_nodes = self._get_tensormesh_cell_nodes() - else: + if not isinstance(self.mesh, (discretize.TreeMesh, discretize.TensorMesh)): raise TypeError(f"Invalid mesh of type {self.mesh.__class__.__name__}.") - return cell_nodes - - def _get_tensormesh_cell_nodes(self): - """ - Quick implementation of ``cell_nodes`` for a ``TensorMesh``. - - This method should be removed after ``TensorMesh.cell_nodes`` is added - in discretize. 
- """ - inds = np.arange(self.mesh.n_nodes).reshape(self.mesh.shape_nodes, order="F") - cell_nodes = [ - inds[:-1, :-1, :-1].reshape(-1, order="F"), - inds[1:, :-1, :-1].reshape(-1, order="F"), - inds[:-1, 1:, :-1].reshape(-1, order="F"), - inds[1:, 1:, :-1].reshape(-1, order="F"), - inds[:-1, :-1, 1:].reshape(-1, order="F"), - inds[1:, :-1, 1:].reshape(-1, order="F"), - inds[:-1, 1:, 1:].reshape(-1, order="F"), - inds[1:, 1:, 1:].reshape(-1, order="F"), - ] - cell_nodes = np.stack(cell_nodes, axis=-1) + cell_nodes = self.mesh.cell_nodes return cell_nodes def _get_active_nodes(self): diff --git a/environment_test.yml b/environment_test.yml index 06d6ff8db6..888c2180a0 100644 --- a/environment_test.yml +++ b/environment_test.yml @@ -7,7 +7,7 @@ dependencies: - scikit-learn>=1.2 - pymatsolver>=0.2 - matplotlib - - discretize>=0.8 + - discretize>=0.10 - geoana>=0.5.0 - empymod>=2.0.0 - setuptools_scm diff --git a/setup.py b/setup.py index 1a2459028e..9e55ae825a 100644 --- a/setup.py +++ b/setup.py @@ -40,7 +40,7 @@ "scikit-learn>=1.2", "pymatsolver>=0.2", "matplotlib", - "discretize>=0.8", + "discretize>=0.10", "geoana>=0.5.0", "empymod>=2.0.0", "pandas", From c613381e527d84b3f9588f18bd6bcb7d89b4f6f2 Mon Sep 17 00:00:00 2001 From: sgkang Date: Mon, 6 Nov 2023 11:49:17 -0800 Subject: [PATCH 088/164] start implementing lateral constraint for 1D inversion --- .../regularization/laterally_constrained.py | 218 ++++++++++++++ .../regularization_mesh_lateral.py | 282 ++++++++++++++++++ 2 files changed, 500 insertions(+) create mode 100644 SimPEG/regularization/laterally_constrained.py create mode 100644 SimPEG/regularization/regularization_mesh_lateral.py diff --git a/SimPEG/regularization/laterally_constrained.py b/SimPEG/regularization/laterally_constrained.py new file mode 100644 index 0000000000..342b965d62 --- /dev/null +++ b/SimPEG/regularization/laterally_constrained.py @@ -0,0 +1,218 @@ +import scipy as sp +import numpy as np +from .sparse import SparseSmoothness, 
SparseSmallness, Sparse +from .. import utils +import properties +from .. import props + +class LaterallyConstrainedSmallness(SparseSmallness): + """ + Duplicate of SparseSmallness Class + """ + +class LaterallyConstrainedSmoothness(SparseSmoothness): + """ + Modification of SparseSmoothness Class + for addressing radial and vertical gradients of model parameters, + which is a 1D vertical resistivity profile at each of lateral locations. + """ + @property + def cell_gradient(self): + if getattr(self, "_cell_gradient", None) is None: + self._cell_gradient = getattr( + self.regularization_mesh, "cell_gradient_{}".format(self.orientation) + ) + return self._cell_gradient + @property + def _cell_distances(self): + """ + Distances between cell centers for the cell center difference. + """ + return getattr(self.regularization_mesh, f"cell_distances_{self.orientation}") + + @property + def orientation(self): + """Direction along which smoothness is enforced. + + Returns + ------- + {'x','y','z'} + The direction along which smoothness is enforced. 
+ + """ + return self._orientation + + def f_m(self, m): + dfm_dl = self.cell_gradient @ (self.mapping * self._delta_m(m)) + + if self.units is not None and self.units.lower() == "radian": + return ( + utils.mat_utils.coterminal(dfm_dl * self._cell_distances) + / self._cell_distances + ) + return dfm_dl + + def f_m_deriv(self, m) -> csr_matrix: + return self.cell_gradient @ self.mapping.deriv(self._delta_m(m)) + + def update_weights(self, m): + if self.gradient_type == "total" and self.parent is not None: + f_m = np.zeros(self.regularization_mesh.nC) + for obj in self.parent.objfcts: + if isinstance(obj, SparseSmoothness): + avg = getattr(self.regularization_mesh, f"aveF{obj.orientation}2CC") + f_m += np.abs(avg * obj.f_m(m)) + + f_m = getattr(self.regularization_mesh, f"aveCC2F{self.orientation}") * f_m + + else: + f_m = self.f_m(m) + + self.set_weights(irls=self.get_lp_weights(f_m)) + + @property + def W(self) -> csr_matrix: + r"""Weighting matrix. + + Returns the weighting matrix for the objective function. To see how the + weighting matrix is constructed, see the *Notes* section for the + :class:`SmoothnessFirstOrder` regularization class. + + Returns + ------- + scipy.sparse.csr_matrix + The weighting matrix applied in the objective function. 
+ """ + if getattr(self, "_W", None) is None: + average_cell_2_face = getattr( + self.regularization_mesh, "aveCC2F{}".format(self.orientation) + ) + weights = 1.0 + for values in self._weights.values(): + if values.shape[0] == self.regularization_mesh.nC: + values = average_cell_2_face * values + weights *= values + self._W = utils.sdiag(weights**0.5) + return self._W + + +################### + @property + def W(self): + + gradient = getattr(self.regmesh, "gradient_{}".format(self.orientation)) + + if getattr(self, "model", None) is None: + R = utils.speye(gradient.shape[0]) + + else: + r = self.R(self.f_m) + R = utils.sdiag(r) + + if self.scale is None: + self.scale = np.ones(self.mapping.shape[0]) + + weights = self.scale * self.regmesh.vol + + if self.cell_weights is not None: + weights *= self.cell_weights + gradient = getattr(self.regmesh, "gradient_{}".format(self.orientation)) + average = getattr(self.regmesh, "average_{}".format(self.orientation)) + return utils.sdiag((average * weights ** 0.5)) * R * gradient + + @property + def f_m(self): + + if self.mrefInSmooth: + + f_m = self._delta_m(self.model) + + else: + f_m = self.model + + # Not sure how effective it is + if self.gradientType == "total": + + average = getattr(self.regmesh, "average_{}".format(self.orientation)) + + dm_dx = np.abs( + self.regmesh.aveE2N + * self.regmesh.gradient_r + * (self.mapping * f_m) + ) + + dm_dx += np.abs( + self.regmesh.aveFz2CC + * self.regmesh.gradient_z + * (self.mapping * f_m) + ) + + dm_dx = average * dm_dx + + else: + gradient = getattr(self.regmesh, "gradient_{}".format(self.orientation)) + dm_dx = gradient * (self.mapping * f_m) + + return dm_dx + + @property + def length_scales(self): + """ + Normalized cell based weighting + + """ + average = getattr(self.regmesh, "average_{}".format(self.orientation)) + + if getattr(self, "_length_scales", None) is None: + if self.orientation == 'r': + # removing the length scale for the radial component seems better + # 
length_scales = average * self.regmesh.h_gridded_r + length_scales = np.ones(average.shape[0], dtype=float) + elif self.orientation == 'z': + length_scales = average * self.regmesh.h_gridded_z + self._length_scales = length_scales.min() / length_scales + + return self._length_scales + + +class LaterallyConstrained(Sparse): + """ + This regularization function is designed to regularize model parameters + connected with a 2D simplex mesh and 1D vertical mesh. + Motivating example is a stitched inversion of the electromagnetic data. + In such a case, a model is a 1D vertical conductivity (or resistivity) profile + at each sounding location. Each profile has the same number of layers. + The 2D simplex mesh connects resistivity values of each layer in lateral dimensions + while the 1D vertical mesh connects resistivity values along the vertical profile. + This LaterallyConstrained class is designed in a way that can handle sparse norm inversion. + And that is the reason why it inherits the Sparse Class. 
+ + """ + + def __init__( + self, mesh, alpha_s=1.0, alpha_r=1.0, alpha_z=1.0, **kwargs + ): + objfcts = [ + LaterallyConstrainedSmall(mesh=mesh, **kwargs), + LaterallyConstrainedDeriv(mesh=mesh, orientation="r", **kwargs), + LaterallyConstrainedDeriv(mesh=mesh, orientation="z", **kwargs), + ] + # Inherits the upper level class of Sparse + self.alpha_r = alpha_r + + super(Sparse, self).__init__( + mesh=mesh, + objfcts=objfcts, + alpha_s=alpha_s, + alpha_z=alpha_z, + **kwargs + ) + + alpha_r = props.Float("weight for the first radial-derivative") + # Observers + @properties.observer("norms") + def _mirror_norms_to_objfcts(self, change): + self.objfcts[0].norm = change["value"][:, 0] + for i, objfct in enumerate(self.objfcts[1:]): + ave_cc_f = getattr(objfct.regmesh, "average_{}".format(objfct.orientation)) + objfct.norm = ave_cc_f * change["value"][:, i + 1] \ No newline at end of file diff --git a/SimPEG/regularization/regularization_mesh_lateral.py b/SimPEG/regularization/regularization_mesh_lateral.py new file mode 100644 index 0000000000..f6c76e2943 --- /dev/null +++ b/SimPEG/regularization/regularization_mesh_lateral.py @@ -0,0 +1,282 @@ +import numpy as np +import scipy.sparse as sp +from SimPEG.utils.code_utils import deprecate_property, validate_active_indices + +from .. import props +from .. import utils + +class LCRegularizationMesh(RegularizationMesh): + """ + **LCRegularization Mesh** + + :param list mesh: lit including two discretize meshes + :param numpy.ndarray active_cells: bool array, size nC, that is True where we have active cells. Used to reduce the operators so we regularize only on active cells + :param numpy.ndarray active_edges: bool array, size nE, that is True where we have active edges. 
Used to reduce the operators so we regularize only on active edges + + """ + + _active_edges = None + + def __init__(self, mesh, active_cells=None, active_edges=None, **kwargs): + self.mesh_radial = mesh[0] + self.mesh_vertical = mesh[1] + self.active_cells = active_cells + self.active_edges = active_edges + utils.setKwargs(self, **kwargs) + + @active_cells.setter + def active_cells(self, values: np.ndarray): + if getattr(self, "_active_cells", None) is not None and not all( + self._active_cells == values + ): + raise AttributeError( + "The RegulatizationMesh already has an 'active_cells' property set." + ) + if values is not None: + values = validate_active_indices("values", values, self.mesh.nC) + # Ensure any cached operators created when + # active_cells was None are deleted + self._vol = None + self._Pac = None + self._Paer = None + self._Pafz = None + self._h_gridded_r = None + self._h_gridded_z = None + self._gradient_r = None + self._average_r = None + self._gradient_z = None + self._average_z = None + self._aveFz2CC = None + self._aveE2N = None + self._active_cells = values + + @active_edges.setter + def active_edges(self, values: np.ndarray): + if getattr(self, "_active_edges", None) is not None and not all( + self._active_edges == values + ): + raise AttributeError( + "The RegulatizationMesh already has an 'active_edges' property set." 
+ ) + self._active_edges = values + + @property + def vol(self) -> np.ndarray: + # Assume a unit area for the radial points) + # We could use the average of cells to nodes + self._vol = (np.ones(self.n_nodes, dtype=float)[:, None] * self.mesh_vertical.h[0]).flatten() + return self._vol[self.active_cells].flatten() + + @property + def h_gridded_r(self) -> np.ndarray: + """ + Length of cells in the raidal direction + + """ + if getattr(self, "_h_gridded_r", None) is None: + # assume a unit length scale in radial direction + n = self.nz * self.n_nodes + self._h_gridded_r = np.ones(n) + return self._h_gridded_r + + @property + def h_gridded_z(self) -> np.ndarray: + """ + Length of cells in the vertical direction + + """ + if getattr(self, "_h_gridded_z", None) is None: + self._h_gridded_z = np.tile( + self.mesh_vertical.h[0], self.n_nodes + ).flatten() + return self._h_gridded_z + + @property + def nodal_gradient_stencil(self) -> sp.csr_matrix: + ind_ptr = 2 * np.arange(self.mesh_radial.n_edges+1) + col_inds = self.mesh_radial._edges.reshape(-1) + Aijs = (np.ones(self.mesh_radial.n_edges, dtype=float)[:, None] * [-1, 1]).reshape(-1) + + return sp.csr_matrix((Aijs, col_inds, ind_ptr), shape=(self.mesh_radial.n_edges, self.n_nodes)) + + @property + def gradient_r(self) -> sp.csr_matrix: + """ + Nodal gradient in radial direction + + """ + if getattr(self, "_gradient_r", None) is None: + grad = self.nodal_gradient_stencil + self._gradient_r = self.Paer.T * sp.kron(grad, utils.speye(self.nz)) * self.Pac + return self._gradient_r + + @property + def average_r(self) -> sp.csr_matrix: + """ + Average of cells in the radial direction + + """ + if getattr(self, "_average_r", None) is None: + ave = self.mesh_radial.average_node_to_edge + self._average_r = self.Paer.T * sp.kron(ave, utils.speye(self.nz)) * self.Pac + return self._average_r + + @property + def gradient_z(self) -> sp.csr_matrix: + """ + Cell gradeint in vertical direction + + """ + if getattr(self, "_gradient_z", 
None) is None: + grad = self.mesh_vertical.stencil_cell_gradient + self._gradient_z = self.Pafz.T * sp.kron(utils.speye(self.n_nodes), grad) * self.Pac + return self._gradient_z + + @property + def average_z(self) -> sp.csr_matrix: + """ + Average of cells in the vertical direction + + """ + if getattr(self, "_average_z", None) is None: + ave = self.mesh_vertical.average_cell_to_face + self._average_z = self.Pafz.T * sp.kron(utils.speye(self.n_nodes), ave) * self.Pac + return self._average_z + + @property + def nz(self) -> int: + """ + Number of cells of the 1D vertical mesh + """ + if getattr(self, "_nz", None) is None: + self._nz = self.mesh_vertical.n_cells + return self._nz + + @property + def nFz(self) -> int: + """ + Number of faces in the vertical direction + """ + if getattr(self, "_nFz", None) is None: + self._nFz = self.mesh_vertical.n_faces * self.n_nodes + return self._nFz + + @property + def nE(self) -> int: + """ + Number of edges in the radial direction + """ + if getattr(self, "_nE", None) is None: + self._nE = self.nz * self.n_edges + return self._nE + + @property + def nC(self) -> int: + """ + reduced number of cells + + :rtype: int + :return: number of cells being regularized + """ + if self.active_cells is not None: + return int(self.active_cells.sum()) + return self.nz * self.n_nodes + + @property + def n_nodes(self) -> int: + """ + Number of nodes of the 2D simplex mesh + """ + if getattr(self, "_n_nodes", None) is None: + self._n_nodes = self.mesh_radial.n_nodes + return self._n_nodes + + @property + def n_edges(self) -> int: + """ + Number of edges of the 2D simplex mesh + """ + if getattr(self, "_n_edges", None) is None: + self._n_edges = self.mesh_radial.n_edges + return self._n_edges + + @property + def Pafz(self): + """ + projection matrix that takes from the reduced space of active z-faces + to full modelling space (ie. 
nFz x nactive_cells_Fz ) + + :rtype: scipy.sparse.csr_matrix + :return: active face-x projection matrix + """ + if getattr(self, "_Pafz", None) is None: + if self.active_cells is None: + self._Pafz = utils.speye(self.nFz) + else: + ave = self.mesh_vertical.average_face_to_cell + aveFz2CC = sp.kron(utils.speye(self.n_nodes), ave) + active_cells_Fz = aveFz2CC.T * self.active_cells >= 1 + self._Pafz = utils.speye(self.nFz)[:, active_cells_Fz] + return self._Pafz + + @property + def Pac(self): + """ + projection matrix that takes from the reduced space of active cells to + full modelling space (ie. nC x nactive_cells) + + :rtype: scipy.sparse.csr_matrix + :return: active cell projection matrix + """ + if getattr(self, "_Pac", None) is None: + if self.active_cells is None: + self._Pac = utils.speye(self.nz*self.n_nodes) + else: + self._Pac = utils.speye(self.nz*self.n_nodes)[:, self.active_cells] + return self._Pac + + @property + def Paer(self): + """ + projection matrix that takes from the reduced space of active edges + to full modelling space (ie. 
nE x nactive_cells_E ) + + :rtype: scipy.sparse.csr_matrix + :return: active edge projection matrix + """ + if getattr(self, "_Paer", None) is None: + if self.active_edges is None: + self._Paer = utils.speye(self.nE) + else: + ave = self.mesh_vertical.average_face_to_cell + aveFz2CC = sp.kron(utils.speye(self.n_nodes), ave) + self._Paer = utils.speye(self.nE)[:, self.active_edges] + return self._Paer + + @property + def aveFz2CC(self): + """ + averaging from active cell centers to active x-faces + + :rtype: scipy.sparse.csr_matrix + :return: averaging from active cell centers to active x-faces + """ + if getattr(self, "_aveFz2CC", None) is None: + ave = self.mesh_vertical.average_face_to_cell + self._aveFz2CC = self.Pac.T * sp.kron(utils.speye(self.n_nodes), ave) * self.Pafz + return self._aveFz2CC + + @property + def aveE2N(self): + """ + averaging from active nodes to active edges + + :rtype: scipy.sparse.csr_matrix + :return: averaging from active cell centers to active edges + """ + + if getattr(self, "_aveE2N", None) is None: + ave = self.mesh_radial.average_node_to_edge.T + self._aveE2N = self.Pac.T * sp.kron(ave, utils.speye(self.nz)) * self.Paer + return self._aveE2N +LCRegularizationMesh.__module__ = "SimPEG.regularization" \ No newline at end of file From c5b8f54045b4cecc80a2d0f1d644c377dba462a8 Mon Sep 17 00:00:00 2001 From: sgkang Date: Tue, 7 Nov 2023 09:27:59 -0800 Subject: [PATCH 089/164] working LCregulariztion mesh and LaterallyConstrained Reg. 
--- SimPEG/regularization/__init__.py | 1 + SimPEG/regularization/base.py | 29 +- .../regularization/laterally_constrained.py | 283 +++++++----------- .../regularization_mesh_lateral.py | 145 +++++++-- 4 files changed, 241 insertions(+), 217 deletions(-) diff --git a/SimPEG/regularization/__init__.py b/SimPEG/regularization/__init__.py index 5d1a7910ac..3c7b472f6f 100644 --- a/SimPEG/regularization/__init__.py +++ b/SimPEG/regularization/__init__.py @@ -156,6 +156,7 @@ SmoothnessSecondOrder, ) from .regularization_mesh import RegularizationMesh +from .regularization_mesh_lateral import LCRegularizationMesh from .sparse import BaseSparse, SparseSmallness, SparseSmoothness, Sparse from .pgi import PGIsmallness, PGI from .cross_gradient import CrossGradient diff --git a/SimPEG/regularization/base.py b/SimPEG/regularization/base.py index 1c89fb4ec4..04181022a3 100644 --- a/SimPEG/regularization/base.py +++ b/SimPEG/regularization/base.py @@ -8,6 +8,7 @@ from ..objective_function import BaseObjectiveFunction, ComboObjectiveFunction from .. import utils from .regularization_mesh import RegularizationMesh +from .regularization_mesh_lateral import LCRegularizationMesh from SimPEG.utils.code_utils import deprecate_property, validate_ndarray_with_shape @@ -875,20 +876,24 @@ def __init__( self, mesh, orientation="x", reference_model_in_smooth=False, **kwargs ): self.reference_model_in_smooth = reference_model_in_smooth + if isinstance(mesh, LCRegularizationMesh): + if orientation not in ["r", "z"]: + raise ValueError("Orientation must be 'r' or 'z'") + else: + if orientation not in ["x", "y", "z"]: + raise ValueError("Orientation must be 'x', 'y' or 'z'") - if orientation not in ["x", "y", "z"]: - raise ValueError("Orientation must be 'x', 'y' or 'z'") + if orientation == "y" and mesh.dim < 2: + raise ValueError( + "Mesh must have at least 2 dimensions to regularize along the " + "y-direction." 
+ ) + elif orientation == "z" and mesh.dim < 3: + raise ValueError( + "Mesh must have at least 3 dimensions to regularize along the " + "z-direction" + ) - if orientation == "y" and mesh.dim < 2: - raise ValueError( - "Mesh must have at least 2 dimensions to regularize along the " - "y-direction." - ) - elif orientation == "z" and mesh.dim < 3: - raise ValueError( - "Mesh must have at least 3 dimensions to regularize along the " - "z-direction" - ) self._orientation = orientation super().__init__(mesh=mesh, **kwargs) diff --git a/SimPEG/regularization/laterally_constrained.py b/SimPEG/regularization/laterally_constrained.py index 342b965d62..af4f785157 100644 --- a/SimPEG/regularization/laterally_constrained.py +++ b/SimPEG/regularization/laterally_constrained.py @@ -4,6 +4,11 @@ from .. import utils import properties from .. import props +from .regularization_mesh_lateral import LCRegularizationMesh +from typing import TYPE_CHECKING +# if TYPE_CHECKING: +from scipy.sparse import csr_matrix + class LaterallyConstrainedSmallness(SparseSmallness): """ @@ -16,163 +21,12 @@ class LaterallyConstrainedSmoothness(SparseSmoothness): for addressing radial and vertical gradients of model parameters, which is a 1D vertical resistivity profile at each of lateral locations. """ - @property - def cell_gradient(self): - if getattr(self, "_cell_gradient", None) is None: - self._cell_gradient = getattr( - self.regularization_mesh, "cell_gradient_{}".format(self.orientation) - ) - return self._cell_gradient - @property - def _cell_distances(self): - """ - Distances between cell centers for the cell center difference. - """ - return getattr(self.regularization_mesh, f"cell_distances_{self.orientation}") - - @property - def orientation(self): - """Direction along which smoothness is enforced. - - Returns - ------- - {'x','y','z'} - The direction along which smoothness is enforced. 
- - """ - return self._orientation - - def f_m(self, m): - dfm_dl = self.cell_gradient @ (self.mapping * self._delta_m(m)) - - if self.units is not None and self.units.lower() == "radian": - return ( - utils.mat_utils.coterminal(dfm_dl * self._cell_distances) - / self._cell_distances - ) - return dfm_dl - - def f_m_deriv(self, m) -> csr_matrix: - return self.cell_gradient @ self.mapping.deriv(self._delta_m(m)) - - def update_weights(self, m): - if self.gradient_type == "total" and self.parent is not None: - f_m = np.zeros(self.regularization_mesh.nC) - for obj in self.parent.objfcts: - if isinstance(obj, SparseSmoothness): - avg = getattr(self.regularization_mesh, f"aveF{obj.orientation}2CC") - f_m += np.abs(avg * obj.f_m(m)) - - f_m = getattr(self.regularization_mesh, f"aveCC2F{self.orientation}") * f_m - - else: - f_m = self.f_m(m) - - self.set_weights(irls=self.get_lp_weights(f_m)) - - @property - def W(self) -> csr_matrix: - r"""Weighting matrix. - - Returns the weighting matrix for the objective function. To see how the - weighting matrix is constructed, see the *Notes* section for the - :class:`SmoothnessFirstOrder` regularization class. - - Returns - ------- - scipy.sparse.csr_matrix - The weighting matrix applied in the objective function. 
- """ - if getattr(self, "_W", None) is None: - average_cell_2_face = getattr( - self.regularization_mesh, "aveCC2F{}".format(self.orientation) - ) - weights = 1.0 - for values in self._weights.values(): - if values.shape[0] == self.regularization_mesh.nC: - values = average_cell_2_face * values - weights *= values - self._W = utils.sdiag(weights**0.5) - return self._W - - -################### - @property - def W(self): - - gradient = getattr(self.regmesh, "gradient_{}".format(self.orientation)) - - if getattr(self, "model", None) is None: - R = utils.speye(gradient.shape[0]) - - else: - r = self.R(self.f_m) - R = utils.sdiag(r) - - if self.scale is None: - self.scale = np.ones(self.mapping.shape[0]) - - weights = self.scale * self.regmesh.vol - - if self.cell_weights is not None: - weights *= self.cell_weights - gradient = getattr(self.regmesh, "gradient_{}".format(self.orientation)) - average = getattr(self.regmesh, "average_{}".format(self.orientation)) - return utils.sdiag((average * weights ** 0.5)) * R * gradient - - @property - def f_m(self): - - if self.mrefInSmooth: - - f_m = self._delta_m(self.model) - - else: - f_m = self.model - - # Not sure how effective it is - if self.gradientType == "total": - - average = getattr(self.regmesh, "average_{}".format(self.orientation)) - - dm_dx = np.abs( - self.regmesh.aveE2N - * self.regmesh.gradient_r - * (self.mapping * f_m) - ) - - dm_dx += np.abs( - self.regmesh.aveFz2CC - * self.regmesh.gradient_z - * (self.mapping * f_m) - ) - - dm_dx = average * dm_dx - + def __init__(self, mesh, orientation="r", gradient_type="total", **kwargs): + if "gradientType" in kwargs: + self.gradientType = kwargs.pop("gradientType") else: - gradient = getattr(self.regmesh, "gradient_{}".format(self.orientation)) - dm_dx = gradient * (self.mapping * f_m) - - return dm_dx - - @property - def length_scales(self): - """ - Normalized cell based weighting - - """ - average = getattr(self.regmesh, "average_{}".format(self.orientation)) - - if 
getattr(self, "_length_scales", None) is None: - if self.orientation == 'r': - # removing the length scale for the radial component seems better - # length_scales = average * self.regmesh.h_gridded_r - length_scales = np.ones(average.shape[0], dtype=float) - elif self.orientation == 'z': - length_scales = average * self.regmesh.h_gridded_z - self._length_scales = length_scales.min() / length_scales - - return self._length_scales + self.gradient_type = gradient_type + super().__init__(mesh=mesh, orientation=orientation, **kwargs) class LaterallyConstrained(Sparse): @@ -190,29 +44,102 @@ class LaterallyConstrained(Sparse): """ def __init__( - self, mesh, alpha_s=1.0, alpha_r=1.0, alpha_z=1.0, **kwargs + self, + mesh, + active_cells=None, + alpha_r=None, + length_scale_r=None, + norms=None, + gradient_type="total", + irls_scaled=True, + irls_threshold=1e-8, + objfcts=None, + **kwargs, ): - objfcts = [ - LaterallyConstrainedSmall(mesh=mesh, **kwargs), - LaterallyConstrainedDeriv(mesh=mesh, orientation="r", **kwargs), - LaterallyConstrainedDeriv(mesh=mesh, orientation="z", **kwargs), - ] - # Inherits the upper level class of Sparse - self.alpha_r = alpha_r - - super(Sparse, self).__init__( - mesh=mesh, + if not isinstance(mesh, LCRegularizationMesh): + mesh = LCRegularizationMesh(mesh) + + if not isinstance(mesh, LCRegularizationMesh): + TypeError( + f"'regularization_mesh' must be of type {LCRegularizationMesh}. " + f"Value of type {type(mesh)} provided." + ) + self._regularization_mesh = mesh + if active_cells is not None: + self._regularization_mesh.active_cells = active_cells + + if alpha_r is not None: + if length_scale_r is not None: + raise ValueError( + "Attempted to set both alpha_r and length_scale_r at the same time. 
Please " + "use only one of them" + ) + self.alpha_r = alpha_r + else: + self.length_scale_r = length_scale_r + + if objfcts is None: + objfcts = [ + SparseSmallness(mesh=self.regularization_mesh), + SparseSmoothness(mesh=self.regularization_mesh, orientation="r"), + SparseSmoothness(mesh=self.regularization_mesh, orientation="z"), + ] + gradientType = kwargs.pop("gradientType", None) + + super().__init__( + self.regularization_mesh, objfcts=objfcts, - alpha_s=alpha_s, - alpha_z=alpha_z, - **kwargs + **kwargs, ) - alpha_r = props.Float("weight for the first radial-derivative") - # Observers - @properties.observer("norms") - def _mirror_norms_to_objfcts(self, change): - self.objfcts[0].norm = change["value"][:, 0] - for i, objfct in enumerate(self.objfcts[1:]): - ave_cc_f = getattr(objfct.regmesh, "average_{}".format(objfct.orientation)) - objfct.norm = ave_cc_f * change["value"][:, i + 1] \ No newline at end of file + @property + def alpha_r(self): + """Multiplier constant for first-order smoothness along x. + + Returns + ------- + float + Multiplier constant for first-order smoothness along x. + """ + return self._alpha_r + + @alpha_r.setter + def alpha_r(self, value): + try: + value = float(value) + except (ValueError, TypeError): + raise TypeError(f"alpha_r must be a real number, saw type{type(value)}") + if value < 0: + raise ValueError(f"alpha_r must be non-negative, not {value}") + self._alpha_r = value + + @property + def length_scale_r(self): + r"""Multiplier constant for smoothness along x relative to base scale length. + + Where the :math:`\Delta h` defines the base length scale (i.e. minimum cell dimension), + and :math:`\alpha_r` defines the multiplier constant for first-order smoothness along x, + the length-scale is given by: + + .. math:: + L_x = \bigg ( \frac{\alpha_r}{\Delta h} \bigg )^{1/2} + + Returns + ------- + float + Multiplier constant for smoothness along x relative to base scale length. 
+ """ + return np.sqrt(self.alpha_r) / self.regularization_mesh.base_length + + @length_scale_r.setter + def length_scale_r(self, value: float): + if value is None: + value = 1.0 + try: + value = float(value) + except (TypeError, ValueError): + raise TypeError( + f"length_scale_r must be a real number, saw type{type(value)}" + ) + print ("Set alpha_s") + self.alpha_r = (value * self.regularization_mesh.base_length) ** 2 \ No newline at end of file diff --git a/SimPEG/regularization/regularization_mesh_lateral.py b/SimPEG/regularization/regularization_mesh_lateral.py index f6c76e2943..2338487123 100644 --- a/SimPEG/regularization/regularization_mesh_lateral.py +++ b/SimPEG/regularization/regularization_mesh_lateral.py @@ -4,6 +4,7 @@ from .. import props from .. import utils +from .regularization_mesh import RegularizationMesh class LCRegularizationMesh(RegularizationMesh): """ @@ -20,10 +21,29 @@ class LCRegularizationMesh(RegularizationMesh): def __init__(self, mesh, active_cells=None, active_edges=None, **kwargs): self.mesh_radial = mesh[0] self.mesh_vertical = mesh[1] - self.active_cells = active_cells self.active_edges = active_edges utils.setKwargs(self, **kwargs) + @property + def active_cells(self) -> np.ndarray: + """Active cells on the regularization mesh. + + A boolean array defining the cells in the regularization mesh that are active + (i.e. updated) throughout the inversion. The values of inactive cells + remain equal to their starting model values. + + Returns + ------- + (n_cells, ) array of bool + + Notes + ----- + If the property is set using a ``numpy.ndarray`` of ``int``, the setter interprets the + array as representing the indices of the active cells. When called however, the quantity + will have been internally converted to a boolean array. 
+ """ + return self._active_cells + @active_cells.setter def active_cells(self, values: np.ndarray): if getattr(self, "_active_cells", None) is not None and not all( @@ -41,14 +61,15 @@ def active_cells(self, values: np.ndarray): self._Paer = None self._Pafz = None self._h_gridded_r = None - self._h_gridded_z = None - self._gradient_r = None - self._average_r = None - self._gradient_z = None - self._average_z = None + self._h_gridded_z = None + self._cell_gradient_z = None + self._aveCC2Fz = None self._aveFz2CC = None - self._aveE2N = None self._active_cells = values + + @property + def active_edges(self) -> np.ndarray: + return self._active_edges @active_edges.setter def active_edges(self, values: np.ndarray): @@ -58,6 +79,11 @@ def active_edges(self, values: np.ndarray): raise AttributeError( "The RegulatizationMesh already has an 'active_edges' property set." ) + if values is not None: + self._aveCC2Fr = None + self._cell_gradient_r = None + self._aveFr2CC = None + self._active_edges = values @property @@ -90,6 +116,43 @@ def h_gridded_z(self) -> np.ndarray: self.mesh_vertical.h[0], self.n_nodes ).flatten() return self._h_gridded_z + + @property + def base_length(self) -> float: + """Smallest dimension (i.e. edge length) for smallest cell in the mesh. + + Returns + ------- + float + Smallest dimension (i.e. edge length) for smallest cell in the mesh. + """ + if getattr(self, "_base_length", None) is None: + self._base_length = self.mesh_vertical.h[0].min() + return self._base_length + + @property + def dim(self) -> int: + """Dimension of regularization mesh. + + Returns + ------- + {2} + Dimension of the regularization mesh. + """ + return 2 + + @property + def cell_gradient(self) -> sp.csr_matrix: + """Cell gradient operator (cell centers to faces). + + Built from :py:property:`~discretize.operators.differential_operators.DiffOperators.cell_gradient`. 
+ + Returns + ------- + (n_faces, n_cells) scipy.sparse.csr_matrix + Cell gradient operator (cell centers to faces). + """ + return sp.vstack([self.cell_gradient_r, self.cell_gradient_z]) @property def nodal_gradient_stencil(self) -> sp.csr_matrix: @@ -100,48 +163,76 @@ def nodal_gradient_stencil(self) -> sp.csr_matrix: return sp.csr_matrix((Aijs, col_inds, ind_ptr), shape=(self.mesh_radial.n_edges, self.n_nodes)) @property - def gradient_r(self) -> sp.csr_matrix: + def cell_gradient_r(self) -> sp.csr_matrix: """ Nodal gradient in radial direction """ - if getattr(self, "_gradient_r", None) is None: + if getattr(self, "_cell_gradient_r", None) is None: grad = self.nodal_gradient_stencil - self._gradient_r = self.Paer.T * sp.kron(grad, utils.speye(self.nz)) * self.Pac - return self._gradient_r + self._cell_gradient_r = self.Paer.T * sp.kron(grad, utils.speye(self.nz)) * self.Pac + return self._cell_gradient_r @property - def average_r(self) -> sp.csr_matrix: + def aveCC2Fr(self) -> sp.csr_matrix: """ Average of cells in the radial direction """ - if getattr(self, "_average_r", None) is None: + if getattr(self, "_aveCC2Fr", None) is None: ave = self.mesh_radial.average_node_to_edge - self._average_r = self.Paer.T * sp.kron(ave, utils.speye(self.nz)) * self.Pac - return self._average_r + self._aveCC2Fr = self.Paer.T * sp.kron(ave, utils.speye(self.nz)) * self.Pac + return self._aveCC2Fr @property - def gradient_z(self) -> sp.csr_matrix: + def cell_distances_r(self) -> np.ndarray: + """Cell center distance array along the r-direction. + + Returns + ------- + (n_active_faces_r, ) numpy.ndarray + Cell center distance array along the r-direction. 
+ """ + if getattr(self, "_cell_distances_r", None) is None: + Ave = self.aveCC2Fr + self._cell_distances_r = Ave * (self.Pac.T * self.h_gridded_r) + return self._cell_distances_r + + @property + def cell_gradient_z(self) -> sp.csr_matrix: """ Cell gradeint in vertical direction """ - if getattr(self, "_gradient_z", None) is None: + if getattr(self, "_cell_gradient_z", None) is None: grad = self.mesh_vertical.stencil_cell_gradient - self._gradient_z = self.Pafz.T * sp.kron(utils.speye(self.n_nodes), grad) * self.Pac - return self._gradient_z + self._cell_gradient_z = self.Pafz.T * sp.kron(utils.speye(self.n_nodes), grad) * self.Pac + return self._cell_gradient_z @property - def average_z(self) -> sp.csr_matrix: + def aveCC2Fz(self) -> sp.csr_matrix: """ Average of cells in the vertical direction """ - if getattr(self, "_average_z", None) is None: + if getattr(self, "_aveCC2Fz", None) is None: ave = self.mesh_vertical.average_cell_to_face - self._average_z = self.Pafz.T * sp.kron(utils.speye(self.n_nodes), ave) * self.Pac - return self._average_z + self._aveCC2Fz = self.Pafz.T * sp.kron(utils.speye(self.n_nodes), ave) * self.Pac + return self._aveCC2Fz + + @property + def cell_distances_z(self) -> np.ndarray: + """Cell center distance array along the r-direction. + + Returns + ------- + (n_active_faces_z, ) numpy.ndarray + Cell center distance array along the r-direction. 
+ """ + if getattr(self, "_cell_distances_z", None) is None: + Ave = self.aveCC2Fr + self._cell_distances_z = Ave * (self.Pac.T * self.h_gridded_z) + return self._cell_distances_z @property def nz(self) -> int: @@ -267,7 +358,7 @@ def aveFz2CC(self): return self._aveFz2CC @property - def aveE2N(self): + def aveFr2CC(self): """ averaging from active nodes to active edges @@ -275,8 +366,8 @@ def aveE2N(self): :return: averaging from active cell centers to active edges """ - if getattr(self, "_aveE2N", None) is None: + if getattr(self, "_aveFr2CC", None) is None: ave = self.mesh_radial.average_node_to_edge.T - self._aveE2N = self.Pac.T * sp.kron(ave, utils.speye(self.nz)) * self.Paer - return self._aveE2N + self._aveFr2CC = self.Pac.T * sp.kron(ave, utils.speye(self.nz)) * self.Paer + return self._aveFr2CC LCRegularizationMesh.__module__ = "SimPEG.regularization" \ No newline at end of file From 9102ec3b2f9809a5af2d4aeba02b05cc947947e1 Mon Sep 17 00:00:00 2001 From: sgkang Date: Wed, 8 Nov 2023 09:48:11 -0800 Subject: [PATCH 090/164] working stitcehd inversion code for tdem. 
--- SimPEG/electromagnetics/base_1d.py | 6 +- .../electromagnetics/time_domain/__init__.py | 1 + .../time_domain/simulation_1d.py | 4 +- .../electromagnetics/time_domain/sources.py | 17 + SimPEG/electromagnetics/time_domain/survey.py | 45 ++ SimPEG/electromagnetics/utils/em1d_utils.py | 414 ++++++++++++++++++ .../regularization/laterally_constrained.py | 3 + 7 files changed, 487 insertions(+), 3 deletions(-) diff --git a/SimPEG/electromagnetics/base_1d.py b/SimPEG/electromagnetics/base_1d.py index f1c85f44e1..09fdb5170f 100644 --- a/SimPEG/electromagnetics/base_1d.py +++ b/SimPEG/electromagnetics/base_1d.py @@ -368,8 +368,9 @@ def _compute_hankel_coefficients(self): if is_circular_loop: if np.any(src.orientation[:-1] != 0.0): raise ValueError("Can only simulate horizontal circular loops") + # Note: this assumes a fixed height for all sources if self.hMap is not None: - h = 0 # source height above topo + h = self.h # source height above topo else: h = src.location[2] - self.topo[-1] @@ -573,6 +574,7 @@ def deleteTheseOnModelUpdate(self): toDelete += ["_J", "_gtgdiag"] return toDelete + # TODO: need to revisit this: def depth_of_investigation_christiansen_2012(self, std, thres_hold=0.8): pred = self.survey._pred.copy() delta_d = std * np.log(abs(self.survey.dobs)) @@ -610,4 +612,4 @@ def getJtJdiag(self, m, W=None, f=None): J = Js["dthick"] @ self.thicknessesDeriv out = out + np.einsum("i,ij,ij->j", W, J, J) self._gtgdiag = out - return self._gtgdiag + return self._gtgdiag \ No newline at end of file diff --git a/SimPEG/electromagnetics/time_domain/__init__.py b/SimPEG/electromagnetics/time_domain/__init__.py index dcf8dde9a8..cb164eb5e9 100644 --- a/SimPEG/electromagnetics/time_domain/__init__.py +++ b/SimPEG/electromagnetics/time_domain/__init__.py @@ -96,6 +96,7 @@ Simulation3DCurrentDensity, ) from .simulation_1d import Simulation1DLayered +from .simulation_1d_stitched import Simulation1DLayeredStitched from .fields import ( Fields3DMagneticFluxDensity, 
Fields3DElectricField, diff --git a/SimPEG/electromagnetics/time_domain/simulation_1d.py b/SimPEG/electromagnetics/time_domain/simulation_1d.py index 83568857e3..873febfd8a 100644 --- a/SimPEG/electromagnetics/time_domain/simulation_1d.py +++ b/SimPEG/electromagnetics/time_domain/simulation_1d.py @@ -78,6 +78,7 @@ def get_coefficients(self): self._inv_lambs, self._C0s, self._C1s, + self._W ) def _set_coefficients(self, coefficients): @@ -88,6 +89,7 @@ def _set_coefficients(self, coefficients): self._inv_lambs = coefficients[4] self._C0s = coefficients[5] self._C1s = coefficients[6] + self._W = coefficients[7] self._coefficients_set = True return @@ -285,7 +287,7 @@ def getJ(self, m, f=None): # need to re-arange v_dh as it's currently (n_data x n_freqs) # however it already contains all the relevant information... # just need to map it from the rx index to the source index associated.. - v_dh = np.zeros((self.survey.nSrc, *v_dh_temp.shape)) + v_dh = np.zeros((self.survey.nSrc, *v_dh_temp.shape), dtype=complex) i = 0 for i_src, src in enumerate(self.survey.source_list): diff --git a/SimPEG/electromagnetics/time_domain/sources.py b/SimPEG/electromagnetics/time_domain/sources.py index fa37081259..c0ea849385 100644 --- a/SimPEG/electromagnetics/time_domain/sources.py +++ b/SimPEG/electromagnetics/time_domain/sources.py @@ -1037,6 +1037,7 @@ def __init__( location=None, waveform=None, srcType=None, + i_sounding=0, **kwargs, ): if waveform is None: @@ -1049,6 +1050,8 @@ def __init__( if srcType is not None: self.srcType = srcType + self.i_sounding = i_sounding + @property def waveform(self): """Current waveform for the source @@ -1079,6 +1082,20 @@ def srcType(self): def srcType(self, var): self._srcType = validate_string("srcType", var, ["inductive", "galvanic"]) + @property + def i_sounding(self): + """Sounding number for the source + + Returns + ------- + int + """ + return self._i_sounding + + @i_sounding.setter + def i_sounding(self, value): + self._i_sounding = 
validate_integer("i_sounding", value, min_val=0) + def bInitial(self, simulation): """Return initial B-field (``Zero`` for ``BaseTDEMSrc`` class) diff --git a/SimPEG/electromagnetics/time_domain/survey.py b/SimPEG/electromagnetics/time_domain/survey.py index e8798d8048..f89a12400a 100644 --- a/SimPEG/electromagnetics/time_domain/survey.py +++ b/SimPEG/electromagnetics/time_domain/survey.py @@ -20,6 +20,20 @@ class Survey(BaseSurvey): def __init__(self, source_list, **kwargs): super(Survey, self).__init__(source_list, **kwargs) + _source_location_dict = {} + _source_location_by_sounding_dict = {} + _source_frequency_by_sounding_dict = {} + + for src in source_list: + if src.i_sounding not in _source_location_dict: + _source_location_dict[src.i_sounding] = [] + _source_location_by_sounding_dict[src.i_sounding] = [] + _source_location_dict[src.i_sounding] += [src] + _source_location_by_sounding_dict[src.i_sounding] += [src.location] + + self._source_location_dict = _source_location_dict + self._source_location_by_sounding_dict = _source_location_by_sounding_dict + @property def source_list(self): """List of TDEM sources associated with the survey @@ -36,3 +50,34 @@ def source_list(self, new_list): self._source_list = validate_list_of_types( "source_list", new_list, BaseTDEMSrc, ensure_unique=True ) + + @property + def source_location_by_sounding_dict(self): + """ + Source location in the survey as a dictionary + """ + return self._source_location_by_sounding_dict + + def get_sources_by_sounding_number(self, i_sounding): + """ + Returns the sources associated with a specific source location. + :param float i_sounding: source location number + :rtype: dictionary + :return: sources at the sepcified source location + """ + assert ( + i_sounding in self._source_location_dict + ), "The requested sounding is not in this survey." 
+ return self._source_location_dict[i_sounding] + + @property + def vnD_by_sounding_dict(self): + if getattr(self, '_vnD_by_sounding_dict', None) is None: + self._vnD_by_sounding_dict = {} + for i_sounding in self.source_location_by_sounding_dict: + source_list = self.get_sources_by_sounding_number(i_sounding) + nD = 0 + for src in source_list: + nD +=src.nD + self._vnD_by_sounding_dict[i_sounding] = nD + return self._vnD_by_sounding_dict \ No newline at end of file diff --git a/SimPEG/electromagnetics/utils/em1d_utils.py b/SimPEG/electromagnetics/utils/em1d_utils.py index 21a08dbd6a..04f0d194ef 100644 --- a/SimPEG/electromagnetics/utils/em1d_utils.py +++ b/SimPEG/electromagnetics/utils/em1d_utils.py @@ -1,9 +1,21 @@ import numpy as np from geoana.em.fdem.base import skin_depth from geoana.em.tdem import diffusion_distance +import matplotlib.pyplot as plt from SimPEG import utils +from discretize import TensorMesh +from SimPEG.utils.code_utils import ( + validate_ndarray_with_shape, +) + +from scipy.spatial import cKDTree as kdtree +import scipy.sparse as sp +from matplotlib.colors import LogNorm + +def set_mesh_1d(hz): + return TensorMesh([hz], x0=[0]) def get_vertical_discretization(n_layer, minimum_dz, geomtric_factor): """ @@ -219,3 +231,405 @@ def LogUniform(f, chi_inf=0.05, del_chi=0.05, tau1=1e-5, tau2=1e-2): return chi_inf + del_chi * ( 1 - np.log((1 + 1j * w * tau2) / (1 + 1j * w * tau1)) / np.log(tau2 / tau1) ) + +############################################################# +# PLOTTING RESTIVITY MODEL +############################################################# + +class Stitched1DModel: + + def __init__( + self, + topography=None, + physical_property=None, + line=None, + time_stamp=None, + thicknesses=None, + **kwargs + ): + super().__init__(**kwargs) + + self.topography = topography + self.physical_property = physical_property + self.line = line + self.time_stamp = time_stamp + self.thicknesses = thicknesses + + @property + def topography(self): + 
"""Topography + + Returns + ------- + (n_sounding, n_dim) np.ndarray + Topography. + """ + return self._topography + + @topography.setter + def topography(self, locs): + self._topography = validate_ndarray_with_shape( + "topography", locs, shape=("*", "*"), dtype=float + ) + + @property + def physical_property(self): + """Physical property + + Returns + ------- + (n_sounding x n_layer,) np.ndarray + physical_property. + """ + return self._physical_property + + @physical_property.setter + def physical_property(self, values): + self._physical_property = validate_ndarray_with_shape( + "physical_property", values, shape=("*"), dtype=float + ) + + @property + def line(self): + """Line number + + Returns + ------- + (n_sounding,) np.ndarray + line. + """ + return self._line + + @line.setter + def line(self, values): + self._line = validate_ndarray_with_shape( + "line", values, shape=("*"), dtype=int + ) + + @property + def timestamp(self): + """Time stamp + + Returns + ------- + (n_sounding,) np.ndarray + timestamp. + """ + return self._timestamp + + @timestamp.setter + def timestamp(self, values): + self._timestamp = validate_ndarray_with_shape( + "timestamp", values, shape=("*"), dtype=float + ) + + @property + def thicknesses(self): + """Layer thicknesses + + Returns + ------- + (n_sounding,) np.ndarray + thicknesses. 
+ """ + return self._thicknesses + + @thicknesses.setter + def thicknesses(self, values): + self._thicknesses = validate_ndarray_with_shape( + "thicknesses", values, shape=("*"), dtype=float + ) + + @property + def n_layer(self): + return len(self.hz) + + @property + def hz(self): + if getattr(self, '_hz', None) is None: + self._hz = np.r_[self.thicknesses, self.thicknesses[-1]] + return self._hz + + @property + def n_sounding(self): + if getattr(self, '_n_sounding', None) is None: + self._n_sounding = self.topography.shape[0] + return self._n_sounding + + @property + def unique_line(self): + if getattr(self, '_unique_line', None) is None: + if self.line is None: + raise Exception("line information is required!") + self._unique_line = np.unique(self.line) + return self._unique_line + + @property + def xyz(self): + if getattr(self, '_xyz', None) is None: + xyz = np.empty( + (self.n_layer, self.topography.shape[0], 3), order='F' + ) + for i_xy in range(self.topography.shape[0]): + z = -self.mesh_1d.vectorCCx + self.topography[i_xy, 2] + x = np.ones_like(z) * self.topography[i_xy, 0] + y = np.ones_like(z) * self.topography[i_xy, 1] + xyz[:, i_xy, :] = np.c_[x, y, z] + self._xyz = xyz + return self._xyz + + @property + def mesh_1d(self): + if getattr(self, '_mesh_1d', None) is None: + if self.thicknesses is None: + raise Exception("thicknesses information is required!") + self._mesh_1d = set_mesh_1d(np.r_[self.hz[:self.n_layer]]) + return self._mesh_1d + + @property + def mesh_3d(self): + if getattr(self, '_mesh_3d', None) is None: + if self.mesh_3d is None: + raise Exception("Run get_mesh_3d!") + return self._mesh_3d + + @property + def physical_property_matrix(self): + if getattr(self, '_physical_property_matrix', None) is None: + if self.physical_property is None: + raise Exception("physical_property information is required!") + self._physical_property_matrix = self.physical_property.reshape((self.n_layer, self.n_sounding), order='F') + return 
self._physical_property_matrix + + @property + def depth_matrix(self): + if getattr(self, '_depth_matrix', None) is None: + if self.hz.size == self.n_layer: + depth = np.cumsum(np.r_[0, self.hz]) + self._depth_matrix = np.tile(depth, (self.n_sounding, 1)).T + else: + self._depth_matrix =np.hstack( + (np.zeros((self.n_sounding,1)), np.cumsum(self.hz.reshape((self.n_sounding, self.n_layer)), axis=1)) + ).T + return self._depth_matrix + + @property + def distance(self): + if getattr(self, '_distance', None) is None: + self._distance = np.zeros(self.n_sounding, dtype=float) + for line_tmp in self.unique_line: + ind_line = self.line == line_tmp + xy_line = self.topography[ind_line,:2] + distance_line = np.r_[0, np.cumsum(np.sqrt((np.diff(xy_line, axis=0)**2).sum(axis=1)))] + self._distance[ind_line] = distance_line + return self._distance + + def plot_section( + self, i_layer=0, i_line=0, x_axis='x', + plot_type="contour", + physical_property=None, clim=None, + ax=None, cmap='viridis', ncontour=20, scale='log', + show_colorbar=True, aspect=1, zlim=None, dx=20., + invert_xaxis=False, + alpha=0.7, + pcolorOpts={} + ): + ind_line = self.line == self.unique_line[i_line] + if physical_property is not None: + physical_property_matrix = physical_property.reshape( + (self.n_layer, self.n_sounding), order='F' + ) + else: + physical_property_matrix = self.physical_property_matrix + + if x_axis.lower() == 'y': + x_ind = 1 + xlabel = 'Northing (m)' + elif x_axis.lower() == 'x': + x_ind = 0 + xlabel = 'Easting (m)' + elif x_axis.lower() == 'distance': + xlabel = 'Distance (m)' + + if ax is None: + fig = plt.figure(figsize=(15, 10)) + ax = plt.subplot(111) + + if clim is None: + vmin = np.percentile(physical_property_matrix, 5) + vmax = np.percentile(physical_property_matrix, 95) + else: + vmin, vmax = clim + + if scale == 'log': + norm = LogNorm(vmin=vmin, vmax=vmax) + vmin=None + vmax=None + else: + norm=None + + ind_line = np.arange(ind_line.size)[ind_line] + + for i in ind_line: 
+ inds_temp = [i] + if x_axis == 'distance': + x_tmp = self.distance[i] + else: + x_tmp = self.topography[i, x_ind] + + topo_temp = np.c_[ + x_tmp-dx, + x_tmp+dx + ] + out = ax.pcolormesh( + topo_temp, -self.depth_matrix[:,i]+self.topography[i, 2], physical_property_matrix[:, inds_temp], + cmap=cmap, alpha=alpha, + vmin=vmin, vmax=vmax, norm=norm, shading='auto', **pcolorOpts + ) + + if show_colorbar: + from mpl_toolkits import axes_grid1 + cb = plt.colorbar(out, ax=ax, fraction=0.01) + cb.set_label("Conductivity (S/m)") + + ax.set_aspect(aspect) + ax.set_xlabel(xlabel) + ax.set_ylabel('Elevation (m)') + if zlim is not None: + ax.set_ylim(zlim) + + if x_axis == 'distance': + xlim = self.distance[ind_line].min()-dx, self.distance[ind_line].max()+dx + else: + xlim = self.topography[ind_line, x_ind].min()-dx, self.topography[ind_line, x_ind].max()+dx + if invert_xaxis: + ax.set_xlim(xlim[1], xlim[0]) + else: + ax.set_xlim(xlim) + + plt.tight_layout() + + if show_colorbar: + return out, ax, cb + else: + return out, ax + return ax, + + def get_3d_mesh( + self, dx=None, dy=None, dz=None, + npad_x=0, npad_y=0, npad_z=0, + core_z_length=None, + nx=100, + ny=100, + ): + + xmin, xmax = self.topography[:, 0].min(), self.topography[:, 0].max() + ymin, ymax = self.topography[:, 1].min(), self.topography[:, 1].max() + zmin, zmax = self.topography[:, 2].min(), self.topography[:, 2].max() + zmin -= self.mesh_1d.vectorNx.max() + + lx = xmax-xmin + ly = ymax-ymin + lz = zmax-zmin + + if dx is None: + dx = lx/nx + print ((">> dx:%.1e")%(dx)) + if dy is None: + dy = ly/ny + print ((">> dy:%.1e")%(dy)) + if dz is None: + dz = np.median(self.mesh_1d.hx) + + nx = int(np.floor(lx/dx)) + ny = int(np.floor(ly/dy)) + nz = int(np.floor(lz/dz)) + + if nx*ny*nz > 1e6: + warnings.warn( + ("Size of the mesh (%i) will greater than 1e6")%(nx*ny*nz) + ) + hx = [(dx, npad_x, -1.2), (dx, nx), (dx, npad_x, -1.2)] + hy = [(dy, npad_y, -1.2), (dy, ny), (dy, npad_y, -1.2)] + hz = [(dz, npad_z, -1.2), (dz, 
nz)] + + zmin = self.topography[:, 2].max() - utils.meshTensor(hz).sum() + self._mesh_3d = TensorMesh([hx, hy, hz], x0=[xmin, ymin, zmin]) + + return self.mesh_3d + + @property + def P(self): + if getattr(self, '_P', None) is None: + raise Exception("Run get_interpolation_matrix first!") + return self._P + + def get_interpolation_matrix( + self, + npts=20, + epsilon=None + ): + + tree_2d = kdtree(self.topography[:, :2]) + xy = utils.ndgrid(self.mesh_3d.vectorCCx, self.mesh_3d.vectorCCy) + + distance, inds = tree_2d.query(xy, k=npts) + if epsilon is None: + epsilon = np.min([self.mesh_3d.hx.min(), self.mesh_3d.hy.min()]) + + w = 1. / (distance + epsilon)**2 + w = utils.sdiag(1./np.sum(w, axis=1)) * (w) + I = utils.mkvc( + np.arange(inds.shape[0]).reshape([-1, 1]).repeat(npts, axis=1) + ) + J = utils.mkvc(inds) + + self._P = sp.coo_matrix( + (utils.mkvc(w), (I, J)), + shape=(inds.shape[0], self.topography.shape[0]) + ) + + mesh_1d = TensorMesh([np.r_[self.hz[:-1], 1e20]]) + + z = self.P*self.topography[:, 2] + + self._actinds = utils.surface2ind_topo(self.mesh_3d, np.c_[xy, z]) + + Z = np.empty(self.mesh_3d.vnC, dtype=float, order='F') + Z = self.mesh_3d.gridCC[:, 2].reshape( + (self.mesh_3d.nCx*self.mesh_3d.nCy, self.mesh_3d.nCz), order='F' + ) + ACTIND = self._actinds.reshape( + (self.mesh_3d.nCx*self.mesh_3d.nCy, self.mesh_3d.nCz), order='F' + ) + + self._Pz = [] + + # This part can be cythonized or parallelized + for i_xy in range(self.mesh_3d.nCx*self.mesh_3d.nCy): + actind_temp = ACTIND[i_xy, :] + z_temp = -(Z[i_xy, :] - z[i_xy]) + self._Pz.append(mesh_1d.getInterpolationMat(z_temp[actind_temp])) + + def interpolate_from_1d_to_3d(self, physical_property_1d): + physical_property_2d = self.P*( + physical_property_1d.reshape( + (self.n_layer, self.n_sounding), order='F' + ).T + ) + physical_property_3d = np.ones( + (self.mesh_3d.nCx*self.mesh_3d.nCy, self.mesh_3d.nCz), + order='C', dtype=float + ) * np.nan + + ACTIND = self._actinds.reshape( + 
(self.mesh_3d.nCx*self.mesh_3d.nCy, self.mesh_3d.nCz), order='F' + ) + + for i_xy in range(self.mesh_3d.nCx*self.mesh_3d.nCy): + actind_temp = ACTIND[i_xy, :] + physical_property_3d[i_xy, actind_temp] = ( + self._Pz[i_xy]*physical_property_2d[i_xy, :] + ) + + return physical_property_3d diff --git a/SimPEG/regularization/laterally_constrained.py b/SimPEG/regularization/laterally_constrained.py index af4f785157..f0109c523a 100644 --- a/SimPEG/regularization/laterally_constrained.py +++ b/SimPEG/regularization/laterally_constrained.py @@ -47,6 +47,7 @@ def __init__( self, mesh, active_cells=None, + active_edges=None, alpha_r=None, length_scale_r=None, norms=None, @@ -67,6 +68,8 @@ def __init__( self._regularization_mesh = mesh if active_cells is not None: self._regularization_mesh.active_cells = active_cells + if active_edges is not None: + self._regularization_mesh.active_edges = active_edges if alpha_r is not None: if length_scale_r is not None: From bb6584b7c3b0856824ca0dc9bc0e61703805b898 Mon Sep 17 00:00:00 2001 From: sgkang Date: Wed, 8 Nov 2023 09:48:28 -0800 Subject: [PATCH 091/164] stitched class. --- SimPEG/electromagnetics/base_1d_stitched.py | 552 ++++++++++++++++++ .../time_domain/simulation_1d_stitched.py | 246 ++++++++ 2 files changed, 798 insertions(+) create mode 100644 SimPEG/electromagnetics/base_1d_stitched.py create mode 100644 SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py diff --git a/SimPEG/electromagnetics/base_1d_stitched.py b/SimPEG/electromagnetics/base_1d_stitched.py new file mode 100644 index 0000000000..fc1cbfac94 --- /dev/null +++ b/SimPEG/electromagnetics/base_1d_stitched.py @@ -0,0 +1,552 @@ +from scipy.constants import mu_0 +import numpy as np +from ..simulation import BaseSimulation +from .. import props +from .. 
import utils +from ..utils.code_utils import ( + validate_integer, + validate_location_property, + validate_ndarray_with_shape, + validate_type, +) +############################################################################### +# # +# BaseStitchedEM1DSimulation # +# # +############################################################################### + +__all__ = ["BaseStitchedEM1DSimulation"] + +class BaseStitchedEM1DSimulation(BaseSimulation): + """ + Base class for the stitched 1D simulation. This simulation models the EM + response for a set of 1D EM soundings. + """ + + _formulation = "1D" + _coefficients = [] + _coefficients_set = False + + # _Jmatrix_sigma = None + # _Jmatrix_height = None + # _J = None + + # Properties for electrical conductivity/resistivity + sigma, sigmaMap, sigmaDeriv = props.Invertible( + "Electrical conductivity at infinite frequency (S/m)" + ) + + eta = props.PhysicalProperty("Intrinsic chargeability (V/V), 0 <= eta < 1") + tau = props.PhysicalProperty("Time constant for Cole-Cole model (s)") + c = props.PhysicalProperty("Frequency Dependency for Cole-Cole model, 0 < c < 1") + + # Properties for magnetic susceptibility + mu, muMap, muDeriv = props.Invertible( + "Magnetic permeability at infinite frequency (SI)" + ) + chi = props.PhysicalProperty( + "DC magnetic susceptibility for viscous remanent magnetization contribution (SI)" + ) + tau1 = props.PhysicalProperty( + "Lower bound for log-uniform distribution of time-relaxation constants for viscous remanent magnetization (s)" + ) + tau2 = props.PhysicalProperty( + "Upper bound for log-uniform distribution of time-relaxation constants for viscous remanent magnetization (s)" + ) + + # Additional properties + h, hMap, hDeriv = props.Invertible("Receiver Height (m), h > 0") + + thicknesses, thicknessesMap, thicknessesDeriv = props.Invertible( + "layer thicknesses (m)" + ) + + def __init__( + self, + sigma=None, + sigmaMap=None, + thicknesses=None, + thicknessesMap=None, + mu=mu_0, + 
muMap=None, + h=None, + hMap=None, + eta=None, + tau=None, + c=None, + dchi=None, + tau1=None, + tau2=None, + fix_Jmatrix=False, + topo=None, + parallel=False, + n_cpu=None, + **kwargs, + ): + super().__init__(mesh=None, **kwargs) + self.sigma = sigma + self.sigmaMap = sigmaMap + self.mu = mu + self.muMap = muMap + self.h = h + self.hMap = hMap + if thicknesses is None: + thicknesses = np.array([]) + self.thicknesses = thicknesses + self.thicknessesMap = thicknessesMap + self.eta = eta + self.tau = tau + self.c = c + self.dchi = dchi + self.tau1 = tau1 + self.tau2 = tau2 + self.fix_Jmatrix = fix_Jmatrix + self.topo = topo + if self.topo is None: + self.set_null_topography() + + self.parallel = parallel + self.n_cpu = n_cpu + + if self.parallel: + if self.verbose: + print(">> Use multiprocessing for parallelization") + if self.n_cpu is None: + self.n_cpu = multiprocessing.cpu_count() + print((">> n_cpu: %i") % (self.n_cpu)) + else: + if self.verbose: + print(">> Serial version is used") + + @property + def fix_Jmatrix(self): + """Whether to fix the sensitivity matrix. + + Returns + ------- + bool + """ + return self._fix_Jmatrix + + @fix_Jmatrix.setter + def fix_Jmatrix(self, value): + self._fix_Jmatrix = validate_type("fix_Jmatrix", value, bool) + + @property + def topo(self): + """Topography. 
+ + Returns + ------- + numpy.ndarray of float + """ + return self._topo + + @topo.setter + def topo(self, value): + self._topo = validate_ndarray_with_shape("topo", value, shape=("*",3)) + + @property + def parallel(self): + """Parallel + + Returns + ------- + bool + """ + return self._parallel + + @parallel.setter + def parallel(self, value): + self._parallel = validate_type("parallel", value, bool) + + @property + def n_cpu(self): + """Number of cpus + + Returns + ------- + int + """ + return self._n_cpu + + @n_cpu.setter + def n_cpu(self, value): + self._n_cpu = validate_integer("n_cpu", value, min_val=1) + + @property + def invert_height(self): + if self.hMap is None: + return False + else: + return True + + @property + def halfspace_switch(self): + """True = halfspace, False = layered Earth""" + if (self.thicknesses is None) | (len(self.thicknesses)==0): + return True + else: + return False + + @property + def n_layer(self): + if self.thicknesses is None: + return 1 + else: + return len(self.thicknesses) + 1 + + @property + def n_sounding(self): + return len(self.survey.source_location_by_sounding_dict) + + + @property + def data_index(self): + return self.survey.data_index + + + # ------------- For physical properties ------------- # + @property + def Sigma(self): + if getattr(self, '_Sigma', None) is None: + # Ordering: first z then x + self._Sigma = self.sigma.reshape((self.n_sounding, self.n_layer)) + return self._Sigma + + @property + def Thicknesses(self): + if getattr(self, '_Thicknesses', None) is None: + # Ordering: first z then x + if len(self.thicknesses) == int(self.n_sounding * (self.n_layer-1)): + self._Thicknesses = self.thicknesses.reshape((self.n_sounding, self.n_layer-1)) + else: + self._Thicknesses = np.tile(self.thicknesses, (self.n_sounding, 1)) + return self._Thicknesses + + @property + def Eta(self): + if getattr(self, '_Eta', None) is None: + # Ordering: first z then x + if self.eta is None: + self._Eta = np.zeros( + (self.n_sounding, 
self.n_layer), dtype=float, order='C' + ) + else: + self._Eta = self.eta.reshape((self.n_sounding, self.n_layer)) + return self._Eta + + @property + def Tau(self): + if getattr(self, '_Tau', None) is None: + # Ordering: first z then x + if self.tau is None: + self._Tau = 1e-3*np.ones( + (self.n_sounding, self.n_layer), dtype=float, order='C' + ) + else: + self._Tau = self.tau.reshape((self.n_sounding, self.n_layer)) + return self._Tau + + @property + def C(self): + if getattr(self, '_C', None) is None: + # Ordering: first z then x + if self.c is None: + self._C = np.ones( + (self.n_sounding, self.n_layer), dtype=float, order='C' + ) + else: + self._C = self.c.reshape((self.n_sounding, self.n_layer)) + return self._C + + @property + def Chi(self): + if getattr(self, '_Chi', None) is None: + # Ordering: first z then x + if self.chi is None: + self._Chi = np.zeros( + (self.n_sounding, self.n_layer), dtype=float, order='C' + ) + else: + self._Chi = self.chi.reshape((self.n_sounding, self.n_layer)) + return self._Chi + + @property + def dChi(self): + if getattr(self, '_dChi', None) is None: + # Ordering: first z then x + if self.dchi is None: + self._dChi = np.zeros( + (self.n_sounding, self.n_layer), dtype=float, order='C' + ) + else: + self._dChi = self.dchi.reshape((self.n_sounding, self.n_layer)) + return self._dChi + + @property + def Tau1(self): + if getattr(self, '_Tau1', None) is None: + # Ordering: first z then x + if self.tau1 is None: + self._Tau1 = 1e-10 * np.ones( + (self.n_sounding, self.n_layer), dtype=float, order='C' + ) + else: + self._Tau1 = self.tau1.reshape((self.n_sounding, self.n_layer)) + return self._Tau1 + + @property + def Tau2(self): + if getattr(self, '_Tau2', None) is None: + # Ordering: first z then x + if self.tau2 is None: + self._Tau2 = 100. 
* np.ones( + (self.n_sounding, self.n_layer), dtype=float, order='C' + ) + else: + self._Tau2 = self.tau2.reshape((self.n_sounding, self.n_layer)) + return self._Tau2 + + @property + def JtJ_sigma(self): + return self._JtJ_sigma + + def JtJ_height(self): + return self._JtJ_height + + @property + def H(self): + if self.hMap is None: + return np.ones(self.n_sounding) + else: + return self.h + + + # ------------- Etcetra .... ------------- # + @property + def IJLayers(self): + if getattr(self, '_IJLayers', None) is None: + # Ordering: first z then x + self._IJLayers = self.set_ij_n_layer() + return self._IJLayers + + @property + def IJHeight(self): + if getattr(self, '_IJHeight', None) is None: + # Ordering: first z then x + self._IJHeight = self.set_ij_n_layer(n_layer=1) + return self._IJHeight + + # ------------- For physics ------------- # + + def get_uniq_soundings(self): + self._sounding_types_uniq, self._ind_sounding_uniq = np.unique( + self.survey._sounding_types, return_index=True + ) + + def input_args(self, i_sounding, output_type='forward'): + output = ( + self.survey.get_sources_by_sounding_number(i_sounding), + self.topo[i_sounding, :], + self.Thicknesses[i_sounding,:], + self.Sigma[i_sounding, :], + self.Eta[i_sounding, :], + self.Tau[i_sounding, :], + self.C[i_sounding, :], + self.Chi[i_sounding, :], + self.dChi[i_sounding, :], + self.Tau1[i_sounding, :], + self.Tau2[i_sounding, :], + self.H[i_sounding], + output_type, + self.invert_height, + False, + self._coefficients[i_sounding], + ) + return output + + # This is the most expensive process, but required once + # May need to find unique + def input_args_for_coeff(self, i_sounding): + output = ( + self.survey.get_sources_by_sounding_number(i_sounding), + self.topo[i_sounding, :], + self.Thicknesses[i_sounding,:], + self.Sigma[i_sounding, :], + self.Eta[i_sounding, :], + self.Tau[i_sounding, :], + self.C[i_sounding, :], + self.Chi[i_sounding, :], + self.dChi[i_sounding, :], + self.Tau1[i_sounding, :], + 
self.Tau2[i_sounding, :], + self.H[i_sounding], + 'forward', + self.invert_height, + True, + [], + ) + return output + + def fields(self, m): + if self.verbose: + print("Compute fields") + + return self.forward(m) + + def dpred(self, m, f=None): + """ + Return predicted data. + Predicted data, (`_pred`) are computed when + self.fields is called. + """ + if f is None: + f = self.fields(m) + + return f + + @property + def sounding_number(self): + self._sounding_number = [key for key in self.survey.source_location_by_sounding_dict.keys()] + return self._sounding_number + + @property + def sounding_number_chunks(self): + self._sounding_number_chunks = list(self.chunks(self.sounding_number, self.n_sounding_for_chunk)) + return self._sounding_number_chunks + + @property + def n_chunk(self): + self._n_chunk = len(self.sounding_number_chunks) + return self._n_chunk + + def chunks(self, lst, n): + """Yield successive n-sized chunks from lst.""" + for i in range(0, len(lst), n): + yield lst[i:i + n] + + def input_args_by_chunk(self, i_chunk, output_type): + args_by_chunks = [] + for i_sounding in self.sounding_number_chunks[i_chunk]: + args_by_chunks.append(self.input_args(i_sounding, output_type)) + return args_by_chunks + + def set_null_topography(self): + self.topo = np.vstack( + [np.c_[src.location[0], src.location[1], 0.] 
for i, src in enumerate(self.survey.source_list)] + ) + + + def set_ij_n_layer(self, n_layer=None): + """ + Compute (I, J) indicies to form sparse sensitivity matrix + This will be used in GlobalEM1DSimulation when after sensitivity matrix + for each sounding is computed + """ + I = [] + J = [] + shift_for_J = 0 + shift_for_I = 0 + if n_layer is None: + m = self.n_layer + else: + m = n_layer + source_location_by_sounding_dict = self.survey.source_location_by_sounding_dict + for i_sounding in range(self.n_sounding): + n = self.survey.vnD_by_sounding_dict[i_sounding] + J_temp = np.tile(np.arange(m), (n, 1)) + shift_for_J + I_temp = ( + np.tile(np.arange(n), (1, m)).reshape((n, m), order='F') + + shift_for_I + ) + J.append(utils.mkvc(J_temp)) + I.append(utils.mkvc(I_temp)) + shift_for_J += m + shift_for_I = I_temp[-1, -1] + 1 + J = np.hstack(J).astype(int) + I = np.hstack(I).astype(int) + return (I, J) + + def set_ij_height(self): + """ + Compute (I, J) indicies to form sparse sensitivity matrix + This will be used in GlobalEM1DSimulation when after sensitivity matrix + for each sounding is computed + """ + I = [] + J = [] + shift_for_J = 0 + shift_for_I = 0 + m = self.n_layer + for i_sounding in range(self.n_sounding): + n = self.survey.vnD_by_sounding_dict[i_sounding] + J_temp = np.tile(np.arange(m), (n, 1)) + shift_for_J + I_temp = ( + np.tile(np.arange(n), (1, m)).reshape((n, m), order='F') + + shift_for_I + ) + J.append(utils.mkvc(J_temp)) + I.append(utils.mkvc(I_temp)) + shift_for_J += m + shift_for_I = I_temp[-1, -1] + 1 + J = np.hstack(J).astype(int) + I = np.hstack(I).astype(int) + return (I, J) + + def Jvec(self, m, v, f=None): + J_sigma = self.getJ_sigma(m) + Jv = J_sigma@(self.sigmaDeriv@v) + if self.hMap is not None: + J_height = self.getJ_height(m) + Jv += J_height@(self.hDeriv@v) + return Jv + + def Jtvec(self, m, v, f=None): + J_sigma = self.getJ_sigma(m) + Jtv = self.sigmaDeriv.T @ (J_sigma.T*v) + if self.hMap is not None: + J_height = 
self.getJ_height(m) + Jtv += self.hDeriv.T*(J_height.T*v) + return Jtv + + def getJtJdiag(self, m, W=None, threshold=1e-8): + """ + Compute diagonal component of JtJ or + trace of sensitivity matrix (J) + """ + if getattr(self, "_gtgdiag", None) is None: + J_sigma = self.getJ_sigma(m) + J_matrix = J_sigma@(self.sigmaDeriv) + + if self.hMap is not None: + J_height = self.getJ_height(m) + J_matrix += J_height*self.hDeriv + + if W is None: + W = utils.speye(J_matrix.shape[0]) + J_matrix = W*J_matrix + gtgdiag = (J_matrix.T*J_matrix).diagonal() + gtgdiag /= gtgdiag.max() + gtgdiag += threshold + self._gtgdiag = gtgdiag + return self._gtgdiag + + @property + def deleteTheseOnModelUpdate(self): + toDelete = super().deleteTheseOnModelUpdate + if self.fix_Jmatrix is False: + toDelete += ['_Sigma', '_J', '_Jmatrix_sigma', '_Jmatrix_height', '_gtg_diag'] + return toDelete + + # def _run_simulation_by_chunk(self, args_chunk): + # """ + # This method simulates the EM response or computes the sensitivities for + # a single sounding. The method allows for parallelization of + # the stitched 1D problem. + # """ + # n = len(args_chunk) + # results = [ + # self.run_simulation(args_chunk[i_sounding]) for i_sounding in range(n) + # ] + # return results \ No newline at end of file diff --git a/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py b/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py new file mode 100644 index 0000000000..54fcaa539a --- /dev/null +++ b/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py @@ -0,0 +1,246 @@ +import numpy as np +from scipy import sparse as sp +from ... import utils +from ..base_1d_stitched import BaseStitchedEM1DSimulation +from .simulation_1d import Simulation1DLayered +from .survey import Survey +from ... 
import maps +from multiprocessing import Pool + +def run_simulation_time_domain(args): + import os + os.environ["MKL_NUM_THREADS"] = "1" + """ + This method simulates the EM response or computes the sensitivities for + a single sounding. The method allows for parallelization of + the stitched 1D problem. + :param src: a EM1DTM source object + :param topo: Topographic location (x, y, z) + :param np.array thicknesses: np.array(N-1,) layer thicknesses for a single sounding + :param np.array sigma: np.array(N,) layer conductivities for a single sounding + :param np.array eta: np.array(N,) intrinsic chargeabilities for a single sounding + :param np.array tau: np.array(N,) Cole-Cole time constant for a single sounding + :param np.array c: np.array(N,) Cole-Cole frequency distribution constant for a single sounding + :param np.array chi: np.array(N,) magnetic susceptibility for a single sounding + :param np.array dchi: np.array(N,) DC susceptibility for magnetic viscosity for a single sounding + :param np.array tau1: np.array(N,) lower time-relaxation constant for magnetic viscosity for a single sounding + :param np.array tau2: np.array(N,) upper time-relaxation constant for magnetic viscosity for a single sounding + :param float h: source height for a single sounding + :param string output_type: "response", "sensitivity_sigma", "sensitivity_height" + :param bool invert_height: boolean switch for inverting for source height + :return: response or sensitivities + """ + + ( + source_list, + topo, + thicknesses, + sigma, + eta, + tau, + c, + chi, + dchi, + tau1, + tau2, + h, + output_type, + invert_height, + return_projection, + coefficients + ) = args + + n_layer = len(thicknesses) + 1 + local_survey = Survey(source_list) + if output_type == "sensitivity": + wires = maps.Wires(("sigma", n_layer), ("h", 1)) + sigma_map = wires.sigma + h_map = wires.h + elif output_type == "forward": + sigma_map = maps.IdentityMap(nP=n_layer) + h_map = None + + sim = Simulation1DLayered( + 
survey=local_survey, + thicknesses=thicknesses, + sigmaMap=sigma_map, + hMap=h_map, + eta=eta, + tau=tau, + c=c, + topo=topo, + hankel_filter="key_101_2009", + ) + + if return_projection: + return sim.get_coefficients() + + sim._set_coefficients(coefficients) + + if output_type == "sensitivity": + J = sim.getJ(np.r_[sigma, h]) + return J + else: + em_response = sim.dpred(sigma) + return em_response + +####################################################################### +# STITCHED 1D SIMULATION CLASS AND GLOBAL FUNCTIONS +####################################################################### + + +class Simulation1DLayeredStitched(BaseStitchedEM1DSimulation): + + _simulation_type = 'time' + # survey = properties.Instance("a survey object", Survey, required=True) + # def run_simulation(self, args): + # if self.verbose: + # print(">> Time-domain") + # return self._run_simulation(args) + + # TODO: need to think about if there are piecies that are height invariant. + # + # def get_uniq_soundings(self): + # self._sounding_types_uniq, self._ind_sounding_uniq = np.unique( + # self.survey._sounding_types, return_index=True + # ) + # def get_coefficients(self): + # if self.verbose: + # print(">> Calculate coefficients") + + # self.get_uniq_soundings() + + # run_simulation = run_simulation_time_domain + + # self._coefficients = {} + # for kk, ii in enumerate(self._ind_sounding_uniq): + # name = self._sounding_types_uniq[kk] + # self._coefficients[name] = run_simulation(self.input_args_for_coeff(ii)) + # self._coefficients_set = True + + def get_coefficients(self): + + run_simulation = run_simulation_time_domain + + if self.verbose: + print(">> Calculate coefficients") + if self.parallel: + pool = Pool(self.n_cpu) + self._coefficients = pool.map( + run_simulation, + [ + self.input_args_for_coeff(i) for i in range(self.n_sounding) + ] + ) + self._coefficients_set = True + pool.close() + pool.join() + else: + self._coefficients = [ + 
run_simulation(self.input_args_for_coeff(i)) for i in range(self.n_sounding) + ] + + def forward(self, m): + self.model = m + + if self.verbose: + print(">> Compute response") + + # Set flat topo at zero + # if self.topo is None: + + + # TODO: Need to pull separate hankel coeffcients + # and A matrix for convolution + # hankel coefficients vary with variable height! + + if self._coefficients_set is False: + self.get_coefficients() + + run_simulation = run_simulation_time_domain + + if self.parallel: + if self.verbose: + print ('parallel') + #This assumes the same # of layers for each of sounding + # if self.n_sounding_for_chunk is None: + pool = Pool(self.n_cpu) + result = pool.map( + run_simulation, + [ + self.input_args(i, output_type='forward') for i in range(self.n_sounding) + ] + ) + + pool.close() + pool.join() + else: + + result = [ + run_simulation(self.input_args(i, output_type='forward')) for i in range(self.n_sounding) + ] + + return np.hstack(result) + + def getJ(self, m): + """ + Compute d F / d sigma + """ + self.model = m + if getattr(self, "_J", None) is None: + + if self.verbose: + print(">> Compute J") + + if self._coefficients_set is False: + self.get_coefficients() + + run_simulation = run_simulation_time_domain + + if self.parallel: + if self.verbose: + print(">> Start pooling") + + pool = Pool(self.n_cpu) + + # Deprecate this for now, but revisit later + # It is an idea of chunking for parallelization + # if self.n_sounding_for_chunk is None: + self._J = pool.map( + run_simulation, + [ + self.input_args(i, output_type='sensitivity') for i in range(self.n_sounding) + ] + ) + + + if self.verbose: + print(">> End pooling and form J matrix") + + else: + self._J = [ + run_simulation(self.input_args(i, output_type='sensitivity')) for i in range(self.n_sounding) + ] + return self._J + + def getJ_sigma(self, m): + """ + Compute d F / d sigma + """ + if getattr(self, "_Jmatrix_sigma", None) is None: + J = self.getJ(m) + self._Jmatrix_sigma = 
np.hstack([utils.mkvc(J[i]['ds']) for i in range(self.n_sounding)]) + self._Jmatrix_sigma = sp.coo_matrix( + (self._Jmatrix_sigma, self.IJLayers), dtype=float + ).tocsr() + return self._Jmatrix_sigma + + + def getJ_height(self, m): + if getattr(self, "_Jmatrix_height", None) is None: + J = self.getJ(m) + self._Jmatrix_height = np.hstack([utils.mkvc(J[i]['dh']) for i in range(self.n_sounding)]) + self._Jmatrix_height = sp.coo_matrix( + (self._Jmatrix_height, self.IJHeight), dtype=float + ).tocsr() + return self._Jmatrix_height \ No newline at end of file From 35540d3c829ebc0b19ec88db02115dd3d3afeeea Mon Sep 17 00:00:00 2001 From: sgkang Date: Wed, 8 Nov 2023 10:06:13 -0800 Subject: [PATCH 092/164] test --- SimPEG/electromagnetics/base_1d_stitched.py | 1 + 1 file changed, 1 insertion(+) diff --git a/SimPEG/electromagnetics/base_1d_stitched.py b/SimPEG/electromagnetics/base_1d_stitched.py index fc1cbfac94..f5ab0998a6 100644 --- a/SimPEG/electromagnetics/base_1d_stitched.py +++ b/SimPEG/electromagnetics/base_1d_stitched.py @@ -139,6 +139,7 @@ def topo(self): Returns ------- numpy.ndarray of float + test """ return self._topo From b094efbbebafc8acb3aa872773d1e53f01b2cc97 Mon Sep 17 00:00:00 2001 From: sgkang Date: Wed, 8 Nov 2023 10:18:59 -0800 Subject: [PATCH 093/164] test --- SimPEG/electromagnetics/base_1d_stitched.py | 1 - 1 file changed, 1 deletion(-) diff --git a/SimPEG/electromagnetics/base_1d_stitched.py b/SimPEG/electromagnetics/base_1d_stitched.py index f5ab0998a6..fc1cbfac94 100644 --- a/SimPEG/electromagnetics/base_1d_stitched.py +++ b/SimPEG/electromagnetics/base_1d_stitched.py @@ -139,7 +139,6 @@ def topo(self): Returns ------- numpy.ndarray of float - test """ return self._topo From d4612044332d6e5e56f193c804e38240cf2ff329 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 8 Nov 2023 10:51:00 -0800 Subject: [PATCH 094/164] Replace ``array`` in docstrings for ``numpy.ndarray`` --- .../gravity/_numba_functions.py | 18 +++++++++--------- 
SimPEG/potential_fields/gravity/simulation.py | 14 +++++++------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/SimPEG/potential_fields/gravity/_numba_functions.py b/SimPEG/potential_fields/gravity/_numba_functions.py index 644071bd33..c84069f150 100644 --- a/SimPEG/potential_fields/gravity/_numba_functions.py +++ b/SimPEG/potential_fields/gravity/_numba_functions.py @@ -37,16 +37,16 @@ def _forward_gravity( Parameters ---------- - receivers : (n_receivers, 3) array + receivers : (n_receivers, 3) numpy.ndarray Array with the locations of the receivers - nodes : (n_active_nodes, 3) array + nodes : (n_active_nodes, 3) numpy.ndarray Array with the location of the mesh nodes. - densities : (n_active_cells) + densities : (n_active_cells) numpy.ndarray Array with densities of each active cell in the mesh. - fields : (n_receivers) array + fields : (n_receivers) numpy.ndarray Array full of zeros where the gravity fields on each receiver will be stored. This could be a preallocated array or a slice of it. - cell_nodes : (n_active_cells, 8) array + cell_nodes : (n_active_cells, 8) numpy.ndarray Array of integers, where each row contains the indices of the nodes for each active cell in the mesh. kernel_func : callable @@ -119,14 +119,14 @@ def _sensitivity_gravity( Parameters ---------- - receivers : (n_receivers, 3) array + receivers : (n_receivers, 3) numpy.ndarray Array with the locations of the receivers - nodes : (n_active_nodes, 3) array + nodes : (n_active_nodes, 3) numpy.ndarray Array with the location of the mesh nodes. sensitivity_matrix : (n_receivers, n_active_nodes) array Empty 2d array where the sensitivity matrix elements will be filled. This could be a preallocated empty array or a slice of it. - cell_nodes : (n_active_cells, 8) array + cell_nodes : (n_active_cells, 8) numpy.ndarray Array of integers, where each row contains the indices of the nodes for each active cell in the mesh. 
kernel_func : callable @@ -220,7 +220,7 @@ def _kernels_in_nodes_to_cell( Parameters ---------- - kernels : (n_active_nodes,) array + kernels : (n_active_nodes,) numpy.ndarray Array with kernel values on each one of the nodes in the mesh. nodes_indices : ints Indices of the nodes for the current cell in "F" order (x changes diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index 2346f0ee9e..1de6468eaf 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -83,9 +83,9 @@ class Simulation3DIntegral(BasePFSimulation): Mesh use to run the gravity simulation. survey : SimPEG.potential_fields.gravity.Survey Gravity survey with information of the receivers. - ind_active : (n_cells) array, optional + ind_active : (n_cells) numpy.ndarray, optional Array that indicates which cells in ``mesh`` are active cells. - rho : array (optional) + rho : numpy.ndarray (optional) Density array for the active cells in the mesh. rhoMap : Mapping (optional) Model mapping. @@ -186,12 +186,12 @@ def fields(self, m): Parameters ---------- - m : (n_active_cells,) array + m : (n_active_cells,) numpy.ndarray Array with values for the model. Returns ------- - (nD,) array + (nD,) numpy.ndarray Gravity fields generated by the given model on every receiver location. """ @@ -369,13 +369,13 @@ def _forward(self, densities): Parameters ---------- - densities : (n_active_cells) array + densities : (n_active_cells) numpy.ndarray Array containing the densities of the active cells in the mesh, in g/cc. Returns ------- - (nD,) array + (nD,) numpy.ndarray Always return a ``np.float64`` array. 
""" # Gather active nodes and the indices of the nodes for each active cell @@ -411,7 +411,7 @@ def _sensitivity_matrix(self): Returns ------- - (nD, n_active_cells) array + (nD, n_active_cells) numpy.ndarray """ # Gather active nodes and the indices of the nodes for each active cell active_nodes, active_cell_nodes = self._get_active_nodes() From ab22779e84c15844df562720468205e7a3e6d2f1 Mon Sep 17 00:00:00 2001 From: sgkang Date: Wed, 8 Nov 2023 11:24:13 -0800 Subject: [PATCH 095/164] implementing tx and rx filters. --- .../electromagnetics/time_domain/receivers.py | 72 ++++++++++++++++++- .../time_domain/simulation_1d.py | 13 ++++ 2 files changed, 84 insertions(+), 1 deletion(-) diff --git a/SimPEG/electromagnetics/time_domain/receivers.py b/SimPEG/electromagnetics/time_domain/receivers.py index 3179c527af..de17420f8d 100644 --- a/SimPEG/electromagnetics/time_domain/receivers.py +++ b/SimPEG/electromagnetics/time_domain/receivers.py @@ -1,6 +1,6 @@ import scipy.sparse as sp -from ...utils import mkvc, validate_type, validate_direction +from ...utils import mkvc, validate_type, validate_direction, validate_float from discretize.utils import Zero from ...survey import BaseTimeRx import warnings @@ -25,6 +25,10 @@ def __init__( times, orientation="z", use_source_receiver_offset=False, + bw_cutoff_frequency=3e5, + bw_power=0., + lp_cutoff_frequency=2.1e5, + lp_power=0., **kwargs ): proj = kwargs.pop("projComp", None) @@ -45,6 +49,11 @@ def __init__( self.orientation = orientation self.use_source_receiver_offset = use_source_receiver_offset + self.bw_cutoff_frequency = bw_cutoff_frequency + self.bw_power = bw_power + self.lp_cutoff_frequency = lp_cutoff_frequency + self.lp_power = lp_power + super().__init__(locations=locations, times=times, **kwargs) @property @@ -84,6 +93,67 @@ def use_source_receiver_offset(self, val): "use_source_receiver_offset", val, bool ) + @property + def bw_cutoff_frequency(self): + """Butter worth low pass filter + + Returns + ------- + 
numpy.ndarray + Butter worth low pass filter + """ + return self._bw_cutoff_frequency + + @bw_cutoff_frequency.setter + def bw_cutoff_frequency(self, var): + self._bw_cutoff_frequency = validate_float("bw_cutoff_frequency", var, min_val=0.) + + @property + def lp_cutoff_frequency(self): + """Low pass filter + + Returns + ------- + numpy.ndarray + Low pass filter + """ + return self._lp_cutoff_frequency + + @lp_cutoff_frequency.setter + def lp_cutoff_frequency(self, var): + self._lp_cutoff_frequency = validate_float("lp_cutoff_frequency", var, min_val=0.) + + + @property + def bw_power(self): + """Butter worth low pass filter + + Returns + ------- + numpy.ndarray + Butter worth low pass filter + """ + return self._bw_power + + @bw_power.setter + def bw_power(self, var): + self._bw_power = validate_float("bw_power", var, min_val=0., max_val=2) + + @property + def lp_power(self): + """Low pass filter + + Returns + ------- + numpy.ndarray + Low pass filter + """ + return self._lp_power + + @lp_power.setter + def lp_power(self, var): + self._lp_power = validate_float("lp_power", var, min_val=0., max_val=0.99999) + def getSpatialP(self, mesh, f): """Get spatial projection matrix from mesh to receivers. diff --git a/SimPEG/electromagnetics/time_domain/simulation_1d.py b/SimPEG/electromagnetics/time_domain/simulation_1d.py index 873febfd8a..c32ff3ffde 100644 --- a/SimPEG/electromagnetics/time_domain/simulation_1d.py +++ b/SimPEG/electromagnetics/time_domain/simulation_1d.py @@ -11,6 +11,7 @@ from scipy.constants import mu_0 from scipy.interpolate import InterpolatedUnivariateSpline as iuSpline from scipy.special import roots_legendre +from scipy import signal from empymod import filters from empymod.transform import get_dlf_points @@ -355,13 +356,25 @@ def _project_to_data(self, v): v_slice = v[np.arange(i, i_p1)] # this should order it as location changing faster than time # i.e. 
loc_1 t_1, loc_2 t_1, loc1 t2, loc2 t2 + + frequencies = self._frequencies + w = 2 * np.pi * frequencies + wc_lp = 2 * np.pi * rx.lp_cutoff_frequency + h_lp = (1+1j*w/wc_lp)**(-rx.lp_power) # low pass filter + wc_bw = 2 * np.pi * rx.bw_cutoff_frequency + numer, denom = signal.butter(rx.bw_power, wc_bw, 'low', analog=True) + _, h_bw = signal.freqs(numer, denom, worN=w) + h = h_lp * h_bw + if v.ndim == 3: + v_slice *= h[None,:,None] if isinstance(rx, (PointMagneticFluxDensity, PointMagneticField)): d = np.einsum("ij,...jk->...ik", As[i_A], v_slice.imag) else: d = np.einsum("ij,...jk->...ik", As[i_A], v_slice.real) out[i_dat:i_datp1] = d.reshape((-1, v.shape[-1]), order="F") else: + v_slice *= h[None,:] if isinstance(rx, (PointMagneticFluxDensity, PointMagneticField)): d = np.einsum("ij,...j->...i", As[i_A], v_slice.imag) else: From dcfe4acdd729b3d92a3f4c2e7e62ea94a3ee3d4c Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 8 Nov 2023 12:04:50 -0800 Subject: [PATCH 096/164] Fix wrong assignment: use += operator instead --- SimPEG/potential_fields/magnetics/_numba_functions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/potential_fields/magnetics/_numba_functions.py b/SimPEG/potential_fields/magnetics/_numba_functions.py index 9dc6a299e7..71dbad9100 100644 --- a/SimPEG/potential_fields/magnetics/_numba_functions.py +++ b/SimPEG/potential_fields/magnetics/_numba_functions.py @@ -357,7 +357,7 @@ def _forward_mag( uy = _kernels_in_nodes_to_cell(ky, nodes_indices) uz = _kernels_in_nodes_to_cell(kz, nodes_indices) if scalar_model: - fields[i] = ( + fields[i] += ( constant_factor * model[k] * regional_field_amplitude From bd78efbe809c3bb3231ca1c9d092552d82f1c62c Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 8 Nov 2023 12:30:00 -0800 Subject: [PATCH 097/164] Fix wrong indentation of increment of index_offset Fix the wrong indentation of the statement of incrementing the index_offset in the forward function. 
--- SimPEG/potential_fields/magnetics/simulation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 7a36b4800d..8c9119d81f 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -558,7 +558,7 @@ def _forward(self, model): constant_factor, scalar_model, ) - index_offset += n_rows + index_offset += n_rows return fields def _sensitivity_matrix(self): From 3b78a05a73640d06567032535977bbe3cc68314c Mon Sep 17 00:00:00 2001 From: sgkang Date: Wed, 8 Nov 2023 13:03:28 -0800 Subject: [PATCH 098/164] working frequency domain --- SimPEG/electromagnetics/base_1d.py | 8 ++- SimPEG/electromagnetics/base_1d_stitched.py | 61 ++++++++----------- .../frequency_domain/__init__.py | 1 + .../frequency_domain/simulation_1d.py | 2 +- .../frequency_domain/sources.py | 17 +++++- .../frequency_domain/survey.py | 42 +++++++++++++ .../time_domain/simulation_1d_stitched.py | 34 ++--------- 7 files changed, 98 insertions(+), 67 deletions(-) diff --git a/SimPEG/electromagnetics/base_1d.py b/SimPEG/electromagnetics/base_1d.py index 09fdb5170f..2157f31699 100644 --- a/SimPEG/electromagnetics/base_1d.py +++ b/SimPEG/electromagnetics/base_1d.py @@ -357,7 +357,11 @@ def _compute_hankel_coefficients(self): Is = [] n_w_past = 0 i_count = 0 - for src in survey.source_list: + + if self.hMap is not None: + hvec = self.h # source height above topo + + for i_src, src in enumerate(survey.source_list): # doing the check for source type by checking its name # to avoid importing and checking "isinstance" class_name = type(src).__name__ @@ -370,7 +374,7 @@ def _compute_hankel_coefficients(self): raise ValueError("Can only simulate horizontal circular loops") # Note: this assumes a fixed height for all sources if self.hMap is not None: - h = self.h # source height above topo + h = hvec[i_src] else: h = src.location[2] - 
self.topo[-1] diff --git a/SimPEG/electromagnetics/base_1d_stitched.py b/SimPEG/electromagnetics/base_1d_stitched.py index fc1cbfac94..f44b89929d 100644 --- a/SimPEG/electromagnetics/base_1d_stitched.py +++ b/SimPEG/electromagnetics/base_1d_stitched.py @@ -318,7 +318,8 @@ def JtJ_height(self): @property def H(self): if self.hMap is None: - return np.ones(self.n_sounding) + h = self.source_locations_for_sounding[:,2] - self.topo[:,2] + return h else: return self.h @@ -360,7 +361,6 @@ def input_args(self, i_sounding, output_type='forward'): self.Tau2[i_sounding, :], self.H[i_sounding], output_type, - self.invert_height, False, self._coefficients[i_sounding], ) @@ -383,7 +383,6 @@ def input_args_for_coeff(self, i_sounding): self.Tau2[i_sounding, :], self.H[i_sounding], 'forward', - self.invert_height, True, [], ) @@ -411,31 +410,35 @@ def sounding_number(self): self._sounding_number = [key for key in self.survey.source_location_by_sounding_dict.keys()] return self._sounding_number - @property - def sounding_number_chunks(self): - self._sounding_number_chunks = list(self.chunks(self.sounding_number, self.n_sounding_for_chunk)) - return self._sounding_number_chunks - @property def n_chunk(self): self._n_chunk = len(self.sounding_number_chunks) return self._n_chunk - - def chunks(self, lst, n): - """Yield successive n-sized chunks from lst.""" - for i in range(0, len(lst), n): - yield lst[i:i + n] - - def input_args_by_chunk(self, i_chunk, output_type): - args_by_chunks = [] - for i_sounding in self.sounding_number_chunks[i_chunk]: - args_by_chunks.append(self.input_args(i_sounding, output_type)) - return args_by_chunks + @property + def source_locations_for_sounding(self): + if getattr(self, '_source_locations_for_sounding', None) is None: + self._source_locations_for_sounding = np.vstack([self.survey._source_location_by_sounding_dict[ii][0] for ii in range(self.n_sounding)]) + return self._source_locations_for_sounding + + # def chunks(self, lst, n): + # """Yield 
successive n-sized chunks from lst.""" + # for i in range(0, len(lst), n): + # yield lst[i:i + n] + + # @property + # def sounding_number_chunks(self): + # self._sounding_number_chunks = list(self.chunks(self.sounding_number, self.n_sounding_for_chunk)) + # return self._sounding_number_chunks + + # def input_args_by_chunk(self, i_chunk, output_type): + # args_by_chunks = [] + # for i_sounding in self.sounding_number_chunks[i_chunk]: + # args_by_chunks.append(self.input_args(i_sounding, output_type)) + # return args_by_chunks def set_null_topography(self): - self.topo = np.vstack( - [np.c_[src.location[0], src.location[1], 0.] for i, src in enumerate(self.survey.source_list)] - ) + self.topo = self.source_locations_for_sounding.copy() + self.topo[:,2] = 0. def set_ij_n_layer(self, n_layer=None): @@ -537,16 +540,4 @@ def deleteTheseOnModelUpdate(self): toDelete = super().deleteTheseOnModelUpdate if self.fix_Jmatrix is False: toDelete += ['_Sigma', '_J', '_Jmatrix_sigma', '_Jmatrix_height', '_gtg_diag'] - return toDelete - - # def _run_simulation_by_chunk(self, args_chunk): - # """ - # This method simulates the EM response or computes the sensitivities for - # a single sounding. The method allows for parallelization of - # the stitched 1D problem. 
- # """ - # n = len(args_chunk) - # results = [ - # self.run_simulation(args_chunk[i_sounding]) for i_sounding in range(n) - # ] - # return results \ No newline at end of file + return toDelete \ No newline at end of file diff --git a/SimPEG/electromagnetics/frequency_domain/__init__.py b/SimPEG/electromagnetics/frequency_domain/__init__.py index 3dad3cde28..b01672595b 100644 --- a/SimPEG/electromagnetics/frequency_domain/__init__.py +++ b/SimPEG/electromagnetics/frequency_domain/__init__.py @@ -83,6 +83,7 @@ Simulation3DMagneticField, ) from .simulation_1d import Simulation1DLayered +from .simulation_1d_stitched import Simulation1DLayeredStitched from .fields import ( Fields3DElectricField, Fields3DMagneticFluxDensity, diff --git a/SimPEG/electromagnetics/frequency_domain/simulation_1d.py b/SimPEG/electromagnetics/frequency_domain/simulation_1d.py index fc9200b146..1cacdf73b8 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation_1d.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation_1d.py @@ -181,7 +181,7 @@ def getJ(self, m, f=None): # need to re-arange v_dh as it's currently (n_data x 1) # however it already contains all the relevant information... # just need to map it from the rx index to the source index associated.. 
- v_dh = np.zeros((self.survey.nSrc, v_dh_temp.shape[0])) + v_dh = np.zeros((self.survey.nSrc, v_dh_temp.shape[0]), dtype=complex) i = 0 for i_src, src in enumerate(self.survey.source_list): diff --git a/SimPEG/electromagnetics/frequency_domain/sources.py b/SimPEG/electromagnetics/frequency_domain/sources.py index bc54e0fdf4..94268f476e 100644 --- a/SimPEG/electromagnetics/frequency_domain/sources.py +++ b/SimPEG/electromagnetics/frequency_domain/sources.py @@ -39,9 +39,10 @@ class BaseFDEMSrc(BaseEMSrc): _hPrimary = None _jPrimary = None - def __init__(self, receiver_list, frequency, location=None, **kwargs): + def __init__(self, receiver_list, frequency, location=None, i_sounding=0, **kwargs): super().__init__(receiver_list=receiver_list, location=location, **kwargs) self.frequency = frequency + self.i_sounding = i_sounding @property def frequency(self): @@ -59,6 +60,20 @@ def frequency(self, freq): freq = validate_float("frequency", freq, min_val=0.0) self._frequency = freq + @property + def i_sounding(self): + """Sounding number for the source + + Returns + ------- + int + """ + return self._i_sounding + + @i_sounding.setter + def i_sounding(self, value): + self._i_sounding = validate_integer("i_sounding", value, min_val=0) + def bPrimary(self, simulation): """Compute primary magnetic flux density diff --git a/SimPEG/electromagnetics/frequency_domain/survey.py b/SimPEG/electromagnetics/frequency_domain/survey.py index 4df3a90f05..7fe4b71b2e 100644 --- a/SimPEG/electromagnetics/frequency_domain/survey.py +++ b/SimPEG/electromagnetics/frequency_domain/survey.py @@ -16,13 +16,23 @@ def __init__(self, source_list, **kwargs): super(Survey, self).__init__(source_list, **kwargs) _frequency_dict = {} + _source_location_dict = {} + _source_location_by_sounding_dict = {} for src in self.source_list: if src.frequency not in _frequency_dict: _frequency_dict[src.frequency] = [] _frequency_dict[src.frequency] += [src] + if src.i_sounding not in _source_location_dict: + 
_source_location_dict[src.i_sounding] = [] + _source_location_by_sounding_dict[src.i_sounding] = [] + _source_location_dict[src.i_sounding] += [src] + _source_location_by_sounding_dict[src.i_sounding] += [src.location] + self._frequency_dict = _frequency_dict self._frequencies = sorted([f for f in self._frequency_dict]) + self._source_location_dict = _source_location_dict + self._source_location_by_sounding_dict = _source_location_by_sounding_dict @property def source_list(self): @@ -97,3 +107,35 @@ def get_sources_by_frequency(self, frequency): frequency in self._frequency_dict ), "The requested frequency is not in this survey." return self._frequency_dict[frequency] + + @property + def source_location_by_sounding_dict(self): + """ + Source locations in the survey as a dictionary + """ + return self._source_location_by_sounding_dict + + def get_sources_by_sounding_number(self, i_sounding): + """ + Returns the sources associated with a specific source location. + :param float i_sounding: source location number + :rtype: dictionary + :return: sources at the sepcified source location + """ + assert ( + i_sounding in self._source_location_dict + ), "The requested sounding is not in this survey." 
+ return self._source_location_dict[i_sounding] + + + @property + def vnD_by_sounding_dict(self): + if getattr(self, "_vnD_by_sounding_dict", None) is None: + self._vnD_by_sounding_dict = {} + for i_sounding in self.source_location_by_sounding_dict: + source_list = self.get_sources_by_sounding_number(i_sounding) + nD = 0 + for src in source_list: + nD += src.nD + self._vnD_by_sounding_dict[i_sounding] = nD + return self._vnD_by_sounding_dict diff --git a/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py b/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py index 54fcaa539a..829e7e7d7b 100644 --- a/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py +++ b/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py @@ -45,15 +45,16 @@ def run_simulation_time_domain(args): tau2, h, output_type, - invert_height, return_projection, coefficients ) = args n_layer = len(thicknesses) + 1 + n_src = len(source_list) + local_survey = Survey(source_list) if output_type == "sensitivity": - wires = maps.Wires(("sigma", n_layer), ("h", 1)) + wires = maps.Wires(("sigma", n_layer), ("h", n_src)) sigma_map = wires.sigma h_map = wires.h elif output_type == "forward": @@ -78,7 +79,9 @@ def run_simulation_time_domain(args): sim._set_coefficients(coefficients) if output_type == "sensitivity": - J = sim.getJ(np.r_[sigma, h]) + J = sim.getJ(np.r_[sigma, h*np.ones(n_src)]) + # we assumed the tx heights in a sounding is fixed + J['dh'] = J['dh'].sum(axis=1) return J else: em_response = sim.dpred(sigma) @@ -92,31 +95,6 @@ def run_simulation_time_domain(args): class Simulation1DLayeredStitched(BaseStitchedEM1DSimulation): _simulation_type = 'time' - # survey = properties.Instance("a survey object", Survey, required=True) - # def run_simulation(self, args): - # if self.verbose: - # print(">> Time-domain") - # return self._run_simulation(args) - - # TODO: need to think about if there are piecies that are height invariant. 
- # - # def get_uniq_soundings(self): - # self._sounding_types_uniq, self._ind_sounding_uniq = np.unique( - # self.survey._sounding_types, return_index=True - # ) - # def get_coefficients(self): - # if self.verbose: - # print(">> Calculate coefficients") - - # self.get_uniq_soundings() - - # run_simulation = run_simulation_time_domain - - # self._coefficients = {} - # for kk, ii in enumerate(self._ind_sounding_uniq): - # name = self._sounding_types_uniq[kk] - # self._coefficients[name] = run_simulation(self.input_args_for_coeff(ii)) - # self._coefficients_set = True def get_coefficients(self): From 1bbdc55c304b4c98b625d14f3ab39eb10e111690 Mon Sep 17 00:00:00 2001 From: sgkang Date: Wed, 8 Nov 2023 13:03:44 -0800 Subject: [PATCH 099/164] add frequency stitched code --- .../simulation_1d_stitched.py | 221 ++++++++++++++++++ 1 file changed, 221 insertions(+) create mode 100644 SimPEG/electromagnetics/frequency_domain/simulation_1d_stitched.py diff --git a/SimPEG/electromagnetics/frequency_domain/simulation_1d_stitched.py b/SimPEG/electromagnetics/frequency_domain/simulation_1d_stitched.py new file mode 100644 index 0000000000..85b46a4fef --- /dev/null +++ b/SimPEG/electromagnetics/frequency_domain/simulation_1d_stitched.py @@ -0,0 +1,221 @@ +import numpy as np +from scipy import sparse as sp +from ... import utils +from ..base_1d_stitched import BaseStitchedEM1DSimulation +from .simulation_1d import Simulation1DLayered +from .survey import Survey +from ... import maps +from multiprocessing import Pool + +def run_simulation_frequency_domain(args): + """ + This method simulates the EM response or computes the sensitivities for + a single sounding. The method allows for parallelization of + the stitched 1D problem. 
+ :param src: a EM1DFM source object + :param topo: Topographic location (x, y, z) + :param np.array thicknesses: np.array(N-1,) layer thicknesses for a single sounding + :param np.array sigma: np.array(N,) layer conductivities for a single sounding + :param np.array eta: np.array(N,) intrinsic chargeabilities for a single sounding + :param np.array tau: np.array(N,) Cole-Cole time constant for a single sounding + :param np.array c: np.array(N,) Cole-Cole frequency distribution constant for a single sounding + :param np.array chi: np.array(N,) magnetic susceptibility for a single sounding + :param np.array dchi: np.array(N,) DC susceptibility for magnetic viscosity for a single sounding + :param np.array tau1: np.array(N,) lower time-relaxation constant for magnetic viscosity for a single sounding + :param np.array tau2: np.array(N,) upper time-relaxation constant for magnetic viscosity for a single sounding + :param float h: source height for a single sounding + :param string output_type: "response", "sensitivity_sigma", "sensitivity_height" + :param bool invert_height: boolean switch for inverting for source height + :return: response or sensitivities + """ + + ( + source_list, + topo, + thicknesses, + sigma, + eta, + tau, + c, + chi, + dchi, + tau1, + tau2, + h, + output_type, + return_projection, + coefficients + ) = args + + n_layer = len(thicknesses) + 1 + local_survey = Survey(source_list) + n_src = len(source_list) + if output_type == "sensitivity": + wires = maps.Wires(("sigma", n_layer), ("h", n_src)) + sigma_map = wires.sigma + h_map = wires.h + elif output_type == "forward": + sigma_map = maps.IdentityMap(nP=n_layer) + h_map = None + + sim = Simulation1DLayered( + survey=local_survey, + thicknesses=thicknesses, + sigmaMap=sigma_map, + hMap=h_map, + eta=eta, + tau=tau, + c=c, + topo=topo, + hankel_filter="key_101_2009", + ) + + if return_projection: + return sim.get_coefficients() + + sim._set_coefficients(coefficients) + + if output_type == 
"sensitivity": + J = sim.getJ(np.r_[sigma, h * np.ones(n_src)]) + J['dh'] = J['dh'].sum(axis=1) + return J + else: + em_response = sim.dpred(sigma) + return em_response + + +####################################################################### +# STITCHED 1D SIMULATION CLASS AND GLOBAL FUNCTIONS +####################################################################### + + +class Simulation1DLayeredStitched(BaseStitchedEM1DSimulation): + + _simulation_type = 'frequency' + + def get_coefficients(self): + + run_simulation = run_simulation_frequency_domain + + if self.verbose: + print(">> Calculate coefficients") + if self.parallel: + pool = Pool(self.n_cpu) + self._coefficients = pool.map( + run_simulation, + [ + self.input_args_for_coeff(i) for i in range(self.n_sounding) + ] + ) + self._coefficients_set = True + pool.close() + pool.join() + else: + self._coefficients = [ + run_simulation(self.input_args_for_coeff(i)) for i in range(self.n_sounding) + ] + + def forward(self, m): + self.model = m + + if self.verbose: + print(">> Compute response") + + # Set flat topo at zero + # if self.topo is None: + + + # TODO: Need to pull separate hankel coeffcients + # and A matrix for convolution + # hankel coefficients vary with variable height! 
+ + if self._coefficients_set is False: + self.get_coefficients() + + run_simulation = run_simulation_frequency_domain + + if self.parallel: + if self.verbose: + print ('parallel') + #This assumes the same # of layers for each of sounding + # if self.n_sounding_for_chunk is None: + pool = Pool(self.n_cpu) + result = pool.map( + run_simulation, + [ + self.input_args(i, output_type='forward') for i in range(self.n_sounding) + ] + ) + + pool.close() + pool.join() + else: + + result = [ + run_simulation(self.input_args(i, output_type='forward')) for i in range(self.n_sounding) + ] + + return np.hstack(result) + + def getJ(self, m): + """ + Compute d F / d sigma + """ + self.model = m + if getattr(self, "_J", None) is None: + + if self.verbose: + print(">> Compute J") + + if self._coefficients_set is False: + self.get_coefficients() + + run_simulation = run_simulation_frequency_domain + + if self.parallel: + if self.verbose: + print(">> Start pooling") + + pool = Pool(self.n_cpu) + + # Deprecate this for now, but revisit later + # It is an idea of chunking for parallelization + # if self.n_sounding_for_chunk is None: + self._J = pool.map( + run_simulation, + [ + self.input_args(i, output_type='sensitivity') for i in range(self.n_sounding) + ] + ) + + + if self.verbose: + print(">> End pooling and form J matrix") + + else: + self._J = [ + run_simulation(self.input_args(i, output_type='sensitivity')) for i in range(self.n_sounding) + ] + return self._J + + def getJ_sigma(self, m): + """ + Compute d F / d sigma + """ + if getattr(self, "_Jmatrix_sigma", None) is None: + J = self.getJ(m) + self._Jmatrix_sigma = np.hstack([utils.mkvc(J[i]['ds']) for i in range(self.n_sounding)]) + self._Jmatrix_sigma = sp.coo_matrix( + (self._Jmatrix_sigma, self.IJLayers), dtype=float + ).tocsr() + return self._Jmatrix_sigma + + + def getJ_height(self, m): + if getattr(self, "_Jmatrix_height", None) is None: + J = self.getJ(m) + self._Jmatrix_height = np.hstack([utils.mkvc(J[i]['dh']) for i 
in range(self.n_sounding)]) + self._Jmatrix_height = sp.coo_matrix( + (self._Jmatrix_height, self.IJHeight), dtype=float + ).tocsr() + return self._Jmatrix_height \ No newline at end of file From 04c7779ab216e4d36feab92999646d33e7c7aa65 Mon Sep 17 00:00:00 2001 From: sgkang Date: Mon, 13 Nov 2023 17:04:01 -0800 Subject: [PATCH 100/164] add stitched FD test --- SimPEG/electromagnetics/frequency_domain/simulation_1d.py | 4 ++-- SimPEG/electromagnetics/time_domain/simulation_1d.py | 4 ++-- tests/em/em1d/test_EM1D_TD_general_jac_layers.py | 2 +- tests/em/em1d/test_EM1D_TD_off_fwd.py | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/SimPEG/electromagnetics/frequency_domain/simulation_1d.py b/SimPEG/electromagnetics/frequency_domain/simulation_1d.py index 1cacdf73b8..80912f7271 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation_1d.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation_1d.py @@ -104,9 +104,9 @@ def fields(self, m): receiver and outputs it as a list. Used for computing response or sensitivities. """ - self._compute_coefficients() - self.model = m + + self._compute_coefficients() C0s = self._C0s C1s = self._C1s diff --git a/SimPEG/electromagnetics/time_domain/simulation_1d.py b/SimPEG/electromagnetics/time_domain/simulation_1d.py index c32ff3ffde..2ea7a76b87 100644 --- a/SimPEG/electromagnetics/time_domain/simulation_1d.py +++ b/SimPEG/electromagnetics/time_domain/simulation_1d.py @@ -221,9 +221,9 @@ def fields(self, m): receiver and outputs it as a list. Used for computing response or sensitivities. 
""" - self._compute_coefficients() - self.model = m + + self._compute_coefficients() C0s = self._C0s C1s = self._C1s diff --git a/tests/em/em1d/test_EM1D_TD_general_jac_layers.py b/tests/em/em1d/test_EM1D_TD_general_jac_layers.py index dd90a32a72..513600b080 100644 --- a/tests/em/em1d/test_EM1D_TD_general_jac_layers.py +++ b/tests/em/em1d/test_EM1D_TD_general_jac_layers.py @@ -23,7 +23,7 @@ def setUp(self): start_time=-0.01, peak_time=-0.005, off_time=0.0 ) - # Receiver list + # xceiver list # Define receivers at each location. b_receiver = tdem.receivers.PointMagneticFluxDensity( diff --git a/tests/em/em1d/test_EM1D_TD_off_fwd.py b/tests/em/em1d/test_EM1D_TD_off_fwd.py index 78b45f61ab..24acd91504 100644 --- a/tests/em/em1d/test_EM1D_TD_off_fwd.py +++ b/tests/em/em1d/test_EM1D_TD_off_fwd.py @@ -91,7 +91,7 @@ def test_line_current_failures(self): rx_locs, times, orientation="z", use_source_receiver_offset=False ) src = tdem.sources.LineCurrent([rx], tx_locs) - survey = tdem.Survey(src) + survey = tdem.Survey([src]) with self.assertRaises(ValueError): tdem.Simulation1DLayered(survey) @@ -103,7 +103,7 @@ def test_line_current_failures(self): [2.5, 2.5, 0], ] src = tdem.sources.LineCurrent([rx], tx_locs) - survey = tdem.Survey(src) + survey = tdem.Survey([src]) tdem.Simulation1DLayered(survey) assert src.n_segments == 4 From 879248a8d65e5450ef47463b0f787345b3be249d Mon Sep 17 00:00:00 2001 From: sgkang Date: Mon, 13 Nov 2023 17:04:14 -0800 Subject: [PATCH 101/164] test --- .../em1d/test_Stitched_EM1D_FD_jac_layers.py | 131 ++++++++++++++++++ 1 file changed, 131 insertions(+) create mode 100644 tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py diff --git a/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py b/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py new file mode 100644 index 0000000000..513d5d8fc8 --- /dev/null +++ b/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py @@ -0,0 +1,131 @@ +from __future__ import print_function +import unittest +import numpy as np 
+import SimPEG.electromagnetics.frequency_domain as fdem +from SimPEG import * +from discretize import TensorMesh +from pymatsolver import PardisoSolver + +np.random.seed(41) + + +class STITCHED_EM1D_FD_Jacobian_Test_MagDipole(unittest.TestCase): + + def setUp(self, parallel=False): + + dz = 1 + geometric_factor = 1.1 + n_layer = 20 + thicknesses = dz * geometric_factor ** np.arange(n_layer-1) + + frequencies = np.array([900, 7200, 56000], dtype=float) + n_sounding = 50 + dx = 20. + hx = np.ones(n_sounding) * dx + hz = np.r_[thicknesses, thicknesses[-1]] + + mesh = TensorMesh([hx, hz], x0='00') + + x = mesh.cell_centers_x + y = np.zeros_like(x) + z = np.ones_like(x) * 30. + receiver_locations = np.c_[x+8., y, z] + source_locations = np.c_[x, y, z] + topo = np.c_[x, y, z-30.].astype(float) + + sigma_map = maps.ExpMap(mesh) + + source_list = [] + + for i_sounding in range(0, n_sounding): + + source_location = mkvc(source_locations[i_sounding, :]) + receiver_location = mkvc(receiver_locations[i_sounding, :]) + receiver_list = [] + receiver_list.append( + fdem.receivers.PointMagneticFieldSecondary( + receiver_location, + orientation="z", + component="both" + ) + ) + + for i_freq, frequency in enumerate(frequencies): + src = fdem.sources.MagDipole( + receiver_list, frequency, source_location, + orientation="z", i_sounding=i_sounding + ) + source_list.append(src) + + survey = fdem.Survey(source_list) + + simulation = fdem.Simulation1DLayeredStitched( + survey=survey, thicknesses=thicknesses, sigmaMap=sigma_map, + topo=topo, parallel=parallel, n_cpu=2, verbose=False + ) + self.sim = simulation + self.mesh = mesh + + def test_EM1DFDJvec_Layers(self): + # Conductivity + inds = self.mesh.cell_centers[:, 1] < 25 + inds_1 = self.mesh.cell_centers[:, 1] < 50 + sigma = np.ones(self.mesh.n_cells) * 1./100. + sigma[inds_1] = 1./10. + sigma[inds] = 1./50. 
+ sigma_em1d = sigma.reshape(self.mesh.vnC, order='F').flatten() + m_stitched = np.log(sigma_em1d) + + def fwdfun(m): + resp = self.sim.dpred(m) + return resp + # return Hz + + def jacfun(m, dm): + Jvec = self.sim.Jvec(m, dm) + return Jvec + + def derChk(m): + return [fwdfun(m), lambda mx: jacfun(m, mx)] + + dm = m_stitched * 0.5 + + passed = tests.check_derivative( + derChk, m_stitched, num=4, dx=dm, plotIt=False, eps=1e-15 + ) + self.assertTrue(passed) + if passed: + print("STITCHED EM1DFM MagDipole Jvec test works") + + def test_EM1DFDJtvec_Layers(self): + # Conductivity + inds = self.mesh.cell_centers[:, 1] < 25 + inds_1 = self.mesh.cell_centers[:, 1] < 50 + sigma = np.ones(self.mesh.n_cells) * 1./100. + sigma[inds_1] = 1./10. + sigma[inds] = 1./50. + sigma_em1d = sigma.reshape(self.mesh.vnC, order='F').flatten() + m_stitched = np.log(sigma_em1d) + + dobs = self.sim.dpred(m_stitched) + + m_ini = np.log(1./100.) * np.ones(self.mesh.n_cells) + resp_ini = self.sim.dpred(m_ini) + dr = resp_ini - dobs + + def misfit(m, dobs): + dpred = self.sim.dpred(m) + misfit = 0.5 * np.linalg.norm(dpred - dobs) ** 2 + dmisfit = self.sim.Jtvec(m, dr) + return misfit, dmisfit + + def derChk(m): + return misfit(m, dobs) + + passed = tests.check_derivative(derChk, m_ini, num=4, plotIt=False, eps=1e-27) + self.assertTrue(passed) + if passed: + print("STITCHED EM1DFM MagDipole Jtvec test works") + +if __name__ == '__main__': + unittest.main() \ No newline at end of file From 8880bb1ee602732e551066e6884146b028ce94bb Mon Sep 17 00:00:00 2001 From: sgkang Date: Tue, 14 Nov 2023 10:06:44 -0800 Subject: [PATCH 102/164] add tdem sensitivity test --- .../em1d/test_Stitched_EM1D_TD_jac_layers.py | 172 ++++++++++++++++++ 1 file changed, 172 insertions(+) create mode 100644 tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py diff --git a/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py b/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py new file mode 100644 index 0000000000..814adb8bfe --- 
/dev/null +++ b/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py @@ -0,0 +1,172 @@ +from __future__ import print_function +import unittest +import numpy as np +import SimPEG.electromagnetics.time_domain as tdem +from SimPEG import * +from discretize import TensorMesh +from pymatsolver import PardisoSolver + +np.random.seed(41) + + +class STITCHED_EM1D_TD_Jacobian_Test_MagDipole(unittest.TestCase): + + def setUp(self, parallel=False): + + times_hm = np.logspace(-6, -3, 31) + times_lm = np.logspace(-5, -2, 31) + + # Waveforms + waveform_hm = tdem.sources.TriangularWaveform( + start_time=-0.01, peak_time=-0.005, off_time=0.0 + ) + waveform_lm = tdem.sources.TriangularWaveform( + start_time=-0.01, peak_time=-0.0001, off_time=0.0 + ) + + dz = 1 + geometric_factor = 1.1 + n_layer = 20 + thicknesses = dz * geometric_factor ** np.arange(n_layer-1) + n_layer = 20 + + n_sounding = 5 + dx = 20. + hx = np.ones(n_sounding) * dx + hz = np.r_[thicknesses, thicknesses[-1]] + mesh = TensorMesh([hx, hz], x0='00') + inds = mesh.cell_centers[:, 1] < 25 + inds_1 = mesh.cell_centers[:, 1] < 50 + sigma = np.ones(mesh.nC) * 1./100. + sigma[inds_1] = 1./10. + sigma[inds] = 1./50. + sigma_em1d = sigma.reshape(mesh.vnC, order='F').flatten() + mSynth = np.log(sigma_em1d) + + x = mesh.cell_centers_x + y = np.zeros_like(x) + z = np.ones_like(x) * 30. + source_locations = np.c_[x, y, z] + source_current = 1. + source_orientation = 'z' + source_radius = 10. + + receiver_offset_r = 13.25 + receiver_offset_z = 2. + + receiver_locations = np.c_[x+receiver_offset_r, np.zeros(n_sounding), 30.*np.ones(n_sounding)+receiver_offset_z] + receiver_orientation = "z" # "x", "y" or "z" + + topo = np.c_[x, y, z-30.].astype(float) + + sigma_map = maps.ExpMap(mesh) + + source_list = [] + + for i_sounding in range(0, n_sounding): + + source_location = source_locations[i_sounding, :] + receiver_location = receiver_locations[i_sounding, :] + + # Receiver list + + # Define receivers at each location. 
+ dbzdt_receiver_hm = tdem.receivers.PointMagneticFluxTimeDerivative( + receiver_location, times_hm, receiver_orientation + ) + dbzdt_receiver_lm = tdem.receivers.PointMagneticFluxTimeDerivative( + receiver_location, times_lm, receiver_orientation + ) + # Make a list containing all receivers even if just one + + # Must define the transmitter properties and associated receivers + source_list.append(tdem.sources.MagDipole( + [dbzdt_receiver_hm], + location=source_location, + waveform=waveform_hm, + orientation=source_orientation, + i_sounding=i_sounding, + ) + ) + + source_list.append(tdem.sources.MagDipole( + [dbzdt_receiver_lm], + location=source_location, + waveform=waveform_lm, + orientation=source_orientation, + i_sounding=i_sounding, + ) + ) + survey = tdem.Survey(source_list) + + simulation = tdem.Simulation1DLayeredStitched( + survey=survey, thicknesses=thicknesses, sigmaMap=sigma_map, + topo=topo, parallel=False, n_cpu=2, verbose=False, solver=PardisoSolver + ) + + self.sim = simulation + self.mesh = mesh + + def test_EM1DFDJvec_Layers(self): + # Conductivity + inds = self.mesh.cell_centers[:, 1] < 25 + inds_1 = self.mesh.cell_centers[:, 1] < 50 + sigma = np.ones(self.mesh.n_cells) * 1./100. + sigma[inds_1] = 1./10. + sigma[inds] = 1./50. + sigma_em1d = sigma.reshape(self.mesh.vnC, order='F').flatten() + m_stitched = np.log(sigma_em1d) + + def fwdfun(m): + resp = self.sim.dpred(m) + return resp + # return Hz + + def jacfun(m, dm): + Jvec = self.sim.Jvec(m, dm) + return Jvec + + def derChk(m): + return [fwdfun(m), lambda mx: jacfun(m, mx)] + + dm = m_stitched * 0.5 + + passed = tests.check_derivative( + derChk, m_stitched, num=4, dx=dm, plotIt=False, eps=1e-15 + ) + self.assertTrue(passed) + if passed: + print("STITCHED EM1DFM MagDipole Jvec test works") + + def test_EM1DFDJtvec_Layers(self): + # Conductivity + inds = self.mesh.cell_centers[:, 1] < 25 + inds_1 = self.mesh.cell_centers[:, 1] < 50 + sigma = np.ones(self.mesh.n_cells) * 1./100. 
+ sigma[inds_1] = 1./10. + sigma[inds] = 1./50. + sigma_em1d = sigma.reshape(self.mesh.vnC, order='F').flatten() + m_stitched = np.log(sigma_em1d) + + dobs = self.sim.dpred(m_stitched) + + m_ini = np.log(1./100.) * np.ones(self.mesh.n_cells) + resp_ini = self.sim.dpred(m_ini) + dr = resp_ini - dobs + + def misfit(m, dobs): + dpred = self.sim.dpred(m) + misfit = 0.5 * np.linalg.norm(dpred - dobs) ** 2 + dmisfit = self.sim.Jtvec(m, dr) + return misfit, dmisfit + + def derChk(m): + return misfit(m, dobs) + + passed = tests.check_derivative(derChk, m_ini, num=4, plotIt=False, eps=1e-27) + self.assertTrue(passed) + if passed: + print("STITCHED EM1DFM MagDipole Jtvec test works") + +if __name__ == '__main__': + unittest.main() \ No newline at end of file From ac41c8eb5726bdf563b7711ebc8e183810e3cee5 Mon Sep 17 00:00:00 2001 From: sgkang Date: Tue, 14 Nov 2023 22:46:43 -0800 Subject: [PATCH 103/164] working height inversion for both td and fd. --- SimPEG/electromagnetics/base_1d.py | 5 +- SimPEG/electromagnetics/base_1d_stitched.py | 68 ++++++++----------- .../frequency_domain/simulation_1d.py | 42 ++++++------ .../simulation_1d_stitched.py | 59 +++------------- .../time_domain/simulation_1d.py | 28 +++++--- .../time_domain/simulation_1d_stitched.py | 38 +++-------- .../regularization_mesh_lateral.py | 2 +- tests/em/em1d/test_EM1D_FD_jac_layers.py | 6 +- .../em1d/test_Stitched_EM1D_FD_jac_layers.py | 13 ++-- .../em1d/test_Stitched_EM1D_TD_jac_layers.py | 18 +++-- 10 files changed, 113 insertions(+), 166 deletions(-) diff --git a/SimPEG/electromagnetics/base_1d.py b/SimPEG/electromagnetics/base_1d.py index 2157f31699..78d9d2cc84 100644 --- a/SimPEG/electromagnetics/base_1d.py +++ b/SimPEG/electromagnetics/base_1d.py @@ -357,7 +357,8 @@ def _compute_hankel_coefficients(self): Is = [] n_w_past = 0 i_count = 0 - + # Note: coefficients are needed to be updated if we are + # inverting for the source height. 
if self.hMap is not None: hvec = self.h # source height above topo @@ -372,7 +373,7 @@ def _compute_hankel_coefficients(self): if is_circular_loop: if np.any(src.orientation[:-1] != 0.0): raise ValueError("Can only simulate horizontal circular loops") - # Note: this assumes a fixed height for all sources + if self.hMap is not None: h = hvec[i_src] else: diff --git a/SimPEG/electromagnetics/base_1d_stitched.py b/SimPEG/electromagnetics/base_1d_stitched.py index f44b89929d..930395cd43 100644 --- a/SimPEG/electromagnetics/base_1d_stitched.py +++ b/SimPEG/electromagnetics/base_1d_stitched.py @@ -24,8 +24,8 @@ class BaseStitchedEM1DSimulation(BaseSimulation): """ _formulation = "1D" - _coefficients = [] - _coefficients_set = False + # _coefficients = [] + # _coefficients_set = False # _Jmatrix_sigma = None # _Jmatrix_height = None @@ -361,32 +361,32 @@ def input_args(self, i_sounding, output_type='forward'): self.Tau2[i_sounding, :], self.H[i_sounding], output_type, - False, - self._coefficients[i_sounding], + # False, + # self._coefficients[i_sounding], ) return output # This is the most expensive process, but required once # May need to find unique - def input_args_for_coeff(self, i_sounding): - output = ( - self.survey.get_sources_by_sounding_number(i_sounding), - self.topo[i_sounding, :], - self.Thicknesses[i_sounding,:], - self.Sigma[i_sounding, :], - self.Eta[i_sounding, :], - self.Tau[i_sounding, :], - self.C[i_sounding, :], - self.Chi[i_sounding, :], - self.dChi[i_sounding, :], - self.Tau1[i_sounding, :], - self.Tau2[i_sounding, :], - self.H[i_sounding], - 'forward', - True, - [], - ) - return output + # def input_args_for_coeff(self, i_sounding): + # output = ( + # self.survey.get_sources_by_sounding_number(i_sounding), + # self.topo[i_sounding, :], + # self.Thicknesses[i_sounding,:], + # self.Sigma[i_sounding, :], + # self.Eta[i_sounding, :], + # self.Tau[i_sounding, :], + # self.C[i_sounding, :], + # self.Chi[i_sounding, :], + # self.dChi[i_sounding, :], + # 
self.Tau1[i_sounding, :], + # self.Tau2[i_sounding, :], + # self.H[i_sounding], + # 'forward', + # True, + # [], + # ) + # return output def fields(self, m): if self.verbose: @@ -455,7 +455,7 @@ def set_ij_n_layer(self, n_layer=None): m = self.n_layer else: m = n_layer - source_location_by_sounding_dict = self.survey.source_location_by_sounding_dict + for i_sounding in range(self.n_sounding): n = self.survey.vnD_by_sounding_dict[i_sounding] J_temp = np.tile(np.arange(m), (n, 1)) + shift_for_J @@ -477,24 +477,13 @@ def set_ij_height(self): This will be used in GlobalEM1DSimulation when after sensitivity matrix for each sounding is computed """ - I = [] J = [] - shift_for_J = 0 - shift_for_I = 0 m = self.n_layer + I = np.arange(self.survey.nD) for i_sounding in range(self.n_sounding): n = self.survey.vnD_by_sounding_dict[i_sounding] - J_temp = np.tile(np.arange(m), (n, 1)) + shift_for_J - I_temp = ( - np.tile(np.arange(n), (1, m)).reshape((n, m), order='F') + - shift_for_I - ) - J.append(utils.mkvc(J_temp)) - I.append(utils.mkvc(I_temp)) - shift_for_J += m - shift_for_I = I_temp[-1, -1] + 1 + J.append(np.ones(n)*i_sounding) J = np.hstack(J).astype(int) - I = np.hstack(I).astype(int) return (I, J) def Jvec(self, m, v, f=None): @@ -507,12 +496,13 @@ def Jvec(self, m, v, f=None): def Jtvec(self, m, v, f=None): J_sigma = self.getJ_sigma(m) - Jtv = self.sigmaDeriv.T @ (J_sigma.T*v) + Jtv = self.sigmaDeriv.T @ (J_sigma.T@v) if self.hMap is not None: J_height = self.getJ_height(m) - Jtv += self.hDeriv.T*(J_height.T*v) + Jtv += self.hDeriv.T @ (J_height.T@v) return Jtv + # Revisit this def getJtJdiag(self, m, W=None, threshold=1e-8): """ Compute diagonal component of JtJ or diff --git a/SimPEG/electromagnetics/frequency_domain/simulation_1d.py b/SimPEG/electromagnetics/frequency_domain/simulation_1d.py index 80912f7271..3451574fd5 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation_1d.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation_1d.py @@ -149,28 
+149,30 @@ def getJ(self, m, f=None): # Grab a copy C0s_dh = C0s.copy() C1s_dh = C1s.copy() - h_vec = self.h - i = 0 - for i_src, src in enumerate(self.survey.source_list): - class_name = type(src).__name__ - is_wire_loop = class_name == "LineCurrent" - - h = h_vec[i_src] - if is_wire_loop: - n_quad_points = src.n_segments * self.n_points_per_path - nD = sum( - rx.locations.shape[0] * n_quad_points - for rx in src.receiver_list - ) - else: - nD = sum(rx.locations.shape[0] for rx in src.receiver_list) - ip1 = i + nD - v = np.exp(-lambs[i:ip1] * h) - C0s_dh[i:ip1] *= v * -lambs[i:ip1] - C1s_dh[i:ip1] *= v * -lambs[i:ip1] - i = ip1 + # h_vec = self.h + # i = 0 + # for i_src, src in enumerate(self.survey.source_list): + # class_name = type(src).__name__ + # is_wire_loop = class_name == "LineCurrent" + + # # h = h_vec[i_src] + # if is_wire_loop: + # n_quad_points = src.n_segments * self.n_points_per_path + # nD = sum( + # rx.locations.shape[0] * n_quad_points + # for rx in src.receiver_list + # ) + # else: + # nD = sum(rx.locations.shape[0] for rx in src.receiver_list) + # ip1 = i + nD + # # v = np.exp(-lambs[i:ip1] * h) + # C0s_dh[i:ip1] *= - lambs[i:ip1] + # C1s_dh[i:ip1] *= - lambs[i:ip1] + # i = ip1 # J will be n_d * n_src (each source has it's own h)... 
+ C0s_dh *= - lambs + C1s_dh *= - lambs rTE = rTE_forward(frequencies, unique_lambs, sig, mu, self.thicknesses) rTE = rTE[i_freq] rTE = np.take_along_axis(rTE, inv_lambs, axis=1) diff --git a/SimPEG/electromagnetics/frequency_domain/simulation_1d_stitched.py b/SimPEG/electromagnetics/frequency_domain/simulation_1d_stitched.py index 85b46a4fef..b7aaf69faa 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation_1d_stitched.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation_1d_stitched.py @@ -43,20 +43,14 @@ def run_simulation_frequency_domain(args): tau2, h, output_type, - return_projection, - coefficients ) = args n_layer = len(thicknesses) + 1 local_survey = Survey(source_list) n_src = len(source_list) - if output_type == "sensitivity": - wires = maps.Wires(("sigma", n_layer), ("h", n_src)) - sigma_map = wires.sigma - h_map = wires.h - elif output_type == "forward": - sigma_map = maps.IdentityMap(nP=n_layer) - h_map = None + wires = maps.Wires(("sigma", n_layer), ("h", n_src)) + sigma_map = wires.sigma + h_map = wires.h sim = Simulation1DLayered( survey=local_survey, @@ -70,17 +64,13 @@ def run_simulation_frequency_domain(args): hankel_filter="key_101_2009", ) - if return_projection: - return sim.get_coefficients() - - sim._set_coefficients(coefficients) - + model = np.r_[sigma, h * np.ones(n_src)] if output_type == "sensitivity": - J = sim.getJ(np.r_[sigma, h * np.ones(n_src)]) + J = sim.getJ(model) J['dh'] = J['dh'].sum(axis=1) return J else: - em_response = sim.dpred(sigma) + em_response = sim.dpred(model) return em_response @@ -93,45 +83,12 @@ class Simulation1DLayeredStitched(BaseStitchedEM1DSimulation): _simulation_type = 'frequency' - def get_coefficients(self): - - run_simulation = run_simulation_frequency_domain - - if self.verbose: - print(">> Calculate coefficients") - if self.parallel: - pool = Pool(self.n_cpu) - self._coefficients = pool.map( - run_simulation, - [ - self.input_args_for_coeff(i) for i in range(self.n_sounding) - ] - ) 
- self._coefficients_set = True - pool.close() - pool.join() - else: - self._coefficients = [ - run_simulation(self.input_args_for_coeff(i)) for i in range(self.n_sounding) - ] - def forward(self, m): self.model = m if self.verbose: print(">> Compute response") - # Set flat topo at zero - # if self.topo is None: - - - # TODO: Need to pull separate hankel coeffcients - # and A matrix for convolution - # hankel coefficients vary with variable height! - - if self._coefficients_set is False: - self.get_coefficients() - run_simulation = run_simulation_frequency_domain if self.parallel: @@ -167,8 +124,8 @@ def getJ(self, m): if self.verbose: print(">> Compute J") - if self._coefficients_set is False: - self.get_coefficients() + # if self._coefficients_set is False: + # self.get_coefficients() run_simulation = run_simulation_frequency_domain diff --git a/SimPEG/electromagnetics/time_domain/simulation_1d.py b/SimPEG/electromagnetics/time_domain/simulation_1d.py index 2ea7a76b87..c984da00a3 100644 --- a/SimPEG/electromagnetics/time_domain/simulation_1d.py +++ b/SimPEG/electromagnetics/time_domain/simulation_1d.py @@ -264,17 +264,23 @@ def getJ(self, m, f=None): # Grab a copy C0s_dh = C0s.copy() C1s_dh = C1s.copy() - h_vec = self.h - i = 0 - for i_src, src in enumerate(self.survey.source_list): - h = h_vec[i_src] - nD = sum(rx.locations.shape[0] for rx in src.receiver_list) - ip1 = i + nD - v = np.exp(-lambs[i:ip1] * h) - C0s_dh[i:ip1] *= v * -lambs[i:ip1] - C1s_dh[i:ip1] *= v * -lambs[i:ip1] - i = ip1 - # J will be n_d * n_src (each source has it's own h)... 
+ # h_vec = self.h + # i = 0 + # for i_src, src in enumerate(self.survey.source_list): + # rx = src.receiver_list[0] + # h = h_vec[i_src] + # # if rx.use_source_receiver_offset: + # # dz = rx.locations[:, 2] + # # else: + # # dz = rx.locations[:, 2] - src.location[2] + # nD = sum(rx.locations.shape[0] for rx in src.receiver_list) + # ip1 = i + nD + # C0s_dh[i:ip1] *= 2 * -lambs[i:ip1] + # C1s_dh[i:ip1] *= 2 * -lambs[i:ip1] + # i = ip1 + # # J will be n_d * n_src (each source has it's own h)... + C0s_dh *= 2 * -lambs + C1s_dh *= 2 * -lambs rTE = rTE_forward(frequencies, unique_lambs, sig, mu, self.thicknesses) rTE = rTE[:, inv_lambs] diff --git a/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py b/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py index 829e7e7d7b..8002dff4f3 100644 --- a/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py +++ b/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py @@ -45,21 +45,17 @@ def run_simulation_time_domain(args): tau2, h, output_type, - return_projection, - coefficients + # return_projection, + # coefficients ) = args n_layer = len(thicknesses) + 1 n_src = len(source_list) local_survey = Survey(source_list) - if output_type == "sensitivity": - wires = maps.Wires(("sigma", n_layer), ("h", n_src)) - sigma_map = wires.sigma - h_map = wires.h - elif output_type == "forward": - sigma_map = maps.IdentityMap(nP=n_layer) - h_map = None + wires = maps.Wires(("sigma", n_layer), ("h", n_src)) + sigma_map = wires.sigma + h_map = wires.h sim = Simulation1DLayered( survey=local_survey, @@ -72,19 +68,14 @@ def run_simulation_time_domain(args): topo=topo, hankel_filter="key_101_2009", ) - - if return_projection: - return sim.get_coefficients() - - sim._set_coefficients(coefficients) - + model = np.r_[sigma, h*np.ones(n_src)] if output_type == "sensitivity": - J = sim.getJ(np.r_[sigma, h*np.ones(n_src)]) + J = sim.getJ(model) # we assumed the tx heights in a sounding is fixed - J['dh'] = 
J['dh'].sum(axis=1) + J['dh'] = J['dh'].sum(axis=1) return J else: - em_response = sim.dpred(sigma) + em_response = sim.dpred(model) return em_response ####################################################################### @@ -126,14 +117,6 @@ def forward(self, m): # Set flat topo at zero # if self.topo is None: - - - # TODO: Need to pull separate hankel coeffcients - # and A matrix for convolution - # hankel coefficients vary with variable height! - - if self._coefficients_set is False: - self.get_coefficients() run_simulation = run_simulation_time_domain @@ -170,9 +153,6 @@ def getJ(self, m): if self.verbose: print(">> Compute J") - if self._coefficients_set is False: - self.get_coefficients() - run_simulation = run_simulation_time_domain if self.parallel: diff --git a/SimPEG/regularization/regularization_mesh_lateral.py b/SimPEG/regularization/regularization_mesh_lateral.py index 2338487123..94a7385c5e 100644 --- a/SimPEG/regularization/regularization_mesh_lateral.py +++ b/SimPEG/regularization/regularization_mesh_lateral.py @@ -53,7 +53,7 @@ def active_cells(self, values: np.ndarray): "The RegulatizationMesh already has an 'active_cells' property set." 
) if values is not None: - values = validate_active_indices("values", values, self.mesh.nC) + values = validate_active_indices("values", values, self.nC) # Ensure any cached operators created when # active_cells was None are deleted self._vol = None diff --git a/tests/em/em1d/test_EM1D_FD_jac_layers.py b/tests/em/em1d/test_EM1D_FD_jac_layers.py index 83c78f9758..fa495e0511 100644 --- a/tests/em/em1d/test_EM1D_FD_jac_layers.py +++ b/tests/em/em1d/test_EM1D_FD_jac_layers.py @@ -18,7 +18,7 @@ def setUp(self): deepthick = np.logspace(1, 2, 10) thicknesses = np.r_[nearthick, deepthick] topo = np.r_[0.0, 0.0, 100.0] - + # Survey Geometry height = 1e-5 src_location = np.array([0.0, 0.0, 100.0 + height]) @@ -148,7 +148,7 @@ def test_EM1DFDJtvec_Layers(self): np.log(np.ones(self.nlayers) * sigma_half), np.log(np.ones(self.nlayers) * 1.5 * mu_half), np.log(self.thicknesses) * 0.9, - np.log(0.5 * self.height), + np.log(self.height) *1.5, ] resp_ini = self.sim.dpred(m_ini) dr = resp_ini - dobs @@ -307,7 +307,7 @@ def test_EM1DFDJtvec_Layers(self): np.log(np.ones(self.nlayers) * sigma_half), np.log(np.ones(self.nlayers) * 1.5 * mu_half), np.log(self.thicknesses) * 0.9, - np.log(0.5 * self.height), + np.log(self.height) * 1.5, ] resp_ini = self.sim.dpred(m_ini) dr = resp_ini - dobs diff --git a/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py b/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py index 513d5d8fc8..81006f7b84 100644 --- a/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py +++ b/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py @@ -58,9 +58,14 @@ def setUp(self, parallel=False): source_list.append(src) survey = fdem.Survey(source_list) + wires = maps.Wires(('sigma', n_layer*n_sounding), ('h', n_sounding)) + sigmaMap = maps.ExpMap(nP=n_layer*n_sounding) * wires.sigma + hMap = maps.ExpMap(nP=n_sounding) * wires.h simulation = fdem.Simulation1DLayeredStitched( - survey=survey, thicknesses=thicknesses, sigmaMap=sigma_map, + survey=survey, thicknesses=thicknesses, + 
sigmaMap=sigmaMap, + hMap=hMap, topo=topo, parallel=parallel, n_cpu=2, verbose=False ) self.sim = simulation @@ -74,7 +79,7 @@ def test_EM1DFDJvec_Layers(self): sigma[inds_1] = 1./10. sigma[inds] = 1./50. sigma_em1d = sigma.reshape(self.mesh.vnC, order='F').flatten() - m_stitched = np.log(sigma_em1d) + m_stitched = np.r_[np.log(sigma_em1d), np.ones(self.sim.n_sounding)*np.log(30.)] def fwdfun(m): resp = self.sim.dpred(m) @@ -105,11 +110,11 @@ def test_EM1DFDJtvec_Layers(self): sigma[inds_1] = 1./10. sigma[inds] = 1./50. sigma_em1d = sigma.reshape(self.mesh.vnC, order='F').flatten() - m_stitched = np.log(sigma_em1d) + m_stitched = np.r_[np.log(sigma_em1d), np.ones(self.sim.n_sounding)*np.log(30.)] dobs = self.sim.dpred(m_stitched) - m_ini = np.log(1./100.) * np.ones(self.mesh.n_cells) + m_ini = np.r_[np.log(1./100.) * np.ones(self.mesh.n_cells), np.ones(self.sim.n_sounding)*np.log(30.)*1.5] resp_ini = self.sim.dpred(m_ini) dr = resp_ini - dobs diff --git a/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py b/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py index 814adb8bfe..cea7db7613 100644 --- a/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py +++ b/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py @@ -98,16 +98,22 @@ def setUp(self, parallel=False): ) ) survey = tdem.Survey(source_list) + wires = maps.Wires(('sigma', n_layer*n_sounding), ('h', n_sounding)) + sigmaMap = maps.ExpMap(nP=n_layer*n_sounding) * wires.sigma + hMap = maps.ExpMap(nP=n_sounding) * wires.h simulation = tdem.Simulation1DLayeredStitched( - survey=survey, thicknesses=thicknesses, sigmaMap=sigma_map, + survey=survey, thicknesses=thicknesses, + sigmaMap=sigmaMap, + hMap=hMap, topo=topo, parallel=False, n_cpu=2, verbose=False, solver=PardisoSolver ) self.sim = simulation self.mesh = mesh + - def test_EM1DFDJvec_Layers(self): + def test_EM1TDJvec_Layers(self): # Conductivity inds = self.mesh.cell_centers[:, 1] < 25 inds_1 = self.mesh.cell_centers[:, 1] < 50 @@ -115,7 +121,7 @@ def 
test_EM1DFDJvec_Layers(self): sigma[inds_1] = 1./10. sigma[inds] = 1./50. sigma_em1d = sigma.reshape(self.mesh.vnC, order='F').flatten() - m_stitched = np.log(sigma_em1d) + m_stitched = np.r_[np.log(sigma_em1d), np.ones(self.sim.n_sounding)*np.log(30.)] def fwdfun(m): resp = self.sim.dpred(m) @@ -138,7 +144,7 @@ def derChk(m): if passed: print("STITCHED EM1DFM MagDipole Jvec test works") - def test_EM1DFDJtvec_Layers(self): + def test_EM1TDJtvec_Layers(self): # Conductivity inds = self.mesh.cell_centers[:, 1] < 25 inds_1 = self.mesh.cell_centers[:, 1] < 50 @@ -146,11 +152,11 @@ def test_EM1DFDJtvec_Layers(self): sigma[inds_1] = 1./10. sigma[inds] = 1./50. sigma_em1d = sigma.reshape(self.mesh.vnC, order='F').flatten() - m_stitched = np.log(sigma_em1d) + m_stitched = m_stitched = np.r_[np.log(sigma_em1d), np.ones(self.sim.n_sounding)*np.log(30.)] dobs = self.sim.dpred(m_stitched) - m_ini = np.log(1./100.) * np.ones(self.mesh.n_cells) + m_ini = np.r_[np.log(1./100.) * np.ones(self.mesh.n_cells), np.ones(self.sim.n_sounding)*np.log(30.)*1.5] resp_ini = self.sim.dpred(m_ini) dr = resp_ini - dobs From 4b66757885ef8b333c84039d0b2f22f748d024da Mon Sep 17 00:00:00 2001 From: sgkang Date: Wed, 15 Nov 2023 10:54:30 -0800 Subject: [PATCH 104/164] style update with blake and flake8 --- SimPEG/electromagnetics/base_1d.py | 8 +- SimPEG/electromagnetics/base_1d_stitched.py | 135 +++++----- .../frequency_domain/simulation_1d.py | 9 +- .../simulation_1d_stitched.py | 52 ++-- .../frequency_domain/sources.py | 2 +- .../frequency_domain/survey.py | 6 +- .../electromagnetics/time_domain/receivers.py | 17 +- .../time_domain/simulation_1d.py | 16 +- .../time_domain/simulation_1d_stitched.py | 68 ++--- SimPEG/electromagnetics/time_domain/survey.py | 9 +- SimPEG/electromagnetics/utils/em1d_utils.py | 233 ++++++++++-------- SimPEG/regularization/base.py | 2 +- .../regularization/laterally_constrained.py | 23 +- .../regularization_mesh_lateral.py | 71 +++--- 
tests/em/em1d/test_EM1D_FD_jac_layers.py | 4 +- .../em1d/test_Stitched_EM1D_FD_jac_layers.py | 75 +++--- .../em1d/test_Stitched_EM1D_TD_jac_layers.py | 114 +++++---- 17 files changed, 458 insertions(+), 386 deletions(-) diff --git a/SimPEG/electromagnetics/base_1d.py b/SimPEG/electromagnetics/base_1d.py index 78d9d2cc84..c05e869d40 100644 --- a/SimPEG/electromagnetics/base_1d.py +++ b/SimPEG/electromagnetics/base_1d.py @@ -357,7 +357,7 @@ def _compute_hankel_coefficients(self): Is = [] n_w_past = 0 i_count = 0 - # Note: coefficients are needed to be updated if we are + # Note: coefficients are needed to be updated if we are # inverting for the source height. if self.hMap is not None: hvec = self.h # source height above topo @@ -373,7 +373,7 @@ def _compute_hankel_coefficients(self): if is_circular_loop: if np.any(src.orientation[:-1] != 0.0): raise ValueError("Can only simulate horizontal circular loops") - + if self.hMap is not None: h = hvec[i_src] else: @@ -579,7 +579,7 @@ def deleteTheseOnModelUpdate(self): toDelete += ["_J", "_gtgdiag"] return toDelete - # TODO: need to revisit this: + # TODO: need to revisit this: def depth_of_investigation_christiansen_2012(self, std, thres_hold=0.8): pred = self.survey._pred.copy() delta_d = std * np.log(abs(self.survey.dobs)) @@ -617,4 +617,4 @@ def getJtJdiag(self, m, W=None, f=None): J = Js["dthick"] @ self.thicknessesDeriv out = out + np.einsum("i,ij,ij->j", W, J, J) self._gtgdiag = out - return self._gtgdiag \ No newline at end of file + return self._gtgdiag diff --git a/SimPEG/electromagnetics/base_1d_stitched.py b/SimPEG/electromagnetics/base_1d_stitched.py index 930395cd43..3892b851f5 100644 --- a/SimPEG/electromagnetics/base_1d_stitched.py +++ b/SimPEG/electromagnetics/base_1d_stitched.py @@ -5,10 +5,10 @@ from .. 
import utils from ..utils.code_utils import ( validate_integer, - validate_location_property, validate_ndarray_with_shape, validate_type, ) + ############################################################################### # # # BaseStitchedEM1DSimulation # @@ -17,6 +17,7 @@ __all__ = ["BaseStitchedEM1DSimulation"] + class BaseStitchedEM1DSimulation(BaseSimulation): """ Base class for the stitched 1D simulation. This simulation models the EM @@ -104,7 +105,7 @@ def __init__( self.topo = topo if self.topo is None: self.set_null_topography() - + self.parallel = parallel self.n_cpu = n_cpu @@ -144,7 +145,7 @@ def topo(self): @topo.setter def topo(self, value): - self._topo = validate_ndarray_with_shape("topo", value, shape=("*",3)) + self._topo = validate_ndarray_with_shape("topo", value, shape=("*", 3)) @property def parallel(self): @@ -172,7 +173,7 @@ def n_cpu(self): @n_cpu.setter def n_cpu(self, value): - self._n_cpu = validate_integer("n_cpu", value, min_val=1) + self._n_cpu = validate_integer("n_cpu", value, min_val=1) @property def invert_height(self): @@ -184,7 +185,7 @@ def invert_height(self): @property def halfspace_switch(self): """True = halfspace, False = layered Earth""" - if (self.thicknesses is None) | (len(self.thicknesses)==0): + if (self.thicknesses is None) | (len(self.thicknesses) == 0): return True else: return False @@ -200,37 +201,37 @@ def n_layer(self): def n_sounding(self): return len(self.survey.source_location_by_sounding_dict) - @property def data_index(self): return self.survey.data_index - # ------------- For physical properties ------------- # @property def Sigma(self): - if getattr(self, '_Sigma', None) is None: + if getattr(self, "_Sigma", None) is None: # Ordering: first z then x self._Sigma = self.sigma.reshape((self.n_sounding, self.n_layer)) return self._Sigma @property def Thicknesses(self): - if getattr(self, '_Thicknesses', None) is None: + if getattr(self, "_Thicknesses", None) is None: # Ordering: first z then x - if 
len(self.thicknesses) == int(self.n_sounding * (self.n_layer-1)): - self._Thicknesses = self.thicknesses.reshape((self.n_sounding, self.n_layer-1)) + if len(self.thicknesses) == int(self.n_sounding * (self.n_layer - 1)): + self._Thicknesses = self.thicknesses.reshape( + (self.n_sounding, self.n_layer - 1) + ) else: self._Thicknesses = np.tile(self.thicknesses, (self.n_sounding, 1)) return self._Thicknesses @property def Eta(self): - if getattr(self, '_Eta', None) is None: + if getattr(self, "_Eta", None) is None: # Ordering: first z then x if self.eta is None: self._Eta = np.zeros( - (self.n_sounding, self.n_layer), dtype=float, order='C' + (self.n_sounding, self.n_layer), dtype=float, order="C" ) else: self._Eta = self.eta.reshape((self.n_sounding, self.n_layer)) @@ -238,11 +239,11 @@ def Eta(self): @property def Tau(self): - if getattr(self, '_Tau', None) is None: + if getattr(self, "_Tau", None) is None: # Ordering: first z then x if self.tau is None: - self._Tau = 1e-3*np.ones( - (self.n_sounding, self.n_layer), dtype=float, order='C' + self._Tau = 1e-3 * np.ones( + (self.n_sounding, self.n_layer), dtype=float, order="C" ) else: self._Tau = self.tau.reshape((self.n_sounding, self.n_layer)) @@ -250,11 +251,11 @@ def Tau(self): @property def C(self): - if getattr(self, '_C', None) is None: + if getattr(self, "_C", None) is None: # Ordering: first z then x if self.c is None: self._C = np.ones( - (self.n_sounding, self.n_layer), dtype=float, order='C' + (self.n_sounding, self.n_layer), dtype=float, order="C" ) else: self._C = self.c.reshape((self.n_sounding, self.n_layer)) @@ -262,11 +263,11 @@ def C(self): @property def Chi(self): - if getattr(self, '_Chi', None) is None: + if getattr(self, "_Chi", None) is None: # Ordering: first z then x if self.chi is None: self._Chi = np.zeros( - (self.n_sounding, self.n_layer), dtype=float, order='C' + (self.n_sounding, self.n_layer), dtype=float, order="C" ) else: self._Chi = self.chi.reshape((self.n_sounding, self.n_layer)) 
@@ -274,11 +275,11 @@ def Chi(self): @property def dChi(self): - if getattr(self, '_dChi', None) is None: + if getattr(self, "_dChi", None) is None: # Ordering: first z then x if self.dchi is None: self._dChi = np.zeros( - (self.n_sounding, self.n_layer), dtype=float, order='C' + (self.n_sounding, self.n_layer), dtype=float, order="C" ) else: self._dChi = self.dchi.reshape((self.n_sounding, self.n_layer)) @@ -286,11 +287,11 @@ def dChi(self): @property def Tau1(self): - if getattr(self, '_Tau1', None) is None: + if getattr(self, "_Tau1", None) is None: # Ordering: first z then x if self.tau1 is None: self._Tau1 = 1e-10 * np.ones( - (self.n_sounding, self.n_layer), dtype=float, order='C' + (self.n_sounding, self.n_layer), dtype=float, order="C" ) else: self._Tau1 = self.tau1.reshape((self.n_sounding, self.n_layer)) @@ -298,11 +299,11 @@ def Tau1(self): @property def Tau2(self): - if getattr(self, '_Tau2', None) is None: + if getattr(self, "_Tau2", None) is None: # Ordering: first z then x if self.tau2 is None: - self._Tau2 = 100. * np.ones( - (self.n_sounding, self.n_layer), dtype=float, order='C' + self._Tau2 = 100.0 * np.ones( + (self.n_sounding, self.n_layer), dtype=float, order="C" ) else: self._Tau2 = self.tau2.reshape((self.n_sounding, self.n_layer)) @@ -318,23 +319,22 @@ def JtJ_height(self): @property def H(self): if self.hMap is None: - h = self.source_locations_for_sounding[:,2] - self.topo[:,2] + h = self.source_locations_for_sounding[:, 2] - self.topo[:, 2] return h else: return self.h - # ------------- Etcetra .... 
------------- # @property def IJLayers(self): - if getattr(self, '_IJLayers', None) is None: + if getattr(self, "_IJLayers", None) is None: # Ordering: first z then x self._IJLayers = self.set_ij_n_layer() return self._IJLayers @property def IJHeight(self): - if getattr(self, '_IJHeight', None) is None: + if getattr(self, "_IJHeight", None) is None: # Ordering: first z then x self._IJHeight = self.set_ij_n_layer(n_layer=1) return self._IJHeight @@ -342,15 +342,15 @@ def IJHeight(self): # ------------- For physics ------------- # def get_uniq_soundings(self): - self._sounding_types_uniq, self._ind_sounding_uniq = np.unique( - self.survey._sounding_types, return_index=True - ) + self._sounding_types_uniq, self._ind_sounding_uniq = np.unique( + self.survey._sounding_types, return_index=True + ) - def input_args(self, i_sounding, output_type='forward'): + def input_args(self, i_sounding, output_type="forward"): output = ( self.survey.get_sources_by_sounding_number(i_sounding), self.topo[i_sounding, :], - self.Thicknesses[i_sounding,:], + self.Thicknesses[i_sounding, :], self.Sigma[i_sounding, :], self.Eta[i_sounding, :], self.Tau[i_sounding, :], @@ -396,9 +396,9 @@ def fields(self, m): def dpred(self, m, f=None): """ - Return predicted data. - Predicted data, (`_pred`) are computed when - self.fields is called. + Return predicted data. + Predicted data, (`_pred`) are computed when + self.fields is called. 
""" if f is None: f = self.fields(m) @@ -407,17 +407,25 @@ def dpred(self, m, f=None): @property def sounding_number(self): - self._sounding_number = [key for key in self.survey.source_location_by_sounding_dict.keys()] + self._sounding_number = [ + key for key in self.survey.source_location_by_sounding_dict.keys() + ] return self._sounding_number @property def n_chunk(self): self._n_chunk = len(self.sounding_number_chunks) return self._n_chunk + @property def source_locations_for_sounding(self): - if getattr(self, '_source_locations_for_sounding', None) is None: - self._source_locations_for_sounding = np.vstack([self.survey._source_location_by_sounding_dict[ii][0] for ii in range(self.n_sounding)]) + if getattr(self, "_source_locations_for_sounding", None) is None: + self._source_locations_for_sounding = np.vstack( + [ + self.survey._source_location_by_sounding_dict[ii][0] + for ii in range(self.n_sounding) + ] + ) return self._source_locations_for_sounding # def chunks(self, lst, n): @@ -438,8 +446,7 @@ def source_locations_for_sounding(self): def set_null_topography(self): self.topo = self.source_locations_for_sounding.copy() - self.topo[:,2] = 0. 
- + self.topo[:, 2] = 0.0 def set_ij_n_layer(self, n_layer=None): """ @@ -460,8 +467,7 @@ def set_ij_n_layer(self, n_layer=None): n = self.survey.vnD_by_sounding_dict[i_sounding] J_temp = np.tile(np.arange(m), (n, 1)) + shift_for_J I_temp = ( - np.tile(np.arange(n), (1, m)).reshape((n, m), order='F') + - shift_for_I + np.tile(np.arange(n), (1, m)).reshape((n, m), order="F") + shift_for_I ) J.append(utils.mkvc(J_temp)) I.append(utils.mkvc(I_temp)) @@ -478,28 +484,27 @@ def set_ij_height(self): for each sounding is computed """ J = [] - m = self.n_layer I = np.arange(self.survey.nD) for i_sounding in range(self.n_sounding): n = self.survey.vnD_by_sounding_dict[i_sounding] - J.append(np.ones(n)*i_sounding) + J.append(np.ones(n) * i_sounding) J = np.hstack(J).astype(int) return (I, J) def Jvec(self, m, v, f=None): - J_sigma = self.getJ_sigma(m) - Jv = J_sigma@(self.sigmaDeriv@v) + J_sigma = self.getJ_sigma(m) + Jv = J_sigma @ (self.sigmaDeriv @ v) if self.hMap is not None: J_height = self.getJ_height(m) - Jv += J_height@(self.hDeriv@v) + Jv += J_height @ (self.hDeriv @ v) return Jv def Jtvec(self, m, v, f=None): - J_sigma = self.getJ_sigma(m) - Jtv = self.sigmaDeriv.T @ (J_sigma.T@v) + J_sigma = self.getJ_sigma(m) + Jtv = self.sigmaDeriv.T @ (J_sigma.T @ v) if self.hMap is not None: J_height = self.getJ_height(m) - Jtv += self.hDeriv.T @ (J_height.T@v) + Jtv += self.hDeriv.T @ (J_height.T @ v) return Jtv # Revisit this @@ -510,24 +515,30 @@ def getJtJdiag(self, m, W=None, threshold=1e-8): """ if getattr(self, "_gtgdiag", None) is None: J_sigma = self.getJ_sigma(m) - J_matrix = J_sigma@(self.sigmaDeriv) + J_matrix = J_sigma @ (self.sigmaDeriv) if self.hMap is not None: J_height = self.getJ_height(m) - J_matrix += J_height*self.hDeriv + J_matrix += J_height * self.hDeriv if W is None: W = utils.speye(J_matrix.shape[0]) - J_matrix = W*J_matrix - gtgdiag = (J_matrix.T*J_matrix).diagonal() + J_matrix = W * J_matrix + gtgdiag = (J_matrix.T * J_matrix).diagonal() gtgdiag /= 
gtgdiag.max() gtgdiag += threshold self._gtgdiag = gtgdiag return self._gtgdiag - + @property def deleteTheseOnModelUpdate(self): toDelete = super().deleteTheseOnModelUpdate - if self.fix_Jmatrix is False: - toDelete += ['_Sigma', '_J', '_Jmatrix_sigma', '_Jmatrix_height', '_gtg_diag'] - return toDelete \ No newline at end of file + if self.fix_Jmatrix is False: + toDelete += [ + "_Sigma", + "_J", + "_Jmatrix_sigma", + "_Jmatrix_height", + "_gtg_diag", + ] + return toDelete diff --git a/SimPEG/electromagnetics/frequency_domain/simulation_1d.py b/SimPEG/electromagnetics/frequency_domain/simulation_1d.py index 3451574fd5..bd80116632 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation_1d.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation_1d.py @@ -105,7 +105,7 @@ def fields(self, m): or sensitivities. """ self.model = m - + self._compute_coefficients() C0s = self._C0s @@ -169,10 +169,11 @@ def getJ(self, m, f=None): # C0s_dh[i:ip1] *= - lambs[i:ip1] # C1s_dh[i:ip1] *= - lambs[i:ip1] # i = ip1 - # J will be n_d * n_src (each source has it's own h)... + # J will be n_d * n_src (each source has it's own h)... - C0s_dh *= - lambs - C1s_dh *= - lambs + # It seems to be the 2 * lambs to be multiplied, but had to drop factor of 2 + C0s_dh *= -lambs + C1s_dh *= -lambs rTE = rTE_forward(frequencies, unique_lambs, sig, mu, self.thicknesses) rTE = rTE[i_freq] rTE = np.take_along_axis(rTE, inv_lambs, axis=1) diff --git a/SimPEG/electromagnetics/frequency_domain/simulation_1d_stitched.py b/SimPEG/electromagnetics/frequency_domain/simulation_1d_stitched.py index b7aaf69faa..928aa568ba 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation_1d_stitched.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation_1d_stitched.py @@ -7,6 +7,7 @@ from ... 
import maps from multiprocessing import Pool + def run_simulation_frequency_domain(args): """ This method simulates the EM response or computes the sensitivities for @@ -64,10 +65,10 @@ def run_simulation_frequency_domain(args): hankel_filter="key_101_2009", ) - model = np.r_[sigma, h * np.ones(n_src)] + model = np.r_[sigma, h * np.ones(n_src)] if output_type == "sensitivity": J = sim.getJ(model) - J['dh'] = J['dh'].sum(axis=1) + J["dh"] = J["dh"].sum(axis=1) return J else: em_response = sim.dpred(model) @@ -80,8 +81,7 @@ def run_simulation_frequency_domain(args): class Simulation1DLayeredStitched(BaseStitchedEM1DSimulation): - - _simulation_type = 'frequency' + _simulation_type = "frequency" def forward(self, m): self.model = m @@ -93,34 +93,34 @@ def forward(self, m): if self.parallel: if self.verbose: - print ('parallel') - #This assumes the same # of layers for each of sounding + print("parallel") + # This assumes the same # of layers for each of sounding # if self.n_sounding_for_chunk is None: pool = Pool(self.n_cpu) result = pool.map( run_simulation, [ - self.input_args(i, output_type='forward') for i in range(self.n_sounding) - ] + self.input_args(i, output_type="forward") + for i in range(self.n_sounding) + ], ) pool.close() pool.join() else: - result = [ - run_simulation(self.input_args(i, output_type='forward')) for i in range(self.n_sounding) + run_simulation(self.input_args(i, output_type="forward")) + for i in range(self.n_sounding) ] return np.hstack(result) def getJ(self, m): """ - Compute d F / d sigma + Compute d F / d sigma """ - self.model = m + self.model = m if getattr(self, "_J", None) is None: - if self.verbose: print(">> Compute J") @@ -141,38 +141,42 @@ def getJ(self, m): self._J = pool.map( run_simulation, [ - self.input_args(i, output_type='sensitivity') for i in range(self.n_sounding) - ] + self.input_args(i, output_type="sensitivity") + for i in range(self.n_sounding) + ], ) - if self.verbose: print(">> End pooling and form J matrix") 
else: self._J = [ - run_simulation(self.input_args(i, output_type='sensitivity')) for i in range(self.n_sounding) + run_simulation(self.input_args(i, output_type="sensitivity")) + for i in range(self.n_sounding) ] return self._J def getJ_sigma(self, m): """ - Compute d F / d sigma + Compute d F / d sigma """ if getattr(self, "_Jmatrix_sigma", None) is None: - J = self.getJ(m) - self._Jmatrix_sigma = np.hstack([utils.mkvc(J[i]['ds']) for i in range(self.n_sounding)]) + J = self.getJ(m) + self._Jmatrix_sigma = np.hstack( + [utils.mkvc(J[i]["ds"]) for i in range(self.n_sounding)] + ) self._Jmatrix_sigma = sp.coo_matrix( (self._Jmatrix_sigma, self.IJLayers), dtype=float ).tocsr() return self._Jmatrix_sigma - def getJ_height(self, m): if getattr(self, "_Jmatrix_height", None) is None: - J = self.getJ(m) - self._Jmatrix_height = np.hstack([utils.mkvc(J[i]['dh']) for i in range(self.n_sounding)]) + J = self.getJ(m) + self._Jmatrix_height = np.hstack( + [utils.mkvc(J[i]["dh"]) for i in range(self.n_sounding)] + ) self._Jmatrix_height = sp.coo_matrix( (self._Jmatrix_height, self.IJHeight), dtype=float ).tocsr() - return self._Jmatrix_height \ No newline at end of file + return self._Jmatrix_height diff --git a/SimPEG/electromagnetics/frequency_domain/sources.py b/SimPEG/electromagnetics/frequency_domain/sources.py index 94268f476e..61fac07c95 100644 --- a/SimPEG/electromagnetics/frequency_domain/sources.py +++ b/SimPEG/electromagnetics/frequency_domain/sources.py @@ -72,7 +72,7 @@ def i_sounding(self): @i_sounding.setter def i_sounding(self, value): - self._i_sounding = validate_integer("i_sounding", value, min_val=0) + self._i_sounding = validate_integer("i_sounding", value, min_val=0) def bPrimary(self, simulation): """Compute primary magnetic flux density diff --git a/SimPEG/electromagnetics/frequency_domain/survey.py b/SimPEG/electromagnetics/frequency_domain/survey.py index 7fe4b71b2e..65f71a9664 100644 --- a/SimPEG/electromagnetics/frequency_domain/survey.py +++ 
b/SimPEG/electromagnetics/frequency_domain/survey.py @@ -17,7 +17,7 @@ def __init__(self, source_list, **kwargs): _frequency_dict = {} _source_location_dict = {} - _source_location_by_sounding_dict = {} + _source_location_by_sounding_dict = {} for src in self.source_list: if src.frequency not in _frequency_dict: _frequency_dict[src.frequency] = [] @@ -28,7 +28,6 @@ def __init__(self, source_list, **kwargs): _source_location_dict[src.i_sounding] += [src] _source_location_by_sounding_dict[src.i_sounding] += [src.location] - self._frequency_dict = _frequency_dict self._frequencies = sorted([f for f in self._frequency_dict]) self._source_location_dict = _source_location_dict @@ -127,7 +126,6 @@ def get_sources_by_sounding_number(self, i_sounding): ), "The requested sounding is not in this survey." return self._source_location_dict[i_sounding] - @property def vnD_by_sounding_dict(self): if getattr(self, "_vnD_by_sounding_dict", None) is None: @@ -138,4 +136,4 @@ def vnD_by_sounding_dict(self): for src in source_list: nD += src.nD self._vnD_by_sounding_dict[i_sounding] = nD - return self._vnD_by_sounding_dict + return self._vnD_by_sounding_dict diff --git a/SimPEG/electromagnetics/time_domain/receivers.py b/SimPEG/electromagnetics/time_domain/receivers.py index de17420f8d..f8dd65b4c1 100644 --- a/SimPEG/electromagnetics/time_domain/receivers.py +++ b/SimPEG/electromagnetics/time_domain/receivers.py @@ -26,9 +26,9 @@ def __init__( orientation="z", use_source_receiver_offset=False, bw_cutoff_frequency=3e5, - bw_power=0., + bw_power=0.0, lp_cutoff_frequency=2.1e5, - lp_power=0., + lp_power=0.0, **kwargs ): proj = kwargs.pop("projComp", None) @@ -106,7 +106,9 @@ def bw_cutoff_frequency(self): @bw_cutoff_frequency.setter def bw_cutoff_frequency(self, var): - self._bw_cutoff_frequency = validate_float("bw_cutoff_frequency", var, min_val=0.) 
+ self._bw_cutoff_frequency = validate_float( + "bw_cutoff_frequency", var, min_val=0.0 + ) @property def lp_cutoff_frequency(self): @@ -121,8 +123,9 @@ def lp_cutoff_frequency(self): @lp_cutoff_frequency.setter def lp_cutoff_frequency(self, var): - self._lp_cutoff_frequency = validate_float("lp_cutoff_frequency", var, min_val=0.) - + self._lp_cutoff_frequency = validate_float( + "lp_cutoff_frequency", var, min_val=0.0 + ) @property def bw_power(self): @@ -137,7 +140,7 @@ def bw_power(self): @bw_power.setter def bw_power(self, var): - self._bw_power = validate_float("bw_power", var, min_val=0., max_val=2) + self._bw_power = validate_float("bw_power", var, min_val=0.0, max_val=2) @property def lp_power(self): @@ -152,7 +155,7 @@ def lp_power(self): @lp_power.setter def lp_power(self, var): - self._lp_power = validate_float("lp_power", var, min_val=0., max_val=0.99999) + self._lp_power = validate_float("lp_power", var, min_val=0.0, max_val=0.99999) def getSpatialP(self, mesh, f): """Get spatial projection matrix from mesh to receivers. diff --git a/SimPEG/electromagnetics/time_domain/simulation_1d.py b/SimPEG/electromagnetics/time_domain/simulation_1d.py index c984da00a3..2659cd1e38 100644 --- a/SimPEG/electromagnetics/time_domain/simulation_1d.py +++ b/SimPEG/electromagnetics/time_domain/simulation_1d.py @@ -79,7 +79,7 @@ def get_coefficients(self): self._inv_lambs, self._C0s, self._C1s, - self._W + self._W, ) def _set_coefficients(self, coefficients): @@ -222,7 +222,7 @@ def fields(self, m): or sensitivities. 
""" self.model = m - + self._compute_coefficients() C0s = self._C0s @@ -365,22 +365,22 @@ def _project_to_data(self, v): frequencies = self._frequencies w = 2 * np.pi * frequencies - wc_lp = 2 * np.pi * rx.lp_cutoff_frequency - h_lp = (1+1j*w/wc_lp)**(-rx.lp_power) # low pass filter + wc_lp = 2 * np.pi * rx.lp_cutoff_frequency + h_lp = (1 + 1j * w / wc_lp) ** (-rx.lp_power) # low pass filter wc_bw = 2 * np.pi * rx.bw_cutoff_frequency - numer, denom = signal.butter(rx.bw_power, wc_bw, 'low', analog=True) + numer, denom = signal.butter(rx.bw_power, wc_bw, "low", analog=True) _, h_bw = signal.freqs(numer, denom, worN=w) h = h_lp * h_bw - + if v.ndim == 3: - v_slice *= h[None,:,None] + v_slice *= h[None, :, None] if isinstance(rx, (PointMagneticFluxDensity, PointMagneticField)): d = np.einsum("ij,...jk->...ik", As[i_A], v_slice.imag) else: d = np.einsum("ij,...jk->...ik", As[i_A], v_slice.real) out[i_dat:i_datp1] = d.reshape((-1, v.shape[-1]), order="F") else: - v_slice *= h[None,:] + v_slice *= h[None, :] if isinstance(rx, (PointMagneticFluxDensity, PointMagneticField)): d = np.einsum("ij,...j->...i", As[i_A], v_slice.imag) else: diff --git a/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py b/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py index 8002dff4f3..9f05cc0811 100644 --- a/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py +++ b/SimPEG/electromagnetics/time_domain/simulation_1d_stitched.py @@ -7,9 +7,11 @@ from ... import maps from multiprocessing import Pool + def run_simulation_time_domain(args): import os - os.environ["MKL_NUM_THREADS"] = "1" + + os.environ["MKL_NUM_THREADS"] = "1" """ This method simulates the EM response or computes the sensitivities for a single sounding. 
The method allows for parallelization of @@ -56,7 +58,7 @@ def run_simulation_time_domain(args): wires = maps.Wires(("sigma", n_layer), ("h", n_src)) sigma_map = wires.sigma h_map = wires.h - + sim = Simulation1DLayered( survey=local_survey, thicknesses=thicknesses, @@ -68,27 +70,26 @@ def run_simulation_time_domain(args): topo=topo, hankel_filter="key_101_2009", ) - model = np.r_[sigma, h*np.ones(n_src)] + model = np.r_[sigma, h * np.ones(n_src)] if output_type == "sensitivity": J = sim.getJ(model) # we assumed the tx heights in a sounding is fixed - J['dh'] = J['dh'].sum(axis=1) + J["dh"] = J["dh"].sum(axis=1) return J else: em_response = sim.dpred(model) return em_response + ####################################################################### # STITCHED 1D SIMULATION CLASS AND GLOBAL FUNCTIONS ####################################################################### class Simulation1DLayeredStitched(BaseStitchedEM1DSimulation): - - _simulation_type = 'time' + _simulation_type = "time" def get_coefficients(self): - run_simulation = run_simulation_time_domain if self.verbose: @@ -97,16 +98,15 @@ def get_coefficients(self): pool = Pool(self.n_cpu) self._coefficients = pool.map( run_simulation, - [ - self.input_args_for_coeff(i) for i in range(self.n_sounding) - ] - ) + [self.input_args_for_coeff(i) for i in range(self.n_sounding)], + ) self._coefficients_set = True pool.close() pool.join() else: self._coefficients = [ - run_simulation(self.input_args_for_coeff(i)) for i in range(self.n_sounding) + run_simulation(self.input_args_for_coeff(i)) + for i in range(self.n_sounding) ] def forward(self, m): @@ -122,34 +122,34 @@ def forward(self, m): if self.parallel: if self.verbose: - print ('parallel') - #This assumes the same # of layers for each of sounding + print("parallel") + # This assumes the same # of layers for each of sounding # if self.n_sounding_for_chunk is None: pool = Pool(self.n_cpu) result = pool.map( run_simulation, [ - self.input_args(i, 
output_type='forward') for i in range(self.n_sounding) - ] + self.input_args(i, output_type="forward") + for i in range(self.n_sounding) + ], ) pool.close() pool.join() else: - result = [ - run_simulation(self.input_args(i, output_type='forward')) for i in range(self.n_sounding) + run_simulation(self.input_args(i, output_type="forward")) + for i in range(self.n_sounding) ] return np.hstack(result) def getJ(self, m): """ - Compute d F / d sigma + Compute d F / d sigma """ - self.model = m + self.model = m if getattr(self, "_J", None) is None: - if self.verbose: print(">> Compute J") @@ -167,38 +167,42 @@ def getJ(self, m): self._J = pool.map( run_simulation, [ - self.input_args(i, output_type='sensitivity') for i in range(self.n_sounding) - ] + self.input_args(i, output_type="sensitivity") + for i in range(self.n_sounding) + ], ) - if self.verbose: print(">> End pooling and form J matrix") else: self._J = [ - run_simulation(self.input_args(i, output_type='sensitivity')) for i in range(self.n_sounding) + run_simulation(self.input_args(i, output_type="sensitivity")) + for i in range(self.n_sounding) ] return self._J def getJ_sigma(self, m): """ - Compute d F / d sigma + Compute d F / d sigma """ if getattr(self, "_Jmatrix_sigma", None) is None: - J = self.getJ(m) - self._Jmatrix_sigma = np.hstack([utils.mkvc(J[i]['ds']) for i in range(self.n_sounding)]) + J = self.getJ(m) + self._Jmatrix_sigma = np.hstack( + [utils.mkvc(J[i]["ds"]) for i in range(self.n_sounding)] + ) self._Jmatrix_sigma = sp.coo_matrix( (self._Jmatrix_sigma, self.IJLayers), dtype=float ).tocsr() return self._Jmatrix_sigma - def getJ_height(self, m): if getattr(self, "_Jmatrix_height", None) is None: - J = self.getJ(m) - self._Jmatrix_height = np.hstack([utils.mkvc(J[i]['dh']) for i in range(self.n_sounding)]) + J = self.getJ(m) + self._Jmatrix_height = np.hstack( + [utils.mkvc(J[i]["dh"]) for i in range(self.n_sounding)] + ) self._Jmatrix_height = sp.coo_matrix( (self._Jmatrix_height, self.IJHeight), 
dtype=float ).tocsr() - return self._Jmatrix_height \ No newline at end of file + return self._Jmatrix_height diff --git a/SimPEG/electromagnetics/time_domain/survey.py b/SimPEG/electromagnetics/time_domain/survey.py index f89a12400a..4882b45447 100644 --- a/SimPEG/electromagnetics/time_domain/survey.py +++ b/SimPEG/electromagnetics/time_domain/survey.py @@ -22,7 +22,6 @@ def __init__(self, source_list, **kwargs): _source_location_dict = {} _source_location_by_sounding_dict = {} - _source_frequency_by_sounding_dict = {} for src in source_list: if src.i_sounding not in _source_location_dict: @@ -32,7 +31,7 @@ def __init__(self, source_list, **kwargs): _source_location_by_sounding_dict[src.i_sounding] += [src.location] self._source_location_dict = _source_location_dict - self._source_location_by_sounding_dict = _source_location_by_sounding_dict + self._source_location_by_sounding_dict = _source_location_by_sounding_dict @property def source_list(self): @@ -72,12 +71,12 @@ def get_sources_by_sounding_number(self, i_sounding): @property def vnD_by_sounding_dict(self): - if getattr(self, '_vnD_by_sounding_dict', None) is None: + if getattr(self, "_vnD_by_sounding_dict", None) is None: self._vnD_by_sounding_dict = {} for i_sounding in self.source_location_by_sounding_dict: source_list = self.get_sources_by_sounding_number(i_sounding) nD = 0 for src in source_list: - nD +=src.nD + nD += src.nD self._vnD_by_sounding_dict[i_sounding] = nD - return self._vnD_by_sounding_dict \ No newline at end of file + return self._vnD_by_sounding_dict diff --git a/SimPEG/electromagnetics/utils/em1d_utils.py b/SimPEG/electromagnetics/utils/em1d_utils.py index 04f0d194ef..1d023f5af9 100644 --- a/SimPEG/electromagnetics/utils/em1d_utils.py +++ b/SimPEG/electromagnetics/utils/em1d_utils.py @@ -14,9 +14,11 @@ import scipy.sparse as sp from matplotlib.colors import LogNorm + def set_mesh_1d(hz): return TensorMesh([hz], x0=[0]) + def get_vertical_discretization(n_layer, minimum_dz, 
geomtric_factor): """ Creates a list of vertical discretizations generate from a geometric series. @@ -232,28 +234,29 @@ def LogUniform(f, chi_inf=0.05, del_chi=0.05, tau1=1e-5, tau2=1e-2): 1 - np.log((1 + 1j * w * tau2) / (1 + 1j * w * tau1)) / np.log(tau2 / tau1) ) + ############################################################# # PLOTTING RESTIVITY MODEL ############################################################# -class Stitched1DModel: +class Stitched1DModel: def __init__( - self, + self, topography=None, physical_property=None, line=None, time_stamp=None, thicknesses=None, **kwargs - ): + ): super().__init__(**kwargs) self.topography = topography self.physical_property = physical_property self.line = line self.time_stamp = time_stamp - self.thicknesses = thicknesses + self.thicknesses = thicknesses @property def topography(self): @@ -270,7 +273,7 @@ def topography(self): def topography(self, locs): self._topography = validate_ndarray_with_shape( "topography", locs, shape=("*", "*"), dtype=float - ) + ) @property def physical_property(self): @@ -302,9 +305,7 @@ def line(self): @line.setter def line(self, values): - self._line = validate_ndarray_with_shape( - "line", values, shape=("*"), dtype=int - ) + self._line = validate_ndarray_with_shape("line", values, shape=("*"), dtype=int) @property def timestamp(self): @@ -339,26 +340,26 @@ def thicknesses(self, values): self._thicknesses = validate_ndarray_with_shape( "thicknesses", values, shape=("*"), dtype=float ) - + @property def n_layer(self): return len(self.hz) @property def hz(self): - if getattr(self, '_hz', None) is None: + if getattr(self, "_hz", None) is None: self._hz = np.r_[self.thicknesses, self.thicknesses[-1]] return self._hz @property def n_sounding(self): - if getattr(self, '_n_sounding', None) is None: + if getattr(self, "_n_sounding", None) is None: self._n_sounding = self.topography.shape[0] return self._n_sounding @property def unique_line(self): - if getattr(self, '_unique_line', None) is 
None: + if getattr(self, "_unique_line", None) is None: if self.line is None: raise Exception("line information is required!") self._unique_line = np.unique(self.line) @@ -366,10 +367,8 @@ def unique_line(self): @property def xyz(self): - if getattr(self, '_xyz', None) is None: - xyz = np.empty( - (self.n_layer, self.topography.shape[0], 3), order='F' - ) + if getattr(self, "_xyz", None) is None: + xyz = np.empty((self.n_layer, self.topography.shape[0], 3), order="F") for i_xy in range(self.topography.shape[0]): z = -self.mesh_1d.vectorCCx + self.topography[i_xy, 2] x = np.ones_like(z) * self.topography[i_xy, 0] @@ -380,79 +379,98 @@ def xyz(self): @property def mesh_1d(self): - if getattr(self, '_mesh_1d', None) is None: + if getattr(self, "_mesh_1d", None) is None: if self.thicknesses is None: raise Exception("thicknesses information is required!") - self._mesh_1d = set_mesh_1d(np.r_[self.hz[:self.n_layer]]) + self._mesh_1d = set_mesh_1d(np.r_[self.hz[: self.n_layer]]) return self._mesh_1d @property def mesh_3d(self): - if getattr(self, '_mesh_3d', None) is None: + if getattr(self, "_mesh_3d", None) is None: if self.mesh_3d is None: raise Exception("Run get_mesh_3d!") return self._mesh_3d @property def physical_property_matrix(self): - if getattr(self, '_physical_property_matrix', None) is None: + if getattr(self, "_physical_property_matrix", None) is None: if self.physical_property is None: raise Exception("physical_property information is required!") - self._physical_property_matrix = self.physical_property.reshape((self.n_layer, self.n_sounding), order='F') + self._physical_property_matrix = self.physical_property.reshape( + (self.n_layer, self.n_sounding), order="F" + ) return self._physical_property_matrix @property def depth_matrix(self): - if getattr(self, '_depth_matrix', None) is None: + if getattr(self, "_depth_matrix", None) is None: if self.hz.size == self.n_layer: depth = np.cumsum(np.r_[0, self.hz]) self._depth_matrix = np.tile(depth, 
(self.n_sounding, 1)).T else: - self._depth_matrix =np.hstack( - (np.zeros((self.n_sounding,1)), np.cumsum(self.hz.reshape((self.n_sounding, self.n_layer)), axis=1)) + self._depth_matrix = np.hstack( + ( + np.zeros((self.n_sounding, 1)), + np.cumsum( + self.hz.reshape((self.n_sounding, self.n_layer)), axis=1 + ), + ) ).T return self._depth_matrix @property def distance(self): - if getattr(self, '_distance', None) is None: + if getattr(self, "_distance", None) is None: self._distance = np.zeros(self.n_sounding, dtype=float) for line_tmp in self.unique_line: ind_line = self.line == line_tmp - xy_line = self.topography[ind_line,:2] - distance_line = np.r_[0, np.cumsum(np.sqrt((np.diff(xy_line, axis=0)**2).sum(axis=1)))] + xy_line = self.topography[ind_line, :2] + distance_line = np.r_[ + 0, np.cumsum(np.sqrt((np.diff(xy_line, axis=0) ** 2).sum(axis=1))) + ] self._distance[ind_line] = distance_line return self._distance def plot_section( - self, i_layer=0, i_line=0, x_axis='x', + self, + i_layer=0, + i_line=0, + x_axis="x", plot_type="contour", - physical_property=None, clim=None, - ax=None, cmap='viridis', ncontour=20, scale='log', - show_colorbar=True, aspect=1, zlim=None, dx=20., + physical_property=None, + clim=None, + ax=None, + cmap="viridis", + ncontour=20, + scale="log", + show_colorbar=True, + aspect=1, + zlim=None, + dx=20.0, invert_xaxis=False, alpha=0.7, - pcolorOpts={} + pcolorOpts={}, ): ind_line = self.line == self.unique_line[i_line] if physical_property is not None: physical_property_matrix = physical_property.reshape( - (self.n_layer, self.n_sounding), order='F' + (self.n_layer, self.n_sounding), order="F" ) else: physical_property_matrix = self.physical_property_matrix - if x_axis.lower() == 'y': + if x_axis.lower() == "y": x_ind = 1 - xlabel = 'Northing (m)' - elif x_axis.lower() == 'x': + xlabel = "Northing (m)" + elif x_axis.lower() == "x": x_ind = 0 - xlabel = 'Easting (m)' - elif x_axis.lower() == 'distance': - xlabel = 'Distance (m)' + xlabel = 
"Easting (m)" + elif x_axis.lower() == "distance": + xlabel = "Distance (m)" if ax is None: - fig = plt.figure(figsize=(15, 10)) + plt.figure(figsize=(15, 10)) ax = plt.subplot(111) if clim is None: @@ -461,47 +479,56 @@ def plot_section( else: vmin, vmax = clim - if scale == 'log': + if scale == "log": norm = LogNorm(vmin=vmin, vmax=vmax) - vmin=None - vmax=None + vmin = None + vmax = None else: - norm=None + norm = None ind_line = np.arange(ind_line.size)[ind_line] for i in ind_line: inds_temp = [i] - if x_axis == 'distance': + if x_axis == "distance": x_tmp = self.distance[i] else: x_tmp = self.topography[i, x_ind] - topo_temp = np.c_[ - x_tmp-dx, - x_tmp+dx - ] + topo_temp = np.c_[x_tmp - dx, x_tmp + dx] out = ax.pcolormesh( - topo_temp, -self.depth_matrix[:,i]+self.topography[i, 2], physical_property_matrix[:, inds_temp], - cmap=cmap, alpha=alpha, - vmin=vmin, vmax=vmax, norm=norm, shading='auto', **pcolorOpts + topo_temp, + -self.depth_matrix[:, i] + self.topography[i, 2], + physical_property_matrix[:, inds_temp], + cmap=cmap, + alpha=alpha, + vmin=vmin, + vmax=vmax, + norm=norm, + shading="auto", + **pcolorOpts ) if show_colorbar: - from mpl_toolkits import axes_grid1 cb = plt.colorbar(out, ax=ax, fraction=0.01) cb.set_label("Conductivity (S/m)") ax.set_aspect(aspect) ax.set_xlabel(xlabel) - ax.set_ylabel('Elevation (m)') + ax.set_ylabel("Elevation (m)") if zlim is not None: ax.set_ylim(zlim) - if x_axis == 'distance': - xlim = self.distance[ind_line].min()-dx, self.distance[ind_line].max()+dx + if x_axis == "distance": + xlim = ( + self.distance[ind_line].min() - dx, + self.distance[ind_line].max() + dx, + ) else: - xlim = self.topography[ind_line, x_ind].min()-dx, self.topography[ind_line, x_ind].max()+dx + xlim = ( + self.topography[ind_line, x_ind].min() - dx, + self.topography[ind_line, x_ind].max() + dx, + ) if invert_xaxis: ax.set_xlim(xlim[1], xlim[0]) else: @@ -513,41 +540,45 @@ def plot_section( return out, ax, cb else: return out, ax - return ax, 
+ return (ax,) def get_3d_mesh( - self, dx=None, dy=None, dz=None, - npad_x=0, npad_y=0, npad_z=0, + self, + dx=None, + dy=None, + dz=None, + npad_x=0, + npad_y=0, + npad_z=0, core_z_length=None, nx=100, ny=100, ): - xmin, xmax = self.topography[:, 0].min(), self.topography[:, 0].max() ymin, ymax = self.topography[:, 1].min(), self.topography[:, 1].max() zmin, zmax = self.topography[:, 2].min(), self.topography[:, 2].max() zmin -= self.mesh_1d.vectorNx.max() - lx = xmax-xmin - ly = ymax-ymin - lz = zmax-zmin + lx = xmax - xmin + ly = ymax - ymin + lz = zmax - zmin if dx is None: - dx = lx/nx - print ((">> dx:%.1e")%(dx)) + dx = lx / nx + print((">> dx:%.1e") % (dx)) if dy is None: - dy = ly/ny - print ((">> dy:%.1e")%(dy)) + dy = ly / ny + print((">> dy:%.1e") % (dy)) if dz is None: dz = np.median(self.mesh_1d.hx) - nx = int(np.floor(lx/dx)) - ny = int(np.floor(ly/dy)) - nz = int(np.floor(lz/dz)) + nx = int(np.floor(lx / dx)) + ny = int(np.floor(ly / dy)) + nz = int(np.floor(lz / dz)) - if nx*ny*nz > 1e6: + if nx * ny * nz > 1e6: warnings.warn( - ("Size of the mesh (%i) will greater than 1e6")%(nx*ny*nz) + ("Size of the mesh (%i) will greater than 1e6") % (nx * ny * nz) ) hx = [(dx, npad_x, -1.2), (dx, nx), (dx, npad_x, -1.2)] hy = [(dy, npad_y, -1.2), (dy, ny), (dy, npad_y, -1.2)] @@ -560,16 +591,11 @@ def get_3d_mesh( @property def P(self): - if getattr(self, '_P', None) is None: + if getattr(self, "_P", None) is None: raise Exception("Run get_interpolation_matrix first!") return self._P - def get_interpolation_matrix( - self, - npts=20, - epsilon=None - ): - + def get_interpolation_matrix(self, npts=20, epsilon=None): tree_2d = kdtree(self.topography[:, :2]) xy = utils.ndgrid(self.mesh_3d.vectorCCx, self.mesh_3d.vectorCCy) @@ -577,59 +603,58 @@ def get_interpolation_matrix( if epsilon is None: epsilon = np.min([self.mesh_3d.hx.min(), self.mesh_3d.hy.min()]) - w = 1. 
/ (distance + epsilon)**2 - w = utils.sdiag(1./np.sum(w, axis=1)) * (w) - I = utils.mkvc( - np.arange(inds.shape[0]).reshape([-1, 1]).repeat(npts, axis=1) - ) + w = 1.0 / (distance + epsilon) ** 2 + w = utils.sdiag(1.0 / np.sum(w, axis=1)) * (w) + I = utils.mkvc(np.arange(inds.shape[0]).reshape([-1, 1]).repeat(npts, axis=1)) J = utils.mkvc(inds) self._P = sp.coo_matrix( - (utils.mkvc(w), (I, J)), - shape=(inds.shape[0], self.topography.shape[0]) + (utils.mkvc(w), (I, J)), shape=(inds.shape[0], self.topography.shape[0]) ) mesh_1d = TensorMesh([np.r_[self.hz[:-1], 1e20]]) - z = self.P*self.topography[:, 2] + z = self.P * self.topography[:, 2] self._actinds = utils.surface2ind_topo(self.mesh_3d, np.c_[xy, z]) - Z = np.empty(self.mesh_3d.vnC, dtype=float, order='F') + Z = np.empty(self.mesh_3d.vnC, dtype=float, order="F") Z = self.mesh_3d.gridCC[:, 2].reshape( - (self.mesh_3d.nCx*self.mesh_3d.nCy, self.mesh_3d.nCz), order='F' + (self.mesh_3d.nCx * self.mesh_3d.nCy, self.mesh_3d.nCz), order="F" ) ACTIND = self._actinds.reshape( - (self.mesh_3d.nCx*self.mesh_3d.nCy, self.mesh_3d.nCz), order='F' + (self.mesh_3d.nCx * self.mesh_3d.nCy, self.mesh_3d.nCz), order="F" ) self._Pz = [] # This part can be cythonized or parallelized - for i_xy in range(self.mesh_3d.nCx*self.mesh_3d.nCy): + for i_xy in range(self.mesh_3d.nCx * self.mesh_3d.nCy): actind_temp = ACTIND[i_xy, :] z_temp = -(Z[i_xy, :] - z[i_xy]) self._Pz.append(mesh_1d.getInterpolationMat(z_temp[actind_temp])) def interpolate_from_1d_to_3d(self, physical_property_1d): - physical_property_2d = self.P*( - physical_property_1d.reshape( - (self.n_layer, self.n_sounding), order='F' - ).T + physical_property_2d = self.P * ( + physical_property_1d.reshape((self.n_layer, self.n_sounding), order="F").T + ) + physical_property_3d = ( + np.ones( + (self.mesh_3d.nCx * self.mesh_3d.nCy, self.mesh_3d.nCz), + order="C", + dtype=float, + ) + * np.nan ) - physical_property_3d = np.ones( - (self.mesh_3d.nCx*self.mesh_3d.nCy, 
self.mesh_3d.nCz), - order='C', dtype=float - ) * np.nan ACTIND = self._actinds.reshape( - (self.mesh_3d.nCx*self.mesh_3d.nCy, self.mesh_3d.nCz), order='F' + (self.mesh_3d.nCx * self.mesh_3d.nCy, self.mesh_3d.nCz), order="F" ) - for i_xy in range(self.mesh_3d.nCx*self.mesh_3d.nCy): + for i_xy in range(self.mesh_3d.nCx * self.mesh_3d.nCy): actind_temp = ACTIND[i_xy, :] physical_property_3d[i_xy, actind_temp] = ( - self._Pz[i_xy]*physical_property_2d[i_xy, :] + self._Pz[i_xy] * physical_property_2d[i_xy, :] ) - return physical_property_3d + return physical_property_3d diff --git a/SimPEG/regularization/base.py b/SimPEG/regularization/base.py index 04181022a3..9a145988af 100644 --- a/SimPEG/regularization/base.py +++ b/SimPEG/regularization/base.py @@ -878,7 +878,7 @@ def __init__( self.reference_model_in_smooth = reference_model_in_smooth if isinstance(mesh, LCRegularizationMesh): if orientation not in ["r", "z"]: - raise ValueError("Orientation must be 'r' or 'z'") + raise ValueError("Orientation must be 'r' or 'z'") else: if orientation not in ["x", "y", "z"]: raise ValueError("Orientation must be 'x', 'y' or 'z'") diff --git a/SimPEG/regularization/laterally_constrained.py b/SimPEG/regularization/laterally_constrained.py index f0109c523a..1d46165b4e 100644 --- a/SimPEG/regularization/laterally_constrained.py +++ b/SimPEG/regularization/laterally_constrained.py @@ -1,13 +1,6 @@ -import scipy as sp import numpy as np from .sparse import SparseSmoothness, SparseSmallness, Sparse -from .. import utils -import properties -from .. 
import props from .regularization_mesh_lateral import LCRegularizationMesh -from typing import TYPE_CHECKING -# if TYPE_CHECKING: -from scipy.sparse import csr_matrix class LaterallyConstrainedSmallness(SparseSmallness): @@ -15,12 +8,14 @@ class LaterallyConstrainedSmallness(SparseSmallness): Duplicate of SparseSmallness Class """ + class LaterallyConstrainedSmoothness(SparseSmoothness): """ Modification of SparseSmoothness Class for addressing radial and vertical gradients of model parameters, which is a 1D vertical resistivity profile at each of lateral locations. """ + def __init__(self, mesh, orientation="r", gradient_type="total", **kwargs): if "gradientType" in kwargs: self.gradientType = kwargs.pop("gradientType") @@ -49,7 +44,7 @@ def __init__( active_cells=None, active_edges=None, alpha_r=None, - length_scale_r=None, + length_scale_r=None, norms=None, gradient_type="total", irls_scaled=True, @@ -85,10 +80,8 @@ def __init__( objfcts = [ SparseSmallness(mesh=self.regularization_mesh), SparseSmoothness(mesh=self.regularization_mesh, orientation="r"), - SparseSmoothness(mesh=self.regularization_mesh, orientation="z"), - ] - gradientType = kwargs.pop("gradientType", None) - + SparseSmoothness(mesh=self.regularization_mesh, orientation="z"), ] + super().__init__( self.regularization_mesh, objfcts=objfcts, @@ -114,7 +107,7 @@ def alpha_r(self, value): raise TypeError(f"alpha_r must be a real number, saw type{type(value)}") if value < 0: raise ValueError(f"alpha_r must be non-negative, not {value}") - self._alpha_r = value + self._alpha_r = value @property def length_scale_r(self): @@ -144,5 +137,5 @@ def length_scale_r(self, value: float): raise TypeError( f"length_scale_r must be a real number, saw type{type(value)}" ) - print ("Set alpha_s") - self.alpha_r = (value * self.regularization_mesh.base_length) ** 2 \ No newline at end of file + print("Set alpha_s") + self.alpha_r = (value * self.regularization_mesh.base_length) ** 2 diff --git 
a/SimPEG/regularization/regularization_mesh_lateral.py b/SimPEG/regularization/regularization_mesh_lateral.py index 94a7385c5e..d68e47acec 100644 --- a/SimPEG/regularization/regularization_mesh_lateral.py +++ b/SimPEG/regularization/regularization_mesh_lateral.py @@ -1,11 +1,11 @@ import numpy as np import scipy.sparse as sp -from SimPEG.utils.code_utils import deprecate_property, validate_active_indices +from SimPEG.utils.code_utils import validate_active_indices -from .. import props from .. import utils from .regularization_mesh import RegularizationMesh + class LCRegularizationMesh(RegularizationMesh): """ **LCRegularization Mesh** @@ -15,8 +15,8 @@ class LCRegularizationMesh(RegularizationMesh): :param numpy.ndarray active_edges: bool array, size nE, that is True where we have active edges. Used to reduce the operators so we regularize only on active edges """ - - _active_edges = None + + _active_edges = None def __init__(self, mesh, active_cells=None, active_edges=None, **kwargs): self.mesh_radial = mesh[0] @@ -42,7 +42,7 @@ def active_cells(self) -> np.ndarray: array as representing the indices of the active cells. When called however, the quantity will have been internally converted to a boolean array. 
""" - return self._active_cells + return self._active_cells @active_cells.setter def active_cells(self, values: np.ndarray): @@ -61,15 +61,15 @@ def active_cells(self, values: np.ndarray): self._Paer = None self._Pafz = None self._h_gridded_r = None - self._h_gridded_z = None + self._h_gridded_z = None self._cell_gradient_z = None self._aveCC2Fz = None self._aveFz2CC = None self._active_cells = values - + @property def active_edges(self) -> np.ndarray: - return self._active_edges + return self._active_edges @active_edges.setter def active_edges(self, values: np.ndarray): @@ -81,16 +81,18 @@ def active_edges(self, values: np.ndarray): ) if values is not None: self._aveCC2Fr = None - self._cell_gradient_r = None - self._aveFr2CC = None + self._cell_gradient_r = None + self._aveFr2CC = None self._active_edges = values - + @property def vol(self) -> np.ndarray: # Assume a unit area for the radial points) # We could use the average of cells to nodes - self._vol = (np.ones(self.n_nodes, dtype=float)[:, None] * self.mesh_vertical.h[0]).flatten() + self._vol = ( + np.ones(self.n_nodes, dtype=float)[:, None] * self.mesh_vertical.h[0] + ).flatten() return self._vol[self.active_cells].flatten() @property @@ -112,11 +114,9 @@ def h_gridded_z(self) -> np.ndarray: """ if getattr(self, "_h_gridded_z", None) is None: - self._h_gridded_z = np.tile( - self.mesh_vertical.h[0], self.n_nodes - ).flatten() + self._h_gridded_z = np.tile(self.mesh_vertical.h[0], self.n_nodes).flatten() return self._h_gridded_z - + @property def base_length(self) -> float: """Smallest dimension (i.e. edge length) for smallest cell in the mesh. @@ -140,7 +140,7 @@ def dim(self) -> int: Dimension of the regularization mesh. """ return 2 - + @property def cell_gradient(self) -> sp.csr_matrix: """Cell gradient operator (cell centers to faces). 
@@ -156,11 +156,15 @@ def cell_gradient(self) -> sp.csr_matrix: @property def nodal_gradient_stencil(self) -> sp.csr_matrix: - ind_ptr = 2 * np.arange(self.mesh_radial.n_edges+1) + ind_ptr = 2 * np.arange(self.mesh_radial.n_edges + 1) col_inds = self.mesh_radial._edges.reshape(-1) - Aijs = (np.ones(self.mesh_radial.n_edges, dtype=float)[:, None] * [-1, 1]).reshape(-1) + Aijs = ( + np.ones(self.mesh_radial.n_edges, dtype=float)[:, None] * [-1, 1] + ).reshape(-1) - return sp.csr_matrix((Aijs, col_inds, ind_ptr), shape=(self.mesh_radial.n_edges, self.n_nodes)) + return sp.csr_matrix( + (Aijs, col_inds, ind_ptr), shape=(self.mesh_radial.n_edges, self.n_nodes) + ) @property def cell_gradient_r(self) -> sp.csr_matrix: @@ -170,7 +174,9 @@ def cell_gradient_r(self) -> sp.csr_matrix: """ if getattr(self, "_cell_gradient_r", None) is None: grad = self.nodal_gradient_stencil - self._cell_gradient_r = self.Paer.T * sp.kron(grad, utils.speye(self.nz)) * self.Pac + self._cell_gradient_r = ( + self.Paer.T * sp.kron(grad, utils.speye(self.nz)) * self.Pac + ) return self._cell_gradient_r @property @@ -206,7 +212,9 @@ def cell_gradient_z(self) -> sp.csr_matrix: """ if getattr(self, "_cell_gradient_z", None) is None: grad = self.mesh_vertical.stencil_cell_gradient - self._cell_gradient_z = self.Pafz.T * sp.kron(utils.speye(self.n_nodes), grad) * self.Pac + self._cell_gradient_z = ( + self.Pafz.T * sp.kron(utils.speye(self.n_nodes), grad) * self.Pac + ) return self._cell_gradient_z @property @@ -217,7 +225,9 @@ def aveCC2Fz(self) -> sp.csr_matrix: """ if getattr(self, "_aveCC2Fz", None) is None: ave = self.mesh_vertical.average_cell_to_face - self._aveCC2Fz = self.Pafz.T * sp.kron(utils.speye(self.n_nodes), ave) * self.Pac + self._aveCC2Fz = ( + self.Pafz.T * sp.kron(utils.speye(self.n_nodes), ave) * self.Pac + ) return self._aveCC2Fz @property @@ -232,7 +242,7 @@ def cell_distances_z(self) -> np.ndarray: if getattr(self, "_cell_distances_z", None) is None: Ave = self.aveCC2Fr 
self._cell_distances_z = Ave * (self.Pac.T * self.h_gridded_z) - return self._cell_distances_z + return self._cell_distances_z @property def nz(self) -> int: @@ -321,9 +331,9 @@ def Pac(self): """ if getattr(self, "_Pac", None) is None: if self.active_cells is None: - self._Pac = utils.speye(self.nz*self.n_nodes) + self._Pac = utils.speye(self.nz * self.n_nodes) else: - self._Pac = utils.speye(self.nz*self.n_nodes)[:, self.active_cells] + self._Pac = utils.speye(self.nz * self.n_nodes)[:, self.active_cells] return self._Pac @property @@ -340,7 +350,6 @@ def Paer(self): self._Paer = utils.speye(self.nE) else: ave = self.mesh_vertical.average_face_to_cell - aveFz2CC = sp.kron(utils.speye(self.n_nodes), ave) self._Paer = utils.speye(self.nE)[:, self.active_edges] return self._Paer @@ -354,7 +363,9 @@ def aveFz2CC(self): """ if getattr(self, "_aveFz2CC", None) is None: ave = self.mesh_vertical.average_face_to_cell - self._aveFz2CC = self.Pac.T * sp.kron(utils.speye(self.n_nodes), ave) * self.Pafz + self._aveFz2CC = ( + self.Pac.T * sp.kron(utils.speye(self.n_nodes), ave) * self.Pafz + ) return self._aveFz2CC @property @@ -370,4 +381,6 @@ def aveFr2CC(self): ave = self.mesh_radial.average_node_to_edge.T self._aveFr2CC = self.Pac.T * sp.kron(ave, utils.speye(self.nz)) * self.Paer return self._aveFr2CC -LCRegularizationMesh.__module__ = "SimPEG.regularization" \ No newline at end of file + + +LCRegularizationMesh.__module__ = "SimPEG.regularization" diff --git a/tests/em/em1d/test_EM1D_FD_jac_layers.py b/tests/em/em1d/test_EM1D_FD_jac_layers.py index fa495e0511..20c331f295 100644 --- a/tests/em/em1d/test_EM1D_FD_jac_layers.py +++ b/tests/em/em1d/test_EM1D_FD_jac_layers.py @@ -18,7 +18,7 @@ def setUp(self): deepthick = np.logspace(1, 2, 10) thicknesses = np.r_[nearthick, deepthick] topo = np.r_[0.0, 0.0, 100.0] - + # Survey Geometry height = 1e-5 src_location = np.array([0.0, 0.0, 100.0 + height]) @@ -148,7 +148,7 @@ def test_EM1DFDJtvec_Layers(self): 
np.log(np.ones(self.nlayers) * sigma_half), np.log(np.ones(self.nlayers) * 1.5 * mu_half), np.log(self.thicknesses) * 0.9, - np.log(self.height) *1.5, + np.log(self.height) * 1.5, ] resp_ini = self.sim.dpred(m_ini) dr = resp_ini - dobs diff --git a/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py b/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py index 81006f7b84..c59745676b 100644 --- a/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py +++ b/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py @@ -4,69 +4,68 @@ import SimPEG.electromagnetics.frequency_domain as fdem from SimPEG import * from discretize import TensorMesh -from pymatsolver import PardisoSolver np.random.seed(41) class STITCHED_EM1D_FD_Jacobian_Test_MagDipole(unittest.TestCase): - def setUp(self, parallel=False): - dz = 1 geometric_factor = 1.1 n_layer = 20 - thicknesses = dz * geometric_factor ** np.arange(n_layer-1) + thicknesses = dz * geometric_factor ** np.arange(n_layer - 1) frequencies = np.array([900, 7200, 56000], dtype=float) n_sounding = 50 - dx = 20. + dx = 20.0 hx = np.ones(n_sounding) * dx hz = np.r_[thicknesses, thicknesses[-1]] - mesh = TensorMesh([hx, hz], x0='00') + mesh = TensorMesh([hx, hz], x0="00") x = mesh.cell_centers_x y = np.zeros_like(x) - z = np.ones_like(x) * 30. 
- receiver_locations = np.c_[x+8., y, z] + z = np.ones_like(x) * 30.0 + receiver_locations = np.c_[x + 8.0, y, z] source_locations = np.c_[x, y, z] - topo = np.c_[x, y, z-30.].astype(float) - - sigma_map = maps.ExpMap(mesh) + topo = np.c_[x, y, z - 30.0].astype(float) source_list = [] for i_sounding in range(0, n_sounding): - source_location = mkvc(source_locations[i_sounding, :]) receiver_location = mkvc(receiver_locations[i_sounding, :]) receiver_list = [] receiver_list.append( fdem.receivers.PointMagneticFieldSecondary( - receiver_location, - orientation="z", - component="both" + receiver_location, orientation="z", component="both" ) ) for i_freq, frequency in enumerate(frequencies): src = fdem.sources.MagDipole( - receiver_list, frequency, source_location, - orientation="z", i_sounding=i_sounding + receiver_list, + frequency, + source_location, + orientation="z", + i_sounding=i_sounding, ) source_list.append(src) survey = fdem.Survey(source_list) - wires = maps.Wires(('sigma', n_layer*n_sounding), ('h', n_sounding)) - sigmaMap = maps.ExpMap(nP=n_layer*n_sounding) * wires.sigma + wires = maps.Wires(("sigma", n_layer * n_sounding), ("h", n_sounding)) + sigmaMap = maps.ExpMap(nP=n_layer * n_sounding) * wires.sigma hMap = maps.ExpMap(nP=n_sounding) * wires.h simulation = fdem.Simulation1DLayeredStitched( - survey=survey, thicknesses=thicknesses, + survey=survey, + thicknesses=thicknesses, sigmaMap=sigmaMap, hMap=hMap, - topo=topo, parallel=parallel, n_cpu=2, verbose=False + topo=topo, + parallel=parallel, + n_cpu=2, + verbose=False, ) self.sim = simulation self.mesh = mesh @@ -75,11 +74,13 @@ def test_EM1DFDJvec_Layers(self): # Conductivity inds = self.mesh.cell_centers[:, 1] < 25 inds_1 = self.mesh.cell_centers[:, 1] < 50 - sigma = np.ones(self.mesh.n_cells) * 1./100. - sigma[inds_1] = 1./10. - sigma[inds] = 1./50. 
- sigma_em1d = sigma.reshape(self.mesh.vnC, order='F').flatten() - m_stitched = np.r_[np.log(sigma_em1d), np.ones(self.sim.n_sounding)*np.log(30.)] + sigma = np.ones(self.mesh.n_cells) * 1.0 / 100.0 + sigma[inds_1] = 1.0 / 10.0 + sigma[inds] = 1.0 / 50.0 + sigma_em1d = sigma.reshape(self.mesh.vnC, order="F").flatten() + m_stitched = np.r_[ + np.log(sigma_em1d), np.ones(self.sim.n_sounding) * np.log(30.0) + ] def fwdfun(m): resp = self.sim.dpred(m) @@ -106,15 +107,20 @@ def test_EM1DFDJtvec_Layers(self): # Conductivity inds = self.mesh.cell_centers[:, 1] < 25 inds_1 = self.mesh.cell_centers[:, 1] < 50 - sigma = np.ones(self.mesh.n_cells) * 1./100. - sigma[inds_1] = 1./10. - sigma[inds] = 1./50. - sigma_em1d = sigma.reshape(self.mesh.vnC, order='F').flatten() - m_stitched = np.r_[np.log(sigma_em1d), np.ones(self.sim.n_sounding)*np.log(30.)] + sigma = np.ones(self.mesh.n_cells) * 1.0 / 100.0 + sigma[inds_1] = 1.0 / 10.0 + sigma[inds] = 1.0 / 50.0 + sigma_em1d = sigma.reshape(self.mesh.vnC, order="F").flatten() + m_stitched = np.r_[ + np.log(sigma_em1d), np.ones(self.sim.n_sounding) * np.log(30.0) + ] dobs = self.sim.dpred(m_stitched) - m_ini = np.r_[np.log(1./100.) 
* np.ones(self.mesh.n_cells), np.ones(self.sim.n_sounding)*np.log(30.)*1.5] + m_ini = np.r_[ + np.log(1.0 / 100.0) * np.ones(self.mesh.n_cells), + np.ones(self.sim.n_sounding) * np.log(30.0) * 1.5, + ] resp_ini = self.sim.dpred(m_ini) dr = resp_ini - dobs @@ -132,5 +138,6 @@ def derChk(m): if passed: print("STITCHED EM1DFM MagDipole Jtvec test works") -if __name__ == '__main__': - unittest.main() \ No newline at end of file + +if __name__ == "__main__": + unittest.main() diff --git a/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py b/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py index cea7db7613..f83840740a 100644 --- a/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py +++ b/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py @@ -10,9 +10,7 @@ class STITCHED_EM1D_TD_Jacobian_Test_MagDipole(unittest.TestCase): - def setUp(self, parallel=False): - times_hm = np.logspace(-6, -3, 31) times_lm = np.logspace(-5, -2, 31) @@ -27,44 +25,46 @@ def setUp(self, parallel=False): dz = 1 geometric_factor = 1.1 n_layer = 20 - thicknesses = dz * geometric_factor ** np.arange(n_layer-1) + thicknesses = dz * geometric_factor ** np.arange(n_layer - 1) n_layer = 20 n_sounding = 5 - dx = 20. + dx = 20.0 hx = np.ones(n_sounding) * dx hz = np.r_[thicknesses, thicknesses[-1]] - mesh = TensorMesh([hx, hz], x0='00') + mesh = TensorMesh([hx, hz], x0="00") inds = mesh.cell_centers[:, 1] < 25 inds_1 = mesh.cell_centers[:, 1] < 50 - sigma = np.ones(mesh.nC) * 1./100. - sigma[inds_1] = 1./10. - sigma[inds] = 1./50. - sigma_em1d = sigma.reshape(mesh.vnC, order='F').flatten() - mSynth = np.log(sigma_em1d) + sigma = np.ones(mesh.nC) * 1.0 / 100.0 + sigma[inds_1] = 1.0 / 10.0 + sigma[inds] = 1.0 / 50.0 + sigma_em1d = sigma.reshape(mesh.vnC, order="F").flatten() x = mesh.cell_centers_x y = np.zeros_like(x) - z = np.ones_like(x) * 30. + z = np.ones_like(x) * 30.0 source_locations = np.c_[x, y, z] - source_current = 1. - source_orientation = 'z' - source_radius = 10. 
+ source_current = 1.0 + source_orientation = "z" + source_radius = 10.0 receiver_offset_r = 13.25 - receiver_offset_z = 2. + receiver_offset_z = 2.0 - receiver_locations = np.c_[x+receiver_offset_r, np.zeros(n_sounding), 30.*np.ones(n_sounding)+receiver_offset_z] + receiver_locations = np.c_[ + x + receiver_offset_r, + np.zeros(n_sounding), + 30.0 * np.ones(n_sounding) + receiver_offset_z, + ] receiver_orientation = "z" # "x", "y" or "z" - topo = np.c_[x, y, z-30.].astype(float) + topo = np.c_[x, y, z - 30.0].astype(float) sigma_map = maps.ExpMap(mesh) source_list = [] for i_sounding in range(0, n_sounding): - source_location = source_locations[i_sounding, :] receiver_location = receiver_locations[i_sounding, :] @@ -80,48 +80,56 @@ def setUp(self, parallel=False): # Make a list containing all receivers even if just one # Must define the transmitter properties and associated receivers - source_list.append(tdem.sources.MagDipole( - [dbzdt_receiver_hm], - location=source_location, - waveform=waveform_hm, - orientation=source_orientation, - i_sounding=i_sounding, - ) + source_list.append( + tdem.sources.MagDipole( + [dbzdt_receiver_hm], + location=source_location, + waveform=waveform_hm, + orientation=source_orientation, + i_sounding=i_sounding, + ) ) - source_list.append(tdem.sources.MagDipole( - [dbzdt_receiver_lm], - location=source_location, - waveform=waveform_lm, - orientation=source_orientation, - i_sounding=i_sounding, - ) + source_list.append( + tdem.sources.MagDipole( + [dbzdt_receiver_lm], + location=source_location, + waveform=waveform_lm, + orientation=source_orientation, + i_sounding=i_sounding, + ) ) survey = tdem.Survey(source_list) - wires = maps.Wires(('sigma', n_layer*n_sounding), ('h', n_sounding)) - sigmaMap = maps.ExpMap(nP=n_layer*n_sounding) * wires.sigma + wires = maps.Wires(("sigma", n_layer * n_sounding), ("h", n_sounding)) + sigmaMap = maps.ExpMap(nP=n_layer * n_sounding) * wires.sigma hMap = maps.ExpMap(nP=n_sounding) * wires.h simulation 
= tdem.Simulation1DLayeredStitched( - survey=survey, thicknesses=thicknesses, + survey=survey, + thicknesses=thicknesses, sigmaMap=sigmaMap, hMap=hMap, - topo=topo, parallel=False, n_cpu=2, verbose=False, solver=PardisoSolver + topo=topo, + parallel=False, + n_cpu=2, + verbose=False, + solver=PardisoSolver, ) self.sim = simulation self.mesh = mesh - def test_EM1TDJvec_Layers(self): # Conductivity inds = self.mesh.cell_centers[:, 1] < 25 inds_1 = self.mesh.cell_centers[:, 1] < 50 - sigma = np.ones(self.mesh.n_cells) * 1./100. - sigma[inds_1] = 1./10. - sigma[inds] = 1./50. - sigma_em1d = sigma.reshape(self.mesh.vnC, order='F').flatten() - m_stitched = np.r_[np.log(sigma_em1d), np.ones(self.sim.n_sounding)*np.log(30.)] + sigma = np.ones(self.mesh.n_cells) * 1.0 / 100.0 + sigma[inds_1] = 1.0 / 10.0 + sigma[inds] = 1.0 / 50.0 + sigma_em1d = sigma.reshape(self.mesh.vnC, order="F").flatten() + m_stitched = np.r_[ + np.log(sigma_em1d), np.ones(self.sim.n_sounding) * np.log(30.0) + ] def fwdfun(m): resp = self.sim.dpred(m) @@ -148,15 +156,20 @@ def test_EM1TDJtvec_Layers(self): # Conductivity inds = self.mesh.cell_centers[:, 1] < 25 inds_1 = self.mesh.cell_centers[:, 1] < 50 - sigma = np.ones(self.mesh.n_cells) * 1./100. - sigma[inds_1] = 1./10. - sigma[inds] = 1./50. - sigma_em1d = sigma.reshape(self.mesh.vnC, order='F').flatten() - m_stitched = m_stitched = np.r_[np.log(sigma_em1d), np.ones(self.sim.n_sounding)*np.log(30.)] + sigma = np.ones(self.mesh.n_cells) * 1.0 / 100.0 + sigma[inds_1] = 1.0 / 10.0 + sigma[inds] = 1.0 / 50.0 + sigma_em1d = sigma.reshape(self.mesh.vnC, order="F").flatten() + m_stitched = m_stitched = np.r_[ + np.log(sigma_em1d), np.ones(self.sim.n_sounding) * np.log(30.0) + ] dobs = self.sim.dpred(m_stitched) - m_ini = np.r_[np.log(1./100.) 
* np.ones(self.mesh.n_cells), np.ones(self.sim.n_sounding)*np.log(30.)*1.5] + m_ini = np.r_[ + np.log(1.0 / 100.0) * np.ones(self.mesh.n_cells), + np.ones(self.sim.n_sounding) * np.log(30.0) * 1.5, + ] resp_ini = self.sim.dpred(m_ini) dr = resp_ini - dobs @@ -174,5 +187,6 @@ def derChk(m): if passed: print("STITCHED EM1DFM MagDipole Jtvec test works") -if __name__ == '__main__': - unittest.main() \ No newline at end of file + +if __name__ == "__main__": + unittest.main() From f9a26a04aa05623767dede097865e96dd8dccd83 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 22 Nov 2023 09:35:07 -0800 Subject: [PATCH 105/164] Rename choclo_parallel to numba_parallel --- SimPEG/potential_fields/gravity/simulation.py | 8 ++++---- tests/pf/test_forward_Grav_Linear.py | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index 1de6468eaf..a41ea48112 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -105,7 +105,7 @@ class Simulation3DIntegral(BasePFSimulation): engine : str, optional Choose which engine should be used to run the forward model: ``"geoana"`` or "``choclo``". - choclo_parallel : bool, optional + numba_parallel : bool, optional If True, the simulation will run in parallel. If False, it will run in serial. If ``engine`` is not ``"choclo"`` this argument will be ignored. 
@@ -119,7 +119,7 @@ def __init__( rho=None, rhoMap=None, engine="geoana", - choclo_parallel=True, + numba_parallel=True, **kwargs, ): super().__init__(mesh, **kwargs) @@ -128,12 +128,12 @@ def __init__( self._G = None self._gtg_diagonal = None self.modelMap = self.rhoMap - self.choclo_parallel = choclo_parallel + self.numba_parallel = numba_parallel self.engine = engine self._sanity_checks_engine(kwargs) # Define jit functions if self.engine == "choclo": - if choclo_parallel: + if numba_parallel: self._sensitivity_gravity = _sensitivity_gravity_parallel self._forward_gravity = _forward_gravity_parallel else: diff --git a/tests/pf/test_forward_Grav_Linear.py b/tests/pf/test_forward_Grav_Linear.py index db9388efa0..496e80b1f7 100644 --- a/tests/pf/test_forward_Grav_Linear.py +++ b/tests/pf/test_forward_Grav_Linear.py @@ -135,7 +135,7 @@ def test_accelerations_vs_analytic( # Create simulation if engine == "choclo": sensitivity_path = tmp_path / "sensitivity_choclo" - kwargs = dict(choclo_parallel=parallelism) + kwargs = dict(numba_parallel=parallelism) else: sensitivity_path = tmp_path kwargs = dict(n_processes=parallelism) @@ -191,7 +191,7 @@ def test_tensor_vs_analytic( # Create simulation if engine == "choclo": sensitivity_path = tmp_path / "sensitivity_choclo" - kwargs = dict(choclo_parallel=parallelism) + kwargs = dict(numba_parallel=parallelism) else: sensitivity_path = tmp_path kwargs = dict(n_processes=parallelism) @@ -251,7 +251,7 @@ def test_guv_vs_analytic( # Create simulation if engine == "choclo": sensitivity_path = tmp_path / "sensitivity_choclo" - kwargs = dict(choclo_parallel=parallelism) + kwargs = dict(numba_parallel=parallelism) else: sensitivity_path = tmp_path kwargs = dict(n_processes=parallelism) From fc7fda1eaeed3cb80afc5156dc11a7ba0d68882e Mon Sep 17 00:00:00 2001 From: Jacob Edman Date: Wed, 22 Nov 2023 10:54:54 -0800 Subject: [PATCH 106/164] Extend tests for magnetic simulation (#1) Extend tests for magnetic simulation for the new 
implementation using Choclo as engine. Use `pytest` and ditch `unittest`. Extend the tests to different options for `store_sensitivity` using `pytest`'s parametrizations. --- tests/pf/test_forward_Mag_Linear.py | 647 ++++++++++++++++------------ 1 file changed, 375 insertions(+), 272 deletions(-) diff --git a/tests/pf/test_forward_Mag_Linear.py b/tests/pf/test_forward_Mag_Linear.py index 3f412651db..0237185739 100644 --- a/tests/pf/test_forward_Mag_Linear.py +++ b/tests/pf/test_forward_Mag_Linear.py @@ -1,77 +1,222 @@ -import unittest +from typing import List, Tuple + import discretize -from SimPEG import utils, maps -from SimPEG.potential_fields import magnetics as mag +import numpy as np +import pytest from geoana.em.static import MagneticPrism from scipy.constants import mu_0 -import numpy as np +from SimPEG import maps, utils +from SimPEG.potential_fields import magnetics as mag -def test_ana_mag_forward(): - nx = 5 - ny = 5 - H0 = (50000.0, 60.0, 250.0) - b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) - chi1 = 0.01 - chi2 = 0.02 +@pytest.fixture +def mag_mesh() -> discretize.TensorMesh: + """ + a small tensor mesh for testing magnetic simulations + Returns + ------- + discretize.TensorMesh + the tensor mesh for testing + """ # Define a mesh cs = 0.2 hxind = [(cs, 41)] hyind = [(cs, 41)] hzind = [(cs, 41)] mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") + return mesh - # create a model of two blocks, 1 inside the other - block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) - block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) - - def get_block_inds(grid, block): - return np.where( - (grid[:, 0] > block[0, 0]) - & (grid[:, 0] < block[0, 1]) - & (grid[:, 1] > block[1, 0]) - & (grid[:, 1] < block[1, 1]) - & (grid[:, 2] > block[2, 0]) - & (grid[:, 2] < block[2, 1]) - ) - block1_inds = get_block_inds(mesh.cell_centers, block1) - block2_inds = get_block_inds(mesh.cell_centers, block2) +@pytest.fixture +def two_blocks() -> Tuple[np.ndarray, 
np.ndarray]: + """ + The parameters defining two blocks - model = np.zeros(mesh.n_cells) - model[block1_inds] = chi1 - model[block2_inds] = chi2 + Returns + ------- + Tuple[np.ndarray, np.ndarray] + Tuple of (3, 2) arrays of (xmin, xmax), (ymin, ymax), (zmin, zmax) dimensions of each block + """ + block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) + block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) + return block1, block2 - active_cells = model != 0.0 - model_reduced = model[active_cells] - # Create reduced identity map for Linear Pproblem - idenMap = maps.IdentityMap(nP=int(sum(active_cells))) +@pytest.fixture +def receiver_locations() -> np.ndarray: + """ + a grid of receivers for testing + Returns + ------- + np.ndarray + (n, 3) array of receiver locations + """ # Create plane of observations + nx, ny = 5, 5 xr = np.linspace(-20, 20, nx) yr = np.linspace(-20, 20, ny) X, Y = np.meshgrid(xr, yr) Z = np.ones_like(X) * 3.0 - locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] - components = ["bx", "by", "bz", "tmi"] + return np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] - rxLoc = mag.Point(locXyz, components=components) - srcField = mag.SourceField([rxLoc], parameters=H0) - survey = mag.Survey(srcField) - # Creat reduced identity map for Linear Pproblem - idenMap = maps.IdentityMap(nP=int(sum(active_cells))) +@pytest.fixture +def inducing_field() -> Tuple[Tuple[float, float, float], Tuple[float, float, float]]: + """ + inducing field two ways-- (amplitude, inclination , declination) and (b_x, b_y, b_z) + Returns + ------- + Tuple[Tuple[float, float, float], Tuple[float, float, float]] + (amplitude, inclination, declination), (b_x, b_y, b_z) + """ + H0 = (50000.0, 60.0, 250.0) + b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) + return H0, b0 + + +def get_block_inds(grid: np.ndarray, block: np.ndarray) -> np.ndarray: + """ + get the indices for a block + + Parameters + ---------- + grid : np.ndarray + (n, 3) array of xyz locations + 
block : np.ndarray + (3, 2) array of (xmin, xmax), (ymin, ymax), (zmin, zmax) dimensions of the block + + Returns + ------- + np.ndarray + boolean array of indices corresponding to the block + """ + + return np.where( + (grid[:, 0] > block[0, 0]) + & (grid[:, 0] < block[0, 1]) + & (grid[:, 1] > block[1, 0]) + & (grid[:, 1] < block[1, 1]) + & (grid[:, 2] > block[2, 0]) + & (grid[:, 2] < block[2, 1]) + ) + + +def create_block_model( + mesh: discretize.TensorMesh, + blocks: Tuple[np.ndarray, ...], + block_params: Tuple[np.ndarray, ...], +) -> Tuple[np.ndarray, np.ndarray]: + """ + Create a magnetic model from a sequence of blocks + + Parameters + ---------- + mesh : discretize.TensorMesh + TensorMesh object to put the model on + blocks : Tuple[np.ndarray, ...] + Tuple of block definitions (each element is (3, 2) array of (xmin, xmax), (ymin, ymax), (zmin, zmax) + dimensions of the block) + block_params : Tuple[np.ndarray, ...] + Tuple of parameters to assign for each block. Must be the same length as ``blocks``. 
+ + Returns + ------- + Tuple[np.ndarray, np.ndarray] + Tuple of the magnetic model and active_cells (a boolean array) + + Raises + ------ + ValueError + if ``blocks`` and ``block_params`` have incompatible dimensions + """ + if len(blocks) != len(block_params): + raise ValueError( + "'blocks' and 'block_params' must have the same number of elements" + ) + model = np.zeros((mesh.n_cells, np.atleast_1d(block_params[0]).shape[0])) + for block, params in zip(blocks, block_params): + block_ind = get_block_inds(mesh.cell_centers, block) + model[block_ind] = params + active_cells = np.any(np.abs(model) > 0, axis=1) + return model.squeeze(), active_cells + + +def create_mag_survey( + components: List[str], + receiver_locations: np.ndarray, + inducing_field_params: Tuple[float, float, float], +) -> mag.Survey: + """ + create a magnetic Survey + + Parameters + ---------- + components : List[str] + List of components to model + receiver_locations : np.ndarray + (n, 3) array of xyz receiver locations + inducing_field_params : Tuple[float, float, float] + amplitude, inclination, and declination of the inducing field + + Returns + ------- + mag.Survey + a magnetic Survey instance + """ + + receivers = mag.Point(receiver_locations, components=components) + source_field = mag.UniformBackgroundField([receivers], *inducing_field_params) + return mag.Survey(source_field) + + +@pytest.mark.parametrize( + "engine,parallel_kwargs", + [ + ("geoana", {"n_processes": None}), + ("geoana", {"n_processes": 1}), + ("choclo", {"choclo_parallel": False}), + ("choclo", {"choclo_parallel": True}), + ], + ids=["geoana_serial", "geoana_parallel", "choclo_serial", "choclo_parallel"], +) +@pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) +def test_ana_mag_forward( + engine, + parallel_kwargs, + store_sensitivities, + tmp_path, + mag_mesh, + two_blocks, + receiver_locations, + inducing_field, +): + inducing_field_params, b0 = inducing_field + + chi1 = 0.01 + chi2 = 0.02 
+ model, active_cells = create_block_model(mag_mesh, two_blocks, [chi1, chi2]) + model_reduced = model[active_cells] + # Create reduced identity map for Linear Problem + identity_map = maps.IdentityMap(nP=int(sum(active_cells))) + + survey = create_mag_survey( + components=["bx", "by", "bz", "tmi"], + receiver_locations=receiver_locations, + inducing_field_params=inducing_field_params, + ) sim = mag.Simulation3DIntegral( - mesh, + mag_mesh, survey=survey, - chiMap=idenMap, + chiMap=identity_map, ind_active=active_cells, - store_sensitivities="forward_only", - n_processes=None, + sensitivity_path=str(tmp_path / f"{engine}"), + store_sensitivities=store_sensitivities, + engine=engine, + **parallel_kwargs, ) data = sim.dpred(model_reduced) @@ -80,290 +225,248 @@ def get_block_inds(grid, block): d_z = data[2::4] d_t = data[3::4] - tmi = sim.tmi_projection - d_t2 = d_x * tmi[0] + d_y * tmi[1] + d_z * tmi[2] - np.testing.assert_allclose(d_t, d_t2) # double check internal projection - # Compute analytical response from magnetic prism + block1, block2 = two_blocks prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], chi1 * b0 / mu_0) prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -chi1 * b0 / mu_0) prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], chi2 * b0 / mu_0) d = ( - prism_1.magnetic_flux_density(locXyz) - + prism_2.magnetic_flux_density(locXyz) - + prism_3.magnetic_flux_density(locXyz) + prism_1.magnetic_flux_density(receiver_locations) + + prism_2.magnetic_flux_density(receiver_locations) + + prism_3.magnetic_flux_density(receiver_locations) ) - np.testing.assert_allclose(d_x, d[:, 0]) - np.testing.assert_allclose(d_y, d[:, 1]) - np.testing.assert_allclose(d_z, d[:, 2]) - np.testing.assert_allclose(d_t, d @ tmi) - + # TMI projection + tmi = sim.tmi_projection + d_t2 = d_x * tmi[0] + d_y * tmi[1] + d_z * tmi[2] -def test_ana_mag_grad_forward(): - nx = 5 - ny = 5 + # Check results + rtol, atol = 1e-7, 1e-6 + np.testing.assert_allclose( + d_t, d_t2, rtol=rtol, 
atol=atol + ) # double check internal projection + np.testing.assert_allclose(d_x, d[:, 0], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_y, d[:, 1], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_z, d[:, 2], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_t, d @ tmi, rtol=rtol, atol=atol) + + +@pytest.mark.parametrize( + "engine, parallel_kwargs", + [ + ("geoana", {"n_processes": None}), + ("geoana", {"n_processes": 1}), + ("choclo", {"choclo_parallel": False}), + ("choclo", {"choclo_parallel": True}), + ], + ids=["geoana_serial", "geoana_parallel", "choclo_serial", "choclo_parallel"], +) +@pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) +def test_ana_mag_grad_forward( + engine, + parallel_kwargs, + store_sensitivities, + tmp_path, + mag_mesh, + two_blocks, + receiver_locations, + inducing_field, +): + inducing_field_params, b0 = inducing_field - H0 = (50000.0, 60.0, 250.0) - b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) chi1 = 0.01 chi2 = 0.02 - - # Define a mesh - cs = 0.2 - hxind = [(cs, 41)] - hyind = [(cs, 41)] - hzind = [(cs, 41)] - mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") - - # create a model of two blocks, 1 inside the other - block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) - block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) - - def get_block_inds(grid, block): - return np.where( - (grid[:, 0] > block[0, 0]) - & (grid[:, 0] < block[0, 1]) - & (grid[:, 1] > block[1, 0]) - & (grid[:, 1] < block[1, 1]) - & (grid[:, 2] > block[2, 0]) - & (grid[:, 2] < block[2, 1]) - ) - - block1_inds = get_block_inds(mesh.cell_centers, block1) - block2_inds = get_block_inds(mesh.cell_centers, block2) - - model = np.zeros(mesh.n_cells) - model[block1_inds] = chi1 - model[block2_inds] = chi2 - - active_cells = model != 0.0 + model, active_cells = create_block_model(mag_mesh, two_blocks, [chi1, chi2]) model_reduced = model[active_cells] + # Create reduced identity map for Linear Problem + 
identity_map = maps.IdentityMap(nP=int(sum(active_cells))) - # Create reduced identity map for Linear Pproblem - idenMap = maps.IdentityMap(nP=int(sum(active_cells))) - - # Create plane of observations - xr = np.linspace(-20, 20, nx) - yr = np.linspace(-20, 20, ny) - X, Y = np.meshgrid(xr, yr) - Z = np.ones_like(X) * 3.0 - locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] - components = ["bxx", "bxy", "bxz", "byy", "byz", "bzz"] - - rxLoc = mag.Point(locXyz, components=components) - srcField = mag.SourceField([rxLoc], parameters=H0) - survey = mag.Survey(srcField) - - # Creat reduced identity map for Linear Pproblem - idenMap = maps.IdentityMap(nP=int(sum(active_cells))) - + survey = create_mag_survey( + components=["bxx", "bxy", "bxz", "byy", "byz", "bzz"], + receiver_locations=receiver_locations, + inducing_field_params=inducing_field_params, + ) sim = mag.Simulation3DIntegral( - mesh, + mag_mesh, survey=survey, - chiMap=idenMap, + chiMap=identity_map, ind_active=active_cells, - store_sensitivities="forward_only", - n_processes=None, + sensitivity_path=str(tmp_path / f"{engine}"), + store_sensitivities=store_sensitivities, + engine=engine, + **parallel_kwargs, ) - - data = sim.dpred(model_reduced) - d_xx = data[0::6] - d_xy = data[1::6] - d_xz = data[2::6] - d_yy = data[3::6] - d_yz = data[4::6] - d_zz = data[5::6] - - # Compute analytical response from magnetic prism - prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], chi1 * b0 / mu_0) - prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -chi1 * b0 / mu_0) - prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], chi2 * b0 / mu_0) - - d = ( - prism_1.magnetic_field_gradient(locXyz) - + prism_2.magnetic_field_gradient(locXyz) - + prism_3.magnetic_field_gradient(locXyz) - ) * mu_0 - - np.testing.assert_allclose(d_xx, d[..., 0, 0], rtol=1e-10, atol=1e-12) - np.testing.assert_allclose(d_xy, d[..., 0, 1], rtol=1e-10, atol=1e-12) - np.testing.assert_allclose(d_xz, d[..., 0, 2], rtol=1e-10, atol=1e-12) - 
np.testing.assert_allclose(d_yy, d[..., 1, 1], rtol=1e-10, atol=1e-12) - np.testing.assert_allclose(d_yz, d[..., 1, 2], rtol=1e-10, atol=1e-12) - np.testing.assert_allclose(d_zz, d[..., 2, 2], rtol=1e-10, atol=1e-12) - - -def test_ana_mag_vec_forward(): - nx = 5 - ny = 5 - - H0 = (50000.0, 60.0, 250.0) - b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) - - M1 = utils.mat_utils.dip_azimuth2cartesian(45, -40) * 0.05 - M2 = utils.mat_utils.dip_azimuth2cartesian(120, 32) * 0.1 - - # Define a mesh - cs = 0.2 - hxind = [(cs, 41)] - hyind = [(cs, 41)] - hzind = [(cs, 41)] - mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") - - # create a model of two blocks, 1 inside the other - block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) - block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) - - def get_block_inds(grid, block): - return np.where( - (grid[:, 0] > block[0, 0]) - & (grid[:, 0] < block[0, 1]) - & (grid[:, 1] > block[1, 0]) - & (grid[:, 1] < block[1, 1]) - & (grid[:, 2] > block[2, 0]) - & (grid[:, 2] < block[2, 1]) - ) - - block1_inds = get_block_inds(mesh.cell_centers, block1) - block2_inds = get_block_inds(mesh.cell_centers, block2) - - model = np.zeros((mesh.n_cells, 3)) - model[block1_inds] = M1 - model[block2_inds] = M2 - - active_cells = np.any(model != 0.0, axis=1) + if engine == "choclo": + # gradient simulation not implemented for choclo yet + with pytest.raises(NotImplementedError): + data = sim.dpred(model_reduced) + else: + data = sim.dpred(model_reduced) + d_xx = data[0::6] + d_xy = data[1::6] + d_xz = data[2::6] + d_yy = data[3::6] + d_yz = data[4::6] + d_zz = data[5::6] + + # Compute analytical response from magnetic prism + block1, block2 = two_blocks + prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], chi1 * b0 / mu_0) + prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -chi1 * b0 / mu_0) + prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], chi2 * b0 / mu_0) + + d = ( + prism_1.magnetic_field_gradient(receiver_locations) + 
+ prism_2.magnetic_field_gradient(receiver_locations) + + prism_3.magnetic_field_gradient(receiver_locations) + ) * mu_0 + + # Check results + rtol, atol = 1e-7, 1e-6 + np.testing.assert_allclose(d_xx, d[..., 0, 0], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_xy, d[..., 0, 1], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_xz, d[..., 0, 2], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_yy, d[..., 1, 1], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_yz, d[..., 1, 2], rtol=rtol, atol=atol) + np.testing.assert_allclose(d_zz, d[..., 2, 2], rtol=rtol, atol=atol) + + +@pytest.mark.parametrize( + "engine, parallel_kwargs", + [ + ("geoana", {"n_processes": None}), + ("geoana", {"n_processes": 1}), + ("choclo", {"choclo_parallel": False}), + ("choclo", {"choclo_parallel": True}), + ], + ids=["geoana_serial", "geoana_parallel", "choclo_serial", "choclo_parallel"], +) +@pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) +def test_ana_mag_vec_forward( + engine, + parallel_kwargs, + store_sensitivities, + tmp_path, + mag_mesh, + two_blocks, + receiver_locations, + inducing_field, +): + inducing_field_params, b0 = inducing_field + M1 = (utils.mat_utils.dip_azimuth2cartesian(45, -40) * 0.05).squeeze() + M2 = (utils.mat_utils.dip_azimuth2cartesian(120, 32) * 0.1).squeeze() + + model, active_cells = create_block_model(mag_mesh, two_blocks, [M1, M2]) model_reduced = model[active_cells].reshape(-1, order="F") + # Create reduced identity map for Linear Problem + identity_map = maps.IdentityMap(nP=int(sum(active_cells)) * 3) - # Create plane of observations - xr = np.linspace(-20, 20, nx) - yr = np.linspace(-20, 20, ny) - X, Y = np.meshgrid(xr, yr) - Z = np.ones_like(X) * 3.0 - locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] - components = ["bx", "by", "bz", "tmi"] - - rxLoc = mag.Point(locXyz, components=components) - srcField = mag.SourceField([rxLoc], parameters=H0) - survey = mag.Survey(srcField) - - # Create 
reduced identity map for Linear Pproblem - idenMap = maps.IdentityMap(nP=int(sum(active_cells)) * 3) + survey = create_mag_survey( + components=["bx", "by", "bz", "tmi"], + receiver_locations=receiver_locations, + inducing_field_params=inducing_field_params, + ) sim = mag.Simulation3DIntegral( - mesh, + mag_mesh, survey=survey, - chiMap=idenMap, + chiMap=identity_map, ind_active=active_cells, - store_sensitivities="forward_only", + sensitivity_path=str(tmp_path / f"{engine}"), + store_sensitivities=store_sensitivities, model_type="vector", - n_processes=None, + engine=engine, + **parallel_kwargs, ) data = sim.dpred(model_reduced).reshape(-1, 4) # Compute analytical response from magnetic prism + block1, block2 = two_blocks prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], M1 * np.linalg.norm(b0) / mu_0) prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -M1 * np.linalg.norm(b0) / mu_0) prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], M2 * np.linalg.norm(b0) / mu_0) d = ( - prism_1.magnetic_flux_density(locXyz) - + prism_2.magnetic_flux_density(locXyz) - + prism_3.magnetic_flux_density(locXyz) + prism_1.magnetic_flux_density(receiver_locations) + + prism_2.magnetic_flux_density(receiver_locations) + + prism_3.magnetic_flux_density(receiver_locations) ) tmi = sim.tmi_projection - np.testing.assert_allclose(data[:, 0], d[:, 0]) - np.testing.assert_allclose(data[:, 1], d[:, 1]) - np.testing.assert_allclose(data[:, 2], d[:, 2]) - np.testing.assert_allclose(data[:, 3], d @ tmi) - - -def test_ana_mag_amp_forward(): - nx = 5 - ny = 5 - - H0 = (50000.0, 60.0, 250.0) - b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) - - M1 = utils.mat_utils.dip_azimuth2cartesian(45, -40) * 0.05 - M2 = utils.mat_utils.dip_azimuth2cartesian(120, 32) * 0.1 - - # Define a mesh - cs = 0.2 - hxind = [(cs, 41)] - hyind = [(cs, 41)] - hzind = [(cs, 41)] - mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") - - # create a model of two blocks, 1 inside the other - block1 = 
np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) - block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) - - def get_block_inds(grid, block): - return np.where( - (grid[:, 0] > block[0, 0]) - & (grid[:, 0] < block[0, 1]) - & (grid[:, 1] > block[1, 0]) - & (grid[:, 1] < block[1, 1]) - & (grid[:, 2] > block[2, 0]) - & (grid[:, 2] < block[2, 1]) - ) - - block1_inds = get_block_inds(mesh.cell_centers, block1) - block2_inds = get_block_inds(mesh.cell_centers, block2) - - model = np.zeros((mesh.n_cells, 3)) - model[block1_inds] = M1 - model[block2_inds] = M2 - - active_cells = np.any(model != 0.0, axis=1) + # Check results + rtol, atol = 9e-6, 3e-7 + np.testing.assert_allclose(data[:, 0], d[:, 0], rtol=rtol, atol=atol) + np.testing.assert_allclose(data[:, 1], d[:, 1], rtol=rtol, atol=atol) + np.testing.assert_allclose(data[:, 2], d[:, 2], rtol=rtol, atol=atol) + np.testing.assert_allclose(data[:, 3], d @ tmi, rtol=rtol, atol=atol) + + +@pytest.mark.parametrize( + "engine, parallel_kwargs", + [ + ("geoana", {"n_processes": None}), + ("geoana", {"n_processes": 1}), + ("choclo", {"choclo_parallel": False}), + ("choclo", {"choclo_parallel": True}), + ], + ids=["geoana_serial", "geoana_parallel", "choclo_serial", "choclo_parallel"], +) +@pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) +def test_ana_mag_amp_forward( + engine, + parallel_kwargs, + store_sensitivities, + tmp_path, + mag_mesh, + two_blocks, + receiver_locations, + inducing_field, +): + inducing_field_params, b0 = inducing_field + M1 = (utils.mat_utils.dip_azimuth2cartesian(45, -40) * 0.05).squeeze() + M2 = (utils.mat_utils.dip_azimuth2cartesian(120, 32) * 0.1).squeeze() + + model, active_cells = create_block_model(mag_mesh, two_blocks, [M1, M2]) model_reduced = model[active_cells].reshape(-1, order="F") + # Create reduced identity map for Linear Problem + identity_map = maps.IdentityMap(nP=int(sum(active_cells)) * 3) - # Create plane of observations - xr = np.linspace(-20, 
20, nx) - yr = np.linspace(-20, 20, ny) - X, Y = np.meshgrid(xr, yr) - Z = np.ones_like(X) * 3.0 - locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] - components = ["bx", "by", "bz"] - - rxLoc = mag.Point(locXyz, components=components) - srcField = mag.SourceField([rxLoc], parameters=H0) - survey = mag.Survey(srcField) - - # Create reduced identity map for Linear Pproblem - idenMap = maps.IdentityMap(nP=int(sum(active_cells)) * 3) + survey = create_mag_survey( + components=["bx", "by", "bz"], + receiver_locations=receiver_locations, + inducing_field_params=inducing_field_params, + ) sim = mag.Simulation3DIntegral( - mesh, + mag_mesh, survey=survey, - chiMap=idenMap, + chiMap=identity_map, ind_active=active_cells, - store_sensitivities="forward_only", + sensitivity_path=str(tmp_path / f"{engine}"), + store_sensitivities=store_sensitivities, model_type="vector", is_amplitude_data=True, - n_processes=None, + engine=engine, + **parallel_kwargs, ) data = sim.dpred(model_reduced) # Compute analytical response from magnetic prism + block1, block2 = two_blocks prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], M1 * np.linalg.norm(b0) / mu_0) prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -M1 * np.linalg.norm(b0) / mu_0) prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], M2 * np.linalg.norm(b0) / mu_0) d = ( - prism_1.magnetic_flux_density(locXyz) - + prism_2.magnetic_flux_density(locXyz) - + prism_3.magnetic_flux_density(locXyz) + prism_1.magnetic_flux_density(receiver_locations) + + prism_2.magnetic_flux_density(receiver_locations) + + prism_3.magnetic_flux_density(receiver_locations) ) d_amp = np.linalg.norm(d, axis=1) - np.testing.assert_allclose(data, d_amp) - - -if __name__ == "__main__": - unittest.main() + # Check results + rtol, atol = 1e-7, 1e-6 + np.testing.assert_allclose(data, d_amp, rtol=rtol, atol=atol) From a81dfb7526e80bc40765cae78fe4629be40ce2ca Mon Sep 17 00:00:00 2001 From: Thibaut Astic <97514898+thibaut-kobold@users.noreply.github.com> 
Date: Wed, 22 Nov 2023 13:21:02 -0800 Subject: [PATCH 107/164] Update SimPEG/potential_fields/magnetics/simulation.py Co-authored-by: Santiago Soler --- SimPEG/potential_fields/magnetics/simulation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 67a5a4194b..6da04b4ec1 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -284,7 +284,7 @@ def evaluate_integral(self, receiver_location, components): node_evals["gxxx"] = prism_fzzz(dy, dz, dx) node_evals["gxxy"] = prism_fxxy(dx, dy, dz) node_evals["gxxz"] = prism_fxxz(dx, dy, dz) - if any(s in components for s in ["bxy", "tmi_x", "tmi_y"]): + if "bxy" in components or "tmi_x" in components or "tmi_y" in components: if "gxxy" not in node_evals: node_evals["gxxy"] = prism_fxxy(dx, dy, dz) node_evals["gyyx"] = prism_fxxz(dy, dz, dx) From 6c0d0c0ef6a44e0ae3e329b966fad4704555d268 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 22 Nov 2023 13:27:01 -0800 Subject: [PATCH 108/164] list syntax efficiency --- SimPEG/potential_fields/magnetics/simulation.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 6da04b4ec1..11fdfb5a70 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -289,24 +289,24 @@ def evaluate_integral(self, receiver_location, components): node_evals["gxxy"] = prism_fxxy(dx, dy, dz) node_evals["gyyx"] = prism_fxxz(dy, dz, dx) node_evals["gxyz"] = prism_fxyz(dx, dy, dz) - if any(s in components for s in ["bxz", "tmi_x", "tmi_z"]): + if "bxz" in components or "tmi_x" in components or "tmi_z" in components: if "gxxz" not in node_evals: node_evals["gxxz"] = prism_fxxz(dx, dy, dz) if "gxyz" not in node_evals: node_evals["gxyz"] = 
prism_fxyz(dx, dy, dz) node_evals["gzzx"] = prism_fxxy(dz, dx, dy) - if any(s in components for s in ["byy", "tmi_y"]): + if "byy" in components or "tmi_y" in components: if "gyyx" not in node_evals: node_evals["gyyx"] = prism_fxxz(dy, dz, dx) node_evals["gyyy"] = prism_fzzz(dz, dx, dy) node_evals["gyyz"] = prism_fxxy(dy, dz, dx) - if any(s in components for s in ["byz", "tmi_y", "tmi_z"]): + if "byz" in components or "tmi_y" in components or "tmi_z" in components: if "gxyz" not in node_evals: node_evals["gxyz"] = prism_fxyz(dx, dy, dz) if "gyyz" not in node_evals: node_evals["gyyz"] = prism_fxxy(dy, dz, dx) node_evals["gzzy"] = prism_fxxz(dz, dx, dy) - if any(s in components for s in ["bzz", "tmi_z"]): + if "bzz" in components or "tmi_z" in components: if "gzzx" not in node_evals: node_evals["gzzx"] = prism_fxxy(dz, dx, dy) if "gzzy" not in node_evals: From dff853c3540e743a48cbcd8f756fd9a9bead9ff4 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 22 Nov 2023 14:38:28 -0800 Subject: [PATCH 109/164] add finite difference test --- tests/pf/test_forward_Mag_Linear.py | 30 +++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/tests/pf/test_forward_Mag_Linear.py b/tests/pf/test_forward_Mag_Linear.py index 74c1480380..0ff2a20a29 100644 --- a/tests/pf/test_forward_Mag_Linear.py +++ b/tests/pf/test_forward_Mag_Linear.py @@ -106,8 +106,8 @@ def get_block_inds(grid, block): def test_ana_mag_tmi_grad_forward(): - nx = 5 - ny = 5 + nx = 41 + ny = 41 H0 = (50000.0, 60.0, 250.0) b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) @@ -150,11 +150,13 @@ def get_block_inds(grid, block): # Create plane of observations xr = np.linspace(-20, 20, nx) + dxr = xr[1] - xr[0] yr = np.linspace(-20, 20, ny) + dyr = yr[1] - yr[0] X, Y = np.meshgrid(xr, yr) Z = np.ones_like(X) * 3.0 locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] - components = ["tmi_x", "tmi_y", "tmi_z"] + components = ["tmi", "tmi_x", "tmi_y", "tmi_z"] rxLoc = mag.Point(locXyz, 
components=components) srcField = mag.UniformBackgroundField( @@ -175,9 +177,10 @@ def get_block_inds(grid, block): ) data = sim.dpred(model_reduced) - d_x = data[0::3] - d_y = data[1::3] - d_z = data[2::3] + tmi = data[0::4] + d_x = data[1::4] + d_y = data[2::4] + d_z = data[3::4] # Compute analytical response from magnetic prism prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], chi1 * b0 / mu_0) @@ -196,6 +199,21 @@ def get_block_inds(grid, block): np.testing.assert_allclose(d_y, tmi_y, rtol=1e-10, atol=1e-12) np.testing.assert_allclose(d_z, tmi_z, rtol=1e-10, atol=1e-12) + # finite difference test x-grad + np.testing.assert_allclose( + np.diff(tmi.reshape(nx, ny, order="F")[:, ::2], axis=1) / (2 * dyr), + tmi_y.reshape(nx, ny, order="F")[:, 1::2], + atol=1.0, + rtol=1e-1, + ) + # finite difference test y-grad + np.testing.assert_allclose( + np.diff(tmi.reshape(nx, ny, order="F")[::2, :], axis=0) / (2 * dxr), + tmi_x.reshape(nx, ny, order="F")[1::2, :], + atol=1.0, + rtol=1e-1, + ) + def test_ana_mag_grad_forward(): nx = 5 From 503b7a4f061bcf5fa84b146863ae8a4c17e92651 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 22 Nov 2023 14:41:03 -0800 Subject: [PATCH 110/164] typo --- tests/pf/test_forward_Mag_Linear.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/pf/test_forward_Mag_Linear.py b/tests/pf/test_forward_Mag_Linear.py index 0ff2a20a29..caa3dcf05b 100644 --- a/tests/pf/test_forward_Mag_Linear.py +++ b/tests/pf/test_forward_Mag_Linear.py @@ -199,14 +199,14 @@ def get_block_inds(grid, block): np.testing.assert_allclose(d_y, tmi_y, rtol=1e-10, atol=1e-12) np.testing.assert_allclose(d_z, tmi_z, rtol=1e-10, atol=1e-12) - # finite difference test x-grad + # finite difference test y-grad np.testing.assert_allclose( np.diff(tmi.reshape(nx, ny, order="F")[:, ::2], axis=1) / (2 * dyr), tmi_y.reshape(nx, ny, order="F")[:, 1::2], atol=1.0, rtol=1e-1, ) - # finite difference test y-grad + # finite difference test x-grad 
np.testing.assert_allclose( np.diff(tmi.reshape(nx, ny, order="F")[::2, :], axis=0) / (2 * dxr), tmi_x.reshape(nx, ny, order="F")[1::2, :], From f8de50d6a6654ab95bc3513e02bc9915de20b7bd Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 22 Nov 2023 14:46:29 -0800 Subject: [PATCH 111/164] refine test --- tests/pf/test_forward_Mag_Linear.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/pf/test_forward_Mag_Linear.py b/tests/pf/test_forward_Mag_Linear.py index caa3dcf05b..662556b93e 100644 --- a/tests/pf/test_forward_Mag_Linear.py +++ b/tests/pf/test_forward_Mag_Linear.py @@ -106,8 +106,8 @@ def get_block_inds(grid, block): def test_ana_mag_tmi_grad_forward(): - nx = 41 - ny = 41 + nx = 61 + ny = 61 H0 = (50000.0, 60.0, 250.0) b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) From 434963d4333b1a058929aa4fc4794b92e5b8b7e0 Mon Sep 17 00:00:00 2001 From: Seogi Kang Date: Wed, 22 Nov 2023 15:44:23 -0800 Subject: [PATCH 112/164] Update SimPEG/electromagnetics/base_1d.py Co-authored-by: domfournier --- SimPEG/electromagnetics/base_1d.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/SimPEG/electromagnetics/base_1d.py b/SimPEG/electromagnetics/base_1d.py index c05e869d40..f62eaff47f 100644 --- a/SimPEG/electromagnetics/base_1d.py +++ b/SimPEG/electromagnetics/base_1d.py @@ -370,9 +370,8 @@ def _compute_hankel_coefficients(self): is_mag_dipole = class_name == "MagDipole" is_wire_loop = class_name == "LineCurrent" - if is_circular_loop: - if np.any(src.orientation[:-1] != 0.0): - raise ValueError("Can only simulate horizontal circular loops") + if is_circular_loop and np.any(src.orientation[:-1] != 0.0): + raise ValueError("Can only simulate horizontal circular loops") if self.hMap is not None: h = hvec[i_src] From f0df62cc2f7de3a0c8333c3958024392c6f19f3e Mon Sep 17 00:00:00 2001 From: Seogi Kang Date: Wed, 22 Nov 2023 15:46:53 -0800 Subject: [PATCH 113/164] Update SimPEG/electromagnetics/base_1d_stitched.py Co-authored-by: 
domfournier --- SimPEG/electromagnetics/base_1d_stitched.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/SimPEG/electromagnetics/base_1d_stitched.py b/SimPEG/electromagnetics/base_1d_stitched.py index 3892b851f5..5a90aada31 100644 --- a/SimPEG/electromagnetics/base_1d_stitched.py +++ b/SimPEG/electromagnetics/base_1d_stitched.py @@ -25,12 +25,6 @@ class BaseStitchedEM1DSimulation(BaseSimulation): """ _formulation = "1D" - # _coefficients = [] - # _coefficients_set = False - - # _Jmatrix_sigma = None - # _Jmatrix_height = None - # _J = None # Properties for electrical conductivity/resistivity sigma, sigmaMap, sigmaDeriv = props.Invertible( From 006b48e69e67d965e3c67bcfa42ff23fd97db703 Mon Sep 17 00:00:00 2001 From: Seogi Kang Date: Wed, 22 Nov 2023 15:47:24 -0800 Subject: [PATCH 114/164] Update SimPEG/electromagnetics/frequency_domain/survey.py Co-authored-by: domfournier --- SimPEG/electromagnetics/frequency_domain/survey.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/electromagnetics/frequency_domain/survey.py b/SimPEG/electromagnetics/frequency_domain/survey.py index 65f71a9664..be120ba17b 100644 --- a/SimPEG/electromagnetics/frequency_domain/survey.py +++ b/SimPEG/electromagnetics/frequency_domain/survey.py @@ -108,7 +108,7 @@ def get_sources_by_frequency(self, frequency): return self._frequency_dict[frequency] @property - def source_location_by_sounding_dict(self): + def source_location_by_sounding(self) -> dict: """ Source locations in the survey as a dictionary """ From 439916d687abc8ab810c0ed80b2041ccaf7fced5 Mon Sep 17 00:00:00 2001 From: Seogi Kang Date: Wed, 22 Nov 2023 15:55:40 -0800 Subject: [PATCH 115/164] Update SimPEG/electromagnetics/base_1d_stitched.py Co-authored-by: domfournier --- SimPEG/electromagnetics/base_1d_stitched.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/SimPEG/electromagnetics/base_1d_stitched.py b/SimPEG/electromagnetics/base_1d_stitched.py index 5a90aada31..d7e21e726b 
100644 --- a/SimPEG/electromagnetics/base_1d_stitched.py +++ b/SimPEG/electromagnetics/base_1d_stitched.py @@ -143,7 +143,8 @@ def topo(self, value): @property def parallel(self): - """Parallel + """ + Run the computation as a parallel process. Returns ------- From 71bab8f9ecc489075d8cb7492826f7900bd8eda7 Mon Sep 17 00:00:00 2001 From: Seogi Kang Date: Wed, 22 Nov 2023 15:55:47 -0800 Subject: [PATCH 116/164] Update SimPEG/electromagnetics/base_1d_stitched.py Co-authored-by: domfournier --- SimPEG/electromagnetics/base_1d_stitched.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/electromagnetics/base_1d_stitched.py b/SimPEG/electromagnetics/base_1d_stitched.py index d7e21e726b..812a0a9096 100644 --- a/SimPEG/electromagnetics/base_1d_stitched.py +++ b/SimPEG/electromagnetics/base_1d_stitched.py @@ -50,7 +50,7 @@ class BaseStitchedEM1DSimulation(BaseSimulation): ) # Additional properties - h, hMap, hDeriv = props.Invertible("Receiver Height (m), h > 0") + height, heightMap, heightDeriv = props.Invertible("Receiver Height (m), h > 0") thicknesses, thicknessesMap, thicknessesDeriv = props.Invertible( "layer thicknesses (m)" From f7b84ab2976f588dd33e0337f0ffe17c54867e2a Mon Sep 17 00:00:00 2001 From: Seogi Kang Date: Wed, 22 Nov 2023 15:56:27 -0800 Subject: [PATCH 117/164] Update SimPEG/electromagnetics/frequency_domain/simulation_1d.py Co-authored-by: domfournier --- .../frequency_domain/simulation_1d.py | 21 ------------------- 1 file changed, 21 deletions(-) diff --git a/SimPEG/electromagnetics/frequency_domain/simulation_1d.py b/SimPEG/electromagnetics/frequency_domain/simulation_1d.py index bd80116632..902ce90c61 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation_1d.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation_1d.py @@ -149,27 +149,6 @@ def getJ(self, m, f=None): # Grab a copy C0s_dh = C0s.copy() C1s_dh = C1s.copy() - # h_vec = self.h - # i = 0 - # for i_src, src in enumerate(self.survey.source_list): - # 
class_name = type(src).__name__ - # is_wire_loop = class_name == "LineCurrent" - - # # h = h_vec[i_src] - # if is_wire_loop: - # n_quad_points = src.n_segments * self.n_points_per_path - # nD = sum( - # rx.locations.shape[0] * n_quad_points - # for rx in src.receiver_list - # ) - # else: - # nD = sum(rx.locations.shape[0] for rx in src.receiver_list) - # ip1 = i + nD - # # v = np.exp(-lambs[i:ip1] * h) - # C0s_dh[i:ip1] *= - lambs[i:ip1] - # C1s_dh[i:ip1] *= - lambs[i:ip1] - # i = ip1 - # J will be n_d * n_src (each source has it's own h)... # It seems to be the 2 * lambs to be multiplied, but had to drop factor of 2 C0s_dh *= -lambs From 8a48a9d00c29776324d0d88d479390a26aa709d9 Mon Sep 17 00:00:00 2001 From: Seogi Kang Date: Wed, 22 Nov 2023 15:56:42 -0800 Subject: [PATCH 118/164] Update SimPEG/electromagnetics/frequency_domain/survey.py Co-authored-by: domfournier --- SimPEG/electromagnetics/frequency_domain/survey.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/electromagnetics/frequency_domain/survey.py b/SimPEG/electromagnetics/frequency_domain/survey.py index be120ba17b..c78b1e6e7b 100644 --- a/SimPEG/electromagnetics/frequency_domain/survey.py +++ b/SimPEG/electromagnetics/frequency_domain/survey.py @@ -127,7 +127,7 @@ def get_sources_by_sounding_number(self, i_sounding): return self._source_location_dict[i_sounding] @property - def vnD_by_sounding_dict(self): + def vnD_by_sounding(self) -> dict: if getattr(self, "_vnD_by_sounding_dict", None) is None: self._vnD_by_sounding_dict = {} for i_sounding in self.source_location_by_sounding_dict: From 22a0906bbf6528d0f1e64a10e7680f829ec9c9b8 Mon Sep 17 00:00:00 2001 From: Seogi Kang Date: Wed, 22 Nov 2023 15:57:12 -0800 Subject: [PATCH 119/164] Update SimPEG/electromagnetics/base_1d_stitched.py Co-authored-by: domfournier --- SimPEG/electromagnetics/base_1d_stitched.py | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/SimPEG/electromagnetics/base_1d_stitched.py 
b/SimPEG/electromagnetics/base_1d_stitched.py index 812a0a9096..cad8f96de2 100644 --- a/SimPEG/electromagnetics/base_1d_stitched.py +++ b/SimPEG/electromagnetics/base_1d_stitched.py @@ -361,27 +361,6 @@ def input_args(self, i_sounding, output_type="forward"): ) return output - # This is the most expensive process, but required once - # May need to find unique - # def input_args_for_coeff(self, i_sounding): - # output = ( - # self.survey.get_sources_by_sounding_number(i_sounding), - # self.topo[i_sounding, :], - # self.Thicknesses[i_sounding,:], - # self.Sigma[i_sounding, :], - # self.Eta[i_sounding, :], - # self.Tau[i_sounding, :], - # self.C[i_sounding, :], - # self.Chi[i_sounding, :], - # self.dChi[i_sounding, :], - # self.Tau1[i_sounding, :], - # self.Tau2[i_sounding, :], - # self.H[i_sounding], - # 'forward', - # True, - # [], - # ) - # return output def fields(self, m): if self.verbose: From 643b76aef5c0e0ba132474f3026476ada7f68c1c Mon Sep 17 00:00:00 2001 From: sgkang Date: Wed, 22 Nov 2023 16:33:45 -0800 Subject: [PATCH 120/164] incorporate dom's suggestions --- SimPEG/electromagnetics/base_1d_stitched.py | 144 ++++++++---------- .../frequency_domain/survey.py | 34 ++--- SimPEG/electromagnetics/time_domain/survey.py | 36 ++--- 3 files changed, 98 insertions(+), 116 deletions(-) diff --git a/SimPEG/electromagnetics/base_1d_stitched.py b/SimPEG/electromagnetics/base_1d_stitched.py index cad8f96de2..c9502ae342 100644 --- a/SimPEG/electromagnetics/base_1d_stitched.py +++ b/SimPEG/electromagnetics/base_1d_stitched.py @@ -50,7 +50,7 @@ class BaseStitchedEM1DSimulation(BaseSimulation): ) # Additional properties - height, heightMap, heightDeriv = props.Invertible("Receiver Height (m), h > 0") + h, hMap, hDeriv = props.Invertible("Receiver Height (m), h > 0") thicknesses, thicknessesMap, thicknessesDeriv = props.Invertible( "layer thicknesses (m)" @@ -194,7 +194,7 @@ def n_layer(self): @property def n_sounding(self): - return 
len(self.survey.source_location_by_sounding_dict) + return len(self.survey.source_location_by_sounding) @property def data_index(self): @@ -202,107 +202,107 @@ def data_index(self): # ------------- For physical properties ------------- # @property - def Sigma(self): - if getattr(self, "_Sigma", None) is None: + def sigma_matrix(self): + if getattr(self, "_sigma_matrix", None) is None: # Ordering: first z then x - self._Sigma = self.sigma.reshape((self.n_sounding, self.n_layer)) - return self._Sigma + self._sigma_matrix = self.sigma.reshape((self.n_sounding, self.n_layer)) + return self._sigma_matrix @property - def Thicknesses(self): - if getattr(self, "_Thicknesses", None) is None: + def thickness_matrix(self): + if getattr(self, "_thickness_matrix", None) is None: # Ordering: first z then x if len(self.thicknesses) == int(self.n_sounding * (self.n_layer - 1)): - self._Thicknesses = self.thicknesses.reshape( + self._thickness_matrix = self.thicknesses.reshape( (self.n_sounding, self.n_layer - 1) ) else: - self._Thicknesses = np.tile(self.thicknesses, (self.n_sounding, 1)) - return self._Thicknesses + self._thickness_matrix = np.tile(self.thicknesses, (self.n_sounding, 1)) + return self._thickness_matrix @property - def Eta(self): - if getattr(self, "_Eta", None) is None: + def eta_matrix(self): + if getattr(self, "_eta_matrix", None) is None: # Ordering: first z then x if self.eta is None: - self._Eta = np.zeros( + self._eta_matrix = np.zeros( (self.n_sounding, self.n_layer), dtype=float, order="C" ) else: - self._Eta = self.eta.reshape((self.n_sounding, self.n_layer)) - return self._Eta + self._eta_matrix = self.eta.reshape((self.n_sounding, self.n_layer)) + return self._eta_matrix @property - def Tau(self): - if getattr(self, "_Tau", None) is None: + def tau_matrix(self): + if getattr(self, "_tau_matrix", None) is None: # Ordering: first z then x if self.tau is None: - self._Tau = 1e-3 * np.ones( + self._tau_matrix = 1e-3 * np.ones( (self.n_sounding, 
self.n_layer), dtype=float, order="C" ) else: - self._Tau = self.tau.reshape((self.n_sounding, self.n_layer)) - return self._Tau + self._tau_matrix = self.tau.reshape((self.n_sounding, self.n_layer)) + return self._tau_matrix @property - def C(self): - if getattr(self, "_C", None) is None: + def c_matrix(self): + if getattr(self, "_c_matrix", None) is None: # Ordering: first z then x if self.c is None: - self._C = np.ones( + self._c_matrix = np.ones( (self.n_sounding, self.n_layer), dtype=float, order="C" ) else: - self._C = self.c.reshape((self.n_sounding, self.n_layer)) - return self._C + self._c_matrix = self.c.reshape((self.n_sounding, self.n_layer)) + return self._c_matrix @property - def Chi(self): - if getattr(self, "_Chi", None) is None: + def chi_matrix(self): + if getattr(self, "_chi_matrix", None) is None: # Ordering: first z then x if self.chi is None: - self._Chi = np.zeros( + self._chi_matrix = np.zeros( (self.n_sounding, self.n_layer), dtype=float, order="C" ) else: - self._Chi = self.chi.reshape((self.n_sounding, self.n_layer)) - return self._Chi + self._chi_matrix = self.chi.reshape((self.n_sounding, self.n_layer)) + return self._chi_matrix @property - def dChi(self): - if getattr(self, "_dChi", None) is None: + def dchi_matrix(self): + if getattr(self, "_dchi_matrix", None) is None: # Ordering: first z then x if self.dchi is None: - self._dChi = np.zeros( + self._dchi_matrix = np.zeros( (self.n_sounding, self.n_layer), dtype=float, order="C" ) else: - self._dChi = self.dchi.reshape((self.n_sounding, self.n_layer)) - return self._dChi + self._dchi_matrix = self.dchi.reshape((self.n_sounding, self.n_layer)) + return self._dchi_matrix @property - def Tau1(self): - if getattr(self, "_Tau1", None) is None: + def tau1_matrix(self): + if getattr(self, "_tau1_matrix", None) is None: # Ordering: first z then x if self.tau1 is None: - self._Tau1 = 1e-10 * np.ones( + self._tau1_matrix = 1e-10 * np.ones( (self.n_sounding, self.n_layer), dtype=float, order="C" 
) else: - self._Tau1 = self.tau1.reshape((self.n_sounding, self.n_layer)) - return self._Tau1 + self._tau1_matrix = self.tau1.reshape((self.n_sounding, self.n_layer)) + return self._tau1_matrix @property - def Tau2(self): - if getattr(self, "_Tau2", None) is None: + def tau2_matrix(self): + if getattr(self, "_tau2_matrix", None) is None: # Ordering: first z then x if self.tau2 is None: - self._Tau2 = 100.0 * np.ones( + self._tau2_matrix = 100.0 * np.ones( (self.n_sounding, self.n_layer), dtype=float, order="C" ) else: - self._Tau2 = self.tau2.reshape((self.n_sounding, self.n_layer)) - return self._Tau2 + self._tau2_matrix = self.tau2.reshape((self.n_sounding, self.n_layer)) + return self._tau2_matrix @property def JtJ_sigma(self): @@ -312,7 +312,7 @@ def JtJ_height(self): return self._JtJ_height @property - def H(self): + def h_vector(self): if self.hMap is None: h = self.source_locations_for_sounding[:, 2] - self.topo[:, 2] return h @@ -345,19 +345,17 @@ def input_args(self, i_sounding, output_type="forward"): output = ( self.survey.get_sources_by_sounding_number(i_sounding), self.topo[i_sounding, :], - self.Thicknesses[i_sounding, :], - self.Sigma[i_sounding, :], - self.Eta[i_sounding, :], - self.Tau[i_sounding, :], - self.C[i_sounding, :], - self.Chi[i_sounding, :], - self.dChi[i_sounding, :], - self.Tau1[i_sounding, :], - self.Tau2[i_sounding, :], - self.H[i_sounding], + self.thickness_matrix[i_sounding, :], + self.sigma_matrix[i_sounding, :], + self.eta_matrix[i_sounding, :], + self.tau_matrix[i_sounding, :], + self.c_matrix[i_sounding, :], + self.chi_matrix[i_sounding, :], + self.dchi_matrix[i_sounding, :], + self.tau1_matrix[i_sounding, :], + self.tau2_matrix[i_sounding, :], + self.h_vector[i_sounding], output_type, - # False, - # self._coefficients[i_sounding], ) return output @@ -382,7 +380,7 @@ def dpred(self, m, f=None): @property def sounding_number(self): self._sounding_number = [ - key for key in self.survey.source_location_by_sounding_dict.keys() + 
key for key in self.survey.source_location_by_sounding.keys() ] return self._sounding_number @@ -396,28 +394,12 @@ def source_locations_for_sounding(self): if getattr(self, "_source_locations_for_sounding", None) is None: self._source_locations_for_sounding = np.vstack( [ - self.survey._source_location_by_sounding_dict[ii][0] + self.survey._source_location_by_sounding[ii][0] for ii in range(self.n_sounding) ] ) return self._source_locations_for_sounding - - # def chunks(self, lst, n): - # """Yield successive n-sized chunks from lst.""" - # for i in range(0, len(lst), n): - # yield lst[i:i + n] - - # @property - # def sounding_number_chunks(self): - # self._sounding_number_chunks = list(self.chunks(self.sounding_number, self.n_sounding_for_chunk)) - # return self._sounding_number_chunks - - # def input_args_by_chunk(self, i_chunk, output_type): - # args_by_chunks = [] - # for i_sounding in self.sounding_number_chunks[i_chunk]: - # args_by_chunks.append(self.input_args(i_sounding, output_type)) - # return args_by_chunks - + def set_null_topography(self): self.topo = self.source_locations_for_sounding.copy() self.topo[:, 2] = 0.0 @@ -438,7 +420,7 @@ def set_ij_n_layer(self, n_layer=None): m = n_layer for i_sounding in range(self.n_sounding): - n = self.survey.vnD_by_sounding_dict[i_sounding] + n = self.survey.vnD_by_sounding[i_sounding] J_temp = np.tile(np.arange(m), (n, 1)) + shift_for_J I_temp = ( np.tile(np.arange(n), (1, m)).reshape((n, m), order="F") + shift_for_I @@ -460,7 +442,7 @@ def set_ij_height(self): J = [] I = np.arange(self.survey.nD) for i_sounding in range(self.n_sounding): - n = self.survey.vnD_by_sounding_dict[i_sounding] + n = self.survey.vnD_by_sounding[i_sounding] J.append(np.ones(n) * i_sounding) J = np.hstack(J).astype(int) return (I, J) @@ -509,7 +491,7 @@ def deleteTheseOnModelUpdate(self): toDelete = super().deleteTheseOnModelUpdate if self.fix_Jmatrix is False: toDelete += [ - "_Sigma", + "_sigma_matrix", "_J", "_Jmatrix_sigma", 
"_Jmatrix_height", diff --git a/SimPEG/electromagnetics/frequency_domain/survey.py b/SimPEG/electromagnetics/frequency_domain/survey.py index c78b1e6e7b..9fe0858c11 100644 --- a/SimPEG/electromagnetics/frequency_domain/survey.py +++ b/SimPEG/electromagnetics/frequency_domain/survey.py @@ -16,22 +16,22 @@ def __init__(self, source_list, **kwargs): super(Survey, self).__init__(source_list, **kwargs) _frequency_dict = {} - _source_location_dict = {} - _source_location_by_sounding_dict = {} + _source_location = {} + _source_location_by_sounding = {} for src in self.source_list: if src.frequency not in _frequency_dict: _frequency_dict[src.frequency] = [] _frequency_dict[src.frequency] += [src] - if src.i_sounding not in _source_location_dict: - _source_location_dict[src.i_sounding] = [] - _source_location_by_sounding_dict[src.i_sounding] = [] - _source_location_dict[src.i_sounding] += [src] - _source_location_by_sounding_dict[src.i_sounding] += [src.location] + if src.i_sounding not in _source_location: + _source_location[src.i_sounding] = [] + _source_location_by_sounding[src.i_sounding] = [] + _source_location[src.i_sounding] += [src] + _source_location_by_sounding[src.i_sounding] += [src.location] self._frequency_dict = _frequency_dict self._frequencies = sorted([f for f in self._frequency_dict]) - self._source_location_dict = _source_location_dict - self._source_location_by_sounding_dict = _source_location_by_sounding_dict + self._source_location = _source_location + self._source_location_by_sounding = _source_location_by_sounding @property def source_list(self): @@ -112,7 +112,7 @@ def source_location_by_sounding(self) -> dict: """ Source locations in the survey as a dictionary """ - return self._source_location_by_sounding_dict + return self._source_location_by_sounding def get_sources_by_sounding_number(self, i_sounding): """ @@ -122,18 +122,18 @@ def get_sources_by_sounding_number(self, i_sounding): :return: sources at the sepcified source location """ assert ( 
- i_sounding in self._source_location_dict + i_sounding in self._source_location ), "The requested sounding is not in this survey." - return self._source_location_dict[i_sounding] + return self._source_location[i_sounding] @property def vnD_by_sounding(self) -> dict: - if getattr(self, "_vnD_by_sounding_dict", None) is None: - self._vnD_by_sounding_dict = {} - for i_sounding in self.source_location_by_sounding_dict: + if getattr(self, "_vnD_by_sounding", None) is None: + self._vnD_by_sounding = {} + for i_sounding in self.source_location_by_sounding: source_list = self.get_sources_by_sounding_number(i_sounding) nD = 0 for src in source_list: nD += src.nD - self._vnD_by_sounding_dict[i_sounding] = nD - return self._vnD_by_sounding_dict + self._vnD_by_sounding[i_sounding] = nD + return self._vnD_by_sounding diff --git a/SimPEG/electromagnetics/time_domain/survey.py b/SimPEG/electromagnetics/time_domain/survey.py index 4882b45447..c0464bd6c4 100644 --- a/SimPEG/electromagnetics/time_domain/survey.py +++ b/SimPEG/electromagnetics/time_domain/survey.py @@ -20,18 +20,18 @@ class Survey(BaseSurvey): def __init__(self, source_list, **kwargs): super(Survey, self).__init__(source_list, **kwargs) - _source_location_dict = {} - _source_location_by_sounding_dict = {} + _source_location = {} + _source_location_by_sounding = {} for src in source_list: - if src.i_sounding not in _source_location_dict: - _source_location_dict[src.i_sounding] = [] - _source_location_by_sounding_dict[src.i_sounding] = [] - _source_location_dict[src.i_sounding] += [src] - _source_location_by_sounding_dict[src.i_sounding] += [src.location] + if src.i_sounding not in _source_location: + _source_location[src.i_sounding] = [] + _source_location_by_sounding[src.i_sounding] = [] + _source_location[src.i_sounding] += [src] + _source_location_by_sounding[src.i_sounding] += [src.location] - self._source_location_dict = _source_location_dict - self._source_location_by_sounding_dict = 
_source_location_by_sounding_dict + self._source_location = _source_location + self._source_location_by_sounding = _source_location_by_sounding @property def source_list(self): @@ -51,11 +51,11 @@ def source_list(self, new_list): ) @property - def source_location_by_sounding_dict(self): + def source_location_by_sounding(self): """ Source location in the survey as a dictionary """ - return self._source_location_by_sounding_dict + return self._source_location_by_sounding def get_sources_by_sounding_number(self, i_sounding): """ @@ -65,18 +65,18 @@ def get_sources_by_sounding_number(self, i_sounding): :return: sources at the sepcified source location """ assert ( - i_sounding in self._source_location_dict + i_sounding in self._source_location ), "The requested sounding is not in this survey." - return self._source_location_dict[i_sounding] + return self._source_location[i_sounding] @property - def vnD_by_sounding_dict(self): - if getattr(self, "_vnD_by_sounding_dict", None) is None: - self._vnD_by_sounding_dict = {} - for i_sounding in self.source_location_by_sounding_dict: + def vnD_by_sounding(self): + if getattr(self, "_vnD_by_sounding", None) is None: + self._vnD_by_sounding = {} + for i_sounding in self.source_location_by_sounding: source_list = self.get_sources_by_sounding_number(i_sounding) nD = 0 for src in source_list: nD += src.nD - self._vnD_by_sounding_dict[i_sounding] = nD + self._vnD_by_sounding[i_sounding] = nD return self._vnD_by_sounding_dict From 1b5963572949f457ef38c1a65fbce11abf9e0cc7 Mon Sep 17 00:00:00 2001 From: sgkang Date: Wed, 22 Nov 2023 16:41:12 -0800 Subject: [PATCH 121/164] minor change --- SimPEG/electromagnetics/time_domain/survey.py | 2 +- tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py | 5 ++--- tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py | 8 +------- 3 files changed, 4 insertions(+), 11 deletions(-) diff --git a/SimPEG/electromagnetics/time_domain/survey.py b/SimPEG/electromagnetics/time_domain/survey.py index 
c0464bd6c4..929813d662 100644 --- a/SimPEG/electromagnetics/time_domain/survey.py +++ b/SimPEG/electromagnetics/time_domain/survey.py @@ -79,4 +79,4 @@ def vnD_by_sounding(self): for src in source_list: nD += src.nD self._vnD_by_sounding[i_sounding] = nD - return self._vnD_by_sounding_dict + return self._vnD_by_sounding diff --git a/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py b/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py index c59745676b..0663fc3cc6 100644 --- a/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py +++ b/tests/em/em1d/test_Stitched_EM1D_FD_jac_layers.py @@ -2,7 +2,7 @@ import unittest import numpy as np import SimPEG.electromagnetics.frequency_domain as fdem -from SimPEG import * +from SimPEG import maps, tests from discretize import TensorMesh np.random.seed(41) @@ -42,7 +42,7 @@ def setUp(self, parallel=False): ) ) - for i_freq, frequency in enumerate(frequencies): + for frequency in frequencies: src = fdem.sources.MagDipole( receiver_list, frequency, @@ -85,7 +85,6 @@ def test_EM1DFDJvec_Layers(self): def fwdfun(m): resp = self.sim.dpred(m) return resp - # return Hz def jacfun(m, dm): Jvec = self.sim.Jvec(m, dm) diff --git a/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py b/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py index f83840740a..8a93fba6e0 100644 --- a/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py +++ b/tests/em/em1d/test_Stitched_EM1D_TD_jac_layers.py @@ -2,7 +2,7 @@ import unittest import numpy as np import SimPEG.electromagnetics.time_domain as tdem -from SimPEG import * +from SimPEG import maps, tests from discretize import TensorMesh from pymatsolver import PardisoSolver @@ -38,15 +38,12 @@ def setUp(self, parallel=False): sigma = np.ones(mesh.nC) * 1.0 / 100.0 sigma[inds_1] = 1.0 / 10.0 sigma[inds] = 1.0 / 50.0 - sigma_em1d = sigma.reshape(mesh.vnC, order="F").flatten() x = mesh.cell_centers_x y = np.zeros_like(x) z = np.ones_like(x) * 30.0 source_locations = np.c_[x, y, z] - source_current = 1.0 
source_orientation = "z" - source_radius = 10.0 receiver_offset_r = 13.25 receiver_offset_z = 2.0 @@ -60,8 +57,6 @@ def setUp(self, parallel=False): topo = np.c_[x, y, z - 30.0].astype(float) - sigma_map = maps.ExpMap(mesh) - source_list = [] for i_sounding in range(0, n_sounding): @@ -134,7 +129,6 @@ def test_EM1TDJvec_Layers(self): def fwdfun(m): resp = self.sim.dpred(m) return resp - # return Hz def jacfun(m, dm): Jvec = self.sim.Jvec(m, dm) From 56aad920d95bfbafcfc6e81e24c9e862e13324ff Mon Sep 17 00:00:00 2001 From: sgkang Date: Wed, 22 Nov 2023 16:46:07 -0800 Subject: [PATCH 122/164] run black --- SimPEG/electromagnetics/base_1d_stitched.py | 3 +-- SimPEG/regularization/laterally_constrained.py | 3 ++- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/SimPEG/electromagnetics/base_1d_stitched.py b/SimPEG/electromagnetics/base_1d_stitched.py index c9502ae342..bbd43b676e 100644 --- a/SimPEG/electromagnetics/base_1d_stitched.py +++ b/SimPEG/electromagnetics/base_1d_stitched.py @@ -359,7 +359,6 @@ def input_args(self, i_sounding, output_type="forward"): ) return output - def fields(self, m): if self.verbose: print("Compute fields") @@ -399,7 +398,7 @@ def source_locations_for_sounding(self): ] ) return self._source_locations_for_sounding - + def set_null_topography(self): self.topo = self.source_locations_for_sounding.copy() self.topo[:, 2] = 0.0 diff --git a/SimPEG/regularization/laterally_constrained.py b/SimPEG/regularization/laterally_constrained.py index 1d46165b4e..b23f391a4e 100644 --- a/SimPEG/regularization/laterally_constrained.py +++ b/SimPEG/regularization/laterally_constrained.py @@ -80,7 +80,8 @@ def __init__( objfcts = [ SparseSmallness(mesh=self.regularization_mesh), SparseSmoothness(mesh=self.regularization_mesh, orientation="r"), - SparseSmoothness(mesh=self.regularization_mesh, orientation="z"), ] + SparseSmoothness(mesh=self.regularization_mesh, orientation="z"), + ] super().__init__( self.regularization_mesh, From 
df26e97130c0998cebdff01395ecd8624fc05647 Mon Sep 17 00:00:00 2001 From: dccowan Date: Fri, 24 Nov 2023 10:31:13 -0800 Subject: [PATCH 123/164] add some docstrings --- .../frequency_domain/fields.py | 41 ++++- .../frequency_domain/simulation.py | 94 ++++++++-- .../time_domain/simulation.py | 170 +++++++++++++++++- 3 files changed, 282 insertions(+), 23 deletions(-) diff --git a/SimPEG/electromagnetics/frequency_domain/fields.py b/SimPEG/electromagnetics/frequency_domain/fields.py index 31552ac12d..f3f4b3cf1e 100644 --- a/SimPEG/electromagnetics/frequency_domain/fields.py +++ b/SimPEG/electromagnetics/frequency_domain/fields.py @@ -623,6 +623,19 @@ def _charge_density(self, eSolution, source_list): class Fields3DElectricFieldFaceEdgeConductivity(Fields3DElectricField): + r""" + Fields object for Simulation3DElectricFieldFaceEdgeConductivity. + + In this case, the discrete Ohm's law relationship accounts for volume, face + and edge currents. So: + + .. math:: + \mathbf{M_e \, J} = \left ( \mathbf{M_{e\sigma} + M_{e\tau} + + M_{e\kappa}} \right ) \mathbf{e} + + :param discretize.base.BaseMesh mesh: mesh + :param SimPEG.electromagnetics.frequency_domain.SurveyFDEM.Survey survey: survey + """ def startup(self): self._edgeCurl = self.simulation.mesh.edge_curl self._aveE2CCV = self.simulation.mesh.aveE2CCV @@ -667,6 +680,19 @@ def _jDeriv_u(self, src, du_dm_v, adjoint=False): ) def _jDeriv_m(self, src, v, adjoint=False): + """ + Derivative of the current density with respect to the inversion model. + + This includes derivatives for volume, face and/or edge conductivities + depending on whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. + + :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source + :param numpy.ndarray v: vector to take product with + :param bool adjoint: adjoint? 
+ :rtype: numpy.ndarray + :return: product of the current density derivative with respect to the + inversion model with a vector + """ e = self[src, "e"] if adjoint: @@ -1014,8 +1040,15 @@ def _charge_density(self, bSolution, source_list): class Fields3DMagneticFluxDensityFaceEdgeConductivity(Fields3DMagneticFluxDensity): - """ - Fields object for Simulation3DMagneticFluxDensity. + r""" + Fields object for Simulation3DMagneticFluxDensityFaceEdgeConductivity. + + In this case, the discrete Ohm's law relationship accounts for volume, face + and edge currents. So: + + .. math:: + \mathbf{M_e \, J} = \left ( \mathbf{M_{e\sigma} + M_{e\tau} + + M_{e\kappa}} \right ) \mathbf{e} :param discretize.base.BaseMesh mesh: mesh :param SimPEG.electromagnetics.frequency_domain.SurveyFDEM.Survey survey: survey @@ -1023,12 +1056,8 @@ class Fields3DMagneticFluxDensityFaceEdgeConductivity(Fields3DMagneticFluxDensit def startup(self): self._edgeCurl = self.simulation.mesh.edge_curl - # self._MeSigma = self.simulation.MeSigma - # self._MeSigmaI = self.simulation.MeSigmaI self._MfMui = self.simulation.MfMui self._MfMuiDeriv = self.simulation.MfMuiDeriv - # self._MeSigmaDeriv = self.simulation.MeSigmaDeriv - # self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv self.__MeSigmaTauKappa = self.simulation._MeSigmaTauKappa self.__MeSigmaTauKappaI = self.simulation._MeSigmaTauKappaI self.__MeSigmaTauKappaDeriv = self.simulation._MeSigmaTauKappaDeriv diff --git a/SimPEG/electromagnetics/frequency_domain/simulation.py b/SimPEG/electromagnetics/frequency_domain/simulation.py index ead7c9c291..98604bd074 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation.py @@ -432,11 +432,47 @@ def getRHSDeriv(self, freq, src, v, adjoint=False): class Simulation3DElectricFieldFaceEdgeConductivity( Simulation3DElectricField, BaseFaceEdgeElectricalPDESimulation ): + r""" + By eliminating the magnetic flux density using + + .. 
math :: + + \mathbf{b} = \frac{1}{i \omega}\left(-\mathbf{C} \mathbf{e} + + \mathbf{s_m}\right) + + + we can write Maxwell's equations as a second order system in + :math:`mathbf{e}` only: + + .. math :: + + \left(\mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f} \mathbf{C} + + i \omega \left \mathbf{M^e_{\sigma} + M^e_\tau + M^e_\kappa} \right ) + \right) \mathbf{e} = \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f}\mathbf{s_m} + - i\omega\mathbf{M^e}\mathbf{s_e} + + which we solve for :math:`\mathbf{e}`. + + :param discretize.base.BaseMesh mesh: mesh + """ _solutionType = "eSolution" _formulation = "EB" fieldsPair = Fields3DElectricFieldFaceEdgeConductivity def getA(self, freq): + r""" + System matrix + + .. math :: + + \mathbf{A} = \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f} \mathbf{C} + + i \omega \left ( \mathbf{M^e_{\sigma}} + \mathbf{M^e_{\tau}} + + \mathbf{M^e_{\kappa}} \right) + + :param float freq: Frequency + :rtype: scipy.sparse.csr_matrix + :return: A + """ MfMui = self.MfMui C = self.mesh.edge_curl @@ -456,12 +492,9 @@ def getA(self, freq): def getADeriv_sigma(self, freq, u, v, adjoint=False): r""" Product of the derivative of our system matrix with respect to the - conductivity model and a vector - - .. math :: - - \frac{\mathbf{A}(\mathbf{m}) \mathbf{v}}{d \mathbf{m}_{\sigma}} = - i \omega \frac{d \mathbf{M^e_{\sigma}}(\mathbf{u})\mathbf{v} }{d\mathbf{m}} + electrical properties within the model and a vector. This includes + derivatives for volume, face and/or edge conductivities depending on + whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. :param float freq: frequency :param numpy.ndarray u: solution vector (nE,) @@ -477,6 +510,19 @@ def getADeriv_sigma(self, freq, u, v, adjoint=False): return 1j * omega(freq) * dMe_dsigma_v def getADeriv(self, freq, u, v, adjoint=False): + r""" + Product of the derivative of our system matrix with respect to the + model and a vector. 
+ + :param float freq: frequency + :param numpy.ndarray u: solution vector (nE,) + :param numpy.ndarray v: vector to take prodct with (nP,) or (nD,) for + adjoint + :param bool adjoint: adjoint? + :rtype: numpy.ndarray + :return: derivative of the system matrix times a vector (nP,) or + adjoint (nD,) + """ return ( self.getADeriv_sigma(freq, u, v, adjoint) + self.getADeriv_mui(freq, u, v, adjoint) @@ -669,6 +715,31 @@ def getRHSDeriv(self, freq, src, v, adjoint=False): class Simulation3DMagneticFluxDensityFaceEdgeConductivity( Simulation3DMagneticFluxDensity, BaseFaceEdgeElectricalPDESimulation ): + r""" + We eliminate :math:`\mathbf{e}` using + + .. math :: + + \mathbf{e} = \left ( \mathbf{M^e_{\sigma}} + \mathbf{M^e_{\tau}} + + \mathbf{M^e_{\kappa}}\right )^{-1} \left(\mathbf{C}^{\top} + \mathbf{M_{\mu^{-1}}^f} \mathbf{b} - \mathbf{s_e}\right) + + and solve for :math:`\mathbf{b}` using: + + .. math :: + + \left(\mathbf{C} \left ( \mathbf{M^e_{\sigma}} + \mathbf{M^e_{\tau}} + + \mathbf{M^e_{\kappa}}\right )^{-1} \mathbf{C}^{\top} + \mathbf{M_{\mu^{-1}}^f} + i \omega \right)\mathbf{b} = \mathbf{s_m} + + \left ( \mathbf{M^e_{\sigma}} + \mathbf{M^e_{\tau}} + + \mathbf{M^e_{\kappa}}\right )^{-1} \mathbf{M^e}\mathbf{s_e} + + .. note :: + The inverse problem will not work with full anisotropy + + :param discretize.base.BaseMesh mesh: mesh + """ + fieldsPair = Fields3DMagneticFluxDensityFaceEdgeConductivity def getA(self, freq): @@ -677,7 +748,8 @@ def getA(self, freq): .. math :: - \mathbf{A} = \mathbf{C} \mathbf{M^e_{\sigma}}^{-1} + \mathbf{A} = \mathbf{C} \left ( \mathbf{M^e_{\sigma}} + + \mathbf{M^e_{\tau}} + \mathbf{M^e_{\kappa}}\right )^{-1} \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f} + i \omega :param float freq: Frequency @@ -705,12 +777,10 @@ def getA(self, freq): def getADeriv_sigma(self, freq, u, v, adjoint=False): r""" Product of the derivative of our system matrix with respect to the - model and a vector + model and a vector. - .. 
math :: - - \frac{\mathbf{A}(\mathbf{m}) \mathbf{v}}{d \mathbf{m}} = - \mathbf{C} \frac{\mathbf{M^e_{\sigma}} \mathbf{v}}{d\mathbf{m}} + This includes derivatives for volume, face and/or edge conductivities + depending on whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. :param float freq: frequency :param numpy.ndarray u: solution vector (nF,) diff --git a/SimPEG/electromagnetics/time_domain/simulation.py b/SimPEG/electromagnetics/time_domain/simulation.py index b08ed3fad3..54696bc9d7 100644 --- a/SimPEG/electromagnetics/time_domain/simulation.py +++ b/SimPEG/electromagnetics/time_domain/simulation.py @@ -969,6 +969,70 @@ def getAdcDeriv(self, u, v, adjoint=False): class Simulation3DMagneticFluxDensityFaceEdgeConductivity( Simulation3DMagneticFluxDensity, BaseFaceEdgeElectricalPDESimulation ): + r""" + Starting from the quasi-static E-B formulation of Maxwell's equations + (semi-discretized) + + .. math:: + + \mathbf{C} \mathbf{e} + \frac{\partial \mathbf{b}}{\partial t} = + \mathbf{s_m} \\ + \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f} \mathbf{b} - + \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right ) + \mathbf{e} = \mathbf{s_e} + + + where :math:`\mathbf{s_e}` is an integrated quantity, we eliminate + :math:`\mathbf{e}` using + + .. math:: + + \mathbf{e} = \mathbf{M_{\sigma}^e}^{-1} \mathbf{C}^{\top} + \mathbf{M_{\mu^{-1}}^f} \mathbf{b} - + \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} \mathbf{s_e} + + + to obtain a second order semi-discretized system in :math:`\mathbf{b}` + + .. math:: + + \mathbf{C} \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} + \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f} \mathbf{b} + + \frac{\partial \mathbf{b}}{\partial t} = \mathbf{C} + \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} \mathbf{s_e} + \mathbf{s_m} + + + and moving everything except the time derivative to the rhs gives + + .. 
math:: + \frac{\partial \mathbf{b}}{\partial t} = + -\mathbf{C} \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} \mathbf{C}^{\top} + \mathbf{M_{\mu^{-1}}^f} \mathbf{b} + + \mathbf{C} \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} \mathbf{s_e} + \mathbf{s_m} + + For the time discretization, we use backward euler. To solve for the + :math:`n+1` th time step, we have + + .. math:: + + \frac{\mathbf{b}^{n+1} - \mathbf{b}^{n}}{\mathbf{dt}} = + -\mathbf{C} \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} \mathbf{C}^{\top} + \mathbf{M_{\mu^{-1}}^f} \mathbf{b}^{n+1} + + \mathbf{C} \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} \mathbf{s_e}^{n+1} + + \mathbf{s_m}^{n+1} + + + re-arranging to put :math:`\mathbf{b}^{n+1}` on the left hand side gives + + .. math:: + + (\mathbf{I} + \mathbf{dt} \mathbf{C} \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} + \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f}) \mathbf{b}^{n+1} = + \mathbf{b}^{n} + \mathbf{dt}(\mathbf{C} \left ( \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} + \mathbf{s_e}^{n+1} + \mathbf{s_m}^{n+1}) + + """ + fieldsPair = Fields3DMagneticFluxDensityFaceEdgeConductivity #: A SimPEG.EM.TDEM.Fields3DMagneticFluxDensity object def getAdiag(self, tInd): @@ -977,7 +1041,8 @@ def getAdiag(self, tInd): .. math:: - (\mathbf{I} + \mathbf{dt} \mathbf{C} \mathbf{M_{\sigma}^e}^{-1} + (\mathbf{I} + \mathbf{dt} \mathbf{C} \left ( + \mathbf{M_{\sigma}^e + M_{\tau}^e + M_{\kappa}^e} \right )^{-1} \mathbf{C}^{\top} \mathbf{M_{\mu^{-1}}^f}) """ @@ -996,6 +1061,21 @@ def getAdiag(self, tInd): return A def getAdiagDeriv(self, tInd, u, v, adjoint=False): + r""" + Product of the derivative of our system matrix with respect to the + electrical properties within the model and a vector. 
This includes + derivatives for volume, face and/or edge conductivities depending on + whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. + + :param float tInd: time step index + :param numpy.ndarray u: solution vector (nF,) + :param numpy.ndarray v: vector to take prodct with (nP,) or (nD,) for + adjoint + :param bool adjoint: adjoint? + :rtype: numpy.ndarray + :return: derivative of the system matrix times a vector (nP,) or + adjoint (nD,) + """ C = self.mesh.edge_curl MfMui = self.MfMui @@ -1029,7 +1109,9 @@ def getRHS(self, tInd): def getRHSDeriv(self, tInd, src, v, adjoint=False): """ - Derivative of the RHS + Derivative of the RHS. This includes + derivatives for volume, face and/or edge conductivities depending on + whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. """ C = self.mesh.edge_curl @@ -1076,11 +1158,48 @@ def getRHSDeriv(self, tInd, src, v, adjoint=False): class Simulation3DElectricFieldFaceEdgeConductivity( Simulation3DElectricField, BaseFaceEdgeElectricalPDESimulation ): + r""" + Solve the EB-formulation of Maxwell's equations for the electric field, e. + Takes into account volume, face and edge conductivities. + + Starting with + + .. math:: + + \nabla \times \mathbf{e} + \frac{\partial \mathbf{b}}{\partial t} = \mathbf{s_m} \ + \nabla \times \mu^{-1} \mathbf{b} - \sigma \mathbf{e} = \mathbf{s_e} + + + we eliminate :math:`\frac{\partial b}{\partial t}` using + + .. math:: + + \frac{\partial \mathbf{b}}{\partial t} = - \nabla \times \mathbf{e} + \mathbf{s_m} + + + taking the time-derivative of Ampere's law, we see + + .. math:: + + \frac{\partial}{\partial t}\left( \nabla \times \mu^{-1} \mathbf{b} - \sigma \mathbf{e} \right) = \frac{\partial \mathbf{s_e}}{\partial t} \ + \nabla \times \mu^{-1} \frac{\partial \mathbf{b}}{\partial t} - \sigma \frac{\partial\mathbf{e}}{\partial t} = \frac{\partial \mathbf{s_e}}{\partial t} + + + which gives us + + .. 
math:: + + \nabla \times \mu^{-1} \nabla \times \mathbf{e} + \sigma \frac{\partial\mathbf{e}}{\partial t} = \nabla \times \mu^{-1} \mathbf{s_m} + \frac{\partial \mathbf{s_e}}{\partial t} + + + """ fieldsPair = Fields3DElectricFieldFaceEdgeConductivity def getAdiag(self, tInd): """ - Diagonal of the system matrix at a given time index + Diagonal of the system matrix at a given time index. This includes + derivatives for volume, face and/or edge conductivities depending on + whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. """ assert tInd >= 0 and tInd < self.nT @@ -1092,6 +1211,21 @@ def getAdiag(self, tInd): return C.T.tocsr() * (MfMui * C) + 1.0 / dt * MeSigmaTauKappa def getAdiagDeriv(self, tInd, u, v, adjoint=False): + r""" + Product of the derivative of diagonal system matrix with respect to the + electrical properties within the model and a vector. This includes + derivatives for volume, face and/or edge conductivities depending on + whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. + + :param float tInd: time step index + :param numpy.ndarray u: solution vector (nF,) + :param numpy.ndarray v: vector to take prodct with (nP,) or (nD,) for + adjoint + :param bool adjoint: adjoint? + :rtype: numpy.ndarray + :return: derivative of the system matrix times a vector (nP,) or + adjoint (nD,) + """ assert tInd >= 0 and tInd < self.nT dt = self.time_steps[tInd] @@ -1114,8 +1248,20 @@ def getAsubdiag(self, tInd): return -1.0 / dt * MeSigmaTauKappa def getAsubdiagDeriv(self, tInd, u, v, adjoint=False): - """ - Derivative of the matrix below the diagonal with respect to conductance + r""" + Product of the derivative of off-diagonal system matrix with respect to the + electrical properties within the model and a vector. This includes + derivatives for volume, face and/or edge conductivities depending on + whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. 
+ + :param float tInd: time step index + :param numpy.ndarray u: solution vector (nF,) + :param numpy.ndarray v: vector to take prodct with (nP,) or (nD,) for + adjoint + :param bool adjoint: adjoint? + :rtype: numpy.ndarray + :return: derivative of the system matrix times a vector (nP,) or + adjoint (nD,) """ dt = self.time_steps[tInd] @@ -1134,6 +1280,20 @@ def getAdc(self): return Adc def getAdcDeriv(self, u, v, adjoint=False): + r""" + Product of the derivative of the DC system matrix with respect to the + electrical properties within the model and a vector. This includes + derivatives for volume, face and/or edge conductivities depending on + whether ``sigmaMap``, ``tauMap`` and/or ``kappaMap`` are set. + + :param numpy.ndarray u: solution vector (nF,) + :param numpy.ndarray v: vector to take prodct with (nP,) or (nD,) for + adjoint + :param bool adjoint: adjoint? + :rtype: numpy.ndarray + :return: derivative of the system matrix times a vector (nP,) or + adjoint (nD,) + """ Grad = self.mesh.nodal_gradient if not adjoint: return Grad.T * self._MeSigmaTauKappaDeriv(-u, v, adjoint) From 94072befd27ed76e60202740d93965b4b6a61f1a Mon Sep 17 00:00:00 2001 From: dccowan Date: Fri, 24 Nov 2023 10:41:00 -0800 Subject: [PATCH 124/164] formatting --- SimPEG/electromagnetics/frequency_domain/fields.py | 1 + SimPEG/electromagnetics/frequency_domain/simulation.py | 1 + SimPEG/electromagnetics/time_domain/simulation.py | 3 +-- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/SimPEG/electromagnetics/frequency_domain/fields.py b/SimPEG/electromagnetics/frequency_domain/fields.py index f3f4b3cf1e..988091a4ac 100644 --- a/SimPEG/electromagnetics/frequency_domain/fields.py +++ b/SimPEG/electromagnetics/frequency_domain/fields.py @@ -636,6 +636,7 @@ class Fields3DElectricFieldFaceEdgeConductivity(Fields3DElectricField): :param discretize.base.BaseMesh mesh: mesh :param SimPEG.electromagnetics.frequency_domain.SurveyFDEM.Survey survey: survey """ + def startup(self): 
self._edgeCurl = self.simulation.mesh.edge_curl self._aveE2CCV = self.simulation.mesh.aveE2CCV diff --git a/SimPEG/electromagnetics/frequency_domain/simulation.py b/SimPEG/electromagnetics/frequency_domain/simulation.py index 98604bd074..cbce26a9ea 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation.py @@ -455,6 +455,7 @@ class Simulation3DElectricFieldFaceEdgeConductivity( :param discretize.base.BaseMesh mesh: mesh """ + _solutionType = "eSolution" _formulation = "EB" fieldsPair = Fields3DElectricFieldFaceEdgeConductivity diff --git a/SimPEG/electromagnetics/time_domain/simulation.py b/SimPEG/electromagnetics/time_domain/simulation.py index 54696bc9d7..319f063d90 100644 --- a/SimPEG/electromagnetics/time_domain/simulation.py +++ b/SimPEG/electromagnetics/time_domain/simulation.py @@ -1188,11 +1188,10 @@ class Simulation3DElectricFieldFaceEdgeConductivity( which gives us .. math:: - \nabla \times \mu^{-1} \nabla \times \mathbf{e} + \sigma \frac{\partial\mathbf{e}}{\partial t} = \nabla \times \mu^{-1} \mathbf{s_m} + \frac{\partial \mathbf{s_e}}{\partial t} - """ + fieldsPair = Fields3DElectricFieldFaceEdgeConductivity def getAdiag(self, tInd): From 17c0a845d6ae9b085f0674e75ba4eda3dcc77c6b Mon Sep 17 00:00:00 2001 From: dccowan Date: Fri, 24 Nov 2023 10:41:41 -0800 Subject: [PATCH 125/164] one for formatting --- SimPEG/electromagnetics/frequency_domain/fields.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/electromagnetics/frequency_domain/fields.py b/SimPEG/electromagnetics/frequency_domain/fields.py index 988091a4ac..8bce30eea6 100644 --- a/SimPEG/electromagnetics/frequency_domain/fields.py +++ b/SimPEG/electromagnetics/frequency_domain/fields.py @@ -636,7 +636,7 @@ class Fields3DElectricFieldFaceEdgeConductivity(Fields3DElectricField): :param discretize.base.BaseMesh mesh: mesh :param SimPEG.electromagnetics.frequency_domain.SurveyFDEM.Survey survey: survey """ - 
+ def startup(self): self._edgeCurl = self.simulation.mesh.edge_curl self._aveE2CCV = self.simulation.mesh.aveE2CCV From 7efa9016479485a10b14df218459a7427b441ad8 Mon Sep 17 00:00:00 2001 From: dccowan Date: Fri, 24 Nov 2023 11:02:33 -0800 Subject: [PATCH 126/164] one more. I know.....precommit --- SimPEG/electromagnetics/frequency_domain/simulation.py | 2 +- SimPEG/electromagnetics/time_domain/simulation.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/SimPEG/electromagnetics/frequency_domain/simulation.py b/SimPEG/electromagnetics/frequency_domain/simulation.py index cbce26a9ea..5ab3c57f63 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation.py @@ -455,7 +455,7 @@ class Simulation3DElectricFieldFaceEdgeConductivity( :param discretize.base.BaseMesh mesh: mesh """ - + _solutionType = "eSolution" _formulation = "EB" fieldsPair = Fields3DElectricFieldFaceEdgeConductivity diff --git a/SimPEG/electromagnetics/time_domain/simulation.py b/SimPEG/electromagnetics/time_domain/simulation.py index 319f063d90..71d7c42d0d 100644 --- a/SimPEG/electromagnetics/time_domain/simulation.py +++ b/SimPEG/electromagnetics/time_domain/simulation.py @@ -1191,7 +1191,7 @@ class Simulation3DElectricFieldFaceEdgeConductivity( \nabla \times \mu^{-1} \nabla \times \mathbf{e} + \sigma \frac{\partial\mathbf{e}}{\partial t} = \nabla \times \mu^{-1} \mathbf{s_m} + \frac{\partial \mathbf{s_e}}{\partial t} """ - + fieldsPair = Fields3DElectricFieldFaceEdgeConductivity def getAdiag(self, tInd): From 9f534e5ad3ea7228fba69dbaf53be1a157dc9696 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Mon, 4 Dec 2023 16:45:38 -0800 Subject: [PATCH 127/164] more sensible optimization defaults --- SimPEG/optimization.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/SimPEG/optimization.py b/SimPEG/optimization.py index dbbc23ef21..b666d4bb03 100644 --- a/SimPEG/optimization.py +++ 
b/SimPEG/optimization.py @@ -241,16 +241,16 @@ class Minimize(object): name = "General Optimization Algorithm" #: The name of the optimization algorithm maxIter = 20 #: Maximum number of iterations - maxIterLS = 10 #: Maximum number of iterations for the line-search + maxIterLS = 20 #: Maximum number of iterations for the line-search maxStep = np.inf #: Maximum step possible, used in scaling before the line-search. LSreduction = 1e-4 #: Expected decrease in the line-search LScurvature = ( 0.9 #: Expected decrease of the slope for line search Wolfe Curvature criteria ) LSshorten = 0.5 #: Line-search step is shortened by this amount each time. - tolF = 1e-1 #: Tolerance on function value decrease - tolX = 1e-1 #: Tolerance on norm(x) movement - tolG = 1e-1 #: Tolerance on gradient norm + tolF = 1e-2 #: Tolerance on function value decrease + tolX = 1e-2 #: Tolerance on norm(x) movement + tolG = 1e-2 #: Tolerance on gradient norm eps = 1e-5 #: Small value stopNextIteration = False #: Stops the optimization program nicely. 
@@ -763,8 +763,8 @@ def _doEndIterationRemember(self, *args): class ProjectedGradient(Minimize, Remember): name = "Projected Gradient" - maxIterCG = 5 - tolCG = 1e-1 + maxIterCG = 100 + tolCG = 1e-4 lower = -np.inf upper = np.inf @@ -1043,8 +1043,8 @@ def __init__(self, **kwargs): name = "Inexact Gauss Newton" - maxIterCG = 5 - tolCG = 1e-1 + maxIterCG = 100 + tolCG = 1e-4 @property def approxHinv(self): @@ -1184,8 +1184,8 @@ def __init__(self, **kwargs): name = "Projected GNCG" - maxIterCG = 5 - tolCG = 1e-1 + maxIterCG = 100 + tolCG = 1e-4 cg_count = 0 stepOffBoundsFact = 1e-2 # perturbation of the inactive set off the bounds stepActiveset = True From c6240243404b34d9e54844454349f83185b36b37 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 6 Dec 2023 16:41:08 -0800 Subject: [PATCH 128/164] fix rotated gradients with weights and active cells --- SimPEG/regularization/rotated.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/SimPEG/regularization/rotated.py b/SimPEG/regularization/rotated.py index 5b5afad63e..02c863379d 100644 --- a/SimPEG/regularization/rotated.py +++ b/SimPEG/regularization/rotated.py @@ -1,7 +1,9 @@ -from .base import BaseRegularization import numpy as np import scipy.sparse as sp +from scipy.interpolate import NearestNDInterpolator + from ..utils.code_utils import validate_ndarray_with_shape +from .base import BaseRegularization class SmoothnessFullGradient(BaseRegularization): @@ -231,7 +233,13 @@ def W(self): mesh = self.regularization_mesh.mesh cell_weights = np.ones(len(mesh)) for values in self._weights.values(): - cell_weights *= values + # project values to full mesh + # dirty fix of original PR + projection = NearestNDInterpolator( + mesh.cell_centers[self.active_cells], values + ) + proj_values = projection(mesh.cell_centers) + cell_weights *= proj_values reg_model = self._anis_alpha * cell_weights[:, None] # turn off measure in inactive cells if self.active_cells is not None: From 
29db9746ae12941cb94f7d1c7502eb42b6635c1b Mon Sep 17 00:00:00 2001 From: Thibaut Date: Tue, 19 Dec 2023 17:41:27 -0800 Subject: [PATCH 129/164] re-implement distance weighting and add a strategy comparison --- SimPEG/utils/__init__.py | 150 +++-- SimPEG/utils/model_utils.py | 108 +++- tests/base/test_model_utils.py | 5 +- ..._gravity_anomaly_irls_compare_weighting.py | 512 ++++++++++++++++++ 4 files changed, 683 insertions(+), 92 deletions(-) create mode 100644 tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py diff --git a/SimPEG/utils/__init__.py b/SimPEG/utils/__init__.py index 49e3e6193a..5edf2565c9 100644 --- a/SimPEG/utils/__init__.py +++ b/SimPEG/utils/__init__.py @@ -145,25 +145,29 @@ """ from discretize.utils.interpolation_utils import interpolation_matrix +from . import io_utils, model_builder, solver_utils from .code_utils import ( - mem_profile_class, + Report, + as_array_n_by_dim, + call_hooks, + check_stoppers, + dependent_property, + deprecate_class, + deprecate_function, + deprecate_method, + deprecate_module, + deprecate_property, hook, - set_kwargs, - print_titles, + mem_profile_class, + print_done, print_line, - check_stoppers, print_stoppers, - print_done, - call_hooks, - deprecate_property, - deprecate_module, - deprecate_method, - deprecate_function, - deprecate_class, - dependent_property, - as_array_n_by_dim, + print_titles, requires, - Report, + set_kwargs, + validate_active_indices, + validate_callable, + validate_direction, validate_float, validate_integer, validate_list_of_types, @@ -171,70 +175,60 @@ validate_ndarray_with_shape, validate_string, validate_type, - validate_callable, - validate_direction, - validate_active_indices, ) - +from .coord_utils import rotate_points_from_normals, rotation_matrix_from_normals +from .counter_utils import Counter, count, timeIt +from .curv_utils import ( + example_curvilinear_grid, + face_info, + index_cube, + volume_tetrahedron, +) +from .io_utils import download from 
.mat_utils import ( + Identity, + TensorType, + Zero, + av, + av_extrap, + cartesian2spherical, + coterminal, + ddx, + define_plane_from_points, + eigenvalue_by_power_iteration, + estimate_diagonal, + get_subarray, + ind2sub, + inverse_2x2_block_diagonal, + inverse_3x3_block_diagonal, + inverse_property_tensor, + kron3, + make_property_tensor, mkvc, + ndgrid, sdiag, sdinv, speye, - kron3, + spherical2cartesian, spzeros, - ddx, - av, - av_extrap, - ndgrid, - ind2sub, sub2ind, - get_subarray, - inverse_3x3_block_diagonal, - inverse_2x2_block_diagonal, - TensorType, - make_property_tensor, - inverse_property_tensor, - estimate_diagonal, - Zero, - Identity, unique_rows, - eigenvalue_by_power_iteration, - cartesian2spherical, - spherical2cartesian, - coterminal, - define_plane_from_points, ) from .mesh_utils import ( - unpack_widths, closest_points_index, extract_core_mesh, surface2inds, + unpack_widths, ) -from .curv_utils import ( - volume_tetrahedron, - index_cube, - face_info, - example_curvilinear_grid, -) -from .counter_utils import Counter, count, timeIt -from . import model_builder -from . import solver_utils -from . 
import io_utils -from .coord_utils import ( - rotation_matrix_from_normals, - rotate_points_from_normals, -) -from .model_utils import surface2ind_topo, depth_weighting -from .plot_utils import plot2Ddata, plotLayer, plot_1d_layer_model -from .io_utils import download +from .model_utils import depth_weighting, distance_weighting, surface2ind_topo from .pgi_utils import ( GaussianMixture, - WeightedGaussianMixture, - GaussianMixtureWithPrior, GaussianMixtureWithNonlinearRelationships, GaussianMixtureWithNonlinearRelationshipsWithPrior, + GaussianMixtureWithPrior, + WeightedGaussianMixture, ) +from .plot_utils import plot2Ddata, plot_1d_layer_model, plotLayer # Deprecated imports interpmat = deprecate_function( @@ -242,39 +236,27 @@ ) from .code_utils import ( + asArray_N_x_Dim, + callHooks, + checkStoppers, + dependentProperty, memProfileWrapper, - setKwargs, - printTitles, + printDone, printLine, - checkStoppers, printStoppers, - printDone, - callHooks, - dependentProperty, - asArray_N_x_Dim, + printTitles, + setKwargs, ) +from .coord_utils import rotatePointsFromNormals, rotationMatrixFromNormals +from .curv_utils import exampleLrmGrid, faceInfo, indexCube, volTetra from .mat_utils import ( - sdInv, + diagEst, getSubArray, - inv3X3BlockDiagonal, inv2X2BlockDiagonal, - makePropertyTensor, + inv3X3BlockDiagonal, invPropertyTensor, - diagEst, + makePropertyTensor, + sdInv, uniqueRows, ) -from .mesh_utils import ( - meshTensor, - closestPoints, - ExtractCoreMesh, -) -from .curv_utils import ( - volTetra, - faceInfo, - indexCube, - exampleLrmGrid, -) -from .coord_utils import ( - rotatePointsFromNormals, - rotationMatrixFromNormals, -) +from .mesh_utils import ExtractCoreMesh, closestPoints, meshTensor diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index 8c6d19b1ab..900c9fd01b 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -1,10 +1,12 @@ -from .mat_utils import mkvc +import warnings + import numpy as np -from 
scipy.interpolate import griddata -from scipy.spatial import cKDTree import scipy.sparse as sp from discretize.utils import active_from_xyz -import warnings +from scipy.interpolate import griddata +from scipy.spatial import cKDTree + +from .mat_utils import mkvc def surface2ind_topo(mesh, topo, gridLoc="CC", method="nearest", fill_value=np.nan): @@ -195,3 +197,101 @@ def depth_weighting( wz = wz[active_cells] return wz / np.nanmax(wz) + + +def distance_weighting( + mesh, reference_locs, active_cells=None, exponent=2.0, threshold=None, **kwargs +): + r""" + Construct diagonal elements of a distance weighting matrix + + Builds the model weights following the distance weighting strategy, a method + to generate weights based on the distance between mesh cell centers and some + reference location(s). + Use these weights in regularizations to counteract the natural decay of + potential field data with distance. + + Parameters + ---------- + mesh : discretize.base.BaseMesh + Discretized model space. + reference_locs : float or (n, ndim) numpy.ndarray + Reference location for the distance weighting. + It can be a ``float``, which value is the component for + the reference location. + Or it can be a 2d array, with multiple reference locations, where each + row should contain the coordinates of a single location point in the + following order: _x_, _y_, _z_ (for 3D meshes) or _x_, _z_ (for 2D + meshes). + The coordinate of the reference location, usually the receiver locations + active_cells : (mesh.n_cells) numpy.ndarray of bool, optional + Index vector for the active cells on the mesh. + If ``None``, every cell will be assumed to be active. + exponent : float, optional + Exponent parameter for distance weighting. + The exponent should match the natural decay power of the potential + field. For example, for gravity acceleration, set it to 2; for magnetic + fields, to 3. + threshold : float or None, optional + Threshold parameters used in the distance weighting. 
+ If ``None``, it will be set to half of the smallest cell width. + + Returns + ------- + (n_active) numpy.ndarray + Normalized distance weights for the mesh at every active cell as + a 1d-array. + """ + + if "indActive" in kwargs: + warnings.warn( + "The indActive keyword argument has been deprecated, please use active_cells. " + "This will be removed in SimPEG 0.19.0", + FutureWarning, + stacklevel=2, + ) + active_cells = kwargs["indActive"] + + # Default threshold value + if threshold is None: + threshold = 0.5 * mesh.h_gridded.min() + + reference_locs = np.asarray(reference_locs) + + # Calculate distance from receiver locations + # reference_locs is a scalar + if reference_locs.ndim < 2: + distance = np.abs(mesh.cell_centers[:, -1] - reference_locs) + + # reference_locs is a 2d array + else: + n, d = mesh.cell_centers.shape + t, d1 = reference_locs.shape + + if not d == d1: + raise Exception("vectors must have same number of columns") + + # vectorized distance calculations + distance = ( + np.dot((mesh.cell_centers**2.0), np.ones([d, t])) + + np.dot(np.ones([n, d]), (reference_locs**2.0).T) + - 2.0 * np.dot(mesh.cell_centers, reference_locs.T) + ) ** 0.5 + + dist_weights = ( + ( + ( + ( + mesh.cell_volumes.reshape(-1, 1) + / ((distance + threshold) ** exponent) + ) + ** 2 + ).sum(axis=1) + ) + ** (0.5) + ) / np.sqrt(mesh.cell_volumes) + + if active_cells is not None: + dist_weights = dist_weights[active_cells] + + return dist_weights / np.nanmax(dist_weights) diff --git a/tests/base/test_model_utils.py b/tests/base/test_model_utils.py index d7d57f6d80..e0bb3584d5 100644 --- a/tests/base/test_model_utils.py +++ b/tests/base/test_model_utils.py @@ -1,12 +1,9 @@ import unittest import numpy as np - from discretize import TensorMesh -from SimPEG import ( - utils, -) +from SimPEG import utils class DepthWeightingTest(unittest.TestCase): diff --git a/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py 
b/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py new file mode 100644 index 0000000000..f01d39be31 --- /dev/null +++ b/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py @@ -0,0 +1,512 @@ +""" +Compare weighting strategy with Inversion of surface Gravity Anomaly Data +========================================================================= + +Here we invert gravity anomaly data to recover a density contrast model. We formulate the inverse problem as an iteratively +re-weighted least-squares (IRLS) optimization problem. For this tutorial, we +focus on the following: + + - Setting regularization weights + - Defining the survey from xyz formatted data + - Generating a mesh based on survey geometry + - Including surface topography + - Defining the inverse problem (data misfit, regularization, optimization) + - Specifying directives for the inversion + - Setting sparse and blocky norms + - Plotting the recovered model and data misfit + +Although we consider gravity anomaly data in this tutorial, the same approach +can be used to invert gradiometry and other types of geophysical data. +""" + +######################################################################### +# Import modules +# -------------- +# + +import os +import tarfile + +import matplotlib as mpl +import matplotlib.pyplot as plt +import numpy as np +from discretize import TensorMesh +from discretize.utils import active_from_xyz + +from SimPEG import ( + data, + data_misfit, + directives, + inverse_problem, + inversion, + maps, + optimization, + regularization, + utils, +) +from SimPEG.potential_fields import gravity +from SimPEG.utils import model_builder, plot2Ddata + +# sphinx_gallery_thumbnail_number = 3 + +############################################# +# Define File Names +# ----------------- +# +# File paths for assets we are loading. To set up the inversion, we require +# topography and field observations. 
The true model defined on the whole mesh +# is loaded to compare with the inversion result. These files are stored as a +# tar-file on our google cloud bucket: +# "https://storage.googleapis.com/simpeg/doc-assets/gravity.tar.gz" +# + +# storage bucket where we have the data +data_source = "https://storage.googleapis.com/simpeg/doc-assets/gravity.tar.gz" + +# download the data +downloaded_data = utils.download(data_source, overwrite=True) + +# unzip the tarfile +tar = tarfile.open(downloaded_data, "r") +tar.extractall() +tar.close() + +# path to the directory containing our data +dir_path = downloaded_data.split(".")[0] + os.path.sep + +# files to work with +topo_filename = dir_path + "gravity_topo.txt" +data_filename = dir_path + "gravity_data.obs" + + +############################################# +# Load Data and Plot +# ------------------ +# +# Here we load and plot synthetic gravity anomaly data. Topography is generally +# defined as an (N, 3) array. Gravity data is generally defined with 4 columns: +# x, y, z and data. 
+# + +# Load topography +xyz_topo = np.loadtxt(str(topo_filename)) + +# Load field data +dobs = np.loadtxt(str(data_filename)) + +# Define receiver locations and observed data +receiver_locations = dobs[:, 0:3] +dobs = dobs[:, -1] + +# Plot +mpl.rcParams.update({"font.size": 12}) +fig = plt.figure(figsize=(7, 5)) + +ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.85]) +plot2Ddata(receiver_locations, dobs, ax=ax1, contourOpts={"cmap": "bwr"}) +ax1.set_title("Gravity Anomaly") +ax1.set_xlabel("x (m)") +ax1.set_ylabel("y (m)") + +ax2 = fig.add_axes([0.8, 0.1, 0.03, 0.85]) +norm = mpl.colors.Normalize(vmin=-np.max(np.abs(dobs)), vmax=np.max(np.abs(dobs))) +cbar = mpl.colorbar.ColorbarBase( + ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr, format="%.1e" +) +cbar.set_label("$mgal$", rotation=270, labelpad=15, size=12) + +plt.show() + +############################################# +# Assign Uncertainties +# -------------------- +# +# Inversion with SimPEG requires that we define the standard deviation of our data. +# This represents our estimate of the noise in our data. For a gravity inversion, +# a constant floor value is generally applied to all data. For this tutorial, +# the standard deviation on each datum will be 1% of the maximum observed +# gravity anomaly value. +# + +maximum_anomaly = np.max(np.abs(dobs)) + +uncertainties = 0.01 * maximum_anomaly * np.ones(np.shape(dobs)) + +############################################# +# Defining the Survey +# ------------------- +# +# Here, we define the survey that will be used for this tutorial. Gravity +# surveys are simple to create. The user only needs an (N, 3) array to define +# the xyz locations of the observation locations. From this, the user can +# define the receivers and the source field. +# + +# Define the receivers. The data consists of vertical gravity anomaly measurements. +# The set of receivers must be defined as a list. 
+receiver_list = gravity.receivers.Point(receiver_locations, components="gz") + +receiver_list = [receiver_list] + +# Define the source field +source_field = gravity.sources.SourceField(receiver_list=receiver_list) + +# Define the survey +survey = gravity.survey.Survey(source_field) + +############################################# +# Defining the Data +# ----------------- +# +# Here is where we define the data that is inverted. The data is defined by +# the survey, the observation values and the standard deviation. +# + +data_object = data.Data(survey, dobs=dobs, standard_deviation=uncertainties) + + +############################################# +# Defining a Tensor Mesh +# ---------------------- +# +# Here, we create the tensor mesh that will be used to invert gravity anomaly +# data. If desired, we could define an OcTree mesh. +# + +dh = 5.0 +hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] +hy = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] +hz = [(dh, 5, -1.3), (dh, 15)] +mesh = TensorMesh([hx, hy, hz], "CCN") + +######################################################## +# Starting/Reference Model and Mapping on Tensor Mesh +# --------------------------------------------------- +# +# Here, we create starting and/or reference models for the inversion as +# well as the mapping from the model space to the active cells. Starting and +# reference models can be a constant background value or contain a-priori +# structures. 
+# + +# Find the indices of the active cells in forward model (ones below surface) +ind_active = active_from_xyz(mesh, xyz_topo) + +# Define mapping from model to active cells +nC = int(ind_active.sum()) +model_map = maps.IdentityMap(nP=nC) # model consists of a value for each active cell + +# Define and plot starting model +starting_model = np.zeros(nC) + + +############################################## +# Define the Physics and data misfit +# ---------------------------------- +# +# Here, we define the physics of the gravity problem by using the simulation +# class. +# + +simulation = gravity.simulation.Simulation3DIntegral( + survey=survey, mesh=mesh, rhoMap=model_map, ind_active=ind_active +) + +# Define the data misfit. Here the data misfit is the L2 norm of the weighted +# residual between the observed data and the data predicted for a given model. +# Within the data misfit, the residual between predicted and observed data are +# normalized by the data's standard deviation. +dmis = data_misfit.L2DataMisfit(data=data_object, simulation=simulation) + + +####################################################################### +# Running the Depth Weighted inversion +# ------------------------------------ +# +# Here we define the directives, weights, regularization, and optimization +# for a depth-weighted inversion +# + +# inversion directives +# Defining a starting value for the trade-off parameter (beta) between the data +# misfit and the regularization. +starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e0) + +# Defines the directives for the IRLS regularization. This includes setting +# the cooling schedule for the trade-off parameter. +update_IRLS = directives.Update_IRLS( + f_min_change=1e-4, + max_irls_iterations=30, + coolEpsFact=1.5, + beta_tol=1e-2, +) + +# Options for outputting recovered models and predicted data for each beta. 
+save_iteration = directives.SaveOutputEveryIteration(save_txt=False) + +# Updating the preconditionner if it is model dependent. +update_jacobi = directives.UpdatePreconditioner() + +# The directives are defined as a list +directives_list = [ + update_IRLS, + starting_beta, + save_iteration, + update_jacobi, +] + +# Define the regularization (model objective function) with depth weighting. +reg_dpth = regularization.Sparse(mesh, active_cells=ind_active, mapping=model_map) +reg_dpth.norms = [0, 2, 2, 2] +depth_weights = utils.depth_weighting( + mesh, receiver_locations, active_cells=ind_active, exponent=2 +) +reg_dpth.set_weights(depth_weights=depth_weights) + +# Define how the optimization problem is solved. Here we will use a projected +# Gauss-Newton approach that employs the conjugate gradient solver. +opt = optimization.ProjectedGNCG( + maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3 +) + +# Here we define the inverse problem that is to be solved +inv_prob = inverse_problem.BaseInvProblem(dmis, reg_dpth, opt) + +# Here we combine the inverse problem and the set of directives +inv = inversion.BaseInversion(inv_prob, directives_list) + +# Run inversion +recovered_model_dpth = inv.run(starting_model) + +####################################################################### +# Running the Distance Weighted inversion +# --------------------------------------- +# +# Here we define the directives, weights, regularization, and optimization +# for a distance-weighted inversion +# + +# inversion directives +# Defining a starting value for the trade-off parameter (beta) between the data +# misfit and the regularization. +starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e0) + +# Defines the directives for the IRLS regularization. This includes setting +# the cooling schedule for the trade-off parameter. 
+update_IRLS = directives.Update_IRLS( + f_min_change=1e-4, + max_irls_iterations=30, + coolEpsFact=1.5, + beta_tol=1e-2, +) + +# Options for outputting recovered models and predicted data for each beta. +save_iteration = directives.SaveOutputEveryIteration(save_txt=False) + +# Updating the preconditionner if it is model dependent. +update_jacobi = directives.UpdatePreconditioner() + +# The directives are defined as a list +directives_list = [ + update_IRLS, + starting_beta, + save_iteration, + update_jacobi, +] + +# Define the regularization (model objective function) with distance weighting. +reg_dist = regularization.Sparse(mesh, active_cells=ind_active, mapping=model_map) +reg_dist.norms = [0, 2, 2, 2] +distance_weights = utils.distance_weighting( + mesh, receiver_locations, active_cells=ind_active, exponent=2 +) +reg_dist.set_weights(distance_weights=distance_weights**2) + +# Define how the optimization problem is solved. Here we will use a projected +# Gauss-Newton approach that employs the conjugate gradient solver. +opt = optimization.ProjectedGNCG( + maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3 +) + +# Here we define the inverse problem that is to be solved +inv_prob = inverse_problem.BaseInvProblem(dmis, reg_dist, opt) + +# Here we combine the inverse problem and the set of directives +inv = inversion.BaseInversion(inv_prob, directives_list) + +# Run inversion +recovered_model_dist = inv.run(starting_model) + +####################################################################### +# Running the Distance Weighted inversion +# --------------------------------------- +# +# Here we define the directives, weights, regularization, and optimization +# for a sensitivity weighted inversion +# + +# inversion directives +# Defining a starting value for the trade-off parameter (beta) between the data +# misfit and the regularization. 
+starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e0) + +# Defines the directives for the IRLS regularization. This includes setting +# the cooling schedule for the trade-off parameter. +update_IRLS = directives.Update_IRLS( + f_min_change=1e-4, + max_irls_iterations=30, + coolEpsFact=1.5, + beta_tol=1e-2, +) + +# Options for outputting recovered models and predicted data for each beta. +save_iteration = directives.SaveOutputEveryIteration(save_txt=False) + +# Updating the preconditionner if it is model dependent. +update_jacobi = directives.UpdatePreconditioner() + +# Add sensitivity weights +sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) + +# The directives are defined as a list +directives_list = [ + update_IRLS, + sensitivity_weights, + starting_beta, + save_iteration, + update_jacobi, +] + +# Define the regularization (model objective function) for sensitivity weighting. +reg_sensw = regularization.Sparse(mesh, active_cells=ind_active, mapping=model_map) +reg_sensw.norms = [0, 2, 2, 2] + +# Define how the optimization problem is solved. Here we will use a projected +# Gauss-Newton approach that employs the conjugate gradient solver. +opt = optimization.ProjectedGNCG( + maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3 +) + +# Here we define the inverse problem that is to be solved +inv_prob = inverse_problem.BaseInvProblem(dmis, reg_sensw, opt) + +# Here we combine the inverse problem and the set of directives +inv = inversion.BaseInversion(inv_prob, directives_list) + +# Run inversion +recovered_model_sensw = inv.run(starting_model) + +############################################################ +# Recreate True Model +# ------------------- +# + +# Define density contrast values for each unit in g/cc +background_density = 0.0 +block_density = -0.2 +sphere_density = 0.2 + +# Define model. Models in SimPEG are vector arrays. 
+true_model = background_density * np.ones(nC) + +# You could find the indicies of specific cells within the model and change their +# value to add structures. +ind_block = ( + (mesh.gridCC[ind_active, 0] > -50.0) + & (mesh.gridCC[ind_active, 0] < -20.0) + & (mesh.gridCC[ind_active, 1] > -15.0) + & (mesh.gridCC[ind_active, 1] < 15.0) + & (mesh.gridCC[ind_active, 2] > -50.0) + & (mesh.gridCC[ind_active, 2] < -30.0) +) +true_model[ind_block] = block_density + +# You can also use SimPEG utilities to add structures to the model more concisely +ind_sphere = model_builder.getIndicesSphere(np.r_[35.0, 0.0, -40.0], 15.0, mesh.gridCC) +ind_sphere = ind_sphere[ind_active] +true_model[ind_sphere] = sphere_density + + +############################################################ +# Plotting True Model and Recovered Models +# ---------------------------------------- +# + +# Plot Models +fig, ax = plt.subplots(4, 1, figsize=(9, 20), sharex=True, sharey=True) +plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan) +cmap = "coolwarm" +slice_y_loc = 0.0 + +mm = mesh.plot_slice( + plotting_map * true_model, + normal="Y", + ax=ax[0], + grid=False, + slice_loc=slice_y_loc, + pcolor_opts={"cmap": cmap, "norm": norm}, +) +ax[0].set_title(f"True model slice at y = {slice_y_loc} m") +plt.colorbar(mm[0], label="$g/cm^3$", ax=ax[0]) + +# plot depth weighting result +vmax = np.abs(recovered_model_dpth).max() +norm = mpl.colors.TwoSlopeNorm(vcenter=0, vmin=-vmax, vmax=vmax) +mm = mesh.plot_slice( + plotting_map * recovered_model_dpth, + normal="Y", + ax=ax[1], + grid=False, + slice_loc=slice_y_loc, + pcolor_opts={"cmap": cmap, "norm": norm}, +) +ax[1].set_title(f"Depth weighting Model slice at y = {slice_y_loc} m") +plt.colorbar(mm[0], label="$g/cm^3$", ax=ax[1]) + +# plot distance weighting result +vmax = np.abs(recovered_model_dist).max() +norm = mpl.colors.TwoSlopeNorm(vcenter=0, vmin=-vmax, vmax=vmax) +mm = mesh.plot_slice( + plotting_map * recovered_model_dist, + normal="Y", + 
ax=ax[2], + grid=False, + slice_loc=slice_y_loc, + pcolor_opts={"cmap": cmap, "norm": norm}, +) +ax[2].set_title(f"Distance weighting Model slice at y = {slice_y_loc} m") +plt.colorbar(mm[0], label="$g/cm^3$", ax=ax[2]) + +# plot sensitivity weighting result +vmax = np.abs(recovered_model_sensw).max() +norm = mpl.colors.TwoSlopeNorm(vcenter=0, vmin=-vmax, vmax=vmax) +mm = mesh.plot_slice( + plotting_map * recovered_model_sensw, + normal="Y", + ax=ax[3], + grid=False, + slice_loc=slice_y_loc, + pcolor_opts={"cmap": cmap, "norm": norm}, +) +ax[3].set_title(f"Sensitivity weighting Model slice at y = {slice_y_loc} m") +plt.colorbar(mm[0], label="$g/cm^3$", ax=ax[3]) + +# shared plotting +plotting_map = maps.InjectActiveCells(mesh, ind_active, 0.0) +slice_y_ind = ( + mesh.cell_centers[:, 1] == np.abs(mesh.cell_centers[:, 1] - slice_y_loc).min() +) +for axx in ax: + utils.plot2Ddata( + mesh.cell_centers[slice_y_ind][:, [0, 2]], + (plotting_map * true_model)[slice_y_ind], + contourOpts={"alpha": 0}, + level=True, + ncontour=2, + levelOpts={"colors": "grey", "linewidths": 2, "linestyles": "--"}, + method="nearest", + ax=axx, + ) + axx.set_aspect(1) + +plt.show() From a925fad555eccf51ba204a863cf2fd4223a74462 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Tue, 19 Dec 2023 17:44:55 -0800 Subject: [PATCH 130/164] typo --- .../plot_inv_1c_gravity_anomaly_irls_compare_weighting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py b/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py index f01d39be31..61a4f75415 100644 --- a/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py +++ b/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py @@ -320,7 +320,7 @@ distance_weights = utils.distance_weighting( mesh, receiver_locations, active_cells=ind_active, exponent=2 ) -reg_dist.set_weights(distance_weights=distance_weights**2) 
+reg_dist.set_weights(distance_weights=distance_weights) # Define how the optimization problem is solved. Here we will use a projected # Gauss-Newton approach that employs the conjugate gradient solver. From e812145f3524f755952597019fd94cb7668ae802 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Tue, 19 Dec 2023 18:02:02 -0800 Subject: [PATCH 131/164] tutorial --- ..._gravity_anomaly_irls_compare_weighting.py | 70 +++++++++++++++++-- 1 file changed, 66 insertions(+), 4 deletions(-) diff --git a/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py b/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py index 61a4f75415..3cf3cd461e 100644 --- a/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py +++ b/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py @@ -103,7 +103,16 @@ fig = plt.figure(figsize=(7, 5)) ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.85]) -plot2Ddata(receiver_locations, dobs, ax=ax1, contourOpts={"cmap": "bwr"}) +plot2Ddata( + receiver_locations, + dobs, + ax=ax1, + contourOpts={"cmap": "bwr"}, + shade=True, + nx=20, + ny=20, + dataloc=True, +) ax1.set_title("Gravity Anomaly") ax1.set_xlabel("x (m)") ax1.set_ylabel("y (m)") @@ -113,7 +122,7 @@ cbar = mpl.colorbar.ColorbarBase( ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr, format="%.1e" ) -cbar.set_label("$mgal$", rotation=270, labelpad=15, size=12) +cbar.set_label("$mGal$", rotation=270, labelpad=15, size=12) plt.show() @@ -433,7 +442,8 @@ # # Plot Models -fig, ax = plt.subplots(4, 1, figsize=(9, 20), sharex=True, sharey=True) +fig, ax = plt.subplots(2, 2, figsize=(20, 10), sharex=True, sharey=True) +ax = ax.flatten() plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan) cmap = "coolwarm" slice_y_loc = 0.0 @@ -509,4 +519,56 @@ ) axx.set_aspect(1) -plt.show() +plt.tight_layout() + +############################################################ +# Visualize weights +# ----------------- +# +# Plot Weights 
+fig, ax = plt.subplots(1, 3, figsize=(20, 4), sharex=True, sharey=True) +plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan) +cmap = "magma" +slice_y_loc = 0.0 + +# plot depth weights +mm = mesh.plot_slice( + plotting_map * np.log10(depth_weights), + normal="Y", + ax=ax[0], + grid=False, + slice_loc=slice_y_loc, + pcolor_opts={"cmap": cmap}, +) +ax[0].set_title(f"log10(depth weights) slice at y = {slice_y_loc} m") +plt.colorbar(mm[0], label="log10(depth weights)", ax=ax[0]) + +# plot distance weights +mm = mesh.plot_slice( + plotting_map * np.log10(distance_weights), + normal="Y", + ax=ax[1], + grid=False, + slice_loc=slice_y_loc, + pcolor_opts={"cmap": cmap}, +) +ax[1].set_title(f"log10(distance weights) slice at y = {slice_y_loc} m") +plt.colorbar(mm[0], label="log10(distance weights)", ax=ax[1]) + +# plot sensitivity weights +mm = mesh.plot_slice( + plotting_map * np.log10(reg_sensw.objfcts[0].get_weights(key="sensitivity")), + normal="Y", + ax=ax[2], + grid=False, + slice_loc=slice_y_loc, + pcolor_opts={"cmap": cmap}, +) +ax[2].set_title(f"log10(sensitivity weights) slice at y = {slice_y_loc} m") +plt.colorbar(mm[0], label="log10(sensitivity weights)", ax=ax[2]) + +# shared plotting +for axx in ax: + axx.set_aspect(1) + +plt.tight_layout() From f2016cbc2b0a4525096b9dde4a794c07d14d10fe Mon Sep 17 00:00:00 2001 From: Thibaut Date: Tue, 19 Dec 2023 18:22:41 -0800 Subject: [PATCH 132/164] typo --- SimPEG/utils/model_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index 900c9fd01b..8555586c8f 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -289,7 +289,7 @@ def distance_weighting( ).sum(axis=1) ) ** (0.5) - ) / np.sqrt(mesh.cell_volumes) + ) / mesh.cell_volumes if active_cells is not None: dist_weights = dist_weights[active_cells] From 459a0da779feb5b784f0602b5d9ecaa5ca534f5f Mon Sep 17 00:00:00 2001 From: Thibaut Date: Tue, 19 Dec 2023 
19:01:41 -0800 Subject: [PATCH 133/164] better handling of active_cells --- SimPEG/utils/model_utils.py | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index 8555586c8f..417c245f5a 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -243,6 +243,9 @@ def distance_weighting( a 1d-array. """ + active_cells = ( + np.ones(mesh.n_cells, dtype=bool) if active_cells is None else active_cells + ) if "indActive" in kwargs: warnings.warn( "The indActive keyword argument has been deprecated, please use active_cells. " @@ -263,9 +266,10 @@ def distance_weighting( if reference_locs.ndim < 2: distance = np.abs(mesh.cell_centers[:, -1] - reference_locs) - # reference_locs is a 2d array else: - n, d = mesh.cell_centers.shape + cell_centers = mesh.cell_centers[active_cells] + cell_volumes = mesh.cell_volumes[active_cells] + n, d = cell_centers.shape t, d1 = reference_locs.shape if not d == d1: @@ -273,25 +277,19 @@ def distance_weighting( # vectorized distance calculations distance = ( - np.dot((mesh.cell_centers**2.0), np.ones([d, t])) + np.dot((cell_centers**2.0), np.ones([d, t])) + np.dot(np.ones([n, d]), (reference_locs**2.0).T) - - 2.0 * np.dot(mesh.cell_centers, reference_locs.T) + - 2.0 * np.dot(cell_centers, reference_locs.T) ) ** 0.5 dist_weights = ( ( ( - ( - mesh.cell_volumes.reshape(-1, 1) - / ((distance + threshold) ** exponent) - ) + (cell_volumes.reshape(-1, 1) / ((distance + threshold) ** exponent)) ** 2 ).sum(axis=1) ) ** (0.5) - ) / mesh.cell_volumes - - if active_cells is not None: - dist_weights = dist_weights[active_cells] + ) / cell_volumes return dist_weights / np.nanmax(dist_weights) From 76a18f66f1ea29142ee808184e224fdde6c7ae2a Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 20 Dec 2023 08:04:00 -0800 Subject: [PATCH 134/164] remove depreciated argument --- SimPEG/utils/model_utils.py | 15 ++++++--------- 1 file changed, 6 
insertions(+), 9 deletions(-) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index 417c245f5a..2403f06001 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -1,5 +1,6 @@ import warnings +import discretize import numpy as np import scipy.sparse as sp from discretize.utils import active_from_xyz @@ -200,7 +201,11 @@ def depth_weighting( def distance_weighting( - mesh, reference_locs, active_cells=None, exponent=2.0, threshold=None, **kwargs + mesh: discretize.BaseMesh, + reference_locs: np.ndarray, + active_cells: np.ndarray | None = None, + exponent: float = 2.0, + threshold: float = None, ): r""" Construct diagonal elements of a distance weighting matrix @@ -246,14 +251,6 @@ def distance_weighting( active_cells = ( np.ones(mesh.n_cells, dtype=bool) if active_cells is None else active_cells ) - if "indActive" in kwargs: - warnings.warn( - "The indActive keyword argument has been deprecated, please use active_cells. " - "This will be removed in SimPEG 0.19.0", - FutureWarning, - stacklevel=2, - ) - active_cells = kwargs["indActive"] # Default threshold value if threshold is None: From 2012288eef838d5f2ce0ed6ccd150b31ff0bbf91 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 20 Dec 2023 08:19:41 -0800 Subject: [PATCH 135/164] add deprecation --- SimPEG/potential_fields/base.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/SimPEG/potential_fields/base.py b/SimPEG/potential_fields/base.py index 753a80f8fd..6e13b41932 100644 --- a/SimPEG/potential_fields/base.py +++ b/SimPEG/potential_fields/base.py @@ -1,4 +1,5 @@ import os +import warnings from multiprocessing.pool import Pool import discretize @@ -359,6 +360,12 @@ def get_dist_wgt(mesh, receiver_locations, actv, R, R0): wr : (n_cell) numpy.ndarray Distance weighting model; 0 for all inactive cells """ + warnings.warn( + "The get_dist_wgt function has been deprecated, please import " + "SimPEG.utils.distance_weighting. 
This will be removed in SimPEG 0.22.0", + FutureWarning, + stacklevel=2, + ) # Find non-zero cells if actv.dtype == "bool": inds = ( From fb5a6cbdcc200b894995a21162a1c550350fa186 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 20 Dec 2023 08:37:52 -0800 Subject: [PATCH 136/164] typo --- SimPEG/utils/model_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index 2403f06001..67699856e4 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -202,6 +202,7 @@ def depth_weighting( def distance_weighting( mesh: discretize.BaseMesh, + mesh: discretize.Base.BaseMesh, reference_locs: np.ndarray, active_cells: np.ndarray | None = None, exponent: float = 2.0, From 78b5678220cddfdb5b86352d62c7e4ad031c03e2 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 20 Dec 2023 08:39:24 -0800 Subject: [PATCH 137/164] typo --- SimPEG/utils/model_utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index 67699856e4..1862cd409e 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -201,7 +201,6 @@ def depth_weighting( def distance_weighting( - mesh: discretize.BaseMesh, mesh: discretize.Base.BaseMesh, reference_locs: np.ndarray, active_cells: np.ndarray | None = None, From c249f0aa00a86405705fd8cae9618e6723f97dcd Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 20 Dec 2023 08:40:39 -0800 Subject: [PATCH 138/164] typo --- SimPEG/utils/model_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index 1862cd409e..fe29d28e3d 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -201,7 +201,7 @@ def depth_weighting( def distance_weighting( - mesh: discretize.Base.BaseMesh, + mesh: discretize.base.BaseMesh, reference_locs: np.ndarray, active_cells: np.ndarray | None = None, exponent: float = 2.0, From 
e400602bde198cf63f36ccee750b153aeffd9128 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 20 Dec 2023 09:47:54 -0800 Subject: [PATCH 139/164] numba tentative --- SimPEG/utils/model_utils.py | 67 +++++++++++++++++++++++------- tests/base/test_model_utils.py | 76 ++++++++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+), 16 deletions(-) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index fe29d28e3d..a25188b643 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -1,9 +1,11 @@ import warnings +from typing import Literal import discretize import numpy as np import scipy.sparse as sp from discretize.utils import active_from_xyz +from numba import njit from scipy.interpolate import griddata from scipy.spatial import cKDTree @@ -206,6 +208,7 @@ def distance_weighting( active_cells: np.ndarray | None = None, exponent: float = 2.0, threshold: float = None, + engine: Literal["numpy", "numba"] = "numba", ): r""" Construct diagonal elements of a distance weighting matrix @@ -240,6 +243,8 @@ def distance_weighting( threshold : float or None, optional Threshold parameters used in the distance weighting. If ``None``, it will be set to half of the smallest cell width. + engine: str, 'numpy' or 'numba': pick between a numpy vectorized computation (memory intensive) or parallelized + numba implementation. Default to 'numba'. 
Returns ------- @@ -258,17 +263,45 @@ def distance_weighting( reference_locs = np.asarray(reference_locs) - # Calculate distance from receiver locations - # reference_locs is a scalar - if reference_locs.ndim < 2: - distance = np.abs(mesh.cell_centers[:, -1] - reference_locs) + cell_centers = mesh.cell_centers[active_cells] + cell_volumes = mesh.cell_volumes[active_cells] + + # address 1D case + if mesh.dim == 1: + cell_centers = cell_centers.reshape(-1, 1) + reference_locs = reference_locs.reshape(-1, 1) + + if engine == "numba": + + @njit(parallel=True) + def distance_weighting_numba( + cell_centers: np.ndarray, + cell_volumes: np.ndarray, + reference_locs: np.ndarray, + threshold: float, + exponent: float = 2.0, + ): + distance_weights = np.zeros(len(cell_centers)) + for _, rl in enumerate(reference_locs): + dst_wgt = ( + np.sqrt(((cell_centers - rl) ** 2).sum(axis=1)) + threshold + ) ** exponent + dst_wgt = (cell_volumes / dst_wgt) ** 2 + distance_weights += dst_wgt + + return distance_weights + + dist_weights = distance_weighting_numba( + cell_centers, + cell_volumes, + reference_locs, + exponent=exponent, + threshold=threshold, + ) - else: - cell_centers = mesh.cell_centers[active_cells] - cell_volumes = mesh.cell_volumes[active_cells] + elif engine == "numpy": n, d = cell_centers.shape t, d1 = reference_locs.shape - if not d == d1: raise Exception("vectors must have same number of columns") @@ -279,14 +312,16 @@ def distance_weighting( - 2.0 * np.dot(cell_centers, reference_locs.T) ) ** 0.5 - dist_weights = ( - ( - ( - (cell_volumes.reshape(-1, 1) / ((distance + threshold) ** exponent)) - ** 2 - ).sum(axis=1) + dist_weights = ( + (cell_volumes.reshape(-1, 1) / ((distance + threshold) ** exponent)) ** 2 + ).sum(axis=1) + + else: + raise ValueError( + f"engine should be either 'numpy' or 'numba', instead {engine=}" ) - ** (0.5) - ) / cell_volumes + + dist_weights = dist_weights**0.5 + dist_weights /= cell_volumes return dist_weights / np.nanmax(dist_weights) 
diff --git a/tests/base/test_model_utils.py b/tests/base/test_model_utils.py index e0bb3584d5..3f2c347c59 100644 --- a/tests/base/test_model_utils.py +++ b/tests/base/test_model_utils.py @@ -69,5 +69,81 @@ def test_depth_weighting_2D(self): np.testing.assert_allclose(wz, wz2) +class DistancehWeightingTest(unittest.TestCase): + def test_distance_weighting_3D(self): + # Mesh + dh = 5.0 + hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] + hy = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] + hz = [(dh, 15)] + mesh = TensorMesh([hx, hy, hz], "CCN") + + actv = np.random.randint(0, 2, mesh.n_cells) == 1 + + reference_locs = ( + np.random.rand(1000, 3) * (mesh.nodes.max(axis=0) - mesh.nodes.min(axis=0)) + + mesh.origin + ) + + # distance weighting + wz_numpy = utils.distance_weighting( + mesh, reference_locs, active_cells=actv, exponent=3, engine="numpy" + ) + wz_numba = utils.distance_weighting( + mesh, reference_locs, active_cells=actv, exponent=3, engine="numba" + ) + np.testing.assert_allclose(wz_numpy, wz_numba) + + with self.assertRaises(ValueError): + utils.distance_weighting( + mesh, reference_locs, active_cells=actv, exponent=3, engine="test" + ) + + def test_distance_weighting_2D(self): + # Mesh + dh = 5.0 + hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] + hz = [(dh, 15)] + mesh = TensorMesh([hx, hz], "CN") + + actv = np.random.randint(0, 2, mesh.n_cells) == 1 + + reference_locs = ( + np.random.rand(1000, 2) * (mesh.nodes.max(axis=0) - mesh.nodes.min(axis=0)) + + mesh.origin + ) + + # distance weighting + wz_numpy = utils.distance_weighting( + mesh, reference_locs, active_cells=actv, exponent=3, engine="numpy" + ) + wz_numba = utils.distance_weighting( + mesh, reference_locs, active_cells=actv, exponent=3, engine="numba" + ) + np.testing.assert_allclose(wz_numpy, wz_numba) + + def test_distance_weighting_1D(self): + # Mesh + dh = 5.0 + hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] + mesh = TensorMesh([hx], "C") + + actv = np.random.randint(0, 2, mesh.n_cells) == 1 + + 
reference_locs = ( + np.random.rand(1000, 1) * (mesh.nodes.max(axis=0) - mesh.nodes.min(axis=0)) + + mesh.origin + ) + + # distance weighting + wz_numpy = utils.distance_weighting( + mesh, reference_locs, active_cells=actv, exponent=3, engine="numpy" + ) + wz_numba = utils.distance_weighting( + mesh, reference_locs, active_cells=actv, exponent=3, engine="numba" + ) + np.testing.assert_allclose(wz_numpy, wz_numba) + + if __name__ == "__main__": unittest.main() From b03b8ca12d9e4b70393f4df8b52631f6258f2503 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 20 Dec 2023 10:12:52 -0800 Subject: [PATCH 140/164] typing --- SimPEG/utils/model_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index a25188b643..42e6b35494 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -207,7 +207,7 @@ def distance_weighting( reference_locs: np.ndarray, active_cells: np.ndarray | None = None, exponent: float = 2.0, - threshold: float = None, + threshold: float | None = None, engine: Literal["numpy", "numba"] = "numba", ): r""" From 7506d59eb850f2935d83a54cc566112b2c9446ab Mon Sep 17 00:00:00 2001 From: Thibaut Date: Tue, 2 Jan 2024 15:11:41 -0800 Subject: [PATCH 141/164] address comments --- SimPEG/utils/model_utils.py | 117 ++++++++++++++++++++++++++---------- 1 file changed, 85 insertions(+), 32 deletions(-) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index 42e6b35494..f7e9122a5c 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -5,12 +5,23 @@ import numpy as np import scipy.sparse as sp from discretize.utils import active_from_xyz -from numba import njit from scipy.interpolate import griddata from scipy.spatial import cKDTree from .mat_utils import mkvc +try: + from numba import njit, prange +except ImportError: + # Define dummy njit decorator + def njit(*args, **kwargs): + return lambda f: f + + # Define dummy prange 
function + prange = range + + warnings.warn("numba is not installed. Some computations might be slower.") + def surface2ind_topo(mesh, topo, gridLoc="CC", method="nearest", fill_value=np.nan): """Get indices of active cells from topography. @@ -202,13 +213,73 @@ def depth_weighting( return wz / np.nanmax(wz) +@njit(parallel=True) +def _distance_weighting_numba( + cell_centers: np.ndarray, + cell_volumes: np.ndarray, + reference_locs: np.ndarray, + threshold: float, + exponent: float = 2.0, +) -> np.ndarray: + r""" + distance weighting kernel in numba. + + If numba is not installed, this will work as a regular for loop. + + Parameters + ---------- + cell_centers : np.ndarray + cell centers of the mesh. + cell_volumes : np.ndarray + cell volumes of the mesh. + reference_locs : float or (n, ndim) numpy.ndarray + Reference location for the distance weighting. + It can be a ``float``, which value is the component for + the reference location. + Or it can be a 2d array, with multiple reference locations, where each + row should contain the coordinates of a single location point in the + following order: _x_, _y_, _z_ (for 3D meshes) or _x_, _z_ (for 2D + meshes). + The coordinate of the reference location, usually the receiver locations + exponent : float, optional + Exponent parameter for distance weighting. + The exponent should match the natural decay power of the potential + field. For example, for gravity acceleration, set it to 2; for magnetic + fields, to 3. + threshold : float or None, optional + Threshold parameters used in the distance weighting. + If ``None``, it will be set to half of the smallest cell width. + + Returns + ------- + (n_active) numpy.ndarray + Normalized distance weights for the mesh at every active cell as + a 1d-array. 
+ """ + distance_weights = np.zeros(len(cell_centers)) + n_reference_locs = len(reference_locs) + for i in prange(n_reference_locs): + rl = reference_locs[i] + dst_wgt = ( + np.sqrt(((cell_centers - rl) ** 2).sum(axis=1)) + threshold + ) ** exponent + dst_wgt = (cell_volumes / dst_wgt) ** 2 + distance_weights += dst_wgt + + distance_weights = distance_weights**0.5 + distance_weights /= cell_volumes + distance_weights /= np.nanmax(distance_weights) + + return distance_weights + + def distance_weighting( mesh: discretize.base.BaseMesh, reference_locs: np.ndarray, active_cells: np.ndarray | None = None, exponent: float = 2.0, threshold: float | None = None, - engine: Literal["numpy", "numba"] = "numba", + engine: Literal["loop", "vector"] = "loop", ): r""" Construct diagonal elements of a distance weighting matrix @@ -243,8 +314,8 @@ def distance_weighting( threshold : float or None, optional Threshold parameters used in the distance weighting. If ``None``, it will be set to half of the smallest cell width. - engine: str, 'numpy' or 'numba': pick between a numpy vectorized computation (memory intensive) or parallelized - numba implementation. Default to 'numba'. + engine: str, 'loops' or 'vector': pick between a `vector` vectorized computation (memory intensive) or `for` loop + implementation, parallelized with numba if available. Default to 'loop'. 
Returns ------- @@ -271,27 +342,8 @@ def distance_weighting( cell_centers = cell_centers.reshape(-1, 1) reference_locs = reference_locs.reshape(-1, 1) - if engine == "numba": - - @njit(parallel=True) - def distance_weighting_numba( - cell_centers: np.ndarray, - cell_volumes: np.ndarray, - reference_locs: np.ndarray, - threshold: float, - exponent: float = 2.0, - ): - distance_weights = np.zeros(len(cell_centers)) - for _, rl in enumerate(reference_locs): - dst_wgt = ( - np.sqrt(((cell_centers - rl) ** 2).sum(axis=1)) + threshold - ) ** exponent - dst_wgt = (cell_volumes / dst_wgt) ** 2 - distance_weights += dst_wgt - - return distance_weights - - dist_weights = distance_weighting_numba( + if engine == "loop": + distance_weights = _distance_weighting_numba( cell_centers, cell_volumes, reference_locs, @@ -299,7 +351,7 @@ def distance_weighting_numba( threshold=threshold, ) - elif engine == "numpy": + elif engine == "vector": n, d = cell_centers.shape t, d1 = reference_locs.shape if not d == d1: @@ -312,16 +364,17 @@ def distance_weighting_numba( - 2.0 * np.dot(cell_centers, reference_locs.T) ) ** 0.5 - dist_weights = ( + distance_weights = ( (cell_volumes.reshape(-1, 1) / ((distance + threshold) ** exponent)) ** 2 ).sum(axis=1) + distance_weights = distance_weights**0.5 + distance_weights /= cell_volumes + distance_weights /= np.nanmax(distance_weights) + else: raise ValueError( - f"engine should be either 'numpy' or 'numba', instead {engine=}" + f"engine should be either 'vector' or 'loop', instead {engine=}" ) - dist_weights = dist_weights**0.5 - dist_weights /= cell_volumes - - return dist_weights / np.nanmax(dist_weights) + return distance_weights From 145eeb87790d96ce47d66edfb2c125b53a916580 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Tue, 2 Jan 2024 15:13:47 -0800 Subject: [PATCH 142/164] fix tests --- tests/base/test_model_utils.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/base/test_model_utils.py 
b/tests/base/test_model_utils.py index 3f2c347c59..8ebfb32ceb 100644 --- a/tests/base/test_model_utils.py +++ b/tests/base/test_model_utils.py @@ -87,10 +87,10 @@ def test_distance_weighting_3D(self): # distance weighting wz_numpy = utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="numpy" + mesh, reference_locs, active_cells=actv, exponent=3, engine="vector" ) wz_numba = utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="numba" + mesh, reference_locs, active_cells=actv, exponent=3, engine="loop" ) np.testing.assert_allclose(wz_numpy, wz_numba) @@ -115,10 +115,10 @@ def test_distance_weighting_2D(self): # distance weighting wz_numpy = utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="numpy" + mesh, reference_locs, active_cells=actv, exponent=3, engine="vector" ) wz_numba = utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="numba" + mesh, reference_locs, active_cells=actv, exponent=3, engine="loop" ) np.testing.assert_allclose(wz_numpy, wz_numba) @@ -137,10 +137,10 @@ def test_distance_weighting_1D(self): # distance weighting wz_numpy = utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="numpy" + mesh, reference_locs, active_cells=actv, exponent=3, engine="vector" ) wz_numba = utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="numba" + mesh, reference_locs, active_cells=actv, exponent=3, engine="loop" ) np.testing.assert_allclose(wz_numpy, wz_numba) From f81a43ced3cddbae17f7fe1a3f19422bf9209461 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Tue, 2 Jan 2024 15:23:44 -0800 Subject: [PATCH 143/164] stacklevel --- SimPEG/utils/model_utils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index f7e9122a5c..88142093a6 100644 --- a/SimPEG/utils/model_utils.py +++ 
b/SimPEG/utils/model_utils.py @@ -20,7 +20,11 @@ def njit(*args, **kwargs): # Define dummy prange function prange = range - warnings.warn("numba is not installed. Some computations might be slower.") + warnings.warn( + "numba is not installed. Some computations might be slower.", + type=ImportWarning, + stacklevel=2, + ) def surface2ind_topo(mesh, topo, gridLoc="CC", method="nearest", fill_value=np.nan): From 89c75be11b8bb24db9d4efcc48a40b727c44251d Mon Sep 17 00:00:00 2001 From: Thibaut Date: Tue, 2 Jan 2024 15:33:40 -0800 Subject: [PATCH 144/164] try fix `| None` test error --- SimPEG/utils/model_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index 88142093a6..b35f81645b 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -1,5 +1,5 @@ import warnings -from typing import Literal +from typing import Literal, Optional import discretize import numpy as np @@ -280,9 +280,9 @@ def _distance_weighting_numba( def distance_weighting( mesh: discretize.base.BaseMesh, reference_locs: np.ndarray, - active_cells: np.ndarray | None = None, + active_cells: Optional[np.ndarray] = None, exponent: float = 2.0, - threshold: float | None = None, + threshold: Optional[float] = None, engine: Literal["loop", "vector"] = "loop", ): r""" From ef4056e63d289a09fdfadc9e9b5c3d7e96f7698f Mon Sep 17 00:00:00 2001 From: Thibaut Date: Tue, 2 Jan 2024 16:35:00 -0800 Subject: [PATCH 145/164] move warning --- SimPEG/utils/model_utils.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index b35f81645b..e2c42d19b5 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -11,8 +11,11 @@ from .mat_utils import mkvc try: + import numba from numba import njit, prange except ImportError: + numba = None + # Define dummy njit decorator def njit(*args, **kwargs): return lambda f: f 
@@ -20,12 +23,6 @@ def njit(*args, **kwargs): # Define dummy prange function prange = range - warnings.warn( - "numba is not installed. Some computations might be slower.", - type=ImportWarning, - stacklevel=2, - ) - def surface2ind_topo(mesh, topo, gridLoc="CC", method="nearest", fill_value=np.nan): """Get indices of active cells from topography. @@ -260,6 +257,7 @@ def _distance_weighting_numba( Normalized distance weights for the mesh at every active cell as a 1d-array. """ + distance_weights = np.zeros(len(cell_centers)) n_reference_locs = len(reference_locs) for i in prange(n_reference_locs): @@ -327,6 +325,11 @@ def distance_weighting( Normalized distance weights for the mesh at every active cell as a 1d-array. """ + if (numba is None) and (engine == "loop"): + warnings.warn( + "numba is not installed. 'For loops' computations might be slower.", + stacklevel=2, + ) active_cells = ( np.ones(mesh.n_cells, dtype=bool) if active_cells is None else active_cells From f982f98ef4ee1abb18629eacf224d927af99b341 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Tue, 2 Jan 2024 16:47:36 -0800 Subject: [PATCH 146/164] more warnings --- SimPEG/utils/model_utils.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index e2c42d19b5..00b1ed1dc3 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -325,11 +325,6 @@ def distance_weighting( Normalized distance weights for the mesh at every active cell as a 1d-array. """ - if (numba is None) and (engine == "loop"): - warnings.warn( - "numba is not installed. 'For loops' computations might be slower.", - stacklevel=2, - ) active_cells = ( np.ones(mesh.n_cells, dtype=bool) if active_cells is None else active_cells @@ -350,6 +345,12 @@ def distance_weighting( reference_locs = reference_locs.reshape(-1, 1) if engine == "loop": + if numba is None: + warnings.warn( + "numba is not installed. 
'loop' computations might be slower.", + stacklevel=2, + ) + distance_weights = _distance_weighting_numba( cell_centers, cell_volumes, @@ -359,6 +360,12 @@ def distance_weighting( ) elif engine == "vector": + warnings.warn( + "vectorized computations are memory intensive. Consider switching to `engine='loop'` if you run into memory" + " overflow issues", + stacklevel=2, + ) + n, d = cell_centers.shape t, d1 = reference_locs.shape if not d == d1: From cf65de4d6fe30c845ac084076a2e162dcbd9d2e0 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 3 Jan 2024 10:34:21 -0800 Subject: [PATCH 147/164] fix new depreciation --- .../plot_inv_1c_gravity_anomaly_irls_compare_weighting.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py b/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py index 3cf3cd461e..02d6b18727 100644 --- a/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py +++ b/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py @@ -431,7 +431,9 @@ true_model[ind_block] = block_density # You can also use SimPEG utilities to add structures to the model more concisely -ind_sphere = model_builder.getIndicesSphere(np.r_[35.0, 0.0, -40.0], 15.0, mesh.gridCC) +ind_sphere = model_builder.get_indices_sphere( + np.r_[35.0, 0.0, -40.0], 15.0, mesh.gridCC +) ind_sphere = ind_sphere[ind_active] true_model[ind_sphere] = sphere_density From 5e6a3c5d0283cffd0063d7ecba785ea170378e86 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 3 Jan 2024 12:55:55 -0800 Subject: [PATCH 148/164] replace numpy distance calculation by `scipy.spatial.distance.cdist` --- SimPEG/utils/model_utils.py | 39 ++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index 00b1ed1dc3..4b67f584e8 100644 --- a/SimPEG/utils/model_utils.py +++ 
b/SimPEG/utils/model_utils.py @@ -7,6 +7,7 @@ from discretize.utils import active_from_xyz from scipy.interpolate import griddata from scipy.spatial import cKDTree +from scipy.spatial.distance import cdist from .mat_utils import mkvc @@ -281,7 +282,8 @@ def distance_weighting( active_cells: Optional[np.ndarray] = None, exponent: float = 2.0, threshold: Optional[float] = None, - engine: Literal["loop", "vector"] = "loop", + engine: Literal["loop", "cdist"] = "loop", + cdist_opts: Optional[dict] = None, ): r""" Construct diagonal elements of a distance weighting matrix @@ -316,8 +318,11 @@ def distance_weighting( threshold : float or None, optional Threshold parameters used in the distance weighting. If ``None``, it will be set to half of the smallest cell width. - engine: str, 'loops' or 'vector': pick between a `vector` vectorized computation (memory intensive) or `for` loop - implementation, parallelized with numba if available. Default to 'loop'. + engine: str, 'loop' or 'cdist' + pick between a `scipy.spatial.distance.cdist` computation (memory intensive) or `for` loop implementation, + parallelized with numba if available. Default to 'loop'. + cdist_opts: dct, optional + Only valid with `engine=='cdist'`. Options to pass to scipy.spatial.distance.cdist. Default to None. Returns ------- @@ -350,7 +355,11 @@ def distance_weighting( "numba is not installed. 'loop' computations might be slower.", stacklevel=2, ) - + if cdist_opts is not None: + warnings.warn( + f"`cdist_opts` is only valid with `engine=='cdist'`, currently {engine=}", + stacklevel=2, + ) distance_weights = _distance_weighting_numba( cell_centers, cell_volumes, @@ -359,24 +368,14 @@ def distance_weighting( threshold=threshold, ) - elif engine == "vector": + elif engine == "cdist": warnings.warn( - "vectorized computations are memory intensive. Consider switching to `engine='loop'` if you run into memory" - " overflow issues", + "scipy.spatial.distance.cdist computations can be memory intensive. 
Consider switching to `engine='loop'` " + "if you run into memory overflow issues", stacklevel=2, ) - - n, d = cell_centers.shape - t, d1 = reference_locs.shape - if not d == d1: - raise Exception("vectors must have same number of columns") - - # vectorized distance calculations - distance = ( - np.dot((cell_centers**2.0), np.ones([d, t])) - + np.dot(np.ones([n, d]), (reference_locs**2.0).T) - - 2.0 * np.dot(cell_centers, reference_locs.T) - ) ** 0.5 + cdist_opts = cdist_opts or dict() + distance = cdist(cell_centers, reference_locs, **cdist_opts) distance_weights = ( (cell_volumes.reshape(-1, 1) / ((distance + threshold) ** exponent)) ** 2 @@ -388,7 +387,7 @@ def distance_weighting( else: raise ValueError( - f"engine should be either 'vector' or 'loop', instead {engine=}" + f"engine should be either 'cdist' or 'loop', instead {engine=}" ) return distance_weights From 939d02edb20f86330edc25a2f1552961ce4c2de1 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 3 Jan 2024 12:57:21 -0800 Subject: [PATCH 149/164] replace numpy distance calculation by `scipy.spatial.distance.cdist`, with possibility to pass optional args. 
--- SimPEG/utils/model_utils.py | 39 ++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index 00b1ed1dc3..4b67f584e8 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -7,6 +7,7 @@ from discretize.utils import active_from_xyz from scipy.interpolate import griddata from scipy.spatial import cKDTree +from scipy.spatial.distance import cdist from .mat_utils import mkvc @@ -281,7 +282,8 @@ def distance_weighting( active_cells: Optional[np.ndarray] = None, exponent: float = 2.0, threshold: Optional[float] = None, - engine: Literal["loop", "vector"] = "loop", + engine: Literal["loop", "cdist"] = "loop", + cdist_opts: Optional[dict] = None, ): r""" Construct diagonal elements of a distance weighting matrix @@ -316,8 +318,11 @@ def distance_weighting( threshold : float or None, optional Threshold parameters used in the distance weighting. If ``None``, it will be set to half of the smallest cell width. - engine: str, 'loops' or 'vector': pick between a `vector` vectorized computation (memory intensive) or `for` loop - implementation, parallelized with numba if available. Default to 'loop'. + engine: str, 'loop' or 'cdist' + pick between a `scipy.spatial.distance.cdist` computation (memory intensive) or `for` loop implementation, + parallelized with numba if available. Default to 'loop'. + cdist_opts: dct, optional + Only valid with `engine=='cdist'`. Options to pass to scipy.spatial.distance.cdist. Default to None. Returns ------- @@ -350,7 +355,11 @@ def distance_weighting( "numba is not installed. 
'loop' computations might be slower.", stacklevel=2, ) - + if cdist_opts is not None: + warnings.warn( + f"`cdist_opts` is only valid with `engine=='cdist'`, currently {engine=}", + stacklevel=2, + ) distance_weights = _distance_weighting_numba( cell_centers, cell_volumes, @@ -359,24 +368,14 @@ def distance_weighting( threshold=threshold, ) - elif engine == "vector": + elif engine == "cdist": warnings.warn( - "vectorized computations are memory intensive. Consider switching to `engine='loop'` if you run into memory" - " overflow issues", + "scipy.spatial.distance.cdist computations can be memory intensive. Consider switching to `engine='loop'` " + "if you run into memory overflow issues", stacklevel=2, ) - - n, d = cell_centers.shape - t, d1 = reference_locs.shape - if not d == d1: - raise Exception("vectors must have same number of columns") - - # vectorized distance calculations - distance = ( - np.dot((cell_centers**2.0), np.ones([d, t])) - + np.dot(np.ones([n, d]), (reference_locs**2.0).T) - - 2.0 * np.dot(cell_centers, reference_locs.T) - ) ** 0.5 + cdist_opts = cdist_opts or dict() + distance = cdist(cell_centers, reference_locs, **cdist_opts) distance_weights = ( (cell_volumes.reshape(-1, 1) / ((distance + threshold) ** exponent)) ** 2 @@ -388,7 +387,7 @@ def distance_weighting( else: raise ValueError( - f"engine should be either 'vector' or 'loop', instead {engine=}" + f"engine should be either 'cdist' or 'loop', instead {engine=}" ) return distance_weights From 01f7fa746c7029fd594dcea8e8dacd8a5188261a Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 3 Jan 2024 15:08:32 -0800 Subject: [PATCH 150/164] fix test --- tests/base/test_model_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/base/test_model_utils.py b/tests/base/test_model_utils.py index 8ebfb32ceb..9b85dfad3e 100644 --- a/tests/base/test_model_utils.py +++ b/tests/base/test_model_utils.py @@ -87,7 +87,7 @@ def test_distance_weighting_3D(self): # distance 
weighting wz_numpy = utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="vector" + mesh, reference_locs, active_cells=actv, exponent=3, engine="cdist" ) wz_numba = utils.distance_weighting( mesh, reference_locs, active_cells=actv, exponent=3, engine="loop" @@ -115,7 +115,7 @@ def test_distance_weighting_2D(self): # distance weighting wz_numpy = utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="vector" + mesh, reference_locs, active_cells=actv, exponent=3, engine="cdist" ) wz_numba = utils.distance_weighting( mesh, reference_locs, active_cells=actv, exponent=3, engine="loop" @@ -137,7 +137,7 @@ def test_distance_weighting_1D(self): # distance weighting wz_numpy = utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="vector" + mesh, reference_locs, active_cells=actv, exponent=3, engine="cdist" ) wz_numba = utils.distance_weighting( mesh, reference_locs, active_cells=actv, exponent=3, engine="loop" From c10d768967bcbe0b70771b95aa60fc1e8a3b34ac Mon Sep 17 00:00:00 2001 From: domfournier Date: Sun, 21 Jan 2024 12:01:21 -0800 Subject: [PATCH 151/164] Re-apply modulo for coterminal comps --- SimPEG/utils/mat_utils.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/SimPEG/utils/mat_utils.py b/SimPEG/utils/mat_utils.py index 5d128d9970..8d527f026c 100644 --- a/SimPEG/utils/mat_utils.py +++ b/SimPEG/utils/mat_utils.py @@ -409,12 +409,8 @@ def coterminal(theta): Coterminal angles """ - sub = theta[np.abs(theta) >= np.pi] - sub = -np.sign(sub) * (2 * np.pi - np.abs(sub)) - - theta[np.abs(theta) >= np.pi] = sub - - return theta + coterminal = np.sign(theta) * (np.abs(theta) % np.pi) + return coterminal def define_plane_from_points(xyz1, xyz2, xyz3): From 92ba02d7d2c9f7d3deec726398bf59063f740a55 Mon Sep 17 00:00:00 2001 From: domfournier Date: Sun, 21 Jan 2024 12:10:55 -0800 Subject: [PATCH 152/164] Add unittest --- tests/base/test_utils.py | 
12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/base/test_utils.py b/tests/base/test_utils.py index 541ed1642b..9f34923c87 100644 --- a/tests/base/test_utils.py +++ b/tests/base/test_utils.py @@ -1,4 +1,5 @@ import unittest +import pytest import numpy as np import scipy.sparse as sp import os @@ -22,6 +23,7 @@ Counter, download, surface2ind_topo, + coterminal, ) import discretize @@ -342,5 +344,15 @@ def test_downloads(self): shutil.rmtree(os.path.expanduser("./test_url")) +@pytest.mark.parametrize( + "angle", [np.pi, -np.pi, 3 * np.pi, 1.25 * np.pi, -1.25 * np.pi] +) +def test_coterminal(angle): + coangle = coterminal(angle) + assert np.abs(coangle) < np.pi + if coangle != 0: + assert np.sign(coterminal(angle)) == np.sign(angle) + + if __name__ == "__main__": unittest.main() From d45be1462f5b1ce96f72b285ace0bbd308068b56 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Mon, 22 Jan 2024 09:56:36 -0800 Subject: [PATCH 153/164] Add more tests for the coterminal function Run tests with coterminal angles in each quadrant and for right angles (multiples of np.pi / 2). 
--- tests/base/test_utils.py | 47 +++++++++++++++++++++++++++++++++------- 1 file changed, 39 insertions(+), 8 deletions(-) diff --git a/tests/base/test_utils.py b/tests/base/test_utils.py index 9f34923c87..a62c192b7e 100644 --- a/tests/base/test_utils.py +++ b/tests/base/test_utils.py @@ -344,14 +344,45 @@ def test_downloads(self): shutil.rmtree(os.path.expanduser("./test_url")) -@pytest.mark.parametrize( - "angle", [np.pi, -np.pi, 3 * np.pi, 1.25 * np.pi, -1.25 * np.pi] -) -def test_coterminal(angle): - coangle = coterminal(angle) - assert np.abs(coangle) < np.pi - if coangle != 0: - assert np.sign(coterminal(angle)) == np.sign(angle) +class TestCoterminalAngle: + """ + Tests for the coterminal function + """ + + @pytest.mark.parametrize( + "coterminal_angle", + (1 / 4 * np.pi, 3 / 4 * np.pi, -3 / 4 * np.pi, -1 / 4 * np.pi), + ids=("pi/4", "3/4 pi", "-3/4 pi", "-pi/4"), + ) + def test_angles_in_quadrants(self, coterminal_angle): + """ + Test coterminal for angles in each quadrant + """ + angles = np.array([2 * n * np.pi + coterminal_angle for n in range(-3, 4)]) + np.testing.assert_allclose(coterminal(angles), coterminal_angle) + + @pytest.mark.parametrize( + "coterminal_angle", + (0, np.pi / 2, np.pi, -np.pi / 2), + ids=("0", "pi/2", "pi", "-pi/2"), + ) + def test_right_angles(self, coterminal_angle): + """ + Test coterminal for right angles + """ + angles = np.array([2 * n * np.pi + coterminal_angle for n in range(-3, 4)]) + np.testing.assert_allclose(coterminal(angles), coterminal_angle) + + @pytest.mark.parametrize( + "angle", + [np.pi, -np.pi, 3 * np.pi, 1.25 * np.pi, -1.25 * np.pi], + ids=("pi", "-pi", "3 pi", "1.25 pi", "-1.25 pi"), + ) + def test_sign_coterminal(self, angle): + coangle = coterminal(angle) + assert np.abs(coangle) < np.pi + if coangle != 0: + assert np.sign(coterminal(angle)) == np.sign(angle) if __name__ == "__main__": From d042a7311bf98d57a8692eec28ab9cde072f0caf Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Mon, 22 Jan 2024 
10:03:47 -0800 Subject: [PATCH 154/164] Fix angle in tests: use -pi instead of pi This change assumes that the coterminal function returns the coterminal angles in the [-pi, pi) interval. --- tests/base/test_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/base/test_utils.py b/tests/base/test_utils.py index a62c192b7e..a606cb0991 100644 --- a/tests/base/test_utils.py +++ b/tests/base/test_utils.py @@ -363,8 +363,8 @@ def test_angles_in_quadrants(self, coterminal_angle): @pytest.mark.parametrize( "coterminal_angle", - (0, np.pi / 2, np.pi, -np.pi / 2), - ids=("0", "pi/2", "pi", "-pi/2"), + (0, np.pi / 2, -np.pi, -np.pi / 2), + ids=("0", "pi/2", "-pi", "-pi/2"), ) def test_right_angles(self, coterminal_angle): """ From b892ea42e407b9bd2145f790c2260faab60faa2c Mon Sep 17 00:00:00 2001 From: Thibaut Date: Mon, 22 Jan 2024 13:58:04 -0800 Subject: [PATCH 155/164] work on rot.grad. --- SimPEG/regularization/rotated.py | 388 +++++++++++++++++++++++++++++-- 1 file changed, 372 insertions(+), 16 deletions(-) diff --git a/SimPEG/regularization/rotated.py b/SimPEG/regularization/rotated.py index 02c863379d..ecf3df0476 100644 --- a/SimPEG/regularization/rotated.py +++ b/SimPEG/regularization/rotated.py @@ -1,9 +1,16 @@ import numpy as np import scipy.sparse as sp +from discretize.base import BaseMesh from scipy.interpolate import NearestNDInterpolator -from ..utils.code_utils import validate_ndarray_with_shape -from .base import BaseRegularization +from .. import utils +from ..utils.code_utils import ( + validate_float, + validate_ndarray_with_shape, + validate_type, +) +from .base import BaseRegularization, RegularizationMesh +from .sparse import Sparse, SparseSmallness class SmoothnessFullGradient(BaseRegularization): @@ -29,7 +36,7 @@ class SmoothnessFullGradient(BaseRegularization): Each matrix should be orthonormal. Default is Identity. ortho_check : bool, optional Whether to check `reg_dirs` for orthogonality. 
- **kwargs + kwargs : Keyword arguments passed to the parent class ``BaseRegularization``. Examples @@ -81,11 +88,47 @@ class SmoothnessFullGradient(BaseRegularization): anisotropic alpha used for rotated gradients. """ - def __init__(self, mesh, alphas=None, reg_dirs=None, ortho_check=True, **kwargs): + # TODO: move this to KoBold/SimPEG + _multiplier_pair = "alpha_x" + + def __init__( + self, + mesh, + alphas=None, + reg_dirs=None, + ortho_check=True, + norm=2, + irls_scaled=True, + irls_threshold=1e-8, + reference_model_in_smooth=False, + **kwargs, + ): + """ + _summary_ + + :param mesh: _description_ + :param alphas: _description_, defaults to None + :param reg_dirs: _description_, defaults to None + :param ortho_check: _description_, defaults to True + :param norm: _description_, defaults to 2 + :param irls_scaled: _description_, defaults to True + :param irls_threshold: _description_, defaults to 1e-8 + :raises TypeError: _description_ + :raises IndexError: _description_ + :raises ValueError: _description_ + :raises IndexError: _description_ + :raises ValueError: _description_ + """ + self.reference_model_in_smooth = reference_model_in_smooth + if mesh.dim < 2: raise TypeError("Mesh must have dimension higher than 1") super().__init__(mesh=mesh, **kwargs) + self.norm = norm + self.irls_threshold = irls_threshold + self.irls_scaled = irls_scaled + if alphas is None: edge_length = np.min(mesh.edge_lengths) alphas = edge_length**2 * np.ones(mesh.dim) @@ -107,7 +150,7 @@ def __init__(self, mesh, alphas=None, reg_dirs=None, ortho_check=True, **kwargs) else: raise IndexError( f"`alphas` first dimension, {alphas.shape[0]}, must be either number " - f"of active cells {n_cells}, or the number of mesh cells {mesh.n_cells}. " + f"of active cells {mesh.n_cells}, or the number of mesh cells {mesh.n_cells}. 
" ) if np.any(alphas < 0): raise ValueError("`alpha` must be non-negative") @@ -134,7 +177,7 @@ def __init__(self, mesh, alphas=None, reg_dirs=None, ortho_check=True, **kwargs) else: raise IndexError( f"`reg_dirs` first dimension, {reg_dirs.shape[0]}, must be either number " - f"of active cells {n_cells}, or the number of mesh cells {mesh.n_cells}. " + f"of active cells {mesh.n_cells}, or the number of mesh cells {mesh.n_cells}. " ) # check orthogonality? if ortho_check: @@ -168,28 +211,109 @@ def __init__(self, mesh, alphas=None, reg_dirs=None, ortho_check=True, **kwargs) ) self._anis_alpha = anis_alpha + @property + def reference_model_in_smooth(self) -> bool: + """ + _summary_ + + :return: _description_ + """ + # Inherited from BaseRegularization class + return self._reference_model_in_smooth + + @reference_model_in_smooth.setter + def reference_model_in_smooth(self, value: bool): + """ + _summary_ + + :param value: _description_ + :raises TypeError: _description_ + """ + if not isinstance(value, bool): + raise TypeError( + f"'reference_model_in_smooth must be of type 'bool'. Value of type {type(value)} provided." 
+ ) + self._reference_model_in_smooth = value + + def _delta_m(self, m): + """ + _summary_ + + :param m: _description_ + :return: _description_ + """ + if self.reference_model is None or not self.reference_model_in_smooth: + return m + return m - self.reference_model + + def f_m(self, m): + """ + _summary_ + + :param m: _description_ + :return: _description_ + """ + dfm_dl = self.cell_gradient @ (self.mapping * self._delta_m(m)) + + if self.units is not None and self.units.lower() == "radian": + cell_distances = np.r_[ + self.regularization_mesh.mesh.average_cell_to_face_x + * self.regularization_mesh.mesh.h_gridded[:, 0], + self.regularization_mesh.mesh.average_cell_to_face_y + * self.regularization_mesh.mesh.h_gridded[:, 1], + self.regularization_mesh.mesh.average_cell_to_face_z + * self.regularization_mesh.mesh.h_gridded[:, 2], + ] + return utils.mat_utils.coterminal(dfm_dl * cell_distances) / cell_distances + return dfm_dl + + def f_m_deriv(self, m): + """ + _summary_ + + :param m: _description_ + :return: _description_ + """ + return self.cell_gradient @ self.mapping.deriv(self._delta_m(m)) + # overwrite the call, deriv, and deriv2... 
def __call__(self, m): - G = self.cell_gradient + """ + _summary_ + + :param m: _description_ + :return: _description_ + """ M_f = self.W - r = G @ (self.mapping * (self._delta_m(m))) + r = self.f_m(m) return 0.5 * r @ M_f @ r def deriv(self, m): - m_d = self.mapping.deriv(self._delta_m(m)) - G = self.cell_gradient + """ + _summary_ + + :param m: _description_ + :return: _description_ + """ + m_d = self.f_m_deriv(m) M_f = self.W - r = G @ (self.mapping * (self._delta_m(m))) - return m_d.T * (G.T @ (M_f @ r)) + r = self.f_m(m) + return m_d.T @ (M_f @ r) def deriv2(self, m, v=None): - m_d = self.mapping.deriv(self._delta_m(m)) - G = self.cell_gradient + """ + _summary_ + + :param m: _description_ + :param v: _description_, defaults to None + :return: _description_ + """ + m_d = self.f_m_deriv(m) M_f = self.W if v is None: - return m_d.T @ (G.T @ M_f @ G) @ m_d + return m_d.T @ (M_f @ m_d) - return m_d.T @ (G.T @ (M_f @ (G @ (m_d @ v)))) + return m_d.T @ (M_f @ (m_d @ v)) @property def cell_gradient(self): @@ -247,3 +371,235 @@ def W(self): self._W = mesh.get_face_inner_product(reg_model) return self._W + + def update_weights(self, m): + """ + _summary_ + + :param m: _description_ + """ + f_m = self.f_m(m) + irls_weights = self.get_lp_weights(f_m) + irls_weights = self.regularization_mesh.mesh.average_face_to_cell @ irls_weights + self.set_weights(irls=irls_weights[self.active_cells]) + + def get_lp_weights(self, f_m): + """ + _summary_ + + :param f_m: _description_ + :return: _description_ + """ + lp_scale = np.ones_like(f_m) + if self.irls_scaled: + # Scale on l2-norm gradient: f_m.max() + l2_max = np.ones_like(f_m) * np.abs(f_m).max() + # Compute theoretical maximum gradients for p < 1 + l2_max[self.norm < 1] = self.irls_threshold / np.sqrt( + 1.0 - self.norm[self.norm < 1] + ) + lp_values = l2_max / (l2_max**2.0 + self.irls_threshold**2.0) ** ( + 1.0 - self.norm / 2.0 + ) + lp_scale[lp_values != 0] = np.abs(f_m).max() / lp_values[lp_values != 0] + + return 
lp_scale / (f_m**2.0 + self.irls_threshold**2.0) ** ( + 1.0 - self.norm / 2.0 + ) + + @property + def irls_scaled(self) -> bool: + """Scale IRLS weights. + + When ``True``, scaling is applied when computing IRLS weights. + The scaling acts to preserve the balance between the data misfit and the components of + the regularization based on the derivative of the l2-norm measure. And it assists the + convergence by ensuring the model does not deviate + aggressively from the global 2-norm solution during the first few IRLS iterations. + For a comprehensive description, see the documentation for :py:meth:`get_lp_weights` . + + Returns + ------- + bool + Whether to scale IRLS weights. + """ + return self._irls_scaled + + @irls_scaled.setter + def irls_scaled(self, value: bool): + """ + _summary_ + + :param value: _description_ + """ + self._irls_scaled = validate_type("irls_scaled", value, bool, cast=False) + + @property + def irls_threshold(self): + r"""Stability constant for computing IRLS weights. + + Returns + ------- + float + Stability constant for computing IRLS weights. + """ + return self._irls_threshold + + @irls_threshold.setter + def irls_threshold(self, value): + self._irls_threshold = validate_float( + "irls_threshold", value, min_val=0.0, inclusive_min=False + ) + + @property + def norm(self): + r"""Norm for the sparse regularization. + + Returns + ------- + None, float, (n_cells, ) numpy.ndarray + Norm for the sparse regularization. If ``None``, a 2-norm is used. + A float within the interval [0,2] represents a constant norm applied for all cells. + A ``numpy.ndarray`` object, where each entry is used to apply a different norm to each cell in the mesh. 
+ """ + return self._norm + + @norm.setter + def norm(self, value: float | np.ndarray | None): + """ + _summary_ + + :param value: _description_ + :raises ValueError: _description_ + """ + if value is None: + value = np.ones(self.cell_gradient.shape[0]) * 2.0 + else: + value = np.ones(self.cell_gradient.shape[0]) * value + if np.any(value < 0) or np.any(value > 2): + raise ValueError( + "Value provided for 'norm' should be in the interval [0, 2]" + ) + self._norm = value + + @property + def units(self) -> str | None: + """Units for the model parameters. + + Some regularization classes behave differently depending on the units; e.g. 'radian'. + + Returns + ------- + str + Units for the model parameters. + """ + return self._units + + @units.setter + def units(self, units: str | None): + if units is not None and not isinstance(units, str): + raise TypeError( + f"'units' must be None or type str. Value of type {type(units)} provided." + ) + self._units = units + + +class RotatedSparse(Sparse): + def __init__( + self, + mesh, + reg_dirs, + alphas_rot, + active_cells=None, + norms=[2, 2], + gradient_type="total", + irls_scaled=True, + irls_threshold=1e-8, + objfcts=None, + **kwargs, + ): + """ + Class to wrap rotated gradient into a ComboObjective Function + + :param mesh: _description_ + :param reg_dirs: _description_ + :param alphas_rot: _description_ + :param active_cells: _description_, defaults to None + :param norms: _description_, defaults to None + :param gradient_type: _description_, defaults to "total" + :param irls_scaled: _description_, defaults to True + :param irls_threshold: _description_, defaults to 1e-8 + :param objfcts: _description_, defaults to None + """ + if not isinstance(mesh, RegularizationMesh): + mesh = RegularizationMesh(mesh) + + if not isinstance(mesh, RegularizationMesh): + TypeError( + f"'regularization_mesh' must be of type {RegularizationMesh} or {BaseMesh}. " + f"Value of type {type(mesh)} provided." 
+ ) + self._regularization_mesh = mesh + if active_cells is not None: + self._regularization_mesh.active_cells = active_cells + + if objfcts is None: + objfcts = [ + SparseSmallness(mesh=self.regularization_mesh), + SmoothnessFullGradient( + mesh=self.regularization_mesh.mesh, + active_cells=active_cells, + reg_dirs=reg_dirs, + alphas=alphas_rot, + norm=norms[1], + irls_scaled=irls_scaled, + irls_threshold=irls_threshold, + # **kwargs, + ), + ] + + super().__init__( + self.regularization_mesh, + objfcts=objfcts, + active_cells=active_cells, + gradient_type=gradient_type, + norms=norms[:2], + irls_scaled=irls_scaled, + irls_threshold=irls_threshold, + **kwargs, + ) + + @property + def alpha_y(self): + """Multiplier constant for first-order smoothness along y. + + Returns + ------- + float + Multiplier constant for first-order smoothness along y. + """ + return self._alpha_y + + @alpha_y.setter + def alpha_y(self, value): + self._alpha_y = None + + @property + def alpha_z(self): + """Multiplier constant for first-order smoothness along z. + + Returns + ------- + float + Multiplier constant for first-order smoothness along z. 
+ """ + return self._alpha_z + + @alpha_z.setter + def alpha_z(self, value): + """ + _summary_ + + :param value: _description_ + """ + self._alpha_z = None From 41875d9323f5342c3429e8d964f33dcf52606bfa Mon Sep 17 00:00:00 2001 From: domfournier Date: Mon, 22 Jan 2024 23:48:06 -0800 Subject: [PATCH 156/164] Go for joe's modulus --- SimPEG/utils/mat_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/utils/mat_utils.py b/SimPEG/utils/mat_utils.py index 8d527f026c..be2fedf19d 100644 --- a/SimPEG/utils/mat_utils.py +++ b/SimPEG/utils/mat_utils.py @@ -409,7 +409,7 @@ def coterminal(theta): Coterminal angles """ - coterminal = np.sign(theta) * (np.abs(theta) % np.pi) + coterminal = (theta + np.pi) % (2 * np.pi) - np.pi return coterminal From 2cbd998ba5077dc73ac7637e0edab1a56333d54e Mon Sep 17 00:00:00 2001 From: Thibaut Date: Wed, 24 Jan 2024 14:39:34 -0800 Subject: [PATCH 157/164] wip --- SimPEG/regularization/rotated.py | 54 ++++++++++++++++++++++++-------- 1 file changed, 41 insertions(+), 13 deletions(-) diff --git a/SimPEG/regularization/rotated.py b/SimPEG/regularization/rotated.py index ecf3df0476..0b6a86f51c 100644 --- a/SimPEG/regularization/rotated.py +++ b/SimPEG/regularization/rotated.py @@ -1,16 +1,17 @@ import numpy as np import scipy.sparse as sp +from discretize import TensorMesh, TreeMesh from discretize.base import BaseMesh from scipy.interpolate import NearestNDInterpolator -from .. 
import utils -from ..utils.code_utils import ( +from SimPEG import utils +from SimPEG.regularization import RegularizationMesh, Sparse, SparseSmallness +from SimPEG.regularization.base import BaseRegularization +from SimPEG.utils.code_utils import ( validate_float, validate_ndarray_with_shape, validate_type, ) -from .base import BaseRegularization, RegularizationMesh -from .sparse import Sparse, SparseSmallness class SmoothnessFullGradient(BaseRegularization): @@ -256,15 +257,10 @@ def f_m(self, m): dfm_dl = self.cell_gradient @ (self.mapping * self._delta_m(m)) if self.units is not None and self.units.lower() == "radian": - cell_distances = np.r_[ - self.regularization_mesh.mesh.average_cell_to_face_x - * self.regularization_mesh.mesh.h_gridded[:, 0], - self.regularization_mesh.mesh.average_cell_to_face_y - * self.regularization_mesh.mesh.h_gridded[:, 1], - self.regularization_mesh.mesh.average_cell_to_face_z - * self.regularization_mesh.mesh.h_gridded[:, 2], - ] - return utils.mat_utils.coterminal(dfm_dl * cell_distances) / cell_distances + return ( + utils.mat_utils.coterminal(dfm_dl * self.cell_distances) + / self.cell_distances + ) return dfm_dl def f_m_deriv(self, m): @@ -497,12 +493,44 @@ def units(self) -> str | None: @units.setter def units(self, units: str | None): + """ + _summary_ + + :param units: _description_ + :raises TypeError: _description_ + """ if units is not None and not isinstance(units, str): raise TypeError( f"'units' must be None or type str. Value of type {type(units)} provided." 
) self._units = units + @property + def cell_distances(self): + """ + _summary_ + + :return: _description_ + """ + mesh = self.regularization_mesh.mesh + if isinstance(mesh, TreeMesh): + average_cell_to_face_x = mesh.average_cell_to_face_x + average_cell_to_face_y = mesh.average_cell_to_face_y + average_cell_to_face_z = mesh.average_cell_to_face_z + + elif isinstance(mesh, TensorMesh): + average_cell_to_face_x = mesh.average_cell_to_face[: mesh.nFx] + average_cell_to_face_y = mesh.average_cell_to_face[ + mesh.nFx : (mesh.nFx + mesh.nFy) + ] + average_cell_to_face_z = mesh.average_cell_to_face[(mesh.nFx + mesh.nFy) :] + + return np.r_[ + average_cell_to_face_x * mesh.h_gridded[:, 0], + average_cell_to_face_y * mesh.h_gridded[:, 1], + average_cell_to_face_z * mesh.h_gridded[:, 2], + ] + class RotatedSparse(Sparse): def __init__( From a565de42e133c06d092af1abead5967a8a7c7a50 Mon Sep 17 00:00:00 2001 From: domfournier Date: Wed, 24 Jan 2024 15:57:35 -0800 Subject: [PATCH 158/164] Change bounds on pi in docs --- SimPEG/utils/mat_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/utils/mat_utils.py b/SimPEG/utils/mat_utils.py index be2fedf19d..c7b3d07fcb 100644 --- a/SimPEG/utils/mat_utils.py +++ b/SimPEG/utils/mat_utils.py @@ -396,7 +396,7 @@ def coterminal(theta): \theta = 2\pi N + \gamma and *N* is an integer, the function returns the value of :math:`\gamma`. - The coterminal angle :math:`\gamma` is within the range :math:`[-\pi , \pi]`. + The coterminal angle :math:`\gamma` is within the range :math:`[-\pi , \pi)`. 
Parameters ---------- From 579e0ce942bdb5a19ac06a83d80f784883842c56 Mon Sep 17 00:00:00 2001 From: domfournier Date: Wed, 24 Jan 2024 15:58:12 -0800 Subject: [PATCH 159/164] Remove failing test on sign --- tests/base/test_utils.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/tests/base/test_utils.py b/tests/base/test_utils.py index a606cb0991..5952b755fc 100644 --- a/tests/base/test_utils.py +++ b/tests/base/test_utils.py @@ -373,17 +373,6 @@ def test_right_angles(self, coterminal_angle): angles = np.array([2 * n * np.pi + coterminal_angle for n in range(-3, 4)]) np.testing.assert_allclose(coterminal(angles), coterminal_angle) - @pytest.mark.parametrize( - "angle", - [np.pi, -np.pi, 3 * np.pi, 1.25 * np.pi, -1.25 * np.pi], - ids=("pi", "-pi", "3 pi", "1.25 pi", "-1.25 pi"), - ) - def test_sign_coterminal(self, angle): - coangle = coterminal(angle) - assert np.abs(coangle) < np.pi - if coangle != 0: - assert np.sign(coterminal(angle)) == np.sign(angle) - if __name__ == "__main__": unittest.main() From 33ff71d906f1158bf411ad5eaf66af249b2d106a Mon Sep 17 00:00:00 2001 From: Thibaut Date: Thu, 25 Jan 2024 15:18:34 -0800 Subject: [PATCH 160/164] cell_distances --- SimPEG/regularization/rotated.py | 42 ++++++++------------------------ 1 file changed, 10 insertions(+), 32 deletions(-) diff --git a/SimPEG/regularization/rotated.py b/SimPEG/regularization/rotated.py index 0b6a86f51c..e927e3e4b0 100644 --- a/SimPEG/regularization/rotated.py +++ b/SimPEG/regularization/rotated.py @@ -1,17 +1,16 @@ import numpy as np import scipy.sparse as sp -from discretize import TensorMesh, TreeMesh from discretize.base import BaseMesh from scipy.interpolate import NearestNDInterpolator -from SimPEG import utils -from SimPEG.regularization import RegularizationMesh, Sparse, SparseSmallness -from SimPEG.regularization.base import BaseRegularization -from SimPEG.utils.code_utils import ( +from ..utils.code_utils import ( validate_float, validate_ndarray_with_shape, 
validate_type, ) +from ..utils.mat_utils import coterminal +from . import BaseRegularization, RegularizationMesh, Sparse, SparseSmallness +from .base import BaseRegularization class SmoothnessFullGradient(BaseRegularization): @@ -257,10 +256,7 @@ def f_m(self, m): dfm_dl = self.cell_gradient @ (self.mapping * self._delta_m(m)) if self.units is not None and self.units.lower() == "radian": - return ( - utils.mat_utils.coterminal(dfm_dl * self.cell_distances) - / self.cell_distances - ) + return coterminal(dfm_dl * self._cell_distances) / self._cell_distances return dfm_dl def f_m_deriv(self, m): @@ -506,30 +502,12 @@ def units(self, units: str | None): self._units = units @property - def cell_distances(self): - """ - _summary_ - - :return: _description_ - """ - mesh = self.regularization_mesh.mesh - if isinstance(mesh, TreeMesh): - average_cell_to_face_x = mesh.average_cell_to_face_x - average_cell_to_face_y = mesh.average_cell_to_face_y - average_cell_to_face_z = mesh.average_cell_to_face_z - - elif isinstance(mesh, TensorMesh): - average_cell_to_face_x = mesh.average_cell_to_face[: mesh.nFx] - average_cell_to_face_y = mesh.average_cell_to_face[ - mesh.nFx : (mesh.nFx + mesh.nFy) - ] - average_cell_to_face_z = mesh.average_cell_to_face[(mesh.nFx + mesh.nFy) :] + def _cell_distances(self): + cell_distances = self.cell_gradient.max(axis=1).toarray().flatten() + cell_distances[cell_distances == 0] = 1 + cell_distances = cell_distances ** (-1) - return np.r_[ - average_cell_to_face_x * mesh.h_gridded[:, 0], - average_cell_to_face_y * mesh.h_gridded[:, 1], - average_cell_to_face_z * mesh.h_gridded[:, 2], - ] + return cell_distances class RotatedSparse(Sparse): From 923f0149fada0cfaca00ebd887cd50d24129bf51 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Thu, 25 Jan 2024 16:22:51 -0800 Subject: [PATCH 161/164] polishing --- SimPEG/regularization/rotated.py | 150 +++++++------------------------ 1 file changed, 33 insertions(+), 117 deletions(-) diff --git 
a/SimPEG/regularization/rotated.py b/SimPEG/regularization/rotated.py index e927e3e4b0..78eb28700f 100644 --- a/SimPEG/regularization/rotated.py +++ b/SimPEG/regularization/rotated.py @@ -1,5 +1,8 @@ +from typing import Literal + import numpy as np import scipy.sparse as sp +from discretize import TensorMesh, TreeMesh from discretize.base import BaseMesh from scipy.interpolate import NearestNDInterpolator @@ -10,7 +13,6 @@ ) from ..utils.mat_utils import coterminal from . import BaseRegularization, RegularizationMesh, Sparse, SparseSmallness -from .base import BaseRegularization class SmoothnessFullGradient(BaseRegularization): @@ -88,7 +90,6 @@ class SmoothnessFullGradient(BaseRegularization): anisotropic alpha used for rotated gradients. """ - # TODO: move this to KoBold/SimPEG _multiplier_pair = "alpha_x" def __init__( @@ -103,22 +104,6 @@ def __init__( reference_model_in_smooth=False, **kwargs, ): - """ - _summary_ - - :param mesh: _description_ - :param alphas: _description_, defaults to None - :param reg_dirs: _description_, defaults to None - :param ortho_check: _description_, defaults to True - :param norm: _description_, defaults to 2 - :param irls_scaled: _description_, defaults to True - :param irls_threshold: _description_, defaults to 1e-8 - :raises TypeError: _description_ - :raises IndexError: _description_ - :raises ValueError: _description_ - :raises IndexError: _description_ - :raises ValueError: _description_ - """ self.reference_model_in_smooth = reference_model_in_smooth if mesh.dim < 2: @@ -214,21 +199,14 @@ def __init__( @property def reference_model_in_smooth(self) -> bool: """ - _summary_ + whether to include reference model in gradient or not - :return: _description_ + :return: True or False """ - # Inherited from BaseRegularization class return self._reference_model_in_smooth @reference_model_in_smooth.setter def reference_model_in_smooth(self, value: bool): - """ - _summary_ - - :param value: _description_ - :raises TypeError: 
_description_ - """ if not isinstance(value, bool): raise TypeError( f"'reference_model_in_smooth must be of type 'bool'. Value of type {type(value)} provided." @@ -236,23 +214,11 @@ def reference_model_in_smooth(self, value: bool): self._reference_model_in_smooth = value def _delta_m(self, m): - """ - _summary_ - - :param m: _description_ - :return: _description_ - """ if self.reference_model is None or not self.reference_model_in_smooth: return m return m - self.reference_model def f_m(self, m): - """ - _summary_ - - :param m: _description_ - :return: _description_ - """ dfm_dl = self.cell_gradient @ (self.mapping * self._delta_m(m)) if self.units is not None and self.units.lower() == "radian": @@ -260,46 +226,21 @@ def f_m(self, m): return dfm_dl def f_m_deriv(self, m): - """ - _summary_ - - :param m: _description_ - :return: _description_ - """ return self.cell_gradient @ self.mapping.deriv(self._delta_m(m)) # overwrite the call, deriv, and deriv2... def __call__(self, m): - """ - _summary_ - - :param m: _description_ - :return: _description_ - """ M_f = self.W r = self.f_m(m) return 0.5 * r @ M_f @ r def deriv(self, m): - """ - _summary_ - - :param m: _description_ - :return: _description_ - """ m_d = self.f_m_deriv(m) M_f = self.W r = self.f_m(m) return m_d.T @ (M_f @ r) def deriv2(self, m, v=None): - """ - _summary_ - - :param m: _description_ - :param v: _description_, defaults to None - :return: _description_ - """ m_d = self.f_m_deriv(m) M_f = self.W if v is None: @@ -365,23 +306,12 @@ def W(self): return self._W def update_weights(self, m): - """ - _summary_ - - :param m: _description_ - """ f_m = self.f_m(m) irls_weights = self.get_lp_weights(f_m) irls_weights = self.regularization_mesh.mesh.average_face_to_cell @ irls_weights self.set_weights(irls=irls_weights[self.active_cells]) def get_lp_weights(self, f_m): - """ - _summary_ - - :param f_m: _description_ - :return: _description_ - """ lp_scale = np.ones_like(f_m) if self.irls_scaled: # Scale on 
l2-norm gradient: f_m.max() @@ -419,11 +349,6 @@ def irls_scaled(self) -> bool: @irls_scaled.setter def irls_scaled(self, value: bool): - """ - _summary_ - - :param value: _description_ - """ self._irls_scaled = validate_type("irls_scaled", value, bool, cast=False) @property @@ -458,12 +383,6 @@ def norm(self): @norm.setter def norm(self, value: float | np.ndarray | None): - """ - _summary_ - - :param value: _description_ - :raises ValueError: _description_ - """ if value is None: value = np.ones(self.cell_gradient.shape[0]) * 2.0 else: @@ -489,12 +408,6 @@ def units(self) -> str | None: @units.setter def units(self, units: str | None): - """ - _summary_ - - :param units: _description_ - :raises TypeError: _description_ - """ if units is not None and not isinstance(units, str): raise TypeError( f"'units' must be None or type str. Value of type {type(units)} provided." @@ -502,7 +415,12 @@ def units(self, units: str | None): self._units = units @property - def _cell_distances(self): + def _cell_distances(self) -> np.ndarray: + """ + cell size average on faces + + :return: np.ndarray + """ cell_distances = self.cell_gradient.max(axis=1).toarray().flatten() cell_distances[cell_distances == 0] = 1 cell_distances = cell_distances ** (-1) @@ -511,31 +429,35 @@ def _cell_distances(self): class RotatedSparse(Sparse): + """ + Class that wraps the rotated gradients in a ComboObjectiveFunction similar to Sparse. 
+ """ + def __init__( self, - mesh, - reg_dirs, - alphas_rot, - active_cells=None, - norms=[2, 2], - gradient_type="total", - irls_scaled=True, - irls_threshold=1e-8, - objfcts=None, + mesh: TensorMesh | TreeMesh, + reg_dirs: np.ndarray, + alphas_rot: tuple[float, float, float], + active_cells: np.ndarray | None = None, + norms: list[float] = [2.0, 2.0], + gradient_type: Literal["components", "total"] = "total", + irls_scaled: bool = True, + irls_threshold: float = 1e-8, + objfcts: list[BaseRegularization] | None = None, **kwargs, ): """ Class to wrap rotated gradient into a ComboObjective Function - :param mesh: _description_ - :param reg_dirs: _description_ - :param alphas_rot: _description_ - :param active_cells: _description_, defaults to None - :param norms: _description_, defaults to None - :param gradient_type: _description_, defaults to "total" - :param irls_scaled: _description_, defaults to True - :param irls_threshold: _description_, defaults to 1e-8 - :param objfcts: _description_, defaults to None + :param mesh: mesh + :param reg_dirs: rotation matrix + :param alphas_rot: alphas for rotated gradients + :param active_cells: active cells, defaults to None + :param norms: norms, defaults to [2, 2] + :param gradient_type: gradient_type, defaults to "total" + :param irls_scaled: irls_scaled, defaults to True + :param irls_threshold: irls_threshold, defaults to 1e-8 + :param objfcts: objfcts, defaults to None """ if not isinstance(mesh, RegularizationMesh): mesh = RegularizationMesh(mesh) @@ -560,7 +482,6 @@ def __init__( norm=norms[1], irls_scaled=irls_scaled, irls_threshold=irls_threshold, - # **kwargs, ), ] @@ -603,9 +524,4 @@ def alpha_z(self): @alpha_z.setter def alpha_z(self, value): - """ - _summary_ - - :param value: _description_ - """ self._alpha_z = None From ad97ccb2ebf3a4e518e5cf79a4fd32e362ed40cb Mon Sep 17 00:00:00 2001 From: Thibaut Date: Thu, 25 Jan 2024 16:25:06 -0800 Subject: [PATCH 162/164] add init --- SimPEG/regularization/__init__.py | 
20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/SimPEG/regularization/__init__.py b/SimPEG/regularization/__init__.py index b4cf48aa43..44993f7278 100644 --- a/SimPEG/regularization/__init__.py +++ b/SimPEG/regularization/__init__.py @@ -149,28 +149,28 @@ from ..utils.code_utils import deprecate_class from .base import ( BaseRegularization, - WeightedLeastSquares, BaseSimilarityMeasure, Smallness, SmoothnessFirstOrder, SmoothnessSecondOrder, + WeightedLeastSquares, ) -from .regularization_mesh import RegularizationMesh -from .regularization_mesh_lateral import LCRegularizationMesh -from .sparse import BaseSparse, SparseSmallness, SparseSmoothness, Sparse -from .pgi import PGIsmallness, PGI -from .cross_gradient import CrossGradient from .correspondence import LinearCorrespondence +from .cross_gradient import CrossGradient from .jtv import JointTotalVariation +from .pgi import PGI, PGIsmallness +from .regularization_mesh import RegularizationMesh +from .regularization_mesh_lateral import LCRegularizationMesh +from .rotated import RotatedSparse, SmoothnessFullGradient +from .sparse import BaseSparse, Sparse, SparseSmallness, SparseSmoothness from .vector import ( + AmplitudeSmallness, + AmplitudeSmoothnessFirstOrder, + BaseAmplitude, BaseVectorRegularization, CrossReferenceRegularization, - BaseAmplitude, VectorAmplitude, - AmplitudeSmallness, - AmplitudeSmoothnessFirstOrder, ) -from .rotated import SmoothnessFullGradient @deprecate_class(removal_version="0.19.0", future_warn=True) From 492f44e20306851080dcceb746971d139040e5f6 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Thu, 25 Jan 2024 16:59:45 -0800 Subject: [PATCH 163/164] remove rotated from init --- SimPEG/regularization/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/SimPEG/regularization/__init__.py b/SimPEG/regularization/__init__.py index 44993f7278..984c537509 100644 --- a/SimPEG/regularization/__init__.py +++ b/SimPEG/regularization/__init__.py @@ -161,7 
+161,6 @@ from .pgi import PGI, PGIsmallness from .regularization_mesh import RegularizationMesh from .regularization_mesh_lateral import LCRegularizationMesh -from .rotated import RotatedSparse, SmoothnessFullGradient from .sparse import BaseSparse, Sparse, SparseSmallness, SparseSmoothness from .vector import ( AmplitudeSmallness, From 3c4e8e737f36acc530fc6d9c421cf68b49d8fa20 Mon Sep 17 00:00:00 2001 From: Thibaut Astic <97514898+thibaut-kobold@users.noreply.github.com> Date: Fri, 26 Jan 2024 17:15:07 -0800 Subject: [PATCH 164/164] Update SimPEG/regularization/rotated.py Co-authored-by: Jacob Edman --- SimPEG/regularization/rotated.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/regularization/rotated.py b/SimPEG/regularization/rotated.py index 78eb28700f..1eaf3166a2 100644 --- a/SimPEG/regularization/rotated.py +++ b/SimPEG/regularization/rotated.py @@ -421,7 +421,7 @@ def _cell_distances(self) -> np.ndarray: :return: np.ndarray """ - cell_distances = self.cell_gradient.max(axis=1).toarray().flatten() + cell_distances = self.cell_gradient.max(axis=1).toarray().ravel() cell_distances[cell_distances == 0] = 1 cell_distances = cell_distances ** (-1)