From cb365eccce0bfc02f37064e714b51bb3af76b044 Mon Sep 17 00:00:00 2001 From: Lindsey Heagy Date: Tue, 23 Jan 2024 22:15:23 -0800 Subject: [PATCH 01/68] define the kernels of the linear function on nodes rather than at cell centers --- SimPEG/simulation.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/SimPEG/simulation.py b/SimPEG/simulation.py index fd2d525f4d..fd84ef831b 100644 --- a/SimPEG/simulation.py +++ b/SimPEG/simulation.py @@ -567,6 +567,14 @@ class ExponentialSinusoidSimulation(LinearSimulation): \int_x e^{p j_k x} \cos(\pi q j_k x) \quad, j_k \in [j_0, ..., j_n] """ + def __init__(self, n_kernels=20, p=-0.25, q=0.25, j0=0.0, jn=60.0, **kwargs): + self.n_kernels = n_kernels + self.p = p + self.q = q + self.j0 = j0 + self.jn = jn + super(ExponentialSinusoidSimulation, self).__init__(**kwargs) + @property def n_kernels(self): """The number of kernels for the linear problem @@ -637,14 +645,6 @@ def jn(self): def jn(self, value): self._jn = validate_float("jn", value) - def __init__(self, n_kernels=20, p=-0.25, q=0.25, j0=0.0, jn=60.0, **kwargs): - self.n_kernels = n_kernels - self.p = p - self.q = q - self.j0 = j0 - self.jn = jn - super(ExponentialSinusoidSimulation, self).__init__(**kwargs) - @property def jk(self): """ @@ -658,8 +658,8 @@ def g(self, k): """ Kernel functions for the decaying oscillating exponential functions. 
""" - return np.exp(self.p * self.jk[k] * self.mesh.cell_centers_x) * np.cos( - np.pi * self.q * self.jk[k] * self.mesh.cell_centers_x + return np.exp(self.p * self.jk[k] * self.mesh.nodes_x) * np.cos( + np.pi * self.q * self.jk[k] * self.mesh.nodes_x ) @property @@ -671,7 +671,9 @@ def G(self): G = np.empty((self.n_kernels, self.mesh.nC)) for i in range(self.n_kernels): - G[i, :] = self.g(i) * self.mesh.h[0] + G[i, :] = self.mesh.cell_volumes * ( + self.mesh.average_node_to_cell @ self.g(i) + ) self._G = G return self._G From 5782d4eebed4844bcbb850b3fcea272f63791558 Mon Sep 17 00:00:00 2001 From: Lindsey Heagy Date: Sat, 27 Jan 2024 21:52:59 -0800 Subject: [PATCH 02/68] remove test checking entries of the kernels --- tests/base/test_simulation.py | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/tests/base/test_simulation.py b/tests/base/test_simulation.py index a1271d96b2..03a59c7fd7 100644 --- a/tests/base/test_simulation.py +++ b/tests/base/test_simulation.py @@ -19,32 +19,6 @@ def setUp(self): self.mtrue = mtrue - def test_forward(self): - data = np.r_[ - 7.50000000e-02, - 5.34102961e-02, - 5.26315566e-03, - -3.92235199e-02, - -4.22361894e-02, - -1.29419602e-02, - 1.30060891e-02, - 1.73572943e-02, - 7.78056876e-03, - -1.49689823e-03, - -4.50212858e-03, - -3.14559131e-03, - -9.55761370e-04, - 3.53963158e-04, - 7.24902205e-04, - 6.06022770e-04, - 3.36635644e-04, - 7.48637479e-05, - -1.10094573e-04, - -1.84905476e-04, - ] - - assert np.allclose(data, self.sim.dpred(self.mtrue)) - def test_make_synthetic_data(self): dclean = self.sim.dpred(self.mtrue) data = self.sim.make_synthetic_data(self.mtrue) From 8bfb0ddca087de46b7f24a6da3e01aff493e3b5c Mon Sep 17 00:00:00 2001 From: domfournier Date: Sun, 28 Jan 2024 07:56:24 -0800 Subject: [PATCH 03/68] Set function's parent on init. 
Augment unitests --- SimPEG/objective_function.py | 11 ++++++++++- tests/base/test_regularization.py | 5 +++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/SimPEG/objective_function.py b/SimPEG/objective_function.py index b25ccb4c4a..3b6e08a278 100644 --- a/SimPEG/objective_function.py +++ b/SimPEG/objective_function.py @@ -360,7 +360,12 @@ class ComboObjectiveFunction(BaseObjectiveFunction): _multiplier_types = (float, None, Zero, np.float64, int, np.integer) - def __init__(self, objfcts=None, multipliers=None, unpack_on_add=True): + def __init__( + self, + objfcts: list[BaseObjectiveFunction] | None = None, + multipliers=None, + unpack_on_add=True, + ): # Define default lists if None if objfcts is None: objfcts = [] @@ -380,6 +385,10 @@ def __init__(self, objfcts=None, multipliers=None, unpack_on_add=True): nP = None super().__init__(nP=nP) + + for fct in objfcts: + fct.parent = self + self.objfcts = objfcts self._multipliers = multipliers self._unpack_on_add = unpack_on_add diff --git a/tests/base/test_regularization.py b/tests/base/test_regularization.py index 43bcc168ad..8446b9fbdc 100644 --- a/tests/base/test_regularization.py +++ b/tests/base/test_regularization.py @@ -664,6 +664,11 @@ class Dummy: with pytest.raises(TypeError, match=msg): regularization.parent = invalid_parent + def test_default_parent(self, regularization): + """Test setting a default parent class to a BaseRegularization.""" + parent = ComboObjectiveFunction(objfcts=[regularization]) + assert regularization.parent is parent + class TestWeightsKeys: """ From 2bd131b75e8e300585f6fc0d63c6abb5a86eb84b Mon Sep 17 00:00:00 2001 From: domfournier Date: Sun, 28 Jan 2024 08:27:50 -0800 Subject: [PATCH 04/68] Use future import --- SimPEG/objective_function.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/SimPEG/objective_function.py b/SimPEG/objective_function.py index 3b6e08a278..09691d6756 100644 --- a/SimPEG/objective_function.py +++ b/SimPEG/objective_function.py @@ -1,3 
+1,5 @@ +from __future__ import annotations + import numpy as np import scipy.sparse as sp From 0e8a7e6af1537dab19b2d6630a1119532fe89c10 Mon Sep 17 00:00:00 2001 From: Thibaut Date: Mon, 29 Jan 2024 11:28:17 -0800 Subject: [PATCH 05/68] ravel --- SimPEG/regularization/regularization_mesh.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/SimPEG/regularization/regularization_mesh.py b/SimPEG/regularization/regularization_mesh.py index c07f92504a..0300adfc44 100755 --- a/SimPEG/regularization/regularization_mesh.py +++ b/SimPEG/regularization/regularization_mesh.py @@ -1,9 +1,9 @@ import numpy as np import scipy.sparse as sp + from SimPEG.utils.code_utils import deprecate_property, validate_active_indices -from .. import props -from .. import utils +from .. import props, utils ############################################################################### # # @@ -553,7 +553,7 @@ def cell_distances_x(self) -> np.ndarray: if getattr(self, "_cell_distances_x", None) is None: self._cell_distances_x = self.cell_gradient_x.max( axis=1 - ).toarray().flatten() ** (-1.0) + ).toarray().ravel() ** (-1.0) return self._cell_distances_x @@ -569,7 +569,7 @@ def cell_distances_y(self) -> np.ndarray: if getattr(self, "_cell_distances_y", None) is None: self._cell_distances_y = self.cell_gradient_y.max( axis=1 - ).toarray().flatten() ** (-1.0) + ).toarray().ravel() ** (-1.0) return self._cell_distances_y @@ -585,7 +585,7 @@ def cell_distances_z(self) -> np.ndarray: if getattr(self, "_cell_distances_z", None) is None: self._cell_distances_z = self.cell_gradient_z.max( axis=1 - ).toarray().flatten() ** (-1.0) + ).toarray().ravel() ** (-1.0) return self._cell_distances_z From 9ebab833e674ddb7b83cfb87f1ef250c907b8c83 Mon Sep 17 00:00:00 2001 From: domfournier Date: Mon, 29 Jan 2024 12:09:24 -0800 Subject: [PATCH 06/68] Move assignement in the validation --- SimPEG/objective_function.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git 
a/SimPEG/objective_function.py b/SimPEG/objective_function.py index 09691d6756..55fb0dfad0 100644 --- a/SimPEG/objective_function.py +++ b/SimPEG/objective_function.py @@ -388,9 +388,6 @@ def __init__( super().__init__(nP=nP) - for fct in objfcts: - fct.parent = self - self.objfcts = objfcts self._multipliers = multipliers self._unpack_on_add = unpack_on_add @@ -543,6 +540,8 @@ def _validate_objective_functions(self, objective_functions): f"{function.__class__.__name__} in 'objfcts'. " "All objective functions must inherit from BaseObjectiveFunction." ) + function.parent = self + number_of_parameters = [f.nP for f in objective_functions if f.nP != "*"] if number_of_parameters: all_equal = all(np.equal(number_of_parameters, number_of_parameters[0])) From 05698fbaccc937df5c1054cb54aa8ac34d0e3127 Mon Sep 17 00:00:00 2001 From: domfournier Date: Mon, 29 Jan 2024 12:10:59 -0800 Subject: [PATCH 07/68] Black flake8 fix --- tests/base/test_regularization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/base/test_regularization.py b/tests/base/test_regularization.py index 8446b9fbdc..ea21f5859a 100644 --- a/tests/base/test_regularization.py +++ b/tests/base/test_regularization.py @@ -665,7 +665,7 @@ class Dummy: regularization.parent = invalid_parent def test_default_parent(self, regularization): - """Test setting a default parent class to a BaseRegularization.""" + """Test setting default parent class to a BaseRegularization.""" parent = ComboObjectiveFunction(objfcts=[regularization]) assert regularization.parent is parent From 716e221e83b986479037ae90ba9787f53d9b7334 Mon Sep 17 00:00:00 2001 From: domfournier Date: Thu, 1 Feb 2024 10:15:38 -0800 Subject: [PATCH 08/68] Fix implementation of coterminal function (#1334) Fix the implementation of the `coterminal` function: use the `mod` operator to ensure that the functions returns the correct coterminal angle defined in $[-\pi, \pi)$. Add more tests. 
--------- Co-authored-by: Santiago Soler Co-authored-by: Joseph Capriotti --- SimPEG/utils/mat_utils.py | 10 +++------- tests/base/test_utils.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 7 deletions(-) diff --git a/SimPEG/utils/mat_utils.py b/SimPEG/utils/mat_utils.py index 5d128d9970..c7b3d07fcb 100644 --- a/SimPEG/utils/mat_utils.py +++ b/SimPEG/utils/mat_utils.py @@ -396,7 +396,7 @@ def coterminal(theta): \theta = 2\pi N + \gamma and *N* is an integer, the function returns the value of :math:`\gamma`. - The coterminal angle :math:`\gamma` is within the range :math:`[-\pi , \pi]`. + The coterminal angle :math:`\gamma` is within the range :math:`[-\pi , \pi)`. Parameters ---------- @@ -409,12 +409,8 @@ def coterminal(theta): Coterminal angles """ - sub = theta[np.abs(theta) >= np.pi] - sub = -np.sign(sub) * (2 * np.pi - np.abs(sub)) - - theta[np.abs(theta) >= np.pi] = sub - - return theta + coterminal = (theta + np.pi) % (2 * np.pi) - np.pi + return coterminal def define_plane_from_points(xyz1, xyz2, xyz3): diff --git a/tests/base/test_utils.py b/tests/base/test_utils.py index 541ed1642b..5952b755fc 100644 --- a/tests/base/test_utils.py +++ b/tests/base/test_utils.py @@ -1,4 +1,5 @@ import unittest +import pytest import numpy as np import scipy.sparse as sp import os @@ -22,6 +23,7 @@ Counter, download, surface2ind_topo, + coterminal, ) import discretize @@ -342,5 +344,35 @@ def test_downloads(self): shutil.rmtree(os.path.expanduser("./test_url")) +class TestCoterminalAngle: + """ + Tests for the coterminal function + """ + + @pytest.mark.parametrize( + "coterminal_angle", + (1 / 4 * np.pi, 3 / 4 * np.pi, -3 / 4 * np.pi, -1 / 4 * np.pi), + ids=("pi/4", "3/4 pi", "-3/4 pi", "-pi/4"), + ) + def test_angles_in_quadrants(self, coterminal_angle): + """ + Test coterminal for angles in each quadrant + """ + angles = np.array([2 * n * np.pi + coterminal_angle for n in range(-3, 4)]) + np.testing.assert_allclose(coterminal(angles), 
coterminal_angle) + + @pytest.mark.parametrize( + "coterminal_angle", + (0, np.pi / 2, -np.pi, -np.pi / 2), + ids=("0", "pi/2", "-pi", "-pi/2"), + ) + def test_right_angles(self, coterminal_angle): + """ + Test coterminal for right angles + """ + angles = np.array([2 * n * np.pi + coterminal_angle for n in range(-3, 4)]) + np.testing.assert_allclose(coterminal(angles), coterminal_angle) + + if __name__ == "__main__": unittest.main() From bb3a488642f73c5b4270fad3ff269cc94a1dbf14 Mon Sep 17 00:00:00 2001 From: Lindsey Heagy Date: Thu, 1 Feb 2024 16:27:55 -0800 Subject: [PATCH 09/68] move averaging outside of matrix construction --- SimPEG/simulation.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/SimPEG/simulation.py b/SimPEG/simulation.py index fd84ef831b..0a0d9d3c4c 100644 --- a/SimPEG/simulation.py +++ b/SimPEG/simulation.py @@ -8,7 +8,7 @@ from discretize.base import BaseMesh from discretize import TensorMesh -from discretize.utils import unpack_widths +from discretize.utils import unpack_widths, sdiag from . 
import props from .data import SyntheticData, Data @@ -668,12 +668,12 @@ def G(self): Matrix whose rows are the kernel functions """ if getattr(self, "_G", None) is None: - G = np.empty((self.n_kernels, self.mesh.nC)) + G = np.empty((self.mesh.nC, self.n_kernels)) for i in range(self.n_kernels): - G[i, :] = self.mesh.cell_volumes * ( - self.mesh.average_node_to_cell @ self.g(i) - ) + G[:, i] = self.g(i) - self._G = G + self._G = ( + sdiag(self.mesh.cell_volumes) @ (self.mesh.average_node_to_cell @ G).T + ) return self._G From 67b7bd9892e512e66761caa30801587fb070fbca Mon Sep 17 00:00:00 2001 From: domfournier Date: Sun, 4 Feb 2024 08:11:26 -0800 Subject: [PATCH 10/68] Move parent setting to WeightedLeastSquares --- SimPEG/objective_function.py | 3 --- SimPEG/regularization/base.py | 5 +++++ tests/base/test_regularization.py | 3 ++- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/SimPEG/objective_function.py b/SimPEG/objective_function.py index c00aec0942..28464bd66e 100644 --- a/SimPEG/objective_function.py +++ b/SimPEG/objective_function.py @@ -390,9 +390,6 @@ def __init__( super().__init__(nP=nP) - for fun in objfcts: - fun.parent = self - self.objfcts = objfcts self._multipliers = multipliers self._unpack_on_add = unpack_on_add diff --git a/SimPEG/regularization/base.py b/SimPEG/regularization/base.py index 656c1a2572..4db6c4a1c9 100644 --- a/SimPEG/regularization/base.py +++ b/SimPEG/regularization/base.py @@ -1665,8 +1665,13 @@ def __init__( objfcts = kwargs.pop("objfcts") super().__init__(objfcts=objfcts, unpack_on_add=False, **kwargs) + + for fun in objfcts: + fun.parent = self + if active_cells is not None: self.active_cells = active_cells + self.mapping = mapping self.reference_model = reference_model self.reference_model_in_smooth = reference_model_in_smooth diff --git a/tests/base/test_regularization.py b/tests/base/test_regularization.py index 3023748b86..6e73479785 100644 --- a/tests/base/test_regularization.py +++ 
b/tests/base/test_regularization.py @@ -682,7 +682,8 @@ class Dummy: def test_default_parent(self, regularization): """Test setting default parent class to a BaseRegularization.""" - parent = ComboObjectiveFunction(objfcts=[regularization]) + mesh = discretize.TensorMesh([3, 4, 5]) + parent = WeightedLeastSquares(mesh, objfcts=[regularization]) assert regularization.parent is parent From aafb71ec3fa4cb8298c6bbb92075a387f27d71de Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Fri, 9 Feb 2024 10:19:49 -0700 Subject: [PATCH 11/68] Update cross gradient hessian approximation This make the hessian approximation in the cross gradient regularization actually SPD. --- SimPEG/regularization/cross_gradient.py | 70 +++++++++---------------- 1 file changed, 25 insertions(+), 45 deletions(-) diff --git a/SimPEG/regularization/cross_gradient.py b/SimPEG/regularization/cross_gradient.py index 9c8193a19f..b3b54cb3c5 100644 --- a/SimPEG/regularization/cross_gradient.py +++ b/SimPEG/regularization/cross_gradient.py @@ -344,64 +344,44 @@ def deriv2(self, model, v=None): g_m1 = G @ m1 g_m2 = G @ m2 - if v is None: - A = ( - G.T - @ ( - sp.diags(Av.T @ (Av @ g_m2**2)) - - sp.diags(g_m2) @ Av.T @ Av @ sp.diags(g_m2) - ) - @ G - ) - - C = ( - G.T - @ ( - sp.diags(Av.T @ (Av @ g_m1**2)) - - sp.diags(g_m1) @ Av.T @ Av @ sp.diags(g_m1) - ) - @ G - ) + d11_mid = Av.T @ (Av @ g_m2**2) + d12_mid = -(Av.T @ (Av @ (g_m1 * g_m2))) + d22_mid = Av.T @ (Av @ g_m1**2) - B = None - BT = None + if v is None: + D11_mid = sp.diags(d11_mid) + D12_mid = sp.diags(d12_mid) + D22_mid = sp.diags(d22_mid) if not self.approx_hessian: - # d_m1_d_m2 - B = ( - G.T - @ ( - 2 * sp.diags(g_m1) @ Av.T @ Av @ sp.diags(g_m2) - - sp.diags(g_m2) @ Av.T @ Av @ sp.diags(g_m1) - - sp.diags(Av.T @ Av @ (g_m1 * g_m2)) - ) - @ G + D11_mid = D11_mid - sp.diags(g_m2) @ Av.T @ Av @ sp.diags(g_m2) + D12_mid = ( + D12_mid + + 2 * sp.diags(g_m1) @ Av.T @ Av @ sp.diags(g_m2) + - sp.diags(g_m2) @ Av.T @ Av @ sp.diags(g_m1) ) - BT 
= B.T + D22_mid = D22_mid - sp.diags(g_m1) @ Av.T @ Av @ sp.diags(g_m1) + D11 = G.T @ D11_mid @ G + D12 = G.T @ D12_mid @ G + D22 = G.T @ D22_mid @ G - return sp.bmat([[A, B], [BT, C]], format="csr") + return sp.bmat([[D11, D12], [D12.T, D22]], format="csr") else: v1, v2 = self.wire_map * v Gv1 = G @ v1 Gv2 = G @ v2 - - p1 = G.T @ ( - (Av.T @ (Av @ g_m2**2)) * Gv1 - g_m2 * (Av.T @ (Av @ (g_m2 * Gv1))) - ) - p2 = G.T @ ( - (Av.T @ (Av @ g_m1**2)) * Gv2 - g_m1 * (Av.T @ (Av @ (g_m1 * Gv2))) - ) - + p1 = G.T @ (d11_mid * Gv1 + d12_mid * Gv2) + p2 = G.T @ (d12_mid * Gv1 + d22_mid * Gv2) if not self.approx_hessian: p1 += G.T @ ( - 2 * g_m1 * (Av.T @ (Av @ (g_m2 * Gv2))) - - g_m2 * (Av.T @ (Av @ (g_m1 * Gv2))) - - (Av.T @ (Av @ (g_m1 * g_m2))) * Gv2 + -g_m2 * (Av.T @ (Av @ (g_m2 * Gv1))) # d11*v1 full addition + + 2 * g_m1 * (Av.T @ (Av @ (g_m2 * Gv2))) # d12*v2 full addition + - g_m2 * (Av.T @ (Av @ (g_m1 * Gv2))) # d12*v2 continued ) p2 += G.T @ ( - 2 * g_m2 * (Av.T @ (Av @ (g_m1 * Gv1))) - - g_m1 * (Av.T @ (Av @ (g_m2 * Gv1))) - - (Av.T @ (Av @ (g_m2 * g_m1))) * Gv1 + -g_m1 * (Av.T @ (Av @ (g_m1 * Gv2))) # d22*v2 full addition + + 2 * g_m2 * (Av.T @ (Av @ (g_m1 * Gv1))) # d12.T*v1 full addition + - g_m1 * (Av.T @ (Av @ (g_m2 * Gv1))) # d12.T*v1 fcontinued ) return np.r_[p1, p2] From 50a864e3431f07fd876ec2a19bc31a73e9597271 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 20 Feb 2024 11:47:42 -0800 Subject: [PATCH 12/68] Fix partial derivatives in regularization docs (#1362) Fix LaTeX in second order partial derivatives shown in regularization docstrings. Replace $\frac{\partial \phi^2}{\partial m^2}$ for $\frac{\partial^2 \phi}{\partial m^2}$. 
--- SimPEG/regularization/correspondence.py | 8 ++++---- SimPEG/regularization/cross_gradient.py | 8 ++++---- SimPEG/regularization/jtv.py | 8 ++++---- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/SimPEG/regularization/correspondence.py b/SimPEG/regularization/correspondence.py index 6f31dd3317..55d0db8765 100644 --- a/SimPEG/regularization/correspondence.py +++ b/SimPEG/regularization/correspondence.py @@ -185,10 +185,10 @@ def deriv2(self, model, v=None): .. math:: \frac{\partial^2 \phi}{\partial \mathbf{m}^2} = \begin{bmatrix} - \dfrac{\partial \phi^2}{\partial \mathbf{m_1}^2} & - \dfrac{\partial \phi^2}{\partial \mathbf{m_1} \partial \mathbf{m_2}} \\ - \dfrac{\partial \phi^2}{\partial \mathbf{m_2} \partial \mathbf{m_1}} & - \dfrac{\partial \phi^2}{\partial \mathbf{m_2}^2} + \dfrac{\partial^2 \phi}{\partial \mathbf{m_1}^2} & + \dfrac{\partial^2 \phi}{\partial \mathbf{m_1} \partial \mathbf{m_2}} \\ + \dfrac{\partial^2 \phi}{\partial \mathbf{m_2} \partial \mathbf{m_1}} & + \dfrac{\partial^2 \phi}{\partial \mathbf{m_2}^2} \end{bmatrix} When a vector :math:`(\mathbf{v})` is supplied, the method returns the Hessian diff --git a/SimPEG/regularization/cross_gradient.py b/SimPEG/regularization/cross_gradient.py index b3b54cb3c5..5204881bcc 100644 --- a/SimPEG/regularization/cross_gradient.py +++ b/SimPEG/regularization/cross_gradient.py @@ -310,10 +310,10 @@ def deriv2(self, model, v=None): .. 
math:: \frac{\partial^2 \phi}{\partial \mathbf{m}^2} = \begin{bmatrix} - \dfrac{\partial \phi^2}{\partial \mathbf{m_1}^2} & - \dfrac{\partial \phi^2}{\partial \mathbf{m_1} \partial \mathbf{m_2}} \\ - \dfrac{\partial \phi^2}{\partial \mathbf{m_2} \partial \mathbf{m_1}} & - \dfrac{\partial \phi^2}{\partial \mathbf{m_2}^2} + \dfrac{\partial^2 \phi}{\partial \mathbf{m_1}^2} & + \dfrac{\partial^2 \phi}{\partial \mathbf{m_1} \partial \mathbf{m_2}} \\ + \dfrac{\partial^2 \phi}{\partial \mathbf{m_2} \partial \mathbf{m_1}} & + \dfrac{\partial^2 \phi}{\partial \mathbf{m_2}^2} \end{bmatrix} When a vector :math:`(\mathbf{v})` is supplied, the method returns the Hessian diff --git a/SimPEG/regularization/jtv.py b/SimPEG/regularization/jtv.py index aa55780b47..153b8cd511 100644 --- a/SimPEG/regularization/jtv.py +++ b/SimPEG/regularization/jtv.py @@ -256,11 +256,11 @@ def deriv2(self, model, v=None): .. math:: \frac{\partial^2 \phi}{\partial \mathbf{m}^2} = \begin{bmatrix} - \dfrac{\partial \phi^2}{\partial \mathbf{m_1}^2} & - \dfrac{\partial \phi^2}{\partial \mathbf{m_1} \partial \mathbf{m_2}} & + \dfrac{\partial^2 \phi}{\partial \mathbf{m_1}^2} & + \dfrac{\partial^2 \phi}{\partial \mathbf{m_1} \partial \mathbf{m_2}} & \cdots \\ - \dfrac{\partial \phi^2}{\partial \mathbf{m_2} \partial \mathbf{m_1}} & - \dfrac{\partial \phi^2}{\partial \mathbf{m_2}^2} & \; \\ + \dfrac{\partial^2 \phi}{\partial \mathbf{m_2} \partial \mathbf{m_1}} & + \dfrac{\partial^2 \phi}{\partial \mathbf{m_2}^2} & \; \\ \vdots & \; & \ddots \end{bmatrix} From c6d05c30ea7c574db4eedf979ad51df86ce60eaf Mon Sep 17 00:00:00 2001 From: Lindsey Heagy Date: Tue, 20 Feb 2024 15:06:57 -0800 Subject: [PATCH 13/68] Remove factor of half in data misfits and regularizations (#1326) Remove the factor of 1/2 in front of data misfit and regularization terms. Add a factor of 2 to their derivatives. Update target misfit directives to account for the missing 1/2 factor. Update documentation to remove the factor of 1/2. 
Update tests accordingly (#1332 for PGI). --------- Co-authored-by: Santiago Soler Co-authored-by: Thibaut Astic <97514898+thibaut-kobold@users.noreply.github.com> --- SimPEG/data_misfit.py | 12 +-- SimPEG/directives/directives.py | 32 ++++---- SimPEG/directives/pgi_directives.py | 2 +- SimPEG/directives/sim_directives.py | 4 +- SimPEG/objective_function.py | 14 ++-- SimPEG/regularization/__init__.py | 12 +-- SimPEG/regularization/base.py | 72 ++++++++--------- SimPEG/regularization/correspondence.py | 10 +-- SimPEG/regularization/cross_gradient.py | 40 ++++++---- SimPEG/regularization/jtv.py | 6 +- SimPEG/regularization/pgi.py | 52 +++++------- SimPEG/regularization/sparse.py | 34 ++++---- SimPEG/regularization/vector.py | 80 +++++++++++-------- .../plot_tomo_joint_with_volume.py | 16 ++-- tests/base/test_cross_gradient.py | 2 +- tests/base/test_objective_function.py | 8 +- tests/base/test_pgi_regularization.py | 14 ++-- tests/em/em1d/test_EM1D_FD_jac_layers.py | 18 +++-- tests/pf/test_pf_quadtree_inversion_linear.py | 8 +- tests/utils/test_mat_utils.py | 6 +- 20 files changed, 226 insertions(+), 216 deletions(-) diff --git a/SimPEG/data_misfit.py b/SimPEG/data_misfit.py index 42ffc6532d..6d975f2d30 100644 --- a/SimPEG/data_misfit.py +++ b/SimPEG/data_misfit.py @@ -19,7 +19,7 @@ class inherits the :py:class:`SimPEG.objective_function.L2ObjectiveFunction`. create your own data misfit class. .. math:: - \phi_d (\mathbf{m}) = \frac{1}{2} \| \mathbf{W} f(\mathbf{m}) \|_2^2 + \phi_d (\mathbf{m}) = \| \mathbf{W} f(\mathbf{m}) \|_2^2 where :math:`\mathbf{m}` is the model vector, :math:`\mathbf{W}` is a linear weighting matrix, and :math:`f` is a mapping function that acts on the model. @@ -152,7 +152,7 @@ def W(self): For a discrete least-squares data misfit function of the form: .. 
math:: - \phi_d (\mathbf{m}) = \frac{1}{2} \| \mathbf{W} \mathbf{f}(\mathbf{m}) \|_2^2 + \phi_d (\mathbf{m}) = \| \mathbf{W} \mathbf{f}(\mathbf{m}) \|_2^2 :math:`\mathbf{W}` is a linear weighting matrix, :math:`\mathbf{m}` is the model vector, and :math:`\mathbf{f}` is a discrete mapping function that acts on the model vector. @@ -237,7 +237,7 @@ class L2DataMisfit(BaseDataMisfit): data and predicted data for a given model. I.e.: .. math:: - \phi_d (\mathbf{m}) = \frac{1}{2} \big \| \mathbf{W_d} + \phi_d (\mathbf{m}) = \big \| \mathbf{W_d} \big ( \mathbf{d}_\text{pred} - \mathbf{d}_\text{obs} \big ) \big \|_2^2 where :math:`\mathbf{d}_\text{obs}` is the observed data vector, :math:`\mathbf{d}_\text{pred}` @@ -266,7 +266,7 @@ def __call__(self, m, f=None): """Evaluate the residual for a given model.""" R = self.W * self.residual(m, f=f) - return 0.5 * np.vdot(R, R) + return np.vdot(R, R) @timeIt def deriv(self, m, f=None): @@ -293,7 +293,7 @@ def deriv(self, m, f=None): if f is None: f = self.simulation.fields(m) - return self.simulation.Jtvec( + return 2 * self.simulation.Jtvec( m, self.W.T * (self.W * self.residual(m, f=f)), f=f ) @@ -330,6 +330,6 @@ def deriv2(self, m, v, f=None): if f is None: f = self.simulation.fields(m) - return self.simulation.Jtvec_approx( + return 2 * self.simulation.Jtvec_approx( m, self.W * (self.W * self.simulation.Jvec_approx(m, v, f=f)), f=f ) diff --git a/SimPEG/directives/directives.py b/SimPEG/directives/directives.py index 6700257d9a..6f38db40be 100644 --- a/SimPEG/directives/directives.py +++ b/SimPEG/directives/directives.py @@ -1060,17 +1060,17 @@ def phi_d_star(self): ------- float """ - # the factor of 0.5 is because we do phid = 0.5*||dpred - dobs||^2 + # phid = ||dpred - dobs||^2 if self._phi_d_star is None: nD = 0 for survey in self.survey: nD += survey.nD - self._phi_d_star = 0.5 * nD + self._phi_d_star = nD return self._phi_d_star @phi_d_star.setter def phi_d_star(self, value): - # the factor of 0.5 is because we do phid 
= 0.5*||dpred - dobs||^2 + # phid = ||dpred - dobs||^2 if value is not None: value = validate_float( "phi_d_star", value, min_val=0.0, inclusive_min=False @@ -1166,13 +1166,13 @@ def phi_d_star(self): ------- float """ - # the factor of 0.5 is because we do phid = 0.5*|| dpred - dobs||^2 + # phid = || dpred - dobs||^2 if getattr(self, "_phi_d_star", None) is None: # Check if it is a ComboObjective if isinstance(self.dmisfit, ComboObjectiveFunction): - value = np.r_[[0.5 * survey.nD for survey in self.survey]] + value = np.r_[[survey.nD for survey in self.survey]] else: - value = np.r_[[0.5 * self.survey.nD]] + value = np.r_[[self.survey.nD]] self._phi_d_star = value self._DMtarget = None @@ -1180,7 +1180,7 @@ def phi_d_star(self): @phi_d_star.setter def phi_d_star(self, value): - # the factor of 0.5 is because we do phid = 0.5*|| dpred - dobs||^2 + # phid =|| dpred - dobs||^2 if value is not None: value = validate_ndarray_with_shape("phi_d_star", value, shape=("*",)) self._phi_d_star = value @@ -1426,11 +1426,11 @@ def CLtarget(self): self._CLtarget = self.chiSmall * self.phi_ms_star elif getattr(self, "_CLtarget", None) is None: - # the factor of 0.5 is because we do phid = 0.5*|| dpred - dobs||^2 + # phid = ||dpred - dobs||^2 if self.phi_ms_star is None: # Expected value is number of active cells * number of physical # properties - self.phi_ms_star = 0.5 * len(self.invProb.model) + self.phi_ms_star = len(self.invProb.model) self._CLtarget = self.chiSmall * self.phi_ms_star @@ -1747,7 +1747,7 @@ def load_results(self): self.f = results[:, 7] - self.target_misfit = self.invProb.dmisfit.simulation.survey.nD / 2.0 + self.target_misfit = self.invProb.dmisfit.simulation.survey.nD self.i_target = None if self.invProb.phi_d < self.target_misfit: @@ -1765,9 +1765,7 @@ def plot_misfit_curves( plot_small=False, plot_smooth=False, ): - self.target_misfit = ( - np.sum([dmis.nD for dmis in self.invProb.dmisfit.objfcts]) / 2.0 - ) + self.target_misfit = np.sum([dmis.nD for dmis 
in self.invProb.dmisfit.objfcts]) self.i_target = None if self.invProb.phi_d < self.target_misfit: @@ -1821,7 +1819,7 @@ def plot_misfit_curves( fig.savefig(fname, dpi=dpi) def plot_tikhonov_curves(self, fname=None, dpi=200): - self.target_misfit = self.invProb.dmisfit.simulation.survey.nD / 2.0 + self.target_misfit = self.invProb.dmisfit.simulation.survey.nD self.i_target = None if self.invProb.phi_d < self.target_misfit: @@ -2062,7 +2060,7 @@ def target(self): for survey in self.survey: nD += survey.nD - self._target = nD * 0.5 * self.chifact_target + self._target = nD * self.chifact_target return self._target @@ -2076,10 +2074,10 @@ def start(self): if isinstance(self.survey, list): self._start = 0 for survey in self.survey: - self._start += survey.nD * 0.5 * self.chifact_start + self._start += survey.nD * self.chifact_start else: - self._start = self.survey.nD * 0.5 * self.chifact_start + self._start = self.survey.nD * self.chifact_start return self._start @start.setter diff --git a/SimPEG/directives/pgi_directives.py b/SimPEG/directives/pgi_directives.py index db332ff9bb..e8fb543ee1 100644 --- a/SimPEG/directives/pgi_directives.py +++ b/SimPEG/directives/pgi_directives.py @@ -413,7 +413,7 @@ def initialize(self): @property def DMtarget(self): if getattr(self, "_DMtarget", None) is None: - self.phi_d_target = 0.5 * self.invProb.dmisfit.survey.nD + self.phi_d_target = self.invProb.dmisfit.survey.nD self._DMtarget = self.chifact * self.phi_d_target return self._DMtarget diff --git a/SimPEG/directives/sim_directives.py b/SimPEG/directives/sim_directives.py index 5b781fe97a..718fac26c3 100644 --- a/SimPEG/directives/sim_directives.py +++ b/SimPEG/directives/sim_directives.py @@ -305,7 +305,7 @@ def target(self): if getattr(self, "_target", None) is None: nD = np.array([survey.nD for survey in self.survey]) - self._target = nD * 0.5 * self.chifact_target + self._target = nD * self.chifact_target return self._target @@ -362,7 +362,7 @@ def target(self): nD += 
[survey.nD] nD = np.array(nD) - self._target = nD * 0.5 * self.chifact_target + self._target = nD * self.chifact_target return self._target diff --git a/SimPEG/objective_function.py b/SimPEG/objective_function.py index 28464bd66e..b3c299cea2 100644 --- a/SimPEG/objective_function.py +++ b/SimPEG/objective_function.py @@ -536,7 +536,7 @@ class L2ObjectiveFunction(BaseObjectiveFunction): Weighting least-squares objective functions in SimPEG are defined as follows: .. math:: - \phi = \frac{1}{2} \big \| \mathbf{W} f(\mathbf{m}) \big \|_2^2 + \phi = \big \| \mathbf{W} f(\mathbf{m}) \big \|_2^2 where :math:`\mathbf{m}` are the model parameters, :math:`f` is a mapping operator, and :math:`\mathbf{W}` is the weighting matrix. @@ -605,20 +605,22 @@ def W(self): def __call__(self, m): """Evaluate the objective function for a given model.""" r = self.W * (self.mapping * m) - return 0.5 * r.dot(r) + return r.dot(r) def deriv(self, m): # Docstring inherited from BaseObjectiveFunction - return self.mapping.deriv(m).T * (self.W.T * (self.W * (self.mapping * m))) + return 2 * self.mapping.deriv(m).T * (self.W.T * (self.W * (self.mapping * m))) def deriv2(self, m, v=None): # Docstring inherited from BaseObjectiveFunction if v is not None: - return self.mapping.deriv(m).T * ( - self.W.T * (self.W * (self.mapping.deriv(m) * v)) + return ( + 2 + * self.mapping.deriv(m).T + * (self.W.T * (self.W * (self.mapping.deriv(m) * v))) ) W = self.W * self.mapping.deriv(m) - return W.T * W + return 2 * W.T * W def _validate_objective_functions(objective_functions): diff --git a/SimPEG/regularization/__init__.py b/SimPEG/regularization/__init__.py index 5d1a7910ac..c379bbd202 100644 --- a/SimPEG/regularization/__init__.py +++ b/SimPEG/regularization/__init__.py @@ -52,10 +52,10 @@ .. math:: \phi_m (m) = - \alpha_s \! \int_\Omega \Bigg [ \frac{1}{2} w_s(r) \, m(r)^2 \Bigg ] \, dv + - \alpha_x \! \int_\Omega \Bigg [ \frac{1}{2} w_x(r) + \alpha_s \! 
\int_\Omega \Bigg [ w_s(r) \, m(r)^2 \Bigg ] \, dv + + \alpha_x \! \int_\Omega \Bigg [ w_x(r) \bigg ( \frac{\partial m}{\partial x} \bigg )^2 \Bigg ] \, dv + - \alpha_y \! \int_\Omega \Bigg [ \frac{1}{2} w_y(r) + \alpha_y \! \int_\Omega \Bigg [ w_y(r) \bigg ( \frac{\partial m}{\partial y} \bigg )^2 \Bigg ] \, dv where :math:`w_s(r), w_x(r), w_y(r)` are user-defined weighting functions. @@ -65,9 +65,9 @@ And the regularization is implemented using a weighted sum of objective functions: .. math:: - \phi_m (\mathbf{m}) \approx \frac{\alpha_s}{2} \big \| \mathbf{W_s m} \big \|^2 + - \frac{\alpha_x}{2} \big \| \mathbf{W_x G_x m} \big \|^2 + - \frac{\alpha_y}{2} \big \| \mathbf{W_y G_y m} \big \|^2 + \phi_m (\mathbf{m}) \approx \alpha_s \big \| \mathbf{W_s m} \big \|^2 + + \alpha_x \big \| \mathbf{W_x G_x m} \big \|^2 + + \alpha_y \big \| \mathbf{W_y G_y m} \big \|^2 where :math:`\mathbf{G_x}` and :math:`\mathbf{G_y}` are partial gradient operators along the x and y-directions, respectively. :math:`\mathbf{W_s}`, :math:`\mathbf{W_x}` and :math:`\mathbf{W_y}` diff --git a/SimPEG/regularization/base.py b/SimPEG/regularization/base.py index 4db6c4a1c9..4857a376d2 100644 --- a/SimPEG/regularization/base.py +++ b/SimPEG/regularization/base.py @@ -472,7 +472,7 @@ def __call__(self, m): The regularization function evaluated for the model provided. """ r = self.W * self.f_m(m) - return 0.5 * r.dot(r) + return r.dot(r) def f_m(self, m) -> np.ndarray: """Not implemented for ``BaseRegularization`` class.""" @@ -506,7 +506,7 @@ def deriv(self, m) -> np.ndarray: The Gradient of the regularization function evaluated for the model provided. 
""" r = self.W * self.f_m(m) - return self.f_m_deriv(m).T * (self.W.T * r) + return 2 * self.f_m_deriv(m).T * (self.W.T * r) @utils.timeIt def deriv2(self, m, v=None) -> csr_matrix: @@ -539,9 +539,9 @@ def deriv2(self, m, v=None) -> csr_matrix: """ f_m_deriv = self.f_m_deriv(m) if v is None: - return f_m_deriv.T * ((self.W.T * self.W) * f_m_deriv) + return 2 * f_m_deriv.T * ((self.W.T * self.W) * f_m_deriv) - return f_m_deriv.T * (self.W.T * (self.W * (f_m_deriv * v))) + return 2 * f_m_deriv.T * (self.W.T * (self.W * (f_m_deriv * v))) class Smallness(BaseRegularization): @@ -584,7 +584,7 @@ class Smallness(BaseRegularization): We define the regularization function (objective function) for smallness as: .. math:: - \phi (m) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m) = \int_\Omega \, w(r) \, \Big [ m(r) - m^{(ref)}(r) \Big ]^2 \, dv where :math:`m(r)` is the model, :math:`m^{(ref)}(r)` is the reference model and :math:`w(r)` @@ -595,7 +595,7 @@ class Smallness(BaseRegularization): function (objective function) is expressed in linear form as: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \sum_i + \phi (\mathbf{m}) = \sum_i \tilde{w}_i \, \bigg | \, m_i - m_i^{(ref)} \, \bigg |^2 where :math:`m_i \in \mathbf{m}` are the discrete model parameter values defined on the mesh and @@ -604,7 +604,7 @@ class Smallness(BaseRegularization): This is equivalent to an objective function of the form: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} + \phi (\mathbf{m}) = \Big \| \mathbf{W} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where @@ -674,7 +674,7 @@ def f_m(self, m) -> np.ndarray: The objective function for smallness regularization is given by: .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where :math:`\mathbf{m}` are the discrete model parameters defined on the mesh (model), @@ -689,7 +689,7 @@ def f_m(self, m) -> np.ndarray: such that .. 
math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 """ return self.mapping * self._delta_m(m) @@ -720,7 +720,7 @@ def f_m_deriv(self, m) -> csr_matrix: The objective function for smallness regularization is given by: .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where :math:`\mathbf{m}` are the discrete model parameters defined on the mesh (model), @@ -735,7 +735,7 @@ def f_m_deriv(self, m) -> csr_matrix: such that .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 Thus, the derivative with respect to the model is: @@ -794,7 +794,7 @@ class SmoothnessFirstOrder(BaseRegularization): along the x-direction as: .. math:: - \phi (m) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m) = \int_\Omega \, w(r) \, \bigg [ \frac{\partial m}{\partial x} \bigg ]^2 \, dv where :math:`m(r)` is the model and :math:`w(r)` is a user-defined weighting function. @@ -804,7 +804,7 @@ class SmoothnessFirstOrder(BaseRegularization): function (objective function) is expressed in linear form as: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \sum_i + \phi (\mathbf{m}) = \sum_i \tilde{w}_i \, \bigg | \, \frac{\partial m_i}{\partial x} \, \bigg |^2 where :math:`m_i \in \mathbf{m}` are the discrete model parameter values defined on the mesh @@ -813,7 +813,7 @@ class SmoothnessFirstOrder(BaseRegularization): This is equivalent to an objective function of the form: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W \, G_x m } \, \Big \|^2 + \phi (\mathbf{m}) = \Big \| \mathbf{W \, G_x m } \, \Big \|^2 where @@ -830,7 +830,7 @@ class SmoothnessFirstOrder(BaseRegularization): In this case, the objective function becomes: .. 
math:: - \phi (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W G_x} + \phi (\mathbf{m}) = \Big \| \mathbf{W G_x} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 This functionality is used by setting a reference model with the @@ -988,7 +988,7 @@ def f_m(self, m): is given by: .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W G_x} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where :math:`\mathbf{m}` are the discrete model parameters (model), @@ -1005,7 +1005,7 @@ def f_m(self, m): such that .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W \, f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W \, f_m} \Big \|^2 """ dfm_dl = self.mapping * self._delta_m(m) @@ -1044,7 +1044,7 @@ def f_m_deriv(self, m) -> csr_matrix: is given by: .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W G_x} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where :math:`\mathbf{m}` are the discrete model parameters (model), @@ -1061,7 +1061,7 @@ def f_m_deriv(self, m) -> csr_matrix: such that .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W \, f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W \, f_m} \Big \|^2 The derivative with respect to the model is therefore: @@ -1159,7 +1159,7 @@ class SmoothnessSecondOrder(SmoothnessFirstOrder): smoothness along the x-direction as: .. math:: - \phi (m) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m) = \int_\Omega \, w(r) \, \bigg [ \frac{\partial^2 m}{\partial x^2} \bigg ]^2 \, dv where :math:`m(r)` is the model and :math:`w(r)` is a user-defined weighting function. @@ -1169,7 +1169,7 @@ class SmoothnessSecondOrder(SmoothnessFirstOrder): function (objective function) is expressed in linear form as: .. 
math:: - \phi (\mathbf{m}) = \frac{1}{2} \sum_i + \phi (\mathbf{m}) = \sum_i \tilde{w}_i \, \bigg | \, \frac{\partial^2 m_i}{\partial x^2} \, \bigg |^2 where :math:`m_i \in \mathbf{m}` are the discrete model parameter values defined on the @@ -1178,7 +1178,7 @@ class SmoothnessSecondOrder(SmoothnessFirstOrder): This is equivalent to an objective function of the form: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \big \| \mathbf{W \, L_x \, m } \, \big \|^2 + \phi (\mathbf{m}) = \big \| \mathbf{W \, L_x \, m } \, \big \|^2 where @@ -1192,7 +1192,7 @@ class SmoothnessSecondOrder(SmoothnessFirstOrder): In this case, the objective function becomes: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W L_x} + \phi (\mathbf{m}) = \Big \| \mathbf{W L_x} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 This functionality is used by setting a reference model with the @@ -1255,7 +1255,7 @@ def f_m(self, m): is given by: .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W L_x} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where :math:`\mathbf{m}` are the discrete model parameters (model), @@ -1272,7 +1272,7 @@ def f_m(self, m): such that .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W \, f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W \, f_m} \Big \|^2 """ dfm_dl = self.mapping * self._delta_m(m) @@ -1313,7 +1313,7 @@ def f_m_deriv(self, m) -> csr_matrix: is given by: .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W L_x} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where :math:`\mathbf{m}` are the discrete model parameters (model), @@ -1330,7 +1330,7 @@ def f_m_deriv(self, m) -> csr_matrix: such that .. 
math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W \, f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W \, f_m} \Big \|^2 The derivative of the regularization kernel function with respect to the model is: @@ -1433,11 +1433,11 @@ class WeightedLeastSquares(ComboObjectiveFunction): :math:`\phi_m (m)` of the form: .. math:: - \phi_m (m) =& \frac{\alpha_s}{2} \int_\Omega \, w(r) + \phi_m (m) =& \alpha_s \int_\Omega \, w(r) \Big [ m(r) - m^{(ref)}(r) \Big ]^2 \, dv \\ - &+ \sum_{j=x,y,z} \frac{\alpha_j}{2} \int_\Omega \, w(r) + &+ \sum_{j=x,y,z} \alpha_j \int_\Omega \, w(r) \bigg [ \frac{\partial m}{\partial \xi_j} \bigg ]^2 \, dv \\ - &+ \sum_{j=x,y,z} \frac{\alpha_{jj}}{2} \int_\Omega \, w(r) + &+ \sum_{j=x,y,z} \alpha_{jj} \int_\Omega \, w(r) \bigg [ \frac{\partial^2 m}{\partial \xi_j^2} \bigg ]^2 \, dv \;\;\;\;\;\;\;\; \big ( \textrm{optional} \big ) @@ -1461,10 +1461,10 @@ class WeightedLeastSquares(ComboObjectiveFunction): objective functions of the form: .. math:: - \phi_m (\mathbf{m}) =& \frac{\alpha_s}{2} + \phi_m (\mathbf{m}) =& \alpha_s \Big \| \mathbf{W_s} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_{jj}}{2} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 + &+ \sum_{j=x,y,z} \alpha_j \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ + &+ \sum_{j=x,y,z} \alpha_{jj} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 \;\;\;\;\;\;\;\; \big ( \textrm{optional} \big ) where @@ -1482,11 +1482,11 @@ class WeightedLeastSquares(ComboObjectiveFunction): In this case, the objective function becomes: .. 
math:: - \phi_m (\mathbf{m}) =& \frac{\alpha_s}{2} + \phi_m (\mathbf{m}) =& \alpha_s \Big \| \mathbf{W_s} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \mathbf{W_j G_j} + &+ \sum_{j=x,y,z} \alpha_j \Big \| \mathbf{W_j G_j} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_{jj}}{2} \Big \| \mathbf{W_{jj} L_j} + &+ \sum_{j=x,y,z} \alpha_{jj} \Big \| \mathbf{W_{jj} L_j} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 \;\;\;\;\;\;\;\; \big ( \textrm{optional} \big ) diff --git a/SimPEG/regularization/correspondence.py b/SimPEG/regularization/correspondence.py index 55d0db8765..670afc3132 100644 --- a/SimPEG/regularization/correspondence.py +++ b/SimPEG/regularization/correspondence.py @@ -43,7 +43,7 @@ class LinearCorrespondence(BaseSimilarityMeasure): .. math:: \phi (\mathbf{m}) - = \frac{1}{2} \big \| \lambda_1 \mathbf{m_1} + \lambda_2 \mathbf{m_2} + \lambda_3 \big \|^2 + = \big \| \lambda_1 \mathbf{m_1} + \lambda_2 \mathbf{m_2} + \lambda_3 \big \|^2 Scalar coefficients :math:`\{ \lambda_1 , \lambda_2 , \lambda_3 \}` are set using the `coefficients` property. For a true linear correspondence constraint, we set @@ -130,7 +130,7 @@ def __call__(self, model): """ result = self.relation(model) - return 0.5 * result.T @ result + return result.T @ result def deriv(self, model): r"""Gradient of the regularization function evaluated for the model provided. @@ -167,7 +167,7 @@ def deriv(self, model): result = np.r_[dc_dm1, dc_dm2] - return result + return 2 * result def deriv2(self, model, v=None): r"""Hessian of the regularization function evaluated for the model provided. 
@@ -217,10 +217,10 @@ def deriv2(self, model, v=None): v1, v2 = self.wire_map * v p1 = k1**2 * v1 + k2 * k1 * v2 p2 = k2 * k1 * v1 + k2**2 * v2 - return np.r_[p1, p2] + return 2 * np.r_[p1, p2] else: n = self.regularization_mesh.nC A = utils.sdiag(np.ones(n) * (k1**2)) B = utils.sdiag(np.ones(n) * (k2**2)) C = utils.sdiag(np.ones(n) * (k1 * k2)) - return sp.bmat([[A, C], [C, B]], format="csr") + return 2 * sp.bmat([[A, C], [C, B]], format="csr") diff --git a/SimPEG/regularization/cross_gradient.py b/SimPEG/regularization/cross_gradient.py index 5204881bcc..a1cb7e1c03 100644 --- a/SimPEG/regularization/cross_gradient.py +++ b/SimPEG/regularization/cross_gradient.py @@ -52,7 +52,7 @@ class CrossGradient(BaseSimilarityMeasure): (`Haber and Gazit, 2013 `__): .. math:: - \phi (m_1, m_2) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m_1, m_2) = \int_\Omega \, w(r) \, \Big | \nabla m_1 \, \times \, \nabla m_2 \, \Big |^2 \, dv where :math:`w(r)` is a user-defined weighting function. @@ -60,7 +60,7 @@ class CrossGradient(BaseSimilarityMeasure): the regularization function can be re-expressed as: .. math:: - \phi (m_1, m_2) = \frac{1}{2} \int_\Omega \, w(r) \, \Big [ \, + \phi (m_1, m_2) = \int_\Omega \, w(r) \, \Big [ \, \big | \nabla m_1 \big |^2 \big | \nabla m_2 \big |^2 - \big ( \nabla m_1 \, \cdot \, \nabla m_2 \, \big )^2 \Big ] \, dv @@ -69,7 +69,7 @@ class CrossGradient(BaseSimilarityMeasure): function (objective function) is given by: .. math:: - \phi (m_1, m_2) \approx \frac{1}{2} \sum_i \tilde{w}_i \, \bigg [ + \phi (m_1, m_2) \approx \sum_i \tilde{w}_i \, \bigg [ \Big | (\nabla m_1)_i \Big |^2 \Big | (\nabla m_2)_i \Big |^2 - \Big [ (\nabla m_1)_i \, \cdot \, (\nabla m_2)_i \, \Big ]^2 \, \bigg ] @@ -89,9 +89,9 @@ class CrossGradient(BaseSimilarityMeasure): .. 
math:: \phi (\mathbf{m}) = - \frac{1}{2} \Big [ \mathbf{W A} \big ( \mathbf{G \, m_1} \big )^2 \Big ]^T + \Big [ \mathbf{W A} \big ( \mathbf{G \, m_1} \big )^2 \Big ]^T \Big [ \mathbf{W A} \big ( \mathbf{G \, m_2} \big )^2 \Big ] - - \frac{1}{2} \bigg \| \mathbf{W A} \Big [ \big ( \mathbf{G \, m_1} \big ) + - \bigg \| \mathbf{W A} \Big [ \big ( \mathbf{G \, m_1} \big ) \odot \big ( \mathbf{G \, m_2} \big ) \Big ] \bigg \|^2 where exponents are computed elementwise, @@ -249,9 +249,7 @@ def __call__(self, model): G = self._G g_m1 = G @ m1 g_m2 = G @ m2 - return 0.5 * np.sum( - (Av @ g_m1**2) * (Av @ g_m2**2) - (Av @ (g_m1 * g_m2)) ** 2 - ) + return np.sum((Av @ g_m1**2) * (Av @ g_m2**2) - (Av @ (g_m1 * g_m2)) ** 2) def deriv(self, model): r"""Gradient of the regularization function evaluated for the model provided. @@ -267,7 +265,7 @@ def deriv(self, model): The gradient has the form: .. math:: - \frac{\partial \phi}{\partial \mathbf{m}} = + 2 \frac{\partial \phi}{\partial \mathbf{m}} = \begin{bmatrix} \dfrac{\partial \phi}{\partial \mathbf{m_1}} \\ \dfrac{\partial \phi}{\partial \mathbf{m_2}} \end{bmatrix} @@ -288,12 +286,15 @@ def deriv(self, model): g_m1 = G @ m1 g_m2 = G @ m2 - return np.r_[ - (((Av @ g_m2**2) @ Av) * g_m1) @ G - - (((Av @ (g_m1 * g_m2)) @ Av) * g_m2) @ G, - (((Av @ g_m1**2) @ Av) * g_m2) @ G - - (((Av @ (g_m1 * g_m2)) @ Av) * g_m1) @ G, - ] + return ( + 2 + * np.r_[ + (((Av @ g_m2**2) @ Av) * g_m1) @ G + - (((Av @ (g_m1 * g_m2)) @ Av) * g_m2) @ G, + (((Av @ g_m1**2) @ Av) * g_m2) @ G + - (((Av @ (g_m1 * g_m2)) @ Av) * g_m1) @ G, + ] + ) # factor of 2 from derivative of | grad m1 x grad m2 | ^2 def deriv2(self, model, v=None): r"""Hessian of the regularization function evaluated for the model provided. 
@@ -364,7 +365,10 @@ def deriv2(self, model, v=None): D12 = G.T @ D12_mid @ G D22 = G.T @ D22_mid @ G - return sp.bmat([[D11, D12], [D12.T, D22]], format="csr") + return 2 * sp.bmat( + [[D11, D12], [D12.T, D22]], format="csr" + ) # factor of 2 from derivative of | grad m1 x grad m2 | ^2 + else: v1, v2 = self.wire_map * v @@ -384,4 +388,6 @@ + 2 * g_m2 * (Av.T @ (Av @ (g_m1 * Gv1))) # d12.T*v1 full addition - g_m1 * (Av.T @ (Av @ (g_m2 * Gv1))) # d12.T*v1 fcontinued ) - return np.r_[p1, p2] + return ( + 2 * np.r_[p1, p2] + ) # factor of 2 from derivative of | grad m1 x grad m2 | ^2 diff --git a/SimPEG/regularization/jtv.py b/SimPEG/regularization/jtv.py index 153b8cd511..86a2208915 100644 --- a/SimPEG/regularization/jtv.py +++ b/SimPEG/regularization/jtv.py @@ -50,7 +50,7 @@ class JointTotalVariation(BaseSimilarityMeasure): (`Haber and Gazit, 2013 `__): .. math:: - \phi (m_1, m_2) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m_1, m_2) = \int_\Omega \, w(r) \, \Big [ \, \big | \nabla m_1 \big |^2 \, + \, \big | \nabla m_2 \big |^2 \, \Big ]^{1/2} \, dv where :math:`w(r)` is a user-defined weighting function. @@ -60,7 +60,7 @@ class JointTotalVariation(BaseSimilarityMeasure): function (objective function) is given by: .. math:: - \phi (m_1, m_2) \approx \frac{1}{2} \sum_i \tilde{w}_i \, \bigg [ \, + \phi (m_1, m_2) \approx \sum_i \tilde{w}_i \, \bigg [ \, \Big | (\nabla m_1)_i \Big |^2 \, + \, \Big | (\nabla m_2)_i \Big |^2 \, \bigg ]^{1/2} where :math:`(\nabla m_1)_i` are the gradients of property :math:`m_1` defined on the mesh and @@ -78,7 +78,7 @@ class JointTotalVariation(BaseSimilarityMeasure): is therefore equivalent to an objective function of the form: .. 
math:: - \phi (\mathbf{m}) = \frac{1}{2} \, \mathbf{e}^T \Bigg ( \, + \phi (\mathbf{m}) = \mathbf{e}^T \Bigg ( \, \mathbf{W \, A} \bigg [ \sum_k (\mathbf{G \, m_k})^2 \bigg ] \; + \; \epsilon \mathbf{v}^2 \, \Bigg )^{1/2} diff --git a/SimPEG/regularization/pgi.py b/SimPEG/regularization/pgi.py index 496df06f45..2c98c321f8 100644 --- a/SimPEG/regularization/pgi.py +++ b/SimPEG/regularization/pgi.py @@ -103,10 +103,10 @@ class PGIsmallness(Smallness): least-square: .. math:: - \phi (\mathbf{m}) &= \frac{\alpha_{pgi}}{2} + \phi (\mathbf{m}) &= \alpha_\text{pgi} \big | \mathbf{W} ( \Theta , \mathbf{z}^\ast ) \, (\mathbf{m} - \mathbf{m_{ref}}(\Theta, \mathbf{z}^\ast ) \, \Big \|^2 - &+ \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_{jj}}{2} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 + &+ \sum_{j=x,y,z} \alpha_j \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ + &+ \sum_{j=x,y,z} \alpha_{jj} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 \;\;\;\;\;\;\;\; \big ( \textrm{optional} \big ) where @@ -497,7 +497,7 @@ def __call__(self, m, external_weights=True): ] ] - return 0.5 * mkvc(r0).dot(mkvc(r1)) + return mkvc(r0).dot(mkvc(r1)) else: modellist = self.wiresmap * m @@ -506,7 +506,7 @@ def __call__(self, m, external_weights=True): if self.non_linear_relationships: score = self.gmm.score_samples(model) score_vec = mkvc(np.r_[[score for maps in self.wiresmap.maps]]) - return -np.sum((W.T * W) * score_vec) / len(self.wiresmap.maps) + return -2 * np.sum((W.T * W) * score_vec) / len(self.wiresmap.maps) else: if external_weights and getattr(self.W, "diagonal", None) is not None: @@ -519,7 +519,7 @@ def __call__(self, m, external_weights=True): score = self.gmm.score_samples_with_sensW(model, sensW) # score_vec = mkvc(np.r_[[score for maps in self.wiresmap.maps]]) # return -np.sum((W.T * W) * score_vec) / len(self.wiresmap.maps) - return -np.sum(score) + return -2 * np.sum(score) @timeIt def deriv(self, m): @@ 
-616,7 +616,7 @@ def deriv(self, m): ] ] ) - return mkvc(mD.T * (self.W.T * r)) + return 2 * mkvc(mD.T * (self.W.T * r)) else: if self.non_linear_relationships: @@ -726,7 +726,7 @@ def deriv(self, m): logP = np.vstack([logP for maps in self.wiresmap.maps]) numer = (W * np.exp(logP)).sum(axis=1) r = numer / (np.exp(score_vec)) - return mkvc(mD.T * r) + return 2 * mkvc(mD.T * r) @timeIt def deriv2(self, m, v=None): @@ -841,22 +841,12 @@ def deriv2(self, m, v=None): mDv = self.wiresmap * (mD * v) mDv = np.c_[mDv] r0 = (self.W * (mkvc(mDv))).reshape(mDv.shape, order="F") - return mkvc( - mD.T - * ( - self.W - * ( - mkvc( - np.r_[ - [ - np.dot(self._r_second_deriv[i], r0[i]) - for i in range(len(r0)) - ] - ] - ) - ) - ) + second_deriv_times_r0 = mkvc( + np.r_[ + [np.dot(self._r_second_deriv[i], r0[i]) for i in range(len(r0))] + ] ) + return 2 * mkvc(mD.T * (self.W * second_deriv_times_r0)) else: # Forming the Hessian by diagonal blocks hlist = [ @@ -875,7 +865,7 @@ def deriv2(self, m, v=None): Hr = Hr.dot(self.W) - return (mD.T * mD) * (self.W * (Hr)) + return 2 * (mD.T * mD) * (self.W * (Hr)) else: if self.non_linear_relationships: @@ -953,7 +943,7 @@ def deriv2(self, m, v=None): for j in range(len(self.wiresmap.maps)): Hc = sp.hstack([Hc, sdiag(hlist[i][j])]) Hr = sp.vstack([Hr, Hc]) - Hr = (mD.T * mD) * Hr + Hr = 2 * (mD.T * mD) * Hr if v is not None: return Hr.dot(v) @@ -1041,12 +1031,12 @@ class PGI(ComboObjectiveFunction): ``PGI`` is given by: .. 
math:: - \phi (\mathbf{m}) &= \frac{\alpha_{pgi}}{2} + \phi (\mathbf{m}) &= \alpha_\text{pgi} \big [ \mathbf{m} - \mathbf{m_{ref}}(\Theta, \mathbf{z}^\ast ) \big ]^T \mathbf{W} ( \Theta , \mathbf{z}^\ast ) \, \big [ \mathbf{m} - \mathbf{m_{ref}}(\Theta, \mathbf{z}^\ast ) \big ] \\ - &+ \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_{jj}}{2} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 + &+ \sum_{j=x,y,z} \alpha_j \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ + &+ \sum_{j=x,y,z} \alpha_{jj} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 \;\;\;\;\;\;\;\; \big ( \textrm{optional} \big ) where @@ -1072,10 +1062,10 @@ class PGI(ComboObjectiveFunction): regularization function (objective function) can be expressed as: .. math:: - \phi (\mathbf{m}) &= \frac{\alpha_{pgi}}{2} \Big \| \mathbf{W}_{\! 1/2}(\Theta, \mathbf{z}^\ast ) \, + \phi (\mathbf{m}) &= \alpha_\text{pgi} \Big \| \mathbf{W}_{\! 1/2}(\Theta, \mathbf{z}^\ast ) \, \big [ \mathbf{m} - \mathbf{m_{ref}}(\Theta, \mathbf{z}^\ast ) \big ] \, \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_{jj}}{2} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 + &+ \sum_{j=x,y,z} \alpha_j \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ + &+ \sum_{j=x,y,z} \alpha_{jj} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 \;\;\;\;\;\;\;\; \big ( \textrm{optional} \big ) When the ``approx_eval`` property is ``True``, you may also set the ``approx_gradient`` property diff --git a/SimPEG/regularization/sparse.py b/SimPEG/regularization/sparse.py index e1602971c0..817c49e224 100644 --- a/SimPEG/regularization/sparse.py +++ b/SimPEG/regularization/sparse.py @@ -257,7 +257,7 @@ class SparseSmallness(BaseSparse, Smallness): We define the regularization function (objective function) for sparse smallness (compactness) as: .. 
math:: - \phi (m) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m) = \int_\Omega \, w(r) \, \Big | \, m(r) - m^{(ref)}(r) \, \Big |^{p(r)} \, dv where :math:`m(r)` is the model, :math:`m^{(ref)}(r)` is the reference model, :math:`w(r)` @@ -271,7 +271,7 @@ class SparseSmallness(BaseSparse, Smallness): function (objective function) is expressed in linear form as: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \sum_i + \phi (\mathbf{m}) = \sum_i \tilde{w}_i \, \Big | m_i - m_i^{(ref)} \Big |^{p_i} where :math:`m_i \in \mathbf{m}` are the discrete model parameters defined on the mesh. @@ -286,8 +286,8 @@ class SparseSmallness(BaseSparse, Smallness): .. math:: \phi \big (\mathbf{m}^{(k)} \big ) - = \frac{1}{2} \sum_i \tilde{w}_i \, \Big | m_i^{(k)} - m_i^{(ref)} \Big |^{p_i} - \approx \frac{1}{2} \sum_i \tilde{w}_i \, r_i^{(k)} \Big | m_i^{(k)} - m_i^{(ref)} \Big |^2 + = \sum_i \tilde{w}_i \, \Big | m_i^{(k)} - m_i^{(ref)} \Big |^{p_i} + \approx \sum_i \tilde{w}_i \, r_i^{(k)} \Big | m_i^{(k)} - m_i^{(ref)} \Big |^2 where the IRLS weight :math:`r_i` for iteration :math:`k` is given by: @@ -300,7 +300,7 @@ class SparseSmallness(BaseSparse, Smallness): function for IRLS iteration :math:`k` can be expressed as follows: .. math:: - \phi \big ( \mathbf{m}^{(k)} \big ) \approx \frac{1}{2} \Big \| \, + \phi \big ( \mathbf{m}^{(k)} \big ) \approx \Big \| \, \mathbf{W}^{\! (k)} \big [ \mathbf{m}^{(k)} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where @@ -464,7 +464,7 @@ class SparseSmoothness(BaseSparse, SmoothnessFirstOrder): along the x-direction as: .. math:: - \phi (m) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m) = \int_\Omega \, w(r) \, \Bigg | \, \frac{\partial m}{\partial x} \, \Bigg |^{p(r)} \, dv where :math:`m(r)` is the model, :math:`w(r)` @@ -478,7 +478,7 @@ class SparseSmoothness(BaseSparse, SmoothnessFirstOrder): function (objective function) is expressed in linear form as: .. 
math:: - \phi (\mathbf{m}) = \frac{1}{2} \sum_i + \phi (\mathbf{m}) = \sum_i \tilde{w}_i \, \Bigg | \, \frac{\partial m_i}{\partial x} \, \Bigg |^{p_i} where :math:`m_i \in \mathbf{m}` are the discrete model parameters defined on the mesh. @@ -493,9 +493,9 @@ class SparseSmoothness(BaseSparse, SmoothnessFirstOrder): .. math:: \phi \big (\mathbf{m}^{(k)} \big ) - = \frac{1}{2} \sum_i + = \sum_i \tilde{w}_i \, \Bigg | \, \frac{\partial m_i^{(k)}}{\partial x} \Bigg |^{p_i} - \approx \frac{1}{2} \sum_i \tilde{w}_i \, r_i^{(k)} + \approx \sum_i \tilde{w}_i \, r_i^{(k)} \Bigg | \, \frac{\partial m_i^{(k)}}{\partial x} \Bigg |^2 where the IRLS weight :math:`r_i` for iteration :math:`k` is given by: @@ -509,7 +509,7 @@ class SparseSmoothness(BaseSparse, SmoothnessFirstOrder): function for IRLS iteration :math:`k` can be expressed as follows: .. math:: - \phi \big ( \mathbf{m}^{(k)} \big ) \approx \frac{1}{2} \Big \| \, + \phi \big ( \mathbf{m}^{(k)} \big ) \approx \Big \| \, \mathbf{W}^{(k)} \, \mathbf{G_x} \, \mathbf{m}^{(k)} \Big \|^2 where @@ -528,7 +528,7 @@ class SparseSmoothness(BaseSparse, SmoothnessFirstOrder): In this case, the least-squares problem for IRLS iteration :math:`k` becomes: .. math:: - \phi \big ( \mathbf{m}^{(k)} \big ) \approx \frac{1}{2} \Big \| \, + \phi \big ( \mathbf{m}^{(k)} \big ) \approx \Big \| \, \mathbf{W}^{(k)} \mathbf{G_x} \big [ \mathbf{m}^{(k)} - \mathbf{m}^{(ref)} \big ] \Big \|^2 @@ -765,9 +765,9 @@ class Sparse(WeightedLeastSquares): :math:`\phi_m (m)` of the form: .. 
math:: - \phi_m (m) = \frac{\alpha_s}{2} \int_\Omega \, w(r) + \phi_m (m) = \alpha_s \int_\Omega \, w(r) \Big | \, m(r) - m^{(ref)}(r) \, \Big |^{p_s(r)} \, dv - + \sum_{j=x,y,z} \frac{\alpha_j}{2} \int_\Omega \, w(r) + + \sum_{j=x,y,z} \alpha_j \int_\Omega \, w(r) \Bigg | \, \frac{\partial m}{\partial \xi_j} \, \Bigg |^{p_j(r)} \, dv where :math:`m(r)` is the model, :math:`m^{(ref)}(r)` is the reference model, and :math:`w(r)` @@ -819,9 +819,9 @@ class Sparse(WeightedLeastSquares): objective functions of the form: .. math:: - \phi_m (\mathbf{m}) = \frac{\alpha_s}{2} + \phi_m (\mathbf{m}) = \alpha_s \Big \| \mathbf{W_s}^{\!\! (k)} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 - + \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \mathbf{W_j}^{\! (k)} \mathbf{G_j \, m} \Big \|^2 + + \sum_{j=x,y,z} \alpha_j \Big \| \mathbf{W_j}^{\! (k)} \mathbf{G_j \, m} \Big \|^2 where @@ -878,9 +878,9 @@ class Sparse(WeightedLeastSquares): the objective function becomes: .. math:: - \phi_m (\mathbf{m}) = \frac{\alpha_s}{2} + \phi_m (\mathbf{m}) = \alpha_s \Big \| \mathbf{W_s}^{\! (k)} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 - + \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| + + \sum_{j=x,y,z} \alpha_j \Big \| \mathbf{W_j}^{\! (k)} \mathbf{G_j} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 This functionality is used by setting the `reference_model_in_smooth` parameter diff --git a/SimPEG/regularization/vector.py b/SimPEG/regularization/vector.py index 2341b4d82c..19bd68f080 100644 --- a/SimPEG/regularization/vector.py +++ b/SimPEG/regularization/vector.py @@ -82,7 +82,7 @@ class CrossReferenceRegularization(Smallness, BaseVectorRegularization): regularization is given by: .. 
math:: - \phi (\vec{m}) = \frac{1}{2} \int_\Omega \, \vec{w}(r) \, \cdot \, + \phi (\vec{m}) = \int_\Omega \, \vec{w}(r) \, \cdot \, \Big [ \vec{m}(r) \, \times \, \vec{m}^{(ref)}(r) \Big ]^2 \, dv where :math:`\vec{m}^{(ref)}(r)` is the reference model vector and :math:`\vec{w}(r)` @@ -93,7 +93,7 @@ class CrossReferenceRegularization(Smallness, BaseVectorRegularization): function (objective function) is given by: .. math:: - \phi (\vec{m}) \approx \frac{1}{2} \sum_i \tilde{w}_i \, \cdot \, + \phi (\vec{m}) \approx \sum_i \tilde{w}_i \, \cdot \, \Big | \vec{m}_i \, \times \, \vec{m}_i^{(ref)} \Big |^2 where :math:`\tilde{m}_i \in \mathbf{m}` are the model vectors at cell centers and @@ -129,7 +129,7 @@ class CrossReferenceRegularization(Smallness, BaseVectorRegularization): The discrete regularization function in linear form can ultimately be expressed as: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} + \phi (\mathbf{m}) = \Big \| \mathbf{W X m} \, \Big \|^2 @@ -262,7 +262,7 @@ def f_m(self, m): The objective function for cross reference regularization is given by: .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W X m} \, \Big \|^2 where :math:`\mathbf{m}` are the discrete vector model parameters defined on the mesh (model), @@ -277,7 +277,7 @@ def f_m(self, m): such that .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 """ return self._X @ (self.mapping * m) @@ -309,7 +309,7 @@ def f_m_deriv(self, m): The objective function for cross reference regularization is given by: .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W X m} \, \Big \|^2 where :math:`\mathbf{m}` are the discrete vector model parameters defined on the mesh (model), @@ -324,7 +324,7 @@ def f_m_deriv(self, m): such that .. 
math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 Thus, the derivative with respect to the model is: @@ -423,11 +423,15 @@ def deriv(self, m) -> np.ndarray: """ d_m = self._delta_m(m) - return self.f_m_deriv(m).T * ( - self.W.T - @ self.W - @ (self.f_m_deriv(m) @ d_m).reshape((-1, self.n_comp), order="F") - ).flatten(order="F") + return ( + 2 + * self.f_m_deriv(m).T + * ( + self.W.T + @ self.W + @ (self.f_m_deriv(m) @ d_m).reshape((-1, self.n_comp), order="F") + ).flatten(order="F") + ) def deriv2(self, m, v=None) -> csr_matrix: r"""Hessian of the regularization function evaluated for the model provided. @@ -460,13 +464,21 @@ def deriv2(self, m, v=None) -> csr_matrix: f_m_deriv = self.f_m_deriv(m) if v is None: - return f_m_deriv.T * ( - sp.block_diag([self.W.T * self.W] * self.n_comp) * f_m_deriv + return ( + 2 + * f_m_deriv.T + * (sp.block_diag([self.W.T * self.W] * self.n_comp) * f_m_deriv) ) - return f_m_deriv.T * ( - self.W.T @ self.W @ (f_m_deriv * v).reshape((-1, self.n_comp), order="F") - ).flatten(order="F") + return ( + 2 + * f_m_deriv.T + * ( + self.W.T + @ self.W + @ (f_m_deriv * v).reshape((-1, self.n_comp), order="F") + ).flatten(order="F") + ) class AmplitudeSmallness(SparseSmallness, BaseAmplitude): @@ -519,7 +531,7 @@ class AmplitudeSmallness(SparseSmallness, BaseAmplitude): (compactness) as: .. math:: - \phi (\vec{m}) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (\vec{m}) = \int_\Omega \, w(r) \, \Big | \, \vec{m}(r) - \vec{m}^{(ref)}(r) \, \Big |^{p(r)} \, dv where :math:`\vec{m}(r)` is the model, :math:`\vec{m}^{(ref)}(r)` is the reference model, :math:`w(r)` @@ -533,7 +545,7 @@ class AmplitudeSmallness(SparseSmallness, BaseAmplitude): function (objective function) is expressed in linear form as: .. 
math:: - \phi (\mathbf{m}) = \frac{1}{2} \sum_i + \phi (\mathbf{m}) = \sum_i \tilde{w}_i \, \Big | \vec{m}_i - \vec{m}_i^{(ref)} \Big |^{p_i} where :math:`\mathbf{m}` are the model parameters, :math:`\vec{m}_i` represents the vector @@ -549,8 +561,8 @@ class AmplitudeSmallness(SparseSmallness, BaseAmplitude): .. math:: \phi \big (\mathbf{m}^{(k)} \big ) - = \frac{1}{2} \sum_i \tilde{w}_i \, \Big | \, \vec{m}_i^{(k)} - \vec{m}_i^{(ref)} \, \Big |^{p_i} - \approx \frac{1}{2} \sum_i \tilde{w}_i \, r_i^{(k)} + = \sum_i \tilde{w}_i \, \Big | \, \vec{m}_i^{(k)} - \vec{m}_i^{(ref)} \, \Big |^{p_i} + \approx \sum_i \tilde{w}_i \, r_i^{(k)} \Big | \, \vec{m}_i^{(k)} - \vec{m}_i^{(ref)} \, \Big |^2 where the IRLS weight :math:`r_i` for iteration :math:`k` is given by: @@ -578,7 +590,7 @@ class AmplitudeSmallness(SparseSmallness, BaseAmplitude): The objective function for IRLS iteration :math:`k` is given by: .. math:: - \phi \big ( \mathbf{\bar{m}}^{(k)} \big ) \approx \frac{1}{2} \Big \| \, + \phi \big ( \mathbf{\bar{m}}^{(k)} \big ) \approx \Big \| \, \mathbf{W}^{(k)} \, \mathbf{\bar{m}}^{(k)} \; \Big \|^2 where @@ -738,7 +750,7 @@ class AmplitudeSmoothnessFirstOrder(SparseSmoothness, BaseAmplitude): along the x-direction as: .. math:: - \phi (m) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m) = \int_\Omega \, w(r) \, \Bigg | \, \frac{\partial |\vec{m}|}{\partial x} \, \Bigg |^{p(r)} \, dv where :math:`\vec{m}(r)` is the model, :math:`w(r)` @@ -752,7 +764,7 @@ class AmplitudeSmoothnessFirstOrder(SparseSmoothness, BaseAmplitude): function (objective function) is expressed in linear form as: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \sum_i + \phi (\mathbf{m}) = \sum_i \tilde{w}_i \, \Bigg | \, \frac{\partial |\vec{m}_i|}{\partial x} \, \Bigg |^{p_i} where :math:`\vec{m}_i` is the vector defined for mesh cell :math:`i`. @@ -767,9 +779,9 @@ class AmplitudeSmoothnessFirstOrder(SparseSmoothness, BaseAmplitude): .. 
math:: \phi \big (\mathbf{m}^{(k)} \big ) - = \frac{1}{2} \sum_i + = \sum_i \tilde{w}_i \, \left | \, \frac{\partial \big | \vec{m}_i^{(k)} \big | }{\partial x} \right |^{p_i} - \approx \frac{1}{2} \sum_i \tilde{w}_i \, r_i^{(k)} + \approx \sum_i \tilde{w}_i \, r_i^{(k)} \left | \, \frac{\partial \big | \vec{m}_i^{(k)} \big | }{\partial x} \right |^2 where the IRLS weight :math:`r_i` for iteration :math:`k` is given by: @@ -794,7 +806,7 @@ class AmplitudeSmoothnessFirstOrder(SparseSmoothness, BaseAmplitude): The objective function for IRLS iteration :math:`k` is given by: .. math:: - \phi \big ( \mathbf{m}^{(k)} \big ) \approx \frac{1}{2} \Big \| \, + \phi \big ( \mathbf{m}^{(k)} \big ) \approx \Big \| \, \mathbf{W}^{(k)} \, \mathbf{G_x} \, \mathbf{\bar{m}}^{(k)} \Big \|^2 where @@ -813,7 +825,7 @@ class AmplitudeSmoothnessFirstOrder(SparseSmoothness, BaseAmplitude): In this case, the least-squares problem for IRLS iteration :math:`k` becomes: .. math:: - \phi \big ( \mathbf{m}^{(k)} \big ) \approx \frac{1}{2} \Big \| \, + \phi \big ( \mathbf{m}^{(k)} \big ) \approx \Big \| \, \mathbf{W}^{(k)} \, \mathbf{G_x} \, \mathbf{\bar{m}}^{(k)} \Big \|^2 where @@ -1042,9 +1054,9 @@ class VectorAmplitude(Sparse): :math:`\phi_m (m)` of the form: .. math:: - \phi_m (m) = \frac{\alpha_s}{2} \int_\Omega \, w(r) + \phi_m (m) = \alpha_s \int_\Omega \, w(r) \Big | \, \vec{m}(r) - \vec{m}^{(ref)}(r) \, \Big |^{p_s(r)} \, dv - + \sum_{j=x,y,z} \frac{\alpha_j}{2} \int_\Omega \, w(r) + + \sum_{j=x,y,z} \alpha_j \int_\Omega \, w(r) \Bigg | \, \frac{\partial |\vec{m}|}{\partial \xi_j} \, \bigg |^{p_j(r)} \, dv where :math:`\vec{m}(r)` is the model, :math:`\vec{m}^{(ref)}(r)` is the reference model, @@ -1104,9 +1116,9 @@ class VectorAmplitude(Sparse): objective functions of the form: .. math:: - \phi_m (\mathbf{m}) = \frac{\alpha_s}{2} + \phi_m (\mathbf{m}) = \alpha_s \Big \| \, \mathbf{W_s}^{\! 
(k)} \, \Delta \mathbf{\bar{m}} \, \Big \|^2 - + \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \, \mathbf{W_j}^{\! (k)} \mathbf{G_j \, \bar{m}} \, \Big \|^2 + + \sum_{j=x,y,z} \alpha_j \Big \| \, \mathbf{W_j}^{\! (k)} \mathbf{G_j \, \bar{m}} \, \Big \|^2 where @@ -1171,9 +1183,9 @@ class VectorAmplitude(Sparse): the objective function becomes: .. math:: - \phi_m (\mathbf{m}) = \frac{\alpha_s}{2} + \phi_m (\mathbf{m}) = \alpha_s \Big \| \, \mathbf{W_s}^{\! (k)} \, \Delta \mathbf{\bar{m}} \, \Big \|^2 - + \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \, \mathbf{W_j}^{\! (k)} \mathbf{G_j \, \Delta \bar{m}} \, \Big \|^2 + + \sum_{j=x,y,z} \alpha_j \Big \| \, \mathbf{W_j}^{\! (k)} \mathbf{G_j \, \Delta \bar{m}} \, \Big \|^2 This functionality is used by setting the `reference_model_in_smooth` parameter to ``True``. diff --git a/examples/20-published/plot_tomo_joint_with_volume.py b/examples/20-published/plot_tomo_joint_with_volume.py index 2b9c445917..791bc32a8c 100644 --- a/examples/20-published/plot_tomo_joint_with_volume.py +++ b/examples/20-published/plot_tomo_joint_with_volume.py @@ -42,7 +42,7 @@ class Volume(objective_function.BaseObjectiveFunction): .. 
math:: - \phi_v = \frac{1}{2}|| \int_V m dV - \text{knownVolume} ||^2 + \phi_v = || \int_V m dV - \text{knownVolume} ||^2 """ def __init__(self, mesh, knownVolume=0.0, **kwargs): @@ -60,25 +60,27 @@ def knownVolume(self, value): self._knownVolume = utils.validate_float("knownVolume", value, min_val=0.0) def __call__(self, m): - return 0.5 * (self.estVol(m) - self.knownVolume) ** 2 + return (self.estVol(m) - self.knownVolume) ** 2 def estVol(self, m): return np.inner(self.mesh.cell_volumes, m) def deriv(self, m): # return (self.mesh.cell_volumes * np.inner(self.mesh.cell_volumes, m)) - return self.mesh.cell_volumes * ( - self.knownVolume - np.inner(self.mesh.cell_volumes, m) - ) + return ( + 2 + * self.mesh.cell_volumes + * (self.knownVolume - np.inner(self.mesh.cell_volumes, m)) + ) # factor of 2 from deriv of ||estVol - knownVol||^2 def deriv2(self, m, v=None): if v is not None: - return utils.mkvc( + return 2 * utils.mkvc( self.mesh.cell_volumes * np.inner(self.mesh.cell_volumes, v) ) else: # TODO: this is inefficent. 
It is a fully dense matrix - return sp.csc_matrix( + return 2 * sp.csc_matrix( np.outer(self.mesh.cell_volumes, self.mesh.cell_volumes) ) diff --git a/tests/base/test_cross_gradient.py b/tests/base/test_cross_gradient.py index 66e84082ab..9c764d481e 100644 --- a/tests/base/test_cross_gradient.py +++ b/tests/base/test_cross_gradient.py @@ -96,7 +96,7 @@ def test_cross_grad_calc(self): cross_grad = self.cross_grad - v1 = 0.5 * np.sum(np.abs(cross_grad.calculate_cross_gradient(m))) + v1 = np.sum(np.abs(cross_grad.calculate_cross_gradient(m))) v2 = cross_grad(m) self.assertEqual(v1, v2) diff --git a/tests/base/test_objective_function.py b/tests/base/test_objective_function.py index 78d2161361..211a9719bf 100644 --- a/tests/base/test_objective_function.py +++ b/tests/base/test_objective_function.py @@ -278,13 +278,11 @@ def test_ComboW(self): r1 = phi1.W * m r2 = phi2.W * m - print(phi(m), 0.5 * np.inner(r, r)) + print(phi(m), np.inner(r, r)) - self.assertTrue(np.allclose(phi(m), 0.5 * np.inner(r, r))) + self.assertTrue(np.allclose(phi(m), np.inner(r, r))) self.assertTrue( - np.allclose( - phi(m), 0.5 * (alpha1 * np.inner(r1, r1) + alpha2 * np.inner(r2, r2)) - ) + np.allclose(phi(m), (alpha1 * np.inner(r1, r1) + alpha2 * np.inner(r2, r2))) ) def test_ComboConstruction(self): diff --git a/tests/base/test_pgi_regularization.py b/tests/base/test_pgi_regularization.py index b8db90f00e..440e50a494 100644 --- a/tests/base/test_pgi_regularization.py +++ b/tests/base/test_pgi_regularization.py @@ -4,6 +4,7 @@ import numpy as np from pymatsolver import SolverLU from scipy.stats import multivariate_normal + from SimPEG import regularization from SimPEG.maps import Wires from SimPEG.utils import WeightedGaussianMixture, mkvc @@ -85,9 +86,7 @@ def test_full_covariances(self): dm = self.model - mref score_approx0 = reg(self.model) score_approx1 = 0.5 * dm.dot(reg.deriv2(self.model, dm)) - passed_score_approx = np.allclose(score_approx0, score_approx1) - 
self.assertTrue(passed_score_approx) - + np.testing.assert_allclose(score_approx0, score_approx1) reg.objfcts[0].approx_eval = False score = reg(self.model) - reg(mref) passed_score = np.allclose(score_approx0, score, rtol=1e-4) @@ -193,8 +192,7 @@ def test_tied_covariances(self): dm = self.model - mref score_approx0 = reg(self.model) score_approx1 = 0.5 * dm.dot(reg.deriv2(self.model, dm)) - passed_score_approx = np.allclose(score_approx0, score_approx1) - self.assertTrue(passed_score_approx) + np.testing.assert_allclose(score_approx0, score_approx1) reg.objfcts[0].approx_eval = False score = reg(self.model) - reg(mref) passed_score = np.allclose(score_approx0, score, rtol=1e-4) @@ -297,8 +295,7 @@ def test_diag_covariances(self): dm = self.model - mref score_approx0 = reg(self.model) score_approx1 = 0.5 * dm.dot(reg.deriv2(self.model, dm)) - passed_score_approx = np.allclose(score_approx0, score_approx1) - self.assertTrue(passed_score_approx) + np.testing.assert_allclose(score_approx0, score_approx1) reg.objfcts[0].approx_eval = False score = reg(self.model) - reg(mref) passed_score = np.allclose(score_approx0, score, rtol=1e-4) @@ -401,8 +398,7 @@ def test_spherical_covariances(self): dm = self.model - mref score_approx0 = reg(self.model) score_approx1 = 0.5 * dm.dot(reg.deriv2(self.model, dm)) - passed_score_approx = np.allclose(score_approx0, score_approx1) - self.assertTrue(passed_score_approx) + np.testing.assert_allclose(score_approx0, score_approx1) reg.objfcts[0].approx_eval = False score = reg(self.model) - reg(mref) passed_score = np.allclose(score_approx0, score, rtol=1e-4) diff --git a/tests/em/em1d/test_EM1D_FD_jac_layers.py b/tests/em/em1d/test_EM1D_FD_jac_layers.py index 83c78f9758..630e45a4d8 100644 --- a/tests/em/em1d/test_EM1D_FD_jac_layers.py +++ b/tests/em/em1d/test_EM1D_FD_jac_layers.py @@ -155,8 +155,10 @@ def test_EM1DFDJtvec_Layers(self): def misfit(m, dobs): dpred = self.sim.dpred(m) - misfit = 0.5 * np.linalg.norm(dpred - dobs) ** 2 - 
dmisfit = self.sim.Jtvec(m, dr) + misfit = np.linalg.norm(dpred - dobs) ** 2 + dmisfit = 2.0 * self.sim.Jtvec( + m, dr + ) # derivative of ||dpred - dobs||^2 gives factor of 2 return misfit, dmisfit def derChk(m): @@ -314,8 +316,10 @@ def test_EM1DFDJtvec_Layers(self): def misfit(m, dobs): dpred = self.sim.dpred(m) - misfit = 0.5 * np.linalg.norm(dpred - dobs) ** 2 - dmisfit = self.sim.Jtvec(m, dr) + misfit = np.linalg.norm(dpred - dobs) ** 2 + dmisfit = 2 * self.sim.Jtvec( + m, dr + ) # derivative of ||dpred - dobs||^2 gives factor of 2 return misfit, dmisfit def derChk(m): @@ -450,8 +454,10 @@ def test_EM1DFDJtvec_Layers(self): def misfit(m, dobs): dpred = self.sim.dpred(m) - misfit = 0.5 * np.linalg.norm(dpred - dobs) ** 2 - dmisfit = self.sim.Jtvec(m, dr) + misfit = np.linalg.norm(dpred - dobs) ** 2 + dmisfit = 2 * self.sim.Jtvec( + m, dr + ) # derivative of ||dpred - dobs||^2 gives factor of 2 return misfit, dmisfit def derChk(m): diff --git a/tests/pf/test_pf_quadtree_inversion_linear.py b/tests/pf/test_pf_quadtree_inversion_linear.py index 78dc40de46..96bd691749 100644 --- a/tests/pf/test_pf_quadtree_inversion_linear.py +++ b/tests/pf/test_pf_quadtree_inversion_linear.py @@ -463,7 +463,7 @@ def test_quadtree_grav_inverse(self): self.assertAlmostEqual(model_residual, 0.1, delta=0.1) # Check data converged to less than 10% of target misfit - data_misfit = 2.0 * self.grav_inv.invProb.dmisfit(self.grav_model) + data_misfit = self.grav_inv.invProb.dmisfit(self.grav_model) self.assertLess(data_misfit, dpred.shape[0] * 1.15) def test_quadtree_mag_inverse(self): @@ -481,7 +481,7 @@ def test_quadtree_mag_inverse(self): self.assertAlmostEqual(model_residual, 0.01, delta=0.05) # Check data converged to less than 10% of target misfit - data_misfit = 2.0 * self.mag_inv.invProb.dmisfit(self.mag_model) + data_misfit = self.mag_inv.invProb.dmisfit(self.mag_model) self.assertLess(data_misfit, dpred.shape[0] * 1.1) def test_quadtree_grav_inverse_activecells(self): @@ -501,7 
+501,7 @@ def test_quadtree_grav_inverse_activecells(self): self.assertAlmostEqual(model_residual, 0.1, delta=0.1) # Check data converged to less than 10% of target misfit - data_misfit = 2.0 * self.grav_inv_active.invProb.dmisfit( + data_misfit = self.grav_inv_active.invProb.dmisfit( self.grav_model[self.active_cells] ) self.assertLess(data_misfit, dpred.shape[0] * 1.1) @@ -530,7 +530,7 @@ def test_quadtree_mag_inverse_activecells(self): self.assertAlmostEqual(model_residual, 0.01, delta=0.05) # Check data converged to less than 10% of target misfit - data_misfit = 2.0 * self.mag_inv_active.invProb.dmisfit( + data_misfit = self.mag_inv_active.invProb.dmisfit( self.mag_model[self.active_cells] ) self.assertLess(data_misfit, dpred.shape[0] * 1.1) diff --git a/tests/utils/test_mat_utils.py b/tests/utils/test_mat_utils.py index 9fc7018435..30655046b0 100644 --- a/tests/utils/test_mat_utils.py +++ b/tests/utils/test_mat_utils.py @@ -75,7 +75,7 @@ def g(k): def test_dm_eigenvalue_by_power_iteration(self): # Test for a single data misfit - dmis_matrix = self.G.T.dot((self.dmis.W**2).dot(self.G)) + dmis_matrix = 2 * self.G.T.dot((self.dmis.W**2).dot(self.G)) field = self.dmis.simulation.fields(self.true_model) max_eigenvalue_numpy, _ = eigsh(dmis_matrix, k=1) max_eigenvalue_directive = eigenvalue_by_power_iteration( @@ -89,7 +89,7 @@ def test_dm_eigenvalue_by_power_iteration(self): WtW = 0.0 for mult, dm in zip(self.dmiscombo.multipliers, self.dmiscombo.objfcts): WtW += mult * dm.W**2 - dmiscombo_matrix = self.G.T.dot(WtW.dot(self.G)) + dmiscombo_matrix = 2 * self.G.T.dot(WtW.dot(self.G)) max_eigenvalue_numpy, _ = eigsh(dmiscombo_matrix, k=1) max_eigenvalue_directive = eigenvalue_by_power_iteration( self.dmiscombo, self.true_model, n_pw_iter=30 @@ -110,7 +110,7 @@ def test_reg_eigenvalue_by_power_iteration(self): def test_combo_eigenvalue_by_power_iteration(self): reg_maxtrix = self.reg.deriv2(self.true_model) - dmis_matrix = self.G.T.dot((self.dmis.W**2).dot(self.G)) + 
dmis_matrix = 2 * self.G.T.dot((self.dmis.W**2).dot(self.G)) combo_matrix = dmis_matrix + self.beta * reg_maxtrix max_eigenvalue_numpy, _ = eigsh(combo_matrix, k=1) max_eigenvalue_directive = eigenvalue_by_power_iteration( From b22e6a2069bbcf37dc9331e6b703750767af0895 Mon Sep 17 00:00:00 2001 From: Lindsey Heagy Date: Thu, 22 Feb 2024 11:26:52 -0800 Subject: [PATCH 14/68] Improvements to template for a bug report issue (#1359) Fix typos in the template for the bug report issue, and simplify the text. --- .github/ISSUE_TEMPLATE/bug-report.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 5e2e876ec4..ba93c01385 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -7,7 +7,7 @@ body: - type: markdown attributes: value: > - Thanks for your use of SimPEG and for taking the time to report a bug! Please + Thanks for using SimPEG and taking the time to report a bug! Please first double check that there is not already a bug report on this issue by searching through the existing bugs. @@ -19,11 +19,11 @@ body: - type: textarea attributes: - label: "Reproducable code example:" + label: "Reproducible code example:" description: > Please submit a small, but complete, code sample that reproduces the bug or missing functionality. It should be able to be copy-pasted - into a Python interpreter and ran as-is. + into a Python interpreter and run as-is. 
placeholder: | import SimPEG << your code here >> @@ -58,4 +58,4 @@ body: placeholder: | << your explanation here >> validations: - required: false \ No newline at end of file + required: false From ff5df80669cc9efef68f680a42936c86f51d3700 Mon Sep 17 00:00:00 2001 From: Lindsey Heagy Date: Sun, 25 Feb 2024 16:46:12 -0800 Subject: [PATCH 15/68] fix sizes on creation of empty G matrix --- SimPEG/simulation.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/SimPEG/simulation.py b/SimPEG/simulation.py index 656645cc88..cc1b172b5f 100644 --- a/SimPEG/simulation.py +++ b/SimPEG/simulation.py @@ -665,12 +665,13 @@ def G(self): Matrix whose rows are the kernel functions """ if getattr(self, "_G", None) is None: - G = np.empty((self.mesh.nC, self.n_kernels)) + G_nodes = np.empty((self.mesh.n_nodes, self.n_kernels)) for i in range(self.n_kernels): - G[:, i] = self.g(i) + print(self.g(i).shape) + G_nodes[:, i] = self.g(i) - self._G = ( - sdiag(self.mesh.cell_volumes) @ (self.mesh.average_node_to_cell @ G).T + self._G = (self.mesh.average_node_to_cell @ G_nodes).T @ sdiag( + self.mesh.cell_volumes ) return self._G From ec7d62957bcb3ec033d8758b5a62a1f0cbc61281 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 28 Feb 2024 09:44:49 -0800 Subject: [PATCH 16/68] Simplify a few gravity simulation tests (#1363) Remove some unneeded lines in gravity simulation tests, particularly the ones that tests warnings and errors being raised. Replace `unittest.mock.patch` with `pytest`'s Monkeypatch to fake `choclo` not being installed. 
--- tests/pf/test_forward_Grav_Linear.py | 86 ++++------------------------ 1 file changed, 11 insertions(+), 75 deletions(-) diff --git a/tests/pf/test_forward_Grav_Linear.py b/tests/pf/test_forward_Grav_Linear.py index 496e80b1f7..7208f0f9e3 100644 --- a/tests/pf/test_forward_Grav_Linear.py +++ b/tests/pf/test_forward_Grav_Linear.py @@ -1,6 +1,6 @@ -from unittest.mock import patch import pytest import discretize +import SimPEG from SimPEG import maps from SimPEG.potential_fields import gravity from geoana.gravity import Prism @@ -314,87 +314,38 @@ def test_sensitivity_dtype( assert simulation.sensitivity_dtype is np.float32 @pytest.mark.parametrize("invalid_dtype", (float, np.float16)) - def test_invalid_sensitivity_dtype_assignment( - self, simple_mesh, receivers_locations, invalid_dtype - ): + def test_invalid_sensitivity_dtype_assignment(self, simple_mesh, invalid_dtype): """ Test invalid sensitivity_dtype assignment """ - # Create survey - receivers = gravity.Point(receivers_locations, components="gz") - sources = gravity.SourceField([receivers]) - survey = gravity.Survey(sources) - # Create reduced identity map for Linear Problem - active_cells = np.ones(simple_mesh.n_cells, dtype=bool) - idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) - # Create simulation simulation = gravity.Simulation3DIntegral( simple_mesh, - survey=survey, - rhoMap=idenMap, - ind_active=active_cells, ) # Check if error is raised msg = "sensitivity_dtype must be either np.float32 or np.float64." 
with pytest.raises(TypeError, match=msg): simulation.sensitivity_dtype = invalid_dtype - def test_invalid_engine(self, simple_mesh, receivers_locations): + def test_invalid_engine(self, simple_mesh): """Test if error is raised after invalid engine.""" - # Create survey - receivers = gravity.Point(receivers_locations, components="gz") - sources = gravity.SourceField([receivers]) - survey = gravity.Survey(sources) - # Create reduced identity map for Linear Problem - active_cells = np.ones(simple_mesh.n_cells, dtype=bool) - idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) - # Check if error is raised after an invalid engine is passed engine = "invalid engine" with pytest.raises(ValueError, match=f"Invalid engine '{engine}'"): - gravity.Simulation3DIntegral( - simple_mesh, - survey=survey, - rhoMap=idenMap, - ind_active=active_cells, - engine=engine, - ) + gravity.Simulation3DIntegral(simple_mesh, engine=engine) - def test_choclo_and_n_proceesses(self, simple_mesh, receivers_locations): + def test_choclo_and_n_proceesses(self, simple_mesh): """Check if warning is raised after passing n_processes with choclo engine.""" - # Create survey - receivers = gravity.Point(receivers_locations, components="gz") - sources = gravity.SourceField([receivers]) - survey = gravity.Survey(sources) - # Create reduced identity map for Linear Problem - active_cells = np.ones(simple_mesh.n_cells, dtype=bool) - idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) - # Check if warning is raised msg = "The 'n_processes' will be ignored when selecting 'choclo'" with pytest.warns(UserWarning, match=msg): simulation = gravity.Simulation3DIntegral( - simple_mesh, - survey=survey, - rhoMap=idenMap, - ind_active=active_cells, - engine="choclo", - n_processes=2, + simple_mesh, engine="choclo", n_processes=2 ) # Check if n_processes was overwritten and set to None assert simulation.n_processes is None - def test_choclo_and_sensitivity_path_as_dir( - self, simple_mesh, receivers_locations, tmp_path - ): 
+ def test_choclo_and_sensitivity_path_as_dir(self, simple_mesh, tmp_path): """ Check if error is raised when sensitivity_path is a dir with choclo engine. """ - # Create survey - receivers = gravity.Point(receivers_locations, components="gz") - sources = gravity.SourceField([receivers]) - survey = gravity.Survey(sources) - # Create reduced identity map for Linear Problem - active_cells = np.ones(simple_mesh.n_cells, dtype=bool) - idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) # Create a sensitivity_path directory sensitivity_path = tmp_path / "sensitivity_dummy" sensitivity_path.mkdir() @@ -403,36 +354,21 @@ def test_choclo_and_sensitivity_path_as_dir( with pytest.raises(ValueError, match=msg): gravity.Simulation3DIntegral( simple_mesh, - survey=survey, - rhoMap=idenMap, - ind_active=active_cells, store_sensitivities="disk", sensitivity_path=str(sensitivity_path), engine="choclo", ) - @patch("SimPEG.potential_fields.gravity.simulation.choclo", None) - def test_choclo_missing(self, simple_mesh, receivers_locations): + def test_choclo_missing(self, simple_mesh, monkeypatch): """ Check if error is raised when choclo is missing and chosen as engine. """ - # Create survey - receivers = gravity.Point(receivers_locations, components="gz") - sources = gravity.SourceField([receivers]) - survey = gravity.Survey(sources) - # Create reduced identity map for Linear Problem - active_cells = np.ones(simple_mesh.n_cells, dtype=bool) - idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) + # Monkeypatch choclo in SimPEG.potential_fields.base + monkeypatch.setattr(SimPEG.potential_fields.gravity.simulation, "choclo", None) # Check if error is raised msg = "The choclo package couldn't be found." 
with pytest.raises(ImportError, match=msg): - gravity.Simulation3DIntegral( - simple_mesh, - survey=survey, - rhoMap=idenMap, - ind_active=active_cells, - engine="choclo", - ) + gravity.Simulation3DIntegral(simple_mesh, engine="choclo") class TestConversionFactor: From 10add2d334889df3d255b04275a2afa90b61a9c6 Mon Sep 17 00:00:00 2001 From: Lindsey Heagy Date: Wed, 28 Feb 2024 14:34:52 -0800 Subject: [PATCH 17/68] remove print statement --- SimPEG/simulation.py | 1 - 1 file changed, 1 deletion(-) diff --git a/SimPEG/simulation.py b/SimPEG/simulation.py index cc1b172b5f..c9dccd53d1 100644 --- a/SimPEG/simulation.py +++ b/SimPEG/simulation.py @@ -668,7 +668,6 @@ def G(self): G_nodes = np.empty((self.mesh.n_nodes, self.n_kernels)) for i in range(self.n_kernels): - print(self.g(i).shape) G_nodes[:, i] = self.g(i) self._G = (self.mesh.average_node_to_cell @ G_nodes).T @ sdiag( From d993a0f361010e88ed4edf6dab1aa3f52850e0bf Mon Sep 17 00:00:00 2001 From: Lindsey Heagy Date: Wed, 28 Feb 2024 14:39:23 -0800 Subject: [PATCH 18/68] add documentation stating what a datum is and how we evaluate the integral --- SimPEG/simulation.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/SimPEG/simulation.py b/SimPEG/simulation.py index c9dccd53d1..da0c94b1cb 100644 --- a/SimPEG/simulation.py +++ b/SimPEG/simulation.py @@ -557,11 +557,20 @@ def Jtvec(self, m, v, f=None): class ExponentialSinusoidSimulation(LinearSimulation): r""" This is the simulation class for the linear problem consisting of - exponentially decaying sinusoids. The rows of the G matrix are + exponentially decaying sinusoids. The kernel functions take the form: .. math:: \int_x e^{p j_k x} \cos(\pi q j_k x) \quad, j_k \in [j_0, ..., j_n] + + The model is defined at cell centers while the kernel functions are defined on nodes. + The trapezoid rule is used to evaluate the integral + + .. math:: + + d_j = \int g_j(x) m(x) dx + + to define our data. 
""" def __init__(self, n_kernels=20, p=-0.25, q=0.25, j0=0.0, jn=60.0, **kwargs): From 0d9adeebe2b4f9d7e0f482016df9e71421ba9888 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 29 Feb 2024 12:24:00 -0800 Subject: [PATCH 19/68] Raise error when using magnetic's SourceField Make `UniformBackgroundField` to raise an error if `parameters` is being passed to the constructor and add test for it. --- SimPEG/potential_fields/magnetics/sources.py | 23 ++++++++++++++----- tests/pf/test_mag_uniform_background_field.py | 19 +++++++++++++++ 2 files changed, 36 insertions(+), 6 deletions(-) create mode 100644 tests/pf/test_mag_uniform_background_field.py diff --git a/SimPEG/potential_fields/magnetics/sources.py b/SimPEG/potential_fields/magnetics/sources.py index 6c9d13c50a..397d310f60 100644 --- a/SimPEG/potential_fields/magnetics/sources.py +++ b/SimPEG/potential_fields/magnetics/sources.py @@ -26,11 +26,22 @@ class UniformBackgroundField(BaseSrc): def __init__( self, receiver_list=None, - amplitude=50000, - inclination=90, - declination=0, - **kwargs + amplitude=50000.0, + inclination=90.0, + declination=0.0, + **kwargs, ): + # Raise errors on 'parameters' argument + # The parameters argument was supported in the deprecated SourceField + # class. We would like to raise an error in case the user passes it + # so the class doesn't behave differently than expected. + if (key := "parameters") in kwargs: + raise TypeError( + f"'{key}' property has been removed." + "Please pass the amplitude, inclination and declination" + " through their own arguments." + ) + self.amplitude = amplitude self.inclination = inclination self.declination = declination @@ -39,7 +50,7 @@ def __init__( @property def amplitude(self): - """Amplitude of the inducing backgound field. + """Amplitude of the inducing background field. 
Returns ------- @@ -92,7 +103,7 @@ def b0(self): ) -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", future_warn=True, error=True) class SourceField(UniformBackgroundField): """Source field for magnetics integral formulation diff --git a/tests/pf/test_mag_uniform_background_field.py b/tests/pf/test_mag_uniform_background_field.py new file mode 100644 index 0000000000..df7cc5ed76 --- /dev/null +++ b/tests/pf/test_mag_uniform_background_field.py @@ -0,0 +1,19 @@ +""" +Test the UniformBackgroundField class +""" +import pytest +from SimPEG.potential_fields.magnetics import UniformBackgroundField + + +def test_invalid_parameters_argument(): + """ + Test if error is raised after passing 'parameters' as argument + """ + parameters = (1, 35, 60) + msg = ( + "'parameters' property has been removed." + "Please pass the amplitude, inclination and declination" + " through their own arguments." + ) + with pytest.raises(TypeError, match=msg): + UniformBackgroundField(parameters=parameters) From 507ed61b81a1e8398084805d3d2606c10205c7f6 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 29 Feb 2024 12:25:30 -0800 Subject: [PATCH 20/68] Update docstring in magnetic survey --- SimPEG/potential_fields/magnetics/survey.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SimPEG/potential_fields/magnetics/survey.py b/SimPEG/potential_fields/magnetics/survey.py index beed236268..98ac827a5c 100644 --- a/SimPEG/potential_fields/magnetics/survey.py +++ b/SimPEG/potential_fields/magnetics/survey.py @@ -9,7 +9,7 @@ class Survey(BaseSurvey): Parameters ---------- - source_field : SimPEG.potential_fields.magnetics.sources.SourceField + source_field : SimPEG.potential_fields.magnetics.sources.UniformBackgroundField A source object that defines the Earth's inducing field """ From e2b92772102643849c10353fedbaf30283dedd8e Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 29 Feb 2024 13:44:56 -0800 Subject: [PATCH 
21/68] Update tests/pf to use the UniformBackgroundField --- tests/pf/test_forward_Mag_Linear.py | 36 ++++++++++++------- tests/pf/test_forward_PFproblem.py | 8 +++-- tests/pf/test_mag_MVI_Octree.py | 9 +++-- tests/pf/test_mag_inversion_linear.py | 9 +++-- tests/pf/test_mag_inversion_linear_Octree.py | 9 +++-- tests/pf/test_mag_nonLinear_Amplitude.py | 16 ++++++--- tests/pf/test_mag_vector_amplitude.py | 9 +++-- tests/pf/test_pf_quadtree_inversion_linear.py | 27 ++++++++++---- tests/pf/test_sensitivity_PFproblem.py | 2 +- 9 files changed, 91 insertions(+), 34 deletions(-) diff --git a/tests/pf/test_forward_Mag_Linear.py b/tests/pf/test_forward_Mag_Linear.py index 662556b93e..c3125b6499 100644 --- a/tests/pf/test_forward_Mag_Linear.py +++ b/tests/pf/test_forward_Mag_Linear.py @@ -13,8 +13,8 @@ def test_ana_mag_forward(): nx = 5 ny = 5 - H0 = (50000.0, 60.0, 250.0) - b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 60.0, 250.0) + b0 = mag.analytics.IDTtoxyz(-h0_inclination, h0_declination, h0_amplitude) chi1 = 0.01 chi2 = 0.02 @@ -62,7 +62,10 @@ def get_block_inds(grid, block): rxLoc = mag.Point(locXyz, components=components) srcField = mag.UniformBackgroundField( - [rxLoc], amplitude=H0[0], inclination=H0[1], declination=H0[2] + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, ) survey = mag.Survey(srcField) @@ -219,8 +222,8 @@ def test_ana_mag_grad_forward(): nx = 5 ny = 5 - H0 = (50000.0, 60.0, 250.0) - b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 60.0, 250.0) + b0 = mag.analytics.IDTtoxyz(-h0_inclination, h0_declination, h0_amplitude) chi1 = 0.01 chi2 = 0.02 @@ -268,7 +271,10 @@ def get_block_inds(grid, block): rxLoc = mag.Point(locXyz, components=components) srcField = mag.UniformBackgroundField( - [rxLoc], amplitude=H0[0], inclination=H0[1], declination=H0[2] + [rxLoc], + 
amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, ) survey = mag.Survey(srcField) @@ -315,8 +321,8 @@ def test_ana_mag_vec_forward(): nx = 5 ny = 5 - H0 = (50000.0, 60.0, 250.0) - b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 60.0, 250.0) + b0 = mag.analytics.IDTtoxyz(-h0_inclination, h0_declination, h0_amplitude) M1 = utils.mat_utils.dip_azimuth2cartesian(45, -40) * 0.05 M2 = utils.mat_utils.dip_azimuth2cartesian(120, 32) * 0.1 @@ -362,7 +368,10 @@ def get_block_inds(grid, block): rxLoc = mag.Point(locXyz, components=components) srcField = mag.UniformBackgroundField( - [rxLoc], amplitude=H0[0], inclination=H0[1], declination=H0[2] + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, ) survey = mag.Survey(srcField) @@ -403,8 +412,8 @@ def test_ana_mag_amp_forward(): nx = 5 ny = 5 - H0 = (50000.0, 60.0, 250.0) - b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 60.0, 250.0) + b0 = mag.analytics.IDTtoxyz(-h0_inclination, h0_declination, h0_amplitude) M1 = utils.mat_utils.dip_azimuth2cartesian(45, -40) * 0.05 M2 = utils.mat_utils.dip_azimuth2cartesian(120, 32) * 0.1 @@ -450,7 +459,10 @@ def get_block_inds(grid, block): rxLoc = mag.Point(locXyz, components=components) srcField = mag.UniformBackgroundField( - [rxLoc], amplitude=H0[0], inclination=H0[1], declination=H0[2] + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, ) survey = mag.Survey(srcField) diff --git a/tests/pf/test_forward_PFproblem.py b/tests/pf/test_forward_PFproblem.py index 65793bb650..b0914da781 100644 --- a/tests/pf/test_forward_PFproblem.py +++ b/tests/pf/test_forward_PFproblem.py @@ -12,7 +12,6 @@ def setUp(self): Inc = 45.0 Dec = 45.0 Btot = 51000 - H0 = (Btot, Inc, Dec) self.b0 = mag.analytics.IDTtoxyz(-Inc, Dec, Btot) @@ -40,7 
+39,12 @@ def setUp(self): self.yr = yr self.rxLoc = np.c_[utils.mkvc(X), utils.mkvc(Y), utils.mkvc(Z)] receivers = mag.Point(self.rxLoc, components=components) - srcField = mag.SourceField([receivers], parameters=H0) + srcField = mag.UniformBackgroundField( + receiver_list=[receivers], + amplitude=Btot, + inclination=Inc, + declination=Dec, + ) self.survey = mag.Survey(srcField) diff --git a/tests/pf/test_mag_MVI_Octree.py b/tests/pf/test_mag_MVI_Octree.py index 49809e4c7d..537c4b1720 100644 --- a/tests/pf/test_mag_MVI_Octree.py +++ b/tests/pf/test_mag_MVI_Octree.py @@ -20,7 +20,7 @@ class MVIProblemTest(unittest.TestCase): def setUp(self): np.random.seed(0) - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # The magnetization is set along a different # direction (induced + remanence) @@ -46,7 +46,12 @@ def setUp(self): # Create a MAGsurvey xyzLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] rxLoc = mag.Point(xyzLoc) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = mag.Survey(srcField) # Create a mesh diff --git a/tests/pf/test_mag_inversion_linear.py b/tests/pf/test_mag_inversion_linear.py index 868ad8b34b..611935adf4 100644 --- a/tests/pf/test_mag_inversion_linear.py +++ b/tests/pf/test_mag_inversion_linear.py @@ -23,7 +23,7 @@ def setUp(self): np.random.seed(0) # Define the inducing field parameter - H0 = (50000, 90, 0) + h0_amplitude, h0_inclination, h0_declination = (50000, 90, 0) # Create a mesh dx = 5.0 @@ -59,7 +59,12 @@ def setUp(self): # Create a MAGsurvey rxLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] rxLoc = mag.Point(rxLoc) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + 
declination=h0_declination, + ) survey = mag.Survey(srcField) # We can now create a susceptibility model and generate data diff --git a/tests/pf/test_mag_inversion_linear_Octree.py b/tests/pf/test_mag_inversion_linear_Octree.py index d30e8a7184..64ce31e0bd 100644 --- a/tests/pf/test_mag_inversion_linear_Octree.py +++ b/tests/pf/test_mag_inversion_linear_Octree.py @@ -26,7 +26,7 @@ def setUp(self): # From old convention, field orientation is given as an # azimuth from North (positive clockwise) # and dip from the horizontal (positive downward). - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # Create a mesh h = [5, 5, 5] @@ -55,7 +55,12 @@ def setUp(self): # Create a MAGsurvey xyzLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] rxLoc = mag.Point(xyzLoc) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = mag.Survey(srcField) # self.mesh.finalize() diff --git a/tests/pf/test_mag_nonLinear_Amplitude.py b/tests/pf/test_mag_nonLinear_Amplitude.py index 318964328f..186fdc1343 100644 --- a/tests/pf/test_mag_nonLinear_Amplitude.py +++ b/tests/pf/test_mag_nonLinear_Amplitude.py @@ -21,7 +21,7 @@ class AmpProblemTest(unittest.TestCase): def setUp(self): # We will assume a vertical inducing field - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # The magnetization is set along a different direction (induced + remanence) M = np.array([45.0, 90.0]) @@ -46,8 +46,11 @@ def setUp(self): # Create a MAGsurvey rxLoc = np.c_[mkvc(X.T), mkvc(Y.T), mkvc(Z.T)] receiver_list = magnetics.receivers.Point(rxLoc) - srcField = magnetics.sources.SourceField( - receiver_list=[receiver_list], parameters=H0 + srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[receiver_list], + amplitude=h0_amplitude, + 
inclination=h0_inclination, + declination=h0_declination, ) survey = magnetics.survey.Survey(srcField) @@ -185,8 +188,11 @@ def setUp(self): # receiver_list = magnetics.receivers.Point(rxLoc, components=["bx", "by", "bz"]) - srcField = magnetics.sources.SourceField( - receiver_list=[receiver_list], parameters=H0 + srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[receiver_list], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, ) surveyAmp = magnetics.survey.Survey(srcField) diff --git a/tests/pf/test_mag_vector_amplitude.py b/tests/pf/test_mag_vector_amplitude.py index 84d56ee320..af4619e14f 100644 --- a/tests/pf/test_mag_vector_amplitude.py +++ b/tests/pf/test_mag_vector_amplitude.py @@ -20,7 +20,7 @@ class MVIProblemTest(unittest.TestCase): def setUp(self): np.random.seed(0) - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # The magnetization is set along a different # direction (induced + remanence) @@ -46,7 +46,12 @@ def setUp(self): # Create a MAGsurvey xyzLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] rxLoc = mag.Point(xyzLoc) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = mag.Survey(srcField) # Create a mesh diff --git a/tests/pf/test_pf_quadtree_inversion_linear.py b/tests/pf/test_pf_quadtree_inversion_linear.py index 96bd691749..46bcf77c67 100644 --- a/tests/pf/test_pf_quadtree_inversion_linear.py +++ b/tests/pf/test_pf_quadtree_inversion_linear.py @@ -104,9 +104,14 @@ def create_gravity_sim_flat(self, block_value=1.0, noise_floor=0.01): def create_magnetics_sim_flat(self, block_value=1.0, noise_floor=0.01): # Create a magnetic survey - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) mag_rxLoc = 
magnetics.Point(data_xyz_flat) - mag_srcField = magnetics.SourceField([mag_rxLoc], parameters=H0) + mag_srcField = magnetics.UniformBackgroundField( + [mag_rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) mag_survey = magnetics.Survey(mag_srcField) # Create the magnetics forward model operator @@ -159,9 +164,14 @@ def create_gravity_sim(self, block_value=1.0, noise_floor=0.01): def create_magnetics_sim(self, block_value=1.0, noise_floor=0.01): # Create a magnetic survey - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) mag_rxLoc = magnetics.Point(data_xyz) - mag_srcField = magnetics.SourceField([mag_rxLoc], parameters=H0) + mag_srcField = magnetics.UniformBackgroundField( + [mag_rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) mag_survey = magnetics.Survey(mag_srcField) # Create the magnetics forward model operator @@ -215,9 +225,14 @@ def create_gravity_sim_active(self, block_value=1.0, noise_floor=0.01): def create_magnetics_sim_active(self, block_value=1.0, noise_floor=0.01): # Create a magnetic survey - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) mag_rxLoc = magnetics.Point(data_xyz) - mag_srcField = magnetics.SourceField([mag_rxLoc], parameters=H0) + mag_srcField = magnetics.UniformBackgroundField( + receiver_list=[mag_rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) mag_survey = magnetics.Survey(mag_srcField) # Create the magnetics forward model operator diff --git a/tests/pf/test_sensitivity_PFproblem.py b/tests/pf/test_sensitivity_PFproblem.py index 99d5fb4f37..53c96c96cb 100644 --- a/tests/pf/test_sensitivity_PFproblem.py +++ b/tests/pf/test_sensitivity_PFproblem.py @@ -41,7 +41,7 @@ # # components = ['bx', 'by', 'bz'] # receivers = mag.Point(rxLoc, components=components) -# srcField = mag.SourceField([receivers], 
parameters=H0) +# srcField = mag.UniformBackgroundField([receivers], parameters=H0) # # self.survey = mag.Survey(srcField) # From 2c0f6423ee3a2767aac00f51b614216c995c8c5c Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 29 Feb 2024 13:49:38 -0800 Subject: [PATCH 22/68] Update other tests to use UniformBackgroundField --- tests/base/test_directives.py | 9 +++++++-- tests/dask/test_mag_MVI_Octree.py | 9 +++++++-- tests/dask/test_mag_inversion_linear_Octree.py | 9 +++++++-- tests/dask/test_mag_nonLinear_Amplitude.py | 16 +++++++++++----- tests/utils/test_io_utils.py | 11 +++++++---- 5 files changed, 39 insertions(+), 15 deletions(-) diff --git a/tests/base/test_directives.py b/tests/base/test_directives.py index 8637e633af..6a939d9f7b 100644 --- a/tests/base/test_directives.py +++ b/tests/base/test_directives.py @@ -64,11 +64,16 @@ def setUp(self): mesh = discretize.TensorMesh([4, 4, 4]) # Magnetic inducing field parameter (A,I,D) - B = [50000, 90, 0] + h0_amplitude, h0_inclination, h0_declination = (50000, 90, 0) # Create a MAGsurvey rx = mag.Point(np.vstack([[0.25, 0.25, 0.25], [-0.25, -0.25, 0.25]])) - srcField = mag.UniformBackgroundField([rx], parameters=(B[0], B[1], B[2])) + srcField = mag.UniformBackgroundField( + receiver_list=[rx], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = mag.Survey(srcField) # Create the forward model operator diff --git a/tests/dask/test_mag_MVI_Octree.py b/tests/dask/test_mag_MVI_Octree.py index e7e5699224..7189e99df4 100644 --- a/tests/dask/test_mag_MVI_Octree.py +++ b/tests/dask/test_mag_MVI_Octree.py @@ -21,7 +21,7 @@ class MVIProblemTest(unittest.TestCase): def setUp(self): np.random.seed(0) - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # The magnetization is set along a different # direction (induced + remanence) @@ -47,7 +47,12 @@ def setUp(self): # Create a MAGsurvey xyzLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), 
utils.mkvc(Z.T)] rxLoc = mag.Point(xyzLoc) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = mag.Survey(srcField) # Create a mesh diff --git a/tests/dask/test_mag_inversion_linear_Octree.py b/tests/dask/test_mag_inversion_linear_Octree.py index cf16cb1578..098423b4f8 100644 --- a/tests/dask/test_mag_inversion_linear_Octree.py +++ b/tests/dask/test_mag_inversion_linear_Octree.py @@ -29,7 +29,7 @@ def setUp(self): # From old convention, field orientation is given as an # azimuth from North (positive clockwise) # and dip from the horizontal (positive downward). - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # Create a mesh h = [5, 5, 5] @@ -58,7 +58,12 @@ def setUp(self): # Create a MAGsurvey xyzLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] rxLoc = mag.Point(xyzLoc) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = mag.Survey(srcField) self.mesh = mesh_utils.mesh_builder_xyz( diff --git a/tests/dask/test_mag_nonLinear_Amplitude.py b/tests/dask/test_mag_nonLinear_Amplitude.py index 0118ac78f9..5b0dfdf6f6 100644 --- a/tests/dask/test_mag_nonLinear_Amplitude.py +++ b/tests/dask/test_mag_nonLinear_Amplitude.py @@ -22,7 +22,7 @@ class AmpProblemTest(unittest.TestCase): def setUp(self): # We will assume a vertical inducing field - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # The magnetization is set along a different direction (induced + remanence) M = np.array([45.0, 90.0]) @@ -47,8 +47,11 @@ def setUp(self): # Create a MAGsurvey rxLoc = np.c_[mkvc(X.T), mkvc(Y.T), mkvc(Z.T)] receiver_list = magnetics.receivers.Point(rxLoc) 
- srcField = magnetics.sources.SourceField( - receiver_list=[receiver_list], parameters=H0 + srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[receiver_list], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, ) survey = magnetics.survey.Survey(srcField) @@ -186,8 +189,11 @@ def setUp(self): # receiver_list = magnetics.receivers.Point(rxLoc, components=["bx", "by", "bz"]) - srcField = magnetics.sources.SourceField( - receiver_list=[receiver_list], parameters=H0 + srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[receiver_list], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, ) surveyAmp = magnetics.survey.Survey(srcField) diff --git a/tests/utils/test_io_utils.py b/tests/utils/test_io_utils.py index 54e6282fe0..76159b6a6a 100644 --- a/tests/utils/test_io_utils.py +++ b/tests/utils/test_io_utils.py @@ -242,9 +242,12 @@ def setUp(self): xyz = np.c_[x, y, z] rx = magnetics.receivers.Point(xyz, components="tmi") - inducing_field = (50000.0, 60.0, 15.0) - source_field = magnetics.sources.SourceField( - receiver_list=rx, parameters=inducing_field + h0_amplitude, h0_inclination, h0_declination = (50000.0, 60.0, 15.0) + source_field = magnetics.sources.UniformBackgroundField( + receiver_list=rx, + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, ) survey = magnetics.survey.Survey(source_field) @@ -253,7 +256,7 @@ def setUp(self): self.std = std rx2 = magnetics.receivers.Point(xyz, components="tmi") - src_bad = magnetics.sources.SourceField([rx, rx2]) + src_bad = magnetics.sources.UniformBackgroundField([rx, rx2]) survey_bad = magnetics.survey.Survey(src_bad) self.survey_bad = survey_bad From 5ca010a46e547964b2366c0454545dce65741593 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 29 Feb 2024 13:56:35 -0800 Subject: [PATCH 23/68] Update examples and tutorials --- examples/01-maps/plot_sumMap.py | 9 +++++++-- 
.../plot_inv_mag_MVI_Sparse_TreeMesh.py | 9 +++++++-- .../plot_inv_mag_MVI_VectorAmplitude.py | 9 +++++++-- .../plot_inv_mag_nonLinear_Amplitude.py | 16 +++++++++++++--- examples/_archived/plot_inv_mag_linear.py | 9 +++++++-- .../04-magnetics/plot_2a_magnetics_induced.py | 8 +++++--- tutorials/04-magnetics/plot_2b_magnetics_mvi.py | 8 +++++--- .../plot_inv_2a_magnetics_induced.py | 8 +++++--- .../plot_inv_3_cross_gradient_pf.py | 8 +++++--- 9 files changed, 61 insertions(+), 23 deletions(-) diff --git a/examples/01-maps/plot_sumMap.py b/examples/01-maps/plot_sumMap.py index 270e7cec22..dd7a1d012b 100644 --- a/examples/01-maps/plot_sumMap.py +++ b/examples/01-maps/plot_sumMap.py @@ -30,7 +30,7 @@ def run(plotIt=True): - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # Create a mesh dx = 5.0 @@ -62,7 +62,12 @@ def run(plotIt=True): # Create a MAGsurvey rxLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] rxLoc = magnetics.Point(rxLoc) - srcField = magnetics.SourceField([rxLoc], parameters=H0) + srcField = magnetics.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = magnetics.Survey(srcField) # We can now create a susceptibility model and generate data diff --git a/examples/03-magnetics/plot_inv_mag_MVI_Sparse_TreeMesh.py b/examples/03-magnetics/plot_inv_mag_MVI_Sparse_TreeMesh.py index 9c420650b6..7cc54915f2 100644 --- a/examples/03-magnetics/plot_inv_mag_MVI_Sparse_TreeMesh.py +++ b/examples/03-magnetics/plot_inv_mag_MVI_Sparse_TreeMesh.py @@ -51,7 +51,7 @@ # np.random.seed(1) # We will assume a vertical inducing field -H0 = (50000.0, 90.0, 0.0) +h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # The magnetization is set along a different direction (induced + remanence) M = np.array([45.0, 90.0]) @@ -74,7 +74,12 @@ # Create a MAGsurvey xyzLoc = np.c_[mkvc(X.T), mkvc(Y.T), mkvc(Z.T)] rxLoc = 
magnetics.receivers.Point(xyzLoc) -srcField = magnetics.sources.SourceField(receiver_list=[rxLoc], parameters=H0) +srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, +) survey = magnetics.survey.Survey(srcField) # Here how the topography looks with a quick interpolation, just a Gaussian... diff --git a/examples/03-magnetics/plot_inv_mag_MVI_VectorAmplitude.py b/examples/03-magnetics/plot_inv_mag_MVI_VectorAmplitude.py index bc23e82d3c..0e8740197d 100644 --- a/examples/03-magnetics/plot_inv_mag_MVI_VectorAmplitude.py +++ b/examples/03-magnetics/plot_inv_mag_MVI_VectorAmplitude.py @@ -44,7 +44,7 @@ # np.random.seed(1) # We will assume a vertical inducing field -H0 = (50000.0, 90.0, 0.0) +h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # Create grid of points for topography # Lets create a simple Gaussian topo and set the active cells @@ -63,7 +63,12 @@ # Create a MAGsurvey xyzLoc = np.c_[mkvc(X.T), mkvc(Y.T), mkvc(Z.T)] rxLoc = magnetics.receivers.Point(xyzLoc) -srcField = magnetics.sources.SourceField(receiver_list=[rxLoc], parameters=H0) +srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, +) survey = magnetics.survey.Survey(srcField) ############################################################################### diff --git a/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py b/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py index 3f43150103..8c5cfb1af8 100644 --- a/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py +++ b/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py @@ -50,7 +50,7 @@ # # We will assume a vertical inducing field -H0 = (50000.0, 90.0, 0.0) +h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # The magnetization is set along a different direction (induced + 
remanence) M = np.array([45.0, 90.0]) @@ -75,7 +75,12 @@ # Create a MAGsurvey rxLoc = np.c_[mkvc(X.T), mkvc(Y.T), mkvc(Z.T)] receiver_list = magnetics.receivers.Point(rxLoc) -srcField = magnetics.sources.SourceField(receiver_list=[receiver_list], parameters=H0) +srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[receiver_list], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, +) survey = magnetics.survey.Survey(srcField) # Here how the topography looks with a quick interpolation, just a Gaussian... @@ -267,7 +272,12 @@ # receiver_list = magnetics.receivers.Point(rxLoc, components=["bx", "by", "bz"]) -srcField = magnetics.sources.SourceField(receiver_list=[receiver_list], parameters=H0) +srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[receiver_list], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, +) surveyAmp = magnetics.survey.Survey(srcField) simulation = magnetics.simulation.Simulation3DIntegral( diff --git a/examples/_archived/plot_inv_mag_linear.py b/examples/_archived/plot_inv_mag_linear.py index 09bfe42a64..62d5114807 100644 --- a/examples/_archived/plot_inv_mag_linear.py +++ b/examples/_archived/plot_inv_mag_linear.py @@ -26,7 +26,7 @@ def run(plotIt=True): # Define the inducing field parameter - H0 = (50000, 90, 0) + h0_amplitude, h0_inclination, h0_declination = (50000, 90, 0) # Create a mesh dx = 5.0 @@ -64,7 +64,12 @@ def run(plotIt=True): # Create a MAGsurvey rxLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] rxLoc = magnetics.receivers.Point(rxLoc, components=["tmi"]) - srcField = magnetics.sources.SourceField(receiver_list=[rxLoc], parameters=H0) + srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = magnetics.survey.Survey(srcField) # We can now create a susceptibility model and generate data diff 
--git a/tutorials/04-magnetics/plot_2a_magnetics_induced.py b/tutorials/04-magnetics/plot_2a_magnetics_induced.py index 75e8b2bca1..af08030220 100644 --- a/tutorials/04-magnetics/plot_2a_magnetics_induced.py +++ b/tutorials/04-magnetics/plot_2a_magnetics_induced.py @@ -82,10 +82,12 @@ inclination = 90 declination = 0 strength = 50000 -inducing_field = (strength, inclination, declination) -source_field = magnetics.sources.SourceField( - receiver_list=receiver_list, parameters=inducing_field +source_field = magnetics.sources.UniformBackgroundField( + receiver_list=receiver_list, + amplitude=strength, + inclination=inclination, + declination=declination, ) # Define the survey diff --git a/tutorials/04-magnetics/plot_2b_magnetics_mvi.py b/tutorials/04-magnetics/plot_2b_magnetics_mvi.py index be8f7a63b7..6ffe6ac691 100644 --- a/tutorials/04-magnetics/plot_2b_magnetics_mvi.py +++ b/tutorials/04-magnetics/plot_2b_magnetics_mvi.py @@ -81,10 +81,12 @@ field_inclination = 60 field_declination = 30 field_strength = 50000 -inducing_field = (field_strength, field_inclination, field_declination) -source_field = magnetics.sources.SourceField( - receiver_list=receiver_list, parameters=inducing_field +source_field = magnetics.sources.UniformBackgroundField( + receiver_list=receiver_list, + amplitude=field_strength, + inclination=field_inclination, + declination=field_declination, ) # Define the survey diff --git a/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py b/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py index 0b4bb43ec7..07074a525d 100644 --- a/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py +++ b/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py @@ -160,10 +160,12 @@ inclination = 90 declination = 0 strength = 50000 -inducing_field = (strength, inclination, declination) -source_field = magnetics.sources.SourceField( - receiver_list=receiver_list, parameters=inducing_field +source_field = magnetics.sources.UniformBackgroundField( + 
receiver_list=receiver_list, + amplitude=strength, + inclination=inclination, + declination=declination, ) # Define the survey diff --git a/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py b/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py index 56ef62c72a..27bcfb523e 100755 --- a/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py +++ b/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py @@ -196,11 +196,13 @@ inclination = 90 declination = 0 strength = 50000 -inducing_field = (strength, inclination, declination) # Define the source field and survey for gravity data -source_field_mag = magnetics.sources.SourceField( - receiver_list=[receiver_mag], parameters=inducing_field +source_field_mag = magnetics.sources.UniformBackgroundField( + receiver_list=[receiver_mag], + amplitude=strength, + inclination=inclination, + declination=declination, ) survey_mag = magnetics.survey.Survey(source_field_mag) From 6bce940c0fab0e62a1fe506de6f3297fedc6fe8c Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 29 Feb 2024 16:24:23 -0800 Subject: [PATCH 24/68] Make sure that SourceField will throw an error Correctly choose the arguments for the `@deprecateclass` decorator and add test checking that creating a `SourceField` object will raise an error. 
--- SimPEG/potential_fields/magnetics/sources.py | 2 +- tests/pf/test_mag_uniform_background_field.py | 11 ++++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/sources.py b/SimPEG/potential_fields/magnetics/sources.py index 397d310f60..c62bc80b23 100644 --- a/SimPEG/potential_fields/magnetics/sources.py +++ b/SimPEG/potential_fields/magnetics/sources.py @@ -103,7 +103,7 @@ def b0(self): ) -@deprecate_class(removal_version="0.19.0", future_warn=True, error=True) +@deprecate_class(removal_version="0.19.0", error=True) class SourceField(UniformBackgroundField): """Source field for magnetics integral formulation diff --git a/tests/pf/test_mag_uniform_background_field.py b/tests/pf/test_mag_uniform_background_field.py index df7cc5ed76..18989d4d09 100644 --- a/tests/pf/test_mag_uniform_background_field.py +++ b/tests/pf/test_mag_uniform_background_field.py @@ -2,7 +2,7 @@ Test the UniformBackgroundField class """ import pytest -from SimPEG.potential_fields.magnetics import UniformBackgroundField +from SimPEG.potential_fields.magnetics import UniformBackgroundField, SourceField def test_invalid_parameters_argument(): @@ -17,3 +17,12 @@ def test_invalid_parameters_argument(): ) with pytest.raises(TypeError, match=msg): UniformBackgroundField(parameters=parameters) + + +def test_deprecated_source_field(): + """ + Test if instantiating a magnetics.source.SourceField object raises an error + """ + msg = "SourceField has been removed, please use UniformBackgroundField." + with pytest.raises(NotImplementedError, match=msg): + SourceField() From bae455a9b10aba6fd32074c60e96d32410495fbe Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 5 Mar 2024 15:19:11 -0800 Subject: [PATCH 25/68] Remove deprecated regularization classes (#1365) Raise errors when constructing deprecated regularization classes. Add test that checks if those errors are being raised. 
Replace the removed regularization classes that were still being used in a few pieces of the code and in the tests. Use the new classes instead. Add removed classes to the `IGNORE_ME` list in `test_regularization.py` so other tests don't fail. --- SimPEG/directives/directives.py | 18 +-------- SimPEG/directives/pgi_directives.py | 8 +--- .../spectral_induced_polarization/run.py | 10 +++-- SimPEG/regularization/__init__.py | 18 ++++----- tests/base/test_regularization.py | 38 ++++++++++++++++++- tests/em/static/test_SIP_2D_jvecjtvecadj.py | 12 ++++-- 6 files changed, 65 insertions(+), 39 deletions(-) diff --git a/SimPEG/directives/directives.py b/SimPEG/directives/directives.py index 6f38db40be..5141bebaf3 100644 --- a/SimPEG/directives/directives.py +++ b/SimPEG/directives/directives.py @@ -14,7 +14,6 @@ Sparse, SparseSmallness, PGIsmallness, - PGIwithNonlinearRelationshipsSmallness, SmoothnessFirstOrder, SparseSmoothness, BaseSimilarityMeasure, @@ -716,7 +715,6 @@ def initialize(self): Smallness, SparseSmallness, PGIsmallness, - PGIwithNonlinearRelationshipsSmallness, ), ): smallness += [obj] @@ -1288,13 +1286,7 @@ def initialize(self): np.r_[ i, j, - ( - isinstance( - regpart, - PGIwithNonlinearRelationshipsSmallness, - ) - or isinstance(regpart, PGIsmallness) - ), + isinstance(regpart, PGIsmallness), ] ) for i, regobjcts in enumerate(self.invProb.reg.objfcts) @@ -1332,13 +1324,7 @@ def initialize(self): ( np.r_[ j, - ( - isinstance( - regpart, - PGIwithNonlinearRelationshipsSmallness, - ) - or isinstance(regpart, PGIsmallness) - ), + isinstance(regpart, PGIsmallness), ] ) for j, regpart in enumerate(self.invProb.reg.objfcts) diff --git a/SimPEG/directives/pgi_directives.py b/SimPEG/directives/pgi_directives.py index e8fb543ee1..0cc141f026 100644 --- a/SimPEG/directives/pgi_directives.py +++ b/SimPEG/directives/pgi_directives.py @@ -12,7 +12,6 @@ from ..regularization import ( PGI, PGIsmallness, - PGIwithRelationships, SmoothnessFirstOrder, SparseSmoothness, ) @@ 
-363,12 +362,7 @@ def initialize(self): if getattr(self.reg.objfcts[0], "objfcts", None) is not None: # Find the petrosmallness terms in a two-levels combo-regularization. petrosmallness = np.where( - np.r_[ - [ - isinstance(regpart, (PGI, PGIwithRelationships)) - for regpart in self.reg.objfcts - ] - ] + np.r_[[isinstance(regpart, PGI) for regpart in self.reg.objfcts]] )[0][0] self.petrosmallness = petrosmallness diff --git a/SimPEG/electromagnetics/static/spectral_induced_polarization/run.py b/SimPEG/electromagnetics/static/spectral_induced_polarization/run.py index 18b766b3e1..bf568e31d7 100644 --- a/SimPEG/electromagnetics/static/spectral_induced_polarization/run.py +++ b/SimPEG/electromagnetics/static/spectral_induced_polarization/run.py @@ -142,9 +142,13 @@ def run_inversion( m_lower = np.r_[eta_lower, tau_lower, c_lower] # Set up regularization - reg_eta = regularization.Simple(mesh, mapping=wires.eta, indActive=actind) - reg_tau = regularization.Simple(mesh, mapping=wires.tau, indActive=actind) - reg_c = regularization.Simple(mesh, mapping=wires.c, indActive=actind) + reg_eta = regularization.WeightedLeastSquares( + mesh, mapping=wires.eta, indActive=actind + ) + reg_tau = regularization.WeightedLeastSquares( + mesh, mapping=wires.tau, indActive=actind + ) + reg_c = regularization.WeightedLeastSquares(mesh, mapping=wires.c, indActive=actind) # Todo: diff --git a/SimPEG/regularization/__init__.py b/SimPEG/regularization/__init__.py index c379bbd202..334e4ed986 100644 --- a/SimPEG/regularization/__init__.py +++ b/SimPEG/regularization/__init__.py @@ -171,21 +171,21 @@ ) -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class SimpleSmall(Smallness): """Deprecated class, replaced by Smallness.""" pass -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class SimpleSmoothDeriv(SmoothnessFirstOrder): """Deprecated class, replaced 
by SmoothnessFirstOrder.""" pass -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class Simple(WeightedLeastSquares): """Deprecated class, replaced by WeightedLeastSquares.""" @@ -201,7 +201,7 @@ def __init__(self, mesh=None, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0, **kwargs): ) -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class Tikhonov(WeightedLeastSquares): """Deprecated class, replaced by WeightedLeastSquares.""" @@ -218,28 +218,28 @@ def __init__( ) -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class Small(Smallness): """Deprecated class, replaced by Smallness.""" pass -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class SmoothDeriv(SmoothnessFirstOrder): """Deprecated class, replaced by SmoothnessFirstOrder.""" pass -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class SmoothDeriv2(SmoothnessSecondOrder): """Deprecated class, replaced by SmoothnessSecondOrder.""" pass -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class PGIwithNonlinearRelationshipsSmallness(PGIsmallness): """Deprecated class, replaced by PGIsmallness.""" @@ -247,7 +247,7 @@ def __init__(self, gmm, **kwargs): super().__init__(gmm, non_linear_relationships=True, **kwargs) -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class PGIwithRelationships(PGI): """Deprecated class, replaced by PGI.""" diff --git a/tests/base/test_regularization.py b/tests/base/test_regularization.py index 6e73479785..18aac616b6 100644 --- a/tests/base/test_regularization.py +++ b/tests/base/test_regularization.py @@ -39,6 +39,16 @@ 
"BaseAmplitude", "VectorAmplitude", "CrossReferenceRegularization", + # Removed regularization classes that raise error on instantiation + "PGIwithNonlinearRelationshipsSmallness", + "PGIwithRelationships", + "Simple", + "SimpleSmall", + "SimpleSmoothDeriv", + "Small", + "SmoothDeriv", + "SmoothDeriv2", + "Tikhonov", ] @@ -457,7 +467,7 @@ def test_nC_residual(self): mapping = maps.ExpMap(mesh) * maps.SurjectVertical1D(mesh) * actMap regMesh = discretize.TensorMesh([mesh.h[2][mapping.maps[-1].indActive]]) - reg = regularization.Simple(regMesh) + reg = regularization.WeightedLeastSquares(regMesh) self.assertTrue(reg._nC_residual == regMesh.nC) self.assertTrue(all([fct._nC_residual == regMesh.nC for fct in reg.objfcts])) @@ -793,5 +803,31 @@ def test_weights(self, mesh): BaseRegularization(mesh, weights=weights, cell_weights=weights) +class TestRemovedRegularizations: + """ + Test if errors are raised after creating removed regularization classes. + """ + + @pytest.mark.parametrize( + "regularization_class", + ( + regularization.PGIwithNonlinearRelationshipsSmallness, + regularization.PGIwithRelationships, + regularization.Simple, + regularization.SimpleSmall, + regularization.SimpleSmoothDeriv, + regularization.Small, + regularization.SmoothDeriv, + regularization.SmoothDeriv2, + regularization.Tikhonov, + ), + ) + def test_removed_class(self, regularization_class): + class_name = regularization_class.__name__ + msg = f"{class_name} has been removed, please use." 
+ with pytest.raises(NotImplementedError, match=msg): + regularization_class() + + if __name__ == "__main__": unittest.main() diff --git a/tests/em/static/test_SIP_2D_jvecjtvecadj.py b/tests/em/static/test_SIP_2D_jvecjtvecadj.py index 887aad24db..a299c339f5 100644 --- a/tests/em/static/test_SIP_2D_jvecjtvecadj.py +++ b/tests/em/static/test_SIP_2D_jvecjtvecadj.py @@ -264,9 +264,15 @@ def setUp(self): dobs = problem.make_synthetic_data(mSynth, add_noise=True) # Now set up the problem to do some minimization dmis = data_misfit.L2DataMisfit(data=dobs, simulation=problem) - reg_eta = regularization.Simple(mesh, mapping=wires.eta, indActive=~airind) - reg_taui = regularization.Simple(mesh, mapping=wires.taui, indActive=~airind) - reg_c = regularization.Simple(mesh, mapping=wires.c, indActive=~airind) + reg_eta = regularization.WeightedLeastSquares( + mesh, mapping=wires.eta, indActive=~airind + ) + reg_taui = regularization.WeightedLeastSquares( + mesh, mapping=wires.taui, indActive=~airind + ) + reg_c = regularization.WeightedLeastSquares( + mesh, mapping=wires.c, indActive=~airind + ) reg = reg_eta + reg_taui + reg_c opt = optimization.InexactGaussNewton( maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6 From 7fe2c33736e9632fd9c36296d0f89e32a53dfea5 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 6 Mar 2024 11:46:56 -0800 Subject: [PATCH 26/68] Removed deprecated properties of UpdateSensitivityWeights (#1368) #### Summary Remove the `everyIter`, `threshold` and `normalization` properties of `UpdateSensitivityWeights`. Add tests to check if errors are being raised after passing removed arguments to its constructor. Update tests and examples using the removed arguments and properties. #### PR Checklist * [ ] If this is a work in progress PR, set as a Draft PR * [ ] Linted my code according to the [style guides](https://docs.simpeg.xyz/content/getting_started/contributing/code-style.html). 
* [ ] Added [tests](https://docs.simpeg.xyz/content/getting_started/practices.html#testing) to verify changes to the code. * [ ] Added necessary documentation to any new functions/classes following the expect [style](https://docs.simpeg.xyz/content/getting_started/practices.html#documentation). * [ ] Marked as ready for review (if this is was a draft PR), and converted to a Pull Request * [ ] Tagged ``@simpeg/simpeg-developers`` when ready for review. #### Reference issue Part of the solution for https://github.com/simpeg/simpeg/issues/1302 #### What does this implement/fix? #### Additional information Most of these changes were cherry-picked from #1306 --- SimPEG/directives/directives.py | 58 ++++------- examples/02-gravity/plot_inv_grav_tiled.py | 2 +- examples/_archived/plot_inv_grav_linear.py | 2 +- examples/_archived/plot_inv_mag_linear.py | 2 +- tests/base/test_directives.py | 96 ++++++++++++++++--- tests/dask/test_grav_inversion_linear.py | 2 +- tests/dask/test_mag_MVI_Octree.py | 2 +- tests/pf/test_mag_MVI_Octree.py | 2 +- tests/pf/test_mag_inversion_linear.py | 2 +- tests/pf/test_mag_vector_amplitude.py | 2 +- .../plot_inv_2_inversion_irls.py | 2 +- .../03-gravity/plot_inv_1a_gravity_anomaly.py | 2 +- .../plot_inv_1b_gravity_anomaly_irls.py | 2 +- .../plot_inv_2a_magnetics_induced.py | 2 +- tutorials/06-ip/plot_inv_2_dcip2d.py | 2 +- tutorials/06-ip/plot_inv_3_dcip3d.py | 2 +- .../plot_inv_3_cross_gradient_pf.py | 2 +- 17 files changed, 118 insertions(+), 66 deletions(-) diff --git a/SimPEG/directives/directives.py b/SimPEG/directives/directives.py index 5141bebaf3..23addd7899 100644 --- a/SimPEG/directives/directives.py +++ b/SimPEG/directives/directives.py @@ -2521,33 +2521,20 @@ def __init__( normalization_method="maximum", **kwargs, ): - if "everyIter" in kwargs.keys(): - warnings.warn( - "'everyIter' property is deprecated and will be removed in SimPEG 0.20.0." 
- "Please use 'every_iteration'.", - stacklevel=2, + # Raise errors on deprecated arguments + if (key := "everyIter") in kwargs.keys(): + raise TypeError( + f"'{key}' property has been removed. Please use 'every_iteration'.", ) - every_iteration = kwargs.pop("everyIter") - - if "threshold" in kwargs.keys(): - warnings.warn( - "'threshold' property is deprecated and will be removed in SimPEG 0.20.0." - "Please use 'threshold_value'.", - stacklevel=2, + if (key := "threshold") in kwargs.keys(): + raise TypeError( + f"'{key}' property has been removed. Please use 'threshold_value'.", ) - threshold_value = kwargs.pop("threshold") - - if "normalization" in kwargs.keys(): - warnings.warn( - "'normalization' property is deprecated and will be removed in SimPEG 0.20.0." + if (key := "normalization") in kwargs.keys(): + raise TypeError( + f"'{key}' property has been removed. " "Please define normalization using 'normalization_method'.", - stacklevel=2, ) - normalization_method = kwargs.pop("normalization") - if normalization_method is True: - normalization_method = "maximum" - else: - normalization_method = None super().__init__(**kwargs) @@ -2574,7 +2561,11 @@ def every_iteration(self, value): self._every_iteration = validate_type("every_iteration", value, bool) everyIter = deprecate_property( - every_iteration, "everyIter", "every_iteration", removal_version="0.20.0" + every_iteration, + "everyIter", + "every_iteration", + removal_version="0.20.0", + error=True, ) @property @@ -2603,7 +2594,11 @@ def threshold_value(self, value): self._threshold_value = validate_float("threshold_value", value, min_val=0.0) threshold = deprecate_property( - threshold_value, "threshold", "threshold_value", removal_version="0.20.0" + threshold_value, + "threshold", + "threshold_value", + removal_version="0.20.0", + error=True, ) @property @@ -2653,18 +2648,6 @@ def normalization_method(self): def normalization_method(self, value): if value is None: self._normalization_method = value - - elif 
isinstance(value, bool): - warnings.warn( - "Boolean type for 'normalization_method' is deprecated and will be removed in 0.20.0." - "Please use None, 'maximum' or 'minimum'.", - stacklevel=2, - ) - if value: - self._normalization_method = "maximum" - else: - self._normalization_method = None - else: self._normalization_method = validate_string( "normalization_method", value, string_list=["minimum", "maximum"] @@ -2675,6 +2658,7 @@ def normalization_method(self, value): "normalization", "normalization_method", removal_version="0.20.0", + error=True, ) def initialize(self): diff --git a/examples/02-gravity/plot_inv_grav_tiled.py b/examples/02-gravity/plot_inv_grav_tiled.py index cc8fe41f78..5ed4cd90e2 100644 --- a/examples/02-gravity/plot_inv_grav_tiled.py +++ b/examples/02-gravity/plot_inv_grav_tiled.py @@ -243,7 +243,7 @@ ) saveDict = directives.SaveOutputEveryIteration(save_txt=False) update_Jacobi = directives.UpdatePreconditioner() -sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) +sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) inv = inversion.BaseInversion( invProb, directiveList=[update_IRLS, sensitivity_weights, betaest, update_Jacobi, saveDict], diff --git a/examples/_archived/plot_inv_grav_linear.py b/examples/_archived/plot_inv_grav_linear.py index d84bcc5bd9..26b9cbf680 100644 --- a/examples/_archived/plot_inv_grav_linear.py +++ b/examples/_archived/plot_inv_grav_linear.py @@ -127,7 +127,7 @@ def run(plotIt=True): ) saveDict = directives.SaveOutputEveryIteration(save_txt=False) update_Jacobi = directives.UpdatePreconditioner() - sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) + sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) inv = inversion.BaseInversion( invProb, directiveList=[ diff --git a/examples/_archived/plot_inv_mag_linear.py b/examples/_archived/plot_inv_mag_linear.py index 62d5114807..2ae4b535e3 100644 --- 
a/examples/_archived/plot_inv_mag_linear.py +++ b/examples/_archived/plot_inv_mag_linear.py @@ -131,7 +131,7 @@ def run(plotIt=True): saveDict = directives.SaveOutputEveryIteration(save_txt=False) update_Jacobi = directives.UpdatePreconditioner() # Add sensitivity weights - sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) + sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) inv = inversion.BaseInversion( invProb, diff --git a/tests/base/test_directives.py b/tests/base/test_directives.py index 6a939d9f7b..e4857d08f0 100644 --- a/tests/base/test_directives.py +++ b/tests/base/test_directives.py @@ -133,21 +133,12 @@ def test_validation_in_inversion(self): inv = inversion.BaseInversion(invProb) inv.directiveList = [update_Jacobi, sensitivity_weights] - def test_sensitivity_weighting_warnings(self): - # Test setter warnings - d_temp = directives.UpdateSensitivityWeights() - d_temp.normalization_method = True - self.assertTrue(d_temp.normalization_method == "maximum") - - d_temp.normalization_method = False - self.assertTrue(d_temp.normalization_method is None) - def test_sensitivity_weighting_global(self): test_inputs = { - "everyIter": False, - "threshold": 1e-12, + "every_iteration": False, + "threshold_value": 1e-12, "threshold_method": "global", - "normalization": False, + "normalization_method": None, } # Compute test weights @@ -155,7 +146,7 @@ def test_sensitivity_weighting_global(self): np.sqrt(np.sum((self.dmis.W * self.sim.G) ** 2, axis=0)) / self.mesh.cell_volumes ) - test_weights = sqrt_diagJtJ + test_inputs["threshold"] + test_weights = sqrt_diagJtJ + test_inputs["threshold_value"] test_weights *= self.mesh.cell_volumes # Test directive @@ -182,7 +173,7 @@ def test_sensitivity_weighting_percentile_maximum(self): "every_iteration": True, "threshold_value": 1, "threshold_method": "percentile", - "normalization": True, + "normalization_method": "maximum", } # Compute test weights @@ -303,5 +294,82 @@ def 
test_save_output_dict(RegClass): assert "x SparseSmoothness.norm" in out_dict +class TestUpdateSensitivityWeightsRemovedArgs: + """ + Test if `UpdateSensitivityWeights` raises errors after passing removed arguments. + """ + + def test_every_iter(self): + """ + Test if `UpdateSensitivityWeights` raises error after passing `everyIter`. + """ + msg = "'everyIter' property has been removed. Please use 'every_iteration'." + with pytest.raises(TypeError, match=msg): + directives.UpdateSensitivityWeights(everyIter=True) + + def test_threshold(self): + """ + Test if `UpdateSensitivityWeights` raises error after passing `threshold`. + """ + msg = "'threshold' property has been removed. Please use 'threshold_value'." + with pytest.raises(TypeError, match=msg): + directives.UpdateSensitivityWeights(threshold=True) + + def test_normalization(self): + """ + Test if `UpdateSensitivityWeights` raises error after passing `normalization`. + """ + msg = ( + "'normalization' property has been removed. " + "Please define normalization using 'normalization_method'." + ) + with pytest.raises(TypeError, match=msg): + directives.UpdateSensitivityWeights(normalization=True) + + +class TestUpdateSensitivityNormalization: + """ + Test the `normalization` property and setter in `UpdateSensitivityWeights` + """ + + @pytest.mark.parametrize("normalization_method", (None, "maximum", "minimum")) + def test_normalization_method_setter_valid(self, normalization_method): + """ + Test if the setter method for normalization_method in + `UpdateSensitivityWeights` works as expected on valid values. + + The `normalization_method` must be a string or a None. This test was + included as part of the removal process of the old `normalization` + property. 
+ """ + d_temp = directives.UpdateSensitivityWeights() + # Use the setter method to assign a value to normalization_method + d_temp.normalization_method = normalization_method + assert d_temp.normalization_method == normalization_method + + @pytest.mark.parametrize("normalization_method", (True, False, "an invalid method")) + def test_normalization_method_setter_invalid(self, normalization_method): + """ + Test if the setter method for normalization_method in + `UpdateSensitivityWeights` raises error on invalid values. + + The `normalization_method` must be a string or a None. This test was + included as part of the removal process of the old `normalization` + property. + """ + d_temp = directives.UpdateSensitivityWeights() + if isinstance(normalization_method, bool): + error_type = TypeError + msg = "'normalization_method' must be a str. Got" + else: + error_type = ValueError + msg = ( + r"'normalization_method' must be in \['minimum', 'maximum'\]. " + f"Got '{normalization_method}'" + ) + with pytest.raises(error_type, match=msg): + d_temp.normalization_method = normalization_method + + if __name__ == "__main__": unittest.main() diff --git a/tests/dask/test_grav_inversion_linear.py b/tests/dask/test_grav_inversion_linear.py index 9e91433d36..8d35014b9f 100644 --- a/tests/dask/test_grav_inversion_linear.py +++ b/tests/dask/test_grav_inversion_linear.py @@ -105,7 +105,7 @@ def setUp(self): # Here is where the norms are applied IRLS = directives.Update_IRLS(max_irls_iterations=20, chifact_start=2.0) update_Jacobi = directives.UpdatePreconditioner() - sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) + sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) self.inv = inversion.BaseInversion( invProb, directiveList=[IRLS, sensitivity_weights, update_Jacobi] ) diff --git a/tests/dask/test_mag_MVI_Octree.py b/tests/dask/test_mag_MVI_Octree.py index 7189e99df4..8f9690f216 100644 --- a/tests/dask/test_mag_MVI_Octree.py 
+++ b/tests/dask/test_mag_MVI_Octree.py @@ -152,7 +152,7 @@ def setUp(self): # Pre-conditioner update_Jacobi = directives.UpdatePreconditioner() - sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) + sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) inv = inversion.BaseInversion( invProb, directiveList=[sensitivity_weights, IRLS, update_Jacobi, betaest] ) diff --git a/tests/pf/test_mag_MVI_Octree.py b/tests/pf/test_mag_MVI_Octree.py index 537c4b1720..1880095af8 100644 --- a/tests/pf/test_mag_MVI_Octree.py +++ b/tests/pf/test_mag_MVI_Octree.py @@ -149,7 +149,7 @@ def setUp(self): # Pre-conditioner update_Jacobi = directives.UpdatePreconditioner() - sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) + sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) inv = inversion.BaseInversion( invProb, directiveList=[sensitivity_weights, IRLS, update_Jacobi, betaest] ) diff --git a/tests/pf/test_mag_inversion_linear.py b/tests/pf/test_mag_inversion_linear.py index 611935adf4..c9e0cbc7c3 100644 --- a/tests/pf/test_mag_inversion_linear.py +++ b/tests/pf/test_mag_inversion_linear.py @@ -119,7 +119,7 @@ def setUp(self): # Here is where the norms are applied IRLS = directives.Update_IRLS(f_min_change=1e-4, minGNiter=1) update_Jacobi = directives.UpdatePreconditioner() - sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) + sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) self.inv = inversion.BaseInversion( invProb, directiveList=[IRLS, sensitivity_weights, betaest, update_Jacobi] ) diff --git a/tests/pf/test_mag_vector_amplitude.py b/tests/pf/test_mag_vector_amplitude.py index af4619e14f..5dea5ded25 100644 --- a/tests/pf/test_mag_vector_amplitude.py +++ b/tests/pf/test_mag_vector_amplitude.py @@ -141,7 +141,7 @@ def setUp(self): # Pre-conditioner update_Jacobi = directives.UpdatePreconditioner() - sensitivity_weights = 
directives.UpdateSensitivityWeights(everyIter=False) + sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) self.inv = inversion.BaseInversion( invProb, directiveList=[sensitivity_weights, IRLS, update_Jacobi, betaest] ) diff --git a/tutorials/02-linear_inversion/plot_inv_2_inversion_irls.py b/tutorials/02-linear_inversion/plot_inv_2_inversion_irls.py index 19ae156e89..15c7f18dc5 100644 --- a/tutorials/02-linear_inversion/plot_inv_2_inversion_irls.py +++ b/tutorials/02-linear_inversion/plot_inv_2_inversion_irls.py @@ -172,7 +172,7 @@ def g(k): # # Add sensitivity weights but don't update at each beta -sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) +sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) # Reach target misfit for L2 solution, then use IRLS until model stops changing. IRLS = directives.Update_IRLS(max_irls_iterations=40, minGNiter=1, f_min_change=1e-4) diff --git a/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py b/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py index 3b0c46dcfe..e1e99ebcb7 100644 --- a/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py +++ b/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py @@ -268,7 +268,7 @@ target_misfit = directives.TargetMisfit(chifact=1) # Add sensitivity weights -sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) +sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) # The directives are defined as a list. 
directives_list = [ diff --git a/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py b/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py index fdcca19bc8..23336dee7c 100644 --- a/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py +++ b/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py @@ -274,7 +274,7 @@ update_jacobi = directives.UpdatePreconditioner() # Add sensitivity weights -sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) +sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) # The directives are defined as a list. directives_list = [ diff --git a/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py b/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py index 07074a525d..7ff2f45463 100644 --- a/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py +++ b/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py @@ -309,7 +309,7 @@ target_misfit = directives.TargetMisfit(chifact=1) # Add sensitivity weights -sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) +sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) # The directives are defined as a list. directives_list = [ diff --git a/tutorials/06-ip/plot_inv_2_dcip2d.py b/tutorials/06-ip/plot_inv_2_dcip2d.py index 2215462ffd..d4d00efa06 100644 --- a/tutorials/06-ip/plot_inv_2_dcip2d.py +++ b/tutorials/06-ip/plot_inv_2_dcip2d.py @@ -567,7 +567,7 @@ # Here we define the directives in the same manner as the DC inverse problem. 
# -update_sensitivity_weighting = directives.UpdateSensitivityWeights(threshold=1e-3) +update_sensitivity_weighting = directives.UpdateSensitivityWeights(threshold_value=1e-3) starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e1) beta_schedule = directives.BetaSchedule(coolingFactor=2, coolingRate=1) save_iteration = directives.SaveOutputEveryIteration(save_txt=False) diff --git a/tutorials/06-ip/plot_inv_3_dcip3d.py b/tutorials/06-ip/plot_inv_3_dcip3d.py index 9193d829b7..1be280c5d7 100644 --- a/tutorials/06-ip/plot_inv_3_dcip3d.py +++ b/tutorials/06-ip/plot_inv_3_dcip3d.py @@ -633,7 +633,7 @@ # Here we define the directives in the same manner as the DC inverse problem. # -update_sensitivity_weighting = directives.UpdateSensitivityWeights(threshold=1e-3) +update_sensitivity_weighting = directives.UpdateSensitivityWeights(threshold_value=1e-3) starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e2) beta_schedule = directives.BetaSchedule(coolingFactor=2.5, coolingRate=1) save_iteration = directives.SaveOutputEveryIteration(save_txt=False) diff --git a/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py b/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py index 27bcfb523e..261af3baf2 100755 --- a/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py +++ b/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py @@ -365,7 +365,7 @@ stopping = directives.MovingAndMultiTargetStopping(tol=1e-6) -sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) +sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) # Updating the preconditionner if it is model dependent. 
update_jacobi = directives.UpdatePreconditioner() From 887cfe20e78bb7e8f552911eb5d4d0b510a24b57 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Mon, 11 Mar 2024 11:41:28 -0700 Subject: [PATCH 27/68] Replace indActive for active_cells in regularizations (#1366) #### Summary Remove the `indActive` argument in constructors of regularization classes, remove the `indActive` attribute and replace them for the supported `active_cells`. Replace the `indActive` argument in the `depth_weighting` function in favor of `active_cells`. Raise errors when those arguments are being passed and add tests checking those errors are being raised. Update code, tests and examples that were still using `indActive`. #### Reference issue Part of the solution for #1302 Most of these changes were cherry-picked from #1306 --- .../static/induced_polarization/run.py | 4 +- .../static/resistivity/run.py | 4 +- .../spectral_induced_polarization/run.py | 8 ++- SimPEG/regularization/base.py | 41 ++++-------- SimPEG/utils/model_utils.py | 10 +-- .../plot_inv_mag_nonLinear_Amplitude.py | 4 +- examples/08-vrm/plot_inv_vrm_eq.py | 2 +- ...lot_inv_dcip_dipoledipole_2_5Dinversion.py | 2 +- ...nv_dcip_dipoledipole_2_5Dinversion_irls.py | 2 +- examples/_archived/plot_inv_grav_linear.py | 2 +- examples/_archived/plot_inv_mag_linear.py | 2 +- tests/base/test_correspondance.py | 2 +- tests/base/test_cross_gradient.py | 12 ++-- tests/base/test_jtv.py | 14 +++-- tests/base/test_model_utils.py | 35 +++++++++-- tests/base/test_regularization.py | 63 +++++++++++++++---- tests/dask/test_mag_MVI_Octree.py | 12 ++-- tests/dask/test_mag_nonLinear_Amplitude.py | 4 +- tests/em/static/test_SIP_2D_jvecjtvecadj.py | 6 +- tests/em/static/test_SIP_jvecjtvecadj.py | 6 +- tests/pf/test_mag_inversion_linear.py | 2 +- tests/pf/test_mag_nonLinear_Amplitude.py | 4 +- .../03-gravity/plot_inv_1a_gravity_anomaly.py | 4 +- tutorials/05-dcr/plot_inv_2_dcr2d_irls.py | 2 +- tutorials/05-dcr/plot_inv_3_dcr3d.py | 2 +- 
tutorials/06-ip/plot_inv_2_dcip2d.py | 4 +- tutorials/06-ip/plot_inv_3_dcip3d.py | 4 +- .../plot_inv_3_cross_gradient_pf.py | 6 +- .../_temporary/plot_4c_fdem3d_inversion.py | 2 +- 29 files changed, 159 insertions(+), 106 deletions(-) diff --git a/SimPEG/electromagnetics/static/induced_polarization/run.py b/SimPEG/electromagnetics/static/induced_polarization/run.py index 550579a319..53fbd717f3 100644 --- a/SimPEG/electromagnetics/static/induced_polarization/run.py +++ b/SimPEG/electromagnetics/static/induced_polarization/run.py @@ -37,7 +37,7 @@ def run_inversion( regmap = maps.IdentityMap(nP=int(actind.sum())) # Related to inversion if use_sensitivity_weight: - reg = regularization.Sparse(mesh, indActive=actind, mapping=regmap) + reg = regularization.Sparse(mesh, active_cells=actind, mapping=regmap) reg.alpha_s = alpha_s reg.alpha_x = alpha_x reg.alpha_y = alpha_y @@ -45,7 +45,7 @@ def run_inversion( else: reg = regularization.Sparse( mesh, - indActive=actind, + active_cells=actind, mapping=regmap, cell_weights=mesh.cell_volumes[actind], ) diff --git a/SimPEG/electromagnetics/static/resistivity/run.py b/SimPEG/electromagnetics/static/resistivity/run.py index a9a04dc3e0..0ee948ea48 100644 --- a/SimPEG/electromagnetics/static/resistivity/run.py +++ b/SimPEG/electromagnetics/static/resistivity/run.py @@ -37,14 +37,14 @@ def run_inversion( regmap = maps.IdentityMap(nP=int(actind.sum())) # Related to inversion if use_sensitivity_weight: - reg = regularization.Sparse(mesh, indActive=actind, mapping=regmap) + reg = regularization.Sparse(mesh, active_cells=actind, mapping=regmap) reg.alpha_s = alpha_s reg.alpha_x = alpha_x reg.alpha_y = alpha_y reg.alpha_z = alpha_z else: reg = regularization.WeightedLeastSquares( - mesh, indActive=actind, mapping=regmap + mesh, active_cells=actind, mapping=regmap ) reg.alpha_s = alpha_s reg.alpha_x = alpha_x diff --git a/SimPEG/electromagnetics/static/spectral_induced_polarization/run.py 
b/SimPEG/electromagnetics/static/spectral_induced_polarization/run.py index bf568e31d7..101385a70b 100644 --- a/SimPEG/electromagnetics/static/spectral_induced_polarization/run.py +++ b/SimPEG/electromagnetics/static/spectral_induced_polarization/run.py @@ -143,12 +143,14 @@ def run_inversion( # Set up regularization reg_eta = regularization.WeightedLeastSquares( - mesh, mapping=wires.eta, indActive=actind + mesh, mapping=wires.eta, active_cells=actind ) reg_tau = regularization.WeightedLeastSquares( - mesh, mapping=wires.tau, indActive=actind + mesh, mapping=wires.tau, active_cells=actind + ) + reg_c = regularization.WeightedLeastSquares( + mesh, mapping=wires.c, active_cells=actind ) - reg_c = regularization.WeightedLeastSquares(mesh, mapping=wires.c, indActive=actind) # Todo: diff --git a/SimPEG/regularization/base.py b/SimPEG/regularization/base.py index 4857a376d2..ca38be2e3f 100644 --- a/SimPEG/regularization/base.py +++ b/SimPEG/regularization/base.py @@ -67,22 +67,13 @@ def __init__( f"Value of type {type(mesh)} provided." ) - # Handle deprecated indActive argument + # Raise errors on deprecated arguments: avoid old code that still uses + # them to silently fail if (key := "indActive") in kwargs: - if active_cells is not None: - raise ValueError( - f"Cannot simultaneously pass 'active_cells' and '{key}'. " - "Pass 'active_cells' only." - ) - warnings.warn( - f"The '{key}' argument has been deprecated, please use 'active_cells'. " - "It will be removed in future versions of SimPEG.", - DeprecationWarning, - stacklevel=2, + raise TypeError( + f"'{key}' argument has been removed. " + "Please use 'active_cells' instead." 
) - active_cells = kwargs.pop(key) - - # Handle deprecated cell_weights argument if (key := "cell_weights") in kwargs: if weights is not None: raise ValueError( @@ -146,8 +137,7 @@ def active_cells(self, values: np.ndarray | None): "indActive", "active_cells", "0.19.0", - future_warn=True, - error=False, + error=True, ) @property @@ -1585,19 +1575,13 @@ def __init__( ) self._regularization_mesh = mesh + # Raise errors on deprecated arguments: avoid old code that still uses + # them to silently fail if (key := "indActive") in kwargs: - if active_cells is not None: - raise ValueError( - f"Cannot simultaneously pass 'active_cells' and '{key}'. " - "Pass 'active_cells' only." - ) - warnings.warn( - f"The '{key}' argument has been deprecated, please use 'active_cells'. " - "It will be removed in future versions of SimPEG.", - DeprecationWarning, - stacklevel=2, + raise TypeError( + f"'{key}' argument has been removed. " + "Please use 'active_cells' instead." ) - active_cells = kwargs.pop(key) self.alpha_s = alpha_s if alpha_x is not None: @@ -2107,8 +2091,7 @@ def active_cells(self, values: np.ndarray): "indActive", "active_cells", "0.19.0", - error=False, - future_warn=True, + error=True, ) @property diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index 8c6d19b1ab..2bdd99b42a 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -160,14 +160,10 @@ def depth_weighting( value. """ - if "indActive" in kwargs: - warnings.warn( - "The indActive keyword argument has been deprecated, please use active_cells. " - "This will be removed in SimPEG 0.19.0", - FutureWarning, - stacklevel=2, + if (key := "indActive") in kwargs: + raise TypeError( + f"'{key}' argument has been removed. " "Please use 'active_cells' instead." 
) - active_cells = kwargs["indActive"] # Default threshold value if threshold is None: diff --git a/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py b/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py index 8c5cfb1af8..bbcd745dce 100644 --- a/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py +++ b/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py @@ -233,7 +233,7 @@ # Create a regularization function, in this case l2l2 reg = regularization.Sparse( - mesh, indActive=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0 + mesh, active_cells=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0 ) reg.mref = np.zeros(nC) @@ -345,7 +345,7 @@ data_obj = data.Data(survey, dobs=bAmp, noise_floor=wd) # Create a sparse regularization -reg = regularization.Sparse(mesh, indActive=actv, mapping=idenMap) +reg = regularization.Sparse(mesh, active_cells=actv, mapping=idenMap) reg.norms = [1, 0, 0, 0] reg.mref = np.zeros(nC) diff --git a/examples/08-vrm/plot_inv_vrm_eq.py b/examples/08-vrm/plot_inv_vrm_eq.py index e4004a0b29..aba2d0fde0 100644 --- a/examples/08-vrm/plot_inv_vrm_eq.py +++ b/examples/08-vrm/plot_inv_vrm_eq.py @@ -196,7 +196,7 @@ w = w / np.max(w) w = w -reg = regularization.Smallness(mesh=mesh, indActive=actCells, cell_weights=w) +reg = regularization.Smallness(mesh=mesh, active_cells=actCells, cell_weights=w) opt = optimization.ProjectedGNCG( maxIter=20, lower=0.0, upper=1e-2, maxIterLS=20, tolCG=1e-4 ) diff --git a/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion.py b/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion.py index 1caac9b8d2..465390fca9 100644 --- a/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion.py +++ b/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion.py @@ -146,7 +146,7 @@ def run(plotIt=True, survey_type="dipole-dipole"): regmap = maps.IdentityMap(nP=int(actind.sum())) # Related to inversion - reg = regularization.Sparse(mesh, indActive=actind, mapping=regmap) + reg = 
regularization.Sparse(mesh, active_cells=actind, mapping=regmap) opt = optimization.InexactGaussNewton(maxIter=15) invProb = inverse_problem.BaseInvProblem(dmisfit, reg, opt) beta = directives.BetaSchedule(coolingFactor=5, coolingRate=2) diff --git a/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion_irls.py b/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion_irls.py index 188662e72e..b6c67f0fb3 100644 --- a/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion_irls.py +++ b/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion_irls.py @@ -155,7 +155,7 @@ def run(plotIt=True, survey_type="dipole-dipole", p=0.0, qx=2.0, qz=2.0): # Related to inversion reg = regularization.Sparse( - mesh, indActive=actind, mapping=regmap, gradientType="components" + mesh, active_cells=actind, mapping=regmap, gradientType="components" ) # gradientType = 'components' reg.norms = [p, qx, qz, 0.0] diff --git a/examples/_archived/plot_inv_grav_linear.py b/examples/_archived/plot_inv_grav_linear.py index 26b9cbf680..c11bea15e9 100644 --- a/examples/_archived/plot_inv_grav_linear.py +++ b/examples/_archived/plot_inv_grav_linear.py @@ -102,7 +102,7 @@ def run(plotIt=True): rxLoc = survey.source_field.receiver_list[0].locations # Create a regularization - reg = regularization.Sparse(mesh, indActive=actv, mapping=idenMap) + reg = regularization.Sparse(mesh, active_cells=actv, mapping=idenMap) reg.norms = [0, 0, 0, 0] # Data misfit function diff --git a/examples/_archived/plot_inv_mag_linear.py b/examples/_archived/plot_inv_mag_linear.py index 2ae4b535e3..661ed94062 100644 --- a/examples/_archived/plot_inv_mag_linear.py +++ b/examples/_archived/plot_inv_mag_linear.py @@ -104,7 +104,7 @@ def run(plotIt=True): data_object = data.Data(survey, dobs=synthetic_data, noise_floor=wd) # Create a regularization - reg = regularization.Sparse(mesh, indActive=actv, mapping=idenMap) + reg = regularization.Sparse(mesh, active_cells=actv, mapping=idenMap) reg.mref = np.zeros(nC) 
reg.norms = [0, 0, 0, 0] # reg.eps_p, reg.eps_q = 1e-0, 1e-0 diff --git a/tests/base/test_correspondance.py b/tests/base/test_correspondance.py index 4e9fc71442..64a8cbcd0a 100644 --- a/tests/base/test_correspondance.py +++ b/tests/base/test_correspondance.py @@ -30,7 +30,7 @@ def setUp(self): corr = regularization.LinearCorrespondence( mesh, wire_map=wires, - indActive=actv, + active_cells=actv, ) self.mesh = mesh diff --git a/tests/base/test_cross_gradient.py b/tests/base/test_cross_gradient.py index 9c764d481e..b0493b8569 100644 --- a/tests/base/test_cross_gradient.py +++ b/tests/base/test_cross_gradient.py @@ -30,7 +30,7 @@ def setUp(self): cros_grad = regularization.CrossGradient( mesh, wire_map=wires, - indActive=actv, + active_cells=actv, ) self.mesh = mesh @@ -122,7 +122,7 @@ def setUp(self): cros_grad = regularization.CrossGradient( mesh, wire_map=wires, - indActive=actv, + active_cells=actv, ) self.mesh = mesh @@ -196,7 +196,9 @@ def setUp(self): # maps wires = maps.Wires(("m1", mesh.nC), ("m2", mesh.nC)) - cross_grad = regularization.CrossGradient(mesh, wire_map=wires, indActive=actv) + cross_grad = regularization.CrossGradient( + mesh, wire_map=wires, active_cells=actv + ) self.mesh = mesh self.cross_grad = cross_grad @@ -259,7 +261,9 @@ def setUp(self): # maps wires = maps.Wires(("m1", mesh.nC), ("m2", mesh.nC)) - cross_grad = regularization.CrossGradient(mesh, wire_map=wires, indActive=actv) + cross_grad = regularization.CrossGradient( + mesh, wire_map=wires, active_cells=actv + ) self.mesh = mesh self.cross_grad = cross_grad diff --git a/tests/base/test_jtv.py b/tests/base/test_jtv.py index 9d93d26c66..c016043da6 100644 --- a/tests/base/test_jtv.py +++ b/tests/base/test_jtv.py @@ -31,7 +31,7 @@ def setUp(self): jtv = regularization.JointTotalVariation( mesh, wire_map=wires, - indActive=actv, + active_cells=actv, ) self.mesh = mesh @@ -81,7 +81,7 @@ def setUp(self): jtv = regularization.JointTotalVariation( mesh, wire_map=wires, - indActive=actv, + 
active_cells=actv, ) self.mesh = mesh @@ -127,7 +127,9 @@ def setUp(self): # maps wires = maps.Wires(("m1", mesh.nC), ("m2", mesh.nC)) - jtv = regularization.JointTotalVariation(mesh, wire_map=wires, indActive=actv) + jtv = regularization.JointTotalVariation( + mesh, wire_map=wires, active_cells=actv + ) self.mesh = mesh self.jtv = jtv @@ -174,7 +176,9 @@ def setUp(self): # maps wires = maps.Wires(("m1", mesh.nC), ("m2", mesh.nC)) - jtv = regularization.JointTotalVariation(mesh, wire_map=wires, indActive=actv) + jtv = regularization.JointTotalVariation( + mesh, wire_map=wires, active_cells=actv + ) self.mesh = mesh self.jtv = jtv @@ -221,7 +225,7 @@ def test_bad_wires(): regularization.JointTotalVariation( mesh, wire_map=wires, - indActive=actv, + active_cells=actv, ) diff --git a/tests/base/test_model_utils.py b/tests/base/test_model_utils.py index d7d57f6d80..48279e4b54 100644 --- a/tests/base/test_model_utils.py +++ b/tests/base/test_model_utils.py @@ -1,3 +1,4 @@ +import pytest import unittest import numpy as np @@ -22,7 +23,9 @@ def test_depth_weighting_3D(self): r_loc = 0.1 # Depth weighting - wz = utils.depth_weighting(mesh, r_loc, indActive=actv, exponent=5, threshold=0) + wz = utils.depth_weighting( + mesh, r_loc, active_cells=actv, exponent=5, threshold=0 + ) reference_locs = ( np.random.rand(1000, 3) * (mesh.nodes.max(axis=0) - mesh.nodes.min(axis=0)) @@ -31,14 +34,14 @@ def test_depth_weighting_3D(self): reference_locs[:, -1] = r_loc wz2 = utils.depth_weighting( - mesh, reference_locs, indActive=actv, exponent=5, threshold=0 + mesh, reference_locs, active_cells=actv, exponent=5, threshold=0 ) np.testing.assert_allclose(wz, wz2) # testing default params all_active = np.ones(mesh.n_cells, dtype=bool) wz = utils.depth_weighting( - mesh, r_loc, indActive=all_active, exponent=2, threshold=0.5 * dh + mesh, r_loc, active_cells=all_active, exponent=2, threshold=0.5 * dh ) wz2 = utils.depth_weighting(mesh, r_loc) @@ -58,7 +61,9 @@ def 
test_depth_weighting_2D(self): r_loc = 0.1 # Depth weighting - wz = utils.depth_weighting(mesh, r_loc, indActive=actv, exponent=5, threshold=0) + wz = utils.depth_weighting( + mesh, r_loc, active_cells=actv, exponent=5, threshold=0 + ) reference_locs = ( np.random.rand(1000, 2) * (mesh.nodes.max(axis=0) - mesh.nodes.min(axis=0)) @@ -67,10 +72,30 @@ def test_depth_weighting_2D(self): reference_locs[:, -1] = r_loc wz2 = utils.depth_weighting( - mesh, reference_locs, indActive=actv, exponent=5, threshold=0 + mesh, reference_locs, active_cells=actv, exponent=5, threshold=0 ) np.testing.assert_allclose(wz, wz2) +@pytest.fixture +def mesh(): + """Sample mesh.""" + dh = 5.0 + hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] + hz = [(dh, 15)] + mesh = TensorMesh([hx, hz], "CN") + return mesh + + +def test_removed_indactive(mesh): + """ + Test if error is raised after passing removed indActive argument + """ + active_cells = np.ones(mesh.nC, dtype=bool) + msg = "'indActive' argument has been removed. " "Please use 'active_cells' instead." 
+ with pytest.raises(TypeError, match=msg): + utils.depth_weighting(mesh, 0, indActive=active_cells) + + if __name__ == "__main__": unittest.main() diff --git a/tests/base/test_regularization.py b/tests/base/test_regularization.py index 18aac616b6..3000d23460 100644 --- a/tests/base/test_regularization.py +++ b/tests/base/test_regularization.py @@ -764,7 +764,6 @@ class TestDeprecatedArguments: Within these arguments are: - * ``indActive`` (replaced by ``active_cells``) * ``cell_weights`` (replaced by ``weights``) """ @@ -783,18 +782,6 @@ def mesh(self, request): h = [h_i / h_i.sum() for h_i in (hx, hy, hz)] return discretize.TensorMesh(h) - @pytest.mark.parametrize( - "regularization_class", (BaseRegularization, WeightedLeastSquares) - ) - def test_active_cells(self, mesh, regularization_class): - """Test indActive and active_cells arguments.""" - active_cells = np.ones(len(mesh), dtype=bool) - msg = "Cannot simultaneously pass 'active_cells' and 'indActive'." - with pytest.raises(ValueError, match=msg): - regularization_class( - mesh, active_cells=active_cells, indActive=active_cells - ) - def test_weights(self, mesh): """Test cell_weights and weights.""" weights = np.ones(len(mesh)) @@ -803,6 +790,56 @@ def test_weights(self, mesh): BaseRegularization(mesh, weights=weights, cell_weights=weights) +class TestRemovedObjects: + """ + Test if errors are raised after passing removed arguments or trying to + access removed properties. 
+ + * ``indActive`` (replaced by ``active_cells``) + + """ + + @pytest.fixture(params=["1D", "2D", "3D"]) + def mesh(self, request): + """Sample mesh.""" + if request.param == "1D": + hx = np.random.rand(10) + h = [hx / hx.sum()] + elif request.param == "2D": + hx, hy = np.random.rand(10), np.random.rand(9) + h = [h_i / h_i.sum() for h_i in (hx, hy)] + elif request.param == "3D": + hx, hy, hz = np.random.rand(10), np.random.rand(9), np.random.rand(8) + h = [h_i / h_i.sum() for h_i in (hx, hy, hz)] + return discretize.TensorMesh(h) + + @pytest.mark.parametrize( + "regularization_class", + (BaseRegularization, WeightedLeastSquares), + ) + def test_ind_active(self, mesh, regularization_class): + """Test if error is raised when passing the indActive argument.""" + active_cells = np.ones(len(mesh), dtype=bool) + msg = ( + "'indActive' argument has been removed. " + "Please use 'active_cells' instead." + ) + with pytest.raises(TypeError, match=msg): + regularization_class(mesh, indActive=active_cells) + + @pytest.mark.parametrize( + "regularization_class", + (BaseRegularization, WeightedLeastSquares), + ) + def test_ind_active_property(self, mesh, regularization_class): + """Test if error is raised when trying to access the indActive property.""" + active_cells = np.ones(len(mesh), dtype=bool) + reg = regularization_class(mesh, active_cells=active_cells) + msg = "indActive has been removed, please use active_cells." + with pytest.raises(NotImplementedError, match=msg): + reg.indActive + + class TestRemovedRegularizations: """ Test if errors are raised after creating removed regularization classes. 
diff --git a/tests/dask/test_mag_MVI_Octree.py b/tests/dask/test_mag_MVI_Octree.py index 8f9690f216..3c7305f552 100644 --- a/tests/dask/test_mag_MVI_Octree.py +++ b/tests/dask/test_mag_MVI_Octree.py @@ -117,13 +117,13 @@ def setUp(self): # Create three regularization for the different components # of magnetization - reg_p = regularization.Sparse(mesh, indActive=actv, mapping=wires.p) + reg_p = regularization.Sparse(mesh, active_cells=actv, mapping=wires.p) reg_p.mref = np.zeros(3 * nC) - reg_s = regularization.Sparse(mesh, indActive=actv, mapping=wires.s) + reg_s = regularization.Sparse(mesh, active_cells=actv, mapping=wires.s) reg_s.mref = np.zeros(3 * nC) - reg_t = regularization.Sparse(mesh, indActive=actv, mapping=wires.t) + reg_t = regularization.Sparse(mesh, active_cells=actv, mapping=wires.t) reg_t.mref = np.zeros(3 * nC) reg = reg_p + reg_s + reg_t @@ -171,18 +171,18 @@ def setUp(self): # Create a Combo Regularization # Regularize the amplitude of the vectors - reg_a = regularization.Sparse(mesh, indActive=actv, mapping=wires.amp) + reg_a = regularization.Sparse(mesh, active_cells=actv, mapping=wires.amp) reg_a.norms = [0.0, 0.0, 0.0, 0.0] # Sparse on the model and its gradients reg_a.mref = np.zeros(3 * nC) # Regularize the vertical angle of the vectors - reg_t = regularization.Sparse(mesh, indActive=actv, mapping=wires.theta) + reg_t = regularization.Sparse(mesh, active_cells=actv, mapping=wires.theta) reg_t.alpha_s = 0.0 # No reference angle reg_t.space = "spherical" reg_t.norms = [2.0, 0.0, 0.0, 0.0] # Only norm on gradients used # Regularize the horizontal angle of the vectors - reg_p = regularization.Sparse(mesh, indActive=actv, mapping=wires.phi) + reg_p = regularization.Sparse(mesh, active_cells=actv, mapping=wires.phi) reg_p.alpha_s = 0.0 # No reference angle reg_p.space = "spherical" reg_p.norms = [2.0, 0.0, 0.0, 0.0] # Only norm on gradients used diff --git a/tests/dask/test_mag_nonLinear_Amplitude.py b/tests/dask/test_mag_nonLinear_Amplitude.py 
index 5b0dfdf6f6..1f9109d402 100644 --- a/tests/dask/test_mag_nonLinear_Amplitude.py +++ b/tests/dask/test_mag_nonLinear_Amplitude.py @@ -142,7 +142,7 @@ def setUp(self): # Create a regularization function, in this case l2l2 reg = regularization.Sparse( - mesh, indActive=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0 + mesh, active_cells=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0 ) reg.mref = np.zeros(nC) @@ -235,7 +235,7 @@ def setUp(self): data_obj = data.Data(survey, dobs=bAmp, noise_floor=wd) # Create a sparse regularization - reg = regularization.Sparse(mesh, indActive=actv, mapping=idenMap) + reg = regularization.Sparse(mesh, active_cells=actv, mapping=idenMap) reg.norms = [1, 0, 0, 0] reg.mref = np.zeros(nC) diff --git a/tests/em/static/test_SIP_2D_jvecjtvecadj.py b/tests/em/static/test_SIP_2D_jvecjtvecadj.py index a299c339f5..64fabc6851 100644 --- a/tests/em/static/test_SIP_2D_jvecjtvecadj.py +++ b/tests/em/static/test_SIP_2D_jvecjtvecadj.py @@ -265,13 +265,13 @@ def setUp(self): # Now set up the problem to do some minimization dmis = data_misfit.L2DataMisfit(data=dobs, simulation=problem) reg_eta = regularization.WeightedLeastSquares( - mesh, mapping=wires.eta, indActive=~airind + mesh, mapping=wires.eta, active_cells=~airind ) reg_taui = regularization.WeightedLeastSquares( - mesh, mapping=wires.taui, indActive=~airind + mesh, mapping=wires.taui, active_cells=~airind ) reg_c = regularization.WeightedLeastSquares( - mesh, mapping=wires.c, indActive=~airind + mesh, mapping=wires.c, active_cells=~airind ) reg = reg_eta + reg_taui + reg_c opt = optimization.InexactGaussNewton( diff --git a/tests/em/static/test_SIP_jvecjtvecadj.py b/tests/em/static/test_SIP_jvecjtvecadj.py index 00e5370bd6..a55272f50e 100644 --- a/tests/em/static/test_SIP_jvecjtvecadj.py +++ b/tests/em/static/test_SIP_jvecjtvecadj.py @@ -282,9 +282,9 @@ def setUp(self): dobs = problem.make_synthetic_data(mSynth, add_noise=True) # Now set up the problem to do some minimization dmis = 
data_misfit.L2DataMisfit(data=dobs, simulation=problem) - reg_eta = regularization.Sparse(mesh, mapping=wires.eta, indActive=~airind) - reg_taui = regularization.Sparse(mesh, mapping=wires.taui, indActive=~airind) - reg_c = regularization.Sparse(mesh, mapping=wires.c, indActive=~airind) + reg_eta = regularization.Sparse(mesh, mapping=wires.eta, active_cells=~airind) + reg_taui = regularization.Sparse(mesh, mapping=wires.taui, active_cells=~airind) + reg_c = regularization.Sparse(mesh, mapping=wires.c, active_cells=~airind) reg = reg_eta + reg_taui + reg_c opt = optimization.InexactGaussNewton( maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6 diff --git a/tests/pf/test_mag_inversion_linear.py b/tests/pf/test_mag_inversion_linear.py index c9e0cbc7c3..2da13bf2f5 100644 --- a/tests/pf/test_mag_inversion_linear.py +++ b/tests/pf/test_mag_inversion_linear.py @@ -101,7 +101,7 @@ def setUp(self): ) # Create a regularization - reg = regularization.Sparse(self.mesh, indActive=actv, mapping=idenMap) + reg = regularization.Sparse(self.mesh, active_cells=actv, mapping=idenMap) reg.norms = [0, 0, 0, 0] reg.gradientType = "components" diff --git a/tests/pf/test_mag_nonLinear_Amplitude.py b/tests/pf/test_mag_nonLinear_Amplitude.py index 186fdc1343..015aa8bfe0 100644 --- a/tests/pf/test_mag_nonLinear_Amplitude.py +++ b/tests/pf/test_mag_nonLinear_Amplitude.py @@ -141,7 +141,7 @@ def setUp(self): # Create a regularization function, in this case l2l2 reg = regularization.Sparse( - mesh, indActive=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0 + mesh, active_cells=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0 ) reg.mref = np.zeros(nC) @@ -234,7 +234,7 @@ def setUp(self): data_obj = data.Data(survey, dobs=bAmp, noise_floor=wd) # Create a sparse regularization - reg = regularization.Sparse(mesh, indActive=actv, mapping=idenMap) + reg = regularization.Sparse(mesh, active_cells=actv, mapping=idenMap) reg.norms = [1, 0, 0, 0] reg.mref = np.zeros(nC) diff --git 
a/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py b/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py index e1e99ebcb7..968ef898a2 100644 --- a/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py +++ b/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py @@ -230,7 +230,9 @@ dmis = data_misfit.L2DataMisfit(data=data_object, simulation=simulation) # Define the regularization (model objective function). -reg = regularization.WeightedLeastSquares(mesh, indActive=ind_active, mapping=model_map) +reg = regularization.WeightedLeastSquares( + mesh, active_cells=ind_active, mapping=model_map +) # Define how the optimization problem is solved. Here we will use a projected # Gauss-Newton approach that employs the conjugate gradient solver. diff --git a/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py b/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py index 2cf97a0bc8..50ad50e100 100644 --- a/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py +++ b/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py @@ -302,7 +302,7 @@ reg = regularization.Sparse( mesh, - indActive=ind_active, + active_cells=ind_active, reference_model=starting_conductivity_model, mapping=regmap, gradientType="total", diff --git a/tutorials/05-dcr/plot_inv_3_dcr3d.py b/tutorials/05-dcr/plot_inv_3_dcr3d.py index 16498c2aad..34633b0e73 100644 --- a/tutorials/05-dcr/plot_inv_3_dcr3d.py +++ b/tutorials/05-dcr/plot_inv_3_dcr3d.py @@ -300,7 +300,7 @@ # Define the regularization (model objective function) dc_regularization = regularization.WeightedLeastSquares( mesh, - indActive=ind_active, + active_cells=ind_active, reference_model=starting_conductivity_model, ) diff --git a/tutorials/06-ip/plot_inv_2_dcip2d.py b/tutorials/06-ip/plot_inv_2_dcip2d.py index d4d00efa06..ed14b4fcbe 100644 --- a/tutorials/06-ip/plot_inv_2_dcip2d.py +++ b/tutorials/06-ip/plot_inv_2_dcip2d.py @@ -310,7 +310,7 @@ # Define the regularization (model objective function) dc_regularization = regularization.WeightedLeastSquares( mesh, - indActive=ind_active, + 
active_cells=ind_active, reference_model=starting_conductivity_model, alpha_s=0.01, alpha_x=1, @@ -542,7 +542,7 @@ # Define the regularization (model objective function) ip_regularization = regularization.WeightedLeastSquares( mesh, - indActive=ind_active, + active_cells=ind_active, mapping=maps.IdentityMap(nP=nC), alpha_s=0.01, alpha_x=1, diff --git a/tutorials/06-ip/plot_inv_3_dcip3d.py b/tutorials/06-ip/plot_inv_3_dcip3d.py index 1be280c5d7..39f8a9a9fd 100644 --- a/tutorials/06-ip/plot_inv_3_dcip3d.py +++ b/tutorials/06-ip/plot_inv_3_dcip3d.py @@ -348,7 +348,7 @@ # Define the regularization (model objective function) dc_regularization = regularization.WeightedLeastSquares( mesh, - indActive=ind_active, + active_cells=ind_active, reference_model=starting_conductivity_model, ) @@ -608,7 +608,7 @@ # Define the regularization (model objective function) ip_regularization = regularization.WeightedLeastSquares( mesh, - indActive=ind_active, + active_cells=ind_active, mapping=maps.IdentityMap(nP=nC), alpha_s=0.01, alpha_x=1, diff --git a/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py b/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py index 261af3baf2..abd1170b8e 100755 --- a/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py +++ b/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py @@ -310,15 +310,15 @@ # Define the regularization (model objective function). 
reg_grav = regularization.WeightedLeastSquares( - mesh, indActive=ind_active, mapping=wires.density + mesh, active_cells=ind_active, mapping=wires.density ) reg_mag = regularization.WeightedLeastSquares( - mesh, indActive=ind_active, mapping=wires.susceptibility + mesh, active_cells=ind_active, mapping=wires.susceptibility ) # Define the coupling term to connect two different physical property models lamda = 2e12 # weight for coupling term -cross_grad = regularization.CrossGradient(mesh, wires, indActive=ind_active) +cross_grad = regularization.CrossGradient(mesh, wires, active_cells=ind_active) # combo dmis = dmis_grav + dmis_mag diff --git a/tutorials/_temporary/plot_4c_fdem3d_inversion.py b/tutorials/_temporary/plot_4c_fdem3d_inversion.py index 5ac26bff26..6282d22b7f 100644 --- a/tutorials/_temporary/plot_4c_fdem3d_inversion.py +++ b/tutorials/_temporary/plot_4c_fdem3d_inversion.py @@ -313,7 +313,7 @@ # Define the regularization (model objective function) reg = regularization.WeightedLeastSquares( mesh, - indActive=ind_active, + active_cells=ind_active, reference_model=starting_model, alpha_s=1e-2, alpha_x=1, From f2e29ad9580d7ec457ca2f9a9c42b7642656f954 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 13 Mar 2024 09:20:00 -0700 Subject: [PATCH 28/68] Remove the debug argument from InversionDirective (#1370) Remove the `debug` argument and property from `InversionDirective`, add new test to check if it raises an error when passing it as argument. Update directive class using the `debug` property. 
Part of the solution for https://github.com/simpeg/simpeg/issues/1302 Most of these changes were cherry-picked from https://github.com/simpeg/simpeg/pull/1306 --- SimPEG/directives/directives.py | 11 +++++------ SimPEG/directives/sim_directives.py | 2 +- tests/base/test_directives.py | 14 ++++++++++++++ 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/SimPEG/directives/directives.py b/SimPEG/directives/directives.py index 23addd7899..1f0f3e41e6 100644 --- a/SimPEG/directives/directives.py +++ b/SimPEG/directives/directives.py @@ -64,14 +64,13 @@ class InversionDirective: _dmisfitPair = [BaseDataMisfit, ComboObjectiveFunction] def __init__(self, inversion=None, dmisfit=None, reg=None, verbose=False, **kwargs): + # Raise error on deprecated arguments + if (key := "debug") in kwargs.keys(): + raise TypeError(f"'{key}' property has been removed. Please use 'verbose'.") self.inversion = inversion self.dmisfit = dmisfit self.reg = reg - debug = kwargs.pop("debug", None) - if debug is not None: - self.debug = debug - else: - self.verbose = verbose + self.verbose = verbose set_kwargs(self, **kwargs) @property @@ -89,7 +88,7 @@ def verbose(self, value): self._verbose = validate_type("verbose", value, bool) debug = deprecate_property( - verbose, "debug", "verbose", removal_version="0.19.0", future_warn=True + verbose, "debug", "verbose", removal_version="0.19.0", error=True ) @property diff --git a/SimPEG/directives/sim_directives.py b/SimPEG/directives/sim_directives.py index 718fac26c3..0a3464717d 100644 --- a/SimPEG/directives/sim_directives.py +++ b/SimPEG/directives/sim_directives.py @@ -248,7 +248,7 @@ def initialize(self): if self.seed is not None: np.random.seed(self.seed) - if self.debug: + if self.verbose: print("Calculating the beta0 parameter.") m = self.invProb.model diff --git a/tests/base/test_directives.py b/tests/base/test_directives.py index e4857d08f0..0d2f2f105c 100644 --- a/tests/base/test_directives.py +++ b/tests/base/test_directives.py 
@@ -294,6 +294,20 @@ def test_save_output_dict(RegClass): assert "x SparseSmoothness.norm" in out_dict +class TestDeprecatedArguments: + """ + Test if directives raise errors after passing deprecated arguments. + """ + + def test_debug(self): + """ + Test if InversionDirective raises error after passing 'debug'. + """ + msg = "'debug' property has been removed. Please use 'verbose'." + with pytest.raises(TypeError, match=msg): + directives.InversionDirective(debug=True) + + class TestUpdateSensitivityWeightsRemovedArgs: """ Test if `UpdateSensitivityWeights` raises errors after passing removed arguments. From 01bc7fa94e0f44316cdf134077123fbac904e3d5 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 13 Mar 2024 11:19:44 -0700 Subject: [PATCH 29/68] Remove cellDiff properties of RegularizationMesh (#1371) Remove the `cellDiffx`, `cellDiffy`, and `cellDiffz` properties of `RegularizationMesh`. Part of the solution for #1302 Most of these changes were cherry-picked from #1306 --- SimPEG/regularization/regularization_mesh.py | 9 +++------ tests/base/test_regularization.py | 4 ++-- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/SimPEG/regularization/regularization_mesh.py b/SimPEG/regularization/regularization_mesh.py index 0300adfc44..713ed8b630 100755 --- a/SimPEG/regularization/regularization_mesh.py +++ b/SimPEG/regularization/regularization_mesh.py @@ -521,24 +521,21 @@ def cell_gradient_z(self) -> sp.csr_matrix: "cellDiffx", "cell_gradient_x", "0.19.0", - error=False, - future_warn=True, + error=True, ) cellDiffy = deprecate_property( cell_gradient_y, "cellDiffy", "cell_gradient_y", "0.19.0", - error=False, - future_warn=True, + error=True, ) cellDiffz = deprecate_property( cell_gradient_z, "cellDiffz", "cell_gradient_z", "0.19.0", - error=False, - future_warn=True, + error=True, ) @property diff --git a/tests/base/test_regularization.py b/tests/base/test_regularization.py index 3000d23460..207e8d9cc8 100644 --- 
a/tests/base/test_regularization.py +++ b/tests/base/test_regularization.py @@ -396,14 +396,14 @@ def test_linked_properties(self): ] [self.assertTrue(reg.mapping is fct.mapping) for fct in reg.objfcts] - D = reg.regularization_mesh.cellDiffx + D = reg.regularization_mesh.cell_gradient_x reg.regularization_mesh._cell_gradient_x = 4 * D v = np.random.rand(D.shape[1]) [ self.assertTrue( np.all( reg.regularization_mesh._cell_gradient_x * v - == fct.regularization_mesh.cellDiffx * v + == fct.regularization_mesh.cell_gradient_x * v ) ) for fct in reg.objfcts From c9acbd4563ed991564610177e19faa9e5c855a61 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 13 Mar 2024 14:44:11 -0700 Subject: [PATCH 30/68] Remove deprecated bits of code (#1372) Remove some additional deprecated bits of code throughout SimPEG's codebase. Part of the solution for #1302 Most of these changes were cherry-picked from #1306 --- SimPEG/directives/directives.py | 4 +- .../frequency_domain/receivers.py | 12 +- .../frequency_domain/sources.py | 15 +- .../natural_source/receivers.py | 22 +-- .../static/resistivity/IODC.py | 12 +- .../static/resistivity/survey.py | 10 +- .../electromagnetics/static/utils/__init__.py | 12 -- .../static/utils/static_utils.py | 168 ------------------ .../electromagnetics/time_domain/receivers.py | 14 +- .../electromagnetics/time_domain/sources.py | 80 +-------- SimPEG/potential_fields/base.py | 8 - SimPEG/utils/__init__.py | 2 +- SimPEG/utils/code_utils.py | 22 +-- SimPEG/utils/coord_utils.py | 4 +- SimPEG/utils/curv_utils.py | 8 +- SimPEG/utils/io_utils/__init__.py | 8 - SimPEG/utils/io_utils/io_utils_pf.py | 32 ---- SimPEG/utils/mat_utils.py | 16 +- SimPEG/utils/mesh_utils.py | 6 +- tests/base/test_utils.py | 6 +- tests/em/fdem/forward/test_FDEM_sources.py | 27 +-- tests/em/fdem/forward/test_properties.py | 16 +- tests/em/tdem/test_TDEM_sources.py | 24 +-- tests/em/tdem/test_properties.py | 10 +- 24 files changed, 97 insertions(+), 441 deletions(-) diff --git 
a/SimPEG/directives/directives.py b/SimPEG/directives/directives.py index 1f0f3e41e6..3907ca646e 100644 --- a/SimPEG/directives/directives.py +++ b/SimPEG/directives/directives.py @@ -22,7 +22,7 @@ mkvc, set_kwargs, sdiag, - diagEst, + estimate_diagonal, spherical2cartesian, cartesian2spherical, Zero, @@ -2418,7 +2418,7 @@ def JtJv(v): return self.simulation.Jtvec(m, Jv) - JtJdiag = diagEst(JtJv, len(m), k=self.k) + JtJdiag = estimate_diagonal(JtJv, len(m), k=self.k) JtJdiag = JtJdiag / max(JtJdiag) self.reg.wght = JtJdiag diff --git a/SimPEG/electromagnetics/frequency_domain/receivers.py b/SimPEG/electromagnetics/frequency_domain/receivers.py index b6423da9a0..a28c840ff3 100644 --- a/SimPEG/electromagnetics/frequency_domain/receivers.py +++ b/SimPEG/electromagnetics/frequency_domain/receivers.py @@ -1,6 +1,5 @@ from ... import survey from ...utils import validate_string, validate_type, validate_direction -import warnings from discretize.utils import Zero @@ -33,15 +32,8 @@ def __init__( use_source_receiver_offset=False, **kwargs, ): - proj = kwargs.pop("projComp", None) - if proj is not None: - warnings.warn( - "'projComp' overrides the 'orientation' property which automatically" - " handles the projection from the mesh the receivers!!! 
" - "'projComp' is deprecated and will be removed in SimPEG 0.19.0.", - stacklevel=2, - ) - self.projComp = proj + if (key := "projComp") in kwargs.keys(): + raise TypeError(f"'{key}' property has been removed.") self.orientation = orientation self.component = component diff --git a/SimPEG/electromagnetics/frequency_domain/sources.py b/SimPEG/electromagnetics/frequency_domain/sources.py index bc54e0fdf4..1c739df1b7 100644 --- a/SimPEG/electromagnetics/frequency_domain/sources.py +++ b/SimPEG/electromagnetics/frequency_domain/sources.py @@ -770,11 +770,12 @@ def __init__( **kwargs, ): kwargs.pop("moment", None) - N = kwargs.pop("N", None) - if N is not None: - self.N = N - else: - self.n_turns = n_turns + + # Raise error on deprecated arguments + if (key := "N") in kwargs.keys(): + raise TypeError(f"'{key}' property has been removed. Please use 'n_turns'.") + self.n_turns = n_turns + super().__init__( receiver_list=receiver_list, frequency=frequency, @@ -871,7 +872,9 @@ def _srcFct(self, obsLoc, coordinates="cartesian"): ) return self.n_turns * self._loop.vector_potential(obsLoc, coordinates) - N = deprecate_property(n_turns, "N", "n_turns", removal_version="0.19.0") + N = deprecate_property( + n_turns, "N", "n_turns", removal_version="0.19.0", error=True + ) class PrimSecSigma(BaseFDEMSrc): diff --git a/SimPEG/electromagnetics/natural_source/receivers.py b/SimPEG/electromagnetics/natural_source/receivers.py index e7c9845630..f0b74fdca4 100644 --- a/SimPEG/electromagnetics/natural_source/receivers.py +++ b/SimPEG/electromagnetics/natural_source/receivers.py @@ -1,4 +1,4 @@ -from ...utils.code_utils import deprecate_class, validate_string +from ...utils.code_utils import validate_string import numpy as np from scipy.constants import mu_0 @@ -613,23 +613,3 @@ def evalDeriv(self, src, mesh, f, du_dm_v=None, v=None, adjoint=False): if adjoint: return imp_deriv return getattr(imp_deriv, self.component) - - -############ -# Deprecated -############ - - 
-@deprecate_class(removal_version="0.19.0", error=True) -class Point_impedance1D(PointNaturalSource): - pass - - -@deprecate_class(removal_version="0.19.0", error=True) -class Point_impedance3D(PointNaturalSource): - pass - - -@deprecate_class(removal_version="0.19.0", error=True) -class Point_tipper3D(Point3DTipper): - pass diff --git a/SimPEG/electromagnetics/static/resistivity/IODC.py b/SimPEG/electromagnetics/static/resistivity/IODC.py index 2fc42de988..cc47403d7e 100644 --- a/SimPEG/electromagnetics/static/resistivity/IODC.py +++ b/SimPEG/electromagnetics/static/resistivity/IODC.py @@ -10,7 +10,7 @@ from ....utils import ( sdiag, - uniqueRows, + unique_rows, plot2Ddata, validate_type, validate_integer, @@ -728,12 +728,6 @@ def geometric_factor(self, survey): G = geometric_factor(survey, space_type=self.space_type) return G - def from_ambn_locations_to_survey(self, *args, **kwargs): - raise NotImplementedError( - "from_ambn_locations_to_survey has been renamed to " - "from_abmn_locations_to_survey. 
It will be removed in a future version 0.17.0 of simpeg", - ) - def from_abmn_locations_to_survey( self, a_locations, @@ -767,8 +761,8 @@ def from_abmn_locations_to_survey( if times_ip is not None: self.times_ip = times_ip - uniqSrc = uniqueRows(np.c_[self.a_locations, self.b_locations]) - uniqElec = uniqueRows( + uniqSrc = unique_rows(np.c_[self.a_locations, self.b_locations]) + uniqElec = unique_rows( np.vstack( (self.a_locations, self.b_locations, self.m_locations, self.n_locations) ) diff --git a/SimPEG/electromagnetics/static/resistivity/survey.py b/SimPEG/electromagnetics/static/resistivity/survey.py index ecbbe58c37..cb02a32e65 100644 --- a/SimPEG/electromagnetics/static/resistivity/survey.py +++ b/SimPEG/electromagnetics/static/resistivity/survey.py @@ -1,6 +1,6 @@ import numpy as np -from ....utils.code_utils import deprecate_property, validate_string +from ....utils.code_utils import validate_string from ....survey import BaseSurvey from ..utils import drapeTopotoLoc @@ -152,14 +152,6 @@ def unique_electrode_locations(self): loc_n = self.locations_n return np.unique(np.vstack((loc_a, loc_b, loc_m, loc_n)), axis=0) - electrode_locations = deprecate_property( - unique_electrode_locations, - "electrode_locations", - new_name="unique_electrode_locations", - removal_version="0.17.0", - error=True, - ) - @property def source_locations(self): """ diff --git a/SimPEG/electromagnetics/static/utils/__init__.py b/SimPEG/electromagnetics/static/utils/__init__.py index 1e5ac6adbb..a0d69fe512 100644 --- a/SimPEG/electromagnetics/static/utils/__init__.py +++ b/SimPEG/electromagnetics/static/utils/__init__.py @@ -48,34 +48,22 @@ """ from .static_utils import ( electrode_separations, - source_receiver_midpoints, pseudo_locations, geometric_factor, apparent_resistivity_from_voltage, - apparent_resistivity, plot_pseudosection, generate_dcip_survey, - generate_dcip_survey_line, - gen_DCIPsurvey, generate_dcip_sources_line, generate_survey_from_abmn_locations, - 
writeUBC_DCobs, - writeUBC_DClocs, convert_survey_3d_to_2d_lines, - convertObs_DC3D_to_2D, - readUBC_DC2Dpre, - readUBC_DC3Dobs, xy_2_lineID, r_unit, - getSrc_locs, gettopoCC, drapeTopotoLoc, genTopography, closestPointsGrid, gen_3d_survey_from_2d_lines, plot_1d_layer_model, - plot_layer, - plot_pseudoSection, ) # Import if user has plotly diff --git a/SimPEG/electromagnetics/static/utils/static_utils.py b/SimPEG/electromagnetics/static/utils/static_utils.py index c52aaf81d2..46d43aa0a5 100644 --- a/SimPEG/electromagnetics/static/utils/static_utils.py +++ b/SimPEG/electromagnetics/static/utils/static_utils.py @@ -23,7 +23,6 @@ from ....utils.plot_utils import plot_1d_layer_model # noqa: F401 -from ....utils.code_utils import deprecate_method try: import plotly.graph_objects as grapho @@ -1816,170 +1815,3 @@ def gen_3d_survey_from_2d_lines( line_inds=line_inds, ) return IO_3d, survey_3d - - -############ -# Deprecated -############ - - -def plot_pseudoSection( - data, - ax=None, - survey_type="dipole-dipole", - data_type="appConductivity", - space_type="half-space", - clim=None, - scale="linear", - sameratio=True, - pcolor_opts=None, - data_location=False, - dobs=None, - dim=2, -): - raise TypeError( - "The plot_pseudoSection method has been removed. Please use " - "plot_pseudosection instead." - ) - - -def apparent_resistivity( - data_object, - survey_type=None, - space_type="half space", - dobs=None, - eps=1e-10, - **kwargs, -): - raise TypeError( - "The apparent_resistivity method has been removed. Please use " - "apparent_resistivity_from_voltage instead." - ) - - -source_receiver_midpoints = deprecate_method( - pseudo_locations, "source_receiver_midpoints", "0.17.0", error=True -) - - -def plot_layer(rho, mesh, **kwargs): - raise NotImplementedError( - "The plot_layer method has been deprecated. Please use " - "plot_1d_layer_model instead. 
This will be removed in version" - " 0.17.0 of SimPEG", - ) - - -def convertObs_DC3D_to_2D(survey, lineID, flag="local"): - raise TypeError( - "The convertObs_DC3D_to_2D method has been removed. Please use " - "convert_3d_survey_to_2d." - ) - - -def getSrc_locs(survey): - raise NotImplementedError( - "The getSrc_locs method has been deprecated. Source " - "locations are now computed as a method of the survey " - "class. Please use Survey.source_locations(). This method " - " will be removed in version 0.17.0 of SimPEG", - ) - - -def writeUBC_DCobs( - fileName, - data, - dim, - format_type, - survey_type="dipole-dipole", - ip_type=0, - comment_lines="", -): - # """ - # Write UBC GIF DCIP 2D or 3D observation file - - # Input: - # :param str fileName: including path where the file is written out - # :param SimPEG.Data data: DC data object - # :param int dim: either 2 | 3 - # :param str format_type: either 'surface' | 'general' | 'simple' - # :param str survey_type: 'dipole-dipole' | 'pole-dipole' | - # 'dipole-pole' | 'pole-pole' | 'gradient' - - # Output: - # :return: UBC2D-Data file - # :rtype: file - # """ - - raise NotImplementedError( - "The writeUBC_DCobs method has been deprecated. Please use " - "write_dcip2d_ubc or write_dcip3d_ubc instead. These are imported " - "from SimPEG.utils.io_utils. 
This function will be removed in version" - " 0.17.0 of SimPEG", - ) - - -def writeUBC_DClocs( - fileName, - dc_survey, - dim, - format_type, - survey_type="dipole-dipole", - ip_type=0, - comment_lines="", -): - # """ - # Write UBC GIF DCIP 2D or 3D locations file - - # Input: - # :param str fileName: including path where the file is written out - # :param SimPEG.electromagnetics.static.resistivity.Survey dc_survey: DC survey object - # :param int dim: either 2 | 3 - # :param str survey_type: either 'SURFACE' | 'GENERAL' - - # Output: - # :rtype: file - # :return: UBC 2/3D-locations file - # """ - - raise NotImplementedError( - "The writeUBC_DClocs method has been deprecated. Please use " - "write_dcip2d_ubc or write_dcip3d_ubc instead. These are imported " - "from SimPEG.utils.io_utils. This function will be removed in version" - " 0.17.0 of SimPEG", - FutureWarning, - ) - - -def readUBC_DC2Dpre(fileName): - raise NotImplementedError( - "The readUBC_DC2Dpre method has been deprecated. Please use " - "read_dcip2d_ubc instead. This is imported " - "from SimPEG.utils.io_utils. This function will be removed in version" - " 0.17.0 of SimPEG", - ) - - -def readUBC_DC3Dobs(fileName, data_type="volt"): - raise NotImplementedError( - "The readUBC_DC3Dobs method has been deprecated. Please use " - "read_dcip3d_ubc instead. This is imported " - "from SimPEG.utils.io_utils. This function will be removed in version" - " 0.17.0 of SimPEG", - ) - - -gen_DCIPsurvey = deprecate_method( - generate_dcip_survey, "gen_DCIPsurvey", removal_version="0.17.0", error=True -) - - -def generate_dcip_survey_line( - survey_type, data_type, endl, topo, ds, dh, n, dim_flag="2.5D", sources_only=False -): - raise NotImplementedError( - "The gen_dcip_survey_line method has been deprecated. Please use " - "generate_dcip_sources_line instead. 
This will be removed in version" - " 0.17.0 of SimPEG", - FutureWarning, - ) diff --git a/SimPEG/electromagnetics/time_domain/receivers.py b/SimPEG/electromagnetics/time_domain/receivers.py index 3179c527af..9d6e46ccbb 100644 --- a/SimPEG/electromagnetics/time_domain/receivers.py +++ b/SimPEG/electromagnetics/time_domain/receivers.py @@ -3,7 +3,6 @@ from ...utils import mkvc, validate_type, validate_direction from discretize.utils import Zero from ...survey import BaseTimeRx -import warnings class BaseRx(BaseTimeRx): @@ -25,17 +24,10 @@ def __init__( times, orientation="z", use_source_receiver_offset=False, - **kwargs + **kwargs, ): - proj = kwargs.pop("projComp", None) - if proj is not None: - warnings.warn( - "'projComp' overrides the 'orientation' property which automatically" - " handles the projection from the mesh the receivers!!! " - "'projComp' is deprecated and will be removed in SimPEG 0.19.0.", - stacklevel=2, - ) - self.projComp = proj + if (key := "projComp") in kwargs.keys(): + raise TypeError(f"'{key}' property has been removed.") if locations is None: raise AttributeError("'locations' are required. 
Cannot be 'None'") diff --git a/SimPEG/electromagnetics/time_domain/sources.py b/SimPEG/electromagnetics/time_domain/sources.py index fa37081259..a3c0a64eb9 100644 --- a/SimPEG/electromagnetics/time_domain/sources.py +++ b/SimPEG/electromagnetics/time_domain/sources.py @@ -139,33 +139,6 @@ def eval_deriv(self, time): """ raise NotImplementedError # needed for E-formulation - ########################## - # Deprecated - ########################## - hasInitialFields = deprecate_property( - has_initial_fields, - "hasInitialFields", - new_name="has_initial_fields", - removal_version="0.17.0", - error=True, - ) - - offTime = deprecate_property( - off_time, - "offTime", - new_name="off_time", - removal_version="0.17.0", - error=True, - ) - - eps = deprecate_property( - epsilon, - "eps", - new_name="epsilon", - removal_version="0.17.0", - error=True, - ) - class StepOffWaveform(BaseWaveform): """ @@ -316,14 +289,6 @@ def waveform_function(self, value): def eval(self, time): # noqa: A003 return self.waveform_function(time) - waveFct = deprecate_property( - waveform_function, - "waveFct", - new_name="waveform_function", - removal_version="0.17.0", - error=True, - ) - class VTEMWaveform(BaseWaveform): """ @@ -428,26 +393,6 @@ def eval_deriv(self, time): def time_nodes(self): return np.r_[0, self.peak_time, self.off_time] - ########################## - # Deprecated - ########################## - - peakTime = deprecate_property( - peak_time, - "peakTime", - new_name="peak_time", - removal_version="0.17.0", - error=True, - ) - - a = deprecate_property( - ramp_on_rate, - "a", - new_name="ramp_on_rate", - removal_version="0.17.0", - error=True, - ) - class TrapezoidWaveform(BaseWaveform): """ @@ -625,18 +570,6 @@ def peak_time(self, value): self._ramp_on = np.r_[self._ramp_on[0], value] self._ramp_off = np.r_[value, self._ramp_off[1]] - ########################## - # Deprecated - ########################## - - peakTime = deprecate_property( - peak_time, - "peakTime", - 
new_name="peak_time", - removal_version="0.17.0", - error=True, - ) - class QuarterSineRampOnWaveform(TrapezoidWaveform): """ @@ -1565,11 +1498,10 @@ def __init__( if "moment" in kwargs: kwargs.pop("moment") - N = kwargs.pop("N", None) - if N is not None: - self.N = N - else: - self.n_turns = n_turns + # Raise error on deprecated arguments + if (key := "N") in kwargs.keys(): + raise TypeError(f"'{key}' property has been removed. Please use 'n_turns'.") + self.n_turns = n_turns BaseTDEMSrc.__init__( self, receiver_list=receiver_list, location=location, moment=None, **kwargs @@ -1667,7 +1599,9 @@ def _srcFct(self, obsLoc, coordinates="cartesian"): ) return self.n_turns * self._loop.vector_potential(obsLoc, coordinates) - N = deprecate_property(n_turns, "N", "n_turns", removal_version="0.19.0") + N = deprecate_property( + n_turns, "N", "n_turns", removal_version="0.19.0", error=True + ) class LineCurrent(BaseTDEMSrc): diff --git a/SimPEG/potential_fields/base.py b/SimPEG/potential_fields/base.py index 753a80f8fd..9c93895039 100644 --- a/SimPEG/potential_fields/base.py +++ b/SimPEG/potential_fields/base.py @@ -199,14 +199,6 @@ def ind_active(self): """ return self._ind_active - @property - def actInd(self): - """'actInd' is deprecated. Use 'ind_active' instead.""" - raise AttributeError( - "The 'actInd' property has been deprecated. " - "Please use 'ind_active'. This will be removed in version 0.17.0 of SimPEG.", - ) - def linear_operator(self): """Return linear operator. 
diff --git a/SimPEG/utils/__init__.py b/SimPEG/utils/__init__.py index 49e3e6193a..91d142673d 100644 --- a/SimPEG/utils/__init__.py +++ b/SimPEG/utils/__init__.py @@ -238,7 +238,7 @@ # Deprecated imports interpmat = deprecate_function( - interpolation_matrix, "interpmat", removal_version="0.19.0", future_warn=True + interpolation_matrix, "interpmat", removal_version="0.19.0", error=True ) from .code_utils import ( diff --git a/SimPEG/utils/code_utils.py b/SimPEG/utils/code_utils.py index 87e4c1bede..58e759118d 100644 --- a/SimPEG/utils/code_utils.py +++ b/SimPEG/utils/code_utils.py @@ -322,7 +322,7 @@ def call_hooks(match, mainFirst=False): Use the following syntax:: - @callHooks('doEndIteration') + @call_hooks('doEndIteration') def doEndIteration(self): pass @@ -1233,32 +1233,32 @@ def validate_active_indices(property_name, index_arr, n_cells): # DEPRECATIONS ############################################################### memProfileWrapper = deprecate_function( - mem_profile_class, "memProfileWrapper", removal_version="0.18.0", future_warn=True + mem_profile_class, "memProfileWrapper", removal_version="0.18.0", error=True ) setKwargs = deprecate_function( - set_kwargs, "setKwargs", removal_version="0.18.0", future_warn=True + set_kwargs, "setKwargs", removal_version="0.18.0", error=True ) printTitles = deprecate_function( - print_titles, "printTitles", removal_version="0.18.0", future_warn=True + print_titles, "printTitles", removal_version="0.18.0", error=True ) printLine = deprecate_function( - print_line, "printLine", removal_version="0.18.0", future_warn=True + print_line, "printLine", removal_version="0.18.0", error=True ) printStoppers = deprecate_function( - print_stoppers, "printStoppers", removal_version="0.18.0", future_warn=True + print_stoppers, "printStoppers", removal_version="0.18.0", error=True ) checkStoppers = deprecate_function( - check_stoppers, "checkStoppers", removal_version="0.18.0", future_warn=True + check_stoppers, "checkStoppers", 
removal_version="0.18.0", error=True ) printDone = deprecate_function( - print_done, "printDone", removal_version="0.18.0", future_warn=True + print_done, "printDone", removal_version="0.18.0", error=True ) callHooks = deprecate_function( - call_hooks, "callHooks", removal_version="0.18.0", future_warn=True + call_hooks, "callHooks", removal_version="0.18.0", error=True ) dependentProperty = deprecate_function( - dependent_property, "dependentProperty", removal_version="0.18.0", future_warn=True + dependent_property, "dependentProperty", removal_version="0.18.0", error=True ) asArray_N_x_Dim = deprecate_function( - as_array_n_by_dim, "asArray_N_x_Dim", removal_version="0.19.0", future_warn=True + as_array_n_by_dim, "asArray_N_x_Dim", removal_version="0.19.0", error=True ) diff --git a/SimPEG/utils/coord_utils.py b/SimPEG/utils/coord_utils.py index bb46021ba9..e1d17c5dbf 100644 --- a/SimPEG/utils/coord_utils.py +++ b/SimPEG/utils/coord_utils.py @@ -9,11 +9,11 @@ rotation_matrix_from_normals, "rotationMatrixFromNormals", removal_version="0.19.0", - future_warn=True, + error=True, ) rotatePointsFromNormals = deprecate_function( rotate_points_from_normals, "rotatePointsFromNormals", removal_version="0.19.0", - future_warn=True, + error=True, ) diff --git a/SimPEG/utils/curv_utils.py b/SimPEG/utils/curv_utils.py index 6f516db1c9..71e764ce60 100644 --- a/SimPEG/utils/curv_utils.py +++ b/SimPEG/utils/curv_utils.py @@ -8,17 +8,17 @@ # deprecated functions volTetra = deprecate_function( - volume_tetrahedron, "volTetra", removal_version="0.19.0", future_warn=True + volume_tetrahedron, "volTetra", removal_version="0.19.0", error=True ) indexCube = deprecate_function( - index_cube, "indexCube", removal_version="0.19.0", future_warn=True + index_cube, "indexCube", removal_version="0.19.0", error=True ) faceInfo = deprecate_function( - face_info, "faceInfo", removal_version="0.19.0", future_warn=True + face_info, "faceInfo", removal_version="0.19.0", error=True ) exampleLrmGrid 
= deprecate_function( example_curvilinear_grid, "exampleLrmGrid", removal_version="0.19.0", - future_warn=True, + error=True, ) diff --git a/SimPEG/utils/io_utils/__init__.py b/SimPEG/utils/io_utils/__init__.py index b3226b2c2e..14d628ab3d 100644 --- a/SimPEG/utils/io_utils/__init__.py +++ b/SimPEG/utils/io_utils/__init__.py @@ -18,11 +18,3 @@ write_dcipoctree_ubc, write_dcip_xyz, ) - -# Deprecated -from .io_utils_pf import ( - readUBCmagneticsObservations, - writeUBCmagneticsObservations, - readUBCgravityObservations, - writeUBCgravityObservations, -) diff --git a/SimPEG/utils/io_utils/io_utils_pf.py b/SimPEG/utils/io_utils/io_utils_pf.py index b5048d908a..9387896481 100644 --- a/SimPEG/utils/io_utils/io_utils_pf.py +++ b/SimPEG/utils/io_utils/io_utils_pf.py @@ -1,6 +1,5 @@ import numpy as np from discretize.utils import mkvc -from ...utils.code_utils import deprecate_method def read_mag3d_ubc(obs_file): @@ -379,34 +378,3 @@ def write_gg3d_ubc(filename, data_object): ) print("Observation file saved to: " + filename) - - -# ====================================================== -# Depricated Methods -# ====================================================== - - -readUBCmagneticsObservations = deprecate_method( - read_mag3d_ubc, - "readUBCmagneticsObservations", - removal_version="0.14.4", - error=True, -) -writeUBCmagneticsObservations = deprecate_method( - write_mag3d_ubc, - "writeUBCmagneticsObservations", - removal_version="0.14.4", - error=True, -) -readUBCgravityObservations = deprecate_method( - read_grav3d_ubc, - "readUBCgravityObservations", - removal_version="0.14.4", - error=True, -) -writeUBCgravityObservations = deprecate_method( - write_grav3d_ubc, - "writeUBCgravityObservations", - removal_version="0.14.4", - error=True, -) diff --git a/SimPEG/utils/mat_utils.py b/SimPEG/utils/mat_utils.py index c7b3d07fcb..46798e75dd 100644 --- a/SimPEG/utils/mat_utils.py +++ b/SimPEG/utils/mat_utils.py @@ -451,36 +451,36 @@ def define_plane_from_points(xyz1, xyz2, 
xyz3): diagEst = deprecate_function( - estimate_diagonal, "diagEst", removal_version="0.19.0", future_warn=True + estimate_diagonal, "diagEst", removal_version="0.19.0", error=True ) uniqueRows = deprecate_function( - unique_rows, "uniqueRows", removal_version="0.19.0", future_warn=True + unique_rows, "uniqueRows", removal_version="0.19.0", error=True ) -sdInv = deprecate_function(sdinv, "sdInv", removal_version="0.19.0", future_warn=True) +sdInv = deprecate_function(sdinv, "sdInv", removal_version="0.19.0", error=True) getSubArray = deprecate_function( - get_subarray, "getSubArray", removal_version="0.19.0", future_warn=True + get_subarray, "getSubArray", removal_version="0.19.0", error=True ) inv3X3BlockDiagonal = deprecate_function( inverse_3x3_block_diagonal, "inv3X3BlockDiagonal", removal_version="0.19.0", - future_warn=True, + error=True, ) inv2X2BlockDiagonal = deprecate_function( inverse_2x2_block_diagonal, "inv2X2BlockDiagonal", removal_version="0.19.0", - future_warn=True, + error=True, ) makePropertyTensor = deprecate_function( make_property_tensor, "makePropertyTensor", removal_version="0.19.0", - future_warn=True, + error=True, ) invPropertyTensor = deprecate_function( inverse_property_tensor, "invPropertyTensor", removal_version="0.19.0", - future_warn=True, + error=True, ) diff --git a/SimPEG/utils/mesh_utils.py b/SimPEG/utils/mesh_utils.py index 30a7e52143..0d39f5c162 100644 --- a/SimPEG/utils/mesh_utils.py +++ b/SimPEG/utils/mesh_utils.py @@ -101,11 +101,11 @@ def surface2inds(vrtx, trgl, mesh, boundaries=True, internal=True): # DEPRECATED FUNCTIONS ################################################ meshTensor = deprecate_function( - unpack_widths, "meshTensor", removal_version="0.19.0", future_warn=True + unpack_widths, "meshTensor", removal_version="0.19.0", error=True ) closestPoints = deprecate_function( - closest_points_index, "closestPoints", removal_version="0.19.0", future_warn=True + closest_points_index, "closestPoints", 
removal_version="0.19.0", error=True ) ExtractCoreMesh = deprecate_function( - extract_core_mesh, "ExtractCoreMesh", removal_version="0.19.0", future_warn=True + extract_core_mesh, "ExtractCoreMesh", removal_version="0.19.0", error=True ) diff --git a/tests/base/test_utils.py b/tests/base/test_utils.py index 5952b755fc..a63ed30702 100644 --- a/tests/base/test_utils.py +++ b/tests/base/test_utils.py @@ -17,7 +17,7 @@ ind2sub, as_array_n_by_dim, TensorType, - diagEst, + estimate_diagonal, count, timeIt, Counter, @@ -300,14 +300,14 @@ def test_surface2ind_topo(self): assert len(np.where(indtopoN)[0]) == 8211 -class TestDiagEst(unittest.TestCase): +class TestEstimateDiagonal(unittest.TestCase): def setUp(self): self.n = 1000 self.A = np.random.rand(self.n, self.n) self.Adiag = np.diagonal(self.A) def getTest(self, testType): - Adiagtest = diagEst(self.A, self.n, self.n, testType) + Adiagtest = estimate_diagonal(self.A, self.n, self.n, testType) r = np.abs(Adiagtest - self.Adiag) err = r.dot(r) return err diff --git a/tests/em/fdem/forward/test_FDEM_sources.py b/tests/em/fdem/forward/test_FDEM_sources.py index e044e51b28..4b499655c3 100644 --- a/tests/em/fdem/forward/test_FDEM_sources.py +++ b/tests/em/fdem/forward/test_FDEM_sources.py @@ -376,21 +376,22 @@ def test_CircularLoop_bPrimaryMu50_h(self): assert self.bPrimaryTest(src, "j") -def test_CircularLoop_test_N_assign(): +def test_removal_circular_loop_n(): """ - Test depreciation of the N argument (now n_turns) + Test if passing the N argument to CircularLoop raises an error """ - src = fdem.sources.CircularLoop( - [], - frequency=1e-3, - radius=np.sqrt(1 / np.pi), - location=[0, 0, 0], - orientation="Z", - mu=mu_0, - current=0.5, - N=2, - ) - assert src.n_turns == 2 + msg = "'N' property has been removed. Please use 'n_turns'." 
+ with pytest.raises(TypeError, match=msg): + fdem.sources.CircularLoop( + [], + frequency=1e-3, + radius=np.sqrt(1 / np.pi), + location=[0, 0, 0], + orientation="Z", + mu=mu_0, + current=0.5, + N=2, + ) def test_line_current_failures(): diff --git a/tests/em/fdem/forward/test_properties.py b/tests/em/fdem/forward/test_properties.py index aca1cca714..5384c69d17 100644 --- a/tests/em/fdem/forward/test_properties.py +++ b/tests/em/fdem/forward/test_properties.py @@ -5,18 +5,12 @@ from SimPEG.electromagnetics import time_domain as tdem -def test_receiver_properties_validation(): +def test_removed_projcomp(): + """Test if passing the removed `projComp` argument raises an error.""" xyz = np.c_[0.0, 0.0, 0.0] - projComp = "Fx" - rx = fdem.receivers.BaseRx(xyz, projComp=projComp) - - assert rx.projComp == projComp - - with pytest.raises(ValueError): - fdem.receivers.BaseRx(xyz, component="potato") - - with pytest.raises(TypeError): - fdem.receivers.BaseRx(xyz, component=2.0) + msg = "'projComp' property has been removed." + with pytest.raises(TypeError, match=msg): + fdem.receivers.BaseRx(xyz, projComp="foo") def test_source_properties_validation(): diff --git a/tests/em/tdem/test_TDEM_sources.py b/tests/em/tdem/test_TDEM_sources.py index 7332c49db3..8d3aa9b511 100644 --- a/tests/em/tdem/test_TDEM_sources.py +++ b/tests/em/tdem/test_TDEM_sources.py @@ -1,3 +1,4 @@ +import pytest import unittest import numpy as np @@ -527,16 +528,17 @@ def test_simple_source(): assert waveform.eval(0.0) == 1.0 -def test_CircularLoop_test_N_assignment(): +def test_removal_circular_loop_n(): """ - Test depreciation of the N property + Test if passing the N argument to CircularLoop raises an error """ - loop = CircularLoop( - [], - waveform=StepOffWaveform(), - location=np.array([0.0, 0.0, 0.0]), - radius=1.0, - current=0.5, - N=2, - ) - assert loop.n_turns == 2 + msg = "'N' property has been removed. Please use 'n_turns'." 
+ with pytest.raises(TypeError, match=msg): + CircularLoop( + [], + waveform=StepOffWaveform(), + location=np.array([0.0, 0.0, 0.0]), + radius=1.0, + current=0.5, + N=2, + ) diff --git a/tests/em/tdem/test_properties.py b/tests/em/tdem/test_properties.py index 5aa443b45b..1c1bb50136 100644 --- a/tests/em/tdem/test_properties.py +++ b/tests/em/tdem/test_properties.py @@ -4,13 +4,13 @@ from SimPEG.electromagnetics import time_domain as tdem -def test_receiver_properties(): +def test_removed_projcomp(): + """Test if passing the removed `projComp` argument raises an error.""" xyz = np.c_[0.0, 0.0, 0.0] times = np.logspace(-5, -2, 4) - projComp = "Fx" - rx = tdem.receivers.BaseRx(xyz, times, projComp=projComp) - - assert rx.projComp == projComp + msg = "'projComp' property has been removed." + with pytest.raises(TypeError, match=msg): + tdem.receivers.BaseRx(xyz, times, projComp="foo") def test_source_properties(): From 4a3bab119989901d64e6013c40a1f03691acf817 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 14 Mar 2024 09:48:39 -0700 Subject: [PATCH 31/68] Use choclo in gravity tutorials (#1378) Use `engine="choclo"` in tutorials that make use of gravity simulations. Add an admonition instructing on how to use the `engine` argument. 
--- .../03-gravity/plot_1a_gravity_anomaly.py | 20 +++++++++++++++++-- .../03-gravity/plot_1b_gravity_gradiometry.py | 20 +++++++++++++++++-- .../03-gravity/plot_inv_1a_gravity_anomaly.py | 13 +++++++++++- .../plot_inv_1b_gravity_anomaly_irls.py | 13 +++++++++++- .../plot_inv_3_cross_gradient_pf.py | 14 ++++++++++++- ...t_inv_1_joint_pf_pgi_full_info_tutorial.py | 1 + ...lot_inv_2_joint_pf_pgi_no_info_tutorial.py | 1 + 7 files changed, 75 insertions(+), 7 deletions(-) diff --git a/tutorials/03-gravity/plot_1a_gravity_anomaly.py b/tutorials/03-gravity/plot_1a_gravity_anomaly.py index f511e9e2a1..22721d3fcb 100644 --- a/tutorials/03-gravity/plot_1a_gravity_anomaly.py +++ b/tutorials/03-gravity/plot_1a_gravity_anomaly.py @@ -180,19 +180,35 @@ # formulation. # -# Define the forward simulation. By setting the 'store_sensitivities' keyword -# argument to "forward_only", we simulate the data without storing the sensitivities +############################################################################### +# Define the forward simulation. By setting the ``store_sensitivities`` keyword +# argument to ``"forward_only"``, we simulate the data without storing the +# sensitivities. +# + simulation = gravity.simulation.Simulation3DIntegral( survey=survey, mesh=mesh, rhoMap=model_map, ind_active=ind_active, store_sensitivities="forward_only", + engine="choclo", ) +############################################################################### +# .. tip:: +# +# Since SimPEG v0.21.0 we can use `Choclo +# `_ as the engine for running the gravity +# simulations, which results in faster and more memory efficient runs. Just +# pass ``engine="choclo"`` when constructing the simulation. +# + +############################################################################### # Compute predicted data for some model # SimPEG uses right handed coordinate where Z is positive upward. # This causes gravity signals look "inconsistent" with density values in visualization. 
+ dpred = simulation.dpred(model) # Plot diff --git a/tutorials/03-gravity/plot_1b_gravity_gradiometry.py b/tutorials/03-gravity/plot_1b_gravity_gradiometry.py index 06753d7f2d..7ebeea969e 100644 --- a/tutorials/03-gravity/plot_1b_gravity_gradiometry.py +++ b/tutorials/03-gravity/plot_1b_gravity_gradiometry.py @@ -201,17 +201,33 @@ # formulation. # -# Define the forward simulation. By setting the 'store_sensitivities' keyword -# argument to "forward_only", we simulate the data without storing the sensitivities +############################################################################### +# Define the forward simulation. By setting the ``store_sensitivities`` keyword +# argument to ``"forward_only"``, we simulate the data without storing the +# sensitivities +# + simulation = gravity.simulation.Simulation3DIntegral( survey=survey, mesh=mesh, rhoMap=model_map, ind_active=ind_active, store_sensitivities="forward_only", + engine="choclo", ) +############################################################################### +# .. tip:: +# +# Since SimPEG v0.21.0 we can use `Choclo +# `_ as the engine for running the gravity +# simulations, which results in faster and more memory efficient runs. Just +# pass ``engine="choclo"`` when constructing the simulation. +# + +############################################################################### # Compute predicted data for some model + dpred = simulation.dpred(model) n_data = len(dpred) diff --git a/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py b/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py index 968ef898a2..34fd346d83 100644 --- a/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py +++ b/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py @@ -206,9 +206,20 @@ # Here, we define the physics of the gravity problem by using the simulation # class. # +# .. 
tip:: +# +# Since SimPEG v0.21.0 we can use `Choclo +# `_ as the engine for running the gravity +# simulations, which results in faster and more memory efficient runs. Just +# pass ``engine="choclo"`` when constructing the simulation. +# simulation = gravity.simulation.Simulation3DIntegral( - survey=survey, mesh=mesh, rhoMap=model_map, ind_active=ind_active + survey=survey, + mesh=mesh, + rhoMap=model_map, + ind_active=ind_active, + engine="choclo", ) diff --git a/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py b/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py index 23336dee7c..cc658a6d27 100644 --- a/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py +++ b/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py @@ -208,9 +208,20 @@ # Here, we define the physics of the gravity problem by using the simulation # class. # +# .. tip:: +# +# Since SimPEG v0.21.0 we can use `Choclo +# `_ as the engine for running the gravity +# simulations, which results in faster and more memory efficient runs. Just +# pass ``engine="choclo"`` when constructing the simulation. +# simulation = gravity.simulation.Simulation3DIntegral( - survey=survey, mesh=mesh, rhoMap=model_map, ind_active=ind_active + survey=survey, + mesh=mesh, + rhoMap=model_map, + ind_active=ind_active, + engine="choclo", ) diff --git a/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py b/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py index abd1170b8e..cf271661d2 100755 --- a/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py +++ b/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py @@ -275,9 +275,21 @@ # Here, we define the physics of the gravity and magnetic problems by using the simulation # class. # +# .. tip:: +# +# Since SimPEG v0.21.0 we can use `Choclo +# `_ as the engine for running the gravity +# simulations, which results in faster and more memory efficient runs. Just +# pass ``engine="choclo"`` when constructing the simulation. 
+# + simulation_grav = gravity.simulation.Simulation3DIntegral( - survey=survey_grav, mesh=mesh, rhoMap=wires.density, ind_active=ind_active + survey=survey_grav, + mesh=mesh, + rhoMap=wires.density, + ind_active=ind_active, + engine="choclo", ) simulation_mag = magnetics.simulation.Simulation3DIntegral( diff --git a/tutorials/14-pgi/plot_inv_1_joint_pf_pgi_full_info_tutorial.py b/tutorials/14-pgi/plot_inv_1_joint_pf_pgi_full_info_tutorial.py index 59e842354d..96b83c4b36 100644 --- a/tutorials/14-pgi/plot_inv_1_joint_pf_pgi_full_info_tutorial.py +++ b/tutorials/14-pgi/plot_inv_1_joint_pf_pgi_full_info_tutorial.py @@ -233,6 +233,7 @@ mesh=mesh, rhoMap=wires.den, ind_active=actv, + engine="choclo", ) dmis_grav = data_misfit.L2DataMisfit(data=data_grav, simulation=simulation_grav) # Mag problem diff --git a/tutorials/14-pgi/plot_inv_2_joint_pf_pgi_no_info_tutorial.py b/tutorials/14-pgi/plot_inv_2_joint_pf_pgi_no_info_tutorial.py index 6880a7c15c..ba0219e59a 100644 --- a/tutorials/14-pgi/plot_inv_2_joint_pf_pgi_no_info_tutorial.py +++ b/tutorials/14-pgi/plot_inv_2_joint_pf_pgi_no_info_tutorial.py @@ -234,6 +234,7 @@ mesh=mesh, rhoMap=wires.den, ind_active=actv, + engine="choclo", ) dmis_grav = data_misfit.L2DataMisfit(data=data_grav, simulation=simulation_grav) # Mag problem From 316fd699c25a81726283d13658acb0c67f9979d1 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 14 Mar 2024 11:37:06 -0700 Subject: [PATCH 32/68] Remove surface2ind_topo (#1374) Remove the `surface2ind_topo` function and replace its usage for `dicretize.utils.active_xyz`. 
Part of the solution to #1302 --- .../static/utils/static_utils.py | 3 +- SimPEG/utils/__init__.py | 3 +- SimPEG/utils/model_utils.py | 41 ------------------- tests/base/test_utils.py | 24 ----------- 4 files changed, 2 insertions(+), 69 deletions(-) diff --git a/SimPEG/electromagnetics/static/utils/static_utils.py b/SimPEG/electromagnetics/static/utils/static_utils.py index 46d43aa0a5..573199e260 100644 --- a/SimPEG/electromagnetics/static/utils/static_utils.py +++ b/SimPEG/electromagnetics/static/utils/static_utils.py @@ -10,7 +10,6 @@ from .. import resistivity as dc from ....utils import ( mkvc, - surface2ind_topo, model_builder, define_plane_from_points, ) @@ -1626,7 +1625,7 @@ def drapeTopotoLoc(mesh, pts, ind_active=None, option="top", topo=None, **kwargs raise ValueError("Unsupported mesh dimension") if ind_active is None: - ind_active = surface2ind_topo(mesh, topo) + ind_active = discretize.utils.active_from_xyz(mesh, topo) if mesh._meshType == "TENSOR": meshtemp, topoCC = gettopoCC(mesh, ind_active, option=option) diff --git a/SimPEG/utils/__init__.py b/SimPEG/utils/__init__.py index 91d142673d..c53f37ec75 100644 --- a/SimPEG/utils/__init__.py +++ b/SimPEG/utils/__init__.py @@ -76,7 +76,6 @@ :toctree: generated/ depth_weighting - surface2ind_topo model_builder.add_block model_builder.create_2_layer_model model_builder.create_block_in_wholespace @@ -225,7 +224,7 @@ rotation_matrix_from_normals, rotate_points_from_normals, ) -from .model_utils import surface2ind_topo, depth_weighting +from .model_utils import depth_weighting from .plot_utils import plot2Ddata, plotLayer, plot_1d_layer_model from .io_utils import download from .pgi_utils import ( diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index 2bdd99b42a..91df15da71 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -3,47 +3,6 @@ from scipy.interpolate import griddata from scipy.spatial import cKDTree import scipy.sparse as sp -from discretize.utils 
import active_from_xyz -import warnings - - -def surface2ind_topo(mesh, topo, gridLoc="CC", method="nearest", fill_value=np.nan): - """Get indices of active cells from topography. - - For a mesh and surface topography, this function returns the indices of cells - lying below the discretized surface topography. - - Parameters - ---------- - mesh : discretize.TensorMesh or discretize.TreeMesh - Mesh on which you want to identify active cells - topo : (n, 3) numpy.ndarray - Topography data as a ``numpyndarray`` with columns [x,y,z]; can use [x,z] for 2D meshes. - Topography data can be unstructured. - gridLoc : str {'CC', 'N'} - If 'CC', all cells whose centers are below the topography are active cells. - If 'N', then cells must lie entirely below the topography in order to be active cells. - method : str {'nearest','linear'} - Interpolation method for approximating topography at cell's horizontal position. - Default is 'nearest'. - fill_value : float - Defines the elevation for cells outside the horizontal extent of the topography data. - Default is :py:class:`numpy.nan`. - - Returns - ------- - (n_active) numpy.ndarray of int - Indices of active cells below xyz. - """ - warnings.warn( - "The surface2ind_topo function has been deprecated, please import " - "discretize.utils.active_from_xyz. 
This will be removed in SimPEG 0.20.0", - FutureWarning, - stacklevel=2, - ) - - active_cells = active_from_xyz(mesh, topo, gridLoc, method) - return np.arange(mesh.n_cells)[active_cells] def surface_layer_index(mesh, topo, index=0): diff --git a/tests/base/test_utils.py b/tests/base/test_utils.py index a63ed30702..88938828ca 100644 --- a/tests/base/test_utils.py +++ b/tests/base/test_utils.py @@ -22,7 +22,6 @@ timeIt, Counter, download, - surface2ind_topo, coterminal, ) import discretize @@ -276,29 +275,6 @@ def test_as_array_n_by_dim(self): self.assertTrue(np.all(true == listArray)) self.assertTrue(true.shape == listArray.shape) - def test_surface2ind_topo(self): - file_url = ( - "https://storage.googleapis.com/simpeg/tests/utils/vancouver_topo.xyz" - ) - file2load = download(file_url) - vancouver_topo = np.loadtxt(file2load) - mesh_topo = discretize.TensorMesh( - [[(500.0, 24)], [(500.0, 20)], [(10.0, 30)]], x0="CCC" - ) - - # To keep consistent with result from deprecated function - vancouver_topo[:, 2] = vancouver_topo[:, 2] + 1e-8 - - indtopoCC = surface2ind_topo( - mesh_topo, vancouver_topo, gridLoc="CC", method="nearest" - ) - indtopoN = surface2ind_topo( - mesh_topo, vancouver_topo, gridLoc="N", method="nearest" - ) - - assert len(np.where(indtopoCC)[0]) == 8728 - assert len(np.where(indtopoN)[0]) == 8211 - class TestEstimateDiagonal(unittest.TestCase): def setUp(self): From d8c90424df8d9d7d738397f723ea5c91bde30784 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Thu, 14 Mar 2024 22:16:35 -0600 Subject: [PATCH 33/68] Speed up sphinx documentation building (#1382) #### Summary Stops the left sidebar from being completely expandable for every single item in the documentation. https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/navigation.html#remove-reveal-buttons-for-sidebar-items #### Reference issue No number... but the docs take so long to build now.... #### What does this implement/fix? 
Faster doc building (excluding the examples and tutorials) #### Additional information (https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/performance.html --- docs/_templates/autosummary/attribute.rst | 7 +++++++ docs/_templates/autosummary/base.rst | 9 +++++++++ docs/_templates/autosummary/method.rst | 7 +++++++ docs/conf.py | 5 ++--- 4 files changed, 25 insertions(+), 3 deletions(-) create mode 100644 docs/_templates/autosummary/attribute.rst create mode 100644 docs/_templates/autosummary/base.rst create mode 100644 docs/_templates/autosummary/method.rst diff --git a/docs/_templates/autosummary/attribute.rst b/docs/_templates/autosummary/attribute.rst new file mode 100644 index 0000000000..820f45286e --- /dev/null +++ b/docs/_templates/autosummary/attribute.rst @@ -0,0 +1,7 @@ +:orphan: + +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. auto{{ objtype }}:: {{ objname }} \ No newline at end of file diff --git a/docs/_templates/autosummary/base.rst b/docs/_templates/autosummary/base.rst new file mode 100644 index 0000000000..ef8e6277cb --- /dev/null +++ b/docs/_templates/autosummary/base.rst @@ -0,0 +1,9 @@ +{% if objtype == 'property' %} +:orphan: +{% endif %} + +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. auto{{ objtype }}:: {{ objname }} \ No newline at end of file diff --git a/docs/_templates/autosummary/method.rst b/docs/_templates/autosummary/method.rst new file mode 100644 index 0000000000..820f45286e --- /dev/null +++ b/docs/_templates/autosummary/method.rst @@ -0,0 +1,7 @@ +:orphan: + +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. 
auto{{ objtype }}:: {{ objname }} \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index cea63da1c6..72ee283c7f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -55,9 +55,7 @@ autosummary_generate = True numpydoc_attributes_as_param_list = False -# This has to be set to false in order to make the doc build in a -# reasonable amount of time. -numpydoc_show_inherited_class_members = False +numpydoc_show_inherited_class_members = True # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] @@ -274,6 +272,7 @@ def linkcode_resolve(domain, info): }, ], "use_edit_page_button": False, + "collapse_navigation": True, } html_logo = "images/simpeg-logo.png" From 27f382f6d5a7326a2318b006f90a3e60d18702f0 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Fri, 15 Mar 2024 09:11:47 -0700 Subject: [PATCH 34/68] Add docs/sg_execution_times.rst to .gitignore (#1380) #### Summary Ignore the `docs/sg_execution_times.rst` file generated by Sphinx Gallery when building the docs. It contains the execution times of every example in the gallery. We don't need to version control this file. --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index f6cf59ad22..d85d053a89 100644 --- a/.gitignore +++ b/.gitignore @@ -49,6 +49,7 @@ docs/content/api/generated/* docs/content/examples/* docs/content/tutorials/* docs/modules/* +docs/sg_execution_times.rst .vscode/* # paths to where data are downloaded From 1cb3cea886bb27d46d483790a9e1b76509ce7946 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Fri, 15 Mar 2024 13:07:59 -0700 Subject: [PATCH 35/68] Describe merge process of Pull Requests in docs (#1375) Add a description about how the merge process of Pull Requests will be done, detailing that the Squash and Merge will be used. 
Fixes #1369 --- .../contributing/pull-requests.rst | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/docs/content/getting_started/contributing/pull-requests.rst b/docs/content/getting_started/contributing/pull-requests.rst index 4475ae1828..75ee05bcdd 100644 --- a/docs/content/getting_started/contributing/pull-requests.rst +++ b/docs/content/getting_started/contributing/pull-requests.rst @@ -33,3 +33,22 @@ pull request into the main branch (feel free to ping one of us on Github). This being said, all SimPEG developers and admins are essentially volunteers providing their time for the benefit of the community. This does mean that it might take some time for us to get your PR. + +Merging a Pull Request +---------------------- + +The ``@simpeg/simpeg-admin`` will merge a Pull Request to the `main` branch +using the `Squash and Merge +`_ +strategy: all commits made to the PR branch will be *squashed* to a single +commit that will be added to `main`. + +SimPEG admins will ensure that the commit message is descriptive and +comprehensive. Contributors can help by providing a descriptive and +comprehensive PR description of the changes that were applied and the reasons +behind them. This will be greatly appreciated. + +Admins will mention other authors that made significant contributions to +the PR in the commit message, following GitHub's approach for `Creating +co-authored commits +`_. From 7420f93db6cf7f6f8a4dc1ecbd6e91f421374770 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 19 Mar 2024 15:27:11 -0700 Subject: [PATCH 36/68] Simplify private methods in gravity simulation (#1384) Simplify some of the new methods included with the Numba implementation of the gravity simulation: remove the `_get_cell_nodes` method since we can just rely on the `cell_nodes` method of discretize meshes. Update one inline comment. 
--- SimPEG/potential_fields/gravity/simulation.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index a41ea48112..434c6992dc 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -449,15 +449,6 @@ def _sensitivity_matrix(self): index_offset += n_rows return sensitivity_matrix - def _get_cell_nodes(self): - """ - Return indices of nodes for each cell in the mesh. - """ - if not isinstance(self.mesh, (discretize.TreeMesh, discretize.TensorMesh)): - raise TypeError(f"Invalid mesh of type {self.mesh.__class__.__name__}.") - cell_nodes = self.mesh.cell_nodes - return cell_nodes - def _get_active_nodes(self): """ Return locations of nodes only for active cells @@ -473,7 +464,7 @@ def _get_active_nodes(self): else: raise TypeError(f"Invalid mesh of type {self.mesh.__class__.__name__}.") # Get original cell_nodes but only for active cells - cell_nodes = self._get_cell_nodes() + cell_nodes = self.mesh.cell_nodes # If all cells in the mesh are active, return nodes and cell_nodes if self.nC == self.mesh.n_cells: return nodes, cell_nodes @@ -484,7 +475,7 @@ def _get_active_nodes(self): unique_nodes, active_cell_nodes = np.unique(cell_nodes, return_inverse=True) # Select only the nodes that belong to the active cells (active nodes) active_nodes = nodes[unique_nodes] - # Reshape indices of active cells for each active cell in the mesh + # Reshape indices of active cell nodes for each active cell in the mesh active_cell_nodes = active_cell_nodes.reshape(cell_nodes.shape) return active_nodes, active_cell_nodes From 3be7d4068dccc4ed4696ad0e804191bb7020245f Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 27 Mar 2024 10:27:23 -0700 Subject: [PATCH 37/68] Update Slack links: point to Mattermost (#1385) Replace the links pointing to Slack for links pointing to Mattermost. 
Changes have been applied to the documentation, the `README.rst` and the issue templates. --- .github/ISSUE_TEMPLATE/config.yml | 4 ++-- .github/ISSUE_TEMPLATE/feature-request.yml | 9 +++++---- README.rst | 14 ++++++++------ docs/conf.py | 11 +++++++---- .../content/getting_started/contributing/index.rst | 3 ++- docs/content/getting_started/installing.rst | 2 +- 6 files changed, 25 insertions(+), 18 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index f4108d53c7..6180153534 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -3,5 +3,5 @@ contact_links: url: https://simpeg.discourse.group/ about: "If you have a question on how to use SimPEG, please submit them to our discourse page." - name: Development-related matters - url: http://slack.simpeg.xyz/ - about: "If you would like to discuss SimPEG, any geophysics related problems, or need help from the SimPEG team, get in touch with us on slack." + url: https://mattermost.softwareunderground.org/simpeg + about: "If you would like to discuss SimPEG, any geophysics related problems, or need help from the SimPEG team, get in touch with us on Mattermost." diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml index c7db801f1a..5d8d196a5e 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.yml +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -6,10 +6,11 @@ body: - type: markdown attributes: value: > - If you'd like to request a new feature in SimPEG, or suggest changes in the - functionality of certain functions, we recommend getting in touch with the - developers on [slack](https://slack.simpeg.xyz), in addition to opening an - issue or pull request here. 
+ If you'd like to request a new feature in SimPEG, or suggest changes in + the functionality of certain functions, we recommend getting in touch + with the developers on + [Mattermost](https://mattermost.softwareunderground.org/simpeg), in + addition to opening an Issue or Pull Request here. You can also check out our [Contributor Guide](https://docs.simpeg.xyz/content/getting_started/contributing/index.html) if you need more information. diff --git a/README.rst b/README.rst index bb8122935b..7545b24db7 100644 --- a/README.rst +++ b/README.rst @@ -30,8 +30,8 @@ SimPEG .. image:: https://img.shields.io/discourse/users?server=http%3A%2F%2Fsimpeg.discourse.group%2F :target: https://simpeg.discourse.group/ -.. image:: https://img.shields.io/badge/Slack-simpeg-4A154B.svg?logo=slack - :target: http://slack.simpeg.xyz +.. image:: https://img.shields.io/badge/simpeg-purple?logo=mattermost&label=Mattermost + :target: https://mattermost.softwareunderground.org/simpeg .. image:: https://img.shields.io/badge/Youtube%20channel-GeoSci.xyz-FF0000.svg?logo=youtube :target: https://www.youtube.com/channel/UCBrC4M8_S4GXhyHht7FyQqw @@ -109,7 +109,8 @@ Questions If you have a question regarding a specific use of SimPEG, the fastest way to get a response is by posting on our Discourse discussion forum: https://simpeg.discourse.group/. Alternatively, if you prefer real-time chat, -you can join our slack group at http://slack.simpeg.xyz. +you can join our Mattermost Team at +https://mattermost.softwareunderground.org/simpeg. Please do not create an issue to ask a question. @@ -121,7 +122,8 @@ for developers to discuss upcoming changes to the code base, and for discussing topics related to geophysics in general. Currently our meetings are held every Wednesday, alternating between a mornings (10:30 am pacific time) and afternoons (3:00 pm pacific time) -on even numbered Wednesdays. Find more info on our `slack `_. +on even numbered Wednesdays. Find more info on our +`Mattermost `_. 
Links @@ -134,8 +136,8 @@ Forums: https://simpeg.discourse.group/ -Slack (real time chat): -http://slack.simpeg.xyz +Mattermost (real time chat): +https://mattermost.softwareunderground.org/simpeg Documentation: diff --git a/docs/conf.py b/docs/conf.py index 72ee283c7f..25936ff517 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -242,7 +242,10 @@ def linkcode_resolve(domain, info): html_theme_options = { "external_links": [ {"name": "SimPEG", "url": "https://simpeg.xyz"}, - {"name": "Contact", "url": "http://slack.simpeg.xyz"}, + { + "name": "Contact", + "url": "https://mattermost.softwareunderground.org/simpeg", + }, ], "icon_links": [ { @@ -251,9 +254,9 @@ def linkcode_resolve(domain, info): "icon": "fab fa-github", }, { - "name": "Slack", - "url": "http://slack.simpeg.xyz/", - "icon": "fab fa-slack", + "name": "Mattermost", + "url": "https://mattermost.softwareunderground.org/simpeg", + "icon": "fas fa-comment", }, { "name": "Discourse", diff --git a/docs/content/getting_started/contributing/index.rst b/docs/content/getting_started/contributing/index.rst index 66a10fd678..cb26c41c54 100644 --- a/docs/content/getting_started/contributing/index.rst +++ b/docs/content/getting_started/contributing/index.rst @@ -20,7 +20,8 @@ Ask questions If you have a question regarding a specific use of SimPEG, the fastest way to get a response is by posting on our Discourse discussion forum: https://simpeg.discourse.group/. Alternatively, if you prefer real-time chat, -you can join our slack group at http://slack.simpeg.xyz. +you can join our Mattermost Team at +https://mattermost.softwareunderground.org/simpeg. Please do not create an issue to ask a question. .. 
_issues: diff --git a/docs/content/getting_started/installing.rst b/docs/content/getting_started/installing.rst index 7105415ad6..14adff1570 100644 --- a/docs/content/getting_started/installing.rst +++ b/docs/content/getting_started/installing.rst @@ -104,7 +104,7 @@ be able to download and run any of the :ref:`examples and tutorials `_ or on -`slack `_. +`Mattermost `_. Useful Links ============ From e8367b000cf81c7eff8fa5a9f9e7685a646e4521 Mon Sep 17 00:00:00 2001 From: John Kuttai Date: Wed, 27 Mar 2024 10:50:02 -0700 Subject: [PATCH 38/68] added getJ for fdem and nsem simulations (#1276) Implements methods to calculate sensitivity and sensitivity weights for fdem and nsem simulations Added methods for the FDEM simulation class are: - `getJ` - `getJtJdiag` Co-authored-by: Joseph Capriotti --- SimPEG/base/pde_simulation.py | 3 + .../frequency_domain/simulation.py | 109 +++++++++++++++++- .../natural_source/receivers.py | 26 ++++- .../nsem/inversion/test_Problem3D_Derivs.py | 53 +++++++++ 4 files changed, 184 insertions(+), 7 deletions(-) diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index a213d922ad..bf000b4c5c 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -16,6 +16,9 @@ def __inner_mat_mul_op(M, u, v=None, adjoint=False): # u has multiple fields if v.ndim == 1: v = v[:, None] + if adjoint and v.shape[1] != u.shape[1] and v.shape[1] > 1: + # make sure v is a good shape + v = v.reshape(u.shape[0], -1, u.shape[1]) else: if v.ndim > 1: u = u[:, None] diff --git a/SimPEG/electromagnetics/frequency_domain/simulation.py b/SimPEG/electromagnetics/frequency_domain/simulation.py index 50b138cf9e..fd85d50ddb 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation.py @@ -59,7 +59,13 @@ class BaseFDEMSimulation(BaseEMSimulation): # permittivity, permittivityMap, permittivityDeriv = props.Invertible("Dielectric permittivity (F/m)") def 
__init__( - self, mesh, survey=None, forward_only=False, permittivity=None, **kwargs + self, + mesh, + survey=None, + forward_only=False, + permittivity=None, + storeJ=False, + **kwargs ): super().__init__(mesh=mesh, survey=survey, **kwargs) self.forward_only = forward_only @@ -69,6 +75,7 @@ def __init__( stacklevel=2, ) self.permittivity = permittivity + self.storeJ = storeJ @property def survey(self): @@ -87,6 +94,21 @@ def survey(self, value): if value is not None: value = validate_type("survey", value, Survey, cast=False) self._survey = value + self._survey = value + + @property + def storeJ(self): + """Whether to store the sensitivity matrix + + Returns + ------- + bool + """ + return self._storeJ + + @storeJ.setter + def storeJ(self, value): + self._storeJ = validate_type("storeJ", value, bool) @property def forward_only(self): @@ -240,6 +262,86 @@ def Jtvec(self, m, v, f=None): return mkvc(Jtv) + def getJ(self, m, f=None): + """ + Method to form full J given a model m + + :param numpy.ndarray m: inversion model (nP,) + :param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM u: fields object + :rtype: numpy.ndarray + :return: J (ndata, nP) + """ + self.model = m + + if getattr(self, "_Jmatrix", None) is None: + if f is None: + f = self.fields(m) + + Ainv = self.Ainv + m_size = self.model.size + + Jmatrix = np.zeros((self.survey.nD, m_size)) + + data = Data(self.survey) + + for A_i, freq in zip(Ainv, self.survey.frequencies): + for src in self.survey.get_sources_by_frequency(freq): + u_src = f[src, self._solutionType] + + for rx in src.receiver_list: + v = np.eye(rx.nD, dtype=float) + + df_duT, df_dmT = rx.evalDeriv( + src, self.mesh, f, v=v, adjoint=True + ) + + df_duT = np.hstack([df_duT]) + ATinvdf_duT = A_i * df_duT + dA_dmT = self.getADeriv(freq, u_src, ATinvdf_duT, adjoint=True) + dRHS_dmT = self.getRHSDeriv( + freq, src, ATinvdf_duT, adjoint=True + ) + du_dmT = -dA_dmT + + if not isinstance(dRHS_dmT, Zero): + du_dmT += dRHS_dmT + if not 
isinstance(df_dmT[0], Zero): + du_dmT += np.hstack(df_dmT) + + block = np.array(du_dmT, dtype=complex).real.T + data_inds = data.index_dictionary[src][rx] + Jmatrix[data_inds] = block + + self._Jmatrix = Jmatrix + + return self._Jmatrix + + def getJtJdiag(self, m, W=None, f=None): + """ + Return the diagonal of JtJ + + :param numpy.ndarray m: inversion model (nP,) + :param numpy.ndarray W: vector of weights (ndata,) + :param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM u: fields object + :rtype: numpy.ndarray + :return: JtJ (nP,) + """ + self.model = m + + if getattr(self, "_gtgdiag", None) is None: + J = self.getJ(m, f=f) + + if W is None: + W = np.ones(J.shape[0]) + else: + W = W.diagonal() ** 2 + + diag = np.einsum("i,ij,ij->j", W, J, J) + + self._gtgdiag = diag + + return self._gtgdiag + # @profile def getSourceTerm(self, freq): """ @@ -272,6 +374,11 @@ def getSourceTerm(self, freq): i = ii return s_m, s_e + @property + def deleteTheseOnModelUpdate(self): + toDelete = super().deleteTheseOnModelUpdate + return toDelete + ["_Jmatrix", "_gtgdiag"] + ############################################################################### # E-B Formulation # diff --git a/SimPEG/electromagnetics/natural_source/receivers.py b/SimPEG/electromagnetics/natural_source/receivers.py index f0b74fdca4..65aa483dc3 100644 --- a/SimPEG/electromagnetics/natural_source/receivers.py +++ b/SimPEG/electromagnetics/natural_source/receivers.py @@ -294,18 +294,25 @@ def _eval_impedance_deriv(self, src, mesh, f, du_dm_v=None, v=None, adjoint=Fals # Work backwards! 
gtop_v = v / bot gbot_v = -imp * v / bot + n_d = self.nD if mesh.dim == 3: - ghx_v = np.c_[hy[:, 1], -hy[:, 0]] * gbot_v[:, None] - ghy_v = np.c_[-hx[:, 1], hx[:, 0]] * gbot_v[:, None] - ge_v = np.c_[h[:, 1], -h[:, 0]] * gtop_v[:, None] - gh_v = np.c_[-e[:, 1], e[:, 0]] * gtop_v[:, None] + ghx_v = np.c_[hy[:, 1], -hy[:, 0]] * gbot_v[..., None] + ghy_v = np.c_[-hx[:, 1], hx[:, 0]] * gbot_v[..., None] + ge_v = np.c_[h[:, 1], -h[:, 0]] * gtop_v[..., None] + gh_v = np.c_[-e[:, 1], e[:, 0]] * gtop_v[..., None] if self.orientation[1] == "x": ghy_v += gh_v else: ghx_v -= gh_v + if v.ndim == 2: + # collapse into a long list of n_d vectors + ghx_v = ghx_v.reshape((n_d, -1)) + ghy_v = ghy_v.reshape((n_d, -1)) + ge_v = ge_v.reshape((n_d, -1)) + gh_v = Phx.T @ ghx_v + Phy.T @ ghy_v ge_v = Pe.T @ ge_v else: @@ -515,8 +522,9 @@ def _eval_tipper_deriv(self, src, mesh, f, du_dm_v=None, v=None, adjoint=False): if adjoint: # Work backwards! - gtop_v = (v / bot)[:, None] - gbot_v = (-imp * v / bot)[:, None] + gtop_v = (v / bot)[..., None] + gbot_v = (-imp * v / bot)[..., None] + n_d = self.nD ghx_v = np.c_[hy[:, 1], -hy[:, 0]] * gbot_v ghy_v = np.c_[-hx[:, 1], hx[:, 0]] * gbot_v @@ -528,6 +536,12 @@ def _eval_tipper_deriv(self, src, mesh, f, du_dm_v=None, v=None, adjoint=False): else: ghx_v += gh_v + if v.ndim == 2: + # collapse into a long list of n_d vectors + ghx_v = ghx_v.reshape((n_d, -1)) + ghy_v = ghy_v.reshape((n_d, -1)) + ghz_v = ghz_v.reshape((n_d, -1)) + gh_v = Phx.T @ ghx_v + Phy.T @ ghy_v + Phz.T @ ghz_v return f._hDeriv(src, None, gh_v, adjoint=True) diff --git a/tests/em/nsem/inversion/test_Problem3D_Derivs.py b/tests/em/nsem/inversion/test_Problem3D_Derivs.py index 9d1be9da3a..cd96b8127a 100644 --- a/tests/em/nsem/inversion/test_Problem3D_Derivs.py +++ b/tests/em/nsem/inversion/test_Problem3D_Derivs.py @@ -1,4 +1,5 @@ # Test functions +import pytest import unittest import numpy as np from SimPEG import tests, mkvc @@ -12,6 +13,58 @@ MU = mu_0 +@pytest.fixture() +def 
model_simulation_tuple(): + return nsem.utils.test_utils.setupSimpegNSEM_PrimarySecondary( + nsem.utils.test_utils.halfSpace(1e-2), [0.1], comp="All", singleFreq=False + ) + + +# Test the Jvec derivative +@pytest.mark.parametrize("weights", [True, False]) +def test_Jtjdiag(model_simulation_tuple, weights): + model, simulation = model_simulation_tuple + W = None + if weights: + W = np.eye(simulation.survey.nD) + + J = simulation.getJ(model) + if weights: + J = W @ J + + Jtjdiag = simulation.getJtJdiag(model, W=W) + np.testing.assert_allclose(Jtjdiag, np.sum(J * J, axis=0)) + + +def test_Jtjdiag_clearing(model_simulation_tuple): + model, simulation = model_simulation_tuple + J1 = simulation.getJ(model) + Jtjdiag1 = simulation.getJtJdiag(model) + + m2 = model + 2 + J2 = simulation.getJ(m2) + Jtjdiag2 = simulation.getJtJdiag(m2) + + assert J1 is not J2 + assert Jtjdiag1 is not Jtjdiag2 + + +def test_Jmatrix(model_simulation_tuple): + model, simulation = model_simulation_tuple + rng = np.random.default_rng(4421) + # create random vector + vec = rng.standard_normal(simulation.survey.nD) + + # create the J matrix + J1 = simulation.getJ(model) + Jmatrix_vec = J1.T @ vec + + # compare to JTvec function + jtvec = simulation.Jtvec(model, v=vec) + + np.testing.assert_allclose(Jmatrix_vec, jtvec) + + # Test the Jvec derivative def DerivJvecTest(inputSetup, comp="All", freq=False, expMap=True): m, simulation = nsem.utils.test_utils.setupSimpegNSEM_PrimarySecondary( From 03c71a0c3ef308c64700544c4fa287c613542927 Mon Sep 17 00:00:00 2001 From: Colton Kohnke Date: Wed, 27 Mar 2024 13:52:09 -0400 Subject: [PATCH 39/68] Add LogisticSigmoidMap (#1352) Add a simple bounded Logistic-Sigmoid Mapping as an alternative to the Exponential Map. 
--------- Co-authored-by: Joseph Capriotti --- SimPEG/__init__.py | 1 + SimPEG/maps.py | 161 ++++++++++++++++++++++++++++++++++++++++ tests/base/test_maps.py | 46 ++++++++++++ 3 files changed, 208 insertions(+) diff --git a/SimPEG/__init__.py b/SimPEG/__init__.py index 18cf134a19..d6ef51cf2a 100644 --- a/SimPEG/__init__.py +++ b/SimPEG/__init__.py @@ -78,6 +78,7 @@ maps.InjectActiveCells maps.MuRelative maps.LogMap + maps.LogisticSigmoidMap maps.ParametricBlock maps.ParametricCircleMap maps.ParametricEllipsoid diff --git a/SimPEG/maps.py b/SimPEG/maps.py index 5cc526b0a0..aa0087100a 100644 --- a/SimPEG/maps.py +++ b/SimPEG/maps.py @@ -8,6 +8,7 @@ from scipy.interpolate import UnivariateSpline from scipy.constants import mu_0 from scipy.sparse import csr_matrix as csr +from scipy.special import expit, logit from discretize.tests import check_derivative from discretize import TensorMesh, CylindricalMesh @@ -2156,6 +2157,166 @@ def is_linear(self): return False +class LogisticSigmoidMap(IdentityMap): + r"""Mapping that computes the logistic sigmoid of the model parameters. + + Where :math:`\mathbf{m}` is a set of model parameters, ``LogisticSigmoidMap`` creates + a mapping :math:`\mathbf{u}(\mathbf{m})` that computes the logistic sigmoid + of every element in :math:`\mathbf{m}`; i.e.: + + .. math:: + \mathbf{u}(\mathbf{m}) = sigmoid(\mathbf{m}) = \frac{1}{1+\exp{-\mathbf{m}}} + + ``LogisticSigmoidMap`` transforms values onto the interval (0,1), but can optionally + be scaled and shifted to the interval (a,b). This can be useful for inversion + of data that varies over a log scale and bounded on some interval: + + .. math:: + \mathbf{u}(\mathbf{m}) = a + (b - a) \cdot sigmoid(\mathbf{m}) + + Parameters + ---------- + mesh : discretize.BaseMesh + The number of parameters accepted by the mapping is set to equal the number + of mesh cells. + nP : int + Set the number of parameters accepted by the mapping directly. Used if the + number of parameters is known. 
Used generally when the number of parameters + is not equal to the number of cells in a mesh. + lower_bound: float or (nP) numpy.ndarray + lower bound (a) for the transform. Default 0. Defined \in \mathbf{u} space. + upper_bound: float or (nP) numpy.ndarray + upper bound (b) for the transform. Default 1. Defined \in \mathbf{u} space. + + """ + + def __init__(self, mesh=None, nP=None, lower_bound=0, upper_bound=1, **kwargs): + super().__init__(mesh=mesh, nP=nP, **kwargs) + lower_bound = np.atleast_1d(lower_bound) + upper_bound = np.atleast_1d(upper_bound) + if self.nP != "*": + # check if lower bound and upper bound broadcast to nP + try: + np.broadcast_shapes(lower_bound.shape, (self.nP,)) + except ValueError as err: + raise ValueError( + f"Lower bound does not broadcast to the number of parameters. " + f"Lower bound shape is {lower_bound.shape} and tried against " + f"{self.nP} parameters." + ) from err + try: + np.broadcast_shapes(upper_bound.shape, (self.nP,)) + except ValueError as err: + raise ValueError( + f"Upper bound does not broadcast to the number of parameters. " + f"Upper bound shape is {upper_bound.shape} and tried against " + f"{self.nP} parameters." + ) from err + # make sure lower and upper bound broadcast to each other... + try: + np.broadcast_shapes(lower_bound.shape, upper_bound.shape) + except ValueError as err: + raise ValueError( + f"Upper bound does not broadcast to the lower bound. " + f"Shapes {upper_bound.shape} and {lower_bound.shape} " + f"are incompatible with each other." + ) from err + + if np.any(lower_bound >= upper_bound): + raise ValueError( + "A lower bound is greater than or equal to the upper bound." 
+ ) + + self._lower_bound = lower_bound + self._upper_bound = upper_bound + + @property + def lower_bound(self): + """The lower bound + + Returns + ------- + numpy.ndarray + """ + return self._lower_bound + + @property + def upper_bound(self): + """The upper bound + + Returns + ------- + numpy.ndarray + """ + return self._upper_bound + + def _transform(self, m): + return self.lower_bound + (self.upper_bound - self.lower_bound) * expit(mkvc(m)) + + def inverse(self, m): + r"""Apply the inverse of the mapping to an array. + + For the logistic sigmoid mapping :math:`\mathbf{u}(\mathbf{m})`, the + inverse mapping on a variable :math:`\mathbf{x}` is performed by taking + the log-odds of elements, i.e.: + + .. math:: + \mathbf{m} = \mathbf{u}^{-1}(\mathbf{x}) = logit(\mathbf{x}) = \log \frac{\mathbf{x}}{1 - \mathbf{x}} + + or scaled and translated to interval (a,b): + .. math:: + \mathbf{m} = logit(\frac{(\mathbf{x} - a)}{b-a}) + + Parameters + ---------- + m : numpy.ndarray + A set of input values + + Returns + ------- + numpy.ndarray + the inverse mapping to the elements in *m*; which in this case + is the log-odds function with scaled and shifted input. + """ + return logit( + (mkvc(m) - self.lower_bound) / (self.upper_bound - self.lower_bound) + ) + + def deriv(self, m, v=None): + r"""Derivative of mapping with respect to the input parameters. + + For a mapping :math:`\mathbf{u}(\mathbf{m})` the derivative of the mapping with + respect to the model is a diagonal matrix of the form: + + .. math:: + \frac{\partial \mathbf{u}}{\partial \mathbf{m}} + = \textrm{diag} \big ( (b-a)\cdot sigmoid(\mathbf{m})\cdot(1-sigmoid(\mathbf{m})) \big ) + + Parameters + ---------- + m : (nP) numpy.ndarray + A vector representing a set of model parameters + v : (nP) numpy.ndarray + If not ``None``, the method returns the derivative times the vector *v* + + Returns + ------- + numpy.ndarray or scipy.sparse.csr_matrix + Derivative of the mapping with respect to the model parameters. 
If the + input argument *v* is not ``None``, the method returns the derivative times + the vector *v*. + """ + sigmoid = expit(mkvc(m)) + deriv = (self.upper_bound - self.lower_bound) * sigmoid * (1.0 - sigmoid) + if v is not None: + return deriv * v + return sdiag(deriv) + + @property + def is_linear(self): + return False + + class ChiMap(IdentityMap): r"""Mapping that computes the magnetic permeability given a set of magnetic susceptibilities. diff --git a/tests/base/test_maps.py b/tests/base/test_maps.py index 9f6c8aaec3..f99cc4b23c 100644 --- a/tests/base/test_maps.py +++ b/tests/base/test_maps.py @@ -564,6 +564,51 @@ def test_Tile(self): self.assertTrue((local_mass - total_mass) / total_mass < 1e-8) + def test_logit_errors(self): + nP = 10 + scalar_lower = -2 + scalar_upper = 2 + good_vector_lower = np.random.rand(nP) - 2 + good_vector_upper = np.random.rand(nP) + 2 + + bad_vector_lower = np.random.rand(nP - 2) - 2 + bad_vector_upper = np.random.rand(nP - 2) + 2 + + # test that lower is not equal to nP + with pytest.raises( + ValueError, + match="Lower bound does not broadcast to the number of parameters.*", + ): + maps.LogisticSigmoidMap( + nP=10, lower_bound=bad_vector_lower, upper_bound=scalar_upper + ) + + # test that bad is not equal to nP + with pytest.raises( + ValueError, + match="Upper bound does not broadcast to the number of parameters.*", + ): + maps.LogisticSigmoidMap( + nP=10, lower_bound=scalar_lower, upper_bound=bad_vector_upper + ) + + # test that two upper and lower arrays will not broadcast when not specifying the number of parameters + with pytest.raises( + ValueError, match="Upper bound does not broadcast to the lower bound.*" + ): + maps.LogisticSigmoidMap( + lower_bound=good_vector_lower, upper_bound=bad_vector_upper + ) + + # test that passing a lower bound higher than an upper bound) + with pytest.raises( + ValueError, + match="A lower bound is greater than or equal to the upper bound.", + ): + maps.LogisticSigmoidMap( + 
lower_bound=good_vector_upper, upper_bound=good_vector_lower + ) + class TestWires(unittest.TestCase): def test_basic(self): @@ -706,6 +751,7 @@ def test_linearity(): maps.SphericalSystem(mesh2), maps.SelfConsistentEffectiveMedium(mesh2, sigma0=1, sigma1=2), maps.ExpMap(), + maps.LogisticSigmoidMap(), maps.ReciprocalMap(), maps.LogMap(), maps.ParametricCircleMap(mesh2), From 0b57ca900411f327f687ac8afa779174decc9120 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 27 Mar 2024 14:40:33 -0700 Subject: [PATCH 40/68] Remove the cell_weights attribute in regularizations (#1376) Add test checking errors are raised after accessing the property or passing `cell_weights` argument to regularization's constructor. Replace usage of `cell_weights` in examples and code. --------- Co-authored-by: Joseph Capriotti --- .../static/induced_polarization/run.py | 1 - SimPEG/regularization/base.py | 58 ++++++--------- examples/01-maps/plot_sumMap.py | 4 +- examples/08-vrm/plot_inv_vrm_eq.py | 2 +- ...1_PGI_Linear_1D_joint_WithRelationships.py | 4 +- tests/base/test_directives.py | 18 ++++- tests/base/test_regularization.py | 73 +++++++++++-------- 7 files changed, 85 insertions(+), 75 deletions(-) diff --git a/SimPEG/electromagnetics/static/induced_polarization/run.py b/SimPEG/electromagnetics/static/induced_polarization/run.py index 53fbd717f3..a7372118b8 100644 --- a/SimPEG/electromagnetics/static/induced_polarization/run.py +++ b/SimPEG/electromagnetics/static/induced_polarization/run.py @@ -47,7 +47,6 @@ def run_inversion( mesh, active_cells=actind, mapping=regmap, - cell_weights=mesh.cell_volumes[actind], ) reg.alpha_s = alpha_s reg.alpha_x = alpha_x diff --git a/SimPEG/regularization/base.py b/SimPEG/regularization/base.py index ca38be2e3f..5aaeaedfda 100644 --- a/SimPEG/regularization/base.py +++ b/SimPEG/regularization/base.py @@ -1,5 +1,4 @@ from __future__ import annotations -import warnings import numpy as np from discretize.base import BaseMesh @@ -75,18 +74,9 @@ def 
__init__( "Please use 'active_cells' instead." ) if (key := "cell_weights") in kwargs: - if weights is not None: - raise ValueError( - f"Cannot simultaneously pass 'weights' and '{key}'. " - "Pass 'weights' only." - ) - warnings.warn( - f"The '{key}' argument has been deprecated, please use 'weights'. " - "It will be removed in future versions of SimPEG.", - DeprecationWarning, - stacklevel=2, + raise TypeError( + f"'{key}' argument has been removed. Please use 'weights' instead." ) - weights = kwargs.pop(key) super().__init__(nP=None, mapping=None, **kwargs) self._regularization_mesh = mesh @@ -302,23 +292,19 @@ def regularization_mesh(self) -> RegularizationMesh: @property def cell_weights(self) -> np.ndarray: """Deprecated property for 'volume' and user defined weights.""" - warnings.warn( - "cell_weights are deprecated please access weights using the `set_weights`," - " `get_weights`, and `remove_weights` functionality. This will be removed in 0.19.0", - FutureWarning, - stacklevel=2, + raise AttributeError( + "'cell_weights' has been removed. " + "Please access weights using the `set_weights`, `get_weights`, and " + "`remove_weights` methods." ) - return np.prod(list(self._weights.values()), axis=0) @cell_weights.setter def cell_weights(self, value): - warnings.warn( - "cell_weights are deprecated please access weights using the `set_weights`," - " `get_weights`, and `remove_weights` functionality. This will be removed in 0.19.0", - FutureWarning, - stacklevel=2, + raise AttributeError( + "'cell_weights' has been removed. " + "Please access weights using the `set_weights`, `get_weights`, and " + "`remove_weights` methods." ) - self.set_weights(cell_weights=value) def get_weights(self, key) -> np.ndarray: """Cell weights for a given key. @@ -1583,6 +1569,11 @@ def __init__( "Please use 'active_cells' instead." ) + if (key := "cell_weights") in kwargs: + raise TypeError( + f"'{key}' argument has been removed. Please use 'weights' instead." 
+ ) + self.alpha_s = alpha_s if alpha_x is not None: if length_scale_x is not None: @@ -1714,20 +1705,19 @@ def remove_weights(self, key): @property def cell_weights(self): - # All of the objective functions should have the same weights, - # so just grab the one from smallness here, which should also - # trigger the deprecation warning - return self.objfcts[0].cell_weights + raise AttributeError( + "'cell_weights' has been removed. " + "Please access weights using the `set_weights`, `get_weights`, and " + "`remove_weights` methods." + ) @cell_weights.setter def cell_weights(self, value): - warnings.warn( - "cell_weights are deprecated please access weights using the `set_weights`," - " `get_weights`, and `remove_weights` functionality. This will be removed in 0.19.0", - FutureWarning, - stacklevel=2, + raise AttributeError( + "'cell_weights' has been removed. " + "Please access weights using the `set_weights`, `get_weights`, and " + "`remove_weights` methods." ) - self.set_weights(cell_weights=value) @property def alpha_s(self): diff --git a/examples/01-maps/plot_sumMap.py b/examples/01-maps/plot_sumMap.py index dd7a1d012b..d86976ea08 100644 --- a/examples/01-maps/plot_sumMap.py +++ b/examples/01-maps/plot_sumMap.py @@ -138,7 +138,7 @@ def run(plotIt=True): regMesh = TensorMesh([len(domains)]) reg_m1 = regularization.Sparse(regMesh, mapping=wires.homo) - reg_m1.cell_weights = wires.homo * wr + reg_m1.set_weights(cell_weights=wires.homo * wr) reg_m1.norms = [0, 2] reg_m1.mref = np.zeros(sumMap.shape[1]) @@ -146,7 +146,7 @@ def run(plotIt=True): reg_m2 = regularization.Sparse( mesh, active_cells=actv, mapping=wires.hetero, gradient_type="components" ) - reg_m2.cell_weights = wires.hetero * wr + reg_m2.set_weights(cell_weights=wires.hetero * wr) reg_m2.norms = [0, 0, 0, 0] reg_m2.mref = np.zeros(sumMap.shape[1]) diff --git a/examples/08-vrm/plot_inv_vrm_eq.py b/examples/08-vrm/plot_inv_vrm_eq.py index aba2d0fde0..e0eee4ff6f 100644 --- 
a/examples/08-vrm/plot_inv_vrm_eq.py +++ b/examples/08-vrm/plot_inv_vrm_eq.py @@ -196,7 +196,7 @@ w = w / np.max(w) w = w -reg = regularization.Smallness(mesh=mesh, active_cells=actCells, cell_weights=w) +reg = regularization.Smallness(mesh=mesh, active_cells=actCells, weights=w) opt = optimization.ProjectedGNCG( maxIter=20, lower=0.0, upper=1e-2, maxIterLS=20, tolCG=1e-4 ) diff --git a/examples/10-pgi/plot_inv_1_PGI_Linear_1D_joint_WithRelationships.py b/examples/10-pgi/plot_inv_1_PGI_Linear_1D_joint_WithRelationships.py index 5da5932952..adabf3f35f 100644 --- a/examples/10-pgi/plot_inv_1_PGI_Linear_1D_joint_WithRelationships.py +++ b/examples/10-pgi/plot_inv_1_PGI_Linear_1D_joint_WithRelationships.py @@ -235,11 +235,11 @@ def g(k): reg1 = regularization.WeightedLeastSquares( mesh, alpha_s=1.0, alpha_x=1.0, mapping=wires.m1 ) -reg1.cell_weights = wr1 +reg1.set_weights(cell_weights=wr1) reg2 = regularization.WeightedLeastSquares( mesh, alpha_s=1.0, alpha_x=1.0, mapping=wires.m2 ) -reg2.cell_weights = wr2 +reg2.set_weights(cell_weights=wr2) reg = reg1 + reg2 opt = optimization.ProjectedGNCG( diff --git a/tests/base/test_directives.py b/tests/base/test_directives.py index 0d2f2f105c..2af5c58e32 100644 --- a/tests/base/test_directives.py +++ b/tests/base/test_directives.py @@ -161,7 +161,11 @@ def test_sensitivity_weighting_global(self): test_directive.update() for reg_i in reg.objfcts: - self.assertTrue(np.all(np.isclose(test_weights, reg_i.cell_weights))) + # Get all weights in regularization + weights = [reg_i.get_weights(key) for key in reg_i.weights_keys] + # Compute the product of all weights + weights = np.prod(weights, axis=0) + self.assertTrue(np.all(np.isclose(test_weights, weights))) reg_i.remove_weights("sensitivity") # self.test_sensitivity_weighting_subroutine(test_weights, test_directive) @@ -201,7 +205,11 @@ def test_sensitivity_weighting_percentile_maximum(self): test_directive.update() for reg_i in reg.objfcts: - 
self.assertTrue(np.all(np.isclose(test_weights, reg_i.cell_weights))) + # Get all weights in regularization + weights = [reg_i.get_weights(key) for key in reg_i.weights_keys] + # Compute the product of all weights + weights = np.prod(weights, axis=0) + self.assertTrue(np.all(np.isclose(test_weights, weights))) reg_i.remove_weights("sensitivity") # self.test_sensitivity_weighting_subroutine(test_weights, test_directive) @@ -241,7 +249,11 @@ def test_sensitivity_weighting_amplitude_minimum(self): test_directive.update() for reg_i in reg.objfcts: - self.assertTrue(np.all(np.isclose(test_weights, reg_i.cell_weights))) + # Get all weights in regularization + weights = [reg_i.get_weights(key) for key in reg_i.weights_keys] + # Compute the product of all weights + weights = np.prod(weights, axis=0) + self.assertTrue(np.all(np.isclose(test_weights, weights))) reg_i.remove_weights("sensitivity") # self.test_sensitivity_weighting_subroutine(test_weights, test_directive) diff --git a/tests/base/test_regularization.py b/tests/base/test_regularization.py index 207e8d9cc8..c50a4a83a2 100644 --- a/tests/base/test_regularization.py +++ b/tests/base/test_regularization.py @@ -758,44 +758,13 @@ def test_multiple_weights(self, mesh, regularization_class): assert reg.weights_keys == ["dummy_weight", "other_weights", "volume"] -class TestDeprecatedArguments: - """ - Test errors after simultaneously passing new and deprecated arguments. 
- - Within these arguments are: - - * ``cell_weights`` (replaced by ``weights``) - - """ - - @pytest.fixture(params=["1D", "2D", "3D"]) - def mesh(self, request): - """Sample mesh.""" - if request.param == "1D": - hx = np.random.rand(10) - h = [hx / hx.sum()] - elif request.param == "2D": - hx, hy = np.random.rand(10), np.random.rand(9) - h = [h_i / h_i.sum() for h_i in (hx, hy)] - elif request.param == "3D": - hx, hy, hz = np.random.rand(10), np.random.rand(9), np.random.rand(8) - h = [h_i / h_i.sum() for h_i in (hx, hy, hz)] - return discretize.TensorMesh(h) - - def test_weights(self, mesh): - """Test cell_weights and weights.""" - weights = np.ones(len(mesh)) - msg = "Cannot simultaneously pass 'weights' and 'cell_weights'." - with pytest.raises(ValueError, match=msg): - BaseRegularization(mesh, weights=weights, cell_weights=weights) - - class TestRemovedObjects: """ Test if errors are raised after passing removed arguments or trying to access removed properties. * ``indActive`` (replaced by ``active_cells``) + * ``cell_weights`` (replaced by ``weights``) """ @@ -839,6 +808,46 @@ def test_ind_active_property(self, mesh, regularization_class): with pytest.raises(NotImplementedError, match=msg): reg.indActive + @pytest.mark.parametrize( + "regularization_class", + (BaseRegularization, WeightedLeastSquares), + ) + def test_cell_weights_argument(self, mesh, regularization_class): + """Test if error is raised when passing the cell_weights argument.""" + weights = np.ones(len(mesh)) + msg = "'cell_weights' argument has been removed. Please use 'weights' instead." 
+ with pytest.raises(TypeError, match=msg): + regularization_class(mesh, cell_weights=weights) + + @pytest.mark.parametrize( + "regularization_class", (BaseRegularization, WeightedLeastSquares) + ) + def test_cell_weights_property(self, mesh, regularization_class): + """Test if error is raised when trying to access the cell_weights property.""" + weights = {"weights": np.ones(len(mesh))} + msg = ( + "'cell_weights' has been removed. " + "Please access weights using the `set_weights`, `get_weights`, and " + "`remove_weights` methods." + ) + reg = regularization_class(mesh, weights=weights) + with pytest.raises(AttributeError, match=msg): + reg.cell_weights + + @pytest.mark.parametrize( + "regularization_class", (BaseRegularization, WeightedLeastSquares) + ) + def test_cell_weights_setter(self, mesh, regularization_class): + """Test if error is raised when trying to set the cell_weights property.""" + msg = ( + "'cell_weights' has been removed. " + "Please access weights using the `set_weights`, `get_weights`, and " + "`remove_weights` methods." + ) + reg = regularization_class(mesh) + with pytest.raises(AttributeError, match=msg): + reg.cell_weights = "dummy variable" + class TestRemovedRegularizations: """ From 23e27d7c211976c7a64ae203c6ee4688a9f5e590 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 27 Mar 2024 16:31:13 -0700 Subject: [PATCH 41/68] Remove regmesh, mref and gradientType from regularizations (#1377) Remove the `regmesh`, `mref` and `gradientType` properties from regularizations. Make `Sparse` regularizations to raise errors after getting `gradientType` as argument. Add tests checking errors are being raised. 
Part of the solution to #1302 --- SimPEG/regularization/base.py | 9 ++--- SimPEG/regularization/sparse.py | 33 ++++++++++-------- examples/01-maps/plot_sumMap.py | 4 +-- .../plot_inv_mag_nonLinear_Amplitude.py | 4 +-- .../plot_booky_1D_time_freq_inv.py | 4 +-- .../plot_booky_1Dstitched_resolve_inv.py | 2 +- .../plot_laguna_del_maule_inversion.py | 4 +-- ...nv_dcip_dipoledipole_2_5Dinversion_irls.py | 3 +- tests/base/test_regularization.py | 34 ++++++++++++++++++- tests/dask/test_mag_MVI_Octree.py | 12 +++---- tests/dask/test_mag_nonLinear_Amplitude.py | 4 +-- tests/pf/test_mag_inversion_linear.py | 2 +- tests/pf/test_mag_inversion_linear_Octree.py | 2 +- tests/pf/test_mag_nonLinear_Amplitude.py | 4 +-- tutorials/05-dcr/plot_inv_2_dcr2d_irls.py | 2 +- tutorials/07-fdem/plot_inv_1_em1dfm.py | 2 +- tutorials/08-tdem/plot_inv_1_em1dtm.py | 2 +- 17 files changed, 79 insertions(+), 48 deletions(-) diff --git a/SimPEG/regularization/base.py b/SimPEG/regularization/base.py index 5aaeaedfda..b82333025d 100644 --- a/SimPEG/regularization/base.py +++ b/SimPEG/regularization/base.py @@ -262,8 +262,7 @@ def reference_model(self, values: np.ndarray | float): "mref", "reference_model", "0.19.0", - future_warn=True, - error=False, + error=True, ) @property @@ -285,8 +284,7 @@ def regularization_mesh(self) -> RegularizationMesh: "regmesh", "regularization_mesh", "0.19.0", - future_warn=True, - error=False, + error=True, ) @property @@ -2111,8 +2109,7 @@ def reference_model(self, values: np.ndarray | float): "mref", "reference_model", "0.19.0", - future_warn=True, - error=False, + error=True, ) @property diff --git a/SimPEG/regularization/sparse.py b/SimPEG/regularization/sparse.py index 817c49e224..43d5916a03 100644 --- a/SimPEG/regularization/sparse.py +++ b/SimPEG/regularization/sparse.py @@ -577,10 +577,13 @@ class SparseSmoothness(BaseSparse, SmoothnessFirstOrder): """ def __init__(self, mesh, orientation="x", gradient_type="total", **kwargs): - if "gradientType" in kwargs: - 
self.gradientType = kwargs.pop("gradientType") - else: - self.gradient_type = gradient_type + # Raise error if removed arguments were passed + if (key := "gradientType") in kwargs: + raise TypeError( + f"'{key}' argument has been removed. " + "Please use 'gradient_type' instead." + ) + self.gradient_type = gradient_type super().__init__(mesh=mesh, orientation=orientation, **kwargs) def update_weights(self, m): @@ -695,8 +698,7 @@ def gradient_type(self, value: str): "gradientType", new_name="gradient_type", removal_version="0.19.0", - error=False, - future_warn=True, + error=True, ) @@ -930,6 +932,14 @@ def __init__( f"'regularization_mesh' must be of type {RegularizationMesh} or {BaseMesh}. " f"Value of type {type(mesh)} provided." ) + + # Raise error if removed arguments were passed + if (key := "gradientType") in kwargs: + raise TypeError( + f"'{key}' argument has been removed. " + "Please use 'gradient_type' instead." + ) + self._regularization_mesh = mesh if active_cells is not None: self._regularization_mesh.active_cells = active_cells @@ -950,7 +960,6 @@ def __init__( SparseSmoothness(mesh=self.regularization_mesh, orientation="z") ) - gradientType = kwargs.pop("gradientType", None) super().__init__( self.regularization_mesh, objfcts=objfcts, @@ -959,13 +968,7 @@ def __init__( if norms is None: norms = [1] * (mesh.dim + 1) self.norms = norms - - if gradientType is not None: - # Trigger deprecation warning - self.gradientType = gradientType - else: - self.gradient_type = gradient_type - + self.gradient_type = gradient_type self.irls_scaled = irls_scaled self.irls_threshold = irls_threshold @@ -995,7 +998,7 @@ def gradient_type(self, value: str): self._gradient_type = value gradientType = utils.code_utils.deprecate_property( - gradient_type, "gradientType", "0.19.0", error=False, future_warn=True + gradient_type, "gradientType", "0.19.0", error=True ) @property diff --git a/examples/01-maps/plot_sumMap.py b/examples/01-maps/plot_sumMap.py index 
d86976ea08..fe99b0cc2e 100644 --- a/examples/01-maps/plot_sumMap.py +++ b/examples/01-maps/plot_sumMap.py @@ -140,7 +140,7 @@ def run(plotIt=True): reg_m1 = regularization.Sparse(regMesh, mapping=wires.homo) reg_m1.set_weights(cell_weights=wires.homo * wr) reg_m1.norms = [0, 2] - reg_m1.mref = np.zeros(sumMap.shape[1]) + reg_m1.reference_model = np.zeros(sumMap.shape[1]) # Regularization for the voxel model reg_m2 = regularization.Sparse( @@ -148,7 +148,7 @@ def run(plotIt=True): ) reg_m2.set_weights(cell_weights=wires.hetero * wr) reg_m2.norms = [0, 0, 0, 0] - reg_m2.mref = np.zeros(sumMap.shape[1]) + reg_m2.reference_model = np.zeros(sumMap.shape[1]) reg = reg_m1 + reg_m2 diff --git a/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py b/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py index bbcd745dce..a7bf711161 100644 --- a/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py +++ b/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py @@ -235,7 +235,7 @@ reg = regularization.Sparse( mesh, active_cells=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0 ) -reg.mref = np.zeros(nC) +reg.reference_model = np.zeros(nC) # Specify how the optimization will proceed, set susceptibility bounds to inf opt = optimization.ProjectedGNCG( @@ -347,7 +347,7 @@ # Create a sparse regularization reg = regularization.Sparse(mesh, active_cells=actv, mapping=idenMap) reg.norms = [1, 0, 0, 0] -reg.mref = np.zeros(nC) +reg.reference_model = np.zeros(nC) # Data misfit function dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_obj) diff --git a/examples/20-published/plot_booky_1D_time_freq_inv.py b/examples/20-published/plot_booky_1D_time_freq_inv.py index 180685ef32..e1469d3f3d 100644 --- a/examples/20-published/plot_booky_1D_time_freq_inv.py +++ b/examples/20-published/plot_booky_1D_time_freq_inv.py @@ -261,7 +261,7 @@ def run(plotIt=True, saveFig=False, cleanup=True): inv = inversion.BaseInversion(invProb, directiveList=[target]) reg.alpha_s = 1e-3 
reg.alpha_x = 1.0 - reg.mref = m0.copy() + reg.reference_model = m0.copy() opt.LSshorten = 0.5 opt.remember("xc") # run the inversion @@ -379,7 +379,7 @@ def run(plotIt=True, saveFig=False, cleanup=True): reg.alpha_x = 1.0 opt.LSshorten = 0.5 opt.remember("xc") - reg.mref = mopt_re # Use RESOLVE model as a reference model + reg.reference_model = mopt_re # Use RESOLVE model as a reference model # run the inversion mopt_sky = inv.run(m0) diff --git a/examples/20-published/plot_booky_1Dstitched_resolve_inv.py b/examples/20-published/plot_booky_1Dstitched_resolve_inv.py index fc10a3317c..dbdff9966b 100644 --- a/examples/20-published/plot_booky_1Dstitched_resolve_inv.py +++ b/examples/20-published/plot_booky_1Dstitched_resolve_inv.py @@ -127,7 +127,7 @@ def resolve_1Dinversions( # regularization regMesh = discretize.TensorMesh([mesh.h[2][mapping.maps[-1].indActive]]) reg = regularization.WeightedLeastSquares(regMesh) - reg.mref = mref + reg.reference_model = mref # optimization opt = optimization.InexactGaussNewton(maxIter=10) diff --git a/examples/20-published/plot_laguna_del_maule_inversion.py b/examples/20-published/plot_laguna_del_maule_inversion.py index 0edd61c3c2..d124efa9da 100644 --- a/examples/20-published/plot_laguna_del_maule_inversion.py +++ b/examples/20-published/plot_laguna_del_maule_inversion.py @@ -96,9 +96,9 @@ def run(plotIt=True, cleanAfterRun=True): # %% Create inversion objects reg = regularization.Sparse( - mesh, active_cells=active, mapping=staticCells, gradientType="total" + mesh, active_cells=active, mapping=staticCells, gradient_type="total" ) - reg.mref = driver.mref[dynamic] + reg.reference_model = driver.mref[dynamic] reg.norms = [0.0, 1.0, 1.0, 1.0] # reg.norms = driver.lpnorms diff --git a/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion_irls.py b/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion_irls.py index b6c67f0fb3..8c3238670b 100644 --- a/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion_irls.py +++ 
b/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion_irls.py @@ -155,9 +155,8 @@ def run(plotIt=True, survey_type="dipole-dipole", p=0.0, qx=2.0, qz=2.0): # Related to inversion reg = regularization.Sparse( - mesh, active_cells=actind, mapping=regmap, gradientType="components" + mesh, active_cells=actind, mapping=regmap, gradient_type="components" ) - # gradientType = 'components' reg.norms = [p, qx, qz, 0.0] IRLS = directives.Update_IRLS( max_irls_iterations=20, minGNiter=1, beta_search=False, fix_Jmatrix=True diff --git a/tests/base/test_regularization.py b/tests/base/test_regularization.py index c50a4a83a2..4150aba4f9 100644 --- a/tests/base/test_regularization.py +++ b/tests/base/test_regularization.py @@ -9,6 +9,8 @@ from SimPEG.regularization import ( BaseRegularization, WeightedLeastSquares, + Sparse, + SparseSmoothness, Smallness, SmoothnessFirstOrder, SmoothnessSecondOrder, @@ -91,7 +93,7 @@ def test_regularization(self): else: m = np.random.rand(mesh.nC) mref = np.ones_like(m) * np.mean(m) - reg.mref = mref + reg.reference_model = mref # test derivs passed = reg.test(m, eps=TOL) @@ -764,6 +766,9 @@ class TestRemovedObjects: access removed properties. * ``indActive`` (replaced by ``active_cells``) + * ``gradientType`` (replaced by ``gradient_type``) + * ``mref`` (replaced by ``reference_model``) + * ``regmesh`` (replaced by ``regularization_mesh``) * ``cell_weights`` (replaced by ``weights``) """ @@ -782,6 +787,33 @@ def mesh(self, request): h = [h_i / h_i.sum() for h_i in (hx, hy, hz)] return discretize.TensorMesh(h) + @pytest.mark.parametrize( + "regularization_class", (BaseRegularization, WeightedLeastSquares) + ) + def test_mref_property(self, mesh, regularization_class): + """Test mref property.""" + msg = "mref has been removed, please use reference_model." 
+ reg = regularization_class(mesh) + with pytest.raises(NotImplementedError, match=msg): + reg.mref + + def test_regmesh_property(self, mesh): + """Test regmesh property.""" + msg = "regmesh has been removed, please use regularization_mesh." + reg = BaseRegularization(mesh) + with pytest.raises(NotImplementedError, match=msg): + reg.regmesh + + @pytest.mark.parametrize("regularization_class", (Sparse, SparseSmoothness)) + def test_gradient_type(self, mesh, regularization_class): + """Test gradientType argument.""" + msg = ( + "'gradientType' argument has been removed. " + "Please use 'gradient_type' instead." + ) + with pytest.raises(TypeError, match=msg): + regularization_class(mesh, gradientType="total") + @pytest.mark.parametrize( "regularization_class", (BaseRegularization, WeightedLeastSquares), diff --git a/tests/dask/test_mag_MVI_Octree.py b/tests/dask/test_mag_MVI_Octree.py index 3c7305f552..37b8783745 100644 --- a/tests/dask/test_mag_MVI_Octree.py +++ b/tests/dask/test_mag_MVI_Octree.py @@ -118,16 +118,16 @@ def setUp(self): # Create three regularization for the different components # of magnetization reg_p = regularization.Sparse(mesh, active_cells=actv, mapping=wires.p) - reg_p.mref = np.zeros(3 * nC) + reg_p.reference_model = np.zeros(3 * nC) reg_s = regularization.Sparse(mesh, active_cells=actv, mapping=wires.s) - reg_s.mref = np.zeros(3 * nC) + reg_s.reference_model = np.zeros(3 * nC) reg_t = regularization.Sparse(mesh, active_cells=actv, mapping=wires.t) - reg_t.mref = np.zeros(3 * nC) + reg_t.reference_model = np.zeros(3 * nC) reg = reg_p + reg_s + reg_t - reg.mref = np.zeros(3 * nC) + reg.reference_model = np.zeros(3 * nC) # Data misfit function dmis = data_misfit.L2DataMisfit(simulation=sim, data=data) @@ -173,7 +173,7 @@ def setUp(self): # Regularize the amplitude of the vectors reg_a = regularization.Sparse(mesh, active_cells=actv, mapping=wires.amp) reg_a.norms = [0.0, 0.0, 0.0, 0.0] # Sparse on the model and its gradients - reg_a.mref = 
np.zeros(3 * nC) + reg_a.reference_model = np.zeros(3 * nC) # Regularize the vertical angle of the vectors reg_t = regularization.Sparse(mesh, active_cells=actv, mapping=wires.theta) @@ -188,7 +188,7 @@ def setUp(self): reg_p.norms = [2.0, 0.0, 0.0, 0.0] # Only norm on gradients used reg = reg_a + reg_t + reg_p - reg.mref = np.zeros(3 * nC) + reg.reference_model = np.zeros(3 * nC) Lbound = np.kron(np.asarray([0, -np.inf, -np.inf]), np.ones(nC)) Ubound = np.kron(np.asarray([10, np.inf, np.inf]), np.ones(nC)) diff --git a/tests/dask/test_mag_nonLinear_Amplitude.py b/tests/dask/test_mag_nonLinear_Amplitude.py index 1f9109d402..758db82d0a 100644 --- a/tests/dask/test_mag_nonLinear_Amplitude.py +++ b/tests/dask/test_mag_nonLinear_Amplitude.py @@ -144,7 +144,7 @@ def setUp(self): reg = regularization.Sparse( mesh, active_cells=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0 ) - reg.mref = np.zeros(nC) + reg.reference_model = np.zeros(nC) # Specify how the optimization will proceed, set susceptibility bounds to inf opt = optimization.ProjectedGNCG( @@ -237,7 +237,7 @@ def setUp(self): # Create a sparse regularization reg = regularization.Sparse(mesh, active_cells=actv, mapping=idenMap) reg.norms = [1, 0, 0, 0] - reg.mref = np.zeros(nC) + reg.reference_model = np.zeros(nC) # Data misfit function dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_obj) diff --git a/tests/pf/test_mag_inversion_linear.py b/tests/pf/test_mag_inversion_linear.py index 2da13bf2f5..bf7e10dba8 100644 --- a/tests/pf/test_mag_inversion_linear.py +++ b/tests/pf/test_mag_inversion_linear.py @@ -103,7 +103,7 @@ def setUp(self): # Create a regularization reg = regularization.Sparse(self.mesh, active_cells=actv, mapping=idenMap) reg.norms = [0, 0, 0, 0] - reg.gradientType = "components" + reg.gradient_type = "components" # Data misfit function dmis = data_misfit.L2DataMisfit(simulation=sim, data=data) diff --git a/tests/pf/test_mag_inversion_linear_Octree.py 
b/tests/pf/test_mag_inversion_linear_Octree.py index 64ce31e0bd..8167bf1e1f 100644 --- a/tests/pf/test_mag_inversion_linear_Octree.py +++ b/tests/pf/test_mag_inversion_linear_Octree.py @@ -117,7 +117,7 @@ def setUp(self): # Create a regularization reg = regularization.Sparse(self.mesh, active_cells=actv, mapping=idenMap) reg.norms = [0, 0, 0, 0] - reg.mref = np.zeros(nC) + reg.reference_model = np.zeros(nC) # Data misfit function dmis = data_misfit.L2DataMisfit(simulation=sim, data=data) diff --git a/tests/pf/test_mag_nonLinear_Amplitude.py b/tests/pf/test_mag_nonLinear_Amplitude.py index 015aa8bfe0..85f27266d6 100644 --- a/tests/pf/test_mag_nonLinear_Amplitude.py +++ b/tests/pf/test_mag_nonLinear_Amplitude.py @@ -143,7 +143,7 @@ def setUp(self): reg = regularization.Sparse( mesh, active_cells=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0 ) - reg.mref = np.zeros(nC) + reg.reference_model = np.zeros(nC) # Specify how the optimization will proceed, set susceptibility bounds to inf opt = optimization.ProjectedGNCG( @@ -236,7 +236,7 @@ def setUp(self): # Create a sparse regularization reg = regularization.Sparse(mesh, active_cells=actv, mapping=idenMap) reg.norms = [1, 0, 0, 0] - reg.mref = np.zeros(nC) + reg.reference_model = np.zeros(nC) # Data misfit function dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_obj) diff --git a/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py b/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py index 50ad50e100..d72de43fb9 100644 --- a/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py +++ b/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py @@ -305,7 +305,7 @@ active_cells=ind_active, reference_model=starting_conductivity_model, mapping=regmap, - gradientType="total", + gradient_type="total", alpha_s=0.01, alpha_x=1, alpha_y=1, diff --git a/tutorials/07-fdem/plot_inv_1_em1dfm.py b/tutorials/07-fdem/plot_inv_1_em1dfm.py index 8c58fc7adc..2d566f06a7 100644 --- a/tutorials/07-fdem/plot_inv_1_em1dfm.py +++ b/tutorials/07-fdem/plot_inv_1_em1dfm.py @@ 
-238,7 +238,7 @@ reg = regularization.Sparse(mesh, mapping=reg_map, alpha_s=0.025, alpha_x=1.0) # reference model -reg.mref = starting_model +reg.reference_model = starting_model # Define sparse and blocky norms p, q reg.norms = [0, 0] diff --git a/tutorials/08-tdem/plot_inv_1_em1dtm.py b/tutorials/08-tdem/plot_inv_1_em1dtm.py index ae188747c0..535f646747 100644 --- a/tutorials/08-tdem/plot_inv_1_em1dtm.py +++ b/tutorials/08-tdem/plot_inv_1_em1dtm.py @@ -227,7 +227,7 @@ reg = regularization.Sparse(mesh, mapping=reg_map, alpha_s=0.01, alpha_x=1.0) # set reference model -reg.mref = starting_model +reg.reference_model = starting_model # Define sparse and blocky norms p, q reg.norms = [1, 0] From 6b9c6baeadc8f35e58f6eb101efd155ba279d402 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 27 Mar 2024 17:09:10 -0700 Subject: [PATCH 42/68] Test if gravity sensitivities are stored on disk (#1388) Add a test function checking that the sensitivity matrix in the gravity simulation using Choclo as engine is being stored in disk using a Numpy memmap. 
--- tests/pf/test_forward_Grav_Linear.py | 41 ++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/tests/pf/test_forward_Grav_Linear.py b/tests/pf/test_forward_Grav_Linear.py index 7208f0f9e3..e65c7f7803 100644 --- a/tests/pf/test_forward_Grav_Linear.py +++ b/tests/pf/test_forward_Grav_Linear.py @@ -359,6 +359,47 @@ def test_choclo_and_sensitivity_path_as_dir(self, simple_mesh, tmp_path): engine="choclo", ) + def test_sensitivities_on_disk(self, simple_mesh, receivers_locations, tmp_path): + """ + Test if sensitivity matrix is correctly being stored in disk when asked + """ + # Build survey + receivers = gravity.Point(receivers_locations, components="gz") + sources = gravity.SourceField([receivers]) + survey = gravity.Survey(sources) + # Build simulation + sensitivities_path = tmp_path / "sensitivities" + simulation = gravity.Simulation3DIntegral( + mesh=simple_mesh, + survey=survey, + store_sensitivities="disk", + sensitivity_path=str(sensitivities_path), + engine="choclo", + ) + simulation.G + # Check if sensitivity matrix was stored in disk and is a memmap + assert sensitivities_path.is_file() + assert type(simulation.G) is np.memmap + + def test_sensitivities_on_ram(self, simple_mesh, receivers_locations, tmp_path): + """ + Test if sensitivity matrix is correctly being allocated in memory when asked + """ + # Build survey + receivers = gravity.Point(receivers_locations, components="gz") + sources = gravity.SourceField([receivers]) + survey = gravity.Survey(sources) + # Build simulation + simulation = gravity.Simulation3DIntegral( + mesh=simple_mesh, + survey=survey, + store_sensitivities="ram", + engine="choclo", + ) + simulation.G + # Check if sensitivity matrix is a Numpy array (stored in memory) + assert type(simulation.G) is np.ndarray + def test_choclo_missing(self, simple_mesh, monkeypatch): """ Check if error is raised when choclo is missing and chosen as engine. 
From 6952d9015413c6e6ccee541204cc130effefe210 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 28 Mar 2024 10:44:22 -0700 Subject: [PATCH 43/68] Check if mesh is 3D when using Choclo in gravity simulation (#1386) Make gravity simulation to raise an error if the engine is `"choclo"` and the passed mesh is not 3d. Add a test to check the error. --- SimPEG/potential_fields/gravity/simulation.py | 8 ++++- tests/pf/test_forward_Grav_Linear.py | 31 +++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index 434c6992dc..a520019939 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -131,8 +131,14 @@ def __init__( self.numba_parallel = numba_parallel self.engine = engine self._sanity_checks_engine(kwargs) - # Define jit functions if self.engine == "choclo": + # Check dimensions of the mesh + if self.mesh.dim != 3: + raise ValueError( + f"Invalid mesh with {self.mesh.dim} dimensions. " + "Only 3D meshes are supported when using 'choclo' as engine." 
+ ) + # Define jit functions if numba_parallel: self._sensitivity_gravity = _sensitivity_gravity_parallel self._forward_gravity = _forward_gravity_parallel diff --git a/tests/pf/test_forward_Grav_Linear.py b/tests/pf/test_forward_Grav_Linear.py index e65c7f7803..5e7d865f6b 100644 --- a/tests/pf/test_forward_Grav_Linear.py +++ b/tests/pf/test_forward_Grav_Linear.py @@ -436,3 +436,34 @@ def test_invalid_conversion_factor(self): component = "invalid-component" with pytest.raises(ValueError, match=f"Invalid component '{component}'"): gravity.simulation._get_conversion_factor(component) + + +class TestInvalidMeshChoclo: + @pytest.fixture(params=("tensormesh", "treemesh")) + def mesh(self, request): + """Sample 2D mesh.""" + hx, hy = [(0.1, 8)], [(0.1, 8)] + h = (hx, hy) + if request.param == "tensormesh": + mesh = discretize.TensorMesh(h, "CC") + else: + mesh = discretize.TreeMesh(h, origin="CC") + mesh.finalize() + return mesh + + def test_invalid_mesh_with_choclo(self, mesh): + """ + Test if simulation raises error when passing an invalid mesh and using choclo + """ + # Build survey + receivers_locations = np.array([[0, 0, 0]]) + receivers = gravity.Point(receivers_locations) + sources = gravity.SourceField([receivers]) + survey = gravity.Survey(sources) + # Check if error is raised + msg = ( + "Invalid mesh with 2 dimensions. " + "Only 3D meshes are supported when using 'choclo' as engine." + ) + with pytest.raises(ValueError, match=msg): + gravity.Simulation3DIntegral(mesh, survey, engine="choclo") From 3ba9a4b46734f81321486a8c20844b4de7769b4a Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Thu, 28 Mar 2024 13:37:01 -0600 Subject: [PATCH 44/68] Rotated Gradients (#1167) Constructs an (optionally) anisotropic Smoothness Measure operator on general meshes. This makes use of discretize's `get_face_inner_product` operator to construct the regularization as an inner product of cell gradients. 
class SmoothnessFullGradient(BaseRegularization):
    r"""Measures the gradient of a model using optionally anisotropic weighting.

    This regularizer measures the first order smoothness in a mesh-independent way
    by observing that the N-d smoothness operator can be represented as an
    inner product with an arbitrarily anisotropic weight.

    By default it assumes uniform weighting in each dimension, which works
    for most ``discretize`` mesh types.

    Parameters
    ----------
    mesh : discretize.BaseMesh
        The mesh object to use for regularization. The mesh should either have
        a `cell_gradient` or a `stencil_cell_gradient` defined.
    alphas : (mesh.dim,) or (mesh.n_cells, mesh.dim) array_like of float, optional
        The weights of the regularization for each axis. This can be defined for each cell
        in the mesh. Default is uniform weights equal to the smallest edge length squared.
    reg_dirs : (mesh.dim, mesh.dim) or (mesh.n_cells, mesh.dim, mesh.dim) array_like of float
        Matrix or list of matrices whose columns represent the regularization directions.
        Each matrix should be orthonormal. Default is Identity.
    ortho_check : bool, optional
        Whether to check `reg_dirs` for orthogonality.
    **kwargs
        Keyword arguments passed to the parent class ``BaseRegularization``.

    Examples
    --------
    Construct a 2D measure with uniform smoothing in each direction.

    >>> from discretize import TensorMesh
    >>> from SimPEG.regularization import SmoothnessFullGradient
    >>> mesh = TensorMesh([32, 32])
    >>> reg = SmoothnessFullGradient(mesh)

    We can instead create a measure that smooths twice as much in the 1st dimension
    than it does in the second dimension.

    >>> reg = SmoothnessFullGradient(mesh, [2, 1])

    The `alphas` parameter can also be independent for each cell. Here we set the cells
    below 0.5 in the x2 direction to smooth twice as much in the first dimension;
    elsewhere the smoothing is uniform.

    >>> alphas = np.ones((mesh.n_cells, mesh.dim))
    >>> alphas[mesh.cell_centers[:, 1] < 0.5] = [2, 1]
    >>> reg = SmoothnessFullGradient(mesh, alphas)

    We can also rotate the axis in which we want to preferentially smooth. Say we want to
    smooth twice as much along the +x1,+x2 diagonal as we do along the -x1,+x2 diagonal,
    effectively rotating our smoothing 45 degrees. Note that the columns of the matrix
    represent the directional vectors (not the rows).

    >>> sqrt2 = np.sqrt(2)
    >>> reg_dirs = np.array([
    ...     [sqrt2, -sqrt2],
    ...     [sqrt2, sqrt2],
    ... ])
    >>> reg = SmoothnessFullGradient(mesh, alphas, reg_dirs=reg_dirs)

    Notes
    -----
    The regularization object is the discretized form of the continuous regularization

    .. math::

        f(m) = \int_V \nabla m \cdot \mathbf{a} \nabla m \hspace{5pt} \partial V

    The tensor quantity :math:`\mathbf{a}` is used to represent the potential preferential
    directions of regularization. :math:`\mathbf{a}` must be symmetric positive semi-definite
    with an eigendecomposition of:

    .. math::

        \mathbf{a} = \mathbf{Q}\mathbf{L}\mathbf{Q}^{-1}

    :math:`\mathbf{Q}` is then the regularization directions ``reg_dirs``, and
    :math:`\mathbf{L}` represents the weighting along each direction, with ``alphas``
    along its diagonal. These are multiplied to form the anisotropic alpha used for
    rotated gradients.
    """

    def __init__(self, mesh, alphas=None, reg_dirs=None, ortho_check=True, **kwargs):
        # The rotated-gradient formulation is only meaningful in 2D/3D.
        if mesh.dim < 2:
            raise TypeError("Mesh must have dimension higher than 1")
        super().__init__(mesh=mesh, **kwargs)

        if alphas is None:
            # Default: isotropic weighting scaled by the smallest edge length squared.
            edge_length = np.min(mesh.edge_lengths)
            alphas = edge_length**2 * np.ones(mesh.dim)
        alphas = validate_ndarray_with_shape(
            "alphas",
            alphas,
            shape=[(mesh.dim,), ("*", mesh.dim)],
            dtype=float,
        )
        n_active_cells = self.regularization_mesh.n_cells
        if len(alphas.shape) == 1:
            # Broadcast a single per-axis weight vector to every cell.
            alphas = np.tile(alphas, (mesh.n_cells, 1))
        if alphas.shape[0] != mesh.n_cells:
            # check if I need to expand from active cells to all cells (needed for discretize)
            if self.active_cells is not None and alphas.shape[0] == n_active_cells:
                alpha_temp = np.zeros((mesh.n_cells, mesh.dim))
                alpha_temp[self.active_cells] = alphas
                alphas = alpha_temp
            else:
                raise IndexError(
                    f"`alphas` first dimension, {alphas.shape[0]}, must be either number "
                    f"of active cells {n_active_cells}, or the number of mesh cells {mesh.n_cells}. "
                )
        if np.any(alphas < 0):
            raise ValueError("`alpha` must be non-negative")
        anis_alpha = alphas

        if reg_dirs is not None:
            reg_dirs = validate_ndarray_with_shape(
                "reg_dirs",
                reg_dirs,
                shape=[(mesh.dim, mesh.dim), ("*", mesh.dim, mesh.dim)],
                dtype=float,
            )
            if reg_dirs.shape == (mesh.dim, mesh.dim):
                # Broadcast a single rotation matrix to every cell.
                reg_dirs = np.tile(reg_dirs, (mesh.n_cells, 1, 1))
            if reg_dirs.shape[0] != mesh.n_cells:
                # check if I need to expand from active cells to all cells (needed for discretize)
                if (
                    self.active_cells is not None
                    and reg_dirs.shape[0] == n_active_cells
                ):
                    reg_dirs_temp = np.zeros((mesh.n_cells, mesh.dim, mesh.dim))
                    reg_dirs_temp[self.active_cells] = reg_dirs
                    reg_dirs = reg_dirs_temp
                else:
                    raise IndexError(
                        f"`reg_dirs` first dimension, {reg_dirs.shape[0]}, must be either number "
                        f"of active cells {n_active_cells}, or the number of mesh cells {mesh.n_cells}. "
                    )
            # Optionally verify each rotation matrix is orthonormal (Q @ Q.T == I).
            if ortho_check:
                eye = np.eye(mesh.dim)
                for i, M in enumerate(reg_dirs):
                    if not np.allclose(eye, M @ M.T):
                        raise ValueError(f"Matrix {i} is not orthonormal")
            # create a stack of matrices of dir @ alphas @ dir.T
            anis_alpha = np.einsum("ink,ik,imk->inm", reg_dirs, anis_alpha, reg_dirs)
            # Then select the upper diagonal components for input to discretize
            # (discretize expects the symmetric tensor in condensed Voigt-like order).
            if mesh.dim == 2:
                anis_alpha = np.stack(
                    (
                        anis_alpha[..., 0, 0],
                        anis_alpha[..., 1, 1],
                        anis_alpha[..., 0, 1],
                    ),
                    axis=-1,
                )
            elif mesh.dim == 3:
                anis_alpha = np.stack(
                    (
                        anis_alpha[..., 0, 0],
                        anis_alpha[..., 1, 1],
                        anis_alpha[..., 2, 2],
                        anis_alpha[..., 0, 1],
                        anis_alpha[..., 0, 2],
                        anis_alpha[..., 1, 2],
                    ),
                    axis=-1,
                )
        self._anis_alpha = anis_alpha

    # overwrite the call, deriv, and deriv2...
    def __call__(self, m):
        # phi(m) = r.T @ M_f @ r, with r the (face) gradient of the mapped model.
        G = self.cell_gradient
        M_f = self.W
        r = G @ (self.mapping * (self._delta_m(m)))
        return r @ M_f @ r

    def deriv(self, m):
        # d phi/dm = 2 * J_map.T @ G.T @ M_f @ G @ (m - m_ref)
        m_d = self.mapping.deriv(self._delta_m(m))
        G = self.cell_gradient
        M_f = self.W
        r = G @ (self.mapping * (self._delta_m(m)))
        return 2 * (m_d.T * (G.T @ (M_f @ r)))

    def deriv2(self, m, v=None):
        # Hessian (or Hessian-vector product when ``v`` is given); the objective is
        # quadratic in the mapped model, so this is exact, not an approximation.
        m_d = self.mapping.deriv(self._delta_m(m))
        G = self.cell_gradient
        M_f = self.W
        if v is None:
            return 2 * (m_d.T @ (G.T @ M_f @ G) @ m_d)

        return 2 * (m_d.T @ (G.T @ (M_f @ (G @ (m_d @ v)))))

    @property
    def cell_gradient(self):
        """The (approximate) cell gradient operator

        Built lazily and cached. Falls back to a scaled ``stencil_cell_gradient``
        when the mesh does not define ``cell_gradient`` directly.

        Returns
        -------
        scipy.sparse.csr_matrix
        """
        if getattr(self, "_cell_gradient", None) is None:
            mesh = self.regularization_mesh.mesh
            try:
                cell_gradient = mesh.cell_gradient
            except AttributeError:
                # Approximate gradient: stencil differences scaled by face area over
                # the face-averaged cell volume.
                a = mesh.face_areas
                v = mesh.average_cell_to_face @ mesh.cell_volumes
                cell_gradient = sp.diags(a / v) @ mesh.stencil_cell_gradient

            v = np.ones(mesh.n_cells)
            # Turn off cell_gradient at boundary faces
            if self.active_cells is not None:
                v[~self.active_cells] = 0

            # Faces where the gradient of this indicator is ~0 are interior to the
            # active region; P masks out all other (boundary) faces.
            dv = cell_gradient @ v
            P = sp.diags((np.abs(dv) <= 1e-16).astype(int))
            cell_gradient = P @ cell_gradient
            if self.active_cells is not None:
                cell_gradient = cell_gradient[:, self.active_cells]
            self._cell_gradient = cell_gradient
        return self._cell_gradient

    @property
    def _weights_shapes(self):
        # Valid lengths for user-supplied weights: one value per mesh face, or one
        # per active cell (see how they are consumed in ``W``).
        reg_mesh = self.regularization_mesh
        mesh = reg_mesh.mesh
        return [(mesh.n_faces,), (reg_mesh.n_cells,)]

    @property
    def W(self):
        """The inner product operator using rotated coordinates

        Built lazily and cached; invalidated when weights are set.

        Returns
        -------
        scipy.sparse.csr_matrix

        Notes
        -----
        This matrix is equivalent to `W.T @ W` in most other regularizations. It uses
        `discretize` inner product operators to form the matrix `W.T @ W` all at once.
        """
        if getattr(self, "_W", None) is None:
            mesh = self.regularization_mesh.mesh
            n_faces = mesh.n_faces
            n_cells = self.regularization_mesh.n_cells
            # Accumulate multiplicative user weights, split by whether they apply
            # per-cell or per-face.
            cell_weights = np.ones(n_cells)
            face_weights = np.ones(n_faces)
            for values in self._weights.values():
                if len(values) == n_cells:
                    cell_weights *= values
                elif len(values) == n_faces:
                    face_weights *= values
                else:
                    raise ValueError(
                        "Weights must be either number of active cells, or number of total faces"
                    )
            # optionally expand the cell weights if there are inactive cells
            if n_cells != len(mesh) and self.active_cells is not None:
                weights = np.zeros(mesh.n_cells)
                weights[self.active_cells] = cell_weights
                cell_weights = weights
            reg_model = self._anis_alpha * cell_weights[:, None]
            # turn off measure in inactive cells
            if self.active_cells is not None:
                reg_model[~self.active_cells] = 0.0

            # Face weights are applied symmetrically (sqrt on each side) so the
            # resulting operator stays symmetric positive semi-definite.
            Wf = sp.diags(np.sqrt(face_weights))

            W = mesh.get_face_inner_product(reg_model)

            self._W = Wf @ (W @ Wf)
        return self._W
+111,7 @@ def vol(self) -> np.ndarray: return self._vol @property - def nC(self) -> int: + def n_cells(self) -> int: """Number of active cells. Returns @@ -121,7 +121,9 @@ def nC(self) -> int: """ if self.active_cells is not None: return int(self.active_cells.sum()) - return self.mesh.nC + return self.mesh.n_cells + + nC = n_cells @property def dim(self) -> int: diff --git a/tests/base/test_cross_gradient.py b/tests/base/regularizations/test_cross_gradient.py similarity index 95% rename from tests/base/test_cross_gradient.py rename to tests/base/regularizations/test_cross_gradient.py index b0493b8569..907f04bb56 100644 --- a/tests/base/test_cross_gradient.py +++ b/tests/base/regularizations/test_cross_gradient.py @@ -8,8 +8,6 @@ regularization, ) -np.random.seed(10) - class CrossGradientTensor2D(unittest.TestCase): def setUp(self): @@ -42,6 +40,7 @@ def test_order_approximate_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = True self.assertTrue(cross_grad.test()) @@ -52,12 +51,14 @@ def test_order_full_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = False self.assertTrue(cross_grad._test_deriv()) self.assertTrue(cross_grad._test_deriv2(expectedOrder=2)) def test_deriv2_no_arg(self): + np.random.seed(10) m = np.random.randn(2 * len(self.mesh)) cross_grad = self.cross_grad @@ -134,6 +135,7 @@ def test_order_approximate_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = True self.assertTrue(cross_grad.test()) @@ -144,12 +146,14 @@ def test_order_full_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = 
False self.assertTrue(cross_grad._test_deriv()) self.assertTrue(cross_grad._test_deriv2(expectedOrder=2)) def test_deriv2_no_arg(self): + np.random.seed(10) m = np.random.randn(2 * len(self.mesh)) cross_grad = self.cross_grad @@ -167,6 +171,7 @@ def test_deriv2_no_arg(self): np.testing.assert_allclose(Wv, W @ v) def test_cross_grad_calc(self): + np.random.seed(10) m = np.random.randn(2 * len(self.mesh)) cross_grad = self.cross_grad @@ -209,6 +214,7 @@ def test_order_approximate_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = True self.assertTrue(cross_grad.test()) @@ -219,12 +225,14 @@ def test_order_full_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = False self.assertTrue(cross_grad._test_deriv()) self.assertTrue(cross_grad._test_deriv2(expectedOrder=2)) def test_deriv2_no_arg(self): + np.random.seed(10) m = np.random.randn(2 * len(self.mesh)) cross_grad = self.cross_grad @@ -274,6 +282,7 @@ def test_order_approximate_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = True self.assertTrue(cross_grad.test()) @@ -284,12 +293,14 @@ def test_order_full_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = False self.assertTrue(cross_grad._test_deriv()) self.assertTrue(cross_grad._test_deriv2(expectedOrder=2)) def test_deriv2_no_arg(self): + np.random.seed(10) m = np.random.randn(2 * len(self.mesh)) cross_grad = self.cross_grad diff --git a/tests/base/regularizations/test_full_gradient.py b/tests/base/regularizations/test_full_gradient.py new file mode 100644 index 0000000000..a827676fc8 --- /dev/null 
"""Tests for the ``SmoothnessFullGradient`` regularization."""
from discretize.tests import assert_expected_order, check_derivative
from discretize.utils import example_simplex_mesh
import discretize
import numpy as np
from SimPEG.regularization import SmoothnessFullGradient
import pytest


def f_2d(x, y):
    """Smooth 2D test function with an analytically known gradient measure."""
    return (1 - np.cos(2 * x * np.pi)) * (1 - np.cos(4 * y * np.pi))


def f_3d(x, y, z):
    """Smooth 3D test function with an analytically known gradient measure."""
    return f_2d(x, y) * (1 - np.cos(8 * z * np.pi))


# Orthonormal rotation matrices; columns are the regularization directions.
dir_2d = np.array([[1.0, 1.0], [-1.0, 1.0]]).T
dir_2d /= np.linalg.norm(dir_2d, axis=0)
dir_3d = np.array([[1, 1, 1], [-1, 1, 0], [-1, -1, 2]]).T
dir_3d = dir_3d / np.linalg.norm(dir_3d, axis=0)

# a list of argument tuples to pass to pytest parameterize
# each is a tuple of (function, dim, true_value, alphas, reg_dirs)
parameterized_args = [
    # assumes reg_dirs aligned with axes
    (f_2d, 2, 15 * np.pi**2, [1, 1], None),
    # test for explicitly aligned with axes
    (f_2d, 2, 15 * np.pi**2, [1, 1], np.eye(2)),
    # circular regularization should be invariant to rotation
    (f_2d, 2, 15 * np.pi**2, [1, 1], dir_2d),
    # elliptic regularization aligned with axes
    (f_2d, 2, 27 * np.pi**2, [1, 2], None),
    # rotated elliptic regularization
    (f_2d, 2, 111.033049512255 * 2, [1, 2], dir_2d),
    # default alignment with axes
    (f_3d, 3, 189 * np.pi**2 / 2, [1, 1, 1], None),
    # test for explicitly aligned with axes
    (f_3d, 3, 189 * np.pi**2 / 2, [1, 1, 1], np.eye(3)),
    # circular regularization should be invariant to rotation
    (f_3d, 3, 189 * np.pi**2 / 2, [1, 1, 1], dir_3d),
    # elliptic regularization aligned with axes
    (f_3d, 3, 513 * np.pi**2 / 2, [1, 2, 3], None),
    # rotated elliptic regularization
    (f_3d, 3, 1065.91727531765 * 2, [1, 2, 3], dir_3d),
]


@pytest.mark.parametrize("mesh_class", [discretize.TensorMesh, discretize.TreeMesh])
@pytest.mark.parametrize("func,dim,true_value,alphas,reg_dirs", parameterized_args)
def test_regularization_order(mesh_class, func, dim, true_value, alphas, reg_dirs):
    """This function is testing for the accuracy of the regularization.

    Basically, is it actually measuring what we say it's measuring: the
    discrete measure should converge to ``true_value`` as the mesh refines.
    """
    n_hs = [8, 16, 32]

    def reg_error(n):
        h = [n] * dim
        mesh = mesh_class(h)
        if mesh_class is discretize.TreeMesh:
            mesh.refine(-1)
        # cell widths will be the same in each dimension
        dh = mesh.h[0][0]

        f_eval = func(*mesh.cell_centers.T)

        reg = SmoothnessFullGradient(mesh, alphas=alphas, reg_dirs=reg_dirs)

        numerical_eval = reg(f_eval)
        err = np.abs(numerical_eval - true_value)
        return err, dh

    assert_expected_order(reg_error, n_hs)


@pytest.mark.parametrize("dim", [2, 3])
def test_simplex_mesh(dim):
    """Test to make sure it works with a simplex mesh

    We can't make as strong of an accuracy claim for this mesh type because the cell gradient
    operator is not actually defined for it (it uses an approximation to the cell gradient).
    It is close, but we should at least test that it works.
    """
    h = [10] * dim
    points, simplices = example_simplex_mesh(h)
    mesh = discretize.SimplexMesh(points, simplices)
    reg = SmoothnessFullGradient(mesh)

    # multiply it by a vector to make sure we can construct everything internally
    # at the very least, we should be able to confirm it evaluates to 0 for a flat model.
    out = reg(np.ones(mesh.n_cells))
    np.testing.assert_allclose(out, 0)


@pytest.mark.parametrize(
    "dim,alphas,reg_dirs", [(2, [1, 2], dir_2d), (3, [1, 2, 3], dir_3d)]
)
def test_first_derivatives(dim, alphas, reg_dirs):
    """Check ``deriv`` against a numerical derivative of ``__call__``."""
    h = [10] * dim
    mesh = discretize.TensorMesh(h)
    reg = SmoothnessFullGradient(mesh, alphas=alphas, reg_dirs=reg_dirs)

    def func(x):
        return reg(x), reg.deriv(x)

    check_derivative(func, np.ones(mesh.n_cells), plotIt=False)


@pytest.mark.parametrize(
    "dim,alphas,reg_dirs", [(2, [1, 2], dir_2d), (3, [1, 2, 3], dir_3d)]
)
def test_second_derivatives(dim, alphas, reg_dirs):
    """Check ``deriv2`` against a numerical derivative of ``deriv``."""
    h = [10] * dim
    mesh = discretize.TensorMesh(h)
    reg = SmoothnessFullGradient(mesh, alphas=alphas, reg_dirs=reg_dirs)

    def func(x):
        return reg.deriv(x), lambda v: reg.deriv2(x, v)

    check_derivative(func, np.ones(mesh.n_cells), plotIt=False)


@pytest.mark.parametrize("with_active_cells", [True, False])
def test_operations(with_active_cells, dim=3):
    """Smoke test: operators build, and deriv2 agrees with and without a vector."""
    h = [10] * dim
    mesh = discretize.TensorMesh(h)
    if with_active_cells:
        active_cells = mesh.cell_centers[:, -1] <= 0.75
        n_cells = active_cells.sum()
    else:
        active_cells = None
        n_cells = mesh.n_cells
    reg = SmoothnessFullGradient(mesh, active_cells=active_cells)
    # create a model
    m = np.arange(n_cells)
    # create a vector
    v = np.random.rand(n_cells)
    # test the second derivative evaluates
    # and gives same results with and without a vector
    v1 = reg.deriv2(m, v)
    v2 = reg.deriv2(m) @ v
    np.testing.assert_allclose(v1, v2)

    W1 = reg.W

    # test assigning a per-(active-)cell weight
    reg.set_weights(temp_weight=np.random.rand(n_cells))

    # setting a weight should've erased W
    assert reg._W is None

    # test assigning a per-face weight
    reg.set_weights(temp_weight=np.random.rand(mesh.n_faces))

    # and test it all works!
    W2 = reg.W
    assert W1 is not W2


def test_errors():
    """Verify informative errors for invalid meshes, alphas, and reg_dirs."""
    # bad dimension mesh
    mesh1d = discretize.TensorMesh([5])
    with pytest.raises(TypeError):
        SmoothnessFullGradient(mesh1d)
    mesh2d = discretize.TensorMesh([5, 5])
    # test some bad alphas
    with pytest.raises(ValueError):
        # 3D alpha passed to 2D operator
        SmoothnessFullGradient(mesh2d, [1, 2, 3])

    with pytest.raises(IndexError):
        # incorrect number of cell dependent alphas
        alphas = np.random.rand(mesh2d.n_cells - 5, 2)
        SmoothnessFullGradient(mesh2d, alphas=alphas)

    with pytest.raises(ValueError):
        # negative alphas
        SmoothnessFullGradient(mesh2d, [-1, 1, 1])

    alphas = [1, 2]
    # test some bad reg dirs
    with pytest.raises(ValueError):
        # 3D reg dirs passed to 2D reg
        reg_dirs = np.random.rand(3, 3)
        SmoothnessFullGradient(mesh2d, alphas=alphas, reg_dirs=reg_dirs)

    with pytest.raises(IndexError):
        # incorrect number of cell dependent reg_dirs
        reg_dirs = np.random.rand(mesh2d.n_cells - 5, 2, 2)
        SmoothnessFullGradient(mesh2d, alphas=alphas, reg_dirs=reg_dirs)

    with pytest.raises(ValueError):
        # non-orthonormal reg_dirs
        reg_dirs = np.random.rand(2, 2)
        SmoothnessFullGradient(mesh2d, alphas=alphas, reg_dirs=reg_dirs)
b/tests/base/regularizations/test_regularization.py @@ -39,6 +39,7 @@ "LinearCorrespondence", "JointTotalVariation", "BaseAmplitude", + "SmoothnessFullGradient", "VectorAmplitude", "CrossReferenceRegularization", # Removed regularization classes that raise error on instantiation @@ -181,7 +182,7 @@ def test_property_mirroring(self): active_cells = mesh.gridCC[:, 2] < 0.6 reg = getattr(regularization, regType)(mesh, active_cells=active_cells) - self.assertTrue(reg.nP == reg.regularization_mesh.nC) + self.assertTrue(reg.nP == reg.regularization_mesh.n_cells) [ self.assertTrue(np.all(fct.active_cells == active_cells)) From 4c4a2d9727ed6204d1a525c7a5d9aaa72a0b145d Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 28 Mar 2024 12:39:36 -0700 Subject: [PATCH 45/68] Add directives to the API Reference (#1397) List directives in the API Reference under their own "Directives" category. Fixes #1396 --- SimPEG/directives/__init__.py | 98 ++++++++++++++++++++++++++ docs/content/api/SimPEG.directives.rst | 1 + docs/content/api/index.rst | 7 ++ 3 files changed, 106 insertions(+) create mode 100644 docs/content/api/SimPEG.directives.rst diff --git a/SimPEG/directives/__init__.py b/SimPEG/directives/__init__.py index a713648113..34c8c1fee7 100644 --- a/SimPEG/directives/__init__.py +++ b/SimPEG/directives/__init__.py @@ -1,3 +1,101 @@ +""" +============================================= +Directives (:mod:`SimPEG.directives`) +============================================= + +.. currentmodule:: SimPEG.directives + +Directives are classes that allow us to control the inversion, perform tasks +between iterations, save information about our inversion process and more. +Directives are passed to the ``SimPEG.inversion.BaseInversion`` class through +the ``directiveList`` argument. The tasks specified through the directives are +executed after each inversion iteration, following the same order as in which +they are passed in the ``directiveList``. 
+ +Although you can write your own directive classes and plug them into your +inversion, we provide a set of useful directive classes that cover a wide range +of applications: + + +General purpose directives +========================== + +.. autosummary:: + :toctree: generated/ + + AlphasSmoothEstimate_ByEig + BetaEstimateMaxDerivative + BetaEstimate_ByEig + BetaSchedule + JointScalingSchedule + MultiTargetMisfits + ProjectSphericalBounds + ScalingMultipleDataMisfits_ByEig + TargetMisfit + UpdatePreconditioner + UpdateSensitivityWeights + Update_Wj + + +Directives to save inversion results +==================================== + +.. autosummary:: + :toctree: generated/ + + SaveEveryIteration + SaveModelEveryIteration + SaveOutputDictEveryIteration + SaveOutputEveryIteration + + +Directives related to sparse inversions +======================================= + +.. autosummary:: + :toctree: generated/ + + Update_IRLS + + +Directives related to PGI +========================= + +.. autosummary:: + :toctree: generated/ + + PGI_AddMrefInSmooth + PGI_BetaAlphaSchedule + PGI_UpdateParameters + + +Directives related to joint inversions +====================================== + +.. autosummary:: + :toctree: generated/ + + SimilarityMeasureInversionDirective + SimilarityMeasureSaveOutputEveryIteration + PairedBetaEstimate_ByEig + PairedBetaSchedule + MovingAndMultiTargetStopping + + +Base directive classes +====================== +The ``InversionDirective`` class defines the basic class for all directives. +Inherit from this class when writing your own directive. The ``DirectiveList`` +is used under the hood to handle the execution of all directives passed to the +``SimPEG.inversion.BaseInversion``. + +.. 
autosummary:: + :toctree: generated/ + + InversionDirective + DirectiveList + +""" from .directives import ( InversionDirective, DirectiveList, diff --git a/docs/content/api/SimPEG.directives.rst b/docs/content/api/SimPEG.directives.rst new file mode 100644 index 0000000000..35999d49d0 --- /dev/null +++ b/docs/content/api/SimPEG.directives.rst @@ -0,0 +1 @@ +.. automodule:: SimPEG.directives diff --git a/docs/content/api/index.rst b/docs/content/api/index.rst index 55faddc116..8ffe60ffa9 100644 --- a/docs/content/api/index.rst +++ b/docs/content/api/index.rst @@ -32,6 +32,13 @@ Regularizations SimPEG.regularization +Directives +---------- +.. toctree:: + :maxdepth: 2 + + SimPEG.directives + Utilities --------- From d44a1e6fdb5771e56e3670e8cbcb8f6017bc56d4 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 28 Mar 2024 13:02:20 -0700 Subject: [PATCH 46/68] Remove deprecated modelType in mag simulation (#1399) Remove the deprecated `modelType` property in the magnetic simulation and add test function checking that error is raised when trying to access it. 
Part of the solution to #1302 --- SimPEG/potential_fields/magnetics/simulation.py | 2 +- tests/pf/test_forward_Mag_Linear.py | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 11fdfb5a70..f90145000c 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -122,7 +122,7 @@ def G(self): return self._G modelType = deprecate_property( - model_type, "modelType", "model_type", removal_version="0.18.0" + model_type, "modelType", "model_type", removal_version="0.18.0", error=True ) @property diff --git a/tests/pf/test_forward_Mag_Linear.py b/tests/pf/test_forward_Mag_Linear.py index c3125b6499..9525edd4a1 100644 --- a/tests/pf/test_forward_Mag_Linear.py +++ b/tests/pf/test_forward_Mag_Linear.py @@ -1,3 +1,4 @@ +import pytest import unittest import discretize @@ -497,5 +498,20 @@ def get_block_inds(grid, block): np.testing.assert_allclose(data, d_amp) +def test_removed_modeltype(): + """Test if accesing removed modelType property raises error.""" + h = [[(2, 2)], [(2, 2)], [(2, 2)]] + mesh = discretize.TensorMesh(h) + receiver_location = np.array([[0, 0, 100]]) + receiver = mag.Point(receiver_location, components="tmi") + background_field = mag.UniformBackgroundField(receiver_list=[receiver]) + survey = mag.Survey(background_field) + mapping = maps.IdentityMap(mesh, nP=mesh.n_cells) + sim = mag.Simulation3DIntegral(mesh, survey=survey, chiMap=mapping) + message = "modelType has been removed, please use model_type." 
+ with pytest.raises(NotImplementedError, match=message): + sim.modelType + + if __name__ == "__main__": unittest.main() From 7eaf970608025e6b3830d7c8340e16505d42fb1a Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 28 Mar 2024 15:46:12 -0700 Subject: [PATCH 47/68] Remove mref property of PGI regularization (#1400) Remove the deprecated `mref` property in the PGI regularization class and add test function checking that error is raised when trying to access it. Part of the solution to #1302 --- SimPEG/regularization/pgi.py | 3 +-- .../regularizations/test_pgi_regularization.py | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/SimPEG/regularization/pgi.py b/SimPEG/regularization/pgi.py index 2c98c321f8..0a7a371f96 100644 --- a/SimPEG/regularization/pgi.py +++ b/SimPEG/regularization/pgi.py @@ -1393,6 +1393,5 @@ def reference_model(self, values: np.ndarray | float): "mref", "reference_model", "0.19.0", - future_warn=True, - error=False, + error=True, ) diff --git a/tests/base/regularizations/test_pgi_regularization.py b/tests/base/regularizations/test_pgi_regularization.py index 440e50a494..cc0ce5ac94 100644 --- a/tests/base/regularizations/test_pgi_regularization.py +++ b/tests/base/regularizations/test_pgi_regularization.py @@ -1,3 +1,4 @@ +import pytest import unittest import discretize @@ -469,5 +470,19 @@ def test_spherical_covariances(self): plt.show() +def test_removed_mref(): + """Test if PGI raises error when accessing removed mref property.""" + h = [[(2, 2)], [(2, 2)], [(2, 2)]] + mesh = discretize.TensorMesh(h) + n_components = 1 + gmm = WeightedGaussianMixture(mesh=mesh, n_components=n_components) + samples = np.random.default_rng(seed=42).normal(size=(mesh.n_cells, 2)) + gmm.fit(samples) + pgi = regularization.PGI(mesh=mesh, gmmref=gmm) + message = "mref has been removed, please use reference_model." 
+ with pytest.raises(NotImplementedError, match=message): + pgi.mref + + if __name__ == "__main__": unittest.main() From 489851f1d8454b85b7f71c0bbe68ec0bbb006799 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 2 Apr 2024 09:48:37 -0700 Subject: [PATCH 48/68] Add link to User Tutorials to navbar in docs (#1401) Add an external link to the new User Tutorials in the navbar of the SimPEG documentation pages. --- docs/conf.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 25936ff517..88178fa245 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -231,6 +231,12 @@ def linkcode_resolve(domain, info): # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. +external_links = [ + dict(name="User Tutorials", url="https://simpeg.xyz/user-tutorials"), + dict(name="SimPEG", url="https://simpeg.xyz"), + dict(name="Contact", url="https://mattermost.softwareunderground.org/simpeg"), +] + try: import pydata_sphinx_theme @@ -240,13 +246,7 @@ def linkcode_resolve(domain, info): html_use_modindex = True html_theme_options = { - "external_links": [ - {"name": "SimPEG", "url": "https://simpeg.xyz"}, - { - "name": "Contact", - "url": "https://mattermost.softwareunderground.org/simpeg", - }, - ], + "external_links": external_links, "icon_links": [ { "name": "GitHub", From eeb406f0a8341c6e250588afaa5a3b34d6e845a5 Mon Sep 17 00:00:00 2001 From: "Williams A. Lima" Date: Tue, 2 Apr 2024 19:13:10 -0300 Subject: [PATCH 49/68] Improve documentation for base simulation classes (#1295) Extend documentation for base simulation classes and in `SyntheticData` class. Fix typo and rst style in `mesh_utils.py`. Add `pymatsolver` to the list of `intersphinx_mapping` in Sphinx's `docs/conf.py`. --------- Co-authored-by: Devin C. 
Cowan Co-authored-by: Santiago Soler Co-authored-by: Lindsey Heagy --- SimPEG/data.py | 12 +- SimPEG/simulation.py | 639 +++++++++++++++++++++++++++++-------- SimPEG/utils/mesh_utils.py | 4 +- docs/conf.py | 1 + 4 files changed, 511 insertions(+), 145 deletions(-) diff --git a/SimPEG/data.py b/SimPEG/data.py index 4ba5ca3571..fa42a6d59e 100644 --- a/SimPEG/data.py +++ b/SimPEG/data.py @@ -115,7 +115,7 @@ def dobs(self): numpy.ndarray Notes - -------- + ----- This array can also be modified by directly indexing the data object using the a tuple of the survey's sources and receivers. @@ -363,8 +363,10 @@ def fromvec(self, v): class SyntheticData(Data): - r""" - Class for creating synthetic data. + r"""Synthetic data class. + + The ``SyntheticData`` class is a :py:class:`SimPEG.data.Data` class that allows the + user to keep track of both clean and noisy data. Parameters ---------- @@ -375,12 +377,12 @@ class SyntheticData(Data): Observed data. dclean : (nD) numpy.ndarray Noiseless data. - relative_error : SimPEG.data.UncertaintyArray + relative_error : float or np.ndarray Assign relative uncertainties to the data using relative error; sometimes referred to as percent uncertainties. For each datum, we assume the standard deviation of Gaussian noise is the relative error times the absolute value of the datum; i.e. :math:`C_{err} \times |d|`. - noise_floor : UncertaintyArray + noise_floor : float or np.ndarray Assign floor/absolute uncertainties to the data. For each datum, we assume standard deviation of Gaussian noise is equal to *noise_floor*. """ diff --git a/SimPEG/simulation.py b/SimPEG/simulation.py index da0c94b1cb..c857aa841e 100644 --- a/SimPEG/simulation.py +++ b/SimPEG/simulation.py @@ -1,5 +1,5 @@ """ -Define simulation classes +Define simulation classes. """ import os import inspect @@ -41,10 +41,39 @@ class BaseSimulation(props.HasModel): + r"""Base class for all geophysical forward simulations in SimPEG. 
+ + The ``BaseSimulation`` class defines properties and methods inherited by + practical simulation classes in SimPEG. + + .. important:: + This class is not meant to be instantiated. You should inherit from it to + create your own simulation class. + + Parameters + ---------- + mesh : discretize.base.BaseMesh, optional + Mesh on which the forward problem is discretized. + survey : SimPEG.survey.BaseSurvey, optional + The survey for the simulation. + solver : None or pymatsolver.base.Base, optional + Numerical solver used to solve the forward problem. If ``None``, + an appropriate solver specific to the simulation class is set by default. + solver_opts : dict, optional + Solver-specific parameters. If ``None``, default parameters are used for + the solver set by ``solver``. Otherwise, the ``dict`` must contain appropriate + pairs of keyword arguments and parameter values for the solver. Please visit + `pymatsolver `__ to learn more + about solvers and their parameters. + sensitivity_path : str, optional + Path to directory where sensitivity file is stored. + counter : None or SimPEG.utils.Counter + SimPEG ``Counter`` object to store iterations and run-times. + verbose : bool, optional + Verbose progress printout. """ - BaseSimulation is the base class for all geophysical forward simulations in - SimPEG. - """ + + _REGISTRY = {} def __init__( self, @@ -73,18 +102,16 @@ def __init__( super().__init__(**kwargs) - ########################################################################### - # Properties - - _REGISTRY = {} - @property def mesh(self): - """Discretize mesh for the simulation + """Mesh for the simulation. + + For more on meshes, visit :py:class:`discretize.base.BaseMesh`. Returns ------- discretize.base.BaseMesh + Mesh on which the forward problem is discretized. """ return self._mesh @@ -101,6 +128,7 @@ def survey(self): Returns ------- SimPEG.survey.BaseSurvey + The survey for the simulation. 
""" return self._survey @@ -112,11 +140,12 @@ def survey(self, value): @property def counter(self): - """The counter. + """SimPEG ``Counter`` object to store iterations and run-times. Returns ------- None or SimPEG.utils.Counter + SimPEG ``Counter`` object to store iterations and run-times. """ return self._counter @@ -128,11 +157,12 @@ def counter(self, value): @property def sensitivity_path(self): - """Path to store the sensitivity. + """Path to directory where sensitivity file is stored. Returns ------- str + Path to directory where sensitivity file is stored. """ return self._sensitivity_path @@ -142,13 +172,25 @@ def sensitivity_path(self, value): @property def solver(self): - """Linear algebra solver (e.g. from pymatsolver). + r"""Numerical solver used in the forward simulation. + + Many forward simulations in SimPEG require solutions to discrete linear + systems of the form: + + .. math:: + \mathbf{A}(\mathbf{m}) \, \mathbf{u} = \mathbf{q} + + where :math:`\mathbf{A}` is an invertible matrix that depends on the + model :math:`\mathbf{m}`. The numerical solver can be set using the + ``solver`` property. In SimPEG, the + `pymatsolver `__ package + is used to create solver objects. Parameters specific to each solver + can be set manually using the ``solver_opts`` property. Returns ------- - class - A solver class that, when instantiated allows a multiplication with the - returned object. + pymatsolver.base.Base + Numerical solver used to solve the forward problem. """ return self._solver @@ -163,12 +205,18 @@ def solver(self, cls): @property def solver_opts(self): - """Options passed to the `solver` class on initialization. + """Solver-specific parameters. + + The parameters specific to the solver set with the ``solver`` property are set + upon instantiation. The ``solver_opts`` property is used to set solver-specific properties. + This is done by providing a ``dict`` that contains appropriate pairs of keyword arguments + and parameter values. 
Please visit `pymatsolver `__ + to learn more about solvers and their parameters. Returns ------- dict - Passed as keyword arguments to the solver. + keyword arguments and parameters passed to the solver. """ return self._solver_opts @@ -178,11 +226,12 @@ def solver_opts(self, value): @property def verbose(self): - """Verbosity flag. + """Verbose progress printout. Returns ------- bool + Verbose progress printout status. """ return self._verbose @@ -190,31 +239,37 @@ def verbose(self): def verbose(self, value): self._verbose = validate_type("verbose", value, bool) - ########################################################################### - # Methods - def fields(self, m=None): - """ - u = fields(m) - The field given the model. - :param numpy.ndarray m: model - :rtype: numpy.ndarray - :return: u, the fields + r"""Return the computed geophysical fields for the model provided. + + Parameters + ---------- + m : (n_param,) numpy.ndarray + The model parameters. + + Returns + ------- + SimPEG.fields.Fields + Computed geophysical fields for the model provided. + """ raise NotImplementedError("fields has not been implemented for this ") def dpred(self, m=None, f=None): - r""" - dpred(m, f=None) - Create the projected data from a model. - The fields, f, (if provided) will be used for the predicted data - instead of recalculating the fields (which may be expensive!). + r"""Predicted data for the model provided. - .. math:: - - d_\text{pred} = P(f(m)) + Parameters + ---------- + m : (n_param,) numpy.ndarray + The model parameters. + f : SimPEG.fields.Fields, optional + If provided, will be used to compute the predicted data + without recalculating the fields. - Where P is a projection of the fields onto the data space. + Returns + ------- + (n_data, ) numpy.ndarray + The predicted data vector. 
""" if self.survey is None: raise AttributeError( @@ -237,51 +292,139 @@ def dpred(self, m=None, f=None): @timeIt def Jvec(self, m, v, f=None): - """ - Jv = Jvec(m, v, f=None) - Effect of J(m) on a vector v. - :param numpy.ndarray m: model - :param numpy.ndarray v: vector to multiply - :param Fields f: fields - :rtype: numpy.ndarray - :return: Jv + r"""Compute the Jacobian times a vector for the model provided. + + The Jacobian defines the derivative of the predicted data vector with respect to the + model parameters. For a data vector :math:`\mathbf{d}` predicted for a set of model parameters + :math:`\mathbf{m}`, the Jacobian is an (n_data, n_param) matrix whose elements + are given by: + + .. math:: + J_{ij} = \frac{\partial d_i}{\partial m_j} + + For a model `m` and vector `v`, the ``Jvec`` method computes the matrix-vector product + + .. math:: + \mathbf{u} = \mathbf{J \, v} + + Parameters + ---------- + m : (n_param, ) numpy.ndarray + The model parameters. + v : (n_param, ) numpy.ndarray + Vector we are multiplying. + f : SimPEG.field.Fields, optional + If provided, fields will not need to be recomputed for the + current model to compute `Jvec`. + + Returns + ------- + (n_data, ) numpy.ndarray + The Jacobian times a vector for the model and vector provided. """ raise NotImplementedError("Jvec is not yet implemented.") @timeIt def Jtvec(self, m, v, f=None): + r"""Compute the Jacobian transpose times a vector for the model provided. + + The Jacobian defines the derivative of the predicted data vector with respect to the + model parameters. For a data vector :math:`\mathbf{d}` predicted for a set of model parameters + :math:`\mathbf{m}`, the Jacobian is an ``(n_data, n_param)`` matrix whose elements + are given by: + + .. math:: + J_{ij} = \frac{\partial d_i}{\partial m_j} + + For a model `m` and vector `v`, the ``Jtvec`` method computes the matrix-vector product with the adjoint-sensitivity + + .. 
math:: + \mathbf{u} = \mathbf{J^T \, v} + + Parameters + ---------- + m : (n_param, ) numpy.ndarray + The model parameters. + v : (n_data, ) numpy.ndarray + Vector we are multiplying. + f : SimPEG.field.Fields, optional + If provided, fields will not need to be recomputed for the + current model to compute `Jtvec`. + + Returns + ------- + (n_param, ) numpy.ndarray + The Jacobian transpose times a vector for the model and vector provided. """ - Jtv = Jtvec(m, v, f=None) - Effect of transpose of J(m) on a vector v. - :param numpy.ndarray m: model - :param numpy.ndarray v: vector to multiply - :param Fields f: fields - :rtype: numpy.ndarray - :return: JTv - """ - raise NotImplementedError("Jt is not yet implemented.") + raise NotImplementedError("Jtvec is not yet implemented.") @timeIt def Jvec_approx(self, m, v, f=None): - """Jvec_approx(m, v, f=None) - Approximate effect of J(m) on a vector v - :param numpy.ndarray m: model - :param numpy.ndarray v: vector to multiply - :param Fields f: fields - :rtype: numpy.ndarray - :return: approxJv + r"""Approximation of the Jacobian times a vector for the model provided. + + The Jacobian defines the derivative of the predicted data vector with respect to the + model parameters. For a data vector :math:`\mathbf{d}` predicted for a set of model parameters + :math:`\mathbf{m}`, the Jacobian is an ``(n_data, n_param)`` matrix whose elements + are given by: + + .. math:: + J_{ij} = \frac{\partial d_i}{\partial m_j} + + For a model `m` and vector `v`, the ``Jvec_approx`` method **approximates** + the matrix-vector product: + + .. math:: + \mathbf{u} = \mathbf{J \, v} + + Parameters + ---------- + m : (n_param, ) numpy.ndarray + The model parameters. + v : (n_data, ) numpy.ndarray + Vector we are multiplying. + f : SimPEG.field.Fields, optional + If provided, fields will not need to be recomputed for the + current model to compute `Jtvec`. 
+ + Returns + ------- + (n_param, ) numpy.ndarray + Approximation of the Jacobian times a vector for the model provided. """ return self.Jvec(m, v, f) @timeIt def Jtvec_approx(self, m, v, f=None): - """Jtvec_approx(m, v, f=None) - Approximate effect of transpose of J(m) on a vector v. - :param numpy.ndarray m: model - :param numpy.ndarray v: vector to multiply - :param Fields f: fields - :rtype: numpy.ndarray - :return: JTv + r"""Approximation of the Jacobian transpose times a vector for the model provided. + + The Jacobian defines the derivative of the predicted data vector with respect to the + model parameters. For a data vector :math:`\mathbf{d}` predicted for a set of model parameters + :math:`\mathbf{m}`, the Jacobian is an ``(n_data, n_param)`` matrix whose elements + are given by: + + .. math:: + J_{ij} = \frac{\partial d_i}{\partial m_j} + + For a model `m` and vector `v`, the ``Jtvec_approx`` method **approximates** + the matrix-vector product: + + .. math:: + \mathbf{u} = \mathbf{J^T \, v} + + Parameters + ---------- + m : (n_param, ) numpy.ndarray + The model parameters. + v : (n_data, ) numpy.ndarray + Vector we are multiplying. + f : SimPEG.field.Fields, optional + If provided, fields will not need to be recomputed for the + current model to compute `Jtvec`. + + Returns + ------- + (n_param, ) numpy.ndarray + Approximation of the Jacobian transpose times a vector for the model provided. """ return self.Jtvec(m, v, f) @@ -289,14 +432,27 @@ def Jtvec_approx(self, m, v, f=None): def residual(self, m, dobs, f=None): r"""The data residual. + This method computes and returns the data residual for the model provided. + Where :math:`\mathbf{d}_\text{obs}` are the observed data values, and :math:`\mathbf{d}_\text{pred}` + are the predicted data values for model parameters :math:`\mathbf{m}`, the data + residual is given by: + .. 
math:: + \mathbf{r}(\mathbf{m}) = \mathbf{d}_\text{pred} - \mathbf{d}_\text{obs} - \mu_\\text{data} = \mathbf{d}_\\text{pred} - \mathbf{d}_\\text{obs} + Parameters + ---------- + m : (n_param, ) numpy.ndarray + The model parameters. + dobs : (n_data, ) numpy.ndarray + The observed data values. + f : SimPEG.fields.Fields, optional + If provided, fields will not need to be recomputed when solving the forward problem. - :param numpy.ndarray m: geophysical model - :param numpy.ndarray f: fields - :rtype: numpy.ndarray - :return: data residual + Returns + ------- + (n_data, ) numpy.ndarray + The data residual. """ return mkvc(self.dpred(m, f=f) - dobs) @@ -311,27 +467,35 @@ def make_synthetic_data( random_seed=None, **kwargs, ): - """ - Make synthetic data given a model, and a standard deviation. + r"""Make synthetic data for the model and Gaussian noise provided. + + This method generates and returns a :py:class:`SimPEG.data.SyntheticData` object + for the model and standard deviation of Gaussian noise provided. Parameters ---------- - m : array - Array containing with geophysical model. - relative_error : float - Standard deviation. - noise_floor : float - Noise floor. - f : array or None - Fields for the given model (if pre-calculated). + m : (n_param, ) numpy.ndarray + The model parameters. + relative_error : float, numpy.ndarray + Assign relative uncertainties to the data using relative error; sometimes + referred to as percent uncertainties. For each datum, we assume the + standard deviation of Gaussian noise is the relative error times the + absolute value of the datum; i.e. :math:`C_\text{err} \times |d|`. + noise_floor : float, numpy.ndarray + Assign floor/absolute uncertainties to the data. For each datum, we assume + standard deviation of Gaussian noise is equal to `noise_floor`. + f : SimPEG.fields.Fields, optional + If provided, fields will not need to be recomputed when solving the + forward problem to obtain noiseless data. 
add_noise : bool Whether to add gaussian noise to the synthetic data or not. - random_seed : int or None - Random seed to pass to `numpy.random.default_rng`. + random_seed : int, optional + Random seed to pass to :py:class:`numpy.random.default_rng`. Returns ------- - SyntheticData + SimPEG.data.SyntheticData + A SimPEG synthetic data object, which organizes both clean and noisy data. """ std = kwargs.pop("std", None) @@ -363,30 +527,78 @@ def make_synthetic_data( class BaseTimeSimulation(BaseSimulation): - """ - Base class for a time domain simulation + r"""Base class for time domain simulations. + + The ``BaseTimeSimulation`` defines properties and methods that are required + when the finite volume approach is used to solve time-dependent forward simulations. + Presently, SimPEG discretizes in time using the backward Euler approach. + And as such, the user must now define the step lengths for the forward simulation. + + Parameters + ---------- + mesh : discretize.base.BaseMesh, optional + Mesh on which the forward problem is discretized. This is not necessarily + the same as the mesh on which the simulation is defined. + t0 : float, optional + Initial time, in seconds, for the time-dependent forward simulation. + time_steps : (n_steps, ) numpy.ndarray, optional + The time step lengths, in seconds, for the time domain simulation. + This property can be also be set using a compact form; see *Notes*. + + Notes + ----- + There are two ways in which the user can set the ``time_steps`` property + for the forward simulation. The most basic approach is to use a ``(n_steps, )`` + :py:class:`numpy.ndarray` that explicitly defines the step lengths in order. + I.e.: + + >>> sim.time_steps = np.r_[1e-6, 1e-6, 1e-6, 1e-5, 1e-5, 1e-4, 1e-4] + + We can define also define the step lengths in compact for when the same + step length is reused multiple times in succession. In this case, the + ``time_steps`` property is set using a ``list`` of ``tuple``. 
Each + ``tuple`` contains the step length and number of times that step is repeated. + The time stepping defined above can be set equivalently with: + + >>> sim.time_steps = [(1e-6, 3), (1e-5, 2), (1e-4, 2)] + + When set, the :py:func:`discretize.utils.unpack_widths` utility is + used to convert the ``list`` of ``tuple`` to its (n_steps, ) :py:class:`numpy.ndarray` + representation. """ + def __init__(self, mesh=None, t0=0.0, time_steps=None, **kwargs): + self.t0 = t0 + self.time_steps = time_steps + super().__init__(mesh=mesh, **kwargs) + @property def time_steps(self): - """The time steps for the time domain simulation. + """Time step lengths, in seconds, for the time domain simulation. + + There are two ways in which the user can set the ``time_steps`` property + for the forward simulation. The most basic approach is to use a ``(n_steps, )`` + :py:class:`numpy.ndarray` that explicitly defines the step lengths in order. + I.e.: - You can set as an array of dt's or as a list of tuples/floats. - If it is set as a list, tuples are unpacked with - `discretize.utils.unpack_widths``. + >>> sim.time_steps = np.r_[1e-6, 1e-6, 1e-6, 1e-5, 1e-5, 1e-4, 1e-4] - For example, the following setters are the same:: + We can define also define the step lengths in compact for when the same + step length is reused multiple times in succession. In this case, the + ``time_steps`` property is set using a ``list`` of ``tuple``. Each + ``tuple`` contains the step length and number of times that step is repeated. + The time stepping defined above can be set equivalently with: - >>> sim.time_steps = [(1e-6, 3), 1e-5, (1e-4, 2)] - >>> sim.time_steps = np.r_[1e-6,1e-6,1e-6,1e-5,1e-4,1e-4] + >>> sim.time_steps = [(1e-6, 3), (1e-5, 2), (1e-4, 2)] + + When set, the :py:func:`discretize.utils.unpack_widths` utility is + used to convert the ``list`` of ``tuple`` to its ``(n_steps, )`` :py:class:`numpy.ndarray` + representation. 
Returns ------- - numpy.ndarray - - See Also - -------- - discretize.utils.unpack_widths + (n_steps, ) numpy.ndarray + The time step lengths for the time domain simulation. """ return self._time_steps @@ -401,11 +613,12 @@ def time_steps(self, value): @property def t0(self): - """Start time for the discretization. + """Initial time, in seconds, for the time-dependent forward simulation. Returns ------- float + Initial time, in seconds, for the time-dependent forward simulation. """ return self._t0 @@ -414,13 +627,21 @@ def t0(self, value): self._t0 = validate_float("t0", value) del self.time_mesh - def __init__(self, mesh=None, t0=0.0, time_steps=None, **kwargs): - self.t0 = t0 - self.time_steps = time_steps - super().__init__(mesh=mesh, **kwargs) - @property def time_mesh(self): + r"""Time mesh for easy interpolation to observation times. + + The time mesh is constructed internally from the :py:attr:`t0` and + :py:attr:`time_steps` properties using the :py:class:`discretize.TensorMesh` class. + The ``time_mesh`` property allows for easy interpolation from fields computed at + discrete time-steps, to an arbitrary set of observation + times within the continuous interval (:math:`t_0 , t_\text{end}`). + + Returns + ------- + discretize.TensorMesh + The time mesh. + """ if getattr(self, "_time_mesh", None) is None: self._time_mesh = TensorMesh( [ @@ -437,26 +658,32 @@ def time_mesh(self): @property def nT(self): + """Total number of time steps. + + Returns + ------- + int + Total number of time steps. + """ return self.time_mesh.n_cells @property def times(self): - "Modeling times" - return self.time_mesh.nodes_x + """Evaluation times. - def dpred(self, m=None, f=None): - r""" - dpred(m, f=None) - Create the projected data from a model. - The fields, f, (if provided) will be used for the predicted data - instead of recalculating the fields (which may be expensive!). - - .. 
math:: + Returns the discrete set of times at which the fields are computed for + the forward simulation. - d_\text{pred} = P(f(m)) - - Where P is a projection of the fields onto the data space. + Returns + ------- + (nT, ) numpy.ndarray + The discrete set of times at which the fields are computed for + the forward simulation. """ + return self.time_mesh.nodes_x + + def dpred(self, m=None, f=None): + # Docstring inherited from BaseSimulation. if self.survey is None: raise AttributeError( "The survey has not yet been set and is required to compute " @@ -482,16 +709,40 @@ def dpred(self, m=None, f=None): class LinearSimulation(BaseSimulation): - """ - Class for a linear simulation of the form + r"""Linear forward simulation class. + + The ``LinearSimulation`` class is used to define forward simulations of the form: .. math:: + \mathbf{d} = \mathbf{G \, f}(\mathbf{m}) + + where :math:`\mathbf{m}` are the model parameters, :math:`\mathbf{f}` is a + mapping operator (optional) from the model space to a user-defined parameter space, + :math:`\mathbf{d}` is the predicted data vector, and :math:`\mathbf{G}` is an + ``(n_data, n_param)`` linear operator. + + The ``LinearSimulation`` class is generally used as a base class that is inherited by + other simulation classes within SimPEG. However, it can be used directly as a + simulation class if the :py:attr:`G` property is used to set the linear forward + operator directly. - d = Gm + By default, we assume the mapping operator :math:`\mathbf{f}` is the identity map, + and that the forward simulation reduces to: - where :math:`d` is a vector of the data, `G` is the simulation matrix and - :math:`m` is the model. - Inherit this class to build a linear simulation. + .. math:: + \mathbf{d} = \mathbf{G \, m} + + Parameters + ---------- + mesh : discretize.BaseMesh, optional + Mesh on which the forward problem is discretized. This is not necessarily + the same as the mesh on which the simulation is defined. 
+ model_map : SimPEG.maps.BaseMap + Mapping from the model parameters to vector that the linear operator acts on. + G : (n_data, n_param) numpy.ndarray or scipy.sparse.csr_matrx + The linear operator. For a ``model_map`` that maps within the same vector space + (e.g. the identity map), the dimension ``n_param`` equals the number of model parameters. + If not, the dimension ``n_param`` of the linear operator will depend on the mapping. """ linear_model, model_map, model_deriv = props.Invertible( @@ -516,6 +767,15 @@ def __init__(self, mesh=None, linear_model=None, model_map=None, G=None, **kwarg @property def G(self): + """The linear operator. + + Returns + ------- + (n_data, n_param) numpy.ndarray or scipy.sparse.csr_matrix + The linear operator. For a :py:attr:`model_map` that maps within the same vector space + (e.g. the identity map), the dimension ``n_param`` equals the number of model parameters. + If not, the dimension ``n_param`` of the linear operator will depend on the mapping. + """ if getattr(self, "_G", None) is not None: return self._G else: @@ -524,15 +784,17 @@ def G(self): @G.setter def G(self, G): - # Allows setting G in a LinearSimulation + # Allows setting G in a LinearSimulation. # TODO should be validated self._G = G def fields(self, m): + # Docstring inherited from BaseSimulation. self.model = m return self.G.dot(self.linear_model) def dpred(self, m=None, f=None): + # Docstring inherited from BaseSimulation if m is not None: self.model = m if f is not None: @@ -540,24 +802,80 @@ def dpred(self, m=None, f=None): return self.fields(self.model) def getJ(self, m, f=None): + r"""Returns the full Jacobian. + + The general definition of the linear forward simulation is: + + .. math:: + \mathbf{d} = \mathbf{G \, f}(\mathbf{m}) + + where :math:`\mathbf{f}` is a mapping operator (optional) from the model space + to a user-defined parameter space, and :math:`\mathbf{G}` is an (n_data, n_param) + linear operator. 
The ``getJ`` method forms and returns the full Jacobian: + + .. math:: + \mathbf{J}(\mathbf{m}) = \mathbf{G} \frac{\partial \mathbf{f}}{\partial \mathbf{m}} + + for the model :math:`\mathbf{m}` provided. When :math:`\mathbf{f}` is the identity map + (default), the Jacobian is no longer model-dependent and reduces to: + + .. math:: + \mathbf{J} = \mathbf{G} + + Parameters + ---------- + m : numpy.ndarray + The model vector. + f : None + Precomputed fields are not used to speed up the computation of the + Jacobian for linear problems. + + Returns + ------- + J : (n_data, n_param) numpy.ndarray + :math:`J = G\frac{\partial f}{\partial\mathbf{m}}`. + Where :math:`f` is :attr:`model_map`. + """ self.model = m # self.model_deriv is likely a sparse matrix # and G is possibly dense, thus we need to do.. return (self.model_deriv.T.dot(self.G.T)).T def Jvec(self, m, v, f=None): + # Docstring inherited from BaseSimulation self.model = m return self.G.dot(self.model_deriv * v) def Jtvec(self, m, v, f=None): + # Docstring inherited from BaseSimulation self.model = m return self.model_deriv.T * self.G.T.dot(v) class ExponentialSinusoidSimulation(LinearSimulation): - r""" + r"""Simulation class for exponentially decaying sinusoidal kernel functions. + This is the simulation class for the linear problem consisting of - exponentially decaying sinusoids. The kernel functions take the form: + exponentially decaying sinusoids. The entries of the linear operator + :math:`\mathbf{G}` are: + + .. math:: + + G_{ik} = \int_\Omega e^{p \, j_i \, x_k} \cos(\pi \, q \, j_i \, x_k) \, dx + + The model is defined on a 1D :py:class:`discretize.TensorMesh`, and :math:`x_k` + are the cell center locations. :math:`p \leq 0` defines the rate of exponential + decay of the kernel functions. :math:`q` defines the rate of oscillation of + the kernel functions. And :math:`j_i \in [j_0, ... 
, j_n]` controls the spread + of the kernel functions; the number of which is set using the ``n_kernels`` + property. + + .. tip:: + + For proper scaling, we advise defining the 1D tensor mesh to + discretize the interval [0, 1]. + + The kernel functions take the form: .. math:: @@ -571,6 +889,21 @@ class ExponentialSinusoidSimulation(LinearSimulation): d_j = \int g_j(x) m(x) dx to define our data. + + Parameters + ---------- + n_kernels : int + The number of kernel factors for the linear problem; i.e. the number of + :math:`j_i \in [j_0, ... , j_n]`. This sets the number of rows + in the linear forward operator. + p : float + Exponent specifying the decay (`p \leq 0`) or growth (`p \geq 0`) of the kernel. For decay, set :math:`p \leq 0`. + q : float + Rate of oscillation of the kernel. + j0 : float + Minimum value for the spread of the kernel factors. + jn : float + Maximum value for the spread of the kernel factors. """ def __init__(self, n_kernels=20, p=-0.25, q=0.25, j0=0.0, jn=60.0, **kwargs): @@ -583,11 +916,17 @@ def __init__(self, n_kernels=20, p=-0.25, q=0.25, j0=0.0, jn=60.0, **kwargs): @property def n_kernels(self): - """The number of kernels for the linear problem + r"""The number of kernel factors for the linear problem. + + Where :math:`j_0` represents the minimum value for the spread of + kernel factors and :math:`j_n` represents the maximum, ``n_kernels`` + defines the number of kernel factors :math:`j_i \in [j_0, ... , j_n]`. + This ultimately sets the number of rows in the linear forward operator. Returns ------- int + The number of kernel factors for the linear problem. """ return self._n_kernels @@ -602,6 +941,7 @@ def p(self): Returns ------- float + Rate of exponential decay of the kernel. """ return self._p @@ -611,11 +951,12 @@ def p(self, value): @property def q(self): - """rate of oscillation of the kernel. + """Rate of oscillation of the kernel. Returns ------- float + Rate of oscillation of the kernel. 
""" return self._q @@ -625,11 +966,12 @@ def q(self, value): @property def j0(self): - """Maximum value for :math:`j_k = j_0`. + """Minimum value for the spread of the kernel factors. Returns ------- float + Minimum value for the spread of the kernel factors. """ return self._j0 @@ -639,11 +981,12 @@ def j0(self, value): @property def jn(self): - """Maximum value for :math:`j_k = j_n`. + """Maximum value for the spread of the kernel factors. Returns ------- float + Maximum value for the spread of the kernel factors. """ return self._jn @@ -653,16 +996,32 @@ def jn(self, value): @property def jk(self): - """ - Parameters controlling the spread of kernel functions + """The set of kernel factors controlling the spread of the kernel functions. + + Returns + ------- + (n_kernels, ) numpy.ndarray + The set of kernel factors controlling the spread of the kernel functions. """ if getattr(self, "_jk", None) is None: self._jk = np.linspace(self.j0, self.jn, self.n_kernels) return self._jk def g(self, k): - """ - Kernel functions for the decaying oscillating exponential functions. + """Kernel functions evaluated for kernel factor :math:`j_k`. + + This method computes the row of the linear forward operator for + the kernel functions for kernel factor :math:`j_k`, given :math:`k` + + Parameters + ---------- + k : int + Kernel functions for kernel factor *k* + + Returns + ------- + (n_param, ) numpy.ndarray + Kernel functions evaluated for kernel factor *k*. """ return np.exp(self.p * self.jk[k] * self.mesh.nodes_x) * np.cos( np.pi * self.q * self.jk[k] * self.mesh.nodes_x @@ -670,8 +1029,12 @@ def g(self, k): @property def G(self): - """ - Matrix whose rows are the kernel functions + """The linear forward operator. + + Returns + ------- + (n_kernels, n_param) numpy.ndarray + The linear forward operator. 
""" if getattr(self, "_G", None) is None: G_nodes = np.empty((self.mesh.n_nodes, self.n_kernels)) diff --git a/SimPEG/utils/mesh_utils.py b/SimPEG/utils/mesh_utils.py index 0d39f5c162..1fc3a8d580 100644 --- a/SimPEG/utils/mesh_utils.py +++ b/SimPEG/utils/mesh_utils.py @@ -11,8 +11,8 @@ def surface2inds(vrtx, trgl, mesh, boundaries=True, internal=True): """Takes a triangulated surface and determine which mesh cells it intersects. - Paramters - --------- + Parameters + ---------- vrtx : (n_nodes, 3) numpy.ndarray of float The location of the vertices of the triangles trgl : (n_triang, 3) numpy.ndarray of int diff --git a/docs/conf.py b/docs/conf.py index 88178fa245..a47c5740e4 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -423,6 +423,7 @@ def linkcode_resolve(domain, info): "matplotlib": ("https://matplotlib.org/stable/", None), "properties": ("https://propertiespy.readthedocs.io/en/latest/", None), "discretize": ("https://discretize.simpeg.xyz/en/main/", None), + "pymatsolver": ("https://pymatsolver.readthedocs.io/en/latest/", None), } numpydoc_xref_param_type = True From b8d8b8a0e6b2cd8d1511b6dbaf88973a9c5b1933 Mon Sep 17 00:00:00 2001 From: Ying Hu <64567062+YingHuuu@users.noreply.github.com> Date: Wed, 3 Apr 2024 09:34:05 -0700 Subject: [PATCH 50/68] Enforce regularization `weights` as dictionaries (#1344) Only allow passing `weights` to regularizations as `None` or dictionaries. Raise error if `weights` are a different type. Remove lines that were assigning weights if the passed argument was an array. Update examples, tests and tutorials to follow the new interface. Update how the `PGI` constructor passes weights to the underlying `WeightedLeastSquares` objects. 
--------- Co-authored-by: Santiago Soler Co-authored-by: yinghu --- SimPEG/regularization/base.py | 18 +++++++++---- SimPEG/regularization/pgi.py | 3 ++- examples/01-maps/plot_sumMap.py | 6 +++-- examples/08-vrm/plot_inv_vrm_eq.py | 5 +++- ...1_PGI_Linear_1D_joint_WithRelationships.py | 8 +++--- .../regularizations/test_regularization.py | 25 +++++++++++-------- tests/em/vrm/test_vrminv.py | 7 +++++- 7 files changed, 47 insertions(+), 25 deletions(-) diff --git a/SimPEG/regularization/base.py b/SimPEG/regularization/base.py index b82333025d..935efa9bba 100644 --- a/SimPEG/regularization/base.py +++ b/SimPEG/regularization/base.py @@ -65,6 +65,11 @@ def __init__( f"'regularization_mesh' must be of type {RegularizationMesh} or {BaseMesh}. " f"Value of type {type(mesh)} provided." ) + if weights is not None and not isinstance(weights, dict): + raise TypeError( + f"Invalid 'weights' of type '{type(weights)}'. " + "It must be a dictionary with strings as keys and arrays as values." + ) # Raise errors on deprecated arguments: avoid old code that still uses # them to silently fail @@ -87,8 +92,6 @@ def __init__( self.reference_model = reference_model self.units = units if weights is not None: - if not isinstance(weights, dict): - weights = {"user_weights": weights} self.set_weights(**weights) @property @@ -608,7 +611,7 @@ class Smallness(BaseRegularization): or set after instantiation using the `set_weights` method: - >>> reg.set_weights(weights_1=array_1, weights_2=array_2}) + >>> reg.set_weights(weights_1=array_1, weights_2=array_2) The default weights that account for cell dimensions in the regularization are accessed via: @@ -1603,6 +1606,13 @@ def __init__( else: self.length_scale_z = length_scale_z + # Check if weights is a dictionary, raise error if it's not + if weights is not None and not isinstance(weights, dict): + raise TypeError( + f"Invalid 'weights' of type '{type(weights)}'. " + "It must be a dictionary with strings as keys and arrays as values." 
+ ) + # do this to allow child classes to also pass a list of objfcts to this constructor if "objfcts" not in kwargs: objfcts = [ @@ -1652,8 +1662,6 @@ def __init__( self.alpha_yy = alpha_yy self.alpha_zz = alpha_zz if weights is not None: - if not isinstance(weights, dict): - weights = {"user_weights": weights} self.set_weights(**weights) def set_weights(self, **weights): diff --git a/SimPEG/regularization/pgi.py b/SimPEG/regularization/pgi.py index 0a7a371f96..24aeaf866d 100644 --- a/SimPEG/regularization/pgi.py +++ b/SimPEG/regularization/pgi.py @@ -1184,6 +1184,7 @@ def __init__( for model_map, wire, weights in zip( self.maplist, self.wiresmap.maps, weights_list ): + weights_i = {"pgi-weights": weights} if weights is not None else None objfcts += [ WeightedLeastSquares( alpha_s=0.0, @@ -1195,7 +1196,7 @@ def __init__( alpha_zz=alpha_zz, mesh=self.regularization_mesh, mapping=model_map * wire[1], - weights=weights, + weights=weights_i, **kwargs, ) ] diff --git a/examples/01-maps/plot_sumMap.py b/examples/01-maps/plot_sumMap.py index fe99b0cc2e..a07bb8712b 100644 --- a/examples/01-maps/plot_sumMap.py +++ b/examples/01-maps/plot_sumMap.py @@ -138,7 +138,8 @@ def run(plotIt=True): regMesh = TensorMesh([len(domains)]) reg_m1 = regularization.Sparse(regMesh, mapping=wires.homo) - reg_m1.set_weights(cell_weights=wires.homo * wr) + reg_m1.set_weights(weights=wires.homo * wr) + reg_m1.norms = [0, 2] reg_m1.reference_model = np.zeros(sumMap.shape[1]) @@ -146,7 +147,8 @@ def run(plotIt=True): reg_m2 = regularization.Sparse( mesh, active_cells=actv, mapping=wires.hetero, gradient_type="components" ) - reg_m2.set_weights(cell_weights=wires.hetero * wr) + reg_m2.set_weights(weights=wires.hetero * wr) + reg_m2.norms = [0, 0, 0, 0] reg_m2.reference_model = np.zeros(sumMap.shape[1]) diff --git a/examples/08-vrm/plot_inv_vrm_eq.py b/examples/08-vrm/plot_inv_vrm_eq.py index e0eee4ff6f..dab9190535 100644 --- a/examples/08-vrm/plot_inv_vrm_eq.py +++ 
b/examples/08-vrm/plot_inv_vrm_eq.py @@ -196,7 +196,10 @@ w = w / np.max(w) w = w -reg = regularization.Smallness(mesh=mesh, active_cells=actCells, weights=w) +reg = regularization.Smallness( + mesh=mesh, active_cells=actCells, weights={"cell_weights": w} +) + opt = optimization.ProjectedGNCG( maxIter=20, lower=0.0, upper=1e-2, maxIterLS=20, tolCG=1e-4 ) diff --git a/examples/10-pgi/plot_inv_1_PGI_Linear_1D_joint_WithRelationships.py b/examples/10-pgi/plot_inv_1_PGI_Linear_1D_joint_WithRelationships.py index adabf3f35f..afaa07b183 100644 --- a/examples/10-pgi/plot_inv_1_PGI_Linear_1D_joint_WithRelationships.py +++ b/examples/10-pgi/plot_inv_1_PGI_Linear_1D_joint_WithRelationships.py @@ -233,13 +233,13 @@ def g(k): # WeightedLeastSquares Inversion reg1 = regularization.WeightedLeastSquares( - mesh, alpha_s=1.0, alpha_x=1.0, mapping=wires.m1 + mesh, alpha_s=1.0, alpha_x=1.0, mapping=wires.m1, weights={"cell_weights": wr1} ) -reg1.set_weights(cell_weights=wr1) + reg2 = regularization.WeightedLeastSquares( - mesh, alpha_s=1.0, alpha_x=1.0, mapping=wires.m2 + mesh, alpha_s=1.0, alpha_x=1.0, mapping=wires.m2, weights={"cell_weights": wr2} ) -reg2.set_weights(cell_weights=wr2) + reg = reg1 + reg2 opt = optimization.ProjectedGNCG( diff --git a/tests/base/regularizations/test_regularization.py b/tests/base/regularizations/test_regularization.py index 5fc7773ce6..779890667d 100644 --- a/tests/base/regularizations/test_regularization.py +++ b/tests/base/regularizations/test_regularization.py @@ -302,7 +302,8 @@ def test_mappings_and_cell_weights(self): wires = maps.Wires(("sigma", mesh.nC), ("mu", mesh.nC)) - reg = regularization.Smallness(mesh, mapping=wires.sigma, weights=cell_weights) + reg = regularization.Smallness(mesh, mapping=wires.sigma) + reg.set_weights(cell_weights=cell_weights) objfct = objective_function.L2ObjectiveFunction( W=utils.sdiag(np.sqrt(cell_weights * mesh.cell_volumes)), @@ -337,8 +338,7 @@ def test_update_of_sparse_norms(self): v = 
np.random.rand(mesh.nC) cell_weights = np.random.rand(mesh.nC) - - reg = regularization.Sparse(mesh, weights=cell_weights) + reg = regularization.Sparse(mesh, weights={"cell_weights": cell_weights}) np.testing.assert_equal(reg.norms, [1, 1, 1, 1]) @@ -725,14 +725,6 @@ def test_user_defined_weights_as_dict(self, mesh): reg = BaseRegularization(mesh, weights=weights) assert reg.weights_keys == ["dummy_weight"] - def test_user_defined_weights_as_array(self, mesh): - """ - Test weights_keys after user defined weights as dictionary - """ - weights = np.ones(mesh.n_cells) - reg = BaseRegularization(mesh, weights=weights) - assert reg.weights_keys == ["user_weights"] - @pytest.mark.parametrize( "regularization_class", (Smallness, SmoothnessFirstOrder, SmoothnessSecondOrder) ) @@ -908,5 +900,16 @@ def test_removed_class(self, regularization_class): regularization_class() +@pytest.mark.parametrize( + "regularization_class", (BaseRegularization, WeightedLeastSquares) +) +def test_invalid_weights_type(regularization_class): + """Test error after passing weights as invalid type.""" + mesh = discretize.TensorMesh([[(2, 2)]]) + msg = "Invalid 'weights' of type ''" + with pytest.raises(TypeError, match=msg): + regularization_class(mesh, weights=np.array([1.0])) + + if __name__ == "__main__": unittest.main() diff --git a/tests/em/vrm/test_vrminv.py b/tests/em/vrm/test_vrminv.py index 8511f0a704..b4f316c02f 100644 --- a/tests/em/vrm/test_vrminv.py +++ b/tests/em/vrm/test_vrminv.py @@ -69,7 +69,12 @@ def test_basic_inversion(self): ** 0.25 ) reg = regularization.WeightedLeastSquares( - meshObj, alpha_s=0.01, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0, weights=W + meshObj, + alpha_s=0.01, + alpha_x=1.0, + alpha_y=1.0, + alpha_z=1.0, + weights={"weights": W}, ) opt = optimization.ProjectedGNCG( maxIter=20, lower=0.0, upper=1e-2, maxIterLS=20, tolCG=1e-4 From d06feea77391017c56da179e14eede94c7ee71f4 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 3 Apr 2024 11:16:33 -0700 Subject: 
[PATCH 51/68] Minor adjustments to Sphinx configuration (#1398) Align navbar elements to the left to avoid wrapping text in nav items. Remove plausible line from the html template and set it through sphinx pydata theme. --- docs/_templates/layout.html | 2 -- docs/conf.py | 5 +++++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html index f208c5eced..d4fe319939 100644 --- a/docs/_templates/layout.html +++ b/docs/_templates/layout.html @@ -8,6 +8,4 @@ - - {% endblock %} diff --git a/docs/conf.py b/docs/conf.py index a47c5740e4..1e1b4419d6 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -276,6 +276,11 @@ def linkcode_resolve(domain, info): ], "use_edit_page_button": False, "collapse_navigation": True, + "analytics": { + "plausible_analytics_domain": "docs.simpeg.xyz", + "plausible_analytics_url": "https://plausible.io/js/script.js", + }, + "navbar_align": "left", # make elements closer to logo on the left } html_logo = "images/simpeg-logo.png" From 9af04b197dbc2e82785e04430d8a7758ecea0719 Mon Sep 17 00:00:00 2001 From: Lindsey Heagy Date: Wed, 3 Apr 2024 11:22:47 -0700 Subject: [PATCH 52/68] Update AUTHORS.rst (#1259) Add to the Authors: - Xiaolong Wei, @xiaolongw1223 - Santiago Soler, @santisoler - Nick Williams, @nwilliams-kobold - John Weis, @johnweis0480 - Kalen Martens, @kalen-sj --- AUTHORS.rst | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/AUTHORS.rst b/AUTHORS.rst index 55de84f924..65518b241d 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -18,4 +18,9 @@ - Thibaut Astic, (`@thast `_) - Michael Mitchell, (`@micmitch `_) - I-Kang Ding, (`@ikding `_) -- Richard Scott (`@bluetyson `_) +- Richard Scott, (`@bluetyson `_) +- Xiaolong Wei, (`@xiaolongw1223 `_) +- Santiago Soler, (`@santisoler `_) +- Nick Williams, (`@nwilliams-kobold `_) +- John Weis, (`@johnweis0480 `_) +- Kalen Martens, (`@kalen-sj `_) From 20d0379d7aaa037faecf92bf3156969498449487 Mon Sep 17 00:00:00 2001 
From: Lindsey Heagy Date: Wed, 3 Apr 2024 11:50:21 -0700 Subject: [PATCH 53/68] Update year in LICENSE (#1404) Update year in LICENSE file to 2024. --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 79e70af749..cd67a7669e 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2013-2023 SimPEG Developers +Copyright (c) 2013-2024 SimPEG Developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in From 9c3ff437c200c65350dfc8f6e0b99a323f4393e4 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Wed, 3 Apr 2024 15:51:20 -0600 Subject: [PATCH 54/68] Dask MetaSim (#1199) Adds the `dask` version of the `MetaSimulation`. Essentially it never explicitly sends around fields objects, and it avoids sending around simulations (after they have gone through the setter). Every operation that involves one of the internal simulations is explicitly done on the worker that owns that simulation. The `fields` returned by this class are actually `Future`s that live on the worker that owns the respective simulation. Most items that live on this class are `Future`s of the respective item in the `MetaSimulation` class ( e.g. `sim.mappings` are a list of `Future` maps instead of a `list` of maps). As a side effect, it is actually possible to initialize the class with lists of Futures. This is useful if you want to avoid creating everything on the main thread and then serializing and sending the result out to each worker, instead you can create a function which returns an object and send that function to each worker. 
--- SimPEG/meta/__init__.py | 9 +- SimPEG/meta/dask_sim.py | 644 +++++++++++++++++++++++++ SimPEG/meta/simulation.py | 19 +- tests/meta/test_dask_meta.py | 393 +++++++++++++++ tests/meta/test_meta_sim.py | 1 - tests/meta/test_multiprocessing_sim.py | 45 +- 6 files changed, 1085 insertions(+), 26 deletions(-) create mode 100644 SimPEG/meta/dask_sim.py create mode 100644 tests/meta/test_dask_meta.py diff --git a/SimPEG/meta/__init__.py b/SimPEG/meta/__init__.py index e60961e273..9dab54e5be 100644 --- a/SimPEG/meta/__init__.py +++ b/SimPEG/meta/__init__.py @@ -50,7 +50,12 @@ Dask ---- -Coming soon! +.. autosummary:: + :toctree: generated/ + + DaskMetaSimulation + DaskSumMetaSimulation + DaskRepeatedSimulation MPI --- @@ -69,3 +74,5 @@ MultiprocessingSumMetaSimulation, MultiprocessingRepeatedSimulation, ) + +from .dask_sim import DaskMetaSimulation, DaskSumMetaSimulation, DaskRepeatedSimulation diff --git a/SimPEG/meta/dask_sim.py b/SimPEG/meta/dask_sim.py new file mode 100644 index 0000000000..268f5260dc --- /dev/null +++ b/SimPEG/meta/dask_sim.py @@ -0,0 +1,644 @@ +import numpy as np + +from SimPEG.simulation import BaseSimulation +from SimPEG.survey import BaseSurvey +from SimPEG.maps import IdentityMap +from SimPEG.utils import validate_list_of_types, validate_type +from SimPEG.props import HasModel +import itertools +from dask.distributed import Client +from dask.distributed import Future +from .simulation import MetaSimulation, SumMetaSimulation +import scipy.sparse as sp +from operator import add +import warnings + + +def _store_model(mapping, sim, model): + sim.model = mapping * model + + +def _calc_fields(mapping, sim, model, apply_map=False): + if apply_map and model is not None: + return sim.fields(m=mapping @ model) + else: + return sim.fields(m=sim.model) + + +def _calc_dpred(mapping, sim, model, field, apply_map=False): + if apply_map and model is not None: + return sim.dpred(m=mapping @ model) + else: + return sim.dpred(m=sim.model, f=field) + + +def 
_j_vec_op(mapping, sim, model, field, v, apply_map=False): + sim_v = mapping.deriv(model) @ v + if apply_map: + return sim.Jvec(mapping @ model, sim_v, f=field) + else: + return sim.Jvec(sim.model, sim_v, f=field) + + +def _jt_vec_op(mapping, sim, model, field, v, apply_map=False): + if apply_map: + jtv = sim.Jtvec(mapping @ model, v, f=field) + else: + jtv = sim.Jtvec(sim.model, v, f=field) + return mapping.deriv(model).T @ jtv + + +def _get_jtj_diag(mapping, sim, model, field, w, apply_map=False): + w = sp.diags(w) + if apply_map: + jtj = sim.getJtJdiag(mapping @ model, w, f=field) + else: + jtj = sim.getJtJdiag(sim.model, w, f=field) + sim_jtj = sp.diags(np.sqrt(jtj)) + m_deriv = mapping.deriv(model) + return np.asarray((sim_jtj @ m_deriv).power(2).sum(axis=0)).flatten() + + +def _reduce(client, operation, items): + while len(items) > 1: + new_reduce = client.map(operation, items[::2], items[1::2]) + if len(items) % 2 == 1: + new_reduce[-1] = client.submit(operation, new_reduce[-1], items[-1]) + items = new_reduce + return client.gather(items[0]) + + +def _validate_type_or_future_of_type( + property_name, + objects, + obj_type, + client, + workers=None, + return_workers=False, +): + try: + # validate as a list of things that need to be sent. 
+ objects = validate_list_of_types( + property_name, objects, obj_type, ensure_unique=True + ) + if workers is None: + objects = client.scatter(objects) + else: + tmp = [] + for obj, worker in zip(objects, workers): + tmp.append(client.scatter([obj], workers=worker)[0]) + objects = tmp + except TypeError: + pass + # ensure list of futures + objects = validate_list_of_types( + property_name, + objects, + Future, + ) + # Figure out where everything lives + who = client.who_has(objects) + if workers is None: + workers = [] + for obj in objects: + workers.append(who[obj.key]) + else: + # Issue a warning if the future is not on the expected worker + for i, (obj, worker) in enumerate(zip(objects, workers)): + obj_owner = client.who_has(obj)[obj.key] + if obj_owner != worker: + warnings.warn( + f"{property_name} {i} is not on the expected worker.", stacklevel=2 + ) + + # Ensure this runs on the expected worker + futures = [] + for obj, worker in zip(objects, workers): + futures.append( + client.submit(lambda v: not isinstance(v, obj_type), obj, workers=worker) + ) + is_not_obj = np.array(client.gather(futures)) + if np.any(is_not_obj): + raise TypeError(f"{property_name} futures must be an instance of {obj_type}") + + if return_workers: + return objects, workers + else: + return objects + + +class DaskMetaSimulation(MetaSimulation): + """Dask Distributed version of simulation of simulations. + + This class makes use of `dask.distributed` module to provide + concurrency, executing the internal simulations in parallel. This class + is meant to be a (mostly) drop in replacement for :class:`.MetaSimulation`. + If you want to test your implementation, we recommend starting with a + small problem using `MetaSimulation`, then switching it to this class. + the serial version of this class is good for testing correctness. 
+ + Parameters + ---------- + simulations : (n_sim) list of SimPEG.simulation.BaseSimulation or list of dask.distributed.Future + The list of unique simulations (or futures that would return a simulation) + that each handle a piece of the problem. + mappings : (n_sim) list of SimPEG.maps.IdentityMap or list of dask.distributed.Future + The map for every simulation (or futures that would return a map). Every + map should accept the same length model, and output a model appropriate + for its paired simulation. + client : dask.distributed.Client, optional + The dask client to use for communication. + """ + + def __init__(self, simulations, mappings, client): + self._client = validate_type("client", client, Client, cast=False) + super().__init__(simulations, mappings) + + def _make_survey(self): + survey = BaseSurvey([]) + vnD = [] + client = self.client + for sim, worker in zip(self.simulations, self._workers): + vnD.append(client.submit(lambda s: s.survey.nD, sim, workers=worker)) + vnD = client.gather(vnD) + survey._vnD = vnD + return survey + + @property + def simulations(self): + """The future list of simulations. + + Returns + ------- + (n_sim) list of distributed.Future SimPEG.simulation.BaseSimulation + """ + return self._simulations + + @simulations.setter + def simulations(self, value): + client = self.client + simulations, workers = _validate_type_or_future_of_type( + "simulations", value, BaseSimulation, client, return_workers=True + ) + self._simulations = simulations + self._workers = workers + + @property + def mappings(self): + """The future mappings paired to each simulation. + + Every mapping should accept the same length model, and output + a model that is consistent with the simulation. 
+ + Returns + ------- + (n_sim) list of distributed.Future SimPEG.maps.IdentityMap + """ + return self._mappings + + @mappings.setter + def mappings(self, value): + client = self.client + if self._repeat_sim: + mappings, workers = _validate_type_or_future_of_type( + "mappings", value, IdentityMap, client, return_workers=True + ) + else: + workers = self._workers + if len(value) != len(self.simulations): + raise ValueError( + "Must provide the same number of mappings and simulations." + ) + mappings = _validate_type_or_future_of_type( + "mappings", value, IdentityMap, client, workers=workers + ) + + # validate mapping shapes and simulation shapes + model_len = client.submit(lambda v: v.shape[1], mappings[0]).result() + + def check_mapping(mapping, sim, model_len): + if mapping.shape[1] != model_len: + # Bad mapping model length + return 1 + map_out_shape = mapping.shape[0] + for name in sim._act_map_names: + sim_mapping = getattr(sim, name) + sim_in_shape = sim_mapping.shape[1] + if ( + map_out_shape != "*" + and sim_in_shape != "*" + and sim_in_shape != map_out_shape + ): + # Inconsistent simulation input and mapping output + return 2 + # All good + return 0 + + error_checks = [] + for mapping, sim, worker in zip(mappings, self.simulations, workers): + # if it was a repeat sim, this should cause the simulation to be transfered + # to each worker. + error_checks.append( + client.submit(check_mapping, mapping, sim, model_len, workers=worker) + ) + error_checks = np.asarray(client.gather(error_checks)) + + if np.any(error_checks == 1): + raise ValueError("All mappings must have the same input length") + if np.any(error_checks == 2): + raise ValueError( + f"Simulations and mappings at indices {np.where(error_checks==2)}" + f" are inconsistent." 
+ ) + + self._mappings = mappings + if self._repeat_sim: + self._workers = workers + + @property + def _model_map(self): + # create a bland mapping that has the correct input shape + # to test against model inputs, avoids pulling the first + # mapping back to the main task. + if not hasattr(self, "__model_map"): + client = self.client + n_m = client.submit( + lambda v: v.shape[1], + self.mappings[0], + workers=self._workers[0], + ) + n_m = client.gather(n_m) + self.__model_map = IdentityMap(nP=n_m) + return self.__model_map + + @property + def client(self): + """The distributed client that handles the internal tasks. + + Returns + ------- + distributed.Client + """ + return self._client + + @property + def model(self): + return self._model + + @model.setter + def model(self, value): + updated = HasModel.model.fset(self, value) + # Only send the model to the internal simulations if it was updated. + if updated: + client = self.client + [self._m_as_future] = client.scatter([self._model], broadcast=True) + if not self._repeat_sim: + futures = [] + for mapping, sim, worker in zip( + self.mappings, self.simulations, self._workers + ): + futures.append( + client.submit( + _store_model, + mapping, + sim, + self._m_as_future, + workers=worker, + ) + ) + self.client.gather( + futures + ) # blocking call to ensure all models were stored + + def fields(self, m): + self.model = m + client = self.client + m_future = self._m_as_future + # The above should pass the model to all the internal simulations. 
+ f = [] + for mapping, sim, worker in zip(self.mappings, self.simulations, self._workers): + f.append( + client.submit( + _calc_fields, + mapping, + sim, + m_future, + self._repeat_sim, + workers=worker, + ) + ) + return f + + def dpred(self, m=None, f=None): + if f is None: + if m is None: + m = self.model + f = self.fields(m) + client = self.client + m_future = self._m_as_future + dpred = [] + for mapping, sim, worker, field in zip( + self.mappings, self.simulations, self._workers, f + ): + dpred.append( + client.submit( + _calc_dpred, + mapping, + sim, + m_future, + field, + self._repeat_sim, + workers=worker, + ) + ) + return np.concatenate(client.gather(dpred)) + + def Jvec(self, m, v, f=None): + self.model = m + m_future = self._m_as_future + if f is None: + f = self.fields(m) + client = self.client + [v_future] = client.scatter([v], broadcast=True) + j_vec = [] + for mapping, sim, worker, field in zip( + self.mappings, self.simulations, self._workers, f + ): + j_vec.append( + client.submit( + _j_vec_op, + mapping, + sim, + m_future, + field, + v_future, + self._repeat_sim, + workers=worker, + ) + ) + return np.concatenate(self.client.gather(j_vec)) + + def Jtvec(self, m, v, f=None): + self.model = m + m_future = self._m_as_future + if f is None: + f = self.fields(m) + jt_vec = [] + client = self.client + for i, (mapping, sim, worker, field) in enumerate( + zip(self.mappings, self.simulations, self._workers, f) + ): + jt_vec.append( + client.submit( + _jt_vec_op, + mapping, + sim, + m_future, + field, + v[self._data_offsets[i] : self._data_offsets[i + 1]], + self._repeat_sim, + workers=worker, + ) + ) + # Do the sum by a reduction operation to avoid gathering a vector + # of size n_simulations by n_model parameters on the head. 
+ return _reduce(client, add, jt_vec) + + def getJtJdiag(self, m, W=None, f=None): + self.model = m + m_future = self._m_as_future + if getattr(self, "_jtjdiag", None) is None: + if W is None: + W = np.ones(self.survey.nD) + else: + W = W.diagonal() + jtj_diag = [] + client = self.client + if f is None: + f = self.fields(m) + for i, (mapping, sim, worker, field) in enumerate( + zip(self.mappings, self.simulations, self._workers, f) + ): + sim_w = W[self._data_offsets[i] : self._data_offsets[i + 1]] + jtj_diag.append( + client.submit( + _get_jtj_diag, + mapping, + sim, + m_future, + field, + sim_w, + self._repeat_sim, + workers=worker, + ) + ) + self._jtjdiag = _reduce(client, add, jtj_diag) + + return self._jtjdiag + + +class DaskSumMetaSimulation(DaskMetaSimulation, SumMetaSimulation): + """A dask distributed version of :class:`.SumMetaSimulation`. + + A meta simulation that sums the results of the many individual + simulations. + + Parameters + ---------- + simulations : (n_sim) list of SimPEG.simulation.BaseSimulation or list of dask.distributed.Future + The list of unique simulations that each handle a piece + of the problem. + mappings : (n_sim) list of SimPEG.maps.IdentityMap or list of dask.distributed.Future The map for every simulation. Every map should accept the + same length model, and output a model appropriate for its + paired simulation. + client : dask.distributed.Client, optional + The dask client to use for communication. 
+ """ + + def __init__(self, simulations, mappings, client): + super().__init__(simulations, mappings, client) + + def _make_survey(self): + survey = BaseSurvey([]) + client = self.client + n_d = client.submit(lambda s: s.survey.nD, self.simulations[0]).result() + survey._vnD = [ + n_d, + ] + return survey + + @DaskMetaSimulation.simulations.setter + def simulations(self, value): + client = self.client + simulations, workers = _validate_type_or_future_of_type( + "simulations", value, BaseSimulation, client, return_workers=True + ) + n_d = client.submit(lambda s: s.survey.nD, simulations[0], workers=workers[0]) + sim_check = [] + for sim, worker in zip(simulations, workers): + sim_check.append( + client.submit(lambda s, n: s.survey.nD != n, sim, n_d, workers=worker) + ) + if np.any(client.gather(sim_check)): + raise ValueError("All simulations must have the same number of data.") + self._simulations = simulations + self._workers = workers + + def dpred(self, m=None, f=None): + if f is None: + if m is None: + m = self.model + f = self.fields(m) + client = self.client + dpred = [] + for sim, worker, field in zip(self.simulations, self._workers, f): + dpred.append( + client.submit(_calc_dpred, None, sim, None, field, workers=worker) + ) + return _reduce(client, add, dpred) + + def Jvec(self, m, v, f=None): + self.model = m + if f is None: + f = self.fields(m) + client = self.client + [v_future] = client.scatter([v], broadcast=True) + j_vec = [] + for mapping, sim, worker, field in zip( + self.mappings, self._simulations, self._workers, f + ): + j_vec.append( + client.submit( + _j_vec_op, + mapping, + sim, + self._m_as_future, + field, + v_future, + workers=worker, + ) + ) + return _reduce(client, add, j_vec) + + def Jtvec(self, m, v, f=None): + self.model = m + if f is None: + f = self.fields(m) + jt_vec = [] + client = self.client + for mapping, sim, worker, field in zip( + self.mappings, self._simulations, self._workers, f + ): + jt_vec.append( + client.submit( + 
_jt_vec_op, + mapping, + sim, + self._m_as_future, + field, + v, + workers=worker, + ) + ) + # Do the sum by a reduction operation to avoid gathering a vector + # of size n_simulations by n_model parameters on the head. + return _reduce(client, add, jt_vec) + + def getJtJdiag(self, m, W=None, f=None): + self.model = m + if getattr(self, "_jtjdiag", None) is None: + jtj_diag = [] + if W is None: + W = np.ones(self.survey.nD) + else: + W = W.diagonal() + client = self.client + if f is None: + f = self.fields(m) + for mapping, sim, worker, field in zip( + self.mappings, self._simulations, self._workers, f + ): + jtj_diag.append( + client.submit( + _get_jtj_diag, + mapping, + sim, + self._m_as_future, + field, + W, + workers=worker, + ) + ) + self._jtjdiag = _reduce(client, add, jtj_diag) + + return self._jtjdiag + + +class DaskRepeatedSimulation(DaskMetaSimulation): + """A multiprocessing version of the :class:`.RepeatedSimulation`. + + This class makes use of a single simulation that is copied to each internal + process, but only once per process. + + This simulation shares internals with the :class:`.MultiprocessingMetaSimulation`. + class, as such please see that documentation for details regarding how to properly + use multiprocessing on your operating system. + + Parameters + ---------- + simulation : SimPEG.simulation.BaseSimulation or dask.distributed.Future + The simulation to use repeatedly with different mappings. + mappings : (n_sim) list of SimPEG.maps.IdentityMap or list of dask.distributed.Future + The list of different mappings to use (or futures that each return a mapping). + client : dask.distributed.Client, optional + The dask client to use for communication. 
+ """ + + _repeat_sim = True + + def __init__(self, simulation, mappings, client): + self._client = validate_type("client", client, Client, cast=False) + + self.simulation = simulation + self.mappings = mappings + + self.survey = self._make_survey() + self._data_offsets = np.cumsum(np.r_[0, self.survey.vnD]) + + def _make_survey(self): + survey = BaseSurvey([]) + nD = self.client.submit(lambda s: s.survey.nD, self.simulation).result() + survey._vnD = len(self.mappings) * [nD] + return survey + + @property + def simulations(self): + return itertools.repeat(self.simulation) + + @property + def simulation(self): + """The internal simulation. + + Returns + ------- + distributed.Future of SimPEG.simulation.BaseSimulation + """ + return self._simulation + + @simulation.setter + def simulation(self, value): + client = self.client + if isinstance(value, BaseSimulation): + # Scatter sim to every client + [ + value, + ] = client.scatter([value], broadcast=True) + if not ( + isinstance(value, Future) + and client.submit(lambda s: isinstance(s, BaseSimulation), value).result() + ): + raise TypeError( + "simulation must be an instance of BaseSimulation or a Future that returns" + " a BaseSimulation" + ) + self._simulation = value diff --git a/SimPEG/meta/simulation.py b/SimPEG/meta/simulation.py index a2327b9606..aa62a05b0b 100644 --- a/SimPEG/meta/simulation.py +++ b/SimPEG/meta/simulation.py @@ -98,11 +98,14 @@ def __init__(self, simulations, mappings): self.model = None # give myself a BaseSurvey that has the number of data equal # to the sum of the sims' data. 
+ self.survey = self._make_survey() + self._data_offsets = np.cumsum(np.r_[0, self.survey.vnD]) + + def _make_survey(self): survey = BaseSurvey([]) vnD = [sim.survey.nD for sim in self.simulations] survey._vnD = vnD - self.survey = survey - self._data_offsets = np.cumsum(np.r_[0, vnD]) + return survey @property def simulations(self): @@ -352,11 +355,14 @@ def __init__(self, simulations, mappings): self.mappings = mappings self.model = None # give myself a BaseSurvey + self.survey = self._make_survey() + + def _make_survey(self): survey = BaseSurvey([]) survey._vnD = [ self.simulations[0].survey.nD, ] - self.survey = survey + return survey @MetaSimulation.simulations.setter def simulations(self, value): @@ -442,11 +448,14 @@ def __init__(self, simulation, mappings): self.simulation = simulation self.mappings = mappings self.model = None + self.survey = self._make_survey() + self._data_offsets = np.cumsum(np.r_[0, self.survey.vnD]) + + def _make_survey(self): survey = BaseSurvey([]) vnD = len(self.mappings) * [self.simulation.survey.nD] survey._vnD = vnD - self.survey = survey - self._data_offsets = np.cumsum(np.r_[0, vnD]) + return survey @property def simulations(self): diff --git a/tests/meta/test_dask_meta.py b/tests/meta/test_dask_meta.py new file mode 100644 index 0000000000..ac44d70a1a --- /dev/null +++ b/tests/meta/test_dask_meta.py @@ -0,0 +1,393 @@ +import numpy as np +from SimPEG.potential_fields import gravity +from SimPEG.electromagnetics.static import resistivity as dc +from SimPEG import maps +from discretize import TensorMesh +import scipy.sparse as sp +import pytest + +from SimPEG.meta import ( + MetaSimulation, + SumMetaSimulation, + RepeatedSimulation, + DaskMetaSimulation, + DaskSumMetaSimulation, + DaskRepeatedSimulation, +) + +from distributed import Client, LocalCluster + + +@pytest.fixture(scope="module") +def cluster(): + dask_cluster = LocalCluster( + n_workers=2, threads_per_worker=2, dashboard_address=None, processes=True + ) + yield 
dask_cluster + dask_cluster.close() + + +def test_meta_correctness(cluster): + with Client(cluster) as client: + mesh = TensorMesh([16, 16, 16], origin="CCN") + + rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j] + rx_locs = rx_locs.reshape(3, -1).T + rxs = dc.receivers.Pole(rx_locs) + source_locs = np.mgrid[-0.5:0.5:10j, 0:1:1j, 0:1:1j].reshape(3, -1).T + src_list = [ + dc.sources.Pole( + [ + rxs, + ], + location=loc, + ) + for loc in source_locs + ] + m_test = np.arange(mesh.n_cells) / mesh.n_cells + 0.1 + # split by chunks of sources + chunk_size = 3 + sims = [] + mappings = [] + for i in range(0, len(src_list) + 1, chunk_size): + end = min(i + chunk_size, len(src_list)) + if i == end: + break + survey_chunk = dc.Survey(src_list[i:end]) + sims.append( + dc.Simulation3DNodal( + mesh, survey=survey_chunk, sigmaMap=maps.IdentityMap() + ) + ) + mappings.append(maps.IdentityMap()) + + serial_sim = MetaSimulation(sims, mappings) + dask_sim = DaskMetaSimulation(sims, mappings, client) + + # test fields objects + f_meta = serial_sim.fields(m_test) + f_dask = dask_sim.fields(m_test) + # Can't serialize DC nodal fields here, so can't directly test them. 
+ # sol_meta = np.concatenate([f[:, "phiSolution"] for f in f_meta], axis=1) + # sol_dask = np.concatenate([f.result()[:, "phiSolution"] for f in f_dask], axis=1) + # np.testing.assert_allclose(sol_meta, sol_dask) + + # test data output + d_meta = serial_sim.dpred(m_test, f=f_meta) + d_dask = dask_sim.dpred(m_test, f=f_dask) + np.testing.assert_allclose(d_dask, d_meta) + + # test Jvec + rng = np.random.default_rng(seed=0) + u = rng.random(mesh.n_cells) + jvec_meta = serial_sim.Jvec(m_test, u, f=f_meta) + jvec_dask = dask_sim.Jvec(m_test, u, f=f_dask) + + np.testing.assert_allclose(jvec_dask, jvec_meta) + + # test Jtvec + v = rng.random(serial_sim.survey.nD) + jtvec_meta = serial_sim.Jtvec(m_test, v, f=f_meta) + jtvec_dask = dask_sim.Jtvec(m_test, v, f=f_dask) + + np.testing.assert_allclose(jtvec_dask, jtvec_meta) + + # test get diag + diag_meta = serial_sim.getJtJdiag(m_test, f=f_meta) + diag_dask = dask_sim.getJtJdiag(m_test, f=f_dask) + + np.testing.assert_allclose(diag_dask, diag_meta) + + # test things also works without passing optional fields + dask_sim.model = m_test + d_dask2 = dask_sim.dpred() + np.testing.assert_allclose(d_dask, d_dask2) + + jvec_dask2 = dask_sim.Jvec(m_test, u) + np.testing.assert_allclose(jvec_dask, jvec_dask2) + + jtvec_dask2 = dask_sim.Jtvec(m_test, v) + np.testing.assert_allclose(jtvec_dask, jtvec_dask2) + + # also pass a diagonal matrix here for testing. 
+ dask_sim._jtjdiag = None + W = sp.eye(dask_sim.survey.nD) + diag_dask2 = dask_sim.getJtJdiag(m_test, W=W) + np.testing.assert_allclose(diag_dask, diag_dask2) + + +def test_sum_sim_correctness(cluster): + with Client(cluster) as client: + mesh = TensorMesh([16, 16, 16], origin="CCN") + # Create gravity sum sims + rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j].reshape(3, -1).T + rx = gravity.Point(rx_locs, components=["gz"]) + survey = gravity.Survey(gravity.SourceField(rx)) + + mesh_bot = TensorMesh([mesh.h[0], mesh.h[1], mesh.h[2][:8]], origin=mesh.origin) + mesh_top = TensorMesh( + [mesh.h[0], mesh.h[1], mesh.h[2][8:]], origin=["C", "C", mesh.nodes_z[8]] + ) + + g_mappings = [ + maps.Mesh2Mesh((mesh_bot, mesh)), + maps.Mesh2Mesh((mesh_top, mesh)), + ] + g_sims = [ + gravity.Simulation3DIntegral( + mesh_bot, survey=survey, rhoMap=maps.IdentityMap(), n_processes=1 + ), + gravity.Simulation3DIntegral( + mesh_top, survey=survey, rhoMap=maps.IdentityMap(), n_processes=1 + ), + ] + + serial_sim = SumMetaSimulation(g_sims, g_mappings) + parallel_sim = DaskSumMetaSimulation(g_sims, g_mappings, client) + + m_test = np.arange(mesh.n_cells) / mesh.n_cells + 0.1 + + # test fields objects + f_full = serial_sim.fields(m_test) + f_meta = parallel_sim.fields(m_test) + # Again don't serialize and collect the fields on the main + # process directly. 
+ # np.testing.assert_allclose(f_full, sum(f_meta)) + + # test data output + d_full = serial_sim.dpred(m_test, f=f_full) + d_meta = parallel_sim.dpred(m_test, f=f_meta) + np.testing.assert_allclose(d_full, d_meta, rtol=1e-6) + + rng = np.random.default_rng(0) + + # test Jvec + u = rng.random(mesh.n_cells) + jvec_full = serial_sim.Jvec(m_test, u, f=f_full) + jvec_meta = parallel_sim.Jvec(m_test, u, f=f_meta) + + np.testing.assert_allclose(jvec_full, jvec_meta, rtol=1e-6) + + # test Jtvec + v = rng.random(survey.nD) + jtvec_full = serial_sim.Jtvec(m_test, v, f=f_full) + jtvec_meta = parallel_sim.Jtvec(m_test, v, f=f_meta) + + np.testing.assert_allclose(jtvec_full, jtvec_meta, rtol=1e-6) + + # test get diag + diag_full = serial_sim.getJtJdiag(m_test, f=f_full) + diag_meta = parallel_sim.getJtJdiag(m_test, f=f_meta) + + np.testing.assert_allclose(diag_full, diag_meta, rtol=1e-6) + + # test things also works without passing optional kwargs + parallel_sim.model = m_test + d_meta2 = parallel_sim.dpred() + np.testing.assert_allclose(d_meta, d_meta2) + + jvec_meta2 = parallel_sim.Jvec(m_test, u) + np.testing.assert_allclose(jvec_meta, jvec_meta2) + + jtvec_meta2 = parallel_sim.Jtvec(m_test, v) + np.testing.assert_allclose(jtvec_meta, jtvec_meta2) + + parallel_sim._jtjdiag = None + diag_meta2 = parallel_sim.getJtJdiag(m_test) + np.testing.assert_allclose(diag_meta, diag_meta2) + + +def test_repeat_sim_correctness(cluster): + with Client(cluster) as client: + # meta sim is tested for correctness + # so can test the repeat against the meta sim + mesh = TensorMesh([8, 8, 8], origin="CCN") + rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j].reshape(3, -1).T + rx = gravity.Point(rx_locs, components=["gz"]) + survey = gravity.Survey(gravity.SourceField(rx)) + grav_sim = gravity.Simulation3DIntegral( + mesh, survey=survey, rhoMap=maps.IdentityMap(), n_processes=1 + ) + + time_mesh = TensorMesh([8], origin=[0]) + sim_ts = np.linspace(0, 1, 6) + + repeat_mappings = [] + eye = 
sp.eye(mesh.n_cells, mesh.n_cells) + for t in sim_ts: + ave_time = time_mesh.get_interpolation_matrix([t]) + ave_full = sp.kron(ave_time, eye, format="csr") + repeat_mappings.append(maps.LinearMap(ave_full)) + + serial_sim = RepeatedSimulation(grav_sim, repeat_mappings) + parallel_sim = DaskRepeatedSimulation(grav_sim, repeat_mappings, client) + + rng = np.random.default_rng(0) + model = rng.random((time_mesh.n_cells, mesh.n_cells)).reshape(-1) + + # test field things + f_full = serial_sim.fields(model) + f_meta = parallel_sim.fields(model) + # np.testing.assert_equal(np.c_[f_full], np.c_[f_meta]) + + d_full = serial_sim.dpred(model, f_full) + d_repeat = parallel_sim.dpred(model, f_meta) + np.testing.assert_allclose(d_full, d_repeat, rtol=1e-6) + + # test Jvec + u = rng.random(len(model)) + jvec_full = serial_sim.Jvec(model, u, f=f_full) + jvec_meta = parallel_sim.Jvec(model, u, f=f_meta) + np.testing.assert_allclose(jvec_full, jvec_meta, rtol=1e-6) + + # test Jtvec + v = rng.random(len(sim_ts) * survey.nD) + jtvec_full = serial_sim.Jtvec(model, v, f=f_full) + jtvec_meta = parallel_sim.Jtvec(model, v, f=f_meta) + np.testing.assert_allclose(jtvec_full, jtvec_meta, rtol=1e-6) + + # test get diag + diag_full = serial_sim.getJtJdiag(model, f=f_full) + diag_meta = parallel_sim.getJtJdiag(model, f=f_meta) + np.testing.assert_allclose(diag_full, diag_meta, rtol=1e-6) + + +def test_dask_meta_errors(cluster): + with Client(cluster) as client: + mesh = TensorMesh([16, 16, 16], origin="CCN") + + rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j] + rx_locs = rx_locs.reshape(3, -1).T + rxs = dc.receivers.Pole(rx_locs) + source_locs = np.mgrid[-0.5:0.5:10j, 0:1:1j, 0:1:1j].reshape(3, -1).T + src_list = [ + dc.sources.Pole( + [ + rxs, + ], + location=loc, + ) + for loc in source_locs + ] + + # split by chunks of sources + chunk_size = 3 + sims = [] + mappings = [] + for i in range(0, len(src_list) + 1, chunk_size): + end = min(i + chunk_size, len(src_list)) + if i == end: + 
break + survey_chunk = dc.Survey(src_list[i:end]) + sims.append( + dc.Simulation3DNodal( + mesh, survey=survey_chunk, sigmaMap=maps.IdentityMap(mesh) + ) + ) + mappings.append(maps.IdentityMap(mesh)) + + # incompatible length of mappings and simulations lists + with pytest.raises(ValueError): + DaskMetaSimulation(sims[:-1], mappings, client) + + # Bad Simulation type? + with pytest.raises(TypeError): + DaskRepeatedSimulation( + len(sims) + * [ + lambda x: x * 2, + ], + mappings, + client, + ) + + # mappings have incompatible input lengths: + mappings[0] = maps.Projection(mesh.n_cells + 10, np.arange(mesh.n_cells) + 1) + with pytest.raises(ValueError): + DaskMetaSimulation(sims, mappings, client) + + # incompatible mapping and simulation + mappings[0] = maps.Projection(mesh.n_cells, [0, 1, 3, 5, 10]) + with pytest.raises(ValueError): + DaskMetaSimulation(sims, mappings, client) + + +def test_sum_errors(cluster): + with Client(cluster) as client: + mesh = TensorMesh([16, 16, 16], origin="CCN") + + mesh_bot = TensorMesh([mesh.h[0], mesh.h[1], mesh.h[2][:8]], origin=mesh.origin) + mesh_top = TensorMesh( + [mesh.h[0], mesh.h[1], mesh.h[2][8:]], origin=["C", "C", mesh.nodes_z[8]] + ) + + mappings = [ + maps.Mesh2Mesh((mesh_bot, mesh)), + maps.Mesh2Mesh((mesh_top, mesh)), + ] + + rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j].reshape(3, -1).T + + rx1 = gravity.Point(rx_locs, components=["gz"]) + survey1 = gravity.Survey(gravity.SourceField(rx1)) + rx2 = gravity.Point(rx_locs[1:], components=["gz"]) + survey2 = gravity.Survey(gravity.SourceField(rx2)) + + sims = [ + gravity.Simulation3DIntegral( + mesh_bot, + survey=survey1, + rhoMap=maps.IdentityMap(mesh_bot), + n_processes=1, + ), + gravity.Simulation3DIntegral( + mesh_top, + survey=survey2, + rhoMap=maps.IdentityMap(mesh_top), + n_processes=1, + ), + ] + + # Test simulations with different numbers of data. 
+ with pytest.raises(ValueError): + DaskSumMetaSimulation(sims, mappings, client) + + +def test_repeat_errors(cluster): + with Client(cluster) as client: + mesh = TensorMesh([16, 16, 16], origin="CCN") + + rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j] + rx_locs = rx_locs.reshape(3, -1).T + rxs = dc.receivers.Pole(rx_locs) + source_locs = np.mgrid[-0.5:0.5:10j, 0:1:1j, 0:1:1j].reshape(3, -1).T + src_list = [ + dc.sources.Pole( + [ + rxs, + ], + location=loc, + ) + for loc in source_locs + ] + survey = dc.Survey(src_list) + sim = dc.Simulation3DNodal(mesh, survey=survey, sigmaMap=maps.IdentityMap(mesh)) + + # split by chunks of sources + mappings = [] + for _i in range(10): + mappings.append(maps.IdentityMap(mesh)) + + # mappings have incompatible input lengths: + mappings[0] = maps.Projection(mesh.n_cells + 1, np.arange(mesh.n_cells) + 1) + with pytest.raises(ValueError): + DaskRepeatedSimulation(sim, mappings, client) + + # incompatible mappings and simulations + mappings[0] = maps.Projection(mesh.n_cells, [0, 1, 3, 5, 10]) + with pytest.raises(ValueError): + DaskRepeatedSimulation(sim, mappings, client) + + # Bad Simulation type? 
+ with pytest.raises(TypeError): + DaskRepeatedSimulation(lambda x: x * 2, mappings, client) diff --git a/tests/meta/test_meta_sim.py b/tests/meta/test_meta_sim.py index 2530efcf7d..2498aeaa36 100644 --- a/tests/meta/test_meta_sim.py +++ b/tests/meta/test_meta_sim.py @@ -150,7 +150,6 @@ def test_sum_sim_correctness(): np.testing.assert_allclose(jvec_full, jvec_mult, rtol=1e-6) # test Jtvec - rng = np.random.default_rng(seed=0) v = rng.random(survey.nD) jtvec_full = full_sim.Jtvec(m_test, v, f=f_full) jtvec_mult = sum_sim.Jtvec(m_test, v, f=f_mult) diff --git a/tests/meta/test_multiprocessing_sim.py b/tests/meta/test_multiprocessing_sim.py index bc816b0e24..fc1058862d 100644 --- a/tests/meta/test_multiprocessing_sim.py +++ b/tests/meta/test_multiprocessing_sim.py @@ -61,6 +61,8 @@ def test_meta_correctness(): serial_sim = MetaSimulation(dc_sims, dc_mappings) parallel_sim = MultiprocessingMetaSimulation(dc_sims2, dc_mappings, n_processes=12) + rng = np.random.default_rng(seed=0) + try: # create fields objects f_serial = serial_sim.fields(m_test) @@ -72,13 +74,13 @@ def test_meta_correctness(): np.testing.assert_allclose(d_full, d_mult) # test Jvec - u = np.random.rand(mesh.n_cells) + u = rng.random(mesh.n_cells) jvec_full = serial_sim.Jvec(m_test, u, f=f_serial) jvec_mult = parallel_sim.Jvec(m_test, u, f=f_parallel) np.testing.assert_allclose(jvec_full, jvec_mult) # test Jtvec - v = np.random.rand(serial_sim.survey.nD) + v = rng.random(serial_sim.survey.nD) jtvec_full = serial_sim.Jtvec(m_test, v, f=f_serial) jtvec_mult = parallel_sim.Jtvec(m_test, v, f=f_parallel) @@ -141,6 +143,8 @@ def test_sum_correctness(): serial_sim = SumMetaSimulation(g_sims, g_mappings) parallel_sim = MultiprocessingSumMetaSimulation(g_sims, g_mappings, n_processes=2) + + rng = np.random.default_rng(0) try: # test fields objects f_serial = serial_sim.fields(m_test) @@ -150,42 +154,42 @@ def test_sum_correctness(): # test data output d_full = serial_sim.dpred(m_test, f=f_serial) d_mult = 
parallel_sim.dpred(m_test, f=f_parallel) - np.testing.assert_allclose(d_full, d_mult) + np.testing.assert_allclose(d_full, d_mult, rtol=1e-06) # test Jvec - u = np.random.rand(mesh.n_cells) + u = rng.random(mesh.n_cells) jvec_full = serial_sim.Jvec(m_test, u, f=f_serial) jvec_mult = parallel_sim.Jvec(m_test, u, f=f_parallel) - np.testing.assert_allclose(jvec_full, jvec_mult) + np.testing.assert_allclose(jvec_full, jvec_mult, rtol=1e-06) # test Jtvec - v = np.random.rand(survey.nD) + v = rng.random(survey.nD) jtvec_full = serial_sim.Jtvec(m_test, v, f=f_serial) jtvec_mult = parallel_sim.Jtvec(m_test, v, f=f_parallel) - np.testing.assert_allclose(jtvec_full, jtvec_mult) + np.testing.assert_allclose(jtvec_full, jtvec_mult, rtol=1e-06) # test get diag diag_full = serial_sim.getJtJdiag(m_test, f=f_serial) diag_mult = parallel_sim.getJtJdiag(m_test, f=f_parallel) - np.testing.assert_allclose(diag_full, diag_mult) + np.testing.assert_allclose(diag_full, diag_mult, rtol=1e-06) # test things also works without passing optional kwargs parallel_sim.model = m_test d_mult2 = parallel_sim.dpred() - np.testing.assert_allclose(d_mult, d_mult2) + np.testing.assert_allclose(d_mult, d_mult2, rtol=1e-06) jvec_mult2 = parallel_sim.Jvec(m_test, u) - np.testing.assert_allclose(jvec_mult, jvec_mult2) + np.testing.assert_allclose(jvec_mult, jvec_mult2, rtol=1e-06) jtvec_mult2 = parallel_sim.Jtvec(m_test, v) - np.testing.assert_allclose(jtvec_mult, jtvec_mult2) + np.testing.assert_allclose(jtvec_mult, jtvec_mult2, rtol=1e-06) parallel_sim._jtjdiag = None diag_mult2 = parallel_sim.getJtJdiag(m_test) - np.testing.assert_allclose(diag_mult, diag_mult2) + np.testing.assert_allclose(diag_mult, diag_mult2, rtol=1e-06) except Exception as err: raise err @@ -216,7 +220,10 @@ def test_repeat_correctness(): parallel_sim = MultiprocessingRepeatedSimulation( grav_sim, repeat_mappings, n_processes=2 ) - t_model = np.random.rand(time_mesh.n_cells, mesh.n_cells).reshape(-1) + + rng = 
np.random.default_rng(0) + + t_model = rng.random((time_mesh.n_cells, mesh.n_cells)).reshape(-1) try: # test field things @@ -226,24 +233,24 @@ def test_repeat_correctness(): d_full = serial_sim.dpred(t_model, f_serial) d_repeat = parallel_sim.dpred(t_model, f_parallel) - np.testing.assert_equal(d_full, d_repeat) + np.testing.assert_allclose(d_full, d_repeat, rtol=1e-6) # test Jvec - u = np.random.rand(len(t_model)) + u = rng.random(len(t_model)) jvec_full = serial_sim.Jvec(t_model, u, f=f_serial) jvec_mult = parallel_sim.Jvec(t_model, u, f=f_parallel) - np.testing.assert_allclose(jvec_full, jvec_mult) + np.testing.assert_allclose(jvec_full, jvec_mult, rtol=1e-6) # test Jtvec - v = np.random.rand(len(sim_ts) * survey.nD) + v = rng.random(len(sim_ts) * survey.nD) jtvec_full = serial_sim.Jtvec(t_model, v, f=f_serial) jtvec_mult = parallel_sim.Jtvec(t_model, v, f=f_parallel) - np.testing.assert_allclose(jtvec_full, jtvec_mult) + np.testing.assert_allclose(jtvec_full, jtvec_mult, rtol=1e-6) # test get diag diag_full = serial_sim.getJtJdiag(t_model, f=f_serial) diag_mult = parallel_sim.getJtJdiag(t_model, f=f_parallel) - np.testing.assert_allclose(diag_full, diag_mult) + np.testing.assert_allclose(diag_full, diag_mult, rtol=1e-6) except Exception as err: raise err finally: From a0f0e9888207d4a54ab969deec6c1f4014a7929b Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 4 Apr 2024 10:21:01 -0700 Subject: [PATCH 55/68] Add Ying and Williams to AUTHORS.rst (#1405) Add @ghwilliams @YingHuuu to `AUTHORS.rst`. --- AUTHORS.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/AUTHORS.rst b/AUTHORS.rst index 65518b241d..e12ab5a8d8 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -24,3 +24,5 @@ - Nick Williams, (`@nwilliams-kobold `_) - John Weis, (`@johnweis0480 `_) - Kalen Martens, (`@kalen-sj `_) +- Williams A. 
Lima (`@ghwilliams `_) +- Ying Hu, (`@YingHuuu `_) From 64cc32ce3a118f41fcc909b0d449827312269e09 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Thu, 4 Apr 2024 12:20:03 -0600 Subject: [PATCH 56/68] Remove link to "twitter" (#1406) #### Summary Remove link to twitter from documentation. --- docs/conf.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 1e1b4419d6..b7edd86caf 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -268,11 +268,6 @@ def linkcode_resolve(domain, info): "url": "https://www.youtube.com/c/geoscixyz", "icon": "fab fa-youtube", }, - { - "name": "Twitter", - "url": "https://twitter.com/simpegpy", - "icon": "fab fa-twitter", - }, ], "use_edit_page_button": False, "collapse_navigation": True, From 00e2b0a10859ff24cf84de359b8990aecbf5c6b3 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 4 Apr 2024 11:21:58 -0700 Subject: [PATCH 57/68] Bump Black version to 24.3.0 (#1403) Update black version to 24.3.0 in `environment_test.yml`, requirements files and the configuration file for `pre-commit`. This update was triggered after a moderate vulnerability to Regular Expression Denial of Service (ReDoS). The vulnerability was patched in Black 24.3.0. Run new version of black on code base and docs. Replace lambda definition for function in test due to flake8 error after formatted by the new version of Black. 
--- .pre-commit-config.yaml | 2 +- SimPEG/directives/__init__.py | 1 + SimPEG/electromagnetics/__init__.py | 1 + .../frequency_domain/__init__.py | 1 + .../frequency_domain/sources.py | 1 - .../natural_source/utils/__init__.py | 1 + .../natural_source/utils/analytic_1d.py | 6 +-- .../static/induced_polarization/__init__.py | 1 + .../static/resistivity/__init__.py | 1 + .../spectral_induced_polarization/__init__.py | 1 + .../electromagnetics/static/utils/__init__.py | 1 + .../electromagnetics/time_domain/__init__.py | 1 + .../time_domain/simulation_1d.py | 6 +-- SimPEG/electromagnetics/utils/__init__.py | 1 + .../__init__.py | 1 + .../simulation.py | 4 +- .../viscous_remanent_magnetization/sources.py | 18 ++----- SimPEG/flow/richards/__init__.py | 1 + SimPEG/flow/richards/empirical.py | 4 +- SimPEG/maps.py | 51 ++++++++++--------- SimPEG/potential_fields/gravity/__init__.py | 1 + .../gravity/_numba_functions.py | 1 + SimPEG/potential_fields/magnetics/__init__.py | 1 + SimPEG/regularization/__init__.py | 1 + .../straight_ray_tomography/__init__.py | 1 + SimPEG/simulation.py | 1 + SimPEG/utils/__init__.py | 1 + SimPEG/utils/pgi_utils.py | 2 +- environment_test.yml | 2 +- examples/01-maps/plot_block_in_layer.py | 1 + examples/01-maps/plot_combo.py | 1 + examples/01-maps/plot_layer.py | 1 + examples/01-maps/plot_mesh2mesh.py | 1 + examples/01-maps/plot_sumMap.py | 1 + examples/02-gravity/plot_inv_grav_tiled.py | 1 + examples/03-magnetics/plot_0_analytic.py | 1 + examples/04-dcip/plot_dc_analytic.py | 1 + examples/06-tdem/plot_fwd_tdem_3d_model.py | 1 + examples/06-tdem/plot_inv_tdem_1D.py | 1 + .../06-tdem/plot_inv_tdem_1D_raw_waveform.py | 1 + examples/09-flow/plot_fwd_flow_richards_1D.py | 1 + examples/09-flow/plot_inv_flow_richards_1D.py | 1 + .../20-published/plot_heagyetal2017_casing.py | 5 +- .../plot_heagyetal2017_cyl_inversions.py | 1 + .../plot_laguna_del_maule_inversion.py | 1 + .../20-published/plot_richards_celia1990.py | 1 + 
.../plot_schenkel_morrison_casing.py | 1 + .../20-published/plot_vadose_vangenuchten.py | 1 + examples/_archived/plot_inv_grav_linear.py | 1 + examples/_archived/plot_inv_mag_linear.py | 1 + requirements_dev.txt | 2 +- requirements_style.txt | 2 +- tests/base/test_optimizers.py | 10 ++-- tests/em/vrm/test_vrmfwd.py | 1 - tests/em/vrm/test_vrminv.py | 4 +- tests/pf/test_mag_uniform_background_field.py | 1 + .../plot_inv_2_inversion_irls.py | 1 - tutorials/07-fdem/plot_inv_1_em1dfm.py | 1 - tutorials/08-tdem/plot_inv_1_em1dtm.py | 1 - ...t_inv_1_joint_pf_pgi_full_info_tutorial.py | 1 + ...lot_inv_2_joint_pf_pgi_no_info_tutorial.py | 1 + .../plot_inv_1_em1dtm_stitched_skytem.py | 2 +- 62 files changed, 97 insertions(+), 70 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9697f155ad..ccb608b94b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/psf/black - rev: 23.12.1 + rev: 24.3.0 hooks: - id: black language_version: python3 diff --git a/SimPEG/directives/__init__.py b/SimPEG/directives/__init__.py index 34c8c1fee7..8ce839d89f 100644 --- a/SimPEG/directives/__init__.py +++ b/SimPEG/directives/__init__.py @@ -96,6 +96,7 @@ DirectiveList """ + from .directives import ( InversionDirective, DirectiveList, diff --git a/SimPEG/electromagnetics/__init__.py b/SimPEG/electromagnetics/__init__.py index c83bedd42b..5deacd0f50 100644 --- a/SimPEG/electromagnetics/__init__.py +++ b/SimPEG/electromagnetics/__init__.py @@ -32,6 +32,7 @@ analytics.getCasingBzMagDipole """ + from scipy.constants import mu_0, epsilon_0 from . 
import time_domain diff --git a/SimPEG/electromagnetics/frequency_domain/__init__.py b/SimPEG/electromagnetics/frequency_domain/__init__.py index 3dad3cde28..33ed705f2a 100644 --- a/SimPEG/electromagnetics/frequency_domain/__init__.py +++ b/SimPEG/electromagnetics/frequency_domain/__init__.py @@ -73,6 +73,7 @@ fields.FieldsFDEM """ + from .survey import Survey from . import sources from . import receivers diff --git a/SimPEG/electromagnetics/frequency_domain/sources.py b/SimPEG/electromagnetics/frequency_domain/sources.py index 1c739df1b7..95f3c43305 100644 --- a/SimPEG/electromagnetics/frequency_domain/sources.py +++ b/SimPEG/electromagnetics/frequency_domain/sources.py @@ -643,7 +643,6 @@ def s_eDeriv(self, simulation, v, adjoint=False): class MagDipole_Bfield(MagDipole): - """ Point magnetic dipole source calculated with the analytic solution for the fields from a magnetic dipole. No discrete curl is taken, so the magnetic diff --git a/SimPEG/electromagnetics/natural_source/utils/__init__.py b/SimPEG/electromagnetics/natural_source/utils/__init__.py index baae8201b5..79425e954f 100644 --- a/SimPEG/electromagnetics/natural_source/utils/__init__.py +++ b/SimPEG/electromagnetics/natural_source/utils/__init__.py @@ -5,6 +5,7 @@ NOTE: These utilities are not well test, use with care """ + from .solutions_1d import get1DEfields # Add the names of the functions from .analytic_1d import getEHfields, getImpedance from .data_utils import ( diff --git a/SimPEG/electromagnetics/natural_source/utils/analytic_1d.py b/SimPEG/electromagnetics/natural_source/utils/analytic_1d.py index 78de082377..ad50de476f 100644 --- a/SimPEG/electromagnetics/natural_source/utils/analytic_1d.py +++ b/SimPEG/electromagnetics/natural_source/utils/analytic_1d.py @@ -30,9 +30,9 @@ def getEHfields(m1d, sigma, freq, zd, scaleUD=True, scaleValue=1): # Initiate the propagation matrix, in the order down up. 
UDp = np.zeros((2, m1d.nC + 1), dtype=complex) - UDp[ - 1, 0 - ] = scaleValue # Set the wave amplitude as 1 into the half-space at the bottom of the mesh + UDp[1, 0] = ( + scaleValue # Set the wave amplitude as 1 into the half-space at the bottom of the mesh + ) # Loop over all the layers, starting at the bottom layer for lnr, h in enumerate(m1d.h[0]): # lnr-number of layer, h-thickness of the layer # Calculate diff --git a/SimPEG/electromagnetics/static/induced_polarization/__init__.py b/SimPEG/electromagnetics/static/induced_polarization/__init__.py index 6383f4e05c..b421d9afdb 100644 --- a/SimPEG/electromagnetics/static/induced_polarization/__init__.py +++ b/SimPEG/electromagnetics/static/induced_polarization/__init__.py @@ -20,6 +20,7 @@ The ``induced_polarization`` module makes use of receivers, sources, and surveys defined in the ``SimPEG.electromagnetics.static.resistivity`` module. """ + from .simulation import ( Simulation3DCellCentered, Simulation3DNodal, diff --git a/SimPEG/electromagnetics/static/resistivity/__init__.py b/SimPEG/electromagnetics/static/resistivity/__init__.py index 4e0409892b..9171c0cab9 100644 --- a/SimPEG/electromagnetics/static/resistivity/__init__.py +++ b/SimPEG/electromagnetics/static/resistivity/__init__.py @@ -71,6 +71,7 @@ sources.BaseSrc receivers.BaseRx """ + from .simulation import Simulation3DCellCentered, Simulation3DNodal from .simulation_2d import Simulation2DCellCentered, Simulation2DNodal from .simulation_1d import Simulation1DLayers diff --git a/SimPEG/electromagnetics/static/spectral_induced_polarization/__init__.py b/SimPEG/electromagnetics/static/spectral_induced_polarization/__init__.py index bf5c81630b..9bdeea56be 100644 --- a/SimPEG/electromagnetics/static/spectral_induced_polarization/__init__.py +++ b/SimPEG/electromagnetics/static/spectral_induced_polarization/__init__.py @@ -60,6 +60,7 @@ simulation_2d.BaseSIPSimulation2D """ + from ....data import Data from .simulation import Simulation3DCellCentered, 
Simulation3DNodal from .simulation_2d import Simulation2DCellCentered, Simulation2DNodal diff --git a/SimPEG/electromagnetics/static/utils/__init__.py b/SimPEG/electromagnetics/static/utils/__init__.py index a0d69fe512..5245be1490 100644 --- a/SimPEG/electromagnetics/static/utils/__init__.py +++ b/SimPEG/electromagnetics/static/utils/__init__.py @@ -46,6 +46,7 @@ closestPointsGrid """ + from .static_utils import ( electrode_separations, pseudo_locations, diff --git a/SimPEG/electromagnetics/time_domain/__init__.py b/SimPEG/electromagnetics/time_domain/__init__.py index dcf8dde9a8..9c3b41ba21 100644 --- a/SimPEG/electromagnetics/time_domain/__init__.py +++ b/SimPEG/electromagnetics/time_domain/__init__.py @@ -89,6 +89,7 @@ fields.FieldsDerivativesHJ """ + from .simulation import ( Simulation3DMagneticFluxDensity, Simulation3DElectricField, diff --git a/SimPEG/electromagnetics/time_domain/simulation_1d.py b/SimPEG/electromagnetics/time_domain/simulation_1d.py index 83568857e3..afac06ae5b 100644 --- a/SimPEG/electromagnetics/time_domain/simulation_1d.py +++ b/SimPEG/electromagnetics/time_domain/simulation_1d.py @@ -153,9 +153,9 @@ def _compute_coefficients(self): def func(t, i): out = np.zeros_like(t) t = t.copy() - t[ - (t > 0.0) & (t <= t_spline_points.min()) - ] = t_spline_points.min() # constant at very low ts + t[(t > 0.0) & (t <= t_spline_points.min())] = ( + t_spline_points.min() + ) # constant at very low ts out[t > 0.0] = splines[i](np.log(t[t > 0.0])) / t[t > 0.0] return out diff --git a/SimPEG/electromagnetics/utils/__init__.py b/SimPEG/electromagnetics/utils/__init__.py index 25b3f24ac3..d8d4a6182c 100644 --- a/SimPEG/electromagnetics/utils/__init__.py +++ b/SimPEG/electromagnetics/utils/__init__.py @@ -30,6 +30,7 @@ convolve_with_waveform """ + from .waveform_utils import ( omega, k, diff --git a/SimPEG/electromagnetics/viscous_remanent_magnetization/__init__.py b/SimPEG/electromagnetics/viscous_remanent_magnetization/__init__.py index 
56349b8aba..2f3f05c1f8 100644 --- a/SimPEG/electromagnetics/viscous_remanent_magnetization/__init__.py +++ b/SimPEG/electromagnetics/viscous_remanent_magnetization/__init__.py @@ -66,6 +66,7 @@ waveforms.BaseVRMWaveform """ + from . import receivers from . import sources from . import receivers as Rx diff --git a/SimPEG/electromagnetics/viscous_remanent_magnetization/simulation.py b/SimPEG/electromagnetics/viscous_remanent_magnetization/simulation.py index efb063fd5a..35ead8fd9b 100644 --- a/SimPEG/electromagnetics/viscous_remanent_magnetization/simulation.py +++ b/SimPEG/electromagnetics/viscous_remanent_magnetization/simulation.py @@ -767,9 +767,7 @@ def _getSubsetAcolumns(self, xyzc, xyzh, pp, qq, refFlag): xyzc[refFlag == qq, :] - xyzh[refFlag == qq, :] / 2 ) # Get bottom southwest corners of cells to be refined m = np.shape(xyzc_sub)[0] - xyzc_sub = np.kron( - xyzc_sub, np.ones((n**3, 1)) - ) # Kron for n**3 refined cells + xyzc_sub = np.kron(xyzc_sub, np.ones((n**3, 1))) # Kron for n**3 refined cells xyzh_sub = np.kron( xyzh_sub / n, np.ones((n**3, 1)) ) # Kron for n**3 refined cells with widths h/n diff --git a/SimPEG/electromagnetics/viscous_remanent_magnetization/sources.py b/SimPEG/electromagnetics/viscous_remanent_magnetization/sources.py index 34a660862e..db332af0a7 100644 --- a/SimPEG/electromagnetics/viscous_remanent_magnetization/sources.py +++ b/SimPEG/electromagnetics/viscous_remanent_magnetization/sources.py @@ -138,15 +138,9 @@ def getH0(self, xyz): + m[2] * (xyz[:, 2] - r0[2]) ) - hx0 = (1 / (4 * np.pi)) * ( - 3 * (xyz[:, 0] - r0[0]) * mdotr / r**5 - m[0] / r**3 - ) - hy0 = (1 / (4 * np.pi)) * ( - 3 * (xyz[:, 1] - r0[1]) * mdotr / r**5 - m[1] / r**3 - ) - hz0 = (1 / (4 * np.pi)) * ( - 3 * (xyz[:, 2] - r0[2]) * mdotr / r**5 - m[2] / r**3 - ) + hx0 = (1 / (4 * np.pi)) * (3 * (xyz[:, 0] - r0[0]) * mdotr / r**5 - m[0] / r**3) + hy0 = (1 / (4 * np.pi)) * (3 * (xyz[:, 1] - r0[1]) * mdotr / r**5 - m[1] / r**3) + hz0 = (1 / (4 * np.pi)) * (3 * (xyz[:, 
2] - r0[2]) * mdotr / r**5 - m[2] / r**3) return np.c_[hx0, hy0, hz0] @@ -285,8 +279,7 @@ def getH0(self, xyz): (x1p / s) * (x3p * I / (2 * np.pi * s * np.sqrt(x3p**2 + (a + s) ** 2))) * ( - ((a**2 + x3p**2 + s**2) / (x3p**2 + (s - a) ** 2)) - * spec.ellipe(k) + ((a**2 + x3p**2 + s**2) / (x3p**2 + (s - a) ** 2)) * spec.ellipe(k) - spec.ellipk(k) ) ) @@ -294,8 +287,7 @@ def getH0(self, xyz): (x2p / s) * (x3p * I / (2 * np.pi * s * np.sqrt(x3p**2 + (a + s) ** 2))) * ( - ((a**2 + x3p**2 + s**2) / (x3p**2 + (s - a) ** 2)) - * spec.ellipe(k) + ((a**2 + x3p**2 + s**2) / (x3p**2 + (s - a) ** 2)) * spec.ellipe(k) - spec.ellipk(k) ) ) diff --git a/SimPEG/flow/richards/__init__.py b/SimPEG/flow/richards/__init__.py index b22a2ea880..ec638e997f 100644 --- a/SimPEG/flow/richards/__init__.py +++ b/SimPEG/flow/richards/__init__.py @@ -40,6 +40,7 @@ empirical.VanGenuchtenParams """ + from . import empirical from .survey import Survey from .simulation import SimulationNDCellCentered diff --git a/SimPEG/flow/richards/empirical.py b/SimPEG/flow/richards/empirical.py index edbf7361dd..83b38f33f0 100644 --- a/SimPEG/flow/richards/empirical.py +++ b/SimPEG/flow/richards/empirical.py @@ -570,9 +570,7 @@ def _derivKs(self, u): dKs_dm_p = P_p * self.KsDeriv dKs_dm_n = ( P_n - * utils.sdiag( - theta_e**I * ((1.0 - (1.0 - theta_e ** (1.0 / m)) ** m) ** 2) - ) + * utils.sdiag(theta_e**I * ((1.0 - (1.0 - theta_e ** (1.0 / m)) ** m) ** 2)) * self.KsDeriv ) return dKs_dm_p + dKs_dm_n diff --git a/SimPEG/maps.py b/SimPEG/maps.py index aa0087100a..f4530aefff 100644 --- a/SimPEG/maps.py +++ b/SimPEG/maps.py @@ -1697,9 +1697,7 @@ def getQ(self, alpha): if alpha < 1.0: # oblate spheroid chi = np.sqrt((1.0 / alpha**2.0) - 1) return ( - 1.0 - / 2.0 - * (1 + 1.0 / (alpha**2.0 - 1) * (1.0 - np.arctan(chi) / chi)) + 1.0 / 2.0 * (1 + 1.0 / (alpha**2.0 - 1) * (1.0 - np.arctan(chi) / chi)) ) elif alpha > 1.0: # prolate spheroid chi = np.sqrt(1 - (1.0 / alpha**2.0)) @@ -3273,9 +3271,11 @@ def indActive(self, 
value): def P(self): if getattr(self, "_P", None) is None: self._P = self.mesh2.get_interpolation_matrix( - self.mesh.cell_centers[self.indActive, :] - if self.indActive is not None - else self.mesh.cell_centers, + ( + self.mesh.cell_centers[self.indActive, :] + if self.indActive is not None + else self.mesh.cell_centers + ), "CC", zeros_outside=True, ) @@ -4581,15 +4581,19 @@ def x(self): if getattr(self, "_x", None) is None: if self.mesh.dim == 1: self._x = [ - self.mesh.cell_centers - if self.indActive is None - else self.mesh.cell_centers[self.indActive] + ( + self.mesh.cell_centers + if self.indActive is None + else self.mesh.cell_centers[self.indActive] + ) ][0] else: self._x = [ - self.mesh.cell_centers[:, 0] - if self.indActive is None - else self.mesh.cell_centers[self.indActive, 0] + ( + self.mesh.cell_centers[:, 0] + if self.indActive is None + else self.mesh.cell_centers[self.indActive, 0] + ) ][0] return self._x @@ -4605,9 +4609,11 @@ def y(self): if getattr(self, "_y", None) is None: if self.mesh.dim > 1: self._y = [ - self.mesh.cell_centers[:, 1] - if self.indActive is None - else self.mesh.cell_centers[self.indActive, 1] + ( + self.mesh.cell_centers[:, 1] + if self.indActive is None + else self.mesh.cell_centers[self.indActive, 1] + ) ][0] else: self._y = None @@ -4625,9 +4631,11 @@ def z(self): if getattr(self, "_z", None) is None: if self.mesh.dim > 2: self._z = [ - self.mesh.cell_centers[:, 2] - if self.indActive is None - else self.mesh.cell_centers[self.indActive, 2] + ( + self.mesh.cell_centers[:, 2] + if self.indActive is None + else self.mesh.cell_centers[self.indActive, 2] + ) ][0] else: self._z = None @@ -5094,12 +5102,7 @@ def _ekblom(self, val): return (val**2 + self.epsilon**2) ** (self.p / 2.0) def _ekblomDeriv(self, val): - return ( - (self.p / 2) - * (val**2 + self.epsilon**2) ** ((self.p / 2) - 1) - * 2 - * val - ) + return (self.p / 2) * (val**2 + self.epsilon**2) ** ((self.p / 2) - 1) * 2 * val # def _rotation(self, mDict): # if 
self.mesh.dim == 2: diff --git a/SimPEG/potential_fields/gravity/__init__.py b/SimPEG/potential_fields/gravity/__init__.py index 4a9763bc9d..ae4e97687e 100644 --- a/SimPEG/potential_fields/gravity/__init__.py +++ b/SimPEG/potential_fields/gravity/__init__.py @@ -35,6 +35,7 @@ analytics.GravityGradientSphereFreeSpace """ + from . import survey from . import sources from . import receivers diff --git a/SimPEG/potential_fields/gravity/_numba_functions.py b/SimPEG/potential_fields/gravity/_numba_functions.py index c84069f150..1d6b363b27 100644 --- a/SimPEG/potential_fields/gravity/_numba_functions.py +++ b/SimPEG/potential_fields/gravity/_numba_functions.py @@ -1,6 +1,7 @@ """ Numba functions for gravity simulation using Choclo. """ + import numpy as np try: diff --git a/SimPEG/potential_fields/magnetics/__init__.py b/SimPEG/potential_fields/magnetics/__init__.py index 0db16e066f..6d9241e310 100644 --- a/SimPEG/potential_fields/magnetics/__init__.py +++ b/SimPEG/potential_fields/magnetics/__init__.py @@ -35,6 +35,7 @@ analytics.MagSphereAnaFunA analytics.MagSphereFreeSpace """ + from . import survey from . import sources from . 
import receivers diff --git a/SimPEG/regularization/__init__.py b/SimPEG/regularization/__init__.py index 3fa55b9fd3..9026acb078 100644 --- a/SimPEG/regularization/__init__.py +++ b/SimPEG/regularization/__init__.py @@ -147,6 +147,7 @@ BaseAmplitude """ + from ..utils.code_utils import deprecate_class from .base import ( BaseRegularization, diff --git a/SimPEG/seismic/straight_ray_tomography/__init__.py b/SimPEG/seismic/straight_ray_tomography/__init__.py index 68edbf39c3..69e8a06036 100644 --- a/SimPEG/seismic/straight_ray_tomography/__init__.py +++ b/SimPEG/seismic/straight_ray_tomography/__init__.py @@ -24,6 +24,7 @@ """ + from .simulation import Simulation2DIntegral as Simulation from .survey import StraightRaySurvey as Survey from ...survey import BaseSrc as Src diff --git a/SimPEG/simulation.py b/SimPEG/simulation.py index c857aa841e..f6cd144b1a 100644 --- a/SimPEG/simulation.py +++ b/SimPEG/simulation.py @@ -1,6 +1,7 @@ """ Define simulation classes. """ + import os import inspect import numpy as np diff --git a/SimPEG/utils/__init__.py b/SimPEG/utils/__init__.py index c53f37ec75..b023970eca 100644 --- a/SimPEG/utils/__init__.py +++ b/SimPEG/utils/__init__.py @@ -142,6 +142,7 @@ validate_active_indices """ + from discretize.utils.interpolation_utils import interpolation_matrix from .code_utils import ( diff --git a/SimPEG/utils/pgi_utils.py b/SimPEG/utils/pgi_utils.py index 3304b36d4a..eb0658e958 100644 --- a/SimPEG/utils/pgi_utils.py +++ b/SimPEG/utils/pgi_utils.py @@ -1560,7 +1560,7 @@ def __init__( warm_start=warm_start, weights_init=weights_init, update_covariances=update_covariances, - fixed_membership=fixed_membership + fixed_membership=fixed_membership, # **kwargs ) diff --git a/environment_test.yml b/environment_test.yml index 8c5604edb7..84940f92d6 100644 --- a/environment_test.yml +++ b/environment_test.yml @@ -40,7 +40,7 @@ dependencies: - choclo # Linters and code style - pre-commit - - black==23.12.1 + - black==24.3.0 - flake8==7.0.0 - 
flake8-bugbear==23.12.2 - flake8-builtins==2.2.0 diff --git a/examples/01-maps/plot_block_in_layer.py b/examples/01-maps/plot_block_in_layer.py index 4b116ceeb2..c84d27b218 100644 --- a/examples/01-maps/plot_block_in_layer.py +++ b/examples/01-maps/plot_block_in_layer.py @@ -21,6 +21,7 @@ ] """ + import discretize from SimPEG import maps import numpy as np diff --git a/examples/01-maps/plot_combo.py b/examples/01-maps/plot_combo.py index 86a98cf4aa..a7157a4b82 100644 --- a/examples/01-maps/plot_combo.py +++ b/examples/01-maps/plot_combo.py @@ -26,6 +26,7 @@ right). Just to be sure that the derivative is correct, you should always run the test on the mapping that you create. """ + import discretize from SimPEG import maps import numpy as np diff --git a/examples/01-maps/plot_layer.py b/examples/01-maps/plot_layer.py index 90600bde0a..d73a9bc8bf 100644 --- a/examples/01-maps/plot_layer.py +++ b/examples/01-maps/plot_layer.py @@ -17,6 +17,7 @@ 'layer thickness' ] """ + import discretize from SimPEG import maps import numpy as np diff --git a/examples/01-maps/plot_mesh2mesh.py b/examples/01-maps/plot_mesh2mesh.py index bb36c19a78..b2063e71bb 100644 --- a/examples/01-maps/plot_mesh2mesh.py +++ b/examples/01-maps/plot_mesh2mesh.py @@ -4,6 +4,7 @@ This mapping allows you to go from one mesh to another. """ + import discretize from SimPEG import maps, utils import matplotlib.pyplot as plt diff --git a/examples/01-maps/plot_sumMap.py b/examples/01-maps/plot_sumMap.py index a07bb8712b..aceb6c9220 100644 --- a/examples/01-maps/plot_sumMap.py +++ b/examples/01-maps/plot_sumMap.py @@ -12,6 +12,7 @@ """ + from discretize import TensorMesh from discretize.utils import active_from_xyz from SimPEG import ( diff --git a/examples/02-gravity/plot_inv_grav_tiled.py b/examples/02-gravity/plot_inv_grav_tiled.py index 5ed4cd90e2..37ae5e203d 100644 --- a/examples/02-gravity/plot_inv_grav_tiled.py +++ b/examples/02-gravity/plot_inv_grav_tiled.py @@ -5,6 +5,7 @@ Invert data in tiles. 
""" + import os import numpy as np import matplotlib.pyplot as plt diff --git a/examples/03-magnetics/plot_0_analytic.py b/examples/03-magnetics/plot_0_analytic.py index 8384445f2e..1c8e7980aa 100644 --- a/examples/03-magnetics/plot_0_analytic.py +++ b/examples/03-magnetics/plot_0_analytic.py @@ -5,6 +5,7 @@ Comparing the magnetics field in Vancouver to Seoul """ + import numpy as np from SimPEG.potential_fields.magnetics import analytics import matplotlib.pyplot as plt diff --git a/examples/04-dcip/plot_dc_analytic.py b/examples/04-dcip/plot_dc_analytic.py index ec16ee2672..da9fca2cb3 100644 --- a/examples/04-dcip/plot_dc_analytic.py +++ b/examples/04-dcip/plot_dc_analytic.py @@ -5,6 +5,7 @@ Comparison of the analytic and numerical solution for a direct current resistivity dipole in 3D. """ + import discretize from SimPEG import utils import numpy as np diff --git a/examples/06-tdem/plot_fwd_tdem_3d_model.py b/examples/06-tdem/plot_fwd_tdem_3d_model.py index 36637e5085..6a07f422ff 100644 --- a/examples/06-tdem/plot_fwd_tdem_3d_model.py +++ b/examples/06-tdem/plot_fwd_tdem_3d_model.py @@ -2,6 +2,7 @@ Time-domain CSEM for a resistive cube in a deep marine setting ============================================================== """ + import empymod import discretize diff --git a/examples/06-tdem/plot_inv_tdem_1D.py b/examples/06-tdem/plot_inv_tdem_1D.py index 95c005e96f..b3f6fd1a78 100644 --- a/examples/06-tdem/plot_inv_tdem_1D.py +++ b/examples/06-tdem/plot_inv_tdem_1D.py @@ -4,6 +4,7 @@ Here we will create and run a TDEM 1D inversion. 
""" + import numpy as np from SimPEG.electromagnetics import time_domain from SimPEG import ( diff --git a/examples/06-tdem/plot_inv_tdem_1D_raw_waveform.py b/examples/06-tdem/plot_inv_tdem_1D_raw_waveform.py index 4a0c5625c9..619ada07e4 100644 --- a/examples/06-tdem/plot_inv_tdem_1D_raw_waveform.py +++ b/examples/06-tdem/plot_inv_tdem_1D_raw_waveform.py @@ -6,6 +6,7 @@ with VTEM waveform of which initial condition is zero, but have some on- and off-time. """ + import numpy as np import discretize from SimPEG import ( diff --git a/examples/09-flow/plot_fwd_flow_richards_1D.py b/examples/09-flow/plot_fwd_flow_richards_1D.py index 811fc84a46..dcc2a7b7ff 100644 --- a/examples/09-flow/plot_fwd_flow_richards_1D.py +++ b/examples/09-flow/plot_fwd_flow_richards_1D.py @@ -38,6 +38,7 @@ .. _Celia1990: http://www.webpages.uidaho.edu/ch/papers/Celia.pdf """ + import matplotlib import matplotlib.pyplot as plt import numpy as np diff --git a/examples/09-flow/plot_inv_flow_richards_1D.py b/examples/09-flow/plot_inv_flow_richards_1D.py index f30a739c3c..d38dbb4014 100644 --- a/examples/09-flow/plot_inv_flow_richards_1D.py +++ b/examples/09-flow/plot_inv_flow_richards_1D.py @@ -25,6 +25,7 @@ .. 
_Celia1990: http://www.webpages.uidaho.edu/ch/papers/Celia.pdf """ + import matplotlib import matplotlib.pyplot as plt import numpy as np diff --git a/examples/20-published/plot_heagyetal2017_casing.py b/examples/20-published/plot_heagyetal2017_casing.py index 6d0543130c..bcce56721a 100644 --- a/examples/20-published/plot_heagyetal2017_casing.py +++ b/examples/20-published/plot_heagyetal2017_casing.py @@ -30,6 +30,7 @@ This example was updated for SimPEG 0.14.0 on January 31st, 2020 by Joseph Capriotti """ + import discretize from SimPEG import utils, maps, tests from SimPEG.electromagnetics import frequency_domain as FDEM, mu_0 @@ -265,8 +266,8 @@ def primaryMapping(self): expMapPrimary * injActMapPrimary # log(sigma) --> sigma * paramMapPrimary # log(sigma) below surface --> include air - * injectCasingParams # parametric --> casing + layered earth - * # parametric layered earth --> parametric + * injectCasingParams # parametric --> casing + layered earth # parametric layered earth --> parametric + * # layered earth + casing self.projectionMapPrimary # grab relevant parameters from full # model (eg. 
ignore block) diff --git a/examples/20-published/plot_heagyetal2017_cyl_inversions.py b/examples/20-published/plot_heagyetal2017_cyl_inversions.py index 98e04747a4..2c93524648 100644 --- a/examples/20-published/plot_heagyetal2017_cyl_inversions.py +++ b/examples/20-published/plot_heagyetal2017_cyl_inversions.py @@ -18,6 +18,7 @@ This example was updated for SimPEG 0.14.0 on January 31st, 2020 by Joseph Capriotti """ + import discretize from SimPEG import ( maps, diff --git a/examples/20-published/plot_laguna_del_maule_inversion.py b/examples/20-published/plot_laguna_del_maule_inversion.py index d124efa9da..47a467c343 100644 --- a/examples/20-published/plot_laguna_del_maule_inversion.py +++ b/examples/20-published/plot_laguna_del_maule_inversion.py @@ -11,6 +11,7 @@ Craig Miller """ + import os import shutil import tarfile diff --git a/examples/20-published/plot_richards_celia1990.py b/examples/20-published/plot_richards_celia1990.py index 798ec47149..ce2267d8b9 100644 --- a/examples/20-published/plot_richards_celia1990.py +++ b/examples/20-published/plot_richards_celia1990.py @@ -39,6 +39,7 @@ .. _Celia1990: http://www.webpages.uidaho.edu/ch/papers/Celia.pdf """ + import matplotlib.pyplot as plt import numpy as np diff --git a/examples/20-published/plot_schenkel_morrison_casing.py b/examples/20-published/plot_schenkel_morrison_casing.py index 4868459e2f..478d8b90e3 100644 --- a/examples/20-published/plot_schenkel_morrison_casing.py +++ b/examples/20-published/plot_schenkel_morrison_casing.py @@ -44,6 +44,7 @@ a citation would be much appreciated! 
""" + import matplotlib.pylab as plt import numpy as np import discretize diff --git a/examples/20-published/plot_vadose_vangenuchten.py b/examples/20-published/plot_vadose_vangenuchten.py index 95b8d10af3..a05cb0b6f4 100644 --- a/examples/20-published/plot_vadose_vangenuchten.py +++ b/examples/20-published/plot_vadose_vangenuchten.py @@ -10,6 +10,7 @@ The RETC code for quantifying the hydraulic functions of unsaturated soils, Van Genuchten, M Th, Leij, F J, Yates, S R """ + import matplotlib.pyplot as plt import discretize diff --git a/examples/_archived/plot_inv_grav_linear.py b/examples/_archived/plot_inv_grav_linear.py index c11bea15e9..c35831bd76 100644 --- a/examples/_archived/plot_inv_grav_linear.py +++ b/examples/_archived/plot_inv_grav_linear.py @@ -6,6 +6,7 @@ with a compact norm """ + import numpy as np import matplotlib.pyplot as plt diff --git a/examples/_archived/plot_inv_mag_linear.py b/examples/_archived/plot_inv_mag_linear.py index 661ed94062..bf25676a50 100644 --- a/examples/_archived/plot_inv_mag_linear.py +++ b/examples/_archived/plot_inv_mag_linear.py @@ -6,6 +6,7 @@ with a compact norm """ + import matplotlib.pyplot as plt import numpy as np from discretize import TensorMesh diff --git a/requirements_dev.txt b/requirements_dev.txt index f81dbe7d4e..5bab4a0c4b 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -19,7 +19,7 @@ jupyter toolz empymod>=2.0.0 scooby -black==23.12.1 +black==24.3.0 pre-commit twine memory_profiler diff --git a/requirements_style.txt b/requirements_style.txt index 86051e527b..a4fd699571 100644 --- a/requirements_style.txt +++ b/requirements_style.txt @@ -1,4 +1,4 @@ -black==23.12.1 +black==24.3.0 flake8==7.0.0 flake8-bugbear==23.12.2 flake8-builtins==2.2.0 diff --git a/tests/base/test_optimizers.py b/tests/base/test_optimizers.py index 4bdfd2cafa..212d433787 100644 --- a/tests/base/test_optimizers.py +++ b/tests/base/test_optimizers.py @@ -49,11 +49,11 @@ def test_ProjGradient_quadratic1Bound(self): 
self.assertTrue(np.linalg.norm(xopt - x_true, 2) < TOL, True) def test_NewtonRoot(self): - fun = ( - lambda x, return_g=True: np.sin(x) - if not return_g - else (np.sin(x), sdiag(np.cos(x))) - ) + def fun(x, return_g=True): + if return_g: + return np.sin(x), sdiag(np.cos(x)) + return np.sin(x) + x = np.array([np.pi - 0.3, np.pi + 0.1, 0]) xopt = optimization.NewtonRoot(comments=False).root(fun, x) x_true = np.array([np.pi, np.pi, 0]) diff --git a/tests/em/vrm/test_vrmfwd.py b/tests/em/vrm/test_vrmfwd.py index 89f1fc54f9..7bcef1c9e0 100644 --- a/tests/em/vrm/test_vrmfwd.py +++ b/tests/em/vrm/test_vrmfwd.py @@ -5,7 +5,6 @@ class VRM_fwd_tests(unittest.TestCase): - """ Computed vs analytic dipole field """ diff --git a/tests/em/vrm/test_vrminv.py b/tests/em/vrm/test_vrminv.py index b4f316c02f..15c022c926 100644 --- a/tests/em/vrm/test_vrminv.py +++ b/tests/em/vrm/test_vrminv.py @@ -63,9 +63,7 @@ def test_basic_inversion(self): dmis = data_misfit.L2DataMisfit(data=dobs, simulation=Problem) W = ( - mkvc( - (np.sum(np.array(Problem.A) ** 2, axis=0)) / meshObj.cell_volumes**2.0 - ) + mkvc((np.sum(np.array(Problem.A) ** 2, axis=0)) / meshObj.cell_volumes**2.0) ** 0.25 ) reg = regularization.WeightedLeastSquares( diff --git a/tests/pf/test_mag_uniform_background_field.py b/tests/pf/test_mag_uniform_background_field.py index 18989d4d09..d4e72bae40 100644 --- a/tests/pf/test_mag_uniform_background_field.py +++ b/tests/pf/test_mag_uniform_background_field.py @@ -1,6 +1,7 @@ """ Test the UniformBackgroundField class """ + import pytest from SimPEG.potential_fields.magnetics import UniformBackgroundField, SourceField diff --git a/tutorials/02-linear_inversion/plot_inv_2_inversion_irls.py b/tutorials/02-linear_inversion/plot_inv_2_inversion_irls.py index 15c7f18dc5..4241d5d886 100644 --- a/tutorials/02-linear_inversion/plot_inv_2_inversion_irls.py +++ b/tutorials/02-linear_inversion/plot_inv_2_inversion_irls.py @@ -16,7 +16,6 @@ """ - import numpy as np import matplotlib.pyplot 
as plt diff --git a/tutorials/07-fdem/plot_inv_1_em1dfm.py b/tutorials/07-fdem/plot_inv_1_em1dfm.py index 2d566f06a7..d222fd9d62 100644 --- a/tutorials/07-fdem/plot_inv_1_em1dfm.py +++ b/tutorials/07-fdem/plot_inv_1_em1dfm.py @@ -18,7 +18,6 @@ """ - ######################################################################### # Import modules # -------------- diff --git a/tutorials/08-tdem/plot_inv_1_em1dtm.py b/tutorials/08-tdem/plot_inv_1_em1dtm.py index 535f646747..9e189b32cb 100644 --- a/tutorials/08-tdem/plot_inv_1_em1dtm.py +++ b/tutorials/08-tdem/plot_inv_1_em1dtm.py @@ -18,7 +18,6 @@ """ - ######################################################################### # Import modules # -------------- diff --git a/tutorials/14-pgi/plot_inv_1_joint_pf_pgi_full_info_tutorial.py b/tutorials/14-pgi/plot_inv_1_joint_pf_pgi_full_info_tutorial.py index 96b83c4b36..c6e170bf27 100644 --- a/tutorials/14-pgi/plot_inv_1_joint_pf_pgi_full_info_tutorial.py +++ b/tutorials/14-pgi/plot_inv_1_joint_pf_pgi_full_info_tutorial.py @@ -22,6 +22,7 @@ `_. """ + ######################################################################### # Import modules # -------------- diff --git a/tutorials/14-pgi/plot_inv_2_joint_pf_pgi_no_info_tutorial.py b/tutorials/14-pgi/plot_inv_2_joint_pf_pgi_no_info_tutorial.py index ba0219e59a..0e57205665 100644 --- a/tutorials/14-pgi/plot_inv_2_joint_pf_pgi_no_info_tutorial.py +++ b/tutorials/14-pgi/plot_inv_2_joint_pf_pgi_no_info_tutorial.py @@ -23,6 +23,7 @@ `_. 
""" + ######################################################################### # Import modules # -------------- diff --git a/tutorials/_temporary/plot_inv_1_em1dtm_stitched_skytem.py b/tutorials/_temporary/plot_inv_1_em1dtm_stitched_skytem.py index 7cdb188cba..6419f37d23 100644 --- a/tutorials/_temporary/plot_inv_1_em1dtm_stitched_skytem.py +++ b/tutorials/_temporary/plot_inv_1_em1dtm_stitched_skytem.py @@ -455,7 +455,7 @@ def PolygonInd(mesh, pts): ax2 = fig.add_axes([0.85, 0.12, 0.05, 0.78]) norm = mpl.colors.Normalize( vmin=np.log10(true_model.min()), - vmax=np.log10(true_model.max()) + vmax=np.log10(true_model.max()), # vmin=np.log10(0.1), vmax=np.log10(1) ) cbar = mpl.colorbar.ColorbarBase( From 843ed3507ad25d66d0d1ab14e8c70e09d7dabeb1 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Tue, 9 Apr 2024 09:40:55 -0700 Subject: [PATCH 58/68] Add release notes for SimPEG v0.21 (#1409) Add release notes for SimPEG v0.21.0 to the docs. --------- Co-authored-by: Joseph Capriotti --- docs/content/release/0.21.0-notes.rst | 274 ++++++++++++++++++++++++++ docs/content/release/index.rst | 1 + 2 files changed, 275 insertions(+) create mode 100644 docs/content/release/0.21.0-notes.rst diff --git a/docs/content/release/0.21.0-notes.rst b/docs/content/release/0.21.0-notes.rst new file mode 100644 index 0000000000..c317ded823 --- /dev/null +++ b/docs/content/release/0.21.0-notes.rst @@ -0,0 +1,274 @@ +.. _0.21.0_notes: + +=========================== +SimPEG 0.21.0 Release Notes +=========================== + +April 8th, 2024 + +.. contents:: Highlights + :depth: 3 + +Updates +======= + +New features +------------ + +Gravity simulation using Choclo +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Now we can use a faster and more memory efficient implementation of the gravity +simulation ``SimPEG.potential_fields.gravity.Simulation3DIntegral``, making use +of Choclo and Numba. To make use of this functionality you will need to +`install ``choclo`` `__ in +addition to ``SimPEG``. 
+ +See https://github.com/simpeg/simpeg/pull/1285. + +Use Dask with MetaSimulation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A new ``SimPEG.meta.DaskMetaSimulation`` class has been added that allows to +use Dask with ``SimPEG.meta.MetaSimulations``. + +See https://github.com/simpeg/simpeg/pull/1199. + +Rotated Gradients +~~~~~~~~~~~~~~~~~ + +Added a new ``SimPEG.regularization.SmoothnessFullGradient`` regularization +class that allows to regularize first order smoothness along any arbitrary +direction, enabling anisotropic weighting. This regularization also works for +a ``SimplexMesh``. + +See https://github.com/simpeg/simpeg/pull/1167. + +Logistic Sigmoid Map +~~~~~~~~~~~~~~~~~~~~ + +New ``SimPEG.map.LogisticSigmoidMap`` mapping class that computes the logistic +sigmoid of the model parameters. This is an alternative method to incorporate +upper and lower bounds on model parameters. + +See https://github.com/simpeg/simpeg/pull/1352. + +Create Jacobian matrix in NSEM and FDEM simulations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The frequency domain electromagnetic simulations (including natural source) now +support creating and storing the Jacobian matrix. You can access it by using +the ``getJ`` method. + +See https://github.com/simpeg/simpeg/pull/1276. + + +Documentation +------------- + +This new release includes major improvements in documentation pages: more +detailed docstrings of classes and methods, the addition of directive classes +to the API reference, improvements to the contributing guide, among corrections +and fixes. + + +Breaking changes +---------------- + +Removal of deprecated bits +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Several deprecated bits of code has been removed in this release. From old +classes, methods and properties that were marked for deprecation a few releases +back. These removals simplify the SimPEG API and cleans up the codebase. 
+ +Remove factor of half in data misfits and regularizations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Simplify the definition of data misfit and regularization terms by removing the +leading factor of one half from these functions. This change makes it easier to +interpret the resulting values of these objective functions, while +avoiding confusions with their definition. + +See https://github.com/simpeg/simpeg/pull/1326. + + +Bugfixes +-------- + +A few bugs have been fixed: + +- Fix issue with lengthscales in coterminal angle calculations by + `@domfournier `__ in https://github.com/simpeg/simpeg/pull/1299 +- ISSUE-1341: Set parent of objective functions by `@domfournier `__ in + https://github.com/simpeg/simpeg/pull/1342 +- Ravel instead of flatten by `@thibaut-kobold `__ in + https://github.com/simpeg/simpeg/pull/1343 +- Fix implementation of coterminal function by `@domfournier `__ in + https://github.com/simpeg/simpeg/pull/1334 +- Simpeg vector update by `@johnweis0480 `__ in + https://github.com/simpeg/simpeg/pull/1329 + + +Contributors +============ + +This is a combination of contributors and reviewers who've made contributions +towards this release (in no particular order). 
+ +* `@ckohnke `__ +* `@dccowan `__ +* `@domfournier `__ +* `@ghwilliams `__ +* `@jcapriot `__ +* `@JKutt `__ +* `@johnweis0480 `__ +* `@lheagy `__ +* `@mplough-kobold `__ +* `@santisoler `__ +* `@thibaut-kobold `__ +* `@YingHuuu `__ + +We would like to highlight the contributions made by new contributors: + +- `@mplough-kobold `__ made their first + contribution in https://github.com/simpeg/simpeg/pull/1282 +- `@ghwilliams `__ made their first contribution + in https://github.com/simpeg/simpeg/pull/1292 +- `@johnweis0480 `__ made their first + contribution in https://github.com/simpeg/simpeg/pull/1329 +- `@ckohnke `__ made their first contribution in + https://github.com/simpeg/simpeg/pull/1352 +- `@YingHuuu `__ made their first contribution in + https://github.com/simpeg/simpeg/pull/1344 + + +Pull Requests +============= + +- Add 0.20.0 release notes to toc by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1277 +- add plausible analytics to simpeg docs by `@lheagy `__ in + https://github.com/simpeg/simpeg/pull/1279 +- Refresh links in documentation by `@mplough-kobold `__ in + https://github.com/simpeg/simpeg/pull/1282 +- Run pytest on Azure with increased verbosity by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1287 - Allow to use random seed in make_synthetic_data by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1286 +- pgi doc by `@thibaut-kobold `__ in + https://github.com/simpeg/simpeg/pull/1291 +- Fix deprecation warning for gradientType in SparseSmoothness by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1284 +- Gravity simulation with Choclo as engine by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1285 +- Fix minor flake8 warning by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1307 +- ISSUE-1298: Use normal distributed noise in example. 
by `@domfournier `__ + in https://github.com/simpeg/simpeg/pull/1312 +- Ditch deprecated functions in utils.model_builder by `@domfournier `__ in + https://github.com/simpeg/simpeg/pull/1311 - Triaxial magnetic gradient forward modelling by `@thibaut-kobold `__ in + https://github.com/simpeg/simpeg/pull/1288 +- Documentation improvements for classes in Objective Function Pieces + by `@ghwilliams `__ in https://github.com/simpeg/simpeg/pull/1292 +- Fix description of source_field in gravity survey by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1322 +- Add ``weights_keys`` method to ``BaseRegularization`` by `@santisoler `__ + in https://github.com/simpeg/simpeg/pull/1320 +- Bump versions of flake8 and black and pin flake plugins by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1330 +- Move ``__init__`` in ``BaseSimulation`` to the top of the class by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1323 +- Simpeg vector update by `@johnweis0480 `__ in + https://github.com/simpeg/simpeg/pull/1329 +- Fix typo in error messages by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1324 +- Fix issue with lengthscales in coterminal angle calculations by + `@domfournier `__ in https://github.com/simpeg/simpeg/pull/1299 +- Simplify check for invalid multipliers by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1336 +- Ravel instead of flatten by `@thibaut-kobold `__ in + https://github.com/simpeg/simpeg/pull/1343 +- Fix implementation of coterminal function by `@domfournier `__ in + https://github.com/simpeg/simpeg/pull/1334 +- Update cross gradient hessian approximation by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1355 +- ISSUE-1341: Set parent of objective functions by `@domfournier `__ in + https://github.com/simpeg/simpeg/pull/1342 +- Fix partial derivatives in regularization docs by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1362 +- Remove factor of half in data misfits and 
regularizations by `@lheagy `__ + in https://github.com/simpeg/simpeg/pull/1326 +- Improvements to template for a bug report issue by `@lheagy `__ in + https://github.com/simpeg/simpeg/pull/1359 +- Simplify a few gravity simulation tests by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1363 +- Exponential Sinusoids Simulation by `@lheagy `__ in + https://github.com/simpeg/simpeg/pull/1337 +- Replace magnetic SourceField for UniformBackgroundField by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1364 +- Remove deprecated regularization classes by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1365 +- Removed deprecated properties of UpdateSensitivityWeights by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1368 +- Replace indActive for active_cells in regularizations by `@santisoler `__ + in https://github.com/simpeg/simpeg/pull/1366 +- Remove the debug argument from InversionDirective by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1370 +- Remove cellDiff properties of RegularizationMesh by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1371 +- Remove deprecated bits of code by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1372 +- Use choclo in gravity tutorials by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1378 +- Remove surface2ind_topo by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1374 +- Speed up sphinx documentation building by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1382 +- Add docs/sg_execution_times.rst to .gitignore by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1380 +- Describe merge process of Pull Requests in docs by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1375 +- Simplify private methods in gravity simulation by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1384 +- Update Slack links: point to Mattermost by `@santisoler `__ in + 
https://github.com/simpeg/simpeg/pull/1385 +- added getJ for fdem and nsem simulations by `@JKutt `__ in + https://github.com/simpeg/simpeg/pull/1276 +- Add LogisticSigmoidMap by `@ckohnke `__ in + https://github.com/simpeg/simpeg/pull/1352 +- Remove the cell_weights attribute in regularizations by `@santisoler `__ + in https://github.com/simpeg/simpeg/pull/1376 +- Remove regmesh, mref and gradientType from regularizations by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1377 +- Test if gravity sensitivities are stored on disk by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1388 +- Check if mesh is 3D when using Choclo in gravity simulation by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1386 +- Rotated Gradients by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1167 +- Add directives to the API Reference by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1397 +- Remove deprecated modelType in mag simulation by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1399 +- Remove mref property of PGI regularization by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1400 +- Add link to User Tutorials to navbar in docs by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1401 +- Improve documentation for base simulation classes by `@ghwilliams `__ in + https://github.com/simpeg/simpeg/pull/1295 +- Enforce regularization ``weights`` as dictionaries by `@YingHuuu `__ in + https://github.com/simpeg/simpeg/pull/1344 +- Minor adjustments to Sphinx configuration by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1398 +- Update AUTHORS.rst by `@lheagy `__ in + https://github.com/simpeg/simpeg/pull/1259 +- Update year in LICENSE by `@lheagy `__ in + https://github.com/simpeg/simpeg/pull/1404 +- Dask MetaSim by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1199 +- Add Ying and Williams to AUTHORS.rst by `@santisoler `__ in + 
https://github.com/simpeg/simpeg/pull/1405 +- Remove link to “twitter” by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1406 +- Bump Black version to 24.3.0 by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1403 diff --git a/docs/content/release/index.rst b/docs/content/release/index.rst index 98e30d6b57..bd5a9b9ed0 100644 --- a/docs/content/release/index.rst +++ b/docs/content/release/index.rst @@ -5,6 +5,7 @@ Release Notes .. toctree:: :maxdepth: 2 + 0.21.0 <0.21.0-notes> 0.20.0 <0.20.0-notes> 0.19.0 <0.19.0-notes> 0.18.1 <0.18.1-notes> From de79ed07a8075ab6f8b18735e90b82c2e207b7e8 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Tue, 9 Apr 2024 12:29:54 -0600 Subject: [PATCH 59/68] Publish documentation on azure (#1412) Adds a publish step on the documentation test CI so we can download it and view it. --- .azure-pipelines/matrix.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.azure-pipelines/matrix.yml b/.azure-pipelines/matrix.yml index fd6cc259a3..2b96f56561 100644 --- a/.azure-pipelines/matrix.yml +++ b/.azure-pipelines/matrix.yml @@ -50,6 +50,13 @@ jobs: pytest ${{ test }} -v --cov-config=.coveragerc --cov=SimPEG --cov-report=xml --cov-report=html -W ignore::DeprecationWarning displayName: 'Testing ${{ test }}' + - task: PublishPipelineArtifact@1 + inputs: + targetPath: $(Build.SourcesDirectory)/docs/_build/html + artifactName: html_docs + displayName: 'Publish documentation artifact' + condition: eq('${{ test }}', 'tests/docs -s -v') + - script: | curl -Os https://uploader.codecov.io/latest/linux/codecov chmod +x codecov From ee05f30eb66a2496f9f0dbc40f3b70985a556384 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Wed, 10 Apr 2024 10:57:23 -0600 Subject: [PATCH 60/68] Fix hard dask dependency (#1415) #### Summary Fixes unintended `dask` hard dependency, --- SimPEG/meta/__init__.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/SimPEG/meta/__init__.py 
b/SimPEG/meta/__init__.py index 9dab54e5be..d78117a2e4 100644 --- a/SimPEG/meta/__init__.py +++ b/SimPEG/meta/__init__.py @@ -75,4 +75,20 @@ MultiprocessingRepeatedSimulation, ) -from .dask_sim import DaskMetaSimulation, DaskSumMetaSimulation, DaskRepeatedSimulation +try: + from .dask_sim import ( + DaskMetaSimulation, + DaskSumMetaSimulation, + DaskRepeatedSimulation, + ) +except ImportError: + + class DaskMetaSimulation(MetaSimulation): + def __init__(self, *args, **kwargs): + raise ImportError( + "This simulation requires dask.distributed. Please see installation " + "instructions at https://distributed.dask.org/" + ) + + DaskSumMetaSimulation = DaskMetaSimulation + DaskRepeatedMetaSimulation = DaskMetaSimulation From 5cd200d646aa40f039ce6c8c1a80c5551e95f1aa Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Wed, 10 Apr 2024 11:57:15 -0700 Subject: [PATCH 61/68] Add release notes for v0.21.1 (#1416) Add release notes for patch `v0.21.1`. --- docs/content/release/0.21.0-notes.rst | 2 ++ docs/content/release/0.21.1-notes.rst | 30 +++++++++++++++++++++++++++ docs/content/release/index.rst | 1 + 3 files changed, 33 insertions(+) create mode 100644 docs/content/release/0.21.1-notes.rst diff --git a/docs/content/release/0.21.0-notes.rst b/docs/content/release/0.21.0-notes.rst index c317ded823..f3ec79376e 100644 --- a/docs/content/release/0.21.0-notes.rst +++ b/docs/content/release/0.21.0-notes.rst @@ -272,3 +272,5 @@ Pull Requests https://github.com/simpeg/simpeg/pull/1406 - Bump Black version to 24.3.0 by `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1403 +- Publish documentation on azure `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1412 diff --git a/docs/content/release/0.21.1-notes.rst b/docs/content/release/0.21.1-notes.rst new file mode 100644 index 0000000000..cd35017d87 --- /dev/null +++ b/docs/content/release/0.21.1-notes.rst @@ -0,0 +1,30 @@ +.. 
_0.21.1_notes: + +=========================== +SimPEG 0.21.1 Release Notes +=========================== + +April 10th, 2024 + +.. contents:: Highlights + :depth: 2 + +Updates +======= + +Minor fix when importing Dask in the ``meta`` module: Dask is an optional +dependency. + +Contributors +============ + +This is a combination of contributors and reviewers who've made contributions +towards this release (in no particular order). + +* `@jcapriot `__ + +Pull Requests +============= + +* Fix hard dask dependency by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1415 diff --git a/docs/content/release/index.rst b/docs/content/release/index.rst index bd5a9b9ed0..49daf1cfc9 100644 --- a/docs/content/release/index.rst +++ b/docs/content/release/index.rst @@ -5,6 +5,7 @@ Release Notes .. toctree:: :maxdepth: 2 + 0.21.1 <0.21.1-notes> 0.21.0 <0.21.0-notes> 0.20.0 <0.20.0-notes> 0.19.0 <0.19.0-notes> From fa5d79475f2a9b5656364e10508cdb85c65e35d5 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 11 Apr 2024 09:22:03 -0700 Subject: [PATCH 62/68] Remove the parameters argument from docstring (#1417) Remove the old `parameters` argument from the docstrings of the `UniformBackgroundField` class. --- SimPEG/potential_fields/magnetics/sources.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/SimPEG/potential_fields/magnetics/sources.py b/SimPEG/potential_fields/magnetics/sources.py index c62bc80b23..33c9137ea3 100644 --- a/SimPEG/potential_fields/magnetics/sources.py +++ b/SimPEG/potential_fields/magnetics/sources.py @@ -12,9 +12,6 @@ class UniformBackgroundField(BaseSrc): Parameters ---------- receiver_list : list of SimPEG.potential_fields.magnetics.Point - parameters : tuple of (amplitude, inclutation, declination), optional - Deprecated input for the function, provided in this position for backwards - compatibility amplitude : float, optional amplitude of the inducing backgound field, usually this is in units of nT. 
inclination : float, optional From 999d18c03ebe8b04eae28d7397d8641b2259c5bd Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Tue, 16 Apr 2024 16:24:29 -0600 Subject: [PATCH 63/68] Use reviewdog to annotate PR's with black and flake8 errors. (#1424) #### Summary Uses reviewdog to annotate PR's with suggested changes from black, and comment on errors from flake8 #### What does this implement/fix? Allows for automated reviewing of PR's from reviewdog detailing the flake8 and black errors when they fail, making it much more obvious to people who have submitted PR's about why those tests are failing. --- .github/workflows/pull_request.yml | 38 ++++++++++++++++++++++++++++++ azure-pipelines.yml | 2 +- 2 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/pull_request.yml diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml new file mode 100644 index 0000000000..026bf825c5 --- /dev/null +++ b/.github/workflows/pull_request.yml @@ -0,0 +1,38 @@ +name : Reviewdog PR Annotations +on: [pull_request] + +jobs: + flake8: + runs-on: ubuntu-latest + name: Flake8 check + steps: + - name: Checkout source repository + uses: actions/checkout@v4 + - name: Setup Python env + uses: actions/setup-python@v5 + with: + python-version: '3.11' + - name: Install dependencies to run the flake8 checks + run: pip install -r requirements_style.txt + - name: flake8 review + uses: reviewdog/action-flake8@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + reporter: github-pr-review + + black: + name: Black check + runs-on: ubuntu-latest + steps: + - name: Checkout source repository + uses: actions/checkout@v4 + - name: Setup Python env + uses: actions/setup-python@v5 + with: + python-version: '3.11' + - name: Install dependencies to run the black checks + run: pip install -r requirements_style.txt + - uses: reviewdog/action-black@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + reporter: github-pr-review \ No newline at 
end of file diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 2b6d0dcb57..f7f7ed21a9 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -141,4 +141,4 @@ stages: git push displayName: Push documentation to simpeg-docs env: - GH_TOKEN: $(gh.token) + GH_TOKEN: $(gh.token) \ No newline at end of file From 8a763d3dd3f620628c3fb0faf9fb1b359e219f12 Mon Sep 17 00:00:00 2001 From: Joseph Capriotti Date: Wed, 17 Apr 2024 10:40:14 -0600 Subject: [PATCH 64/68] Safely run reviewdog on `pull_request_target` events (#1427) #### Summary Updates reviewdog GitHub annotation action to safely run from pull request target events --- .github/workflows/pull_request.yml | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 026bf825c5..0151372c8d 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -1,22 +1,32 @@ name : Reviewdog PR Annotations -on: [pull_request] +on: [pull_request_target] jobs: flake8: runs-on: ubuntu-latest name: Flake8 check steps: - - name: Checkout source repository + - name: Checkout target repository source uses: actions/checkout@v4 + - name: Setup Python env uses: actions/setup-python@v5 with: python-version: '3.11' + - name: Install dependencies to run the flake8 checks run: pip install -r requirements_style.txt + + - name: checkout pull request source + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + path: pr_source + - name: flake8 review uses: reviewdog/action-flake8@v3 with: + workdir: pr_source github_token: ${{ secrets.GITHUB_TOKEN }} reporter: github-pr-review @@ -24,15 +34,25 @@ jobs: name: Black check runs-on: ubuntu-latest steps: - - name: Checkout source repository + - name: Checkout target repository source uses: actions/checkout@v4 + - name: Setup Python env uses: actions/setup-python@v5 with: python-version: '3.11' + - name: Install 
dependencies to run the black checks run: pip install -r requirements_style.txt + + - name: checkout pull request source + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + path: 'pr_source' + - uses: reviewdog/action-black@v3 with: + workdir: 'pr_source' github_token: ${{ secrets.GITHUB_TOKEN }} reporter: github-pr-review \ No newline at end of file From fbd92e5ef05b2e0ac49c736aaea6c7dd7731eb8f Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 25 Apr 2024 10:15:05 -0700 Subject: [PATCH 65/68] Add new Issue template for making a release (#1410) Add a new issue template for making a new release, containing a checklist of the steps needed to make a new release. --------- Co-authored-by: Joseph Capriotti --- .github/ISSUE_TEMPLATE/release-checklist.md | 151 ++++++++++++++++++++ 1 file changed, 151 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/release-checklist.md diff --git a/.github/ISSUE_TEMPLATE/release-checklist.md b/.github/ISSUE_TEMPLATE/release-checklist.md new file mode 100644 index 0000000000..d173424945 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/release-checklist.md @@ -0,0 +1,151 @@ +--- +name: Release checklist +about: "Maintainers only: Checklist for making a new release" +title: "Release vX.Y.Z" +labels: "maintenance" +assignees: "" +--- + + + +**Target date:** YYYY/MM/DD + +## Generate release notes + +### Autogenerate release notes with GitHub + +- [ ] Generate a draft for a new Release in GitHub. +- [ ] Create a new tag for it (the version number with a leading `v`). +- [ ] Generate release notes automatically. +- [ ] Copy those notes and paste them into a `notes.md` file. +- [ ] Discard the draft (we'll generate a new one later on). + +### Add release notes to the docs + +- [ ] Convert the Markdown file to RST with: `pandoc notes.md -o notes.rst`. 
+- [ ] Generate list of contributors from the release notes with: + ```bash + grep -Eo "@[[:alnum:]-]+" notes.rst | sort -u | sed -E 's/^/* /' + ``` + Paste the list into the file under a new `Contributors` category. +- [ ] Check if every contributor that participated in the release is in the + list. Generate a list of authors and co-authors from the git log with (update + the `last_release`): + ```bash + export last_release="v0.20.0" + git shortlog HEAD...$last_release -sne > contributors + git log HEAD...$last_release | grep "Co-authored-by" | sed 's/Co-authored-by://' | sed 's/^[[:space:]]*/ /' | sort | uniq -c | sort -nr | sed 's/^ //' >> contributors + sort -rn contributors + ``` +- [ ] Transform GitHub handles into links to their profiles: + ```bash + sed -Ei 's/@([[:alnum:]-]+)/`@\1 `__/' notes.rst + ``` +- [ ] Copy the content of `notes.rst` to a new file + `docs/content/release/-notes.rst`. +- [ ] Edit the release notes file, following the template below and the + previous release notes. +- [ ] Add the new release notes to the list in `docs/content/release/index.rst`. +- [ ] Open a PR with the new release notes. +- [ ] Manually view the built documentation by downloading the Azure `html_doc` + artifact and check for formatting and errors. +- [ ] Merge that PR + + +
+Template for release notes: + +```rst +.. __notes: + +=========================== +SimPEG Release Notes +=========================== + +MONTH DAYth, YEAR + +.. contents:: Highlights + :depth: 3 + +Updates +======= + +New features +------------ + +.. + list new features under subheadings, include link to related PRs + +Documentation +------------- + +.. + list improvements to documentation + +Bugfixes +-------- + +.. + list bugfixes, include link to related PRs + +Breaking changes +---------------- + +.. + list breaking changes introduced in this new release, include link to + releated PRs + +Contributors +============ + +.. + paste list of contributors that was generated in `notes.rst` + +Pull Requests +============= + +.. + paste list of PRs that were copied to `notes.rst` +``` + +
+ + +## Make the new release + +- [ ] Draft a new GitHub Release +- [ ] Create a new tag for it (the version number with a leading `v`). +- [ ] Target the release on `main` or on a particular commit from `main` +- [ ] Generate release notes automatically. +- [ ] Publish the release + +## Extra tasks + +After publishing the release, Azure will automatically push the new version to +PyPI, and build and deploy the docs. You can check the progress of these tasks +in: https://dev.azure.com/simpeg/simpeg/_build + +After they finish: + +- [ ] Check the new version is available in PyPI: https://pypi.org/project/SimPEG/ +- [ ] Check the new documentation is online: https://docs.simpeg.xyz + +For the new version to be available in conda-forge, we need to update the +[conda-forge/simpeg-feedstock](https://github.com/conda-forge/simpeg-feedstock) +repository. Within the same day of the release a new PR will be automatically +open in that repository. So: + +- [ ] Follow the steps provided in the checklist in that PR and merge it. +- [ ] Make sure the new version is available through conda-forge: https://anaconda.org/conda-forge/simpeg + +Lastly, we would need to update the SimPEG version used in +[`simpeg/user-tutorials`](https://github.com/simpeg/user-tutorials) and rerun +its notebooks: + +- [ ] Open issue in + [`simpeg/user-tutorials`](https://github.com/simpeg/user-tutorials) for + rerunning the notebooks using the new released version of SimPEG + +Finally: + +- [ ] Close this issue From b85f758b216a32a5f4df06122cac22e67999b06b Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 25 Apr 2024 12:03:33 -0700 Subject: [PATCH 66/68] Replace use of `refine_tree_xyz` in DCIP tutorials (#1381) Replace the use of `refine_tree_xyz` in DC-IP tutorials for their corresponding counterpart methods of discretize meshes: `refine_bounding_box`, `refine_points` and `refine_surface`. 
Fixes #1379 --- tutorials/05-dcr/plot_fwd_2_dcr2d.py | 16 +++++----------- tutorials/05-dcr/plot_inv_2_dcr2d.py | 16 +++++----------- tutorials/05-dcr/plot_inv_2_dcr2d_irls.py | 16 +++++----------- tutorials/06-ip/plot_fwd_2_dcip2d.py | 16 +++++----------- tutorials/06-ip/plot_inv_2_dcip2d.py | 16 +++++----------- 5 files changed, 25 insertions(+), 55 deletions(-) diff --git a/tutorials/05-dcr/plot_fwd_2_dcr2d.py b/tutorials/05-dcr/plot_fwd_2_dcr2d.py index 2e7935463b..747e60e024 100644 --- a/tutorials/05-dcr/plot_fwd_2_dcr2d.py +++ b/tutorials/05-dcr/plot_fwd_2_dcr2d.py @@ -22,7 +22,7 @@ # from discretize import TreeMesh -from discretize.utils import mkvc, refine_tree_xyz, active_from_xyz +from discretize.utils import mkvc, active_from_xyz from SimPEG.utils import model_builder from SimPEG.utils.io_utils.io_utils_electromagnetics import write_dcip2d_ubc @@ -123,11 +123,9 @@ mesh = TreeMesh([hx, hz], x0="CN") # Mesh refinement based on topography -mesh = refine_tree_xyz( - mesh, +mesh.refine_surface( topo_xyz[:, [0, 2]], - octree_levels=[0, 0, 4, 4], - method="surface", + padding_cells_by_level=[0, 0, 4, 4], finalize=False, ) @@ -144,16 +142,12 @@ np.reshape(electrode_locations, (4 * survey.nD, 2)), axis=0 ) -mesh = refine_tree_xyz( - mesh, unique_locations, octree_levels=[4, 4], method="radial", finalize=False -) +mesh.refine_points(unique_locations, padding_cells_by_level=[4, 4], finalize=False) # Refine core mesh region xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0]) xyz = np.c_[mkvc(xp), mkvc(zp)] -mesh = refine_tree_xyz( - mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False -) +mesh.refine_bounding_box(xyz, padding_cells_by_level=[0, 0, 2, 8], finalize=False) mesh.finalize() diff --git a/tutorials/05-dcr/plot_inv_2_dcr2d.py b/tutorials/05-dcr/plot_inv_2_dcr2d.py index a0479907ad..fd47d61bc2 100644 --- a/tutorials/05-dcr/plot_inv_2_dcr2d.py +++ b/tutorials/05-dcr/plot_inv_2_dcr2d.py @@ -29,7 +29,7 @@ import tarfile from discretize import 
TreeMesh -from discretize.utils import mkvc, refine_tree_xyz, active_from_xyz +from discretize.utils import mkvc, active_from_xyz from SimPEG.utils import model_builder from SimPEG import ( @@ -173,11 +173,9 @@ mesh = TreeMesh([hx, hz], x0="CN") # Mesh refinement based on topography -mesh = refine_tree_xyz( - mesh, +mesh.refine_surface( topo_xyz[:, [0, 2]], - octree_levels=[0, 0, 4, 4], - method="surface", + padding_cells_by_level=[0, 0, 4, 4], finalize=False, ) @@ -194,16 +192,12 @@ np.reshape(electrode_locations, (4 * dc_data.survey.nD, 2)), axis=0 ) -mesh = refine_tree_xyz( - mesh, unique_locations, octree_levels=[4, 4], method="radial", finalize=False -) +mesh.refine_points(unique_locations, padding_cells_by_level=[4, 4], finalize=False) # Refine core mesh region xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0]) xyz = np.c_[mkvc(xp), mkvc(zp)] -mesh = refine_tree_xyz( - mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False -) +mesh.refine_bounding_box(xyz, padding_cells_by_level=[0, 0, 2, 8], finalize=False) mesh.finalize() diff --git a/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py b/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py index d72de43fb9..a6a4024e1f 100644 --- a/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py +++ b/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py @@ -29,7 +29,7 @@ import tarfile from discretize import TreeMesh -from discretize.utils import mkvc, refine_tree_xyz, active_from_xyz +from discretize.utils import mkvc, active_from_xyz from SimPEG.utils import model_builder from SimPEG import ( @@ -179,11 +179,9 @@ mesh = TreeMesh([hx, hz], x0="CN") # Mesh refinement based on topography -mesh = refine_tree_xyz( - mesh, +mesh.refine_surface( topo_xyz[:, [0, 2]], - octree_levels=[0, 0, 4, 4], - method="surface", + padding_cells_by_level=[0, 0, 4, 4], finalize=False, ) @@ -200,16 +198,12 @@ np.reshape(electrode_locations, (4 * dc_data.survey.nD, 2)), axis=0 ) -mesh = refine_tree_xyz( - mesh, unique_locations, octree_levels=[4, 4], method="radial", 
finalize=False -) +mesh.refine_points(unique_locations, padding_cells_by_level=[4, 4], finalize=False) # Refine core mesh region xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0]) xyz = np.c_[mkvc(xp), mkvc(zp)] -mesh = refine_tree_xyz( - mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False -) +mesh.refine_bounding_box(xyz, padding_cells_by_level=[0, 0, 2, 8], finalize=False) mesh.finalize() diff --git a/tutorials/06-ip/plot_fwd_2_dcip2d.py b/tutorials/06-ip/plot_fwd_2_dcip2d.py index 0631392f6d..ada51a11ef 100644 --- a/tutorials/06-ip/plot_fwd_2_dcip2d.py +++ b/tutorials/06-ip/plot_fwd_2_dcip2d.py @@ -30,7 +30,7 @@ # from discretize import TreeMesh -from discretize.utils import mkvc, refine_tree_xyz, active_from_xyz +from discretize.utils import mkvc, active_from_xyz from SimPEG.utils import model_builder from SimPEG.utils.io_utils.io_utils_electromagnetics import write_dcip2d_ubc @@ -135,11 +135,9 @@ mesh = TreeMesh([hx, hz], x0="CN") # Mesh refinement based on topography -mesh = refine_tree_xyz( - mesh, +mesh.refine_surface( topo_xyz[:, [0, 2]], - octree_levels=[0, 0, 4, 4], - method="surface", + padding_cells_by_level=[0, 0, 4, 4], finalize=False, ) @@ -156,16 +154,12 @@ np.reshape(electrode_locations, (4 * dc_survey.nD, 2)), axis=0 ) -mesh = refine_tree_xyz( - mesh, unique_locations, octree_levels=[4, 4], method="radial", finalize=False -) +mesh.refine_points(unique_locations, padding_cells_by_level=[4, 4], finalize=False) # Refine core mesh region xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0]) xyz = np.c_[mkvc(xp), mkvc(zp)] -mesh = refine_tree_xyz( - mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False -) +mesh.refine_bounding_box(xyz, padding_cells_by_level=[0, 0, 2, 8], finalize=False) mesh.finalize() diff --git a/tutorials/06-ip/plot_inv_2_dcip2d.py b/tutorials/06-ip/plot_inv_2_dcip2d.py index ed14b4fcbe..fe274ce764 100644 --- a/tutorials/06-ip/plot_inv_2_dcip2d.py +++ b/tutorials/06-ip/plot_inv_2_dcip2d.py @@ -33,7 
+33,7 @@ import tarfile from discretize import TreeMesh -from discretize.utils import mkvc, refine_tree_xyz, active_from_xyz +from discretize.utils import mkvc, active_from_xyz from SimPEG.utils import model_builder from SimPEG import ( @@ -188,11 +188,9 @@ mesh = TreeMesh([hx, hz], x0="CN") # Mesh refinement based on topography -mesh = refine_tree_xyz( - mesh, +mesh.refine_surface( topo_xyz[:, [0, 2]], - octree_levels=[0, 0, 4, 4], - method="surface", + padding_cells_by_level=[0, 0, 4, 4], finalize=False, ) @@ -209,16 +207,12 @@ np.reshape(electrode_locations, (4 * dc_data.survey.nD, 2)), axis=0 ) -mesh = refine_tree_xyz( - mesh, unique_locations, octree_levels=[4, 4], method="radial", finalize=False -) +mesh.refine_points(unique_locations, padding_cells_by_level=[4, 4], finalize=False) # Refine core mesh region xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0]) xyz = np.c_[mkvc(xp), mkvc(zp)] -mesh = refine_tree_xyz( - mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False -) +mesh.refine_bounding_box(xyz, padding_cells_by_level=[0, 0, 2, 8], finalize=False) mesh.finalize() From 07aeb65de865cd9df55e2d51ca840ea9d60f2234 Mon Sep 17 00:00:00 2001 From: Santiago Soler Date: Thu, 25 Apr 2024 12:05:26 -0700 Subject: [PATCH 67/68] Fix rst syntax in release notes for v0.21.0 (#1434) Fix rst syntax in the release notes of v0.21.0. Fixes #1433 --- docs/content/release/0.21.0-notes.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/content/release/0.21.0-notes.rst b/docs/content/release/0.21.0-notes.rst index f3ec79376e..5e668d9dee 100644 --- a/docs/content/release/0.21.0-notes.rst +++ b/docs/content/release/0.21.0-notes.rst @@ -21,7 +21,7 @@ Gravity simulation using Choclo Now we can use a faster and more memory efficient implementation of the gravity simulation ``SimPEG.potential_fields.gravity.Simulation3DIntegral``, making use of Choclo and Numba. 
To make use of this functionality you will need to -`install ``choclo`` `__ in +`install Choclo `__ in addition to ``SimPEG``. See https://github.com/simpeg/simpeg/pull/1285. From e4bd54b2cf9a8afb07d465897c55619d404d495e Mon Sep 17 00:00:00 2001 From: Thibaut Date: Thu, 9 May 2024 14:25:54 -0700 Subject: [PATCH 68/68] move extended rotated to MP --- SimPEG/regularization/rotated.py | 527 ------------------------------- 1 file changed, 527 deletions(-) delete mode 100644 SimPEG/regularization/rotated.py diff --git a/SimPEG/regularization/rotated.py b/SimPEG/regularization/rotated.py deleted file mode 100644 index 1eaf3166a2..0000000000 --- a/SimPEG/regularization/rotated.py +++ /dev/null @@ -1,527 +0,0 @@ -from typing import Literal - -import numpy as np -import scipy.sparse as sp -from discretize import TensorMesh, TreeMesh -from discretize.base import BaseMesh -from scipy.interpolate import NearestNDInterpolator - -from ..utils.code_utils import ( - validate_float, - validate_ndarray_with_shape, - validate_type, -) -from ..utils.mat_utils import coterminal -from . import BaseRegularization, RegularizationMesh, Sparse, SparseSmallness - - -class SmoothnessFullGradient(BaseRegularization): - r"""Measures the gradient of a model using optionally anisotropic weighting. - - This regularizer measures the first order smoothness in a mesh ambivalent way - by observing that the N-d smoothness operator can be represented as an - inner product with an arbitrarily anisotropic weight. - - By default it assumes uniform weighting in each dimension, which works - for most ``discretize`` mesh types. - - Parameters - ---------- - mesh : discretize.BaseMesh - The mesh object to use for regularization. The mesh should either have - a `cell_gradient` or a `stencil_cell_gradient` defined. - alphas : (mesh.dim,) or (mesh.n_cells, mesh.dim) array_like of float, optional. - The weights of the regularization for each axis. This can be defined for each cell - in the mesh. 
Default is uniform weights equal to the smallest edge length squared. - reg_dirs : (mesh.dim, mesh.dim) or (mesh.n_cells, mesh.dim, mesh.dim) array_like of float - Matrix or list of matrices whose columns represent the regularization directions. - Each matrix should be orthonormal. Default is Identity. - ortho_check : bool, optional - Whether to check `reg_dirs` for orthogonality. - kwargs : - Keyword arguments passed to the parent class ``BaseRegularization``. - - Examples - -------- - Construct of 2D measure with uniform smoothing in each direction. - - >>> from discretize import TensorMesh - >>> from SimPEG.regularization import SmoothnessFullGradient - >>> mesh = TensorMesh([32, 32]) - >>> reg = SmoothnessFullGradient(mesh) - - We can instead create a measure that smooths twice as much in the 1st dimension - than it does in the second dimension. - >>> reg = SmoothnessFullGradient(mesh, [2, 1]) - - The `alphas` parameter can also be indepenant for each cell. Here we set all cells - lower than 0.5 in the x2 to twice as much in the first dimension - otherwise it is uniform smoothing. - >>> alphas = np.ones((mesh.n_cells, mesh.dim)) - >>> alphas[mesh.cell_centers[:, 1] < 0.5] = [2, 1] - >>> reg = SmoothnessFullGradient(mesh, alphas) - - We can also rotate the axis in which we want to preferentially smooth. Say we want to - smooth twice as much along the +x1,+x2 diagonal as we do along the -x1,+x2 diagonal, - effectively rotating our smoothing 45 degrees. Note and the columns of the matrix - represent the directional vectors (not the rows). - >>> sqrt2 = np.sqrt(2) - >>> reg_dirs = np.array([ - ... [sqrt2, -sqrt2], - ... [sqrt2, sqrt2], - ... 
]) - >>> reg = SmoothnessFullGradient(mesh, alphas, reg_dirs=reg_dirs) - - Notes - ----- - The regularization object is the discretized form of the continuous regularization - - ..math: - f(m) = \int_V \nabla m \cdot \mathbf{a} \nabla m \hspace{5pt} \partial V - - The tensor quantity `a` is used to represent the potential preferential directions of - regularization. `a` must be symmetric positive semi-definite with an eigendecomposition of: - - ..math: - \mathbf{a} = \mathbf{Q}\mathbf{L}\mathbf{Q}^{-1} - - `Q` is then the regularization directions ``reg_dirs``, and `L` is represents the weighting - along each direction, with ``alphas`` along its diagonal. These are multiplied to form the - anisotropic alpha used for rotated gradients. - """ - - _multiplier_pair = "alpha_x" - - def __init__( - self, - mesh, - alphas=None, - reg_dirs=None, - ortho_check=True, - norm=2, - irls_scaled=True, - irls_threshold=1e-8, - reference_model_in_smooth=False, - **kwargs, - ): - self.reference_model_in_smooth = reference_model_in_smooth - - if mesh.dim < 2: - raise TypeError("Mesh must have dimension higher than 1") - super().__init__(mesh=mesh, **kwargs) - - self.norm = norm - self.irls_threshold = irls_threshold - self.irls_scaled = irls_scaled - - if alphas is None: - edge_length = np.min(mesh.edge_lengths) - alphas = edge_length**2 * np.ones(mesh.dim) - alphas = validate_ndarray_with_shape( - "alphas", - alphas, - shape=[(mesh.dim,), ("*", mesh.dim)], - dtype=float, - ) - n_active_cells = self.regularization_mesh.n_cells - if len(alphas.shape) == 1: - alphas = np.tile(alphas, (mesh.n_cells, 1)) - if alphas.shape[0] != mesh.n_cells: - # check if I need to expand from active cells to all cells (needed for discretize) - if alphas.shape[0] == n_active_cells and self.active_cells is not None: - alpha_temp = np.zeros((mesh.n_cells, mesh.dim)) - alpha_temp[self.active_cells] = alphas - alphas = alpha_temp - else: - raise IndexError( - f"`alphas` first dimension, {alphas.shape[0]}, 
must be either number " - f"of active cells {mesh.n_cells}, or the number of mesh cells {mesh.n_cells}. " - ) - if np.any(alphas < 0): - raise ValueError("`alpha` must be non-negative") - anis_alpha = alphas - - if reg_dirs is not None: - reg_dirs = validate_ndarray_with_shape( - "reg_dirs", - reg_dirs, - shape=[(mesh.dim, mesh.dim), ("*", mesh.dim, mesh.dim)], - dtype=float, - ) - if reg_dirs.shape == (mesh.dim, mesh.dim): - reg_dirs = np.tile(reg_dirs, (mesh.n_cells, 1, 1)) - if reg_dirs.shape[0] != mesh.n_cells: - # check if I need to expand from active cells to all cells (needed for discretize) - if ( - reg_dirs.shape[0] == n_active_cells - and self.active_cells is not None - ): - reg_dirs_temp = np.zeros((mesh.n_cells, mesh.dim, mesh.dim)) - reg_dirs_temp[self.active_cells] = reg_dirs - reg_dirs = reg_dirs_temp - else: - raise IndexError( - f"`reg_dirs` first dimension, {reg_dirs.shape[0]}, must be either number " - f"of active cells {mesh.n_cells}, or the number of mesh cells {mesh.n_cells}. " - ) - # check orthogonality? 
- if ortho_check: - eye = np.eye(mesh.dim) - for i, M in enumerate(reg_dirs): - if not np.allclose(eye, M @ M.T): - raise ValueError(f"Matrix {i} is not orthonormal") - # create a stack of matrices of dir @ alphas @ dir.T - anis_alpha = np.einsum("ink,ik,imk->inm", reg_dirs, anis_alpha, reg_dirs) - # Then select the upper diagonal components for input to discretize - if mesh.dim == 2: - anis_alpha = np.stack( - ( - anis_alpha[..., 0, 0], - anis_alpha[..., 1, 1], - anis_alpha[..., 0, 1], - ), - axis=-1, - ) - elif mesh.dim == 3: - anis_alpha = np.stack( - ( - anis_alpha[..., 0, 0], - anis_alpha[..., 1, 1], - anis_alpha[..., 2, 2], - anis_alpha[..., 0, 1], - anis_alpha[..., 0, 2], - anis_alpha[..., 1, 2], - ), - axis=-1, - ) - self._anis_alpha = anis_alpha - - @property - def reference_model_in_smooth(self) -> bool: - """ - whether to include reference model in gradient or not - - :return: True or False - """ - return self._reference_model_in_smooth - - @reference_model_in_smooth.setter - def reference_model_in_smooth(self, value: bool): - if not isinstance(value, bool): - raise TypeError( - f"'reference_model_in_smooth must be of type 'bool'. Value of type {type(value)} provided." - ) - self._reference_model_in_smooth = value - - def _delta_m(self, m): - if self.reference_model is None or not self.reference_model_in_smooth: - return m - return m - self.reference_model - - def f_m(self, m): - dfm_dl = self.cell_gradient @ (self.mapping * self._delta_m(m)) - - if self.units is not None and self.units.lower() == "radian": - return coterminal(dfm_dl * self._cell_distances) / self._cell_distances - return dfm_dl - - def f_m_deriv(self, m): - return self.cell_gradient @ self.mapping.deriv(self._delta_m(m)) - - # overwrite the call, deriv, and deriv2... 
- def __call__(self, m): - M_f = self.W - r = self.f_m(m) - return 0.5 * r @ M_f @ r - - def deriv(self, m): - m_d = self.f_m_deriv(m) - M_f = self.W - r = self.f_m(m) - return m_d.T @ (M_f @ r) - - def deriv2(self, m, v=None): - m_d = self.f_m_deriv(m) - M_f = self.W - if v is None: - return m_d.T @ (M_f @ m_d) - - return m_d.T @ (M_f @ (m_d @ v)) - - @property - def cell_gradient(self): - """The (approximate) cell gradient operator - - Returns - ------- - scipy.sparse.csr_matrix - """ - if getattr(self, "_cell_gradient", None) is None: - mesh = self.regularization_mesh.mesh - try: - cell_gradient = mesh.cell_gradient - except AttributeError: - a = mesh.face_areas - v = mesh.average_cell_to_face @ mesh.cell_volumes - cell_gradient = sp.diags(a / v) @ mesh.stencil_cell_gradient - - v = np.ones(mesh.n_cells) - # Turn off cell_gradient at boundary faces - if self.active_cells is not None: - v[~self.active_cells] = 0 - - dv = cell_gradient @ v - P = sp.diags((np.abs(dv) <= 1e-16).astype(int)) - cell_gradient = P @ cell_gradient - if self.active_cells is not None: - cell_gradient = cell_gradient[:, self.active_cells] - self._cell_gradient = cell_gradient - return self._cell_gradient - - @property - def W(self): - """The inner product operator using rotated coordinates - - Returns - ------- - scipy.sparse.csr_matrix - """ - if getattr(self, "_W", None) is None: - mesh = self.regularization_mesh.mesh - cell_weights = np.ones(len(mesh)) - for values in self._weights.values(): - # project values to full mesh - # dirty fix of original PR - projection = NearestNDInterpolator( - mesh.cell_centers[self.active_cells], values - ) - proj_values = projection(mesh.cell_centers) - cell_weights *= proj_values - reg_model = self._anis_alpha * cell_weights[:, None] - # turn off measure in inactive cells - if self.active_cells is not None: - reg_model[~self.active_cells] = 0.0 - - self._W = mesh.get_face_inner_product(reg_model) - return self._W - - def update_weights(self, m): - f_m = 
self.f_m(m) - irls_weights = self.get_lp_weights(f_m) - irls_weights = self.regularization_mesh.mesh.average_face_to_cell @ irls_weights - self.set_weights(irls=irls_weights[self.active_cells]) - - def get_lp_weights(self, f_m): - lp_scale = np.ones_like(f_m) - if self.irls_scaled: - # Scale on l2-norm gradient: f_m.max() - l2_max = np.ones_like(f_m) * np.abs(f_m).max() - # Compute theoretical maximum gradients for p < 1 - l2_max[self.norm < 1] = self.irls_threshold / np.sqrt( - 1.0 - self.norm[self.norm < 1] - ) - lp_values = l2_max / (l2_max**2.0 + self.irls_threshold**2.0) ** ( - 1.0 - self.norm / 2.0 - ) - lp_scale[lp_values != 0] = np.abs(f_m).max() / lp_values[lp_values != 0] - - return lp_scale / (f_m**2.0 + self.irls_threshold**2.0) ** ( - 1.0 - self.norm / 2.0 - ) - - @property - def irls_scaled(self) -> bool: - """Scale IRLS weights. - - When ``True``, scaling is applied when computing IRLS weights. - The scaling acts to preserve the balance between the data misfit and the components of - the regularization based on the derivative of the l2-norm measure. And it assists the - convergence by ensuring the model does not deviate - aggressively from the global 2-norm solution during the first few IRLS iterations. - For a comprehensive description, see the documentation for :py:meth:`get_lp_weights` . - - Returns - ------- - bool - Whether to scale IRLS weights. - """ - return self._irls_scaled - - @irls_scaled.setter - def irls_scaled(self, value: bool): - self._irls_scaled = validate_type("irls_scaled", value, bool, cast=False) - - @property - def irls_threshold(self): - r"""Stability constant for computing IRLS weights. - - Returns - ------- - float - Stability constant for computing IRLS weights. 
- """ - return self._irls_threshold - - @irls_threshold.setter - def irls_threshold(self, value): - self._irls_threshold = validate_float( - "irls_threshold", value, min_val=0.0, inclusive_min=False - ) - - @property - def norm(self): - r"""Norm for the sparse regularization. - - Returns - ------- - None, float, (n_cells, ) numpy.ndarray - Norm for the sparse regularization. If ``None``, a 2-norm is used. - A float within the interval [0,2] represents a constant norm applied for all cells. - A ``numpy.ndarray`` object, where each entry is used to apply a different norm to each cell in the mesh. - """ - return self._norm - - @norm.setter - def norm(self, value: float | np.ndarray | None): - if value is None: - value = np.ones(self.cell_gradient.shape[0]) * 2.0 - else: - value = np.ones(self.cell_gradient.shape[0]) * value - if np.any(value < 0) or np.any(value > 2): - raise ValueError( - "Value provided for 'norm' should be in the interval [0, 2]" - ) - self._norm = value - - @property - def units(self) -> str | None: - """Units for the model parameters. - - Some regularization classes behave differently depending on the units; e.g. 'radian'. - - Returns - ------- - str - Units for the model parameters. - """ - return self._units - - @units.setter - def units(self, units: str | None): - if units is not None and not isinstance(units, str): - raise TypeError( - f"'units' must be None or type str. Value of type {type(units)} provided." - ) - self._units = units - - @property - def _cell_distances(self) -> np.ndarray: - """ - cell size average on faces - - :return: np.ndarray - """ - cell_distances = self.cell_gradient.max(axis=1).toarray().ravel() - cell_distances[cell_distances == 0] = 1 - cell_distances = cell_distances ** (-1) - - return cell_distances - - -class RotatedSparse(Sparse): - """ - Class that wraps the rotated gradients in a ComboObjectiveFunction similar to Sparse. 
- """ - - def __init__( - self, - mesh: TensorMesh | TreeMesh, - reg_dirs: np.ndarray, - alphas_rot: tuple[float, float, float], - active_cells: np.ndarray | None = None, - norms: list[float] = [2.0, 2.0], - gradient_type: Literal["components", "total"] = "total", - irls_scaled: bool = True, - irls_threshold: float = 1e-8, - objfcts: list[BaseRegularization] | None = None, - **kwargs, - ): - """ - Class to wrap rotated gradient into a ComboObjective Function - - :param mesh: mesh - :param reg_dirs: rotation matrix - :param alphas_rot: alphas for rotated gradients - :param active_cells: active cells, defaults to None - :param norms: norms, defaults to [2, 2] - :param gradient_type: gradient_type, defaults to "total" - :param irls_scaled: irls_scaled, defaults to True - :param irls_threshold: irls_threshold, defaults to 1e-8 - :param objfcts: objfcts, defaults to None - """ - if not isinstance(mesh, RegularizationMesh): - mesh = RegularizationMesh(mesh) - - if not isinstance(mesh, RegularizationMesh): - TypeError( - f"'regularization_mesh' must be of type {RegularizationMesh} or {BaseMesh}. " - f"Value of type {type(mesh)} provided." - ) - self._regularization_mesh = mesh - if active_cells is not None: - self._regularization_mesh.active_cells = active_cells - - if objfcts is None: - objfcts = [ - SparseSmallness(mesh=self.regularization_mesh), - SmoothnessFullGradient( - mesh=self.regularization_mesh.mesh, - active_cells=active_cells, - reg_dirs=reg_dirs, - alphas=alphas_rot, - norm=norms[1], - irls_scaled=irls_scaled, - irls_threshold=irls_threshold, - ), - ] - - super().__init__( - self.regularization_mesh, - objfcts=objfcts, - active_cells=active_cells, - gradient_type=gradient_type, - norms=norms[:2], - irls_scaled=irls_scaled, - irls_threshold=irls_threshold, - **kwargs, - ) - - @property - def alpha_y(self): - """Multiplier constant for first-order smoothness along y. - - Returns - ------- - float - Multiplier constant for first-order smoothness along y. 
- """ - return self._alpha_y - - @alpha_y.setter - def alpha_y(self, value): - self._alpha_y = None - - @property - def alpha_z(self): - """Multiplier constant for first-order smoothness along z. - - Returns - ------- - float - Multiplier constant for first-order smoothness along z. - """ - return self._alpha_z - - @alpha_z.setter - def alpha_z(self, value): - self._alpha_z = None