diff --git a/.azure-pipelines/matrix.yml b/.azure-pipelines/matrix.yml index fd6cc259a3..2b96f56561 100644 --- a/.azure-pipelines/matrix.yml +++ b/.azure-pipelines/matrix.yml @@ -50,6 +50,13 @@ jobs: pytest ${{ test }} -v --cov-config=.coveragerc --cov=SimPEG --cov-report=xml --cov-report=html -W ignore::DeprecationWarning displayName: 'Testing ${{ test }}' + - task: PublishPipelineArtifact@1 + inputs: + targetPath: $(Build.SourcesDirectory)/docs/_build/html + artifactName: html_docs + displayName: 'Publish documentation artifact' + condition: eq('${{ test }}', 'tests/docs -s -v') + - script: | curl -Os https://uploader.codecov.io/latest/linux/codecov chmod +x codecov diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 5e2e876ec4..ba93c01385 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -7,7 +7,7 @@ body: - type: markdown attributes: value: > - Thanks for your use of SimPEG and for taking the time to report a bug! Please + Thanks for using SimPEG and taking the time to report a bug! Please first double check that there is not already a bug report on this issue by searching through the existing bugs. @@ -19,11 +19,11 @@ body: - type: textarea attributes: - label: "Reproducable code example:" + label: "Reproducible code example:" description: > Please submit a small, but complete, code sample that reproduces the bug or missing functionality. It should be able to be copy-pasted - into a Python interpreter and ran as-is. + into a Python interpreter and run as-is. 
placeholder: | import SimPEG << your code here >> @@ -58,4 +58,4 @@ body: placeholder: | << your explanation here >> validations: - required: false \ No newline at end of file + required: false diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index f4108d53c7..6180153534 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -3,5 +3,5 @@ contact_links: url: https://simpeg.discourse.group/ about: "If you have a question on how to use SimPEG, please submit them to our discourse page." - name: Development-related matters - url: http://slack.simpeg.xyz/ - about: "If you would like to discuss SimPEG, any geophysics related problems, or need help from the SimPEG team, get in touch with us on slack." + url: https://mattermost.softwareunderground.org/simpeg + about: "If you would like to discuss SimPEG, any geophysics related problems, or need help from the SimPEG team, get in touch with us on Mattermost." diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml index c7db801f1a..5d8d196a5e 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.yml +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -6,10 +6,11 @@ body: - type: markdown attributes: value: > - If you'd like to request a new feature in SimPEG, or suggest changes in the - functionality of certain functions, we recommend getting in touch with the - developers on [slack](https://slack.simpeg.xyz), in addition to opening an - issue or pull request here. + If you'd like to request a new feature in SimPEG, or suggest changes in + the functionality of certain functions, we recommend getting in touch + with the developers on + [Mattermost](https://mattermost.softwareunderground.org/simpeg), in + addition to opening an Issue or Pull Request here. You can also check out our [Contributor Guide](https://docs.simpeg.xyz/content/getting_started/contributing/index.html) if you need more information. 
diff --git a/.github/ISSUE_TEMPLATE/release-checklist.md b/.github/ISSUE_TEMPLATE/release-checklist.md new file mode 100644 index 0000000000..d173424945 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/release-checklist.md @@ -0,0 +1,151 @@ +--- +name: Release checklist +about: "Maintainers only: Checklist for making a new release" +title: "Release vX.Y.Z" +labels: "maintenance" +assignees: "" +--- + + + +**Target date:** YYYY/MM/DD + +## Generate release notes + +### Autogenerate release notes with GitHub + +- [ ] Generate a draft for a new Release in GitHub. +- [ ] Create a new tag for it (the version number with a leading `v`). +- [ ] Generate release notes automatically. +- [ ] Copy those notes and paste them into a `notes.md` file. +- [ ] Discard the draft (we'll generate a new one later on). + +### Add release notes to the docs + +- [ ] Convert the Markdown file to RST with: `pandoc notes.md -o notes.rst`. +- [ ] Generate list of contributors from the release notes with: + ```bash + grep -Eo "@[[:alnum:]-]+" notes.rst | sort -u | sed -E 's/^/* /' + ``` + Paste the list into the file under a new `Contributors` category. +- [ ] Check if every contributor that participated in the release is in the + list. Generate a list of authors and co-authors from the git log with (update + the `last_release`): + ```bash + export last_release="v0.20.0" + git shortlog HEAD...$last_release -sne > contributors + git log HEAD...$last_release | grep "Co-authored-by" | sed 's/Co-authored-by://' | sed 's/^[[:space:]]*/ /' | sort | uniq -c | sort -nr | sed 's/^ //' >> contributors + sort -rn contributors + ``` +- [ ] Transform GitHub handles into links to their profiles: + ```bash + sed -Ei 's/@([[:alnum:]-]+)/`@\1 `__/' notes.rst + ``` +- [ ] Copy the content of `notes.rst` to a new file + `docs/content/release/-notes.rst`. +- [ ] Edit the release notes file, following the template below and the + previous release notes. 
+- [ ] Add the new release notes to the list in `docs/content/release/index.rst`. +- [ ] Open a PR with the new release notes. +- [ ] Manually view the built documentation by downloading the Azure `html_docs` + artifact and check for formatting and errors. +- [ ] Merge that PR + +
+Template for release notes: + +```rst +.. _<version>_notes: + +=========================== +SimPEG <version> Release Notes +=========================== + +MONTH DAYth, YEAR + +.. contents:: Highlights + :depth: 3 + +Updates +======= + +New features +------------ + +.. + list new features under subheadings, include link to related PRs + +Documentation +------------- + +.. + list improvements to documentation + +Bugfixes +-------- + +.. + list bugfixes, include link to related PRs + +Breaking changes +---------------- + +.. + list breaking changes introduced in this new release, include link to + related PRs + +Contributors +============ + +.. + paste list of contributors that was generated in `notes.rst` + +Pull Requests +============= + +.. + paste list of PRs that were copied to `notes.rst` +``` + +
+ + +## Make the new release + +- [ ] Draft a new GitHub Release +- [ ] Create a new tag for it (the version number with a leading `v`). +- [ ] Target the release on `main` or on a particular commit from `main` +- [ ] Generate release notes automatically. +- [ ] Publish the release + +## Extra tasks + +After publishing the release, Azure will automatically push the new version to +PyPI, and build and deploy the docs. You can check the progress of these tasks +in: https://dev.azure.com/simpeg/simpeg/_build + +After they finish: + +- [ ] Check the new version is available in PyPI: https://pypi.org/project/SimPEG/ +- [ ] Check the new documentation is online: https://docs.simpeg.xyz + +For the new version to be available in conda-forge, we need to update the +[conda-forge/simpeg-feedstock](https://github.com/conda-forge/simpeg-feedstock) +repository. Within the same day of the release a new PR will be automatically +opened in that repository. So: + +- [ ] Follow the steps provided in the checklist in that PR and merge it.
+- [ ] Make sure the new version is available through conda-forge: https://anaconda.org/conda-forge/simpeg + +Lastly, we would need to update the SimPEG version used in +[`simpeg/user-tutorials`](https://github.com/simpeg/user-tutorials) and rerun +its notebooks: + +- [ ] Open issue in + [`simpeg/user-tutorials`](https://github.com/simpeg/user-tutorials) for + rerunning the notebooks using the new released version of SimPEG + +Finally: + +- [ ] Close this issue diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml new file mode 100644 index 0000000000..0151372c8d --- /dev/null +++ b/.github/workflows/pull_request.yml @@ -0,0 +1,58 @@ +name : Reviewdog PR Annotations +on: [pull_request_target] + +jobs: + flake8: + runs-on: ubuntu-latest + name: Flake8 check + steps: + - name: Checkout target repository source + uses: actions/checkout@v4 + + - name: Setup Python env + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install dependencies to run the flake8 checks + run: pip install -r requirements_style.txt + + - name: checkout pull request source + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + path: pr_source + + - name: flake8 review + uses: reviewdog/action-flake8@v3 + with: + workdir: pr_source + github_token: ${{ secrets.GITHUB_TOKEN }} + reporter: github-pr-review + + black: + name: Black check + runs-on: ubuntu-latest + steps: + - name: Checkout target repository source + uses: actions/checkout@v4 + + - name: Setup Python env + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install dependencies to run the black checks + run: pip install -r requirements_style.txt + + - name: checkout pull request source + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + path: 'pr_source' + + - uses: reviewdog/action-black@v3 + with: + workdir: 'pr_source' + github_token: ${{ secrets.GITHUB_TOKEN }} + reporter: 
github-pr-review \ No newline at end of file diff --git a/.gitignore b/.gitignore index f6cf59ad22..d85d053a89 100644 --- a/.gitignore +++ b/.gitignore @@ -49,6 +49,7 @@ docs/content/api/generated/* docs/content/examples/* docs/content/tutorials/* docs/modules/* +docs/sg_execution_times.rst .vscode/* # paths to where data are downloaded diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9697f155ad..ccb608b94b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/psf/black - rev: 23.12.1 + rev: 24.3.0 hooks: - id: black language_version: python3 diff --git a/AUTHORS.rst b/AUTHORS.rst index 55de84f924..e12ab5a8d8 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -18,4 +18,11 @@ - Thibaut Astic, (`@thast `_) - Michael Mitchell, (`@micmitch `_) - I-Kang Ding, (`@ikding `_) -- Richard Scott (`@bluetyson `_) +- Richard Scott, (`@bluetyson `_) +- Xiaolong Wei, (`@xiaolongw1223 `_) +- Santiago Soler, (`@santisoler `_) +- Nick Williams, (`@nwilliams-kobold `_) +- John Weis, (`@johnweis0480 `_) +- Kalen Martens, (`@kalen-sj `_) +- Williams A. Lima (`@ghwilliams `_) +- Ying Hu, (`@YingHuuu `_) diff --git a/LICENSE b/LICENSE index 79e70af749..cd67a7669e 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2013-2023 SimPEG Developers +Copyright (c) 2013-2024 SimPEG Developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/README.rst b/README.rst index bb8122935b..7545b24db7 100644 --- a/README.rst +++ b/README.rst @@ -30,8 +30,8 @@ SimPEG .. image:: https://img.shields.io/discourse/users?server=http%3A%2F%2Fsimpeg.discourse.group%2F :target: https://simpeg.discourse.group/ -.. image:: https://img.shields.io/badge/Slack-simpeg-4A154B.svg?logo=slack - :target: http://slack.simpeg.xyz +.. 
image:: https://img.shields.io/badge/simpeg-purple?logo=mattermost&label=Mattermost + :target: https://mattermost.softwareunderground.org/simpeg .. image:: https://img.shields.io/badge/Youtube%20channel-GeoSci.xyz-FF0000.svg?logo=youtube :target: https://www.youtube.com/channel/UCBrC4M8_S4GXhyHht7FyQqw @@ -109,7 +109,8 @@ Questions If you have a question regarding a specific use of SimPEG, the fastest way to get a response is by posting on our Discourse discussion forum: https://simpeg.discourse.group/. Alternatively, if you prefer real-time chat, -you can join our slack group at http://slack.simpeg.xyz. +you can join our Mattermost Team at +https://mattermost.softwareunderground.org/simpeg. Please do not create an issue to ask a question. @@ -121,7 +122,8 @@ for developers to discuss upcoming changes to the code base, and for discussing topics related to geophysics in general. Currently our meetings are held every Wednesday, alternating between a mornings (10:30 am pacific time) and afternoons (3:00 pm pacific time) -on even numbered Wednesdays. Find more info on our `slack `_. +on even numbered Wednesdays. Find more info on our +`Mattermost `_. 
Links @@ -134,8 +136,8 @@ Forums: https://simpeg.discourse.group/ -Slack (real time chat): -http://slack.simpeg.xyz +Mattermost (real time chat): +https://mattermost.softwareunderground.org/simpeg Documentation: diff --git a/SimPEG/__init__.py b/SimPEG/__init__.py index ff20906fb9..d1d3ab2784 100644 --- a/SimPEG/__init__.py +++ b/SimPEG/__init__.py @@ -80,6 +80,7 @@ maps.InjectActiveEdges maps.MuRelative maps.LogMap + maps.LogisticSigmoidMap maps.ParametricBlock maps.ParametricCircleMap maps.ParametricEllipsoid diff --git a/SimPEG/base/pde_simulation.py b/SimPEG/base/pde_simulation.py index 1d1764e3fd..c7ebe776a7 100644 --- a/SimPEG/base/pde_simulation.py +++ b/SimPEG/base/pde_simulation.py @@ -16,6 +16,9 @@ def __inner_mat_mul_op(M, u, v=None, adjoint=False): # u has multiple fields if v.ndim == 1: v = v[:, None] + if adjoint and v.shape[1] != u.shape[1] and v.shape[1] > 1: + # make sure v is a good shape + v = v.reshape(u.shape[0], -1, u.shape[1]) else: if v.ndim > 1: u = u[:, None] diff --git a/SimPEG/data.py b/SimPEG/data.py index 4ba5ca3571..fa42a6d59e 100644 --- a/SimPEG/data.py +++ b/SimPEG/data.py @@ -115,7 +115,7 @@ def dobs(self): numpy.ndarray Notes - -------- + ----- This array can also be modified by directly indexing the data object using the a tuple of the survey's sources and receivers. @@ -363,8 +363,10 @@ def fromvec(self, v): class SyntheticData(Data): - r""" - Class for creating synthetic data. + r"""Synthetic data class. + + The ``SyntheticData`` class is a :py:class:`SimPEG.data.Data` class that allows the + user to keep track of both clean and noisy data. Parameters ---------- @@ -375,12 +377,12 @@ class SyntheticData(Data): Observed data. dclean : (nD) numpy.ndarray Noiseless data. - relative_error : SimPEG.data.UncertaintyArray + relative_error : float or np.ndarray Assign relative uncertainties to the data using relative error; sometimes referred to as percent uncertainties. 
For each datum, we assume the standard deviation of Gaussian noise is the relative error times the absolute value of the datum; i.e. :math:`C_{err} \times |d|`. - noise_floor : UncertaintyArray + noise_floor : float or np.ndarray Assign floor/absolute uncertainties to the data. For each datum, we assume standard deviation of Gaussian noise is equal to *noise_floor*. """ diff --git a/SimPEG/data_misfit.py b/SimPEG/data_misfit.py index 42ffc6532d..6d975f2d30 100644 --- a/SimPEG/data_misfit.py +++ b/SimPEG/data_misfit.py @@ -19,7 +19,7 @@ class inherits the :py:class:`SimPEG.objective_function.L2ObjectiveFunction`. create your own data misfit class. .. math:: - \phi_d (\mathbf{m}) = \frac{1}{2} \| \mathbf{W} f(\mathbf{m}) \|_2^2 + \phi_d (\mathbf{m}) = \| \mathbf{W} f(\mathbf{m}) \|_2^2 where :math:`\mathbf{m}` is the model vector, :math:`\mathbf{W}` is a linear weighting matrix, and :math:`f` is a mapping function that acts on the model. @@ -152,7 +152,7 @@ def W(self): For a discrete least-squares data misfit function of the form: .. math:: - \phi_d (\mathbf{m}) = \frac{1}{2} \| \mathbf{W} \mathbf{f}(\mathbf{m}) \|_2^2 + \phi_d (\mathbf{m}) = \| \mathbf{W} \mathbf{f}(\mathbf{m}) \|_2^2 :math:`\mathbf{W}` is a linear weighting matrix, :math:`\mathbf{m}` is the model vector, and :math:`\mathbf{f}` is a discrete mapping function that acts on the model vector. @@ -237,7 +237,7 @@ class L2DataMisfit(BaseDataMisfit): data and predicted data for a given model. I.e.: .. 
math:: - \phi_d (\mathbf{m}) = \frac{1}{2} \big \| \mathbf{W_d} + \phi_d (\mathbf{m}) = \big \| \mathbf{W_d} \big ( \mathbf{d}_\text{pred} - \mathbf{d}_\text{obs} \big ) \big \|_2^2 where :math:`\mathbf{d}_\text{obs}` is the observed data vector, :math:`\mathbf{d}_\text{pred}` @@ -266,7 +266,7 @@ def __call__(self, m, f=None): """Evaluate the residual for a given model.""" R = self.W * self.residual(m, f=f) - return 0.5 * np.vdot(R, R) + return np.vdot(R, R) @timeIt def deriv(self, m, f=None): @@ -293,7 +293,7 @@ def deriv(self, m, f=None): if f is None: f = self.simulation.fields(m) - return self.simulation.Jtvec( + return 2 * self.simulation.Jtvec( m, self.W.T * (self.W * self.residual(m, f=f)), f=f ) @@ -330,6 +330,6 @@ def deriv2(self, m, v, f=None): if f is None: f = self.simulation.fields(m) - return self.simulation.Jtvec_approx( + return 2 * self.simulation.Jtvec_approx( m, self.W * (self.W * self.simulation.Jvec_approx(m, v, f=f)), f=f ) diff --git a/SimPEG/directives/__init__.py b/SimPEG/directives/__init__.py index a713648113..8ce839d89f 100644 --- a/SimPEG/directives/__init__.py +++ b/SimPEG/directives/__init__.py @@ -1,3 +1,102 @@ +""" +============================================= +Directives (:mod:`SimPEG.directives`) +============================================= + +.. currentmodule:: SimPEG.directives + +Directives are classes that allow us to control the inversion, perform tasks +between iterations, save information about our inversion process and more. +Directives are passed to the ``SimPEG.inversion.BaseInversion`` class through +the ``directiveList`` argument. The tasks specified through the directives are +executed after each inversion iteration, following the same order as in which +they are passed in the ``directiveList``. 
+ +Although you can write your own directive classes and plug them into your +inversion, we provide a set of useful directive classes that cover a wide range +of applications: + + +General purpose directives +========================== + +.. autosummary:: + :toctree: generated/ + + AlphasSmoothEstimate_ByEig + BetaEstimateMaxDerivative + BetaEstimate_ByEig + BetaSchedule + JointScalingSchedule + MultiTargetMisfits + ProjectSphericalBounds + ScalingMultipleDataMisfits_ByEig + TargetMisfit + UpdatePreconditioner + UpdateSensitivityWeights + Update_Wj + + +Directives to save inversion results +==================================== + +.. autosummary:: + :toctree: generated/ + + SaveEveryIteration + SaveModelEveryIteration + SaveOutputDictEveryIteration + SaveOutputEveryIteration + + +Directives related to sparse inversions +======================================= + +.. autosummary:: + :toctree: generated/ + + Update_IRLS + + +Directives related to PGI +========================= + +.. autosummary:: + :toctree: generated/ + + PGI_AddMrefInSmooth + PGI_BetaAlphaSchedule + PGI_UpdateParameters + + +Directives related to joint inversions +====================================== + +.. autosummary:: + :toctree: generated/ + + SimilarityMeasureInversionDirective + SimilarityMeasureSaveOutputEveryIteration + PairedBetaEstimate_ByEig + PairedBetaSchedule + MovingAndMultiTargetStopping + + +Base directive classes +====================== +The ``InversionDirective`` class defines the basic class for all directives. +Inherit from this class when writing your own directive. The ``DirectiveList`` +is used under the hood to handle the execution of all directives passed to the +``SimPEG.inversion.BaseInversion``. + +.. 
autosummary:: + :toctree: generated/ + + InversionDirective + DirectiveList + +""" + from .directives import ( InversionDirective, DirectiveList, diff --git a/SimPEG/directives/directives.py b/SimPEG/directives/directives.py index 6700257d9a..3907ca646e 100644 --- a/SimPEG/directives/directives.py +++ b/SimPEG/directives/directives.py @@ -14,7 +14,6 @@ Sparse, SparseSmallness, PGIsmallness, - PGIwithNonlinearRelationshipsSmallness, SmoothnessFirstOrder, SparseSmoothness, BaseSimilarityMeasure, @@ -23,7 +22,7 @@ mkvc, set_kwargs, sdiag, - diagEst, + estimate_diagonal, spherical2cartesian, cartesian2spherical, Zero, @@ -65,14 +64,13 @@ class InversionDirective: _dmisfitPair = [BaseDataMisfit, ComboObjectiveFunction] def __init__(self, inversion=None, dmisfit=None, reg=None, verbose=False, **kwargs): + # Raise error on deprecated arguments + if (key := "debug") in kwargs.keys(): + raise TypeError(f"'{key}' property has been removed. Please use 'verbose'.") self.inversion = inversion self.dmisfit = dmisfit self.reg = reg - debug = kwargs.pop("debug", None) - if debug is not None: - self.debug = debug - else: - self.verbose = verbose + self.verbose = verbose set_kwargs(self, **kwargs) @property @@ -90,7 +88,7 @@ def verbose(self, value): self._verbose = validate_type("verbose", value, bool) debug = deprecate_property( - verbose, "debug", "verbose", removal_version="0.19.0", future_warn=True + verbose, "debug", "verbose", removal_version="0.19.0", error=True ) @property @@ -716,7 +714,6 @@ def initialize(self): Smallness, SparseSmallness, PGIsmallness, - PGIwithNonlinearRelationshipsSmallness, ), ): smallness += [obj] @@ -1060,17 +1057,17 @@ def phi_d_star(self): ------- float """ - # the factor of 0.5 is because we do phid = 0.5*||dpred - dobs||^2 + # phid = ||dpred - dobs||^2 if self._phi_d_star is None: nD = 0 for survey in self.survey: nD += survey.nD - self._phi_d_star = 0.5 * nD + self._phi_d_star = nD return self._phi_d_star @phi_d_star.setter def phi_d_star(self, 
value): - # the factor of 0.5 is because we do phid = 0.5*||dpred - dobs||^2 + # phid = ||dpred - dobs||^2 if value is not None: value = validate_float( "phi_d_star", value, min_val=0.0, inclusive_min=False @@ -1166,13 +1163,13 @@ def phi_d_star(self): ------- float """ - # the factor of 0.5 is because we do phid = 0.5*|| dpred - dobs||^2 + # phid = || dpred - dobs||^2 if getattr(self, "_phi_d_star", None) is None: # Check if it is a ComboObjective if isinstance(self.dmisfit, ComboObjectiveFunction): - value = np.r_[[0.5 * survey.nD for survey in self.survey]] + value = np.r_[[survey.nD for survey in self.survey]] else: - value = np.r_[[0.5 * self.survey.nD]] + value = np.r_[[self.survey.nD]] self._phi_d_star = value self._DMtarget = None @@ -1180,7 +1177,7 @@ def phi_d_star(self): @phi_d_star.setter def phi_d_star(self, value): - # the factor of 0.5 is because we do phid = 0.5*|| dpred - dobs||^2 + # phid =|| dpred - dobs||^2 if value is not None: value = validate_ndarray_with_shape("phi_d_star", value, shape=("*",)) self._phi_d_star = value @@ -1288,13 +1285,7 @@ def initialize(self): np.r_[ i, j, - ( - isinstance( - regpart, - PGIwithNonlinearRelationshipsSmallness, - ) - or isinstance(regpart, PGIsmallness) - ), + isinstance(regpart, PGIsmallness), ] ) for i, regobjcts in enumerate(self.invProb.reg.objfcts) @@ -1332,13 +1323,7 @@ def initialize(self): ( np.r_[ j, - ( - isinstance( - regpart, - PGIwithNonlinearRelationshipsSmallness, - ) - or isinstance(regpart, PGIsmallness) - ), + isinstance(regpart, PGIsmallness), ] ) for j, regpart in enumerate(self.invProb.reg.objfcts) @@ -1426,11 +1411,11 @@ def CLtarget(self): self._CLtarget = self.chiSmall * self.phi_ms_star elif getattr(self, "_CLtarget", None) is None: - # the factor of 0.5 is because we do phid = 0.5*|| dpred - dobs||^2 + # phid = ||dpred - dobs||^2 if self.phi_ms_star is None: # Expected value is number of active cells * number of physical # properties - self.phi_ms_star = 0.5 * 
len(self.invProb.model) + self.phi_ms_star = len(self.invProb.model) self._CLtarget = self.chiSmall * self.phi_ms_star @@ -1747,7 +1732,7 @@ def load_results(self): self.f = results[:, 7] - self.target_misfit = self.invProb.dmisfit.simulation.survey.nD / 2.0 + self.target_misfit = self.invProb.dmisfit.simulation.survey.nD self.i_target = None if self.invProb.phi_d < self.target_misfit: @@ -1765,9 +1750,7 @@ def plot_misfit_curves( plot_small=False, plot_smooth=False, ): - self.target_misfit = ( - np.sum([dmis.nD for dmis in self.invProb.dmisfit.objfcts]) / 2.0 - ) + self.target_misfit = np.sum([dmis.nD for dmis in self.invProb.dmisfit.objfcts]) self.i_target = None if self.invProb.phi_d < self.target_misfit: @@ -1821,7 +1804,7 @@ def plot_misfit_curves( fig.savefig(fname, dpi=dpi) def plot_tikhonov_curves(self, fname=None, dpi=200): - self.target_misfit = self.invProb.dmisfit.simulation.survey.nD / 2.0 + self.target_misfit = self.invProb.dmisfit.simulation.survey.nD self.i_target = None if self.invProb.phi_d < self.target_misfit: @@ -2062,7 +2045,7 @@ def target(self): for survey in self.survey: nD += survey.nD - self._target = nD * 0.5 * self.chifact_target + self._target = nD * self.chifact_target return self._target @@ -2076,10 +2059,10 @@ def start(self): if isinstance(self.survey, list): self._start = 0 for survey in self.survey: - self._start += survey.nD * 0.5 * self.chifact_start + self._start += survey.nD * self.chifact_start else: - self._start = self.survey.nD * 0.5 * self.chifact_start + self._start = self.survey.nD * self.chifact_start return self._start @start.setter @@ -2435,7 +2418,7 @@ def JtJv(v): return self.simulation.Jtvec(m, Jv) - JtJdiag = diagEst(JtJv, len(m), k=self.k) + JtJdiag = estimate_diagonal(JtJv, len(m), k=self.k) JtJdiag = JtJdiag / max(JtJdiag) self.reg.wght = JtJdiag @@ -2537,33 +2520,20 @@ def __init__( normalization_method="maximum", **kwargs, ): - if "everyIter" in kwargs.keys(): - warnings.warn( - "'everyIter' property is 
deprecated and will be removed in SimPEG 0.20.0." - "Please use 'every_iteration'.", - stacklevel=2, + # Raise errors on deprecated arguments + if (key := "everyIter") in kwargs.keys(): + raise TypeError( + f"'{key}' property has been removed. Please use 'every_iteration'.", ) - every_iteration = kwargs.pop("everyIter") - - if "threshold" in kwargs.keys(): - warnings.warn( - "'threshold' property is deprecated and will be removed in SimPEG 0.20.0." - "Please use 'threshold_value'.", - stacklevel=2, + if (key := "threshold") in kwargs.keys(): + raise TypeError( + f"'{key}' property has been removed. Please use 'threshold_value'.", ) - threshold_value = kwargs.pop("threshold") - - if "normalization" in kwargs.keys(): - warnings.warn( - "'normalization' property is deprecated and will be removed in SimPEG 0.20.0." + if (key := "normalization") in kwargs.keys(): + raise TypeError( + f"'{key}' property has been removed. " "Please define normalization using 'normalization_method'.", - stacklevel=2, ) - normalization_method = kwargs.pop("normalization") - if normalization_method is True: - normalization_method = "maximum" - else: - normalization_method = None super().__init__(**kwargs) @@ -2590,7 +2560,11 @@ def every_iteration(self, value): self._every_iteration = validate_type("every_iteration", value, bool) everyIter = deprecate_property( - every_iteration, "everyIter", "every_iteration", removal_version="0.20.0" + every_iteration, + "everyIter", + "every_iteration", + removal_version="0.20.0", + error=True, ) @property @@ -2619,7 +2593,11 @@ def threshold_value(self, value): self._threshold_value = validate_float("threshold_value", value, min_val=0.0) threshold = deprecate_property( - threshold_value, "threshold", "threshold_value", removal_version="0.20.0" + threshold_value, + "threshold", + "threshold_value", + removal_version="0.20.0", + error=True, ) @property @@ -2669,18 +2647,6 @@ def normalization_method(self): def normalization_method(self, value): if value is 
None: self._normalization_method = value - - elif isinstance(value, bool): - warnings.warn( - "Boolean type for 'normalization_method' is deprecated and will be removed in 0.20.0." - "Please use None, 'maximum' or 'minimum'.", - stacklevel=2, - ) - if value: - self._normalization_method = "maximum" - else: - self._normalization_method = None - else: self._normalization_method = validate_string( "normalization_method", value, string_list=["minimum", "maximum"] @@ -2691,6 +2657,7 @@ def normalization_method(self, value): "normalization", "normalization_method", removal_version="0.20.0", + error=True, ) def initialize(self): diff --git a/SimPEG/directives/pgi_directives.py b/SimPEG/directives/pgi_directives.py index db332ff9bb..0cc141f026 100644 --- a/SimPEG/directives/pgi_directives.py +++ b/SimPEG/directives/pgi_directives.py @@ -12,7 +12,6 @@ from ..regularization import ( PGI, PGIsmallness, - PGIwithRelationships, SmoothnessFirstOrder, SparseSmoothness, ) @@ -363,12 +362,7 @@ def initialize(self): if getattr(self.reg.objfcts[0], "objfcts", None) is not None: # Find the petrosmallness terms in a two-levels combo-regularization. 
petrosmallness = np.where( - np.r_[ - [ - isinstance(regpart, (PGI, PGIwithRelationships)) - for regpart in self.reg.objfcts - ] - ] + np.r_[[isinstance(regpart, PGI) for regpart in self.reg.objfcts]] )[0][0] self.petrosmallness = petrosmallness @@ -413,7 +407,7 @@ def initialize(self): @property def DMtarget(self): if getattr(self, "_DMtarget", None) is None: - self.phi_d_target = 0.5 * self.invProb.dmisfit.survey.nD + self.phi_d_target = self.invProb.dmisfit.survey.nD self._DMtarget = self.chifact * self.phi_d_target return self._DMtarget diff --git a/SimPEG/directives/sim_directives.py b/SimPEG/directives/sim_directives.py index 5b781fe97a..0a3464717d 100644 --- a/SimPEG/directives/sim_directives.py +++ b/SimPEG/directives/sim_directives.py @@ -248,7 +248,7 @@ def initialize(self): if self.seed is not None: np.random.seed(self.seed) - if self.debug: + if self.verbose: print("Calculating the beta0 parameter.") m = self.invProb.model @@ -305,7 +305,7 @@ def target(self): if getattr(self, "_target", None) is None: nD = np.array([survey.nD for survey in self.survey]) - self._target = nD * 0.5 * self.chifact_target + self._target = nD * self.chifact_target return self._target @@ -362,7 +362,7 @@ def target(self): nD += [survey.nD] nD = np.array(nD) - self._target = nD * 0.5 * self.chifact_target + self._target = nD * self.chifact_target return self._target diff --git a/SimPEG/electromagnetics/__init__.py b/SimPEG/electromagnetics/__init__.py index c83bedd42b..5deacd0f50 100644 --- a/SimPEG/electromagnetics/__init__.py +++ b/SimPEG/electromagnetics/__init__.py @@ -32,6 +32,7 @@ analytics.getCasingBzMagDipole """ + from scipy.constants import mu_0, epsilon_0 from . 
import time_domain diff --git a/SimPEG/electromagnetics/frequency_domain/__init__.py b/SimPEG/electromagnetics/frequency_domain/__init__.py index b6bf19bf60..00505f0115 100644 --- a/SimPEG/electromagnetics/frequency_domain/__init__.py +++ b/SimPEG/electromagnetics/frequency_domain/__init__.py @@ -77,6 +77,7 @@ fields.FieldsFDEM """ + from .survey import Survey from . import sources from . import receivers diff --git a/SimPEG/electromagnetics/frequency_domain/receivers.py b/SimPEG/electromagnetics/frequency_domain/receivers.py index b6423da9a0..a28c840ff3 100644 --- a/SimPEG/electromagnetics/frequency_domain/receivers.py +++ b/SimPEG/electromagnetics/frequency_domain/receivers.py @@ -1,6 +1,5 @@ from ... import survey from ...utils import validate_string, validate_type, validate_direction -import warnings from discretize.utils import Zero @@ -33,15 +32,8 @@ def __init__( use_source_receiver_offset=False, **kwargs, ): - proj = kwargs.pop("projComp", None) - if proj is not None: - warnings.warn( - "'projComp' overrides the 'orientation' property which automatically" - " handles the projection from the mesh the receivers!!! 
" - "'projComp' is deprecated and will be removed in SimPEG 0.19.0.", - stacklevel=2, - ) - self.projComp = proj + if (key := "projComp") in kwargs.keys(): + raise TypeError(f"'{key}' property has been removed.") self.orientation = orientation self.component = component diff --git a/SimPEG/electromagnetics/frequency_domain/simulation.py b/SimPEG/electromagnetics/frequency_domain/simulation.py index 5ab3c57f63..5cee177ec6 100644 --- a/SimPEG/electromagnetics/frequency_domain/simulation.py +++ b/SimPEG/electromagnetics/frequency_domain/simulation.py @@ -62,7 +62,13 @@ class BaseFDEMSimulation(BaseEMSimulation): # permittivity, permittivityMap, permittivityDeriv = props.Invertible("Dielectric permittivity (F/m)") def __init__( - self, mesh, survey=None, forward_only=False, permittivity=None, **kwargs + self, + mesh, + survey=None, + forward_only=False, + permittivity=None, + storeJ=False, + **kwargs ): super().__init__(mesh=mesh, survey=survey, **kwargs) self.forward_only = forward_only @@ -72,6 +78,7 @@ def __init__( stacklevel=2, ) self.permittivity = permittivity + self.storeJ = storeJ @property def survey(self): @@ -90,6 +97,21 @@ def survey(self, value): if value is not None: value = validate_type("survey", value, Survey, cast=False) self._survey = value + self._survey = value + + @property + def storeJ(self): + """Whether to store the sensitivity matrix + + Returns + ------- + bool + """ + return self._storeJ + + @storeJ.setter + def storeJ(self, value): + self._storeJ = validate_type("storeJ", value, bool) @property def forward_only(self): @@ -243,6 +265,86 @@ def Jtvec(self, m, v, f=None): return mkvc(Jtv) + def getJ(self, m, f=None): + """ + Method to form full J given a model m + + :param numpy.ndarray m: inversion model (nP,) + :param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM u: fields object + :rtype: numpy.ndarray + :return: J (ndata, nP) + """ + self.model = m + + if getattr(self, "_Jmatrix", None) is None: + if f is None: + f = 
self.fields(m) + + Ainv = self.Ainv + m_size = self.model.size + + Jmatrix = np.zeros((self.survey.nD, m_size)) + + data = Data(self.survey) + + for A_i, freq in zip(Ainv, self.survey.frequencies): + for src in self.survey.get_sources_by_frequency(freq): + u_src = f[src, self._solutionType] + + for rx in src.receiver_list: + v = np.eye(rx.nD, dtype=float) + + df_duT, df_dmT = rx.evalDeriv( + src, self.mesh, f, v=v, adjoint=True + ) + + df_duT = np.hstack([df_duT]) + ATinvdf_duT = A_i * df_duT + dA_dmT = self.getADeriv(freq, u_src, ATinvdf_duT, adjoint=True) + dRHS_dmT = self.getRHSDeriv( + freq, src, ATinvdf_duT, adjoint=True + ) + du_dmT = -dA_dmT + + if not isinstance(dRHS_dmT, Zero): + du_dmT += dRHS_dmT + if not isinstance(df_dmT[0], Zero): + du_dmT += np.hstack(df_dmT) + + block = np.array(du_dmT, dtype=complex).real.T + data_inds = data.index_dictionary[src][rx] + Jmatrix[data_inds] = block + + self._Jmatrix = Jmatrix + + return self._Jmatrix + + def getJtJdiag(self, m, W=None, f=None): + """ + Return the diagonal of JtJ + + :param numpy.ndarray m: inversion model (nP,) + :param numpy.ndarray W: vector of weights (ndata,) + :param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM u: fields object + :rtype: numpy.ndarray + :return: JtJ (nP,) + """ + self.model = m + + if getattr(self, "_gtgdiag", None) is None: + J = self.getJ(m, f=f) + + if W is None: + W = np.ones(J.shape[0]) + else: + W = W.diagonal() ** 2 + + diag = np.einsum("i,ij,ij->j", W, J, J) + + self._gtgdiag = diag + + return self._gtgdiag + # @profile def getSourceTerm(self, freq): """ @@ -275,6 +377,11 @@ def getSourceTerm(self, freq): i = ii return s_m, s_e + @property + def deleteTheseOnModelUpdate(self): + toDelete = super().deleteTheseOnModelUpdate + return toDelete + ["_Jmatrix", "_gtgdiag"] + ############################################################################### # E-B Formulation # diff --git a/SimPEG/electromagnetics/frequency_domain/sources.py 
b/SimPEG/electromagnetics/frequency_domain/sources.py index 61fac07c95..cd8192b187 100644 --- a/SimPEG/electromagnetics/frequency_domain/sources.py +++ b/SimPEG/electromagnetics/frequency_domain/sources.py @@ -658,7 +658,6 @@ def s_eDeriv(self, simulation, v, adjoint=False): class MagDipole_Bfield(MagDipole): - """ Point magnetic dipole source calculated with the analytic solution for the fields from a magnetic dipole. No discrete curl is taken, so the magnetic @@ -785,11 +784,12 @@ def __init__( **kwargs, ): kwargs.pop("moment", None) - N = kwargs.pop("N", None) - if N is not None: - self.N = N - else: - self.n_turns = n_turns + + # Raise error on deprecated arguments + if (key := "N") in kwargs.keys(): + raise TypeError(f"'{key}' property has been removed. Please use 'n_turns'.") + self.n_turns = n_turns + super().__init__( receiver_list=receiver_list, frequency=frequency, @@ -886,7 +886,9 @@ def _srcFct(self, obsLoc, coordinates="cartesian"): ) return self.n_turns * self._loop.vector_potential(obsLoc, coordinates) - N = deprecate_property(n_turns, "N", "n_turns", removal_version="0.19.0") + N = deprecate_property( + n_turns, "N", "n_turns", removal_version="0.19.0", error=True + ) class PrimSecSigma(BaseFDEMSrc): diff --git a/SimPEG/electromagnetics/natural_source/receivers.py b/SimPEG/electromagnetics/natural_source/receivers.py index e7c9845630..65aa483dc3 100644 --- a/SimPEG/electromagnetics/natural_source/receivers.py +++ b/SimPEG/electromagnetics/natural_source/receivers.py @@ -1,4 +1,4 @@ -from ...utils.code_utils import deprecate_class, validate_string +from ...utils.code_utils import validate_string import numpy as np from scipy.constants import mu_0 @@ -294,18 +294,25 @@ def _eval_impedance_deriv(self, src, mesh, f, du_dm_v=None, v=None, adjoint=Fals # Work backwards! 
gtop_v = v / bot gbot_v = -imp * v / bot + n_d = self.nD if mesh.dim == 3: - ghx_v = np.c_[hy[:, 1], -hy[:, 0]] * gbot_v[:, None] - ghy_v = np.c_[-hx[:, 1], hx[:, 0]] * gbot_v[:, None] - ge_v = np.c_[h[:, 1], -h[:, 0]] * gtop_v[:, None] - gh_v = np.c_[-e[:, 1], e[:, 0]] * gtop_v[:, None] + ghx_v = np.c_[hy[:, 1], -hy[:, 0]] * gbot_v[..., None] + ghy_v = np.c_[-hx[:, 1], hx[:, 0]] * gbot_v[..., None] + ge_v = np.c_[h[:, 1], -h[:, 0]] * gtop_v[..., None] + gh_v = np.c_[-e[:, 1], e[:, 0]] * gtop_v[..., None] if self.orientation[1] == "x": ghy_v += gh_v else: ghx_v -= gh_v + if v.ndim == 2: + # collapse into a long list of n_d vectors + ghx_v = ghx_v.reshape((n_d, -1)) + ghy_v = ghy_v.reshape((n_d, -1)) + ge_v = ge_v.reshape((n_d, -1)) + gh_v = Phx.T @ ghx_v + Phy.T @ ghy_v ge_v = Pe.T @ ge_v else: @@ -515,8 +522,9 @@ def _eval_tipper_deriv(self, src, mesh, f, du_dm_v=None, v=None, adjoint=False): if adjoint: # Work backwards! - gtop_v = (v / bot)[:, None] - gbot_v = (-imp * v / bot)[:, None] + gtop_v = (v / bot)[..., None] + gbot_v = (-imp * v / bot)[..., None] + n_d = self.nD ghx_v = np.c_[hy[:, 1], -hy[:, 0]] * gbot_v ghy_v = np.c_[-hx[:, 1], hx[:, 0]] * gbot_v @@ -528,6 +536,12 @@ def _eval_tipper_deriv(self, src, mesh, f, du_dm_v=None, v=None, adjoint=False): else: ghx_v += gh_v + if v.ndim == 2: + # collapse into a long list of n_d vectors + ghx_v = ghx_v.reshape((n_d, -1)) + ghy_v = ghy_v.reshape((n_d, -1)) + ghz_v = ghz_v.reshape((n_d, -1)) + gh_v = Phx.T @ ghx_v + Phy.T @ ghy_v + Phz.T @ ghz_v return f._hDeriv(src, None, gh_v, adjoint=True) @@ -613,23 +627,3 @@ def evalDeriv(self, src, mesh, f, du_dm_v=None, v=None, adjoint=False): if adjoint: return imp_deriv return getattr(imp_deriv, self.component) - - -############ -# Deprecated -############ - - -@deprecate_class(removal_version="0.19.0", error=True) -class Point_impedance1D(PointNaturalSource): - pass - - -@deprecate_class(removal_version="0.19.0", error=True) -class 
Point_impedance3D(PointNaturalSource): - pass - - -@deprecate_class(removal_version="0.19.0", error=True) -class Point_tipper3D(Point3DTipper): - pass diff --git a/SimPEG/electromagnetics/natural_source/utils/__init__.py b/SimPEG/electromagnetics/natural_source/utils/__init__.py index baae8201b5..79425e954f 100644 --- a/SimPEG/electromagnetics/natural_source/utils/__init__.py +++ b/SimPEG/electromagnetics/natural_source/utils/__init__.py @@ -5,6 +5,7 @@ NOTE: These utilities are not well test, use with care """ + from .solutions_1d import get1DEfields # Add the names of the functions from .analytic_1d import getEHfields, getImpedance from .data_utils import ( diff --git a/SimPEG/electromagnetics/natural_source/utils/analytic_1d.py b/SimPEG/electromagnetics/natural_source/utils/analytic_1d.py index 78de082377..ad50de476f 100644 --- a/SimPEG/electromagnetics/natural_source/utils/analytic_1d.py +++ b/SimPEG/electromagnetics/natural_source/utils/analytic_1d.py @@ -30,9 +30,9 @@ def getEHfields(m1d, sigma, freq, zd, scaleUD=True, scaleValue=1): # Initiate the propagation matrix, in the order down up. 
UDp = np.zeros((2, m1d.nC + 1), dtype=complex) - UDp[ - 1, 0 - ] = scaleValue # Set the wave amplitude as 1 into the half-space at the bottom of the mesh + UDp[1, 0] = ( + scaleValue # Set the wave amplitude as 1 into the half-space at the bottom of the mesh + ) # Loop over all the layers, starting at the bottom layer for lnr, h in enumerate(m1d.h[0]): # lnr-number of layer, h-thickness of the layer # Calculate diff --git a/SimPEG/electromagnetics/static/induced_polarization/__init__.py b/SimPEG/electromagnetics/static/induced_polarization/__init__.py index 6383f4e05c..b421d9afdb 100644 --- a/SimPEG/electromagnetics/static/induced_polarization/__init__.py +++ b/SimPEG/electromagnetics/static/induced_polarization/__init__.py @@ -20,6 +20,7 @@ The ``induced_polarization`` module makes use of receivers, sources, and surveys defined in the ``SimPEG.electromagnetics.static.resistivity`` module. """ + from .simulation import ( Simulation3DCellCentered, Simulation3DNodal, diff --git a/SimPEG/electromagnetics/static/induced_polarization/run.py b/SimPEG/electromagnetics/static/induced_polarization/run.py index 550579a319..a7372118b8 100644 --- a/SimPEG/electromagnetics/static/induced_polarization/run.py +++ b/SimPEG/electromagnetics/static/induced_polarization/run.py @@ -37,7 +37,7 @@ def run_inversion( regmap = maps.IdentityMap(nP=int(actind.sum())) # Related to inversion if use_sensitivity_weight: - reg = regularization.Sparse(mesh, indActive=actind, mapping=regmap) + reg = regularization.Sparse(mesh, active_cells=actind, mapping=regmap) reg.alpha_s = alpha_s reg.alpha_x = alpha_x reg.alpha_y = alpha_y @@ -45,9 +45,8 @@ def run_inversion( else: reg = regularization.Sparse( mesh, - indActive=actind, + active_cells=actind, mapping=regmap, - cell_weights=mesh.cell_volumes[actind], ) reg.alpha_s = alpha_s reg.alpha_x = alpha_x diff --git a/SimPEG/electromagnetics/static/resistivity/IODC.py b/SimPEG/electromagnetics/static/resistivity/IODC.py index 2fc42de988..cc47403d7e 
100644 --- a/SimPEG/electromagnetics/static/resistivity/IODC.py +++ b/SimPEG/electromagnetics/static/resistivity/IODC.py @@ -10,7 +10,7 @@ from ....utils import ( sdiag, - uniqueRows, + unique_rows, plot2Ddata, validate_type, validate_integer, @@ -728,12 +728,6 @@ def geometric_factor(self, survey): G = geometric_factor(survey, space_type=self.space_type) return G - def from_ambn_locations_to_survey(self, *args, **kwargs): - raise NotImplementedError( - "from_ambn_locations_to_survey has been renamed to " - "from_abmn_locations_to_survey. It will be removed in a future version 0.17.0 of simpeg", - ) - def from_abmn_locations_to_survey( self, a_locations, @@ -767,8 +761,8 @@ def from_abmn_locations_to_survey( if times_ip is not None: self.times_ip = times_ip - uniqSrc = uniqueRows(np.c_[self.a_locations, self.b_locations]) - uniqElec = uniqueRows( + uniqSrc = unique_rows(np.c_[self.a_locations, self.b_locations]) + uniqElec = unique_rows( np.vstack( (self.a_locations, self.b_locations, self.m_locations, self.n_locations) ) diff --git a/SimPEG/electromagnetics/static/resistivity/__init__.py b/SimPEG/electromagnetics/static/resistivity/__init__.py index 4e0409892b..9171c0cab9 100644 --- a/SimPEG/electromagnetics/static/resistivity/__init__.py +++ b/SimPEG/electromagnetics/static/resistivity/__init__.py @@ -71,6 +71,7 @@ sources.BaseSrc receivers.BaseRx """ + from .simulation import Simulation3DCellCentered, Simulation3DNodal from .simulation_2d import Simulation2DCellCentered, Simulation2DNodal from .simulation_1d import Simulation1DLayers diff --git a/SimPEG/electromagnetics/static/resistivity/run.py b/SimPEG/electromagnetics/static/resistivity/run.py index a9a04dc3e0..0ee948ea48 100644 --- a/SimPEG/electromagnetics/static/resistivity/run.py +++ b/SimPEG/electromagnetics/static/resistivity/run.py @@ -37,14 +37,14 @@ def run_inversion( regmap = maps.IdentityMap(nP=int(actind.sum())) # Related to inversion if use_sensitivity_weight: - reg = regularization.Sparse(mesh, 
indActive=actind, mapping=regmap) + reg = regularization.Sparse(mesh, active_cells=actind, mapping=regmap) reg.alpha_s = alpha_s reg.alpha_x = alpha_x reg.alpha_y = alpha_y reg.alpha_z = alpha_z else: reg = regularization.WeightedLeastSquares( - mesh, indActive=actind, mapping=regmap + mesh, active_cells=actind, mapping=regmap ) reg.alpha_s = alpha_s reg.alpha_x = alpha_x diff --git a/SimPEG/electromagnetics/static/resistivity/survey.py b/SimPEG/electromagnetics/static/resistivity/survey.py index ecbbe58c37..cb02a32e65 100644 --- a/SimPEG/electromagnetics/static/resistivity/survey.py +++ b/SimPEG/electromagnetics/static/resistivity/survey.py @@ -1,6 +1,6 @@ import numpy as np -from ....utils.code_utils import deprecate_property, validate_string +from ....utils.code_utils import validate_string from ....survey import BaseSurvey from ..utils import drapeTopotoLoc @@ -152,14 +152,6 @@ def unique_electrode_locations(self): loc_n = self.locations_n return np.unique(np.vstack((loc_a, loc_b, loc_m, loc_n)), axis=0) - electrode_locations = deprecate_property( - unique_electrode_locations, - "electrode_locations", - new_name="unique_electrode_locations", - removal_version="0.17.0", - error=True, - ) - @property def source_locations(self): """ diff --git a/SimPEG/electromagnetics/static/spectral_induced_polarization/__init__.py b/SimPEG/electromagnetics/static/spectral_induced_polarization/__init__.py index bf5c81630b..9bdeea56be 100644 --- a/SimPEG/electromagnetics/static/spectral_induced_polarization/__init__.py +++ b/SimPEG/electromagnetics/static/spectral_induced_polarization/__init__.py @@ -60,6 +60,7 @@ simulation_2d.BaseSIPSimulation2D """ + from ....data import Data from .simulation import Simulation3DCellCentered, Simulation3DNodal from .simulation_2d import Simulation2DCellCentered, Simulation2DNodal diff --git a/SimPEG/electromagnetics/static/spectral_induced_polarization/run.py b/SimPEG/electromagnetics/static/spectral_induced_polarization/run.py index 
18b766b3e1..101385a70b 100644 --- a/SimPEG/electromagnetics/static/spectral_induced_polarization/run.py +++ b/SimPEG/electromagnetics/static/spectral_induced_polarization/run.py @@ -142,9 +142,15 @@ def run_inversion( m_lower = np.r_[eta_lower, tau_lower, c_lower] # Set up regularization - reg_eta = regularization.Simple(mesh, mapping=wires.eta, indActive=actind) - reg_tau = regularization.Simple(mesh, mapping=wires.tau, indActive=actind) - reg_c = regularization.Simple(mesh, mapping=wires.c, indActive=actind) + reg_eta = regularization.WeightedLeastSquares( + mesh, mapping=wires.eta, active_cells=actind + ) + reg_tau = regularization.WeightedLeastSquares( + mesh, mapping=wires.tau, active_cells=actind + ) + reg_c = regularization.WeightedLeastSquares( + mesh, mapping=wires.c, active_cells=actind + ) # Todo: diff --git a/SimPEG/electromagnetics/static/utils/__init__.py b/SimPEG/electromagnetics/static/utils/__init__.py index 1e5ac6adbb..5245be1490 100644 --- a/SimPEG/electromagnetics/static/utils/__init__.py +++ b/SimPEG/electromagnetics/static/utils/__init__.py @@ -46,36 +46,25 @@ closestPointsGrid """ + from .static_utils import ( electrode_separations, - source_receiver_midpoints, pseudo_locations, geometric_factor, apparent_resistivity_from_voltage, - apparent_resistivity, plot_pseudosection, generate_dcip_survey, - generate_dcip_survey_line, - gen_DCIPsurvey, generate_dcip_sources_line, generate_survey_from_abmn_locations, - writeUBC_DCobs, - writeUBC_DClocs, convert_survey_3d_to_2d_lines, - convertObs_DC3D_to_2D, - readUBC_DC2Dpre, - readUBC_DC3Dobs, xy_2_lineID, r_unit, - getSrc_locs, gettopoCC, drapeTopotoLoc, genTopography, closestPointsGrid, gen_3d_survey_from_2d_lines, plot_1d_layer_model, - plot_layer, - plot_pseudoSection, ) # Import if user has plotly diff --git a/SimPEG/electromagnetics/static/utils/static_utils.py b/SimPEG/electromagnetics/static/utils/static_utils.py index c52aaf81d2..573199e260 100644 --- 
a/SimPEG/electromagnetics/static/utils/static_utils.py +++ b/SimPEG/electromagnetics/static/utils/static_utils.py @@ -10,7 +10,6 @@ from .. import resistivity as dc from ....utils import ( mkvc, - surface2ind_topo, model_builder, define_plane_from_points, ) @@ -23,7 +22,6 @@ from ....utils.plot_utils import plot_1d_layer_model # noqa: F401 -from ....utils.code_utils import deprecate_method try: import plotly.graph_objects as grapho @@ -1627,7 +1625,7 @@ def drapeTopotoLoc(mesh, pts, ind_active=None, option="top", topo=None, **kwargs raise ValueError("Unsupported mesh dimension") if ind_active is None: - ind_active = surface2ind_topo(mesh, topo) + ind_active = discretize.utils.active_from_xyz(mesh, topo) if mesh._meshType == "TENSOR": meshtemp, topoCC = gettopoCC(mesh, ind_active, option=option) @@ -1816,170 +1814,3 @@ def gen_3d_survey_from_2d_lines( line_inds=line_inds, ) return IO_3d, survey_3d - - -############ -# Deprecated -############ - - -def plot_pseudoSection( - data, - ax=None, - survey_type="dipole-dipole", - data_type="appConductivity", - space_type="half-space", - clim=None, - scale="linear", - sameratio=True, - pcolor_opts=None, - data_location=False, - dobs=None, - dim=2, -): - raise TypeError( - "The plot_pseudoSection method has been removed. Please use " - "plot_pseudosection instead." - ) - - -def apparent_resistivity( - data_object, - survey_type=None, - space_type="half space", - dobs=None, - eps=1e-10, - **kwargs, -): - raise TypeError( - "The apparent_resistivity method has been removed. Please use " - "apparent_resistivity_from_voltage instead." - ) - - -source_receiver_midpoints = deprecate_method( - pseudo_locations, "source_receiver_midpoints", "0.17.0", error=True -) - - -def plot_layer(rho, mesh, **kwargs): - raise NotImplementedError( - "The plot_layer method has been deprecated. Please use " - "plot_1d_layer_model instead. 
This will be removed in version" - " 0.17.0 of SimPEG", - ) - - -def convertObs_DC3D_to_2D(survey, lineID, flag="local"): - raise TypeError( - "The convertObs_DC3D_to_2D method has been removed. Please use " - "convert_3d_survey_to_2d." - ) - - -def getSrc_locs(survey): - raise NotImplementedError( - "The getSrc_locs method has been deprecated. Source " - "locations are now computed as a method of the survey " - "class. Please use Survey.source_locations(). This method " - " will be removed in version 0.17.0 of SimPEG", - ) - - -def writeUBC_DCobs( - fileName, - data, - dim, - format_type, - survey_type="dipole-dipole", - ip_type=0, - comment_lines="", -): - # """ - # Write UBC GIF DCIP 2D or 3D observation file - - # Input: - # :param str fileName: including path where the file is written out - # :param SimPEG.Data data: DC data object - # :param int dim: either 2 | 3 - # :param str format_type: either 'surface' | 'general' | 'simple' - # :param str survey_type: 'dipole-dipole' | 'pole-dipole' | - # 'dipole-pole' | 'pole-pole' | 'gradient' - - # Output: - # :return: UBC2D-Data file - # :rtype: file - # """ - - raise NotImplementedError( - "The writeUBC_DCobs method has been deprecated. Please use " - "write_dcip2d_ubc or write_dcip3d_ubc instead. These are imported " - "from SimPEG.utils.io_utils. 
This function will be removed in version" - " 0.17.0 of SimPEG", - ) - - -def writeUBC_DClocs( - fileName, - dc_survey, - dim, - format_type, - survey_type="dipole-dipole", - ip_type=0, - comment_lines="", -): - # """ - # Write UBC GIF DCIP 2D or 3D locations file - - # Input: - # :param str fileName: including path where the file is written out - # :param SimPEG.electromagnetics.static.resistivity.Survey dc_survey: DC survey object - # :param int dim: either 2 | 3 - # :param str survey_type: either 'SURFACE' | 'GENERAL' - - # Output: - # :rtype: file - # :return: UBC 2/3D-locations file - # """ - - raise NotImplementedError( - "The writeUBC_DClocs method has been deprecated. Please use " - "write_dcip2d_ubc or write_dcip3d_ubc instead. These are imported " - "from SimPEG.utils.io_utils. This function will be removed in version" - " 0.17.0 of SimPEG", - FutureWarning, - ) - - -def readUBC_DC2Dpre(fileName): - raise NotImplementedError( - "The readUBC_DC2Dpre method has been deprecated. Please use " - "read_dcip2d_ubc instead. This is imported " - "from SimPEG.utils.io_utils. This function will be removed in version" - " 0.17.0 of SimPEG", - ) - - -def readUBC_DC3Dobs(fileName, data_type="volt"): - raise NotImplementedError( - "The readUBC_DC3Dobs method has been deprecated. Please use " - "read_dcip3d_ubc instead. This is imported " - "from SimPEG.utils.io_utils. This function will be removed in version" - " 0.17.0 of SimPEG", - ) - - -gen_DCIPsurvey = deprecate_method( - generate_dcip_survey, "gen_DCIPsurvey", removal_version="0.17.0", error=True -) - - -def generate_dcip_survey_line( - survey_type, data_type, endl, topo, ds, dh, n, dim_flag="2.5D", sources_only=False -): - raise NotImplementedError( - "The gen_dcip_survey_line method has been deprecated. Please use " - "generate_dcip_sources_line instead. 
This will be removed in version" - " 0.17.0 of SimPEG", - FutureWarning, - ) diff --git a/SimPEG/electromagnetics/time_domain/__init__.py b/SimPEG/electromagnetics/time_domain/__init__.py index f4b57dbd05..859c702f8b 100644 --- a/SimPEG/electromagnetics/time_domain/__init__.py +++ b/SimPEG/electromagnetics/time_domain/__init__.py @@ -93,6 +93,7 @@ fields.FieldsDerivativesHJ """ + from .simulation import ( Simulation3DMagneticFluxDensity, Simulation3DMagneticFluxDensityFaceEdgeConductivity, diff --git a/SimPEG/electromagnetics/time_domain/receivers.py b/SimPEG/electromagnetics/time_domain/receivers.py index f8dd65b4c1..9d6e46ccbb 100644 --- a/SimPEG/electromagnetics/time_domain/receivers.py +++ b/SimPEG/electromagnetics/time_domain/receivers.py @@ -1,9 +1,8 @@ import scipy.sparse as sp -from ...utils import mkvc, validate_type, validate_direction, validate_float +from ...utils import mkvc, validate_type, validate_direction from discretize.utils import Zero from ...survey import BaseTimeRx -import warnings class BaseRx(BaseTimeRx): @@ -25,21 +24,10 @@ def __init__( times, orientation="z", use_source_receiver_offset=False, - bw_cutoff_frequency=3e5, - bw_power=0.0, - lp_cutoff_frequency=2.1e5, - lp_power=0.0, - **kwargs + **kwargs, ): - proj = kwargs.pop("projComp", None) - if proj is not None: - warnings.warn( - "'projComp' overrides the 'orientation' property which automatically" - " handles the projection from the mesh the receivers!!! " - "'projComp' is deprecated and will be removed in SimPEG 0.19.0.", - stacklevel=2, - ) - self.projComp = proj + if (key := "projComp") in kwargs.keys(): + raise TypeError(f"'{key}' property has been removed.") if locations is None: raise AttributeError("'locations' are required. 
Cannot be 'None'") @@ -49,11 +37,6 @@ def __init__( self.orientation = orientation self.use_source_receiver_offset = use_source_receiver_offset - self.bw_cutoff_frequency = bw_cutoff_frequency - self.bw_power = bw_power - self.lp_cutoff_frequency = lp_cutoff_frequency - self.lp_power = lp_power - super().__init__(locations=locations, times=times, **kwargs) @property @@ -93,70 +76,6 @@ def use_source_receiver_offset(self, val): "use_source_receiver_offset", val, bool ) - @property - def bw_cutoff_frequency(self): - """Butter worth low pass filter - - Returns - ------- - numpy.ndarray - Butter worth low pass filter - """ - return self._bw_cutoff_frequency - - @bw_cutoff_frequency.setter - def bw_cutoff_frequency(self, var): - self._bw_cutoff_frequency = validate_float( - "bw_cutoff_frequency", var, min_val=0.0 - ) - - @property - def lp_cutoff_frequency(self): - """Low pass filter - - Returns - ------- - numpy.ndarray - Low pass filter - """ - return self._lp_cutoff_frequency - - @lp_cutoff_frequency.setter - def lp_cutoff_frequency(self, var): - self._lp_cutoff_frequency = validate_float( - "lp_cutoff_frequency", var, min_val=0.0 - ) - - @property - def bw_power(self): - """Butter worth low pass filter - - Returns - ------- - numpy.ndarray - Butter worth low pass filter - """ - return self._bw_power - - @bw_power.setter - def bw_power(self, var): - self._bw_power = validate_float("bw_power", var, min_val=0.0, max_val=2) - - @property - def lp_power(self): - """Low pass filter - - Returns - ------- - numpy.ndarray - Low pass filter - """ - return self._lp_power - - @lp_power.setter - def lp_power(self, var): - self._lp_power = validate_float("lp_power", var, min_val=0.0, max_val=0.99999) - def getSpatialP(self, mesh, f): """Get spatial projection matrix from mesh to receivers. 
diff --git a/SimPEG/electromagnetics/time_domain/simulation_1d.py b/SimPEG/electromagnetics/time_domain/simulation_1d.py index 2659cd1e38..55a9eb5b99 100644 --- a/SimPEG/electromagnetics/time_domain/simulation_1d.py +++ b/SimPEG/electromagnetics/time_domain/simulation_1d.py @@ -156,9 +156,9 @@ def _compute_coefficients(self): def func(t, i): out = np.zeros_like(t) t = t.copy() - t[ - (t > 0.0) & (t <= t_spline_points.min()) - ] = t_spline_points.min() # constant at very low ts + t[(t > 0.0) & (t <= t_spline_points.min())] = ( + t_spline_points.min() + ) # constant at very low ts out[t > 0.0] = splines[i](np.log(t[t > 0.0])) / t[t > 0.0] return out diff --git a/SimPEG/electromagnetics/time_domain/sources.py b/SimPEG/electromagnetics/time_domain/sources.py index 1a77bf8650..7c45588040 100644 --- a/SimPEG/electromagnetics/time_domain/sources.py +++ b/SimPEG/electromagnetics/time_domain/sources.py @@ -140,33 +140,6 @@ def eval_deriv(self, time): """ raise NotImplementedError # needed for E-formulation - ########################## - # Deprecated - ########################## - hasInitialFields = deprecate_property( - has_initial_fields, - "hasInitialFields", - new_name="has_initial_fields", - removal_version="0.17.0", - error=True, - ) - - offTime = deprecate_property( - off_time, - "offTime", - new_name="off_time", - removal_version="0.17.0", - error=True, - ) - - eps = deprecate_property( - epsilon, - "eps", - new_name="epsilon", - removal_version="0.17.0", - error=True, - ) - class StepOffWaveform(BaseWaveform): """ @@ -317,14 +290,6 @@ def waveform_function(self, value): def eval(self, time): # noqa: A003 return self.waveform_function(time) - waveFct = deprecate_property( - waveform_function, - "waveFct", - new_name="waveform_function", - removal_version="0.17.0", - error=True, - ) - class VTEMWaveform(BaseWaveform): """ @@ -429,26 +394,6 @@ def eval_deriv(self, time): def time_nodes(self): return np.r_[0, self.peak_time, self.off_time] - ########################## - 
# Deprecated - ########################## - - peakTime = deprecate_property( - peak_time, - "peakTime", - new_name="peak_time", - removal_version="0.17.0", - error=True, - ) - - a = deprecate_property( - ramp_on_rate, - "a", - new_name="ramp_on_rate", - removal_version="0.17.0", - error=True, - ) - class TrapezoidWaveform(BaseWaveform): """ @@ -626,18 +571,6 @@ def peak_time(self, value): self._ramp_on = np.r_[self._ramp_on[0], value] self._ramp_off = np.r_[value, self._ramp_off[1]] - ########################## - # Deprecated - ########################## - - peakTime = deprecate_property( - peak_time, - "peakTime", - new_name="peak_time", - removal_version="0.17.0", - error=True, - ) - class QuarterSineRampOnWaveform(TrapezoidWaveform): """ @@ -1597,11 +1530,10 @@ def __init__( if "moment" in kwargs: kwargs.pop("moment") - N = kwargs.pop("N", None) - if N is not None: - self.N = N - else: - self.n_turns = n_turns + # Raise error on deprecated arguments + if (key := "N") in kwargs.keys(): + raise TypeError(f"'{key}' property has been removed. 
Please use 'n_turns'.") + self.n_turns = n_turns BaseTDEMSrc.__init__( self, receiver_list=receiver_list, location=location, moment=None, **kwargs @@ -1699,7 +1631,9 @@ def _srcFct(self, obsLoc, coordinates="cartesian"): ) return self.n_turns * self._loop.vector_potential(obsLoc, coordinates) - N = deprecate_property(n_turns, "N", "n_turns", removal_version="0.19.0") + N = deprecate_property( + n_turns, "N", "n_turns", removal_version="0.19.0", error=True + ) class LineCurrent(BaseTDEMSrc): diff --git a/SimPEG/electromagnetics/utils/__init__.py b/SimPEG/electromagnetics/utils/__init__.py index 25b3f24ac3..d8d4a6182c 100644 --- a/SimPEG/electromagnetics/utils/__init__.py +++ b/SimPEG/electromagnetics/utils/__init__.py @@ -30,6 +30,7 @@ convolve_with_waveform """ + from .waveform_utils import ( omega, k, diff --git a/SimPEG/electromagnetics/viscous_remanent_magnetization/__init__.py b/SimPEG/electromagnetics/viscous_remanent_magnetization/__init__.py index 56349b8aba..2f3f05c1f8 100644 --- a/SimPEG/electromagnetics/viscous_remanent_magnetization/__init__.py +++ b/SimPEG/electromagnetics/viscous_remanent_magnetization/__init__.py @@ -66,6 +66,7 @@ waveforms.BaseVRMWaveform """ + from . import receivers from . import sources from . 
import receivers as Rx diff --git a/SimPEG/electromagnetics/viscous_remanent_magnetization/simulation.py b/SimPEG/electromagnetics/viscous_remanent_magnetization/simulation.py index efb063fd5a..35ead8fd9b 100644 --- a/SimPEG/electromagnetics/viscous_remanent_magnetization/simulation.py +++ b/SimPEG/electromagnetics/viscous_remanent_magnetization/simulation.py @@ -767,9 +767,7 @@ def _getSubsetAcolumns(self, xyzc, xyzh, pp, qq, refFlag): xyzc[refFlag == qq, :] - xyzh[refFlag == qq, :] / 2 ) # Get bottom southwest corners of cells to be refined m = np.shape(xyzc_sub)[0] - xyzc_sub = np.kron( - xyzc_sub, np.ones((n**3, 1)) - ) # Kron for n**3 refined cells + xyzc_sub = np.kron(xyzc_sub, np.ones((n**3, 1))) # Kron for n**3 refined cells xyzh_sub = np.kron( xyzh_sub / n, np.ones((n**3, 1)) ) # Kron for n**3 refined cells with widths h/n diff --git a/SimPEG/electromagnetics/viscous_remanent_magnetization/sources.py b/SimPEG/electromagnetics/viscous_remanent_magnetization/sources.py index 34a660862e..db332af0a7 100644 --- a/SimPEG/electromagnetics/viscous_remanent_magnetization/sources.py +++ b/SimPEG/electromagnetics/viscous_remanent_magnetization/sources.py @@ -138,15 +138,9 @@ def getH0(self, xyz): + m[2] * (xyz[:, 2] - r0[2]) ) - hx0 = (1 / (4 * np.pi)) * ( - 3 * (xyz[:, 0] - r0[0]) * mdotr / r**5 - m[0] / r**3 - ) - hy0 = (1 / (4 * np.pi)) * ( - 3 * (xyz[:, 1] - r0[1]) * mdotr / r**5 - m[1] / r**3 - ) - hz0 = (1 / (4 * np.pi)) * ( - 3 * (xyz[:, 2] - r0[2]) * mdotr / r**5 - m[2] / r**3 - ) + hx0 = (1 / (4 * np.pi)) * (3 * (xyz[:, 0] - r0[0]) * mdotr / r**5 - m[0] / r**3) + hy0 = (1 / (4 * np.pi)) * (3 * (xyz[:, 1] - r0[1]) * mdotr / r**5 - m[1] / r**3) + hz0 = (1 / (4 * np.pi)) * (3 * (xyz[:, 2] - r0[2]) * mdotr / r**5 - m[2] / r**3) return np.c_[hx0, hy0, hz0] @@ -285,8 +279,7 @@ def getH0(self, xyz): (x1p / s) * (x3p * I / (2 * np.pi * s * np.sqrt(x3p**2 + (a + s) ** 2))) * ( - ((a**2 + x3p**2 + s**2) / (x3p**2 + (s - a) ** 2)) - * spec.ellipe(k) + ((a**2 + x3p**2 + 
s**2) / (x3p**2 + (s - a) ** 2)) * spec.ellipe(k) - spec.ellipk(k) ) ) @@ -294,8 +287,7 @@ def getH0(self, xyz): (x2p / s) * (x3p * I / (2 * np.pi * s * np.sqrt(x3p**2 + (a + s) ** 2))) * ( - ((a**2 + x3p**2 + s**2) / (x3p**2 + (s - a) ** 2)) - * spec.ellipe(k) + ((a**2 + x3p**2 + s**2) / (x3p**2 + (s - a) ** 2)) * spec.ellipe(k) - spec.ellipk(k) ) ) diff --git a/SimPEG/flow/richards/__init__.py b/SimPEG/flow/richards/__init__.py index b22a2ea880..ec638e997f 100644 --- a/SimPEG/flow/richards/__init__.py +++ b/SimPEG/flow/richards/__init__.py @@ -40,6 +40,7 @@ empirical.VanGenuchtenParams """ + from . import empirical from .survey import Survey from .simulation import SimulationNDCellCentered diff --git a/SimPEG/flow/richards/empirical.py b/SimPEG/flow/richards/empirical.py index edbf7361dd..83b38f33f0 100644 --- a/SimPEG/flow/richards/empirical.py +++ b/SimPEG/flow/richards/empirical.py @@ -570,9 +570,7 @@ def _derivKs(self, u): dKs_dm_p = P_p * self.KsDeriv dKs_dm_n = ( P_n - * utils.sdiag( - theta_e**I * ((1.0 - (1.0 - theta_e ** (1.0 / m)) ** m) ** 2) - ) + * utils.sdiag(theta_e**I * ((1.0 - (1.0 - theta_e ** (1.0 / m)) ** m) ** 2)) * self.KsDeriv ) return dKs_dm_p + dKs_dm_n diff --git a/SimPEG/maps.py b/SimPEG/maps.py index 88cf2073f6..ca42c8aadc 100644 --- a/SimPEG/maps.py +++ b/SimPEG/maps.py @@ -8,6 +8,7 @@ from scipy.interpolate import UnivariateSpline from scipy.constants import mu_0 from scipy.sparse import csr_matrix as csr +from scipy.special import expit, logit from discretize.tests import check_derivative from discretize import TensorMesh, CylindricalMesh @@ -1696,9 +1697,7 @@ def getQ(self, alpha): if alpha < 1.0: # oblate spheroid chi = np.sqrt((1.0 / alpha**2.0) - 1) return ( - 1.0 - / 2.0 - * (1 + 1.0 / (alpha**2.0 - 1) * (1.0 - np.arctan(chi) / chi)) + 1.0 / 2.0 * (1 + 1.0 / (alpha**2.0 - 1) * (1.0 - np.arctan(chi) / chi)) ) elif alpha > 1.0: # prolate spheroid chi = np.sqrt(1 - (1.0 / alpha**2.0)) @@ -2156,6 +2155,166 @@ def is_linear(self): 
return False +class LogisticSigmoidMap(IdentityMap): + r"""Mapping that computes the logistic sigmoid of the model parameters. + + Where :math:`\mathbf{m}` is a set of model parameters, ``LogisticSigmoidMap`` creates + a mapping :math:`\mathbf{u}(\mathbf{m})` that computes the logistic sigmoid + of every element in :math:`\mathbf{m}`; i.e.: + + .. math:: + \mathbf{u}(\mathbf{m}) = sigmoid(\mathbf{m}) = \frac{1}{1+\exp{-\mathbf{m}}} + + ``LogisticSigmoidMap`` transforms values onto the interval (0,1), but can optionally + be scaled and shifted to the interval (a,b). This can be useful for inversion + of data that varies over a log scale and bounded on some interval: + + .. math:: + \mathbf{u}(\mathbf{m}) = a + (b - a) \cdot sigmoid(\mathbf{m}) + + Parameters + ---------- + mesh : discretize.BaseMesh + The number of parameters accepted by the mapping is set to equal the number + of mesh cells. + nP : int + Set the number of parameters accepted by the mapping directly. Used if the + number of parameters is known. Used generally when the number of parameters + is not equal to the number of cells in a mesh. + lower_bound: float or (nP) numpy.ndarray + lower bound (a) for the transform. Default 0. Defined \in \mathbf{u} space. + upper_bound: float or (nP) numpy.ndarray + upper bound (b) for the transform. Default 1. Defined \in \mathbf{u} space. + + """ + + def __init__(self, mesh=None, nP=None, lower_bound=0, upper_bound=1, **kwargs): + super().__init__(mesh=mesh, nP=nP, **kwargs) + lower_bound = np.atleast_1d(lower_bound) + upper_bound = np.atleast_1d(upper_bound) + if self.nP != "*": + # check if lower bound and upper bound broadcast to nP + try: + np.broadcast_shapes(lower_bound.shape, (self.nP,)) + except ValueError as err: + raise ValueError( + f"Lower bound does not broadcast to the number of parameters. " + f"Lower bound shape is {lower_bound.shape} and tried against " + f"{self.nP} parameters." 
+ ) from err + try: + np.broadcast_shapes(upper_bound.shape, (self.nP,)) + except ValueError as err: + raise ValueError( + f"Upper bound does not broadcast to the number of parameters. " + f"Upper bound shape is {upper_bound.shape} and tried against " + f"{self.nP} parameters." + ) from err + # make sure lower and upper bound broadcast to each other... + try: + np.broadcast_shapes(lower_bound.shape, upper_bound.shape) + except ValueError as err: + raise ValueError( + f"Upper bound does not broadcast to the lower bound. " + f"Shapes {upper_bound.shape} and {lower_bound.shape} " + f"are incompatible with each other." + ) from err + + if np.any(lower_bound >= upper_bound): + raise ValueError( + "A lower bound is greater than or equal to the upper bound." + ) + + self._lower_bound = lower_bound + self._upper_bound = upper_bound + + @property + def lower_bound(self): + """The lower bound + + Returns + ------- + numpy.ndarray + """ + return self._lower_bound + + @property + def upper_bound(self): + """The upper bound + + Returns + ------- + numpy.ndarray + """ + return self._upper_bound + + def _transform(self, m): + return self.lower_bound + (self.upper_bound - self.lower_bound) * expit(mkvc(m)) + + def inverse(self, m): + r"""Apply the inverse of the mapping to an array. + + For the logistic sigmoid mapping :math:`\mathbf{u}(\mathbf{m})`, the + inverse mapping on a variable :math:`\mathbf{x}` is performed by taking + the log-odds of elements, i.e.: + + .. math:: + \mathbf{m} = \mathbf{u}^{-1}(\mathbf{x}) = logit(\mathbf{x}) = \log \frac{\mathbf{x}}{1 - \mathbf{x}} + + or scaled and translated to interval (a,b): + .. math:: + \mathbf{m} = logit(\frac{(\mathbf{x} - a)}{b-a}) + + Parameters + ---------- + m : numpy.ndarray + A set of input values + + Returns + ------- + numpy.ndarray + the inverse mapping to the elements in *m*; which in this case + is the log-odds function with scaled and shifted input. 
+ """ + return logit( + (mkvc(m) - self.lower_bound) / (self.upper_bound - self.lower_bound) + ) + + def deriv(self, m, v=None): + r"""Derivative of mapping with respect to the input parameters. + + For a mapping :math:`\mathbf{u}(\mathbf{m})` the derivative of the mapping with + respect to the model is a diagonal matrix of the form: + + .. math:: + \frac{\partial \mathbf{u}}{\partial \mathbf{m}} + = \textrm{diag} \big ( (b-a)\cdot sigmoid(\mathbf{m})\cdot(1-sigmoid(\mathbf{m})) \big ) + + Parameters + ---------- + m : (nP) numpy.ndarray + A vector representing a set of model parameters + v : (nP) numpy.ndarray + If not ``None``, the method returns the derivative times the vector *v* + + Returns + ------- + numpy.ndarray or scipy.sparse.csr_matrix + Derivative of the mapping with respect to the model parameters. If the + input argument *v* is not ``None``, the method returns the derivative times + the vector *v*. + """ + sigmoid = expit(mkvc(m)) + deriv = (self.upper_bound - self.lower_bound) * sigmoid * (1.0 - sigmoid) + if v is not None: + return deriv * v + return sdiag(deriv) + + @property + def is_linear(self): + return False + + class ChiMap(IdentityMap): r"""Mapping that computes the magnetic permeability given a set of magnetic susceptibilities. 
@@ -3112,9 +3271,11 @@ def indActive(self, value): def P(self): if getattr(self, "_P", None) is None: self._P = self.mesh2.get_interpolation_matrix( - self.mesh.cell_centers[self.indActive, :] - if self.indActive is not None - else self.mesh.cell_centers, + ( + self.mesh.cell_centers[self.indActive, :] + if self.indActive is not None + else self.mesh.cell_centers + ), "CC", zeros_outside=True, ) @@ -4768,15 +4929,19 @@ def x(self): if getattr(self, "_x", None) is None: if self.mesh.dim == 1: self._x = [ - self.mesh.cell_centers - if self.indActive is None - else self.mesh.cell_centers[self.indActive] + ( + self.mesh.cell_centers + if self.indActive is None + else self.mesh.cell_centers[self.indActive] + ) ][0] else: self._x = [ - self.mesh.cell_centers[:, 0] - if self.indActive is None - else self.mesh.cell_centers[self.indActive, 0] + ( + self.mesh.cell_centers[:, 0] + if self.indActive is None + else self.mesh.cell_centers[self.indActive, 0] + ) ][0] return self._x @@ -4792,9 +4957,11 @@ def y(self): if getattr(self, "_y", None) is None: if self.mesh.dim > 1: self._y = [ - self.mesh.cell_centers[:, 1] - if self.indActive is None - else self.mesh.cell_centers[self.indActive, 1] + ( + self.mesh.cell_centers[:, 1] + if self.indActive is None + else self.mesh.cell_centers[self.indActive, 1] + ) ][0] else: self._y = None @@ -4812,9 +4979,11 @@ def z(self): if getattr(self, "_z", None) is None: if self.mesh.dim > 2: self._z = [ - self.mesh.cell_centers[:, 2] - if self.indActive is None - else self.mesh.cell_centers[self.indActive, 2] + ( + self.mesh.cell_centers[:, 2] + if self.indActive is None + else self.mesh.cell_centers[self.indActive, 2] + ) ][0] else: self._z = None @@ -5281,12 +5450,7 @@ def _ekblom(self, val): return (val**2 + self.epsilon**2) ** (self.p / 2.0) def _ekblomDeriv(self, val): - return ( - (self.p / 2) - * (val**2 + self.epsilon**2) ** ((self.p / 2) - 1) - * 2 - * val - ) + return (self.p / 2) * (val**2 + self.epsilon**2) ** ((self.p / 2) - 1) * 2 
* val # def _rotation(self, mDict): # if self.mesh.dim == 2: diff --git a/SimPEG/meta/__init__.py b/SimPEG/meta/__init__.py index e60961e273..d78117a2e4 100644 --- a/SimPEG/meta/__init__.py +++ b/SimPEG/meta/__init__.py @@ -50,7 +50,12 @@ Dask ---- -Coming soon! +.. autosummary:: + :toctree: generated/ + + DaskMetaSimulation + DaskSumMetaSimulation + DaskRepeatedSimulation MPI --- @@ -69,3 +74,21 @@ MultiprocessingSumMetaSimulation, MultiprocessingRepeatedSimulation, ) + +try: + from .dask_sim import ( + DaskMetaSimulation, + DaskSumMetaSimulation, + DaskRepeatedSimulation, + ) +except ImportError: + + class DaskMetaSimulation(MetaSimulation): + def __init__(self, *args, **kwargs): + raise ImportError( + "This simulation requires dask.distributed. Please see installation " + "instructions at https://distributed.dask.org/" + ) + + DaskSumMetaSimulation = DaskMetaSimulation + DaskRepeatedMetaSimulation = DaskMetaSimulation diff --git a/SimPEG/meta/dask_sim.py b/SimPEG/meta/dask_sim.py new file mode 100644 index 0000000000..268f5260dc --- /dev/null +++ b/SimPEG/meta/dask_sim.py @@ -0,0 +1,644 @@ +import numpy as np + +from SimPEG.simulation import BaseSimulation +from SimPEG.survey import BaseSurvey +from SimPEG.maps import IdentityMap +from SimPEG.utils import validate_list_of_types, validate_type +from SimPEG.props import HasModel +import itertools +from dask.distributed import Client +from dask.distributed import Future +from .simulation import MetaSimulation, SumMetaSimulation +import scipy.sparse as sp +from operator import add +import warnings + + +def _store_model(mapping, sim, model): + sim.model = mapping * model + + +def _calc_fields(mapping, sim, model, apply_map=False): + if apply_map and model is not None: + return sim.fields(m=mapping @ model) + else: + return sim.fields(m=sim.model) + + +def _calc_dpred(mapping, sim, model, field, apply_map=False): + if apply_map and model is not None: + return sim.dpred(m=mapping @ model) + else: + return 
sim.dpred(m=sim.model, f=field) + + +def _j_vec_op(mapping, sim, model, field, v, apply_map=False): + sim_v = mapping.deriv(model) @ v + if apply_map: + return sim.Jvec(mapping @ model, sim_v, f=field) + else: + return sim.Jvec(sim.model, sim_v, f=field) + + +def _jt_vec_op(mapping, sim, model, field, v, apply_map=False): + if apply_map: + jtv = sim.Jtvec(mapping @ model, v, f=field) + else: + jtv = sim.Jtvec(sim.model, v, f=field) + return mapping.deriv(model).T @ jtv + + +def _get_jtj_diag(mapping, sim, model, field, w, apply_map=False): + w = sp.diags(w) + if apply_map: + jtj = sim.getJtJdiag(mapping @ model, w, f=field) + else: + jtj = sim.getJtJdiag(sim.model, w, f=field) + sim_jtj = sp.diags(np.sqrt(jtj)) + m_deriv = mapping.deriv(model) + return np.asarray((sim_jtj @ m_deriv).power(2).sum(axis=0)).flatten() + + +def _reduce(client, operation, items): + while len(items) > 1: + new_reduce = client.map(operation, items[::2], items[1::2]) + if len(items) % 2 == 1: + new_reduce[-1] = client.submit(operation, new_reduce[-1], items[-1]) + items = new_reduce + return client.gather(items[0]) + + +def _validate_type_or_future_of_type( + property_name, + objects, + obj_type, + client, + workers=None, + return_workers=False, +): + try: + # validate as a list of things that need to be sent. 
+ objects = validate_list_of_types( + property_name, objects, obj_type, ensure_unique=True + ) + if workers is None: + objects = client.scatter(objects) + else: + tmp = [] + for obj, worker in zip(objects, workers): + tmp.append(client.scatter([obj], workers=worker)[0]) + objects = tmp + except TypeError: + pass + # ensure list of futures + objects = validate_list_of_types( + property_name, + objects, + Future, + ) + # Figure out where everything lives + who = client.who_has(objects) + if workers is None: + workers = [] + for obj in objects: + workers.append(who[obj.key]) + else: + # Issue a warning if the future is not on the expected worker + for i, (obj, worker) in enumerate(zip(objects, workers)): + obj_owner = client.who_has(obj)[obj.key] + if obj_owner != worker: + warnings.warn( + f"{property_name} {i} is not on the expected worker.", stacklevel=2 + ) + + # Ensure this runs on the expected worker + futures = [] + for obj, worker in zip(objects, workers): + futures.append( + client.submit(lambda v: not isinstance(v, obj_type), obj, workers=worker) + ) + is_not_obj = np.array(client.gather(futures)) + if np.any(is_not_obj): + raise TypeError(f"{property_name} futures must be an instance of {obj_type}") + + if return_workers: + return objects, workers + else: + return objects + + +class DaskMetaSimulation(MetaSimulation): + """Dask Distributed version of simulation of simulations. + + This class makes use of `dask.distributed` module to provide + concurrency, executing the internal simulations in parallel. This class + is meant to be a (mostly) drop in replacement for :class:`.MetaSimulation`. + If you want to test your implementation, we recommend starting with a + small problem using `MetaSimulation`, then switching it to this class. + the serial version of this class is good for testing correctness. 
+ + Parameters + ---------- + simulations : (n_sim) list of SimPEG.simulation.BaseSimulation or list of dask.distributed.Future + The list of unique simulations (or futures that would return a simulation) + that each handle a piece of the problem. + mappings : (n_sim) list of SimPEG.maps.IdentityMap or list of dask.distributed.Future + The map for every simulation (or futures that would return a map). Every + map should accept the same length model, and output a model appropriate + for its paired simulation. + client : dask.distributed.Client, optional + The dask client to use for communication. + """ + + def __init__(self, simulations, mappings, client): + self._client = validate_type("client", client, Client, cast=False) + super().__init__(simulations, mappings) + + def _make_survey(self): + survey = BaseSurvey([]) + vnD = [] + client = self.client + for sim, worker in zip(self.simulations, self._workers): + vnD.append(client.submit(lambda s: s.survey.nD, sim, workers=worker)) + vnD = client.gather(vnD) + survey._vnD = vnD + return survey + + @property + def simulations(self): + """The future list of simulations. + + Returns + ------- + (n_sim) list of distributed.Future SimPEG.simulation.BaseSimulation + """ + return self._simulations + + @simulations.setter + def simulations(self, value): + client = self.client + simulations, workers = _validate_type_or_future_of_type( + "simulations", value, BaseSimulation, client, return_workers=True + ) + self._simulations = simulations + self._workers = workers + + @property + def mappings(self): + """The future mappings paired to each simulation. + + Every mapping should accept the same length model, and output + a model that is consistent with the simulation. 
+ + Returns + ------- + (n_sim) list of distributed.Future SimPEG.maps.IdentityMap + """ + return self._mappings + + @mappings.setter + def mappings(self, value): + client = self.client + if self._repeat_sim: + mappings, workers = _validate_type_or_future_of_type( + "mappings", value, IdentityMap, client, return_workers=True + ) + else: + workers = self._workers + if len(value) != len(self.simulations): + raise ValueError( + "Must provide the same number of mappings and simulations." + ) + mappings = _validate_type_or_future_of_type( + "mappings", value, IdentityMap, client, workers=workers + ) + + # validate mapping shapes and simulation shapes + model_len = client.submit(lambda v: v.shape[1], mappings[0]).result() + + def check_mapping(mapping, sim, model_len): + if mapping.shape[1] != model_len: + # Bad mapping model length + return 1 + map_out_shape = mapping.shape[0] + for name in sim._act_map_names: + sim_mapping = getattr(sim, name) + sim_in_shape = sim_mapping.shape[1] + if ( + map_out_shape != "*" + and sim_in_shape != "*" + and sim_in_shape != map_out_shape + ): + # Inconsistent simulation input and mapping output + return 2 + # All good + return 0 + + error_checks = [] + for mapping, sim, worker in zip(mappings, self.simulations, workers): + # if it was a repeat sim, this should cause the simulation to be transfered + # to each worker. + error_checks.append( + client.submit(check_mapping, mapping, sim, model_len, workers=worker) + ) + error_checks = np.asarray(client.gather(error_checks)) + + if np.any(error_checks == 1): + raise ValueError("All mappings must have the same input length") + if np.any(error_checks == 2): + raise ValueError( + f"Simulations and mappings at indices {np.where(error_checks==2)}" + f" are inconsistent." 
+ ) + + self._mappings = mappings + if self._repeat_sim: + self._workers = workers + + @property + def _model_map(self): + # create a bland mapping that has the correct input shape + # to test against model inputs, avoids pulling the first + # mapping back to the main task. + if not hasattr(self, "__model_map"): + client = self.client + n_m = client.submit( + lambda v: v.shape[1], + self.mappings[0], + workers=self._workers[0], + ) + n_m = client.gather(n_m) + self.__model_map = IdentityMap(nP=n_m) + return self.__model_map + + @property + def client(self): + """The distributed client that handles the internal tasks. + + Returns + ------- + distributed.Client + """ + return self._client + + @property + def model(self): + return self._model + + @model.setter + def model(self, value): + updated = HasModel.model.fset(self, value) + # Only send the model to the internal simulations if it was updated. + if updated: + client = self.client + [self._m_as_future] = client.scatter([self._model], broadcast=True) + if not self._repeat_sim: + futures = [] + for mapping, sim, worker in zip( + self.mappings, self.simulations, self._workers + ): + futures.append( + client.submit( + _store_model, + mapping, + sim, + self._m_as_future, + workers=worker, + ) + ) + self.client.gather( + futures + ) # blocking call to ensure all models were stored + + def fields(self, m): + self.model = m + client = self.client + m_future = self._m_as_future + # The above should pass the model to all the internal simulations. 
+ f = [] + for mapping, sim, worker in zip(self.mappings, self.simulations, self._workers): + f.append( + client.submit( + _calc_fields, + mapping, + sim, + m_future, + self._repeat_sim, + workers=worker, + ) + ) + return f + + def dpred(self, m=None, f=None): + if f is None: + if m is None: + m = self.model + f = self.fields(m) + client = self.client + m_future = self._m_as_future + dpred = [] + for mapping, sim, worker, field in zip( + self.mappings, self.simulations, self._workers, f + ): + dpred.append( + client.submit( + _calc_dpred, + mapping, + sim, + m_future, + field, + self._repeat_sim, + workers=worker, + ) + ) + return np.concatenate(client.gather(dpred)) + + def Jvec(self, m, v, f=None): + self.model = m + m_future = self._m_as_future + if f is None: + f = self.fields(m) + client = self.client + [v_future] = client.scatter([v], broadcast=True) + j_vec = [] + for mapping, sim, worker, field in zip( + self.mappings, self.simulations, self._workers, f + ): + j_vec.append( + client.submit( + _j_vec_op, + mapping, + sim, + m_future, + field, + v_future, + self._repeat_sim, + workers=worker, + ) + ) + return np.concatenate(self.client.gather(j_vec)) + + def Jtvec(self, m, v, f=None): + self.model = m + m_future = self._m_as_future + if f is None: + f = self.fields(m) + jt_vec = [] + client = self.client + for i, (mapping, sim, worker, field) in enumerate( + zip(self.mappings, self.simulations, self._workers, f) + ): + jt_vec.append( + client.submit( + _jt_vec_op, + mapping, + sim, + m_future, + field, + v[self._data_offsets[i] : self._data_offsets[i + 1]], + self._repeat_sim, + workers=worker, + ) + ) + # Do the sum by a reduction operation to avoid gathering a vector + # of size n_simulations by n_model parameters on the head. 
+ return _reduce(client, add, jt_vec) + + def getJtJdiag(self, m, W=None, f=None): + self.model = m + m_future = self._m_as_future + if getattr(self, "_jtjdiag", None) is None: + if W is None: + W = np.ones(self.survey.nD) + else: + W = W.diagonal() + jtj_diag = [] + client = self.client + if f is None: + f = self.fields(m) + for i, (mapping, sim, worker, field) in enumerate( + zip(self.mappings, self.simulations, self._workers, f) + ): + sim_w = W[self._data_offsets[i] : self._data_offsets[i + 1]] + jtj_diag.append( + client.submit( + _get_jtj_diag, + mapping, + sim, + m_future, + field, + sim_w, + self._repeat_sim, + workers=worker, + ) + ) + self._jtjdiag = _reduce(client, add, jtj_diag) + + return self._jtjdiag + + +class DaskSumMetaSimulation(DaskMetaSimulation, SumMetaSimulation): + """A dask distributed version of :class:`.SumMetaSimulation`. + + A meta simulation that sums the results of the many individual + simulations. + + Parameters + ---------- + simulations : (n_sim) list of SimPEG.simulation.BaseSimulation or list of dask.distributed.Future + The list of unique simulations that each handle a piece + of the problem. + mappings : (n_sim) list of SimPEG.maps.IdentityMap or list of dask.distributed.Future The map for every simulation. Every map should accept the + same length model, and output a model appropriate for its + paired simulation. + client : dask.distributed.Client, optional + The dask client to use for communication. 
+ """ + + def __init__(self, simulations, mappings, client): + super().__init__(simulations, mappings, client) + + def _make_survey(self): + survey = BaseSurvey([]) + client = self.client + n_d = client.submit(lambda s: s.survey.nD, self.simulations[0]).result() + survey._vnD = [ + n_d, + ] + return survey + + @DaskMetaSimulation.simulations.setter + def simulations(self, value): + client = self.client + simulations, workers = _validate_type_or_future_of_type( + "simulations", value, BaseSimulation, client, return_workers=True + ) + n_d = client.submit(lambda s: s.survey.nD, simulations[0], workers=workers[0]) + sim_check = [] + for sim, worker in zip(simulations, workers): + sim_check.append( + client.submit(lambda s, n: s.survey.nD != n, sim, n_d, workers=worker) + ) + if np.any(client.gather(sim_check)): + raise ValueError("All simulations must have the same number of data.") + self._simulations = simulations + self._workers = workers + + def dpred(self, m=None, f=None): + if f is None: + if m is None: + m = self.model + f = self.fields(m) + client = self.client + dpred = [] + for sim, worker, field in zip(self.simulations, self._workers, f): + dpred.append( + client.submit(_calc_dpred, None, sim, None, field, workers=worker) + ) + return _reduce(client, add, dpred) + + def Jvec(self, m, v, f=None): + self.model = m + if f is None: + f = self.fields(m) + client = self.client + [v_future] = client.scatter([v], broadcast=True) + j_vec = [] + for mapping, sim, worker, field in zip( + self.mappings, self._simulations, self._workers, f + ): + j_vec.append( + client.submit( + _j_vec_op, + mapping, + sim, + self._m_as_future, + field, + v_future, + workers=worker, + ) + ) + return _reduce(client, add, j_vec) + + def Jtvec(self, m, v, f=None): + self.model = m + if f is None: + f = self.fields(m) + jt_vec = [] + client = self.client + for mapping, sim, worker, field in zip( + self.mappings, self._simulations, self._workers, f + ): + jt_vec.append( + client.submit( + 
_jt_vec_op, + mapping, + sim, + self._m_as_future, + field, + v, + workers=worker, + ) + ) + # Do the sum by a reduction operation to avoid gathering a vector + # of size n_simulations by n_model parameters on the head. + return _reduce(client, add, jt_vec) + + def getJtJdiag(self, m, W=None, f=None): + self.model = m + if getattr(self, "_jtjdiag", None) is None: + jtj_diag = [] + if W is None: + W = np.ones(self.survey.nD) + else: + W = W.diagonal() + client = self.client + if f is None: + f = self.fields(m) + for mapping, sim, worker, field in zip( + self.mappings, self._simulations, self._workers, f + ): + jtj_diag.append( + client.submit( + _get_jtj_diag, + mapping, + sim, + self._m_as_future, + field, + W, + workers=worker, + ) + ) + self._jtjdiag = _reduce(client, add, jtj_diag) + + return self._jtjdiag + + +class DaskRepeatedSimulation(DaskMetaSimulation): + """A multiprocessing version of the :class:`.RepeatedSimulation`. + + This class makes use of a single simulation that is copied to each internal + process, but only once per process. + + This simulation shares internals with the :class:`.MultiprocessingMetaSimulation`. + class, as such please see that documentation for details regarding how to properly + use multiprocessing on your operating system. + + Parameters + ---------- + simulation : SimPEG.simulation.BaseSimulation or dask.distributed.Future + The simulation to use repeatedly with different mappings. + mappings : (n_sim) list of SimPEG.maps.IdentityMap or list of dask.distributed.Future + The list of different mappings to use (or futures that each return a mapping). + client : dask.distributed.Client, optional + The dask client to use for communication. 
+ """ + + _repeat_sim = True + + def __init__(self, simulation, mappings, client): + self._client = validate_type("client", client, Client, cast=False) + + self.simulation = simulation + self.mappings = mappings + + self.survey = self._make_survey() + self._data_offsets = np.cumsum(np.r_[0, self.survey.vnD]) + + def _make_survey(self): + survey = BaseSurvey([]) + nD = self.client.submit(lambda s: s.survey.nD, self.simulation).result() + survey._vnD = len(self.mappings) * [nD] + return survey + + @property + def simulations(self): + return itertools.repeat(self.simulation) + + @property + def simulation(self): + """The internal simulation. + + Returns + ------- + distributed.Future of SimPEG.simulation.BaseSimulation + """ + return self._simulation + + @simulation.setter + def simulation(self, value): + client = self.client + if isinstance(value, BaseSimulation): + # Scatter sim to every client + [ + value, + ] = client.scatter([value], broadcast=True) + if not ( + isinstance(value, Future) + and client.submit(lambda s: isinstance(s, BaseSimulation), value).result() + ): + raise TypeError( + "simulation must be an instance of BaseSimulation or a Future that returns" + " a BaseSimulation" + ) + self._simulation = value diff --git a/SimPEG/meta/simulation.py b/SimPEG/meta/simulation.py index a2327b9606..aa62a05b0b 100644 --- a/SimPEG/meta/simulation.py +++ b/SimPEG/meta/simulation.py @@ -98,11 +98,14 @@ def __init__(self, simulations, mappings): self.model = None # give myself a BaseSurvey that has the number of data equal # to the sum of the sims' data. 
+ self.survey = self._make_survey() + self._data_offsets = np.cumsum(np.r_[0, self.survey.vnD]) + + def _make_survey(self): survey = BaseSurvey([]) vnD = [sim.survey.nD for sim in self.simulations] survey._vnD = vnD - self.survey = survey - self._data_offsets = np.cumsum(np.r_[0, vnD]) + return survey @property def simulations(self): @@ -352,11 +355,14 @@ def __init__(self, simulations, mappings): self.mappings = mappings self.model = None # give myself a BaseSurvey + self.survey = self._make_survey() + + def _make_survey(self): survey = BaseSurvey([]) survey._vnD = [ self.simulations[0].survey.nD, ] - self.survey = survey + return survey @MetaSimulation.simulations.setter def simulations(self, value): @@ -442,11 +448,14 @@ def __init__(self, simulation, mappings): self.simulation = simulation self.mappings = mappings self.model = None + self.survey = self._make_survey() + self._data_offsets = np.cumsum(np.r_[0, self.survey.vnD]) + + def _make_survey(self): survey = BaseSurvey([]) vnD = len(self.mappings) * [self.simulation.survey.nD] survey._vnD = vnD - self.survey = survey - self._data_offsets = np.cumsum(np.r_[0, vnD]) + return survey @property def simulations(self): diff --git a/SimPEG/objective_function.py b/SimPEG/objective_function.py index 3e2b347f30..b3c299cea2 100644 --- a/SimPEG/objective_function.py +++ b/SimPEG/objective_function.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import numbers import numpy as np import scipy.sparse as sp @@ -361,7 +363,12 @@ class ComboObjectiveFunction(BaseObjectiveFunction): """ - def __init__(self, objfcts=None, multipliers=None, unpack_on_add=True): + def __init__( + self, + objfcts: list[BaseObjectiveFunction] | None = None, + multipliers=None, + unpack_on_add=True, + ): # Define default lists if None if objfcts is None: objfcts = [] @@ -382,6 +389,7 @@ def __init__(self, objfcts=None, multipliers=None, unpack_on_add=True): nP = None super().__init__(nP=nP) + self.objfcts = objfcts self._multipliers = 
multipliers self._unpack_on_add = unpack_on_add @@ -528,7 +536,7 @@ class L2ObjectiveFunction(BaseObjectiveFunction): Weighting least-squares objective functions in SimPEG are defined as follows: .. math:: - \phi = \frac{1}{2} \big \| \mathbf{W} f(\mathbf{m}) \big \|_2^2 + \phi = \big \| \mathbf{W} f(\mathbf{m}) \big \|_2^2 where :math:`\mathbf{m}` are the model parameters, :math:`f` is a mapping operator, and :math:`\mathbf{W}` is the weighting matrix. @@ -597,20 +605,22 @@ def W(self): def __call__(self, m): """Evaluate the objective function for a given model.""" r = self.W * (self.mapping * m) - return 0.5 * r.dot(r) + return r.dot(r) def deriv(self, m): # Docstring inherited from BaseObjectiveFunction - return self.mapping.deriv(m).T * (self.W.T * (self.W * (self.mapping * m))) + return 2 * self.mapping.deriv(m).T * (self.W.T * (self.W * (self.mapping * m))) def deriv2(self, m, v=None): # Docstring inherited from BaseObjectiveFunction if v is not None: - return self.mapping.deriv(m).T * ( - self.W.T * (self.W * (self.mapping.deriv(m) * v)) + return ( + 2 + * self.mapping.deriv(m).T + * (self.W.T * (self.W * (self.mapping.deriv(m) * v))) ) W = self.W * self.mapping.deriv(m) - return W.T * W + return 2 * W.T * W def _validate_objective_functions(objective_functions): diff --git a/SimPEG/potential_fields/base.py b/SimPEG/potential_fields/base.py index 6e13b41932..8f67e73094 100644 --- a/SimPEG/potential_fields/base.py +++ b/SimPEG/potential_fields/base.py @@ -200,14 +200,6 @@ def ind_active(self): """ return self._ind_active - @property - def actInd(self): - """'actInd' is deprecated. Use 'ind_active' instead.""" - raise AttributeError( - "The 'actInd' property has been deprecated. " - "Please use 'ind_active'. This will be removed in version 0.17.0 of SimPEG.", - ) - def linear_operator(self): """Return linear operator. 
diff --git a/SimPEG/potential_fields/gravity/__init__.py b/SimPEG/potential_fields/gravity/__init__.py index 4a9763bc9d..ae4e97687e 100644 --- a/SimPEG/potential_fields/gravity/__init__.py +++ b/SimPEG/potential_fields/gravity/__init__.py @@ -35,6 +35,7 @@ analytics.GravityGradientSphereFreeSpace """ + from . import survey from . import sources from . import receivers diff --git a/SimPEG/potential_fields/gravity/_numba_functions.py b/SimPEG/potential_fields/gravity/_numba_functions.py index c84069f150..1d6b363b27 100644 --- a/SimPEG/potential_fields/gravity/_numba_functions.py +++ b/SimPEG/potential_fields/gravity/_numba_functions.py @@ -1,6 +1,7 @@ """ Numba functions for gravity simulation using Choclo. """ + import numpy as np try: diff --git a/SimPEG/potential_fields/gravity/simulation.py b/SimPEG/potential_fields/gravity/simulation.py index a41ea48112..a520019939 100644 --- a/SimPEG/potential_fields/gravity/simulation.py +++ b/SimPEG/potential_fields/gravity/simulation.py @@ -131,8 +131,14 @@ def __init__( self.numba_parallel = numba_parallel self.engine = engine self._sanity_checks_engine(kwargs) - # Define jit functions if self.engine == "choclo": + # Check dimensions of the mesh + if self.mesh.dim != 3: + raise ValueError( + f"Invalid mesh with {self.mesh.dim} dimensions. " + "Only 3D meshes are supported when using 'choclo' as engine." + ) + # Define jit functions if numba_parallel: self._sensitivity_gravity = _sensitivity_gravity_parallel self._forward_gravity = _forward_gravity_parallel @@ -449,15 +455,6 @@ def _sensitivity_matrix(self): index_offset += n_rows return sensitivity_matrix - def _get_cell_nodes(self): - """ - Return indices of nodes for each cell in the mesh. 
- """ - if not isinstance(self.mesh, (discretize.TreeMesh, discretize.TensorMesh)): - raise TypeError(f"Invalid mesh of type {self.mesh.__class__.__name__}.") - cell_nodes = self.mesh.cell_nodes - return cell_nodes - def _get_active_nodes(self): """ Return locations of nodes only for active cells @@ -473,7 +470,7 @@ def _get_active_nodes(self): else: raise TypeError(f"Invalid mesh of type {self.mesh.__class__.__name__}.") # Get original cell_nodes but only for active cells - cell_nodes = self._get_cell_nodes() + cell_nodes = self.mesh.cell_nodes # If all cells in the mesh are active, return nodes and cell_nodes if self.nC == self.mesh.n_cells: return nodes, cell_nodes @@ -484,7 +481,7 @@ def _get_active_nodes(self): unique_nodes, active_cell_nodes = np.unique(cell_nodes, return_inverse=True) # Select only the nodes that belong to the active cells (active nodes) active_nodes = nodes[unique_nodes] - # Reshape indices of active cells for each active cell in the mesh + # Reshape indices of active cell nodes for each active cell in the mesh active_cell_nodes = active_cell_nodes.reshape(cell_nodes.shape) return active_nodes, active_cell_nodes diff --git a/SimPEG/potential_fields/magnetics/__init__.py b/SimPEG/potential_fields/magnetics/__init__.py index 0db16e066f..6d9241e310 100644 --- a/SimPEG/potential_fields/magnetics/__init__.py +++ b/SimPEG/potential_fields/magnetics/__init__.py @@ -35,6 +35,7 @@ analytics.MagSphereAnaFunA analytics.MagSphereFreeSpace """ + from . import survey from . import sources from . 
import receivers diff --git a/SimPEG/potential_fields/magnetics/simulation.py b/SimPEG/potential_fields/magnetics/simulation.py index 2cc2fd47b1..5440aca56b 100644 --- a/SimPEG/potential_fields/magnetics/simulation.py +++ b/SimPEG/potential_fields/magnetics/simulation.py @@ -164,7 +164,7 @@ def G(self): return self._G modelType = deprecate_property( - model_type, "modelType", "model_type", removal_version="0.18.0" + model_type, "modelType", "model_type", removal_version="0.18.0", error=True ) @property diff --git a/SimPEG/potential_fields/magnetics/sources.py b/SimPEG/potential_fields/magnetics/sources.py index 6c9d13c50a..33c9137ea3 100644 --- a/SimPEG/potential_fields/magnetics/sources.py +++ b/SimPEG/potential_fields/magnetics/sources.py @@ -12,9 +12,6 @@ class UniformBackgroundField(BaseSrc): Parameters ---------- receiver_list : list of SimPEG.potential_fields.magnetics.Point - parameters : tuple of (amplitude, inclutation, declination), optional - Deprecated input for the function, provided in this position for backwards - compatibility amplitude : float, optional amplitude of the inducing backgound field, usually this is in units of nT. inclination : float, optional @@ -26,11 +23,22 @@ class UniformBackgroundField(BaseSrc): def __init__( self, receiver_list=None, - amplitude=50000, - inclination=90, - declination=0, - **kwargs + amplitude=50000.0, + inclination=90.0, + declination=0.0, + **kwargs, ): + # Raise errors on 'parameters' argument + # The parameters argument was supported in the deprecated SourceField + # class. We would like to raise an error in case the user passes it + # so the class doesn't behave differently than expected. + if (key := "parameters") in kwargs: + raise TypeError( + f"'{key}' property has been removed." + "Please pass the amplitude, inclination and declination" + " through their own arguments." 
+ ) + self.amplitude = amplitude self.inclination = inclination self.declination = declination @@ -39,7 +47,7 @@ def __init__( @property def amplitude(self): - """Amplitude of the inducing backgound field. + """Amplitude of the inducing background field. Returns ------- @@ -92,7 +100,7 @@ def b0(self): ) -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class SourceField(UniformBackgroundField): """Source field for magnetics integral formulation diff --git a/SimPEG/potential_fields/magnetics/survey.py b/SimPEG/potential_fields/magnetics/survey.py index beed236268..98ac827a5c 100644 --- a/SimPEG/potential_fields/magnetics/survey.py +++ b/SimPEG/potential_fields/magnetics/survey.py @@ -9,7 +9,7 @@ class Survey(BaseSurvey): Parameters ---------- - source_field : SimPEG.potential_fields.magnetics.sources.SourceField + source_field : SimPEG.potential_fields.magnetics.sources.UniformBackgroundField A source object that defines the Earth's inducing field """ diff --git a/SimPEG/regularization/__init__.py b/SimPEG/regularization/__init__.py index 984c537509..6496ec988b 100644 --- a/SimPEG/regularization/__init__.py +++ b/SimPEG/regularization/__init__.py @@ -52,10 +52,10 @@ .. math:: \phi_m (m) = - \alpha_s \! \int_\Omega \Bigg [ \frac{1}{2} w_s(r) \, m(r)^2 \Bigg ] \, dv + - \alpha_x \! \int_\Omega \Bigg [ \frac{1}{2} w_x(r) + \alpha_s \! \int_\Omega \Bigg [ w_s(r) \, m(r)^2 \Bigg ] \, dv + + \alpha_x \! \int_\Omega \Bigg [ w_x(r) \bigg ( \frac{\partial m}{\partial x} \bigg )^2 \Bigg ] \, dv + - \alpha_y \! \int_\Omega \Bigg [ \frac{1}{2} w_y(r) + \alpha_y \! \int_\Omega \Bigg [ w_y(r) \bigg ( \frac{\partial m}{\partial y} \bigg )^2 \Bigg ] \, dv where :math:`w_s(r), w_x(r), w_y(r)` are user-defined weighting functions. @@ -65,9 +65,9 @@ And the regularization is implemented using a weighted sum of objective functions: .. 
math:: - \phi_m (\mathbf{m}) \approx \frac{\alpha_s}{2} \big \| \mathbf{W_s m} \big \|^2 + - \frac{\alpha_x}{2} \big \| \mathbf{W_x G_x m} \big \|^2 + - \frac{\alpha_y}{2} \big \| \mathbf{W_y G_y m} \big \|^2 + \phi_m (\mathbf{m}) \approx \alpha_s \big \| \mathbf{W_s m} \big \|^2 + + \alpha_x \big \| \mathbf{W_x G_x m} \big \|^2 + + \alpha_y \big \| \mathbf{W_y G_y m} \big \|^2 where :math:`\mathbf{G_x}` and :math:`\mathbf{G_y}` are partial gradient operators along the x and y-directions, respectively. :math:`\mathbf{W_s}`, :math:`\mathbf{W_x}` and :math:`\mathbf{W_y}` @@ -90,6 +90,7 @@ Smallness SmoothnessFirstOrder SmoothnessSecondOrder + SmoothnessFullGradient Sparse Norm Regularization -------------------------- @@ -146,6 +147,7 @@ BaseAmplitude """ + from ..utils.code_utils import deprecate_class from .base import ( BaseRegularization, @@ -170,23 +172,24 @@ CrossReferenceRegularization, VectorAmplitude, ) +from ._gradient import SmoothnessFullGradient -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class SimpleSmall(Smallness): """Deprecated class, replaced by Smallness.""" pass -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class SimpleSmoothDeriv(SmoothnessFirstOrder): """Deprecated class, replaced by SmoothnessFirstOrder.""" pass -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class Simple(WeightedLeastSquares): """Deprecated class, replaced by WeightedLeastSquares.""" @@ -202,7 +205,7 @@ def __init__(self, mesh=None, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0, **kwargs): ) -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class Tikhonov(WeightedLeastSquares): """Deprecated class, replaced by WeightedLeastSquares.""" @@ -219,28 +222,28 @@ def __init__( ) 
-@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class Small(Smallness): """Deprecated class, replaced by Smallness.""" pass -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class SmoothDeriv(SmoothnessFirstOrder): """Deprecated class, replaced by SmoothnessFirstOrder.""" pass -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class SmoothDeriv2(SmoothnessSecondOrder): """Deprecated class, replaced by SmoothnessSecondOrder.""" pass -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class PGIwithNonlinearRelationshipsSmallness(PGIsmallness): """Deprecated class, replaced by PGIsmallness.""" @@ -248,7 +251,7 @@ def __init__(self, gmm, **kwargs): super().__init__(gmm, non_linear_relationships=True, **kwargs) -@deprecate_class(removal_version="0.19.0", future_warn=True) +@deprecate_class(removal_version="0.19.0", error=True) class PGIwithRelationships(PGI): """Deprecated class, replaced by PGI.""" diff --git a/SimPEG/regularization/_gradient.py b/SimPEG/regularization/_gradient.py new file mode 100644 index 0000000000..7e98309f48 --- /dev/null +++ b/SimPEG/regularization/_gradient.py @@ -0,0 +1,271 @@ +from .base import BaseRegularization +import numpy as np +import scipy.sparse as sp +from ..utils.code_utils import validate_ndarray_with_shape + + +class SmoothnessFullGradient(BaseRegularization): + r"""Measures the gradient of a model using optionally anisotropic weighting. + + This regularizer measures the first order smoothness in a mesh ambivalent way + by observing that the N-d smoothness operator can be represented as an + inner product with an arbitrarily anisotropic weight. + + By default it assumes uniform weighting in each dimension, which works + for most ``discretize`` mesh types. 
+ + Parameters + ---------- + mesh : discretize.BaseMesh + The mesh object to use for regularization. The mesh should either have + a `cell_gradient` or a `stencil_cell_gradient` defined. + alphas : (mesh.dim,) or (mesh.n_cells, mesh.dim) array_like of float, optional. + The weights of the regularization for each axis. This can be defined for each cell + in the mesh. Default is uniform weights equal to the smallest edge length squared. + reg_dirs : (mesh.dim, mesh.dim) or (mesh.n_cells, mesh.dim, mesh.dim) array_like of float + Matrix or list of matrices whose columns represent the regularization directions. + Each matrix should be orthonormal. Default is Identity. + ortho_check : bool, optional + Whether to check `reg_dirs` for orthogonality. + **kwargs + Keyword arguments passed to the parent class ``BaseRegularization``. + + Examples + -------- + Construct of 2D measure with uniform smoothing in each direction. + + >>> from discretize import TensorMesh + >>> from SimPEG.regularization import SmoothnessFullGradient + >>> mesh = TensorMesh([32, 32]) + >>> reg = SmoothnessFullGradient(mesh) + + We can instead create a measure that smooths twice as much in the 1st dimension + than it does in the second dimension. + >>> reg = SmoothnessFullGradient(mesh, [2, 1]) + + The `alphas` parameter can also be indepenant for each cell. Here we set all cells + lower than 0.5 in the x2 to twice as much in the first dimension + otherwise it is uniform smoothing. + >>> alphas = np.ones((mesh.n_cells, mesh.dim)) + >>> alphas[mesh.cell_centers[:, 1] < 0.5] = [2, 1] + >>> reg = SmoothnessFullGradient(mesh, alphas) + + We can also rotate the axis in which we want to preferentially smooth. Say we want to + smooth twice as much along the +x1,+x2 diagonal as we do along the -x1,+x2 diagonal, + effectively rotating our smoothing 45 degrees. Note and the columns of the matrix + represent the directional vectors (not the rows). + >>> sqrt2 = np.sqrt(2) + >>> reg_dirs = np.array([ + ... 
[sqrt2, -sqrt2], + ... [sqrt2, sqrt2], + ... ]) + >>> reg = SmoothnessFullGradient(mesh, alphas, reg_dirs=reg_dirs) + + Notes + ----- + The regularization object is the discretized form of the continuous regularization + + ..math: + f(m) = \int_V \nabla m \cdot \mathbf{a} \nabla m \hspace{5pt} \partial V + + The tensor quantity `a` is used to represent the potential preferential directions of + regularization. `a` must be symmetric positive semi-definite with an eigendecomposition of: + + ..math: + \mathbf{a} = \mathbf{Q}\mathbf{L}\mathbf{Q}^{-1} + + `Q` is then the regularization directions ``reg_dirs``, and `L` is represents the weighting + along each direction, with ``alphas`` along its diagonal. These are multiplied to form the + anisotropic alpha used for rotated gradients. + """ + + def __init__(self, mesh, alphas=None, reg_dirs=None, ortho_check=True, **kwargs): + if mesh.dim < 2: + raise TypeError("Mesh must have dimension higher than 1") + super().__init__(mesh=mesh, **kwargs) + + if alphas is None: + edge_length = np.min(mesh.edge_lengths) + alphas = edge_length**2 * np.ones(mesh.dim) + alphas = validate_ndarray_with_shape( + "alphas", + alphas, + shape=[(mesh.dim,), ("*", mesh.dim)], + dtype=float, + ) + n_active_cells = self.regularization_mesh.n_cells + if len(alphas.shape) == 1: + alphas = np.tile(alphas, (mesh.n_cells, 1)) + if alphas.shape[0] != mesh.n_cells: + # check if I need to expand from active cells to all cells (needed for discretize) + if self.active_cells is not None and alphas.shape[0] == n_active_cells: + alpha_temp = np.zeros((mesh.n_cells, mesh.dim)) + alpha_temp[self.active_cells] = alphas + alphas = alpha_temp + else: + raise IndexError( + f"`alphas` first dimension, {alphas.shape[0]}, must be either number " + f"of active cells {n_active_cells}, or the number of mesh cells {mesh.n_cells}. 
" + ) + if np.any(alphas < 0): + raise ValueError("`alpha` must be non-negative") + anis_alpha = alphas + + if reg_dirs is not None: + reg_dirs = validate_ndarray_with_shape( + "reg_dirs", + reg_dirs, + shape=[(mesh.dim, mesh.dim), ("*", mesh.dim, mesh.dim)], + dtype=float, + ) + if reg_dirs.shape == (mesh.dim, mesh.dim): + reg_dirs = np.tile(reg_dirs, (mesh.n_cells, 1, 1)) + if reg_dirs.shape[0] != mesh.n_cells: + # check if I need to expand from active cells to all cells (needed for discretize) + if ( + self.active_cells is not None + and reg_dirs.shape[0] == n_active_cells + ): + reg_dirs_temp = np.zeros((mesh.n_cells, mesh.dim, mesh.dim)) + reg_dirs_temp[self.active_cells] = reg_dirs + reg_dirs = reg_dirs_temp + else: + raise IndexError( + f"`reg_dirs` first dimension, {reg_dirs.shape[0]}, must be either number " + f"of active cells {n_active_cells}, or the number of mesh cells {mesh.n_cells}. " + ) + # check orthogonality? + if ortho_check: + eye = np.eye(mesh.dim) + for i, M in enumerate(reg_dirs): + if not np.allclose(eye, M @ M.T): + raise ValueError(f"Matrix {i} is not orthonormal") + # create a stack of matrices of dir @ alphas @ dir.T + anis_alpha = np.einsum("ink,ik,imk->inm", reg_dirs, anis_alpha, reg_dirs) + # Then select the upper diagonal components for input to discretize + if mesh.dim == 2: + anis_alpha = np.stack( + ( + anis_alpha[..., 0, 0], + anis_alpha[..., 1, 1], + anis_alpha[..., 0, 1], + ), + axis=-1, + ) + elif mesh.dim == 3: + anis_alpha = np.stack( + ( + anis_alpha[..., 0, 0], + anis_alpha[..., 1, 1], + anis_alpha[..., 2, 2], + anis_alpha[..., 0, 1], + anis_alpha[..., 0, 2], + anis_alpha[..., 1, 2], + ), + axis=-1, + ) + self._anis_alpha = anis_alpha + + # overwrite the call, deriv, and deriv2... 
+ def __call__(self, m): + G = self.cell_gradient + M_f = self.W + r = G @ (self.mapping * (self._delta_m(m))) + return r @ M_f @ r + + def deriv(self, m): + m_d = self.mapping.deriv(self._delta_m(m)) + G = self.cell_gradient + M_f = self.W + r = G @ (self.mapping * (self._delta_m(m))) + return 2 * (m_d.T * (G.T @ (M_f @ r))) + + def deriv2(self, m, v=None): + m_d = self.mapping.deriv(self._delta_m(m)) + G = self.cell_gradient + M_f = self.W + if v is None: + return 2 * (m_d.T @ (G.T @ M_f @ G) @ m_d) + + return 2 * (m_d.T @ (G.T @ (M_f @ (G @ (m_d @ v))))) + + @property + def cell_gradient(self): + """The (approximate) cell gradient operator + + Returns + ------- + scipy.sparse.csr_matrix + """ + if getattr(self, "_cell_gradient", None) is None: + mesh = self.regularization_mesh.mesh + try: + cell_gradient = mesh.cell_gradient + except AttributeError: + a = mesh.face_areas + v = mesh.average_cell_to_face @ mesh.cell_volumes + cell_gradient = sp.diags(a / v) @ mesh.stencil_cell_gradient + + v = np.ones(mesh.n_cells) + # Turn off cell_gradient at boundary faces + if self.active_cells is not None: + v[~self.active_cells] = 0 + + dv = cell_gradient @ v + P = sp.diags((np.abs(dv) <= 1e-16).astype(int)) + cell_gradient = P @ cell_gradient + if self.active_cells is not None: + cell_gradient = cell_gradient[:, self.active_cells] + self._cell_gradient = cell_gradient + return self._cell_gradient + + @property + def _weights_shapes(self): + reg_mesh = self.regularization_mesh + mesh = reg_mesh.mesh + return [(mesh.n_faces,), (reg_mesh.n_cells,)] + + @property + def W(self): + """The inner product operator using rotated coordinates + + Returns + ------- + scipy.sparse.csr_matrix + + Notes + ----- + This matrix is equivalent to `W.T @ W` in most other regularizations. It uses + `discretize` inner product operators to form the matrix `W.T @ W` all at once. 
+ """ + if getattr(self, "_W", None) is None: + mesh = self.regularization_mesh.mesh + n_faces = mesh.n_faces + n_cells = self.regularization_mesh.n_cells + cell_weights = np.ones(n_cells) + face_weights = np.ones(n_faces) + for values in self._weights.values(): + if len(values) == n_cells: + cell_weights *= values + elif len(values) == n_faces: + face_weights *= values + else: + raise ValueError( + "Weights must be either number of active cells, or number of total faces" + ) + # optionally expand the cell weights if there are inactive cells + if n_cells != len(mesh) and self.active_cells is not None: + weights = np.zeros(mesh.n_cells) + weights[self.active_cells] = cell_weights + cell_weights = weights + reg_model = self._anis_alpha * cell_weights[:, None] + # turn off measure in inactive cells + if self.active_cells is not None: + reg_model[~self.active_cells] = 0.0 + + Wf = sp.diags(np.sqrt(face_weights)) + + W = mesh.get_face_inner_product(reg_model) + + self._W = Wf @ (W @ Wf) + return self._W diff --git a/SimPEG/regularization/base.py b/SimPEG/regularization/base.py index 22cbe2632c..cab3af7e58 100644 --- a/SimPEG/regularization/base.py +++ b/SimPEG/regularization/base.py @@ -1,5 +1,4 @@ from __future__ import annotations -import warnings import numpy as np from discretize.base import BaseMesh @@ -67,36 +66,23 @@ def __init__( f"'regularization_mesh' must be of type {RegularizationMesh} or {BaseMesh}. " f"Value of type {type(mesh)} provided." ) + if weights is not None and not isinstance(weights, dict): + raise TypeError( + f"Invalid 'weights' of type '{type(weights)}'. " + "It must be a dictionary with strings as keys and arrays as values." + ) - # Handle deprecated indActive argument + # Raise errors on deprecated arguments: avoid old code that still uses + # them to silently fail if (key := "indActive") in kwargs: - if active_cells is not None: - raise ValueError( - f"Cannot simultaneously pass 'active_cells' and '{key}'. " - "Pass 'active_cells' only." 
- ) - warnings.warn( - f"The '{key}' argument has been deprecated, please use 'active_cells'. " - "It will be removed in future versions of SimPEG.", - DeprecationWarning, - stacklevel=2, + raise TypeError( + f"'{key}' argument has been removed. " + "Please use 'active_cells' instead." ) - active_cells = kwargs.pop(key) - - # Handle deprecated cell_weights argument if (key := "cell_weights") in kwargs: - if weights is not None: - raise ValueError( - f"Cannot simultaneously pass 'weights' and '{key}'. " - "Pass 'weights' only." - ) - warnings.warn( - f"The '{key}' argument has been deprecated, please use 'weights'. " - "It will be removed in future versions of SimPEG.", - DeprecationWarning, - stacklevel=2, + raise TypeError( + f"'{key}' argument has been removed. Please use 'weights' instead." ) - weights = kwargs.pop(key) super().__init__(nP=None, mapping=None, **kwargs) self._regularization_mesh = mesh @@ -107,8 +93,6 @@ def __init__( self.reference_model = reference_model self.units = units if weights is not None: - if not isinstance(weights, dict): - weights = {"user_weights": weights} self.set_weights(**weights) @property @@ -147,8 +131,7 @@ def active_cells(self, values: np.ndarray | None): "indActive", "active_cells", "0.19.0", - future_warn=True, - error=False, + error=True, ) @property @@ -283,8 +266,7 @@ def reference_model(self, values: np.ndarray | float): "mref", "reference_model", "0.19.0", - future_warn=True, - error=False, + error=True, ) @property @@ -306,30 +288,25 @@ def regularization_mesh(self) -> RegularizationMesh: "regmesh", "regularization_mesh", "0.19.0", - future_warn=True, - error=False, + error=True, ) @property def cell_weights(self) -> np.ndarray: """Deprecated property for 'volume' and user defined weights.""" - warnings.warn( - "cell_weights are deprecated please access weights using the `set_weights`," - " `get_weights`, and `remove_weights` functionality. 
This will be removed in 0.19.0", - FutureWarning, - stacklevel=2, + raise AttributeError( + "'cell_weights' has been removed. " + "Please access weights using the `set_weights`, `get_weights`, and " + "`remove_weights` methods." ) - return np.prod(list(self._weights.values()), axis=0) @cell_weights.setter def cell_weights(self, value): - warnings.warn( - "cell_weights are deprecated please access weights using the `set_weights`," - " `get_weights`, and `remove_weights` functionality. This will be removed in 0.19.0", - FutureWarning, - stacklevel=2, + raise AttributeError( + "'cell_weights' has been removed. " + "Please access weights using the `set_weights`, `get_weights`, and " + "`remove_weights` methods." ) - self.set_weights(cell_weights=value) def get_weights(self, key) -> np.ndarray: """Cell weights for a given key. @@ -473,7 +450,7 @@ def __call__(self, m): The regularization function evaluated for the model provided. """ r = self.W * self.f_m(m) - return 0.5 * r.dot(r) + return r.dot(r) def f_m(self, m) -> np.ndarray: """Not implemented for ``BaseRegularization`` class.""" @@ -507,7 +484,7 @@ def deriv(self, m) -> np.ndarray: The Gradient of the regularization function evaluated for the model provided. """ r = self.W * self.f_m(m) - return self.f_m_deriv(m).T * (self.W.T * r) + return 2 * self.f_m_deriv(m).T * (self.W.T * r) @utils.timeIt def deriv2(self, m, v=None) -> csr_matrix: @@ -540,9 +517,9 @@ def deriv2(self, m, v=None) -> csr_matrix: """ f_m_deriv = self.f_m_deriv(m) if v is None: - return f_m_deriv.T * ((self.W.T * self.W) * f_m_deriv) + return 2 * f_m_deriv.T * ((self.W.T * self.W) * f_m_deriv) - return f_m_deriv.T * (self.W.T * (self.W * (f_m_deriv * v))) + return 2 * f_m_deriv.T * (self.W.T * (self.W * (f_m_deriv * v))) class Smallness(BaseRegularization): @@ -585,7 +562,7 @@ class Smallness(BaseRegularization): We define the regularization function (objective function) for smallness as: .. 
math:: - \phi (m) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m) = \int_\Omega \, w(r) \, \Big [ m(r) - m^{(ref)}(r) \Big ]^2 \, dv where :math:`m(r)` is the model, :math:`m^{(ref)}(r)` is the reference model and :math:`w(r)` @@ -596,7 +573,7 @@ class Smallness(BaseRegularization): function (objective function) is expressed in linear form as: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \sum_i + \phi (\mathbf{m}) = \sum_i \tilde{w}_i \, \bigg | \, m_i - m_i^{(ref)} \, \bigg |^2 where :math:`m_i \in \mathbf{m}` are the discrete model parameter values defined on the mesh and @@ -605,7 +582,7 @@ class Smallness(BaseRegularization): This is equivalent to an objective function of the form: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} + \phi (\mathbf{m}) = \Big \| \mathbf{W} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where @@ -635,7 +612,7 @@ class Smallness(BaseRegularization): or set after instantiation using the `set_weights` method: - >>> reg.set_weights(weights_1=array_1, weights_2=array_2}) + >>> reg.set_weights(weights_1=array_1, weights_2=array_2) The default weights that account for cell dimensions in the regularization are accessed via: @@ -675,7 +652,7 @@ def f_m(self, m) -> np.ndarray: The objective function for smallness regularization is given by: .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where :math:`\mathbf{m}` are the discrete model parameters defined on the mesh (model), @@ -690,7 +667,7 @@ def f_m(self, m) -> np.ndarray: such that .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 """ return self.mapping * self._delta_m(m) @@ -721,7 +698,7 @@ def f_m_deriv(self, m) -> csr_matrix: The objective function for smallness regularization is given by: .. 
math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where :math:`\mathbf{m}` are the discrete model parameters defined on the mesh (model), @@ -736,7 +713,7 @@ def f_m_deriv(self, m) -> csr_matrix: such that .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 Thus, the derivative with respect to the model is: @@ -795,7 +772,7 @@ class SmoothnessFirstOrder(BaseRegularization): along the x-direction as: .. math:: - \phi (m) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m) = \int_\Omega \, w(r) \, \bigg [ \frac{\partial m}{\partial x} \bigg ]^2 \, dv where :math:`m(r)` is the model and :math:`w(r)` is a user-defined weighting function. @@ -805,7 +782,7 @@ class SmoothnessFirstOrder(BaseRegularization): function (objective function) is expressed in linear form as: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \sum_i + \phi (\mathbf{m}) = \sum_i \tilde{w}_i \, \bigg | \, \frac{\partial m_i}{\partial x} \, \bigg |^2 where :math:`m_i \in \mathbf{m}` are the discrete model parameter values defined on the mesh @@ -814,7 +791,7 @@ class SmoothnessFirstOrder(BaseRegularization): This is equivalent to an objective function of the form: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W \, G_x m } \, \Big \|^2 + \phi (\mathbf{m}) = \Big \| \mathbf{W \, G_x m } \, \Big \|^2 where @@ -831,7 +808,7 @@ class SmoothnessFirstOrder(BaseRegularization): In this case, the objective function becomes: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W G_x} + \phi (\mathbf{m}) = \Big \| \mathbf{W G_x} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 This functionality is used by setting a reference model with the @@ -993,7 +970,7 @@ def f_m(self, m): is given by: .. 
math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W G_x} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where :math:`\mathbf{m}` are the discrete model parameters (model), @@ -1010,7 +987,7 @@ def f_m(self, m): such that .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W \, f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W \, f_m} \Big \|^2 """ dfm_dl = self.mapping * self._delta_m(m) @@ -1049,7 +1026,7 @@ def f_m_deriv(self, m) -> csr_matrix: is given by: .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W G_x} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where :math:`\mathbf{m}` are the discrete model parameters (model), @@ -1066,7 +1043,7 @@ def f_m_deriv(self, m) -> csr_matrix: such that .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W \, f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W \, f_m} \Big \|^2 The derivative with respect to the model is therefore: @@ -1164,7 +1141,7 @@ class SmoothnessSecondOrder(SmoothnessFirstOrder): smoothness along the x-direction as: .. math:: - \phi (m) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m) = \int_\Omega \, w(r) \, \bigg [ \frac{\partial^2 m}{\partial x^2} \bigg ]^2 \, dv where :math:`m(r)` is the model and :math:`w(r)` is a user-defined weighting function. @@ -1174,7 +1151,7 @@ class SmoothnessSecondOrder(SmoothnessFirstOrder): function (objective function) is expressed in linear form as: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \sum_i + \phi (\mathbf{m}) = \sum_i \tilde{w}_i \, \bigg | \, \frac{\partial^2 m_i}{\partial x^2} \, \bigg |^2 where :math:`m_i \in \mathbf{m}` are the discrete model parameter values defined on the @@ -1183,7 +1160,7 @@ class SmoothnessSecondOrder(SmoothnessFirstOrder): This is equivalent to an objective function of the form: .. 
math:: - \phi (\mathbf{m}) = \frac{1}{2} \big \| \mathbf{W \, L_x \, m } \, \big \|^2 + \phi (\mathbf{m}) = \big \| \mathbf{W \, L_x \, m } \, \big \|^2 where @@ -1197,7 +1174,7 @@ class SmoothnessSecondOrder(SmoothnessFirstOrder): In this case, the objective function becomes: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W L_x} + \phi (\mathbf{m}) = \Big \| \mathbf{W L_x} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 This functionality is used by setting a reference model with the @@ -1260,7 +1237,7 @@ def f_m(self, m): is given by: .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W L_x} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where :math:`\mathbf{m}` are the discrete model parameters (model), @@ -1277,7 +1254,7 @@ def f_m(self, m): such that .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W \, f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W \, f_m} \Big \|^2 """ dfm_dl = self.mapping * self._delta_m(m) @@ -1318,7 +1295,7 @@ def f_m_deriv(self, m) -> csr_matrix: is given by: .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W L_x} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where :math:`\mathbf{m}` are the discrete model parameters (model), @@ -1335,7 +1312,7 @@ def f_m_deriv(self, m) -> csr_matrix: such that .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W \, f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W \, f_m} \Big \|^2 The derivative of the regularization kernel function with respect to the model is: @@ -1438,11 +1415,11 @@ class WeightedLeastSquares(ComboObjectiveFunction): :math:`\phi_m (m)` of the form: .. 
math:: - \phi_m (m) =& \frac{\alpha_s}{2} \int_\Omega \, w(r) + \phi_m (m) =& \alpha_s \int_\Omega \, w(r) \Big [ m(r) - m^{(ref)}(r) \Big ]^2 \, dv \\ - &+ \sum_{j=x,y,z} \frac{\alpha_j}{2} \int_\Omega \, w(r) + &+ \sum_{j=x,y,z} \alpha_j \int_\Omega \, w(r) \bigg [ \frac{\partial m}{\partial \xi_j} \bigg ]^2 \, dv \\ - &+ \sum_{j=x,y,z} \frac{\alpha_{jj}}{2} \int_\Omega \, w(r) + &+ \sum_{j=x,y,z} \alpha_{jj} \int_\Omega \, w(r) \bigg [ \frac{\partial^2 m}{\partial \xi_j^2} \bigg ]^2 \, dv \;\;\;\;\;\;\;\; \big ( \textrm{optional} \big ) @@ -1466,10 +1443,10 @@ class WeightedLeastSquares(ComboObjectiveFunction): objective functions of the form: .. math:: - \phi_m (\mathbf{m}) =& \frac{\alpha_s}{2} + \phi_m (\mathbf{m}) =& \alpha_s \Big \| \mathbf{W_s} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_{jj}}{2} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 + &+ \sum_{j=x,y,z} \alpha_j \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ + &+ \sum_{j=x,y,z} \alpha_{jj} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 \;\;\;\;\;\;\;\; \big ( \textrm{optional} \big ) where @@ -1487,11 +1464,11 @@ class WeightedLeastSquares(ComboObjectiveFunction): In this case, the objective function becomes: .. 
math:: - \phi_m (\mathbf{m}) =& \frac{\alpha_s}{2} + \phi_m (\mathbf{m}) =& \alpha_s \Big \| \mathbf{W_s} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \mathbf{W_j G_j} + &+ \sum_{j=x,y,z} \alpha_j \Big \| \mathbf{W_j G_j} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_{jj}}{2} \Big \| \mathbf{W_{jj} L_j} + &+ \sum_{j=x,y,z} \alpha_{jj} \Big \| \mathbf{W_{jj} L_j} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 \;\;\;\;\;\;\;\; \big ( \textrm{optional} \big ) @@ -1590,19 +1567,18 @@ def __init__( ) self._regularization_mesh = mesh + # Raise errors on deprecated arguments: avoid old code that still uses + # them to silently fail if (key := "indActive") in kwargs: - if active_cells is not None: - raise ValueError( - f"Cannot simultaneously pass 'active_cells' and '{key}'. " - "Pass 'active_cells' only." - ) - warnings.warn( - f"The '{key}' argument has been deprecated, please use 'active_cells'. " - "It will be removed in future versions of SimPEG.", - DeprecationWarning, - stacklevel=2, + raise TypeError( + f"'{key}' argument has been removed. " + "Please use 'active_cells' instead." + ) + + if (key := "cell_weights") in kwargs: + raise TypeError( + f"'{key}' argument has been removed. Please use 'weights' instead." ) - active_cells = kwargs.pop(key) self.alpha_s = alpha_s if alpha_x is not None: @@ -1635,6 +1611,13 @@ def __init__( else: self.length_scale_z = length_scale_z + # Check if weights is a dictionary, raise error if it's not + if weights is not None and not isinstance(weights, dict): + raise TypeError( + f"Invalid 'weights' of type '{type(weights)}'. " + "It must be a dictionary with strings as keys and arrays as values." 
+ ) + # do this to allow child classes to also pass a list of objfcts to this constructor if "objfcts" not in kwargs: objfcts = [ @@ -1670,8 +1653,13 @@ def __init__( objfcts = kwargs.pop("objfcts") super().__init__(objfcts=objfcts, unpack_on_add=False, **kwargs) + + for fun in objfcts: + fun.parent = self + if active_cells is not None: self.active_cells = active_cells + self.mapping = mapping self.reference_model = reference_model self.reference_model_in_smooth = reference_model_in_smooth @@ -1679,8 +1667,6 @@ def __init__( self.alpha_yy = alpha_yy self.alpha_zz = alpha_zz if weights is not None: - if not isinstance(weights, dict): - weights = {"user_weights": weights} self.set_weights(**weights) def set_weights(self, **weights): @@ -1730,20 +1716,19 @@ def remove_weights(self, key): @property def cell_weights(self): - # All of the objective functions should have the same weights, - # so just grab the one from smallness here, which should also - # trigger the deprecation warning - return self.objfcts[0].cell_weights + raise AttributeError( + "'cell_weights' has been removed. " + "Please access weights using the `set_weights`, `get_weights`, and " + "`remove_weights` methods." + ) @cell_weights.setter def cell_weights(self, value): - warnings.warn( - "cell_weights are deprecated please access weights using the `set_weights`," - " `get_weights`, and `remove_weights` functionality. This will be removed in 0.19.0", - FutureWarning, - stacklevel=2, + raise AttributeError( + "'cell_weights' has been removed. " + "Please access weights using the `set_weights`, `get_weights`, and " + "`remove_weights` methods." 
) - self.set_weights(cell_weights=value) @property def alpha_s(self): @@ -2107,8 +2092,7 @@ def active_cells(self, values: np.ndarray): "indActive", "active_cells", "0.19.0", - error=False, - future_warn=True, + error=True, ) @property @@ -2138,8 +2122,7 @@ def reference_model(self, values: np.ndarray | float): "mref", "reference_model", "0.19.0", - future_warn=True, - error=False, + error=True, ) @property diff --git a/SimPEG/regularization/correspondence.py b/SimPEG/regularization/correspondence.py index 6f31dd3317..670afc3132 100644 --- a/SimPEG/regularization/correspondence.py +++ b/SimPEG/regularization/correspondence.py @@ -43,7 +43,7 @@ class LinearCorrespondence(BaseSimilarityMeasure): .. math:: \phi (\mathbf{m}) - = \frac{1}{2} \big \| \lambda_1 \mathbf{m_1} + \lambda_2 \mathbf{m_2} + \lambda_3 \big \|^2 + = \big \| \lambda_1 \mathbf{m_1} + \lambda_2 \mathbf{m_2} + \lambda_3 \big \|^2 Scalar coefficients :math:`\{ \lambda_1 , \lambda_2 , \lambda_3 \}` are set using the `coefficients` property. For a true linear correspondence constraint, we set @@ -130,7 +130,7 @@ def __call__(self, model): """ result = self.relation(model) - return 0.5 * result.T @ result + return result.T @ result def deriv(self, model): r"""Gradient of the regularization function evaluated for the model provided. @@ -167,7 +167,7 @@ def deriv(self, model): result = np.r_[dc_dm1, dc_dm2] - return result + return 2 * result def deriv2(self, model, v=None): r"""Hessian of the regularization function evaluated for the model provided. @@ -185,10 +185,10 @@ def deriv2(self, model, v=None): .. 
math:: \frac{\partial^2 \phi}{\partial \mathbf{m}^2} = \begin{bmatrix} - \dfrac{\partial \phi^2}{\partial \mathbf{m_1}^2} & - \dfrac{\partial \phi^2}{\partial \mathbf{m_1} \partial \mathbf{m_2}} \\ - \dfrac{\partial \phi^2}{\partial \mathbf{m_2} \partial \mathbf{m_1}} & - \dfrac{\partial \phi^2}{\partial \mathbf{m_2}^2} + \dfrac{\partial^2 \phi}{\partial \mathbf{m_1}^2} & + \dfrac{\partial^2 \phi}{\partial \mathbf{m_1} \partial \mathbf{m_2}} \\ + \dfrac{\partial^2 \phi}{\partial \mathbf{m_2} \partial \mathbf{m_1}} & + \dfrac{\partial^2 \phi}{\partial \mathbf{m_2}^2} \end{bmatrix} When a vector :math:`(\mathbf{v})` is supplied, the method returns the Hessian @@ -217,10 +217,10 @@ def deriv2(self, model, v=None): v1, v2 = self.wire_map * v p1 = k1**2 * v1 + k2 * k1 * v2 p2 = k2 * k1 * v1 + k2**2 * v2 - return np.r_[p1, p2] + return 2 * np.r_[p1, p2] else: n = self.regularization_mesh.nC A = utils.sdiag(np.ones(n) * (k1**2)) B = utils.sdiag(np.ones(n) * (k2**2)) C = utils.sdiag(np.ones(n) * (k1 * k2)) - return sp.bmat([[A, C], [C, B]], format="csr") + return 2 * sp.bmat([[A, C], [C, B]], format="csr") diff --git a/SimPEG/regularization/cross_gradient.py b/SimPEG/regularization/cross_gradient.py index 9c8193a19f..a1cb7e1c03 100644 --- a/SimPEG/regularization/cross_gradient.py +++ b/SimPEG/regularization/cross_gradient.py @@ -52,7 +52,7 @@ class CrossGradient(BaseSimilarityMeasure): (`Haber and Gazit, 2013 `__): .. math:: - \phi (m_1, m_2) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m_1, m_2) = \int_\Omega \, w(r) \, \Big | \nabla m_1 \, \times \, \nabla m_2 \, \Big |^2 \, dv where :math:`w(r)` is a user-defined weighting function. @@ -60,7 +60,7 @@ class CrossGradient(BaseSimilarityMeasure): the regularization function can be re-expressed as: .. 
math:: - \phi (m_1, m_2) = \frac{1}{2} \int_\Omega \, w(r) \, \Big [ \, + \phi (m_1, m_2) = \int_\Omega \, w(r) \, \Big [ \, \big | \nabla m_1 \big |^2 \big | \nabla m_2 \big |^2 - \big ( \nabla m_1 \, \cdot \, \nabla m_2 \, \big )^2 \Big ] \, dv @@ -69,7 +69,7 @@ class CrossGradient(BaseSimilarityMeasure): function (objective function) is given by: .. math:: - \phi (m_1, m_2) \approx \frac{1}{2} \sum_i \tilde{w}_i \, \bigg [ + \phi (m_1, m_2) \approx \sum_i \tilde{w}_i \, \bigg [ \Big | (\nabla m_1)_i \Big |^2 \Big | (\nabla m_2)_i \Big |^2 - \Big [ (\nabla m_1)_i \, \cdot \, (\nabla m_2)_i \, \Big ]^2 \, \bigg ] @@ -89,9 +89,9 @@ class CrossGradient(BaseSimilarityMeasure): .. math:: \phi (\mathbf{m}) = - \frac{1}{2} \Big [ \mathbf{W A} \big ( \mathbf{G \, m_1} \big )^2 \Big ]^T + \Big [ \mathbf{W A} \big ( \mathbf{G \, m_1} \big )^2 \Big ]^T \Big [ \mathbf{W A} \big ( \mathbf{G \, m_2} \big )^2 \Big ] - - \frac{1}{2} \bigg \| \mathbf{W A} \Big [ \big ( \mathbf{G \, m_1} \big ) + - \bigg \| \mathbf{W A} \Big [ \big ( \mathbf{G \, m_1} \big ) \odot \big ( \mathbf{G \, m_2} \big ) \Big ] \bigg \|^2 where exponents are computed elementwise, @@ -249,9 +249,7 @@ def __call__(self, model): G = self._G g_m1 = G @ m1 g_m2 = G @ m2 - return 0.5 * np.sum( - (Av @ g_m1**2) * (Av @ g_m2**2) - (Av @ (g_m1 * g_m2)) ** 2 - ) + return np.sum((Av @ g_m1**2) * (Av @ g_m2**2) - (Av @ (g_m1 * g_m2)) ** 2) def deriv(self, model): r"""Gradient of the regularization function evaluated for the model provided. @@ -267,7 +265,7 @@ def deriv(self, model): The gradient has the form: .. 
math:: - \frac{\partial \phi}{\partial \mathbf{m}} = + 2 \frac{\partial \phi}{\partial \mathbf{m}} = \begin{bmatrix} \dfrac{\partial \phi}{\partial \mathbf{m_1}} \\ \dfrac{\partial \phi}{\partial \mathbf{m_2}} \end{bmatrix} @@ -288,12 +286,15 @@ def deriv(self, model): g_m1 = G @ m1 g_m2 = G @ m2 - return np.r_[ - (((Av @ g_m2**2) @ Av) * g_m1) @ G - - (((Av @ (g_m1 * g_m2)) @ Av) * g_m2) @ G, - (((Av @ g_m1**2) @ Av) * g_m2) @ G - - (((Av @ (g_m1 * g_m2)) @ Av) * g_m1) @ G, - ] + return ( + 2 + * np.r_[ + (((Av @ g_m2**2) @ Av) * g_m1) @ G + - (((Av @ (g_m1 * g_m2)) @ Av) * g_m2) @ G, + (((Av @ g_m1**2) @ Av) * g_m2) @ G + - (((Av @ (g_m1 * g_m2)) @ Av) * g_m1) @ G, + ] + ) # factor of 2 from derivative of | grad m1 x grad m2 | ^2 def deriv2(self, model, v=None): r"""Hessian of the regularization function evaluated for the model provided. @@ -310,10 +311,10 @@ def deriv2(self, model, v=None): .. math:: \frac{\partial^2 \phi}{\partial \mathbf{m}^2} = \begin{bmatrix} - \dfrac{\partial \phi^2}{\partial \mathbf{m_1}^2} & - \dfrac{\partial \phi^2}{\partial \mathbf{m_1} \partial \mathbf{m_2}} \\ - \dfrac{\partial \phi^2}{\partial \mathbf{m_2} \partial \mathbf{m_1}} & - \dfrac{\partial \phi^2}{\partial \mathbf{m_2}^2} + \dfrac{\partial^2 \phi}{\partial \mathbf{m_1}^2} & + \dfrac{\partial^2 \phi}{\partial \mathbf{m_1} \partial \mathbf{m_2}} \\ + \dfrac{\partial^2 \phi}{\partial \mathbf{m_2} \partial \mathbf{m_1}} & + \dfrac{\partial^2 \phi}{\partial \mathbf{m_2}^2} \end{bmatrix} When a vector :math:`(\mathbf{v})` is supplied, the method returns the Hessian @@ -344,64 +345,49 @@ def deriv2(self, model, v=None): g_m1 = G @ m1 g_m2 = G @ m2 - if v is None: - A = ( - G.T - @ ( - sp.diags(Av.T @ (Av @ g_m2**2)) - - sp.diags(g_m2) @ Av.T @ Av @ sp.diags(g_m2) - ) - @ G - ) - - C = ( - G.T - @ ( - sp.diags(Av.T @ (Av @ g_m1**2)) - - sp.diags(g_m1) @ Av.T @ Av @ sp.diags(g_m1) - ) - @ G - ) + d11_mid = Av.T @ (Av @ g_m2**2) + d12_mid = -(Av.T @ (Av @ (g_m1 * g_m2))) + d22_mid =
Av.T @ (Av @ g_m1**2) - B = None - BT = None + if v is None: + D11_mid = sp.diags(d11_mid) + D12_mid = sp.diags(d12_mid) + D22_mid = sp.diags(d22_mid) if not self.approx_hessian: - # d_m1_d_m2 - B = ( - G.T - @ ( - 2 * sp.diags(g_m1) @ Av.T @ Av @ sp.diags(g_m2) - - sp.diags(g_m2) @ Av.T @ Av @ sp.diags(g_m1) - - sp.diags(Av.T @ Av @ (g_m1 * g_m2)) - ) - @ G + D11_mid = D11_mid - sp.diags(g_m2) @ Av.T @ Av @ sp.diags(g_m2) + D12_mid = ( + D12_mid + + 2 * sp.diags(g_m1) @ Av.T @ Av @ sp.diags(g_m2) - sp.diags(g_m2) @ Av.T @ Av @ sp.diags(g_m1) ) - BT = B.T + D22_mid = D22_mid - sp.diags(g_m1) @ Av.T @ Av @ sp.diags(g_m1) + D11 = G.T @ D11_mid @ G + D12 = G.T @ D12_mid @ G + D22 = G.T @ D22_mid @ G + + return 2 * sp.bmat( + [[D11, D12], [D12.T, D22]], format="csr" + ) # factor of 2 from derivative of | grad m1 x grad m2 | ^2 - return sp.bmat([[A, B], [BT, C]], format="csr") else: v1, v2 = self.wire_map * v Gv1 = G @ v1 Gv2 = G @ v2 - - p1 = G.T @ ( - (Av.T @ (Av @ g_m2**2)) * Gv1 - g_m2 * (Av.T @ (Av @ (g_m2 * Gv1))) - ) - p2 = G.T @ ( - (Av.T @ (Av @ g_m1**2)) * Gv2 - g_m1 * (Av.T @ (Av @ (g_m1 * Gv2))) - ) - + p1 = G.T @ (d11_mid * Gv1 + d12_mid * Gv2) + p2 = G.T @ (d12_mid * Gv1 + d22_mid * Gv2) if not self.approx_hessian: p1 += G.T @ ( - 2 * g_m1 * (Av.T @ (Av @ (g_m2 * Gv2))) - - g_m2 * (Av.T @ (Av @ (g_m1 * Gv2))) - - (Av.T @ (Av @ (g_m1 * g_m2))) * Gv2 + -g_m2 * (Av.T @ (Av @ (g_m2 * Gv1))) # d11*v1 full addition + + 2 * g_m1 * (Av.T @ (Av @ (g_m2 * Gv2))) # d12*v2 full addition + - g_m2 * (Av.T @ (Av @ (g_m1 * Gv2))) # d12*v2 continued ) p2 += G.T @ ( - 2 * g_m2 * (Av.T @ (Av @ (g_m1 * Gv1))) - - g_m1 * (Av.T @ (Av @ (g_m2 * Gv1))) - - (Av.T @ (Av @ (g_m2 * g_m1))) * Gv1 + -g_m1 * (Av.T @ (Av @ (g_m1 * Gv2))) # d22*v2 full addition + + 2 * g_m2 * (Av.T @ (Av @ (g_m1 * Gv1))) # d12.T*v1 full addition + - g_m1 * (Av.T @ (Av @ (g_m2 * Gv1))) # d12.T*v1 continued ) - return np.r_[p1, p2] + return ( + 2 * np.r_[p1, p2] + ) # factor of 2 from derivative of | 
grad m1 x grad m2 | ^2 diff --git a/SimPEG/regularization/jtv.py b/SimPEG/regularization/jtv.py index aa55780b47..86a2208915 100644 --- a/SimPEG/regularization/jtv.py +++ b/SimPEG/regularization/jtv.py @@ -50,7 +50,7 @@ class JointTotalVariation(BaseSimilarityMeasure): (`Haber and Gazit, 2013 `__): .. math:: - \phi (m_1, m_2) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m_1, m_2) = \int_\Omega \, w(r) \, \Big [ \, \big | \nabla m_1 \big |^2 \, + \, \big | \nabla m_2 \big |^2 \, \Big ]^{1/2} \, dv where :math:`w(r)` is a user-defined weighting function. @@ -60,7 +60,7 @@ class JointTotalVariation(BaseSimilarityMeasure): function (objective function) is given by: .. math:: - \phi (m_1, m_2) \approx \frac{1}{2} \sum_i \tilde{w}_i \, \bigg [ \, + \phi (m_1, m_2) \approx \sum_i \tilde{w}_i \, \bigg [ \, \Big | (\nabla m_1)_i \Big |^2 \, + \, \Big | (\nabla m_2)_i \Big |^2 \, \bigg ]^{1/2} where :math:`(\nabla m_1)_i` are the gradients of property :math:`m_1` defined on the mesh and @@ -78,7 +78,7 @@ class JointTotalVariation(BaseSimilarityMeasure): is therefore equivalent to an objective function of the form: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \, \mathbf{e}^T \Bigg ( \, + \phi (\mathbf{m}) = \mathbf{e}^T \Bigg ( \, \mathbf{W \, A} \bigg [ \sum_k (\mathbf{G \, m_k})^2 \bigg ] \; + \; \epsilon \mathbf{v}^2 \, \Bigg )^{1/2} @@ -256,11 +256,11 @@ def deriv2(self, model, v=None): .. 
math:: \frac{\partial^2 \phi}{\partial \mathbf{m}^2} = \begin{bmatrix} - \dfrac{\partial \phi^2}{\partial \mathbf{m_1}^2} & - \dfrac{\partial \phi^2}{\partial \mathbf{m_1} \partial \mathbf{m_2}} & + \dfrac{\partial^2 \phi}{\partial \mathbf{m_1}^2} & + \dfrac{\partial^2 \phi}{\partial \mathbf{m_1} \partial \mathbf{m_2}} & \cdots \\ - \dfrac{\partial \phi^2}{\partial \mathbf{m_2} \partial \mathbf{m_1}} & - \dfrac{\partial \phi^2}{\partial \mathbf{m_2}^2} & \; \\ + \dfrac{\partial^2 \phi}{\partial \mathbf{m_2} \partial \mathbf{m_1}} & + \dfrac{\partial^2 \phi}{\partial \mathbf{m_2}^2} & \; \\ \vdots & \; & \ddots \end{bmatrix} diff --git a/SimPEG/regularization/pgi.py b/SimPEG/regularization/pgi.py index 496df06f45..24aeaf866d 100644 --- a/SimPEG/regularization/pgi.py +++ b/SimPEG/regularization/pgi.py @@ -103,10 +103,10 @@ class PGIsmallness(Smallness): least-square: .. math:: - \phi (\mathbf{m}) &= \frac{\alpha_{pgi}}{2} + \phi (\mathbf{m}) &= \alpha_\text{pgi} \big | \mathbf{W} ( \Theta , \mathbf{z}^\ast ) \, (\mathbf{m} - \mathbf{m_{ref}}(\Theta, \mathbf{z}^\ast ) \, \Big \|^2 - &+ \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_{jj}}{2} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 + &+ \sum_{j=x,y,z} \alpha_j \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ + &+ \sum_{j=x,y,z} \alpha_{jj} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 \;\;\;\;\;\;\;\; \big ( \textrm{optional} \big ) where @@ -497,7 +497,7 @@ def __call__(self, m, external_weights=True): ] ] - return 0.5 * mkvc(r0).dot(mkvc(r1)) + return mkvc(r0).dot(mkvc(r1)) else: modellist = self.wiresmap * m @@ -506,7 +506,7 @@ def __call__(self, m, external_weights=True): if self.non_linear_relationships: score = self.gmm.score_samples(model) score_vec = mkvc(np.r_[[score for maps in self.wiresmap.maps]]) - return -np.sum((W.T * W) * score_vec) / len(self.wiresmap.maps) + return -2 * np.sum((W.T * W) * score_vec) / len(self.wiresmap.maps) else: if 
external_weights and getattr(self.W, "diagonal", None) is not None: @@ -519,7 +519,7 @@ def __call__(self, m, external_weights=True): score = self.gmm.score_samples_with_sensW(model, sensW) # score_vec = mkvc(np.r_[[score for maps in self.wiresmap.maps]]) # return -np.sum((W.T * W) * score_vec) / len(self.wiresmap.maps) - return -np.sum(score) + return -2 * np.sum(score) @timeIt def deriv(self, m): @@ -616,7 +616,7 @@ def deriv(self, m): ] ] ) - return mkvc(mD.T * (self.W.T * r)) + return 2 * mkvc(mD.T * (self.W.T * r)) else: if self.non_linear_relationships: @@ -726,7 +726,7 @@ def deriv(self, m): logP = np.vstack([logP for maps in self.wiresmap.maps]) numer = (W * np.exp(logP)).sum(axis=1) r = numer / (np.exp(score_vec)) - return mkvc(mD.T * r) + return 2 * mkvc(mD.T * r) @timeIt def deriv2(self, m, v=None): @@ -841,22 +841,12 @@ def deriv2(self, m, v=None): mDv = self.wiresmap * (mD * v) mDv = np.c_[mDv] r0 = (self.W * (mkvc(mDv))).reshape(mDv.shape, order="F") - return mkvc( - mD.T - * ( - self.W - * ( - mkvc( - np.r_[ - [ - np.dot(self._r_second_deriv[i], r0[i]) - for i in range(len(r0)) - ] - ] - ) - ) - ) + second_deriv_times_r0 = mkvc( + np.r_[ + [np.dot(self._r_second_deriv[i], r0[i]) for i in range(len(r0))] + ] ) + return 2 * mkvc(mD.T * (self.W * second_deriv_times_r0)) else: # Forming the Hessian by diagonal blocks hlist = [ @@ -875,7 +865,7 @@ def deriv2(self, m, v=None): Hr = Hr.dot(self.W) - return (mD.T * mD) * (self.W * (Hr)) + return 2 * (mD.T * mD) * (self.W * (Hr)) else: if self.non_linear_relationships: @@ -953,7 +943,7 @@ def deriv2(self, m, v=None): for j in range(len(self.wiresmap.maps)): Hc = sp.hstack([Hc, sdiag(hlist[i][j])]) Hr = sp.vstack([Hr, Hc]) - Hr = (mD.T * mD) * Hr + Hr = 2 * (mD.T * mD) * Hr if v is not None: return Hr.dot(v) @@ -1041,12 +1031,12 @@ class PGI(ComboObjectiveFunction): ``PGI`` is given by: .. 
math:: - \phi (\mathbf{m}) &= \frac{\alpha_{pgi}}{2} + \phi (\mathbf{m}) &= \alpha_\text{pgi} \big [ \mathbf{m} - \mathbf{m_{ref}}(\Theta, \mathbf{z}^\ast ) \big ]^T \mathbf{W} ( \Theta , \mathbf{z}^\ast ) \, \big [ \mathbf{m} - \mathbf{m_{ref}}(\Theta, \mathbf{z}^\ast ) \big ] \\ - &+ \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_{jj}}{2} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 + &+ \sum_{j=x,y,z} \alpha_j \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ + &+ \sum_{j=x,y,z} \alpha_{jj} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 \;\;\;\;\;\;\;\; \big ( \textrm{optional} \big ) where @@ -1072,10 +1062,10 @@ class PGI(ComboObjectiveFunction): regularization function (objective function) can be expressed as: .. math:: - \phi (\mathbf{m}) &= \frac{\alpha_{pgi}}{2} \Big \| \mathbf{W}_{\! 1/2}(\Theta, \mathbf{z}^\ast ) \, + \phi (\mathbf{m}) &= \alpha_\text{pgi} \Big \| \mathbf{W}_{\! 1/2}(\Theta, \mathbf{z}^\ast ) \, \big [ \mathbf{m} - \mathbf{m_{ref}}(\Theta, \mathbf{z}^\ast ) \big ] \, \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ - &+ \sum_{j=x,y,z} \frac{\alpha_{jj}}{2} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 + &+ \sum_{j=x,y,z} \alpha_j \Big \| \mathbf{W_j G_j \, m} \, \Big \|^2 \\ + &+ \sum_{j=x,y,z} \alpha_{jj} \Big \| \mathbf{W_{jj} L_j \, m} \, \Big \|^2 \;\;\;\;\;\;\;\; \big ( \textrm{optional} \big ) When the ``approx_eval`` property is ``True``, you may also set the ``approx_gradient`` property @@ -1194,6 +1184,7 @@ def __init__( for model_map, wire, weights in zip( self.maplist, self.wiresmap.maps, weights_list ): + weights_i = {"pgi-weights": weights} if weights is not None else None objfcts += [ WeightedLeastSquares( alpha_s=0.0, @@ -1205,7 +1196,7 @@ def __init__( alpha_zz=alpha_zz, mesh=self.regularization_mesh, mapping=model_map * wire[1], - weights=weights, + weights=weights_i, **kwargs, ) ] @@ -1403,6 +1394,5 @@ def 
reference_model(self, values: np.ndarray | float): "mref", "reference_model", "0.19.0", - future_warn=True, - error=False, + error=True, ) diff --git a/SimPEG/regularization/regularization_mesh.py b/SimPEG/regularization/regularization_mesh.py index a16dd8eaab..63eaa981f2 100755 --- a/SimPEG/regularization/regularization_mesh.py +++ b/SimPEG/regularization/regularization_mesh.py @@ -1,9 +1,9 @@ import numpy as np import scipy.sparse as sp + from SimPEG.utils.code_utils import deprecate_property, validate_active_indices -from .. import props -from .. import utils +from .. import props, utils ############################################################################### # # @@ -523,24 +523,21 @@ def cell_gradient_z(self) -> sp.csr_matrix: "cellDiffx", "cell_gradient_x", "0.19.0", - error=False, - future_warn=True, + error=True, ) cellDiffy = deprecate_property( cell_gradient_y, "cellDiffy", "cell_gradient_y", "0.19.0", - error=False, - future_warn=True, + error=True, ) cellDiffz = deprecate_property( cell_gradient_z, "cellDiffz", "cell_gradient_z", "0.19.0", - error=False, - future_warn=True, + error=True, ) @property @@ -555,7 +552,7 @@ def cell_distances_x(self) -> np.ndarray: if getattr(self, "_cell_distances_x", None) is None: self._cell_distances_x = self.cell_gradient_x.max( axis=1 - ).toarray().flatten() ** (-1.0) + ).toarray().ravel() ** (-1.0) return self._cell_distances_x @@ -571,7 +568,7 @@ def cell_distances_y(self) -> np.ndarray: if getattr(self, "_cell_distances_y", None) is None: self._cell_distances_y = self.cell_gradient_y.max( axis=1 - ).toarray().flatten() ** (-1.0) + ).toarray().ravel() ** (-1.0) return self._cell_distances_y @@ -587,7 +584,7 @@ def cell_distances_z(self) -> np.ndarray: if getattr(self, "_cell_distances_z", None) is None: self._cell_distances_z = self.cell_gradient_z.max( axis=1 - ).toarray().flatten() ** (-1.0) + ).toarray().ravel() ** (-1.0) return self._cell_distances_z diff --git a/SimPEG/regularization/rotated.py 
b/SimPEG/regularization/rotated.py deleted file mode 100644 index 1eaf3166a2..0000000000 --- a/SimPEG/regularization/rotated.py +++ /dev/null @@ -1,527 +0,0 @@ -from typing import Literal - -import numpy as np -import scipy.sparse as sp -from discretize import TensorMesh, TreeMesh -from discretize.base import BaseMesh -from scipy.interpolate import NearestNDInterpolator - -from ..utils.code_utils import ( - validate_float, - validate_ndarray_with_shape, - validate_type, -) -from ..utils.mat_utils import coterminal -from . import BaseRegularization, RegularizationMesh, Sparse, SparseSmallness - - -class SmoothnessFullGradient(BaseRegularization): - r"""Measures the gradient of a model using optionally anisotropic weighting. - - This regularizer measures the first order smoothness in a mesh ambivalent way - by observing that the N-d smoothness operator can be represented as an - inner product with an arbitrarily anisotropic weight. - - By default it assumes uniform weighting in each dimension, which works - for most ``discretize`` mesh types. - - Parameters - ---------- - mesh : discretize.BaseMesh - The mesh object to use for regularization. The mesh should either have - a `cell_gradient` or a `stencil_cell_gradient` defined. - alphas : (mesh.dim,) or (mesh.n_cells, mesh.dim) array_like of float, optional. - The weights of the regularization for each axis. This can be defined for each cell - in the mesh. Default is uniform weights equal to the smallest edge length squared. - reg_dirs : (mesh.dim, mesh.dim) or (mesh.n_cells, mesh.dim, mesh.dim) array_like of float - Matrix or list of matrices whose columns represent the regularization directions. - Each matrix should be orthonormal. Default is Identity. - ortho_check : bool, optional - Whether to check `reg_dirs` for orthogonality. - kwargs : - Keyword arguments passed to the parent class ``BaseRegularization``. - - Examples - -------- - Construct of 2D measure with uniform smoothing in each direction. 
- - >>> from discretize import TensorMesh - >>> from SimPEG.regularization import SmoothnessFullGradient - >>> mesh = TensorMesh([32, 32]) - >>> reg = SmoothnessFullGradient(mesh) - - We can instead create a measure that smooths twice as much in the 1st dimension - than it does in the second dimension. - >>> reg = SmoothnessFullGradient(mesh, [2, 1]) - - The `alphas` parameter can also be indepenant for each cell. Here we set all cells - lower than 0.5 in the x2 to twice as much in the first dimension - otherwise it is uniform smoothing. - >>> alphas = np.ones((mesh.n_cells, mesh.dim)) - >>> alphas[mesh.cell_centers[:, 1] < 0.5] = [2, 1] - >>> reg = SmoothnessFullGradient(mesh, alphas) - - We can also rotate the axis in which we want to preferentially smooth. Say we want to - smooth twice as much along the +x1,+x2 diagonal as we do along the -x1,+x2 diagonal, - effectively rotating our smoothing 45 degrees. Note and the columns of the matrix - represent the directional vectors (not the rows). - >>> sqrt2 = np.sqrt(2) - >>> reg_dirs = np.array([ - ... [sqrt2, -sqrt2], - ... [sqrt2, sqrt2], - ... ]) - >>> reg = SmoothnessFullGradient(mesh, alphas, reg_dirs=reg_dirs) - - Notes - ----- - The regularization object is the discretized form of the continuous regularization - - ..math: - f(m) = \int_V \nabla m \cdot \mathbf{a} \nabla m \hspace{5pt} \partial V - - The tensor quantity `a` is used to represent the potential preferential directions of - regularization. `a` must be symmetric positive semi-definite with an eigendecomposition of: - - ..math: - \mathbf{a} = \mathbf{Q}\mathbf{L}\mathbf{Q}^{-1} - - `Q` is then the regularization directions ``reg_dirs``, and `L` is represents the weighting - along each direction, with ``alphas`` along its diagonal. These are multiplied to form the - anisotropic alpha used for rotated gradients. 
- """ - - _multiplier_pair = "alpha_x" - - def __init__( - self, - mesh, - alphas=None, - reg_dirs=None, - ortho_check=True, - norm=2, - irls_scaled=True, - irls_threshold=1e-8, - reference_model_in_smooth=False, - **kwargs, - ): - self.reference_model_in_smooth = reference_model_in_smooth - - if mesh.dim < 2: - raise TypeError("Mesh must have dimension higher than 1") - super().__init__(mesh=mesh, **kwargs) - - self.norm = norm - self.irls_threshold = irls_threshold - self.irls_scaled = irls_scaled - - if alphas is None: - edge_length = np.min(mesh.edge_lengths) - alphas = edge_length**2 * np.ones(mesh.dim) - alphas = validate_ndarray_with_shape( - "alphas", - alphas, - shape=[(mesh.dim,), ("*", mesh.dim)], - dtype=float, - ) - n_active_cells = self.regularization_mesh.n_cells - if len(alphas.shape) == 1: - alphas = np.tile(alphas, (mesh.n_cells, 1)) - if alphas.shape[0] != mesh.n_cells: - # check if I need to expand from active cells to all cells (needed for discretize) - if alphas.shape[0] == n_active_cells and self.active_cells is not None: - alpha_temp = np.zeros((mesh.n_cells, mesh.dim)) - alpha_temp[self.active_cells] = alphas - alphas = alpha_temp - else: - raise IndexError( - f"`alphas` first dimension, {alphas.shape[0]}, must be either number " - f"of active cells {mesh.n_cells}, or the number of mesh cells {mesh.n_cells}. 
" - ) - if np.any(alphas < 0): - raise ValueError("`alpha` must be non-negative") - anis_alpha = alphas - - if reg_dirs is not None: - reg_dirs = validate_ndarray_with_shape( - "reg_dirs", - reg_dirs, - shape=[(mesh.dim, mesh.dim), ("*", mesh.dim, mesh.dim)], - dtype=float, - ) - if reg_dirs.shape == (mesh.dim, mesh.dim): - reg_dirs = np.tile(reg_dirs, (mesh.n_cells, 1, 1)) - if reg_dirs.shape[0] != mesh.n_cells: - # check if I need to expand from active cells to all cells (needed for discretize) - if ( - reg_dirs.shape[0] == n_active_cells - and self.active_cells is not None - ): - reg_dirs_temp = np.zeros((mesh.n_cells, mesh.dim, mesh.dim)) - reg_dirs_temp[self.active_cells] = reg_dirs - reg_dirs = reg_dirs_temp - else: - raise IndexError( - f"`reg_dirs` first dimension, {reg_dirs.shape[0]}, must be either number " - f"of active cells {mesh.n_cells}, or the number of mesh cells {mesh.n_cells}. " - ) - # check orthogonality? - if ortho_check: - eye = np.eye(mesh.dim) - for i, M in enumerate(reg_dirs): - if not np.allclose(eye, M @ M.T): - raise ValueError(f"Matrix {i} is not orthonormal") - # create a stack of matrices of dir @ alphas @ dir.T - anis_alpha = np.einsum("ink,ik,imk->inm", reg_dirs, anis_alpha, reg_dirs) - # Then select the upper diagonal components for input to discretize - if mesh.dim == 2: - anis_alpha = np.stack( - ( - anis_alpha[..., 0, 0], - anis_alpha[..., 1, 1], - anis_alpha[..., 0, 1], - ), - axis=-1, - ) - elif mesh.dim == 3: - anis_alpha = np.stack( - ( - anis_alpha[..., 0, 0], - anis_alpha[..., 1, 1], - anis_alpha[..., 2, 2], - anis_alpha[..., 0, 1], - anis_alpha[..., 0, 2], - anis_alpha[..., 1, 2], - ), - axis=-1, - ) - self._anis_alpha = anis_alpha - - @property - def reference_model_in_smooth(self) -> bool: - """ - whether to include reference model in gradient or not - - :return: True or False - """ - return self._reference_model_in_smooth - - @reference_model_in_smooth.setter - def reference_model_in_smooth(self, value: bool): - if 
not isinstance(value, bool): - raise TypeError( - f"'reference_model_in_smooth must be of type 'bool'. Value of type {type(value)} provided." - ) - self._reference_model_in_smooth = value - - def _delta_m(self, m): - if self.reference_model is None or not self.reference_model_in_smooth: - return m - return m - self.reference_model - - def f_m(self, m): - dfm_dl = self.cell_gradient @ (self.mapping * self._delta_m(m)) - - if self.units is not None and self.units.lower() == "radian": - return coterminal(dfm_dl * self._cell_distances) / self._cell_distances - return dfm_dl - - def f_m_deriv(self, m): - return self.cell_gradient @ self.mapping.deriv(self._delta_m(m)) - - # overwrite the call, deriv, and deriv2... - def __call__(self, m): - M_f = self.W - r = self.f_m(m) - return 0.5 * r @ M_f @ r - - def deriv(self, m): - m_d = self.f_m_deriv(m) - M_f = self.W - r = self.f_m(m) - return m_d.T @ (M_f @ r) - - def deriv2(self, m, v=None): - m_d = self.f_m_deriv(m) - M_f = self.W - if v is None: - return m_d.T @ (M_f @ m_d) - - return m_d.T @ (M_f @ (m_d @ v)) - - @property - def cell_gradient(self): - """The (approximate) cell gradient operator - - Returns - ------- - scipy.sparse.csr_matrix - """ - if getattr(self, "_cell_gradient", None) is None: - mesh = self.regularization_mesh.mesh - try: - cell_gradient = mesh.cell_gradient - except AttributeError: - a = mesh.face_areas - v = mesh.average_cell_to_face @ mesh.cell_volumes - cell_gradient = sp.diags(a / v) @ mesh.stencil_cell_gradient - - v = np.ones(mesh.n_cells) - # Turn off cell_gradient at boundary faces - if self.active_cells is not None: - v[~self.active_cells] = 0 - - dv = cell_gradient @ v - P = sp.diags((np.abs(dv) <= 1e-16).astype(int)) - cell_gradient = P @ cell_gradient - if self.active_cells is not None: - cell_gradient = cell_gradient[:, self.active_cells] - self._cell_gradient = cell_gradient - return self._cell_gradient - - @property - def W(self): - """The inner product operator using rotated 
coordinates - - Returns - ------- - scipy.sparse.csr_matrix - """ - if getattr(self, "_W", None) is None: - mesh = self.regularization_mesh.mesh - cell_weights = np.ones(len(mesh)) - for values in self._weights.values(): - # project values to full mesh - # dirty fix of original PR - projection = NearestNDInterpolator( - mesh.cell_centers[self.active_cells], values - ) - proj_values = projection(mesh.cell_centers) - cell_weights *= proj_values - reg_model = self._anis_alpha * cell_weights[:, None] - # turn off measure in inactive cells - if self.active_cells is not None: - reg_model[~self.active_cells] = 0.0 - - self._W = mesh.get_face_inner_product(reg_model) - return self._W - - def update_weights(self, m): - f_m = self.f_m(m) - irls_weights = self.get_lp_weights(f_m) - irls_weights = self.regularization_mesh.mesh.average_face_to_cell @ irls_weights - self.set_weights(irls=irls_weights[self.active_cells]) - - def get_lp_weights(self, f_m): - lp_scale = np.ones_like(f_m) - if self.irls_scaled: - # Scale on l2-norm gradient: f_m.max() - l2_max = np.ones_like(f_m) * np.abs(f_m).max() - # Compute theoretical maximum gradients for p < 1 - l2_max[self.norm < 1] = self.irls_threshold / np.sqrt( - 1.0 - self.norm[self.norm < 1] - ) - lp_values = l2_max / (l2_max**2.0 + self.irls_threshold**2.0) ** ( - 1.0 - self.norm / 2.0 - ) - lp_scale[lp_values != 0] = np.abs(f_m).max() / lp_values[lp_values != 0] - - return lp_scale / (f_m**2.0 + self.irls_threshold**2.0) ** ( - 1.0 - self.norm / 2.0 - ) - - @property - def irls_scaled(self) -> bool: - """Scale IRLS weights. - - When ``True``, scaling is applied when computing IRLS weights. - The scaling acts to preserve the balance between the data misfit and the components of - the regularization based on the derivative of the l2-norm measure. And it assists the - convergence by ensuring the model does not deviate - aggressively from the global 2-norm solution during the first few IRLS iterations. 
- For a comprehensive description, see the documentation for :py:meth:`get_lp_weights` . - - Returns - ------- - bool - Whether to scale IRLS weights. - """ - return self._irls_scaled - - @irls_scaled.setter - def irls_scaled(self, value: bool): - self._irls_scaled = validate_type("irls_scaled", value, bool, cast=False) - - @property - def irls_threshold(self): - r"""Stability constant for computing IRLS weights. - - Returns - ------- - float - Stability constant for computing IRLS weights. - """ - return self._irls_threshold - - @irls_threshold.setter - def irls_threshold(self, value): - self._irls_threshold = validate_float( - "irls_threshold", value, min_val=0.0, inclusive_min=False - ) - - @property - def norm(self): - r"""Norm for the sparse regularization. - - Returns - ------- - None, float, (n_cells, ) numpy.ndarray - Norm for the sparse regularization. If ``None``, a 2-norm is used. - A float within the interval [0,2] represents a constant norm applied for all cells. - A ``numpy.ndarray`` object, where each entry is used to apply a different norm to each cell in the mesh. - """ - return self._norm - - @norm.setter - def norm(self, value: float | np.ndarray | None): - if value is None: - value = np.ones(self.cell_gradient.shape[0]) * 2.0 - else: - value = np.ones(self.cell_gradient.shape[0]) * value - if np.any(value < 0) or np.any(value > 2): - raise ValueError( - "Value provided for 'norm' should be in the interval [0, 2]" - ) - self._norm = value - - @property - def units(self) -> str | None: - """Units for the model parameters. - - Some regularization classes behave differently depending on the units; e.g. 'radian'. - - Returns - ------- - str - Units for the model parameters. - """ - return self._units - - @units.setter - def units(self, units: str | None): - if units is not None and not isinstance(units, str): - raise TypeError( - f"'units' must be None or type str. Value of type {type(units)} provided." 
- ) - self._units = units - - @property - def _cell_distances(self) -> np.ndarray: - """ - cell size average on faces - - :return: np.ndarray - """ - cell_distances = self.cell_gradient.max(axis=1).toarray().ravel() - cell_distances[cell_distances == 0] = 1 - cell_distances = cell_distances ** (-1) - - return cell_distances - - -class RotatedSparse(Sparse): - """ - Class that wraps the rotated gradients in a ComboObjectiveFunction similar to Sparse. - """ - - def __init__( - self, - mesh: TensorMesh | TreeMesh, - reg_dirs: np.ndarray, - alphas_rot: tuple[float, float, float], - active_cells: np.ndarray | None = None, - norms: list[float] = [2.0, 2.0], - gradient_type: Literal["components", "total"] = "total", - irls_scaled: bool = True, - irls_threshold: float = 1e-8, - objfcts: list[BaseRegularization] | None = None, - **kwargs, - ): - """ - Class to wrap rotated gradient into a ComboObjective Function - - :param mesh: mesh - :param reg_dirs: rotation matrix - :param alphas_rot: alphas for rotated gradients - :param active_cells: active cells, defaults to None - :param norms: norms, defaults to [2, 2] - :param gradient_type: gradient_type, defaults to "total" - :param irls_scaled: irls_scaled, defaults to True - :param irls_threshold: irls_threshold, defaults to 1e-8 - :param objfcts: objfcts, defaults to None - """ - if not isinstance(mesh, RegularizationMesh): - mesh = RegularizationMesh(mesh) - - if not isinstance(mesh, RegularizationMesh): - TypeError( - f"'regularization_mesh' must be of type {RegularizationMesh} or {BaseMesh}. " - f"Value of type {type(mesh)} provided." 
- ) - self._regularization_mesh = mesh - if active_cells is not None: - self._regularization_mesh.active_cells = active_cells - - if objfcts is None: - objfcts = [ - SparseSmallness(mesh=self.regularization_mesh), - SmoothnessFullGradient( - mesh=self.regularization_mesh.mesh, - active_cells=active_cells, - reg_dirs=reg_dirs, - alphas=alphas_rot, - norm=norms[1], - irls_scaled=irls_scaled, - irls_threshold=irls_threshold, - ), - ] - - super().__init__( - self.regularization_mesh, - objfcts=objfcts, - active_cells=active_cells, - gradient_type=gradient_type, - norms=norms[:2], - irls_scaled=irls_scaled, - irls_threshold=irls_threshold, - **kwargs, - ) - - @property - def alpha_y(self): - """Multiplier constant for first-order smoothness along y. - - Returns - ------- - float - Multiplier constant for first-order smoothness along y. - """ - return self._alpha_y - - @alpha_y.setter - def alpha_y(self, value): - self._alpha_y = None - - @property - def alpha_z(self): - """Multiplier constant for first-order smoothness along z. - - Returns - ------- - float - Multiplier constant for first-order smoothness along z. - """ - return self._alpha_z - - @alpha_z.setter - def alpha_z(self, value): - self._alpha_z = None diff --git a/SimPEG/regularization/sparse.py b/SimPEG/regularization/sparse.py index e1602971c0..43d5916a03 100644 --- a/SimPEG/regularization/sparse.py +++ b/SimPEG/regularization/sparse.py @@ -257,7 +257,7 @@ class SparseSmallness(BaseSparse, Smallness): We define the regularization function (objective function) for sparse smallness (compactness) as: .. math:: - \phi (m) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m) = \int_\Omega \, w(r) \, \Big | \, m(r) - m^{(ref)}(r) \, \Big |^{p(r)} \, dv where :math:`m(r)` is the model, :math:`m^{(ref)}(r)` is the reference model, :math:`w(r)` @@ -271,7 +271,7 @@ class SparseSmallness(BaseSparse, Smallness): function (objective function) is expressed in linear form as: .. 
math:: - \phi (\mathbf{m}) = \frac{1}{2} \sum_i + \phi (\mathbf{m}) = \sum_i \tilde{w}_i \, \Big | m_i - m_i^{(ref)} \Big |^{p_i} where :math:`m_i \in \mathbf{m}` are the discrete model parameters defined on the mesh. @@ -286,8 +286,8 @@ class SparseSmallness(BaseSparse, Smallness): .. math:: \phi \big (\mathbf{m}^{(k)} \big ) - = \frac{1}{2} \sum_i \tilde{w}_i \, \Big | m_i^{(k)} - m_i^{(ref)} \Big |^{p_i} - \approx \frac{1}{2} \sum_i \tilde{w}_i \, r_i^{(k)} \Big | m_i^{(k)} - m_i^{(ref)} \Big |^2 + = \sum_i \tilde{w}_i \, \Big | m_i^{(k)} - m_i^{(ref)} \Big |^{p_i} + \approx \sum_i \tilde{w}_i \, r_i^{(k)} \Big | m_i^{(k)} - m_i^{(ref)} \Big |^2 where the IRLS weight :math:`r_i` for iteration :math:`k` is given by: @@ -300,7 +300,7 @@ class SparseSmallness(BaseSparse, Smallness): function for IRLS iteration :math:`k` can be expressed as follows: .. math:: - \phi \big ( \mathbf{m}^{(k)} \big ) \approx \frac{1}{2} \Big \| \, + \phi \big ( \mathbf{m}^{(k)} \big ) \approx \Big \| \, \mathbf{W}^{\! (k)} \big [ \mathbf{m}^{(k)} - \mathbf{m}^{(ref)} \big ] \Big \|^2 where @@ -464,7 +464,7 @@ class SparseSmoothness(BaseSparse, SmoothnessFirstOrder): along the x-direction as: .. math:: - \phi (m) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m) = \int_\Omega \, w(r) \, \Bigg | \, \frac{\partial m}{\partial x} \, \Bigg |^{p(r)} \, dv where :math:`m(r)` is the model, :math:`w(r)` @@ -478,7 +478,7 @@ class SparseSmoothness(BaseSparse, SmoothnessFirstOrder): function (objective function) is expressed in linear form as: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \sum_i + \phi (\mathbf{m}) = \sum_i \tilde{w}_i \, \Bigg | \, \frac{\partial m_i}{\partial x} \, \Bigg |^{p_i} where :math:`m_i \in \mathbf{m}` are the discrete model parameters defined on the mesh. @@ -493,9 +493,9 @@ class SparseSmoothness(BaseSparse, SmoothnessFirstOrder): .. 
math:: \phi \big (\mathbf{m}^{(k)} \big ) - = \frac{1}{2} \sum_i + = \sum_i \tilde{w}_i \, \Bigg | \, \frac{\partial m_i^{(k)}}{\partial x} \Bigg |^{p_i} - \approx \frac{1}{2} \sum_i \tilde{w}_i \, r_i^{(k)} + \approx \sum_i \tilde{w}_i \, r_i^{(k)} \Bigg | \, \frac{\partial m_i^{(k)}}{\partial x} \Bigg |^2 where the IRLS weight :math:`r_i` for iteration :math:`k` is given by: @@ -509,7 +509,7 @@ class SparseSmoothness(BaseSparse, SmoothnessFirstOrder): function for IRLS iteration :math:`k` can be expressed as follows: .. math:: - \phi \big ( \mathbf{m}^{(k)} \big ) \approx \frac{1}{2} \Big \| \, + \phi \big ( \mathbf{m}^{(k)} \big ) \approx \Big \| \, \mathbf{W}^{(k)} \, \mathbf{G_x} \, \mathbf{m}^{(k)} \Big \|^2 where @@ -528,7 +528,7 @@ class SparseSmoothness(BaseSparse, SmoothnessFirstOrder): In this case, the least-squares problem for IRLS iteration :math:`k` becomes: .. math:: - \phi \big ( \mathbf{m}^{(k)} \big ) \approx \frac{1}{2} \Big \| \, + \phi \big ( \mathbf{m}^{(k)} \big ) \approx \Big \| \, \mathbf{W}^{(k)} \mathbf{G_x} \big [ \mathbf{m}^{(k)} - \mathbf{m}^{(ref)} \big ] \Big \|^2 @@ -577,10 +577,13 @@ class SparseSmoothness(BaseSparse, SmoothnessFirstOrder): """ def __init__(self, mesh, orientation="x", gradient_type="total", **kwargs): - if "gradientType" in kwargs: - self.gradientType = kwargs.pop("gradientType") - else: - self.gradient_type = gradient_type + # Raise error if removed arguments were passed + if (key := "gradientType") in kwargs: + raise TypeError( + f"'{key}' argument has been removed. " + "Please use 'gradient_type' instead." + ) + self.gradient_type = gradient_type super().__init__(mesh=mesh, orientation=orientation, **kwargs) def update_weights(self, m): @@ -695,8 +698,7 @@ def gradient_type(self, value: str): "gradientType", new_name="gradient_type", removal_version="0.19.0", - error=False, - future_warn=True, + error=True, ) @@ -765,9 +767,9 @@ class Sparse(WeightedLeastSquares): :math:`\phi_m (m)` of the form: .. 
math:: - \phi_m (m) = \frac{\alpha_s}{2} \int_\Omega \, w(r) + \phi_m (m) = \alpha_s \int_\Omega \, w(r) \Big | \, m(r) - m^{(ref)}(r) \, \Big |^{p_s(r)} \, dv - + \sum_{j=x,y,z} \frac{\alpha_j}{2} \int_\Omega \, w(r) + + \sum_{j=x,y,z} \alpha_j \int_\Omega \, w(r) \Bigg | \, \frac{\partial m}{\partial \xi_j} \, \Bigg |^{p_j(r)} \, dv where :math:`m(r)` is the model, :math:`m^{(ref)}(r)` is the reference model, and :math:`w(r)` @@ -819,9 +821,9 @@ class Sparse(WeightedLeastSquares): objective functions of the form: .. math:: - \phi_m (\mathbf{m}) = \frac{\alpha_s}{2} + \phi_m (\mathbf{m}) = \alpha_s \Big \| \mathbf{W_s}^{\!\! (k)} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 - + \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \mathbf{W_j}^{\! (k)} \mathbf{G_j \, m} \Big \|^2 + + \sum_{j=x,y,z} \alpha_j \Big \| \mathbf{W_j}^{\! (k)} \mathbf{G_j \, m} \Big \|^2 where @@ -878,9 +880,9 @@ class Sparse(WeightedLeastSquares): the objective function becomes: .. math:: - \phi_m (\mathbf{m}) = \frac{\alpha_s}{2} + \phi_m (\mathbf{m}) = \alpha_s \Big \| \mathbf{W_s}^{\! (k)} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 - + \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| + + \sum_{j=x,y,z} \alpha_j \Big \| \mathbf{W_j}^{\! (k)} \mathbf{G_j} \big [ \mathbf{m} - \mathbf{m}^{(ref)} \big ] \Big \|^2 This functionality is used by setting the `reference_model_in_smooth` parameter @@ -930,6 +932,14 @@ def __init__( f"'regularization_mesh' must be of type {RegularizationMesh} or {BaseMesh}. " f"Value of type {type(mesh)} provided." ) + + # Raise error if removed arguments were passed + if (key := "gradientType") in kwargs: + raise TypeError( + f"'{key}' argument has been removed. " + "Please use 'gradient_type' instead." 
+ ) + self._regularization_mesh = mesh if active_cells is not None: self._regularization_mesh.active_cells = active_cells @@ -950,7 +960,6 @@ def __init__( SparseSmoothness(mesh=self.regularization_mesh, orientation="z") ) - gradientType = kwargs.pop("gradientType", None) super().__init__( self.regularization_mesh, objfcts=objfcts, @@ -959,13 +968,7 @@ def __init__( if norms is None: norms = [1] * (mesh.dim + 1) self.norms = norms - - if gradientType is not None: - # Trigger deprecation warning - self.gradientType = gradientType - else: - self.gradient_type = gradient_type - + self.gradient_type = gradient_type self.irls_scaled = irls_scaled self.irls_threshold = irls_threshold @@ -995,7 +998,7 @@ def gradient_type(self, value: str): self._gradient_type = value gradientType = utils.code_utils.deprecate_property( - gradient_type, "gradientType", "0.19.0", error=False, future_warn=True + gradient_type, "gradientType", "0.19.0", error=True ) @property diff --git a/SimPEG/regularization/vector.py b/SimPEG/regularization/vector.py index 2341b4d82c..19bd68f080 100644 --- a/SimPEG/regularization/vector.py +++ b/SimPEG/regularization/vector.py @@ -82,7 +82,7 @@ class CrossReferenceRegularization(Smallness, BaseVectorRegularization): regularization is given by: .. math:: - \phi (\vec{m}) = \frac{1}{2} \int_\Omega \, \vec{w}(r) \, \cdot \, + \phi (\vec{m}) = \int_\Omega \, \vec{w}(r) \, \cdot \, \Big [ \vec{m}(r) \, \times \, \vec{m}^{(ref)}(r) \Big ]^2 \, dv where :math:`\vec{m}^{(ref)}(r)` is the reference model vector and :math:`\vec{w}(r)` @@ -93,7 +93,7 @@ class CrossReferenceRegularization(Smallness, BaseVectorRegularization): function (objective function) is given by: .. 
math:: - \phi (\vec{m}) \approx \frac{1}{2} \sum_i \tilde{w}_i \, \cdot \, + \phi (\vec{m}) \approx \sum_i \tilde{w}_i \, \cdot \, \Big | \vec{m}_i \, \times \, \vec{m}_i^{(ref)} \Big |^2 where :math:`\tilde{m}_i \in \mathbf{m}` are the model vectors at cell centers and @@ -129,7 +129,7 @@ class CrossReferenceRegularization(Smallness, BaseVectorRegularization): The discrete regularization function in linear form can ultimately be expressed as: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} + \phi (\mathbf{m}) = \Big \| \mathbf{W X m} \, \Big \|^2 @@ -262,7 +262,7 @@ def f_m(self, m): The objective function for cross reference regularization is given by: .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W X m} \, \Big \|^2 where :math:`\mathbf{m}` are the discrete vector model parameters defined on the mesh (model), @@ -277,7 +277,7 @@ def f_m(self, m): such that .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 """ return self._X @ (self.mapping * m) @@ -309,7 +309,7 @@ def f_m_deriv(self, m): The objective function for cross reference regularization is given by: .. math:: - \phi_m (\mathbf{m}) = \frac{1}{2} + \phi_m (\mathbf{m}) = \Big \| \mathbf{W X m} \, \Big \|^2 where :math:`\mathbf{m}` are the discrete vector model parameters defined on the mesh (model), @@ -324,7 +324,7 @@ def f_m_deriv(self, m): such that .. 
math:: - \phi_m (\mathbf{m}) = \frac{1}{2} \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 + \phi_m (\mathbf{m}) = \Big \| \mathbf{W} \, \mathbf{f_m} \Big \|^2 Thus, the derivative with respect to the model is: @@ -423,11 +423,15 @@ def deriv(self, m) -> np.ndarray: """ d_m = self._delta_m(m) - return self.f_m_deriv(m).T * ( - self.W.T - @ self.W - @ (self.f_m_deriv(m) @ d_m).reshape((-1, self.n_comp), order="F") - ).flatten(order="F") + return ( + 2 + * self.f_m_deriv(m).T + * ( + self.W.T + @ self.W + @ (self.f_m_deriv(m) @ d_m).reshape((-1, self.n_comp), order="F") + ).flatten(order="F") + ) def deriv2(self, m, v=None) -> csr_matrix: r"""Hessian of the regularization function evaluated for the model provided. @@ -460,13 +464,21 @@ def deriv2(self, m, v=None) -> csr_matrix: f_m_deriv = self.f_m_deriv(m) if v is None: - return f_m_deriv.T * ( - sp.block_diag([self.W.T * self.W] * self.n_comp) * f_m_deriv + return ( + 2 + * f_m_deriv.T + * (sp.block_diag([self.W.T * self.W] * self.n_comp) * f_m_deriv) ) - return f_m_deriv.T * ( - self.W.T @ self.W @ (f_m_deriv * v).reshape((-1, self.n_comp), order="F") - ).flatten(order="F") + return ( + 2 + * f_m_deriv.T + * ( + self.W.T + @ self.W + @ (f_m_deriv * v).reshape((-1, self.n_comp), order="F") + ).flatten(order="F") + ) class AmplitudeSmallness(SparseSmallness, BaseAmplitude): @@ -519,7 +531,7 @@ class AmplitudeSmallness(SparseSmallness, BaseAmplitude): (compactness) as: .. math:: - \phi (\vec{m}) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (\vec{m}) = \int_\Omega \, w(r) \, \Big | \, \vec{m}(r) - \vec{m}^{(ref)}(r) \, \Big |^{p(r)} \, dv where :math:`\vec{m}(r)` is the model, :math:`\vec{m}^{(ref)}(r)` is the reference model, :math:`w(r)` @@ -533,7 +545,7 @@ class AmplitudeSmallness(SparseSmallness, BaseAmplitude): function (objective function) is expressed in linear form as: .. 
math:: - \phi (\mathbf{m}) = \frac{1}{2} \sum_i + \phi (\mathbf{m}) = \sum_i \tilde{w}_i \, \Big | \vec{m}_i - \vec{m}_i^{(ref)} \Big |^{p_i} where :math:`\mathbf{m}` are the model parameters, :math:`\vec{m}_i` represents the vector @@ -549,8 +561,8 @@ class AmplitudeSmallness(SparseSmallness, BaseAmplitude): .. math:: \phi \big (\mathbf{m}^{(k)} \big ) - = \frac{1}{2} \sum_i \tilde{w}_i \, \Big | \, \vec{m}_i^{(k)} - \vec{m}_i^{(ref)} \, \Big |^{p_i} - \approx \frac{1}{2} \sum_i \tilde{w}_i \, r_i^{(k)} + = \sum_i \tilde{w}_i \, \Big | \, \vec{m}_i^{(k)} - \vec{m}_i^{(ref)} \, \Big |^{p_i} + \approx \sum_i \tilde{w}_i \, r_i^{(k)} \Big | \, \vec{m}_i^{(k)} - \vec{m}_i^{(ref)} \, \Big |^2 where the IRLS weight :math:`r_i` for iteration :math:`k` is given by: @@ -578,7 +590,7 @@ class AmplitudeSmallness(SparseSmallness, BaseAmplitude): The objective function for IRLS iteration :math:`k` is given by: .. math:: - \phi \big ( \mathbf{\bar{m}}^{(k)} \big ) \approx \frac{1}{2} \Big \| \, + \phi \big ( \mathbf{\bar{m}}^{(k)} \big ) \approx \Big \| \, \mathbf{W}^{(k)} \, \mathbf{\bar{m}}^{(k)} \; \Big \|^2 where @@ -738,7 +750,7 @@ class AmplitudeSmoothnessFirstOrder(SparseSmoothness, BaseAmplitude): along the x-direction as: .. math:: - \phi (m) = \frac{1}{2} \int_\Omega \, w(r) \, + \phi (m) = \int_\Omega \, w(r) \, \Bigg | \, \frac{\partial |\vec{m}|}{\partial x} \, \Bigg |^{p(r)} \, dv where :math:`\vec{m}(r)` is the model, :math:`w(r)` @@ -752,7 +764,7 @@ class AmplitudeSmoothnessFirstOrder(SparseSmoothness, BaseAmplitude): function (objective function) is expressed in linear form as: .. math:: - \phi (\mathbf{m}) = \frac{1}{2} \sum_i + \phi (\mathbf{m}) = \sum_i \tilde{w}_i \, \Bigg | \, \frac{\partial |\vec{m}_i|}{\partial x} \, \Bigg |^{p_i} where :math:`\vec{m}_i` is the vector defined for mesh cell :math:`i`. @@ -767,9 +779,9 @@ class AmplitudeSmoothnessFirstOrder(SparseSmoothness, BaseAmplitude): .. 
math:: \phi \big (\mathbf{m}^{(k)} \big ) - = \frac{1}{2} \sum_i + = \sum_i \tilde{w}_i \, \left | \, \frac{\partial \big | \vec{m}_i^{(k)} \big | }{\partial x} \right |^{p_i} - \approx \frac{1}{2} \sum_i \tilde{w}_i \, r_i^{(k)} + \approx \sum_i \tilde{w}_i \, r_i^{(k)} \left | \, \frac{\partial \big | \vec{m}_i^{(k)} \big | }{\partial x} \right |^2 where the IRLS weight :math:`r_i` for iteration :math:`k` is given by: @@ -794,7 +806,7 @@ class AmplitudeSmoothnessFirstOrder(SparseSmoothness, BaseAmplitude): The objective function for IRLS iteration :math:`k` is given by: .. math:: - \phi \big ( \mathbf{m}^{(k)} \big ) \approx \frac{1}{2} \Big \| \, + \phi \big ( \mathbf{m}^{(k)} \big ) \approx \Big \| \, \mathbf{W}^{(k)} \, \mathbf{G_x} \, \mathbf{\bar{m}}^{(k)} \Big \|^2 where @@ -813,7 +825,7 @@ class AmplitudeSmoothnessFirstOrder(SparseSmoothness, BaseAmplitude): In this case, the least-squares problem for IRLS iteration :math:`k` becomes: .. math:: - \phi \big ( \mathbf{m}^{(k)} \big ) \approx \frac{1}{2} \Big \| \, + \phi \big ( \mathbf{m}^{(k)} \big ) \approx \Big \| \, \mathbf{W}^{(k)} \, \mathbf{G_x} \, \mathbf{\bar{m}}^{(k)} \Big \|^2 where @@ -1042,9 +1054,9 @@ class VectorAmplitude(Sparse): :math:`\phi_m (m)` of the form: .. math:: - \phi_m (m) = \frac{\alpha_s}{2} \int_\Omega \, w(r) + \phi_m (m) = \alpha_s \int_\Omega \, w(r) \Big | \, \vec{m}(r) - \vec{m}^{(ref)}(r) \, \Big |^{p_s(r)} \, dv - + \sum_{j=x,y,z} \frac{\alpha_j}{2} \int_\Omega \, w(r) + + \sum_{j=x,y,z} \alpha_j \int_\Omega \, w(r) \Bigg | \, \frac{\partial |\vec{m}|}{\partial \xi_j} \, \bigg |^{p_j(r)} \, dv where :math:`\vec{m}(r)` is the model, :math:`\vec{m}^{(ref)}(r)` is the reference model, @@ -1104,9 +1116,9 @@ class VectorAmplitude(Sparse): objective functions of the form: .. math:: - \phi_m (\mathbf{m}) = \frac{\alpha_s}{2} + \phi_m (\mathbf{m}) = \alpha_s \Big \| \, \mathbf{W_s}^{\! 
(k)} \, \Delta \mathbf{\bar{m}} \, \Big \|^2 - + \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \, \mathbf{W_j}^{\! (k)} \mathbf{G_j \, \bar{m}} \, \Big \|^2 + + \sum_{j=x,y,z} \alpha_j \Big \| \, \mathbf{W_j}^{\! (k)} \mathbf{G_j \, \bar{m}} \, \Big \|^2 where @@ -1171,9 +1183,9 @@ class VectorAmplitude(Sparse): the objective function becomes: .. math:: - \phi_m (\mathbf{m}) = \frac{\alpha_s}{2} + \phi_m (\mathbf{m}) = \alpha_s \Big \| \, \mathbf{W_s}^{\! (k)} \, \Delta \mathbf{\bar{m}} \, \Big \|^2 - + \sum_{j=x,y,z} \frac{\alpha_j}{2} \Big \| \, \mathbf{W_j}^{\! (k)} \mathbf{G_j \, \Delta \bar{m}} \, \Big \|^2 + + \sum_{j=x,y,z} \alpha_j \Big \| \, \mathbf{W_j}^{\! (k)} \mathbf{G_j \, \Delta \bar{m}} \, \Big \|^2 This functionality is used by setting the `reference_model_in_smooth` parameter to ``True``. diff --git a/SimPEG/seismic/straight_ray_tomography/__init__.py b/SimPEG/seismic/straight_ray_tomography/__init__.py index 68edbf39c3..69e8a06036 100644 --- a/SimPEG/seismic/straight_ray_tomography/__init__.py +++ b/SimPEG/seismic/straight_ray_tomography/__init__.py @@ -24,6 +24,7 @@ """ + from .simulation import Simulation2DIntegral as Simulation from .survey import StraightRaySurvey as Survey from ...survey import BaseSrc as Src diff --git a/SimPEG/simulation.py b/SimPEG/simulation.py index c35bd484dd..f6cd144b1a 100644 --- a/SimPEG/simulation.py +++ b/SimPEG/simulation.py @@ -1,6 +1,7 @@ """ -Define simulation classes +Define simulation classes. """ + import os import inspect import numpy as np @@ -8,7 +9,7 @@ from discretize.base import BaseMesh from discretize import TensorMesh -from discretize.utils import unpack_widths +from discretize.utils import unpack_widths, sdiag from . import props from .data import SyntheticData, Data @@ -41,10 +42,39 @@ class BaseSimulation(props.HasModel): + r"""Base class for all geophysical forward simulations in SimPEG. 
+ + The ``BaseSimulation`` class defines properties and methods inherited by + practical simulation classes in SimPEG. + + .. important:: + This class is not meant to be instantiated. You should inherit from it to + create your own simulation class. + + Parameters + ---------- + mesh : discretize.base.BaseMesh, optional + Mesh on which the forward problem is discretized. + survey : SimPEG.survey.BaseSurvey, optional + The survey for the simulation. + solver : None or pymatsolver.base.Base, optional + Numerical solver used to solve the forward problem. If ``None``, + an appropriate solver specific to the simulation class is set by default. + solver_opts : dict, optional + Solver-specific parameters. If ``None``, default parameters are used for + the solver set by ``solver``. Otherwise, the ``dict`` must contain appropriate + pairs of keyword arguments and parameter values for the solver. Please visit + `pymatsolver `__ to learn more + about solvers and their parameters. + sensitivity_path : str, optional + Path to directory where sensitivity file is stored. + counter : None or SimPEG.utils.Counter + SimPEG ``Counter`` object to store iterations and run-times. + verbose : bool, optional + Verbose progress printout. """ - BaseSimulation is the base class for all geophysical forward simulations in - SimPEG. - """ + + _REGISTRY = {} def __init__( self, @@ -73,18 +103,16 @@ def __init__( super().__init__(**kwargs) - ########################################################################### - # Properties - - _REGISTRY = {} - @property def mesh(self): - """Discretize mesh for the simulation + """Mesh for the simulation. + + For more on meshes, visit :py:class:`discretize.base.BaseMesh`. Returns ------- discretize.base.BaseMesh + Mesh on which the forward problem is discretized. """ return self._mesh @@ -101,6 +129,7 @@ def survey(self): Returns ------- SimPEG.survey.BaseSurvey + The survey for the simulation. 
""" return self._survey @@ -112,11 +141,12 @@ def survey(self, value): @property def counter(self): - """The counter. + """SimPEG ``Counter`` object to store iterations and run-times. Returns ------- None or SimPEG.utils.Counter + SimPEG ``Counter`` object to store iterations and run-times. """ return self._counter @@ -128,11 +158,12 @@ def counter(self, value): @property def sensitivity_path(self): - """Path to store the sensitivity. + """Path to directory where sensitivity file is stored. Returns ------- str + Path to directory where sensitivity file is stored. """ return self._sensitivity_path @@ -142,13 +173,25 @@ def sensitivity_path(self, value): @property def solver(self): - """Linear algebra solver (e.g. from pymatsolver). + r"""Numerical solver used in the forward simulation. + + Many forward simulations in SimPEG require solutions to discrete linear + systems of the form: + + .. math:: + \mathbf{A}(\mathbf{m}) \, \mathbf{u} = \mathbf{q} + + where :math:`\mathbf{A}` is an invertible matrix that depends on the + model :math:`\mathbf{m}`. The numerical solver can be set using the + ``solver`` property. In SimPEG, the + `pymatsolver `__ package + is used to create solver objects. Parameters specific to each solver + can be set manually using the ``solver_opts`` property. Returns ------- - class - A solver class that, when instantiated allows a multiplication with the - returned object. + pymatsolver.base.Base + Numerical solver used to solve the forward problem. """ return self._solver @@ -163,12 +206,18 @@ def solver(self, cls): @property def solver_opts(self): - """Options passed to the `solver` class on initialization. + """Solver-specific parameters. + + The parameters specific to the solver set with the ``solver`` property are set + upon instantiation. The ``solver_opts`` property is used to set solver-specific properties. + This is done by providing a ``dict`` that contains appropriate pairs of keyword arguments + and parameter values. 
Please visit `pymatsolver `__ + to learn more about solvers and their parameters. Returns ------- dict - Passed as keyword arguments to the solver. + keyword arguments and parameters passed to the solver. """ return self._solver_opts @@ -178,11 +227,12 @@ def solver_opts(self, value): @property def verbose(self): - """Verbosity flag. + """Verbose progress printout. Returns ------- bool + Verbose progress printout status. """ return self._verbose @@ -190,31 +240,37 @@ def verbose(self): def verbose(self, value): self._verbose = validate_type("verbose", value, bool) - ########################################################################### - # Methods - def fields(self, m=None): - """ - u = fields(m) - The field given the model. - :param numpy.ndarray m: model - :rtype: numpy.ndarray - :return: u, the fields + r"""Return the computed geophysical fields for the model provided. + + Parameters + ---------- + m : (n_param,) numpy.ndarray + The model parameters. + + Returns + ------- + SimPEG.fields.Fields + Computed geophysical fields for the model provided. + """ raise NotImplementedError("fields has not been implemented for this ") def dpred(self, m=None, f=None): - r""" - dpred(m, f=None) - Create the projected data from a model. - The fields, f, (if provided) will be used for the predicted data - instead of recalculating the fields (which may be expensive!). - - .. math:: + r"""Predicted data for the model provided. - d_\text{pred} = P(f(m)) + Parameters + ---------- + m : (n_param,) numpy.ndarray + The model parameters. + f : SimPEG.fields.Fields, optional + If provided, will be used to compute the predicted data + without recalculating the fields. - Where P is a projection of the fields onto the data space. + Returns + ------- + (n_data, ) numpy.ndarray + The predicted data vector. 
""" if self.survey is None: raise AttributeError( @@ -237,51 +293,139 @@ def dpred(self, m=None, f=None): @timeIt def Jvec(self, m, v, f=None): - """ - Jv = Jvec(m, v, f=None) - Effect of J(m) on a vector v. - :param numpy.ndarray m: model - :param numpy.ndarray v: vector to multiply - :param Fields f: fields - :rtype: numpy.ndarray - :return: Jv + r"""Compute the Jacobian times a vector for the model provided. + + The Jacobian defines the derivative of the predicted data vector with respect to the + model parameters. For a data vector :math:`\mathbf{d}` predicted for a set of model parameters + :math:`\mathbf{m}`, the Jacobian is an (n_data, n_param) matrix whose elements + are given by: + + .. math:: + J_{ij} = \frac{\partial d_i}{\partial m_j} + + For a model `m` and vector `v`, the ``Jvec`` method computes the matrix-vector product + + .. math:: + \mathbf{u} = \mathbf{J \, v} + + Parameters + ---------- + m : (n_param, ) numpy.ndarray + The model parameters. + v : (n_param, ) numpy.ndarray + Vector we are multiplying. + f : SimPEG.field.Fields, optional + If provided, fields will not need to be recomputed for the + current model to compute `Jvec`. + + Returns + ------- + (n_data, ) numpy.ndarray + The Jacobian times a vector for the model and vector provided. """ raise NotImplementedError("Jvec is not yet implemented.") @timeIt def Jtvec(self, m, v, f=None): + r"""Compute the Jacobian transpose times a vector for the model provided. + + The Jacobian defines the derivative of the predicted data vector with respect to the + model parameters. For a data vector :math:`\mathbf{d}` predicted for a set of model parameters + :math:`\mathbf{m}`, the Jacobian is an ``(n_data, n_param)`` matrix whose elements + are given by: + + .. math:: + J_{ij} = \frac{\partial d_i}{\partial m_j} + + For a model `m` and vector `v`, the ``Jtvec`` method computes the matrix-vector product with the adjoint-sensitivity + + .. 
math:: + \mathbf{u} = \mathbf{J^T \, v} + + Parameters + ---------- + m : (n_param, ) numpy.ndarray + The model parameters. + v : (n_data, ) numpy.ndarray + Vector we are multiplying. + f : SimPEG.field.Fields, optional + If provided, fields will not need to be recomputed for the + current model to compute `Jtvec`. + + Returns + ------- + (n_param, ) numpy.ndarray + The Jacobian transpose times a vector for the model and vector provided. """ - Jtv = Jtvec(m, v, f=None) - Effect of transpose of J(m) on a vector v. - :param numpy.ndarray m: model - :param numpy.ndarray v: vector to multiply - :param Fields f: fields - :rtype: numpy.ndarray - :return: JTv - """ - raise NotImplementedError("Jt is not yet implemented.") + raise NotImplementedError("Jtvec is not yet implemented.") @timeIt def Jvec_approx(self, m, v, f=None): - """Jvec_approx(m, v, f=None) - Approximate effect of J(m) on a vector v - :param numpy.ndarray m: model - :param numpy.ndarray v: vector to multiply - :param Fields f: fields - :rtype: numpy.ndarray - :return: approxJv + r"""Approximation of the Jacobian times a vector for the model provided. + + The Jacobian defines the derivative of the predicted data vector with respect to the + model parameters. For a data vector :math:`\mathbf{d}` predicted for a set of model parameters + :math:`\mathbf{m}`, the Jacobian is an ``(n_data, n_param)`` matrix whose elements + are given by: + + .. math:: + J_{ij} = \frac{\partial d_i}{\partial m_j} + + For a model `m` and vector `v`, the ``Jvec_approx`` method **approximates** + the matrix-vector product: + + .. math:: + \mathbf{u} = \mathbf{J \, v} + + Parameters + ---------- + m : (n_param, ) numpy.ndarray + The model parameters. + v : (n_param, ) numpy.ndarray + Vector we are multiplying. + f : SimPEG.field.Fields, optional + If provided, fields will not need to be recomputed for the + current model to compute `Jvec_approx`.
+ + Returns + ------- + (n_data, ) numpy.ndarray + Approximation of the Jacobian times a vector for the model provided. """ return self.Jvec(m, v, f) @timeIt def Jtvec_approx(self, m, v, f=None): - """Jtvec_approx(m, v, f=None) - Approximate effect of transpose of J(m) on a vector v. - :param numpy.ndarray m: model - :param numpy.ndarray v: vector to multiply - :param Fields f: fields - :rtype: numpy.ndarray - :return: JTv + r"""Approximation of the Jacobian transpose times a vector for the model provided. + + The Jacobian defines the derivative of the predicted data vector with respect to the + model parameters. For a data vector :math:`\mathbf{d}` predicted for a set of model parameters + :math:`\mathbf{m}`, the Jacobian is an ``(n_data, n_param)`` matrix whose elements + are given by: + + .. math:: + J_{ij} = \frac{\partial d_i}{\partial m_j} + + For a model `m` and vector `v`, the ``Jtvec_approx`` method **approximates** + the matrix-vector product: + + .. math:: + \mathbf{u} = \mathbf{J^T \, v} + + Parameters + ---------- + m : (n_param, ) numpy.ndarray + The model parameters. + v : (n_data, ) numpy.ndarray + Vector we are multiplying. + f : SimPEG.field.Fields, optional + If provided, fields will not need to be recomputed for the + current model to compute `Jtvec`. + + Returns + ------- + (n_param, ) numpy.ndarray + Approximation of the Jacobian transpose times a vector for the model provided. """ return self.Jtvec(m, v, f) @@ -289,14 +433,27 @@ def Jtvec_approx(self, m, v, f=None): def residual(self, m, dobs, f=None): r"""The data residual. + This method computes and returns the data residual for the model provided. + Where :math:`\mathbf{d}_\text{obs}` are the observed data values, and :math:`\mathbf{d}_\text{pred}` + are the predicted data values for model parameters :math:`\mathbf{m}`, the data + residual is given by: + + ..
math:: + \mathbf{r}(\mathbf{m}) = \mathbf{d}_\text{pred} - \mathbf{d}_\text{obs} - \mu_\\text{data} = \mathbf{d}_\\text{pred} - \mathbf{d}_\\text{obs} + Parameters + ---------- + m : (n_param, ) numpy.ndarray + The model parameters. + dobs : (n_data, ) numpy.ndarray + The observed data values. + f : SimPEG.fields.Fields, optional + If provided, fields will not need to be recomputed when solving the forward problem. - :param numpy.ndarray m: geophysical model - :param numpy.ndarray f: fields - :rtype: numpy.ndarray - :return: data residual + Returns + ------- + (n_data, ) numpy.ndarray + The data residual. """ return mkvc(self.dpred(m, f=f) - dobs) @@ -311,27 +468,35 @@ def make_synthetic_data( random_seed=None, **kwargs, ): - """ - Make synthetic data given a model, and a standard deviation. + r"""Make synthetic data for the model and Gaussian noise provided. + + This method generates and returns a :py:class:`SimPEG.data.SyntheticData` object + for the model and standard deviation of Gaussian noise provided. Parameters ---------- - m : array - Array containing with geophysical model. - relative_error : float - Standard deviation. - noise_floor : float - Noise floor. - f : array or None - Fields for the given model (if pre-calculated). + m : (n_param, ) numpy.ndarray + The model parameters. + relative_error : float, numpy.ndarray + Assign relative uncertainties to the data using relative error; sometimes + referred to as percent uncertainties. For each datum, we assume the + standard deviation of Gaussian noise is the relative error times the + absolute value of the datum; i.e. :math:`C_\text{err} \times |d|`. + noise_floor : float, numpy.ndarray + Assign floor/absolute uncertainties to the data. For each datum, we assume + standard deviation of Gaussian noise is equal to `noise_floor`. + f : SimPEG.fields.Fields, optional + If provided, fields will not need to be recomputed when solving the + forward problem to obtain noiseless data. 
add_noise : bool Whether to add gaussian noise to the synthetic data or not. - random_seed : int or None - Random seed to pass to `numpy.random.default_rng`. + random_seed : int, optional + Random seed to pass to :py:class:`numpy.random.default_rng`. Returns ------- - SyntheticData + SimPEG.data.SyntheticData + A SimPEG synthetic data object, which organizes both clean and noisy data. """ std = kwargs.pop("std", None) @@ -363,30 +528,78 @@ def make_synthetic_data( class BaseTimeSimulation(BaseSimulation): - """ - Base class for a time domain simulation + r"""Base class for time domain simulations. + + The ``BaseTimeSimulation`` defines properties and methods that are required + when the finite volume approach is used to solve time-dependent forward simulations. + Presently, SimPEG discretizes in time using the backward Euler approach. + And as such, the user must define the step lengths for the forward simulation. + + Parameters + ---------- + mesh : discretize.base.BaseMesh, optional + Mesh on which the forward problem is discretized. This is not necessarily + the same as the mesh on which the simulation is defined. + t0 : float, optional + Initial time, in seconds, for the time-dependent forward simulation. + time_steps : (n_steps, ) numpy.ndarray, optional + The time step lengths, in seconds, for the time domain simulation. + This property can also be set using a compact form; see *Notes*. + + Notes + ----- + There are two ways in which the user can set the ``time_steps`` property + for the forward simulation. The most basic approach is to use a ``(n_steps, )`` + :py:class:`numpy.ndarray` that explicitly defines the step lengths in order. + I.e.: + + >>> sim.time_steps = np.r_[1e-6, 1e-6, 1e-6, 1e-5, 1e-5, 1e-4, 1e-4] + + We can also define the step lengths in compact form when the same + step length is reused multiple times in succession. In this case, the + ``time_steps`` property is set using a ``list`` of ``tuple``.
Each + ``tuple`` contains the step length and number of times that step is repeated. + The time stepping defined above can be set equivalently with: + + >>> sim.time_steps = [(1e-6, 3), (1e-5, 2), (1e-4, 2)] + + When set, the :py:func:`discretize.utils.unpack_widths` utility is + used to convert the ``list`` of ``tuple`` to its (n_steps, ) :py:class:`numpy.ndarray` + representation. """ + def __init__(self, mesh=None, t0=0.0, time_steps=None, **kwargs): + self.t0 = t0 + self.time_steps = time_steps + super().__init__(mesh=mesh, **kwargs) + @property def time_steps(self): - """The time steps for the time domain simulation. + """Time step lengths, in seconds, for the time domain simulation. + + There are two ways in which the user can set the ``time_steps`` property + for the forward simulation. The most basic approach is to use a ``(n_steps, )`` + :py:class:`numpy.ndarray` that explicitly defines the step lengths in order. + I.e.: + + >>> sim.time_steps = np.r_[1e-6, 1e-6, 1e-6, 1e-5, 1e-5, 1e-4, 1e-4] - You can set as an array of dt's or as a list of tuples/floats. - If it is set as a list, tuples are unpacked with - `discretize.utils.unpack_widths``. + We can also define the step lengths in compact form when the same + step length is reused multiple times in succession. In this case, the + ``time_steps`` property is set using a ``list`` of ``tuple``. Each + ``tuple`` contains the step length and number of times that step is repeated. + The time stepping defined above can be set equivalently with: - For example, the following setters are the same:: + >>> sim.time_steps = [(1e-6, 3), (1e-5, 2), (1e-4, 2)] - >>> sim.time_steps = np.r_[1e-6,1e-6,1e-6,1e-5,1e-4,1e-4] + When set, the :py:func:`discretize.utils.unpack_widths` utility is + used to convert the ``list`` of ``tuple`` to its ``(n_steps, )`` :py:class:`numpy.ndarray` + representation.
Returns ------- - numpy.ndarray - - See Also - -------- - discretize.utils.unpack_widths + (n_steps, ) numpy.ndarray + The time step lengths for the time domain simulation. """ return self._time_steps @@ -401,11 +614,12 @@ def time_steps(self, value): @property def t0(self): - """Start time for the discretization. + """Initial time, in seconds, for the time-dependent forward simulation. Returns ------- float + Initial time, in seconds, for the time-dependent forward simulation. """ return self._t0 @@ -414,13 +628,21 @@ def t0(self, value): self._t0 = validate_float("t0", value) del self.time_mesh - def __init__(self, mesh=None, t0=0.0, time_steps=None, **kwargs): - self.t0 = t0 - self.time_steps = time_steps - super().__init__(mesh=mesh, **kwargs) - @property def time_mesh(self): + r"""Time mesh for easy interpolation to observation times. + + The time mesh is constructed internally from the :py:attr:`t0` and + :py:attr:`time_steps` properties using the :py:class:`discretize.TensorMesh` class. + The ``time_mesh`` property allows for easy interpolation from fields computed at + discrete time-steps, to an arbitrary set of observation + times within the continuous interval (:math:`t_0 , t_\text{end}`). + + Returns + ------- + discretize.TensorMesh + The time mesh. + """ if getattr(self, "_time_mesh", None) is None: self._time_mesh = TensorMesh( [ @@ -437,26 +659,32 @@ def time_mesh(self): @property def nT(self): + """Total number of time steps. + + Returns + ------- + int + Total number of time steps. + """ return self.time_mesh.n_cells @property def times(self): - "Modeling times" - return self.time_mesh.nodes_x - - def dpred(self, m=None, f=None): - r""" - dpred(m, f=None) - Create the projected data from a model. - The fields, f, (if provided) will be used for the predicted data - instead of recalculating the fields (which may be expensive!). - - .. math:: + """Evaluation times. 
- d_\text{pred} = P(f(m)) + Returns the discrete set of times at which the fields are computed for + the forward simulation. - Where P is a projection of the fields onto the data space. + Returns + ------- + (nT, ) numpy.ndarray + The discrete set of times at which the fields are computed for + the forward simulation. """ + return self.time_mesh.nodes_x + + def dpred(self, m=None, f=None): + # Docstring inherited from BaseSimulation. if self.survey is None: raise AttributeError( "The survey has not yet been set and is required to compute " @@ -482,16 +710,40 @@ def dpred(self, m=None, f=None): class LinearSimulation(BaseSimulation): - """ - Class for a linear simulation of the form + r"""Linear forward simulation class. + + The ``LinearSimulation`` class is used to define forward simulations of the form: .. math:: + \mathbf{d} = \mathbf{G \, f}(\mathbf{m}) + + where :math:`\mathbf{m}` are the model parameters, :math:`\mathbf{f}` is a + mapping operator (optional) from the model space to a user-defined parameter space, + :math:`\mathbf{d}` is the predicted data vector, and :math:`\mathbf{G}` is an + ``(n_data, n_param)`` linear operator. - d = Gm + The ``LinearSimulation`` class is generally used as a base class that is inherited by + other simulation classes within SimPEG. However, it can be used directly as a + simulation class if the :py:attr:`G` property is used to set the linear forward + operator directly. - where :math:`d` is a vector of the data, `G` is the simulation matrix and - :math:`m` is the model. - Inherit this class to build a linear simulation. + By default, we assume the mapping operator :math:`\mathbf{f}` is the identity map, + and that the forward simulation reduces to: + + .. math:: + \mathbf{d} = \mathbf{G \, m} + + Parameters + ---------- + mesh : discretize.BaseMesh, optional + Mesh on which the forward problem is discretized. This is not necessarily + the same as the mesh on which the simulation is defined. 
+ model_map : SimPEG.maps.BaseMap + Mapping from the model parameters to the vector that the linear operator acts on. + G : (n_data, n_param) numpy.ndarray or scipy.sparse.csr_matrix + The linear operator. For a ``model_map`` that maps within the same vector space + (e.g. the identity map), the dimension ``n_param`` equals the number of model parameters. + If not, the dimension ``n_param`` of the linear operator will depend on the mapping. """ linear_model, model_map, model_deriv = props.Invertible( @@ -516,6 +768,15 @@ def __init__(self, mesh=None, linear_model=None, model_map=None, G=None, **kwarg @property def G(self): + """The linear operator. + + Returns + ------- + (n_data, n_param) numpy.ndarray or scipy.sparse.csr_matrix + The linear operator. For a :py:attr:`model_map` that maps within the same vector space + (e.g. the identity map), the dimension ``n_param`` equals the number of model parameters. + If not, the dimension ``n_param`` of the linear operator will depend on the mapping. + """ if getattr(self, "_G", None) is not None: return self._G else: @@ -524,15 +785,17 @@ def G(self): @G.setter def G(self, G): - # Allows setting G in a LinearSimulation + # Allows setting G in a LinearSimulation. # TODO should be validated self._G = G def fields(self, m): + # Docstring inherited from BaseSimulation. self.model = m return self.G.dot(self.linear_model) def dpred(self, m=None, f=None): + # Docstring inherited from BaseSimulation if m is not None: self.model = m if f is not None: @@ -540,37 +803,131 @@ def dpred(self, m=None, f=None): return self.fields(self.model) def getJ(self, m, f=None): + r"""Returns the full Jacobian. + + The general definition of the linear forward simulation is: + + .. math:: + \mathbf{d} = \mathbf{G \, f}(\mathbf{m}) + + where :math:`\mathbf{f}` is a mapping operator (optional) from the model space + to a user-defined parameter space, and :math:`\mathbf{G}` is an (n_data, n_param) + linear operator.
The ``getJ`` method forms and returns the full Jacobian: + + .. math:: + \mathbf{J}(\mathbf{m}) = \mathbf{G} \frac{\partial \mathbf{f}}{\partial \mathbf{m}} + + for the model :math:`\mathbf{m}` provided. When :math:`\mathbf{f}` is the identity map + (default), the Jacobian is no longer model-dependent and reduces to: + + .. math:: + \mathbf{J} = \mathbf{G} + + Parameters + ---------- + m : numpy.ndarray + The model vector. + f : None + Precomputed fields are not used to speed up the computation of the + Jacobian for linear problems. + + Returns + ------- + J : (n_data, n_param) numpy.ndarray + :math:`J = G\frac{\partial f}{\partial\mathbf{m}}`. + Where :math:`f` is :attr:`model_map`. + """ self.model = m # self.model_deriv is likely a sparse matrix # and G is possibly dense, thus we need to do.. return (self.model_deriv.T.dot(self.G.T)).T def Jvec(self, m, v, f=None): + # Docstring inherited from BaseSimulation self.model = m return self.G.dot(self.model_deriv * v) def Jtvec(self, m, v, f=None): + # Docstring inherited from BaseSimulation self.model = m return self.model_deriv.T * self.G.T.dot(v) class ExponentialSinusoidSimulation(LinearSimulation): - r""" + r"""Simulation class for exponentially decaying sinusoidal kernel functions. + This is the simulation class for the linear problem consisting of - exponentially decaying sinusoids. The rows of the G matrix are + exponentially decaying sinusoids. The entries of the linear operator + :math:`\mathbf{G}` are: + + .. math:: + + G_{ik} = \int_\Omega e^{p \, j_i \, x_k} \cos(\pi \, q \, j_i \, x_k) \, dx + + The model is defined on a 1D :py:class:`discretize.TensorMesh`, and :math:`x_k` + are the cell center locations. :math:`p \leq 0` defines the rate of exponential + decay of the kernel functions. :math:`q` defines the rate of oscillation of + the kernel functions. And :math:`j_i \in [j_0, ... , j_n]` controls the spread + of the kernel functions; the number of which is set using the ``n_kernels`` + property. 
+ + .. tip:: + + For proper scaling, we advise defining the 1D tensor mesh to + discretize the interval [0, 1]. + + The kernel functions take the form: .. math:: \int_x e^{p j_k x} \cos(\pi q j_k x) \quad, j_k \in [j_0, ..., j_n] + + The model is defined at cell centers while the kernel functions are defined on nodes. + The trapezoid rule is used to evaluate the integral + + .. math:: + + d_j = \int g_j(x) m(x) dx + + to define our data. + + Parameters + ---------- + n_kernels : int + The number of kernel factors for the linear problem; i.e. the number of + :math:`j_i \in [j_0, ... , j_n]`. This sets the number of rows + in the linear forward operator. + p : float + Exponent specifying the decay (`p \leq 0`) or growth (`p \geq 0`) of the kernel. For decay, set :math:`p \leq 0`. + q : float + Rate of oscillation of the kernel. + j0 : float + Minimum value for the spread of the kernel factors. + jn : float + Maximum value for the spread of the kernel factors. """ + def __init__(self, n_kernels=20, p=-0.25, q=0.25, j0=0.0, jn=60.0, **kwargs): + self.n_kernels = n_kernels + self.p = p + self.q = q + self.j0 = j0 + self.jn = jn + super(ExponentialSinusoidSimulation, self).__init__(**kwargs) + @property def n_kernels(self): - """The number of kernels for the linear problem + r"""The number of kernel factors for the linear problem. + + Where :math:`j_0` represents the minimum value for the spread of + kernel factors and :math:`j_n` represents the maximum, ``n_kernels`` + defines the number of kernel factors :math:`j_i \in [j_0, ... , j_n]`. + This ultimately sets the number of rows in the linear forward operator. Returns ------- int + The number of kernel factors for the linear problem. """ return self._n_kernels @@ -585,6 +942,7 @@ def p(self): Returns ------- float + Rate of exponential decay of the kernel. """ return self._p @@ -594,11 +952,12 @@ def p(self, value): @property def q(self): - """rate of oscillation of the kernel. + """Rate of oscillation of the kernel. 
Returns ------- float + Rate of oscillation of the kernel. """ return self._q @@ -608,11 +967,12 @@ def q(self, value): @property def j0(self): - """Maximum value for :math:`j_k = j_0`. + """Minimum value for the spread of the kernel factors. Returns ------- float + Minimum value for the spread of the kernel factors. """ return self._j0 @@ -622,11 +982,12 @@ def j0(self, value): @property def jn(self): - """Maximum value for :math:`j_k = j_n`. + """Maximum value for the spread of the kernel factors. Returns ------- float + Maximum value for the spread of the kernel factors. """ return self._jn @@ -634,41 +995,55 @@ def jn(self): def jn(self, value): self._jn = validate_float("jn", value) - def __init__(self, n_kernels=20, p=-0.25, q=0.25, j0=0.0, jn=60.0, **kwargs): - self.n_kernels = n_kernels - self.p = p - self.q = q - self.j0 = j0 - self.jn = jn - super(ExponentialSinusoidSimulation, self).__init__(**kwargs) - @property def jk(self): - """ - Parameters controlling the spread of kernel functions + """The set of kernel factors controlling the spread of the kernel functions. + + Returns + ------- + (n_kernels, ) numpy.ndarray + The set of kernel factors controlling the spread of the kernel functions. """ if getattr(self, "_jk", None) is None: self._jk = np.linspace(self.j0, self.jn, self.n_kernels) return self._jk def g(self, k): + """Kernel functions evaluated for kernel factor :math:`j_k`. + + This method computes the row of the linear forward operator for + the kernel functions for kernel factor :math:`j_k`, given :math:`k` + + Parameters + ---------- + k : int + Kernel functions for kernel factor *k* + + Returns + ------- + (n_param, ) numpy.ndarray + Kernel functions evaluated for kernel factor *k*. """ - Kernel functions for the decaying oscillating exponential functions. 
- """ - return np.exp(self.p * self.jk[k] * self.mesh.cell_centers_x) * np.cos( - np.pi * self.q * self.jk[k] * self.mesh.cell_centers_x + return np.exp(self.p * self.jk[k] * self.mesh.nodes_x) * np.cos( + np.pi * self.q * self.jk[k] * self.mesh.nodes_x ) @property def G(self): - """ - Matrix whose rows are the kernel functions + """The linear forward operator. + + Returns + ------- + (n_kernels, n_param) numpy.ndarray + The linear forward operator. """ if getattr(self, "_G", None) is None: - G = np.empty((self.n_kernels, self.mesh.nC)) + G_nodes = np.empty((self.mesh.n_nodes, self.n_kernels)) for i in range(self.n_kernels): - G[i, :] = self.g(i) * self.mesh.h[0] + G_nodes[:, i] = self.g(i) - self._G = G + self._G = (self.mesh.average_node_to_cell @ G_nodes).T @ sdiag( + self.mesh.cell_volumes + ) return self._G diff --git a/SimPEG/utils/__init__.py b/SimPEG/utils/__init__.py index 5edf2565c9..b023970eca 100644 --- a/SimPEG/utils/__init__.py +++ b/SimPEG/utils/__init__.py @@ -76,7 +76,6 @@ :toctree: generated/ depth_weighting - surface2ind_topo model_builder.add_block model_builder.create_2_layer_model model_builder.create_block_in_wholespace @@ -143,31 +142,28 @@ validate_active_indices """ + from discretize.utils.interpolation_utils import interpolation_matrix -from . 
import io_utils, model_builder, solver_utils from .code_utils import ( - Report, - as_array_n_by_dim, - call_hooks, - check_stoppers, - dependent_property, - deprecate_class, - deprecate_function, - deprecate_method, - deprecate_module, - deprecate_property, - hook, mem_profile_class, - print_done, + hook, + set_kwargs, + print_titles, print_line, + check_stoppers, print_stoppers, - print_titles, + print_done, + call_hooks, + deprecate_property, + deprecate_module, + deprecate_method, + deprecate_function, + deprecate_class, + dependent_property, + as_array_n_by_dim, requires, - set_kwargs, - validate_active_indices, - validate_callable, - validate_direction, + Report, validate_float, validate_integer, validate_list_of_types, @@ -175,88 +171,110 @@ validate_ndarray_with_shape, validate_string, validate_type, + validate_callable, + validate_direction, + validate_active_indices, ) -from .coord_utils import rotate_points_from_normals, rotation_matrix_from_normals -from .counter_utils import Counter, count, timeIt -from .curv_utils import ( - example_curvilinear_grid, - face_info, - index_cube, - volume_tetrahedron, -) -from .io_utils import download + from .mat_utils import ( - Identity, - TensorType, - Zero, - av, - av_extrap, - cartesian2spherical, - coterminal, - ddx, - define_plane_from_points, - eigenvalue_by_power_iteration, - estimate_diagonal, - get_subarray, - ind2sub, - inverse_2x2_block_diagonal, - inverse_3x3_block_diagonal, - inverse_property_tensor, - kron3, - make_property_tensor, mkvc, - ndgrid, sdiag, sdinv, speye, - spherical2cartesian, + kron3, spzeros, + ddx, + av, + av_extrap, + ndgrid, + ind2sub, sub2ind, + get_subarray, + inverse_3x3_block_diagonal, + inverse_2x2_block_diagonal, + TensorType, + make_property_tensor, + inverse_property_tensor, + estimate_diagonal, + Zero, + Identity, unique_rows, + eigenvalue_by_power_iteration, + cartesian2spherical, + spherical2cartesian, + coterminal, + define_plane_from_points, ) from .mesh_utils import ( + 
unpack_widths, closest_points_index, extract_core_mesh, surface2inds, - unpack_widths, ) -from .model_utils import depth_weighting, distance_weighting, surface2ind_topo +from .curv_utils import ( + volume_tetrahedron, + index_cube, + face_info, + example_curvilinear_grid, +) +from .counter_utils import Counter, count, timeIt +from . import model_builder +from . import solver_utils +from . import io_utils +from .coord_utils import ( + rotation_matrix_from_normals, + rotate_points_from_normals, +) +from .model_utils import depth_weighting +from .plot_utils import plot2Ddata, plotLayer, plot_1d_layer_model +from .io_utils import download from .pgi_utils import ( GaussianMixture, + WeightedGaussianMixture, + GaussianMixtureWithPrior, GaussianMixtureWithNonlinearRelationships, GaussianMixtureWithNonlinearRelationshipsWithPrior, - GaussianMixtureWithPrior, - WeightedGaussianMixture, ) -from .plot_utils import plot2Ddata, plot_1d_layer_model, plotLayer # Deprecated imports interpmat = deprecate_function( - interpolation_matrix, "interpmat", removal_version="0.19.0", future_warn=True + interpolation_matrix, "interpmat", removal_version="0.19.0", error=True ) from .code_utils import ( - asArray_N_x_Dim, - callHooks, - checkStoppers, - dependentProperty, memProfileWrapper, - printDone, + setKwargs, + printTitles, printLine, + checkStoppers, printStoppers, - printTitles, - setKwargs, + printDone, + callHooks, + dependentProperty, + asArray_N_x_Dim, ) -from .coord_utils import rotatePointsFromNormals, rotationMatrixFromNormals -from .curv_utils import exampleLrmGrid, faceInfo, indexCube, volTetra from .mat_utils import ( - diagEst, + sdInv, getSubArray, - inv2X2BlockDiagonal, inv3X3BlockDiagonal, - invPropertyTensor, + inv2X2BlockDiagonal, makePropertyTensor, - sdInv, + invPropertyTensor, + diagEst, uniqueRows, ) -from .mesh_utils import ExtractCoreMesh, closestPoints, meshTensor +from .mesh_utils import ( + meshTensor, + closestPoints, + ExtractCoreMesh, +) +from .curv_utils 
import ( + volTetra, + faceInfo, + indexCube, + exampleLrmGrid, +) +from .coord_utils import ( + rotatePointsFromNormals, + rotationMatrixFromNormals, +) diff --git a/SimPEG/utils/code_utils.py b/SimPEG/utils/code_utils.py index 87e4c1bede..58e759118d 100644 --- a/SimPEG/utils/code_utils.py +++ b/SimPEG/utils/code_utils.py @@ -322,7 +322,7 @@ def call_hooks(match, mainFirst=False): Use the following syntax:: - @callHooks('doEndIteration') + @call_hooks('doEndIteration') def doEndIteration(self): pass @@ -1233,32 +1233,32 @@ def validate_active_indices(property_name, index_arr, n_cells): # DEPRECATIONS ############################################################### memProfileWrapper = deprecate_function( - mem_profile_class, "memProfileWrapper", removal_version="0.18.0", future_warn=True + mem_profile_class, "memProfileWrapper", removal_version="0.18.0", error=True ) setKwargs = deprecate_function( - set_kwargs, "setKwargs", removal_version="0.18.0", future_warn=True + set_kwargs, "setKwargs", removal_version="0.18.0", error=True ) printTitles = deprecate_function( - print_titles, "printTitles", removal_version="0.18.0", future_warn=True + print_titles, "printTitles", removal_version="0.18.0", error=True ) printLine = deprecate_function( - print_line, "printLine", removal_version="0.18.0", future_warn=True + print_line, "printLine", removal_version="0.18.0", error=True ) printStoppers = deprecate_function( - print_stoppers, "printStoppers", removal_version="0.18.0", future_warn=True + print_stoppers, "printStoppers", removal_version="0.18.0", error=True ) checkStoppers = deprecate_function( - check_stoppers, "checkStoppers", removal_version="0.18.0", future_warn=True + check_stoppers, "checkStoppers", removal_version="0.18.0", error=True ) printDone = deprecate_function( - print_done, "printDone", removal_version="0.18.0", future_warn=True + print_done, "printDone", removal_version="0.18.0", error=True ) callHooks = deprecate_function( - call_hooks, "callHooks", 
removal_version="0.18.0", future_warn=True + call_hooks, "callHooks", removal_version="0.18.0", error=True ) dependentProperty = deprecate_function( - dependent_property, "dependentProperty", removal_version="0.18.0", future_warn=True + dependent_property, "dependentProperty", removal_version="0.18.0", error=True ) asArray_N_x_Dim = deprecate_function( - as_array_n_by_dim, "asArray_N_x_Dim", removal_version="0.19.0", future_warn=True + as_array_n_by_dim, "asArray_N_x_Dim", removal_version="0.19.0", error=True ) diff --git a/SimPEG/utils/coord_utils.py b/SimPEG/utils/coord_utils.py index bb46021ba9..e1d17c5dbf 100644 --- a/SimPEG/utils/coord_utils.py +++ b/SimPEG/utils/coord_utils.py @@ -9,11 +9,11 @@ rotation_matrix_from_normals, "rotationMatrixFromNormals", removal_version="0.19.0", - future_warn=True, + error=True, ) rotatePointsFromNormals = deprecate_function( rotate_points_from_normals, "rotatePointsFromNormals", removal_version="0.19.0", - future_warn=True, + error=True, ) diff --git a/SimPEG/utils/curv_utils.py b/SimPEG/utils/curv_utils.py index 6f516db1c9..71e764ce60 100644 --- a/SimPEG/utils/curv_utils.py +++ b/SimPEG/utils/curv_utils.py @@ -8,17 +8,17 @@ # deprecated functions volTetra = deprecate_function( - volume_tetrahedron, "volTetra", removal_version="0.19.0", future_warn=True + volume_tetrahedron, "volTetra", removal_version="0.19.0", error=True ) indexCube = deprecate_function( - index_cube, "indexCube", removal_version="0.19.0", future_warn=True + index_cube, "indexCube", removal_version="0.19.0", error=True ) faceInfo = deprecate_function( - face_info, "faceInfo", removal_version="0.19.0", future_warn=True + face_info, "faceInfo", removal_version="0.19.0", error=True ) exampleLrmGrid = deprecate_function( example_curvilinear_grid, "exampleLrmGrid", removal_version="0.19.0", - future_warn=True, + error=True, ) diff --git a/SimPEG/utils/io_utils/__init__.py b/SimPEG/utils/io_utils/__init__.py index b3226b2c2e..14d628ab3d 100644 --- 
a/SimPEG/utils/io_utils/__init__.py +++ b/SimPEG/utils/io_utils/__init__.py @@ -18,11 +18,3 @@ write_dcipoctree_ubc, write_dcip_xyz, ) - -# Deprecated -from .io_utils_pf import ( - readUBCmagneticsObservations, - writeUBCmagneticsObservations, - readUBCgravityObservations, - writeUBCgravityObservations, -) diff --git a/SimPEG/utils/io_utils/io_utils_pf.py b/SimPEG/utils/io_utils/io_utils_pf.py index b5048d908a..9387896481 100644 --- a/SimPEG/utils/io_utils/io_utils_pf.py +++ b/SimPEG/utils/io_utils/io_utils_pf.py @@ -1,6 +1,5 @@ import numpy as np from discretize.utils import mkvc -from ...utils.code_utils import deprecate_method def read_mag3d_ubc(obs_file): @@ -379,34 +378,3 @@ def write_gg3d_ubc(filename, data_object): ) print("Observation file saved to: " + filename) - - -# ====================================================== -# Depricated Methods -# ====================================================== - - -readUBCmagneticsObservations = deprecate_method( - read_mag3d_ubc, - "readUBCmagneticsObservations", - removal_version="0.14.4", - error=True, -) -writeUBCmagneticsObservations = deprecate_method( - write_mag3d_ubc, - "writeUBCmagneticsObservations", - removal_version="0.14.4", - error=True, -) -readUBCgravityObservations = deprecate_method( - read_grav3d_ubc, - "readUBCgravityObservations", - removal_version="0.14.4", - error=True, -) -writeUBCgravityObservations = deprecate_method( - write_grav3d_ubc, - "writeUBCgravityObservations", - removal_version="0.14.4", - error=True, -) diff --git a/SimPEG/utils/mat_utils.py b/SimPEG/utils/mat_utils.py index c7b3d07fcb..46798e75dd 100644 --- a/SimPEG/utils/mat_utils.py +++ b/SimPEG/utils/mat_utils.py @@ -451,36 +451,36 @@ def define_plane_from_points(xyz1, xyz2, xyz3): diagEst = deprecate_function( - estimate_diagonal, "diagEst", removal_version="0.19.0", future_warn=True + estimate_diagonal, "diagEst", removal_version="0.19.0", error=True ) uniqueRows = deprecate_function( - unique_rows, "uniqueRows", 
removal_version="0.19.0", future_warn=True + unique_rows, "uniqueRows", removal_version="0.19.0", error=True ) -sdInv = deprecate_function(sdinv, "sdInv", removal_version="0.19.0", future_warn=True) +sdInv = deprecate_function(sdinv, "sdInv", removal_version="0.19.0", error=True) getSubArray = deprecate_function( - get_subarray, "getSubArray", removal_version="0.19.0", future_warn=True + get_subarray, "getSubArray", removal_version="0.19.0", error=True ) inv3X3BlockDiagonal = deprecate_function( inverse_3x3_block_diagonal, "inv3X3BlockDiagonal", removal_version="0.19.0", - future_warn=True, + error=True, ) inv2X2BlockDiagonal = deprecate_function( inverse_2x2_block_diagonal, "inv2X2BlockDiagonal", removal_version="0.19.0", - future_warn=True, + error=True, ) makePropertyTensor = deprecate_function( make_property_tensor, "makePropertyTensor", removal_version="0.19.0", - future_warn=True, + error=True, ) invPropertyTensor = deprecate_function( inverse_property_tensor, "invPropertyTensor", removal_version="0.19.0", - future_warn=True, + error=True, ) diff --git a/SimPEG/utils/mesh_utils.py b/SimPEG/utils/mesh_utils.py index 30a7e52143..1fc3a8d580 100644 --- a/SimPEG/utils/mesh_utils.py +++ b/SimPEG/utils/mesh_utils.py @@ -11,8 +11,8 @@ def surface2inds(vrtx, trgl, mesh, boundaries=True, internal=True): """Takes a triangulated surface and determine which mesh cells it intersects. 
- Paramters - --------- + Parameters + ---------- vrtx : (n_nodes, 3) numpy.ndarray of float The location of the vertices of the triangles trgl : (n_triang, 3) numpy.ndarray of int @@ -101,11 +101,11 @@ def surface2inds(vrtx, trgl, mesh, boundaries=True, internal=True): # DEPRECATED FUNCTIONS ################################################ meshTensor = deprecate_function( - unpack_widths, "meshTensor", removal_version="0.19.0", future_warn=True + unpack_widths, "meshTensor", removal_version="0.19.0", error=True ) closestPoints = deprecate_function( - closest_points_index, "closestPoints", removal_version="0.19.0", future_warn=True + closest_points_index, "closestPoints", removal_version="0.19.0", error=True ) ExtractCoreMesh = deprecate_function( - extract_core_mesh, "ExtractCoreMesh", removal_version="0.19.0", future_warn=True + extract_core_mesh, "ExtractCoreMesh", removal_version="0.19.0", error=True ) diff --git a/SimPEG/utils/model_utils.py b/SimPEG/utils/model_utils.py index 4b67f584e8..91df15da71 100644 --- a/SimPEG/utils/model_utils.py +++ b/SimPEG/utils/model_utils.py @@ -1,67 +1,8 @@ -import warnings -from typing import Literal, Optional - -import discretize +from .mat_utils import mkvc import numpy as np -import scipy.sparse as sp -from discretize.utils import active_from_xyz from scipy.interpolate import griddata from scipy.spatial import cKDTree -from scipy.spatial.distance import cdist - -from .mat_utils import mkvc - -try: - import numba - from numba import njit, prange -except ImportError: - numba = None - - # Define dummy njit decorator - def njit(*args, **kwargs): - return lambda f: f - - # Define dummy prange function - prange = range - - -def surface2ind_topo(mesh, topo, gridLoc="CC", method="nearest", fill_value=np.nan): - """Get indices of active cells from topography. - - For a mesh and surface topography, this function returns the indices of cells - lying below the discretized surface topography. 
- - Parameters - ---------- - mesh : discretize.TensorMesh or discretize.TreeMesh - Mesh on which you want to identify active cells - topo : (n, 3) numpy.ndarray - Topography data as a ``numpyndarray`` with columns [x,y,z]; can use [x,z] for 2D meshes. - Topography data can be unstructured. - gridLoc : str {'CC', 'N'} - If 'CC', all cells whose centers are below the topography are active cells. - If 'N', then cells must lie entirely below the topography in order to be active cells. - method : str {'nearest','linear'} - Interpolation method for approximating topography at cell's horizontal position. - Default is 'nearest'. - fill_value : float - Defines the elevation for cells outside the horizontal extent of the topography data. - Default is :py:class:`numpy.nan`. - - Returns - ------- - (n_active) numpy.ndarray of int - Indices of active cells below xyz. - """ - warnings.warn( - "The surface2ind_topo function has been deprecated, please import " - "discretize.utils.active_from_xyz. This will be removed in SimPEG 0.20.0", - FutureWarning, - stacklevel=2, - ) - - active_cells = active_from_xyz(mesh, topo, gridLoc, method) - return np.arange(mesh.n_cells)[active_cells] +import scipy.sparse as sp def surface_layer_index(mesh, topo, index=0): @@ -178,14 +119,10 @@ def depth_weighting( value. """ - if "indActive" in kwargs: - warnings.warn( - "The indActive keyword argument has been deprecated, please use active_cells. " - "This will be removed in SimPEG 0.19.0", - FutureWarning, - stacklevel=2, + if (key := "indActive") in kwargs: + raise TypeError( + f"'{key}' argument has been removed. " "Please use 'active_cells' instead." 
) - active_cells = kwargs["indActive"] # Default threshold value if threshold is None: @@ -213,181 +150,3 @@ def depth_weighting( wz = wz[active_cells] return wz / np.nanmax(wz) - - -@njit(parallel=True) -def _distance_weighting_numba( - cell_centers: np.ndarray, - cell_volumes: np.ndarray, - reference_locs: np.ndarray, - threshold: float, - exponent: float = 2.0, -) -> np.ndarray: - r""" - distance weighting kernel in numba. - - If numba is not installed, this will work as a regular for loop. - - Parameters - ---------- - cell_centers : np.ndarray - cell centers of the mesh. - cell_volumes : np.ndarray - cell volumes of the mesh. - reference_locs : float or (n, ndim) numpy.ndarray - Reference location for the distance weighting. - It can be a ``float``, which value is the component for - the reference location. - Or it can be a 2d array, with multiple reference locations, where each - row should contain the coordinates of a single location point in the - following order: _x_, _y_, _z_ (for 3D meshes) or _x_, _z_ (for 2D - meshes). - The coordinate of the reference location, usually the receiver locations - exponent : float, optional - Exponent parameter for distance weighting. - The exponent should match the natural decay power of the potential - field. For example, for gravity acceleration, set it to 2; for magnetic - fields, to 3. - threshold : float or None, optional - Threshold parameters used in the distance weighting. - If ``None``, it will be set to half of the smallest cell width. - - Returns - ------- - (n_active) numpy.ndarray - Normalized distance weights for the mesh at every active cell as - a 1d-array. 
- """ - - distance_weights = np.zeros(len(cell_centers)) - n_reference_locs = len(reference_locs) - for i in prange(n_reference_locs): - rl = reference_locs[i] - dst_wgt = ( - np.sqrt(((cell_centers - rl) ** 2).sum(axis=1)) + threshold - ) ** exponent - dst_wgt = (cell_volumes / dst_wgt) ** 2 - distance_weights += dst_wgt - - distance_weights = distance_weights**0.5 - distance_weights /= cell_volumes - distance_weights /= np.nanmax(distance_weights) - - return distance_weights - - -def distance_weighting( - mesh: discretize.base.BaseMesh, - reference_locs: np.ndarray, - active_cells: Optional[np.ndarray] = None, - exponent: float = 2.0, - threshold: Optional[float] = None, - engine: Literal["loop", "cdist"] = "loop", - cdist_opts: Optional[dict] = None, -): - r""" - Construct diagonal elements of a distance weighting matrix - - Builds the model weights following the distance weighting strategy, a method - to generate weights based on the distance between mesh cell centers and some - reference location(s). - Use these weights in regularizations to counteract the natural decay of - potential field data with distance. - - Parameters - ---------- - mesh : discretize.base.BaseMesh - Discretized model space. - reference_locs : float or (n, ndim) numpy.ndarray - Reference location for the distance weighting. - It can be a ``float``, which value is the component for - the reference location. - Or it can be a 2d array, with multiple reference locations, where each - row should contain the coordinates of a single location point in the - following order: _x_, _y_, _z_ (for 3D meshes) or _x_, _z_ (for 2D - meshes). - The coordinate of the reference location, usually the receiver locations - active_cells : (mesh.n_cells) numpy.ndarray of bool, optional - Index vector for the active cells on the mesh. - If ``None``, every cell will be assumed to be active. - exponent : float, optional - Exponent parameter for distance weighting. 
- The exponent should match the natural decay power of the potential - field. For example, for gravity acceleration, set it to 2; for magnetic - fields, to 3. - threshold : float or None, optional - Threshold parameters used in the distance weighting. - If ``None``, it will be set to half of the smallest cell width. - engine: str, 'loop' or 'cdist' - pick between a `scipy.spatial.distance.cdist` computation (memory intensive) or `for` loop implementation, - parallelized with numba if available. Default to 'loop'. - cdist_opts: dct, optional - Only valid with `engine=='cdist'`. Options to pass to scipy.spatial.distance.cdist. Default to None. - - Returns - ------- - (n_active) numpy.ndarray - Normalized distance weights for the mesh at every active cell as - a 1d-array. - """ - - active_cells = ( - np.ones(mesh.n_cells, dtype=bool) if active_cells is None else active_cells - ) - - # Default threshold value - if threshold is None: - threshold = 0.5 * mesh.h_gridded.min() - - reference_locs = np.asarray(reference_locs) - - cell_centers = mesh.cell_centers[active_cells] - cell_volumes = mesh.cell_volumes[active_cells] - - # address 1D case - if mesh.dim == 1: - cell_centers = cell_centers.reshape(-1, 1) - reference_locs = reference_locs.reshape(-1, 1) - - if engine == "loop": - if numba is None: - warnings.warn( - "numba is not installed. 'loop' computations might be slower.", - stacklevel=2, - ) - if cdist_opts is not None: - warnings.warn( - f"`cdist_opts` is only valid with `engine=='cdist'`, currently {engine=}", - stacklevel=2, - ) - distance_weights = _distance_weighting_numba( - cell_centers, - cell_volumes, - reference_locs, - exponent=exponent, - threshold=threshold, - ) - - elif engine == "cdist": - warnings.warn( - "scipy.spatial.distance.cdist computations can be memory intensive. 
Consider switching to `engine='loop'` " - "if you run into memory overflow issues", - stacklevel=2, - ) - cdist_opts = cdist_opts or dict() - distance = cdist(cell_centers, reference_locs, **cdist_opts) - - distance_weights = ( - (cell_volumes.reshape(-1, 1) / ((distance + threshold) ** exponent)) ** 2 - ).sum(axis=1) - - distance_weights = distance_weights**0.5 - distance_weights /= cell_volumes - distance_weights /= np.nanmax(distance_weights) - - else: - raise ValueError( - f"engine should be either 'cdist' or 'loop', instead {engine=}" - ) - - return distance_weights diff --git a/SimPEG/utils/pgi_utils.py b/SimPEG/utils/pgi_utils.py index 3304b36d4a..eb0658e958 100644 --- a/SimPEG/utils/pgi_utils.py +++ b/SimPEG/utils/pgi_utils.py @@ -1560,7 +1560,7 @@ def __init__( warm_start=warm_start, weights_init=weights_init, update_covariances=update_covariances, - fixed_membership=fixed_membership + fixed_membership=fixed_membership, # **kwargs ) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 2b6d0dcb57..f7f7ed21a9 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -141,4 +141,4 @@ stages: git push displayName: Push documentation to simpeg-docs env: - GH_TOKEN: $(gh.token) + GH_TOKEN: $(gh.token) \ No newline at end of file diff --git a/docs/_templates/autosummary/attribute.rst b/docs/_templates/autosummary/attribute.rst new file mode 100644 index 0000000000..820f45286e --- /dev/null +++ b/docs/_templates/autosummary/attribute.rst @@ -0,0 +1,7 @@ +:orphan: + +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. auto{{ objtype }}:: {{ objname }} \ No newline at end of file diff --git a/docs/_templates/autosummary/base.rst b/docs/_templates/autosummary/base.rst new file mode 100644 index 0000000000..ef8e6277cb --- /dev/null +++ b/docs/_templates/autosummary/base.rst @@ -0,0 +1,9 @@ +{% if objtype == 'property' %} +:orphan: +{% endif %} + +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. 
auto{{ objtype }}:: {{ objname }} \ No newline at end of file diff --git a/docs/_templates/autosummary/method.rst b/docs/_templates/autosummary/method.rst new file mode 100644 index 0000000000..820f45286e --- /dev/null +++ b/docs/_templates/autosummary/method.rst @@ -0,0 +1,7 @@ +:orphan: + +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. auto{{ objtype }}:: {{ objname }} \ No newline at end of file diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html index f208c5eced..d4fe319939 100644 --- a/docs/_templates/layout.html +++ b/docs/_templates/layout.html @@ -8,6 +8,4 @@ - - {% endblock %} diff --git a/docs/conf.py b/docs/conf.py index cea63da1c6..b7edd86caf 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -55,9 +55,7 @@ autosummary_generate = True numpydoc_attributes_as_param_list = False -# This has to be set to false in order to make the doc build in a -# reasonable amount of time. -numpydoc_show_inherited_class_members = False +numpydoc_show_inherited_class_members = True # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] @@ -233,6 +231,12 @@ def linkcode_resolve(domain, info): # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
+external_links = [ + dict(name="User Tutorials", url="https://simpeg.xyz/user-tutorials"), + dict(name="SimPEG", url="https://simpeg.xyz"), + dict(name="Contact", url="https://mattermost.softwareunderground.org/simpeg"), +] + try: import pydata_sphinx_theme @@ -242,10 +246,7 @@ def linkcode_resolve(domain, info): html_use_modindex = True html_theme_options = { - "external_links": [ - {"name": "SimPEG", "url": "https://simpeg.xyz"}, - {"name": "Contact", "url": "http://slack.simpeg.xyz"}, - ], + "external_links": external_links, "icon_links": [ { "name": "GitHub", @@ -253,9 +254,9 @@ def linkcode_resolve(domain, info): "icon": "fab fa-github", }, { - "name": "Slack", - "url": "http://slack.simpeg.xyz/", - "icon": "fab fa-slack", + "name": "Mattermost", + "url": "https://mattermost.softwareunderground.org/simpeg", + "icon": "fas fa-comment", }, { "name": "Discourse", @@ -267,13 +268,14 @@ def linkcode_resolve(domain, info): "url": "https://www.youtube.com/c/geoscixyz", "icon": "fab fa-youtube", }, - { - "name": "Twitter", - "url": "https://twitter.com/simpegpy", - "icon": "fab fa-twitter", - }, ], "use_edit_page_button": False, + "collapse_navigation": True, + "analytics": { + "plausible_analytics_domain": "docs.simpeg.xyz", + "plausible_analytics_url": "https://plausible.io/js/script.js", + }, + "navbar_align": "left", # make elements closer to logo on the left } html_logo = "images/simpeg-logo.png" @@ -421,6 +423,7 @@ def linkcode_resolve(domain, info): "matplotlib": ("https://matplotlib.org/stable/", None), "properties": ("https://propertiespy.readthedocs.io/en/latest/", None), "discretize": ("https://discretize.simpeg.xyz/en/main/", None), + "pymatsolver": ("https://pymatsolver.readthedocs.io/en/latest/", None), } numpydoc_xref_param_type = True diff --git a/docs/content/api/SimPEG.directives.rst b/docs/content/api/SimPEG.directives.rst new file mode 100644 index 0000000000..35999d49d0 --- /dev/null +++ b/docs/content/api/SimPEG.directives.rst @@ -0,0 +1 @@ +.. 
automodule:: SimPEG.directives diff --git a/docs/content/api/index.rst b/docs/content/api/index.rst index 55faddc116..8ffe60ffa9 100644 --- a/docs/content/api/index.rst +++ b/docs/content/api/index.rst @@ -32,6 +32,13 @@ Regularizations SimPEG.regularization +Directives +---------- +.. toctree:: + :maxdepth: 2 + + SimPEG.directives + Utilities --------- diff --git a/docs/content/getting_started/contributing/index.rst b/docs/content/getting_started/contributing/index.rst index 66a10fd678..cb26c41c54 100644 --- a/docs/content/getting_started/contributing/index.rst +++ b/docs/content/getting_started/contributing/index.rst @@ -20,7 +20,8 @@ Ask questions If you have a question regarding a specific use of SimPEG, the fastest way to get a response is by posting on our Discourse discussion forum: https://simpeg.discourse.group/. Alternatively, if you prefer real-time chat, -you can join our slack group at http://slack.simpeg.xyz. +you can join our Mattermost Team at +https://mattermost.softwareunderground.org/simpeg. Please do not create an issue to ask a question. .. _issues: diff --git a/docs/content/getting_started/contributing/pull-requests.rst b/docs/content/getting_started/contributing/pull-requests.rst index 4475ae1828..75ee05bcdd 100644 --- a/docs/content/getting_started/contributing/pull-requests.rst +++ b/docs/content/getting_started/contributing/pull-requests.rst @@ -33,3 +33,22 @@ pull request into the main branch (feel free to ping one of us on Github). This being said, all SimPEG developers and admins are essentially volunteers providing their time for the benefit of the community. This does mean that it might take some time for us to get your PR. + +Merging a Pull Request +---------------------- + +The ``@simpeg/simpeg-admin`` will merge a Pull Request to the `main` branch +using the `Squash and Merge +`_ +strategy: all commits made to the PR branch will be _squashed_ to a single +commit that will be added to `main`. 
+ +SimPEG admins will ensure that the commit message is descriptive and +comprehensive. Contributors can help by providing a descriptive and +comprehensive PR description of the changes that were applied and the reasons +behind them. This will be greatly appreciated. + +Admins will mention other authors that made significant contributions to +the PR in the commit message, following GitHub's approach for `Creating +co-authored commits +`_. diff --git a/docs/content/getting_started/installing.rst b/docs/content/getting_started/installing.rst index 7105415ad6..14adff1570 100644 --- a/docs/content/getting_started/installing.rst +++ b/docs/content/getting_started/installing.rst @@ -104,7 +104,7 @@ be able to download and run any of the :ref:`examples and tutorials `_ or on -`slack `_. +`Mattermost `_. Useful Links ============ diff --git a/docs/content/release/0.21.0-notes.rst b/docs/content/release/0.21.0-notes.rst new file mode 100644 index 0000000000..5e668d9dee --- /dev/null +++ b/docs/content/release/0.21.0-notes.rst @@ -0,0 +1,276 @@ +.. _0.21.0_notes: + +=========================== +SimPEG 0.21.0 Release Notes +=========================== + +April 8th, 2024 + +.. contents:: Highlights + :depth: 3 + +Updates +======= + +New features +------------ + +Gravity simulation using Choclo +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Now we can use a faster and more memory efficient implementation of the gravity +simulation ``SimPEG.potential_fields.gravity.Simulation3DIntegral``, making use +of Choclo and Numba. To make use of this functionality you will need to +`install Choclo `__ in +addition to ``SimPEG``. + +See https://github.com/simpeg/simpeg/pull/1285. + +Use Dask with MetaSimulation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A new ``SimPEG.meta.DaskMetaSimulation`` class has been added that allows to +use Dask with ``SimPEG.meta.MetaSimulations``. + +See https://github.com/simpeg/simpeg/pull/1199. 
+
+Rotated Gradients
+~~~~~~~~~~~~~~~~~
+
+Added a new ``SimPEG.regularization.SmoothnessFullGradient`` regularization
+class that allows regularizing first order smoothness along any arbitrary
+direction, enabling anisotropic weighting. This regularization also works for
+a ``SimplexMesh``.
+
+See https://github.com/simpeg/simpeg/pull/1167.
+
+Logistic Sigmoid Map
+~~~~~~~~~~~~~~~~~~~~
+
+New ``SimPEG.maps.LogisticSigmoidMap`` mapping class that computes the logistic
+sigmoid of the model parameters. This is an alternative method to incorporate
+upper and lower bounds on model parameters.
+
+See https://github.com/simpeg/simpeg/pull/1352.
+
+Create Jacobian matrix in NSEM and FDEM simulations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The frequency domain electromagnetic simulations (including natural source) now
+support creating and storing the Jacobian matrix. You can access it by using
+the ``getJ`` method.
+
+See https://github.com/simpeg/simpeg/pull/1276.
+
+
+Documentation
+-------------
+
+This new release includes major improvements in documentation pages: more
+detailed docstrings of classes and methods, the addition of directive classes
+to the API reference, improvements to the contributing guide, among corrections
+and fixes.
+
+
+Breaking changes
+----------------
+
+Removal of deprecated bits
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Several deprecated bits of code have been removed in this release: old
+classes, methods and properties that were marked for deprecation a few releases
+back. These removals simplify the SimPEG API and clean up the codebase.
+
+Remove factor of half in data misfits and regularizations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Simplify the definition of data misfit and regularization terms by removing the
+leading factor of one half from these functions. 
This change makes it easier to +interpret the resulting values of these objective functions, while +avoiding confusions with their definition. + +See https://github.com/simpeg/simpeg/pull/1326. + + +Bugfixes +-------- + +A few bugs have been fixed: + +- Fix issue with lengthscales in coterminal angle calculations by + `@domfournier `__ in https://github.com/simpeg/simpeg/pull/1299 +- ISSUE-1341: Set parent of objective functions by `@domfournier `__ in + https://github.com/simpeg/simpeg/pull/1342 +- Ravel instead of flatten by `@thibaut-kobold `__ in + https://github.com/simpeg/simpeg/pull/1343 +- Fix implementation of coterminal function by `@domfournier `__ in + https://github.com/simpeg/simpeg/pull/1334 +- Simpeg vector update by `@johnweis0480 `__ in + https://github.com/simpeg/simpeg/pull/1329 + + +Contributors +============ + +This is a combination of contributors and reviewers who've made contributions +towards this release (in no particular order). + +* `@ckohnke `__ +* `@dccowan `__ +* `@domfournier `__ +* `@ghwilliams `__ +* `@jcapriot `__ +* `@JKutt `__ +* `@johnweis0480 `__ +* `@lheagy `__ +* `@mplough-kobold `__ +* `@santisoler `__ +* `@thibaut-kobold `__ +* `@YingHuuu `__ + +We would like to highlight the contributions made by new contributors: + +- `@mplough-kobold `__ made their first + contribution in https://github.com/simpeg/simpeg/pull/1282 +- `@ghwilliams `__ made their first contribution + in https://github.com/simpeg/simpeg/pull/1292 +- `@johnweis0480 `__ made their first + contribution in https://github.com/simpeg/simpeg/pull/1329 +- `@ckohnke `__ made their first contribution in + https://github.com/simpeg/simpeg/pull/1352 +- `@YingHuuu `__ made their first contribution in + https://github.com/simpeg/simpeg/pull/1344 + + +Pull Requests +============= + +- Add 0.20.0 release notes to toc by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1277 +- add plausible analytics to simpeg docs by `@lheagy `__ in + 
https://github.com/simpeg/simpeg/pull/1279 +- Refresh links in documentation by `@mplough-kobold `__ in + https://github.com/simpeg/simpeg/pull/1282 +- Run pytest on Azure with increased verbosity by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1287 - Allow to use random seed in make_synthetic_data by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1286 +- pgi doc by `@thibaut-kobold `__ in + https://github.com/simpeg/simpeg/pull/1291 +- Fix deprecation warning for gradientType in SparseSmoothness by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1284 +- Gravity simulation with Choclo as engine by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1285 +- Fix minor flake8 warning by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1307 +- ISSUE-1298: Use normal distributed noise in example. by `@domfournier `__ + in https://github.com/simpeg/simpeg/pull/1312 +- Ditch deprecated functions in utils.model_builder by `@domfournier `__ in + https://github.com/simpeg/simpeg/pull/1311 - Triaxial magnetic gradient forward modelling by `@thibaut-kobold `__ in + https://github.com/simpeg/simpeg/pull/1288 +- Documentation improvements for classes in Objective Function Pieces + by `@ghwilliams `__ in https://github.com/simpeg/simpeg/pull/1292 +- Fix description of source_field in gravity survey by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1322 +- Add ``weights_keys`` method to ``BaseRegularization`` by `@santisoler `__ + in https://github.com/simpeg/simpeg/pull/1320 +- Bump versions of flake8 and black and pin flake plugins by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1330 +- Move ``__init__`` in ``BaseSimulation`` to the top of the class by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1323 +- Simpeg vector update by `@johnweis0480 `__ in + https://github.com/simpeg/simpeg/pull/1329 +- Fix typo in error messages by `@santisoler `__ in + 
https://github.com/simpeg/simpeg/pull/1324 +- Fix issue with lengthscales in coterminal angle calculations by + `@domfournier `__ in https://github.com/simpeg/simpeg/pull/1299 +- Simplify check for invalid multipliers by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1336 +- Ravel instead of flatten by `@thibaut-kobold `__ in + https://github.com/simpeg/simpeg/pull/1343 +- Fix implementation of coterminal function by `@domfournier `__ in + https://github.com/simpeg/simpeg/pull/1334 +- Update cross gradient hessian approximation by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1355 +- ISSUE-1341: Set parent of objective functions by `@domfournier `__ in + https://github.com/simpeg/simpeg/pull/1342 +- Fix partial derivatives in regularization docs by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1362 +- Remove factor of half in data misfits and regularizations by `@lheagy `__ + in https://github.com/simpeg/simpeg/pull/1326 +- Improvements to template for a bug report issue by `@lheagy `__ in + https://github.com/simpeg/simpeg/pull/1359 +- Simplify a few gravity simulation tests by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1363 +- Exponential Sinusoids Simulation by `@lheagy `__ in + https://github.com/simpeg/simpeg/pull/1337 +- Replace magnetic SourceField for UniformBackgroundField by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1364 +- Remove deprecated regularization classes by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1365 +- Removed deprecated properties of UpdateSensitivityWeights by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1368 +- Replace indActive for active_cells in regularizations by `@santisoler `__ + in https://github.com/simpeg/simpeg/pull/1366 +- Remove the debug argument from InversionDirective by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1370 +- Remove cellDiff properties of RegularizationMesh by `@santisoler `__ in + 
https://github.com/simpeg/simpeg/pull/1371 +- Remove deprecated bits of code by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1372 +- Use choclo in gravity tutorials by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1378 +- Remove surface2ind_topo by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1374 +- Speed up sphinx documentation building by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1382 +- Add docs/sg_execution_times.rst to .gitignore by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1380 +- Describe merge process of Pull Requests in docs by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1375 +- Simplify private methods in gravity simulation by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1384 +- Update Slack links: point to Mattermost by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1385 +- added getJ for fdem and nsem simulations by `@JKutt `__ in + https://github.com/simpeg/simpeg/pull/1276 +- Add LogisticSigmoidMap by `@ckohnke `__ in + https://github.com/simpeg/simpeg/pull/1352 +- Remove the cell_weights attribute in regularizations by `@santisoler `__ + in https://github.com/simpeg/simpeg/pull/1376 +- Remove regmesh, mref and gradientType from regularizations by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1377 +- Test if gravity sensitivities are stored on disk by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1388 +- Check if mesh is 3D when using Choclo in gravity simulation by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1386 +- Rotated Gradients by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1167 +- Add directives to the API Reference by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1397 +- Remove deprecated modelType in mag simulation by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1399 +- Remove mref property of PGI regularization by `@santisoler `__ in 
+ https://github.com/simpeg/simpeg/pull/1400 +- Add link to User Tutorials to navbar in docs by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1401 +- Improve documentation for base simulation classes by `@ghwilliams `__ in + https://github.com/simpeg/simpeg/pull/1295 +- Enforce regularization ``weights`` as dictionaries by `@YingHuuu `__ in + https://github.com/simpeg/simpeg/pull/1344 +- Minor adjustments to Sphinx configuration by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1398 +- Update AUTHORS.rst by `@lheagy `__ in + https://github.com/simpeg/simpeg/pull/1259 +- Update year in LICENSE by `@lheagy `__ in + https://github.com/simpeg/simpeg/pull/1404 +- Dask MetaSim by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1199 +- Add Ying and Williams to AUTHORS.rst by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1405 +- Remove link to “twitter” by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1406 +- Bump Black version to 24.3.0 by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1403 +- Publish documentation on azure `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1412 diff --git a/docs/content/release/0.21.1-notes.rst b/docs/content/release/0.21.1-notes.rst new file mode 100644 index 0000000000..cd35017d87 --- /dev/null +++ b/docs/content/release/0.21.1-notes.rst @@ -0,0 +1,30 @@ +.. _0.21.1_notes: + +=========================== +SimPEG 0.21.1 Release Notes +=========================== + +April 10th, 2024 + +.. contents:: Highlights + :depth: 2 + +Updates +======= + +Minor fix when importing Dask in the ``meta`` module: Dask is an optional +dependency. + +Contributors +============ + +This is a combination of contributors and reviewers who've made contributions +towards this release (in no particular order). 
+ +* `@jcapriot `__ + +Pull Requests +============= + +* Fix hard dask dependency by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1415 diff --git a/docs/content/release/index.rst b/docs/content/release/index.rst index 98e30d6b57..49daf1cfc9 100644 --- a/docs/content/release/index.rst +++ b/docs/content/release/index.rst @@ -5,6 +5,8 @@ Release Notes .. toctree:: :maxdepth: 2 + 0.21.1 <0.21.1-notes> + 0.21.0 <0.21.0-notes> 0.20.0 <0.20.0-notes> 0.19.0 <0.19.0-notes> 0.18.1 <0.18.1-notes> diff --git a/environment_test.yml b/environment_test.yml index 8c5604edb7..84940f92d6 100644 --- a/environment_test.yml +++ b/environment_test.yml @@ -40,7 +40,7 @@ dependencies: - choclo # Linters and code style - pre-commit - - black==23.12.1 + - black==24.3.0 - flake8==7.0.0 - flake8-bugbear==23.12.2 - flake8-builtins==2.2.0 diff --git a/examples/01-maps/plot_block_in_layer.py b/examples/01-maps/plot_block_in_layer.py index 4b116ceeb2..c84d27b218 100644 --- a/examples/01-maps/plot_block_in_layer.py +++ b/examples/01-maps/plot_block_in_layer.py @@ -21,6 +21,7 @@ ] """ + import discretize from SimPEG import maps import numpy as np diff --git a/examples/01-maps/plot_combo.py b/examples/01-maps/plot_combo.py index 86a98cf4aa..a7157a4b82 100644 --- a/examples/01-maps/plot_combo.py +++ b/examples/01-maps/plot_combo.py @@ -26,6 +26,7 @@ right). Just to be sure that the derivative is correct, you should always run the test on the mapping that you create. 
""" + import discretize from SimPEG import maps import numpy as np diff --git a/examples/01-maps/plot_layer.py b/examples/01-maps/plot_layer.py index 90600bde0a..d73a9bc8bf 100644 --- a/examples/01-maps/plot_layer.py +++ b/examples/01-maps/plot_layer.py @@ -17,6 +17,7 @@ 'layer thickness' ] """ + import discretize from SimPEG import maps import numpy as np diff --git a/examples/01-maps/plot_mesh2mesh.py b/examples/01-maps/plot_mesh2mesh.py index bb36c19a78..b2063e71bb 100644 --- a/examples/01-maps/plot_mesh2mesh.py +++ b/examples/01-maps/plot_mesh2mesh.py @@ -4,6 +4,7 @@ This mapping allows you to go from one mesh to another. """ + import discretize from SimPEG import maps, utils import matplotlib.pyplot as plt diff --git a/examples/01-maps/plot_sumMap.py b/examples/01-maps/plot_sumMap.py index 270e7cec22..aceb6c9220 100644 --- a/examples/01-maps/plot_sumMap.py +++ b/examples/01-maps/plot_sumMap.py @@ -12,6 +12,7 @@ """ + from discretize import TensorMesh from discretize.utils import active_from_xyz from SimPEG import ( @@ -30,7 +31,7 @@ def run(plotIt=True): - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # Create a mesh dx = 5.0 @@ -62,7 +63,12 @@ def run(plotIt=True): # Create a MAGsurvey rxLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] rxLoc = magnetics.Point(rxLoc) - srcField = magnetics.SourceField([rxLoc], parameters=H0) + srcField = magnetics.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = magnetics.Survey(srcField) # We can now create a susceptibility model and generate data @@ -133,17 +139,19 @@ def run(plotIt=True): regMesh = TensorMesh([len(domains)]) reg_m1 = regularization.Sparse(regMesh, mapping=wires.homo) - reg_m1.cell_weights = wires.homo * wr + reg_m1.set_weights(weights=wires.homo * wr) + reg_m1.norms = [0, 2] - reg_m1.mref = np.zeros(sumMap.shape[1]) + reg_m1.reference_model = 
np.zeros(sumMap.shape[1]) # Regularization for the voxel model reg_m2 = regularization.Sparse( mesh, active_cells=actv, mapping=wires.hetero, gradient_type="components" ) - reg_m2.cell_weights = wires.hetero * wr + reg_m2.set_weights(weights=wires.hetero * wr) + reg_m2.norms = [0, 0, 0, 0] - reg_m2.mref = np.zeros(sumMap.shape[1]) + reg_m2.reference_model = np.zeros(sumMap.shape[1]) reg = reg_m1 + reg_m2 diff --git a/examples/02-gravity/plot_inv_grav_tiled.py b/examples/02-gravity/plot_inv_grav_tiled.py index cc8fe41f78..37ae5e203d 100644 --- a/examples/02-gravity/plot_inv_grav_tiled.py +++ b/examples/02-gravity/plot_inv_grav_tiled.py @@ -5,6 +5,7 @@ Invert data in tiles. """ + import os import numpy as np import matplotlib.pyplot as plt @@ -243,7 +244,7 @@ ) saveDict = directives.SaveOutputEveryIteration(save_txt=False) update_Jacobi = directives.UpdatePreconditioner() -sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) +sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) inv = inversion.BaseInversion( invProb, directiveList=[update_IRLS, sensitivity_weights, betaest, update_Jacobi, saveDict], diff --git a/examples/03-magnetics/plot_0_analytic.py b/examples/03-magnetics/plot_0_analytic.py index 8384445f2e..1c8e7980aa 100644 --- a/examples/03-magnetics/plot_0_analytic.py +++ b/examples/03-magnetics/plot_0_analytic.py @@ -5,6 +5,7 @@ Comparing the magnetics field in Vancouver to Seoul """ + import numpy as np from SimPEG.potential_fields.magnetics import analytics import matplotlib.pyplot as plt diff --git a/examples/03-magnetics/plot_inv_mag_MVI_Sparse_TreeMesh.py b/examples/03-magnetics/plot_inv_mag_MVI_Sparse_TreeMesh.py index 9c420650b6..7cc54915f2 100644 --- a/examples/03-magnetics/plot_inv_mag_MVI_Sparse_TreeMesh.py +++ b/examples/03-magnetics/plot_inv_mag_MVI_Sparse_TreeMesh.py @@ -51,7 +51,7 @@ # np.random.seed(1) # We will assume a vertical inducing field -H0 = (50000.0, 90.0, 0.0) +h0_amplitude, 
h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # The magnetization is set along a different direction (induced + remanence) M = np.array([45.0, 90.0]) @@ -74,7 +74,12 @@ # Create a MAGsurvey xyzLoc = np.c_[mkvc(X.T), mkvc(Y.T), mkvc(Z.T)] rxLoc = magnetics.receivers.Point(xyzLoc) -srcField = magnetics.sources.SourceField(receiver_list=[rxLoc], parameters=H0) +srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, +) survey = magnetics.survey.Survey(srcField) # Here how the topography looks with a quick interpolation, just a Gaussian... diff --git a/examples/03-magnetics/plot_inv_mag_MVI_VectorAmplitude.py b/examples/03-magnetics/plot_inv_mag_MVI_VectorAmplitude.py index bc23e82d3c..0e8740197d 100644 --- a/examples/03-magnetics/plot_inv_mag_MVI_VectorAmplitude.py +++ b/examples/03-magnetics/plot_inv_mag_MVI_VectorAmplitude.py @@ -44,7 +44,7 @@ # np.random.seed(1) # We will assume a vertical inducing field -H0 = (50000.0, 90.0, 0.0) +h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # Create grid of points for topography # Lets create a simple Gaussian topo and set the active cells @@ -63,7 +63,12 @@ # Create a MAGsurvey xyzLoc = np.c_[mkvc(X.T), mkvc(Y.T), mkvc(Z.T)] rxLoc = magnetics.receivers.Point(xyzLoc) -srcField = magnetics.sources.SourceField(receiver_list=[rxLoc], parameters=H0) +srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, +) survey = magnetics.survey.Survey(srcField) ############################################################################### diff --git a/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py b/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py index 3f43150103..a7bf711161 100644 --- a/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py +++ 
b/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.py @@ -50,7 +50,7 @@ # # We will assume a vertical inducing field -H0 = (50000.0, 90.0, 0.0) +h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # The magnetization is set along a different direction (induced + remanence) M = np.array([45.0, 90.0]) @@ -75,7 +75,12 @@ # Create a MAGsurvey rxLoc = np.c_[mkvc(X.T), mkvc(Y.T), mkvc(Z.T)] receiver_list = magnetics.receivers.Point(rxLoc) -srcField = magnetics.sources.SourceField(receiver_list=[receiver_list], parameters=H0) +srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[receiver_list], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, +) survey = magnetics.survey.Survey(srcField) # Here how the topography looks with a quick interpolation, just a Gaussian... @@ -228,9 +233,9 @@ # Create a regularization function, in this case l2l2 reg = regularization.Sparse( - mesh, indActive=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0 + mesh, active_cells=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0 ) -reg.mref = np.zeros(nC) +reg.reference_model = np.zeros(nC) # Specify how the optimization will proceed, set susceptibility bounds to inf opt = optimization.ProjectedGNCG( @@ -267,7 +272,12 @@ # receiver_list = magnetics.receivers.Point(rxLoc, components=["bx", "by", "bz"]) -srcField = magnetics.sources.SourceField(receiver_list=[receiver_list], parameters=H0) +srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[receiver_list], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, +) surveyAmp = magnetics.survey.Survey(srcField) simulation = magnetics.simulation.Simulation3DIntegral( @@ -335,9 +345,9 @@ data_obj = data.Data(survey, dobs=bAmp, noise_floor=wd) # Create a sparse regularization -reg = regularization.Sparse(mesh, indActive=actv, mapping=idenMap) +reg = regularization.Sparse(mesh, active_cells=actv, mapping=idenMap) reg.norms = [1, 0, 0, 
0] -reg.mref = np.zeros(nC) +reg.reference_model = np.zeros(nC) # Data misfit function dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_obj) diff --git a/examples/04-dcip/plot_dc_analytic.py b/examples/04-dcip/plot_dc_analytic.py index ec16ee2672..da9fca2cb3 100644 --- a/examples/04-dcip/plot_dc_analytic.py +++ b/examples/04-dcip/plot_dc_analytic.py @@ -5,6 +5,7 @@ Comparison of the analytic and numerical solution for a direct current resistivity dipole in 3D. """ + import discretize from SimPEG import utils import numpy as np diff --git a/examples/06-tdem/plot_fwd_tdem_3d_model.py b/examples/06-tdem/plot_fwd_tdem_3d_model.py index 36637e5085..6a07f422ff 100644 --- a/examples/06-tdem/plot_fwd_tdem_3d_model.py +++ b/examples/06-tdem/plot_fwd_tdem_3d_model.py @@ -2,6 +2,7 @@ Time-domain CSEM for a resistive cube in a deep marine setting ============================================================== """ + import empymod import discretize diff --git a/examples/06-tdem/plot_inv_tdem_1D.py b/examples/06-tdem/plot_inv_tdem_1D.py index 95c005e96f..b3f6fd1a78 100644 --- a/examples/06-tdem/plot_inv_tdem_1D.py +++ b/examples/06-tdem/plot_inv_tdem_1D.py @@ -4,6 +4,7 @@ Here we will create and run a TDEM 1D inversion. """ + import numpy as np from SimPEG.electromagnetics import time_domain from SimPEG import ( diff --git a/examples/06-tdem/plot_inv_tdem_1D_raw_waveform.py b/examples/06-tdem/plot_inv_tdem_1D_raw_waveform.py index 4a0c5625c9..619ada07e4 100644 --- a/examples/06-tdem/plot_inv_tdem_1D_raw_waveform.py +++ b/examples/06-tdem/plot_inv_tdem_1D_raw_waveform.py @@ -6,6 +6,7 @@ with VTEM waveform of which initial condition is zero, but have some on- and off-time. 
""" + import numpy as np import discretize from SimPEG import ( diff --git a/examples/08-vrm/plot_inv_vrm_eq.py b/examples/08-vrm/plot_inv_vrm_eq.py index e4004a0b29..dab9190535 100644 --- a/examples/08-vrm/plot_inv_vrm_eq.py +++ b/examples/08-vrm/plot_inv_vrm_eq.py @@ -196,7 +196,10 @@ w = w / np.max(w) w = w -reg = regularization.Smallness(mesh=mesh, indActive=actCells, cell_weights=w) +reg = regularization.Smallness( + mesh=mesh, active_cells=actCells, weights={"cell_weights": w} +) + opt = optimization.ProjectedGNCG( maxIter=20, lower=0.0, upper=1e-2, maxIterLS=20, tolCG=1e-4 ) diff --git a/examples/09-flow/plot_fwd_flow_richards_1D.py b/examples/09-flow/plot_fwd_flow_richards_1D.py index 811fc84a46..dcc2a7b7ff 100644 --- a/examples/09-flow/plot_fwd_flow_richards_1D.py +++ b/examples/09-flow/plot_fwd_flow_richards_1D.py @@ -38,6 +38,7 @@ .. _Celia1990: http://www.webpages.uidaho.edu/ch/papers/Celia.pdf """ + import matplotlib import matplotlib.pyplot as plt import numpy as np diff --git a/examples/09-flow/plot_inv_flow_richards_1D.py b/examples/09-flow/plot_inv_flow_richards_1D.py index f30a739c3c..d38dbb4014 100644 --- a/examples/09-flow/plot_inv_flow_richards_1D.py +++ b/examples/09-flow/plot_inv_flow_richards_1D.py @@ -25,6 +25,7 @@ .. 
_Celia1990: http://www.webpages.uidaho.edu/ch/papers/Celia.pdf """ + import matplotlib import matplotlib.pyplot as plt import numpy as np diff --git a/examples/10-pgi/plot_inv_1_PGI_Linear_1D_joint_WithRelationships.py b/examples/10-pgi/plot_inv_1_PGI_Linear_1D_joint_WithRelationships.py index 5da5932952..afaa07b183 100644 --- a/examples/10-pgi/plot_inv_1_PGI_Linear_1D_joint_WithRelationships.py +++ b/examples/10-pgi/plot_inv_1_PGI_Linear_1D_joint_WithRelationships.py @@ -233,13 +233,13 @@ def g(k): # WeightedLeastSquares Inversion reg1 = regularization.WeightedLeastSquares( - mesh, alpha_s=1.0, alpha_x=1.0, mapping=wires.m1 + mesh, alpha_s=1.0, alpha_x=1.0, mapping=wires.m1, weights={"cell_weights": wr1} ) -reg1.cell_weights = wr1 + reg2 = regularization.WeightedLeastSquares( - mesh, alpha_s=1.0, alpha_x=1.0, mapping=wires.m2 + mesh, alpha_s=1.0, alpha_x=1.0, mapping=wires.m2, weights={"cell_weights": wr2} ) -reg2.cell_weights = wr2 + reg = reg1 + reg2 opt = optimization.ProjectedGNCG( diff --git a/examples/20-published/plot_booky_1D_time_freq_inv.py b/examples/20-published/plot_booky_1D_time_freq_inv.py index 180685ef32..e1469d3f3d 100644 --- a/examples/20-published/plot_booky_1D_time_freq_inv.py +++ b/examples/20-published/plot_booky_1D_time_freq_inv.py @@ -261,7 +261,7 @@ def run(plotIt=True, saveFig=False, cleanup=True): inv = inversion.BaseInversion(invProb, directiveList=[target]) reg.alpha_s = 1e-3 reg.alpha_x = 1.0 - reg.mref = m0.copy() + reg.reference_model = m0.copy() opt.LSshorten = 0.5 opt.remember("xc") # run the inversion @@ -379,7 +379,7 @@ def run(plotIt=True, saveFig=False, cleanup=True): reg.alpha_x = 1.0 opt.LSshorten = 0.5 opt.remember("xc") - reg.mref = mopt_re # Use RESOLVE model as a reference model + reg.reference_model = mopt_re # Use RESOLVE model as a reference model # run the inversion mopt_sky = inv.run(m0) diff --git a/examples/20-published/plot_booky_1Dstitched_resolve_inv.py 
b/examples/20-published/plot_booky_1Dstitched_resolve_inv.py index fc10a3317c..dbdff9966b 100644 --- a/examples/20-published/plot_booky_1Dstitched_resolve_inv.py +++ b/examples/20-published/plot_booky_1Dstitched_resolve_inv.py @@ -127,7 +127,7 @@ def resolve_1Dinversions( # regularization regMesh = discretize.TensorMesh([mesh.h[2][mapping.maps[-1].indActive]]) reg = regularization.WeightedLeastSquares(regMesh) - reg.mref = mref + reg.reference_model = mref # optimization opt = optimization.InexactGaussNewton(maxIter=10) diff --git a/examples/20-published/plot_heagyetal2017_casing.py b/examples/20-published/plot_heagyetal2017_casing.py index 6d0543130c..bcce56721a 100644 --- a/examples/20-published/plot_heagyetal2017_casing.py +++ b/examples/20-published/plot_heagyetal2017_casing.py @@ -30,6 +30,7 @@ This example was updated for SimPEG 0.14.0 on January 31st, 2020 by Joseph Capriotti """ + import discretize from SimPEG import utils, maps, tests from SimPEG.electromagnetics import frequency_domain as FDEM, mu_0 @@ -265,8 +266,8 @@ def primaryMapping(self): expMapPrimary * injActMapPrimary # log(sigma) --> sigma * paramMapPrimary # log(sigma) below surface --> include air - * injectCasingParams # parametric --> casing + layered earth - * # parametric layered earth --> parametric + * injectCasingParams # parametric --> casing + layered earth # parametric layered earth --> parametric + * # layered earth + casing self.projectionMapPrimary # grab relevant parameters from full # model (eg. 
ignore block) diff --git a/examples/20-published/plot_heagyetal2017_cyl_inversions.py b/examples/20-published/plot_heagyetal2017_cyl_inversions.py index 98e04747a4..2c93524648 100644 --- a/examples/20-published/plot_heagyetal2017_cyl_inversions.py +++ b/examples/20-published/plot_heagyetal2017_cyl_inversions.py @@ -18,6 +18,7 @@ This example was updated for SimPEG 0.14.0 on January 31st, 2020 by Joseph Capriotti """ + import discretize from SimPEG import ( maps, diff --git a/examples/20-published/plot_laguna_del_maule_inversion.py b/examples/20-published/plot_laguna_del_maule_inversion.py index 0edd61c3c2..47a467c343 100644 --- a/examples/20-published/plot_laguna_del_maule_inversion.py +++ b/examples/20-published/plot_laguna_del_maule_inversion.py @@ -11,6 +11,7 @@ Craig Miller """ + import os import shutil import tarfile @@ -96,9 +97,9 @@ def run(plotIt=True, cleanAfterRun=True): # %% Create inversion objects reg = regularization.Sparse( - mesh, active_cells=active, mapping=staticCells, gradientType="total" + mesh, active_cells=active, mapping=staticCells, gradient_type="total" ) - reg.mref = driver.mref[dynamic] + reg.reference_model = driver.mref[dynamic] reg.norms = [0.0, 1.0, 1.0, 1.0] # reg.norms = driver.lpnorms diff --git a/examples/20-published/plot_richards_celia1990.py b/examples/20-published/plot_richards_celia1990.py index 798ec47149..ce2267d8b9 100644 --- a/examples/20-published/plot_richards_celia1990.py +++ b/examples/20-published/plot_richards_celia1990.py @@ -39,6 +39,7 @@ .. _Celia1990: http://www.webpages.uidaho.edu/ch/papers/Celia.pdf """ + import matplotlib.pyplot as plt import numpy as np diff --git a/examples/20-published/plot_schenkel_morrison_casing.py b/examples/20-published/plot_schenkel_morrison_casing.py index 4868459e2f..478d8b90e3 100644 --- a/examples/20-published/plot_schenkel_morrison_casing.py +++ b/examples/20-published/plot_schenkel_morrison_casing.py @@ -44,6 +44,7 @@ a citation would be much appreciated! 
""" + import matplotlib.pylab as plt import numpy as np import discretize diff --git a/examples/20-published/plot_tomo_joint_with_volume.py b/examples/20-published/plot_tomo_joint_with_volume.py index 2b9c445917..791bc32a8c 100644 --- a/examples/20-published/plot_tomo_joint_with_volume.py +++ b/examples/20-published/plot_tomo_joint_with_volume.py @@ -42,7 +42,7 @@ class Volume(objective_function.BaseObjectiveFunction): .. math:: - \phi_v = \frac{1}{2}|| \int_V m dV - \text{knownVolume} ||^2 + \phi_v = || \int_V m dV - \text{knownVolume} ||^2 """ def __init__(self, mesh, knownVolume=0.0, **kwargs): @@ -60,25 +60,27 @@ def knownVolume(self, value): self._knownVolume = utils.validate_float("knownVolume", value, min_val=0.0) def __call__(self, m): - return 0.5 * (self.estVol(m) - self.knownVolume) ** 2 + return (self.estVol(m) - self.knownVolume) ** 2 def estVol(self, m): return np.inner(self.mesh.cell_volumes, m) def deriv(self, m): # return (self.mesh.cell_volumes * np.inner(self.mesh.cell_volumes, m)) - return self.mesh.cell_volumes * ( - self.knownVolume - np.inner(self.mesh.cell_volumes, m) - ) + return ( + 2 + * self.mesh.cell_volumes + * (self.knownVolume - np.inner(self.mesh.cell_volumes, m)) + ) # factor of 2 from deriv of ||estVol - knownVol||^2 def deriv2(self, m, v=None): if v is not None: - return utils.mkvc( + return 2 * utils.mkvc( self.mesh.cell_volumes * np.inner(self.mesh.cell_volumes, v) ) else: # TODO: this is inefficent. 
It is a fully dense matrix - return sp.csc_matrix( + return 2 * sp.csc_matrix( np.outer(self.mesh.cell_volumes, self.mesh.cell_volumes) ) diff --git a/examples/20-published/plot_vadose_vangenuchten.py b/examples/20-published/plot_vadose_vangenuchten.py index 95b8d10af3..a05cb0b6f4 100644 --- a/examples/20-published/plot_vadose_vangenuchten.py +++ b/examples/20-published/plot_vadose_vangenuchten.py @@ -10,6 +10,7 @@ The RETC code for quantifying the hydraulic functions of unsaturated soils, Van Genuchten, M Th, Leij, F J, Yates, S R """ + import matplotlib.pyplot as plt import discretize diff --git a/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion.py b/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion.py index 1caac9b8d2..465390fca9 100644 --- a/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion.py +++ b/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion.py @@ -146,7 +146,7 @@ def run(plotIt=True, survey_type="dipole-dipole"): regmap = maps.IdentityMap(nP=int(actind.sum())) # Related to inversion - reg = regularization.Sparse(mesh, indActive=actind, mapping=regmap) + reg = regularization.Sparse(mesh, active_cells=actind, mapping=regmap) opt = optimization.InexactGaussNewton(maxIter=15) invProb = inverse_problem.BaseInvProblem(dmisfit, reg, opt) beta = directives.BetaSchedule(coolingFactor=5, coolingRate=2) diff --git a/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion_irls.py b/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion_irls.py index 188662e72e..8c3238670b 100644 --- a/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion_irls.py +++ b/examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion_irls.py @@ -155,9 +155,8 @@ def run(plotIt=True, survey_type="dipole-dipole", p=0.0, qx=2.0, qz=2.0): # Related to inversion reg = regularization.Sparse( - mesh, indActive=actind, mapping=regmap, gradientType="components" + mesh, active_cells=actind, mapping=regmap, gradient_type="components" ) - # 
gradientType = 'components' reg.norms = [p, qx, qz, 0.0] IRLS = directives.Update_IRLS( max_irls_iterations=20, minGNiter=1, beta_search=False, fix_Jmatrix=True diff --git a/examples/_archived/plot_inv_grav_linear.py b/examples/_archived/plot_inv_grav_linear.py index d84bcc5bd9..c35831bd76 100644 --- a/examples/_archived/plot_inv_grav_linear.py +++ b/examples/_archived/plot_inv_grav_linear.py @@ -6,6 +6,7 @@ with a compact norm """ + import numpy as np import matplotlib.pyplot as plt @@ -102,7 +103,7 @@ def run(plotIt=True): rxLoc = survey.source_field.receiver_list[0].locations # Create a regularization - reg = regularization.Sparse(mesh, indActive=actv, mapping=idenMap) + reg = regularization.Sparse(mesh, active_cells=actv, mapping=idenMap) reg.norms = [0, 0, 0, 0] # Data misfit function @@ -127,7 +128,7 @@ def run(plotIt=True): ) saveDict = directives.SaveOutputEveryIteration(save_txt=False) update_Jacobi = directives.UpdatePreconditioner() - sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) + sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) inv = inversion.BaseInversion( invProb, directiveList=[ diff --git a/examples/_archived/plot_inv_mag_linear.py b/examples/_archived/plot_inv_mag_linear.py index 09bfe42a64..bf25676a50 100644 --- a/examples/_archived/plot_inv_mag_linear.py +++ b/examples/_archived/plot_inv_mag_linear.py @@ -6,6 +6,7 @@ with a compact norm """ + import matplotlib.pyplot as plt import numpy as np from discretize import TensorMesh @@ -26,7 +27,7 @@ def run(plotIt=True): # Define the inducing field parameter - H0 = (50000, 90, 0) + h0_amplitude, h0_inclination, h0_declination = (50000, 90, 0) # Create a mesh dx = 5.0 @@ -64,7 +65,12 @@ def run(plotIt=True): # Create a MAGsurvey rxLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] rxLoc = magnetics.receivers.Point(rxLoc, components=["tmi"]) - srcField = magnetics.sources.SourceField(receiver_list=[rxLoc], parameters=H0) + 
srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = magnetics.survey.Survey(srcField) # We can now create a susceptibility model and generate data @@ -99,7 +105,7 @@ def run(plotIt=True): data_object = data.Data(survey, dobs=synthetic_data, noise_floor=wd) # Create a regularization - reg = regularization.Sparse(mesh, indActive=actv, mapping=idenMap) + reg = regularization.Sparse(mesh, active_cells=actv, mapping=idenMap) reg.mref = np.zeros(nC) reg.norms = [0, 0, 0, 0] # reg.eps_p, reg.eps_q = 1e-0, 1e-0 @@ -126,7 +132,7 @@ def run(plotIt=True): saveDict = directives.SaveOutputEveryIteration(save_txt=False) update_Jacobi = directives.UpdatePreconditioner() # Add sensitivity weights - sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) + sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) inv = inversion.BaseInversion( invProb, diff --git a/requirements_dev.txt b/requirements_dev.txt index f81dbe7d4e..5bab4a0c4b 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -19,7 +19,7 @@ jupyter toolz empymod>=2.0.0 scooby -black==23.12.1 +black==24.3.0 pre-commit twine memory_profiler diff --git a/requirements_style.txt b/requirements_style.txt index 86051e527b..a4fd699571 100644 --- a/requirements_style.txt +++ b/requirements_style.txt @@ -1,4 +1,4 @@ -black==23.12.1 +black==24.3.0 flake8==7.0.0 flake8-bugbear==23.12.2 flake8-builtins==2.2.0 diff --git a/tests/base/regularizations/test_cross_gradient.py b/tests/base/regularizations/test_cross_gradient.py index 66e84082ab..907f04bb56 100644 --- a/tests/base/regularizations/test_cross_gradient.py +++ b/tests/base/regularizations/test_cross_gradient.py @@ -8,8 +8,6 @@ regularization, ) -np.random.seed(10) - class CrossGradientTensor2D(unittest.TestCase): def setUp(self): @@ -30,7 +28,7 @@ def setUp(self): cros_grad = 
regularization.CrossGradient( mesh, wire_map=wires, - indActive=actv, + active_cells=actv, ) self.mesh = mesh @@ -42,6 +40,7 @@ def test_order_approximate_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = True self.assertTrue(cross_grad.test()) @@ -52,12 +51,14 @@ def test_order_full_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = False self.assertTrue(cross_grad._test_deriv()) self.assertTrue(cross_grad._test_deriv2(expectedOrder=2)) def test_deriv2_no_arg(self): + np.random.seed(10) m = np.random.randn(2 * len(self.mesh)) cross_grad = self.cross_grad @@ -96,7 +97,7 @@ def test_cross_grad_calc(self): cross_grad = self.cross_grad - v1 = 0.5 * np.sum(np.abs(cross_grad.calculate_cross_gradient(m))) + v1 = np.sum(np.abs(cross_grad.calculate_cross_gradient(m))) v2 = cross_grad(m) self.assertEqual(v1, v2) @@ -122,7 +123,7 @@ def setUp(self): cros_grad = regularization.CrossGradient( mesh, wire_map=wires, - indActive=actv, + active_cells=actv, ) self.mesh = mesh @@ -134,6 +135,7 @@ def test_order_approximate_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = True self.assertTrue(cross_grad.test()) @@ -144,12 +146,14 @@ def test_order_full_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = False self.assertTrue(cross_grad._test_deriv()) self.assertTrue(cross_grad._test_deriv2(expectedOrder=2)) def test_deriv2_no_arg(self): + np.random.seed(10) m = np.random.randn(2 * len(self.mesh)) cross_grad = self.cross_grad @@ -167,6 +171,7 @@ def test_deriv2_no_arg(self): np.testing.assert_allclose(Wv, W @ v) def 
test_cross_grad_calc(self): + np.random.seed(10) m = np.random.randn(2 * len(self.mesh)) cross_grad = self.cross_grad @@ -196,7 +201,9 @@ def setUp(self): # maps wires = maps.Wires(("m1", mesh.nC), ("m2", mesh.nC)) - cross_grad = regularization.CrossGradient(mesh, wire_map=wires, indActive=actv) + cross_grad = regularization.CrossGradient( + mesh, wire_map=wires, active_cells=actv + ) self.mesh = mesh self.cross_grad = cross_grad @@ -207,6 +214,7 @@ def test_order_approximate_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = True self.assertTrue(cross_grad.test()) @@ -217,12 +225,14 @@ def test_order_full_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = False self.assertTrue(cross_grad._test_deriv()) self.assertTrue(cross_grad._test_deriv2(expectedOrder=2)) def test_deriv2_no_arg(self): + np.random.seed(10) m = np.random.randn(2 * len(self.mesh)) cross_grad = self.cross_grad @@ -259,7 +269,9 @@ def setUp(self): # maps wires = maps.Wires(("m1", mesh.nC), ("m2", mesh.nC)) - cross_grad = regularization.CrossGradient(mesh, wire_map=wires, indActive=actv) + cross_grad = regularization.CrossGradient( + mesh, wire_map=wires, active_cells=actv + ) self.mesh = mesh self.cross_grad = cross_grad @@ -270,6 +282,7 @@ def test_order_approximate_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = True self.assertTrue(cross_grad.test()) @@ -280,12 +293,14 @@ def test_order_full_hessian(self): Test deriv and deriv2 matrix of cross-gradient with approx_hessian=True """ + np.random.seed(10) cross_grad = self.cross_grad cross_grad.approx_hessian = False self.assertTrue(cross_grad._test_deriv()) 
self.assertTrue(cross_grad._test_deriv2(expectedOrder=2)) def test_deriv2_no_arg(self): + np.random.seed(10) m = np.random.randn(2 * len(self.mesh)) cross_grad = self.cross_grad diff --git a/tests/base/regularizations/test_full_gradient.py b/tests/base/regularizations/test_full_gradient.py index 9f12d4f43f..a827676fc8 100644 --- a/tests/base/regularizations/test_full_gradient.py +++ b/tests/base/regularizations/test_full_gradient.py @@ -1,23 +1,236 @@ -from discretize.tests import OrderTest +from discretize.tests import assert_expected_order, check_derivative +from discretize.utils import example_simplex_mesh +import discretize import numpy as np -import matplotlib.pyplot as plt from SimPEG.regularization import SmoothnessFullGradient +import pytest -class RegOrderTest(OrderTest): - meshTypes = ["uniformTensorMesh", "uniformTree"] - meshSizes = [4, 8, 16, 32] - meshDimension = 2 +def f_2d(x, y): + return (1 - np.cos(2 * x * np.pi)) * (1 - np.cos(4 * y * np.pi)) - def getError(self): - true_val = 59.2176264065362 / 2 - x = self.M.cell_centers[:, 0] - y = self.M.cell_centers[:, 1] - # a function that is zero at edge with zero derivative - f_cc = (1 - np.cos(2 * x * np.pi)) * (1 - np.cos(2 * y * np.pi)) - reg = SmoothnessFullGradient(self.M, alphas=[1, 1]) - return reg(f_cc) - true_val +def f_3d(x, y, z): + return f_2d(x, y) * (1 - np.cos(8 * z * np.pi)) - def test_orderWeakCellGradIntegral(self): - self.orderTest() + +dir_2d = np.array([[1.0, 1.0], [-1.0, 1.0]]).T +dir_2d /= np.linalg.norm(dir_2d, axis=0) +dir_3d = np.array([[1, 1, 1], [-1, 1, 0], [-1, -1, 2]]).T +dir_3d = dir_3d / np.linalg.norm(dir_3d, axis=0) + +# a list of argument tuples to pass to pytest parameterize +# each is a tuple of (function, dim, true_value, alphas, reg_dirs) +parameterized_args = [ + (f_2d, 2, 15 * np.pi**2, [1, 1], None), # assumes reg_dirs aligned with axes + ( + f_2d, + 2, + 15 * np.pi**2, + [1, 1], + np.eye(2), + ), # test for explicitly aligned with axes + ( + f_2d, + 2, + 15 * 
np.pi**2, + [1, 1], + dir_2d, + ), # circular regularization should be invariant to rotation + ( + f_2d, + 2, + 27 * np.pi**2, + [1, 2], + None, + ), # elliptic regularization aligned with axes + (f_2d, 2, 111.033049512255 * 2, [1, 2], dir_2d), # rotated elliptic regularization + ( + f_3d, + 3, + 189 * np.pi**2 / 2, + [1, 1, 1], + None, + ), # test for explicitly aligned with axes + ( + f_3d, + 3, + 189 * np.pi**2 / 2, + [1, 1, 1], + np.eye(3), + ), # test for explicitly aligned with axes + ( + f_3d, + 3, + 189 * np.pi**2 / 2, + [1, 1, 1], + dir_3d, + ), # circular regularization should be invariant to rotation + ( + f_3d, + 3, + 513 * np.pi**2 / 2, + [1, 2, 3], + None, + ), # elliptic regularization aligned with axes + ( + f_3d, + 3, + 1065.91727531765 * 2, + [1, 2, 3], + dir_3d, + ), # rotated elliptic regularization +] + + +@pytest.mark.parametrize("mesh_class", [discretize.TensorMesh, discretize.TreeMesh]) +@pytest.mark.parametrize("func,dim,true_value,alphas,reg_dirs", parameterized_args) +def test_regulariation_order(mesh_class, func, dim, true_value, alphas, reg_dirs): + """This function is testing for the accuracy of the regularization. + Basically, is it actually measuring what we say it's measuring. 
+ """ + n_hs = [8, 16, 32] + + def reg_error(n): + h = [n] * dim + mesh = mesh_class(h) + if mesh_class is discretize.TreeMesh: + mesh.refine(-1) + # cell widths will be the same in each dimension + dh = mesh.h[0][0] + + f_eval = func(*mesh.cell_centers.T) + + reg = SmoothnessFullGradient(mesh, alphas=alphas, reg_dirs=reg_dirs) + + numerical_eval = reg(f_eval) + err = np.abs(numerical_eval - true_value) + return err, dh + + assert_expected_order(reg_error, n_hs) + + +@pytest.mark.parametrize("dim", [2, 3]) +def test_simplex_mesh(dim): + """Test to make sure it works with a simplex mesh + + We can't make as strong of an accuracy claim for this mesh type because the cell gradient + operator is not actually defined for it (it uses an approximation to the cell gradient). + It is close, but we should at least test that it works.. + """ + h = [10] * dim + points, simplices = example_simplex_mesh(h) + mesh = discretize.SimplexMesh(points, simplices) + reg = SmoothnessFullGradient(mesh) + + # multiply it by a vector to make sure we can construct everything internally + # at the very least, we should be able to confirm it evaluates to 0 for a flat model. 
+ out = reg(np.ones(mesh.n_cells)) + np.testing.assert_allclose(out, 0) + + +@pytest.mark.parametrize( + "dim,alphas,reg_dirs", [(2, [1, 2], dir_2d), (3, [1, 2, 3], dir_3d)] +) +def test_first_derivatives(dim, alphas, reg_dirs): + """Perform a derivative test.""" + h = [10] * dim + mesh = discretize.TensorMesh(h) + reg = SmoothnessFullGradient(mesh, alphas=alphas, reg_dirs=reg_dirs) + + def func(x): + return reg(x), reg.deriv(x) + + check_derivative(func, np.ones(mesh.n_cells), plotIt=False) + + +@pytest.mark.parametrize( + "dim,alphas,reg_dirs", [(2, [1, 2], dir_2d), (3, [1, 2, 3], dir_3d)] +) +def test_second_derivatives(dim, alphas, reg_dirs): + """Perform a derivative test.""" + h = [10] * dim + mesh = discretize.TensorMesh(h) + reg = SmoothnessFullGradient(mesh, alphas=alphas, reg_dirs=reg_dirs) + + def func(x): + return reg.deriv(x), lambda v: reg.deriv2(x, v) + + check_derivative(func, np.ones(mesh.n_cells), plotIt=False) + + +@pytest.mark.parametrize("with_active_cells", [True, False]) +def test_operations(with_active_cells, dim=3): + # Here we just make sure operations at least work + h = [10] * dim + mesh = discretize.TensorMesh(h) + if with_active_cells: + active_cells = mesh.cell_centers[:, -1] <= 0.75 + n_cells = active_cells.sum() + else: + active_cells = None + n_cells = mesh.n_cells + reg = SmoothnessFullGradient(mesh, active_cells=active_cells) + # create a model + m = np.arange(n_cells) + # create a vector + v = np.random.rand(n_cells) + # test the second derivative evaluates + # and gives same results with and without a vector + v1 = reg.deriv2(m, v) + v2 = reg.deriv2(m) @ v + np.testing.assert_allclose(v1, v2) + + W1 = reg.W + + # test assigning n_cells + reg.set_weights(temp_weight=np.random.rand(n_cells)) + + # setting a weight should've erased W + assert reg._W is None + + # test assigning n_total_faces face weight + reg.set_weights(temp_weight=np.random.rand(mesh.n_faces)) + + # and test it all works! 
+ W2 = reg.W + assert W1 is not W2 + + +def test_errors(): + # bad dimension mesh + mesh1d = discretize.TensorMesh([5]) + with pytest.raises(TypeError): + SmoothnessFullGradient(mesh1d) + mesh2d = discretize.TensorMesh([5, 5]) + # test some bad alphas + with pytest.raises(ValueError): + # 3D alpha passed to 2D operator + SmoothnessFullGradient(mesh2d, [1, 2, 3]) + + with pytest.raises(IndexError): + # incorrect number cell dependent alphas + alphas = np.random.rand(mesh2d.n_cells - 5, 2) + SmoothnessFullGradient(mesh2d, alphas=alphas) + + with pytest.raises(ValueError): + # negative alphas + SmoothnessFullGradient(mesh2d, [-1, 1, 1]) + + alphas = [1, 2] + # test some bad reg dirs + with pytest.raises(ValueError): + # 3D reg dirs to 2D reg + reg_dirs = np.random.rand(3, 3) + SmoothnessFullGradient(mesh2d, alphas=alphas, reg_dirs=reg_dirs) + + with pytest.raises(IndexError): + # incorrect number of cell dependent reg_dirs + reg_dirs = np.random.rand(mesh2d.n_cells - 5, 2, 2) + SmoothnessFullGradient(mesh2d, alphas=alphas, reg_dirs=reg_dirs) + + with pytest.raises(ValueError): + # non orthnormal reg_dirs + # incorrect number of cell dependent reg_dirs + reg_dirs = np.random.rand(2, 2) + SmoothnessFullGradient(mesh2d, alphas=alphas, reg_dirs=reg_dirs) diff --git a/tests/base/regularizations/test_jtv.py b/tests/base/regularizations/test_jtv.py index 9d93d26c66..c016043da6 100644 --- a/tests/base/regularizations/test_jtv.py +++ b/tests/base/regularizations/test_jtv.py @@ -31,7 +31,7 @@ def setUp(self): jtv = regularization.JointTotalVariation( mesh, wire_map=wires, - indActive=actv, + active_cells=actv, ) self.mesh = mesh @@ -81,7 +81,7 @@ def setUp(self): jtv = regularization.JointTotalVariation( mesh, wire_map=wires, - indActive=actv, + active_cells=actv, ) self.mesh = mesh @@ -127,7 +127,9 @@ def setUp(self): # maps wires = maps.Wires(("m1", mesh.nC), ("m2", mesh.nC)) - jtv = regularization.JointTotalVariation(mesh, wire_map=wires, indActive=actv) + jtv = 
regularization.JointTotalVariation( + mesh, wire_map=wires, active_cells=actv + ) self.mesh = mesh self.jtv = jtv @@ -174,7 +176,9 @@ def setUp(self): # maps wires = maps.Wires(("m1", mesh.nC), ("m2", mesh.nC)) - jtv = regularization.JointTotalVariation(mesh, wire_map=wires, indActive=actv) + jtv = regularization.JointTotalVariation( + mesh, wire_map=wires, active_cells=actv + ) self.mesh = mesh self.jtv = jtv @@ -221,7 +225,7 @@ def test_bad_wires(): regularization.JointTotalVariation( mesh, wire_map=wires, - indActive=actv, + active_cells=actv, ) diff --git a/tests/base/regularizations/test_pgi_regularization.py b/tests/base/regularizations/test_pgi_regularization.py index b8db90f00e..cc0ce5ac94 100644 --- a/tests/base/regularizations/test_pgi_regularization.py +++ b/tests/base/regularizations/test_pgi_regularization.py @@ -1,9 +1,11 @@ +import pytest import unittest import discretize import numpy as np from pymatsolver import SolverLU from scipy.stats import multivariate_normal + from SimPEG import regularization from SimPEG.maps import Wires from SimPEG.utils import WeightedGaussianMixture, mkvc @@ -85,9 +87,7 @@ def test_full_covariances(self): dm = self.model - mref score_approx0 = reg(self.model) score_approx1 = 0.5 * dm.dot(reg.deriv2(self.model, dm)) - passed_score_approx = np.allclose(score_approx0, score_approx1) - self.assertTrue(passed_score_approx) - + np.testing.assert_allclose(score_approx0, score_approx1) reg.objfcts[0].approx_eval = False score = reg(self.model) - reg(mref) passed_score = np.allclose(score_approx0, score, rtol=1e-4) @@ -193,8 +193,7 @@ def test_tied_covariances(self): dm = self.model - mref score_approx0 = reg(self.model) score_approx1 = 0.5 * dm.dot(reg.deriv2(self.model, dm)) - passed_score_approx = np.allclose(score_approx0, score_approx1) - self.assertTrue(passed_score_approx) + np.testing.assert_allclose(score_approx0, score_approx1) reg.objfcts[0].approx_eval = False score = reg(self.model) - reg(mref) passed_score = 
np.allclose(score_approx0, score, rtol=1e-4) @@ -297,8 +296,7 @@ def test_diag_covariances(self): dm = self.model - mref score_approx0 = reg(self.model) score_approx1 = 0.5 * dm.dot(reg.deriv2(self.model, dm)) - passed_score_approx = np.allclose(score_approx0, score_approx1) - self.assertTrue(passed_score_approx) + np.testing.assert_allclose(score_approx0, score_approx1) reg.objfcts[0].approx_eval = False score = reg(self.model) - reg(mref) passed_score = np.allclose(score_approx0, score, rtol=1e-4) @@ -401,8 +399,7 @@ def test_spherical_covariances(self): dm = self.model - mref score_approx0 = reg(self.model) score_approx1 = 0.5 * dm.dot(reg.deriv2(self.model, dm)) - passed_score_approx = np.allclose(score_approx0, score_approx1) - self.assertTrue(passed_score_approx) + np.testing.assert_allclose(score_approx0, score_approx1) reg.objfcts[0].approx_eval = False score = reg(self.model) - reg(mref) passed_score = np.allclose(score_approx0, score, rtol=1e-4) @@ -473,5 +470,19 @@ def test_spherical_covariances(self): plt.show() +def test_removed_mref(): + """Test if PGI raises error when accessing removed mref property.""" + h = [[(2, 2)], [(2, 2)], [(2, 2)]] + mesh = discretize.TensorMesh(h) + n_components = 1 + gmm = WeightedGaussianMixture(mesh=mesh, n_components=n_components) + samples = np.random.default_rng(seed=42).normal(size=(mesh.n_cells, 2)) + gmm.fit(samples) + pgi = regularization.PGI(mesh=mesh, gmmref=gmm) + message = "mref has been removed, please use reference_model." 
+ with pytest.raises(NotImplementedError, match=message): + pgi.mref + + if __name__ == "__main__": unittest.main() diff --git a/tests/base/regularizations/test_regularization.py b/tests/base/regularizations/test_regularization.py index d32a0c0ce6..779890667d 100644 --- a/tests/base/regularizations/test_regularization.py +++ b/tests/base/regularizations/test_regularization.py @@ -1,19 +1,22 @@ -import inspect +import numpy as np import unittest -import discretize -import numpy as np import pytest +import inspect +import discretize from SimPEG import maps, objective_function, regularization, utils -from SimPEG.objective_function import ComboObjectiveFunction from SimPEG.regularization import ( BaseRegularization, + WeightedLeastSquares, + Sparse, + SparseSmoothness, Smallness, SmoothnessFirstOrder, SmoothnessSecondOrder, - WeightedLeastSquares, ) +from SimPEG.objective_function import ComboObjectiveFunction + TOL = 1e-7 testReg = True @@ -36,8 +39,19 @@ "LinearCorrespondence", "JointTotalVariation", "BaseAmplitude", + "SmoothnessFullGradient", "VectorAmplitude", "CrossReferenceRegularization", + # Removed regularization classes that raise error on instantiation + "PGIwithNonlinearRelationshipsSmallness", + "PGIwithRelationships", + "Simple", + "SimpleSmall", + "SimpleSmoothDeriv", + "Small", + "SmoothDeriv", + "SmoothDeriv2", + "Tikhonov", ] @@ -80,7 +94,7 @@ def test_regularization(self): else: m = np.random.rand(mesh.nC) mref = np.ones_like(m) * np.mean(m) - reg.mref = mref + reg.reference_model = mref # test derivs passed = reg.test(m, eps=TOL) @@ -168,7 +182,7 @@ def test_property_mirroring(self): active_cells = mesh.gridCC[:, 2] < 0.6 reg = getattr(regularization, regType)(mesh, active_cells=active_cells) - self.assertTrue(reg.nP == reg.regularization_mesh.nC) + self.assertTrue(reg.nP == reg.regularization_mesh.n_cells) [ self.assertTrue(np.all(fct.active_cells == active_cells)) @@ -288,7 +302,8 @@ def test_mappings_and_cell_weights(self): wires = 
maps.Wires(("sigma", mesh.nC), ("mu", mesh.nC)) - reg = regularization.Smallness(mesh, mapping=wires.sigma, weights=cell_weights) + reg = regularization.Smallness(mesh, mapping=wires.sigma) + reg.set_weights(cell_weights=cell_weights) objfct = objective_function.L2ObjectiveFunction( W=utils.sdiag(np.sqrt(cell_weights * mesh.cell_volumes)), @@ -323,8 +338,7 @@ def test_update_of_sparse_norms(self): v = np.random.rand(mesh.nC) cell_weights = np.random.rand(mesh.nC) - - reg = regularization.Sparse(mesh, weights=cell_weights) + reg = regularization.Sparse(mesh, weights={"cell_weights": cell_weights}) np.testing.assert_equal(reg.norms, [1, 1, 1, 1]) @@ -385,14 +399,14 @@ def test_linked_properties(self): ] [self.assertTrue(reg.mapping is fct.mapping) for fct in reg.objfcts] - D = reg.regularization_mesh.cellDiffx + D = reg.regularization_mesh.cell_gradient_x reg.regularization_mesh._cell_gradient_x = 4 * D v = np.random.rand(D.shape[1]) [ self.assertTrue( np.all( reg.regularization_mesh._cell_gradient_x * v - == fct.regularization_mesh.cellDiffx * v + == fct.regularization_mesh.cell_gradient_x * v ) ) for fct in reg.objfcts @@ -456,7 +470,7 @@ def test_nC_residual(self): mapping = maps.ExpMap(mesh) * maps.SurjectVertical1D(mesh) * actMap regMesh = discretize.TensorMesh([mesh.h[2][mapping.maps[-1].indActive]]) - reg = regularization.Simple(regMesh) + reg = regularization.WeightedLeastSquares(regMesh) self.assertTrue(reg._nC_residual == regMesh.nC) self.assertTrue(all([fct._nC_residual == regMesh.nC for fct in reg.objfcts])) @@ -679,6 +693,12 @@ class Dummy: with pytest.raises(TypeError, match=msg): regularization.parent = invalid_parent + def test_default_parent(self, regularization): + """Test setting default parent class to a BaseRegularization.""" + mesh = discretize.TensorMesh([3, 4, 5]) + parent = WeightedLeastSquares(mesh, objfcts=[regularization]) + assert regularization.parent is parent + class TestWeightsKeys: """ @@ -705,14 +725,6 @@ def 
test_user_defined_weights_as_dict(self, mesh): reg = BaseRegularization(mesh, weights=weights) assert reg.weights_keys == ["dummy_weight"] - def test_user_defined_weights_as_array(self, mesh): - """ - Test weights_keys after user defined weights as dictionary - """ - weights = np.ones(mesh.n_cells) - reg = BaseRegularization(mesh, weights=weights) - assert reg.weights_keys == ["user_weights"] - @pytest.mark.parametrize( "regularization_class", (Smallness, SmoothnessFirstOrder, SmoothnessSecondOrder) ) @@ -741,13 +753,15 @@ def test_multiple_weights(self, mesh, regularization_class): assert reg.weights_keys == ["dummy_weight", "other_weights", "volume"] -class TestDeprecatedArguments: +class TestRemovedObjects: """ - Test errors after simultaneously passing new and deprecated arguments. - - Within these arguments are: + Test if errors are raised after passing removed arguments or trying to + access removed properties. * ``indActive`` (replaced by ``active_cells``) + * ``gradientType`` (replaced by ``gradient_type``) + * ``mref`` (replaced by ``reference_model``) + * ``regmesh`` (replaced by ``regularization_mesh``) * ``cell_weights`` (replaced by ``weights``) """ @@ -769,21 +783,132 @@ def mesh(self, request): @pytest.mark.parametrize( "regularization_class", (BaseRegularization, WeightedLeastSquares) ) - def test_active_cells(self, mesh, regularization_class): - """Test indActive and active_cells arguments.""" + def test_mref_property(self, mesh, regularization_class): + """Test mref property.""" + msg = "mref has been removed, please use reference_model." + reg = regularization_class(mesh) + with pytest.raises(NotImplementedError, match=msg): + reg.mref + + def test_regmesh_property(self, mesh): + """Test regmesh property.""" + msg = "regmesh has been removed, please use regularization_mesh." 
+ reg = BaseRegularization(mesh) + with pytest.raises(NotImplementedError, match=msg): + reg.regmesh + + @pytest.mark.parametrize("regularization_class", (Sparse, SparseSmoothness)) + def test_gradient_type(self, mesh, regularization_class): + """Test gradientType argument.""" + msg = ( + "'gradientType' argument has been removed. " + "Please use 'gradient_type' instead." + ) + with pytest.raises(TypeError, match=msg): + regularization_class(mesh, gradientType="total") + + @pytest.mark.parametrize( + "regularization_class", + (BaseRegularization, WeightedLeastSquares), + ) + def test_ind_active(self, mesh, regularization_class): + """Test if error is raised when passing the indActive argument.""" active_cells = np.ones(len(mesh), dtype=bool) - msg = "Cannot simultaneously pass 'active_cells' and 'indActive'." - with pytest.raises(ValueError, match=msg): - regularization_class( - mesh, active_cells=active_cells, indActive=active_cells - ) + msg = ( + "'indActive' argument has been removed. " + "Please use 'active_cells' instead." + ) + with pytest.raises(TypeError, match=msg): + regularization_class(mesh, indActive=active_cells) - def test_weights(self, mesh): - """Test cell_weights and weights.""" + @pytest.mark.parametrize( + "regularization_class", + (BaseRegularization, WeightedLeastSquares), + ) + def test_ind_active_property(self, mesh, regularization_class): + """Test if error is raised when trying to access the indActive property.""" + active_cells = np.ones(len(mesh), dtype=bool) + reg = regularization_class(mesh, active_cells=active_cells) + msg = "indActive has been removed, please use active_cells." 
+ with pytest.raises(NotImplementedError, match=msg): + reg.indActive + + @pytest.mark.parametrize( + "regularization_class", + (BaseRegularization, WeightedLeastSquares), + ) + def test_cell_weights_argument(self, mesh, regularization_class): + """Test if error is raised when passing the cell_weights argument.""" weights = np.ones(len(mesh)) - msg = "Cannot simultaneously pass 'weights' and 'cell_weights'." - with pytest.raises(ValueError, match=msg): - BaseRegularization(mesh, weights=weights, cell_weights=weights) + msg = "'cell_weights' argument has been removed. Please use 'weights' instead." + with pytest.raises(TypeError, match=msg): + regularization_class(mesh, cell_weights=weights) + + @pytest.mark.parametrize( + "regularization_class", (BaseRegularization, WeightedLeastSquares) + ) + def test_cell_weights_property(self, mesh, regularization_class): + """Test if error is raised when trying to access the cell_weights property.""" + weights = {"weights": np.ones(len(mesh))} + msg = ( + "'cell_weights' has been removed. " + "Please access weights using the `set_weights`, `get_weights`, and " + "`remove_weights` methods." + ) + reg = regularization_class(mesh, weights=weights) + with pytest.raises(AttributeError, match=msg): + reg.cell_weights + + @pytest.mark.parametrize( + "regularization_class", (BaseRegularization, WeightedLeastSquares) + ) + def test_cell_weights_setter(self, mesh, regularization_class): + """Test if error is raised when trying to set the cell_weights property.""" + msg = ( + "'cell_weights' has been removed. " + "Please access weights using the `set_weights`, `get_weights`, and " + "`remove_weights` methods." + ) + reg = regularization_class(mesh) + with pytest.raises(AttributeError, match=msg): + reg.cell_weights = "dummy variable" + + +class TestRemovedRegularizations: + """ + Test if errors are raised after creating removed regularization classes. 
+ """ + + @pytest.mark.parametrize( + "regularization_class", + ( + regularization.PGIwithNonlinearRelationshipsSmallness, + regularization.PGIwithRelationships, + regularization.Simple, + regularization.SimpleSmall, + regularization.SimpleSmoothDeriv, + regularization.Small, + regularization.SmoothDeriv, + regularization.SmoothDeriv2, + regularization.Tikhonov, + ), + ) + def test_removed_class(self, regularization_class): + class_name = regularization_class.__name__ + msg = f"{class_name} has been removed, please use." + with pytest.raises(NotImplementedError, match=msg): + regularization_class() + + +@pytest.mark.parametrize( + "regularization_class", (BaseRegularization, WeightedLeastSquares) +) +def test_invalid_weights_type(regularization_class): + """Test error after passing weights as invalid type.""" + mesh = discretize.TensorMesh([[(2, 2)]]) + msg = "Invalid 'weights' of type ''" + with pytest.raises(TypeError, match=msg): + regularization_class(mesh, weights=np.array([1.0])) if __name__ == "__main__": diff --git a/tests/base/test_correspondance.py b/tests/base/test_correspondance.py index 4e9fc71442..64a8cbcd0a 100644 --- a/tests/base/test_correspondance.py +++ b/tests/base/test_correspondance.py @@ -30,7 +30,7 @@ def setUp(self): corr = regularization.LinearCorrespondence( mesh, wire_map=wires, - indActive=actv, + active_cells=actv, ) self.mesh = mesh diff --git a/tests/base/test_directives.py b/tests/base/test_directives.py index 8637e633af..2af5c58e32 100644 --- a/tests/base/test_directives.py +++ b/tests/base/test_directives.py @@ -64,11 +64,16 @@ def setUp(self): mesh = discretize.TensorMesh([4, 4, 4]) # Magnetic inducing field parameter (A,I,D) - B = [50000, 90, 0] + h0_amplitude, h0_inclination, h0_declination = (50000, 90, 0) # Create a MAGsurvey rx = mag.Point(np.vstack([[0.25, 0.25, 0.25], [-0.25, -0.25, 0.25]])) - srcField = mag.UniformBackgroundField([rx], parameters=(B[0], B[1], B[2])) + srcField = mag.UniformBackgroundField( + 
receiver_list=[rx], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = mag.Survey(srcField) # Create the forward model operator @@ -128,21 +133,12 @@ def test_validation_in_inversion(self): inv = inversion.BaseInversion(invProb) inv.directiveList = [update_Jacobi, sensitivity_weights] - def test_sensitivity_weighting_warnings(self): - # Test setter warnings - d_temp = directives.UpdateSensitivityWeights() - d_temp.normalization_method = True - self.assertTrue(d_temp.normalization_method == "maximum") - - d_temp.normalization_method = False - self.assertTrue(d_temp.normalization_method is None) - def test_sensitivity_weighting_global(self): test_inputs = { - "everyIter": False, - "threshold": 1e-12, + "every_iteration": False, + "threshold_value": 1e-12, "threshold_method": "global", - "normalization": False, + "normalization_method": None, } # Compute test weights @@ -150,7 +146,7 @@ def test_sensitivity_weighting_global(self): np.sqrt(np.sum((self.dmis.W * self.sim.G) ** 2, axis=0)) / self.mesh.cell_volumes ) - test_weights = sqrt_diagJtJ + test_inputs["threshold"] + test_weights = sqrt_diagJtJ + test_inputs["threshold_value"] test_weights *= self.mesh.cell_volumes # Test directive @@ -165,7 +161,11 @@ def test_sensitivity_weighting_global(self): test_directive.update() for reg_i in reg.objfcts: - self.assertTrue(np.all(np.isclose(test_weights, reg_i.cell_weights))) + # Get all weights in regularization + weights = [reg_i.get_weights(key) for key in reg_i.weights_keys] + # Compute the product of all weights + weights = np.prod(weights, axis=0) + self.assertTrue(np.all(np.isclose(test_weights, weights))) reg_i.remove_weights("sensitivity") # self.test_sensitivity_weighting_subroutine(test_weights, test_directive) @@ -177,7 +177,7 @@ def test_sensitivity_weighting_percentile_maximum(self): "every_iteration": True, "threshold_value": 1, "threshold_method": "percentile", - "normalization": True, + "normalization_method": 
"maximum", } # Compute test weights @@ -205,7 +205,11 @@ def test_sensitivity_weighting_percentile_maximum(self): test_directive.update() for reg_i in reg.objfcts: - self.assertTrue(np.all(np.isclose(test_weights, reg_i.cell_weights))) + # Get all weights in regularization + weights = [reg_i.get_weights(key) for key in reg_i.weights_keys] + # Compute the product of all weights + weights = np.prod(weights, axis=0) + self.assertTrue(np.all(np.isclose(test_weights, weights))) reg_i.remove_weights("sensitivity") # self.test_sensitivity_weighting_subroutine(test_weights, test_directive) @@ -245,7 +249,11 @@ def test_sensitivity_weighting_amplitude_minimum(self): test_directive.update() for reg_i in reg.objfcts: - self.assertTrue(np.all(np.isclose(test_weights, reg_i.cell_weights))) + # Get all weights in regularization + weights = [reg_i.get_weights(key) for key in reg_i.weights_keys] + # Compute the product of all weights + weights = np.prod(weights, axis=0) + self.assertTrue(np.all(np.isclose(test_weights, weights))) reg_i.remove_weights("sensitivity") # self.test_sensitivity_weighting_subroutine(test_weights, test_directive) @@ -298,5 +306,96 @@ def test_save_output_dict(RegClass): assert "x SparseSmoothness.norm" in out_dict +class TestDeprecatedArguments: + """ + Test if directives raise errors after passing deprecated arguments. + """ + + def test_debug(self): + """ + Test if InversionDirective raises error after passing 'debug'. + """ + msg = "'debug' property has been removed. Please use 'verbose'." + with pytest.raises(TypeError, match=msg): + directives.InversionDirective(debug=True) + + +class TestUpdateSensitivityWeightsRemovedArgs: + """ + Test if `UpdateSensitivityWeights` raises errors after passing removed arguments. + """ + + def test_every_iter(self): + """ + Test if `UpdateSensitivityWeights` raises error after passing `everyIter`. + """ + msg = "'everyIter' property has been removed. Please use 'every_iteration'." 
+ with pytest.raises(TypeError, match=msg): + directives.UpdateSensitivityWeights(everyIter=True) + + def test_threshold(self): + """ + Test if `UpdateSensitivityWeights` raises error after passing `threshold`. + """ + msg = "'threshold' property has been removed. Please use 'threshold_value'." + with pytest.raises(TypeError, match=msg): + directives.UpdateSensitivityWeights(threshold=True) + + def test_normalization(self): + """ + Test if `UpdateSensitivityWeights` raises error after passing `normalization`. + """ + msg = ( + "'normalization' property has been removed. " + "Please define normalization using 'normalization_method'." + ) + with pytest.raises(TypeError, match=msg): + directives.UpdateSensitivityWeights(normalization=True) + + +class TestUpdateSensitivityNormalization: + """ + Test the `normalization` property and setter in `UpdateSensitivityWeights` + """ + + @pytest.mark.parametrize("normalization_method", (None, "maximum", "minimum")) + def test_normalization_method_setter_valid(self, normalization_method): + """ + Test if the setter method for normalization_method in + `UpdateSensitivityWeights` works as expected on valid values. + + The `normalization_method` must be a string or a None. This test was + included as part of the removal process of the old `normalization` + property. + """ + d_temp = directives.UpdateSensitivityWeights() + # Use the setter method to assign a value to normalization_method + d_temp.normalization_method = normalization_method + assert d_temp.normalization_method == normalization_method + + @pytest.mark.parametrize("normalization_method", (True, False, "an invalid method")) + def test_normalization_method_setter_invalid(self, normalization_method): + """ + Test if the setter method for normalization_method in + `UpdateSensitivityWeights` raises error on invalid values. + + The `normalization_method` must be a string or a None. This test was + included as part of the removal process of the old `normalization` + property. 
+ """ + d_temp = directives.UpdateSensitivityWeights() + if isinstance(normalization_method, bool): + error_type = TypeError + msg = "'normalization_method' must be a str. Got" + else: + error_type = ValueError + msg = ( + r"'normalization_method' must be in \['minimum', 'maximum'\]. " + f"Got '{normalization_method}'" + ) + with pytest.raises(error_type, match=msg): + d_temp.normalization_method = normalization_method + + if __name__ == "__main__": unittest.main() diff --git a/tests/base/test_maps.py b/tests/base/test_maps.py index ad6362c84d..c01f30d019 100644 --- a/tests/base/test_maps.py +++ b/tests/base/test_maps.py @@ -568,6 +568,51 @@ def test_Tile(self): self.assertTrue((local_mass - total_mass) / total_mass < 1e-8) + def test_logit_errors(self): + nP = 10 + scalar_lower = -2 + scalar_upper = 2 + good_vector_lower = np.random.rand(nP) - 2 + good_vector_upper = np.random.rand(nP) + 2 + + bad_vector_lower = np.random.rand(nP - 2) - 2 + bad_vector_upper = np.random.rand(nP - 2) + 2 + + # test that lower is not equal to nP + with pytest.raises( + ValueError, + match="Lower bound does not broadcast to the number of parameters.*", + ): + maps.LogisticSigmoidMap( + nP=10, lower_bound=bad_vector_lower, upper_bound=scalar_upper + ) + + # test that bad is not equal to nP + with pytest.raises( + ValueError, + match="Upper bound does not broadcast to the number of parameters.*", + ): + maps.LogisticSigmoidMap( + nP=10, lower_bound=scalar_lower, upper_bound=bad_vector_upper + ) + + # test that two upper and lower arrays will not broadcast when not specifying the number of parameters + with pytest.raises( + ValueError, match="Upper bound does not broadcast to the lower bound.*" + ): + maps.LogisticSigmoidMap( + lower_bound=good_vector_lower, upper_bound=bad_vector_upper + ) + + # test that passing a lower bound higher than an upper bound) + with pytest.raises( + ValueError, + match="A lower bound is greater than or equal to the upper bound.", + ): + 
maps.LogisticSigmoidMap( + lower_bound=good_vector_upper, upper_bound=good_vector_lower + ) + class TestWires(unittest.TestCase): def test_basic(self): @@ -718,6 +763,7 @@ def test_linearity(): maps.SphericalSystem(mesh2), maps.SelfConsistentEffectiveMedium(mesh2, sigma0=1, sigma1=2), maps.ExpMap(), + maps.LogisticSigmoidMap(), maps.ReciprocalMap(), maps.LogMap(), maps.ParametricCircleMap(mesh2), diff --git a/tests/base/test_model_utils.py b/tests/base/test_model_utils.py index 9b85dfad3e..48279e4b54 100644 --- a/tests/base/test_model_utils.py +++ b/tests/base/test_model_utils.py @@ -1,9 +1,13 @@ +import pytest import unittest import numpy as np + from discretize import TensorMesh -from SimPEG import utils +from SimPEG import ( + utils, +) class DepthWeightingTest(unittest.TestCase): @@ -19,7 +23,9 @@ def test_depth_weighting_3D(self): r_loc = 0.1 # Depth weighting - wz = utils.depth_weighting(mesh, r_loc, indActive=actv, exponent=5, threshold=0) + wz = utils.depth_weighting( + mesh, r_loc, active_cells=actv, exponent=5, threshold=0 + ) reference_locs = ( np.random.rand(1000, 3) * (mesh.nodes.max(axis=0) - mesh.nodes.min(axis=0)) @@ -28,14 +34,14 @@ def test_depth_weighting_3D(self): reference_locs[:, -1] = r_loc wz2 = utils.depth_weighting( - mesh, reference_locs, indActive=actv, exponent=5, threshold=0 + mesh, reference_locs, active_cells=actv, exponent=5, threshold=0 ) np.testing.assert_allclose(wz, wz2) # testing default params all_active = np.ones(mesh.n_cells, dtype=bool) wz = utils.depth_weighting( - mesh, r_loc, indActive=all_active, exponent=2, threshold=0.5 * dh + mesh, r_loc, active_cells=all_active, exponent=2, threshold=0.5 * dh ) wz2 = utils.depth_weighting(mesh, r_loc) @@ -55,7 +61,9 @@ def test_depth_weighting_2D(self): r_loc = 0.1 # Depth weighting - wz = utils.depth_weighting(mesh, r_loc, indActive=actv, exponent=5, threshold=0) + wz = utils.depth_weighting( + mesh, r_loc, active_cells=actv, exponent=5, threshold=0 + ) reference_locs = ( 
np.random.rand(1000, 2) * (mesh.nodes.max(axis=0) - mesh.nodes.min(axis=0)) @@ -64,85 +72,29 @@ def test_depth_weighting_2D(self): reference_locs[:, -1] = r_loc wz2 = utils.depth_weighting( - mesh, reference_locs, indActive=actv, exponent=5, threshold=0 + mesh, reference_locs, active_cells=actv, exponent=5, threshold=0 ) np.testing.assert_allclose(wz, wz2) -class DistancehWeightingTest(unittest.TestCase): - def test_distance_weighting_3D(self): - # Mesh - dh = 5.0 - hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] - hy = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] - hz = [(dh, 15)] - mesh = TensorMesh([hx, hy, hz], "CCN") - - actv = np.random.randint(0, 2, mesh.n_cells) == 1 - - reference_locs = ( - np.random.rand(1000, 3) * (mesh.nodes.max(axis=0) - mesh.nodes.min(axis=0)) - + mesh.origin - ) - - # distance weighting - wz_numpy = utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="cdist" - ) - wz_numba = utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="loop" - ) - np.testing.assert_allclose(wz_numpy, wz_numba) - - with self.assertRaises(ValueError): - utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="test" - ) - - def test_distance_weighting_2D(self): - # Mesh - dh = 5.0 - hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] - hz = [(dh, 15)] - mesh = TensorMesh([hx, hz], "CN") - - actv = np.random.randint(0, 2, mesh.n_cells) == 1 - - reference_locs = ( - np.random.rand(1000, 2) * (mesh.nodes.max(axis=0) - mesh.nodes.min(axis=0)) - + mesh.origin - ) - - # distance weighting - wz_numpy = utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="cdist" - ) - wz_numba = utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="loop" - ) - np.testing.assert_allclose(wz_numpy, wz_numba) - - def test_distance_weighting_1D(self): - # Mesh - dh = 5.0 - hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] - mesh = 
TensorMesh([hx], "C") - - actv = np.random.randint(0, 2, mesh.n_cells) == 1 - - reference_locs = ( - np.random.rand(1000, 1) * (mesh.nodes.max(axis=0) - mesh.nodes.min(axis=0)) - + mesh.origin - ) - - # distance weighting - wz_numpy = utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="cdist" - ) - wz_numba = utils.distance_weighting( - mesh, reference_locs, active_cells=actv, exponent=3, engine="loop" - ) - np.testing.assert_allclose(wz_numpy, wz_numba) +@pytest.fixture +def mesh(): + """Sample mesh.""" + dh = 5.0 + hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] + hz = [(dh, 15)] + mesh = TensorMesh([hx, hz], "CN") + return mesh + + +def test_removed_indactive(mesh): + """ + Test if error is raised after passing removed indActive argument + """ + active_cells = np.ones(mesh.nC, dtype=bool) + msg = "'indActive' argument has been removed. " "Please use 'active_cells' instead." + with pytest.raises(TypeError, match=msg): + utils.depth_weighting(mesh, 0, indActive=active_cells) if __name__ == "__main__": diff --git a/tests/base/test_objective_function.py b/tests/base/test_objective_function.py index 78d2161361..211a9719bf 100644 --- a/tests/base/test_objective_function.py +++ b/tests/base/test_objective_function.py @@ -278,13 +278,11 @@ def test_ComboW(self): r1 = phi1.W * m r2 = phi2.W * m - print(phi(m), 0.5 * np.inner(r, r)) + print(phi(m), np.inner(r, r)) - self.assertTrue(np.allclose(phi(m), 0.5 * np.inner(r, r))) + self.assertTrue(np.allclose(phi(m), np.inner(r, r))) self.assertTrue( - np.allclose( - phi(m), 0.5 * (alpha1 * np.inner(r1, r1) + alpha2 * np.inner(r2, r2)) - ) + np.allclose(phi(m), (alpha1 * np.inner(r1, r1) + alpha2 * np.inner(r2, r2))) ) def test_ComboConstruction(self): diff --git a/tests/base/test_optimizers.py b/tests/base/test_optimizers.py index 4bdfd2cafa..212d433787 100644 --- a/tests/base/test_optimizers.py +++ b/tests/base/test_optimizers.py @@ -49,11 +49,11 @@ def 
test_ProjGradient_quadratic1Bound(self): self.assertTrue(np.linalg.norm(xopt - x_true, 2) < TOL, True) def test_NewtonRoot(self): - fun = ( - lambda x, return_g=True: np.sin(x) - if not return_g - else (np.sin(x), sdiag(np.cos(x))) - ) + def fun(x, return_g=True): + if return_g: + return np.sin(x), sdiag(np.cos(x)) + return np.sin(x) + x = np.array([np.pi - 0.3, np.pi + 0.1, 0]) xopt = optimization.NewtonRoot(comments=False).root(fun, x) x_true = np.array([np.pi, np.pi, 0]) diff --git a/tests/base/test_simulation.py b/tests/base/test_simulation.py index a1271d96b2..03a59c7fd7 100644 --- a/tests/base/test_simulation.py +++ b/tests/base/test_simulation.py @@ -19,32 +19,6 @@ def setUp(self): self.mtrue = mtrue - def test_forward(self): - data = np.r_[ - 7.50000000e-02, - 5.34102961e-02, - 5.26315566e-03, - -3.92235199e-02, - -4.22361894e-02, - -1.29419602e-02, - 1.30060891e-02, - 1.73572943e-02, - 7.78056876e-03, - -1.49689823e-03, - -4.50212858e-03, - -3.14559131e-03, - -9.55761370e-04, - 3.53963158e-04, - 7.24902205e-04, - 6.06022770e-04, - 3.36635644e-04, - 7.48637479e-05, - -1.10094573e-04, - -1.84905476e-04, - ] - - assert np.allclose(data, self.sim.dpred(self.mtrue)) - def test_make_synthetic_data(self): dclean = self.sim.dpred(self.mtrue) data = self.sim.make_synthetic_data(self.mtrue) diff --git a/tests/base/test_utils.py b/tests/base/test_utils.py index 5952b755fc..88938828ca 100644 --- a/tests/base/test_utils.py +++ b/tests/base/test_utils.py @@ -17,12 +17,11 @@ ind2sub, as_array_n_by_dim, TensorType, - diagEst, + estimate_diagonal, count, timeIt, Counter, download, - surface2ind_topo, coterminal, ) import discretize @@ -276,38 +275,15 @@ def test_as_array_n_by_dim(self): self.assertTrue(np.all(true == listArray)) self.assertTrue(true.shape == listArray.shape) - def test_surface2ind_topo(self): - file_url = ( - "https://storage.googleapis.com/simpeg/tests/utils/vancouver_topo.xyz" - ) - file2load = download(file_url) - vancouver_topo = np.loadtxt(file2load) 
- mesh_topo = discretize.TensorMesh( - [[(500.0, 24)], [(500.0, 20)], [(10.0, 30)]], x0="CCC" - ) - - # To keep consistent with result from deprecated function - vancouver_topo[:, 2] = vancouver_topo[:, 2] + 1e-8 - - indtopoCC = surface2ind_topo( - mesh_topo, vancouver_topo, gridLoc="CC", method="nearest" - ) - indtopoN = surface2ind_topo( - mesh_topo, vancouver_topo, gridLoc="N", method="nearest" - ) - - assert len(np.where(indtopoCC)[0]) == 8728 - assert len(np.where(indtopoN)[0]) == 8211 - -class TestDiagEst(unittest.TestCase): +class TestEstimateDiagonal(unittest.TestCase): def setUp(self): self.n = 1000 self.A = np.random.rand(self.n, self.n) self.Adiag = np.diagonal(self.A) def getTest(self, testType): - Adiagtest = diagEst(self.A, self.n, self.n, testType) + Adiagtest = estimate_diagonal(self.A, self.n, self.n, testType) r = np.abs(Adiagtest - self.Adiag) err = r.dot(r) return err diff --git a/tests/dask/test_grav_inversion_linear.py b/tests/dask/test_grav_inversion_linear.py index 9e91433d36..8d35014b9f 100644 --- a/tests/dask/test_grav_inversion_linear.py +++ b/tests/dask/test_grav_inversion_linear.py @@ -105,7 +105,7 @@ def setUp(self): # Here is where the norms are applied IRLS = directives.Update_IRLS(max_irls_iterations=20, chifact_start=2.0) update_Jacobi = directives.UpdatePreconditioner() - sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) + sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) self.inv = inversion.BaseInversion( invProb, directiveList=[IRLS, sensitivity_weights, update_Jacobi] ) diff --git a/tests/dask/test_mag_MVI_Octree.py b/tests/dask/test_mag_MVI_Octree.py index e7e5699224..37b8783745 100644 --- a/tests/dask/test_mag_MVI_Octree.py +++ b/tests/dask/test_mag_MVI_Octree.py @@ -21,7 +21,7 @@ class MVIProblemTest(unittest.TestCase): def setUp(self): np.random.seed(0) - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # The 
magnetization is set along a different # direction (induced + remanence) @@ -47,7 +47,12 @@ def setUp(self): # Create a MAGsurvey xyzLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] rxLoc = mag.Point(xyzLoc) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = mag.Survey(srcField) # Create a mesh @@ -112,17 +117,17 @@ def setUp(self): # Create three regularization for the different components # of magnetization - reg_p = regularization.Sparse(mesh, indActive=actv, mapping=wires.p) - reg_p.mref = np.zeros(3 * nC) + reg_p = regularization.Sparse(mesh, active_cells=actv, mapping=wires.p) + reg_p.reference_model = np.zeros(3 * nC) - reg_s = regularization.Sparse(mesh, indActive=actv, mapping=wires.s) - reg_s.mref = np.zeros(3 * nC) + reg_s = regularization.Sparse(mesh, active_cells=actv, mapping=wires.s) + reg_s.reference_model = np.zeros(3 * nC) - reg_t = regularization.Sparse(mesh, indActive=actv, mapping=wires.t) - reg_t.mref = np.zeros(3 * nC) + reg_t = regularization.Sparse(mesh, active_cells=actv, mapping=wires.t) + reg_t.reference_model = np.zeros(3 * nC) reg = reg_p + reg_s + reg_t - reg.mref = np.zeros(3 * nC) + reg.reference_model = np.zeros(3 * nC) # Data misfit function dmis = data_misfit.L2DataMisfit(simulation=sim, data=data) @@ -147,7 +152,7 @@ def setUp(self): # Pre-conditioner update_Jacobi = directives.UpdatePreconditioner() - sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) + sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) inv = inversion.BaseInversion( invProb, directiveList=[sensitivity_weights, IRLS, update_Jacobi, betaest] ) @@ -166,24 +171,24 @@ def setUp(self): # Create a Combo Regularization # Regularize the amplitude of the vectors - reg_a = regularization.Sparse(mesh, indActive=actv, mapping=wires.amp) + 
reg_a = regularization.Sparse(mesh, active_cells=actv, mapping=wires.amp) reg_a.norms = [0.0, 0.0, 0.0, 0.0] # Sparse on the model and its gradients - reg_a.mref = np.zeros(3 * nC) + reg_a.reference_model = np.zeros(3 * nC) # Regularize the vertical angle of the vectors - reg_t = regularization.Sparse(mesh, indActive=actv, mapping=wires.theta) + reg_t = regularization.Sparse(mesh, active_cells=actv, mapping=wires.theta) reg_t.alpha_s = 0.0 # No reference angle reg_t.space = "spherical" reg_t.norms = [2.0, 0.0, 0.0, 0.0] # Only norm on gradients used # Regularize the horizontal angle of the vectors - reg_p = regularization.Sparse(mesh, indActive=actv, mapping=wires.phi) + reg_p = regularization.Sparse(mesh, active_cells=actv, mapping=wires.phi) reg_p.alpha_s = 0.0 # No reference angle reg_p.space = "spherical" reg_p.norms = [2.0, 0.0, 0.0, 0.0] # Only norm on gradients used reg = reg_a + reg_t + reg_p - reg.mref = np.zeros(3 * nC) + reg.reference_model = np.zeros(3 * nC) Lbound = np.kron(np.asarray([0, -np.inf, -np.inf]), np.ones(nC)) Ubound = np.kron(np.asarray([10, np.inf, np.inf]), np.ones(nC)) diff --git a/tests/dask/test_mag_inversion_linear_Octree.py b/tests/dask/test_mag_inversion_linear_Octree.py index cf16cb1578..098423b4f8 100644 --- a/tests/dask/test_mag_inversion_linear_Octree.py +++ b/tests/dask/test_mag_inversion_linear_Octree.py @@ -29,7 +29,7 @@ def setUp(self): # From old convention, field orientation is given as an # azimuth from North (positive clockwise) # and dip from the horizontal (positive downward). 
- H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # Create a mesh h = [5, 5, 5] @@ -58,7 +58,12 @@ def setUp(self): # Create a MAGsurvey xyzLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] rxLoc = mag.Point(xyzLoc) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = mag.Survey(srcField) self.mesh = mesh_utils.mesh_builder_xyz( diff --git a/tests/dask/test_mag_nonLinear_Amplitude.py b/tests/dask/test_mag_nonLinear_Amplitude.py index 0118ac78f9..758db82d0a 100644 --- a/tests/dask/test_mag_nonLinear_Amplitude.py +++ b/tests/dask/test_mag_nonLinear_Amplitude.py @@ -22,7 +22,7 @@ class AmpProblemTest(unittest.TestCase): def setUp(self): # We will assume a vertical inducing field - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # The magnetization is set along a different direction (induced + remanence) M = np.array([45.0, 90.0]) @@ -47,8 +47,11 @@ def setUp(self): # Create a MAGsurvey rxLoc = np.c_[mkvc(X.T), mkvc(Y.T), mkvc(Z.T)] receiver_list = magnetics.receivers.Point(rxLoc) - srcField = magnetics.sources.SourceField( - receiver_list=[receiver_list], parameters=H0 + srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[receiver_list], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, ) survey = magnetics.survey.Survey(srcField) @@ -139,9 +142,9 @@ def setUp(self): # Create a regularization function, in this case l2l2 reg = regularization.Sparse( - mesh, indActive=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0 + mesh, active_cells=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0 ) - reg.mref = np.zeros(nC) + reg.reference_model = np.zeros(nC) # Specify how the optimization will proceed, set susceptibility bounds to inf opt = 
optimization.ProjectedGNCG( @@ -186,8 +189,11 @@ def setUp(self): # receiver_list = magnetics.receivers.Point(rxLoc, components=["bx", "by", "bz"]) - srcField = magnetics.sources.SourceField( - receiver_list=[receiver_list], parameters=H0 + srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[receiver_list], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, ) surveyAmp = magnetics.survey.Survey(srcField) @@ -229,9 +235,9 @@ def setUp(self): data_obj = data.Data(survey, dobs=bAmp, noise_floor=wd) # Create a sparse regularization - reg = regularization.Sparse(mesh, indActive=actv, mapping=idenMap) + reg = regularization.Sparse(mesh, active_cells=actv, mapping=idenMap) reg.norms = [1, 0, 0, 0] - reg.mref = np.zeros(nC) + reg.reference_model = np.zeros(nC) # Data misfit function dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_obj) diff --git a/tests/em/em1d/test_EM1D_FD_jac_layers.py b/tests/em/em1d/test_EM1D_FD_jac_layers.py index 20c331f295..61f645be5c 100644 --- a/tests/em/em1d/test_EM1D_FD_jac_layers.py +++ b/tests/em/em1d/test_EM1D_FD_jac_layers.py @@ -155,8 +155,10 @@ def test_EM1DFDJtvec_Layers(self): def misfit(m, dobs): dpred = self.sim.dpred(m) - misfit = 0.5 * np.linalg.norm(dpred - dobs) ** 2 - dmisfit = self.sim.Jtvec(m, dr) + misfit = np.linalg.norm(dpred - dobs) ** 2 + dmisfit = 2.0 * self.sim.Jtvec( + m, dr + ) # derivative of ||dpred - dobs||^2 gives factor of 2 return misfit, dmisfit def derChk(m): @@ -314,8 +316,10 @@ def test_EM1DFDJtvec_Layers(self): def misfit(m, dobs): dpred = self.sim.dpred(m) - misfit = 0.5 * np.linalg.norm(dpred - dobs) ** 2 - dmisfit = self.sim.Jtvec(m, dr) + misfit = np.linalg.norm(dpred - dobs) ** 2 + dmisfit = 2 * self.sim.Jtvec( + m, dr + ) # derivative of ||dpred - dobs||^2 gives factor of 2 return misfit, dmisfit def derChk(m): @@ -450,8 +454,10 @@ def test_EM1DFDJtvec_Layers(self): def misfit(m, dobs): dpred = self.sim.dpred(m) - misfit = 0.5 * 
np.linalg.norm(dpred - dobs) ** 2 - dmisfit = self.sim.Jtvec(m, dr) + misfit = np.linalg.norm(dpred - dobs) ** 2 + dmisfit = 2 * self.sim.Jtvec( + m, dr + ) # derivative of ||dpred - dobs||^2 gives factor of 2 return misfit, dmisfit def derChk(m): diff --git a/tests/em/fdem/forward/test_FDEM_sources.py b/tests/em/fdem/forward/test_FDEM_sources.py index e044e51b28..4b499655c3 100644 --- a/tests/em/fdem/forward/test_FDEM_sources.py +++ b/tests/em/fdem/forward/test_FDEM_sources.py @@ -376,21 +376,22 @@ def test_CircularLoop_bPrimaryMu50_h(self): assert self.bPrimaryTest(src, "j") -def test_CircularLoop_test_N_assign(): +def test_removal_circular_loop_n(): """ - Test depreciation of the N argument (now n_turns) + Test if passing the N argument to CircularLoop raises an error """ - src = fdem.sources.CircularLoop( - [], - frequency=1e-3, - radius=np.sqrt(1 / np.pi), - location=[0, 0, 0], - orientation="Z", - mu=mu_0, - current=0.5, - N=2, - ) - assert src.n_turns == 2 + msg = "'N' property has been removed. Please use 'n_turns'." 
+ with pytest.raises(TypeError, match=msg): + fdem.sources.CircularLoop( + [], + frequency=1e-3, + radius=np.sqrt(1 / np.pi), + location=[0, 0, 0], + orientation="Z", + mu=mu_0, + current=0.5, + N=2, + ) def test_line_current_failures(): diff --git a/tests/em/fdem/forward/test_properties.py b/tests/em/fdem/forward/test_properties.py index aca1cca714..5384c69d17 100644 --- a/tests/em/fdem/forward/test_properties.py +++ b/tests/em/fdem/forward/test_properties.py @@ -5,18 +5,12 @@ from SimPEG.electromagnetics import time_domain as tdem -def test_receiver_properties_validation(): +def test_removed_projcomp(): + """Test if passing the removed `projComp` argument raises an error.""" xyz = np.c_[0.0, 0.0, 0.0] - projComp = "Fx" - rx = fdem.receivers.BaseRx(xyz, projComp=projComp) - - assert rx.projComp == projComp - - with pytest.raises(ValueError): - fdem.receivers.BaseRx(xyz, component="potato") - - with pytest.raises(TypeError): - fdem.receivers.BaseRx(xyz, component=2.0) + msg = "'projComp' property has been removed." 
+ with pytest.raises(TypeError, match=msg): + fdem.receivers.BaseRx(xyz, projComp="foo") def test_source_properties_validation(): diff --git a/tests/em/nsem/inversion/test_Problem3D_Derivs.py b/tests/em/nsem/inversion/test_Problem3D_Derivs.py index 9d1be9da3a..cd96b8127a 100644 --- a/tests/em/nsem/inversion/test_Problem3D_Derivs.py +++ b/tests/em/nsem/inversion/test_Problem3D_Derivs.py @@ -1,4 +1,5 @@ # Test functions +import pytest import unittest import numpy as np from SimPEG import tests, mkvc @@ -12,6 +13,58 @@ MU = mu_0 +@pytest.fixture() +def model_simulation_tuple(): + return nsem.utils.test_utils.setupSimpegNSEM_PrimarySecondary( + nsem.utils.test_utils.halfSpace(1e-2), [0.1], comp="All", singleFreq=False + ) + + +# Test the Jvec derivative +@pytest.mark.parametrize("weights", [True, False]) +def test_Jtjdiag(model_simulation_tuple, weights): + model, simulation = model_simulation_tuple + W = None + if weights: + W = np.eye(simulation.survey.nD) + + J = simulation.getJ(model) + if weights: + J = W @ J + + Jtjdiag = simulation.getJtJdiag(model, W=W) + np.testing.assert_allclose(Jtjdiag, np.sum(J * J, axis=0)) + + +def test_Jtjdiag_clearing(model_simulation_tuple): + model, simulation = model_simulation_tuple + J1 = simulation.getJ(model) + Jtjdiag1 = simulation.getJtJdiag(model) + + m2 = model + 2 + J2 = simulation.getJ(m2) + Jtjdiag2 = simulation.getJtJdiag(m2) + + assert J1 is not J2 + assert Jtjdiag1 is not Jtjdiag2 + + +def test_Jmatrix(model_simulation_tuple): + model, simulation = model_simulation_tuple + rng = np.random.default_rng(4421) + # create random vector + vec = rng.standard_normal(simulation.survey.nD) + + # create the J matrix + J1 = simulation.getJ(model) + Jmatrix_vec = J1.T @ vec + + # compare to JTvec function + jtvec = simulation.Jtvec(model, v=vec) + + np.testing.assert_allclose(Jmatrix_vec, jtvec) + + # Test the Jvec derivative def DerivJvecTest(inputSetup, comp="All", freq=False, expMap=True): m, simulation = 
nsem.utils.test_utils.setupSimpegNSEM_PrimarySecondary( diff --git a/tests/em/static/test_SIP_2D_jvecjtvecadj.py b/tests/em/static/test_SIP_2D_jvecjtvecadj.py index 887aad24db..64fabc6851 100644 --- a/tests/em/static/test_SIP_2D_jvecjtvecadj.py +++ b/tests/em/static/test_SIP_2D_jvecjtvecadj.py @@ -264,9 +264,15 @@ def setUp(self): dobs = problem.make_synthetic_data(mSynth, add_noise=True) # Now set up the problem to do some minimization dmis = data_misfit.L2DataMisfit(data=dobs, simulation=problem) - reg_eta = regularization.Simple(mesh, mapping=wires.eta, indActive=~airind) - reg_taui = regularization.Simple(mesh, mapping=wires.taui, indActive=~airind) - reg_c = regularization.Simple(mesh, mapping=wires.c, indActive=~airind) + reg_eta = regularization.WeightedLeastSquares( + mesh, mapping=wires.eta, active_cells=~airind + ) + reg_taui = regularization.WeightedLeastSquares( + mesh, mapping=wires.taui, active_cells=~airind + ) + reg_c = regularization.WeightedLeastSquares( + mesh, mapping=wires.c, active_cells=~airind + ) reg = reg_eta + reg_taui + reg_c opt = optimization.InexactGaussNewton( maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6 diff --git a/tests/em/static/test_SIP_jvecjtvecadj.py b/tests/em/static/test_SIP_jvecjtvecadj.py index 00e5370bd6..a55272f50e 100644 --- a/tests/em/static/test_SIP_jvecjtvecadj.py +++ b/tests/em/static/test_SIP_jvecjtvecadj.py @@ -282,9 +282,9 @@ def setUp(self): dobs = problem.make_synthetic_data(mSynth, add_noise=True) # Now set up the problem to do some minimization dmis = data_misfit.L2DataMisfit(data=dobs, simulation=problem) - reg_eta = regularization.Sparse(mesh, mapping=wires.eta, indActive=~airind) - reg_taui = regularization.Sparse(mesh, mapping=wires.taui, indActive=~airind) - reg_c = regularization.Sparse(mesh, mapping=wires.c, indActive=~airind) + reg_eta = regularization.Sparse(mesh, mapping=wires.eta, active_cells=~airind) + reg_taui = regularization.Sparse(mesh, mapping=wires.taui, 
active_cells=~airind) + reg_c = regularization.Sparse(mesh, mapping=wires.c, active_cells=~airind) reg = reg_eta + reg_taui + reg_c opt = optimization.InexactGaussNewton( maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6 diff --git a/tests/em/tdem/test_TDEM_sources.py b/tests/em/tdem/test_TDEM_sources.py index 7332c49db3..8d3aa9b511 100644 --- a/tests/em/tdem/test_TDEM_sources.py +++ b/tests/em/tdem/test_TDEM_sources.py @@ -1,3 +1,4 @@ +import pytest import unittest import numpy as np @@ -527,16 +528,17 @@ def test_simple_source(): assert waveform.eval(0.0) == 1.0 -def test_CircularLoop_test_N_assignment(): +def test_removal_circular_loop_n(): """ - Test depreciation of the N property + Test if passing the N argument to CircularLoop raises an error """ - loop = CircularLoop( - [], - waveform=StepOffWaveform(), - location=np.array([0.0, 0.0, 0.0]), - radius=1.0, - current=0.5, - N=2, - ) - assert loop.n_turns == 2 + msg = "'N' property has been removed. Please use 'n_turns'." + with pytest.raises(TypeError, match=msg): + CircularLoop( + [], + waveform=StepOffWaveform(), + location=np.array([0.0, 0.0, 0.0]), + radius=1.0, + current=0.5, + N=2, + ) diff --git a/tests/em/tdem/test_properties.py b/tests/em/tdem/test_properties.py index 5aa443b45b..1c1bb50136 100644 --- a/tests/em/tdem/test_properties.py +++ b/tests/em/tdem/test_properties.py @@ -4,13 +4,13 @@ from SimPEG.electromagnetics import time_domain as tdem -def test_receiver_properties(): +def test_removed_projcomp(): + """Test if passing the removed `projComp` argument raises an error.""" xyz = np.c_[0.0, 0.0, 0.0] times = np.logspace(-5, -2, 4) - projComp = "Fx" - rx = tdem.receivers.BaseRx(xyz, times, projComp=projComp) - - assert rx.projComp == projComp + msg = "'projComp' property has been removed." 
+ with pytest.raises(TypeError, match=msg): + tdem.receivers.BaseRx(xyz, times, projComp="foo") def test_source_properties(): diff --git a/tests/em/vrm/test_vrmfwd.py b/tests/em/vrm/test_vrmfwd.py index 89f1fc54f9..7bcef1c9e0 100644 --- a/tests/em/vrm/test_vrmfwd.py +++ b/tests/em/vrm/test_vrmfwd.py @@ -5,7 +5,6 @@ class VRM_fwd_tests(unittest.TestCase): - """ Computed vs analytic dipole field """ diff --git a/tests/em/vrm/test_vrminv.py b/tests/em/vrm/test_vrminv.py index 8511f0a704..15c022c926 100644 --- a/tests/em/vrm/test_vrminv.py +++ b/tests/em/vrm/test_vrminv.py @@ -63,13 +63,16 @@ def test_basic_inversion(self): dmis = data_misfit.L2DataMisfit(data=dobs, simulation=Problem) W = ( - mkvc( - (np.sum(np.array(Problem.A) ** 2, axis=0)) / meshObj.cell_volumes**2.0 - ) + mkvc((np.sum(np.array(Problem.A) ** 2, axis=0)) / meshObj.cell_volumes**2.0) ** 0.25 ) reg = regularization.WeightedLeastSquares( - meshObj, alpha_s=0.01, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0, weights=W + meshObj, + alpha_s=0.01, + alpha_x=1.0, + alpha_y=1.0, + alpha_z=1.0, + weights={"weights": W}, ) opt = optimization.ProjectedGNCG( maxIter=20, lower=0.0, upper=1e-2, maxIterLS=20, tolCG=1e-4 diff --git a/tests/meta/test_dask_meta.py b/tests/meta/test_dask_meta.py new file mode 100644 index 0000000000..ac44d70a1a --- /dev/null +++ b/tests/meta/test_dask_meta.py @@ -0,0 +1,393 @@ +import numpy as np +from SimPEG.potential_fields import gravity +from SimPEG.electromagnetics.static import resistivity as dc +from SimPEG import maps +from discretize import TensorMesh +import scipy.sparse as sp +import pytest + +from SimPEG.meta import ( + MetaSimulation, + SumMetaSimulation, + RepeatedSimulation, + DaskMetaSimulation, + DaskSumMetaSimulation, + DaskRepeatedSimulation, +) + +from distributed import Client, LocalCluster + + +@pytest.fixture(scope="module") +def cluster(): + dask_cluster = LocalCluster( + n_workers=2, threads_per_worker=2, dashboard_address=None, processes=True + ) + yield dask_cluster 
+ dask_cluster.close() + + +def test_meta_correctness(cluster): + with Client(cluster) as client: + mesh = TensorMesh([16, 16, 16], origin="CCN") + + rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j] + rx_locs = rx_locs.reshape(3, -1).T + rxs = dc.receivers.Pole(rx_locs) + source_locs = np.mgrid[-0.5:0.5:10j, 0:1:1j, 0:1:1j].reshape(3, -1).T + src_list = [ + dc.sources.Pole( + [ + rxs, + ], + location=loc, + ) + for loc in source_locs + ] + m_test = np.arange(mesh.n_cells) / mesh.n_cells + 0.1 + # split by chunks of sources + chunk_size = 3 + sims = [] + mappings = [] + for i in range(0, len(src_list) + 1, chunk_size): + end = min(i + chunk_size, len(src_list)) + if i == end: + break + survey_chunk = dc.Survey(src_list[i:end]) + sims.append( + dc.Simulation3DNodal( + mesh, survey=survey_chunk, sigmaMap=maps.IdentityMap() + ) + ) + mappings.append(maps.IdentityMap()) + + serial_sim = MetaSimulation(sims, mappings) + dask_sim = DaskMetaSimulation(sims, mappings, client) + + # test fields objects + f_meta = serial_sim.fields(m_test) + f_dask = dask_sim.fields(m_test) + # Can't serialize DC nodal fields here, so can't directly test them. 
+ # sol_meta = np.concatenate([f[:, "phiSolution"] for f in f_meta], axis=1) + # sol_dask = np.concatenate([f.result()[:, "phiSolution"] for f in f_dask], axis=1) + # np.testing.assert_allclose(sol_meta, sol_dask) + + # test data output + d_meta = serial_sim.dpred(m_test, f=f_meta) + d_dask = dask_sim.dpred(m_test, f=f_dask) + np.testing.assert_allclose(d_dask, d_meta) + + # test Jvec + rng = np.random.default_rng(seed=0) + u = rng.random(mesh.n_cells) + jvec_meta = serial_sim.Jvec(m_test, u, f=f_meta) + jvec_dask = dask_sim.Jvec(m_test, u, f=f_dask) + + np.testing.assert_allclose(jvec_dask, jvec_meta) + + # test Jtvec + v = rng.random(serial_sim.survey.nD) + jtvec_meta = serial_sim.Jtvec(m_test, v, f=f_meta) + jtvec_dask = dask_sim.Jtvec(m_test, v, f=f_dask) + + np.testing.assert_allclose(jtvec_dask, jtvec_meta) + + # test get diag + diag_meta = serial_sim.getJtJdiag(m_test, f=f_meta) + diag_dask = dask_sim.getJtJdiag(m_test, f=f_dask) + + np.testing.assert_allclose(diag_dask, diag_meta) + + # test things also works without passing optional fields + dask_sim.model = m_test + d_dask2 = dask_sim.dpred() + np.testing.assert_allclose(d_dask, d_dask2) + + jvec_dask2 = dask_sim.Jvec(m_test, u) + np.testing.assert_allclose(jvec_dask, jvec_dask2) + + jtvec_dask2 = dask_sim.Jtvec(m_test, v) + np.testing.assert_allclose(jtvec_dask, jtvec_dask2) + + # also pass a diagonal matrix here for testing. 
+ dask_sim._jtjdiag = None + W = sp.eye(dask_sim.survey.nD) + diag_dask2 = dask_sim.getJtJdiag(m_test, W=W) + np.testing.assert_allclose(diag_dask, diag_dask2) + + +def test_sum_sim_correctness(cluster): + with Client(cluster) as client: + mesh = TensorMesh([16, 16, 16], origin="CCN") + # Create gravity sum sims + rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j].reshape(3, -1).T + rx = gravity.Point(rx_locs, components=["gz"]) + survey = gravity.Survey(gravity.SourceField(rx)) + + mesh_bot = TensorMesh([mesh.h[0], mesh.h[1], mesh.h[2][:8]], origin=mesh.origin) + mesh_top = TensorMesh( + [mesh.h[0], mesh.h[1], mesh.h[2][8:]], origin=["C", "C", mesh.nodes_z[8]] + ) + + g_mappings = [ + maps.Mesh2Mesh((mesh_bot, mesh)), + maps.Mesh2Mesh((mesh_top, mesh)), + ] + g_sims = [ + gravity.Simulation3DIntegral( + mesh_bot, survey=survey, rhoMap=maps.IdentityMap(), n_processes=1 + ), + gravity.Simulation3DIntegral( + mesh_top, survey=survey, rhoMap=maps.IdentityMap(), n_processes=1 + ), + ] + + serial_sim = SumMetaSimulation(g_sims, g_mappings) + parallel_sim = DaskSumMetaSimulation(g_sims, g_mappings, client) + + m_test = np.arange(mesh.n_cells) / mesh.n_cells + 0.1 + + # test fields objects + f_full = serial_sim.fields(m_test) + f_meta = parallel_sim.fields(m_test) + # Again don't serialize and collect the fields on the main + # process directly. 
+ # np.testing.assert_allclose(f_full, sum(f_meta)) + + # test data output + d_full = serial_sim.dpred(m_test, f=f_full) + d_meta = parallel_sim.dpred(m_test, f=f_meta) + np.testing.assert_allclose(d_full, d_meta, rtol=1e-6) + + rng = np.random.default_rng(0) + + # test Jvec + u = rng.random(mesh.n_cells) + jvec_full = serial_sim.Jvec(m_test, u, f=f_full) + jvec_meta = parallel_sim.Jvec(m_test, u, f=f_meta) + + np.testing.assert_allclose(jvec_full, jvec_meta, rtol=1e-6) + + # test Jtvec + v = rng.random(survey.nD) + jtvec_full = serial_sim.Jtvec(m_test, v, f=f_full) + jtvec_meta = parallel_sim.Jtvec(m_test, v, f=f_meta) + + np.testing.assert_allclose(jtvec_full, jtvec_meta, rtol=1e-6) + + # test get diag + diag_full = serial_sim.getJtJdiag(m_test, f=f_full) + diag_meta = parallel_sim.getJtJdiag(m_test, f=f_meta) + + np.testing.assert_allclose(diag_full, diag_meta, rtol=1e-6) + + # test things also works without passing optional kwargs + parallel_sim.model = m_test + d_meta2 = parallel_sim.dpred() + np.testing.assert_allclose(d_meta, d_meta2) + + jvec_meta2 = parallel_sim.Jvec(m_test, u) + np.testing.assert_allclose(jvec_meta, jvec_meta2) + + jtvec_meta2 = parallel_sim.Jtvec(m_test, v) + np.testing.assert_allclose(jtvec_meta, jtvec_meta2) + + parallel_sim._jtjdiag = None + diag_meta2 = parallel_sim.getJtJdiag(m_test) + np.testing.assert_allclose(diag_meta, diag_meta2) + + +def test_repeat_sim_correctness(cluster): + with Client(cluster) as client: + # meta sim is tested for correctness + # so can test the repeat against the meta sim + mesh = TensorMesh([8, 8, 8], origin="CCN") + rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j].reshape(3, -1).T + rx = gravity.Point(rx_locs, components=["gz"]) + survey = gravity.Survey(gravity.SourceField(rx)) + grav_sim = gravity.Simulation3DIntegral( + mesh, survey=survey, rhoMap=maps.IdentityMap(), n_processes=1 + ) + + time_mesh = TensorMesh([8], origin=[0]) + sim_ts = np.linspace(0, 1, 6) + + repeat_mappings = [] + eye = 
sp.eye(mesh.n_cells, mesh.n_cells) + for t in sim_ts: + ave_time = time_mesh.get_interpolation_matrix([t]) + ave_full = sp.kron(ave_time, eye, format="csr") + repeat_mappings.append(maps.LinearMap(ave_full)) + + serial_sim = RepeatedSimulation(grav_sim, repeat_mappings) + parallel_sim = DaskRepeatedSimulation(grav_sim, repeat_mappings, client) + + rng = np.random.default_rng(0) + model = rng.random((time_mesh.n_cells, mesh.n_cells)).reshape(-1) + + # test field things + f_full = serial_sim.fields(model) + f_meta = parallel_sim.fields(model) + # np.testing.assert_equal(np.c_[f_full], np.c_[f_meta]) + + d_full = serial_sim.dpred(model, f_full) + d_repeat = parallel_sim.dpred(model, f_meta) + np.testing.assert_allclose(d_full, d_repeat, rtol=1e-6) + + # test Jvec + u = rng.random(len(model)) + jvec_full = serial_sim.Jvec(model, u, f=f_full) + jvec_meta = parallel_sim.Jvec(model, u, f=f_meta) + np.testing.assert_allclose(jvec_full, jvec_meta, rtol=1e-6) + + # test Jtvec + v = rng.random(len(sim_ts) * survey.nD) + jtvec_full = serial_sim.Jtvec(model, v, f=f_full) + jtvec_meta = parallel_sim.Jtvec(model, v, f=f_meta) + np.testing.assert_allclose(jtvec_full, jtvec_meta, rtol=1e-6) + + # test get diag + diag_full = serial_sim.getJtJdiag(model, f=f_full) + diag_meta = parallel_sim.getJtJdiag(model, f=f_meta) + np.testing.assert_allclose(diag_full, diag_meta, rtol=1e-6) + + +def test_dask_meta_errors(cluster): + with Client(cluster) as client: + mesh = TensorMesh([16, 16, 16], origin="CCN") + + rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j] + rx_locs = rx_locs.reshape(3, -1).T + rxs = dc.receivers.Pole(rx_locs) + source_locs = np.mgrid[-0.5:0.5:10j, 0:1:1j, 0:1:1j].reshape(3, -1).T + src_list = [ + dc.sources.Pole( + [ + rxs, + ], + location=loc, + ) + for loc in source_locs + ] + + # split by chunks of sources + chunk_size = 3 + sims = [] + mappings = [] + for i in range(0, len(src_list) + 1, chunk_size): + end = min(i + chunk_size, len(src_list)) + if i == end: + 
break + survey_chunk = dc.Survey(src_list[i:end]) + sims.append( + dc.Simulation3DNodal( + mesh, survey=survey_chunk, sigmaMap=maps.IdentityMap(mesh) + ) + ) + mappings.append(maps.IdentityMap(mesh)) + + # incompatible length of mappings and simulations lists + with pytest.raises(ValueError): + DaskMetaSimulation(sims[:-1], mappings, client) + + # Bad Simulation type? + with pytest.raises(TypeError): + DaskRepeatedSimulation( + len(sims) + * [ + lambda x: x * 2, + ], + mappings, + client, + ) + + # mappings have incompatible input lengths: + mappings[0] = maps.Projection(mesh.n_cells + 10, np.arange(mesh.n_cells) + 1) + with pytest.raises(ValueError): + DaskMetaSimulation(sims, mappings, client) + + # incompatible mapping and simulation + mappings[0] = maps.Projection(mesh.n_cells, [0, 1, 3, 5, 10]) + with pytest.raises(ValueError): + DaskMetaSimulation(sims, mappings, client) + + +def test_sum_errors(cluster): + with Client(cluster) as client: + mesh = TensorMesh([16, 16, 16], origin="CCN") + + mesh_bot = TensorMesh([mesh.h[0], mesh.h[1], mesh.h[2][:8]], origin=mesh.origin) + mesh_top = TensorMesh( + [mesh.h[0], mesh.h[1], mesh.h[2][8:]], origin=["C", "C", mesh.nodes_z[8]] + ) + + mappings = [ + maps.Mesh2Mesh((mesh_bot, mesh)), + maps.Mesh2Mesh((mesh_top, mesh)), + ] + + rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j].reshape(3, -1).T + + rx1 = gravity.Point(rx_locs, components=["gz"]) + survey1 = gravity.Survey(gravity.SourceField(rx1)) + rx2 = gravity.Point(rx_locs[1:], components=["gz"]) + survey2 = gravity.Survey(gravity.SourceField(rx2)) + + sims = [ + gravity.Simulation3DIntegral( + mesh_bot, + survey=survey1, + rhoMap=maps.IdentityMap(mesh_bot), + n_processes=1, + ), + gravity.Simulation3DIntegral( + mesh_top, + survey=survey2, + rhoMap=maps.IdentityMap(mesh_top), + n_processes=1, + ), + ] + + # Test simulations with different numbers of data. 
+ with pytest.raises(ValueError): + DaskSumMetaSimulation(sims, mappings, client) + + +def test_repeat_errors(cluster): + with Client(cluster) as client: + mesh = TensorMesh([16, 16, 16], origin="CCN") + + rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j] + rx_locs = rx_locs.reshape(3, -1).T + rxs = dc.receivers.Pole(rx_locs) + source_locs = np.mgrid[-0.5:0.5:10j, 0:1:1j, 0:1:1j].reshape(3, -1).T + src_list = [ + dc.sources.Pole( + [ + rxs, + ], + location=loc, + ) + for loc in source_locs + ] + survey = dc.Survey(src_list) + sim = dc.Simulation3DNodal(mesh, survey=survey, sigmaMap=maps.IdentityMap(mesh)) + + # split by chunks of sources + mappings = [] + for _i in range(10): + mappings.append(maps.IdentityMap(mesh)) + + # mappings have incompatible input lengths: + mappings[0] = maps.Projection(mesh.n_cells + 1, np.arange(mesh.n_cells) + 1) + with pytest.raises(ValueError): + DaskRepeatedSimulation(sim, mappings, client) + + # incompatible mappings and simulations + mappings[0] = maps.Projection(mesh.n_cells, [0, 1, 3, 5, 10]) + with pytest.raises(ValueError): + DaskRepeatedSimulation(sim, mappings, client) + + # Bad Simulation type? 
+ with pytest.raises(TypeError): + DaskRepeatedSimulation(lambda x: x * 2, mappings, client) diff --git a/tests/meta/test_meta_sim.py b/tests/meta/test_meta_sim.py index 2530efcf7d..2498aeaa36 100644 --- a/tests/meta/test_meta_sim.py +++ b/tests/meta/test_meta_sim.py @@ -150,7 +150,6 @@ def test_sum_sim_correctness(): np.testing.assert_allclose(jvec_full, jvec_mult, rtol=1e-6) # test Jtvec - rng = np.random.default_rng(seed=0) v = rng.random(survey.nD) jtvec_full = full_sim.Jtvec(m_test, v, f=f_full) jtvec_mult = sum_sim.Jtvec(m_test, v, f=f_mult) diff --git a/tests/meta/test_multiprocessing_sim.py b/tests/meta/test_multiprocessing_sim.py index bc816b0e24..fc1058862d 100644 --- a/tests/meta/test_multiprocessing_sim.py +++ b/tests/meta/test_multiprocessing_sim.py @@ -61,6 +61,8 @@ def test_meta_correctness(): serial_sim = MetaSimulation(dc_sims, dc_mappings) parallel_sim = MultiprocessingMetaSimulation(dc_sims2, dc_mappings, n_processes=12) + rng = np.random.default_rng(seed=0) + try: # create fields objects f_serial = serial_sim.fields(m_test) @@ -72,13 +74,13 @@ def test_meta_correctness(): np.testing.assert_allclose(d_full, d_mult) # test Jvec - u = np.random.rand(mesh.n_cells) + u = rng.random(mesh.n_cells) jvec_full = serial_sim.Jvec(m_test, u, f=f_serial) jvec_mult = parallel_sim.Jvec(m_test, u, f=f_parallel) np.testing.assert_allclose(jvec_full, jvec_mult) # test Jtvec - v = np.random.rand(serial_sim.survey.nD) + v = rng.random(serial_sim.survey.nD) jtvec_full = serial_sim.Jtvec(m_test, v, f=f_serial) jtvec_mult = parallel_sim.Jtvec(m_test, v, f=f_parallel) @@ -141,6 +143,8 @@ def test_sum_correctness(): serial_sim = SumMetaSimulation(g_sims, g_mappings) parallel_sim = MultiprocessingSumMetaSimulation(g_sims, g_mappings, n_processes=2) + + rng = np.random.default_rng(0) try: # test fields objects f_serial = serial_sim.fields(m_test) @@ -150,42 +154,42 @@ def test_sum_correctness(): # test data output d_full = serial_sim.dpred(m_test, f=f_serial) d_mult = 
parallel_sim.dpred(m_test, f=f_parallel) - np.testing.assert_allclose(d_full, d_mult) + np.testing.assert_allclose(d_full, d_mult, rtol=1e-06) # test Jvec - u = np.random.rand(mesh.n_cells) + u = rng.random(mesh.n_cells) jvec_full = serial_sim.Jvec(m_test, u, f=f_serial) jvec_mult = parallel_sim.Jvec(m_test, u, f=f_parallel) - np.testing.assert_allclose(jvec_full, jvec_mult) + np.testing.assert_allclose(jvec_full, jvec_mult, rtol=1e-06) # test Jtvec - v = np.random.rand(survey.nD) + v = rng.random(survey.nD) jtvec_full = serial_sim.Jtvec(m_test, v, f=f_serial) jtvec_mult = parallel_sim.Jtvec(m_test, v, f=f_parallel) - np.testing.assert_allclose(jtvec_full, jtvec_mult) + np.testing.assert_allclose(jtvec_full, jtvec_mult, rtol=1e-06) # test get diag diag_full = serial_sim.getJtJdiag(m_test, f=f_serial) diag_mult = parallel_sim.getJtJdiag(m_test, f=f_parallel) - np.testing.assert_allclose(diag_full, diag_mult) + np.testing.assert_allclose(diag_full, diag_mult, rtol=1e-06) # test things also works without passing optional kwargs parallel_sim.model = m_test d_mult2 = parallel_sim.dpred() - np.testing.assert_allclose(d_mult, d_mult2) + np.testing.assert_allclose(d_mult, d_mult2, rtol=1e-06) jvec_mult2 = parallel_sim.Jvec(m_test, u) - np.testing.assert_allclose(jvec_mult, jvec_mult2) + np.testing.assert_allclose(jvec_mult, jvec_mult2, rtol=1e-06) jtvec_mult2 = parallel_sim.Jtvec(m_test, v) - np.testing.assert_allclose(jtvec_mult, jtvec_mult2) + np.testing.assert_allclose(jtvec_mult, jtvec_mult2, rtol=1e-06) parallel_sim._jtjdiag = None diag_mult2 = parallel_sim.getJtJdiag(m_test) - np.testing.assert_allclose(diag_mult, diag_mult2) + np.testing.assert_allclose(diag_mult, diag_mult2, rtol=1e-06) except Exception as err: raise err @@ -216,7 +220,10 @@ def test_repeat_correctness(): parallel_sim = MultiprocessingRepeatedSimulation( grav_sim, repeat_mappings, n_processes=2 ) - t_model = np.random.rand(time_mesh.n_cells, mesh.n_cells).reshape(-1) + + rng = 
np.random.default_rng(0) + + t_model = rng.random((time_mesh.n_cells, mesh.n_cells)).reshape(-1) try: # test field things @@ -226,24 +233,24 @@ def test_repeat_correctness(): d_full = serial_sim.dpred(t_model, f_serial) d_repeat = parallel_sim.dpred(t_model, f_parallel) - np.testing.assert_equal(d_full, d_repeat) + np.testing.assert_allclose(d_full, d_repeat, rtol=1e-6) # test Jvec - u = np.random.rand(len(t_model)) + u = rng.random(len(t_model)) jvec_full = serial_sim.Jvec(t_model, u, f=f_serial) jvec_mult = parallel_sim.Jvec(t_model, u, f=f_parallel) - np.testing.assert_allclose(jvec_full, jvec_mult) + np.testing.assert_allclose(jvec_full, jvec_mult, rtol=1e-6) # test Jtvec - v = np.random.rand(len(sim_ts) * survey.nD) + v = rng.random(len(sim_ts) * survey.nD) jtvec_full = serial_sim.Jtvec(t_model, v, f=f_serial) jtvec_mult = parallel_sim.Jtvec(t_model, v, f=f_parallel) - np.testing.assert_allclose(jtvec_full, jtvec_mult) + np.testing.assert_allclose(jtvec_full, jtvec_mult, rtol=1e-6) # test get diag diag_full = serial_sim.getJtJdiag(t_model, f=f_serial) diag_mult = parallel_sim.getJtJdiag(t_model, f=f_parallel) - np.testing.assert_allclose(diag_full, diag_mult) + np.testing.assert_allclose(diag_full, diag_mult, rtol=1e-6) except Exception as err: raise err finally: diff --git a/tests/pf/test_forward_Grav_Linear.py b/tests/pf/test_forward_Grav_Linear.py index 496e80b1f7..5e7d865f6b 100644 --- a/tests/pf/test_forward_Grav_Linear.py +++ b/tests/pf/test_forward_Grav_Linear.py @@ -1,6 +1,6 @@ -from unittest.mock import patch import pytest import discretize +import SimPEG from SimPEG import maps from SimPEG.potential_fields import gravity from geoana.gravity import Prism @@ -314,87 +314,38 @@ def test_sensitivity_dtype( assert simulation.sensitivity_dtype is np.float32 @pytest.mark.parametrize("invalid_dtype", (float, np.float16)) - def test_invalid_sensitivity_dtype_assignment( - self, simple_mesh, receivers_locations, invalid_dtype - ): + def 
test_invalid_sensitivity_dtype_assignment(self, simple_mesh, invalid_dtype): """ Test invalid sensitivity_dtype assignment """ - # Create survey - receivers = gravity.Point(receivers_locations, components="gz") - sources = gravity.SourceField([receivers]) - survey = gravity.Survey(sources) - # Create reduced identity map for Linear Problem - active_cells = np.ones(simple_mesh.n_cells, dtype=bool) - idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) - # Create simulation simulation = gravity.Simulation3DIntegral( simple_mesh, - survey=survey, - rhoMap=idenMap, - ind_active=active_cells, ) # Check if error is raised msg = "sensitivity_dtype must be either np.float32 or np.float64." with pytest.raises(TypeError, match=msg): simulation.sensitivity_dtype = invalid_dtype - def test_invalid_engine(self, simple_mesh, receivers_locations): + def test_invalid_engine(self, simple_mesh): """Test if error is raised after invalid engine.""" - # Create survey - receivers = gravity.Point(receivers_locations, components="gz") - sources = gravity.SourceField([receivers]) - survey = gravity.Survey(sources) - # Create reduced identity map for Linear Problem - active_cells = np.ones(simple_mesh.n_cells, dtype=bool) - idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) - # Check if error is raised after an invalid engine is passed engine = "invalid engine" with pytest.raises(ValueError, match=f"Invalid engine '{engine}'"): - gravity.Simulation3DIntegral( - simple_mesh, - survey=survey, - rhoMap=idenMap, - ind_active=active_cells, - engine=engine, - ) + gravity.Simulation3DIntegral(simple_mesh, engine=engine) - def test_choclo_and_n_proceesses(self, simple_mesh, receivers_locations): + def test_choclo_and_n_proceesses(self, simple_mesh): """Check if warning is raised after passing n_processes with choclo engine.""" - # Create survey - receivers = gravity.Point(receivers_locations, components="gz") - sources = gravity.SourceField([receivers]) - survey = gravity.Survey(sources) - # Create 
reduced identity map for Linear Problem - active_cells = np.ones(simple_mesh.n_cells, dtype=bool) - idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) - # Check if warning is raised msg = "The 'n_processes' will be ignored when selecting 'choclo'" with pytest.warns(UserWarning, match=msg): simulation = gravity.Simulation3DIntegral( - simple_mesh, - survey=survey, - rhoMap=idenMap, - ind_active=active_cells, - engine="choclo", - n_processes=2, + simple_mesh, engine="choclo", n_processes=2 ) # Check if n_processes was overwritten and set to None assert simulation.n_processes is None - def test_choclo_and_sensitivity_path_as_dir( - self, simple_mesh, receivers_locations, tmp_path - ): + def test_choclo_and_sensitivity_path_as_dir(self, simple_mesh, tmp_path): """ Check if error is raised when sensitivity_path is a dir with choclo engine. """ - # Create survey - receivers = gravity.Point(receivers_locations, components="gz") - sources = gravity.SourceField([receivers]) - survey = gravity.Survey(sources) - # Create reduced identity map for Linear Problem - active_cells = np.ones(simple_mesh.n_cells, dtype=bool) - idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) # Create a sensitivity_path directory sensitivity_path = tmp_path / "sensitivity_dummy" sensitivity_path.mkdir() @@ -403,36 +354,62 @@ def test_choclo_and_sensitivity_path_as_dir( with pytest.raises(ValueError, match=msg): gravity.Simulation3DIntegral( simple_mesh, - survey=survey, - rhoMap=idenMap, - ind_active=active_cells, store_sensitivities="disk", sensitivity_path=str(sensitivity_path), engine="choclo", ) - @patch("SimPEG.potential_fields.gravity.simulation.choclo", None) - def test_choclo_missing(self, simple_mesh, receivers_locations): + def test_sensitivities_on_disk(self, simple_mesh, receivers_locations, tmp_path): """ - Check if error is raised when choclo is missing and chosen as engine. 
+ Test if sensitivity matrix is correctly being stored in disk when asked """ - # Create survey + # Build survey receivers = gravity.Point(receivers_locations, components="gz") sources = gravity.SourceField([receivers]) survey = gravity.Survey(sources) - # Create reduced identity map for Linear Problem - active_cells = np.ones(simple_mesh.n_cells, dtype=bool) - idenMap = maps.IdentityMap(nP=simple_mesh.n_cells) + # Build simulation + sensitivities_path = tmp_path / "sensitivities" + simulation = gravity.Simulation3DIntegral( + mesh=simple_mesh, + survey=survey, + store_sensitivities="disk", + sensitivity_path=str(sensitivities_path), + engine="choclo", + ) + simulation.G + # Check if sensitivity matrix was stored in disk and is a memmap + assert sensitivities_path.is_file() + assert type(simulation.G) is np.memmap + + def test_sensitivities_on_ram(self, simple_mesh, receivers_locations, tmp_path): + """ + Test if sensitivity matrix is correctly being allocated in memory when asked + """ + # Build survey + receivers = gravity.Point(receivers_locations, components="gz") + sources = gravity.SourceField([receivers]) + survey = gravity.Survey(sources) + # Build simulation + simulation = gravity.Simulation3DIntegral( + mesh=simple_mesh, + survey=survey, + store_sensitivities="ram", + engine="choclo", + ) + simulation.G + # Check if sensitivity matrix is a Numpy array (stored in memory) + assert type(simulation.G) is np.ndarray + + def test_choclo_missing(self, simple_mesh, monkeypatch): + """ + Check if error is raised when choclo is missing and chosen as engine. + """ + # Monkeypatch choclo in SimPEG.potential_fields.base + monkeypatch.setattr(SimPEG.potential_fields.gravity.simulation, "choclo", None) # Check if error is raised msg = "The choclo package couldn't be found." 
with pytest.raises(ImportError, match=msg): - gravity.Simulation3DIntegral( - simple_mesh, - survey=survey, - rhoMap=idenMap, - ind_active=active_cells, - engine="choclo", - ) + gravity.Simulation3DIntegral(simple_mesh, engine="choclo") class TestConversionFactor: @@ -459,3 +436,34 @@ def test_invalid_conversion_factor(self): component = "invalid-component" with pytest.raises(ValueError, match=f"Invalid component '{component}'"): gravity.simulation._get_conversion_factor(component) + + +class TestInvalidMeshChoclo: + @pytest.fixture(params=("tensormesh", "treemesh")) + def mesh(self, request): + """Sample 2D mesh.""" + hx, hy = [(0.1, 8)], [(0.1, 8)] + h = (hx, hy) + if request.param == "tensormesh": + mesh = discretize.TensorMesh(h, "CC") + else: + mesh = discretize.TreeMesh(h, origin="CC") + mesh.finalize() + return mesh + + def test_invalid_mesh_with_choclo(self, mesh): + """ + Test if simulation raises error when passing an invalid mesh and using choclo + """ + # Build survey + receivers_locations = np.array([[0, 0, 0]]) + receivers = gravity.Point(receivers_locations) + sources = gravity.SourceField([receivers]) + survey = gravity.Survey(sources) + # Check if error is raised + msg = ( + "Invalid mesh with 2 dimensions. " + "Only 3D meshes are supported when using 'choclo' as engine." 
+ ) + with pytest.raises(ValueError, match=msg): + gravity.Simulation3DIntegral(mesh, survey, engine="choclo") diff --git a/tests/pf/test_forward_Mag_Linear.py b/tests/pf/test_forward_Mag_Linear.py index 0e866e520d..9525edd4a1 100644 --- a/tests/pf/test_forward_Mag_Linear.py +++ b/tests/pf/test_forward_Mag_Linear.py @@ -1,8 +1,8 @@ -from typing import List, Tuple +import pytest +import unittest import discretize import numpy as np -import pytest from geoana.em.static import MagneticPrism from scipy.constants import mu_0 @@ -10,213 +10,76 @@ from SimPEG.potential_fields import magnetics as mag -@pytest.fixture -def mag_mesh() -> discretize.TensorMesh: - """ - a small tensor mesh for testing magnetic simulations +def test_ana_mag_forward(): + nx = 5 + ny = 5 + + h0_amplitude, h0_inclination, h0_declination = (50000.0, 60.0, 250.0) + b0 = mag.analytics.IDTtoxyz(-h0_inclination, h0_declination, h0_amplitude) + chi1 = 0.01 + chi2 = 0.02 - Returns - ------- - discretize.TensorMesh - the tensor mesh for testing - """ # Define a mesh cs = 0.2 hxind = [(cs, 41)] hyind = [(cs, 41)] hzind = [(cs, 41)] mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") - return mesh - -@pytest.fixture -def two_blocks() -> Tuple[np.ndarray, np.ndarray]: - """ - The parameters defining two blocks - - Returns - ------- - Tuple[np.ndarray, np.ndarray] - Tuple of (3, 2) arrays of (xmin, xmax), (ymin, ymax), (zmin, zmax) dimensions of each block - """ + # create a model of two blocks, 1 inside the other block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) - return block1, block2 + def get_block_inds(grid, block): + return np.where( + (grid[:, 0] > block[0, 0]) + & (grid[:, 0] < block[0, 1]) + & (grid[:, 1] > block[1, 0]) + & (grid[:, 1] < block[1, 1]) + & (grid[:, 2] > block[2, 0]) + & (grid[:, 2] < block[2, 1]) + ) + + block1_inds = get_block_inds(mesh.cell_centers, block1) + block2_inds = get_block_inds(mesh.cell_centers, 
block2) + + model = np.zeros(mesh.n_cells) + model[block1_inds] = chi1 + model[block2_inds] = chi2 -@pytest.fixture -def receiver_locations() -> np.ndarray: - """ - a grid of receivers for testing + active_cells = model != 0.0 + model_reduced = model[active_cells] + + # Create reduced identity map for Linear Problem + idenMap = maps.IdentityMap(nP=int(sum(active_cells))) - Returns - ------- - np.ndarray - (n, 3) array of receiver locations - """ # Create plane of observations - nx, ny = 5, 5 xr = np.linspace(-20, 20, nx) yr = np.linspace(-20, 20, ny) X, Y = np.meshgrid(xr, yr) Z = np.ones_like(X) * 3.0 - return np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] - - -@pytest.fixture -def inducing_field() -> Tuple[Tuple[float, float, float], Tuple[float, float, float]]: - """ - inducing field two ways-- (amplitude, inclination , declination) and (b_x, b_y, b_z) + locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] + components = ["bx", "by", "bz", "tmi"] - Returns - ------- - Tuple[Tuple[float, float, float], Tuple[float, float, float]] - (amplitude, inclination, declination), (b_x, b_y, b_z) - """ - H0 = (50000.0, 60.0, 250.0) - b0 = mag.analytics.IDTtoxyz(-H0[1], H0[2], H0[0]) - return H0, b0 - - -def get_block_inds(grid: np.ndarray, block: np.ndarray) -> np.ndarray: - """ - get the indices for a block - - Parameters - ---------- - grid : np.ndarray - (n, 3) array of xyz locations - block : np.ndarray - (3, 2) array of (xmin, xmax), (ymin, ymax), (zmin, zmax) dimensions of the block - - Returns - ------- - np.ndarray - boolean array of indices corresponding to the block - """ - - return np.where( - (grid[:, 0] > block[0, 0]) - & (grid[:, 0] < block[0, 1]) - & (grid[:, 1] > block[1, 0]) - & (grid[:, 1] < block[1, 1]) - & (grid[:, 2] > block[2, 0]) - & (grid[:, 2] < block[2, 1]) + rxLoc = mag.Point(locXyz, components=components) + srcField = mag.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + 
declination=h0_declination, ) + survey = mag.Survey(srcField) + # Creat reduced identity map for Linear Problem + idenMap = maps.IdentityMap(nP=int(sum(active_cells))) -def create_block_model( - mesh: discretize.TensorMesh, - blocks: Tuple[np.ndarray, ...], - block_params: Tuple[np.ndarray, ...], -) -> Tuple[np.ndarray, np.ndarray]: - """ - Create a magnetic model from a sequence of blocks - - Parameters - ---------- - mesh : discretize.TensorMesh - TensorMesh object to put the model on - blocks : Tuple[np.ndarray, ...] - Tuple of block definitions (each element is (3, 2) array of (xmin, xmax), (ymin, ymax), (zmin, zmax) - dimensions of the block) - block_params : Tuple[np.ndarray, ...] - Tuple of parameters to assign for each block. Must be the same length as ``blocks``. - - Returns - ------- - Tuple[np.ndarray, np.ndarray] - Tuple of the magnetic model and active_cells (a boolean array) - - Raises - ------ - ValueError - if ``blocks`` and ``block_params`` have incompatible dimensions - """ - if len(blocks) != len(block_params): - raise ValueError( - "'blocks' and 'block_params' must have the same number of elements" - ) - model = np.zeros((mesh.n_cells, np.atleast_1d(block_params[0]).shape[0])) - for block, params in zip(blocks, block_params): - block_ind = get_block_inds(mesh.cell_centers, block) - model[block_ind] = params - active_cells = np.any(np.abs(model) > 0, axis=1) - return model.squeeze(), active_cells - - -def create_mag_survey( - components: List[str], - receiver_locations: np.ndarray, - inducing_field_params: Tuple[float, float, float], -) -> mag.Survey: - """ - create a magnetic Survey - - Parameters - ---------- - components : List[str] - List of components to model - receiver_locations : np.ndarray - (n, 3) array of xyz receiver locations - inducing_field_params : Tuple[float, float, float] - amplitude, inclination, and declination of the inducing field - - Returns - ------- - mag.Survey - a magnetic Survey instance - """ - - receivers = 
mag.Point(receiver_locations, components=components) - source_field = mag.UniformBackgroundField([receivers], *inducing_field_params) - return mag.Survey(source_field) - - -@pytest.mark.parametrize( - "engine,parallel_kwargs", - [ - ("geoana", {"n_processes": None}), - ("geoana", {"n_processes": 1}), - ("choclo", {"choclo_parallel": False}), - ("choclo", {"choclo_parallel": True}), - ], - ids=["geoana_serial", "geoana_parallel", "choclo_serial", "choclo_parallel"], -) -@pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) -def test_ana_mag_forward( - engine, - parallel_kwargs, - store_sensitivities, - tmp_path, - mag_mesh, - two_blocks, - receiver_locations, - inducing_field, -): - inducing_field_params, b0 = inducing_field - - chi1 = 0.01 - chi2 = 0.02 - model, active_cells = create_block_model(mag_mesh, two_blocks, [chi1, chi2]) - model_reduced = model[active_cells] - # Create reduced identity map for Linear Problem - identity_map = maps.IdentityMap(nP=int(sum(active_cells))) - - survey = create_mag_survey( - components=["bx", "by", "bz", "tmi"], - receiver_locations=receiver_locations, - inducing_field_params=inducing_field_params, - ) sim = mag.Simulation3DIntegral( - mag_mesh, + mesh, survey=survey, - chiMap=identity_map, + chiMap=idenMap, ind_active=active_cells, - sensitivity_path=str(tmp_path / f"{engine}"), - store_sensitivities=store_sensitivities, - engine=engine, - **parallel_kwargs, + store_sensitivities="forward_only", + n_processes=None, ) data = sim.dpred(model_reduced) @@ -225,251 +88,25 @@ def test_ana_mag_forward( d_z = data[2::4] d_t = data[3::4] - # Compute analytical response from magnetic prism - block1, block2 = two_blocks - prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], chi1 * b0 / mu_0) - prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -chi1 * b0 / mu_0) - prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], chi2 * b0 / mu_0) - - d = ( - prism_1.magnetic_flux_density(receiver_locations) - + 
prism_2.magnetic_flux_density(receiver_locations) - + prism_3.magnetic_flux_density(receiver_locations) - ) - - # TMI projection tmi = sim.tmi_projection d_t2 = d_x * tmi[0] + d_y * tmi[1] + d_z * tmi[2] - - # Check results - rtol, atol = 1e-7, 1e-6 - np.testing.assert_allclose( - d_t, d_t2, rtol=rtol, atol=atol - ) # double check internal projection - np.testing.assert_allclose(d_x, d[:, 0], rtol=rtol, atol=atol) - np.testing.assert_allclose(d_y, d[:, 1], rtol=rtol, atol=atol) - np.testing.assert_allclose(d_z, d[:, 2], rtol=rtol, atol=atol) - np.testing.assert_allclose(d_t, d @ tmi, rtol=rtol, atol=atol) - - -@pytest.mark.parametrize( - "engine, parallel_kwargs", - [ - ("geoana", {"n_processes": None}), - ("geoana", {"n_processes": 1}), - ("choclo", {"choclo_parallel": False}), - ("choclo", {"choclo_parallel": True}), - ], - ids=["geoana_serial", "geoana_parallel", "choclo_serial", "choclo_parallel"], -) -@pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) -def test_ana_mag_grad_forward( - engine, - parallel_kwargs, - store_sensitivities, - tmp_path, - mag_mesh, - two_blocks, - receiver_locations, - inducing_field, -): - inducing_field_params, b0 = inducing_field - - chi1 = 0.01 - chi2 = 0.02 - model, active_cells = create_block_model(mag_mesh, two_blocks, [chi1, chi2]) - model_reduced = model[active_cells] - # Create reduced identity map for Linear Problem - identity_map = maps.IdentityMap(nP=int(sum(active_cells))) - - survey = create_mag_survey( - components=["bxx", "bxy", "bxz", "byy", "byz", "bzz"], - receiver_locations=receiver_locations, - inducing_field_params=inducing_field_params, - ) - sim = mag.Simulation3DIntegral( - mag_mesh, - survey=survey, - chiMap=identity_map, - ind_active=active_cells, - sensitivity_path=str(tmp_path / f"{engine}"), - store_sensitivities=store_sensitivities, - engine=engine, - **parallel_kwargs, - ) - if engine == "choclo": - # gradient simulation not implemented for choclo yet - with 
pytest.raises(NotImplementedError): - data = sim.dpred(model_reduced) - else: - data = sim.dpred(model_reduced) - d_xx = data[0::6] - d_xy = data[1::6] - d_xz = data[2::6] - d_yy = data[3::6] - d_yz = data[4::6] - d_zz = data[5::6] - - # Compute analytical response from magnetic prism - block1, block2 = two_blocks - prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], chi1 * b0 / mu_0) - prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -chi1 * b0 / mu_0) - prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], chi2 * b0 / mu_0) - - d = ( - prism_1.magnetic_field_gradient(receiver_locations) - + prism_2.magnetic_field_gradient(receiver_locations) - + prism_3.magnetic_field_gradient(receiver_locations) - ) * mu_0 - - # Check results - rtol, atol = 1e-7, 1e-6 - np.testing.assert_allclose(d_xx, d[..., 0, 0], rtol=rtol, atol=atol) - np.testing.assert_allclose(d_xy, d[..., 0, 1], rtol=rtol, atol=atol) - np.testing.assert_allclose(d_xz, d[..., 0, 2], rtol=rtol, atol=atol) - np.testing.assert_allclose(d_yy, d[..., 1, 1], rtol=rtol, atol=atol) - np.testing.assert_allclose(d_yz, d[..., 1, 2], rtol=rtol, atol=atol) - np.testing.assert_allclose(d_zz, d[..., 2, 2], rtol=rtol, atol=atol) - - -@pytest.mark.parametrize( - "engine, parallel_kwargs", - [ - ("geoana", {"n_processes": None}), - ("geoana", {"n_processes": 1}), - ("choclo", {"choclo_parallel": False}), - ("choclo", {"choclo_parallel": True}), - ], - ids=["geoana_serial", "geoana_parallel", "choclo_serial", "choclo_parallel"], -) -@pytest.mark.parametrize("store_sensitivities", ("ram", "disk", "forward_only")) -def test_ana_mag_vec_forward( - engine, - parallel_kwargs, - store_sensitivities, - tmp_path, - mag_mesh, - two_blocks, - receiver_locations, - inducing_field, -): - inducing_field_params, b0 = inducing_field - M1 = (utils.mat_utils.dip_azimuth2cartesian(45, -40) * 0.05).squeeze() - M2 = (utils.mat_utils.dip_azimuth2cartesian(120, 32) * 0.1).squeeze() - - model, active_cells = create_block_model(mag_mesh, 
two_blocks, [M1, M2]) - model_reduced = model[active_cells].reshape(-1, order="F") - # Create reduced identity map for Linear Problem - identity_map = maps.IdentityMap(nP=int(sum(active_cells)) * 3) - - survey = create_mag_survey( - components=["bx", "by", "bz", "tmi"], - receiver_locations=receiver_locations, - inducing_field_params=inducing_field_params, - ) - - sim = mag.Simulation3DIntegral( - mag_mesh, - survey=survey, - chiMap=identity_map, - ind_active=active_cells, - sensitivity_path=str(tmp_path / f"{engine}"), - store_sensitivities=store_sensitivities, - model_type="vector", - engine=engine, - **parallel_kwargs, - ) - - data = sim.dpred(model_reduced).reshape(-1, 4) + np.testing.assert_allclose(d_t, d_t2) # double check internal projection # Compute analytical response from magnetic prism - block1, block2 = two_blocks - prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], M1 * np.linalg.norm(b0) / mu_0) - prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -M1 * np.linalg.norm(b0) / mu_0) - prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], M2 * np.linalg.norm(b0) / mu_0) - - d = ( - prism_1.magnetic_flux_density(receiver_locations) - + prism_2.magnetic_flux_density(receiver_locations) - + prism_3.magnetic_flux_density(receiver_locations) - ) - tmi = sim.tmi_projection - - # Check results - rtol, atol = 9e-6, 3e-7 - np.testing.assert_allclose(data[:, 0], d[:, 0], rtol=rtol, atol=atol) - np.testing.assert_allclose(data[:, 1], d[:, 1], rtol=rtol, atol=atol) - np.testing.assert_allclose(data[:, 2], d[:, 2], rtol=rtol, atol=atol) - np.testing.assert_allclose(data[:, 3], d @ tmi, rtol=rtol, atol=atol) - - -@pytest.mark.parametrize( - "engine, parallel_kwargs", - [ - ("geoana", {"n_processes": None}), - ("geoana", {"n_processes": 1}), - ("choclo", {"choclo_parallel": False}), - ("choclo", {"choclo_parallel": True}), - ], - ids=["geoana_serial", "geoana_parallel", "choclo_serial", "choclo_parallel"], -) -@pytest.mark.parametrize("store_sensitivities", ("ram", 
"disk", "forward_only")) -def test_ana_mag_amp_forward( - engine, - parallel_kwargs, - store_sensitivities, - tmp_path, - mag_mesh, - two_blocks, - receiver_locations, - inducing_field, -): - inducing_field_params, b0 = inducing_field - M1 = (utils.mat_utils.dip_azimuth2cartesian(45, -40) * 0.05).squeeze() - M2 = (utils.mat_utils.dip_azimuth2cartesian(120, 32) * 0.1).squeeze() - - model, active_cells = create_block_model(mag_mesh, two_blocks, [M1, M2]) - model_reduced = model[active_cells].reshape(-1, order="F") - # Create reduced identity map for Linear Problem - identity_map = maps.IdentityMap(nP=int(sum(active_cells)) * 3) - - survey = create_mag_survey( - components=["bx", "by", "bz"], - receiver_locations=receiver_locations, - inducing_field_params=inducing_field_params, - ) - - sim = mag.Simulation3DIntegral( - mag_mesh, - survey=survey, - chiMap=identity_map, - ind_active=active_cells, - sensitivity_path=str(tmp_path / f"{engine}"), - store_sensitivities=store_sensitivities, - model_type="vector", - is_amplitude_data=True, - engine=engine, - **parallel_kwargs, - ) - - data = sim.dpred(model_reduced) - - # Compute analytical response from magnetic prism - block1, block2 = two_blocks - prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], M1 * np.linalg.norm(b0) / mu_0) - prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -M1 * np.linalg.norm(b0) / mu_0) - prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], M2 * np.linalg.norm(b0) / mu_0) + prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], chi1 * b0 / mu_0) + prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -chi1 * b0 / mu_0) + prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], chi2 * b0 / mu_0) d = ( - prism_1.magnetic_flux_density(receiver_locations) - + prism_2.magnetic_flux_density(receiver_locations) - + prism_3.magnetic_flux_density(receiver_locations) + prism_1.magnetic_flux_density(locXyz) + + prism_2.magnetic_flux_density(locXyz) + + prism_3.magnetic_flux_density(locXyz) ) - d_amp = 
np.linalg.norm(d, axis=1) - # Check results - rtol, atol = 1e-7, 1e-6 - np.testing.assert_allclose(data, d_amp, rtol=rtol, atol=atol) + np.testing.assert_allclose(d_x, d[:, 0]) + np.testing.assert_allclose(d_y, d[:, 1]) + np.testing.assert_allclose(d_z, d[:, 2]) + np.testing.assert_allclose(d_t, d @ tmi) def test_ana_mag_tmi_grad_forward(): @@ -580,3 +217,301 @@ def get_block_inds(grid, block): atol=1.0, rtol=1e-1, ) + + +def test_ana_mag_grad_forward(): + nx = 5 + ny = 5 + + h0_amplitude, h0_inclination, h0_declination = (50000.0, 60.0, 250.0) + b0 = mag.analytics.IDTtoxyz(-h0_inclination, h0_declination, h0_amplitude) + chi1 = 0.01 + chi2 = 0.02 + + # Define a mesh + cs = 0.2 + hxind = [(cs, 41)] + hyind = [(cs, 41)] + hzind = [(cs, 41)] + mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") + + # create a model of two blocks, 1 inside the other + block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) + block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) + + def get_block_inds(grid, block): + return np.where( + (grid[:, 0] > block[0, 0]) + & (grid[:, 0] < block[0, 1]) + & (grid[:, 1] > block[1, 0]) + & (grid[:, 1] < block[1, 1]) + & (grid[:, 2] > block[2, 0]) + & (grid[:, 2] < block[2, 1]) + ) + + block1_inds = get_block_inds(mesh.cell_centers, block1) + block2_inds = get_block_inds(mesh.cell_centers, block2) + + model = np.zeros(mesh.n_cells) + model[block1_inds] = chi1 + model[block2_inds] = chi2 + + active_cells = model != 0.0 + model_reduced = model[active_cells] + + # Create reduced identity map for Linear Problem + idenMap = maps.IdentityMap(nP=int(sum(active_cells))) + + # Create plane of observations + xr = np.linspace(-20, 20, nx) + yr = np.linspace(-20, 20, ny) + X, Y = np.meshgrid(xr, yr) + Z = np.ones_like(X) * 3.0 + locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] + components = ["bxx", "bxy", "bxz", "byy", "byz", "bzz"] + + rxLoc = mag.Point(locXyz, components=components) + srcField = mag.UniformBackgroundField( + 
[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) + survey = mag.Survey(srcField) + + # Creat reduced identity map for Linear Problem + idenMap = maps.IdentityMap(nP=int(sum(active_cells))) + + sim = mag.Simulation3DIntegral( + mesh, + survey=survey, + chiMap=idenMap, + ind_active=active_cells, + store_sensitivities="forward_only", + n_processes=None, + ) + + data = sim.dpred(model_reduced) + d_xx = data[0::6] + d_xy = data[1::6] + d_xz = data[2::6] + d_yy = data[3::6] + d_yz = data[4::6] + d_zz = data[5::6] + + # Compute analytical response from magnetic prism + prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], chi1 * b0 / mu_0) + prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -chi1 * b0 / mu_0) + prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], chi2 * b0 / mu_0) + + d = ( + prism_1.magnetic_field_gradient(locXyz) + + prism_2.magnetic_field_gradient(locXyz) + + prism_3.magnetic_field_gradient(locXyz) + ) * mu_0 + + np.testing.assert_allclose(d_xx, d[..., 0, 0], rtol=1e-10, atol=1e-12) + np.testing.assert_allclose(d_xy, d[..., 0, 1], rtol=1e-10, atol=1e-12) + np.testing.assert_allclose(d_xz, d[..., 0, 2], rtol=1e-10, atol=1e-12) + np.testing.assert_allclose(d_yy, d[..., 1, 1], rtol=1e-10, atol=1e-12) + np.testing.assert_allclose(d_yz, d[..., 1, 2], rtol=1e-10, atol=1e-12) + np.testing.assert_allclose(d_zz, d[..., 2, 2], rtol=1e-10, atol=1e-12) + + +def test_ana_mag_vec_forward(): + nx = 5 + ny = 5 + + h0_amplitude, h0_inclination, h0_declination = (50000.0, 60.0, 250.0) + b0 = mag.analytics.IDTtoxyz(-h0_inclination, h0_declination, h0_amplitude) + + M1 = utils.mat_utils.dip_azimuth2cartesian(45, -40) * 0.05 + M2 = utils.mat_utils.dip_azimuth2cartesian(120, 32) * 0.1 + + # Define a mesh + cs = 0.2 + hxind = [(cs, 41)] + hyind = [(cs, 41)] + hzind = [(cs, 41)] + mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") + + # create a model of two blocks, 1 inside the other + block1 = np.array([[-1.5, 1.5], 
[-1.5, 1.5], [-1.5, 1.5]]) + block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) + + def get_block_inds(grid, block): + return np.where( + (grid[:, 0] > block[0, 0]) + & (grid[:, 0] < block[0, 1]) + & (grid[:, 1] > block[1, 0]) + & (grid[:, 1] < block[1, 1]) + & (grid[:, 2] > block[2, 0]) + & (grid[:, 2] < block[2, 1]) + ) + + block1_inds = get_block_inds(mesh.cell_centers, block1) + block2_inds = get_block_inds(mesh.cell_centers, block2) + + model = np.zeros((mesh.n_cells, 3)) + model[block1_inds] = M1 + model[block2_inds] = M2 + + active_cells = np.any(model != 0.0, axis=1) + model_reduced = model[active_cells].reshape(-1, order="F") + + # Create plane of observations + xr = np.linspace(-20, 20, nx) + yr = np.linspace(-20, 20, ny) + X, Y = np.meshgrid(xr, yr) + Z = np.ones_like(X) * 3.0 + locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] + components = ["bx", "by", "bz", "tmi"] + + rxLoc = mag.Point(locXyz, components=components) + srcField = mag.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) + survey = mag.Survey(srcField) + + # Create reduced identity map for Linear Problem + idenMap = maps.IdentityMap(nP=int(sum(active_cells)) * 3) + + sim = mag.Simulation3DIntegral( + mesh, + survey=survey, + chiMap=idenMap, + ind_active=active_cells, + store_sensitivities="forward_only", + model_type="vector", + n_processes=None, + ) + + data = sim.dpred(model_reduced).reshape(-1, 4) + + # Compute analytical response from magnetic prism + prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], M1 * np.linalg.norm(b0) / mu_0) + prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -M1 * np.linalg.norm(b0) / mu_0) + prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], M2 * np.linalg.norm(b0) / mu_0) + + d = ( + prism_1.magnetic_flux_density(locXyz) + + prism_2.magnetic_flux_density(locXyz) + + prism_3.magnetic_flux_density(locXyz) + ) + tmi = sim.tmi_projection + + 
np.testing.assert_allclose(data[:, 0], d[:, 0]) + np.testing.assert_allclose(data[:, 1], d[:, 1]) + np.testing.assert_allclose(data[:, 2], d[:, 2]) + np.testing.assert_allclose(data[:, 3], d @ tmi) + + +def test_ana_mag_amp_forward(): + nx = 5 + ny = 5 + + h0_amplitude, h0_inclination, h0_declination = (50000.0, 60.0, 250.0) + b0 = mag.analytics.IDTtoxyz(-h0_inclination, h0_declination, h0_amplitude) + + M1 = utils.mat_utils.dip_azimuth2cartesian(45, -40) * 0.05 + M2 = utils.mat_utils.dip_azimuth2cartesian(120, 32) * 0.1 + + # Define a mesh + cs = 0.2 + hxind = [(cs, 41)] + hyind = [(cs, 41)] + hzind = [(cs, 41)] + mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC") + + # create a model of two blocks, 1 inside the other + block1 = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) + block2 = np.array([[-0.7, 0.7], [-0.7, 0.7], [-0.7, 0.7]]) + + def get_block_inds(grid, block): + return np.where( + (grid[:, 0] > block[0, 0]) + & (grid[:, 0] < block[0, 1]) + & (grid[:, 1] > block[1, 0]) + & (grid[:, 1] < block[1, 1]) + & (grid[:, 2] > block[2, 0]) + & (grid[:, 2] < block[2, 1]) + ) + + block1_inds = get_block_inds(mesh.cell_centers, block1) + block2_inds = get_block_inds(mesh.cell_centers, block2) + + model = np.zeros((mesh.n_cells, 3)) + model[block1_inds] = M1 + model[block2_inds] = M2 + + active_cells = np.any(model != 0.0, axis=1) + model_reduced = model[active_cells].reshape(-1, order="F") + + # Create plane of observations + xr = np.linspace(-20, 20, nx) + yr = np.linspace(-20, 20, ny) + X, Y = np.meshgrid(xr, yr) + Z = np.ones_like(X) * 3.0 + locXyz = np.c_[X.reshape(-1), Y.reshape(-1), Z.reshape(-1)] + components = ["bx", "by", "bz"] + + rxLoc = mag.Point(locXyz, components=components) + srcField = mag.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) + survey = mag.Survey(srcField) + + # Create reduced identity map for Linear Problem + idenMap = 
maps.IdentityMap(nP=int(sum(active_cells)) * 3) + + sim = mag.Simulation3DIntegral( + mesh, + survey=survey, + chiMap=idenMap, + ind_active=active_cells, + store_sensitivities="forward_only", + model_type="vector", + is_amplitude_data=True, + n_processes=None, + ) + + data = sim.dpred(model_reduced) + + # Compute analytical response from magnetic prism + prism_1 = MagneticPrism(block1[:, 0], block1[:, 1], M1 * np.linalg.norm(b0) / mu_0) + prism_2 = MagneticPrism(block2[:, 0], block2[:, 1], -M1 * np.linalg.norm(b0) / mu_0) + prism_3 = MagneticPrism(block2[:, 0], block2[:, 1], M2 * np.linalg.norm(b0) / mu_0) + + d = ( + prism_1.magnetic_flux_density(locXyz) + + prism_2.magnetic_flux_density(locXyz) + + prism_3.magnetic_flux_density(locXyz) + ) + d_amp = np.linalg.norm(d, axis=1) + + np.testing.assert_allclose(data, d_amp) + + +def test_removed_modeltype(): + """Test if accessing removed modelType property raises error.""" + h = [[(2, 2)], [(2, 2)], [(2, 2)]] + mesh = discretize.TensorMesh(h) + receiver_location = np.array([[0, 0, 100]]) + receiver = mag.Point(receiver_location, components="tmi") + background_field = mag.UniformBackgroundField(receiver_list=[receiver]) + survey = mag.Survey(background_field) + mapping = maps.IdentityMap(mesh, nP=mesh.n_cells) + sim = mag.Simulation3DIntegral(mesh, survey=survey, chiMap=mapping) + message = "modelType has been removed, please use model_type."
+ with pytest.raises(NotImplementedError, match=message): + sim.modelType + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/pf/test_forward_PFproblem.py b/tests/pf/test_forward_PFproblem.py index 65793bb650..b0914da781 100644 --- a/tests/pf/test_forward_PFproblem.py +++ b/tests/pf/test_forward_PFproblem.py @@ -12,7 +12,6 @@ def setUp(self): Inc = 45.0 Dec = 45.0 Btot = 51000 - H0 = (Btot, Inc, Dec) self.b0 = mag.analytics.IDTtoxyz(-Inc, Dec, Btot) @@ -40,7 +39,12 @@ def setUp(self): self.yr = yr self.rxLoc = np.c_[utils.mkvc(X), utils.mkvc(Y), utils.mkvc(Z)] receivers = mag.Point(self.rxLoc, components=components) - srcField = mag.SourceField([receivers], parameters=H0) + srcField = mag.UniformBackgroundField( + receiver_list=[receivers], + amplitude=Btot, + inclination=Inc, + declination=Dec, + ) self.survey = mag.Survey(srcField) diff --git a/tests/pf/test_mag_MVI_Octree.py b/tests/pf/test_mag_MVI_Octree.py index 49809e4c7d..1880095af8 100644 --- a/tests/pf/test_mag_MVI_Octree.py +++ b/tests/pf/test_mag_MVI_Octree.py @@ -20,7 +20,7 @@ class MVIProblemTest(unittest.TestCase): def setUp(self): np.random.seed(0) - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # The magnetization is set along a different # direction (induced + remanence) @@ -46,7 +46,12 @@ def setUp(self): # Create a MAGsurvey xyzLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] rxLoc = mag.Point(xyzLoc) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = mag.Survey(srcField) # Create a mesh @@ -144,7 +149,7 @@ def setUp(self): # Pre-conditioner update_Jacobi = directives.UpdatePreconditioner() - sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) + sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) inv = 
inversion.BaseInversion( invProb, directiveList=[sensitivity_weights, IRLS, update_Jacobi, betaest] ) diff --git a/tests/pf/test_mag_inversion_linear.py b/tests/pf/test_mag_inversion_linear.py index 868ad8b34b..bf7e10dba8 100644 --- a/tests/pf/test_mag_inversion_linear.py +++ b/tests/pf/test_mag_inversion_linear.py @@ -23,7 +23,7 @@ def setUp(self): np.random.seed(0) # Define the inducing field parameter - H0 = (50000, 90, 0) + h0_amplitude, h0_inclination, h0_declination = (50000, 90, 0) # Create a mesh dx = 5.0 @@ -59,7 +59,12 @@ def setUp(self): # Create a MAGsurvey rxLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] rxLoc = mag.Point(rxLoc) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = mag.Survey(srcField) # We can now create a susceptibility model and generate data @@ -96,9 +101,9 @@ def setUp(self): ) # Create a regularization - reg = regularization.Sparse(self.mesh, indActive=actv, mapping=idenMap) + reg = regularization.Sparse(self.mesh, active_cells=actv, mapping=idenMap) reg.norms = [0, 0, 0, 0] - reg.gradientType = "components" + reg.gradient_type = "components" # Data misfit function dmis = data_misfit.L2DataMisfit(simulation=sim, data=data) @@ -114,7 +119,7 @@ def setUp(self): # Here is where the norms are applied IRLS = directives.Update_IRLS(f_min_change=1e-4, minGNiter=1) update_Jacobi = directives.UpdatePreconditioner() - sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) + sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) self.inv = inversion.BaseInversion( invProb, directiveList=[IRLS, sensitivity_weights, betaest, update_Jacobi] ) diff --git a/tests/pf/test_mag_inversion_linear_Octree.py b/tests/pf/test_mag_inversion_linear_Octree.py index d30e8a7184..8167bf1e1f 100644 --- 
a/tests/pf/test_mag_inversion_linear_Octree.py +++ b/tests/pf/test_mag_inversion_linear_Octree.py @@ -26,7 +26,7 @@ def setUp(self): # From old convention, field orientation is given as an # azimuth from North (positive clockwise) # and dip from the horizontal (positive downward). - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # Create a mesh h = [5, 5, 5] @@ -55,7 +55,12 @@ def setUp(self): # Create a MAGsurvey xyzLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] rxLoc = mag.Point(xyzLoc) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = mag.Survey(srcField) # self.mesh.finalize() @@ -112,7 +117,7 @@ def setUp(self): # Create a regularization reg = regularization.Sparse(self.mesh, active_cells=actv, mapping=idenMap) reg.norms = [0, 0, 0, 0] - reg.mref = np.zeros(nC) + reg.reference_model = np.zeros(nC) # Data misfit function dmis = data_misfit.L2DataMisfit(simulation=sim, data=data) diff --git a/tests/pf/test_mag_nonLinear_Amplitude.py b/tests/pf/test_mag_nonLinear_Amplitude.py index 318964328f..85f27266d6 100644 --- a/tests/pf/test_mag_nonLinear_Amplitude.py +++ b/tests/pf/test_mag_nonLinear_Amplitude.py @@ -21,7 +21,7 @@ class AmpProblemTest(unittest.TestCase): def setUp(self): # We will assume a vertical inducing field - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # The magnetization is set along a different direction (induced + remanence) M = np.array([45.0, 90.0]) @@ -46,8 +46,11 @@ def setUp(self): # Create a MAGsurvey rxLoc = np.c_[mkvc(X.T), mkvc(Y.T), mkvc(Z.T)] receiver_list = magnetics.receivers.Point(rxLoc) - srcField = magnetics.sources.SourceField( - receiver_list=[receiver_list], parameters=H0 + srcField = magnetics.sources.UniformBackgroundField( + 
receiver_list=[receiver_list], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, ) survey = magnetics.survey.Survey(srcField) @@ -138,9 +141,9 @@ def setUp(self): # Create a regularization function, in this case l2l2 reg = regularization.Sparse( - mesh, indActive=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0 + mesh, active_cells=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0 ) - reg.mref = np.zeros(nC) + reg.reference_model = np.zeros(nC) # Specify how the optimization will proceed, set susceptibility bounds to inf opt = optimization.ProjectedGNCG( @@ -185,8 +188,11 @@ def setUp(self): # receiver_list = magnetics.receivers.Point(rxLoc, components=["bx", "by", "bz"]) - srcField = magnetics.sources.SourceField( - receiver_list=[receiver_list], parameters=H0 + srcField = magnetics.sources.UniformBackgroundField( + receiver_list=[receiver_list], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, ) surveyAmp = magnetics.survey.Survey(srcField) @@ -228,9 +234,9 @@ def setUp(self): data_obj = data.Data(survey, dobs=bAmp, noise_floor=wd) # Create a sparse regularization - reg = regularization.Sparse(mesh, indActive=actv, mapping=idenMap) + reg = regularization.Sparse(mesh, active_cells=actv, mapping=idenMap) reg.norms = [1, 0, 0, 0] - reg.mref = np.zeros(nC) + reg.reference_model = np.zeros(nC) # Data misfit function dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_obj) diff --git a/tests/pf/test_mag_uniform_background_field.py b/tests/pf/test_mag_uniform_background_field.py new file mode 100644 index 0000000000..d4e72bae40 --- /dev/null +++ b/tests/pf/test_mag_uniform_background_field.py @@ -0,0 +1,29 @@ +""" +Test the UniformBackgroundField class +""" + +import pytest +from SimPEG.potential_fields.magnetics import UniformBackgroundField, SourceField + + +def test_invalid_parameters_argument(): + """ + Test if error is raised after passing 'parameters' as argument + """ + 
parameters = (1, 35, 60) + msg = ( + "'parameters' property has been removed." + "Please pass the amplitude, inclination and declination" + " through their own arguments." + ) + with pytest.raises(TypeError, match=msg): + UniformBackgroundField(parameters=parameters) + + +def test_deprecated_source_field(): + """ + Test if instantiating a magnetics.source.SourceField object raises an error + """ + msg = "SourceField has been removed, please use UniformBackgroundField." + with pytest.raises(NotImplementedError, match=msg): + SourceField() diff --git a/tests/pf/test_mag_vector_amplitude.py b/tests/pf/test_mag_vector_amplitude.py index 84d56ee320..5dea5ded25 100644 --- a/tests/pf/test_mag_vector_amplitude.py +++ b/tests/pf/test_mag_vector_amplitude.py @@ -20,7 +20,7 @@ class MVIProblemTest(unittest.TestCase): def setUp(self): np.random.seed(0) - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) # The magnetization is set along a different # direction (induced + remanence) @@ -46,7 +46,12 @@ def setUp(self): # Create a MAGsurvey xyzLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)] rxLoc = mag.Point(xyzLoc) - srcField = mag.SourceField([rxLoc], parameters=H0) + srcField = mag.UniformBackgroundField( + receiver_list=[rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) survey = mag.Survey(srcField) # Create a mesh @@ -136,7 +141,7 @@ def setUp(self): # Pre-conditioner update_Jacobi = directives.UpdatePreconditioner() - sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) + sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) self.inv = inversion.BaseInversion( invProb, directiveList=[sensitivity_weights, IRLS, update_Jacobi, betaest] ) diff --git a/tests/pf/test_pf_quadtree_inversion_linear.py b/tests/pf/test_pf_quadtree_inversion_linear.py index 78dc40de46..46bcf77c67 100644 --- 
a/tests/pf/test_pf_quadtree_inversion_linear.py +++ b/tests/pf/test_pf_quadtree_inversion_linear.py @@ -104,9 +104,14 @@ def create_gravity_sim_flat(self, block_value=1.0, noise_floor=0.01): def create_magnetics_sim_flat(self, block_value=1.0, noise_floor=0.01): # Create a magnetic survey - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) mag_rxLoc = magnetics.Point(data_xyz_flat) - mag_srcField = magnetics.SourceField([mag_rxLoc], parameters=H0) + mag_srcField = magnetics.UniformBackgroundField( + [mag_rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) mag_survey = magnetics.Survey(mag_srcField) # Create the magnetics forward model operator @@ -159,9 +164,14 @@ def create_gravity_sim(self, block_value=1.0, noise_floor=0.01): def create_magnetics_sim(self, block_value=1.0, noise_floor=0.01): # Create a magnetic survey - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) mag_rxLoc = magnetics.Point(data_xyz) - mag_srcField = magnetics.SourceField([mag_rxLoc], parameters=H0) + mag_srcField = magnetics.UniformBackgroundField( + [mag_rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) mag_survey = magnetics.Survey(mag_srcField) # Create the magnetics forward model operator @@ -215,9 +225,14 @@ def create_gravity_sim_active(self, block_value=1.0, noise_floor=0.01): def create_magnetics_sim_active(self, block_value=1.0, noise_floor=0.01): # Create a magnetic survey - H0 = (50000.0, 90.0, 0.0) + h0_amplitude, h0_inclination, h0_declination = (50000.0, 90.0, 0.0) mag_rxLoc = magnetics.Point(data_xyz) - mag_srcField = magnetics.SourceField([mag_rxLoc], parameters=H0) + mag_srcField = magnetics.UniformBackgroundField( + receiver_list=[mag_rxLoc], + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, + ) mag_survey = magnetics.Survey(mag_srcField) # Create the 
magnetics forward model operator @@ -463,7 +478,7 @@ def test_quadtree_grav_inverse(self): self.assertAlmostEqual(model_residual, 0.1, delta=0.1) # Check data converged to less than 10% of target misfit - data_misfit = 2.0 * self.grav_inv.invProb.dmisfit(self.grav_model) + data_misfit = self.grav_inv.invProb.dmisfit(self.grav_model) self.assertLess(data_misfit, dpred.shape[0] * 1.15) def test_quadtree_mag_inverse(self): @@ -481,7 +496,7 @@ def test_quadtree_mag_inverse(self): self.assertAlmostEqual(model_residual, 0.01, delta=0.05) # Check data converged to less than 10% of target misfit - data_misfit = 2.0 * self.mag_inv.invProb.dmisfit(self.mag_model) + data_misfit = self.mag_inv.invProb.dmisfit(self.mag_model) self.assertLess(data_misfit, dpred.shape[0] * 1.1) def test_quadtree_grav_inverse_activecells(self): @@ -501,7 +516,7 @@ def test_quadtree_grav_inverse_activecells(self): self.assertAlmostEqual(model_residual, 0.1, delta=0.1) # Check data converged to less than 10% of target misfit - data_misfit = 2.0 * self.grav_inv_active.invProb.dmisfit( + data_misfit = self.grav_inv_active.invProb.dmisfit( self.grav_model[self.active_cells] ) self.assertLess(data_misfit, dpred.shape[0] * 1.1) @@ -530,7 +545,7 @@ def test_quadtree_mag_inverse_activecells(self): self.assertAlmostEqual(model_residual, 0.01, delta=0.05) # Check data converged to less than 10% of target misfit - data_misfit = 2.0 * self.mag_inv_active.invProb.dmisfit( + data_misfit = self.mag_inv_active.invProb.dmisfit( self.mag_model[self.active_cells] ) self.assertLess(data_misfit, dpred.shape[0] * 1.1) diff --git a/tests/pf/test_sensitivity_PFproblem.py b/tests/pf/test_sensitivity_PFproblem.py index 99d5fb4f37..53c96c96cb 100644 --- a/tests/pf/test_sensitivity_PFproblem.py +++ b/tests/pf/test_sensitivity_PFproblem.py @@ -41,7 +41,7 @@ # # components = ['bx', 'by', 'bz'] # receivers = mag.Point(rxLoc, components=components) -# srcField = mag.SourceField([receivers], parameters=H0) +# srcField = 
mag.UniformBackgroundField([receivers], parameters=H0) # # self.survey = mag.Survey(srcField) # diff --git a/tests/utils/test_io_utils.py b/tests/utils/test_io_utils.py index 54e6282fe0..76159b6a6a 100644 --- a/tests/utils/test_io_utils.py +++ b/tests/utils/test_io_utils.py @@ -242,9 +242,12 @@ def setUp(self): xyz = np.c_[x, y, z] rx = magnetics.receivers.Point(xyz, components="tmi") - inducing_field = (50000.0, 60.0, 15.0) - source_field = magnetics.sources.SourceField( - receiver_list=rx, parameters=inducing_field + h0_amplitude, h0_inclination, h0_declination = (50000.0, 60.0, 15.0) + source_field = magnetics.sources.UniformBackgroundField( + receiver_list=rx, + amplitude=h0_amplitude, + inclination=h0_inclination, + declination=h0_declination, ) survey = magnetics.survey.Survey(source_field) @@ -253,7 +256,7 @@ def setUp(self): self.std = std rx2 = magnetics.receivers.Point(xyz, components="tmi") - src_bad = magnetics.sources.SourceField([rx, rx2]) + src_bad = magnetics.sources.UniformBackgroundField([rx, rx2]) survey_bad = magnetics.survey.Survey(src_bad) self.survey_bad = survey_bad diff --git a/tests/utils/test_mat_utils.py b/tests/utils/test_mat_utils.py index 9fc7018435..30655046b0 100644 --- a/tests/utils/test_mat_utils.py +++ b/tests/utils/test_mat_utils.py @@ -75,7 +75,7 @@ def g(k): def test_dm_eigenvalue_by_power_iteration(self): # Test for a single data misfit - dmis_matrix = self.G.T.dot((self.dmis.W**2).dot(self.G)) + dmis_matrix = 2 * self.G.T.dot((self.dmis.W**2).dot(self.G)) field = self.dmis.simulation.fields(self.true_model) max_eigenvalue_numpy, _ = eigsh(dmis_matrix, k=1) max_eigenvalue_directive = eigenvalue_by_power_iteration( @@ -89,7 +89,7 @@ def test_dm_eigenvalue_by_power_iteration(self): WtW = 0.0 for mult, dm in zip(self.dmiscombo.multipliers, self.dmiscombo.objfcts): WtW += mult * dm.W**2 - dmiscombo_matrix = self.G.T.dot(WtW.dot(self.G)) + dmiscombo_matrix = 2 * self.G.T.dot(WtW.dot(self.G)) max_eigenvalue_numpy, _ = 
eigsh(dmiscombo_matrix, k=1) max_eigenvalue_directive = eigenvalue_by_power_iteration( self.dmiscombo, self.true_model, n_pw_iter=30 @@ -110,7 +110,7 @@ def test_reg_eigenvalue_by_power_iteration(self): def test_combo_eigenvalue_by_power_iteration(self): reg_maxtrix = self.reg.deriv2(self.true_model) - dmis_matrix = self.G.T.dot((self.dmis.W**2).dot(self.G)) + dmis_matrix = 2 * self.G.T.dot((self.dmis.W**2).dot(self.G)) combo_matrix = dmis_matrix + self.beta * reg_maxtrix max_eigenvalue_numpy, _ = eigsh(combo_matrix, k=1) max_eigenvalue_directive = eigenvalue_by_power_iteration( diff --git a/tutorials/02-linear_inversion/plot_inv_2_inversion_irls.py b/tutorials/02-linear_inversion/plot_inv_2_inversion_irls.py index 19ae156e89..4241d5d886 100644 --- a/tutorials/02-linear_inversion/plot_inv_2_inversion_irls.py +++ b/tutorials/02-linear_inversion/plot_inv_2_inversion_irls.py @@ -16,7 +16,6 @@ """ - import numpy as np import matplotlib.pyplot as plt @@ -172,7 +171,7 @@ def g(k): # # Add sensitivity weights but don't update at each beta -sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) +sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) # Reach target misfit for L2 solution, then use IRLS until model stops changing. IRLS = directives.Update_IRLS(max_irls_iterations=40, minGNiter=1, f_min_change=1e-4) diff --git a/tutorials/03-gravity/plot_1a_gravity_anomaly.py b/tutorials/03-gravity/plot_1a_gravity_anomaly.py index f511e9e2a1..22721d3fcb 100644 --- a/tutorials/03-gravity/plot_1a_gravity_anomaly.py +++ b/tutorials/03-gravity/plot_1a_gravity_anomaly.py @@ -180,19 +180,35 @@ # formulation. # -# Define the forward simulation. By setting the 'store_sensitivities' keyword -# argument to "forward_only", we simulate the data without storing the sensitivities +############################################################################### +# Define the forward simulation. 
By setting the ``store_sensitivities`` keyword +# argument to ``"forward_only"``, we simulate the data without storing the +# sensitivities. +# + simulation = gravity.simulation.Simulation3DIntegral( survey=survey, mesh=mesh, rhoMap=model_map, ind_active=ind_active, store_sensitivities="forward_only", + engine="choclo", ) +############################################################################### +# .. tip:: +# +# Since SimPEG v0.21.0 we can use `Choclo +# `_ as the engine for running the gravity +# simulations, which results in faster and more memory efficient runs. Just +# pass ``engine="choclo"`` when constructing the simulation. +# + +############################################################################### # Compute predicted data for some model # SimPEG uses right handed coordinate where Z is positive upward. # This causes gravity signals look "inconsistent" with density values in visualization. + dpred = simulation.dpred(model) # Plot diff --git a/tutorials/03-gravity/plot_1b_gravity_gradiometry.py b/tutorials/03-gravity/plot_1b_gravity_gradiometry.py index 06753d7f2d..7ebeea969e 100644 --- a/tutorials/03-gravity/plot_1b_gravity_gradiometry.py +++ b/tutorials/03-gravity/plot_1b_gravity_gradiometry.py @@ -201,17 +201,33 @@ # formulation. # -# Define the forward simulation. By setting the 'store_sensitivities' keyword -# argument to "forward_only", we simulate the data without storing the sensitivities +############################################################################### +# Define the forward simulation. By setting the ``store_sensitivities`` keyword +# argument to ``"forward_only"``, we simulate the data without storing the +# sensitivities +# + simulation = gravity.simulation.Simulation3DIntegral( survey=survey, mesh=mesh, rhoMap=model_map, ind_active=ind_active, store_sensitivities="forward_only", + engine="choclo", ) +############################################################################### +# .. 
tip:: +# +# Since SimPEG v0.21.0 we can use `Choclo +# `_ as the engine for running the gravity +# simulations, which results in faster and more memory efficient runs. Just +# pass ``engine="choclo"`` when constructing the simulation. +# + +############################################################################### # Compute predicted data for some model + dpred = simulation.dpred(model) n_data = len(dpred) diff --git a/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py b/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py index 3b0c46dcfe..34fd346d83 100644 --- a/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py +++ b/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py @@ -206,9 +206,20 @@ # Here, we define the physics of the gravity problem by using the simulation # class. # +# .. tip:: +# +# Since SimPEG v0.21.0 we can use `Choclo +# `_ as the engine for running the gravity +# simulations, which results in faster and more memory efficient runs. Just +# pass ``engine="choclo"`` when constructing the simulation. +# simulation = gravity.simulation.Simulation3DIntegral( - survey=survey, mesh=mesh, rhoMap=model_map, ind_active=ind_active + survey=survey, + mesh=mesh, + rhoMap=model_map, + ind_active=ind_active, + engine="choclo", ) @@ -230,7 +241,9 @@ dmis = data_misfit.L2DataMisfit(data=data_object, simulation=simulation) # Define the regularization (model objective function). -reg = regularization.WeightedLeastSquares(mesh, indActive=ind_active, mapping=model_map) +reg = regularization.WeightedLeastSquares( + mesh, active_cells=ind_active, mapping=model_map +) # Define how the optimization problem is solved. Here we will use a projected # Gauss-Newton approach that employs the conjugate gradient solver. 
@@ -268,7 +281,7 @@ target_misfit = directives.TargetMisfit(chifact=1) # Add sensitivity weights -sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) +sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) # The directives are defined as a list. directives_list = [ diff --git a/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py b/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py index fdcca19bc8..cc658a6d27 100644 --- a/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py +++ b/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py @@ -208,9 +208,20 @@ # Here, we define the physics of the gravity problem by using the simulation # class. # +# .. tip:: +# +# Since SimPEG v0.21.0 we can use `Choclo +# `_ as the engine for running the gravity +# simulations, which results in faster and more memory efficient runs. Just +# pass ``engine="choclo"`` when constructing the simulation. +# simulation = gravity.simulation.Simulation3DIntegral( - survey=survey, mesh=mesh, rhoMap=model_map, ind_active=ind_active + survey=survey, + mesh=mesh, + rhoMap=model_map, + ind_active=ind_active, + engine="choclo", ) @@ -274,7 +285,7 @@ update_jacobi = directives.UpdatePreconditioner() # Add sensitivity weights -sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) +sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) # The directives are defined as a list. 
directives_list = [ diff --git a/tutorials/04-magnetics/plot_2a_magnetics_induced.py b/tutorials/04-magnetics/plot_2a_magnetics_induced.py index 75e8b2bca1..af08030220 100644 --- a/tutorials/04-magnetics/plot_2a_magnetics_induced.py +++ b/tutorials/04-magnetics/plot_2a_magnetics_induced.py @@ -82,10 +82,12 @@ inclination = 90 declination = 0 strength = 50000 -inducing_field = (strength, inclination, declination) -source_field = magnetics.sources.SourceField( - receiver_list=receiver_list, parameters=inducing_field +source_field = magnetics.sources.UniformBackgroundField( + receiver_list=receiver_list, + amplitude=strength, + inclination=inclination, + declination=declination, ) # Define the survey diff --git a/tutorials/04-magnetics/plot_2b_magnetics_mvi.py b/tutorials/04-magnetics/plot_2b_magnetics_mvi.py index be8f7a63b7..6ffe6ac691 100644 --- a/tutorials/04-magnetics/plot_2b_magnetics_mvi.py +++ b/tutorials/04-magnetics/plot_2b_magnetics_mvi.py @@ -81,10 +81,12 @@ field_inclination = 60 field_declination = 30 field_strength = 50000 -inducing_field = (field_strength, field_inclination, field_declination) -source_field = magnetics.sources.SourceField( - receiver_list=receiver_list, parameters=inducing_field +source_field = magnetics.sources.UniformBackgroundField( + receiver_list=receiver_list, + amplitude=field_strength, + inclination=field_inclination, + declination=field_declination, ) # Define the survey diff --git a/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py b/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py index 0b4bb43ec7..7ff2f45463 100644 --- a/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py +++ b/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py @@ -160,10 +160,12 @@ inclination = 90 declination = 0 strength = 50000 -inducing_field = (strength, inclination, declination) -source_field = magnetics.sources.SourceField( - receiver_list=receiver_list, parameters=inducing_field +source_field = 
magnetics.sources.UniformBackgroundField( + receiver_list=receiver_list, + amplitude=strength, + inclination=inclination, + declination=declination, ) # Define the survey @@ -307,7 +309,7 @@ target_misfit = directives.TargetMisfit(chifact=1) # Add sensitivity weights -sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) +sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) # The directives are defined as a list. directives_list = [ diff --git a/tutorials/05-dcr/plot_fwd_2_dcr2d.py b/tutorials/05-dcr/plot_fwd_2_dcr2d.py index 2e7935463b..747e60e024 100644 --- a/tutorials/05-dcr/plot_fwd_2_dcr2d.py +++ b/tutorials/05-dcr/plot_fwd_2_dcr2d.py @@ -22,7 +22,7 @@ # from discretize import TreeMesh -from discretize.utils import mkvc, refine_tree_xyz, active_from_xyz +from discretize.utils import mkvc, active_from_xyz from SimPEG.utils import model_builder from SimPEG.utils.io_utils.io_utils_electromagnetics import write_dcip2d_ubc @@ -123,11 +123,9 @@ mesh = TreeMesh([hx, hz], x0="CN") # Mesh refinement based on topography -mesh = refine_tree_xyz( - mesh, +mesh.refine_surface( topo_xyz[:, [0, 2]], - octree_levels=[0, 0, 4, 4], - method="surface", + padding_cells_by_level=[0, 0, 4, 4], finalize=False, ) @@ -144,16 +142,12 @@ np.reshape(electrode_locations, (4 * survey.nD, 2)), axis=0 ) -mesh = refine_tree_xyz( - mesh, unique_locations, octree_levels=[4, 4], method="radial", finalize=False -) +mesh.refine_points(unique_locations, padding_cells_by_level=[4, 4], finalize=False) # Refine core mesh region xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0]) xyz = np.c_[mkvc(xp), mkvc(zp)] -mesh = refine_tree_xyz( - mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False -) +mesh.refine_bounding_box(xyz, padding_cells_by_level=[0, 0, 2, 8], finalize=False) mesh.finalize() diff --git a/tutorials/05-dcr/plot_inv_2_dcr2d.py b/tutorials/05-dcr/plot_inv_2_dcr2d.py index a0479907ad..fd47d61bc2 100644 --- 
a/tutorials/05-dcr/plot_inv_2_dcr2d.py +++ b/tutorials/05-dcr/plot_inv_2_dcr2d.py @@ -29,7 +29,7 @@ import tarfile from discretize import TreeMesh -from discretize.utils import mkvc, refine_tree_xyz, active_from_xyz +from discretize.utils import mkvc, active_from_xyz from SimPEG.utils import model_builder from SimPEG import ( @@ -173,11 +173,9 @@ mesh = TreeMesh([hx, hz], x0="CN") # Mesh refinement based on topography -mesh = refine_tree_xyz( - mesh, +mesh.refine_surface( topo_xyz[:, [0, 2]], - octree_levels=[0, 0, 4, 4], - method="surface", + padding_cells_by_level=[0, 0, 4, 4], finalize=False, ) @@ -194,16 +192,12 @@ np.reshape(electrode_locations, (4 * dc_data.survey.nD, 2)), axis=0 ) -mesh = refine_tree_xyz( - mesh, unique_locations, octree_levels=[4, 4], method="radial", finalize=False -) +mesh.refine_points(unique_locations, padding_cells_by_level=[4, 4], finalize=False) # Refine core mesh region xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0]) xyz = np.c_[mkvc(xp), mkvc(zp)] -mesh = refine_tree_xyz( - mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False -) +mesh.refine_bounding_box(xyz, padding_cells_by_level=[0, 0, 2, 8], finalize=False) mesh.finalize() diff --git a/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py b/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py index 2cf97a0bc8..a6a4024e1f 100644 --- a/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py +++ b/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py @@ -29,7 +29,7 @@ import tarfile from discretize import TreeMesh -from discretize.utils import mkvc, refine_tree_xyz, active_from_xyz +from discretize.utils import mkvc, active_from_xyz from SimPEG.utils import model_builder from SimPEG import ( @@ -179,11 +179,9 @@ mesh = TreeMesh([hx, hz], x0="CN") # Mesh refinement based on topography -mesh = refine_tree_xyz( - mesh, +mesh.refine_surface( topo_xyz[:, [0, 2]], - octree_levels=[0, 0, 4, 4], - method="surface", + padding_cells_by_level=[0, 0, 4, 4], finalize=False, ) @@ -200,16 +198,12 @@ 
np.reshape(electrode_locations, (4 * dc_data.survey.nD, 2)), axis=0 ) -mesh = refine_tree_xyz( - mesh, unique_locations, octree_levels=[4, 4], method="radial", finalize=False -) +mesh.refine_points(unique_locations, padding_cells_by_level=[4, 4], finalize=False) # Refine core mesh region xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0]) xyz = np.c_[mkvc(xp), mkvc(zp)] -mesh = refine_tree_xyz( - mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False -) +mesh.refine_bounding_box(xyz, padding_cells_by_level=[0, 0, 2, 8], finalize=False) mesh.finalize() @@ -302,10 +296,10 @@ reg = regularization.Sparse( mesh, - indActive=ind_active, + active_cells=ind_active, reference_model=starting_conductivity_model, mapping=regmap, - gradientType="total", + gradient_type="total", alpha_s=0.01, alpha_x=1, alpha_y=1, diff --git a/tutorials/05-dcr/plot_inv_3_dcr3d.py b/tutorials/05-dcr/plot_inv_3_dcr3d.py index 16498c2aad..34633b0e73 100644 --- a/tutorials/05-dcr/plot_inv_3_dcr3d.py +++ b/tutorials/05-dcr/plot_inv_3_dcr3d.py @@ -300,7 +300,7 @@ # Define the regularization (model objective function) dc_regularization = regularization.WeightedLeastSquares( mesh, - indActive=ind_active, + active_cells=ind_active, reference_model=starting_conductivity_model, ) diff --git a/tutorials/06-ip/plot_fwd_2_dcip2d.py b/tutorials/06-ip/plot_fwd_2_dcip2d.py index 0631392f6d..ada51a11ef 100644 --- a/tutorials/06-ip/plot_fwd_2_dcip2d.py +++ b/tutorials/06-ip/plot_fwd_2_dcip2d.py @@ -30,7 +30,7 @@ # from discretize import TreeMesh -from discretize.utils import mkvc, refine_tree_xyz, active_from_xyz +from discretize.utils import mkvc, active_from_xyz from SimPEG.utils import model_builder from SimPEG.utils.io_utils.io_utils_electromagnetics import write_dcip2d_ubc @@ -135,11 +135,9 @@ mesh = TreeMesh([hx, hz], x0="CN") # Mesh refinement based on topography -mesh = refine_tree_xyz( - mesh, +mesh.refine_surface( topo_xyz[:, [0, 2]], - octree_levels=[0, 0, 4, 4], - method="surface", + 
padding_cells_by_level=[0, 0, 4, 4], finalize=False, ) @@ -156,16 +154,12 @@ np.reshape(electrode_locations, (4 * dc_survey.nD, 2)), axis=0 ) -mesh = refine_tree_xyz( - mesh, unique_locations, octree_levels=[4, 4], method="radial", finalize=False -) +mesh.refine_points(unique_locations, padding_cells_by_level=[4, 4], finalize=False) # Refine core mesh region xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0]) xyz = np.c_[mkvc(xp), mkvc(zp)] -mesh = refine_tree_xyz( - mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False -) +mesh.refine_bounding_box(xyz, padding_cells_by_level=[0, 0, 2, 8], finalize=False) mesh.finalize() diff --git a/tutorials/06-ip/plot_inv_2_dcip2d.py b/tutorials/06-ip/plot_inv_2_dcip2d.py index 2215462ffd..fe274ce764 100644 --- a/tutorials/06-ip/plot_inv_2_dcip2d.py +++ b/tutorials/06-ip/plot_inv_2_dcip2d.py @@ -33,7 +33,7 @@ import tarfile from discretize import TreeMesh -from discretize.utils import mkvc, refine_tree_xyz, active_from_xyz +from discretize.utils import mkvc, active_from_xyz from SimPEG.utils import model_builder from SimPEG import ( @@ -188,11 +188,9 @@ mesh = TreeMesh([hx, hz], x0="CN") # Mesh refinement based on topography -mesh = refine_tree_xyz( - mesh, +mesh.refine_surface( topo_xyz[:, [0, 2]], - octree_levels=[0, 0, 4, 4], - method="surface", + padding_cells_by_level=[0, 0, 4, 4], finalize=False, ) @@ -209,16 +207,12 @@ np.reshape(electrode_locations, (4 * dc_data.survey.nD, 2)), axis=0 ) -mesh = refine_tree_xyz( - mesh, unique_locations, octree_levels=[4, 4], method="radial", finalize=False -) +mesh.refine_points(unique_locations, padding_cells_by_level=[4, 4], finalize=False) # Refine core mesh region xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0]) xyz = np.c_[mkvc(xp), mkvc(zp)] -mesh = refine_tree_xyz( - mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False -) +mesh.refine_bounding_box(xyz, padding_cells_by_level=[0, 0, 2, 8], finalize=False) mesh.finalize() @@ -310,7 +304,7 @@ # 
Define the regularization (model objective function) dc_regularization = regularization.WeightedLeastSquares( mesh, - indActive=ind_active, + active_cells=ind_active, reference_model=starting_conductivity_model, alpha_s=0.01, alpha_x=1, @@ -542,7 +536,7 @@ # Define the regularization (model objective function) ip_regularization = regularization.WeightedLeastSquares( mesh, - indActive=ind_active, + active_cells=ind_active, mapping=maps.IdentityMap(nP=nC), alpha_s=0.01, alpha_x=1, @@ -567,7 +561,7 @@ # Here we define the directives in the same manner as the DC inverse problem. # -update_sensitivity_weighting = directives.UpdateSensitivityWeights(threshold=1e-3) +update_sensitivity_weighting = directives.UpdateSensitivityWeights(threshold_value=1e-3) starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e1) beta_schedule = directives.BetaSchedule(coolingFactor=2, coolingRate=1) save_iteration = directives.SaveOutputEveryIteration(save_txt=False) diff --git a/tutorials/06-ip/plot_inv_3_dcip3d.py b/tutorials/06-ip/plot_inv_3_dcip3d.py index 9193d829b7..39f8a9a9fd 100644 --- a/tutorials/06-ip/plot_inv_3_dcip3d.py +++ b/tutorials/06-ip/plot_inv_3_dcip3d.py @@ -348,7 +348,7 @@ # Define the regularization (model objective function) dc_regularization = regularization.WeightedLeastSquares( mesh, - indActive=ind_active, + active_cells=ind_active, reference_model=starting_conductivity_model, ) @@ -608,7 +608,7 @@ # Define the regularization (model objective function) ip_regularization = regularization.WeightedLeastSquares( mesh, - indActive=ind_active, + active_cells=ind_active, mapping=maps.IdentityMap(nP=nC), alpha_s=0.01, alpha_x=1, @@ -633,7 +633,7 @@ # Here we define the directives in the same manner as the DC inverse problem. 
# -update_sensitivity_weighting = directives.UpdateSensitivityWeights(threshold=1e-3) +update_sensitivity_weighting = directives.UpdateSensitivityWeights(threshold_value=1e-3) starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e2) beta_schedule = directives.BetaSchedule(coolingFactor=2.5, coolingRate=1) save_iteration = directives.SaveOutputEveryIteration(save_txt=False) diff --git a/tutorials/07-fdem/plot_inv_1_em1dfm.py b/tutorials/07-fdem/plot_inv_1_em1dfm.py index 8c58fc7adc..d222fd9d62 100644 --- a/tutorials/07-fdem/plot_inv_1_em1dfm.py +++ b/tutorials/07-fdem/plot_inv_1_em1dfm.py @@ -18,7 +18,6 @@ """ - ######################################################################### # Import modules # -------------- @@ -238,7 +237,7 @@ reg = regularization.Sparse(mesh, mapping=reg_map, alpha_s=0.025, alpha_x=1.0) # reference model -reg.mref = starting_model +reg.reference_model = starting_model # Define sparse and blocky norms p, q reg.norms = [0, 0] diff --git a/tutorials/08-tdem/plot_inv_1_em1dtm.py b/tutorials/08-tdem/plot_inv_1_em1dtm.py index ae188747c0..9e189b32cb 100644 --- a/tutorials/08-tdem/plot_inv_1_em1dtm.py +++ b/tutorials/08-tdem/plot_inv_1_em1dtm.py @@ -18,7 +18,6 @@ """ - ######################################################################### # Import modules # -------------- @@ -227,7 +226,7 @@ reg = regularization.Sparse(mesh, mapping=reg_map, alpha_s=0.01, alpha_x=1.0) # set reference model -reg.mref = starting_model +reg.reference_model = starting_model # Define sparse and blocky norms p, q reg.norms = [1, 0] diff --git a/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py b/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py index 56ef62c72a..cf271661d2 100755 --- a/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py +++ b/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py @@ -196,11 +196,13 @@ inclination = 90 declination = 0 strength = 50000 -inducing_field = (strength, inclination, declination) 
# Define the source field and survey for gravity data -source_field_mag = magnetics.sources.SourceField( - receiver_list=[receiver_mag], parameters=inducing_field +source_field_mag = magnetics.sources.UniformBackgroundField( + receiver_list=[receiver_mag], + amplitude=strength, + inclination=inclination, + declination=declination, ) survey_mag = magnetics.survey.Survey(source_field_mag) @@ -273,9 +275,21 @@ # Here, we define the physics of the gravity and magnetic problems by using the simulation # class. # +# .. tip:: +# +# Since SimPEG v0.21.0 we can use `Choclo +# `_ as the engine for running the gravity +# simulations, which results in faster and more memory efficient runs. Just +# pass ``engine="choclo"`` when constructing the simulation. +# + simulation_grav = gravity.simulation.Simulation3DIntegral( - survey=survey_grav, mesh=mesh, rhoMap=wires.density, ind_active=ind_active + survey=survey_grav, + mesh=mesh, + rhoMap=wires.density, + ind_active=ind_active, + engine="choclo", ) simulation_mag = magnetics.simulation.Simulation3DIntegral( @@ -308,15 +322,15 @@ # Define the regularization (model objective function). 
reg_grav = regularization.WeightedLeastSquares( - mesh, indActive=ind_active, mapping=wires.density + mesh, active_cells=ind_active, mapping=wires.density ) reg_mag = regularization.WeightedLeastSquares( - mesh, indActive=ind_active, mapping=wires.susceptibility + mesh, active_cells=ind_active, mapping=wires.susceptibility ) # Define the coupling term to connect two different physical property models lamda = 2e12 # weight for coupling term -cross_grad = regularization.CrossGradient(mesh, wires, indActive=ind_active) +cross_grad = regularization.CrossGradient(mesh, wires, active_cells=ind_active) # combo dmis = dmis_grav + dmis_mag @@ -363,7 +377,7 @@ stopping = directives.MovingAndMultiTargetStopping(tol=1e-6) -sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False) +sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) # Updating the preconditionner if it is model dependent. update_jacobi = directives.UpdatePreconditioner() diff --git a/tutorials/14-pgi/plot_inv_1_joint_pf_pgi_full_info_tutorial.py b/tutorials/14-pgi/plot_inv_1_joint_pf_pgi_full_info_tutorial.py index 59e842354d..c6e170bf27 100644 --- a/tutorials/14-pgi/plot_inv_1_joint_pf_pgi_full_info_tutorial.py +++ b/tutorials/14-pgi/plot_inv_1_joint_pf_pgi_full_info_tutorial.py @@ -22,6 +22,7 @@ `_. """ + ######################################################################### # Import modules # -------------- @@ -233,6 +234,7 @@ mesh=mesh, rhoMap=wires.den, ind_active=actv, + engine="choclo", ) dmis_grav = data_misfit.L2DataMisfit(data=data_grav, simulation=simulation_grav) # Mag problem diff --git a/tutorials/14-pgi/plot_inv_2_joint_pf_pgi_no_info_tutorial.py b/tutorials/14-pgi/plot_inv_2_joint_pf_pgi_no_info_tutorial.py index 6880a7c15c..0e57205665 100644 --- a/tutorials/14-pgi/plot_inv_2_joint_pf_pgi_no_info_tutorial.py +++ b/tutorials/14-pgi/plot_inv_2_joint_pf_pgi_no_info_tutorial.py @@ -23,6 +23,7 @@ `_. 
""" + ######################################################################### # Import modules # -------------- @@ -234,6 +235,7 @@ mesh=mesh, rhoMap=wires.den, ind_active=actv, + engine="choclo", ) dmis_grav = data_misfit.L2DataMisfit(data=data_grav, simulation=simulation_grav) # Mag problem diff --git a/tutorials/_temporary/plot_4c_fdem3d_inversion.py b/tutorials/_temporary/plot_4c_fdem3d_inversion.py index 5ac26bff26..6282d22b7f 100644 --- a/tutorials/_temporary/plot_4c_fdem3d_inversion.py +++ b/tutorials/_temporary/plot_4c_fdem3d_inversion.py @@ -313,7 +313,7 @@ # Define the regularization (model objective function) reg = regularization.WeightedLeastSquares( mesh, - indActive=ind_active, + active_cells=ind_active, reference_model=starting_model, alpha_s=1e-2, alpha_x=1, diff --git a/tutorials/_temporary/plot_inv_1_em1dtm_stitched_skytem.py b/tutorials/_temporary/plot_inv_1_em1dtm_stitched_skytem.py index 7cdb188cba..6419f37d23 100644 --- a/tutorials/_temporary/plot_inv_1_em1dtm_stitched_skytem.py +++ b/tutorials/_temporary/plot_inv_1_em1dtm_stitched_skytem.py @@ -455,7 +455,7 @@ def PolygonInd(mesh, pts): ax2 = fig.add_axes([0.85, 0.12, 0.05, 0.78]) norm = mpl.colors.Normalize( vmin=np.log10(true_model.min()), - vmax=np.log10(true_model.max()) + vmax=np.log10(true_model.max()), # vmin=np.log10(0.1), vmax=np.log10(1) ) cbar = mpl.colorbar.ColorbarBase(