diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 00000000..6fc8a45b Binary files /dev/null and b/.DS_Store differ diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index cd1c5f82..b8cd5242 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -7,22 +7,29 @@ on: pull_request: branches: - main + - NG20 jobs: build: + runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: [ubuntu-latest] # Once we get the tex packages changed, we should include "macos-13" python-version: ["3.9", "3.10", "3.11", "3.12"] + os: [ubuntu-latest] # Once we get the tex packages changed, we should include "macos-13" + python-version: ["3.9", "3.10", "3.11", "3.12"] steps: + - name: Install Required Ubuntu Packages - name: Install Required Ubuntu Packages run: | sudo apt-get update sudo apt-get install texlive-latex-base cm-super-minimal pdftk latex2html - uses: actions/checkout@v4 + - uses: mamba-org/setup-micromamba@v1 + - uses: actions/checkout@v4 - uses: mamba-org/setup-micromamba@v1 with: init-shell: bash @@ -49,6 +56,31 @@ jobs: jupyter seaborn gitpython + - name: Install Main Code + init-shell: bash + environment-name: pulsar + cache-environment: true + cache-downloads: true + create-args: >- + -c conda-forge + python=${{ matrix.python-version }} + pytest + cython + pint-pulsar + tempo2 + libstempo + enterprise-pulsar + enterprise_extensions + scikit-sparse + scikit-learn + ruamel.yaml + nbconvert + ipywidgets>=7.6.3 + weasyprint + pytest-xdist>=2.3.0 + jupyter + seaborn + gitpython - name: Install Main Code shell: bash -el {0} run: | @@ -61,13 +93,18 @@ jobs: pytest tests/test_run_notebook.py -k $PULSAR_NAME ls -lah mv tmp* nb_outputs + export JUPYTER_PLATFORM_DIRS=1 + pytest tests/test_run_notebook.py -k $PULSAR_NAME + ls -lah + mv tmp* nb_outputs - name: Archive Notebook Output Files + uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4 with: + 
name: TestNB-OutputFiles_${{ matrix.python-version }}_${{ matrix.os }}_${{ steps.date.outputs.date }} name: TestNB-OutputFiles_${{ matrix.python-version }}_${{ matrix.os }}_${{ steps.date.outputs.date }} path: | nb_outputs/*/*.pdf nb_outputs/*/*.tim nb_outputs/*/*.par compression-level: 6 - diff --git a/nb_templates/process_v1.1.ipynb b/nb_templates/process_v1.1.ipynb index bc1a16ad..87ba7441 100644 --- a/nb_templates/process_v1.1.ipynb +++ b/nb_templates/process_v1.1.ipynb @@ -262,7 +262,7 @@ " # to temporarily address current inconsistency between enterprise <= v3.1.0 and pint implementations\n", " mo_new = lu.convert_enterprise_equads(mo_new)\n", " \n", - " except OSError as e:\n", + " except (OSError, ValueError) as e:\n", " log.warning(f\"Unable to read noise chains from {tc.get_noise_dir()}: {e}\")\n", " else:\n", " mo = mo_new\n", diff --git a/nb_templates/process_v1.2.ipynb b/nb_templates/process_v1.2.ipynb index ae1871cd..ee97cf91 100644 --- a/nb_templates/process_v1.2.ipynb +++ b/nb_templates/process_v1.2.ipynb @@ -48,7 +48,6 @@ "autorun = False\n", "run_Ftest = True # Set to False if you don't need F-tests and want a faster notebook run!\n", "check_excision = True\n", - "num_noise_iter = 2e5\n", "\n", "if not autorun:\n", " run_noise_analysis = False \n", @@ -257,12 +256,17 @@ "if run_noise_analysis or use_existing_noise_dir:\n", " mo_new = copy.deepcopy(mo)\n", " lu.remove_noise(mo_new)\n", - " nu.model_noise(mo_new, to, using_wideband = using_wideband, run_noise_analysis = run_noise_analysis, n_iter = num_noise_iter)\n", + " nu.model_noise(mo_new, to,\n", + " using_wideband = using_wideband,\n", + " run_noise_analysis = run_noise_analysis,\n", + " model_kwargs=tc.config['noise_run']['model'],\n", + " sampler_kwargs=tc.config['noise_run']['inference'],\n", + " )\n", " try:\n", " mo_new = nu.add_noise_to_model(mo_new, using_wideband = using_wideband, base_dir=tc.get_noise_dir(), \n", " compare_dir=tc.get_compare_noise_dir(), 
no_corner_plot=tc.get_no_corner())\n", " \n", - " except OSError as e:\n", + " except (OSError, ValueError) as e:\n", " log.warning(f\"Unable to read noise chains from {tc.get_noise_dir()}: {e}\")\n", " else:\n", " mo = mo_new\n", diff --git a/pyproject.toml b/pyproject.toml index b6d1652d..697c4ecd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,6 +13,7 @@ authors = [ { name="Anne Archibald", email="anne.archibald@nanograv.org" }, { name="Kevin Wilson", email="kevin.wilson@nanograv.org" }, { name="Ross Jennings", email="ross.jennings@nanograv.org" }, + { name="Jeremy Baier", email="jeremy.baier@nanograv.org"} ] description = "A long-lived repository for NANOGrav Pulsar Timing analysis work." readme = "README.md" @@ -31,7 +32,9 @@ dependencies = [ "pytest-xdist[psutil]>=2.3.0", "notebook", "seaborn", - "gitpython", + "la-forge", + "arviz", + "fastshermanmorrison-pulsar", ] classifiers = [ "Programming Language :: Python :: 3", @@ -39,6 +42,9 @@ classifiers = [ "Operating System :: OS Independent", ] +[tool.setuptools.package-data] +pint_pal = ["defaults.yaml", "plot_settings.yaml"] + [project.urls] "Homepage" = "https://github.com/nanograv/pint_pal" "Bug Tracker" = "https://github.com/nanograv/pint_pal/issues" diff --git a/src/.DS_Store b/src/.DS_Store new file mode 100644 index 00000000..a43e719c Binary files /dev/null and b/src/.DS_Store differ diff --git a/src/pint_pal/__init__.py b/src/pint_pal/__init__.py index af9b84cf..ba6ea479 100644 --- a/src/pint_pal/__init__.py +++ b/src/pint_pal/__init__.py @@ -1,4 +1,6 @@ import pint_pal.checkin +import pint_pal.config +from pint_pal.config import set_data_root, reset_data_root from . 
import _version __version__ = _version.get_versions()['version'] diff --git a/src/pint_pal/config.py b/src/pint_pal/config.py new file mode 100644 index 00000000..faf79495 --- /dev/null +++ b/src/pint_pal/config.py @@ -0,0 +1,69 @@ +from ruamel.yaml import YAML +import os.path +yaml = YAML(typ='safe') +PACKAGE_DIR = os.path.dirname(__file__) +DATA_ROOT = '.' + +def set_data_root(path): + """ + Set the root directory of the data repository to be used with PINT Pal. + PINT Pal will search this directory for a configuration file specifying settings + such as the appropriate JPL ephemeris and version of TT(BIPM) to check for when + validating timing models. + + It will also be treated as the base directory when resolving paths in YAML + configuration files. This allows notebooks (or scripts) using YAML files within + the data repository, which specify paths relative to the data root, to be run + from other locations. + + The default value of `data_root` is '.' (the current working directory), which + is sufficient in cases where either (1) no data repository is in use, or + (2) all scripts and notebooks are run from the root of the data repository. + """ + global DATA_ROOT + DATA_ROOT = os.path.realpath(os.path.expanduser(path)) + try: + read_config_file(os.path.join(DATA_ROOT, 'pint_pal_project.yaml')) + except FileNotFoundError: + pass + +def reset_data_root(): + """ + Reset the data root and config variables to the default values. + """ + global DATA_ROOT + DATA_ROOT = '.' + read_config_file(os.path.join(PACKAGE_DIR, 'defaults.yaml')) + +def read_config_file(config_file): + """ + Read a configuration file, along the lines of `defaults.yaml`, and load the results + into a location that can be accessed by other PINT Pal code. 
+ """ + with open(config_file, 'r') as f: + config = yaml.load(f) + + global LATEST_BIPM + global LATEST_EPHEM + global PLANET_SHAPIRO + global CORRECT_TROPOSPHERE + global FREQUENCY_RATIO + global MAX_SOLARWIND_DELAY + global LATEST_TOA_RELEASE + + if 'LATEST_BIPM' in config: + LATEST_BIPM = config['LATEST_BIPM'] + if 'LATEST_EPHEM' in config: + LATEST_EPHEM = config['LATEST_EPHEM'] + if 'PLANET_SHAPIRO' in config: + PLANET_SHAPIRO = config['PLANET_SHAPIRO'] + if 'CORRECT_TROPOSPHERE' in config: + CORRECT_TROPOSPHERE = config['CORRECT_TROPOSPHERE'] + if 'FREQUENCY_RATIO' in config: + FREQUENCY_RATIO = config['FREQUENCY_RATIO'] + if 'MAX_SOLARWIND_DELAY' in config: + MAX_SOLARWIND_DELAY = config['MAX_SOLARWIND_DELAY'] + if 'LATEST_TOA_RELEASE' in config: + LATEST_TOA_RELEASE = config['LATEST_TOA_RELEASE'] + +read_config_file(os.path.join(PACKAGE_DIR, 'defaults.yaml')) diff --git a/src/pint_pal/defaults.py b/src/pint_pal/defaults.py deleted file mode 100644 index 0bc6c674..00000000 --- a/src/pint_pal/defaults.py +++ /dev/null @@ -1,16 +0,0 @@ -# Here we keep track of global default settings - -# Choice of clock, SSE -LATEST_BIPM = "BIPM2021" # latest clock realization to use -LATEST_EPHEM = "DE440" # latest solar system ephemeris to use - -# Toggle various corrections -PLANET_SHAPIRO = True # correct for Shapiro delay from planets -CORRECT_TROPOSPHERE = True # correct for tropospheric delays - -# DMX model defaults -FREQUENCY_RATIO = 1.1 # set the high/low frequency ratio for DMX bins -MAX_SOLARWIND_DELAY = 0.1 # set the maximum permited 'delay' from SW [us] - -# Desired TOA release tag -LATEST_TOA_RELEASE = "2021.08.25-9d8d617" # current set of TOAs available diff --git a/src/pint_pal/defaults.yaml b/src/pint_pal/defaults.yaml new file mode 100644 index 00000000..6cfcc3d3 --- /dev/null +++ b/src/pint_pal/defaults.yaml @@ -0,0 +1,21 @@ +# Here we keep track of global default settings +# +# These can be overridden on a per-project basis by placing a file +# called 
`pint_pal_project.yaml` in the `data_root` location (this +# defaults to the current working directory, but can be configured +# with `pint_pal.set_data_root()`). + +# Choice of clock, SSE +LATEST_BIPM: "BIPM2021" # latest clock realization to use +LATEST_EPHEM: "DE440" # latest solar system ephemeris to use + +# Toggle various corrections +PLANET_SHAPIRO: True # correct for Shapiro delay from planets +CORRECT_TROPOSPHERE: True # correct for tropospheric delays + +# DMX model defaults +FREQUENCY_RATIO: 1.1 # set the high/low frequency ratio for DMX bins +MAX_SOLARWIND_DELAY: 0.1 # set the maximum permited 'delay' from SW [us] + +# Desired TOA release tag +LATEST_TOA_RELEASE: "2021.08.25-9d8d617" # current set of TOAs available diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index 024f7583..2049674e 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -1,5 +1,7 @@ +from typing import Any, Optional, Tuple, Union import numpy as np from astropy import log +import pint from pint_pal.utils import apply_cut_flag, apply_cut_select class DMXParameter: @@ -9,7 +11,7 @@ class DMXParameter: aliases = {'idx':'index', 'val':'dmx_val', 'err':'dmx_err', 'ep':'epoch', 'r1':'low_mjd', 'r2':'high_mjd', 'f1':'low_freq', 'f2':'high_freq', 'mask':'toa_mask'} - def __init__(self): + def __init__(self) -> None: """ """ self.idx = 0 # index label [int] @@ -22,17 +24,17 @@ def __init__(self): self.f2 = 0.0 # highest frequency [MHz] self.mask = [] # Boolean index array for selecting TOAs - def __setattr__(self, name, value): + def __setattr__(self, name: str, value: Any) -> None: name = self.aliases.get(name, name) object.__setattr__(self, name, value) - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: if name == 'aliases': raise AttributeError # http://nedbatchelder.com/blog/201010/surprising_getattr_recursion.html name = self.aliases.get(name, name) return object.__getattribute__(self, name) - def print_dmx(self, 
range_only=False, fit_flag=True, fortran=False): + def print_dmx(self, range_only: bool = False, fit_flag: bool = True, fortran: bool = False) -> None: """ Print TEMPO-style DMX parameter. @@ -60,7 +62,7 @@ def print_dmx(self, range_only=False, fit_flag=True, fortran=False): print(DMX_str) -def group_dates(toas, group_width=0.1): +def group_dates(toas: pint.toa.TOAs, group_width: float = 0.1) -> list: """ Returns MJDs of groups of TOAs no wider than a specified amount. @@ -93,8 +95,13 @@ def group_dates(toas, group_width=0.1): return group_mjds -def get_dmx_ranges(toas, bin_width=1.0, pad=0.0, strict_inclusion=True, - check=True): +def get_dmx_ranges( + toas: pint.toa.TOAs, + bin_width: float = 1.0, + pad: float = 0.0, + strict_inclusion: bool = True, + check: bool = True +) -> list: """ Returns a list of low and high MJDs defining DMX ranges, covering all TOAs. @@ -151,8 +158,14 @@ def get_dmx_ranges(toas, bin_width=1.0, pad=0.0, strict_inclusion=True, return dmx_ranges -def get_gasp_dmx_ranges(toas, group_width=0.1, bin_width=15.0, pad=0.0, - strict_inclusion=True, check=True): +def get_gasp_dmx_ranges( + toas: pint.toa.TOAs, + group_width: float = 0.1, + bin_width: float = 15.0, + pad: float = 0.0, + strict_inclusion: bool = True, + check: bool = True +) -> list: """ Return a list of DMX ranges that group GASP TOAs into bins. @@ -221,8 +234,15 @@ def get_gasp_dmx_ranges(toas, group_width=0.1, bin_width=15.0, pad=0.0, return dmx_ranges -def expand_dmx_ranges(toas, dmx_ranges, bin_width=1.0, pad=0.0, - strict_inclusion=True, add_new_ranges=False, check=True): +def expand_dmx_ranges( + toas: pint.toa.TOAs, + dmx_ranges: list, + bin_width: float = 1.0, + pad: float = 0.0, + strict_inclusion: bool = True, + add_new_ranges: bool = False, + check: bool = True +) -> list: """ Expands DMX ranges to accommodate new TOAs up to a maximum bin width. 
@@ -297,7 +317,12 @@ def expand_dmx_ranges(toas, dmx_ranges, bin_width=1.0, pad=0.0, return dmx_ranges -def check_dmx_ranges(toas, dmx_ranges, full_return=False, quiet=False): +def check_dmx_ranges( + toas: pint.toa.TOAs, + dmx_ranges: list, + full_return: bool = False, + quiet: bool = False +) -> Union[Tuple[list, list, list, list, list, list],None]: """ Ensures all TOAs match only one DMX bin and all bins have at least one TOA. @@ -392,7 +417,7 @@ def check_dmx_ranges(toas, dmx_ranges, full_return=False, quiet=False): return masks, ibad, iover, iempty, inone, imult -def get_dmx_mask(toas, low_mjd, high_mjd, strict_inclusion=True): +def get_dmx_mask(toas: pint.toa.TOAs, low_mjd: float, high_mjd: float, strict_inclusion: bool = True) -> np.ndarray: """ Return a Boolean index array for selecting TOAs from toas in a DMX range. @@ -413,7 +438,7 @@ def get_dmx_mask(toas, low_mjd, high_mjd, strict_inclusion=True): return mask -def get_dmx_epoch(toas, weighted_average=True): +def get_dmx_epoch(toas: pint.toa.TOAs, weighted_average: bool = True) -> float: """ Return the epoch of a DMX bin. @@ -435,43 +460,45 @@ def get_dmx_epoch(toas, weighted_average=True): return epoch -def get_dmx_freqs(toas, allow_wideband=True): +def get_dmx_freqs(toas: pint.toa.TOAs, mask: np.ndarray, allow_wideband: bool = True) -> Tuple[float, float]: """ Return the lowest and highest frequency of the TOAs in a DMX bin. - toas is a PINT TOA object of TOAs in the DMX bin. + toas is a PINT TOA object containing all the relevant TOAs. + mask is a boolean mask that identifies the TOAs in this DMX bin. allow_wideband=True will consider the -fratio and -bw flags in the determination of these frequencies, if toas contains wideband TOAs. """ - freqs = toas.get_freqs().value # MHz - high_freq = 0.0 - low_freq = np.inf + freqs = toas.get_freqs()[mask].value # MHz + high_freq = np.max(freqs, initial=0.) 
+ low_freq = np.min(freqs, initial=np.inf) - # indices of wideband TOAs - iwb = np.arange(len(toas))[np.array(toas.get_flag_value('pp_dm')[0]) \ - != None] - if allow_wideband: # the following arrays will be empty if narrowband TOAs - fratios = toas[iwb].get_flag_value('fratio') # frequency ratio / WB TOA - fratios = np.array(fratios[0]) - bws = toas[iwb].get_flag_value('bw') # bandwidth [MHz] / WB TOA - bws = np.array(bws[0]) + if allow_wideband: + # indices of wideband TOAs + wb_mask = mask & (np.array(toas.get_flag_value('pp_dm')[0]) != None) + # the following arrays will be empty if all TOAs are narrowband + fratios = toas.get_flag_value('fratio')[0] # frequency ratio / WB TOA + fratios = np.array(fratios)[wb_mask] + bws = toas.get_flag_value('bw')[0] # bandwidth [MHz] / WB TOA + bws = np.array(bws)[wb_mask] low_freqs = bws.astype('float32') / (fratios.astype('float32') - 1) + low_freq = min(low_freq, np.min(low_freqs, initial=np.inf)) high_freqs = bws.astype('float32') + low_freqs - - for itoa in range(len(toas)): - if itoa in iwb and allow_wideband: - if low_freqs[itoa] < low_freq: low_freq = low_freqs[itoa] - if high_freqs[itoa] > high_freq: high_freq = high_freqs[itoa] - else: - if freqs[itoa] < low_freq: low_freq = freqs[itoa] - if freqs[itoa] > high_freq: high_freq = freqs[itoa] + high_freq = max(high_freq, np.max(high_freqs, initial=0.)) return low_freq, high_freq -def check_frequency_ratio(toas, dmx_ranges, frequency_ratio=1.1, - strict_inclusion=True, allow_wideband=True, invert=False, quiet=False): +def check_frequency_ratio( + toas: pint.toa.TOAs, + dmx_ranges: list, + frequency_ratio: float = 1.1, + strict_inclusion: bool = True, + allow_wideband: bool = True, + invert: bool = False, + quiet: bool = False +) -> Tuple[np.ndarray, np.ndarray]: """ Check that the TOAs in a DMX bin pass a frequency ratio criterion. 
@@ -498,15 +525,15 @@ def check_frequency_ratio(toas, dmx_ranges, frequency_ratio=1.1, low_mjd, high_mjd = dmx_range[0], dmx_range[1] mask = get_dmx_mask(toas, low_mjd, high_mjd, strict_inclusion=strict_inclusion) - low_freq, high_freq = get_dmx_freqs(toas[mask], + low_freq, high_freq = get_dmx_freqs(toas, mask, allow_wideband=allow_wideband) if high_freq / low_freq >= frequency_ratio: # passes toa_mask += mask dmx_range_mask[irange] = True else: # fails - nfail_toas += len(toas[mask]) + nfail_toas += np.sum(mask) if not quiet: - msg = f"DMX range with pythonic index {irange}, correponding to the DMX range {dmx_ranges[irange]}, contains TOAs that do not pass the frequency ratio test (TOAs with MJDs {toas[mask].get_mjds().value})." + msg = f"DMX range with pythonic index {irange}, correponding to the DMX range {dmx_ranges[irange]}, contains TOAs that do not pass the frequency ratio test (TOAs with MJDs {toas.get_mjds()[mask].value})." log.info(msg) nfail_ranges = sum(np.logical_not(dmx_range_mask)) @@ -522,9 +549,20 @@ def check_frequency_ratio(toas, dmx_ranges, frequency_ratio=1.1, np.arange(len(dmx_ranges))[np.logical_not(dmx_range_mask)] -def check_solar_wind(toas, dmx_ranges, model, max_delta_t=0.1, bin_width=1.0, - solar_n0=5.0, allow_wideband=True, strict_inclusion=True, pad=0.0, - check=True, return_only=False, quiet=False): +def check_solar_wind( + toas: pint.toa.TOAs, + dmx_ranges: list, + model: pint.models.timing_model.TimingModel, + max_delta_t: float = 0.1, + bin_width: float = 1.0, + solar_n0: float = 5.0, + allow_wideband: bool = True, + strict_inclusion: bool = True, + pad: float = 0.0, + check: bool = True, + return_only: bool = False, + quiet: bool = False +) -> list: """ Split DMX ranges based on influence of the solar wind. 
@@ -569,7 +607,7 @@ def check_solar_wind(toas, dmx_ranges, model, max_delta_t=0.1, bin_width=1.0, low_mjd, high_mjd = dmx_range[0], dmx_range[1] mask = get_dmx_mask(toas, low_mjd, high_mjd, strict_inclusion=strict_inclusion) - low_freq, high_freq = get_dmx_freqs(toas[mask], + low_freq, high_freq = get_dmx_freqs(toas, mask, allow_wideband=allow_wideband) # Convert to time delay, using calc from David's code (fixed) theta = np.pi - phis[mask] # rad @@ -583,7 +621,7 @@ def check_solar_wind(toas, dmx_ranges, model, max_delta_t=0.1, bin_width=1.0, toa_mask += mask dmx_range_mask[irange] = True if not quiet: - msg = f"DMX range with pythonic index {irange}, correponding to the DMX range {dmx_ranges[irange]}, contains TOAs that are affected by the solar wind (TOAs with MJDs {toas[mask].get_mjds().value})." + msg = f"DMX range with pythonic index {irange}, correponding to the DMX range {dmx_ranges[irange]}, contains TOAs that are affected by the solar wind (TOAs with MJDs {toas.get_mjds()[mask].value})." log.info(msg) nsolar = sum(dmx_range_mask) if not quiet and nsolar: @@ -608,7 +646,7 @@ def check_solar_wind(toas, dmx_ranges, model, max_delta_t=0.1, bin_width=1.0, return dmx_ranges -def add_dmx(model, bin_width=1.0): +def add_dmx(model: pint.models.timing_model.TimingModel, bin_width: float = 1.0) -> None: """ Checks for DispersionDMX and ensures the bin width is the only parameter. @@ -628,7 +666,7 @@ def add_dmx(model, bin_width=1.0): dmx.DMX.set(bin_width) -def model_dmx_params(model): +def model_dmx_params(model: pint.models.timing_model.TimingModel) -> Tuple[list, np.ndarray, np.ndarray]: """ Get DMX ranges, values, and uncertainties from a PINT model object. @@ -655,7 +693,7 @@ def model_dmx_params(model): return dmx_ranges, dmx_vals, dmx_errs -def remove_all_dmx_ranges(model, quiet=False): +def remove_all_dmx_ranges(model: pint.models.timing_model.TimingModel, quiet: bool = False) -> None: """ Uses PINT to remove all DMX parameter ranges from a timing model. 
@@ -675,8 +713,15 @@ def remove_all_dmx_ranges(model, quiet=False): pass -def setup_dmx(model, toas, quiet=True, frequency_ratio=1.1, max_delta_t=0.1, - freeze_DM=True): +def setup_dmx( + model: pint.models.timing_model.TimingModel, + toas: pint.toa.TOAs, + quiet: bool = True, + frequency_ratio: float = 1.1, + max_delta_t: float = 0.1, + bin_width: Optional[float] = None, + freeze_DM: bool = True +) -> pint.toa.TOAs: """ Sets up and checks a DMX model using a number of defaults. @@ -688,6 +733,7 @@ def setup_dmx(model, toas, quiet=True, frequency_ratio=1.1, max_delta_t=0.1, the frequencies used are returned by get_dmx_freqs(). max_delta_t is the time delay [us] above which a DMX range will be split. quiet=True turns off some of the logged warnings and info. + bin_width=constant bin width if provided, otherwise use observatory defaults if None freeze_DM=True ensures the mean DM parameter is not fit. """ @@ -714,8 +760,12 @@ def setup_dmx(model, toas, quiet=True, frequency_ratio=1.1, max_delta_t=0.1, adjust_old_dmx = False # Set up DMX model - if toas.observatories == set(['arecibo']): bin_width = 0.5 # day - else: bin_width = 6.5 #day + if bin_width is None: #use observatory defaults + if toas.observatories == set(['arecibo']): + bin_width = 0.5 # day + else: + bin_width = 6.5 #day + # Calculate GASP-era ranges, if applicable dmx_ranges = get_gasp_dmx_ranges(toas, group_width=0.1, bin_width=15.0, pad=0.05, check=False) @@ -813,9 +863,17 @@ def setup_dmx(model, toas, quiet=True, frequency_ratio=1.1, max_delta_t=0.1, return toas -def make_dmx(toas, dmx_ranges, dmx_vals=None, dmx_errs=None, - strict_inclusion=True, weighted_average=True, allow_wideband=True, - start_idx=1, print_dmx=False): +def make_dmx( + toas: pint.toa.TOAs, + dmx_ranges: list, + dmx_vals: Optional[np.ndarray] = None, + dmx_errs: Optional[np.ndarray] = None, + strict_inclusion: bool = True, + weighted_average: bool = True, + allow_wideband: bool = True, + start_idx: int = 1, + print_dmx: bool = 
False +): """ Uses convenience functions to assemble a TEMPO-style DMX parameters. @@ -848,7 +906,7 @@ def make_dmx(toas, dmx_ranges, dmx_vals=None, dmx_errs=None, high_mjd = max(dmx_ranges[irange]) mask = get_dmx_mask(toas, low_mjd, high_mjd, strict_inclusion) epoch = get_dmx_epoch(toas[mask], weighted_average) - low_freq, high_freq = get_dmx_freqs(toas[mask], allow_wideband) + low_freq, high_freq = get_dmx_freqs(toas, mask, allow_wideband) dmx_parameter = DMXParameter() dmx_parameter.idx = idx dmx_parameter.val = dmx_vals[irange] diff --git a/src/pint_pal/lite_utils.py b/src/pint_pal/lite_utils.py index 96e8c884..bd549d88 100644 --- a/src/pint_pal/lite_utils.py +++ b/src/pint_pal/lite_utils.py @@ -410,7 +410,7 @@ def add_feJumps(mo,rcvrs): if len(missing_fe_jumps) > 1: for j in missing_fe_jumps[:-1]: log.info(f"Adding frontend JUMP {j}") - JUMPn = maskParameter('JUMP',key='-fe',key_value=[j],value=0.0,units=u.second) + JUMPn = maskParameter('JUMP',key='-fe',key_value=[j],value=0.0,units=u.second,convert_tcb2tdb=False) phasejump.add_param(JUMPn,setup=True) def add_feDMJumps(mo,rcvrs): @@ -447,7 +447,7 @@ def add_feDMJumps(mo,rcvrs): if len(missing_fe_dmjumps): for j in missing_fe_dmjumps: log.info(f"Adding frontend DMJUMP {j}") - DMJUMPn = maskParameter('DMJUMP',key='-fe',key_value=[j],value=0.0,units=u.pc*u.cm**-3) + DMJUMPn = maskParameter('DMJUMP',key='-fe',key_value=[j],value=0.0,units=u.pc*u.cm**-3,convert_tcb2tdb=False) dmjump.add_param(DMJUMPn,setup=True) def get_flag_val_list(toas, flag): @@ -516,7 +516,7 @@ def add_flag_jumps(mo,flag,flaglist,base=False): if len(missing_jumps) > 1: for j in missing_jumps[:-1]: log.info(f"Adding frontend JUMP {j}") - JUMPn = maskParameter('JUMP',key=flagval,key_value=[j],value=0.0,units=u.second) + JUMPn = maskParameter('JUMP',key=flagval,key_value=[j],value=0.0,units=u.second,convert_tcb2tdb=False) phasejump.add_param(JUMPn,setup=True) else: if len(missing_jumps): @@ -529,7 +529,7 @@ def 
add_flag_jumps(mo,flag,flaglist,base=False): if len(missing_jumps) >= 1: for j in missing_jumps[:-1]: log.info(f"Adding frontend JUMP {j}") - JUMPn = maskParameter('JUMP',key=flagval,key_value=[j],value=0.0,units=u.second) + JUMPn = maskParameter('JUMP',key=flagval,key_value=[j],value=0.0,units=u.second,convert_tcb2tdb=False) phasejump.add_param(JUMPn,setup=True) def large_residuals(fo,threshold_us,threshold_dm=None,*,n_sigma=None,max_sigma=None,prefit=False,ignore_ASP_dms=True,print_bad=True, check_jumps=False): diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 930747b8..2233e6f1 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -1,260 +1,594 @@ -import numpy as np, os +import numpy as np, os, json, itertools, time from astropy import log from astropy.time import Time from enterprise.pulsar import Pulsar -from enterprise_extensions import models, model_utils, sampler +from enterprise_extensions import models, model_utils +from enterprise_extensions import sampler as ee_sampler import corner import pint.models as pm from pint.models.parameter import maskParameter +from pint.models.timing_model import Component -import matplotlib as mpl import matplotlib.pyplot as pl -#Imports necessary for e_e noise modeling functions -import functools -from collections import OrderedDict - -from enterprise.signals import parameter -from enterprise.signals import selections -from enterprise.signals import signal_base -from enterprise.signals import white_signals -from enterprise.signals import gp_signals -from enterprise.signals import deterministic_signals -from enterprise import constants as const +import la_forge.core as co +from enterprise_extensions.sampler import group_from_params, get_parameter_groups from enterprise_extensions import model_utils -from enterprise_extensions import deterministic -from enterprise_extensions.timing import timing_block -#from enterprise_extensions.blocks import (white_noise_block, 
red_noise_block) - -import types +from enterprise_extensions.empirical_distr import (EmpiricalDistribution1D, + EmpiricalDistribution2D) -from enterprise.signals import utils -from enterprise.signals import gp_priors as gpp -def analyze_noise(chaindir = './noise_run_chains/', burn_frac = 0.25, save_corner = True, no_corner_plot = False, chaindir_compare=None): +def setup_sampling_groups(pta, + write_groups=True, + outdir='./'): + """ + Sets sampling groups for PTMCMCSampler. + The sampling groups can help ensure the sampler does not get stuck. + The idea is to group parameters which are more highly correlated. + + Params + ------ + pta: the enterprise pta object + write_groups: bool, write the groups to a file + outdir: str, directory to write the groups to + + returns + ------- + groups: list of lists of indices corresponding to parameter groups + + """ + + # groups + pnames = pta.param_names + groups = get_parameter_groups(pta) + # add per-backend white noise + backends = np.unique([p[p.index('_')+1:p.index('efac')-1] for p in pnames if 'efac' in p]) + for be in backends: + groups.append(group_from_params(pta,[be])) + # group red noise parameters + exclude = ['linear_timing_model','sw_r2','sw_4p39','measurement_noise', + 'ecorr_sherman-morrison', 'ecorr_fast-sherman-morrison'] + red_signals = [p[p.index('_')+1:] for p in list(pta.signals.keys()) + if not p[p.index('_')+1:] in exclude] + rn_ct = 0 + for rs in red_signals: + if len(group_from_params(pta,[rs])) > 0: + rn_ct += 1 + groups.append(group_from_params(pta,[rs])) + if rn_ct > 1: + groups.append(group_from_params(pta,red_signals)) + # add cross chromatic groups + if 'n_earth' in pnames or 'log10_sigma_ne' in pnames: + # cross SW and chrom groups + dmgp_sw = [idx for idx, nm in enumerate(pnames) + if any([flag in nm for flag in ['dm_gp','n_earth', 'log10_sigma_ne']])] + groups.append(dmgp_sw) + if np.any(['chrom' in param for param in pnames]): + chromgp_sw = [idx for idx, nm in enumerate(pnames) + if 
any([flag in nm for flag in ['chrom_gp','n_earth', 'log10_sigma_ne']])] + dmgp_chromgp_sw = [idx for idx, nm in enumerate(pnames) + if any([flag in nm for flag in ['dm_gp','chrom','n_earth', 'log10_sigma_ne']])] + groups.append(chromgp_sw) + groups.append(dmgp_chromgp_sw) + if np.any(['chrom' in param for param in pnames]): + # cross dmgp and chromgp group + dmgp_chromgp = [idx for idx, nm in enumerate(pnames) + if any([flag in nm for flag in ['dm_gp','chrom']])] + groups.append(dmgp_chromgp) + # everything + groups.append([i for i in range(len(pnames))]) + # save list of params corresponding to groups + if write_groups is True: + with open(f'{outdir}/groups.txt', 'w') as fi: + for group in groups: + line = np.array(pnames)[np.array(group)] + fi.write("[" + " ".join(line) + "]\n") + # return the groups to be passed to the sampler + return groups + + +def get_mean_large_likelihoods(core, N=10): + ''' + Calculate the mean of the top N likelihood samples from the chain. + This is an alternate to fixing the noise values in the timing model to + the MAP or the median. 
def get_mean_large_likelihoods(core, N=50):
    '''
    Average the N highest-likelihood samples of a chain.

    Params
    ======
    core: la_forge.core object
    N: int, number of top likelihood samples to average

    Returns
    =======
    dict mapping parameter name -> mean of that parameter over the top N
    likelihood samples
    '''
    chain = core.chain[core.burn:, :]
    lnlike_idx = core.params.index('lnlike')
    # sort samples by descending likelihood, then average the top N rows
    sorted_data = chain[chain[:, lnlike_idx].argsort()[::-1]]
    vals = np.mean(sorted_data[:N, :], axis=0)
    return {par: vals[p] for p, par in enumerate(core.params)}


def analyze_noise(
    chaindir="./noise_run_chains/",
    use_noise_point='mean_large_likelihood',
    likelihoods_to_average=50,
    burn_frac=0.25,
    save_corner=True,
    no_corner_plot=False,
    chaindir_compare=None,
    model_kwargs=None,
    sampler_kwargs=None,
):
    """
    Read a noise-run chain; produce and save posterior plots; return the chain
    core, a representative noise dictionary, and the RN Savage-Dickey BF.

    Parameters
    ==========
    chaindir: path to noise run chain; Default: './noise_run_chains/'
    use_noise_point: point estimate used for the returned noise dictionary;
        one of 'MAP', 'median', 'mean_large_likelihood'.
        Note that the MAP is the same as the maximum likelihood value when all
        the priors are uniform.
    likelihoods_to_average: number of top likelihood samples to average;
        only applicable if use_noise_point is 'mean_large_likelihood'; Default: 50
    burn_frac: fraction of chain to use for burn-in; Default: 0.25
    save_corner: Flag to toggle saving of corner plots; Default: True
    no_corner_plot: plot per-parameter 1D posteriors instead of a corner plot; Default: False
    chaindir_compare: path to noise run chain wish to plot in corner plot for comparison; Default: None
    model_kwargs, sampler_kwargs: overrides merged onto the defaults from
        get_model_and_sampler_default_settings(); Default: None (use defaults)

    Returns
    =======
    noise_core: la_forge.core object which contains noise chains and run metadata
    noise_dict: Dictionary of noise values at the chosen point estimate
    rn_bf: Savage-Dickey BF for achromatic RN for given pulsar

    Raises
    ======
    ValueError: if the chain directory cannot be loaded, or use_noise_point is
        not a recognized option.
    """
    # get the default settings and merge in the caller's overrides
    # (None defaults avoid the shared-mutable-default-argument pitfall)
    model_defaults, sampler_defaults = get_model_and_sampler_default_settings()
    model_defaults.update(model_kwargs or {})
    sampler_defaults.update(sampler_kwargs or {})
    sampler_kwargs = sampler_defaults.copy()
    sampler = sampler_kwargs['sampler']

    try:
        noise_core = co.Core(chaindir=chaindir)
    except Exception:
        if os.path.isfile(chaindir):
            log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct. " \
                +"Also make sure you have an up-to-date la_forge installation. ")
            raise ValueError(f"Could not load noise run from {chaindir}. Check path and la_forge installation.")
        else:
            log.error(f"No noise runs found in {chaindir}. Make sure the path is correct.")
            raise ValueError(f"Could not load noise run from {chaindir}. Check path.")

    # burn-in handling is currently identical for every supported sampler
    noise_core.set_burn(burn_frac)
    # last four columns are sampler diagnostics, not model parameters
    chain = noise_core.chain[int(burn_frac * len(noise_core.chain)):, :-4]
    psr_name = noise_core.params[0].split("_")[0]
    diagnostics = ['lnlike', 'lnpost', 'chain_accept', 'pt_chain_accept']
    pars = np.array([p for p in noise_core.params if p not in diagnostics])

    # load same for comparison noise model
    compare_core = None
    chain_compare = None
    pars_compare = None
    if chaindir_compare is not None:
        # FIX: load the comparison run from chaindir_compare (was chaindir) and
        # trim it by its own length (was noise_core's length)
        compare_core = co.Core(chaindir=chaindir_compare)
        compare_core.set_burn(noise_core.burn)
        chain_compare = compare_core.chain[int(burn_frac * len(compare_core.chain)):, :-4]
        pars_compare = np.array([p for p in compare_core.params if p not in diagnostics])
        psr_name_compare = pars_compare[0].split("_")[0]
        if psr_name_compare != psr_name:
            log.warning(
                f"Pulsar name from {chaindir_compare} does not match. Will not plot comparison"
            )
            chaindir_compare = None

    if save_corner and not no_corner_plot:
        pars_short = [p.split("_", 1)[1] for p in pars]
        log.info(f"Chain parameter names are {pars_short}")
        log.info(f"Chain parameter convention: {test_equad_convention(pars_short)}")
        fig = None
        if chaindir_compare is not None:
            # need to plot comparison corner plot first so it's underneath
            compare_pars_short = [p.split("_", 1)[1] for p in pars_compare]
            log.info(f"Comparison chain parameter names are {compare_pars_short}")
            log.info(
                f"Comparison chain parameter convention: {test_equad_convention(compare_pars_short)}"
            )
            # don't plot comparison if the parameter names don't match
            if compare_pars_short != pars_short:
                log.warning(
                    "Parameter names for comparison noise chains do not match, not plotting the compare-noise-dir chains"
                )
                chaindir_compare = None
            else:
                # reweight so both chains integrate to the same total
                normalization_factor = (
                    np.ones(len(chain_compare)) * len(chain) / len(chain_compare)
                )
                fig = corner.corner(
                    chain_compare,
                    color="orange",
                    alpha=0.5,
                    weights=normalization_factor,
                    labels=compare_pars_short,
                )
        # FIX: only overlay on fig when the comparison plot was actually made,
        # avoiding an unbound `fig` when the comparison was dropped above
        if fig is not None:
            corner.corner(chain, fig=fig, color="black", labels=pars_short)
        else:
            corner.corner(chain, labels=pars_short)

        if "_wb" in chaindir:
            figname = f"./{psr_name}_noise_corner_wb.pdf"
        elif "_nb" in chaindir:
            figname = f"./{psr_name}_noise_corner_nb.pdf"
        else:
            figname = f"./{psr_name}_noise_corner.pdf"
        pl.savefig(figname)
        pl.savefig(figname.replace(".pdf", ".png"), dpi=300)
        pl.show()

    if no_corner_plot:
        if "_wb" in chaindir:
            figbase = f"./{psr_name}_noise_posterior_wb"
        elif "_nb" in chaindir:
            figbase = f"./{psr_name}_noise_posterior_nb"
        else:
            figbase = f"./{psr_name}_noise_posterior"

        pars_short = [p.split("_", 1)[1] for p in pars]
        log.info(f"Chain parameter names are {pars_short}")
        log.info(f"Chain parameter convention: {test_equad_convention(pars_short)}")
        if chaindir_compare is not None:
            compare_pars_short = [p.split("_", 1)[1] for p in pars_compare]
            log.info(f"Comparison chain parameter names are {compare_pars_short}")
            log.info(
                f"Comparison chain parameter convention: {test_equad_convention(compare_pars_short)}"
            )
            # don't plot comparison if the parameter names don't match
            if compare_pars_short != pars_short:
                log.warning(
                    "Parameter names for comparison noise chains do not match, not plotting the compare-noise-dir chains"
                )
                chaindir_compare = None

        ncols = 4 if "_wb" in chaindir else 3  # number of columns per page
        nrows = 5  # number of rows per page

        mp_idx = noise_core.map_idx
        param_medians = [
            noise_core.get_param_median(p)
            for p in noise_core.params
            if p not in ['lnlike', 'lnpost']
        ]
        if chaindir_compare is not None:
            mp_compare_idx = compare_core.map_idx

        # label for the chosen point estimate
        # FIX: was hard-coded to "mean of 50 MLVs" regardless of likelihoods_to_average
        if use_noise_point == 'mean_large_likelihood':
            point_lbl = f"mean of {likelihoods_to_average} MLVs"
        elif use_noise_point == 'MAP':
            point_lbl = "MAP"
        else:
            point_lbl = "median"

        nbins = 20
        pp = 0
        for idx, par in enumerate(pars_short):
            j = idx % (nrows * ncols)
            if j == 0:
                pp += 1
                fig = pl.figure(figsize=(8, 11))

            ax = fig.add_subplot(nrows, ncols, j + 1)
            ax.hist(
                chain[:, idx],
                bins=nbins,
                histtype="step",
                color="black",
                label="Current",
            )
            ax.axvline(chain[:, idx][mp_idx], ls="--", color="black", label="MAP")
            # NOTE(review): this line marks the median even when another point
            # estimate is selected (only the label changes) — confirm intent
            ax.axvline(param_medians[idx], ls="--", color="green", label=point_lbl)
            if chaindir_compare is not None:
                ax.hist(
                    chain_compare[:, idx],
                    bins=nbins,
                    histtype="step",
                    color="orange",
                    label="Past",
                )
                ax.axvline(
                    chain_compare[:, idx][mp_compare_idx], ls="--", color="orange"
                )
            ax.set_xlabel(par, fontsize=8 if "_wb" in chaindir else 10)
            ax.set_yticks([])
            ax.set_yticklabels([])

            if j == (nrows * ncols) - 1 or idx == len(pars_short) - 1:
                pl.tight_layout()
                pl.savefig(f"{figbase}_{pp}.pdf")
                pl.legend(loc="best")
                pl.show()

    # choose the representative point estimate to return
    if use_noise_point == 'MAP':
        noise_dict = noise_core.get_map_dict()
    elif use_noise_point == 'median':
        # FIX: medians were previously only computed on the no_corner_plot
        # path, raising NameError here otherwise
        noise_dict = {
            p: noise_core.get_param_median(p)
            for p in noise_core.params
            if p not in ['lnlike', 'lnpost']
        }
    elif use_noise_point == 'mean_large_likelihood':
        noise_dict = get_mean_large_likelihoods(noise_core, N=likelihoods_to_average)
    else:
        log.error(f"Invalid noise point {use_noise_point}. Must be 'MAP' or 'median' ")
        raise ValueError(f"Invalid noise point {use_noise_point}. Must be 'MAP' or 'median' or 'mean_large_likelihood' ")

    # Print bayes factor for red noise in pulsar
    rn_amp_nm = psr_name + "_red_noise_log10_A"
    rn_bf = model_utils.bayes_fac(noise_core(rn_amp_nm), ntol=1, logAmax=-11, logAmin=-20)[0]

    return noise_core, noise_dict, rn_bf
def model_noise(
    mo,
    to,
    using_wideband=False,
    resume=False,
    run_noise_analysis=True,
    wb_efac_sigma=0.25,
    base_op_dir="./",
    model_kwargs=None,
    sampler_kwargs=None,
    return_sampler_without_sampling=False,
):
    """
    Setup enterprise or discovery likelihood and perform Bayesian inference on noise model

    Parameters
    ==========
    mo: PINT (or tempo2) timing model
    to: PINT (or tempo2) TOAs
    using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False
    resume: Flag to resume overwrite previous run or not.
    run_noise_analysis: Flag to toggle execution of noise modeling; Default: True
    wb_efac_sigma: width of the DMEFAC prior used in the wideband setup; Default: 0.25
    base_op_dir: directory in which the per-pulsar chain directory is created; Default: "./"
    model_kwargs: dictionary of noise model parameters; Default: None (use defaults)
    sampler_kwargs: dictionary of sampler parameters; Default: None (use defaults)
    return_sampler_without_sampling: Flag to return the sampler object without sampling; Default: False

    Recommended to pass model_kwargs and sampler_kwargs from the config file.
    Default kwargs given by function `get_model_and_sampler_default_settings`.
    Important configuration parameters:
        likelihood: choose from ['enterprise', 'discovery']
            enterprise -- enterprise likelihood
            discovery -- various numpyro samplers with a discovery likelihood
        sampler: for enterprise choose from ['PTMCMCSampler', 'GibbsSampler']
            for discovery choose from ['HMC', 'NUTS', 'HMC-GIBBS']

    Returns
    =======
    None or
    samp: sampler object (when return_sampler_without_sampling is True)

    Raises
    ======
    NotImplementedError: for the GibbsSampler / discovery combinations.
    ValueError: for an unrecognized likelihood/sampler combination.
    """
    # get the default settings and merge in the caller's overrides
    # (None defaults avoid the shared-mutable-default-argument pitfall)
    model_defaults, sampler_defaults = get_model_and_sampler_default_settings()
    model_defaults.update(model_kwargs or {})
    sampler_defaults.update(sampler_kwargs or {})
    model_kwargs = model_defaults.copy()
    sampler_kwargs = sampler_defaults.copy()
    likelihood = sampler_kwargs['likelihood']
    sampler = sampler_kwargs['sampler']

    if not using_wideband:
        outdir = base_op_dir + mo.PSR.value + "_nb/"
    else:
        outdir = base_op_dir + mo.PSR.value + "_wb/"

    if os.path.exists(outdir) and run_noise_analysis and not resume:
        log.warning(
            f"A noise directory for pulsar {mo.PSR.value} already exists! "
            "Please rename the existing directory or specify a new location with "
            "base_op_dir. If you're trying to resume noise modeling, use "
            "resume=True with the existing directory. Skipping noise analysis."
        )
        return None
    elif os.path.exists(outdir) and run_noise_analysis and resume:
        log.info(
            "A noise directory for pulsar {} already exists! Re-running noise modeling starting from previous chain".format(
                mo.PSR.value
            )
        )

    if not run_noise_analysis:
        log.info(
            "Skipping noise modeling. Change run_noise_analysis = True to run noise modeling."
        )
        return None

    # Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to)
    log.info(f"Creating enterprise.Pulsar object from model with {mo.NTOA.value} toas...")
    e_psr = Pulsar(mo, to)

    ##########################################################
    ################   PTMCMCSampler   #######################
    ##########################################################
    if likelihood == "enterprise" and sampler == 'PTMCMCSampler':
        log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}")
        # Ensure n_iter is an integer (configs may supply it as a float or string)
        sampler_kwargs['n_iter'] = int(float(sampler_kwargs['n_iter']))
        if sampler_kwargs['n_iter'] < 1e4:
            log.warning(
                f"Such a small number of iterations with {sampler} is unlikely to yield accurate posteriors. STRONGLY recommend increasing the number of iterations to at least 5e4"
            )
        # Setup a single pulsar PTA using enterprise_extensions
        if not using_wideband:
            pta = models.model_singlepsr_noise(
                e_psr,
                white_vary=True,
                red_var=model_kwargs['inc_rn'],  # defaults True
                is_wideband=False,
                use_dmdata=False,
                dmjump_var=False,
                wb_efac_sigma=wb_efac_sigma,
                tm_svd=True,
            )
            pta.set_default_params({})
        else:
            pta = models.model_singlepsr_noise(
                e_psr,
                is_wideband=True,
                use_dmdata=True,
                white_vary=True,
                red_var=model_kwargs['inc_rn'],
                dmjump_var=False,
                wb_efac_sigma=wb_efac_sigma,
                ng_twg_setup=True,
            )
            # fix the DMJUMP parameters to the values in the timing model
            dmjump_params = {}
            for param in mo.params:
                if param.startswith("DMJUMP"):
                    dmjump_param = getattr(mo, param)
                    dmjump_param_name = (
                        f"{pta.pulsars[0]}_{dmjump_param.key_value[0]}_dmjump"
                    )
                    dmjump_params[dmjump_param_name] = dmjump_param.value
            pta.set_default_params(dmjump_params)

        # set sampling groups here
        groups = setup_sampling_groups(pta, write_groups=False, outdir=outdir)

        # optionally build empirical distributions from a previous run
        # FIX: initialize emp_dist up front so it can never be unbound below
        emp_dist = None
        if sampler_kwargs['emp_distribution'] is not None:
            try:
                log.info(f"Attempting to load chains for an empirical distributions from {sampler_kwargs['emp_distribution']}")
                core = co.Core(chaindir=sampler_kwargs['emp_distribution'])
            except Exception:
                log.warning(f"Failed to load chains for empirical distributions from {sampler_kwargs['emp_distribution']}.\nCheck path. Need absolute path to chain directory with `pars.txt` and `chain_1.txt`. files")
                core = None
            if core is not None:
                try:
                    emp_dist = make_emp_distr(core)
                    log.info(f"Successfully created empirical distributions !!")
                except Exception:
                    log.warning(f"Failed to create empirical distributions from successfully loaded directory.")
                    emp_dist = None
            log.info("Setting up sampler ...")
        else:
            log.warning("Setting up sampler without empirical distributions... Consider adding one for faster sampling by adding `emp_distribution`: //_nb to the `noise_run`->`inference` section of the config file.")

        # setup sampler using enterprise_extensions
        samp = ee_sampler.setup_sampler(
            pta,
            outdir=outdir,
            resume=resume,
            groups=groups,
            empirical_distr=emp_dist,
        )
        if emp_dist is not None:
            try:
                samp.addProposalToCycle(samp.jp.draw_from_empirical_distr, 50)
            except Exception:
                log.warning("Failed to add draws from empirical distribution.")

        # Initial sample: try to initialize from the maximum likelihood value
        # of a previous run; falls back to a random point for missing params
        x0 = get_init_sample_from_chain_path(pta, chaindir=sampler_kwargs['emp_distribution'])
        try:
            log_single_likelihood_evaluation_time(pta, sampler_kwargs)
        except Exception:
            log.warning("Failed to time likelihood.")

        if not return_sampler_without_sampling:
            log.info("Beginning to sample...")
            samp.sample(
                x0, sampler_kwargs['n_iter'], SCAMweight=30, AMweight=15, DEweight=50,
            )
            log.info("Finished sampling.")
    elif likelihood == "enterprise" and sampler == 'GibbsSampler':
        log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}")
        raise NotImplementedError("GibbsSampler not yet implemented for enterprise likelihood")
    elif likelihood == "discovery":
        log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}")
        raise NotImplementedError("Discovery likelihood not yet implemented")
    else:
        log.error(
            f"Invalid likelihood ({likelihood}) and sampler ({sampler}) combination." \
            + "\nCan only use enterprise with PTMCMCSampler or GibbsSampler."
        )
        # FIX: was a silent fall-through that produced a NameError on `samp` below
        raise ValueError(
            f"Invalid likelihood ({likelihood}) and sampler ({sampler}) combination."
        )

    if return_sampler_without_sampling:
        return samp


def convert_to_RNAMP(value):
    """
    Utility function to convert enterprise RN amplitude (log10 A) to
    tempo2/PINT parfile RN amplitude.
    """
    return (86400.0 * 365.24 * 1e6) / (2.0 * np.pi * np.sqrt(3.0)) * 10**value
def add_noise_to_model(
    model,
    use_noise_point='mean_large_likelihood',
    burn_frac=0.25,
    save_corner=True,
    no_corner_plot=False,
    ignore_red_noise=False,
    using_wideband=False,
    rn_bf_thres=1e2,
    base_dir=None,
    compare_dir=None,
    return_noise_core=False,
    likelihoods_to_average=50,
):
    """
    Add WN, RN, DMGP, ChromGP, and SW parameters to timing model.

    Parameters
    ==========
    model: PINT (or tempo2) timing model
    use_noise_point: point to use for noise analysis; Default: 'mean_large_likelihood'.
        Options: 'MAP', 'median', 'mean_large_likelihood'
        Note that the MAP is the same as the maximum likelihood value when all
        the priors are uniform. Mean large likelihood takes N of the largest
        likelihood values and then takes the mean of those. (Recommended).
    burn_frac: fraction of chain to use for burn-in; Default: 0.25
    save_corner: Flag to toggle saving of corner plots; Default: True
    no_corner_plot: plot 1D posteriors instead of a corner plot; Default: False
    ignore_red_noise: Flag to manually force RN exclusion from timing model.
    using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False
    rn_bf_thres: Savage-Dickey BF above which RN is included; Default: 1e2
    base_dir: directory containing {psr}_nb and {psr}_wb chains directories;
        if None, will check for results in the current working directory './'.
    compare_dir: directory with a comparison run to overplot; Default: None
    return_noise_core: Flag to return the la_forge.core object; Default: False
    likelihoods_to_average: number of top likelihood samples to average when
        use_noise_point == 'mean_large_likelihood'; Default: 50 (was hard-coded)

    Returns
    =======
    model: New timing model which includes WN and RN (and potentially dmgp,
        chrom_gp, and solar wind) parameters
    (optional, when return_noise_core is True)
    noise_core: la_forge.core object which contains noise chains and run metadata
    """
    # Assume results are in current working directory if not specified
    if not base_dir:
        base_dir = "./"

    chaindir_compare = compare_dir
    if not using_wideband:
        chaindir = os.path.join(base_dir, f"{model.PSR.value}_nb/")
        if compare_dir is not None:
            chaindir_compare = os.path.join(compare_dir, f"{model.PSR.value}_nb/")
    else:
        chaindir = os.path.join(base_dir, f"{model.PSR.value}_wb/")
        if compare_dir is not None:
            chaindir_compare = os.path.join(compare_dir, f"{model.PSR.value}_wb/")

    log.info(f"Using existing noise analysis results in {chaindir}")
    log.info("Adding new noise parameters to model.")
    if use_noise_point == 'mean_large_likelihood':
        log.info(f"Using mean of top {likelihoods_to_average} likelihood samples for noise parameters.")
    elif use_noise_point == 'MAP':
        log.info("Using maximum a posteriori values for noise parameters.")
    elif use_noise_point == 'median':
        log.info("Using median values for noise parameters.")

    noise_core, noise_dict, rn_bf = analyze_noise(
        chaindir=chaindir,
        use_noise_point=use_noise_point,
        likelihoods_to_average=likelihoods_to_average,
        burn_frac=burn_frac,
        save_corner=save_corner,
        no_corner_plot=no_corner_plot,
        chaindir_compare=chaindir_compare,
    )

    # record when the chains were produced; PTMCMCSampler writes chain_1.txt,
    # other backends write chain.nc
    chainfile = chaindir + "chain_1.txt"
    try:
        mtime = Time(os.path.getmtime(chainfile), format="unix")
    except OSError:
        chainfile = chaindir + "chain.nc"
        mtime = Time(os.path.getmtime(chainfile), format="unix")
    log.info(f"Noise chains loaded from {chainfile} created at {mtime.isot}")

    # Create the maskParameters for white-noise terms
    efac_params = []
    equad_params = []
    ecorr_params = []
    dmefac_params = []
    dmequad_params = []

    efac_idx = 1
    equad_idx = 1
    ecorr_idx = 1
    dmefac_idx = 1
    dmequad_idx = 1

    psr_name = list(noise_dict.keys())[0].split("_")[0]
    noise_pars = np.array(list(noise_dict.keys()))
    wn_dict = {
        key: val
        for key, val in noise_dict.items()
        if "efac" in key or "equad" in key or "ecorr" in key
    }
    for key, val in wn_dict.items():

        if "_efac" in key:

            param_name = key.split("_efac")[0].split(psr_name)[1][1:]

            tp = maskParameter(
                name="EFAC",
                index=efac_idx,
                key="-f",
                key_value=param_name,
                value=val,
                units="",
                convert_tcb2tdb=False,
            )
            efac_params.append(tp)
            efac_idx += 1

        # See https://github.com/nanograv/enterprise/releases/tag/v3.3.0
        # ..._t2equad uses PINT/Tempo2/Tempo convention, resulting in total variance EFAC^2 x (toaerr^2 + EQUAD^2)
        elif "_t2equad" in key:

            param_name = (
                key.split("_t2equad")[0].split(psr_name)[1].split("_log10")[0][1:]
            )

            tp = maskParameter(
                name="EQUAD",
                index=equad_idx,
                key="-f",
                key_value=param_name,
                value=10**val / 1e-6,
                units="us",
                convert_tcb2tdb=False,
            )
            equad_params.append(tp)
            equad_idx += 1

        # ..._tnequad uses temponest convention, resulting in total variance EFAC^2 toaerr^2 + EQUAD^2
        elif "_tnequad" in key:

            param_name = (
                key.split("_tnequad")[0].split(psr_name)[1].split("_log10")[0][1:]
            )

            tp = maskParameter(
                name="EQUAD",
                index=equad_idx,
                key="-f",
                key_value=param_name,
                value=10**val / 1e-6,
                units="us",
                convert_tcb2tdb=False,
            )
            equad_params.append(tp)
            equad_idx += 1

        # ..._equad uses temponest convention; generated with enterprise pre-v3.3.0
        elif "_equad" in key:

            param_name = (
                key.split("_equad")[0].split(psr_name)[1].split("_log10")[0][1:]
            )

            tp = maskParameter(
                name="EQUAD",
                index=equad_idx,
                key="-f",
                key_value=param_name,
                value=10**val / 1e-6,
                units="us",
                convert_tcb2tdb=False,
            )
            equad_params.append(tp)
            equad_idx += 1

        elif ("_ecorr" in key) and (not using_wideband):

            param_name = (
                key.split("_ecorr")[0].split(psr_name)[1].split("_log10")[0][1:]
            )

            tp = maskParameter(
                name="ECORR",
                index=ecorr_idx,
                key="-f",
                key_value=param_name,
                value=10**val / 1e-6,
                units="us",
                convert_tcb2tdb=False,
            )
            ecorr_params.append(tp)
            ecorr_idx += 1

        elif ("_dmefac" in key) and (using_wideband):

            param_name = key.split("_dmefac")[0].split(psr_name)[1][1:]

            tp = maskParameter(
                name="DMEFAC",
                index=dmefac_idx,
                key="-f",
                key_value=param_name,
                value=val,
                units="",
                convert_tcb2tdb=False,
            )
            dmefac_params.append(tp)
            dmefac_idx += 1

        elif ("_dmequad" in key) and (using_wideband):

            param_name = (
                key.split("_dmequad")[0].split(psr_name)[1].split("_log10")[0][1:]
            )

            tp = maskParameter(
                name="DMEQUAD",
                index=dmequad_idx,
                key="-f",
                key_value=param_name,
                value=10**val,
                units="pc/cm3",
                convert_tcb2tdb=False,
            )
            dmequad_params.append(tp)
            dmequad_idx += 1

    # Test EQUAD convention and decide whether to convert
    convert_equad_to_t2 = False
    if test_equad_convention(noise_dict.keys()) == "tnequad":
        log.info(
            "WN paramaters use temponest convention; EQUAD values will be converted once added to model"
        )
        convert_equad_to_t2 = True
        if np.any(["_equad" in p for p in noise_dict.keys()]):
            log.info("WN parameters generated using enterprise pre-v3.3.0")
    elif test_equad_convention(noise_dict.keys()) == "t2equad":
        log.info("WN parameters use T2 convention; no conversion necessary")

    # Create white noise components and add them to the model
    ef_eq_comp = pm.ScaleToaError()
    ef_eq_comp.remove_param(param="EFAC1")
    ef_eq_comp.remove_param(param="EQUAD1")
    ef_eq_comp.remove_param(param="TNEQ1")
    for efac_param in efac_params:
        ef_eq_comp.add_param(param=efac_param, setup=True)
    for equad_param in equad_params:
        ef_eq_comp.add_param(param=equad_param, setup=True)
    model.add_component(ef_eq_comp, validate=True, force=True)

    if len(dmefac_params) > 0 or len(dmequad_params) > 0:
        dm_comp = pm.noise_model.ScaleDmError()
        dm_comp.remove_param(param="DMEFAC1")
        dm_comp.remove_param(param="DMEQUAD1")
        for dmefac_param in dmefac_params:
            dm_comp.add_param(param=dmefac_param, setup=True)
        for dmequad_param in dmequad_params:
            dm_comp.add_param(param=dmequad_param, setup=True)
        model.add_component(dm_comp, validate=True, force=True)

    if len(ecorr_params) > 0:
        ec_comp = pm.EcorrNoise()
        ec_comp.remove_param("ECORR1")
        for ecorr_param in ecorr_params:
            ec_comp.add_param(param=ecorr_param, setup=True)
        model.add_component(ec_comp, validate=True, force=True)

    # Create red noise component and add it to the model
    log.info(f"The SD Bayes factor for red noise in this pulsar is: {rn_bf}")
    if (rn_bf >= rn_bf_thres or np.isnan(rn_bf)) and (not ignore_red_noise):

        log.info("Including red noise for this pulsar")
        # Add the RN parameters at the chosen point estimate to their component
        rn_comp = pm.PLRedNoise()
        rn_comp.RNAMP.quantity = convert_to_RNAMP(
            noise_dict[psr_name + "_red_noise_log10_A"]
        )
        rn_comp.RNIDX.quantity = -1 * noise_dict[psr_name + "_red_noise_gamma"]
        # Add red noise to the timing model
        model.add_component(rn_comp, validate=True, force=True)
    else:
        log.info("Not including red noise for this pulsar")

    # Check to see if dm noise is present
    dm_pars = [key for key in noise_pars if "_dm_gp" in key]
    if len(dm_pars) > 0:
        ###### POWERLAW DM NOISE ######
        if f'{psr_name}_dm_gp_log10_A' in dm_pars:
            log.info('Adding Powerlaw DM GP noise as PLDMNoise to par file')
            dm_comp = pm.noise_model.PLDMNoise()
            dm_comp.TNDMAMP.quantity = convert_to_RNAMP(
                noise_dict[psr_name + "_dm_gp_log10_A"]
            )
            dm_comp.TNDMGAM.quantity = -1 * noise_dict[psr_name + "_dm_gp_gamma"]
            # FIX: attribute was misspelled "quantitity", silently setting a junk
            # attribute instead of the number of frequencies
            # FIXME: need to figure out some way to softcode this
            dm_comp.TNDMC.quantity = 100
            model.add_component(dm_comp, validate=True, force=True)
        ###### FREE SPECTRAL (WaveX) DM NOISE ######
        elif f'{psr_name}_dm_gp_log10_rho_0' in dm_pars:
            log.info('Adding Free Spectral DM GP as DMWaveXnoise to par file')
            raise NotImplementedError('DMWaveXNoise not yet implemented')

    # Check to see if higher order chromatic noise is present
    chrom_pars = [key for key in noise_pars if "_chrom_gp" in key]
    if len(chrom_pars) > 0:
        ###### POWERLAW CHROMATIC NOISE ######
        if f'{psr_name}_chrom_gp_log10_A' in chrom_pars:
            log.info('Adding Powerlaw CHROM GP noise as PLCMNoise to par file')
            chrom_comp = pm.noise_model.PLCMNoise()
            chrom_comp.TNCMAMP.quantity = convert_to_RNAMP(
                noise_dict[psr_name + "_chrom_gp_log10_A"]
            )
            chrom_comp.TNCMGAM.quantity = -1 * noise_dict[psr_name + "_chrom_gp_gamma"]
            # FIX: same "quantitity" misspelling as the DM GP branch
            # FIXME: need to figure out some way to softcode this
            chrom_comp.TNCMC.quantity = 100
            model.add_component(chrom_comp, validate=True, force=True)
        ###### FREE SPECTRAL (WaveX) CHROM NOISE ######
        elif f'{psr_name}_chrom_gp_log10_rho_0' in chrom_pars:
            log.info('Adding Free Spectral CHROM GP as CMWaveXnoise to par file')
            raise NotImplementedError('CMWaveXNoise not yet implemented')

    # Check to see if solar wind is present
    # NOTE(review): filtering on "sw_r2" while later testing for keys like
    # f'{psr_name}_n_earth' looks inconsistent — confirm chain naming convention
    sw_pars = [key for key in noise_pars if "sw_r2" in key]
    if len(sw_pars) > 0:
        log.info('Adding Solar Wind Dispersion to par file')
        all_components = Component.component_types
        noise_class = all_components["SolarWindDispersion"]
        noise = noise_class()  # Make the dispersion instance.
        model.add_component(noise, validate=False, force=False)
        # add parameters
        if f'{psr_name}_n_earth' in sw_pars:
            model['NE_SW'].quantity = noise_dict[f'{psr_name}_n_earth']
            model['NE_SW'].frozen = True
        if f'{psr_name}_sw_gp_log10_A' in sw_pars:
            sw_comp = pm.noise_model.PLSWNoise()
            sw_comp.TNSWAMP.quantity = convert_to_RNAMP(noise_dict[f'{psr_name}_sw_gp_log10_A'])
            sw_comp.TNSWAMP.frozen = True
            sw_comp.TNSWGAM.quantity = -1. * noise_dict[f'{psr_name}_sw_gp_gamma']
            sw_comp.TNSWGAM.frozen = True
            # FIXME: need to figure out some way to softcode this
            sw_comp.TNSWC.quantity = 10
            sw_comp.TNSWC.frozen = True
            model.add_component(sw_comp, validate=False, force=True)
        if f'{psr_name}_sw_gp_log10_rho' in sw_pars:
            raise NotImplementedError('Solar Wind Dispersion free spec GP not yet implemented')

    # Setup and validate the timing model to ensure things are correct
    model.setup()
    model.validate()
    model.meta['noise_mtime'] = mtime.isot

    if convert_equad_to_t2:
        from pint_pal.lite_utils import convert_enterprise_equads
        model = convert_enterprise_equads(model)

    if return_noise_core:
        return model, noise_core
    return model
convert_enterprise_equads(model) - return model + if not return_noise_core: + return model + if return_noise_core: + return model, noise_core + def test_equad_convention(pars_list): """ @@ -451,15 +947,163 @@ def test_equad_convention(pars_list): Returns ======= - convention_test: t2equad/tnequad/None + convention_test: t2equad/tnequad/None """ # Test equad convention - t2_test = np.any(['_t2equad' in p for p in pars_list]) - tn_test = np.any([('_tnequad' in p) or ('_equad' in p) for p in pars_list]) + t2_test = np.any(["_t2equad" in p for p in pars_list]) + tn_test = np.any([("_tnequad" in p) or ("_equad" in p) for p in pars_list]) if t2_test and not tn_test: - return 't2equad' + return "t2equad" elif tn_test and not t2_test: - return 'tnequad' + return "tnequad" else: - log.warning('EQUADs not present in parameter list (or something strange is going on).') + log.warning( + "EQUADs not present in parameter list (or something strange is going on)." + ) return None + + +def get_init_sample_from_chain_path(pta, chaindir=None, json_path=None): + """ + Get the initial sample from a chain directory or json file path. 
+ If parameters are missing, draw randomly from the prior + Parameters + ========== + pta: enterprise PTA object + chaindir: path to chain directory + json_path: path to json file containing starting point + Returns + ======= + x0: initial sample + """ + try: + if chaindir is not None: + log.info(f"Attempting to initialize sampler from MAP of chain directory {chaindir}") + core = co.Core(chaindir) + starting_point = core.get_map_dict() + x0_dict = {} + for prior, par_name in zip(pta.params, pta.param_names): + if par_name in starting_point.keys(): + x0_dict.update({par_name: starting_point[par_name]}) + else: + x0_dict.update({par_name: prior.sample()}) + x0 = np.hstack([x0_dict[p] for p in pta.param_names]) + elif json_path is not None: + with open(json_path, 'r') as fin: + starting_point = json.load(fin) + x0_dict = {} + for prior, par_name in zip(pta.params, pta.param_names): + if par_name in starting_point.keys(): + x0_dict.update({par_name: starting_point[par_name]}) + else: + x0_dict.update({par_name: prior.sample()}) + x0 = np.hstack([x0_dict[p] for p in pta.param_names]) + else: + x0 = np.hstack([p.sample() for p in pta.params]) + except: + x0 = np.hstack([p.sample() for p in pta.params]) + x0_dict = None + log.warning( + f"Unable to initialize sampler from chain directory or json file. Drawing random initial sample." + ) + return x0 + +def make1d(par, samples, bins=None, nbins=81): + if bins is None: + bins = np.linspace(min(samples), max(samples), nbins) + + return EmpiricalDistribution1D(par, samples, bins) + +def make2d(pars, samples, bins=None, nbins=81): + idx = [0,1] + if bins is None: + bins = [np.linspace(min(samples[:, i]), max(samples[:, i]), nbins) for i in idx] + return EmpiricalDistribution2D(pars, samples.T, bins) + +def make_emp_distr(core): + """ + Make empirical distributions for all parameters in core. 
def make_emp_distr(core):
    """
    Make empirical distributions for all parameters in core.

    Parameters
    ==========
    core: chain-core object (e.g. la_forge.core.Core); calling core(par)
        returns the samples for that parameter

    Returns
    =======
    dists: list of EmpiricalDistribution1D and EmpiricalDistribution2D objects
    """
    types = ['dm_gp', 'chrom_gp', 'red_noise', 'ecorr', 'chrom_s1yr', 'dm_s1yr', 'exp',]
    # 1-D histograms for everything except chrom_gp_idx, whose prior is weird.
    # NOTE(review): core.params[:-4] assumes the last four columns are
    # non-model bookkeeping entries (e.g. lnpost/lnlike/acceptance) -- confirm.
    dists = [make1d(par, core(par)) for par in core.params[:-4] if 'chrom_gp_idx' not in par]
    # Parameter list minus chrom_gp_idx, used to form the subgroups below.
    params = [p for p in core.params if 'chrom_gp_idx' not in p]
    groups = {ii: [par for par in params if ii in par] for ii in types}
    # 2-D distributions for every pair within each related parameter subgroup.
    for group in groups.values():
        if len(group) > 1:
            for pars in itertools.combinations(group, 2):
                dists.append(make2d(pars, core(list(pars))))
    # 2-D cross-group distributions: ecorr x dm_gp, then dm_gp x chrom_gp.
    for dm in groups['dm_gp']:
        for ecr in groups['ecorr']:
            dists.append(make2d([ecr, dm], core([ecr, dm])))
    for chrom in groups['chrom_gp']:
        for dm in groups['dm_gp']:
            dists.append(make2d([dm, chrom], core([dm, chrom])))

    return dists


def log_single_likelihood_evaluation_time(pta, sampler_kwargs):
    """
    Log the time it takes to evaluate the likelihood once.

    Draws 11 prior samples, uses the first as a warm-up call, times the
    remaining 10 evaluations, and logs the mean plus a rough total-runtime
    estimate for 4 x n_iter evaluations.
    """
    log.info("Building the enterprise likelihood and estimating evaluation time...")
    draws = [[p.sample() for p in pta.params] for _ in range(11)]
    # Warm-up call: the first evaluation may include one-time setup cost.
    pta.get_lnlikelihood(draws[0])
    # perf_counter is monotonic, unlike time.time(), so the interval cannot
    # be corrupted by system clock adjustments.
    start = time.perf_counter()
    for i in range(1, 11):
        pta.get_lnlikelihood(draws[i])
    slet = (time.perf_counter() - start) / 10
    log.info(f"Single likelihood evaluation time is approximately {slet:.1e} seconds")
    log.info(f"4 times {sampler_kwargs['n_iter']} likelihood evaluations will take approximately: {4*slet*float(sampler_kwargs['n_iter'])/3600/24:.2f} days")


def get_model_and_sampler_default_settings():
    """Return the default noise-model and sampler settings.

    Returns
    =======
    model_defaults: dict of noise-model construction options
    sampler_defaults: dict of sampler/inference options
    """
    model_defaults = {
        # white noise
        'inc_wn': True,
        'tnequad': True,
        # achromatic red noise
        'inc_rn': True,
        'rn_psd': 'powerlaw',
        'rn_nfreqs': 30,
        # dm gp
        'inc_dmgp': False,
        'dmgp_psd': 'powerlaw',
        'dmgp_nfreqs': 100,
        # higher order chromatic gp
        'inc_chromgp': False,
        'chromgp_psd': 'powerlaw',
        'chromgp_nfreqs': 100,
        'chrom_idx': 4,
        'chrom_quad': False,
        # solar wind
        'inc_sw_deter': False,
        # GP perturbations on top of the deterministic model
        'inc_swgp': False,
        'ACE_prior': False,
        #
        'extra_sigs': None,
        # misc
        'tm_svd': True
    }
    sampler_defaults = {
        'likelihood': 'enterprise',
        'sampler': 'PTMCMCSampler',
        # ptmcmc kwargs
        'n_iter': 2.5e5,
        'emp_distribution': None,
        # numpyro kwargs
        'num_steps': 25,
        'num_warmup': 500,
        'num_samples': 2500,
        'num_chains': 4,
        'chain_method': 'parallel',
        'max_tree_depth': 5,
        'dense_mass': False,
    }
    return model_defaults, sampler_defaults
run_template_notebook(template_nb, config_file, output_nb=None, err_file=Non verbose: Print a description of replacements made in the template notebook. transformations: Transformations to apply to the notebook. """ - # base_dir = parent directory of directory containing config_file - base_dir = os.path.dirname(os.path.dirname(os.path.abspath(config_file))) + # base_dir = root of data repository + base_dir = pint_pal.config.DATA_ROOT nb_name = os.path.splitext(os.path.split(template_nb)[1])[0] cfg_name = os.path.splitext(os.path.split(config_file)[1])[0] diff --git a/src/pint_pal/par_checker.py b/src/pint_pal/par_checker.py index 011c489a..6ecf6856 100644 --- a/src/pint_pal/par_checker.py +++ b/src/pint_pal/par_checker.py @@ -4,7 +4,7 @@ import copy from astropy import log import astropy.units as u -from pint_pal.defaults import * +import pint_pal.config from pint.modelutils import model_equatorial_to_ecliptic def check_if_fit(model, *param): @@ -284,8 +284,9 @@ def check_ephem(toa): UserWarning If ephemeris is not set to the latest version. """ - if toa.ephem != LATEST_EPHEM: - msg = f"Wrong Solar System ephemeris in use ({toa.ephem}); should be {LATEST_EPHEM}." + if toa.ephem != pint_pal.config.LATEST_EPHEM: + msg = (f"Wrong Solar System ephemeris in use ({toa.ephem});" + f" should be {pint_pal.config.LATEST_EPHEM}.") log.warning(msg) else: msg = f"Current Solar System ephemeris in use is {toa.ephem}." @@ -304,8 +305,9 @@ def check_bipm(toa): UserWarning If BIPM correction is not set to the latest version. """ - if toa.clock_corr_info['bipm_version'] != LATEST_BIPM: - msg = f"Wrong bipm_version ({toa.clock_corr_info['bipm_version']}); should be {LATEST_BIPM}." + if toa.clock_corr_info['bipm_version'] != pint_pal.config.LATEST_BIPM: + msg = (f"Wrong bipm_version ({toa.clock_corr_info['bipm_version']});" + f" should be {pint_pal.config.LATEST_BIPM}.") log.warning(msg) else: msg = f"BIPM version in use is {toa.clock_corr_info['bipm_version']}." 
@@ -356,9 +358,10 @@ def check_troposphere(model): msg = "Added TroposphereDelay to model components." log.warning(msg) tropo = model.components['TroposphereDelay'].CORRECT_TROPOSPHERE.value - if tropo != CORRECT_TROPOSPHERE: - model.components['TroposphereDelay'].CORRECT_TROPOSPHERE.set( \ - CORRECT_TROPOSPHERE) + if tropo != pint_pal.config.CORRECT_TROPOSPHERE: + model.components['TroposphereDelay'].CORRECT_TROPOSPHERE.set( + pint_pal.config.CORRECT_TROPOSPHERE + ) msg = "Switching CORRECT_TROPOSPHERE setting." log.warning(msg) tropo = model.components['TroposphereDelay'].CORRECT_TROPOSPHERE.value @@ -385,9 +388,10 @@ def check_planet_shapiro(model): msg = "Added SolarSystemShapiro to model components." log.warning(msg) sss = model.components['SolarSystemShapiro'].PLANET_SHAPIRO.value - if sss != PLANET_SHAPIRO: - model.components['SolarSystemShapiro'].PLANET_SHAPIRO.set( \ - PLANET_SHAPIRO) + if sss != pint_pal.config.PLANET_SHAPIRO: + model.components['SolarSystemShapiro'].PLANET_SHAPIRO.set( + pint_pal.config.PLANET_SHAPIRO + ) msg = "Switching PLANET_SHAPIRO setting." 
log.warning(msg) sss = model.components['SolarSystemShapiro'].PLANET_SHAPIRO.value @@ -449,7 +453,9 @@ def check_toa_release(toas): if len(set(release_flags)) > 1: log.error(f'TOAs from multiple releases should not be combined: {set(release_flags)}') else: - if release_flags[0] == LATEST_TOA_RELEASE: - log.info(f'All TOAs are from the latest release ({LATEST_TOA_RELEASE}).') + if release_flags[0] == pint_pal.config.LATEST_TOA_RELEASE: + log.info(f'All TOAs are from the latest release ({pint_pal.config.LATEST_TOA_RELEASE}).') else: - log.warning(f'TOAs in use are from an old release {release_flags[0]}, not {LATEST_TOA_RELEASE}; update tim-directory in the .yaml accordingly.') + log.warning(f'TOAs in use are from an old release {release_flags[0]}, ' + f'not {pint_pal.config.LATEST_TOA_RELEASE}; ' + f'update tim-directory in the .yaml accordingly.') diff --git a/src/pint_pal/plot_settings.yaml b/src/pint_pal/plot_settings.yaml new file mode 100644 index 00000000..3bc2e260 --- /dev/null +++ b/src/pint_pal/plot_settings.yaml @@ -0,0 +1,290 @@ +# This YAML contains various marker colors and shapes for the three plotting schemes +# Changes here will be applied to notebook plotting immediately (after restarting the kernel) + +obs_c: { + "ao": "#40C3C3", + "arecibo": "#40C3C3" , + "gbt": "#61C853", + "vla": "#40635F", + "CHIME": "#A3DB8B", + "nancay": "#407BD5", + "ncyobs": "#407BD5", + "effelsberg_asterix": "#00A9E2", + "effelsberg": "#00A9E2", + "leap": "#004B97", + "jodrell": "#404BD5", + "jbroach": "#404BD5", + "wsrt": "#404B97", + "parkes": "#BE0119", + "gmrt": "#855CA0", + "meerkat": "#FD9927", + "fast": "#FD99CA", + "nenufar": "#E0DEFF", + "lofar": "#E0EAFF", + "None": "#808080", +} + +pta_c: { + "InPTA": "#855CA0", + "EPTA": "#407BD5", + "NANOGrav": "#61C853", + "PPTA": "#BE0119", + "MPTA": "#FD9927", + "CPTA": "#FD99CA", + "None": "#808080", +} + +febe_c: { + "327_ASP": "#6BA9E2", + "327_PUPPI": "#6BA9E2", + "430_ASP": "#6BA9E2", + "430_PUPPI": "#6BA9E2", + 
"L-wide_ASP": "#6BA9E2", + "L-wide_PUPPI": "#6BA9E2", + "Rcvr1_2_GASP": "#61C853", + "Rcvr1_2_GUPPI": "#61C853", + "Rcvr1_2_VEGAS": "hotpink", + "Rcvr_800_GASP": "#61C853", + "Rcvr_800_GUPPI": "#61C853", + "Rcvr_800_VEGAS": "violet", + "S-wide_ASP": "#6BA9E2", + "S-wide_PUPPI": "#6BA9E2", + "1.5GHz_YUPPI": "#40635F", + "3GHz_YUPPI": "#40635F", + "6GHz_YUPPI": "#40635F", + "CHIME": "#ECE133", + "unknown_LEAP": "#FD9927", + "NRT.BON.1600": "#FD9927", + "NRT.BON.1400": "#FD9927", + "NRT.BON.2000": "#FD9927", + "NRT.NUPPI.1484": "#FD9927", + "NRT.NUPPI.1854": "#FD9927", + "NRT.NUPPI.2154": "#FD9927", + "NRT.NUPPI.2539": "#FD9927", + "EFF.EBPP.1360": "#855CA0", + "EFF.EBPP.1410": "#855CA0", + "EFF.EBPP.2639": "#855CA0", + "S60-2_asterix": "#855CA0", + "JBO.DFB.1400": "#407BD5", + "JBO.DFB.1520": "#407BD5", + "WSRT.P2.1380": "#E5A4CB", + "WSRT.P1.1380.C": "#E5A4CB", + "WSRT.P1.2273.C": "#E5A4CB", + "WSRT.P1.323.C": "#40635F", + "WSRT.P1.367.C": "#40635F", + "P217-3_asterix": "#855CA0", + "unknown_asterix": "#855CA0", + "P200-3_asterix": "#855CA0", + "P217-3_PuMa2": "#855CA0", + "P217-6_LEAP": "#855CA0", + "P217-3_LEAP": "#855CA0", + "R217-3_LEAP": "#855CA0", + "P200-3_LEAP": "#855CA0", + "JBO.ROACH.1620": "#407BD5", + "1050CM_PDFB4": "#BE0119", + "1050CM_PDFB1": "#BE0119", + "1050CM_PDFB2": "#BE0119", + "1050CM_PDFB3": "#BE0119", + "1050CM_WBCORR": "#BE0119", + "1050CM_CPSR2": "#BE0119", + "1050CM_CASPSR": "#BE0119", + "MULTI_CPSR2m": "#BE0119", + "MULTI_PDFB1": "#BE0119", + "H-OH_PDFB1": "#BE0119", + "H-OH_CPSR2n": "#BE0119", + "H-OH_CPSR2m": "#BE0119", + "H-OH_PDFB4": "#BE0119", + "MULTI_CPSR2m": "#BE0119", + "MULTI_CPSR2n": "#BE0119", + "MULTI_WBCORR": "#BE0119", + "MULTI_PDFB2": "#BE0119", + "MULTI_PDFB3": "#BE0119", + "MULTI_PDFB4": "#BE0119", + "UWL_Medusa": "#BE0119", + "UWL_CASPSR": "#BE0119", + "UWL_PDFB4": "#BE0119", + "UWL_PDFB4_10CM": "#BE0119", + "UWL_PDFB4_40CM": "#BE0119", + "None": "#808080", + "unknown_asterix": "#855CA0", + "CHIME": "#A3DB8B", + 
"unknown_LuMP": "#E0EAFF", + "unknown_COBALT": "#E0EAFF", + "unknown_LOFAR": "#E0EAFF", + "LaNewBa_LUPPI": "#E0DEFF", + "19BEAM_MB4K": "#FD99CA", +} + +ng20_c: { + "CHIME": "#FFA733", + "327_ASP": "#BE0119", + "327_PUPPI": "#BE0119", + "430_ASP": "#FD9927", + "430_PUPPI": "#FD9927", + "L-wide_ASP": "#BDB6F6", + "L-wide_PUPPI": "#BDB6F6", + "Rcvr1_2_GASP": "#79A3E2", + "Rcvr1_2_GUPPI": "#79A3E2", + "Rcvr1_2_VEGAS": "#79A3E2", + "Rcvr_800_GASP": "#8DD883", + "Rcvr_800_GUPPI": "#8DD883", + "Rcvr_800_VEGAS": "#8DD883", + "S-wide_ASP": "#C4457A", + "S-wide_PUPPI": "#C4457A", + "1.5GHz_YUPPI": "#EBADCB", + "3GHz_YUPPI": "#E79CC1", + "6GHz_YUPPI": "#DB6BA1", +} + +obs_m: { + "ao": "x", + "arecibo": "x", + "gbt": "x", + "vla": "x", + "CHIME": "x", + "leap": "x", + "nancay": "x", + "ncyobs": "x", + "effelsberg_asterix": "x", + "effelsberg": "x", + "jodrell": "x", + "jbroach": "x", + "wsrt": "x", + "parkes": "x", + "gmrt": "x", + "meerkat": "x", + "fast": "x", + "lofar": "x", + "nenufar": "x", + "None": "x", +} + +pta_m: { + "InPTA": "x", + "EPTA": "x", + "NANOGrav": "x", + "PPTA": "x", + "MPTA": "x", + "CPTA": "x", + "None": "x", +} + +ng20_m: { + "327_ASP": "x", + "327_PUPPI": "x", + "430_ASP": "x", + "430_PUPPI": "x", + "L-wide_ASP": "x", + "L-wide_PUPPI": "x", + "Rcvr1_2_GASP": "x", + "Rcvr1_2_GUPPI": "x", + "Rcvr1_2_VEGAS": "x", + "Rcvr_800_GASP": "x", + "Rcvr_800_GUPPI": "x", + "Rcvr_800_VEGAS": "x", + "S-wide_ASP": "x", + "S-wide_PUPPI": "x", + "1.5GHz_YUPPI": "x", + "3GHz_YUPPI": "x", + "6GHz_YUPPI": "x", + "CHIME": "x", +} + +febe_m: { + "327_ASP": "x", + "327_PUPPI": "x", + "430_ASP": "x", + "430_PUPPI": "x", + "L-wide_ASP": "x", + "L-wide_PUPPI": "x", + "Rcvr1_2_GASP": "x", + "Rcvr1_2_GUPPI": "x", + "Rcvr1_2_VEGAS": "x", + "Rcvr_800_GASP": "o", + "Rcvr_800_GUPPI": "o", + "Rcvr_800_VEGAS": "o", + "S-wide_ASP": "o", + "S-wide_PUPPI": "o", + "1.5GHz_YUPPI": "x", + "3GHz_YUPPI": "o", + "6GHz_YUPPI": "^", + "CHIME": "x", + "NRT.BON.1600": "x", + "NRT.BON.1400": "o", + 
"NRT.BON.2000": "^", + "NRT.NUPPI.1484": "x", + "NRT.NUPPI.1854": "o", + "NRT.NUPPI.2154": "^", + "NRT.NUPPI.2539": "^", + "EFF.EBPP.1360": "o", + "EFF.EBPP.1410": "x", + "EFF.EBPP.2639": "^", + "S60-2_asterix": "v", + "P217-3_asterix": "x", + "P200-3_asterix": "v", + "unknown_asterix": "v", + "P217-3_PuMa2": "x", + "P200-3_LEAP": "v", + "P217-6_LEAP": "x", + "P217-3_LEAP": "x", + "R217-3_LEAP": "x", + "unknown_LEAP": "x", + "JBO.DFB.1400": "x", + "JBO.DFB.1520": "o", + "JBO.ROACH.1620": "^", + "WSRT.P2.1380": "v", + "WSRT.P1.1380.C": "x", + "WSRT.P1.2273.C": "o", + "WSRT.P1.323.C": "x", + "WSRT.P1.367.C": "x", + "1050CM_PDFB4": "x", + "1050CM_PDFB1": "x", + "1050CM_PDFB2": "x", + "1050CM_PDFB3": "x", + "1050CM_WBCORR": "x", + "1050CM_CPSR2": "x", + "1050CM_CPSR2m": "x", + "1050CM_CASPSR": "x", + "MULTI_CPSR2m": "o", + "MULTI_PDFB1": "o", + "H-OH_PDFB1": "^", + "H-OH_CPSR2m": "^", + "H-OH_CPSR2n": "^", + "H-OH_PDFB4": "^", + "MULTI_CPSR2n": "o", + "MULTI_WBCORR": "o", + "MULTI_PDFB2": "o", + "MULTI_PDFB3": "o", + "MULTI_PDFB4": "o", + "UWL_Medusa": "v", + "UWL_PDFB4": "v", + "UWL_PDFB4_10CM": "v", + "UWL_PDFB4_40CM": "v", + "UWL_CASPSR": "v", + "None": "x", + "3GHz_YUPPI": "x", + "6GHz_YUPPI": "x", + "CHIME": "x", + "19BEAM_MB4K": "x", +} + +label_names: { + "327_ASP": "ASP 327 MHz", + "327_PUPPI": "PUPPI 327 MHz", + "430_ASP": "ASP 430 MHz", + "430_PUPPI": "PUPPI 430 MHz", + "L-wide_ASP": "ASP L-wide", + "L-wide_PUPPI": "PUPPI L-wide", + "Rcvr1_2_GASP": "GASP L-band", + "Rcvr1_2_GUPPI": "GUPPI L-band", + "Rcvr1_2_VEGAS": "VEGAS L-band", + "Rcvr_800_GASP": "GASP 820 MHz", + "Rcvr_800_GUPPI": "GUPPI 820 MHz", + "Rcvr_800_VEGAS": "VEGAS 820 MHz", + "S-wide_ASP": "ASP S-wide", + "S-wide_PUPPI": "PUPPI S-wide", + "1.5GHz_YUPPI": "YUPPI 1.5 GHz", + "3GHz_YUPPI": "YUPPI 3 GHz", + "6GHz_YUPPI": "YUPPI 6 GHz", + "CHIME": "CHIME", +} diff --git a/src/pint_pal/plot_utils.py b/src/pint_pal/plot_utils.py index 225b3ef4..bb27d97e 100644 --- a/src/pint_pal/plot_utils.py +++ 
b/src/pint_pal/plot_utils.py @@ -2,246 +2,75 @@ # -*- coding: utf-8 -*- """ Created on Tue Feb 4 09:30:59 2020 - @author: bshapiroalbert +Code since butchered by many timers. """ import numpy as np import matplotlib.pyplot as plt -import sys, copy +import copy from astropy import log import astropy.units as u -# Import PINT +import yaml + import pint.toa as toa import pint.models as model import pint.fitter as fitter import pint.utils as pu import subprocess -# import extra util functions brent wrote + from pint_pal.utils import * import os from pint_pal.timingconfiguration import TimingConfiguration import pint_pal.lite_utils as lu -# color blind friends colors and markers? -#CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3', '#999999', '#e41a1c', '#dede00'] -#MARKERS = ['.', 'v', 's', 'x', '^', 'D', 'p', 'P', '*'] - -# Color scheme for consistent reciever-backend combos, same as published 12.5 yr -colorschemes = {'observatories':{ - "ao": "#6BA9E2", - "arecibo": "#6BA9E2", - "gbt": "#61C853", - "vla": "#40635F", - "CHIME": "#ECE133", - "nancay": "#407BD5", - "ncyobs": "#407BD5", - "effelsberg_asterix": "#407BD5", - "effelsberg": "#407BD5", - "leap": "#ECE133", - "jodrell": "#407BD5", - "jbroach": "#407BD5", - "wsrt": "#E5A4CB", - "parkes": "#BE0119", - "gmrt": "#855CA0", - "meerkat": "#FD9927", - "None": "#808080" - }, - - 'pta':{ - "InPTA": "#855CA0", - "EPTA": "#407BD5", - "NANOGrav": "#61C853", - "PPTA": "#BE0119", - "MPTA": "#FD9927", - "None": "#808080" - }, - 'febe':{ - "327_ASP": "#6BA9E2", - "327_PUPPI": "#6BA9E2", - "430_ASP": "#6BA9E2", - "430_PUPPI": "#6BA9E2", - "L-wide_ASP": "#6BA9E2", - "L-wide_PUPPI": "#6BA9E2", - "Rcvr1_2_GASP": "#61C853", - "Rcvr1_2_GUPPI": "#61C853", - "Rcvr_800_GASP": "#61C853", - "Rcvr_800_GUPPI": "#61C853", - "S-wide_ASP": "#6BA9E2", - "S-wide_PUPPI": "#6BA9E2", - "1.5GHz_YUPPI": "#40635F", - "3GHz_YUPPI": "#40635F", - "6GHz_YUPPI": "#40635F", - "CHIME": "#ECE133", - "unknown_LEAP": "#FD9927", 
- "NRT.BON.1600": "#FD9927", - "NRT.BON.1400": "#FD9927", - "NRT.BON.2000": "#FD9927", - "NRT.NUPPI.1484": "#FD9927", - "NRT.NUPPI.1854": "#FD9927", - "NRT.NUPPI.2154": "#FD9927", - "NRT.NUPPI.2539": "#FD9927", - "EFF.EBPP.1360": "#855CA0", - "EFF.EBPP.1410": "#855CA0", - "EFF.EBPP.2639": "#855CA0", - "S60-2_asterix": "#855CA0", - "JBO.DFB.1400": "#407BD5", - "JBO.DFB.1520": "#407BD5", - "WSRT.P2.1380": "#E5A4CB", - "WSRT.P1.1380.C": "#E5A4CB", - "WSRT.P1.2273.C": "#E5A4CB", - "WSRT.P1.323.C": "#40635F", - "WSRT.P1.367.C": "#40635F", - "P217-3_asterix": "#855CA0", - "unknown_asterix": "#855CA0", - "P200-3_asterix": "#855CA0", - "P217-3_PuMa2": "#855CA0", - "P217-6_LEAP": "#855CA0", - "P217-3_LEAP": "#855CA0", - "R217-3_LEAP": "#855CA0", - "P200-3_LEAP": "#855CA0", - "JBO.ROACH.1620": "#407BD5", - "1050CM_PDFB4": "#BE0119", - "1050CM_PDFB1": "#BE0119", - "1050CM_PDFB2": "#BE0119", - "1050CM_PDFB3": "#BE0119", - "1050CM_WBCORR": "#BE0119", - "1050CM_CPSR2": "#BE0119", - "1050CM_CASPSR": "#BE0119", - "MULTI_CPSR2m": "#BE0119", - "MULTI_PDFB1": "#BE0119", - "H-OH_PDFB1": "#BE0119", - "H-OH_CPSR2n": "#BE0119", - "H-OH_CPSR2m": "#BE0119", - "H-OH_PDFB4": "#BE0119", - "MULTI_CPSR2m": "#BE0119", - "MULTI_CPSR2n": "#BE0119", - "MULTI_WBCORR": "#BE0119", - "MULTI_PDFB2": "#BE0119", - "MULTI_PDFB3": "#BE0119", - "MULTI_PDFB4": "#BE0119", - "UWL_Medusa": "#BE0119", - "UWL_CASPSR": "#BE0119", - "UWL_PDFB4": "#BE0119", - "UWL_PDFB4_10CM": "#BE0119", - "UWL_PDFB4_40CM": "#BE0119", - "None": "#808080", - "unknown_asterix": "#855CA0", - "CHIME": "#ECE133" - }} - - -# marker dictionary to be used if desired, currently all 'x' -markers = {'observatories':{ - "ao": "x", - "arecibo": "x", - "gbt": "x", - "vla": "x", - "CHIME": "x", - "leap": "x", - "nancay": "x", - "ncyobs": "x", - "effelsberg_asterix": "x", - "effelsberg": "x", - "jodrell": "x", - "jbroach": "x", - "wsrt": "x", - "parkes": "x", - "gmrt": "x", - "meerkat": "x", - "None": "x" - }, - 'pta':{ - "InPTA": "x", - "EPTA": 
"x", - "NANOGrav": "x", - "PPTA": "x", - "MPTA": "x", - "None": "x" - }, - 'febe': {"327_ASP": "x", - "327_PUPPI": "x", - "430_ASP": "x", - "430_PUPPI": "x", - "L-wide_ASP": "x", - "L-wide_PUPPI": "x", - "Rcvr1_2_GASP": "x", - "Rcvr1_2_GUPPI": "x", - "Rcvr_800_GASP": "o", - "Rcvr_800_GUPPI": "o", - "S-wide_ASP": "o", - "S-wide_PUPPI": "o", - "1.5GHz_YUPPI": "x", - "3GHz_YUPPI": "o", - "6GHz_YUPPI": "^", - "CHIME": "x", - "NRT.BON.1600": "x", - "NRT.BON.1400": "o", - "NRT.BON.2000": "^", - "NRT.NUPPI.1484": "x", - "NRT.NUPPI.1854": "o", - "NRT.NUPPI.2154": "^", - "NRT.NUPPI.2539": "^", - "EFF.EBPP.1360": "o", - "EFF.EBPP.1410": "x", - "EFF.EBPP.2639": "^", - "S60-2_asterix": "v", - "P217-3_asterix": "x", - "P200-3_asterix": "v", - "unknown_asterix": "v", - "P217-3_PuMa2": "x", - "P200-3_LEAP": "v", - "P217-6_LEAP": "x", - "P217-3_LEAP": "x", - "R217-3_LEAP": "x", - "unknown_LEAP": "x", - "JBO.DFB.1400": "x", - "JBO.DFB.1520": "o", - "JBO.ROACH.1620": "^", - "WSRT.P2.1380": "v", - "WSRT.P1.1380.C": "x", - "WSRT.P1.2273.C": "o", - "WSRT.P1.323.C": "x", - "WSRT.P1.367.C": "x", - "1050CM_PDFB4": "x", - "1050CM_PDFB1": "x", - "1050CM_PDFB2": "x", - "1050CM_PDFB3": "x", - "1050CM_WBCORR": "x", - "1050CM_CPSR2": "x", - "1050CM_CPSR2m": "x", - "1050CM_CASPSR": "x", - "MULTI_CPSR2m": "o", - "MULTI_PDFB1": "o", - "H-OH_PDFB1": "^", - "H-OH_CPSR2m": "^", - "H-OH_CPSR2n": "^", - "H-OH_PDFB4": "^", - "MULTI_CPSR2n": "o", - "MULTI_WBCORR": "o", - "MULTI_PDFB2": "o", - "MULTI_PDFB3": "o", - "MULTI_PDFB4": "o", - "UWL_Medusa": "v", - "UWL_PDFB4": "v", - "UWL_PDFB4_10CM": "v", - "UWL_PDFB4_40CM": "v", - "UWL_CASPSR": "v", - "None": "x", - "3GHz_YUPPI": "x", - "6GHz_YUPPI": "x", - "CHIME": "x", - }} -# Define the color map option -#colorscheme = colorschemes['thankful_2'] -#colorscheme = thesis_colorschemes['thesis'] +PACKAGE_DIR = os.path.dirname(__file__) +with open(os.path.join(PACKAGE_DIR, "plot_settings.yaml"), "r") as cf: + config = yaml.safe_load(cf) +# plot_settings.yaml now 
def call(x):
    """Run the command string *x* in a shell.

    NOTE(review): shell=True executes the string via the shell -- only ever
    pass trusted, program-constructed strings here, never user input.
    """
    subprocess.call(x, shell=True)


def set_color_and_marker(colorby):
    """
    Return the color and marker dictionaries for a plot grouping.

    Parameters
    ----------
    colorby : str
        'pta' - color/mark by pulsar timing array
        'obs' - color/mark by observatory
        'f'   - color/mark by frontend/backend pair

    Returns
    -------
    (colorscheme, markerscheme) : tuple of dict

    Raises
    ------
    ValueError
        If `colorby` is not one of the recognized options.
    """
    # The module-level dicts are keyed 'obs'/'pta'/'febe' (see the
    # plot_settings.yaml loading above). The previous version indexed
    # colorschemes["observatories"], a key that is never defined, so
    # colorby='obs' raised KeyError; an unknown colorby fell through to
    # UnboundLocalError. Both are fixed by dispatching through key_map.
    key_map = {"pta": "pta", "obs": "obs", "f": "febe"}
    try:
        key = key_map[colorby]
    except KeyError:
        raise ValueError(f"Unrecognized colorby option: {colorby!r}") from None
    return colorschemes[key], markers[key]
@@ -267,7 +96,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa title [boolean] : If False, will not print plot title [default: True]. axs [string] : If not None, should be defined subplot value and the figure will be used as part of a larger figure [default: None]. - + Optional Arguments: -------------------- @@ -285,320 +114,396 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa if fitter.is_wideband: NB = False if avg == True: - raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") + raise ValueError( + "Cannot epoch average wideband residuals, please change 'avg' to False." + ) else: NB = True - + # Check if want epoch averaged residuals - if avg == True and restype == 'prefit' and mixed_ecorr == True: + if avg == True and restype == "prefit" and mixed_ecorr == True: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True) - elif avg == True and restype == 'postfit' and mixed_ecorr == True: + no_avg_dict = no_ecorr_average( + fitter.toas, fitter.resids_init, use_noise_model=True + ) + elif avg == True and restype == "postfit" and mixed_ecorr == True: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True) - elif avg == True and restype == 'both' and mixed_ecorr == True: + no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True) + elif avg == True and restype == "both" and mixed_ecorr == True: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True) + no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - no_avg_dict_pre = no_ecorr_average(fitter.toas, 
fitter.resids_init,use_noise_model=True) - elif avg == True and restype == 'prefit' and mixed_ecorr == False: + no_avg_dict_pre = no_ecorr_average( + fitter.toas, fitter.resids_init, use_noise_model=True + ) + elif avg == True and restype == "prefit" and mixed_ecorr == False: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'postfit' and mixed_ecorr==False: + elif avg == True and restype == "postfit" and mixed_ecorr == False: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'both' and mixed_ecorr == False: + elif avg == True and restype == "both" and mixed_ecorr == False: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - # Get residuals - if 'res' in kwargs.keys(): - res = kwargs['res'] + if "res" in kwargs.keys(): + res = kwargs["res"] else: - if restype == 'prefit': + if restype == "prefit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) - elif avg==True and mixed_ecorr == False: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) + elif avg == True and mixed_ecorr == False: + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'postfit': + res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) + elif restype == "postfit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) elif avg == True: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) else: res = 
fitter.resids.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'both': + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + elif restype == "both": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) - res_pre_no_avg = no_avg_dict_pre['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) + res_pre_no_avg = no_avg_dict_pre["time_resids"].to(u.us) elif avg == True and mixed_ecorr == False: - res = avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) res_pre = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - res_pre = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) else: - raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\ - %(restype)) + raise ValueError( + "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'." 
+ % (restype) + ) # Check if we want whitened residuals - if whitened == True and ('res' not in kwargs.keys()): - if avg == True and mixed_ecorr == True: - if restype != 'both': + if whitened == True and ("res" not in kwargs.keys()): + if avg == True and mixed_ecorr == True: + if restype != "both": res = whiten_resids(avg_dict, restype=restype) res_no_avg = whiten_resids(no_avg_dict, restype=restype) else: - res = whiten_resids(avg_dict_pre, restype='prefit') - res_pre = whiten_resids(avg_dict, restype='postfit') + res = whiten_resids(avg_dict_pre, restype="prefit") + res_pre = whiten_resids(avg_dict, restype="postfit") res_pre = res_pre.to(u.us) - res_no_avg = whiten_resids(avg_dict_pre, restype='prefit') - res_pre_no_avg = whiten_resids(avg_dict, restype='postfit') + res_no_avg = whiten_resids(avg_dict_pre, restype="prefit") + res_pre_no_avg = whiten_resids(avg_dict, restype="postfit") res_pre_no_avg = res_pre_no_avg.to(u.us) res = res.to(u.us) res_no_avg = res_no_avg.to(u.us) - elif avg == True and mixed_ecorr == False: - if restype != 'both': + elif avg == True and mixed_ecorr == False: + if restype != "both": res = whiten_resids(avg_dict, restype=restype) else: - res = whiten_resids(avg_dict_pre, restype='prefit') - res_pre = whiten_resids(avg_dict, restype='postfit') + res = whiten_resids(avg_dict_pre, restype="prefit") + res_pre = whiten_resids(avg_dict, restype="postfit") res_pre = res_pre.to(u.us) - res = res.to(u.us) + res = res.to(u.us) else: - if restype != 'both': + if restype != "both": res = whiten_resids(fitter, restype=restype) else: - res = whiten_resids(fitter, restype='prefit') - res_pre = whiten_resids(fitter, restype='postfit') + res = whiten_resids(fitter, restype="prefit") + res_pre = whiten_resids(fitter, restype="postfit") res_pre = res_pre.to(u.us) res = res.to(u.us) # Get errors - if 'errs' in kwargs.keys(): - errs = kwargs['errs'] + if "errs" in kwargs.keys(): + errs = kwargs["errs"] else: - if restype == 'prefit': + if restype == 
"prefit": if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.toas.get_errors().to(u.us) - elif restype == 'postfit': + elif restype == "postfit": if NB == True: if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) - elif restype == 'both': + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) + elif restype == "both": if NB == True: if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_pre = avg_dict_pre['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) - errs_no_avg_pre = no_avg_dict_pre['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_pre = avg_dict_pre["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) + errs_no_avg_pre = no_avg_dict_pre["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) - errs_pre = avg_dict_pre['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_pre = avg_dict_pre["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) # Get MJDs - if 'mjds' in kwargs.keys(): - mjds = 
kwargs['mjds'] + if "mjds" in kwargs.keys(): + mjds = kwargs["mjds"] else: mjds = fitter.toas.get_mjds().value - if avg == True and mixed_ecorr == True : - mjds = avg_dict['mjds'].value - mjds_no_avg = no_avg_dict['mjds'].value - years_no_avg = (mjds_no_avg - 51544.0)/365.25 + 2000.0 + if avg == True and mixed_ecorr == True: + mjds = avg_dict["mjds"].value + mjds_no_avg = no_avg_dict["mjds"].value + years_no_avg = (mjds_no_avg - 51544.0) / 365.25 + 2000.0 elif avg == True and mixed_ecorr == False: - mjds = avg_dict['mjds'].value + mjds = avg_dict["mjds"].value # Convert to years - years = (mjds - 51544.0)/365.25 + 2000.0 - + years = (mjds - 51544.0) / 365.25 + 2000.0 + # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr. # Create combined arrays - - if avg == True and mixed_ecorr == True: + + if avg == True and mixed_ecorr == True: combo_res = np.hstack((res, res_no_avg)) combo_errs = np.hstack((errs, errs_no_avg)) combo_years = np.hstack((years, years_no_avg)) - if restype =='both': - combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) + if restype == "both": + combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) combo_res_pre = np.hstack((res_pre, res_no_avg_pre)) - + # Get colorby flag values (obs, PTA, febe, etc.) - if 'colorby' in kwargs.keys(): - cb = kwargs['colorby'] + if "colorby" in kwargs.keys(): + cb = kwargs["colorby"] else: cb = np.array(fitter.toas[colorby]) -#. Seems to run a little faster but not robust to obs? -# cb = np.array(fitter.toas.get_flag_value(colorby)[0]) + # . Seems to run a little faster but not robust to obs? 
+    # cb = np.array(fitter.toas.get_flag_value(colorby)[0])
     if avg == True:
         avg_cb = []
-        for iis in avg_dict['indices']:
+        for iis in avg_dict["indices"]:
             avg_cb.append(cb[iis[0]])
         if mixed_ecorr == True:
             no_avg_cb = []
-            for jjs in no_avg_dict['indices']:
+            for jjs in no_avg_dict["indices"]:
                 no_avg_cb.append(cb[jjs])
             no_ecorr_cb = np.array(no_avg_cb)
-
+
         cb = np.array(avg_cb)
-
+
     # Get the set of unique flag values
-    if avg==True and mixed_ecorr==True:
-        cb = np.hstack((cb,no_ecorr_cb))
-
+    if avg == True and mixed_ecorr == True:
+        cb = np.hstack((cb, no_ecorr_cb))
+
     CB = set(cb)
-
-    if colorby== 'pta':
-        colorscheme = colorschemes['pta']
-    elif colorby == 'obs':
-        colorscheme = colorschemes['observatories']
-    elif colorby == 'f':
-        colorscheme = colorschemes['febe']
-
+    colorscheme, markerscheme = set_color_and_marker(colorby)
 
-    if 'figsize' in kwargs.keys():
-        figsize = kwargs['figsize']
+    if "figsize" in kwargs.keys():
+        figsize = kwargs["figsize"]
     else:
-        figsize = (10,5)
+        figsize = (10, 5)
     if axs == None:
         fig = plt.figure(figsize=figsize)
         ax1 = fig.add_subplot(111)
     else:
         fig = plt.gcf()
         ax1 = axs
-
+
     for i, c in enumerate(CB):
-        inds = np.where(cb==c)[0]
+        inds = np.where(cb == c)[0]
         if not inds.tolist():
             cb_label = ""
         else:
             cb_label = cb[inds][0]
         # Get plot preferences
-        if 'fmt' in kwargs.keys():
-            mkr = kwargs['fmt']
+        if "fmt" in kwargs.keys():
+            mkr = kwargs["fmt"]
         else:
             try:
-                mkr = markers[cb_label]
-                if restype == 'both':
-                    mkr_pre = '.'
+                # Use the per-flag mapping returned by set_color_and_marker();
+                # the module-level `markers` dict is keyed by scheme name
+                # ("obs"/"pta"/"febe"), not by flag value, so markers[cb_label]
+                # would always raise and fall back to the "x" marker.
+                mkr = markerscheme[cb_label]
+                if restype == "both":
+                    mkr_pre = "."
            except Exception:
-                mkr = 'x'
+                mkr = "x"
                 log.log(1, "Color by Flag doesn't have a marker label!!")
-        if 'color' in kwargs.keys():
-            clr = kwargs['color']
+        if "color" in kwargs.keys():
+            clr = kwargs["color"]
         else:
             try:
                 clr = colorscheme[cb_label]
             except Exception:
-                clr = 'k'
+                clr = "k"
                 log.log(1, "Color by Flag doesn't have a color!!")
-        if 'alpha' in kwargs.keys():
-            alpha = kwargs['alpha']
+        if "alpha" in kwargs.keys():
+            alpha = kwargs["alpha"]
         else:
             alpha = 0.5
 
         if avg == True and mixed_ecorr == True:
             if plotsig:
-                combo_sig = combo_res[inds]/combo_[inds]
-                ax1.errorbar(combo_years[inds], combo_sig, yerr=len(combo_errs[inds])*[1], fmt=mkr, \
-                         color=clr, label=cb_label, alpha = alpha, picker=True)
-                if restype == 'both':
-                    combo_sig_pre = combo_res_pre[inds]/combo_errs_pre[inds]
-                    ax1.errorbar(combo_years[inds], combo_sig_pre, yerr=len(combo_errs_pre[inds])*[1], fmt=mkr_pre, \
-                         color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True)
+                # `combo_` was an undefined name (NameError) in the old code;
+                # the divisor must be the combined uncertainty array.
+                combo_sig = combo_res[inds] / combo_errs[inds]
+                ax1.errorbar(
+                    combo_years[inds],
+                    combo_sig,
+                    yerr=len(combo_errs[inds]) * [1],
+                    fmt=mkr,
+                    color=clr,
+                    label=cb_label,
+                    alpha=alpha,
+                    picker=True,
+                )
+                if restype == "both":
+                    combo_sig_pre = combo_res_pre[inds] / combo_errs_pre[inds]
+                    ax1.errorbar(
+                        combo_years[inds],
+                        combo_sig_pre,
+                        yerr=len(combo_errs_pre[inds]) * [1],
+                        fmt=mkr_pre,
+                        color=clr,
+                        label=cb_label + " Prefit",
+                        alpha=alpha,
+                        picker=True,
+                    )
             else:
-                ax1.errorbar(combo_years[inds], combo_res[inds], yerr=combo_errs[inds], fmt=mkr, \
-                         color=clr, label=cb_label, alpha = alpha, picker=True)
-                if restype == 'both':
-                    ax1.errorbar(combo_years[inds], combo_res_rpe[inds], yerr=combo_errs_pre[inds], fmt=mkr_pre, \
-                         color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True)
-
+                ax1.errorbar(
+                    combo_years[inds],
+                    combo_res[inds],
+                    yerr=combo_errs[inds],
+                    fmt=mkr,
+                    color=clr,
+                    label=cb_label,
+                    alpha=alpha,
+                    picker=True,
+                )
+                if restype == "both":
+                    ax1.errorbar(
+                        combo_years[inds],
combo_res_pre[inds],
+                        yerr=combo_errs_pre[inds],
+                        fmt=mkr_pre,
+                        color=clr,
+                        label=cb_label + " Prefit",
+                        alpha=alpha,
+                        picker=True,
+                    )
+
         else:
             if plotsig:
-                sig = res[inds]/errs[inds]
-                ax1.errorbar(years[inds], sig, yerr=len(errs[inds])*[1], fmt=mkr, \
-                         color=clr, label=cb_label, alpha = alpha, picker=True)
-                if restype == 'both':
-                    sig_pre = res_pre[inds]/errs_pre[inds]
-                    ax1.errorbar(years[inds], sig_pre, yerr=len(errs_pre[inds])*[1], fmt=mkr_pre, \
-                         color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True)
+                sig = res[inds] / errs[inds]
+                ax1.errorbar(
+                    years[inds],
+                    sig,
+                    yerr=len(errs[inds]) * [1],
+                    fmt=mkr,
+                    color=clr,
+                    label=cb_label,
+                    alpha=alpha,
+                    picker=True,
+                )
+                if restype == "both":
+                    sig_pre = res_pre[inds] / errs_pre[inds]
+                    ax1.errorbar(
+                        years[inds],
+                        sig_pre,
+                        yerr=len(errs_pre[inds]) * [1],
+                        fmt=mkr_pre,
+                        color=clr,
+                        label=cb_label + " Prefit",
+                        alpha=alpha,
+                        picker=True,
+                    )
             else:
-                ax1.errorbar(years[inds], res[inds], yerr=errs[inds], fmt=mkr, \
-                         color=clr, label=cb_label, alpha = alpha, picker=True)
-                if restype == 'both':
-                    ax1.errorbar(years[inds], res_pre[inds], yerr=errs_pre[inds], fmt=mkr_pre, \
-                         color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True)
+                ax1.errorbar(
+                    years[inds],
+                    res[inds],
+                    yerr=errs[inds],
+                    fmt=mkr,
+                    color=clr,
+                    label=cb_label,
+                    alpha=alpha,
+                    picker=True,
+                )
+                if restype == "both":
+                    ax1.errorbar(
+                        years[inds],
+                        res_pre[inds],
+                        yerr=errs_pre[inds],
+                        fmt=mkr_pre,
+                        color=clr,
+                        label=cb_label + " Prefit",
+                        alpha=alpha,
+                        picker=True,
+                    )
     # Set second axis
-    ax1.set_xlabel(r'Year')
+    ax1.set_xlabel(r"Year")
     ax1.grid(True)
     ax2 = ax1.twiny()
-    mjd0 = ((ax1.get_xlim()[0])-2004.0)*365.25+53005.
-    mjd1 = ((ax1.get_xlim()[1])-2004.0)*365.25+53005.
+ mjd0 = ((ax1.get_xlim()[0]) - 2004.0) * 365.25 + 53005.0 + mjd1 = ((ax1.get_xlim()[1]) - 2004.0) * 365.25 + 53005.0 ax2.set_xlim(mjd0, mjd1) if plotsig: if avg and whitened: - ax1.set_ylabel('Average Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual/Uncertainty \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual/Uncertainty') + ax1.set_ylabel("Average Residual/Uncertainty") elif whitened and not avg: - ax1.set_ylabel('Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Residual/Uncertainty \n (Whitened)", multialignment="center" + ) else: - ax1.set_ylabel('Residual/Uncertainty') + ax1.set_ylabel("Residual/Uncertainty") else: if avg and whitened: - ax1.set_ylabel('Average Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual ($\mu$s) \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual ($\mu$s)') + ax1.set_ylabel("Average Residual ($\mu$s)") elif whitened and not avg: - ax1.set_ylabel('Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel("Residual ($\mu$s) \n (Whitened)", multialignment="center") else: - ax1.set_ylabel('Residual ($\mu$s)') + ax1.set_ylabel("Residual ($\mu$s)") if legend: if len(CB) > 5: - ncol = int(np.ceil(len(CB)/2)) + ncol = int(np.ceil(len(CB) / 2)) y_offset = 1.15 else: ncol = len(CB) y_offset = 1.0 - ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol) + ax1.legend( + loc="upper center", + bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]), + ncol=ncol, + ) if title: if len(CB) > 5: y_offset = 1.1 else: y_offset = 1.0 - plt.title("%s %s timing residuals" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1]) + plt.title( + "%s %s timing residuals" % (fitter.model.PSR.value, restype), + y=y_offset + 1.0 / figsize[1], + ) if axs == None: 
plt.tight_layout() if save: @@ -611,17 +516,17 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa ext += "_NB" else: ext += "_WB" - if restype == 'prefit': + if restype == "prefit": ext += "_prefit" - elif restype == 'postfit': + elif restype == "postfit": ext += "_postfit" elif restype == "both": ext += "_pre_post_fit" plt.savefig("%s_resid_v_mjd%s.png" % (fitter.model.PSR.value, ext)) - + if axs == None: # Define clickable points - text = ax2.text(0,0,"") + text = ax2.text(0, 0, "") # Define point highlight color stamp_color = "#FD9927" @@ -630,49 +535,65 @@ def onclick(event): # Get X and Y axis data xdata = mjds if plotsig: - ydata = (res/errs).decompose().value + ydata = (res / errs).decompose().value else: ydata = res.value # Get x and y data from click xclick = event.xdata yclick = event.ydata # Calculate scaled distance, find closest point index - d = np.sqrt(((xdata - xclick)/10.0)**2 + (ydata - yclick)**2) + d = np.sqrt(((xdata - xclick) / 10.0) ** 2 + (ydata - yclick) ** 2) ind_close = np.where(np.min(d) == d)[0] # highlight clicked point - ax2.scatter(xdata[ind_close], ydata[ind_close], marker = 'x', c = stamp_color) + ax2.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color) # Print point info text.set_position((xdata[ind_close], ydata[ind_close])) if plotsig: - text.set_text("TOA Params:\n MJD: %s \n Res/Err: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n MJD: %s \n Res/Err: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) else: - text.set_text("TOA Params:\n MJD: %s \n Res: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n MJD: %s \n Res: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) - fig.canvas.mpl_connect('button_press_event', onclick) + fig.canvas.mpl_connect("button_press_event", onclick) return -def 
plot_FD_delay(fitter = None, model_object = None, save = False, title= True, axs = None, legend=True, show_bin=True, **kwargs): + +def plot_FD_delay( + fitter=None, + model_object=None, + save=False, + title=True, + axs=None, + legend=True, + show_bin=True, + **kwargs, +): """ - Make a plot of frequency (MHz) vs the time delay (us) implied by FD parameters. + Make a plot of frequency (MHz) vs the time delay (us) implied by FD parameters. Z. Arzoumanian, The NANOGrav Nine-year Data Set: Observations, Arrival Time Measurements, and Analysis of 37 Millisecond Pulsars, The Astrophysical Journal, Volume 813, Issue 1, article id. 65, 31 pp.(2015). Eq.(2): FDdelay = sum(c_i * (log(obs_freq/1GHz))^i) - - This can be run with EITHER a PINT fitter object OR PINT model object. If run with a model object, the user will need to specify which frequencies they would like to plot FD delays over. - + + This can be run with EITHER a PINT fitter object OR PINT model object. If run with a model object, the user will need to specify which frequencies they would like to plot FD delays over. + Arguments ---------- - + fitter[object] : The PINT fitter object. model[object] : The PINT model object. Can be used instead of fitter save [boolean] : If True will save plot with the name "FD_delay.png"[default: False]. title [boolean] : If False, will not print plot title [default: True]. axs [string] : If not None, should be defined subplot value and the figure will be used as part of a larger figure [default: None]. - + Optional Arguments: -------------------- freqs [list/array] : List or array of frequencies (MHz) to plot. Will override values from toa object. 
@@ -683,133 +604,153 @@ def plot_FD_delay(fitter = None, model_object = None, save = False, title= True, alpha [float] : matplotlib alpha options for error regions [default: 0.2] loc ['string'] : matplotlib legend location [default: 'upper right'] Only used when legend = True """ - - #Make sure that either a fitter or model object has been specified + + # Make sure that either a fitter or model object has been specified if fitter == None and model_object == None: raise Exception("Need to specify either a fitter or model object") - - #Get frequencies - if 'freqs' in kwargs.keys(): - freqs = kwargs['freqs'] + + # Get frequencies + if "freqs" in kwargs.keys(): + freqs = kwargs["freqs"] elif model_object is not None: - raise Exception("Using a PINT model object. Need to add list/array of frequencies to calculate FD delay over") + raise Exception( + "Using a PINT model object. Need to add list/array of frequencies to calculate FD delay over" + ) else: freqs = fitter.toas.get_freqs().value freqs = np.sort(freqs) - - #Get FD delay in units of milliseconds as a function of frequency. This will eventually by available in PINT and become redundant. PINT version may need to be modified to allow for calculation of error regions - def get_FD_delay(pint_model_object,freqs): - FD_map = model.TimingModel.get_prefix_mapping(pint_model_object,"FD") + + # Get FD delay in units of milliseconds as a function of frequency. This will eventually by available in PINT and become redundant. 
PINT version may need to be modified to allow for calculation of error regions + def get_FD_delay(pint_model_object, freqs): + FD_map = model.TimingModel.get_prefix_mapping(pint_model_object, "FD") FD_names = list(FD_map.values()) FD_names.reverse() FD_vals = [] FD_uncert = [] for i in FD_names: - FD_vals.append(pint_model_object.get_params_dict(which="all",kind="value")[i]) - FD_uncert.append(pint_model_object.get_params_dict(which="all",kind="uncertainty")[i]) + FD_vals.append( + pint_model_object.get_params_dict(which="all", kind="value")[i] + ) + FD_uncert.append( + pint_model_object.get_params_dict(which="all", kind="uncertainty")[i] + ) FD_vals.append(0.0) FD_uncert.append(0.0) FD_vals = np.array(FD_vals) FD_uncert = np.array(FD_uncert) - delay = np.polyval(FD_vals,np.log10(freqs)) - delta_delay_plus = np.polyval(FD_uncert+FD_vals,np.log10(freqs)) - delta_delay_minus = np.polyval(FD_vals-FD_uncert,np.log10(freqs)) + delay = np.polyval(FD_vals, np.log10(freqs)) + delta_delay_plus = np.polyval(FD_uncert + FD_vals, np.log10(freqs)) + delta_delay_minus = np.polyval(FD_vals - FD_uncert, np.log10(freqs)) if len(FD_vals) - 1 > 1: FD_phrase = "FD1-%s" % (len(FD_vals) - 1) else: FD_phrase = "FD1" - return delay *1e6, delta_delay_plus * 1e6, delta_delay_minus * 1e6 , FD_phrase - - #Get FD params if fitter object is given + return delay * 1e6, delta_delay_plus * 1e6, delta_delay_minus * 1e6, FD_phrase + + # Get FD params if fitter object is given if fitter is not None: - #Check if the fitter object has FD parameters + # Check if the fitter object has FD parameters try: - FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay(fitter.model, freqs*1e-3) - #print(FD_delay) + FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay( + fitter.model, freqs * 1e-3 + ) psr_name = fitter.model.PSR.value - """For when new version of PINT is default on pint_pal + """For when new version of PINT is default on pint_pal FD_delay = 
pint.models.frequency_dependent.FD.FD_delay(fitter.model,freqs) - + """ if show_bin: - nbins = fitter.toas['nbin'].astype(int).min() - P0 = 1/fitter.model.F0.value - P0_bin_max = P0/nbins + nbins = fitter.toas["nbin"].astype(int).min() + P0 = 1 / fitter.model.F0.value + P0_bin_max = P0 / nbins except: print("No FD parameters in this model! Exitting...") - #sys.exit() - - #Get FD params if model object is given + + # Get FD params if model object is given if model_object is not None: - #Check if the model object has FD parameters + # Check if the model object has FD parameters try: - FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay(model_object, freqs*1e-3) + FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay( + model_object, freqs * 1e-3 + ) psr_name = model_object.PSR.value - """For when new version of PINT is default on pint_pal + """For when new version of PINT is default on pint_pal FD_delay = pint.models.frequency_dependent.FD.FD_delay(fitter.model,freqs) - + """ if show_bin: - print("show_bin requires a fitter object, cannot be used with the model alone") + print( + "show_bin requires a fitter object, cannot be used with the model alone" + ) show_bin = False except: print("No FD parameters in this model! Exitting...") - #sys.exit() - - #Get plotting preferences. - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + # Get plotting preferences. 
+ if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (8,4) + figsize = (8, 4) if axs == None: fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) else: fig = plt.gcf() ax1 = axs - if 'ls' in kwargs.keys(): - linestyle = kwargs['ls'] + if "ls" in kwargs.keys(): + linestyle = kwargs["ls"] else: - linestyle = '-' - if 'color' in kwargs.keys(): - clr = kwargs['color'] + linestyle = "-" + if "color" in kwargs.keys(): + clr = kwargs["color"] else: clr = "green" - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 0.2 - if 'loc' in kwargs.keys(): - loc = kwargs['loc'] + if "loc" in kwargs.keys(): + loc = kwargs["loc"] else: loc = "upper right" - #Plot frequency (MHz) vs delay (microseconds) - ax1.plot(freqs,FD_delay,label = legend_text,color=clr,ls=linestyle) - ax1.fill_between(freqs, - FD_delay_err_plus, - FD_delay_err_minus, - color=clr,alpha=alpha) + # Plot frequency (MHz) vs delay (microseconds) + ax1.plot(freqs, FD_delay, label=legend_text, color=clr, ls=linestyle) + ax1.fill_between( + freqs, FD_delay_err_plus, FD_delay_err_minus, color=clr, alpha=alpha + ) if show_bin: if (FD_delay > 0).any(): - ax1.axhline(P0_bin_max*1E6, label="1 profile bin") + ax1.axhline(P0_bin_max * 1e6, label="1 profile bin") if (FD_delay < 0).any(): - ax1.axhline(-P0_bin_max*1E6, label="1 profile bin") + ax1.axhline(-P0_bin_max * 1e6, label="1 profile bin") ax1.set_xlabel("Frequency (MHz)") ax1.set_ylabel("Delay ($\mu$s)") if title: ax1.set_title("%s FD Delay" % psr_name) if legend: - ax1.legend(loc=loc) + ax1.legend(loc=loc) if axs == None: plt.tight_layout() if save: plt.savefig("%s_fd_delay.png" % psr_name) - return + return -def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = False, avg = False, mixed_ecorr=False,\ - whitened = False, save = False, legend = True, title = True, axs = None, **kwargs): + +def plot_residuals_freq( + fitter, + 
restype="postfit", + colorby="f", + plotsig=False, + avg=False, + mixed_ecorr=False, + whitened=False, + save=False, + legend=True, + title=True, + axs=None, + **kwargs, +): """ Make a plot of the residuals vs. frequency @@ -823,8 +764,8 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal 'both' - overplot both the pre and post-fit residuals. colorby ['string']: What to use to determine color/markers 'pta' - color residuals by PTA (default) - 'obs' - color residuals by telescope - 'f' - color residuals by frontend/backend pair (flag not used by all PTAs). + 'obs' - color residuals by telescope + 'f' - color residuals by frontend/backend pair (flag not used by all PTAs). plotsig [boolean] : If True plot number of measurements v. residuals/uncertainty, else v. residuals [default: False]. avg [boolean] : If True and not wideband fitter, will compute and plot epoch-average residuals [default: False]. @@ -852,209 +793,203 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal if fitter.is_wideband: NB = False if avg == True: - raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") + raise ValueError( + "Cannot epoch average wideband residuals, please change 'avg' to False." 
+ ) else: NB = True # Check if want epoch averaged residuals - if avg == True and restype == 'prefit' and mixed_ecorr == True: + if avg == True and restype == "prefit" and mixed_ecorr == True: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True) - elif avg == True and restype == 'postfit' and mixed_ecorr == True: + no_avg_dict = no_ecorr_average( + fitter.toas, fitter.resids_init, use_noise_model=True + ) + elif avg == True and restype == "postfit" and mixed_ecorr == True: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True) - elif avg == True and restype == 'both' and mixed_ecorr == True: + no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True) + elif avg == True and restype == "both" and mixed_ecorr == True: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True) + no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - no_avg_dict_pre = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True) - elif avg == True and restype == 'prefit' and mixed_ecorr == False: + no_avg_dict_pre = no_ecorr_average( + fitter.toas, fitter.resids_init, use_noise_model=True + ) + elif avg == True and restype == "prefit" and mixed_ecorr == False: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'postfit' and mixed_ecorr==False: + elif avg == True and restype == "postfit" and mixed_ecorr == False: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'both' and mixed_ecorr == False: + elif avg == True and restype == "both" and mixed_ecorr == False: avg_dict = 
fitter.resids.ecorr_average(use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - # Get residuals - if 'res' in kwargs.keys(): - res = kwargs['res'] + if "res" in kwargs.keys(): + res = kwargs["res"] else: - if restype == 'prefit': + if restype == "prefit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) - elif avg==True and mixed_ecorr == False: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) + elif avg == True and mixed_ecorr == False: + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'postfit': + res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) + elif restype == "postfit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) elif avg == True: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'both': + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + elif restype == "both": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) - res_pre_no_avg = no_avg_dict_pre['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) + res_pre_no_avg = no_avg_dict_pre["time_resids"].to(u.us) elif avg == 
True and mixed_ecorr == False: - res = avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) res_pre = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - res_pre = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) else: - raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\ - %(restype)) + raise ValueError( + "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'." + % (restype) + ) - - # Check if we want whitened residuals - if whitened == True and ('res' not in kwargs.keys()): - if avg == True and mixed_ecorr == True: - if restype != 'both': + if whitened == True and ("res" not in kwargs.keys()): + if avg == True and mixed_ecorr == True: + if restype != "both": res = whiten_resids(avg_dict, restype=restype) res_no_avg = whiten_resids(no_avg_dict, restype=restype) else: - res = whiten_resids(avg_dict_pre, restype='prefit') - res_pre = whiten_resids(avg_dict, restype='postfit') + res = whiten_resids(avg_dict_pre, restype="prefit") + res_pre = whiten_resids(avg_dict, restype="postfit") res_pre = res_pre.to(u.us) - res_no_avg = whiten_resids(avg_dict_pre, restype='prefit') - res_pre_no_avg = whiten_resids(avg_dict, restype='postfit') + res_no_avg = whiten_resids(avg_dict_pre, restype="prefit") + res_pre_no_avg = whiten_resids(avg_dict, restype="postfit") res_pre_no_avg = res_pre_no_avg.to(u.us) res = res.to(u.us) res_no_avg = res_no_avg.to(u.us) - elif avg == True and mixed_ecorr == False: - if restype != 'both': + elif avg == True and mixed_ecorr == False: + if restype != "both": res = whiten_resids(avg_dict, 
restype=restype) else: - res = whiten_resids(avg_dict_pre, restype='prefit') - res_pre = whiten_resids(avg_dict, restype='postfit') + res = whiten_resids(avg_dict_pre, restype="prefit") + res_pre = whiten_resids(avg_dict, restype="postfit") res_pre = res_pre.to(u.us) - res = res.to(u.us) + res = res.to(u.us) else: - if restype != 'both': + if restype != "both": res = whiten_resids(fitter, restype=restype) else: - res = whiten_resids(fitter, restype='prefit') - res_pre = whiten_resids(fitter, restype='postfit') + res = whiten_resids(fitter, restype="prefit") + res_pre = whiten_resids(fitter, restype="postfit") res_pre = res_pre.to(u.us) res = res.to(u.us) # Get errors - if 'errs' in kwargs.keys(): - errs = kwargs['errs'] + if "errs" in kwargs.keys(): + errs = kwargs["errs"] else: - if restype == 'prefit': + if restype == "prefit": if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.toas.get_errors().to(u.us) - elif restype == 'postfit': + elif restype == "postfit": if NB == True: if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) - elif restype == 'both': + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) + elif restype == "both": if NB == True: if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_pre = avg_dict_pre['errors'].to(u.us) 
- errs_no_avg = no_avg_dict['errors'].to(u.us) - errs_no_avg_pre = no_avg_dict_pre['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_pre = avg_dict_pre["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) + errs_no_avg_pre = no_avg_dict_pre["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) - errs_pre = avg_dict_pre['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_pre = avg_dict_pre["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) - # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr. # Create combined arrays - - if avg == True and mixed_ecorr == True: + + if avg == True and mixed_ecorr == True: combo_res = np.hstack((res, res_no_avg)) combo_errs = np.hstack((errs, errs_no_avg)) - if restype =='both': - combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) + if restype == "both": + combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) combo_res_pre = np.hstack((res_pre, res_no_avg_pre)) # Get freqs - if 'freqs' in kwargs.keys(): - freqs = kwargs['freqs'] + if "freqs" in kwargs.keys(): + freqs = kwargs["freqs"] else: freqs = fitter.toas.get_freqs().value - - + # Get colorby flag values (obs, PTA, febe, etc.) - if 'colorby' in kwargs.keys(): - cb = kwargs['colorby'] + if "colorby" in kwargs.keys(): + cb = kwargs["colorby"] else: cb = np.array(fitter.toas[colorby]) -#. Seems to run a little faster but not robust to obs? -# cb = np.array(fitter.toas.get_flag_value(colorby)[0]) + # . Seems to run a little faster but not robust to obs? 
+ # cb = np.array(fitter.toas.get_flag_value(colorby)[0]) if avg == True: avg_cb = [] - for iis in avg_dict['indices']: + for iis in avg_dict["indices"]: avg_cb.append(cb[iis[0]]) if mixed_ecorr == True: no_avg_cb = [] - for jjs in no_avg_dict['indices']: + for jjs in no_avg_dict["indices"]: no_avg_cb.append(cb[jjs]) no_ecorr_cb = np.array(no_avg_cb) - + cb = np.array(avg_cb) - + # Get the set of unique flag values - if avg==True and mixed_ecorr==True: - cb = np.hstack((cb,no_ecorr_cb)) - + if avg == True and mixed_ecorr == True: + cb = np.hstack((cb, no_ecorr_cb)) + CB = set(cb) - - if colorby== 'pta': - colorscheme = colorschemes['pta'] - markerscheme = markers['pta'] - elif colorby == 'obs': - colorscheme = colorschemes['observatories'] - markerscheme = markers['observatories'] - elif colorby == 'f': - colorscheme = colorschemes['febe'] - markerscheme = markers['febe'] - - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + colorscheme, markerscheme = set_color_and_marker(colorby) + + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (10,4) + figsize = (10, 4) if axs == None: fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) @@ -1063,103 +998,180 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal ax1 = axs for i, c in enumerate(CB): - inds = np.where(cb==c)[0] + inds = np.where(cb == c)[0] if not inds.tolist(): cb_label = "" else: cb_label = cb[inds][0] # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: try: mkr = markerscheme[cb_label] - if restype == 'both': - mkr_pre = '.' + if restype == "both": + mkr_pre = "." except Exception: - mkr = 'x' - if restype == 'both': - mkr_pre = '.' + mkr = "x" + if restype == "both": + mkr_pre = "." 
log.log(1, "Color by Flag doesn't have a marker label!!") - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in kwargs.keys(): + clr = kwargs["color"] else: try: clr = colorscheme[cb_label] except Exception: - clr = 'k' + clr = "k" log.log(1, "Color by Flag doesn't have a color!!") - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 0.5 - + if avg and mixed_ecorr: if plotsig: - combo_sig = combo_res[inds]/combo_errs[inds] - ax1.errorbar(freqs[inds], combo_sig, yerr=len(combo_errs[inds])*[1], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha, picker=True) - if restype == 'both': - combo_sig_pre = combo_res_pre[inds]/combo_errs_pre[inds] - ax1.errorbar(freqs[inds], combo_sig_pre, yerr=len(combo_errs_pre[inds])*[1], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True) + combo_sig = combo_res[inds] / combo_errs[inds] + ax1.errorbar( + freqs[inds], + combo_sig, + yerr=len(combo_errs[inds]) * [1], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + picker=True, + ) + if restype == "both": + combo_sig_pre = combo_res_pre[inds] / combo_errs_pre[inds] + ax1.errorbar( + freqs[inds], + combo_sig_pre, + yerr=len(combo_errs_pre[inds]) * [1], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + picker=True, + ) else: - ax1.errorbar(freqs[inds], combo_res[inds], yerr=combo_errs[inds], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha, picker=True) - if restype == 'both': - ax1.errorbar(freqs[inds], combo_res_pre[inds], yerr=combo_errs_pre[inds], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True) + ax1.errorbar( + freqs[inds], + combo_res[inds], + yerr=combo_errs[inds], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + picker=True, + ) + if restype == "both": + ax1.errorbar( + freqs[inds], + combo_res_pre[inds], + yerr=combo_errs_pre[inds], + fmt=mkr_pre, + color=clr, + label=cb_label 
+ " Prefit", + alpha=alpha, + picker=True, + ) else: if plotsig: - sig = res[inds]/errs[inds] - ax1.errorbar(freqs[inds], sig, yerr=len(errs[inds])*[1], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha, picker=True) - if restype == 'both': - sig_pre = res_pre[inds]/errs_pre[inds] - ax1.errorbar(freqs[inds], sig_pre, yerr=len(errs_pre[inds])*[1], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True) + sig = res[inds] / errs[inds] + ax1.errorbar( + freqs[inds], + sig, + yerr=len(errs[inds]) * [1], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + picker=True, + ) + if restype == "both": + sig_pre = res_pre[inds] / errs_pre[inds] + ax1.errorbar( + freqs[inds], + sig_pre, + yerr=len(errs_pre[inds]) * [1], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + picker=True, + ) else: - ax1.errorbar(freqs[inds], res[inds], yerr=errs[inds], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha, picker=True) - if restype == 'both': - ax1.errorbar(freqs[inds], res_pre[inds], yerr=errs_pre[inds], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True) + ax1.errorbar( + freqs[inds], + res[inds], + yerr=errs[inds], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + picker=True, + ) + if restype == "both": + ax1.errorbar( + freqs[inds], + res_pre[inds], + yerr=errs_pre[inds], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + picker=True, + ) # Set axis - ax1.set_xlabel(r'Frequency (MHz)') + ax1.set_xlabel(r"Frequency (MHz)") ax1.grid(True) if plotsig: if avg and whitened: - ax1.set_ylabel('Average Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual/Uncertainty \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual/Uncertainty') + ax1.set_ylabel("Average Residual/Uncertainty") elif whitened and not avg: - ax1.set_ylabel('Residual/Uncertainty \n (Whitened)', 
multialignment='center') + ax1.set_ylabel( + "Residual/Uncertainty \n (Whitened)", multialignment="center" + ) else: - ax1.set_ylabel('Residual/Uncertainty') + ax1.set_ylabel("Residual/Uncertainty") else: if avg and whitened: - ax1.set_ylabel('Average Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual ($\mu$s) \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual ($\mu$s)') + ax1.set_ylabel("Average Residual ($\mu$s)") elif whitened and not avg: - ax1.set_ylabel('Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel("Residual ($\mu$s) \n (Whitened)", multialignment="center") else: - ax1.set_ylabel('Residual ($\mu$s)') + ax1.set_ylabel("Residual ($\mu$s)") if legend: if len(CB) > 5: - ncol = int(np.ceil(len(CB)/2)) + ncol = int(np.ceil(len(CB) / 2)) y_offset = 1.15 else: ncol = len(CB) y_offset = 1.0 - ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol) + ax1.legend( + loc="upper center", + bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]), + ncol=ncol, + ) if title: if len(CB) > 5: y_offset = 1.1 else: y_offset = 1.0 - plt.title("%s %s frequency residuals" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1]) + plt.title( + "%s %s frequency residuals" % (fitter.model.PSR.value, restype), + y=y_offset + 1.0 / figsize[1], + ) if axs == None: plt.tight_layout() if save: @@ -1172,48 +1184,62 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal ext += "_NB" else: ext += "_WB" - if restype == 'prefit': + if restype == "prefit": ext += "_prefit" - elif restype == 'postfit': + elif restype == "postfit": ext += "_postfit" elif restype == "both": ext += "_pre_post_fit" plt.savefig("%s_resid_v_freq%s.png" % (fitter.model.PSR.value, ext)) - + if axs == None: # Define clickable points - text = ax1.text(0,0,"") - stamp_color= "#FD9927" + text = ax1.text(0, 0, "") + stamp_color = 
"#FD9927" def onclick(event): # Get X and Y axis data xdata = freqs if plotsig: - ydata = (res/errs).decompose().value + ydata = (res / errs).decompose().value else: ydata = res.value # Get x and y data from click xclick = event.xdata yclick = event.ydata # Calculate scaled distance, find closest point index - d = np.sqrt(((xdata - xclick)/10.0)**2 + (ydata - yclick)**2) + d = np.sqrt(((xdata - xclick) / 10.0) ** 2 + (ydata - yclick) ** 2) ind_close = np.where(np.min(d) == d)[0] # highlight clicked point - ax1.scatter(xdata[ind_close], ydata[ind_close], marker = 'x', c = stamp_color) + ax1.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color) # Print point info text.set_position((xdata[ind_close], ydata[ind_close])) if plotsig: - text.set_text("TOA Params:\n Frequency: %s \n Res/Err: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n Frequency: %s \n Res/Err: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) else: - text.set_text("TOA Params:\n Frequency: %s \n Res: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n Frequency: %s \n Res: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) - fig.canvas.mpl_connect('button_press_event', onclick) + fig.canvas.mpl_connect("button_press_event", onclick) return -def plot_dmx_time(fitter, savedmx = False, save = False, legend = True,\ - axs = None, title = True, compare = False, **kwargs): +def plot_dmx_time( + fitter, + savedmx=False, + save=False, + legend=True, + axs=None, + title=True, + compare=False, + **kwargs, +): """ Make a plot of DMX vs. 
time @@ -1251,111 +1277,161 @@ def plot_dmx_time(fitter, savedmx = False, save = False, legend = True,\ dmxname = "%s_dmxparse.nb.out" % (psrname) # Get plotting dmx and error values for WB - if 'dmx' in kwargs.keys(): - DMXs = kwargs['dmx'] + if "dmx" in kwargs.keys(): + DMXs = kwargs["dmx"] else: # get dmx dictionary from pint dmxparse function dmx_dict = pu.dmxparse(fitter, save="dmxparse.out") - DMXs = dmx_dict['dmxs'].value - DMX_vErrs = dmx_dict['dmx_verrs'].value - DMX_center_MJD = dmx_dict['dmxeps'].value - DMX_center_Year = (DMX_center_MJD- 51544.0)/365.25 + 2000.0 + DMXs = dmx_dict["dmxs"].value + DMX_vErrs = dmx_dict["dmx_verrs"].value + DMX_center_MJD = dmx_dict["dmxeps"].value + DMX_center_Year = (DMX_center_MJD - 51544.0) / 365.25 + 2000.0 # move file name if savedmx: os.rename("dmxparse.out", dmxname) # Double check/overwrite errors if necessary - if 'errs' in kwargs.keys(): - DMX_vErrs = kwargs['errs'] + if "errs" in kwargs.keys(): + DMX_vErrs = kwargs["errs"] # Double check/overwrite dmx mjd epochs if necessary - if 'mjds' in kwargs.keys(): - DMX_center_MJD = kwargs['mjds'] - DMX_center_Year = (DMX_center_MJD- 51544.0)/365.25 + 2000.0 + if "mjds" in kwargs.keys(): + DMX_center_MJD = kwargs["mjds"] + DMX_center_Year = (DMX_center_MJD - 51544.0) / 365.25 + 2000.0 # If we want to compare WB to NB, we need to look for the right output file if compare == True: # Look for other dmx file if NB: - #log.log(1, "Searching for file: %s_dmxparse.wb.out" % (psrname)) - if not os.path.isfile("%s_dmxparse.wb.out"%(psrname)): + # log.log(1, "Searching for file: %s_dmxparse.wb.out" % (psrname)) + if not os.path.isfile("%s_dmxparse.wb.out" % (psrname)): raise RuntimeError("Cannot find Wideband DMX parse output file.") else: # Get the values from the DMX parse file - dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt("%s_dmxparse.wb.out"%(psrname),\ - unpack=True, usecols=(0,1,2,3,4)) + dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt( 
+ "%s_dmxparse.wb.out" % (psrname), + unpack=True, + usecols=(0, 1, 2, 3, 4), + ) else: - #log.log(1, "Searching for file: %s_dmxparse.nb.out" % (psrname)) - if not os.path.isfile("%s_dmxparse.nb.out"%(psrname)): + # log.log(1, "Searching for file: %s_dmxparse.nb.out" % (psrname)) + if not os.path.isfile("%s_dmxparse.nb.out" % (psrname)): raise RuntimeError("Cannot find Narrowband DMX parse output file.") else: # Get the values from the DMX parse file - dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt("%s_dmxparse.nb.out"%(psrname),\ - unpack=True, usecols=(0,1,2,3,4)) - dmx_mid_yr = (dmx_epochs- 51544.0)/365.25 + 2000.0 - + dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt( + "%s_dmxparse.nb.out" % (psrname), + unpack=True, + usecols=(0, 1, 2, 3, 4), + ) + dmx_mid_yr = (dmx_epochs - 51544.0) / 365.25 + 2000.0 + # Define the plotting function if axs == None: - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (10,4) + figsize = (10, 4) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) else: ax1 = axs # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: - mkr = 's' + mkr = "s" if compare: - mkr_nb = 'o' - if 'color' in kwargs.keys(): - clr = kwargs['color'] + mkr_nb = "o" + if "color" in kwargs.keys(): + clr = kwargs["color"] else: - clr = 'gray' + clr = "gray" if compare: - clr_nb = 'k' - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + clr_nb = "k" + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 1.0 # Not actually plot if NB and not compare: - ax1.errorbar(DMX_center_Year, DMXs*10**3, yerr=DMX_vErrs*10**3, fmt='.', c = clr, marker = mkr, \ - label="Narrowband") + ax1.errorbar( + DMX_center_Year, + DMXs * 10**3, + yerr=DMX_vErrs * 10**3, + fmt=".", + c=clr, + marker=mkr, + label="Narrowband", + ) elif not NB and not compare: 
- ax1.errorbar(DMX_center_Year, DMXs*10**3, yerr=DMX_vErrs*10**3, fmt='.', c = clr, marker = mkr, \ - label="Wideband") + ax1.errorbar( + DMX_center_Year, + DMXs * 10**3, + yerr=DMX_vErrs * 10**3, + fmt=".", + c=clr, + marker=mkr, + label="Wideband", + ) elif compare: if NB: - ax1.errorbar(DMX_center_Year, DMXs*10**3, yerr=DMX_vErrs*10**3, fmt='.', c = clr, marker = mkr, \ - label="Narrowband") - ax1.errorbar(dmx_mid_yr, nb_dmx*10**3, yerr = nb_dmx_var*10**3, fmt = '.', color = clr_nb, marker = mkr_nb, \ - label='Wideband') + ax1.errorbar( + DMX_center_Year, + DMXs * 10**3, + yerr=DMX_vErrs * 10**3, + fmt=".", + c=clr, + marker=mkr, + label="Narrowband", + ) + ax1.errorbar( + dmx_mid_yr, + nb_dmx * 10**3, + yerr=nb_dmx_var * 10**3, + fmt=".", + color=clr_nb, + marker=mkr_nb, + label="Wideband", + ) else: - ax1.errorbar(DMX_center_Year, DMXs*10**3, yerr=DMX_vErrs*10**3, fmt='.', c = clr, marker = mkr, \ - label="Wideband") - ax1.errorbar(dmx_mid_yr, nb_dmx*10**3, yerr = nb_dmx_var*10**3, fmt = '.', color = clr_nb, marker = mkr_nb, \ - label='Narrowband') + ax1.errorbar( + DMX_center_Year, + DMXs * 10**3, + yerr=DMX_vErrs * 10**3, + fmt=".", + c=clr, + marker=mkr, + label="Wideband", + ) + ax1.errorbar( + dmx_mid_yr, + nb_dmx * 10**3, + yerr=nb_dmx_var * 10**3, + fmt=".", + color=clr_nb, + marker=mkr_nb, + label="Narrowband", + ) # Set second axis - ax1.set_xlabel(r'Year') + ax1.set_xlabel(r"Year") ax1.grid(True) ax2 = ax1.twiny() - mjd0 = ((ax1.get_xlim()[0])-2004.0)*365.25+53005. - mjd1 = ((ax1.get_xlim()[1])-2004.0)*365.25+53005. 
+ mjd0 = ((ax1.get_xlim()[0]) - 2004.0) * 365.25 + 53005.0 + mjd1 = ((ax1.get_xlim()[1]) - 2004.0) * 365.25 + 53005.0 ax2.set_xlim(mjd0, mjd1) ax1.set_ylabel(r"DMX ($10^{-3}$ pc cm$^{-3}$)") if legend: - ax1.legend(loc='best') + ax1.legend(loc="best") if title: if NB and not compare: - plt.title("%s narrowband dmx" % (psrname), y=1.0+1.0/figsize[1]) + plt.title("%s narrowband dmx" % (psrname), y=1.0 + 1.0 / figsize[1]) elif not NB and not compare: - plt.title("%s wideband dmx" % (psrname), y=1.0+1.0/figsize[1]) + plt.title("%s wideband dmx" % (psrname), y=1.0 + 1.0 / figsize[1]) elif compare: - plt.title("%s narrowband and wideband dmx" % (psrname), y=1.0+1.0/figsize[1]) + plt.title( + "%s narrowband and wideband dmx" % (psrname), y=1.0 + 1.0 / figsize[1] + ) if axs == None: plt.tight_layout() if save: @@ -1370,32 +1446,36 @@ def plot_dmx_time(fitter, savedmx = False, save = False, legend = True,\ if axs == None: # Define clickable points - text = ax1.text(0,0,"") + text = ax1.text(0, 0, "") # Define color for highlighting points stamp_color = "#FD9927" def onclick(event): # Get X and Y axis data xdata = DMX_center_Year - ydata = DMXs*10**3 + ydata = DMXs * 10**3 # Get x and y data from click xclick = event.xdata yclick = event.ydata # Calculate scaled distance, find closest point index - d = np.sqrt(((xdata - xclick)/1000.0)**2 + (ydata - yclick)**2) + d = np.sqrt(((xdata - xclick) / 1000.0) ** 2 + (ydata - yclick) ** 2) ind_close = np.where(np.min(d) == d)[0] # highlight clicked point - ax2.scatter(xdata[ind_close], ydata[ind_close], marker = 's', c = stamp_color) + ax2.scatter(xdata[ind_close], ydata[ind_close], marker="s", c=stamp_color) # Print point info text.set_position((xdata[ind_close], ydata[ind_close])) - text.set_text("DMX Params:\n MJD: %s \n DMX: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "DMX Params:\n MJD: %s \n DMX: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) 
- fig.canvas.mpl_connect('button_press_event', onclick) + fig.canvas.mpl_connect("button_press_event", onclick) return -def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model = None): - """ Make simple dmx vs. time plot with dmxout file(s) as input + +def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model=None): + """Make simple dmx vs. time plot with dmxout file(s) as input Parameters ========== @@ -1416,41 +1496,72 @@ def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model = None): dmxout information (mjd, val, err, r1, r2) for each label """ from astropy.time import Time - if isinstance(dmxout_files, str): dmxout_files = [dmxout_files] - if isinstance(labels, str): labels = [labels] - - figsize = (10,4) + + if isinstance(dmxout_files, str): + dmxout_files = [dmxout_files] + if isinstance(labels, str): + labels = [labels] + + figsize = (10, 4) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) - ax1.set_xlabel(r'Year') + ax1.set_xlabel(r"Year") ax1.set_ylabel(r"DMX ($10^{-3}$ pc cm$^{-3}$)") ax1.grid(True) ax2 = ax1.twiny() - ax2.set_xlabel('MJD') + ax2.set_xlabel("MJD") dmxDict = {} - for ii,(df,lab) in enumerate(zip(dmxout_files,labels)): - dmxmjd, dmxval, dmxerr, dmxr1, dmxr2 = np.loadtxt(df, unpack=True, usecols=range(0,5)) - idmxDict = {'mjd':dmxmjd,'val':dmxval,'err':dmxerr,'r1':dmxr1,'r2':dmxr2} - ax2.errorbar(dmxmjd, dmxval*10**3, yerr=dmxerr*10**3, label=lab, marker='o', ls='', markerfacecolor='none') + for ii, (df, lab) in enumerate(zip(dmxout_files, labels)): + dmxmjd, dmxval, dmxerr, dmxr1, dmxr2 = np.loadtxt( + df, unpack=True, usecols=range(0, 5) + ) + idmxDict = { + "mjd": dmxmjd, + "val": dmxval, + "err": dmxerr, + "r1": dmxr1, + "r2": dmxr2, + } + ax2.errorbar( + dmxmjd, + dmxval * 10**3, + yerr=dmxerr * 10**3, + label=lab, + marker="o", + ls="", + markerfacecolor="none", + ) dmxDict[lab] = idmxDict # set ax1 lims (year) based on ax2 lims (mjd) mjd_xlo, mjd_xhi = ax2.get_xlim() - dy_xlo 
= Time(mjd_xlo,format='mjd').decimalyear - dy_xhi = Time(mjd_xhi,format='mjd').decimalyear - ax1.set_xlim(dy_xlo,dy_xhi) + dy_xlo = Time(mjd_xlo, format="mjd").decimalyear + dy_xhi = Time(mjd_xhi, format="mjd").decimalyear + ax1.set_xlim(dy_xlo, dy_xhi) # capture ylim orig_ylim = ax2.get_ylim() - if psrname: ax1.text(0.975,0.05,psrname,transform=ax1.transAxes,size=18,c='lightgray', - horizontalalignment='right', verticalalignment='bottom') + if psrname: + ax1.text( + 0.975, + 0.05, + psrname, + transform=ax1.transAxes, + size=18, + c="lightgray", + horizontalalignment="right", + verticalalignment="bottom", + ) if model: from pint.simulation import make_fake_toas_fromMJDs from pint_pal.lite_utils import remove_noise - fake_mjds = np.linspace(np.min(dmxmjd),np.max(dmxmjd),num=int(np.max(dmxmjd)-np.min(dmxmjd))) - fake_mjdTime = Time(fake_mjds,format='mjd') + + fake_mjds = np.linspace( + np.min(dmxmjd), np.max(dmxmjd), num=int(np.max(dmxmjd) - np.min(dmxmjd)) + ) + fake_mjdTime = Time(fake_mjds, format="mjd") # copy the model and add sw component mo_swm = copy.deepcopy(model) @@ -1458,20 +1569,22 @@ def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model = None): mo_swm.NE_SW.value = 10.0 # generate fake TOAs and calculate excess DM due to solar wind - fake_toas = make_fake_toas_fromMJDs(fake_mjdTime,mo_swm) - sun_dm_delays = mo_swm.solar_wind_dm(fake_toas)*10**3 # same scaling as above - ax2.plot(fake_mjds,sun_dm_delays,c='lightgray',label='Excess DM') + fake_toas = make_fake_toas_fromMJDs(fake_mjdTime, mo_swm) + sun_dm_delays = mo_swm.solar_wind_dm(fake_toas) * 10**3 # same scaling as above + ax2.plot(fake_mjds, sun_dm_delays, c="lightgray", label="Excess DM") # don't change ylim based on excess dm trace, if plotted ax2.set_ylim(orig_ylim) - ax2.legend(loc='best') + ax2.legend(loc="best") plt.tight_layout() - if outfile: plt.savefig(outfile) + if outfile: + plt.savefig(outfile) return dmxDict + def plot_dmx_diffs_nbwb(dmxDict, show_missing=True, 
psrname=None, outfile=None): - """ Uses output dmxDict from plot_dmxout() to plot diffs between simultaneous nb-wb values + """Uses output dmxDict from plot_dmxout() to plot diffs between simultaneous nb-wb values Parameters ========== @@ -1489,78 +1602,126 @@ def plot_dmx_diffs_nbwb(dmxDict, show_missing=True, psrname=None, outfile=None): None? """ # should check that both nb/wb entries exist first... - nbmjd = dmxDict['nb']['mjd'] - wbmjd = dmxDict['wb']['mjd'] + nbmjd = dmxDict["nb"]["mjd"] + wbmjd = dmxDict["wb"]["mjd"] allmjds = set(list(nbmjd) + list(wbmjd)) # May need slightly more curation if nb/wb mjds are *almost* identical - wbonly = allmjds-set(nbmjd) - nbonly = allmjds-set(wbmjd) + wbonly = allmjds - set(nbmjd) + nbonly = allmjds - set(wbmjd) both = set(nbmjd).intersection(set(wbmjd)) # assemble arrays of common inds for plotting later; probably a better way to do this nb_common_inds = [] wb_common_inds = [] for b in both: - nb_common_inds.append(np.where(nbmjd==b)[0][0]) - wb_common_inds.append(np.where(wbmjd==b)[0][0]) + nb_common_inds.append(np.where(nbmjd == b)[0][0]) + wb_common_inds.append(np.where(wbmjd == b)[0][0]) nb_common_inds, wb_common_inds = np.array(nb_common_inds), np.array(wb_common_inds) - nbdmx,nbdmxerr = dmxDict['nb']['val'],dmxDict['nb']['err'] - wbdmx,wbdmxerr = dmxDict['wb']['val'],dmxDict['wb']['err'] + nbdmx, nbdmxerr = dmxDict["nb"]["val"], dmxDict["nb"]["err"] + wbdmx, wbdmxerr = dmxDict["wb"]["val"], dmxDict["wb"]["err"] # propagate errors as quadrature sum, though Michael thinks geometric mean might be better? 
- nbwb_dmx_diffs = nbdmx[nb_common_inds]-wbdmx[wb_common_inds] - nbwb_err_prop = np.sqrt(nbdmxerr[nb_common_inds]**2 + wbdmxerr[wb_common_inds]**2) + nbwb_dmx_diffs = nbdmx[nb_common_inds] - wbdmx[wb_common_inds] + nbwb_err_prop = np.sqrt( + nbdmxerr[nb_common_inds] ** 2 + wbdmxerr[wb_common_inds] ** 2 + ) # make the plot from astropy.time import Time - figsize = (10,4) + + figsize = (10, 4) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) - ax1.set_xlabel(r'Year') + ax1.set_xlabel(r"Year") ax1.set_ylabel(r"$\Delta$DMX ($10^{-3}$ pc cm$^{-3}$)") ax1.grid(True) ax2 = ax1.twiny() - ax2.set_xlabel('MJD') + ax2.set_xlabel("MJD") botharray = np.array(list(both)) - mjdbothTime = Time(botharray,format='mjd') + mjdbothTime = Time(botharray, format="mjd") dybothTime = mjdbothTime.decimalyear minmjd, maxmjd = np.sort(botharray)[0], np.sort(botharray)[-1] ax2.set_xlim(minmjd, maxmjd) - ax1.errorbar(dybothTime,nbwb_dmx_diffs*1e3,yerr=nbwb_err_prop*1e3, - marker='o', ls='', markerfacecolor='none',label='nb - wb') + ax1.errorbar( + dybothTime, + nbwb_dmx_diffs * 1e3, + yerr=nbwb_err_prop * 1e3, + marker="o", + ls="", + markerfacecolor="none", + label="nb - wb", + ) # want arrows indicating missing nb/wb DMX values to difference if show_missing: stddiffs = np.std(nbwb_dmx_diffs) - mjdnbonlyTime = Time(np.array(list(nbonly)),format='mjd') + mjdnbonlyTime = Time(np.array(list(nbonly)), format="mjd") dynbonlyTime = mjdnbonlyTime.decimalyear - ax1.scatter(dynbonlyTime,np.zeros(len(nbonly))+stddiffs*1e3,marker='v',c='r',label='nb only') + ax1.scatter( + dynbonlyTime, + np.zeros(len(nbonly)) + stddiffs * 1e3, + marker="v", + c="r", + label="nb only", + ) nbonlystr = [str(no) for no in nbonly] - if nbonlystr: log.warning(f"nb-only measurements available for MJDs: {', '.join(nbonlystr)}") + if nbonlystr: + log.warning( + f"nb-only measurements available for MJDs: {', '.join(nbonlystr)}" + ) - mjdwbonlyTime = Time(np.array(list(wbonly)),format='mjd') + mjdwbonlyTime = 
Time(np.array(list(wbonly)), format="mjd") dywbonlyTime = mjdwbonlyTime.decimalyear - ax1.scatter(dywbonlyTime,np.zeros(len(wbonly))-stddiffs*1e3,marker='^',c='r',label='wb only') + ax1.scatter( + dywbonlyTime, + np.zeros(len(wbonly)) - stddiffs * 1e3, + marker="^", + c="r", + label="wb only", + ) wbonlystr = [str(wo) for wo in wbonly] - if wbonlystr: log.warning(f"wb-only measurements available for MJDs: {', '.join(wbonlystr)}") - - if psrname: ax1.text(0.975,0.05,psrname,transform=ax1.transAxes,size=18,c='lightgray', - horizontalalignment='right', verticalalignment='bottom') + if wbonlystr: + log.warning( + f"wb-only measurements available for MJDs: {', '.join(wbonlystr)}" + ) + + if psrname: + ax1.text( + 0.975, + 0.05, + psrname, + transform=ax1.transAxes, + size=18, + c="lightgray", + horizontalalignment="right", + verticalalignment="bottom", + ) plt.tight_layout() - ax1.legend(loc='best') - if outfile: plt.savefig(outfile) + ax1.legend(loc="best") + if outfile: + plt.savefig(outfile) return None + # Now we want to make wideband DM vs. time plot, this uses the premade dm_resids from PINT -def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False, legend = True, title = True,\ - axs = None, mean_sub = True, **kwargs): +def plot_dm_residuals( + fitter, + restype="postfit", + plotsig=False, + save=False, + legend=True, + title=True, + axs=None, + mean_sub=True, + **kwargs, +): """ Make a plot of Wideband timing DM residuals v. time. @@ -1597,60 +1758,64 @@ def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False # Check if wideband if not fitter.is_wideband: - raise RuntimeError("Error: Narrowband TOAs have no DM residuals, use `plot_dmx_time() instead.") + raise RuntimeError( + "Error: Narrowband TOAs have no DM residuals, use `plot_dmx_time() instead." 
+ ) # Get the DM residuals - if 'dmres' in kwargs.keys(): - dm_resids = kwargs['dmres'] + if "dmres" in kwargs.keys(): + dm_resids = kwargs["dmres"] else: if restype == "postfit": - dm_resids = fitter.resids.residual_objs['dm'].resids.value - elif restype == 'prefit': - dm_resids = fitter.resids_init.residual_objs['dm'].resids.value - elif restype == 'both': - dm_resids = fitter.resids.residual_objs['dm'].resids.value - dm_resids_init = fitter.resids_init.residual_objs['dm'].resids.value + dm_resids = fitter.resids.residual_objs["dm"].resids.value + elif restype == "prefit": + dm_resids = fitter.resids_init.residual_objs["dm"].resids.value + elif restype == "both": + dm_resids = fitter.resids.residual_objs["dm"].resids.value + dm_resids_init = fitter.resids_init.residual_objs["dm"].resids.value # Get the DM residual errors if "errs" in kwargs.keys(): - dm_error = kwargs['errs'] + dm_error = kwargs["errs"] else: - if restype == 'postfit': - dm_error = fitter.resids.residual_objs['dm'].get_data_error().value - elif restype == 'prefit': - dm_error = fitter.resids_init.residual_objs['dm'].get_data_error().value - elif restype == 'both': - dm_error = fitter.resids.residual_objs['dm'].get_data_error().value - dm_error_init = fitter.resids_init.residual_objs['dm'].get_data_error().value - + if restype == "postfit": + dm_error = fitter.resids.residual_objs["dm"].get_data_error().value + elif restype == "prefit": + dm_error = fitter.resids_init.residual_objs["dm"].get_data_error().value + elif restype == "both": + dm_error = fitter.resids.residual_objs["dm"].get_data_error().value + dm_error_init = ( + fitter.resids_init.residual_objs["dm"].get_data_error().value + ) + # Get the MJDs - if 'mjds' in kwargs.keys(): - mjds = kwargs['mjds'] + if "mjds" in kwargs.keys(): + mjds = kwargs["mjds"] else: mjds = fitter.toas.get_mjds().value - years = (mjds - 51544.0)/365.25 + 2000.0 - + years = (mjds - 51544.0) / 365.25 + 2000.0 + # Get the receiver-backend combos - if 'rcvr_bcknds' 
in kwargs.keys(): - rcvr_bcknds = kwargs['rcvr_bcknds'] + if "rcvr_bcknds" in kwargs.keys(): + rcvr_bcknds = kwargs["rcvr_bcknds"] else: - rcvr_bcknds = np.array(fitter.toas.get_flag_value('f')[0]) + rcvr_bcknds = np.array(fitter.toas.get_flag_value("f")[0]) # Get the set of unique receiver-bandend combos RCVR_BCKNDS = set(rcvr_bcknds) # If we don't want mean subtraced data we add the mean if not mean_sub: - if 'dmres' in kwargs.keys(): + if "dmres" in kwargs.keys(): dm_avg = dm_resids else: - dm_avg = fitter.resids.residual_objs['dm'].dm_data + dm_avg = fitter.resids.residual_objs["dm"].dm_data if "errs" in kwargs.keys(): dm_avg_err = dm_error else: - dm_avg_err = fitter.resids.residual_objs['dm'].get_data_error().value - DM0 = np.average(dm_avg, weights=(dm_avg_err)**-2) + dm_avg_err = fitter.resids.residual_objs["dm"].get_data_error().value + DM0 = np.average(dm_avg, weights=(dm_avg_err) ** -2) dm_resids += DM0.value - if restype == 'both': + if restype == "both": dm_resids_init += DM0.value if plotsig: ylabel = r"DM/Uncertainty" @@ -1661,84 +1826,133 @@ def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False ylabel = r"$\Delta$DM/Uncertainty" else: ylabel = r"$\Delta$DM [cm$^{-3}$ pc]" - + if axs == None: - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (10,4) + figsize = (10, 4) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) else: ax1 = axs for i, r_b in enumerate(RCVR_BCKNDS): - inds = np.where(rcvr_bcknds==r_b)[0] + inds = np.where(rcvr_bcknds == r_b)[0] if not inds.tolist(): r_b_label = "" else: r_b_label = rcvr_bcknds[inds][0] # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: mkr = markers[r_b_label] - if restype == 'both': - mkr_pre = '.' - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if restype == "both": + mkr_pre = "." 
+ if "color" in kwargs.keys(): + clr = kwargs["color"] else: clr = colorscheme[r_b_label] - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 0.5 # Do plotting command - if restype == 'both': + if restype == "both": if plotsig: - dm_sig = dm_resids[inds]/dm_error[inds] - dm_sig_pre = dm_resids_init[inds]/dm_error[inds] - ax1.errorbar(years[inds], dm_sig, yerr=len(dm_error[inds])*[1], fmt=markers[r_b_label], \ - color=colorscheme[r_b_label], label=r_b_label, alpha = 0.5) - ax1.errorbar(years[inds], dm_sig_pre, yerr=len(dm_error_init[inds])*[1], fmt=markers[r_b_label], \ - color=colorscheme[r_b_label], label=r_b_label+" Prefit", alpha = 0.5) + dm_sig = dm_resids[inds] / dm_error[inds] + dm_sig_pre = dm_resids_init[inds] / dm_error[inds] + ax1.errorbar( + years[inds], + dm_sig, + yerr=len(dm_error[inds]) * [1], + fmt=markers[r_b_label], + color=colorscheme[r_b_label], + label=r_b_label, + alpha=0.5, + ) + ax1.errorbar( + years[inds], + dm_sig_pre, + yerr=len(dm_error_init[inds]) * [1], + fmt=markers[r_b_label], + color=colorscheme[r_b_label], + label=r_b_label + " Prefit", + alpha=0.5, + ) else: - ax1.errorbar(years[inds], dm_resids[inds], yerr=dm_error[inds], fmt=markers[r_b_label], \ - color=colorscheme[r_b_label], label=r_b_label, alpha = 0.5) - ax1.errorbar(years[inds], dm_resids_init[inds], yerr=dm_error_init[inds], fmt=markers[r_b_label], \ - color=colorscheme[r_b_label], label=r_b_label+" Prefit", alpha = 0.5) + ax1.errorbar( + years[inds], + dm_resids[inds], + yerr=dm_error[inds], + fmt=markers[r_b_label], + color=colorscheme[r_b_label], + label=r_b_label, + alpha=0.5, + ) + ax1.errorbar( + years[inds], + dm_resids_init[inds], + yerr=dm_error_init[inds], + fmt=markers[r_b_label], + color=colorscheme[r_b_label], + label=r_b_label + " Prefit", + alpha=0.5, + ) else: if plotsig: - dm_sig = dm_resids[inds]/dm_error[inds] - ax1.errorbar(years[inds], dm_sig, yerr=len(dm_error[inds])*[1], 
fmt=markers[r_b_label], \ - color=colorscheme[r_b_label], label=r_b_label, alpha = 0.5) + dm_sig = dm_resids[inds] / dm_error[inds] + ax1.errorbar( + years[inds], + dm_sig, + yerr=len(dm_error[inds]) * [1], + fmt=markers[r_b_label], + color=colorscheme[r_b_label], + label=r_b_label, + alpha=0.5, + ) else: - ax1.errorbar(years[inds], dm_resids[inds], yerr=dm_error[inds], fmt=markers[r_b_label], \ - color=colorscheme[r_b_label], label=r_b_label, alpha = 0.5) + ax1.errorbar( + years[inds], + dm_resids[inds], + yerr=dm_error[inds], + fmt=markers[r_b_label], + color=colorscheme[r_b_label], + label=r_b_label, + alpha=0.5, + ) # Set second axis ax1.set_ylabel(ylabel) - ax1.set_xlabel(r'Year') + ax1.set_xlabel(r"Year") ax1.grid(True) ax2 = ax1.twiny() - mjd0 = ((ax1.get_xlim()[0])-2004.0)*365.25+53005. - mjd1 = ((ax1.get_xlim()[1])-2004.0)*365.25+53005. + mjd0 = ((ax1.get_xlim()[0]) - 2004.0) * 365.25 + 53005.0 + mjd1 = ((ax1.get_xlim()[1]) - 2004.0) * 365.25 + 53005.0 ax2.set_xlim(mjd0, mjd1) if legend: if len(RCVR_BCKNDS) > 5: - ncol = int(np.ceil(len(RCVR_BCKNDS)/2)) + ncol = int(np.ceil(len(RCVR_BCKNDS) / 2)) y_offset = 1.15 else: ncol = len(RCVR_BCKNDS) y_offset = 1.0 - ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol) + ax1.legend( + loc="upper center", + bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]), + ncol=ncol, + ) if title: if len(RCVR_BCKNDS) > 5: y_offset = 1.1 else: y_offset = 1.0 - plt.title("%s %s DM residuals" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1]) + plt.title( + "%s %s DM residuals" % (fitter.model.PSR.value, restype), + y=y_offset + 1.0 / figsize[1], + ) if axs == None: plt.tight_layout() if save: @@ -1753,7 +1967,7 @@ def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False if axs == None: # Define clickable points - text = ax2.text(0,0,"") + text = ax2.text(0, 0, "") # Define point highlight color if "430_ASP" in RCVR_BCKNDS or "430_PUPPI" in RCVR_BCKNDS: @@ 
-1765,27 +1979,42 @@ def onclick(event): # Get X and Y axis data xdata = mjds if plotsig: - ydata = dm_resids/dm_error + ydata = dm_resids / dm_error else: ydata = dm_resids # Get x and y data from click xclick = event.xdata yclick = event.ydata # Calculate scaled distance, find closest point index - d = np.sqrt(((xdata - xclick)/1000.0)**2 + (ydata - yclick)**2) + d = np.sqrt(((xdata - xclick) / 1000.0) ** 2 + (ydata - yclick) ** 2) ind_close = np.where(np.min(d) == d)[0] # highlight clicked point - ax2.scatter(xdata[ind_close], ydata[ind_close], marker = 'x', c = stamp_color) + ax2.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color) # Print point info text.set_position((xdata[ind_close], ydata[ind_close])) - text.set_text("DM Params:\n MJD: %s \n Res: %.6f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "DM Params:\n MJD: %s \n Res: %.6f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) + + fig.canvas.mpl_connect("button_press_event", onclick) - fig.canvas.mpl_connect('button_press_event', onclick) - return -def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = 50, avg = False, whitened = False, \ - save = False, legend = True, title = True, axs = None, **kwargs): + +def plot_measurements_v_res( + fitter, + restype="postfit", + plotsig=False, + nbin=50, + avg=False, + whitened=False, + save=False, + legend=True, + title=True, + axs=None, + **kwargs, +): """ Make a histogram of number of measurements v. residuals @@ -1808,7 +2037,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = title [boolean] : If False, will not print plot title [default: True]. axs [string] : If not None, should be defined subplot value and the figure will be used as part of a larger figure [default: None]. - + Optional Arguments: -------------------- res [list/array] : List or array of residual values to plot. 
Will override values from fitter object. @@ -1825,188 +2054,228 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = if fitter.is_wideband: NB = False if avg == True: - raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") + raise ValueError( + "Cannot epoch average wideband residuals, please change 'avg' to False." + ) else: NB = True - + # Check if want epoch averaged residuals - if avg == True and restype == 'prefit': + if avg == True and restype == "prefit": avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'postfit': + elif avg == True and restype == "postfit": avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'both': + elif avg == True and restype == "both": avg_dict = fitter.resids.ecorr_average(use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - - + # Get residuals - if 'res' in kwargs.keys(): - res = kwargs['res'] + if "res" in kwargs.keys(): + res = kwargs["res"] else: - if restype == 'prefit': + if restype == "prefit": if NB == True: if avg == True: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'postfit': + res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) + elif restype == "postfit": if NB == True: if avg == True: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'both': + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + elif restype == "both": if NB == True: if avg == True: - res = avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) + res = 
avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) res_pre = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - res_pre = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) else: - raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\ - %(restype)) - + raise ValueError( + "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'." + % (restype) + ) + # Check if we want whitened residuals - if whitened == True and ('res' not in kwargs.keys()): + if whitened == True and ("res" not in kwargs.keys()): if avg == True: - if restype != 'both': + if restype != "both": res = whiten_resids(avg_dict, restype=restype) else: - res = whiten_resids(avg_dict_pre, restype='prefit') - res_pre = whiten_resids(avg_dict, restype='postfit') + res = whiten_resids(avg_dict_pre, restype="prefit") + res_pre = whiten_resids(avg_dict, restype="postfit") res_pre = res_pre.to(u.us) - res = res.to(u.us) + res = res.to(u.us) else: - if restype != 'both': + if restype != "both": res = whiten_resids(fitter, restype=restype) else: - res = whiten_resids(fitter, restype='prefit') - res_pre = whiten_resids(fitter, restype='postfit') + res = whiten_resids(fitter, restype="prefit") + res_pre = whiten_resids(fitter, restype="postfit") res_pre = res_pre.to(u.us) res = res.to(u.us) - + # Get errors - if 'errs' in kwargs.keys(): - errs = kwargs['errs'] + if "errs" in kwargs.keys(): + errs = kwargs["errs"] else: - if restype == 'prefit': + if restype == "prefit": if avg == True: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.toas.get_errors().to(u.us) - elif restype == 'postfit': + elif restype == "postfit": 
if NB == True: if avg == True: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) - elif restype == 'both': + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) + elif restype == "both": if NB == True: if avg == True: - errs = avg_dict['errors'].to(u.us) - errs_pre = avg_dict_pre['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_pre = avg_dict_pre["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) - + # Get receiver backends - if 'rcvr_bcknds' in kwargs.keys(): - rcvr_bcknds = kwargs['rcvr_bcknds'] + if "rcvr_bcknds" in kwargs.keys(): + rcvr_bcknds = kwargs["rcvr_bcknds"] else: - rcvr_bcknds = np.array(fitter.toas.get_flag_value('f')[0]) + rcvr_bcknds = np.array(fitter.toas.get_flag_value("f")[0]) if avg == True: avg_rcvr_bcknds = [] - for iis in avg_dict['indices']: + for iis in avg_dict["indices"]: avg_rcvr_bcknds.append(rcvr_bcknds[iis[0]]) rcvr_bcknds = np.array(avg_rcvr_bcknds) # Get the set of unique receiver-bandend combos RCVR_BCKNDS = set(rcvr_bcknds) - + if axs == None: - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (10,4) + figsize = (10, 4) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) else: ax1 = axs - - xmax=0 + + xmax = 0 for i, r_b in enumerate(RCVR_BCKNDS): - inds = np.where(rcvr_bcknds==r_b)[0] + inds = np.where(rcvr_bcknds == r_b)[0] if not inds.tolist(): r_b_label = "" else: r_b_label = rcvr_bcknds[inds][0] # Get plot preferences - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in 
kwargs.keys(): + clr = kwargs["color"] else: clr = colorscheme[r_b_label] if plotsig: - sig = res[inds]/errs[inds] - ax1.hist(sig, nbin, histtype='step', color=colorscheme[r_b_label], label=r_b_label) - xmax = max(xmax,max(sig),max(-sig)) - if restype == 'both': - sig_pre = res_pre[inds]/errs_pre[inds] - ax1.hist(sig_pre, nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\ - label=r_b_label+" Prefit") + sig = res[inds] / errs[inds] + ax1.hist( + sig, + nbin, + histtype="step", + color=colorscheme[r_b_label], + label=r_b_label, + ) + xmax = max(xmax, max(sig), max(-sig)) + if restype == "both": + sig_pre = res_pre[inds] / errs_pre[inds] + ax1.hist( + sig_pre, + nbin, + histtype="step", + color=colorscheme[r_b_label], + linestyle="--", + label=r_b_label + " Prefit", + ) else: - ax1.hist(res[inds], nbin, histtype='step', color=colorscheme[r_b_label], label=r_b_label) - xmax = max(xmax,max(res[inds]),max(-res[inds])) - if restype == 'both': - ax1.hist(res[inds], nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\ - label=r_b_label+" Prefit") - + ax1.hist( + res[inds], + nbin, + histtype="step", + color=colorscheme[r_b_label], + label=r_b_label, + ) + xmax = max(xmax, max(res[inds]), max(-res[inds])) + if restype == "both": + ax1.hist( + res[inds], + nbin, + histtype="step", + color=colorscheme[r_b_label], + linestyle="--", + label=r_b_label + " Prefit", + ) + ax1.grid(True) ax1.set_ylabel("Number of measurements") if plotsig: if avg and whitened: - ax1.set_xlabel('Average Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_xlabel( + "Average Residual/Uncertainty \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_xlabel('Average Residual/Uncertainty') + ax1.set_xlabel("Average Residual/Uncertainty") elif whitened and not avg: - ax1.set_xlabel('Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_xlabel( + "Residual/Uncertainty \n (Whitened)", 
multialignment="center" + ) else: - ax1.set_xlabel('Residual/Uncertainty') + ax1.set_xlabel("Residual/Uncertainty") else: if avg and whitened: - ax1.set_xlabel('Average Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_xlabel( + "Average Residual ($\mu$s) \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_xlabel('Average Residual ($\mu$s)') + ax1.set_xlabel("Average Residual ($\mu$s)") elif whitened and not avg: - ax1.set_xlabel('Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_xlabel("Residual ($\mu$s) \n (Whitened)", multialignment="center") else: - ax1.set_xlabel('Residual ($\mu$s)') - ax1.set_xlim(-1.1*xmax,1.1*xmax) + ax1.set_xlabel("Residual ($\mu$s)") + ax1.set_xlim(-1.1 * xmax, 1.1 * xmax) if legend: if len(RCVR_BCKNDS) > 5: - ncol = int(np.ceil(len(RCVR_BCKNDS)/2)) + ncol = int(np.ceil(len(RCVR_BCKNDS) / 2)) y_offset = 1.15 else: ncol = len(RCVR_BCKNDS) y_offset = 1.0 - ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol) + ax1.legend( + loc="upper center", + bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]), + ncol=ncol, + ) if title: if len(RCVR_BCKNDS) > 5: y_offset = 1.1 else: y_offset = 1.0 - plt.title("%s %s residual measurements" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1]) + plt.title( + "%s %s residual measurements" % (fitter.model.PSR.value, restype), + y=y_offset + 1.0 / figsize[1], + ) if axs == None: plt.tight_layout() if save: @@ -2019,18 +2288,29 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = ext += "_NB" else: ext += "_WB" - if restype == 'prefit': + if restype == "prefit": ext += "_prefit" - elif restype == 'postfit': + elif restype == "postfit": ext += "_postfit" elif restype == "both": ext += "_pre_post_fit" plt.savefig("%s_resid_measurements%s.png" % (fitter.model.PSR.value, ext)) - + return -def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin = 50, 
\ - save = False, legend = True, title = True, axs = None, mean_sub = True, **kwargs): + +def plot_measurements_v_dmres( + fitter, + restype="postfit", + plotsig=False, + nbin=50, + save=False, + legend=True, + title=True, + axs=None, + mean_sub=True, + **kwargs, +): """ Make a histogram of number of measurements v. residuals @@ -2052,7 +2332,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin axs [string] : If not None, should be defined subplot value and the figure will be used as part of a larger figure [default: None]. mean_sub [boolean] : If False, will not mean subtract the DM residuals to be centered on zero [default: True] - + Optional Arguments: -------------------- dmres [list/array] : List or array of residual values to plot. Will override values from fitter object. @@ -2066,53 +2346,57 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin # Check if wideband if not fitter.is_wideband: - raise ValueError("Narrowband Fitters have have no DM residuals, please use `plot_measurements_v_dmres` instead.") - + raise ValueError( + "Narrowband Fitters have no DM residuals, please use `plot_measurements_v_res` instead."
+ ) + # Get the DM residuals - if 'dmres' in kwargs.keys(): - dm_resids = kwargs['dmres'] + if "dmres" in kwargs.keys(): + dm_resids = kwargs["dmres"] else: if restype == "postfit": - dm_resids = fitter.resids.residual_objs['dm'].resids.value - elif restype == 'prefit': - dm_resids = fitter.resids_init.residual_objs['dm'].resids.value - elif restype == 'both': - dm_resids = fitter.resids.residual_objs['dm'].resids.value - dm_resids_init = fitter.resids_init.residual_objs['dm'].resids.value - + dm_resids = fitter.resids.residual_objs["dm"].resids.value + elif restype == "prefit": + dm_resids = fitter.resids_init.residual_objs["dm"].resids.value + elif restype == "both": + dm_resids = fitter.resids.residual_objs["dm"].resids.value + dm_resids_init = fitter.resids_init.residual_objs["dm"].resids.value + # Get the DM residual errors if "errs" in kwargs.keys(): - dm_error = kwargs['errs'] + dm_error = kwargs["errs"] else: - if restype == 'postfit': - dm_error = fitter.resids.residual_objs['dm'].get_data_error().value - elif restype == 'prefit': - dm_error = fitter.resids_init.residual_objs['dm'].get_data_error().value - elif restype == 'both': - dm_error = fitter.resids.residual_objs['dm'].get_data_error().value - dm_error_init = fitter.resids_init.residual_objs['dm'].get_data_error().value - + if restype == "postfit": + dm_error = fitter.resids.residual_objs["dm"].get_data_error().value + elif restype == "prefit": + dm_error = fitter.resids_init.residual_objs["dm"].get_data_error().value + elif restype == "both": + dm_error = fitter.resids.residual_objs["dm"].get_data_error().value + dm_error_init = ( + fitter.resids_init.residual_objs["dm"].get_data_error().value + ) + # Get the receiver-backend combos - if 'rcvr_bcknds' in kwargs.keys(): - rcvr_bcknds = kwargs['rcvr_bcknds'] + if "rcvr_bcknds" in kwargs.keys(): + rcvr_bcknds = kwargs["rcvr_bcknds"] else: - rcvr_bcknds = np.array(fitter.toas.get_flag_value('f')[0]) + rcvr_bcknds = 
np.array(fitter.toas.get_flag_value("f")[0]) # Get the set of unique receiver-bandend combos RCVR_BCKNDS = set(rcvr_bcknds) # If we don't want mean subtraced data we add the mean if not mean_sub: - if 'dmres' in kwargs.keys(): + if "dmres" in kwargs.keys(): dm_avg = dm_resids else: - dm_avg = fitter.resids.residual_objs['dm'].dm_data + dm_avg = fitter.resids.residual_objs["dm"].dm_data if "errs" in kwargs.keys(): dm_avg_err = dm_error else: - dm_avg_err = fitter.resids.residual_objs['dm'].get_data_error().value - DM0 = np.average(dm_avg, weights=(dm_avg_err)**-2) + dm_avg_err = fitter.resids.residual_objs["dm"].get_data_error().value + DM0 = np.average(dm_avg, weights=(dm_avg_err) ** -2) dm_resids += DM0.value - if restype == 'both': + if restype == "both": dm_resids_init += DM0.value if plotsig: xlabel = r"DM/Uncertainty" @@ -2123,75 +2407,118 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin xlabel = r"$\Delta$DM/Uncertainty" else: xlabel = r"$\Delta$DM [cm$^{-3}$ pc]" - + if axs == None: - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (10,4) + figsize = (10, 4) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) else: ax1 = axs for i, r_b in enumerate(RCVR_BCKNDS): - inds = np.where(rcvr_bcknds==r_b)[0] + inds = np.where(rcvr_bcknds == r_b)[0] if not inds.tolist(): r_b_label = "" else: r_b_label = rcvr_bcknds[inds][0] # Get plot preferences - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in kwargs.keys(): + clr = kwargs["color"] else: clr = colorscheme[r_b_label] - + if plotsig: - sig = dm_resids[inds]/dm_error[inds] - ax1.hist(sig, nbin, histtype='step', color=colorscheme[r_b_label], label=r_b_label) - if restype == 'both': - sig_pre = dm_resids_init[inds]/dm_error_init[inds] - ax1.hist(sig_pre, nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\ - label=r_b_label+" Prefit") + sig = 
dm_resids[inds] / dm_error[inds] + ax1.hist( + sig, + nbin, + histtype="step", + color=colorscheme[r_b_label], + label=r_b_label, + ) + if restype == "both": + sig_pre = dm_resids_init[inds] / dm_error_init[inds] + ax1.hist( + sig_pre, + nbin, + histtype="step", + color=colorscheme[r_b_label], + linestyle="--", + label=r_b_label + " Prefit", + ) else: - ax1.hist(dm_resids[inds], nbin, histtype='step', color=colorscheme[r_b_label], label=r_b_label) - if restype == 'both': - ax1.hist(dm_resids_init[inds], nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\ - label=r_b_label+" Prefit") - + ax1.hist( + dm_resids[inds], + nbin, + histtype="step", + color=colorscheme[r_b_label], + label=r_b_label, + ) + if restype == "both": + ax1.hist( + dm_resids_init[inds], + nbin, + histtype="step", + color=colorscheme[r_b_label], + linestyle="--", + label=r_b_label + " Prefit", + ) + ax1.grid(True) ax1.set_ylabel("Number of measurements") ax1.set_xlabel(xlabel) if legend: if len(RCVR_BCKNDS) > 5: - ncol = int(np.ceil(len(RCVR_BCKNDS)/2)) + ncol = int(np.ceil(len(RCVR_BCKNDS) / 2)) y_offset = 1.15 else: ncol = len(RCVR_BCKNDS) y_offset = 1.0 - ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol) + ax1.legend( + loc="upper center", + bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]), + ncol=ncol, + ) if title: if len(RCVR_BCKNDS) > 5: y_offset = 1.1 else: y_offset = 1.0 - plt.title("%s %s DM residual measurements" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1]) + plt.title( + "%s %s DM residual measurements" % (fitter.model.PSR.value, restype), + y=y_offset + 1.0 / figsize[1], + ) if axs == None: plt.tight_layout() if save: ext = "" - if restype == 'prefit': + if restype == "prefit": ext += "_prefit" - elif restype == 'postfit': + elif restype == "postfit": ext += "_postfit" elif restype == "both": ext += "_pre_post_fit" plt.savefig("%s_DM_resid_measurements%s.png" % (fitter.model.PSR.value, ext)) - + return -def 
plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = False, avg = False, mixed_ecorr=False, \ - whitened = False, save = False, legend = True, title = True, axs = None, **kwargs): +def plot_residuals_orb( + fitter, + restype="postfit", + colorby="f", + plotsig=False, + avg=False, + mixed_ecorr=False, + whitened=False, + save=False, + legend=True, + title=True, + axs=None, + **kwargs, +): """ Make a plot of the residuals vs. orbital phase. @@ -2229,222 +2556,213 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal if fitter.is_wideband: NB = False if avg == True: - raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") + raise ValueError( + "Cannot epoch average wideband residuals, please change 'avg' to False." + ) else: NB = True - - + # Check if want epoch averaged residuals - if avg == True and restype == 'prefit' and mixed_ecorr == True: + if avg == True and restype == "prefit" and mixed_ecorr == True: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True) - elif avg == True and restype == 'postfit' and mixed_ecorr == True: + no_avg_dict = no_ecorr_average( + fitter.toas, fitter.resids_init, use_noise_model=True + ) + elif avg == True and restype == "postfit" and mixed_ecorr == True: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True) - elif avg == True and restype == 'both' and mixed_ecorr == True: + no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True) + elif avg == True and restype == "both" and mixed_ecorr == True: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True) + no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True) avg_dict_pre = 
fitter.resids_init.ecorr_average(use_noise_model=True) - no_avg_dict_pre = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True) - elif avg == True and restype == 'prefit' and mixed_ecorr == False: + no_avg_dict_pre = no_ecorr_average( + fitter.toas, fitter.resids_init, use_noise_model=True + ) + elif avg == True and restype == "prefit" and mixed_ecorr == False: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'postfit' and mixed_ecorr==False: + elif avg == True and restype == "postfit" and mixed_ecorr == False: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'both' and mixed_ecorr == False: + elif avg == True and restype == "both" and mixed_ecorr == False: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - # Get residuals - if 'res' in kwargs.keys(): - res = kwargs['res'] + if "res" in kwargs.keys(): + res = kwargs["res"] else: - if restype == 'prefit': + if restype == "prefit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) - elif avg==True and mixed_ecorr == False: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) + elif avg == True and mixed_ecorr == False: + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'postfit': + res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) + elif restype == "postfit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) elif 
avg == True: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'both': + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + elif restype == "both": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) - res_pre_no_avg = no_avg_dict_pre['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) + res_pre_no_avg = no_avg_dict_pre["time_resids"].to(u.us) elif avg == True and mixed_ecorr == False: - res = avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) res_pre = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - res_pre = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) else: - raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\ - %(restype)) + raise ValueError( + "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'." 
+ % (restype) + ) - - # Check if we want whitened residuals - if whitened == True and ('res' not in kwargs.keys()): - if avg == True and mixed_ecorr == True: - if restype != 'both': + if whitened == True and ("res" not in kwargs.keys()): + if avg == True and mixed_ecorr == True: + if restype != "both": res = whiten_resids(avg_dict, restype=restype) res_no_avg = whiten_resids(no_avg_dict, restype=restype) else: - res = whiten_resids(avg_dict_pre, restype='prefit') - res_pre = whiten_resids(avg_dict, restype='postfit') + res = whiten_resids(avg_dict_pre, restype="prefit") + res_pre = whiten_resids(avg_dict, restype="postfit") res_pre = res_pre.to(u.us) - res_no_avg = whiten_resids(avg_dict_pre, restype='prefit') - res_pre_no_avg = whiten_resids(avg_dict, restype='postfit') + res_no_avg = whiten_resids(avg_dict_pre, restype="prefit") + res_pre_no_avg = whiten_resids(avg_dict, restype="postfit") res_pre_no_avg = res_pre_no_avg.to(u.us) res = res.to(u.us) res_no_avg = res_no_avg.to(u.us) - elif avg == True and mixed_ecorr == False: - if restype != 'both': + elif avg == True and mixed_ecorr == False: + if restype != "both": res = whiten_resids(avg_dict, restype=restype) else: - res = whiten_resids(avg_dict_pre, restype='prefit') - res_pre = whiten_resids(avg_dict, restype='postfit') + res = whiten_resids(avg_dict_pre, restype="prefit") + res_pre = whiten_resids(avg_dict, restype="postfit") res_pre = res_pre.to(u.us) - res = res.to(u.us) + res = res.to(u.us) else: - if restype != 'both': + if restype != "both": res = whiten_resids(fitter, restype=restype) else: - res = whiten_resids(fitter, restype='prefit') - res_pre = whiten_resids(fitter, restype='postfit') + res = whiten_resids(fitter, restype="prefit") + res_pre = whiten_resids(fitter, restype="postfit") res_pre = res_pre.to(u.us) res = res.to(u.us) # Get errors - if 'errs' in kwargs.keys(): - errs = kwargs['errs'] + if "errs" in kwargs.keys(): + errs = kwargs["errs"] else: - if restype == 'prefit': + if restype == 
"prefit": if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.toas.get_errors().to(u.us) - elif restype == 'postfit': + elif restype == "postfit": if NB == True: if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) - elif restype == 'both': + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) + elif restype == "both": if NB == True: if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_pre = avg_dict_pre['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) - errs_no_avg_pre = no_avg_dict_pre['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_pre = avg_dict_pre["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) + errs_no_avg_pre = no_avg_dict_pre["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) - errs_pre = avg_dict_pre['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_pre = avg_dict_pre["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) # Get MJDs - if 'orbphase' not in kwargs.keys(): 
+ if "orbphase" not in kwargs.keys(): mjds = fitter.toas.get_mjds().value if avg == True: - mjds = avg_dict['mjds'].value + mjds = avg_dict["mjds"].value if mixed_ecorr == True: - mjds_no_avg = no_avg_dict['mjds'].value - - + mjds_no_avg = no_avg_dict["mjds"].value # Now we need to the orbital phases; start with binary model name - if 'orbphase' in kwargs.keys(): - orbphase = kwargs['orbphase'] + if "orbphase" in kwargs.keys(): + orbphase = kwargs["orbphase"] else: - orbphase = fitter.model.orbital_phase(mjds, radians = False) + orbphase = fitter.model.orbital_phase(mjds, radians=False) if avg and mixed_ecorr: - no_avg_orbphase = fitter.model.orbital_phase(mjds_no_avg, radians = False) - - + no_avg_orbphase = fitter.model.orbital_phase(mjds_no_avg, radians=False) + # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr. # Create combined arrays - - if avg == True and mixed_ecorr == True: + + if avg == True and mixed_ecorr == True: combo_res = np.hstack((res, res_no_avg)) combo_errs = np.hstack((errs, errs_no_avg)) combo_orbphase = np.hstack((orbphase, no_avg_orbphase)) - if restype =='both': - combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) + if restype == "both": + combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) combo_res_pre = np.hstack((res_pre, res_no_avg_pre)) - - + # Get colorby flag values (obs, PTA, febe, etc.) - if 'colorby' in kwargs.keys(): - cb = kwargs['colorby'] + if "colorby" in kwargs.keys(): + cb = kwargs["colorby"] else: cb = np.array(fitter.toas[colorby]) - #. Seems to run a little faster but not robust to obs + # . 
Seems to run a little faster but not robust to obs # cb = np.array(fitter.toas.get_flag_value(colorby)[0]) if avg == True: avg_cb = [] - for iis in avg_dict['indices']: + for iis in avg_dict["indices"]: avg_cb.append(cb[iis[0]]) if mixed_ecorr == True: no_avg_cb = [] - for jjs in no_avg_dict['indices']: + for jjs in no_avg_dict["indices"]: no_avg_cb.append(cb[jjs]) no_ecorr_cb = np.array(no_avg_cb) - + cb = np.array(avg_cb) - + # Get the set of unique flag values - if avg==True and mixed_ecorr==True: - cb = np.hstack((cb,no_ecorr_cb)) - + if avg == True and mixed_ecorr == True: + cb = np.hstack((cb, no_ecorr_cb)) CB = set(cb) - - if colorby== 'pta': - colorscheme = colorschemes['pta'] - markerscheme = markers['pta'] - elif colorby == 'obs': - colorscheme = colorschemes['observatories'] - markerscheme = markers['observatories'] - elif colorby == 'f': - colorscheme = colorschemes['febe'] - markerscheme = markers['febe'] - - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + + colorscheme, markerscheme = set_color_and_marker(colorby) + + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (10,4) + figsize = (10, 4) if axs == None: fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) @@ -2452,99 +2770,168 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal fig = plt.gcf() ax1 = axs for i, c in enumerate(CB): - inds = np.where(cb==c)[0] + inds = np.where(cb == c)[0] if not inds.tolist(): cb_label = "" else: cb_label = cb[inds][0] # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: try: mkr = markerscheme[cb_label] - if restype == 'both': - mkr_pre = '.' + if restype == "both": + mkr_pre = "." 
except Exception: - mkr = 'x' + mkr = "x" log.log(1, "Color by flag value doesn't have a marker label!!") - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in kwargs.keys(): + clr = kwargs["color"] else: try: clr = colorscheme[cb_label] except Exception: - clr = 'k' + clr = "k" log.log(1, "Color by flag value doesn't have a color!!") - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 0.5 if avg and mixed_ecorr: if plotsig: - combo_sig = combo_res[inds]/combo_errs[inds] - ax1.errorbar(combo_orbphase[inds], combo_sig, yerr=len(combo_errs[inds])*[1], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha) - if restype == 'both': - combo_sig_pre = combo_res_pre[inds]/combo_errs_pre[inds] - ax1.errorbar(combo_orbphase[inds], combo_sig_pre, yerr=len(combo_errs_pre[inds])*[1], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha) + combo_sig = combo_res[inds] / combo_errs[inds] + ax1.errorbar( + combo_orbphase[inds], + combo_sig, + yerr=len(combo_errs[inds]) * [1], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + ) + if restype == "both": + combo_sig_pre = combo_res_pre[inds] / combo_errs_pre[inds] + ax1.errorbar( + combo_orbphase[inds], + combo_sig_pre, + yerr=len(combo_errs_pre[inds]) * [1], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + ) else: - ax1.errorbar(combo_orbphase[inds], combo_res[inds], yerr = combo_errs[inds], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha) - if restype == 'both': - ax1.errorbar(combo_orbphase[inds], combo_res_pre[inds], yerr=combo_errs_pre[inds], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha) + ax1.errorbar( + combo_orbphase[inds], + combo_res[inds], + yerr=combo_errs[inds], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + ) + if restype == "both": + ax1.errorbar( + combo_orbphase[inds], + combo_res_pre[inds], + yerr=combo_errs_pre[inds], + fmt=mkr_pre, + 
color=clr, + label=cb_label + " Prefit", + alpha=alpha, + ) else: if plotsig: - sig = res[inds]/errs[inds] - ax1.errorbar(orbphase[inds], sig, yerr=len(errs[inds])*[1], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha) - if restype == 'both': - sig_pre = res_pre[inds]/errs_pre[inds] - ax1.errorbar(orbphase[inds], sig_pre, yerr=len(errs_pre[inds])*[1], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha) + sig = res[inds] / errs[inds] + ax1.errorbar( + orbphase[inds], + sig, + yerr=len(errs[inds]) * [1], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + ) + if restype == "both": + sig_pre = res_pre[inds] / errs_pre[inds] + ax1.errorbar( + orbphase[inds], + sig_pre, + yerr=len(errs_pre[inds]) * [1], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + ) else: - ax1.errorbar(orbphase[inds], res[inds], yerr = errs[inds], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha) - if restype == 'both': - ax1.errorbar(orbphase[inds], res_pre[inds], yerr=errs_pre[inds], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha) + ax1.errorbar( + orbphase[inds], + res[inds], + yerr=errs[inds], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + ) + if restype == "both": + ax1.errorbar( + orbphase[inds], + res_pre[inds], + yerr=errs_pre[inds], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + ) # Set second axis - ax1.set_xlabel(r'Orbital Phase') + ax1.set_xlabel(r"Orbital Phase") ax1.grid(True) if plotsig: if avg and whitened: - ax1.set_ylabel('Average Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual/Uncertainty \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual/Uncertainty') + ax1.set_ylabel("Average Residual/Uncertainty") elif whitened and not avg: - ax1.set_ylabel('Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Residual/Uncertainty \n 
(Whitened)", multialignment="center" + ) else: - ax1.set_ylabel('Residual/Uncertainty') + ax1.set_ylabel("Residual/Uncertainty") else: if avg and whitened: - ax1.set_ylabel('Average Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual ($\mu$s) \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual ($\mu$s)') + ax1.set_ylabel("Average Residual ($\mu$s)") elif whitened and not avg: - ax1.set_ylabel('Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel("Residual ($\mu$s) \n (Whitened)", multialignment="center") else: - ax1.set_ylabel('Residual ($\mu$s)') + ax1.set_ylabel("Residual ($\mu$s)") if legend: if len(CB) > 5: - ncol = int(np.ceil(len(CB)/2)) + ncol = int(np.ceil(len(CB) / 2)) y_offset = 1.15 else: ncol = len(CB) y_offset = 1.0 - ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol) + ax1.legend( + loc="upper center", + bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]), + ncol=ncol, + ) if title: if len(CB) > 5: y_offset = 1.1 else: y_offset = 1.0 - plt.title("%s %s timing residuals" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1]) + plt.title( + "%s %s timing residuals" % (fitter.model.PSR.value, restype), + y=y_offset + 1.0 / figsize[1], + ) if axs == None: plt.tight_layout() if save: @@ -2557,9 +2944,9 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal ext += "_NB" else: ext += "_WB" - if restype == 'prefit': + if restype == "prefit": ext += "_prefit" - elif restype == 'postfit': + elif restype == "postfit": ext += "_postfit" elif restype == "both": ext += "_pre_post_fit" @@ -2567,37 +2954,43 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal if axs == None: # Define clickable points - text = ax1.text(0,0,"") + text = ax1.text(0, 0, "") stamp_color = "#FD9927" # Define color for highlighting points - #if "430_ASP" in 
RCVR_BCKNDS or "430_PUPPI" in RCVR_BCKNDS: + # if "430_ASP" in RCVR_BCKNDS or "430_PUPPI" in RCVR_BCKNDS: # stamp_color = "#61C853" - #else: + # else: # stamp_color = "#FD9927" def onclick(event): # Get X and Y axis data xdata = orbphase if plotsig: - ydata = (res/errs).decompose().value + ydata = (res / errs).decompose().value else: ydata = res.value # Get x and y data from click xclick = event.xdata yclick = event.ydata # Calculate scaled distance, find closest point index - d = np.sqrt((xdata - xclick)**2 + ((ydata - yclick)/100.0)**2) + d = np.sqrt((xdata - xclick) ** 2 + ((ydata - yclick) / 100.0) ** 2) ind_close = np.where(np.min(d) == d)[0] # highlight clicked point - ax1.scatter(xdata[ind_close], ydata[ind_close], marker = 'x', c = stamp_color) + ax1.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color) # Print point info text.set_position((xdata[ind_close], ydata[ind_close])) if plotsig: - text.set_text("TOA Params:\n Phase: %.5f \n Res/Err: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n Phase: %.5f \n Res/Err: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) else: - text.set_text("TOA Params:\n Phase: %.5f \n Res: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n Phase: %.5f \n Res: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) - fig.canvas.mpl_connect('button_press_event', onclick) + fig.canvas.mpl_connect("button_press_event", onclick) return @@ -3048,9 +3441,18 @@ def onclick(event): return - -def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whitened = False, save = False, \ - legend = True, title = True, axs = None, **kwargs): +def plot_fd_res_v_freq( + fitter, + plotsig=False, + comp_FD=True, + avg=False, + whitened=False, + save=False, + legend=True, + title=True, + axs=None, + **kwargs, +): """ Make a plot of the 
residuals vs. frequency, can do WB as well. Note, if WB fitter, comp_FD may not work. If comp_FD is True, the panels are organized as follows: @@ -3058,7 +3460,7 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi Middle: Best fit residuals with no FD parameters. Bottom: Residuals with FD correction included. Note - This function may take a while to run if there are many TOAs. - + Arguments --------- fitter [object] : The PINT fitter object. @@ -3090,87 +3492,89 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi if fitter.is_wideband: NB = False if avg == True: - raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") + raise ValueError( + "Cannot epoch average wideband residuals, please change 'avg' to False." + ) else: NB = True - + # Check if want epoch averaged residuals if avg: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - + # Get residuals - if 'res' in kwargs.keys(): - res = kwargs['res'] + if "res" in kwargs.keys(): + res = kwargs["res"] else: if NB == True: if avg == True: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + # Check if we want whitened residuals - if whitened == True and ('res' not in kwargs.keys()): + if whitened == True and ("res" not in kwargs.keys()): if avg == True: res = whiten_resids(avg_dict) res = res.to(u.us) else: res = whiten_resids(fitter) res = res.to(u.us) - + # Get errors - if 'errs' in kwargs.keys(): - errs = kwargs['errs'] + if "errs" in kwargs.keys(): + errs = kwargs["errs"] else: if NB == True: if avg == True: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: - errs = 
fitter.resids.residual_objs['toa'].get_data_error().to(u.us) + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) # Get receiver backends - if 'rcvr_bcknds' in kwargs.keys(): - rcvr_bcknds = kwargs['rcvr_bcknds'] + if "rcvr_bcknds" in kwargs.keys(): + rcvr_bcknds = kwargs["rcvr_bcknds"] else: - rcvr_bcknds = np.array(fitter.toas.get_flag_value('f')[0]) + rcvr_bcknds = np.array(fitter.toas.get_flag_value("f")[0]) if avg == True: avg_rcvr_bcknds = [] - for iis in avg_dict['indices']: + for iis in avg_dict["indices"]: avg_rcvr_bcknds.append(rcvr_bcknds[iis[0]]) rcvr_bcknds = np.array(avg_rcvr_bcknds) # Get the set of unique receiver-bandend combos RCVR_BCKNDS = set(rcvr_bcknds) - + # get frequencies - if 'freqs' in kwargs.keys(): - freqs = kwargs['freqs'] + if "freqs" in kwargs.keys(): + freqs = kwargs["freqs"] else: if avg == True: - freqs = avg_dict['freqs'].value + freqs = avg_dict["freqs"].value else: freqs = fitter.toas.get_freqs().value - + # Check if comparing the FD parameters if comp_FD: if axs != None: log.warn("Cannot do full comparison with three panels") axs = None - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (4,12) + figsize = (4, 12) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(313) ax2 = fig.add_subplot(312) ax3 = fig.add_subplot(311) else: - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (4,4) + figsize = (4, 4) if axs == None: fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) @@ -3179,52 +3583,66 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi # Make the plot of residual vs. 
frequency for i, r_b in enumerate(RCVR_BCKNDS): - inds = np.where(rcvr_bcknds==r_b)[0] + inds = np.where(rcvr_bcknds == r_b)[0] if not inds.tolist(): r_b_label = "" else: r_b_label = rcvr_bcknds[inds][0] # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: mkr = markers[r_b_label] - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in kwargs.keys(): + clr = kwargs["color"] else: clr = colorscheme[r_b_label] - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 1.0 if plotsig: - sig = res[inds]/errs[inds] - ax1.errorbar(freqs[inds], sig, yerr=len(errs[inds])*[1], fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) + sig = res[inds] / errs[inds] + ax1.errorbar( + freqs[inds], + sig, + yerr=len(errs[inds]) * [1], + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) else: - ax1.errorbar(freqs[inds], res[inds], yerr=errs[inds], fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) + ax1.errorbar( + freqs[inds], + res[inds], + yerr=errs[inds], + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) # assign axis labels - ax1.set_xlabel(r'Frequency (MHz)') + ax1.set_xlabel(r"Frequency (MHz)") ax1.grid(True) if plotsig: if avg and whitened: - ylabel = 'Average Residual/Uncertainty \n (Whitened)' + ylabel = "Average Residual/Uncertainty \n (Whitened)" elif avg and not whitened: - ylabel = 'Average Residual/Uncertainty' + ylabel = "Average Residual/Uncertainty" elif whitened and not avg: - ylabel ='Residual/Uncertainty \n (Whitened)' + ylabel = "Residual/Uncertainty \n (Whitened)" else: - ylabel ='Residual/Uncertainty' + ylabel = "Residual/Uncertainty" else: if avg and whitened: - ylabel = 'Average Residual ($\mu$s) \n (Whitened)' + ylabel = "Average Residual ($\mu$s) \n (Whitened)" elif avg and not whitened: - ylabel = 'Average Residual ($\mu$s)' + ylabel = "Average Residual ($\mu$s)" 
elif whitened and not avg: - ylabel = 'Residual ($\mu$s) \n (Whitened)' + ylabel = "Residual ($\mu$s) \n (Whitened)" else: - ylabel = 'Residual ($\mu$s)' + ylabel = "Residual ($\mu$s)" ax1.set_ylabel(ylabel) # Now if we want to show the other plots, we plot them @@ -3237,22 +3655,22 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi sorted_freqs = np.linspace(np.min(freqs), np.max(freqs), 1000) FD_line = np.zeros(np.size(sorted_freqs)) for i, fd in enumerate(cur_fd): - fd_val = getattr(fitter.model, fd).value * 10**6 # convert to microseconds - FD_offsets += fd_val * np.log(freqs/1000.0)**(i+1) - FD_line += fd_val * np.log(sorted_freqs/1000.0)**(i+1) + fd_val = getattr(fitter.model, fd).value * 10**6 # convert to microseconds + FD_offsets += fd_val * np.log(freqs / 1000.0) ** (i + 1) + FD_line += fd_val * np.log(sorted_freqs / 1000.0) ** (i + 1) # Now edit residuals fd_cor_res = res.value + FD_offsets # Now we need to redo the fit without the FD parameters psr_fitter_nofd = copy.deepcopy(fitter) try: - psr_fitter_nofd.model.remove_component('FD') + psr_fitter_nofd.model.remove_component("FD") except: log.warning("No FD parameters in the initial timing model...") # Check if fitter is wideband or not if psr_fitter_nofd.is_wideband: - resids = psr_fitter_nofd.resids.residual_objs['toa'] + resids = psr_fitter_nofd.resids.residual_objs["toa"] else: resids = psr_fitter_nofd.resids @@ -3267,7 +3685,7 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi res_nofd = wres_avg.to(u.us).value else: # need to average - res_nofd = avg['time_resids'].to(u.us).value + res_nofd = psr_fitter_nofd.resids.ecorr_average(use_noise_model=True)["time_resids"].to(u.us).value elif whitened: # Need to whiten wres_nofd = whiten_resids(psr_fitter_nofd) @@ -3277,55 +3695,93 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi # Now plot for i, r_b in enumerate(RCVR_BCKNDS): - inds = np.where(rcvr_bcknds==r_b)[0] + inds = np.where(rcvr_bcknds == r_b)[0] if
not inds.tolist(): r_b_label = "" else: r_b_label = rcvr_bcknds[inds][0] # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: mkr = markers[r_b_label] - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in kwargs.keys(): + clr = kwargs["color"] else: clr = colorscheme[r_b_label] - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 1.0 if plotsig: - sig = fd_cor_res[inds]/errs[inds] - ax3.errorbar(freqs[inds], sig.value, yerr=len(errs[inds])*[1], fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) - - sig_nofd = res_nofd[inds]/errs[inds].value - ax2.errorbar(freqs[inds], sig_nofd, yerr=len(errs[inds])*[1], fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) + sig = fd_cor_res[inds] / errs[inds] + ax3.errorbar( + freqs[inds], + sig.value, + yerr=len(errs[inds]) * [1], + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) + + sig_nofd = res_nofd[inds] / errs[inds].value + ax2.errorbar( + freqs[inds], + sig_nofd, + yerr=len(errs[inds]) * [1], + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) else: - ax3.errorbar(freqs[inds], fd_cor_res[inds], yerr=errs[inds].value, fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) - - ax2.errorbar(freqs[inds], res_nofd[inds], yerr=errs[inds].value, fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) - - ax3.plot(sorted_freqs, FD_line, c = 'k', ls = '--') + ax3.errorbar( + freqs[inds], + fd_cor_res[inds], + yerr=errs[inds].value, + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) + + ax2.errorbar( + freqs[inds], + res_nofd[inds], + yerr=errs[inds].value, + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) + + ax3.plot(sorted_freqs, FD_line, c="k", ls="--") # assign axis labels - ax3.set_xlabel(r'Frequency (MHz)') + ax3.set_xlabel(r"Frequency (MHz)") ax3.set_ylabel(ylabel) ax3.grid(True) - 
ax2.set_xlabel(r'Frequency (MHz)') + ax2.set_xlabel(r"Frequency (MHz)") ax2.set_ylabel(ylabel) ax2.grid(True) if legend: if comp_FD: - ax3.legend(loc='upper center', bbox_to_anchor= (0.5, 1.0+1.0/figsize[1]), ncol=int(len(RCVR_BCKNDS)/2)) + ax3.legend( + loc="upper center", + bbox_to_anchor=(0.5, 1.0 + 1.0 / figsize[1]), + ncol=int(len(RCVR_BCKNDS) / 2), + ) else: - ax1.legend(loc='upper center', bbox_to_anchor= (0.5, 1.0+1.0/figsize[1]), ncol=int(len(RCVR_BCKNDS)/2)) + ax1.legend( + loc="upper center", + bbox_to_anchor=(0.5, 1.0 + 1.0 / figsize[1]), + ncol=int(len(RCVR_BCKNDS) / 2), + ) if title: - plt.title("%s FD Paramter Check" % (fitter.model.PSR.value), y=1.0+1.0/figsize[1]) + plt.title( + "%s FD Parameter Check" % (fitter.model.PSR.value), y=1.0 + 1.0 / figsize[1] + ) plt.tight_layout() if save: ext = "" @@ -3345,7 +3801,11 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi We also offer some options for convenience plotting functions, one that will show all possible summary plots, and another that will show just the summary plots that are typically created in finalize_timing.py in that order. """ -def summary_plots(fitter, title = None, legends = False, save = False, avg = True, whitened = True): + + +def summary_plots( + fitter, title=None, legends=False, save=False, avg=True, whitened=True +): """ Function to make a composite set of summary plots for sets of TOAs. NOTE - This is noe the same set of plots as will be in the pdf writer @@ -3363,7 +3823,9 @@ def summary_plots(fitter, title = None, legends = False, save = False, avg = Tru if fitter.is_wideband: if avg == True: - raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") + raise ValueError( + "Cannot epoch average wideband residuals, please change 'avg' to False."
+ ) # Determine how long the figure size needs to be figlength = 18 gs_rows = 6 @@ -3377,7 +3839,7 @@ def summary_plots(fitter, title = None, legends = False, save = False, avg = Tru figlength += 18 gs_rows += 4 # adjust size if not in a binary - if not hasattr(fitter.model, 'binary_model_name'): + if not hasattr(fitter.model, "binary_model_name"): sub_rows = 1 sub_len = 3 if whitened: @@ -3392,126 +3854,241 @@ def summary_plots(fitter, title = None, legends = False, save = False, avg = Tru figlength -= sub_len gs_rows -= sub_rows - fig = plt.figure(figsize = (12,figlength)) # not sure what we'll need for a fig size + fig = plt.figure(figsize=(12, figlength)) # not sure what we'll need for a fig size if title != None: - plt.title(title, y = 1.015, size = 16) + plt.title(title, y=1.015, size=16) gs = fig.add_gridspec(gs_rows, 2) count = 0 k = 0 # First plot is all residuals vs. time. ax0 = fig.add_subplot(gs[count, :]) - plot_residuals_time(fitter, title = False, axs = ax0, figsize=(12,3)) + plot_residuals_time(fitter, title=False, axs=ax0, figsize=(12, 3)) k += 1 # Plot the residuals divided by uncertainty vs. time - ax1 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, axs = ax1, figsize=(12,3)) + ax1 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, title=False, legend=False, plotsig=True, axs=ax1, figsize=(12, 3) + ) k += 1 # Second plot is residual v. orbital phase - if hasattr(fitter.model, 'binary_model_name'): - ax2 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, axs = ax2, figsize=(12,3)) + if hasattr(fitter.model, "binary_model_name"): + ax2 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb(fitter, title=False, legend=False, axs=ax2, figsize=(12, 3)) k += 1 # Now add the measurement vs. 
uncertainty - ax3_0 = fig.add_subplot(gs[count+k, 0]) - ax3_1 = fig.add_subplot(gs[count+k, 1]) - plot_measurements_v_res(fitter, nbin = 50, plotsig=False, title = False, legend = False, axs = ax3_0, \ - figsize=(6,3),) - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, axs = ax3_1, \ - figsize=(6,3),) + ax3_0 = fig.add_subplot(gs[count + k, 0]) + ax3_1 = fig.add_subplot(gs[count + k, 1]) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=False, + title=False, + legend=False, + axs=ax3_0, + figsize=(6, 3), + ) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + axs=ax3_1, + figsize=(6, 3), + ) k += 1 # and the DMX vs. time - ax4 = fig.add_subplot(gs[count+k, :]) - plot_dmx_time(fitter, savedmx = "dmxparse.out", legend = False, title = False, axs = ax4, figsize=(12,3)) + ax4 = fig.add_subplot(gs[count + k, :]) + plot_dmx_time( + fitter, + savedmx="dmxparse.out", + legend=False, + title=False, + axs=ax4, + figsize=(12, 3), + ) k += 1 # And residual vs. Frequency - ax5 = fig.add_subplot(gs[count+k, :]) - plot_residuals_freq(fitter, title = False, legend = False, axs =ax5, figsize=(12,3)) + ax5 = fig.add_subplot(gs[count + k, :]) + plot_residuals_freq(fitter, title=False, legend=False, axs=ax5, figsize=(12, 3)) k += 1 # Now if whitened add the whitened residual plots if whitened: - ax6 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, whitened = True, axs = ax6, figsize=(12,3)) + ax6 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, title=False, whitened=True, axs=ax6, figsize=(12, 3) + ) k += 1 # Plot the residuals divided by uncertainty vs. 
time - ax7 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, whitened = True, axs = ax7, figsize=(12,3)) + ax7 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + whitened=True, + axs=ax7, + figsize=(12, 3), + ) k += 1 # Second plot is residual v. orbital phase - if hasattr(fitter.model, 'binary_model_name'): - ax8 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, whitened = True, axs = ax8, figsize=(12,3)) + if hasattr(fitter.model, "binary_model_name"): + ax8 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, + title=False, + legend=False, + whitened=True, + axs=ax8, + figsize=(12, 3), + ) k += 1 # Now add the measurement vs. uncertainty - ax9_0 = fig.add_subplot(gs[count+k, 0]) - ax9_1 = fig.add_subplot(gs[count+k, 1]) - plot_measurements_v_res(fitter, nbin = 50, plotsig=False, title = False, legend = False, whitened = True,\ - axs = ax9_0, figsize=(6,3),) - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, whitened = True,\ - axs = ax9_1, figsize=(6,3),) + ax9_0 = fig.add_subplot(gs[count + k, 0]) + ax9_1 = fig.add_subplot(gs[count + k, 1]) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=False, + title=False, + legend=False, + whitened=True, + axs=ax9_0, + figsize=(6, 3), + ) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + whitened=True, + axs=ax9_1, + figsize=(6, 3), + ) k += 1 # Now plot the average residuals if avg: - ax10 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, avg = True, axs = ax10, figsize=(12,3)) + ax10 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time(fitter, title=False, avg=True, axs=ax10, figsize=(12, 3)) k += 1 # Plot the residuals divided by uncertainty vs. 
time - ax11 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, avg = True, axs = ax11, figsize=(12,3)) + ax11 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + avg=True, + axs=ax11, + figsize=(12, 3), + ) k += 1 # Second plot is residual v. orbital phase - if hasattr(fitter.model, 'binary_model_name'): - ax12 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, avg = True, axs = ax12, figsize=(12,3)) + if hasattr(fitter.model, "binary_model_name"): + ax12 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, title=False, legend=False, avg=True, axs=ax12, figsize=(12, 3) + ) k += 1 # Now add the measurement vs. uncertainty - ax13_0 = fig.add_subplot(gs[count+k, 0]) - ax13_1 = fig.add_subplot(gs[count+k, 1]) - plot_measurements_v_res(fitter, nbin = 50, plotsig=False, title = False, legend = False,\ - avg = True, axs = ax13_0, figsize=(6,3)) - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, \ - avg = True, axs = ax13_1, figsize=(6,3)) + ax13_0 = fig.add_subplot(gs[count + k, 0]) + ax13_1 = fig.add_subplot(gs[count + k, 1]) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=False, + title=False, + legend=False, + avg=True, + axs=ax13_0, + figsize=(6, 3), + ) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + avg=True, + axs=ax13_1, + figsize=(6, 3), + ) k += 1 # Now plot the whitened average residuals if avg and whitened: - ax14 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, avg = True, whitened = True, axs = ax14, figsize=(12,3)) + ax14 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time(fitter, avg=True, whitened=True, axs=ax14, figsize=(12, 3)) k += 1 # Plot the residuals divided by uncertainty vs. 
time - ax15 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, avg = True, whitened = True,\ - axs = ax15, figsize=(12,3)) + ax15 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + avg=True, + whitened=True, + axs=ax15, + figsize=(12, 3), + ) k += 1 # Second plot is residual v. orbital phase - if hasattr(fitter.model, 'binary_model_name'): - ax16 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, avg = True, whitened = True, axs = ax16, \ - figsize=(12,3)) + if hasattr(fitter.model, "binary_model_name"): + ax16 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax16, + figsize=(12, 3), + ) k += 1 # Now add the measurement vs. uncertainty - ax17_0 = fig.add_subplot(gs[count+k, 0]) - ax17_1 = fig.add_subplot(gs[count+k, 1]) - plot_measurements_v_res(fitter, nbin = 50, plotsig=False, title = False, legend = False, avg = True, whitened = True, \ - axs = ax17_0, figsize=(6,3)) - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, avg = True, whitened = True, \ - axs = ax17_1, figsize=(6,3)) + ax17_0 = fig.add_subplot(gs[count + k, 0]) + ax17_1 = fig.add_subplot(gs[count + k, 1]) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=False, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax17_0, + figsize=(6, 3), + ) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax17_1, + figsize=(6, 3), + ) k += 1 plt.tight_layout() @@ -3520,8 +4097,11 @@ def summary_plots(fitter, title = None, legends = False, save = False, avg = Tru return + """We also define a function to output the summary plots exactly as is done in finalize_timing.py (for now)""" -def summary_plots_ft(fitter, title = None, 
legends = False, save = False): + + +def summary_plots_ft(fitter, title=None, legends=False, save=False): """ Function to make a composite set of summary plots for sets of TOAs NOTE - This is note the same set of plots as will be in the pdf writer @@ -3536,131 +4116,247 @@ def summary_plots_ft(fitter, title = None, legends = False, save = False): """ # Define the figure # Determine how long the figure size needs to be - figlength = 18*3 + figlength = 18 * 3 gs_rows = 13 - if not hasattr(fitter.model, 'binary_model_name'): + if not hasattr(fitter.model, "binary_model_name"): figlength -= 9 gs_rows -= 3 if fitter.is_wideband: figlength -= 9 gs_rows -= 3 - fig = plt.figure(figsize = (12,figlength)) # not sure what we'll need for a fig size + fig = plt.figure(figsize=(12, figlength)) # not sure what we'll need for a fig size if title != None: - plt.title(title, y = 1.015, size = 16) + plt.title(title, y=1.015, size=16) gs = fig.add_gridspec(gs_rows, 2) count = 0 k = 0 # First plot is all residuals vs. time. ax0 = fig.add_subplot(gs[count, :]) - plot_residuals_time(fitter, title = False, axs = ax0, figsize=(12,3)) + plot_residuals_time(fitter, title=False, axs=ax0, figsize=(12, 3)) k += 1 # Then the epoch averaged residuals v. time if not fitter.is_wideband: - ax10 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, legend = False, avg = True, axs = ax10, figsize=(12,3)) + ax10 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, title=False, legend=False, avg=True, axs=ax10, figsize=(12, 3) + ) k += 1 # Epoch averaged vs. 
orbital phase - if hasattr(fitter.model, 'binary_model_name'): + if hasattr(fitter.model, "binary_model_name"): if not fitter.is_wideband: - ax12 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, avg = True, axs = ax12, figsize=(12,3)) + ax12 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, title=False, legend=False, avg=True, axs=ax12, figsize=(12, 3) + ) k += 1 else: - ax12 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, axs = ax12, figsize=(12,3)) + ax12 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, title=False, legend=False, axs=ax12, figsize=(12, 3) + ) k += 1 # And DMX vs. time - ax4 = fig.add_subplot(gs[count+k, :]) - plot_dmx_time(fitter, savedmx = "dmxparse.out", legend = False, title = False, axs = ax4, figsize=(12,3)) + ax4 = fig.add_subplot(gs[count + k, :]) + plot_dmx_time( + fitter, + savedmx="dmxparse.out", + legend=False, + title=False, + axs=ax4, + figsize=(12, 3), + ) k += 1 # Whitened residuals v. time - ax6 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, whitened = True, axs = ax6, figsize=(12,3)) + ax6 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time(fitter, whitened=True, axs=ax6, figsize=(12, 3)) k += 1 # Whitened epoch averaged residuals v. time if not fitter.is_wideband: - ax15 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, legend = False, plotsig = False, avg = True, \ - whitened = True, axs = ax15, figsize=(12,3)) + ax15 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=False, + avg=True, + whitened=True, + axs=ax15, + figsize=(12, 3), + ) k += 1 # Whitened epoch averaged residuals v. 
orbital phase - if hasattr(fitter.model, 'binary_model_name'): + if hasattr(fitter.model, "binary_model_name"): if not fitter.is_wideband: - ax16 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, \ - avg = True, whitened = True, axs = ax16, figsize=(12,3)) + ax16 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax16, + figsize=(12, 3), + ) k += 1 else: - ax16 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, \ - avg = False, whitened = True, axs = ax16, figsize=(12,3)) + ax16 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=False, + whitened=True, + axs=ax16, + figsize=(12, 3), + ) k += 1 # Now add the measurement vs. uncertainty for both all reaiduals and epoch averaged - ax3_0 = fig.add_subplot(gs[count+k, 0]) - ax3_1 = fig.add_subplot(gs[count+k, 1]) - plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, plotsig=False, \ - whitened = True, axs = ax3_0, figsize=(6,3)) + ax3_0 = fig.add_subplot(gs[count + k, 0]) + ax3_1 = fig.add_subplot(gs[count + k, 1]) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + legend=False, + plotsig=False, + whitened=True, + axs=ax3_0, + figsize=(6, 3), + ) if not fitter.is_wideband: - plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, avg = True, \ - whitened = True, axs = ax3_1, figsize=(6,3)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax3_1, + figsize=(6, 3), + ) k += 1 else: - plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, avg = False, \ - whitened = False, axs = ax3_1, figsize=(6,3)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + legend=False, + avg=False, + whitened=False, + axs=ax3_1, + figsize=(6, 3), + ) k += 1 # Whitened 
residual/uncertainty v. time - ax26 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, plotsig = True, title = False, legend = False, whitened = True,\ - axs = ax26, figsize=(12,3)) + ax26 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, + plotsig=True, + title=False, + legend=False, + whitened=True, + axs=ax26, + figsize=(12, 3), + ) k += 1 # Epoch averaged Whitened residual/uncertainty v. time if not fitter.is_wideband: - ax25 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, \ - avg = True, whitened = True, axs = ax25, figsize=(12,3)) + ax25 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + avg=True, + whitened=True, + axs=ax25, + figsize=(12, 3), + ) k += 1 # Epoch averaged Whitened residual/uncertainty v. orbital phase - if hasattr(fitter.model, 'binary_model_name'): + if hasattr(fitter.model, "binary_model_name"): if not fitter.is_wideband: - ax36 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, plotsig = True, avg = True, \ - whitened = True, axs = ax36, figsize=(12,3)) + ax36 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, + title=False, + legend=False, + plotsig=True, + avg=True, + whitened=True, + axs=ax36, + figsize=(12, 3), + ) k += 1 else: - ax36 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, plotsig = True, avg = False, \ - whitened = True, axs = ax36, figsize=(12,3)) + ax36 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, + title=False, + legend=False, + plotsig=True, + avg=False, + whitened=True, + axs=ax36, + figsize=(12, 3), + ) k += 1 # Now add the measurement vs. 
uncertainty for both all reaiduals/uncertainty and epoch averaged/uncertainty - ax17_0 = fig.add_subplot(gs[count+k, 0]) - ax17_1 = fig.add_subplot(gs[count+k, 1]) - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, whitened = True,\ - axs = ax17_0, figsize=(6,3)) + ax17_0 = fig.add_subplot(gs[count + k, 0]) + ax17_1 = fig.add_subplot(gs[count + k, 1]) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + whitened=True, + axs=ax17_0, + figsize=(6, 3), + ) if not fitter.is_wideband: - plot_measurements_v_res(fitter, nbin = 50, title = False, plotsig=True, \ - legend = False, avg = True, whitened = True, axs = ax17_1, figsize=(6,3)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + plotsig=True, + legend=False, + avg=True, + whitened=True, + axs=ax17_1, + figsize=(6, 3), + ) k += 1 else: - plot_measurements_v_res(fitter, nbin = 50, title = False, plotsig=True, \ - legend = False, avg = False, whitened =False, axs = ax17_1, figsize=(6,3)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + plotsig=True, + legend=False, + avg=False, + whitened=False, + axs=ax17_1, + figsize=(6, 3), + ) k += 1 # Now plot the frequencies of the TOAs vs. time - ax5 = fig.add_subplot(gs[count+k, :]) - plot_residuals_freq(fitter, title = False, legend = False, axs =ax5, figsize=(12,3)) + ax5 = fig.add_subplot(gs[count + k, :]) + plot_residuals_freq(fitter, title=False, legend=False, axs=ax5, figsize=(12, 3)) k += 1 plt.tight_layout() @@ -3669,13 +4365,14 @@ def summary_plots_ft(fitter, title = None, legends = False, save = False): return + # JUST THE PLOTS FOR THE PDF WRITERS LEFT -def plots_for_summary_pdf_nb(fitter, title = None, legends = False): +def plots_for_summary_pdf_nb(fitter, title=None, legends=False): """ Function to make a composite set of summary plots for sets of TOAs to be put into a summary pdf. This is for Narrowband timing only. 
For Wideband timing, use `plots_for_summary_pdf_wb`. By definition, this function will save all plots as "psrname"_summary_plot_#.nb.png, where # is - and integer from 1-4. + and integer from 1-4. Arguments --------- @@ -3683,144 +4380,304 @@ def plots_for_summary_pdf_nb(fitter, title = None, legends = False): title [boolean] : If True, will add titles to ALL plots [default: False]. legend [boolean] : If True, will add legends to ALL plots [default: False]. """ - + if fitter.is_wideband: - raise ValueError("Cannot use this function with WidebandTOAFitter, please use `plots_for_summary_pdf_wb` instead.") + raise ValueError( + "Cannot use this function with WidebandTOAFitter, please use `plots_for_summary_pdf_wb` instead." + ) # Need to make four sets of plots for ii in range(4): if ii != 3: - fig = plt.figure(figsize=(8,10.0),dpi=100) + fig = plt.figure(figsize=(8, 10.0), dpi=100) else: - fig = plt.figure(figsize=(8,5),dpi=100) + fig = plt.figure(figsize=(8, 5), dpi=100) if title != None: - plt.title(title, y = 1.08, size = 14) + plt.title(title, y=1.08, size=14) if ii == 0: - gs = fig.add_gridspec(nrows = 4, ncols = 1) + gs = fig.add_gridspec(nrows=4, ncols=1) - ax0 = fig.add_subplot(gs[0,:]) - ax1 = fig.add_subplot(gs[1,:]) - ax2 = fig.add_subplot(gs[2,:]) - ax3 = fig.add_subplot(gs[3,:]) + ax0 = fig.add_subplot(gs[0, :]) + ax1 = fig.add_subplot(gs[1, :]) + ax2 = fig.add_subplot(gs[2, :]) + ax3 = fig.add_subplot(gs[3, :]) # Plot residuals v. time - plot_residuals_time(fitter, title = False, axs = ax0, figsize=(8, 2.5)) + plot_residuals_time(fitter, title=False, axs=ax0, figsize=(8, 2.5)) # Plot averaged residuals v. 
time - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_time(fitter, avg = True, axs = ax1, title = False, legend = False, figsize=(8,2.5)) + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_time( + fitter, + avg=True, + axs=ax1, + title=False, + legend=False, + figsize=(8, 2.5), + ) else: - log.warning("ECORR not in model, cannot generate epoch averaged residuals. Plots will show all residuals.") - plot_residuals_time(fitter, avg = False, axs = ax1, title = False, legend = False, figsize=(8,2.5)) + log.warning( + "ECORR not in model, cannot generate epoch averaged residuals. Plots will show all residuals." + ) + plot_residuals_time( + fitter, + avg=False, + axs=ax1, + title=False, + legend=False, + figsize=(8, 2.5), + ) # Plot residuals v orbital phase - if hasattr(fitter.model, 'binary_model_name'): - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_orb(fitter, title = False, legend = False, avg = True, axs = ax2, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=True, + axs=ax2, + figsize=(8, 2.5), + ) else: - plot_residuals_orb(fitter, title = False, legend = False, avg = False, axs = ax2, figsize=(8,2.5)) + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=False, + axs=ax2, + figsize=(8, 2.5), + ) # plot dmx v. time - if 'dispersion_dmx' in fitter.model.get_components_by_category().keys(): - plot_dmx_time(fitter, savedmx = "dmxparse.out", legend = False, title = False, axs = ax3, figsize=(8,2.5)) + if "dispersion_dmx" in fitter.model.get_components_by_category().keys(): + plot_dmx_time( + fitter, + savedmx="dmxparse.out", + legend=False, + title=False, + axs=ax3, + figsize=(8, 2.5), + ) else: log.warning("No DMX bins in timing model, cannot plot DMX v. 
Time.") plt.tight_layout() plt.savefig("%s_summary_plot_1_nb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 1: - if hasattr(fitter.model, 'binary_model_name'): - gs = fig.add_gridspec(4,2) - ax2 = fig.add_subplot(gs[2,:]) - ax3 = fig.add_subplot(gs[3,0]) - ax4 = fig.add_subplot(gs[3,1]) + if hasattr(fitter.model, "binary_model_name"): + gs = fig.add_gridspec(4, 2) + ax2 = fig.add_subplot(gs[2, :]) + ax3 = fig.add_subplot(gs[3, 0]) + ax4 = fig.add_subplot(gs[3, 1]) else: - gs = fig.add_gridspec(3,2) - ax3 = fig.add_subplot(gs[2,0]) - ax4 = fig.add_subplot(gs[2,1]) - ax0 = fig.add_subplot(gs[0,:]) - ax1 = fig.add_subplot(gs[1,:]) + gs = fig.add_gridspec(3, 2) + ax3 = fig.add_subplot(gs[2, 0]) + ax4 = fig.add_subplot(gs[2, 1]) + ax0 = fig.add_subplot(gs[0, :]) + ax1 = fig.add_subplot(gs[1, :]) # plot whitened residuals v time - plot_residuals_time(fitter, title = False, whitened = True, axs = ax0, figsize=(8,2.5)) + plot_residuals_time( + fitter, title=False, whitened=True, axs=ax0, figsize=(8, 2.5) + ) # plot whitened, epoch averaged residuals v time - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_time(fitter, title = False, legend = False, avg = True, \ - whitened = True, axs = ax1, figsize=(8,2.5)) + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_time( + fitter, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax1, + figsize=(8, 2.5), + ) else: - plot_residuals_time(fitter, title = False, legend = False, avg = False, \ - whitened = True, axs = ax1, figsize=(8,2.5)) + plot_residuals_time( + fitter, + title=False, + legend=False, + avg=False, + whitened=True, + axs=ax1, + figsize=(8, 2.5), + ) # Plot whitened, epoch averaged residuals v orbital phase - if hasattr(fitter.model, 'binary_model_name'): - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_orb(fitter, title = False, legend = False, avg = True, whitened = True, \ - 
axs = ax2, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) else: - plot_residuals_orb(fitter, title = False, legend = False, avg = False, whitened = True, \ - axs = ax2, figsize=(8,2.5)) + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=False, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) # plot number of whitened residuals histogram - plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, whitened = True,\ - axs = ax3, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + legend=False, + whitened=True, + axs=ax3, + figsize=(4, 2.5), + ) # plot number of whitened, epoch averaged residuals histogram - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, avg = True, whitened = True, \ - axs = ax4, figsize=(4,2.5)) + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax4, + figsize=(4, 2.5), + ) else: - plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, avg = False, whitened = True, \ - axs = ax4, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + legend=False, + avg=False, + whitened=True, + axs=ax4, + figsize=(4, 2.5), + ) plt.tight_layout() plt.savefig("%s_summary_plot_2_nb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 2: - if hasattr(fitter.model, 'binary_model_name'): - gs = fig.add_gridspec(4,2) - ax2 = fig.add_subplot(gs[2,:]) - ax3 = fig.add_subplot(gs[3,0]) - ax4 = fig.add_subplot(gs[3,1]) + if hasattr(fitter.model, "binary_model_name"): + gs = fig.add_gridspec(4, 2) + ax2 = 
fig.add_subplot(gs[2, :]) + ax3 = fig.add_subplot(gs[3, 0]) + ax4 = fig.add_subplot(gs[3, 1]) else: - gs = fig.add_gridspec(3,2) - ax3 = fig.add_subplot(gs[2,0]) - ax4 = fig.add_subplot(gs[2,1]) - ax0 = fig.add_subplot(gs[0,:]) - ax1 = fig.add_subplot(gs[1,:]) + gs = fig.add_gridspec(3, 2) + ax3 = fig.add_subplot(gs[2, 0]) + ax4 = fig.add_subplot(gs[2, 1]) + ax0 = fig.add_subplot(gs[0, :]) + ax1 = fig.add_subplot(gs[1, :]) # plot whitened residuals/uncertainty v. time - plot_residuals_time(fitter, plotsig = True, title = False, whitened = True, axs = ax0, figsize=(8,2.5)) + plot_residuals_time( + fitter, + plotsig=True, + title=False, + whitened=True, + axs=ax0, + figsize=(8, 2.5), + ) # plot whitened, epoch averaged residuals/uncertainty v. time - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, avg = True,\ - whitened = True, axs = ax1, figsize=(8,2.5)) + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + avg=True, + whitened=True, + axs=ax1, + figsize=(8, 2.5), + ) else: - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, avg = False,\ - whitened = True, axs = ax1, figsize=(8,2.5)) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + avg=False, + whitened=True, + axs=ax1, + figsize=(8, 2.5), + ) # plot whitened, epoch averaged residuals/uncertainty v. 
orbital phase - if hasattr(fitter.model, 'binary_model_name'): - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_orb(fitter, title = False, legend = False, plotsig = True, \ - avg = True, whitened = True, axs = ax2, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_orb( + fitter, + title=False, + legend=False, + plotsig=True, + avg=True, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) else: - plot_residuals_orb(fitter, title = False, legend = False, plotsig = True, \ - avg = False, whitened = True, axs = ax2, figsize=(8,2.5)) + plot_residuals_orb( + fitter, + title=False, + legend=False, + plotsig=True, + avg=False, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) # plot number of whitened residuals/uncertainty histogram - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, whitened = True,\ - axs = ax3, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + whitened=True, + axs=ax3, + figsize=(4, 2.5), + ) # plot number of whitened, epoch averaged residuals/uncertainties histogram - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, \ - avg = True, whitened = True, axs = ax4, figsize=(4,2.5)) + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax4, + figsize=(4, 2.5), + ) else: - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, \ - avg = False, whitened = True, axs = ax4, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + avg=False, + whitened=True, + axs=ax4, + 
figsize=(4, 2.5), + ) plt.tight_layout() plt.savefig("%s_summary_plot_3_nb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 3: - gs = fig.add_gridspec(1,1) + gs = fig.add_gridspec(1, 1) ax0 = fig.add_subplot(gs[0]) - plot_residuals_freq(fitter, title = False, legend = True, axs =ax0, figsize=(8,4)) + plot_residuals_freq( + fitter, title=False, legend=True, axs=ax0, figsize=(8, 4) + ) plt.tight_layout() plt.savefig("%s_summary_plot_4_nb.png" % (fitter.model.PSR.value)) plt.close() -def plots_for_summary_pdf_wb(fitter, title = None, legends = False): + +def plots_for_summary_pdf_wb(fitter, title=None, legends=False): """ Function to make a composite set of summary plots for sets of TOAs to be put into a summary pdf. This is for Wideband timing only. For Narrowband timing, use `plots_for_summary_pdf_nb`. By definition, this function will save all plots as "psrname"_summary_plot_#.wb.png, where # is - and integer from 1-4. + and integer from 1-4. Arguments --------- @@ -3829,239 +4686,294 @@ def plots_for_summary_pdf_wb(fitter, title = None, legends = False): legend [boolean] : If True, will add legends to ALL plots [default: False]. """ if not fitter.is_wideband: - raise ValueError("Cannot use this function with non-WidebandTOAFitter, please use `plots_for_summary_pdf_nb` instead.") + raise ValueError( + "Cannot use this function with non-WidebandTOAFitter, please use `plots_for_summary_pdf_nb` instead." 
+ ) # Need to make four sets of plots for ii in range(4): if ii != 3: - fig = plt.figure(figsize=(8,10.0),dpi=100) + fig = plt.figure(figsize=(8, 10.0), dpi=100) else: - fig = plt.figure(figsize=(8,5),dpi=100) + fig = plt.figure(figsize=(8, 5), dpi=100) if title != None: - plt.title(title, y = 1.08, size = 14) + plt.title(title, y=1.08, size=14) if ii == 0: - if hasattr(fitter.model, 'binary_model_name'): - gs = fig.add_gridspec(nrows = 4, ncols = 1) - ax2 = fig.add_subplot(gs[2,:]) - ax3 = fig.add_subplot(gs[3,:]) + if hasattr(fitter.model, "binary_model_name"): + gs = fig.add_gridspec(nrows=4, ncols=1) + ax2 = fig.add_subplot(gs[2, :]) + ax3 = fig.add_subplot(gs[3, :]) else: - gs = fig.add_gridspec(nrows = 3, ncols = 1) - ax3 = fig.add_subplot(gs[2,:]) - ax0 = fig.add_subplot(gs[0,:]) - ax1 = fig.add_subplot(gs[1,:]) + gs = fig.add_gridspec(nrows=3, ncols=1) + ax3 = fig.add_subplot(gs[2, :]) + ax0 = fig.add_subplot(gs[0, :]) + ax1 = fig.add_subplot(gs[1, :]) # Plot time residuals v. time - plot_residuals_time(fitter, title = False, axs = ax0, figsize=(8, 2.5)) + plot_residuals_time(fitter, title=False, axs=ax0, figsize=(8, 2.5)) # Plot DM residuals v. time - plot_dm_residuals(fitter, save = False, legend = False, title = False, axs = ax1, figsize=(8, 2.5)) + plot_dm_residuals( + fitter, save=False, legend=False, title=False, axs=ax1, figsize=(8, 2.5) + ) # Plot time residuals v. 
orbital phase - if hasattr(fitter.model, 'binary_model_name'): - plot_residuals_orb(fitter, title = False, legend = False, axs = ax2, figsize=(8,2.5)) - plot_dmx_time(fitter, savedmx = "dmxparse.out", legend = False, title = False, axs = ax3, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + plot_residuals_orb( + fitter, title=False, legend=False, axs=ax2, figsize=(8, 2.5) + ) + plot_dmx_time( + fitter, + savedmx="dmxparse.out", + legend=False, + title=False, + axs=ax3, + figsize=(8, 2.5), + ) plt.tight_layout() plt.savefig("%s_summary_plot_1_wb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 1: - if hasattr(fitter.model, 'binary_model_name'): - gs = fig.add_gridspec(3,2) - ax2 = fig.add_subplot(gs[1,:]) - ax3 = fig.add_subplot(gs[2,0]) - ax4 = fig.add_subplot(gs[2,1]) + if hasattr(fitter.model, "binary_model_name"): + gs = fig.add_gridspec(3, 2) + ax2 = fig.add_subplot(gs[1, :]) + ax3 = fig.add_subplot(gs[2, 0]) + ax4 = fig.add_subplot(gs[2, 1]) else: - gs = fig.add_gridspec(2,2) - ax3 = fig.add_subplot(gs[1,0]) - ax4 = fig.add_subplot(gs[1,1]) - ax0 = fig.add_subplot(gs[0,:]) - #ax1 = fig.add_subplot(gs[1,:]) + gs = fig.add_gridspec(2, 2) + ax3 = fig.add_subplot(gs[1, 0]) + ax4 = fig.add_subplot(gs[1, 1]) + ax0 = fig.add_subplot(gs[0, :]) + # ax1 = fig.add_subplot(gs[1,:]) # Plot whitened time residuals v. time - plot_residuals_time(fitter, title = False, whitened = True, axs = ax0, figsize=(8,2.5)) + plot_residuals_time( + fitter, title=False, whitened=True, axs=ax0, figsize=(8, 2.5) + ) # Plot whitened time residuals v. 
time - if hasattr(fitter.model, 'binary_model_name'): - plot_residuals_orb(fitter, title = False, legend = False, whitened = True, axs = ax2, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + plot_residuals_orb( + fitter, + title=False, + legend=False, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) # Plot number of whitened residuals histograms - plot_measurements_v_res(fitter, nbin = 50, title = False, plotsig=False, legend = False, whitened = True,\ - axs = ax3, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + plotsig=False, + legend=False, + whitened=True, + axs=ax3, + figsize=(4, 2.5), + ) # plot number of DM residuals histograms - plot_measurements_v_dmres(fitter, nbin = 50, legend = False, title = False, axs = ax4) + plot_measurements_v_dmres( + fitter, nbin=50, legend=False, title=False, axs=ax4 + ) plt.tight_layout() plt.savefig("%s_summary_plot_2_wb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 2: - if hasattr(fitter.model, 'binary_model_name'): - gs = fig.add_gridspec(4,2) - ax2 = fig.add_subplot(gs[2,:]) - ax3 = fig.add_subplot(gs[3,0]) - ax4 = fig.add_subplot(gs[3,1]) + if hasattr(fitter.model, "binary_model_name"): + gs = fig.add_gridspec(4, 2) + ax2 = fig.add_subplot(gs[2, :]) + ax3 = fig.add_subplot(gs[3, 0]) + ax4 = fig.add_subplot(gs[3, 1]) else: - gs = fig.add_gridspec(3,2) - ax3 = fig.add_subplot(gs[2,0]) - ax4 = fig.add_subplot(gs[2,1]) - ax0 = fig.add_subplot(gs[0,:]) - ax1 = fig.add_subplot(gs[1,:]) + gs = fig.add_gridspec(3, 2) + ax3 = fig.add_subplot(gs[2, 0]) + ax4 = fig.add_subplot(gs[2, 1]) + ax0 = fig.add_subplot(gs[0, :]) + ax1 = fig.add_subplot(gs[1, :]) # plot whitened time residuals/uncertainty v time - plot_residuals_time(fitter, plotsig = True, title = False, whitened = True, axs = ax0, figsize=(8,2.5)) + plot_residuals_time( + fitter, + plotsig=True, + title=False, + whitened=True, + axs=ax0, + figsize=(8, 2.5), + ) # Plot DM residuals/uncertainty v. 
time - plot_dm_residuals(fitter, plotsig = True, save = False, legend = False, title = False, axs = ax1, figsize=(8, 2.5)) + plot_dm_residuals( + fitter, + plotsig=True, + save=False, + legend=False, + title=False, + axs=ax1, + figsize=(8, 2.5), + ) # Plot whitened time residuals/uncertainty v orbital phase - if hasattr(fitter.model, 'binary_model_name'): - plot_residuals_orb(fitter, title = False, legend = False, \ - plotsig = True, whitened = True, axs = ax2, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + plot_residuals_orb( + fitter, + title=False, + legend=False, + plotsig=True, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) # plot number of whitened time residuals/uncertainty histograms - plot_measurements_v_res(fitter, nbin = 50, title = False, plotsig=True, legend = False, whitened = True,\ - axs = ax3, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + plotsig=True, + legend=False, + whitened=True, + axs=ax3, + figsize=(4, 2.5), + ) # plot number of DM residuals/uncertainty histograms - plot_measurements_v_dmres(fitter, plotsig = True, nbin = 50, legend = False, title = False, \ - axs = ax4) + plot_measurements_v_dmres( + fitter, plotsig=True, nbin=50, legend=False, title=False, axs=ax4 + ) plt.tight_layout() plt.savefig("%s_summary_plot_3_wb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 3: - gs = fig.add_gridspec(1,1) + gs = fig.add_gridspec(1, 1) ax0 = fig.add_subplot(gs[0]) - plot_residuals_freq(fitter, title = False, legend = True, axs =ax0, figsize=(8,4)) + plot_residuals_freq( + fitter, title=False, legend=True, axs=ax0, figsize=(8, 4) + ) plt.tight_layout() plt.savefig("%s_summary_plot_4_wb.png" % (fitter.model.PSR.value)) plt.close() -def plot_settings(): + +def plot_settings(colorby="f"): """ Initialize plot rc params, define color scheme """ fig_width_pt = 620 - inches_per_pt = 1.0/72.27 # Convert pt to inches - golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio - fig_width = 
fig_width_pt*inches_per_pt # width in inches - fig_height = fig_width*golden_mean*2 # height in inches - fig_size = [fig_width,fig_height] + inches_per_pt = 1.0 / 72.27 # Convert pt to inches + golden_mean = (np.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio + fig_width = fig_width_pt * inches_per_pt # width in inches + fig_height = fig_width * golden_mean * 2 # height in inches + fig_size = [fig_width, fig_height] fontsize = 20 # for xlabel, backend labels - plotting_params = {'backend': 'pdf', 'axes.labelsize': 12, 'lines.markersize': 4, 'font.size': 12, 'xtick.major.size': 6, 'xtick.minor.size': 3, 'ytick.major.size': 6, 'ytick.minor.size': 3, 'xtick.major.width': 0.5, 'ytick.major.width': 0.5, 'xtick.minor.width': 0.5, 'ytick.minor.width': 0.5, 'lines.markeredgewidth': 1, 'axes.linewidth': 1.2, 'legend.fontsize': 10, 'xtick.labelsize': 12, 'ytick.labelsize': 10, 'savefig.dpi': 400, 'path.simplify': True, 'font.family': 'serif', 'font.serif': 'Times', 'text.usetex': True, 'figure.figsize': fig_size, 'text.latex.preamble': r'\usepackage{amsmath}'} + plotting_params = { + "backend": "pdf", + "axes.labelsize": 12, + "lines.markersize": 4, + "font.size": 12, + "xtick.major.size": 6, + "xtick.minor.size": 3, + "ytick.major.size": 6, + "ytick.minor.size": 3, + "xtick.major.width": 0.5, + "ytick.major.width": 0.5, + "xtick.minor.width": 0.5, + "ytick.minor.width": 0.5, + "lines.markeredgewidth": 1, + "axes.linewidth": 1.2, + "legend.fontsize": 10, + "xtick.labelsize": 12, + "ytick.labelsize": 10, + "savefig.dpi": 400, + "path.simplify": True, + "font.family": "serif", + "font.serif": "Times", + "text.usetex": True, + "figure.figsize": fig_size, + "text.latex.preamble": r"\usepackage{amsmath}", + } plt.rcParams.update(plotting_params) - # Color scheme for consistent reciever-backend combos, same as published 12.5 yr - colorschemes = {'thankful_2':{ - "327_ASP": "#BE0119", - "327_PUPPI": "#BE0119", - "430_ASP": "#FD9927", - "430_PUPPI": "#FD9927", - "L-wide_ASP": "#6BA9E2", - 
"L-wide_PUPPI": "#6BA9E2", - "Rcvr1_2_GASP": "#407BD5", - "Rcvr1_2_GUPPI": "#407BD5", - "Rcvr_800_GASP": "#61C853", - "Rcvr_800_GUPPI": "#61C853", - "S-wide_ASP": "#855CA0", - "S-wide_PUPPI": "#855CA0", - "1.5GHz_YUPPI": "#45062E", - "3GHz_YUPPI": "#E5A4CB", - "6GHz_YUPPI": "#40635F", - "CHIME": "#ECE133", - }} - - # marker dictionary to be used if desired, currently all 'x' - markers = {"327_ASP": "x", - "327_PUPPI": "x", - "430_ASP": "x", - "430_PUPPI": "x", - "L-wide_ASP": "x", - "L-wide_PUPPI": "x", - "Rcvr1_2_GASP": "x", - "Rcvr1_2_GUPPI": "x", - "Rcvr_800_GASP": "x", - "Rcvr_800_GUPPI": "x", - "S-wide_ASP": "x", - "S-wide_PUPPI": "x", - "1.5GHz_YUPPI": "x", - "3GHz_YUPPI": "x", - "6GHz_YUPPI": "x", - "CHIME": "x", - } - - # Define the color map option - colorscheme = colorschemes['thankful_2'] - - return markers, colorscheme + colorscheme, markerscheme = set_color_and_marker(colorby) + return markerscheme, colorscheme + def get_fitter(yaml): """ Get the fitter and model from a given YAML - + Parameters ========== yaml: str yaml to use for locating latest results - + """ tc = TimingConfiguration(yaml) mo, to = tc.get_model_and_toas(excised=True, usepickle=True) tc.manual_cuts(to) receivers = lu.get_receivers(to) - if tc.get_toa_type() == 'WB': + if tc.get_toa_type() == "WB": lu.add_feDMJumps(mo, receivers) else: lu.add_feJumps(mo, receivers) fo = tc.construct_fitter(to, mo) return fo, mo + def get_avg_years(fo_nb, fo_wb, avg_dict): """ Get MJDS for each data set in years - + Parameters ========== fo: fitter object mo: model object avg_dict: from fo.resids.ecorr_average() - + """ mjd_nb = fo_nb.toas.get_mjds().value - years_nb = (mjd_nb - 51544.0)/365.25 + 2000.0 - mjd_wb = fo_wb.toas.get_mjds().value - years_wb = (mjd_wb - 51544.0)/365.25 + 2000.0 - mjds_avg = avg_dict['mjds'].value - years_avg = (mjds_avg - 51544.0)/365.25 + 2000.0 + years_nb = (mjd_nb - 51544.0) / 365.25 + 2000.0 + mjd_wb = fo_wb.toas.get_mjds().value + years_wb = (mjd_wb - 51544.0) / 365.25 
+ 2000.0 + mjds_avg = avg_dict["mjds"].value + years_avg = (mjds_avg - 51544.0) / 365.25 + 2000.0 return years_nb, years_wb, years_avg + def get_backends(fo_nb, fo_wb, avg_dict): """ Grab backends via flags to make plotting easier - + Parameters ========== fo: fitter object mo: model object avg_dict: from fo.resids.ecorr_average() - + """ - rcvr_bcknds_nb = np.array(fo_nb.toas.get_flag_value('f')[0]) + rcvr_bcknds_nb = np.array(fo_nb.toas.get_flag_value("f")[0]) rcvr_set_nb = set(rcvr_bcknds_nb) - rcvr_bcknds_wb = np.array(fo_wb.toas.get_flag_value('f')[0]) + rcvr_bcknds_wb = np.array(fo_wb.toas.get_flag_value("f")[0]) rcvr_set_wb = set(rcvr_bcknds_wb) avg_rcvr_bcknds = [] - for iis in avg_dict['indices']: + for iis in avg_dict["indices"]: avg_rcvr_bcknds.append(rcvr_bcknds_nb[iis[0]]) rcvr_bcknds_avg = np.array(avg_rcvr_bcknds) rcvr_set_avg = set(rcvr_bcknds_avg) return rcvr_bcknds_nb, rcvr_bcknds_wb, rcvr_bcknds_avg + def get_DMX_info(fo): """ Get DMX timeseries info from dmxparse - + Parameters ========== fo: fitter object - + """ dmx_dict = pint.utils.dmxparse(fo) - DMXs = dmx_dict['dmxs'].value - DMX_vErrs = dmx_dict['dmx_verrs'].value - DMX_center_MJD = dmx_dict['dmxeps'].value - DMX_center_Year = (DMX_center_MJD - 51544.0)/365.25 + 2000.0 + DMXs = dmx_dict["dmxs"].value + DMX_vErrs = dmx_dict["dmx_verrs"].value + DMX_center_MJD = dmx_dict["dmxeps"].value + DMX_center_Year = (DMX_center_MJD - 51544.0) / 365.25 + 2000.0 return DMXs, DMX_vErrs, DMX_center_Year + def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): """ Plot color-divided-by-receiver/BE points on any axis - + Parameters ========== ax: axis for plotting @@ -4070,53 +4982,52 @@ def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): err: error bars to plot bknds: list of backend flags associated with TOAs rn_off: the DC red noise offset to subtract (prior to PINT fix) - + """ markers, colorscheme = plot_settings() for i, r_b in enumerate(set(bknds)): - inds = 
np.where(bknds==r_b)[0] + inds = np.where(bknds == r_b)[0] if not inds.tolist(): r_b_label = "" else: r_b_label = bknds[inds][0] mkr = markers[r_b_label] clr = colorscheme[r_b_label] - ax.errorbar(x[inds], y[inds] - (rn_off * u.us), yerr=err[inds], fmt=mkr, color=clr, label=r_b_label, alpha=0.5) - - ylim = (max(np.abs(y - (rn_off * u.us))).value + 0.6 * max(np.abs(err)).value) + ax.errorbar( + x[inds], + y[inds] - (rn_off * u.us), + yerr=err[inds], + fmt=mkr, + color=clr, + label=r_b_label, + alpha=0.5, + ) + + ylim = max(np.abs(y - (rn_off * u.us))).value + 0.6 * max(np.abs(err)).value ax.set_ylim(-1 * ylim * 1.08, ylim * 1.08) if be_legend: handles, labels = ax.get_legend_handles_labels() - labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0])) - label_names = {"327_ASP": "ASP 327 MHz", - "327_PUPPI": "PUPPI 327 MHz", - "430_ASP": "ASP 430 MHz", - "430_PUPPI": "PUPPI 430 MHz", - "L-wide_ASP": "ASP L-wide", - "L-wide_PUPPI": "PUPPI L-wide", - "Rcvr1_2_GASP": "GASP L-band", - "Rcvr1_2_GUPPI": "GUPPI L-band", - "Rcvr_800_GASP": "GASP 820 MHz", - "Rcvr_800_GUPPI": "GUPPI 820 MHz", - "S-wide_ASP": "ASP S-wide", - "S-wide_PUPPI": "PUPPI S-wide", - "1.5GHz_YUPPI": "YUPPI 1.5 GHz", - "3GHz_YUPPI": "YUPPI 3 GHz", - "6GHz_YUPPI": "YUPPI 6 GHz", - "CHIME": "CHIME", - } + labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0])) fixed_labels = [label_names[l] for l in labels] - if be_format == 'vert': + if be_format == "vert": plt.legend(handles, fixed_labels, loc=(1.005, 0), fontsize=12) - if be_format == 'horiz': - plt.legend(handles, fixed_labels, loc='lower left', ncol=len(fixed_labels), borderpad=0.1, columnspacing=0.1) + if be_format == "horiz": + plt.legend( + handles, + fixed_labels, + loc="lower left", + ncol=len(fixed_labels), + borderpad=0.1, + columnspacing=0.1, + ) ax.set_ylim(-1 * ylim * 1.2, ylim * 1.08) + def rec_labels(axs, bcknds, years_avg): """ Mark transitions between backends - + Parameters ========== axs: axis for 
plotting @@ -4125,7 +5036,7 @@ def rec_labels(axs, bcknds, years_avg): err: error bars to plot bknds: list of backend flags associated with TOAs rn_off: the DC red noise offset to subtract (prior to PINT fix) - + """ guppi = 2010.1 puppi = 2012.1 @@ -4139,64 +5050,135 @@ def rec_labels(axs, bcknds, years_avg): has_yuppi = False for r in bcknds: - if 'ASP' in r: + if "ASP" in r: has_asp = True - if 'PUPPI' in r: + if "PUPPI" in r: has_puppi = True - if 'GASP' in r: + if "GASP" in r: has_gasp = True - if 'GUPPI' in r: + if "GUPPI" in r: has_guppi = True - if 'YUPPI' in r: + if "YUPPI" in r: has_yuppi = True if has_asp and has_puppi: for a in axs: has_ao = True - a.axvline(puppi, linewidth=0.75, color='k', linestyle='--', alpha=0.6) + a.axvline(puppi, linewidth=0.75, color="k", linestyle="--", alpha=0.6) if has_gasp and has_guppi: for a in axs: has_gbt = True - a.axvline(guppi, linewidth=0.75, color='k', linestyle='--', alpha=0.6) + a.axvline(guppi, linewidth=0.75, color="k", linestyle="--", alpha=0.6) ycoord = 1.1 x_min_yr = min(years_avg) x_max_yr = max(years_avg) tform = axs[0].get_xaxis_transform() - va = ha = 'center' + va = ha = "center" if has_ao and has_gbt: if has_yuppi: - axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI/GUPPI/YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (puppi + x_max_yr) / 2.0, + ycoord, + "PUPPI/GUPPI/YUPPI", + transform=tform, + va=va, + ha=ha, + ) else: - axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI/GUPPI', transform=tform, va=va, ha=ha) - axs[0].text((guppi+x_min_yr)/2., ycoord, 'ASP/GASP', transform=tform, va=va, ha=ha) - axs[0].text((guppi+puppi)/2., ycoord, 'ASP/GUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (puppi + x_max_yr) / 2.0, + ycoord, + "PUPPI/GUPPI", + transform=tform, + va=va, + ha=ha, + ) + axs[0].text( + (guppi + x_min_yr) / 2.0, ycoord, "ASP/GASP", transform=tform, va=va, ha=ha + ) + axs[0].text( + (guppi + puppi) / 2.0, ycoord, "ASP/GUPPI", transform=tform, va=va, ha=ha + ) elif has_ao and not 
has_gbt: if has_yuppi: - axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI/YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (puppi + x_max_yr) / 2.0, + ycoord, + "PUPPI/YUPPI", + transform=tform, + va=va, + ha=ha, + ) else: - axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI', transform=tform, va=va, ha=ha) - axs[0].text((puppi+x_min_yr)/2. - 0.2, ycoord, 'ASP', transform=tform, va=va, ha=ha) + axs[0].text( + (puppi + x_max_yr) / 2.0, ycoord, "PUPPI", transform=tform, va=va, ha=ha + ) + axs[0].text( + (puppi + x_min_yr) / 2.0 - 0.2, ycoord, "ASP", transform=tform, va=va, ha=ha + ) elif not has_ao and has_gbt: if has_yuppi: - axs[0].text((puppi+x_max_yr)/2., ycoord, 'GUPPI/YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (puppi + x_max_yr) / 2.0, + ycoord, + "GUPPI/YUPPI", + transform=tform, + va=va, + ha=ha, + ) else: - axs[0].text((guppi+x_max_yr)/2., ycoord, 'GUPPI', transform=tform, va=va, ha=ha) - axs[0].text((guppi+x_min_yr)/2., ycoord, 'GASP', transform=tform, va=va, ha=ha) + axs[0].text( + (guppi + x_max_yr) / 2.0, ycoord, "GUPPI", transform=tform, va=va, ha=ha + ) + axs[0].text( + (guppi + x_min_yr) / 2.0, ycoord, "GASP", transform=tform, va=va, ha=ha + ) if has_puppi and not has_asp and not has_gasp and not has_guppi: if has_yuppi: - axs[0].text((x_min_yr+x_max_yr)/2., ycoord, 'PUPPI/YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (x_min_yr + x_max_yr) / 2.0, + ycoord, + "PUPPI/YUPPI", + transform=tform, + va=va, + ha=ha, + ) else: - axs[0].text((x_min_yr+x_max_yr)/2., ycoord, 'PUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (x_min_yr + x_max_yr) / 2.0, + ycoord, + "PUPPI", + transform=tform, + va=va, + ha=ha, + ) if has_guppi and not has_asp and not has_gasp and not has_puppi: if has_yuppi: - axs[0].text((x_min_yr+x_max_yr)/2., ycoord, 'GUPPI/YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (x_min_yr + x_max_yr) / 2.0, + ycoord, + "GUPPI/YUPPI", + transform=tform, + va=va, + ha=ha, + ) else: - 
axs[0].text((x_min_yr+x_max_yr)/2., ycoord, 'GUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (x_min_yr + x_max_yr) / 2.0, + ycoord, + "GUPPI", + transform=tform, + va=va, + ha=ha, + ) if has_yuppi and not has_guppi and not has_puppi: - axs[0].text((x_min_yr+x_max_yr)/2., ycoord, 'YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (x_min_yr + x_max_yr) / 2.0, ycoord, "YUPPI", transform=tform, va=va, ha=ha + ) + def rn_sub(testing, rn_subtract, fo_nb, fo_wb): if rn_subtract: @@ -4204,8 +5186,8 @@ def rn_sub(testing, rn_subtract, fo_nb, fo_wb): rn_nb = 0.0 rn_wb = 0.0 else: - rn_nb = fo_nb.current_state.xhat[0] * fo_nb.current_state.M[0,0] * 1e6 - rn_wb = fo_wb.current_state.xhat[0] * fo_wb.current_state.M[0,0] * 1e6 + rn_nb = fo_nb.current_state.xhat[0] * fo_nb.current_state.M[0, 0] * 1e6 + rn_wb = fo_wb.current_state.xhat[0] * fo_wb.current_state.M[0, 0] * 1e6 else: rn_nb = 0.0 rn_wb = 0.0 diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index 1ccf1ed5..1c7c7218 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -21,7 +21,7 @@ from pint_pal.utils import write_if_changed, apply_cut_flag, apply_cut_select from pint_pal.lite_utils import new_changelog_entry from pint_pal.lite_utils import check_toa_version, check_tobs -from pint_pal.defaults import * +import pint_pal.config class TimingConfiguration: """ @@ -45,13 +45,51 @@ def __init__(self, filename="config.yaml", tim_directory=None, par_directory=Non tim_directory (optional) : override the tim directory specified in the config par_directory (optional) : override the par directory specified in the config """ - self.filename = filename - with open(filename) as FILE: + self.filename = os.path.realpath(os.path.expanduser(filename)) + with open(self.filename) as FILE: self.config = yaml.load(FILE, Loader=yaml.FullLoader) - self.tim_directory = self.config['tim-directory'] if tim_directory is None else tim_directory - 
self.par_directory = self.config['par-directory'] if par_directory is None else par_directory + if tim_directory is not None: + self.config['tim-directory'] = tim_directory + if par_directory is not None: + self.config['par-directory'] = par_directory self.skip_check = self.config['skip-check'] if 'skip-check' in self.config.keys() else '' + @property + def tim_directory(self): + """ + Location of tim files, as specified in the config. + This returns the absolute path to the tim directory. + """ + return os.path.realpath( + os.path.join(pint_pal.config.DATA_ROOT, self.config['tim-directory']) + ) + + @tim_directory.setter + def tim_directory(self, tim_directory): + """ + Set tim directory. + If a relative path is supplied, it will be turned into an absolute path. + """ + self.config['tim-directory'] = tim_directory + + @property + def par_directory(self): + """ + Location of par files, as specified in the config. + This returns the absolute path to the par directory. + """ + return os.path.realpath( + os.path.join(pint_pal.config.DATA_ROOT, self.config['par-directory']) + ) + + @par_directory.setter + def par_directory(self, par_directory): + """ + Set par directory. + If a relative path is supplied, it will be turned into an absolute path. 
+ """ + self.config['par-directory'] = par_directory + def get_source(self): """ Return the source name """ return self.config['source'] @@ -133,7 +171,7 @@ def get_model_and_toas(self,usepickle=True,print_all_ignores=False,apply_initial usepickle=usepickle, bipm_version=BIPM, ephem=EPHEM, - planets=PLANET_SHAPIRO, + planets=pint_pal.config.PLANET_SHAPIRO, model=m, picklefilename=picklefilename, include_pn=include_pn @@ -336,8 +374,11 @@ def get_fitter(self): def construct_fitter(self, to, mo): """ Return the fitter, tracking pulse numbers if available """ fitter_name = self.config['fitter'] - fitter_class = getattr(pint.fitter, fitter_name) - return fitter_class(to, mo) + if fitter_name == 'Auto': + return pint.fitter.Fitter.auto(to, mo) + else: + fitter_class = getattr(pint.fitter, fitter_name) + return fitter_class(to, mo) def get_toa_type(self): """ Return the toa-type string """ @@ -359,7 +400,7 @@ def get_niter(self): def get_excised(self): """ Return excised-tim file if set and exists""" - if 'excised-tim' in self.config['intermediate-results'].keys(): + if 'excised-tim' in self.config['intermediate-results'].keys() and self.config['intermediate-results']['excised-tim']: if os.path.exists(self.config['intermediate-results']['excised-tim']): return self.config['intermediate-results']['excised-tim'] return None @@ -641,13 +682,13 @@ def get_fratio(self): """ Return desired frequency ratio """ if 'fratio' in self.config['dmx'].keys(): return self.config['dmx']['fratio'] - return FREQUENCY_RATIO + return pint_pal.config.FREQUENCY_RATIO def get_sw_delay(self): """ Return desired max(solar wind delay) threshold """ if 'max-sw-delay' in self.config['dmx'].keys(): return self.config['dmx']['max-sw-delay'] - return MAX_SOLARWIND_DELAY + return pint_pal.config.MAX_SOLARWIND_DELAY def get_custom_dmx(self): """ Return MJD/binning params for handling DM events, etc. 
""" @@ -744,7 +785,7 @@ def apply_ignore(self,toas,specify_keys=None,warn=False,model=None): if self.get_snr_cut() > 25.0 and self.get_toa_type() == 'WB': log.warning('snr-cut should be set to 25; try excising TOAs using other methods.') if 'poor-febe' in valid_valued: - fs = np.array([(f['f'] if 'f' in f else None) in toas.orig_table['flags']]) + fs = np.array([(f['f'] if 'f' in f else None) for f in toas.orig_table['flags']]) for febe in self.get_poor_febes(): febeinds = np.where(fs==febe)[0] apply_cut_flag(toas,febeinds,'poorfebe',warn=warn) diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py index ba71d011..b2b940a9 100644 --- a/src/pint_pal/utils.py +++ b/src/pint_pal/utils.py @@ -544,7 +544,9 @@ def alert(s): # Get some values from the fitter start = fitter.toas.first_MJD.value + start_ymd = fitter.toas.first_MJD.to_value(format='iso') finish = fitter.toas.last_MJD.value + finish_ymd = fitter.toas.last_MJD.to_value(format='iso') span = finish - start label = f"{psr} {'narrowband' if NB else 'wideband'}" @@ -573,8 +575,8 @@ def alert(s): for tf in tim_files: fsum.write(r'\item ' + verb(tf.split('/')[-1]) + '\n') fsum.write(r'\end{itemize}' + "\n") - fsum.write('Span: %.1f years (%.1f -- %.1f)\\\\\n ' % (span/365.24, - year(float(start)), year(float(finish)))) + fsum.write('Span: %.1f years (%s -- %s)\\\\\n ' % (span/365.24, + str(start_ymd).split(' ')[0], str(finish_ymd).split(' ')[0])) if NB: try: @@ -716,7 +718,7 @@ def alert(s): and pm.frozen and pm.value is not None and pm.value != 0): - if p in {"START", "FINISH", "POSEPOCH", "DMEPOCH", "PEPOCH", "TZRMJD", "DM", "DMX", "NTOA", "CHI2", "DMDATA", "TZRFRQ", "RNAMP", "RNIDX"}: + if p in {"START", "FINISH", "POSEPOCH", "DMEPOCH", "PEPOCH", "TZRMJD", "DM", "DMX", "NTOA", "CHI2", "DMDATA", "TZRFRQ", "RNAMP", "RNIDX", "CHI2R", "TRES", "SWP"}: ignoring.append(p) continue skip = False @@ -1236,18 +1238,35 @@ def check_recentness_noise(tc): name of the most recent available set of chains """ if not 
tc.get_noise_dir(): - log.warning(f"Yaml file does not have a noise-dir field (or it is unset).") + log.warning(f"Yaml file does not have a noise-dir field (or it is unset). Will check working directory.") return None, None d = os.path.abspath(tc.get_noise_dir()) + if glob.glob(os.path.join(d,"chain*.txt")): + log.warning(f'Ignoring chains directly in {d}. Chains should be in a subdirectory of {os.path.split(d)[1]} called {tc.get_source()}_{tc.get_toa_type().lower()}') noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p))) for p in sorted(glob.glob(os.path.join(d, "..", - "*.Noise.*", + "????-??-*", tc.get_source()+"_"+tc.get_toa_type().lower(), "chain*.txt")))] used_chains = os.path.basename(d) available_chains = [os.path.basename(n) for n in noise_runs] + + if not noise_runs: + log.warning(f'Looking for noise chains in given noise-dir ({d}), but does not follow current conventions (shared chains in /nanograv/share/20yr/noise-chains///).') + noise_runs = [os.path.dirname(os.path.abspath(p)) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower()+"*", "chain*.txt")))] + if len(noise_runs) > 0: + if len(noise_runs) == 1: + log.info(f'{len(noise_runs)} noise chain found in noise-dir ({d}).') + else: + log.info(f'{len(noise_runs)} noise chains found in noise-dir ({d}). Using first in sorted list.') + used_chains = os.path.abspath(noise_runs[0]) + available_chains = [os.path.abspath(n) for n in noise_runs] + + if not noise_runs: + log.warning('No chains found. 
Will search working directory and apply if found.') + log.info(f"Using: {used_chains}") log.info(f"Available: {' '.join(available_chains)}") try: @@ -1348,4 +1367,4 @@ def no_ecorr_average(toas, resids, use_noise_model=True): return no_avg - \ No newline at end of file + diff --git a/src/pint_pal/yamlio.py b/src/pint_pal/yamlio.py index 25878847..a5819f7f 100644 --- a/src/pint_pal/yamlio.py +++ b/src/pint_pal/yamlio.py @@ -8,11 +8,11 @@ import glob from astropy import log import numpy as np -from pint_pal.defaults import * +from pint_pal import config import os yaml = YAML() -RELEASE = f'/nanograv/timing/releases/15y/toagen/releases/{LATEST_TOA_RELEASE}/' +RELEASE = f'/nanograv/timing/releases/15y/toagen/releases/{config.LATEST_TOA_RELEASE}/' def fix_toa_info(yaml_file,current_release=RELEASE,overwrite=True,extension='fix'): """Checks/fixes tim-directory, toas, toa-type from existing yaml; writes new one. diff --git a/tests/.DS_Store b/tests/.DS_Store new file mode 100644 index 00000000..55c6c62b Binary files /dev/null and b/tests/.DS_Store differ diff --git a/tests/configs/J0605+3757.nb.yaml b/tests/configs/J0605+3757.nb.yaml index 949bd3f5..b08dae5b 100644 --- a/tests/configs/J0605+3757.nb.yaml +++ b/tests/configs/J0605+3757.nb.yaml @@ -25,6 +25,16 @@ outlier: # control outlier analysis runs n-burn: 1000 n-samples: 20000 +noise_run: + model: + inc_rn: true + inc_dmgp: false + inc_chromgp: false + inference: + likelihood: enterprise + sampler: PTMCMCSampler + n_iter: 200000 + intermediate-results: # use results from previous runs #noise-dir: /nanograv/share/15yr/timing/intermediate/20220301.Noise.nb.ac12e98/ #compare-noise-dir: /nanograv/share/15yr/timing/intermediate/20220222.Noise.nb.4e07003/ @@ -53,3 +63,4 @@ changelog: - '2021-09-24 joe.swiggum NOTE: updated AO/GBO coords (pint v0.8.3) and refit' - '2021-09-30 joe.swiggum NOTE: par file handed off to DWG for v1.0 is J0605+3757_PINT_20210928.nb.par' - '2022-03-08 joe.swiggum READY_FOR: v1.1' +- '2024-12-19 
jeremy.baier NOTE: adding noise_run to config' diff --git a/tests/configs/J0605+3757.wb.yaml b/tests/configs/J0605+3757.wb.yaml index bf7b6d16..6734fa6b 100644 --- a/tests/configs/J0605+3757.wb.yaml +++ b/tests/configs/J0605+3757.wb.yaml @@ -23,6 +23,16 @@ dmx: # control dmx windowing/fixing max-sw-delay: 0.1 # finer binning when solar wind delay > threshold (us) custom-dmx: [] # designated by [mjd_low,mjd_hi,binsize] +noise_run: + model: + inc_rn: true + inc_dmgp: false + inc_chromgp: false + inference: + likelihood: enterprise + sampler: PTMCMCSampler + n_iter: 200000 + intermediate-results: # use results from previous runs #noise-dir: /nanograv/share/15yr/timing/intermediate/20221021.Noise.wb.a8ff4ddc/ #compare-noise-dir: /nanograv/share/15yr/timing/intermediate/20220822.Noise.wb.a77c37bb/ @@ -49,3 +59,4 @@ changelog: - '2021-09-24 joe.swiggum NOTE: updated AO/GBO coords (pint v0.8.3) and refit' - '2021-09-30 joe.swiggum NOTE: par file handed off to DWG for v1.0 is J0605+3757_PINT_20210928.wb.par' - '2022-08-24 thankful.cromartie NOISE: changed to 20220822.Noise.wb.a77c37bb' +- '2024-12-19 jeremy.baier NOTE: adding noise_run to config' diff --git a/tests/test_run_notebook.py b/tests/test_run_notebook.py index 4f45fe77..e61874f3 100644 --- a/tests/test_run_notebook.py +++ b/tests/test_run_notebook.py @@ -3,15 +3,17 @@ from datetime import datetime from glob import glob import pytest +import pint_pal from pint_pal.notebook_runner import run_template_notebook - -base_dir = dirname(dirname(__file__)) +test_dir = dirname(__file__) +base_dir = dirname(test_dir) +pint_pal.set_data_root(test_dir) def config_files(): - config_files = (glob(join(base_dir, 'tests/configs/B*.nb.yaml')) - + glob(join(base_dir, 'tests/configs/J*.nb.yaml')) - + glob(join(base_dir, 'tests/configs/B*.wb.yaml')) - + glob(join(base_dir, 'tests/configs/J*.wb.yaml'))) + config_files = (glob(join(test_dir, 'configs', 'B*.nb.yaml')) + + glob(join(test_dir, 'configs', 'J*.nb.yaml')) + + 
glob(join(test_dir, 'configs', 'B*.wb.yaml')) + + glob(join(test_dir, 'configs', 'J*.wb.yaml'))) config_files = sorted(config_files) basenames = [splitext(split(filename)[1])[0] for filename in config_files] print(config_files) @@ -38,6 +40,7 @@ def test_run_notebook(config_file, output_dir): `pytest -n tests/test_run_notebook.py` is the number of worker processes to launch (e.g. 4 to use 4 CPU threads) """ + pint_pal.set_data_root(dirname(__file__)) global_log = join(output_dir, f'test-run-notebook.log') with open(global_log, 'a') as f: run_template_notebook(