From 28453c54f45c34871bc7082f66b952c8c1a6e955 Mon Sep 17 00:00:00 2001 From: tcromartie Date: Tue, 7 May 2024 21:17:04 +0000 Subject: [PATCH 001/193] Add PINT autofitter option --- src/pint_pal/timingconfiguration.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index a4f8f99e..a04ff55b 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -336,8 +336,11 @@ def get_fitter(self): def construct_fitter(self, to, mo): """ Return the fitter, tracking pulse numbers if available """ fitter_name = self.config['fitter'] - fitter_class = getattr(pint.fitter, fitter_name) - return fitter_class(to, mo) + if fitter_name == 'Auto': + return pint.fitter.Fitter.auto(to, mo) + else: + fitter_class = getattr(pint.fitter, fitter_name) + return fitter_class(to, mo) def get_toa_type(self): """ Return the toa-type string """ From d68ecab07f95996da3e42d845c583ba05fc404fd Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Tue, 7 May 2024 18:07:22 -0400 Subject: [PATCH 002/193] Also run tests on PRs targeting NG20 branch --- .github/workflows/test_notebook.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index b430d589..eddc18d6 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -7,6 +7,7 @@ on: pull_request: branches: - main + - NG20 jobs: build: From e831c122ca50fdfa0b470869cd835dcb2e33b2ed Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 8 May 2024 16:44:04 -0400 Subject: [PATCH 003/193] Add config module --- pyproject.toml | 3 ++ src/pint_pal/__init__.py | 2 + src/pint_pal/config.py | 61 +++++++++++++++++++++++++++++ src/pint_pal/defaults.py | 16 -------- src/pint_pal/defaults.yaml | 21 ++++++++++ src/pint_pal/par_checker.py | 8 +++- src/pint_pal/timingconfiguration.py | 2 +- src/pint_pal/yamlio.py | 2 +- 8 files changed, 96 insertions(+), 19 deletions(-) create mode 100644 src/pint_pal/config.py delete mode 100644 src/pint_pal/defaults.py create mode 100644 src/pint_pal/defaults.yaml diff --git a/pyproject.toml b/pyproject.toml index 427ac4ce..0f91604b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,6 +39,9 @@ classifiers = [ "Operating System :: OS Independent", ] +[tool.setuptools.package-data] +pint_pal = ["defaults.yaml"] + [project.urls] "Homepage" = "https://github.com/nanograv/pint_pal" "Bug Tracker" = "https://github.com/nanograv/pint_pal/issues" diff --git a/src/pint_pal/__init__.py b/src/pint_pal/__init__.py index af9b84cf..3429e639 100644 --- a/src/pint_pal/__init__.py +++ b/src/pint_pal/__init__.py @@ -1,4 +1,6 @@ import pint_pal.checkin +import pint_pal.config +from pint_pal.config import set_data_root from . import _version __version__ = _version.get_versions()['version'] diff --git a/src/pint_pal/config.py b/src/pint_pal/config.py new file mode 100644 index 00000000..0997474c --- /dev/null +++ b/src/pint_pal/config.py @@ -0,0 +1,61 @@ +from ruamel.yaml import YAML +import os.path +yaml = YAML(typ='safe') +PACKAGE_DIR = os.path.dirname(__file__) +DATA_ROOT = '.' + +def set_data_root(path): + """ + Set the root directory of the data repository to be used with PINT Pal. + PINT Pal will search this directory for a configuration file specifying settings + such as the appropriate JPL ephemeris and version of TT(BIPM) to check for when + validating timing models. 
+ + It will also be treated as the base directory when resolving paths in YAML + configuration files. This allows notebooks (or scripts) using YAML files within + the data repository, which specify paths relative to the data root, to be run + from other locations. + + The default value of `data_root` is '.' (the current working directory), which + is sufficient in cases where either (1) no data repository is in use, or + (2) all scripts and notebooks are run from the root of the data repository. + """ + global DATA_ROOT + DATA_ROOT = path + try: + read_config_file(os.path.join(DATA_ROOT, 'pint_pal_project.yaml')) + except FileNotFoundError: + pass + +def read_config_file(config_file): + """ + Read a configuration file, along the lines of `defaults.yaml`, and load the results + into a location that can be accessed by other PINT Pal code. + """ + with open(config_file, 'r') as f: + config = yaml.load(f) + + global LATEST_BIPM + global LATEST_EPHEM + global PLANET_SHAPIRO + global CORRECT_TROPOSPHERE + global FREQUENCY_RATIO + global MAX_SOLARWIND_DELAY + global LATEST_TOA_RELEASE + + if 'LATEST_BIPM' in config: + LATEST_BIPM = config['LATEST_BIPM'] + if 'LATEST_EPHEM' in config: + LATEST_EPHEM = config['LATEST_EPHEM'] + if 'PLANET_SHAPIRO' in config: + PLANET_SHAPIRO = config['PLANET_SHAPIRO'] + if 'CORRECT_TROPOSPHERE' in config: + CORRECT_TROPOSPHERE = config['CORRECT_TROPOSPHERE'] + if 'FREQUENCY_RATIO' in config: + FREQUENCY_RATIO = config['FREQUENCY_RATIO'] + if 'MAX_SOLARWIND_DELAY' in config: + MAX_SOLARWIND_DELAY = config['MAX_SOLARWIND_DELAY'] + if 'LATEST_TOA_RELEASE' in config: + LATEST_TOA_RELEASE = config['LATEST_TOA_RELEASE'] + +read_config_file(os.path.join(PACKAGE_DIR, 'defaults.yaml')) diff --git a/src/pint_pal/defaults.py b/src/pint_pal/defaults.py deleted file mode 100644 index 0bc6c674..00000000 --- a/src/pint_pal/defaults.py +++ /dev/null @@ -1,16 +0,0 @@ -# Here we keep track of global default settings - -# Choice of clock, SSE -LATEST_BIPM = "BIPM2021" # latest clock realization to use -LATEST_EPHEM = "DE440" # latest solar system ephemeris to use - -# Toggle various corrections -PLANET_SHAPIRO = True # correct for Shapiro delay from planets -CORRECT_TROPOSPHERE = True # correct for tropospheric delays - -# DMX model defaults -FREQUENCY_RATIO = 1.1 # set the high/low frequency ratio for DMX bins -MAX_SOLARWIND_DELAY = 0.1 # set the maximum permited 'delay' from SW [us] - -# Desired TOA release tag -LATEST_TOA_RELEASE = "2021.08.25-9d8d617" # current set of TOAs available diff --git a/src/pint_pal/defaults.yaml b/src/pint_pal/defaults.yaml new file mode 100644 index 00000000..6cfcc3d3 --- /dev/null +++ b/src/pint_pal/defaults.yaml @@ -0,0 +1,21 @@ +# Here we keep track of global default settings +# +# These can be overridden on a per-project basis by placing a file +# called `pint_pal_project.yaml` in the `data_root` location (this +# defaults to the current working directory, but can be configured +# with `pint_pal.set_data_root()`). 
+ +# Choice of clock, SSE +LATEST_BIPM: "BIPM2021" # latest clock realization to use +LATEST_EPHEM: "DE440" # latest solar system ephemeris to use + +# Toggle various corrections +PLANET_SHAPIRO: True # correct for Shapiro delay from planets +CORRECT_TROPOSPHERE: True # correct for tropospheric delays + +# DMX model defaults +FREQUENCY_RATIO: 1.1 # set the high/low frequency ratio for DMX bins +MAX_SOLARWIND_DELAY: 0.1 # set the maximum permited 'delay' from SW [us] + +# Desired TOA release tag +LATEST_TOA_RELEASE: "2021.08.25-9d8d617" # current set of TOAs available diff --git a/src/pint_pal/par_checker.py b/src/pint_pal/par_checker.py index 011c489a..eb23ec06 100644 --- a/src/pint_pal/par_checker.py +++ b/src/pint_pal/par_checker.py @@ -4,7 +4,13 @@ import copy from astropy import log import astropy.units as u -from pint_pal.defaults import * +from pint_pal.defaults import ( + LATEST_BIPM, + LATEST_EPHEM, + PLANET_SHAPIRO, + CORRECT_TROPOSPHERE, + LATEST_TOA_RELEASE, +) from pint.modelutils import model_equatorial_to_ecliptic def check_if_fit(model, *param): diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index a04ff55b..fd98c35a 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -21,7 +21,7 @@ from pint_pal.utils import write_if_changed, apply_cut_flag, apply_cut_select from pint_pal.lite_utils import new_changelog_entry from pint_pal.lite_utils import check_toa_version, check_tobs -from pint_pal.defaults import * +from pint_pal.config import PLANET_SHAPIRO, FREQUENCY_RATIO, MAX_SOLARWIND_DELAY class TimingConfiguration: """ diff --git a/src/pint_pal/yamlio.py b/src/pint_pal/yamlio.py index 25878847..5cfe6868 100644 --- a/src/pint_pal/yamlio.py +++ b/src/pint_pal/yamlio.py @@ -8,7 +8,7 @@ import glob from astropy import log import numpy as np -from pint_pal.defaults import * +from pint_pal.config import LATEST_TOA_RELEASE import os yaml = YAML() From a726635f73fdb35f9dbf8b7d46a184b6f0f88e38 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 8 May 2024 17:08:30 -0400 Subject: [PATCH 004/193] Replace one last instance of pint_pal.defaults --- src/pint_pal/par_checker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/par_checker.py b/src/pint_pal/par_checker.py index eb23ec06..4128209e 100644 --- a/src/pint_pal/par_checker.py +++ b/src/pint_pal/par_checker.py @@ -4,7 +4,7 @@ import copy from astropy import log import astropy.units as u -from pint_pal.defaults import ( +from pint_pal.config import ( LATEST_BIPM, LATEST_EPHEM, PLANET_SHAPIRO, From 5ee27a7bf08c0ba3c46b6ef55e2d3556be15b4d1 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 8 May 2024 17:32:26 -0400 Subject: [PATCH 005/193] Actually use DATA_ROOT to resolve paths --- src/pint_pal/notebook_runner.py | 5 +++-- src/pint_pal/timingconfiguration.py | 16 +++++++++++++--- tests/test_run_notebook.py | 14 ++++++++------ 3 files changed, 24 insertions(+), 11 deletions(-) diff --git a/src/pint_pal/notebook_runner.py b/src/pint_pal/notebook_runner.py index 495f6fbd..2e2223ee 100644 --- a/src/pint_pal/notebook_runner.py +++ b/src/pint_pal/notebook_runner.py @@ -11,6 +11,7 @@ import pint_pal from pint_pal.notebook_templater import transform_notebook +from pint_pal.config import DATA_ROOT ansi_color = re.compile(r'\x1b\[([0-9]{1,3};)*[0-9]{1,3}m') @@ -36,8 +37,8 @@ def run_template_notebook(template_nb, config_file, output_nb=None, err_file=Non verbose: Print a description of replacements made in the template notebook. 
transformations: Transformations to apply to the notebook. """ - # base_dir = parent directory of directory containing config_file - base_dir = os.path.dirname(os.path.dirname(os.path.abspath(config_file))) + # base_dir = root of data repository + base_dir = DATA_ROOT nb_name = os.path.splitext(os.path.split(template_nb)[1])[0] cfg_name = os.path.splitext(os.path.split(config_file)[1])[0] diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index fd98c35a..2af573c8 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -21,7 +21,7 @@ from pint_pal.utils import write_if_changed, apply_cut_flag, apply_cut_select from pint_pal.lite_utils import new_changelog_entry from pint_pal.lite_utils import check_toa_version, check_tobs -from pint_pal.config import PLANET_SHAPIRO, FREQUENCY_RATIO, MAX_SOLARWIND_DELAY +from pint_pal.config import DATA_ROOT, PLANET_SHAPIRO, FREQUENCY_RATIO, MAX_SOLARWIND_DELAY class TimingConfiguration: """ @@ -48,8 +48,18 @@ def __init__(self, filename="config.yaml", tim_directory=None, par_directory=Non self.filename = filename with open(filename) as FILE: self.config = yaml.load(FILE, Loader=yaml.FullLoader) - self.tim_directory = self.config['tim-directory'] if tim_directory is None else tim_directory - self.par_directory = self.config['par-directory'] if par_directory is None else par_directory + if tim_directory is None: + self.tim_directory = os.path.realpath( + os.path.join(DATA_ROOT, self.config['tim-directory']) + ) + else: + self.tim_directory = tim_directory + if par_directory is None: + self.par_directory = os.path.realpath( + os.path.join(DATA_ROOT, self.config['par-directory']) + ) + else: + self.par_directory = par_directory self.skip_check = self.config['skip-check'] if 'skip-check' in self.config.keys() else '' def get_source(self): diff --git a/tests/test_run_notebook.py b/tests/test_run_notebook.py index 4f45fe77..b1f47262 100644 --- a/tests/test_run_notebook.py +++ b/tests/test_run_notebook.py @@ -3,15 +3,17 @@ from datetime import datetime from glob import glob import pytest +import pint_pal from pint_pal.notebook_runner import run_template_notebook - -base_dir = dirname(dirname(__file__)) +test_dir = dirname(__file__) +base_dir = dirname(test_dir) +pint_pal.set_data_root(test_dir) def config_files(): - config_files = (glob(join(base_dir, 'tests/configs/B*.nb.yaml')) - + glob(join(base_dir, 'tests/configs/J*.nb.yaml')) - + glob(join(base_dir, 'tests/configs/B*.wb.yaml')) - + glob(join(base_dir, 'tests/configs/J*.wb.yaml'))) + config_files = (glob(join(test_dir, 'configs', 'B*.nb.yaml')) + + glob(join(test_dir, 'configs', 'J*.nb.yaml')) + + glob(join(test_dir, 'configs', 'B*.wb.yaml')) + + glob(join(test_dir, 'configs', 'J*.wb.yaml'))) config_files = sorted(config_files) basenames = [splitext(split(filename)[1])[0] for filename in config_files] print(config_files) From 37794c4a50ff986acec39b3cb10d30ce49fccb1b Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Thu, 9 May 2024 12:57:44 -0400 Subject: [PATCH 006/193] Can't import config vars directly --- src/pint_pal/notebook_runner.py | 4 +-- src/pint_pal/par_checker.py | 40 ++++++++++++++--------------- src/pint_pal/timingconfiguration.py | 12 ++++----- src/pint_pal/yamlio.py | 4 +-- 4 files changed, 30 insertions(+), 30 deletions(-) diff --git a/src/pint_pal/notebook_runner.py b/src/pint_pal/notebook_runner.py index 2e2223ee..26db8126 100644 --- a/src/pint_pal/notebook_runner.py +++ b/src/pint_pal/notebook_runner.py @@ -10,8 +10,8 
@@ yaml = YAML(typ='safe') import pint_pal +import pint_pal.config from pint_pal.notebook_templater import transform_notebook -from pint_pal.config import DATA_ROOT ansi_color = re.compile(r'\x1b\[([0-9]{1,3};)*[0-9]{1,3}m') @@ -38,7 +38,7 @@ def run_template_notebook(template_nb, config_file, output_nb=None, err_file=Non transformations: Transformations to apply to the notebook. """ # base_dir = root of data repository - base_dir = DATA_ROOT + base_dir = pint_pal.config.DATA_ROOT nb_name = os.path.splitext(os.path.split(template_nb)[1])[0] cfg_name = os.path.splitext(os.path.split(config_file)[1])[0] diff --git a/src/pint_pal/par_checker.py b/src/pint_pal/par_checker.py index 4128209e..6ecf6856 100644 --- a/src/pint_pal/par_checker.py +++ b/src/pint_pal/par_checker.py @@ -4,13 +4,7 @@ import copy from astropy import log import astropy.units as u -from pint_pal.config import ( - LATEST_BIPM, - LATEST_EPHEM, - PLANET_SHAPIRO, - CORRECT_TROPOSPHERE, - LATEST_TOA_RELEASE, -) +import pint_pal.config from pint.modelutils import model_equatorial_to_ecliptic def check_if_fit(model, *param): @@ -290,8 +284,9 @@ def check_ephem(toa): UserWarning If ephemeris is not set to the latest version. """ - if toa.ephem != LATEST_EPHEM: - msg = f"Wrong Solar System ephemeris in use ({toa.ephem}); should be {LATEST_EPHEM}." + if toa.ephem != pint_pal.config.LATEST_EPHEM: + msg = (f"Wrong Solar System ephemeris in use ({toa.ephem});" + f" should be {pint_pal.config.LATEST_EPHEM}.") log.warning(msg) else: msg = f"Current Solar System ephemeris in use is {toa.ephem}." @@ -310,8 +305,9 @@ def check_bipm(toa): UserWarning If BIPM correction is not set to the latest version. """ - if toa.clock_corr_info['bipm_version'] != LATEST_BIPM: - msg = f"Wrong bipm_version ({toa.clock_corr_info['bipm_version']}); should be {LATEST_BIPM}." + if toa.clock_corr_info['bipm_version'] != pint_pal.config.LATEST_BIPM: + msg = (f"Wrong bipm_version ({toa.clock_corr_info['bipm_version']});" + f" should be {pint_pal.config.LATEST_BIPM}.") log.warning(msg) else: msg = f"BIPM version in use is {toa.clock_corr_info['bipm_version']}." @@ -362,9 +358,10 @@ def check_troposphere(model): msg = "Added TroposphereDelay to model components." log.warning(msg) tropo = model.components['TroposphereDelay'].CORRECT_TROPOSPHERE.value - if tropo != CORRECT_TROPOSPHERE: - model.components['TroposphereDelay'].CORRECT_TROPOSPHERE.set( \ - CORRECT_TROPOSPHERE) + if tropo != pint_pal.config.CORRECT_TROPOSPHERE: + model.components['TroposphereDelay'].CORRECT_TROPOSPHERE.set( + pint_pal.config.CORRECT_TROPOSPHERE + ) msg = "Switching CORRECT_TROPOSPHERE setting." log.warning(msg) tropo = model.components['TroposphereDelay'].CORRECT_TROPOSPHERE.value @@ -391,9 +388,10 @@ def check_planet_shapiro(model): msg = "Added SolarSystemShapiro to model components." log.warning(msg) sss = model.components['SolarSystemShapiro'].PLANET_SHAPIRO.value - if sss != PLANET_SHAPIRO: - model.components['SolarSystemShapiro'].PLANET_SHAPIRO.set( \ - PLANET_SHAPIRO) + if sss != pint_pal.config.PLANET_SHAPIRO: + model.components['SolarSystemShapiro'].PLANET_SHAPIRO.set( + pint_pal.config.PLANET_SHAPIRO + ) msg = "Switching PLANET_SHAPIRO setting." 
log.warning(msg) sss = model.components['SolarSystemShapiro'].PLANET_SHAPIRO.value @@ -455,7 +453,9 @@ def check_toa_release(toas): if len(set(release_flags)) > 1: log.error(f'TOAs from multiple releases should not be combined: {set(release_flags)}') else: - if release_flags[0] == LATEST_TOA_RELEASE: - log.info(f'All TOAs are from the latest release ({LATEST_TOA_RELEASE}).') + if release_flags[0] == pint_pal.config.LATEST_TOA_RELEASE: + log.info(f'All TOAs are from the latest release ({pint_pal.config.LATEST_TOA_RELEASE}).') else: - log.warning(f'TOAs in use are from an old release {release_flags[0]}, not {LATEST_TOA_RELEASE}; update tim-directory in the .yaml accordingly.') + log.warning(f'TOAs in use are from an old release {release_flags[0]}, ' + f'not {pint_pal.config.LATEST_TOA_RELEASE}; ' + f'update tim-directory in the .yaml accordingly.') diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index 2af573c8..8f41229d 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -21,7 +21,7 @@ from pint_pal.utils import write_if_changed, apply_cut_flag, apply_cut_select from pint_pal.lite_utils import new_changelog_entry from pint_pal.lite_utils import check_toa_version, check_tobs -from pint_pal.config import DATA_ROOT, PLANET_SHAPIRO, FREQUENCY_RATIO, MAX_SOLARWIND_DELAY +import pint_pal.config class TimingConfiguration: """ @@ -50,13 +50,13 @@ def __init__(self, filename="config.yaml", tim_directory=None, par_directory=Non self.config = yaml.load(FILE, Loader=yaml.FullLoader) if tim_directory is None: self.tim_directory = os.path.realpath( - os.path.join(DATA_ROOT, self.config['tim-directory']) + os.path.join(pint_pal.config.DATA_ROOT, self.config['tim-directory']) ) else: self.tim_directory = tim_directory if par_directory is None: self.par_directory = os.path.realpath( - os.path.join(DATA_ROOT, self.config['par-directory']) + os.path.join(pint_pal.config.DATA_ROOT, self.config['par-directory']) ) else: self.par_directory = par_directory @@ -143,7 +143,7 @@ def get_model_and_toas(self,usepickle=True,print_all_ignores=False,apply_initial usepickle=usepickle, bipm_version=BIPM, ephem=EPHEM, - planets=PLANET_SHAPIRO, + planets=pint_pal.config.PLANET_SHAPIRO, model=m, picklefilename=picklefilename, include_pn=include_pn @@ -654,13 +654,13 @@ def get_fratio(self): """ Return desired frequency ratio """ if 'fratio' in self.config['dmx'].keys(): return self.config['dmx']['fratio'] - return FREQUENCY_RATIO + return pint_pal.config.FREQUENCY_RATIO def get_sw_delay(self): """ Return desired max(solar wind delay) threshold """ if 'max-sw-delay' in self.config['dmx'].keys(): return self.config['dmx']['max-sw-delay'] - return MAX_SOLARWIND_DELAY + return pint_pal.config.MAX_SOLARWIND_DELAY def get_custom_dmx(self): """ Return MJD/binning params for handling DM events, etc. """ diff --git a/src/pint_pal/yamlio.py b/src/pint_pal/yamlio.py index 5cfe6868..a5819f7f 100644 --- a/src/pint_pal/yamlio.py +++ b/src/pint_pal/yamlio.py @@ -8,11 +8,11 @@ import glob from astropy import log import numpy as np -from pint_pal.config import LATEST_TOA_RELEASE +from pint_pal import config import os yaml = YAML() -RELEASE = f'/nanograv/timing/releases/15y/toagen/releases/{LATEST_TOA_RELEASE}/' +RELEASE = f'/nanograv/timing/releases/15y/toagen/releases/{config.LATEST_TOA_RELEASE}/' def fix_toa_info(yaml_file,current_release=RELEASE,overwrite=True,extension='fix'): """Checks/fixes tim-directory, toas, toa-type from existing yaml; writes new one. 
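A minimal usage sketch of the config module introduced by the patches above. The data-repository path and the override values shown in comments are hypothetical; settings are read as `pint_pal.config` attributes rather than imported directly, which is exactly why patch 006 replaces the `from pint_pal.config import ...` statements — direct imports would freeze the values at import time.

    import pint_pal
    import pint_pal.config

    # Point PINT Pal at a data repository. If that directory contains a
    # `pint_pal_project.yaml`, its settings override the packaged
    # defaults.yaml, e.g. (hypothetical values):
    #   LATEST_EPHEM: "DE440"
    #   LATEST_BIPM: "BIPM2021"
    pint_pal.set_data_root("/home/user/data_repo")  # hypothetical path

    # Read settings through the module so any override is picked up.
    print(pint_pal.config.LATEST_EPHEM)
    print(pint_pal.config.DATA_ROOT)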
From 964e4ec093feb1cd0b36a129d9a12ceab8d8aaac Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Thu, 9 May 2024 13:10:14 -0400 Subject: [PATCH 007/193] Resolve paths according to latest config --- src/pint_pal/timingconfiguration.py | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index 8f41229d..2a310f16 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -49,19 +49,35 @@ def __init__(self, filename="config.yaml", tim_directory=None, par_directory=Non with open(filename) as FILE: self.config = yaml.load(FILE, Loader=yaml.FullLoader) if tim_directory is None: - self.tim_directory = os.path.realpath( - os.path.join(pint_pal.config.DATA_ROOT, self.config['tim-directory']) - ) + self.config['tim-directory'] = tim_directory else: self.tim_directory = tim_directory if par_directory is None: - self.par_directory = os.path.realpath( - os.path.join(pint_pal.config.DATA_ROOT, self.config['par-directory']) - ) + self.config['par-directory'] = par_directory else: self.par_directory = par_directory self.skip_check = self.config['skip-check'] if 'skip-check' in self.config.keys() else '' + @property + def tim_directory(self): + """ + Location of tim files, as specified in the config. + This returns the absolute path to the tim directory. + """ + return os.path.realpath( + os.path.join(pint_pal.config.DATA_ROOT, self.config['tim-directory']) + ) + + @property + def par_directory(self): + """ + Location of par files, as specified in the config. + This returns the absolute path to the par directory. + """ + return os.path.realpath( + os.path.join(pint_pal.config.DATA_ROOT, self.config['par-directory']) + ) + def get_source(self): """ Return the source name """ return self.config['source'] From 6a7c2146d2ea7d394e80f701ccae9b671abfb10e Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Thu, 9 May 2024 13:21:37 -0400 Subject: [PATCH 008/193] Resolve paths using current config --- src/pint_pal/timingconfiguration.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index 2a310f16..53b91ca1 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -48,14 +48,10 @@ def __init__(self, filename="config.yaml", tim_directory=None, par_directory=Non self.filename = filename with open(filename) as FILE: self.config = yaml.load(FILE, Loader=yaml.FullLoader) - if tim_directory is None: + if tim_directory is not None: self.config['tim-directory'] = tim_directory - else: - self.tim_directory = tim_directory - if par_directory is None: + if par_directory is not None: self.config['par-directory'] = par_directory - else: - self.par_directory = par_directory self.skip_check = self.config['skip-check'] if 'skip-check' in self.config.keys() else '' @property @@ -68,6 +64,14 @@ def tim_directory(self): os.path.join(pint_pal.config.DATA_ROOT, self.config['tim-directory']) ) + @tim_directory.setter + def set_tim_directory(self, tim_directory): + """ + Set tim directory. + If a relative path is supplied, it will be turned into an absolute path. 
+ """ + self.config['tim-directory'] = tim_directory + @property def par_directory(self): """ @@ -78,6 +82,14 @@ def par_directory(self): os.path.join(pint_pal.config.DATA_ROOT, self.config['par-directory']) ) + @par_directory.setter + def set_par_directory(self, par_directory): + """ + Set par directory. + If a relative path is supplied, it will be turned into an absolute path. + """ + self.config['par-directory'] = par_directory + def get_source(self): """ Return the source name """ return self.config['source'] From 29dfcc700ae5462138b7322c51559b028117ff6b Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Thu, 9 May 2024 13:22:06 -0400 Subject: [PATCH 009/193] Use set_data_root() in tests --- tests/test_run_notebook.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_run_notebook.py b/tests/test_run_notebook.py index b1f47262..e61874f3 100644 --- a/tests/test_run_notebook.py +++ b/tests/test_run_notebook.py @@ -40,6 +40,7 @@ def test_run_notebook(config_file, output_dir): `pytest -n tests/test_run_notebook.py` is the number of worker processes to launch (e.g. 4 to use 4 CPU threads) """ + pint_pal.set_data_root(dirname(__file__)) global_log = join(output_dir, f'test-run-notebook.log') with open(global_log, 'a') as f: run_template_notebook( From ae80f8d6b51d1cb7f28f90194e035eaca18b0a2a Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Thu, 9 May 2024 13:42:41 -0400 Subject: [PATCH 010/193] Convert DATA_ROOT into absolute path --- src/pint_pal/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/config.py b/src/pint_pal/config.py index 0997474c..c53a4e55 100644 --- a/src/pint_pal/config.py +++ b/src/pint_pal/config.py @@ -21,7 +21,7 @@ def set_data_root(path): (2) all scripts and notebooks are run from the root of the data repository. """ global DATA_ROOT - DATA_ROOT = path + DATA_ROOT = os.path.realpath(path) try: read_config_file(os.path.join(DATA_ROOT, 'pint_pal_project.yaml')) except FileNotFoundError: From 778075e91373e00c60f37b1731970938022e935e Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Thu, 9 May 2024 14:10:14 -0400 Subject: [PATCH 011/193] set_data_root(): add reset, allow ~ in paths --- src/pint_pal/__init__.py | 2 +- src/pint_pal/config.py | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/__init__.py b/src/pint_pal/__init__.py index 3429e639..ba6ea479 100644 --- a/src/pint_pal/__init__.py +++ b/src/pint_pal/__init__.py @@ -1,6 +1,6 @@ import pint_pal.checkin import pint_pal.config -from pint_pal.config import set_data_root +from pint_pal.config import set_data_root, reset_data_root from . import _version __version__ = _version.get_versions()['version'] diff --git a/src/pint_pal/config.py b/src/pint_pal/config.py index c53a4e55..faf79495 100644 --- a/src/pint_pal/config.py +++ b/src/pint_pal/config.py @@ -21,12 +21,20 @@ def set_data_root(path): (2) all scripts and notebooks are run from the root of the data repository. """ global DATA_ROOT - DATA_ROOT = os.path.realpath(path) + DATA_ROOT = os.path.realpath(os.path.expanduser(path)) try: read_config_file(os.path.join(DATA_ROOT, 'pint_pal_project.yaml')) except FileNotFoundError: pass +def reset_data_root(): + """ + Reset the data root and config variables to the default values. + """ + global DATA_ROOT + DATA_ROOT = '.' 
+ read_config_file(os.path.join(PACKAGE_DIR, 'defaults.yaml')) + def read_config_file(config_file): """ Read a configuration file, along the lines of `defaults.yaml`, and load the results From e898655b41c8f834b7ebcfddb745b925f1d833b4 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Thu, 9 May 2024 14:27:12 -0400 Subject: [PATCH 012/193] Accept ~ in YAML path too --- src/pint_pal/timingconfiguration.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index 53b91ca1..12e48a3c 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -45,8 +45,8 @@ def __init__(self, filename="config.yaml", tim_directory=None, par_directory=Non tim_directory (optional) : override the tim directory specified in the config par_directory (optional) : override the par directory specified in the config """ - self.filename = filename - with open(filename) as FILE: + self.filename = os.path.realpath(os.path.expanduser(filename)) + with open(self.filename) as FILE: self.config = yaml.load(FILE, Loader=yaml.FullLoader) if tim_directory is not None: self.config['tim-directory'] = tim_directory From 6637bf05561a58754155e9cb16eed2f3aba12470 Mon Sep 17 00:00:00 2001 From: tcromartie Date: Thu, 13 Jun 2024 17:46:51 +0000 Subject: [PATCH 013/193] Don't let tc barf if excise-tim is there but unset --- src/pint_pal/timingconfiguration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index a4f8f99e..1b91e68b 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -359,7 +359,7 @@ def get_niter(self): def get_excised(self): """ Return excised-tim file if set and exists""" - if 'excised-tim' in self.config['intermediate-results'].keys(): + if 'excised-tim' in self.config['intermediate-results'].keys() and self.config['intermediate-results']['excised-tim']: if os.path.exists(self.config['intermediate-results']['excised-tim']): return self.config['intermediate-results']['excised-tim'] return None From 4b33ffa5a5cc098e2caa01247c382c7a3b42f8e7 Mon Sep 17 00:00:00 2001 From: Michael Lam Date: Sat, 10 Aug 2024 06:43:36 +0000 Subject: [PATCH 014/193] add convert_tcb2tdb=False flags --- src/pint_pal/noise_utils.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index e4cd07b5..bca27bdd 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -313,7 +313,7 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl param_name = key.split('_efac')[0].split(psr_name)[1][1:] tp = maskParameter(name = 'EFAC', index = efac_idx, key = '-f', key_value = param_name, - value = val, units = '') + value = val, units = '', convert_tcb2tdb=False) efac_params.append(tp) efac_idx += 1 @@ -324,7 +324,7 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl param_name = key.split('_t2equad')[0].split(psr_name)[1].split('_log10')[0][1:] tp = maskParameter(name = 'EQUAD', index = equad_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us') + value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) equad_params.append(tp) equad_idx += 1 @@ -334,7 +334,7 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl param_name = 
key.split('_tnequad')[0].split(psr_name)[1].split('_log10')[0][1:] tp = maskParameter(name = 'EQUAD', index = equad_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us') + value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) equad_params.append(tp) equad_idx += 1 @@ -344,7 +344,7 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl param_name = key.split('_equad')[0].split(psr_name)[1].split('_log10')[0][1:] tp = maskParameter(name = 'EQUAD', index = equad_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us') + value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) equad_params.append(tp) equad_idx += 1 @@ -353,7 +353,7 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl param_name = key.split('_ecorr')[0].split(psr_name)[1].split('_log10')[0][1:] tp = maskParameter(name = 'ECORR', index = ecorr_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us') + value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) ecorr_params.append(tp) ecorr_idx += 1 @@ -362,7 +362,7 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl param_name = key.split('_dmefac')[0].split(psr_name)[1][1:] tp = maskParameter(name = 'DMEFAC', index = dmefac_idx, key = '-f', key_value = param_name, - value = val, units = '') + value = val, units = '', convert_tcb2tdb=False) dmefac_params.append(tp) dmefac_idx += 1 @@ -371,7 +371,7 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl param_name = key.split('_dmequad')[0].split(psr_name)[1].split('_log10')[0][1:] tp = maskParameter(name = 'DMEQUAD', index = dmequad_idx, key = '-f', key_value = param_name, - value = 10 ** val, units = 'pc/cm3') + value = 10 ** val, units = 'pc/cm3', convert_tcb2tdb=False) dmequad_params.append(tp) dmequad_idx += 1 From 2ca0f3508e9312ed8b9849d926144e18a79ec683 Mon Sep 17 00:00:00 2001 From: Michael Lam Date: Mon, 19 Aug 2024 19:08:14 +0000 Subject: [PATCH 015/193] tcb2tdb fix in lite_utils --- src/pint_pal/lite_utils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/pint_pal/lite_utils.py b/src/pint_pal/lite_utils.py index acdfde1a..235f272c 100644 --- a/src/pint_pal/lite_utils.py +++ b/src/pint_pal/lite_utils.py @@ -410,7 +410,7 @@ def add_feJumps(mo,rcvrs): if len(missing_fe_jumps) > 1: for j in missing_fe_jumps[:-1]: log.info(f"Adding frontend JUMP {j}") - JUMPn = maskParameter('JUMP',key='-fe',key_value=[j],value=0.0,units=u.second) + JUMPn = maskParameter('JUMP',key='-fe',key_value=[j],value=0.0,units=u.second,convert_tcb2tdb=False) phasejump.add_param(JUMPn,setup=True) def add_feDMJumps(mo,rcvrs): @@ -447,7 +447,7 @@ def add_feDMJumps(mo,rcvrs): if len(missing_fe_dmjumps): for j in missing_fe_dmjumps: log.info(f"Adding frontend DMJUMP {j}") - DMJUMPn = maskParameter('DMJUMP',key='-fe',key_value=[j],value=0.0,units=u.pc*u.cm**-3) + DMJUMPn = maskParameter('DMJUMP',key='-fe',key_value=[j],value=0.0,units=u.pc*u.cm**-3,convert_tcb2tdb=False) dmjump.add_param(DMJUMPn,setup=True) def get_flag_val_list(toas, flag): @@ -516,7 +516,7 @@ def add_flag_jumps(mo,flag,flaglist,base=False): if len(missing_jumps) > 1: for j in missing_jumps[:-1]: log.info(f"Adding frontend JUMP {j}") - JUMPn = maskParameter('JUMP',key=flagval,key_value=[j],value=0.0,units=u.second) + JUMPn = maskParameter('JUMP',key=flagval,key_value=[j],value=0.0,units=u.second,convert_tcb2tdb=False) 
phasejump.add_param(JUMPn,setup=True) else: if len(missing_jumps): @@ -529,7 +529,7 @@ def add_flag_jumps(mo,flag,flaglist,base=False): if len(missing_jumps) >= 1: for j in missing_jumps[:-1]: log.info(f"Adding frontend JUMP {j}") - JUMPn = maskParameter('JUMP',key=flagval,key_value=[j],value=0.0,units=u.second) + JUMPn = maskParameter('JUMP',key=flagval,key_value=[j],value=0.0,units=u.second,convert_tcb2tdb=False) phasejump.add_param(JUMPn,setup=True) def large_residuals(fo,threshold_us,threshold_dm=None,*,n_sigma=None,max_sigma=None,prefit=False,ignore_ASP_dms=True,print_bad=True): From 90cec1d0004d0b8cf9df39ad809800df481aba2c Mon Sep 17 00:00:00 2001 From: Michael Lam Date: Mon, 19 Aug 2024 21:40:07 -0400 Subject: [PATCH 016/193] add VEGAS to plot_utils --- src/pint_pal/plot_utils.py | 282 +++++++++++++++++++------------------ 1 file changed, 142 insertions(+), 140 deletions(-) diff --git a/src/pint_pal/plot_utils.py b/src/pint_pal/plot_utils.py index d46b3467..338325de 100644 --- a/src/pint_pal/plot_utils.py +++ b/src/pint_pal/plot_utils.py @@ -46,7 +46,7 @@ "meerkat": "#FD9927", "None": "#808080" }, - + 'pta':{ "InPTA": "#855CA0", "EPTA": "#407BD5", @@ -64,8 +64,10 @@ "L-wide_PUPPI": "#6BA9E2", "Rcvr1_2_GASP": "#61C853", "Rcvr1_2_GUPPI": "#61C853", + "Rcvr1_2_VEGAS": "#61C853", "Rcvr_800_GASP": "#61C853", "Rcvr_800_GUPPI": "#61C853", + "Rcvr_800_VEGAS": "#61C853", "S-wide_ASP": "#6BA9E2", "S-wide_PUPPI": "#6BA9E2", "1.5GHz_YUPPI": "#40635F", @@ -157,7 +159,7 @@ "PPTA": "x", "MPTA": "x", "None": "x" - }, + }, 'febe': {"327_ASP": "x", "327_PUPPI": "x", "430_ASP": "x", @@ -239,7 +241,7 @@ def call(x): subprocess.call(x,shell=True) - + def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = False, avg = False, whitened = False, \ save = False, legend = True, title = True, axs = None, mixed_ecorr=False, **kwargs): """ @@ -255,7 +257,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa 'both' - overplot both the pre and post-fit residuals. colorby ['string']: What to use to determine color/markers 'pta' - color residuals by PTA (default) - 'obs' - color residuals by telescope + 'obs' - color residuals by telescope 'f' - color residuals by frontend/backend pair (flag not used by all PTAs). plotsig [boolean] : If True plot number of measurements v. residuals/uncertainty, else v. residuals [default: False]. @@ -267,7 +269,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa title [boolean] : If False, will not print plot title [default: True]. axs [string] : If not None, should be defined subplot value and the figure will be used as part of a larger figure [default: None]. 
- + Optional Arguments: -------------------- @@ -288,7 +290,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") else: NB = True - + # Check if want epoch averaged residuals if avg == True and restype == 'prefit' and mixed_ecorr == True: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) @@ -358,7 +360,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa # Check if we want whitened residuals if whitened == True and ('res' not in kwargs.keys()): - if avg == True and mixed_ecorr == True: + if avg == True and mixed_ecorr == True: if restype != 'both': res = whiten_resids(avg_dict, restype=restype) res_no_avg = whiten_resids(no_avg_dict, restype=restype) @@ -371,14 +373,14 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa res_pre_no_avg = res_pre_no_avg.to(u.us) res = res.to(u.us) res_no_avg = res_no_avg.to(u.us) - elif avg == True and mixed_ecorr == False: + elif avg == True and mixed_ecorr == False: if restype != 'both': res = whiten_resids(avg_dict, restype=restype) else: res = whiten_resids(avg_dict_pre, restype='prefit') res_pre = whiten_resids(avg_dict, restype='postfit') res_pre = res_pre.to(u.us) - res = res.to(u.us) + res = res.to(u.us) else: if restype != 'both': res = whiten_resids(fitter, restype=restype) @@ -406,7 +408,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa errs = avg_dict['errors'].to(u.us) errs_no_avg = no_avg_dict['errors'].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict['errors'].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: @@ -441,18 +443,18 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa mjds = avg_dict['mjds'].value # Convert to years years = (mjds - 51544.0)/365.25 + 2000.0 - + # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr. # Create combined arrays - - if avg == True and mixed_ecorr == True: + + if avg == True and mixed_ecorr == True: combo_res = np.hstack((res, res_no_avg)) combo_errs = np.hstack((errs, errs_no_avg)) combo_years = np.hstack((years, years_no_avg)) if restype =='both': - combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) + combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) combo_res_pre = np.hstack((res_pre, res_no_avg_pre)) - + # Get colorby flag values (obs, PTA, febe, etc.) 
if 'colorby' in kwargs.keys(): cb = kwargs['colorby'] @@ -469,23 +471,23 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa for jjs in no_avg_dict['indices']: no_avg_cb.append(cb[jjs]) no_ecorr_cb = np.array(no_avg_cb) - + cb = np.array(avg_cb) - + # Get the set of unique flag values if avg==True and mixed_ecorr==True: cb = np.hstack((cb,no_ecorr_cb)) - + CB = set(cb) - + if colorby== 'pta': colorscheme = colorschemes['pta'] elif colorby == 'obs': colorscheme = colorschemes['observatories'] elif colorby == 'f': colorscheme = colorschemes['febe'] - + if 'figsize' in kwargs.keys(): figsize = kwargs['figsize'] @@ -497,7 +499,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa else: fig = plt.gcf() ax1 = axs - + for i, c in enumerate(CB): inds = np.where(cb==c)[0] @@ -543,7 +545,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa if restype == 'both': ax1.errorbar(combo_years[inds], combo_res_rpe[inds], yerr=combo_errs_pre[inds], fmt=mkr_pre, \ color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True) - + else: if plotsig: sig = res[inds]/errs[inds] @@ -618,7 +620,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa elif restype == "both": ext += "_pre_post_fit" plt.savefig("%s_resid_v_mjd%s.png" % (fitter.model.PSR.value, ext)) - + if axs == None: # Define clickable points text = ax2.text(0,0,"") @@ -654,25 +656,25 @@ def onclick(event): def plot_FD_delay(fitter = None, model_object = None, save = False, title= True, axs = None, legend=True, show_bin=True, **kwargs): """ - Make a plot of frequency (MHz) vs the time delay (us) implied by FD parameters. + Make a plot of frequency (MHz) vs the time delay (us) implied by FD parameters. Z. Arzoumanian, The NANOGrav Nine-year Data Set: Observations, Arrival Time Measurements, and Analysis of 37 Millisecond Pulsars, The Astrophysical Journal, Volume 813, Issue 1, article id. 65, 31 pp.(2015). Eq.(2): FDdelay = sum(c_i * (log(obs_freq/1GHz))^i) - - This can be run with EITHER a PINT fitter object OR PINT model object. If run with a model object, the user will need to specify which frequencies they would like to plot FD delays over. - + + This can be run with EITHER a PINT fitter object OR PINT model object. If run with a model object, the user will need to specify which frequencies they would like to plot FD delays over. + Arguments ---------- - + fitter[object] : The PINT fitter object. model[object] : The PINT model object. Can be used instead of fitter save [boolean] : If True will save plot with the name "FD_delay.png"[default: False]. title [boolean] : If False, will not print plot title [default: True]. axs [string] : If not None, should be defined subplot value and the figure will be used as part of a larger figure [default: None]. - + Optional Arguments: -------------------- freqs [list/array] : List or array of frequencies (MHz) to plot. Will override values from toa object. 
@@ -683,11 +685,11 @@ def plot_FD_delay(fitter = None, model_object = None, save = False, title= True, alpha [float] : matplotlib alpha options for error regions [default: 0.2] loc ['string'] : matplotlib legend location [default: 'upper right'] Only used when legend = True """ - - #Make sure that either a fitter or model object has been specified + + #Make sure that either a fitter or model object has been specified if fitter == None and model_object == None: raise Exception("Need to specify either a fitter or model object") - + #Get frequencies if 'freqs' in kwargs.keys(): freqs = kwargs['freqs'] @@ -696,7 +698,7 @@ def plot_FD_delay(fitter = None, model_object = None, save = False, title= True, else: freqs = fitter.toas.get_freqs().value freqs = np.sort(freqs) - + #Get FD delay in units of milliseconds as a function of frequency. This will eventually by available in PINT and become redundant. PINT version may need to be modified to allow for calculation of error regions def get_FD_delay(pint_model_object,freqs): FD_map = model.TimingModel.get_prefix_mapping(pint_model_object,"FD") @@ -719,7 +721,7 @@ def get_FD_delay(pint_model_object,freqs): else: FD_phrase = "FD1" return delay *1e6, delta_delay_plus * 1e6, delta_delay_minus * 1e6 , FD_phrase - + #Get FD params if fitter object is given if fitter is not None: #Check if the fitter object has FD parameters @@ -727,9 +729,9 @@ def get_FD_delay(pint_model_object,freqs): FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay(fitter.model, freqs*1e-3) #print(FD_delay) psr_name = fitter.model.PSR.value - """For when new version of PINT is default on pint_pal + """For when new version of PINT is default on pint_pal FD_delay = pint.models.frequency_dependent.FD.FD_delay(fitter.model,freqs) - + """ if show_bin: nbins = fitter.toas['nbin'].astype(int).min() @@ -738,26 +740,26 @@ def get_FD_delay(pint_model_object,freqs): except: print("No FD parameters in this model! Exitting...") #sys.exit() - - #Get FD params if model object is given + + #Get FD params if model object is given if model_object is not None: #Check if the model object has FD parameters try: FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay(model_object, freqs*1e-3) psr_name = model_object.PSR.value - """For when new version of PINT is default on pint_pal + """For when new version of PINT is default on pint_pal FD_delay = pint.models.frequency_dependent.FD.FD_delay(fitter.model,freqs) - + """ if show_bin: print("show_bin requires a fitter object, cannot be used with the model alone") show_bin = False except: print("No FD parameters in this model! Exitting...") - #sys.exit() - + #sys.exit() - #Get plotting preferences. + + #Get plotting preferences. if 'figsize' in kwargs.keys(): figsize = kwargs['figsize'] else: @@ -800,13 +802,13 @@ def get_FD_delay(pint_model_object,freqs): if title: ax1.set_title("%s FD Delay" % psr_name) if legend: - ax1.legend(loc=loc) + ax1.legend(loc=loc) if axs == None: plt.tight_layout() if save: plt.savefig("%s_fd_delay.png" % psr_name) - return + return def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = False, avg = False, mixed_ecorr=False,\ whitened = False, save = False, legend = True, title = True, axs = None, **kwargs): @@ -823,8 +825,8 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal 'both' - overplot both the pre and post-fit residuals. 
colorby ['string']: What to use to determine color/markers 'pta' - color residuals by PTA (default) - 'obs' - color residuals by telescope - 'f' - color residuals by frontend/backend pair (flag not used by all PTAs). + 'obs' - color residuals by telescope + 'f' - color residuals by frontend/backend pair (flag not used by all PTAs). plotsig [boolean] : If True plot number of measurements v. residuals/uncertainty, else v. residuals [default: False]. avg [boolean] : If True and not wideband fitter, will compute and plot epoch-average residuals [default: False]. @@ -923,11 +925,11 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\ %(restype)) - - + + # Check if we want whitened residuals if whitened == True and ('res' not in kwargs.keys()): - if avg == True and mixed_ecorr == True: + if avg == True and mixed_ecorr == True: if restype != 'both': res = whiten_resids(avg_dict, restype=restype) res_no_avg = whiten_resids(no_avg_dict, restype=restype) @@ -940,14 +942,14 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal res_pre_no_avg = res_pre_no_avg.to(u.us) res = res.to(u.us) res_no_avg = res_no_avg.to(u.us) - elif avg == True and mixed_ecorr == False: + elif avg == True and mixed_ecorr == False: if restype != 'both': res = whiten_resids(avg_dict, restype=restype) else: res = whiten_resids(avg_dict_pre, restype='prefit') res_pre = whiten_resids(avg_dict, restype='postfit') res_pre = res_pre.to(u.us) - res = res.to(u.us) + res = res.to(u.us) else: if restype != 'both': res = whiten_resids(fitter, restype=restype) @@ -975,7 +977,7 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal errs = avg_dict['errors'].to(u.us) errs_no_avg = no_avg_dict['errors'].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict['errors'].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: @@ -997,15 +999,15 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) - + # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr. # Create combined arrays - - if avg == True and mixed_ecorr == True: + + if avg == True and mixed_ecorr == True: combo_res = np.hstack((res, res_no_avg)) combo_errs = np.hstack((errs, errs_no_avg)) if restype =='both': - combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) + combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) combo_res_pre = np.hstack((res_pre, res_no_avg_pre)) # Get freqs @@ -1013,8 +1015,8 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal freqs = kwargs['freqs'] else: freqs = fitter.toas.get_freqs().value - - + + # Get colorby flag values (obs, PTA, febe, etc.) 
if 'colorby' in kwargs.keys(): cb = kwargs['colorby'] @@ -1031,15 +1033,15 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal for jjs in no_avg_dict['indices']: no_avg_cb.append(cb[jjs]) no_ecorr_cb = np.array(no_avg_cb) - + cb = np.array(avg_cb) - + # Get the set of unique flag values if avg==True and mixed_ecorr==True: cb = np.hstack((cb,no_ecorr_cb)) - + CB = set(cb) - + if colorby== 'pta': colorscheme = colorschemes['pta'] markerscheme = markers['pta'] @@ -1049,7 +1051,7 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal elif colorby == 'f': colorscheme = colorschemes['febe'] markerscheme = markers['febe'] - + if 'figsize' in kwargs.keys(): figsize = kwargs['figsize'] @@ -1093,7 +1095,7 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal alpha = kwargs['alpha'] else: alpha = 0.5 - + if avg and mixed_ecorr: if plotsig: combo_sig = combo_res[inds]/combo_errs[inds] @@ -1179,7 +1181,7 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal elif restype == "both": ext += "_pre_post_fit" plt.savefig("%s_resid_v_freq%s.png" % (fitter.model.PSR.value, ext)) - + if axs == None: # Define clickable points text = ax1.text(0,0,"") @@ -1292,7 +1294,7 @@ def plot_dmx_time(fitter, savedmx = False, save = False, legend = True,\ dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt("%s_dmxparse.nb.out"%(psrname),\ unpack=True, usecols=(0,1,2,3,4)) dmx_mid_yr = (dmx_epochs- 51544.0)/365.25 + 2000.0 - + # Define the plotting function if axs == None: if 'figsize' in kwargs.keys(): @@ -1418,7 +1420,7 @@ def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model = None): from astropy.time import Time if isinstance(dmxout_files, str): dmxout_files = [dmxout_files] if isinstance(labels, str): labels = [labels] - + figsize = (10,4) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) @@ -1431,7 +1433,7 @@ def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model = None): dmxDict = {} for ii,(df,lab) in enumerate(zip(dmxout_files,labels)): dmxmjd, dmxval, dmxerr, dmxr1, dmxr2 = np.loadtxt(df, unpack=True, usecols=range(0,5)) - idmxDict = {'mjd':dmxmjd,'val':dmxval,'err':dmxerr,'r1':dmxr1,'r2':dmxr2} + idmxDict = {'mjd':dmxmjd,'val':dmxval,'err':dmxerr,'r1':dmxr1,'r2':dmxr2} ax2.errorbar(dmxmjd, dmxval*10**3, yerr=dmxerr*10**3, label=lab, marker='o', ls='', markerfacecolor='none') dmxDict[lab] = idmxDict @@ -1622,14 +1624,14 @@ def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False elif restype == 'both': dm_error = fitter.resids.residual_objs['dm'].get_data_error().value dm_error_init = fitter.resids_init.residual_objs['dm'].get_data_error().value - + # Get the MJDs if 'mjds' in kwargs.keys(): mjds = kwargs['mjds'] else: mjds = fitter.toas.get_mjds().value years = (mjds - 51544.0)/365.25 + 2000.0 - + # Get the receiver-backend combos if 'rcvr_bcknds' in kwargs.keys(): rcvr_bcknds = kwargs['rcvr_bcknds'] @@ -1661,7 +1663,7 @@ def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False ylabel = r"$\Delta$DM/Uncertainty" else: ylabel = r"$\Delta$DM [cm$^{-3}$ pc]" - + if axs == None: if 'figsize' in kwargs.keys(): figsize = kwargs['figsize'] @@ -1781,7 +1783,7 @@ def onclick(event): text.set_text("DM Params:\n MJD: %s \n Res: %.6f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) fig.canvas.mpl_connect('button_press_event', onclick) - + return def plot_measurements_v_res(fitter, 
restype = 'postfit', plotsig = False, nbin = 50, avg = False, whitened = False, \ @@ -1808,7 +1810,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = title [boolean] : If False, will not print plot title [default: True]. axs [string] : If not None, should be defined subplot value and the figure will be used as part of a larger figure [default: None]. - + Optional Arguments: -------------------- res [list/array] : List or array of residual values to plot. Will override values from fitter object. @@ -1828,7 +1830,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") else: NB = True - + # Check if want epoch averaged residuals if avg == True and restype == 'prefit': avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) @@ -1837,8 +1839,8 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = elif avg == True and restype == 'both': avg_dict = fitter.resids.ecorr_average(use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - - + + # Get residuals if 'res' in kwargs.keys(): res = kwargs['res'] @@ -1873,7 +1875,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = else: raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\ %(restype)) - + # Check if we want whitened residuals if whitened == True and ('res' not in kwargs.keys()): if avg == True: @@ -1883,7 +1885,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = res = whiten_resids(avg_dict_pre, restype='prefit') res_pre = whiten_resids(avg_dict, restype='postfit') res_pre = res_pre.to(u.us) - res = res.to(u.us) + res = res.to(u.us) else: if restype != 'both': res = whiten_resids(fitter, restype=restype) @@ -1892,7 +1894,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = res_pre = whiten_resids(fitter, restype='postfit') res_pre = res_pre.to(u.us) res = res.to(u.us) - + # Get errors if 'errs' in kwargs.keys(): errs = kwargs['errs'] @@ -1921,7 +1923,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = else: errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) - + # Get receiver backends if 'rcvr_bcknds' in kwargs.keys(): rcvr_bcknds = kwargs['rcvr_bcknds'] @@ -1934,7 +1936,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = rcvr_bcknds = np.array(avg_rcvr_bcknds) # Get the set of unique receiver-bandend combos RCVR_BCKNDS = set(rcvr_bcknds) - + if axs == None: if 'figsize' in kwargs.keys(): figsize = kwargs['figsize'] @@ -1944,7 +1946,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = ax1 = fig.add_subplot(111) else: ax1 = axs - + xmax=0 for i, r_b in enumerate(RCVR_BCKNDS): inds = np.where(rcvr_bcknds==r_b)[0] @@ -1971,7 +1973,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = if restype == 'both': ax1.hist(res[inds], nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\ label=r_b_label+" Prefit") - + ax1.grid(True) ax1.set_ylabel("Number of measurements") if plotsig: @@ -2026,7 +2028,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = elif restype == "both": ext += "_pre_post_fit" 
plt.savefig("%s_resid_measurements%s.png" % (fitter.model.PSR.value, ext)) - + return def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin = 50, \ @@ -2052,7 +2054,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin axs [string] : If not None, should be defined subplot value and the figure will be used as part of a larger figure [default: None]. mean_sub [boolean] : If False, will not mean subtract the DM residuals to be centered on zero [default: True] - + Optional Arguments: -------------------- dmres [list/array] : List or array of residual values to plot. Will override values from fitter object. @@ -2067,7 +2069,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin # Check if wideband if not fitter.is_wideband: raise ValueError("Narrowband Fitters have have no DM residuals, please use `plot_measurements_v_dmres` instead.") - + # Get the DM residuals if 'dmres' in kwargs.keys(): dm_resids = kwargs['dmres'] @@ -2079,7 +2081,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin elif restype == 'both': dm_resids = fitter.resids.residual_objs['dm'].resids.value dm_resids_init = fitter.resids_init.residual_objs['dm'].resids.value - + # Get the DM residual errors if "errs" in kwargs.keys(): dm_error = kwargs['errs'] @@ -2091,7 +2093,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin elif restype == 'both': dm_error = fitter.resids.residual_objs['dm'].get_data_error().value dm_error_init = fitter.resids_init.residual_objs['dm'].get_data_error().value - + # Get the receiver-backend combos if 'rcvr_bcknds' in kwargs.keys(): rcvr_bcknds = kwargs['rcvr_bcknds'] @@ -2123,7 +2125,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin xlabel = r"$\Delta$DM/Uncertainty" else: xlabel = r"$\Delta$DM [cm$^{-3}$ pc]" - + if axs == None: if 'figsize' in kwargs.keys(): figsize = kwargs['figsize'] @@ -2144,7 +2146,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin clr = kwargs['color'] else: clr = colorscheme[r_b_label] - + if plotsig: sig = dm_resids[inds]/dm_error[inds] ax1.hist(sig, nbin, histtype='step', color=colorscheme[r_b_label], label=r_b_label) @@ -2157,7 +2159,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin if restype == 'both': ax1.hist(dm_resids_init[inds], nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\ label=r_b_label+" Prefit") - + ax1.grid(True) ax1.set_ylabel("Number of measurements") ax1.set_xlabel(xlabel) @@ -2186,7 +2188,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin elif restype == "both": ext += "_pre_post_fit" plt.savefig("%s_DM_resid_measurements%s.png" % (fitter.model.PSR.value, ext)) - + return @@ -2232,8 +2234,8 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") else: NB = True - - + + # Check if want epoch averaged residuals if avg == True and restype == 'prefit' and mixed_ecorr == True: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) @@ -2301,11 +2303,11 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal raise ValueError("Unrecognized residual type: %s. 
Please choose from 'prefit', 'postfit', or 'both'."\ %(restype)) - - + + # Check if we want whitened residuals if whitened == True and ('res' not in kwargs.keys()): - if avg == True and mixed_ecorr == True: + if avg == True and mixed_ecorr == True: if restype != 'both': res = whiten_resids(avg_dict, restype=restype) res_no_avg = whiten_resids(no_avg_dict, restype=restype) @@ -2318,14 +2320,14 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal res_pre_no_avg = res_pre_no_avg.to(u.us) res = res.to(u.us) res_no_avg = res_no_avg.to(u.us) - elif avg == True and mixed_ecorr == False: + elif avg == True and mixed_ecorr == False: if restype != 'both': res = whiten_resids(avg_dict, restype=restype) else: res = whiten_resids(avg_dict_pre, restype='prefit') res_pre = whiten_resids(avg_dict, restype='postfit') res_pre = res_pre.to(u.us) - res = res.to(u.us) + res = res.to(u.us) else: if restype != 'both': res = whiten_resids(fitter, restype=restype) @@ -2353,7 +2355,7 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal errs = avg_dict['errors'].to(u.us) errs_no_avg = no_avg_dict['errors'].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict['errors'].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: @@ -2383,7 +2385,7 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal if mixed_ecorr == True: mjds_no_avg = no_avg_dict['mjds'].value - + # Now we need to the orbital phases; start with binary model name if 'orbphase' in kwargs.keys(): @@ -2392,20 +2394,20 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal orbphase = fitter.model.orbital_phase(mjds, radians = False) if avg and mixed_ecorr: no_avg_orbphase = fitter.model.orbital_phase(mjds_no_avg, radians = False) - - + + # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr. # Create combined arrays - - if avg == True and mixed_ecorr == True: + + if avg == True and mixed_ecorr == True: combo_res = np.hstack((res, res_no_avg)) combo_errs = np.hstack((errs, errs_no_avg)) combo_orbphase = np.hstack((orbphase, no_avg_orbphase)) if restype =='both': - combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) + combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) combo_res_pre = np.hstack((res_pre, res_no_avg_pre)) - - + + # Get colorby flag values (obs, PTA, febe, etc.) if 'colorby' in kwargs.keys(): cb = kwargs['colorby'] @@ -2422,15 +2424,15 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal for jjs in no_avg_dict['indices']: no_avg_cb.append(cb[jjs]) no_ecorr_cb = np.array(no_avg_cb) - + cb = np.array(avg_cb) - + # Get the set of unique flag values if avg==True and mixed_ecorr==True: cb = np.hstack((cb,no_ecorr_cb)) - + CB = set(cb) - + if colorby== 'pta': colorscheme = colorschemes['pta'] markerscheme = markers['pta'] @@ -2440,7 +2442,7 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal elif colorby == 'f': colorscheme = colorschemes['febe'] markerscheme = markers['febe'] - + if 'figsize' in kwargs.keys(): figsize = kwargs['figsize'] else: @@ -2612,7 +2614,7 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi Middle: Best fit residuals with no FD parameters. Bottom: Residuals with FD correction included. Note - This function may take a while to run if there are many TOAs. 
- + Arguments --------- fitter [object] : The PINT fitter object. @@ -2647,11 +2649,11 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") else: NB = True - + # Check if want epoch averaged residuals if avg: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - + # Get residuals if 'res' in kwargs.keys(): res = kwargs['res'] @@ -2663,7 +2665,7 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi res = fitter.resids.time_resids.to(u.us) else: res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - + # Check if we want whitened residuals if whitened == True and ('res' not in kwargs.keys()): if avg == True: @@ -2672,7 +2674,7 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi else: res = whiten_resids(fitter) res = res.to(u.us) - + # Get errors if 'errs' in kwargs.keys(): errs = kwargs['errs'] @@ -2697,7 +2699,7 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi rcvr_bcknds = np.array(avg_rcvr_bcknds) # Get the set of unique receiver-bandend combos RCVR_BCKNDS = set(rcvr_bcknds) - + # get frequencies if 'freqs' in kwargs.keys(): freqs = kwargs['freqs'] @@ -2706,7 +2708,7 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi freqs = avg_dict['freqs'].value else: freqs = fitter.toas.get_freqs().value - + # Check if comparing the FD parameters if comp_FD: if axs != None: @@ -3229,7 +3231,7 @@ def plots_for_summary_pdf_nb(fitter, title = None, legends = False): Function to make a composite set of summary plots for sets of TOAs to be put into a summary pdf. This is for Narrowband timing only. For Wideband timing, use `plots_for_summary_pdf_wb`. By definition, this function will save all plots as "psrname"_summary_plot_#.nb.png, where # is - and integer from 1-4. + and integer from 1-4. Arguments --------- @@ -3237,7 +3239,7 @@ def plots_for_summary_pdf_nb(fitter, title = None, legends = False): title [boolean] : If True, will add titles to ALL plots [default: False]. legend [boolean] : If True, will add legends to ALL plots [default: False]. """ - + if fitter.is_wideband: raise ValueError("Cannot use this function with WidebandTOAFitter, please use `plots_for_summary_pdf_wb` instead.") # Need to make four sets of plots @@ -3374,7 +3376,7 @@ def plots_for_summary_pdf_wb(fitter, title = None, legends = False): Function to make a composite set of summary plots for sets of TOAs to be put into a summary pdf. This is for Wideband timing only. For Narrowband timing, use `plots_for_summary_pdf_nb`. By definition, this function will save all plots as "psrname"_summary_plot_#.wb.png, where # is - and integer from 1-4. + and integer from 1-4. 
Arguments --------- @@ -3537,12 +3539,12 @@ def plot_settings(): def get_fitter(yaml): """ Get the fitter and model from a given YAML - + Parameters ========== yaml: str yaml to use for locating latest results - + """ tc = TimingConfiguration(yaml) mo, to = tc.get_model_and_toas(excised=True, usepickle=True) @@ -3558,17 +3560,17 @@ def get_fitter(yaml): def get_avg_years(fo_nb, fo_wb, avg_dict): """ Get MJDS for each data set in years - + Parameters ========== fo: fitter object mo: model object avg_dict: from fo.resids.ecorr_average() - + """ mjd_nb = fo_nb.toas.get_mjds().value years_nb = (mjd_nb - 51544.0)/365.25 + 2000.0 - mjd_wb = fo_wb.toas.get_mjds().value + mjd_wb = fo_wb.toas.get_mjds().value years_wb = (mjd_wb - 51544.0)/365.25 + 2000.0 mjds_avg = avg_dict['mjds'].value years_avg = (mjds_avg - 51544.0)/365.25 + 2000.0 @@ -3577,13 +3579,13 @@ def get_avg_years(fo_nb, fo_wb, avg_dict): def get_backends(fo_nb, fo_wb, avg_dict): """ Grab backends via flags to make plotting easier - + Parameters ========== fo: fitter object mo: model object avg_dict: from fo.resids.ecorr_average() - + """ rcvr_bcknds_nb = np.array(fo_nb.toas.get_flag_value('f')[0]) rcvr_set_nb = set(rcvr_bcknds_nb) @@ -3599,11 +3601,11 @@ def get_backends(fo_nb, fo_wb, avg_dict): def get_DMX_info(fo): """ Get DMX timeseries info from dmxparse - + Parameters ========== fo: fitter object - + """ dmx_dict = pint.utils.dmxparse(fo) DMXs = dmx_dict['dmxs'].value @@ -3615,7 +3617,7 @@ def get_DMX_info(fo): def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): """ Plot color-divided-by-receiver/BE points on any axis - + Parameters ========== ax: axis for plotting @@ -3624,7 +3626,7 @@ def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): err: error bars to plot bknds: list of backend flags associated with TOAs rn_off: the DC red noise offset to subtract (prior to PINT fix) - + """ markers, colorscheme = plot_settings() for i, r_b in enumerate(set(bknds)): @@ -3642,7 +3644,7 @@ def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): if be_legend: handles, labels = ax.get_legend_handles_labels() - labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0])) + labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0])) label_names = {"327_ASP": "ASP 327 MHz", "327_PUPPI": "PUPPI 327 MHz", "430_ASP": "ASP 430 MHz", @@ -3670,7 +3672,7 @@ def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): def rec_labels(axs, bcknds, years_avg): """ Mark transitions between backends - + Parameters ========== axs: axis for plotting @@ -3679,7 +3681,7 @@ def rec_labels(axs, bcknds, years_avg): err: error bars to plot bknds: list of backend flags associated with TOAs rn_off: the DC red noise offset to subtract (prior to PINT fix) - + """ guppi = 2010.1 puppi = 2012.1 @@ -3726,13 +3728,13 @@ def rec_labels(axs, bcknds, years_avg): else: axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI/GUPPI', transform=tform, va=va, ha=ha) axs[0].text((guppi+x_min_yr)/2., ycoord, 'ASP/GASP', transform=tform, va=va, ha=ha) - axs[0].text((guppi+puppi)/2., ycoord, 'ASP/GUPPI', transform=tform, va=va, ha=ha) + axs[0].text((guppi+puppi)/2., ycoord, 'ASP/GUPPI', transform=tform, va=va, ha=ha) elif has_ao and not has_gbt: if has_yuppi: axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI/YUPPI', transform=tform, va=va, ha=ha) else: axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI', transform=tform, va=va, ha=ha) - axs[0].text((puppi+x_min_yr)/2. 
- 0.2, ycoord, 'ASP', transform=tform, va=va, ha=ha) + axs[0].text((puppi+x_min_yr)/2. - 0.2, ycoord, 'ASP', transform=tform, va=va, ha=ha) elif not has_ao and has_gbt: if has_yuppi: axs[0].text((puppi+x_max_yr)/2., ycoord, 'GUPPI/YUPPI', transform=tform, va=va, ha=ha) From 5f1fd309a40017c594530ea3d68c6bb29ceb7c86 Mon Sep 17 00:00:00 2001 From: Michael Lam Date: Mon, 19 Aug 2024 22:04:49 -0400 Subject: [PATCH 017/193] add VEGAS marker styles --- src/pint_pal/plot_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/pint_pal/plot_utils.py b/src/pint_pal/plot_utils.py index 338325de..6d8427d5 100644 --- a/src/pint_pal/plot_utils.py +++ b/src/pint_pal/plot_utils.py @@ -168,8 +168,10 @@ "L-wide_PUPPI": "x", "Rcvr1_2_GASP": "x", "Rcvr1_2_GUPPI": "x", + "Rcvr1_2_VEGAS": "x", "Rcvr_800_GASP": "o", "Rcvr_800_GUPPI": "o", + "Rcvr_800_VEGAS": "o", "S-wide_ASP": "o", "S-wide_PUPPI": "o", "1.5GHz_YUPPI": "x", From 4ed98345947c1b27b26c5647e7176253d068c3da Mon Sep 17 00:00:00 2001 From: tcromartie Date: Thu, 22 Aug 2024 02:48:47 +0000 Subject: [PATCH 018/193] More VEGAS defaults, pta no longer default scheme --- src/pint_pal/plot_utils.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/pint_pal/plot_utils.py b/src/pint_pal/plot_utils.py index 6d8427d5..2b7db1c7 100644 --- a/src/pint_pal/plot_utils.py +++ b/src/pint_pal/plot_utils.py @@ -244,7 +244,7 @@ def call(x): subprocess.call(x,shell=True) -def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = False, avg = False, whitened = False, \ +def plot_residuals_time(fitter, restype = 'postfit', colorby='f', plotsig = False, avg = False, whitened = False, \ save = False, legend = True, title = True, axs = None, mixed_ecorr=False, **kwargs): """ Make a plot of the residuals vs. time @@ -812,7 +812,7 @@ def get_FD_delay(pint_model_object,freqs): return -def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = False, avg = False, mixed_ecorr=False,\ +def plot_residuals_freq(fitter, restype = 'postfit', colorby='f',plotsig = False, avg = False, mixed_ecorr=False,\ whitened = False, save = False, legend = True, title = True, axs = None, **kwargs): """ Make a plot of the residuals vs. frequency @@ -1960,6 +1960,8 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = if 'color' in kwargs.keys(): clr = kwargs['color'] else: + print(colorscheme) + print(r_b_label) clr = colorscheme[r_b_label] if plotsig: sig = res[inds]/errs[inds] @@ -2194,7 +2196,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin return -def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = False, avg = False, mixed_ecorr=False, \ +def plot_residuals_orb(fitter, restype = 'postfit', colorby='f', plotsig = False, avg = False, mixed_ecorr=False, \ whitened = False, save = False, legend = True, title = True, axs = None, **kwargs): """ Make a plot of the residuals vs. orbital phase. 
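A minimal usage sketch of the new default (hedged; `fo` is assumed to be a
PINT fitter built elsewhere, e.g. via TimingConfiguration.construct_fitter):

    from pint_pal.plot_utils import plot_residuals_time
    # points are now grouped and colored by each TOA's -f (frontend_backend) flag
    plot_residuals_time(fo, restype='postfit', colorby='f')
    # the previous PTA-based grouping remains available explicitly
    plot_residuals_time(fo, restype='postfit', colorby='pta')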
@@ -3504,8 +3506,10 @@ def plot_settings(): "L-wide_PUPPI": "#6BA9E2", "Rcvr1_2_GASP": "#407BD5", "Rcvr1_2_GUPPI": "#407BD5", + "Rcvr1_2_VEGAS": "#61C853", "Rcvr_800_GASP": "#61C853", "Rcvr_800_GUPPI": "#61C853", + "Rcvr_800_VEGAS": "#61C853", "S-wide_ASP": "#855CA0", "S-wide_PUPPI": "#855CA0", "1.5GHz_YUPPI": "#45062E", @@ -3523,8 +3527,10 @@ def plot_settings(): "L-wide_PUPPI": "x", "Rcvr1_2_GASP": "x", "Rcvr1_2_GUPPI": "x", + "Rcvr1_2_VEGAS": "x", "Rcvr_800_GASP": "x", "Rcvr_800_GUPPI": "x", + "Rcvr_800_VEGAS": "x", "S-wide_ASP": "x", "S-wide_PUPPI": "x", "1.5GHz_YUPPI": "x", @@ -3655,8 +3661,10 @@ def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): "L-wide_PUPPI": "PUPPI L-wide", "Rcvr1_2_GASP": "GASP L-band", "Rcvr1_2_GUPPI": "GUPPI L-band", + "Rcvr1_2_VEGAS": "VEGAS L-band", "Rcvr_800_GASP": "GASP 820 MHz", "Rcvr_800_GUPPI": "GUPPI 820 MHz", + "Rcvr_800_VEGAS": "VEGAS 820 MHz", "S-wide_ASP": "ASP S-wide", "S-wide_PUPPI": "PUPPI S-wide", "1.5GHz_YUPPI": "YUPPI 1.5 GHz", From a7cff71f171ec9c4e3292bffd7c2a057aeae33c4 Mon Sep 17 00:00:00 2001 From: tcromartie Date: Thu, 22 Aug 2024 02:52:26 +0000 Subject: [PATCH 019/193] Take out print statements for testing --- src/pint_pal/plot_utils.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/pint_pal/plot_utils.py b/src/pint_pal/plot_utils.py index 2b7db1c7..b93af983 100644 --- a/src/pint_pal/plot_utils.py +++ b/src/pint_pal/plot_utils.py @@ -1960,8 +1960,6 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = if 'color' in kwargs.keys(): clr = kwargs['color'] else: - print(colorscheme) - print(r_b_label) clr = colorscheme[r_b_label] if plotsig: sig = res[inds]/errs[inds] From 01b0f99da1f278f974bd0aefef9a161a2b0ce560 Mon Sep 17 00:00:00 2001 From: Michael Lam Date: Fri, 23 Aug 2024 05:55:25 +0000 Subject: [PATCH 020/193] allow bin_width as argument to setup_dmx; introduce type hinting --- src/pint_pal/dmx_utils.py | 123 ++++++++++++++++++++++++++++---------- 1 file changed, 93 insertions(+), 30 deletions(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index 024f7583..2782e8f2 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -1,5 +1,7 @@ +from typing import Any, Optional, Tuple import numpy as np from astropy import log +import pint from pint_pal.utils import apply_cut_flag, apply_cut_select class DMXParameter: @@ -9,7 +11,7 @@ class DMXParameter: aliases = {'idx':'index', 'val':'dmx_val', 'err':'dmx_err', 'ep':'epoch', 'r1':'low_mjd', 'r2':'high_mjd', 'f1':'low_freq', 'f2':'high_freq', 'mask':'toa_mask'} - def __init__(self): + def __init__(self) -> None: """ """ self.idx = 0 # index label [int] @@ -22,17 +24,17 @@ def __init__(self): self.f2 = 0.0 # highest frequency [MHz] self.mask = [] # Boolean index array for selecting TOAs - def __setattr__(self, name, value): + def __setattr__(self, name: str, value: Any) -> None: name = self.aliases.get(name, name) object.__setattr__(self, name, value) - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: if name == 'aliases': raise AttributeError # http://nedbatchelder.com/blog/201010/surprising_getattr_recursion.html name = self.aliases.get(name, name) return object.__getattribute__(self, name) - def print_dmx(self, range_only=False, fit_flag=True, fortran=False): + def print_dmx(self, range_only: bool = False, fit_flag: bool = True, fortran: bool = False) -> None: """ Print TEMPO-style DMX parameter. 
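For reference, a short sketch of the alias plumbing these annotations cover
(values are placeholders):

    from pint_pal.dmx_utils import DMXParameter

    dmx = DMXParameter()
    dmx.r1, dmx.r2 = 58000.0, 58001.0   # aliases route to low_mjd/high_mjd
    assert dmx.low_mjd == 58000.0 and dmx.high_mjd == 58001.0
    dmx.print_dmx(range_only=True)      # limit output to the DMXR1/DMXR2 lines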
@@ -60,7 +62,7 @@ def print_dmx(self, range_only=False, fit_flag=True, fortran=False): print(DMX_str) -def group_dates(toas, group_width=0.1): +def group_dates(toas: pint.toa.TOAs, group_width: float = 0.1) -> list: """ Returns MJDs of groups of TOAs no wider than a specified amount. @@ -93,8 +95,13 @@ def group_dates(toas, group_width=0.1): return group_mjds -def get_dmx_ranges(toas, bin_width=1.0, pad=0.0, strict_inclusion=True, - check=True): +def get_dmx_ranges( + toas: pint.toa.TOAs, + bin_width: float = 1.0, + pad: float = 0.0, + strict_inclusion: bool = True, + check: bool = True +) -> list: """ Returns a list of low and high MJDs defining DMX ranges, covering all TOAs. @@ -151,8 +158,14 @@ def get_dmx_ranges(toas, bin_width=1.0, pad=0.0, strict_inclusion=True, return dmx_ranges -def get_gasp_dmx_ranges(toas, group_width=0.1, bin_width=15.0, pad=0.0, - strict_inclusion=True, check=True): +def get_gasp_dmx_ranges( + toas: pint.toa.TOAs, + group_width: float = 0.1, + bin_width: float = 15.0, + pad: float = 0.0, + strict_inclusion: bool = True, + check: bool = True +) -> list: """ Return a list of DMX ranges that group GASP TOAs into bins. @@ -221,8 +234,15 @@ def get_gasp_dmx_ranges(toas, group_width=0.1, bin_width=15.0, pad=0.0, return dmx_ranges -def expand_dmx_ranges(toas, dmx_ranges, bin_width=1.0, pad=0.0, - strict_inclusion=True, add_new_ranges=False, check=True): +def expand_dmx_ranges( + toas: pint.toa.TOAs, + dmx_ranges: list, + bin_width: float = 1.0, + pad: float = 0.0, + strict_inclusion: bool = True, + add_new_ranges: bool = False, + check: bool = True +) -> list: """ Expands DMX ranges to accommodate new TOAs up to a maximum bin width. @@ -297,7 +317,12 @@ def expand_dmx_ranges(toas, dmx_ranges, bin_width=1.0, pad=0.0, return dmx_ranges -def check_dmx_ranges(toas, dmx_ranges, full_return=False, quiet=False): +def check_dmx_ranges( + toas: pint.toa.TOAs, + dmx_ranges: list, + full_return: bool = False, + quiet: bool = False +) -> Tuple[list, list, list, list, list, list] | None: """ Ensures all TOAs match only one DMX bin and all bins have at least one TOA. @@ -392,7 +417,7 @@ def check_dmx_ranges(toas, dmx_ranges, full_return=False, quiet=False): return masks, ibad, iover, iempty, inone, imult -def get_dmx_mask(toas, low_mjd, high_mjd, strict_inclusion=True): +def get_dmx_mask(toas: pint.toa.TOAs, low_mjd: float, high_mjd: float, strict_inclusion: bool = True) -> np.ndarray: """ Return a Boolean index array for selecting TOAs from toas in a DMX range. @@ -413,7 +438,7 @@ def get_dmx_mask(toas, low_mjd, high_mjd, strict_inclusion=True): return mask -def get_dmx_epoch(toas, weighted_average=True): +def get_dmx_epoch(toas: pint.toa.TOAs, weighted_average: bool = True) -> float: """ Return the epoch of a DMX bin. @@ -435,7 +460,7 @@ def get_dmx_epoch(toas, weighted_average=True): return epoch -def get_dmx_freqs(toas, allow_wideband=True): +def get_dmx_freqs(toas: pint.toa.TOAs, allow_wideband: bool = True) -> Tuple[float, float]: """ Return the lowest and highest frequency of the TOAs in a DMX bin. 
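Taken together, the annotated helpers compose as follows (a hedged sketch;
`toas` is assumed to be a pint.toa.TOAs object loaded elsewhere):

    low_mjd, high_mjd = 58000.0, 58006.5           # one candidate DMX range
    mask = get_dmx_mask(toas, low_mjd, high_mjd)   # boolean np.ndarray
    epoch = get_dmx_epoch(toas[mask])              # weighted mean MJD
    f_lo, f_hi = get_dmx_freqs(toas[mask])         # Tuple[float, float], MHz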
@@ -470,8 +495,15 @@ def get_dmx_freqs(toas, allow_wideband=True): return low_freq, high_freq -def check_frequency_ratio(toas, dmx_ranges, frequency_ratio=1.1, - strict_inclusion=True, allow_wideband=True, invert=False, quiet=False): +def check_frequency_ratio( + toas: pint.toa.TOAs, + dmx_ranges: list, + frequency_ratio: float = 1.1, + strict_inclusion: bool = True, + allow_wideband: bool = True, + invert: bool = False, + quiet: bool = False +) -> Tuple[np.ndarray, np.ndarray]: """ Check that the TOAs in a DMX bin pass a frequency ratio criterion. @@ -522,9 +554,20 @@ def check_frequency_ratio(toas, dmx_ranges, frequency_ratio=1.1, np.arange(len(dmx_ranges))[np.logical_not(dmx_range_mask)] -def check_solar_wind(toas, dmx_ranges, model, max_delta_t=0.1, bin_width=1.0, - solar_n0=5.0, allow_wideband=True, strict_inclusion=True, pad=0.0, - check=True, return_only=False, quiet=False): +def check_solar_wind( + toas: pint.toa.TOAs, + dmx_ranges: list, + model: pint.models.timing_model.TimingModel, + max_delta_t: float = 0.1, + bin_width: float = 1.0, + solar_n0: float = 5.0, + allow_wideband: bool = True, + strict_inclusion: bool = True, + pad: float = 0.0, + check: bool = True, + return_only: bool = False, + quiet: bool = False +) -> list: """ Split DMX ranges based on influence of the solar wind. @@ -608,7 +651,7 @@ def check_solar_wind(toas, dmx_ranges, model, max_delta_t=0.1, bin_width=1.0, return dmx_ranges -def add_dmx(model, bin_width=1.0): +def add_dmx(model: pint.models.timing_model.TimingModel, bin_width: float = 1.0) -> None: """ Checks for DispersionDMX and ensures the bin width is the only parameter. @@ -628,7 +671,7 @@ def add_dmx(model, bin_width=1.0): dmx.DMX.set(bin_width) -def model_dmx_params(model): +def model_dmx_params(model: pint.models.timing_model.TimingModel) -> Tuple[list, np.ndarray, np.ndarray]: """ Get DMX ranges, values, and uncertainties from a PINT model object. @@ -655,7 +698,7 @@ def model_dmx_params(model): return dmx_ranges, dmx_vals, dmx_errs -def remove_all_dmx_ranges(model, quiet=False): +def remove_all_dmx_ranges(model: pint.models.timing_model.TimingModel, quiet: bool = False) -> None: """ Uses PINT to remove all DMX parameter ranges from a timing model. @@ -675,8 +718,15 @@ def remove_all_dmx_ranges(model, quiet=False): pass -def setup_dmx(model, toas, quiet=True, frequency_ratio=1.1, max_delta_t=0.1, - freeze_DM=True): +def setup_dmx( + model: pint.models.timing_model.TimingModel, + toas: pint.toa.TOAs, + quiet: bool = True, + frequency_ratio: float = 1.1, + max_delta_t: float = 0.1, + bin_width: Optional[float] = None, + freeze_DM: bool = True +) -> pint.toa.TOAs: """ Sets up and checks a DMX model using a number of defaults. @@ -688,6 +738,7 @@ def setup_dmx(model, toas, quiet=True, frequency_ratio=1.1, max_delta_t=0.1, the frequencies used are returned by get_dmx_freqs(). max_delta_t is the time delay [us] above which a DMX range will be split. quiet=True turns off some of the logged warnings and info. + bin_width=constant bin width if provided, otherwise use observatory defaults if None freeze_DM=True ensures the mean DM parameter is not fit. 
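    A usage sketch of the new override (hedged; `mo` and `to` are assumed to
    be a PINT timing model and TOAs pair prepared elsewhere):

        to = setup_dmx(mo, to, frequency_ratio=1.1, max_delta_t=0.1,
                       bin_width=6.5)   # force 6.5-day bins at all observatories
        to = setup_dmx(mo, to)          # bin_width=None: per-observatory defaults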
""" @@ -714,8 +765,12 @@ def setup_dmx(model, toas, quiet=True, frequency_ratio=1.1, max_delta_t=0.1, adjust_old_dmx = False # Set up DMX model - if toas.observatories == set(['arecibo']): bin_width = 0.5 # day - else: bin_width = 6.5 #day + if bin_width is None: #use observatory defaults + if toas.observatories == set(['arecibo']): + bin_width = 0.5 # day + else: + bin_width = 6.5 #day + # Calculate GASP-era ranges, if applicable dmx_ranges = get_gasp_dmx_ranges(toas, group_width=0.1, bin_width=15.0, pad=0.05, check=False) @@ -813,9 +868,17 @@ def setup_dmx(model, toas, quiet=True, frequency_ratio=1.1, max_delta_t=0.1, return toas -def make_dmx(toas, dmx_ranges, dmx_vals=None, dmx_errs=None, - strict_inclusion=True, weighted_average=True, allow_wideband=True, - start_idx=1, print_dmx=False): +def make_dmx( + toas: pint.toa.TOAs, + dmx_ranges: list, + dmx_vals: Optional[np.ndarray] = None, + dmx_errs: Optional[np.ndarray] = None, + strict_inclusion: bool = True, + weighted_average: bool = True, + allow_wideband: bool = True, + start_idx: int = 1, + print_dmx: bool = False +): """ Uses convenience functions to assemble a TEMPO-style DMX parameters. From 137f5e8e26cccc54cf3d22379f716a60c26b84c7 Mon Sep 17 00:00:00 2001 From: Michael Lam Date: Fri, 23 Aug 2024 07:43:49 +0000 Subject: [PATCH 021/193] switch | to Union for earlier python support --- src/pint_pal/dmx_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index 2782e8f2..63554e9b 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -322,7 +322,7 @@ def check_dmx_ranges( dmx_ranges: list, full_return: bool = False, quiet: bool = False -) -> Tuple[list, list, list, list, list, list] | None: +) -> Union[Tuple[list, list, list, list, list, list],None]: """ Ensures all TOAs match only one DMX bin and all bins have at least one TOA. 
From 294b864e724ddb04ce7c82e1a9263b7fbab445b6 Mon Sep 17 00:00:00 2001 From: Michael Lam Date: Fri, 23 Aug 2024 08:01:27 +0000 Subject: [PATCH 022/193] switch | to Union for earlier python support --- src/pint_pal/dmx_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index 63554e9b..55a65383 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -1,4 +1,4 @@ -from typing import Any, Optional, Tuple +from typing import Any, Optional, Tuple, Union import numpy as np from astropy import log import pint From 816599a7229370e503959a5716298e0ff6bc9922 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Sun, 25 Aug 2024 03:17:16 +0000 Subject: [PATCH 023/193] adding a gibbs sampler for noise analyses --- src/pint_pal/gibbs_sampler.py | 615 ++++++++++++++++++++++++++++++++++ 1 file changed, 615 insertions(+) create mode 100644 src/pint_pal/gibbs_sampler.py diff --git a/src/pint_pal/gibbs_sampler.py b/src/pint_pal/gibbs_sampler.py new file mode 100644 index 00000000..e33115a3 --- /dev/null +++ b/src/pint_pal/gibbs_sampler.py @@ -0,0 +1,615 @@ +import numpy as np +from tqdm import tqdm +import scipy.linalg as sl +from functools import cached_property +import os +import glob +import warnings +from enterprise_extensions import model_utils, blocks +from PTMCMCSampler.PTMCMCSampler import PTSampler as ptmcmc +from enterprise.signals import signal_base, gp_signals +from scipy.linalg import solve_triangular as st_solve +from scipy.linalg import cho_factor, cho_solve + + +class BayesPowerSingle(object): + + """ + The Gibbs Method class used for single-pulsar noise analyses. + + Based on: + + Article by van Haasteren & Vallisneri (2014), + "New advances in the Gaussian-process approach + to pulsar-timing data analysis", + Physical Review D, Volume 90, Issue 10, id.104012 + arXiv:1407.1838 + + Initial structure of the code is based on https://github.com/jellis18/gibbs_student_t + + Authors: + + S. R. Taylor + N. Laal + """ + + def __init__( + self, + psr=None, + Tspan=None, + select="backend", + white_vary=False, + inc_ecorr=False, + ecorr_type="kernel", + noise_dict=None, + tm_marg=False, + rn_components=30, + dm_components=None, + chrom_components=None, + dm_type = "gibbs", + chrom_type = "gibbs", + tnequad=True, + log10rhomin=-9.0, + log10rhomax=-4.0, + ): + """ + Parameters + ----------- + + psr : object + instance of an ENTERPRISE psr object for a single pulsar + + Tspan: float (optional) + if given, the baseline of the pulsar is fixed to the input value. If not, + baseline is determined inetrnally + + select: str + the selection of backend ('backend' or 'none') for the white-noise parameters + + white_vary: bool + whether to vary the white noise + + inc_ecorr: bool + whether to include ecorr + + ecorr_type: str + the type of ecorr to use. Choose between 'basis' or 'kernel' + + noise_dict: dict + white noise dictionary in case 'white_vary' is set to False + + tm_marg: bool + whether to marginalize over timing model parameters (do not use this if you are varying the white noise!) + + rn_components: int + number of red noise Fourier modes to include + + dm_components: int + number of DM noise Fourier modes to include + + chrom_components: int + number of chromatic noise Fourier modes to include + + dm_type: str + the type of DM noise to use. Choose between 'gibbs' or 'mcmc' or None (for DMX) + + chrom_type: str + the type of chromatic noise to use. 
Choose between 'gibbs' or 'mcmc' or None (for no chromatic noise) + + log10rhomin: float + lower bound for the log10 of the rho parameter. + + log10rhomax: float + upper bound for the log10 of the rho parameter + + tnequad: string + whether to use the temponest convension of efac and equad + """ + + self.psr = [psr] + if Tspan: + self.Tspan = Tspan + else: + self.Tspan = model_utils.get_tspan(self.psr) + self.name = self.psr[0].name + self.inc_ecorr = inc_ecorr + self.ecorr_type = ecorr_type + self.white_vary = white_vary + self.tm_marg = tm_marg + self.wn_names = ["efac", "equad", "ecorr"] + self.rhomin = log10rhomin + self.rhomax = log10rhomax + self.rn_components = rn_components + self.dm_components = dm_components + self.chrom_components = chrom_components + self.dm_type = dm_type + self.chrom_type = chrom_type + self.low = 10 ** (2 * self.rhomin) + self.high = 10 ** (2 * self.rhomax) + + # Making the pta object + if self.tm_marg: + tm = gp_signals.MarginalizingTimingModel(use_svd=True) + if self.white_vary: + warnings.warn( + "***FYI: the timing model is marginalized for. This will slow down the WN sampling!!***" + ) + else: + tm = gp_signals.TimingModel(use_svd=True) + + if self.ecorr_type == "basis": + wn = blocks.white_noise_block( + vary=self.white_vary, + inc_ecorr=self.inc_ecorr, + gp_ecorr=True, + select=select, + tnequad=tnequad, + ) + else: + wn = blocks.white_noise_block( + vary=self.white_vary, + inc_ecorr=self.inc_ecorr, + gp_ecorr=False, + select=select, + tnequad=tnequad, + ) + + rn = blocks.common_red_noise_block( + psd="spectrum", + prior="log-uniform", + Tspan=self.Tspan, + logmin=self.rhomin, + logmax=self.rhomax, + components=rn_components, + gamma_val=None, + name="gw", + ) + s = tm + wn + rn + self.pta = signal_base.PTA( + [s(p) for p in self.psr], + lnlikelihood=signal_base.LogLikelihoodDenseCholesky, + ) + if not white_vary: + self.pta.set_default_params(noise_dict) + self.Nmat = self.pta.get_ndiag(params={})[0] + self.TNr = self.pta.get_TNr(params={})[0] + self.TNT = self.pta.get_TNT(params={})[0] + else: + self.Nmat = None + + if self.inc_ecorr and "basis" in self.ecorr_type: + # grabbing priors on ECORR params + for ct, par in enumerate(self.pta.params): + if "ecorr" in str(par): + ind = ct + ecorr_priors = str(self.pta.params[ind].params[0]) + ecorr_priors = ecorr_priors.split("(")[1].split(")")[0].split(", ") + self.ecorrmin, self.ecorrmax = ( + 10 ** (2 * float(ecorr_priors[0].split("=")[1])), + 10 ** (2 * float(ecorr_priors[1].split("=")[1])), + ) + + # Getting residuals + self._residuals = self.pta.get_residuals()[0] + # Intial guess for the model params + self._xs = np.array([p.sample() + for p in self.pta.params], dtype=object) + # Initializign the b-coefficients. The shape is 2*freq_bins if tm_marg + # = True. + self._b = np.zeros(self.pta.get_basis(self._xs)[0].shape[1]) + self.Tmat = self.pta.get_basis(params={})[0] + self.phiinv = None + + # find basis indices of GW process + self.gwid = [] + ct = 0 + psigs = [sig for sig in self.pta.signals.keys() if self.name in sig] + for sig in psigs: + Fmat = self.pta.signals[sig].get_basis() + if "gw" in self.pta.signals[sig].name: + self.gwid.append(ct + np.arange(0, Fmat.shape[1])) + # Avoid None-basis processes. + # Also assume red + GW signals share basis. 
+ if Fmat is not None and "red" not in sig: + ct += Fmat.shape[1] + + @cached_property + def params(self): + return self.pta.params + + @cached_property + def param_names(self): + return self.pta.param_names + + def map_params(self, xs): + return self.pta.map_params(xs) + + @cached_property + def get_red_param_indices(self): + ind = [] + for ct, par in enumerate(self.param_names): + if "log10_A" in par or "gamma" in par or "rho" in par: + ind.append(ct) + return np.array(ind) + + @cached_property + def get_efacequad_indices(self): + ind = [] + if "basis" in self.ecorr_type: + for ct, par in enumerate(self.param_names): + if "efac" in par or "equad" in par: + ind.append(ct) + else: + for ct, par in enumerate(self.param_names): + if "ecorr" in par or "efac" in par or "equad" in par: + ind.append(ct) + return np.array(ind) + + @cached_property + def get_basis_ecorr_indices(self): + ind = [] + for ct, par in enumerate(self.param_names): + if "ecorr" in par: + ind.append(ct) + return np.array(ind) + + def update_red_params(self, xs): + """ + Function to perform log10_rho updates given the Fourier coefficients. + """ + tau = self._b[tuple(self.gwid)] ** 2 + tau = (tau[0::2] + tau[1::2]) / 2 + + Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) + x = np.random.default_rng().uniform(0, 1, size=tau.shape) + rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low)) + xs[-1] = 0.5 * np.log10(rhonew) + return xs + + def update_b(self, xs): + """ + Function to perform updates on Fourier coefficients given other model parameters. + """ + params = self.pta.map_params(np.hstack(xs)) + self._phiinv = self.pta.get_phiinv(params, logdet=False)[0] + + try: + TNT = self.TNT.copy() + except BaseException: + T = self.Tmat + TNT = self.Nmat.solve(T, left_array=T) + try: + TNr = self.TNr.copy() + except BaseException: + T = self.Tmat + TNr = self.Nmat.solve(self._residuals, left_array=T) + + np.fill_diagonal(TNT, TNT.diagonal() + self._phiinv) + try: + chol = cho_factor( + TNT, + lower=True, + overwrite_a=False, + check_finite=False) + mean = cho_solve( + chol, + b=TNr, + overwrite_b=False, + check_finite=False) + self._b = mean + st_solve( + chol[0], + np.random.normal(loc=0, scale=1, size=TNT.shape[0]), + lower=True, + unit_diagonal=False, + overwrite_b=False, + check_finite=False, + trans=1, + ) + except np.linalg.LinAlgError: + if self.bchain.any(): + self._b = self.bchain[ + np.random.default_rng().integers(0, len(self.bchain)) + ] + else: + bchain = np.memmap( + self._savepath + "/chain_1", + dtype="float32", + mode="r", + shape=(self.niter, self.len_x + self.len_b), + )[:, -len(self._b):] + self._b = bchain[np.random.default_rng().integers( + 0, len(bchain))] + + def update_white_params(self, xs, iters=10): + """ + Function to perform WN updates given other model parameters. + If kernel ecorr is chosen, WN includes ecorr as well. 
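        (A hedged reading of the flow below: each Gibbs sweep advances an
        embedded PTMCMC chain by `iters` steps on the white-noise conditional,
        holding the Fourier coefficients b fixed, then caches TNT, rNr,
        logdet_N, and d so the subsequent conditional draws can reuse them.)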
+ """ + # get white noise parameter indices + wind = self.get_efacequad_indices + xnew = xs + x0 = xnew[wind].copy() + lnlike0, lnprior0 = self.get_lnlikelihood_white( + x0), self.get_wn_lnprior(x0) + lnprob0 = lnlike0 + lnprior0 + + for ii in range( + self.start_wn_iter + 1, + self.start_wn_iter + iters + 1): + x0, lnlike0, lnprob0 = self.sampler_wn.PTMCMCOneStep( + x0, lnlike0, lnprob0, ii + ) + xnew[wind] = x0 + self.start_wn_iter = ii + + # Do some caching of "later needed" parameters for improved performance + self.Nmat = self.pta.get_ndiag(self.map_params(xnew))[0] + Tmat = self.Tmat + if "basis" not in self.ecorr_type: + self.TNT = self.Nmat.solve(Tmat, left_array=Tmat) + else: + TN = Tmat / self.Nmat[:, None] + self.TNT = Tmat.T @ TN + residuals = self._residuals + self.rNr = np.sum(residuals**2 / self.Nmat) + self.logdet_N = np.sum(np.log(self.Nmat)) + self.d = TN.T @ residuals + + return xnew + + def update_basis_ecorr_params(self, xs, iters=10): + """ + Function to perform basis ecorr updates. + """ + # get white noise parameter indices + eind = self.get_basis_ecorr_indices + xnew = xs + x0 = xnew[eind].copy() + lnlike0, lnprior0 = self.get_basis_ecorr_lnlikelihood( + x0 + ), self.get_basis_ecorr_lnprior(x0) + lnprob0 = lnlike0 + lnprior0 + + for ii in range( + self.start_ec_iter + 1, + self.start_ec_iter + iters + 1): + x0, lnlike0, lnprob0 = self.sampler_ec.PTMCMCOneStep( + x0, lnlike0, lnprob0, ii + ) + xnew[eind] = x0 + self.start_ec_iter = ii + + return xnew + + def get_lnlikelihood_white(self, xs): + """ + Function to calculate WN log-liklihood. + """ + x0 = self._xs.copy() + x0[self.get_efacequad_indices] = xs + + params = self.map_params(x0) + Nmat = self.pta.get_ndiag(params)[0] + # whitened residuals + yred = self._residuals - self.Tmat @ self._b + try: + if "basis" not in self.ecorr_type: + rNr, logdet_N = Nmat.solve(yred, left_array=yred, logdet=True) + else: + rNr = np.sum(yred**2 / Nmat) + logdet_N = np.sum(np.log(Nmat)) + except BaseException: + return -np.inf + # first component of likelihood function + loglike = -0.5 * (logdet_N + rNr) + + return loglike + + def get_basis_ecorr_lnlikelihood(self, xs): + """ + Function to calculate basis ecorr log-liklihood. + """ + x0 = np.hstack(self._xs.copy()) + x0[self.get_basis_ecorr_indices] = xs + + params = self.map_params(x0) + # start likelihood calculations + loglike = 0 + # get auxiliaries + phiinv, logdet_phi = self.pta.get_phiinv(params, logdet=True)[0] + # first component of likelihood function + loglike += -0.5 * (self.logdet_N + self.rNr) + # Red noise piece + Sigma = self.TNT + np.diag(phiinv) + try: + cf = sl.cho_factor(Sigma) + expval = sl.cho_solve(cf, self.d) + except np.linalg.LinAlgError: + return -np.inf + + logdet_sigma = np.sum(2 * np.log(np.diag(cf[0]))) + loglike += 0.5 * (self.d @ expval - logdet_sigma - logdet_phi) + + return loglike + + def get_wn_lnprior(self, xs): + """ + Function to calculate WN log-prior. + """ + x0 = self._xs.copy() + x0[self.get_efacequad_indices] = xs + + return np.sum([p.get_logpdf(value=x0[ct]) + for ct, p in enumerate(self.params)]) + + def get_basis_ecorr_lnprior(self, xs): + """ + Function to calculate basis ecorr log-prior. 
+ """ + x0 = self._xs.copy() + x0[self.get_basis_ecorr_indices] = xs + + return np.sum([p.get_logpdf(value=x0[ct]) + for ct, p in enumerate(self.params)]) + + def sample( + self, + niter=int(1e4), + wniters=30, + eciters=10, + savepath=None, + SCAMweight=30, + AMweight=15, + DEweight=50, + covUpdate=1000, + burn=10000, + **kwargs + ): + """ + Gibbs Sampling + + Parameters + ----------- + niter: integer + total number of Gibbs sampling iterations + + wniters: + number of white noise MCMC sampling iterations within each Gibbs step + + eciters: + number of basis ecorr MCMC sampling iterations within each Gibbs step + + savepath: str + the path to save the chains + + covUpdate: integer + Number of iterations between AM covariance updates + + SCAMweight: integer + Weight of SCAM jumps in overall jump cycle + + AMweight: integer + Weight of AM jumps in overall jump cycle + + DEweight: integer + Weight of DE jumps in overall jump cycle + + kwargs: dict + PTMCMC initialization settings not mentioned above + """ + self.start_wn_iter = 0 + self.start_ec_iter = 0 + + os.makedirs(savepath, exist_ok=True) + + if self.white_vary: + # large number to avoid saving the white noise choice in a txt file + isave = int(4e9) + thin = 1 + Niter = int(niter * wniters + 1) + + x0 = self._xs[self.get_efacequad_indices] + ndim = len(x0) + cov = np.diag( + np.ones(ndim) * 0.01**2 + ) # helps to tune MCMC proposal distribution + self.sampler_wn = ptmcmc( + ndim, + self.get_lnlikelihood_white, + self.get_wn_lnprior, + cov, + outDir=savepath, + resume=False, + ) + self.sampler_wn.initialize( + Niter=Niter, + isave=isave, + thin=thin, + SCAMweight=SCAMweight, + AMweight=AMweight, + DEweight=DEweight, + covUpdate=covUpdate, + burn=burn, + **kwargs + ) + + if "basis" in self.ecorr_type and self.white_vary: + x0 = self._xs[self.get_basis_ecorr_indices] + ndim = len(x0) + cov = np.diag(np.ones(ndim) * 0.01**2) + self.sampler_ec = ptmcmc( + ndim, + self.get_basis_ecorr_lnlikelihood, + self.get_basis_ecorr_lnprior, + cov, + outDir=savepath, + resume=False, + ) + self.sampler_ec.initialize( + Niter=Niter, + isave=isave, + thin=thin, + SCAMweight=SCAMweight, + AMweight=AMweight, + DEweight=DEweight, + covUpdate=covUpdate, + burn=burn, + **kwargs + ) + + np.savetxt(savepath + "/pars.txt", + list(map(str, self.pta.param_names)), fmt="%s") + np.savetxt( + savepath + "/priors.txt", + list(map(lambda x: str(x.__repr__()), self.pta.params)), + fmt="%s", + ) + rn_freqs = np.arange( + 1 / self.Tspan, + (self.rn_components + 0.001) / self.Tspan, + 1 / self.Tspan) + np.save(savepath + "/rn_freqs.npy", rn_freqs) + + if self.dm_components is not None: + dm_freqs = np.arange( + 1 / self.Tspan, + (self.dm_components + 0.001) / self.Tspan, + 1 / self.Tspan) + np.save(savepath + "/dm_freqs.npy", dm_freqs) + if self.chrom_components is not None: + chrom_freqs = np.arange( + 1 / self.Tspan, + (self.chrom_components + 0.001) / self.Tspan, + 1 / self.Tspan) + np.save(savepath + "/chrom_freqs.npy", chrom_freqs) + [os.remove(dpa) for dpa in glob.glob(savepath + "/*jump.txt")] + + xnew = self._xs.copy() + + len_b = len(self._b) + len_x = len(np.hstack(self._xs)) + self._savepath = savepath + + fp = np.lib.format.open_memmap( + savepath + "/chain_1.npy", + mode="w+", + dtype="float32", + shape=(niter, len_x + len_b), + fortran_order=False, + ) + + pbar = tqdm(range(niter), colour="GREEN") + pbar.set_description("Sampling %s" % self.name) + for ii in pbar: + if self.white_vary: + xnew = self.update_white_params(xnew, iters=wniters) + + if self.inc_ecorr 
and "basis" in self.ecorr_type: + xnew = self.update_basis_ecorr_params(xnew, iters=eciters) + + self.update_b(xs=xnew) + xnew = self.update_red_params(xs=xnew) + + fp[ii, -len_b:] = self._b + fp[ii, 0:len_x] = np.hstack(xnew) + From d609d11367695d4d42a5cb7d9a6bbc20951da5a6 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Sun, 25 Aug 2024 03:35:25 +0000 Subject: [PATCH 024/193] i initially added the wrong version of the gibbs sampler -- lol --- src/pint_pal/gibbs_sampler.py | 237 ++++++++++++++++++++++++++-------- 1 file changed, 181 insertions(+), 56 deletions(-) diff --git a/src/pint_pal/gibbs_sampler.py b/src/pint_pal/gibbs_sampler.py index e33115a3..b98e6c12 100644 --- a/src/pint_pal/gibbs_sampler.py +++ b/src/pint_pal/gibbs_sampler.py @@ -12,7 +12,7 @@ from scipy.linalg import cho_factor, cho_solve -class BayesPowerSingle(object): +class GibbsSampler(object): """ The Gibbs Method class used for single-pulsar noise analyses. @@ -31,6 +31,7 @@ class BayesPowerSingle(object): S. R. Taylor N. Laal + J. G. Baier """ def __init__( @@ -38,19 +39,21 @@ def __init__( psr=None, Tspan=None, select="backend", - white_vary=False, + vary_wn=False, inc_ecorr=False, ecorr_type="kernel", noise_dict=None, tm_marg=False, + vary_rn=True, rn_components=30, - dm_components=None, - chrom_components=None, - dm_type = "gibbs", - chrom_type = "gibbs", tnequad=True, log10rhomin=-9.0, log10rhomax=-4.0, + vary_dm=False, + dm_components=50, + vary_chrom=False, + chrom_components=50, + include_quadratic=False, ): """ Parameters @@ -66,7 +69,7 @@ def __init__( select: str the selection of backend ('backend' or 'none') for the white-noise parameters - white_vary: bool + self.vary_wn: bool whether to vary the white noise inc_ecorr: bool @@ -76,7 +79,7 @@ def __init__( the type of ecorr to use. Choose between 'basis' or 'kernel' noise_dict: dict - white noise dictionary in case 'white_vary' is set to False + white noise dictionary in case 'self.vary_wn' is set to False tm_marg: bool whether to marginalize over timing model parameters (do not use this if you are varying the white noise!) @@ -90,12 +93,15 @@ def __init__( chrom_components: int number of chromatic noise Fourier modes to include - dm_type: str - the type of DM noise to use. Choose between 'gibbs' or 'mcmc' or None (for DMX) + dm_var: bool + wheter to include a free spectrum gibbs dm_gp - chrom_type: str - the type of chromatic noise to use. Choose between 'gibbs' or 'mcmc' or None (for no chromatic noise) - + chrom_var: bool + whether to include a free spectrum gibbs chrom_gp + + include_quadratic: bool + whether or not to fit out a quadratic trend in chrom_gp (think DM2) + log10rhomin: float lower bound for the log10 of the rho parameter. 
@@ -114,7 +120,7 @@ def __init__( self.name = self.psr[0].name self.inc_ecorr = inc_ecorr self.ecorr_type = ecorr_type - self.white_vary = white_vary + self.vary_wn = vary_wn self.tm_marg = tm_marg self.wn_names = ["efac", "equad", "ecorr"] self.rhomin = log10rhomin @@ -122,15 +128,17 @@ def __init__( self.rn_components = rn_components self.dm_components = dm_components self.chrom_components = chrom_components - self.dm_type = dm_type - self.chrom_type = chrom_type + self.vary_rn = vary_rn + self.vary_dm = vary_dm + self.vary_chrom = vary_chrom + self.include_quadratic = include_quadratic self.low = 10 ** (2 * self.rhomin) self.high = 10 ** (2 * self.rhomax) # Making the pta object if self.tm_marg: tm = gp_signals.MarginalizingTimingModel(use_svd=True) - if self.white_vary: + if self.vary_wn: warnings.warn( "***FYI: the timing model is marginalized for. This will slow down the WN sampling!!***" ) @@ -139,7 +147,7 @@ def __init__( if self.ecorr_type == "basis": wn = blocks.white_noise_block( - vary=self.white_vary, + vary=self.vary_wn, inc_ecorr=self.inc_ecorr, gp_ecorr=True, select=select, @@ -147,29 +155,62 @@ def __init__( ) else: wn = blocks.white_noise_block( - vary=self.white_vary, + vary=self.vary_wn, inc_ecorr=self.inc_ecorr, gp_ecorr=False, select=select, tnequad=tnequad, ) - rn = blocks.common_red_noise_block( - psd="spectrum", - prior="log-uniform", - Tspan=self.Tspan, - logmin=self.rhomin, - logmax=self.rhomax, - components=rn_components, - gamma_val=None, - name="gw", - ) - s = tm + wn + rn + if self.vary_rn: + rn = blocks.red_noise_block( + psd="spectrum", + prior="log-uniform", + Tspan=self.Tspan, + #logmin=self.rhomin, + #logmax=self.rhomax, + components=self.rn_components, + gamma_val=None, + ) + + if self.vary_dm: + dm = blocks.dm_noise_block( + gp_kernel='diag', + psd='spectrum', + prior='log-uniform', + Tspan=self.Tspan, + components=self.dm_components, + gamma_val=None, + coefficients=False + ) + + if self.vary_chrom: + chrom = blocks.chromatic_noise_block( + gp_kernel='diag', + psd='spectrum', + prior='log-uniform', + idx=4, + include_quadratic=self.include_quadratic, + Tspan=self.Tspan, + name='chrom', + components=self.chrom_components, + ) + + s = tm + wn + + if self.vary_rn: + s += rn + if self.vary_dm: + s += dm + if self.vary_chrom: + s += chrom + self.pta = signal_base.PTA( [s(p) for p in self.psr], lnlikelihood=signal_base.LogLikelihoodDenseCholesky, ) - if not white_vary: + #print(self.pta.signals.keys()) + if not self.vary_wn: self.pta.set_default_params(noise_dict) self.Nmat = self.pta.get_ndiag(params={})[0] self.TNr = self.pta.get_TNr(params={})[0] @@ -177,7 +218,7 @@ def __init__( else: self.Nmat = None - if self.inc_ecorr and "basis" in self.ecorr_type: + if self.inc_ecorr and "basis" in self.ecorr_type and self.vary_wn: # grabbing priors on ECORR params for ct, par in enumerate(self.pta.params): if "ecorr" in str(par): @@ -188,30 +229,58 @@ def __init__( 10 ** (2 * float(ecorr_priors[0].split("=")[1])), 10 ** (2 * float(ecorr_priors[1].split("=")[1])), ) + #print(self.ecorrmin, self.ecorrmax) # Getting residuals self._residuals = self.pta.get_residuals()[0] + ## FIXME : maybe don't cache this -- could lead to memory issues. # Intial guess for the model params self._xs = np.array([p.sample() for p in self.pta.params], dtype=object) - # Initializign the b-coefficients. The shape is 2*freq_bins if tm_marg - # = True. + # Initializign the b-coefficients. 
+ # The shape is 2*rn_comp+2*dm_comp+2*chrom_comp if tm_marg = True + # if tm_marg = False, + # then the shape is more because there are some tm params in there? self._b = np.zeros(self.pta.get_basis(self._xs)[0].shape[1]) + # when including dm and chromatic models, the b's are + # the concantenation of the red noise, dm, and chromatic noise fourier coefficients + #print("len b: ", len(self._b)) + #print(self.pta.get_basis(self._xs)[0].shape) self.Tmat = self.pta.get_basis(params={})[0] self.phiinv = None - + # print(self._xs.shape) + # print(self.pta.params) + # print("dm", self.get_dm_param_indices) + # print("chrom", self.get_chrom_param_indices) + # print("rn:", self.get_rn_param_indices) # find basis indices of GW process - self.gwid = [] + ### jeremy : changing the below from gwid to rn_id and adding dm_id and chrom_id + self.rn_id = [] + self.dm_id = [] + self.chrom_id = [] ct = 0 psigs = [sig for sig in self.pta.signals.keys() if self.name in sig] for sig in psigs: Fmat = self.pta.signals[sig].get_basis() - if "gw" in self.pta.signals[sig].name: - self.gwid.append(ct + np.arange(0, Fmat.shape[1])) + if "red_noise" in self.pta.signals[sig].name: + self.rn_id.append(ct + np.arange(0, Fmat.shape[1])) + ct+=Fmat.shape[1] + if "dm_gp" in self.pta.signals[sig].name: + self.dm_id.append(ct + np.arange(0, Fmat.shape[1])) + ct+=Fmat.shape[1] + if "chrom_gp" in self.pta.signals[sig].name: + self.chrom_id.append(ct + np.arange(0, Fmat.shape[1])) + ct+=Fmat.shape[1] + ### jeremy : chaning the above to red_noise and adding dm and chrom as well # Avoid None-basis processes. # Also assume red + GW signals share basis. - if Fmat is not None and "red" not in sig: + if Fmat is not None and "red" not in sig and 'dm_gp' not in sig and 'chrom_gp' not in sig: ct += Fmat.shape[1] + #print(sig) + #print(ct) + #print("rn", self.rn_id) + #print("dm", self.dm_id) + #print("chrom", self.chrom_id) @cached_property def params(self): @@ -225,10 +294,26 @@ def map_params(self, xs): return self.pta.map_params(xs) @cached_property - def get_red_param_indices(self): + def get_rn_param_indices(self): + ind = [] + for ct, par in enumerate(self.param_names): + if "red_noise" in par: + ind.append(ct) + return np.array(ind) + + @cached_property + def get_dm_param_indices(self): + ind = [] + for ct, par in enumerate(self.param_names): + if "dm_gp" in par: + ind.append(ct) + return np.array(ind) + + @cached_property + def get_chrom_param_indices(self): ind = [] for ct, par in enumerate(self.param_names): - if "log10_A" in par or "gamma" in par or "rho" in par: + if "chrom_gp" in par: ind.append(ct) return np.array(ind) @@ -255,9 +340,10 @@ def get_basis_ecorr_indices(self): def update_red_params(self, xs): """ - Function to perform log10_rho updates given the Fourier coefficients. + Function to perform red_noise_log10_rho updates given + the red noise Fourier coefficients. """ - tau = self._b[tuple(self.gwid)] ** 2 + tau = self._b[tuple(self.rn_id)] ** 2 tau = (tau[0::2] + tau[1::2]) / 2 Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) @@ -266,6 +352,34 @@ def update_red_params(self, xs): xs[-1] = 0.5 * np.log10(rhonew) return xs + def update_dm_params(self, xs): + """ + Function to perform dm_gp_log10_rho updates given + the dm gp Fourier coefficients. 
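        (Same closed-form inverse-CDF draw as update_red_params: with tau the
        average of a squared sine/cosine coefficient pair, the conditional
        density of the mode variance rho is proportional to
        rho**-2 * exp(-tau/rho) on [low, high], so its CDF is
        (exp(-tau/rho) - exp(-tau/low)) / (exp(-tau/high) - exp(-tau/low))
        and rho = -tau / log(x/Norm + exp(-tau/low)) with x ~ U(0, 1).)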
+ """ + tau = self._b[tuple(self.dm_id)] ** 2 + tau = (tau[0::2] + tau[1::2]) / 2 + + Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) + x = np.random.default_rng().uniform(0, 1, size=tau.shape) + rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low)) + xs[-2] = 0.5 * np.log10(rhonew) + return xs + + def update_chrom_params(self, xs): + """ + Function to perform chrom_gp_log10_rho updates given + the chromatic gp Fourier coefficients. + """ + tau = self._b[tuple(self.chrom_id)] ** 2 + tau = (tau[0::2] + tau[1::2]) / 2 + + Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) + x = np.random.default_rng().uniform(0, 1, size=tau.shape) + rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low)) + xs[-3] = 0.5 * np.log10(rhonew) + return xs + def update_b(self, xs): """ Function to perform updates on Fourier coefficients given other model parameters. @@ -306,6 +420,7 @@ def update_b(self, xs): trans=1, ) except np.linalg.LinAlgError: + print("oh sh******t; a spiiiiiddddeeeerrrrrr") if self.bchain.any(): self._b = self.bchain[ np.random.default_rng().integers(0, len(self.bchain)) @@ -455,8 +570,8 @@ def get_basis_ecorr_lnprior(self, xs): def sample( self, niter=int(1e4), - wniters=30, - eciters=10, + wniters=100, + eciters=15, savepath=None, SCAMweight=30, AMweight=15, @@ -502,7 +617,7 @@ def sample( os.makedirs(savepath, exist_ok=True) - if self.white_vary: + if self.vary_wn: # large number to avoid saving the white noise choice in a txt file isave = int(4e9) thin = 1 @@ -533,7 +648,7 @@ def sample( **kwargs ) - if "basis" in self.ecorr_type and self.white_vary: + if "basis" in self.ecorr_type and self.vary_wn and self.inc_ecorr: x0 = self._xs[self.get_basis_ecorr_indices] ndim = len(x0) cov = np.diag(np.ones(ndim) * 0.01**2) @@ -564,19 +679,20 @@ def sample( list(map(lambda x: str(x.__repr__()), self.pta.params)), fmt="%s", ) - rn_freqs = np.arange( - 1 / self.Tspan, - (self.rn_components + 0.001) / self.Tspan, - 1 / self.Tspan) - np.save(savepath + "/rn_freqs.npy", rn_freqs) + if self.vary_rn: + rn_freqs = np.arange( + 1 / self.Tspan, + (self.rn_components + 0.001) / self.Tspan, + 1 / self.Tspan) + np.save(savepath + "/rn_freqs.npy", rn_freqs) - if self.dm_components is not None: + if self.vary_dm: dm_freqs = np.arange( 1 / self.Tspan, (self.dm_components + 0.001) / self.Tspan, 1 / self.Tspan) np.save(savepath + "/dm_freqs.npy", dm_freqs) - if self.chrom_components is not None: + if self.vary_chrom: chrom_freqs = np.arange( 1 / self.Tspan, (self.chrom_components + 0.001) / self.Tspan, @@ -600,16 +716,25 @@ def sample( pbar = tqdm(range(niter), colour="GREEN") pbar.set_description("Sampling %s" % self.name) +# num_gibbs = np.sum([int(self.vary_rn), int(self.vary_dm), int(self.vary_chrom)]) for ii in pbar: - if self.white_vary: + if self.vary_wn: xnew = self.update_white_params(xnew, iters=wniters) if self.inc_ecorr and "basis" in self.ecorr_type: xnew = self.update_basis_ecorr_params(xnew, iters=eciters) +# turn = ii % num_gibbs + #if self.vary_rn and turn == 0: self.update_b(xs=xnew) xnew = self.update_red_params(xs=xnew) - + #if self.vary_dm and turn == 1: + #self.update_b(xs=xnew) + xnew = self.update_dm_params(xs=xnew) + #if self.vary_chrom and turn == 2: + #self.update_b(xs=xnew) + xnew = self.update_chrom_params(xs=xnew) + fp[ii, -len_b:] = self._b fp[ii, 0:len_x] = np.hstack(xnew) From 3e4629eb0072264b290c40efa20f1fb2077b9a74 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Sat, 24 Aug 2024 21:20:33 -0700 Subject: [PATCH 025/193] adding gibbs to 
noise_utils --- src/pint_pal/gibbs_sampler.py | 8 ++--- src/pint_pal/noise_utils.py | 64 ++++++++++++++++++++++------------- 2 files changed, 43 insertions(+), 29 deletions(-) diff --git a/src/pint_pal/gibbs_sampler.py b/src/pint_pal/gibbs_sampler.py index b98e6c12..ac8021f8 100644 --- a/src/pint_pal/gibbs_sampler.py +++ b/src/pint_pal/gibbs_sampler.py @@ -47,8 +47,8 @@ def __init__( vary_rn=True, rn_components=30, tnequad=True, - log10rhomin=-9.0, - log10rhomax=-4.0, + #log10rhomin=-9.0, i think these would only apply to HD correlations + #log10rhomax=-4.0, on gibbs sampling. IRN and DM/CHROM are diagonal ?? vary_dm=False, dm_components=50, vary_chrom=False, @@ -123,8 +123,6 @@ def __init__( self.vary_wn = vary_wn self.tm_marg = tm_marg self.wn_names = ["efac", "equad", "ecorr"] - self.rhomin = log10rhomin - self.rhomax = log10rhomax self.rn_components = rn_components self.dm_components = dm_components self.chrom_components = chrom_components @@ -167,8 +165,6 @@ def __init__( psd="spectrum", prior="log-uniform", Tspan=self.Tspan, - #logmin=self.rhomin, - #logmax=self.rhomax, components=self.rn_components, gamma_val=None, ) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index bca27bdd..2b5b9ce7 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -8,6 +8,7 @@ import pint.models as pm from pint.models.parameter import maskParameter +from pint_pal.gibbs_sampler import GibbsSampler import matplotlib as mpl import matplotlib.pyplot as pl @@ -178,7 +179,13 @@ def analyze_noise(chaindir = './noise_run_chains/', burn_frac = 0.25, save_corne return wn_dict, rn_bf -def model_noise(mo, to, vary_red_noise = True, n_iter = int(1e5), using_wideband = False, resume = False, run_noise_analysis = True, wb_efac_sigma = 0.25, base_op_dir = "./"): +def model_noise(mo, to, sampler = 'PTMCMCSampler', + vary_red_noise = True, n_iter = int(1e5), + using_wideband = False, resume = False, + run_noise_analysis = True, + wb_efac_sigma = 0.25, base_op_dir = "./", + noise_kwargs = {}, sampler_kwargs = {}, + ): """ Setup enterprise PTA and perform MCMC noise analysis @@ -186,6 +193,7 @@ def model_noise(mo, to, vary_red_noise = True, n_iter = int(1e5), using_wideband ========== mo: PINT (or tempo2) timing model to: PINT (or tempo2) TOAs + sampler: either 'PTMCMCSampler' or 'gibbs' red_noise: include red noise in the model n_iter: number of MCMC iterations; Default: 1e5; Recommended > 5e4 using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False @@ -219,28 +227,38 @@ def model_noise(mo, to, vary_red_noise = True, n_iter = int(1e5), using_wideband #Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to) e_psr = Pulsar(mo, to) - #Setup a single pulsar PTA using enterprise_extensions - if not using_wideband: - pta = models.model_singlepsr_noise(e_psr, white_vary = True, red_var = vary_red_noise, is_wideband = False, use_dmdata = False, dmjump_var = False, wb_efac_sigma = wb_efac_sigma) - else: - pta = models.model_singlepsr_noise(e_psr, is_wideband = True, use_dmdata = True, white_vary = True, red_var = vary_red_noise, dmjump_var = False, wb_efac_sigma = wb_efac_sigma, ng_twg_setup = True) - dmjump_params = {} - for param in mo.params: - if param.startswith('DMJUMP'): - dmjump_param = getattr(mo,param) - dmjump_param_name = f"{pta.pulsars[0]}_{dmjump_param.key_value[0]}_dmjump" - dmjump_params[dmjump_param_name] = dmjump_param.value - pta.set_default_params(dmjump_params) - - #setup sampler using enterprise_extensions 
-    samp = sampler.setup_sampler(pta, outdir = outdir, resume = resume)
-
-    #Initial sample
-    x0 = np.hstack([p.sample() for p in pta.params])
-
-    #Start sampling
-
-    samp.sample(x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50,)
+    if sampler == 'PTMCMCSampler':
+        log.info(f"INFO: Running noise analysis with {sampler} for {e_psr.name}")
+        #Setup a single pulsar PTA using enterprise_extensions
+        if not using_wideband:
+            pta = models.model_singlepsr_noise(e_psr, white_vary = True, red_var = vary_red_noise, is_wideband = False, use_dmdata = False, dmjump_var = False, wb_efac_sigma = wb_efac_sigma, **noise_kwargs)
+        else:
+            pta = models.model_singlepsr_noise(e_psr, is_wideband = True, use_dmdata = True, white_vary = True, red_var = vary_red_noise, dmjump_var = False, wb_efac_sigma = wb_efac_sigma, ng_twg_setup = True, **noise_kwargs)
+            dmjump_params = {}
+            for param in mo.params:
+                if param.startswith('DMJUMP'):
+                    dmjump_param = getattr(mo,param)
+                    dmjump_param_name = f"{pta.pulsars[0]}_{dmjump_param.key_value[0]}_dmjump"
+                    dmjump_params[dmjump_param_name] = dmjump_param.value
+            pta.set_default_params(dmjump_params)
+        # FIXME: set groups here
+        #######
+        #setup sampler using enterprise_extensions
+        samp = sampler.setup_sampler(pta, outdir = outdir, resume = resume)
+
+        #Initial sample
+        x0 = np.hstack([p.sample() for p in pta.params])
+
+        #Start sampling
+
+        samp.sample(x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs)
+    elif sampler == 'gibbs':
+        log.info(f"INFO: Running noise analysis with {sampler} for {e_psr.name}")
+        samp = GibbsSampler(e_psr,
+                            **noise_kwargs,
+                            )
+        samp.sample(niter=n_iter, save_path=outdir, **sampler_kwargs)
 
 def convert_to_RNAMP(value):
     """

From 14f848b091cf7a221f4ac46af9678724c80b5924 Mon Sep 17 00:00:00 2001
From: Jeremy Baier
Date: Sat, 24 Aug 2024 21:32:36 -0700
Subject: [PATCH 026/193] un-hard-coding model inclusion

---
 src/pint_pal/gibbs_sampler.py | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/src/pint_pal/gibbs_sampler.py b/src/pint_pal/gibbs_sampler.py
index ac8021f8..8675ffdd 100644
--- a/src/pint_pal/gibbs_sampler.py
+++ b/src/pint_pal/gibbs_sampler.py
@@ -130,10 +130,15 @@ def __init__(
         self.vary_dm = vary_dm
         self.vary_chrom = vary_chrom
         self.include_quadratic = include_quadratic
-        self.low = 10 ** (2 * self.rhomin)
-        self.high = 10 ** (2 * self.rhomax)
-
+        #self.low = 10 ** (2 * self.rhomin)
+        #self.high = 10 ** (2 * self.rhomax)
         # Making the pta object
+        # need to keep track of which parameters are being varied
+        # they appear alphabetically in signal_collections
+        # FIXME: this would probably break if you added a solar wind model
+        self.rn_idx = -1
+        self.dm_idx = -1 - int(self.vary_rn)
+        self.chrom_idx = -1 - int(self.vary_rn) - int(self.vary_dm)
         if self.tm_marg:
             tm = gp_signals.MarginalizingTimingModel(use_svd=True)
         if self.vary_wn:
@@ -191,6 +196,7 @@ def __init__(
             name='chrom',
             components=self.chrom_components,
         )
+
         s = tm + wn
 
@@ -277,6 +283,7 @@ def __init__(
         #print("rn", self.rn_id)
         #print("dm", self.dm_id)
         #print("chrom", self.chrom_id)
+
 
     @cached_property
     def params(self):
@@ -345,7 +352,7 @@ def update_red_params(self, xs):
         Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low))
         x = np.random.default_rng().uniform(0, 1, size=tau.shape)
         rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low))
-        xs[-1] = 0.5 * np.log10(rhonew)
+        xs[self.rn_idx] = 0.5 * np.log10(rhonew)
         return xs
 
     def update_dm_params(self, xs):
@@ -359,7 +366,7 @@ def update_dm_params(self, xs):
         Norm = 1
/ (np.exp(-tau / self.high) - np.exp(-tau / self.low))
         x = np.random.default_rng().uniform(0, 1, size=tau.shape)
         rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low))
-        xs[-2] = 0.5 * np.log10(rhonew)
+        xs[self.dm_idx] = 0.5 * np.log10(rhonew)
         return xs
 
     def update_chrom_params(self, xs):
@@ -373,7 +380,7 @@ def update_chrom_params(self, xs):
         Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low))
         x = np.random.default_rng().uniform(0, 1, size=tau.shape)
         rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low))
-        xs[-3] = 0.5 * np.log10(rhonew)
+        xs[self.chrom_idx] = 0.5 * np.log10(rhonew)
         return xs
 
     def update_b(self, xs):

From 2be8f09cdb8375301c051dc7118af4d4dbab2447 Mon Sep 17 00:00:00 2001
From: Jeremy Baier
Date: Wed, 28 Aug 2024 12:02:18 -0700
Subject: [PATCH 027/193] fix sampler import bug

---
 src/pint_pal/noise_utils.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py
index 2b5b9ce7..0b573fc1 100644
--- a/src/pint_pal/noise_utils.py
+++ b/src/pint_pal/noise_utils.py
@@ -179,7 +179,7 @@ def analyze_noise(chaindir = './noise_run_chains/', burn_frac = 0.25, save_corne
 
     return wn_dict, rn_bf
 
-def model_noise(mo, to, sampler = 'PTMCMCSampler',
+def model_noise(mo, to, which_sampler = 'PTMCMCSampler',
                 vary_red_noise = True, n_iter = int(1e5),
                 using_wideband = False, resume = False,
                 run_noise_analysis = True,
@@ -227,7 +227,7 @@ def model_noise(mo, to, sampler = 'PTMCMCSampler',
     #Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to)
     e_psr = Pulsar(mo, to)
 
-    if sampler == 'PTMCMCSampler':
-        log.info(f"INFO: Running noise analysis with {sampler} for {e_psr.name}")
+    if which_sampler == 'PTMCMCSampler':
+        log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}")
         #Setup a single pulsar PTA using enterprise_extensions
         if not using_wideband:
@@ -252,7 +252,7 @@ def model_noise(mo, to, sampler = 'PTMCMCSampler',
         #Start sampling
 
         samp.sample(x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs)
-    elif sampler == 'gibbs':
-        log.info(f"INFO: Running noise analysis with {sampler} for {e_psr.name}")
+    elif which_sampler == 'gibbs':
+        log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}")
         samp = GibbsSampler(e_psr,

From 6eb337e918df609f4f7379604a8a9b5eb5f0d707 Mon Sep 17 00:00:00 2001
From: tcromartie
Date: Wed, 4 Sep 2024 16:49:39 +0000
Subject: [PATCH 028/193] initial changes to plot_utils incl.
adding plot_settings.yaml --- src/pint_pal/plot_settings.yaml | 281 +++ src/pint_pal/plot_utils.py | 4008 +++++++++++++++++++------------ 2 files changed, 2771 insertions(+), 1518 deletions(-) create mode 100644 src/pint_pal/plot_settings.yaml diff --git a/src/pint_pal/plot_settings.yaml b/src/pint_pal/plot_settings.yaml new file mode 100644 index 00000000..9f67ae96 --- /dev/null +++ b/src/pint_pal/plot_settings.yaml @@ -0,0 +1,281 @@ +obs_c: { + "ao": "#6BA9E2", + "arecibo": "#6BA9E2", + "gbt": "#61C853", + "vla": "#40635F", + "CHIME": "#ECE133", + "nancay": "#407BD5", + "ncyobs": "#407BD5", + "effelsberg_asterix": "#407BD5", + "effelsberg": "#407BD5", + "leap": "#ECE133", + "jodrell": "#407BD5", + "jbroach": "#407BD5", + "wsrt": "#E5A4CB", + "parkes": "#BE0119", + "gmrt": "#855CA0", + "meerkat": "#FD9927", + "None": "#808080", +} + +pta_c: { + "InPTA": "#855CA0", + "EPTA": "#407BD5", + "NANOGrav": "#61C853", + "PPTA": "#BE0119", + "MPTA": "#FD9927", + "None": "#808080", +} + +febe_c: { + "327_ASP": "#6BA9E2", + "327_PUPPI": "#6BA9E2", + "430_ASP": "#6BA9E2", + "430_PUPPI": "#6BA9E2", + "L-wide_ASP": "#6BA9E2", + "L-wide_PUPPI": "#6BA9E2", + "Rcvr1_2_GASP": "#61C853", + "Rcvr1_2_GUPPI": "#61C853", + "Rcvr1_2_VEGAS": "hotpink", + "Rcvr_800_GASP": "#61C853", + "Rcvr_800_GUPPI": "#61C853", + "Rcvr_800_VEGAS": "violet", + "S-wide_ASP": "#6BA9E2", + "S-wide_PUPPI": "#6BA9E2", + "1.5GHz_YUPPI": "#40635F", + "3GHz_YUPPI": "#40635F", + "6GHz_YUPPI": "#40635F", + "CHIME": "#ECE133", + "unknown_LEAP": "#FD9927", + "NRT.BON.1600": "#FD9927", + "NRT.BON.1400": "#FD9927", + "NRT.BON.2000": "#FD9927", + "NRT.NUPPI.1484": "#FD9927", + "NRT.NUPPI.1854": "#FD9927", + "NRT.NUPPI.2154": "#FD9927", + "NRT.NUPPI.2539": "#FD9927", + "EFF.EBPP.1360": "#855CA0", + "EFF.EBPP.1410": "#855CA0", + "EFF.EBPP.2639": "#855CA0", + "S60-2_asterix": "#855CA0", + "JBO.DFB.1400": "#407BD5", + "JBO.DFB.1520": "#407BD5", + "WSRT.P2.1380": "#E5A4CB", + "WSRT.P1.1380.C": "#E5A4CB", + "WSRT.P1.2273.C": "#E5A4CB", + "WSRT.P1.323.C": "#40635F", + "WSRT.P1.367.C": "#40635F", + "P217-3_asterix": "#855CA0", + "unknown_asterix": "#855CA0", + "P200-3_asterix": "#855CA0", + "P217-3_PuMa2": "#855CA0", + "P217-6_LEAP": "#855CA0", + "P217-3_LEAP": "#855CA0", + "R217-3_LEAP": "#855CA0", + "P200-3_LEAP": "#855CA0", + "JBO.ROACH.1620": "#407BD5", + "1050CM_PDFB4": "#BE0119", + "1050CM_PDFB1": "#BE0119", + "1050CM_PDFB2": "#BE0119", + "1050CM_PDFB3": "#BE0119", + "1050CM_WBCORR": "#BE0119", + "1050CM_CPSR2": "#BE0119", + "1050CM_CASPSR": "#BE0119", + "MULTI_CPSR2m": "#BE0119", + "MULTI_PDFB1": "#BE0119", + "H-OH_PDFB1": "#BE0119", + "H-OH_CPSR2n": "#BE0119", + "H-OH_CPSR2m": "#BE0119", + "H-OH_PDFB4": "#BE0119", + "MULTI_CPSR2m": "#BE0119", + "MULTI_CPSR2n": "#BE0119", + "MULTI_WBCORR": "#BE0119", + "MULTI_PDFB2": "#BE0119", + "MULTI_PDFB3": "#BE0119", + "MULTI_PDFB4": "#BE0119", + "UWL_Medusa": "#BE0119", + "UWL_CASPSR": "#BE0119", + "UWL_PDFB4": "#BE0119", + "UWL_PDFB4_10CM": "#BE0119", + "UWL_PDFB4_40CM": "#BE0119", + "None": "#808080", + "unknown_asterix": "#855CA0", + "CHIME": "#ECE133", +} + +ng20_c: { + "CHIME": "#FFA733", + "327_ASP": "#BE0119", + "327_PUPPI": "#BE0119", + "430_ASP": "#FD9927", + "430_PUPPI": "#FD9927", + "L-wide_ASP": "#BDB6F6", + "L-wide_PUPPI": "#BDB6F6", + # "L-wide_ASP": "#C3BEF7", + # "L-wide_PUPPI": "#A393BF", + # "Rcvr1_2_GASP": "#81BDEE", + "Rcvr1_2_GASP": "#79A3E2", + "Rcvr1_2_GUPPI": "#79A3E2", + "Rcvr1_2_VEGAS": "#79A3E2", + "Rcvr_800_GASP": "#8DD883", + "Rcvr_800_GUPPI": "#8DD883", + "Rcvr_800_VEGAS": 
"#8DD883", + # "VEGAS": "#465922", + # "S-wide_ASP": "#D81159", + # "S-wide_PUPPI": "#D81159", + "S-wide_ASP": "#C4457A", + "S-wide_PUPPI": "#C4457A", + "1.5GHz_YUPPI": "#EBADCB", + "3GHz_YUPPI": "#E79CC1", + "6GHz_YUPPI": "#DB6BA1", + # "CHIME": "#F3689B", + # "Rcvr_CHIME": "#F3689B", +} + +obs_m: { + "ao": "x", + "arecibo": "x", + "gbt": "x", + "vla": "x", + "CHIME": "x", + "leap": "x", + "nancay": "x", + "ncyobs": "x", + "effelsberg_asterix": "x", + "effelsberg": "x", + "jodrell": "x", + "jbroach": "x", + "wsrt": "x", + "parkes": "x", + "gmrt": "x", + "meerkat": "x", + "None": "x", +} + +pta_m: { + "InPTA": "x", + "EPTA": "x", + "NANOGrav": "x", + "PPTA": "x", + "MPTA": "x", + "None": "x", +} + +ng20_m: { + "327_ASP": "x", + "327_PUPPI": "x", + "430_ASP": "x", + "430_PUPPI": "x", + "L-wide_ASP": "x", + "L-wide_PUPPI": "x", + "Rcvr1_2_GASP": "x", + "Rcvr1_2_GUPPI": "x", + "Rcvr1_2_VEGAS": "x", + "Rcvr_800_GASP": "x", + "Rcvr_800_GUPPI": "x", + "Rcvr_800_VEGAS": "x", + "S-wide_ASP": "x", + "S-wide_PUPPI": "x", + "1.5GHz_YUPPI": "x", + "3GHz_YUPPI": "x", + "6GHz_YUPPI": "x", + "CHIME": "x", +} + +febe_m: { + "327_ASP": "x", + "327_PUPPI": "x", + "430_ASP": "x", + "430_PUPPI": "x", + "L-wide_ASP": "x", + "L-wide_PUPPI": "x", + "Rcvr1_2_GASP": "x", + "Rcvr1_2_GUPPI": "x", + "Rcvr1_2_VEGAS": "x", + "Rcvr_800_GASP": "o", + "Rcvr_800_GUPPI": "o", + "Rcvr_800_VEGAS": "o", + "S-wide_ASP": "o", + "S-wide_PUPPI": "o", + "1.5GHz_YUPPI": "x", + "3GHz_YUPPI": "o", + "6GHz_YUPPI": "^", + "CHIME": "x", + "NRT.BON.1600": "x", + "NRT.BON.1400": "o", + "NRT.BON.2000": "^", + "NRT.NUPPI.1484": "x", + "NRT.NUPPI.1854": "o", + "NRT.NUPPI.2154": "^", + "NRT.NUPPI.2539": "^", + "EFF.EBPP.1360": "o", + "EFF.EBPP.1410": "x", + "EFF.EBPP.2639": "^", + "S60-2_asterix": "v", + "P217-3_asterix": "x", + "P200-3_asterix": "v", + "unknown_asterix": "v", + "P217-3_PuMa2": "x", + "P200-3_LEAP": "v", + "P217-6_LEAP": "x", + "P217-3_LEAP": "x", + "R217-3_LEAP": "x", + "unknown_LEAP": "x", + "JBO.DFB.1400": "x", + "JBO.DFB.1520": "o", + "JBO.ROACH.1620": "^", + "WSRT.P2.1380": "v", + "WSRT.P1.1380.C": "x", + "WSRT.P1.2273.C": "o", + "WSRT.P1.323.C": "x", + "WSRT.P1.367.C": "x", + "1050CM_PDFB4": "x", + "1050CM_PDFB1": "x", + "1050CM_PDFB2": "x", + "1050CM_PDFB3": "x", + "1050CM_WBCORR": "x", + "1050CM_CPSR2": "x", + "1050CM_CPSR2m": "x", + "1050CM_CASPSR": "x", + "MULTI_CPSR2m": "o", + "MULTI_PDFB1": "o", + "H-OH_PDFB1": "^", + "H-OH_CPSR2m": "^", + "H-OH_CPSR2n": "^", + "H-OH_PDFB4": "^", + "MULTI_CPSR2n": "o", + "MULTI_WBCORR": "o", + "MULTI_PDFB2": "o", + "MULTI_PDFB3": "o", + "MULTI_PDFB4": "o", + "UWL_Medusa": "v", + "UWL_PDFB4": "v", + "UWL_PDFB4_10CM": "v", + "UWL_PDFB4_40CM": "v", + "UWL_CASPSR": "v", + "None": "x", + "3GHz_YUPPI": "x", + "6GHz_YUPPI": "x", + "CHIME": "x", +} + +label_names: { + "327_ASP": "ASP 327 MHz", + "327_PUPPI": "PUPPI 327 MHz", + "430_ASP": "ASP 430 MHz", + "430_PUPPI": "PUPPI 430 MHz", + "L-wide_ASP": "ASP L-wide", + "L-wide_PUPPI": "PUPPI L-wide", + "Rcvr1_2_GASP": "GASP L-band", + "Rcvr1_2_GUPPI": "GUPPI L-band", + "Rcvr1_2_VEGAS": "VEGAS L-band", + "Rcvr_800_GASP": "GASP 820 MHz", + "Rcvr_800_GUPPI": "GUPPI 820 MHz", + "Rcvr_800_VEGAS": "VEGAS 820 MHz", + "S-wide_ASP": "ASP S-wide", + "S-wide_PUPPI": "PUPPI S-wide", + "1.5GHz_YUPPI": "YUPPI 1.5 GHz", + "3GHz_YUPPI": "YUPPI 3 GHz", + "6GHz_YUPPI": "YUPPI 6 GHz", + "CHIME": "CHIME", +} diff --git a/src/pint_pal/plot_utils.py b/src/pint_pal/plot_utils.py index b93af983..f959553e 100644 --- a/src/pint_pal/plot_utils.py +++ 
b/src/pint_pal/plot_utils.py @@ -2,250 +2,75 @@ # -*- coding: utf-8 -*- """ Created on Tue Feb 4 09:30:59 2020 - @author: bshapiroalbert +Code since butchered by many timers. """ import numpy as np import matplotlib.pyplot as plt -import sys, copy +import copy from astropy import log import astropy.units as u -# Import PINT +import yaml + import pint.toa as toa import pint.models as model import pint.fitter as fitter import pint.utils as pu import subprocess -# import extra util functions brent wrote + from pint_pal.utils import * import os from pint_pal.timingconfiguration import TimingConfiguration import pint_pal.lite_utils as lu -# color blind friends colors and markers? -#CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3', '#999999', '#e41a1c', '#dede00'] -#MARKERS = ['.', 'v', 's', 'x', '^', 'D', 'p', 'P', '*'] - -# Color scheme for consistent reciever-backend combos, same as published 12.5 yr -colorschemes = {'observatories':{ - "ao": "#6BA9E2", - "arecibo": "#6BA9E2", - "gbt": "#61C853", - "vla": "#40635F", - "CHIME": "#ECE133", - "nancay": "#407BD5", - "ncyobs": "#407BD5", - "effelsberg_asterix": "#407BD5", - "effelsberg": "#407BD5", - "leap": "#ECE133", - "jodrell": "#407BD5", - "jbroach": "#407BD5", - "wsrt": "#E5A4CB", - "parkes": "#BE0119", - "gmrt": "#855CA0", - "meerkat": "#FD9927", - "None": "#808080" - }, - - 'pta':{ - "InPTA": "#855CA0", - "EPTA": "#407BD5", - "NANOGrav": "#61C853", - "PPTA": "#BE0119", - "MPTA": "#FD9927", - "None": "#808080" - }, - 'febe':{ - "327_ASP": "#6BA9E2", - "327_PUPPI": "#6BA9E2", - "430_ASP": "#6BA9E2", - "430_PUPPI": "#6BA9E2", - "L-wide_ASP": "#6BA9E2", - "L-wide_PUPPI": "#6BA9E2", - "Rcvr1_2_GASP": "#61C853", - "Rcvr1_2_GUPPI": "#61C853", - "Rcvr1_2_VEGAS": "#61C853", - "Rcvr_800_GASP": "#61C853", - "Rcvr_800_GUPPI": "#61C853", - "Rcvr_800_VEGAS": "#61C853", - "S-wide_ASP": "#6BA9E2", - "S-wide_PUPPI": "#6BA9E2", - "1.5GHz_YUPPI": "#40635F", - "3GHz_YUPPI": "#40635F", - "6GHz_YUPPI": "#40635F", - "CHIME": "#ECE133", - "unknown_LEAP": "#FD9927", - "NRT.BON.1600": "#FD9927", - "NRT.BON.1400": "#FD9927", - "NRT.BON.2000": "#FD9927", - "NRT.NUPPI.1484": "#FD9927", - "NRT.NUPPI.1854": "#FD9927", - "NRT.NUPPI.2154": "#FD9927", - "NRT.NUPPI.2539": "#FD9927", - "EFF.EBPP.1360": "#855CA0", - "EFF.EBPP.1410": "#855CA0", - "EFF.EBPP.2639": "#855CA0", - "S60-2_asterix": "#855CA0", - "JBO.DFB.1400": "#407BD5", - "JBO.DFB.1520": "#407BD5", - "WSRT.P2.1380": "#E5A4CB", - "WSRT.P1.1380.C": "#E5A4CB", - "WSRT.P1.2273.C": "#E5A4CB", - "WSRT.P1.323.C": "#40635F", - "WSRT.P1.367.C": "#40635F", - "P217-3_asterix": "#855CA0", - "unknown_asterix": "#855CA0", - "P200-3_asterix": "#855CA0", - "P217-3_PuMa2": "#855CA0", - "P217-6_LEAP": "#855CA0", - "P217-3_LEAP": "#855CA0", - "R217-3_LEAP": "#855CA0", - "P200-3_LEAP": "#855CA0", - "JBO.ROACH.1620": "#407BD5", - "1050CM_PDFB4": "#BE0119", - "1050CM_PDFB1": "#BE0119", - "1050CM_PDFB2": "#BE0119", - "1050CM_PDFB3": "#BE0119", - "1050CM_WBCORR": "#BE0119", - "1050CM_CPSR2": "#BE0119", - "1050CM_CASPSR": "#BE0119", - "MULTI_CPSR2m": "#BE0119", - "MULTI_PDFB1": "#BE0119", - "H-OH_PDFB1": "#BE0119", - "H-OH_CPSR2n": "#BE0119", - "H-OH_CPSR2m": "#BE0119", - "H-OH_PDFB4": "#BE0119", - "MULTI_CPSR2m": "#BE0119", - "MULTI_CPSR2n": "#BE0119", - "MULTI_WBCORR": "#BE0119", - "MULTI_PDFB2": "#BE0119", - "MULTI_PDFB3": "#BE0119", - "MULTI_PDFB4": "#BE0119", - "UWL_Medusa": "#BE0119", - "UWL_CASPSR": "#BE0119", - "UWL_PDFB4": "#BE0119", - "UWL_PDFB4_10CM": "#BE0119", - "UWL_PDFB4_40CM": "#BE0119", - 
"None": "#808080", - "unknown_asterix": "#855CA0", - "CHIME": "#ECE133" - }} - - -# marker dictionary to be used if desired, currently all 'x' -markers = {'observatories':{ - "ao": "x", - "arecibo": "x", - "gbt": "x", - "vla": "x", - "CHIME": "x", - "leap": "x", - "nancay": "x", - "ncyobs": "x", - "effelsberg_asterix": "x", - "effelsberg": "x", - "jodrell": "x", - "jbroach": "x", - "wsrt": "x", - "parkes": "x", - "gmrt": "x", - "meerkat": "x", - "None": "x" - }, - 'pta':{ - "InPTA": "x", - "EPTA": "x", - "NANOGrav": "x", - "PPTA": "x", - "MPTA": "x", - "None": "x" - }, - 'febe': {"327_ASP": "x", - "327_PUPPI": "x", - "430_ASP": "x", - "430_PUPPI": "x", - "L-wide_ASP": "x", - "L-wide_PUPPI": "x", - "Rcvr1_2_GASP": "x", - "Rcvr1_2_GUPPI": "x", - "Rcvr1_2_VEGAS": "x", - "Rcvr_800_GASP": "o", - "Rcvr_800_GUPPI": "o", - "Rcvr_800_VEGAS": "o", - "S-wide_ASP": "o", - "S-wide_PUPPI": "o", - "1.5GHz_YUPPI": "x", - "3GHz_YUPPI": "o", - "6GHz_YUPPI": "^", - "CHIME": "x", - "NRT.BON.1600": "x", - "NRT.BON.1400": "o", - "NRT.BON.2000": "^", - "NRT.NUPPI.1484": "x", - "NRT.NUPPI.1854": "o", - "NRT.NUPPI.2154": "^", - "NRT.NUPPI.2539": "^", - "EFF.EBPP.1360": "o", - "EFF.EBPP.1410": "x", - "EFF.EBPP.2639": "^", - "S60-2_asterix": "v", - "P217-3_asterix": "x", - "P200-3_asterix": "v", - "unknown_asterix": "v", - "P217-3_PuMa2": "x", - "P200-3_LEAP": "v", - "P217-6_LEAP": "x", - "P217-3_LEAP": "x", - "R217-3_LEAP": "x", - "unknown_LEAP": "x", - "JBO.DFB.1400": "x", - "JBO.DFB.1520": "o", - "JBO.ROACH.1620": "^", - "WSRT.P2.1380": "v", - "WSRT.P1.1380.C": "x", - "WSRT.P1.2273.C": "o", - "WSRT.P1.323.C": "x", - "WSRT.P1.367.C": "x", - "1050CM_PDFB4": "x", - "1050CM_PDFB1": "x", - "1050CM_PDFB2": "x", - "1050CM_PDFB3": "x", - "1050CM_WBCORR": "x", - "1050CM_CPSR2": "x", - "1050CM_CPSR2m": "x", - "1050CM_CASPSR": "x", - "MULTI_CPSR2m": "o", - "MULTI_PDFB1": "o", - "H-OH_PDFB1": "^", - "H-OH_CPSR2m": "^", - "H-OH_CPSR2n": "^", - "H-OH_PDFB4": "^", - "MULTI_CPSR2n": "o", - "MULTI_WBCORR": "o", - "MULTI_PDFB2": "o", - "MULTI_PDFB3": "o", - "MULTI_PDFB4": "o", - "UWL_Medusa": "v", - "UWL_PDFB4": "v", - "UWL_PDFB4_10CM": "v", - "UWL_PDFB4_40CM": "v", - "UWL_CASPSR": "v", - "None": "x", - "3GHz_YUPPI": "x", - "6GHz_YUPPI": "x", - "CHIME": "x", - }} -# Define the color map option -#colorscheme = colorschemes['thankful_2'] -#colorscheme = thesis_colorschemes['thesis'] +PACKAGE_DIR = os.path.dirname(__file__) +with open(os.path.join(PACKAGE_DIR, "plot_settings.yaml"), "r") as cf: + config = yaml.safe_load(cf) +# plot_settings.yaml now has a NANOGrav 20-yr specific colorscheme (ng20_c). 
+# If you want to go back to the old colors (or are doing DR3), change this to
+# colorschemes["febe"] = config["febe_c"] AND markers["febe"] = config["febe_m"]
+colorschemes, markers, labels = {}, {}, {}
+colorschemes["obs"] = config["obs_c"]
+colorschemes["pta"] = config["pta_c"]
+colorschemes["febe"] = config["ng20_c"]
+markers["obs"] = config["obs_m"]
+markers["pta"] = config["pta_m"]
+markers["febe"] = config["ng20_m"]
+labels = config["label_names"]
 
 
-def call(x):
-    subprocess.call(x,shell=True)
-
-def plot_residuals_time(fitter, restype = 'postfit', colorby='f', plotsig = False, avg = False, whitened = False, \
-                        save = False, legend = True, title = True, axs = None, mixed_ecorr=False, **kwargs):
+def call(x):
+    subprocess.call(x, shell=True)
+
+
+def set_color_and_marker(colorby):
+    if colorby == "pta":
+        colorscheme = colorschemes["pta"]
+        markerscheme = markers["pta"]
+    elif colorby == "obs":
+        colorscheme = colorschemes["obs"]
+        markerscheme = markers["obs"]
+    elif colorby == "f":
+        colorscheme = colorschemes["febe"]
+        markerscheme = markers["febe"]
+    return colorscheme, markerscheme
+
+
+def plot_residuals_time(
+    fitter,
+    restype="postfit",
+    colorby="f",
+    plotsig=False,
+    avg=False,
+    whitened=False,
+    save=False,
+    legend=True,
+    title=True,
+    axs=None,
+    mixed_ecorr=False,
+    **kwargs,
+):
     """
     Make a plot of the residuals vs. time
 
     if fitter.is_wideband:
         NB = False
         if avg == True:
-            raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.")
+            raise ValueError(
+                "Cannot epoch average wideband residuals, please change 'avg' to False."
+            )
     else:
         NB = True
 
     # Check if want epoch averaged residuals
-    if avg == True and restype == 'prefit' and mixed_ecorr == True:
+    if avg == True and restype == "prefit" and mixed_ecorr == True:
         avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
-        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True)
+        no_avg_dict = no_ecorr_average(
+            fitter.toas, fitter.resids_init, use_noise_model=True
+        )
-    elif avg == True and restype == 'postfit' and mixed_ecorr == True:
+    elif avg == True and restype == "postfit" and mixed_ecorr == True:
         avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
-        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True)
+        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True)
-    elif avg == True and restype == 'both' and mixed_ecorr == True:
+    elif avg == True and restype == "both" and mixed_ecorr == True:
         avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
-        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True)
+        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True)
         avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True)
-        no_avg_dict_pre = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True)
+        no_avg_dict_pre = no_ecorr_average(
+            fitter.toas, fitter.resids_init, use_noise_model=True
+        )
-    elif avg == True and restype == 'prefit' and mixed_ecorr == False:
+    elif avg == True and restype == "prefit" and mixed_ecorr == False:
         avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
-    elif avg == True and restype == 'postfit' and mixed_ecorr==False:
+    elif avg == True and restype == "postfit" and mixed_ecorr == False:
        avg_dict = 
fitter.resids.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'both' and mixed_ecorr == False: + elif avg == True and restype == "both" and mixed_ecorr == False: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - # Get residuals - if 'res' in kwargs.keys(): - res = kwargs['res'] + if "res" in kwargs.keys(): + res = kwargs["res"] else: - if restype == 'prefit': + if restype == "prefit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) - elif avg==True and mixed_ecorr == False: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) + elif avg == True and mixed_ecorr == False: + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'postfit': + res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) + elif restype == "postfit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) elif avg == True: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'both': + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + elif restype == "both": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) - res_pre_no_avg = no_avg_dict_pre['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) + res_pre_no_avg = no_avg_dict_pre["time_resids"].to(u.us) elif avg == True and mixed_ecorr == False: - res = avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) res_pre = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - res_pre = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) else: - raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\ - %(restype)) + raise ValueError( + "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'." 
+ % (restype) + ) # Check if we want whitened residuals - if whitened == True and ('res' not in kwargs.keys()): + if whitened == True and ("res" not in kwargs.keys()): if avg == True and mixed_ecorr == True: - if restype != 'both': + if restype != "both": res = whiten_resids(avg_dict, restype=restype) res_no_avg = whiten_resids(no_avg_dict, restype=restype) else: - res = whiten_resids(avg_dict_pre, restype='prefit') - res_pre = whiten_resids(avg_dict, restype='postfit') + res = whiten_resids(avg_dict_pre, restype="prefit") + res_pre = whiten_resids(avg_dict, restype="postfit") res_pre = res_pre.to(u.us) - res_no_avg = whiten_resids(avg_dict_pre, restype='prefit') - res_pre_no_avg = whiten_resids(avg_dict, restype='postfit') + res_no_avg = whiten_resids(avg_dict_pre, restype="prefit") + res_pre_no_avg = whiten_resids(avg_dict, restype="postfit") res_pre_no_avg = res_pre_no_avg.to(u.us) res = res.to(u.us) res_no_avg = res_no_avg.to(u.us) elif avg == True and mixed_ecorr == False: - if restype != 'both': + if restype != "both": res = whiten_resids(avg_dict, restype=restype) else: - res = whiten_resids(avg_dict_pre, restype='prefit') - res_pre = whiten_resids(avg_dict, restype='postfit') + res = whiten_resids(avg_dict_pre, restype="prefit") + res_pre = whiten_resids(avg_dict, restype="postfit") res_pre = res_pre.to(u.us) res = res.to(u.us) else: - if restype != 'both': + if restype != "both": res = whiten_resids(fitter, restype=restype) else: - res = whiten_resids(fitter, restype='prefit') - res_pre = whiten_resids(fitter, restype='postfit') + res = whiten_resids(fitter, restype="prefit") + res_pre = whiten_resids(fitter, restype="postfit") res_pre = res_pre.to(u.us) res = res.to(u.us) # Get errors - if 'errs' in kwargs.keys(): - errs = kwargs['errs'] + if "errs" in kwargs.keys(): + errs = kwargs["errs"] else: - if restype == 'prefit': + if restype == "prefit": if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.toas.get_errors().to(u.us) - elif restype == 'postfit': + elif restype == "postfit": if NB == True: if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) - elif restype == 'both': + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) + elif restype == "both": if NB == True: if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_pre = avg_dict_pre['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) - errs_no_avg_pre = no_avg_dict_pre['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_pre = avg_dict_pre["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) + errs_no_avg_pre = no_avg_dict_pre["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) - errs_pre = avg_dict_pre['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_pre = avg_dict_pre["errors"].to(u.us) else: errs = 
fitter.resids.get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) # Get MJDs - if 'mjds' in kwargs.keys(): - mjds = kwargs['mjds'] + if "mjds" in kwargs.keys(): + mjds = kwargs["mjds"] else: mjds = fitter.toas.get_mjds().value - if avg == True and mixed_ecorr == True : - mjds = avg_dict['mjds'].value - mjds_no_avg = no_avg_dict['mjds'].value - years_no_avg = (mjds_no_avg - 51544.0)/365.25 + 2000.0 + if avg == True and mixed_ecorr == True: + mjds = avg_dict["mjds"].value + mjds_no_avg = no_avg_dict["mjds"].value + years_no_avg = (mjds_no_avg - 51544.0) / 365.25 + 2000.0 elif avg == True and mixed_ecorr == False: - mjds = avg_dict['mjds'].value + mjds = avg_dict["mjds"].value # Convert to years - years = (mjds - 51544.0)/365.25 + 2000.0 + years = (mjds - 51544.0) / 365.25 + 2000.0 # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr. # Create combined arrays @@ -453,48 +285,41 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='f', plotsig = Fals combo_res = np.hstack((res, res_no_avg)) combo_errs = np.hstack((errs, errs_no_avg)) combo_years = np.hstack((years, years_no_avg)) - if restype =='both': + if restype == "both": combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) combo_res_pre = np.hstack((res_pre, res_no_avg_pre)) # Get colorby flag values (obs, PTA, febe, etc.) - if 'colorby' in kwargs.keys(): - cb = kwargs['colorby'] + if "colorby" in kwargs.keys(): + cb = kwargs["colorby"] else: cb = np.array(fitter.toas[colorby]) -#. Seems to run a little faster but not robust to obs? -# cb = np.array(fitter.toas.get_flag_value(colorby)[0]) + # . Seems to run a little faster but not robust to obs? + # cb = np.array(fitter.toas.get_flag_value(colorby)[0]) if avg == True: avg_cb = [] - for iis in avg_dict['indices']: + for iis in avg_dict["indices"]: avg_cb.append(cb[iis[0]]) if mixed_ecorr == True: no_avg_cb = [] - for jjs in no_avg_dict['indices']: + for jjs in no_avg_dict["indices"]: no_avg_cb.append(cb[jjs]) no_ecorr_cb = np.array(no_avg_cb) cb = np.array(avg_cb) # Get the set of unique flag values - if avg==True and mixed_ecorr==True: - cb = np.hstack((cb,no_ecorr_cb)) + if avg == True and mixed_ecorr == True: + cb = np.hstack((cb, no_ecorr_cb)) CB = set(cb) + colorscheme, markerscheme = set_color_and_marker(colorby) - if colorby== 'pta': - colorscheme = colorschemes['pta'] - elif colorby == 'obs': - colorscheme = colorschemes['observatories'] - elif colorby == 'f': - colorscheme = colorschemes['febe'] - - - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (10,5) + figsize = (10, 5) if axs == None: fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) @@ -502,107 +327,183 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='f', plotsig = Fals fig = plt.gcf() ax1 = axs - for i, c in enumerate(CB): - inds = np.where(cb==c)[0] + inds = np.where(cb == c)[0] if not inds.tolist(): cb_label = "" else: cb_label = cb[inds][0] # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: try: mkr = markers[cb_label] - if restype == 'both': - mkr_pre = '.' + if restype == "both": + mkr_pre = "." 
            except Exception:
-                mkr = 'x'
+                mkr = "x"
                 log.log(1, "Color by Flag doesn't have a marker label!!")
-        if 'color' in kwargs.keys():
-            clr = kwargs['color']
+        if "color" in kwargs.keys():
+            clr = kwargs["color"]
         else:
             try:
                 clr = colorscheme[cb_label]
             except Exception:
-                clr = 'k'
+                clr = "k"
                 log.log(1, "Color by Flag doesn't have a color!!")
-        if 'alpha' in kwargs.keys():
-            alpha = kwargs['alpha']
+        if "alpha" in kwargs.keys():
+            alpha = kwargs["alpha"]
         else:
             alpha = 0.5
         if avg == True and mixed_ecorr == True:
             if plotsig:
-                combo_sig = combo_res[inds]/combo_[inds]
-                ax1.errorbar(combo_years[inds], combo_sig, yerr=len(combo_errs[inds])*[1], fmt=mkr, \
-                    color=clr, label=cb_label, alpha = alpha, picker=True)
-                if restype == 'both':
-                    combo_sig_pre = combo_res_pre[inds]/combo_errs_pre[inds]
-                    ax1.errorbar(combo_years[inds], combo_sig_pre, yerr=len(combo_errs_pre[inds])*[1], fmt=mkr_pre, \
-                        color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True)
+                combo_sig = combo_res[inds] / combo_errs[inds]
+                ax1.errorbar(
+                    combo_years[inds],
+                    combo_sig,
+                    yerr=len(combo_errs[inds]) * [1],
+                    fmt=mkr,
+                    color=clr,
+                    label=cb_label,
+                    alpha=alpha,
+                    picker=True,
+                )
+                if restype == "both":
+                    combo_sig_pre = combo_res_pre[inds] / combo_errs_pre[inds]
+                    ax1.errorbar(
+                        combo_years[inds],
+                        combo_sig_pre,
+                        yerr=len(combo_errs_pre[inds]) * [1],
+                        fmt=mkr_pre,
+                        color=clr,
+                        label=cb_label + " Prefit",
+                        alpha=alpha,
+                        picker=True,
+                    )
             else:
-                ax1.errorbar(combo_years[inds], combo_res[inds], yerr=combo_errs[inds], fmt=mkr, \
-                    color=clr, label=cb_label, alpha = alpha, picker=True)
-                if restype == 'both':
-                    ax1.errorbar(combo_years[inds], combo_res_rpe[inds], yerr=combo_errs_pre[inds], fmt=mkr_pre, \
-                        color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True)
+                ax1.errorbar(
+                    combo_years[inds],
+                    combo_res[inds],
+                    yerr=combo_errs[inds],
+                    fmt=mkr,
+                    color=clr,
+                    label=cb_label,
+                    alpha=alpha,
+                    picker=True,
+                )
+                if restype == "both":
+                    ax1.errorbar(
+                        combo_years[inds],
+                        combo_res_pre[inds],
+                        yerr=combo_errs_pre[inds],
+                        fmt=mkr_pre,
+                        color=clr,
+                        label=cb_label + " Prefit",
+                        alpha=alpha,
+                        picker=True,
+                    )
         else:
             if plotsig:
-                sig = res[inds]/errs[inds]
-                ax1.errorbar(years[inds], sig, yerr=len(errs[inds])*[1], fmt=mkr, \
-                    color=clr, label=cb_label, alpha = alpha, picker=True)
-                if restype == 'both':
-                    sig_pre = res_pre[inds]/errs_pre[inds]
-                    ax1.errorbar(years[inds], sig_pre, yerr=len(errs_pre[inds])*[1], fmt=mkr_pre, \
-                        color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True)
-            else:
-                ax1.errorbar(years[inds], res[inds], yerr=errs[inds], fmt=mkr, \
-                    color=clr, label=cb_label, alpha = alpha, picker=True)
-                if restype == 'both':
-                    ax1.errorbar(years[inds], res_pre[inds], yerr=errs_pre[inds], fmt=mkr_pre, \
-                        color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True)
+                sig = res[inds] / errs[inds]
+                ax1.errorbar(
+                    years[inds],
+                    sig,
+                    yerr=len(errs[inds]) * [1],
+                    fmt=mkr,
+                    color=clr,
+                    label=cb_label,
+                    alpha=alpha,
+                    picker=True,
+                )
+                if restype == "both":
+                    sig_pre = res_pre[inds] / errs_pre[inds]
+                    ax1.errorbar(
+                        years[inds],
+                        sig_pre,
+                        yerr=len(errs_pre[inds]) * [1],
+                        fmt=mkr_pre,
+                        color=clr,
+                        label=cb_label + " Prefit",
+                        alpha=alpha,
+                        picker=True,
+                    )
+            else:
+                ax1.errorbar(
+                    years[inds],
+                    res[inds],
+                    yerr=errs[inds],
+                    fmt=mkr,
+                    color=clr,
+                    label=cb_label,
+                    alpha=alpha,
+                    picker=True,
+                )
+                if restype == "both":
+                    ax1.errorbar(
+                        years[inds],
+                        res_pre[inds],
+                        yerr=errs_pre[inds],
+                        fmt=mkr_pre,
+                        color=clr,
+                        label=cb_label + " Prefit",
+                        alpha=alpha,
+ picker=True, + ) # Set second axis - ax1.set_xlabel(r'Year') + ax1.set_xlabel(r"Year") ax1.grid(True) ax2 = ax1.twiny() - mjd0 = ((ax1.get_xlim()[0])-2004.0)*365.25+53005. - mjd1 = ((ax1.get_xlim()[1])-2004.0)*365.25+53005. + mjd0 = ((ax1.get_xlim()[0]) - 2004.0) * 365.25 + 53005.0 + mjd1 = ((ax1.get_xlim()[1]) - 2004.0) * 365.25 + 53005.0 ax2.set_xlim(mjd0, mjd1) if plotsig: if avg and whitened: - ax1.set_ylabel('Average Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual/Uncertainty \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual/Uncertainty') + ax1.set_ylabel("Average Residual/Uncertainty") elif whitened and not avg: - ax1.set_ylabel('Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Residual/Uncertainty \n (Whitened)", multialignment="center" + ) else: - ax1.set_ylabel('Residual/Uncertainty') + ax1.set_ylabel("Residual/Uncertainty") else: if avg and whitened: - ax1.set_ylabel('Average Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual ($\mu$s) \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual ($\mu$s)') + ax1.set_ylabel("Average Residual ($\mu$s)") elif whitened and not avg: - ax1.set_ylabel('Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel("Residual ($\mu$s) \n (Whitened)", multialignment="center") else: - ax1.set_ylabel('Residual ($\mu$s)') + ax1.set_ylabel("Residual ($\mu$s)") if legend: if len(CB) > 5: - ncol = int(np.ceil(len(CB)/2)) + ncol = int(np.ceil(len(CB) / 2)) y_offset = 1.15 else: ncol = len(CB) y_offset = 1.0 - ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol) + ax1.legend( + loc="upper center", + bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]), + ncol=ncol, + ) if title: if len(CB) > 5: y_offset = 1.1 else: y_offset = 1.0 - plt.title("%s %s timing residuals" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1]) + plt.title( + "%s %s timing residuals" % (fitter.model.PSR.value, restype), + y=y_offset + 1.0 / figsize[1], + ) if axs == None: plt.tight_layout() if save: @@ -615,9 +516,9 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='f', plotsig = Fals ext += "_NB" else: ext += "_WB" - if restype == 'prefit': + if restype == "prefit": ext += "_prefit" - elif restype == 'postfit': + elif restype == "postfit": ext += "_postfit" elif restype == "both": ext += "_pre_post_fit" @@ -625,7 +526,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='f', plotsig = Fals if axs == None: # Define clickable points - text = ax2.text(0,0,"") + text = ax2.text(0, 0, "") # Define point highlight color stamp_color = "#FD9927" @@ -634,29 +535,45 @@ def onclick(event): # Get X and Y axis data xdata = mjds if plotsig: - ydata = (res/errs).decompose().value + ydata = (res / errs).decompose().value else: ydata = res.value # Get x and y data from click xclick = event.xdata yclick = event.ydata # Calculate scaled distance, find closest point index - d = np.sqrt(((xdata - xclick)/10.0)**2 + (ydata - yclick)**2) + d = np.sqrt(((xdata - xclick) / 10.0) ** 2 + (ydata - yclick) ** 2) ind_close = np.where(np.min(d) == d)[0] # highlight clicked point - ax2.scatter(xdata[ind_close], ydata[ind_close], marker = 'x', c = stamp_color) + ax2.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color) # Print point info text.set_position((xdata[ind_close], 
ydata[ind_close])) if plotsig: - text.set_text("TOA Params:\n MJD: %s \n Res/Err: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n MJD: %s \n Res/Err: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) else: - text.set_text("TOA Params:\n MJD: %s \n Res: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n MJD: %s \n Res: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) - fig.canvas.mpl_connect('button_press_event', onclick) + fig.canvas.mpl_connect("button_press_event", onclick) return -def plot_FD_delay(fitter = None, model_object = None, save = False, title= True, axs = None, legend=True, show_bin=True, **kwargs): + +def plot_FD_delay( + fitter=None, + model_object=None, + save=False, + title=True, + axs=None, + legend=True, + show_bin=True, + **kwargs, +): """ Make a plot of frequency (MHz) vs the time delay (us) implied by FD parameters. Z. Arzoumanian, The NANOGrav Nine-year Data Set: Observations, Arrival @@ -688,117 +605,124 @@ def plot_FD_delay(fitter = None, model_object = None, save = False, title= True, loc ['string'] : matplotlib legend location [default: 'upper right'] Only used when legend = True """ - #Make sure that either a fitter or model object has been specified + # Make sure that either a fitter or model object has been specified if fitter == None and model_object == None: raise Exception("Need to specify either a fitter or model object") - #Get frequencies - if 'freqs' in kwargs.keys(): - freqs = kwargs['freqs'] + # Get frequencies + if "freqs" in kwargs.keys(): + freqs = kwargs["freqs"] elif model_object is not None: - raise Exception("Using a PINT model object. Need to add list/array of frequencies to calculate FD delay over") + raise Exception( + "Using a PINT model object. Need to add list/array of frequencies to calculate FD delay over" + ) else: freqs = fitter.toas.get_freqs().value freqs = np.sort(freqs) - #Get FD delay in units of milliseconds as a function of frequency. This will eventually by available in PINT and become redundant. PINT version may need to be modified to allow for calculation of error regions - def get_FD_delay(pint_model_object,freqs): - FD_map = model.TimingModel.get_prefix_mapping(pint_model_object,"FD") + # Get FD delay in units of milliseconds as a function of frequency. This will eventually by available in PINT and become redundant. 
PINT version may need to be modified to allow for calculation of error regions + def get_FD_delay(pint_model_object, freqs): + FD_map = model.TimingModel.get_prefix_mapping(pint_model_object, "FD") FD_names = list(FD_map.values()) FD_names.reverse() FD_vals = [] FD_uncert = [] for i in FD_names: - FD_vals.append(pint_model_object.get_params_dict(which="all",kind="value")[i]) - FD_uncert.append(pint_model_object.get_params_dict(which="all",kind="uncertainty")[i]) + FD_vals.append( + pint_model_object.get_params_dict(which="all", kind="value")[i] + ) + FD_uncert.append( + pint_model_object.get_params_dict(which="all", kind="uncertainty")[i] + ) FD_vals.append(0.0) FD_uncert.append(0.0) FD_vals = np.array(FD_vals) FD_uncert = np.array(FD_uncert) - delay = np.polyval(FD_vals,np.log10(freqs)) - delta_delay_plus = np.polyval(FD_uncert+FD_vals,np.log10(freqs)) - delta_delay_minus = np.polyval(FD_vals-FD_uncert,np.log10(freqs)) + delay = np.polyval(FD_vals, np.log10(freqs)) + delta_delay_plus = np.polyval(FD_uncert + FD_vals, np.log10(freqs)) + delta_delay_minus = np.polyval(FD_vals - FD_uncert, np.log10(freqs)) if len(FD_vals) - 1 > 1: FD_phrase = "FD1-%s" % (len(FD_vals) - 1) else: FD_phrase = "FD1" - return delay *1e6, delta_delay_plus * 1e6, delta_delay_minus * 1e6 , FD_phrase + return delay * 1e6, delta_delay_plus * 1e6, delta_delay_minus * 1e6, FD_phrase - #Get FD params if fitter object is given + # Get FD params if fitter object is given if fitter is not None: - #Check if the fitter object has FD parameters + # Check if the fitter object has FD parameters try: - FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay(fitter.model, freqs*1e-3) - #print(FD_delay) + FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay( + fitter.model, freqs * 1e-3 + ) psr_name = fitter.model.PSR.value """For when new version of PINT is default on pint_pal FD_delay = pint.models.frequency_dependent.FD.FD_delay(fitter.model,freqs) """ if show_bin: - nbins = fitter.toas['nbin'].astype(int).min() - P0 = 1/fitter.model.F0.value - P0_bin_max = P0/nbins + nbins = fitter.toas["nbin"].astype(int).min() + P0 = 1 / fitter.model.F0.value + P0_bin_max = P0 / nbins except: print("No FD parameters in this model! Exitting...") - #sys.exit() - #Get FD params if model object is given + # Get FD params if model object is given if model_object is not None: - #Check if the model object has FD parameters + # Check if the model object has FD parameters try: - FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay(model_object, freqs*1e-3) + FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay( + model_object, freqs * 1e-3 + ) psr_name = model_object.PSR.value """For when new version of PINT is default on pint_pal FD_delay = pint.models.frequency_dependent.FD.FD_delay(fitter.model,freqs) """ if show_bin: - print("show_bin requires a fitter object, cannot be used with the model alone") + print( + "show_bin requires a fitter object, cannot be used with the model alone" + ) show_bin = False except: print("No FD parameters in this model! Exitting...") - #sys.exit() - - #Get plotting preferences. - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + # Get plotting preferences. 
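+    # Optional kwargs handled below: 'figsize' (tuple), 'ls' (linestyle),
+    # 'color', 'alpha' (for the shaded uncertainty region), and 'loc'
+    # (legend location); each falls back to a default when not supplied.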
+ if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (8,4) + figsize = (8, 4) if axs == None: fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) else: fig = plt.gcf() ax1 = axs - if 'ls' in kwargs.keys(): - linestyle = kwargs['ls'] + if "ls" in kwargs.keys(): + linestyle = kwargs["ls"] else: - linestyle = '-' - if 'color' in kwargs.keys(): - clr = kwargs['color'] + linestyle = "-" + if "color" in kwargs.keys(): + clr = kwargs["color"] else: clr = "green" - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 0.2 - if 'loc' in kwargs.keys(): - loc = kwargs['loc'] + if "loc" in kwargs.keys(): + loc = kwargs["loc"] else: loc = "upper right" - #Plot frequency (MHz) vs delay (microseconds) - ax1.plot(freqs,FD_delay,label = legend_text,color=clr,ls=linestyle) - ax1.fill_between(freqs, - FD_delay_err_plus, - FD_delay_err_minus, - color=clr,alpha=alpha) + # Plot frequency (MHz) vs delay (microseconds) + ax1.plot(freqs, FD_delay, label=legend_text, color=clr, ls=linestyle) + ax1.fill_between( + freqs, FD_delay_err_plus, FD_delay_err_minus, color=clr, alpha=alpha + ) if show_bin: if (FD_delay > 0).any(): - ax1.axhline(P0_bin_max*1E6, label="1 profile bin") + ax1.axhline(P0_bin_max * 1e6, label="1 profile bin") if (FD_delay < 0).any(): - ax1.axhline(-P0_bin_max*1E6, label="1 profile bin") + ax1.axhline(-P0_bin_max * 1e6, label="1 profile bin") ax1.set_xlabel("Frequency (MHz)") ax1.set_ylabel("Delay ($\mu$s)") if title: @@ -812,8 +736,21 @@ def get_FD_delay(pint_model_object,freqs): return -def plot_residuals_freq(fitter, restype = 'postfit', colorby='f',plotsig = False, avg = False, mixed_ecorr=False,\ - whitened = False, save = False, legend = True, title = True, axs = None, **kwargs): + +def plot_residuals_freq( + fitter, + restype="postfit", + colorby="f", + plotsig=False, + avg=False, + mixed_ecorr=False, + whitened=False, + save=False, + legend=True, + title=True, + axs=None, + **kwargs, +): """ Make a plot of the residuals vs. frequency @@ -856,209 +793,203 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='f',plotsig = False if fitter.is_wideband: NB = False if avg == True: - raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") + raise ValueError( + "Cannot epoch average wideband residuals, please change 'avg' to False." 
+ ) else: NB = True # Check if want epoch averaged residuals - if avg == True and restype == 'prefit' and mixed_ecorr == True: + if avg == True and restype == "prefit" and mixed_ecorr == True: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True) - elif avg == True and restype == 'postfit' and mixed_ecorr == True: + no_avg_dict = no_ecorr_average( + fitter.toas, fitter.resids_init, use_noise_model=True + ) + elif avg == True and restype == "postfit" and mixed_ecorr == True: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True) - elif avg == True and restype == 'both' and mixed_ecorr == True: + no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True) + elif avg == True and restype == "both" and mixed_ecorr == True: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True) + no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - no_avg_dict_pre = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True) - elif avg == True and restype == 'prefit' and mixed_ecorr == False: + no_avg_dict_pre = no_ecorr_average( + fitter.toas, fitter.resids_init, use_noise_model=True + ) + elif avg == True and restype == "prefit" and mixed_ecorr == False: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'postfit' and mixed_ecorr==False: + elif avg == True and restype == "postfit" and mixed_ecorr == False: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'both' and mixed_ecorr == False: + elif avg == True and restype == "both" and mixed_ecorr == False: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - # Get residuals - if 'res' in kwargs.keys(): - res = kwargs['res'] + if "res" in kwargs.keys(): + res = kwargs["res"] else: - if restype == 'prefit': + if restype == "prefit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) - elif avg==True and mixed_ecorr == False: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) + elif avg == True and mixed_ecorr == False: + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'postfit': + res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) + elif restype == "postfit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) elif avg == True: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'both': + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + elif restype == "both": if NB == True: if avg == True and mixed_ecorr == 
True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) - res_pre_no_avg = no_avg_dict_pre['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) + res_pre_no_avg = no_avg_dict_pre["time_resids"].to(u.us) elif avg == True and mixed_ecorr == False: - res = avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) res_pre = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - res_pre = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) else: - raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\ - %(restype)) - - + raise ValueError( + "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'." + % (restype) + ) # Check if we want whitened residuals - if whitened == True and ('res' not in kwargs.keys()): + if whitened == True and ("res" not in kwargs.keys()): if avg == True and mixed_ecorr == True: - if restype != 'both': + if restype != "both": res = whiten_resids(avg_dict, restype=restype) res_no_avg = whiten_resids(no_avg_dict, restype=restype) else: - res = whiten_resids(avg_dict_pre, restype='prefit') - res_pre = whiten_resids(avg_dict, restype='postfit') + res = whiten_resids(avg_dict_pre, restype="prefit") + res_pre = whiten_resids(avg_dict, restype="postfit") res_pre = res_pre.to(u.us) - res_no_avg = whiten_resids(avg_dict_pre, restype='prefit') - res_pre_no_avg = whiten_resids(avg_dict, restype='postfit') + res_no_avg = whiten_resids(avg_dict_pre, restype="prefit") + res_pre_no_avg = whiten_resids(avg_dict, restype="postfit") res_pre_no_avg = res_pre_no_avg.to(u.us) res = res.to(u.us) res_no_avg = res_no_avg.to(u.us) elif avg == True and mixed_ecorr == False: - if restype != 'both': + if restype != "both": res = whiten_resids(avg_dict, restype=restype) else: - res = whiten_resids(avg_dict_pre, restype='prefit') - res_pre = whiten_resids(avg_dict, restype='postfit') + res = whiten_resids(avg_dict_pre, restype="prefit") + res_pre = whiten_resids(avg_dict, restype="postfit") res_pre = res_pre.to(u.us) res = res.to(u.us) else: - if restype != 'both': + if restype != "both": res = whiten_resids(fitter, restype=restype) else: - res = whiten_resids(fitter, restype='prefit') - res_pre = whiten_resids(fitter, restype='postfit') + res = whiten_resids(fitter, restype="prefit") + res_pre = whiten_resids(fitter, restype="postfit") res_pre = res_pre.to(u.us) res = res.to(u.us) # Get errors - if 'errs' in kwargs.keys(): - errs = kwargs['errs'] + if "errs" in kwargs.keys(): + errs = kwargs["errs"] else: - if restype == 'prefit': + if restype == "prefit": if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.toas.get_errors().to(u.us) - elif restype == 'postfit': + elif restype == 
"postfit": if NB == True: if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) - elif restype == 'both': + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) + elif restype == "both": if NB == True: if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_pre = avg_dict_pre['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) - errs_no_avg_pre = no_avg_dict_pre['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_pre = avg_dict_pre["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) + errs_no_avg_pre = no_avg_dict_pre["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) - errs_pre = avg_dict_pre['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_pre = avg_dict_pre["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) - # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr. # Create combined arrays if avg == True and mixed_ecorr == True: combo_res = np.hstack((res, res_no_avg)) combo_errs = np.hstack((errs, errs_no_avg)) - if restype =='both': + if restype == "both": combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) combo_res_pre = np.hstack((res_pre, res_no_avg_pre)) # Get freqs - if 'freqs' in kwargs.keys(): - freqs = kwargs['freqs'] + if "freqs" in kwargs.keys(): + freqs = kwargs["freqs"] else: freqs = fitter.toas.get_freqs().value - # Get colorby flag values (obs, PTA, febe, etc.) - if 'colorby' in kwargs.keys(): - cb = kwargs['colorby'] + if "colorby" in kwargs.keys(): + cb = kwargs["colorby"] else: cb = np.array(fitter.toas[colorby]) -#. Seems to run a little faster but not robust to obs? -# cb = np.array(fitter.toas.get_flag_value(colorby)[0]) + # . Seems to run a little faster but not robust to obs? 
+    # cb = np.array(fitter.toas.get_flag_value(colorby)[0])
     if avg == True:
         avg_cb = []
-        for iis in avg_dict['indices']:
+        for iis in avg_dict["indices"]:
             avg_cb.append(cb[iis[0]])
         if mixed_ecorr == True:
             no_avg_cb = []
-            for jjs in no_avg_dict['indices']:
+            for jjs in no_avg_dict["indices"]:
                 no_avg_cb.append(cb[jjs])
             no_ecorr_cb = np.array(no_avg_cb)
         cb = np.array(avg_cb)
 
     # Get the set of unique flag values
-    if avg==True and mixed_ecorr==True:
-        cb = np.hstack((cb,no_ecorr_cb))
+    if avg == True and mixed_ecorr == True:
+        cb = np.hstack((cb, no_ecorr_cb))
     CB = set(cb)
 
-    if colorby== 'pta':
-        colorscheme = colorschemes['pta']
-        markerscheme = markers['pta']
-    elif colorby == 'obs':
-        colorscheme = colorschemes['observatories']
-        markerscheme = markers['observatories']
-    elif colorby == 'f':
-        colorscheme = colorschemes['febe']
-        markerscheme = markers['febe']
-
+    colorscheme, markerscheme = set_color_and_marker(colorby)
 
-    if 'figsize' in kwargs.keys():
-        figsize = kwargs['figsize']
+    if "figsize" in kwargs.keys():
+        figsize = kwargs["figsize"]
     else:
-        figsize = (10,4)
+        figsize = (10, 4)
     if axs == None:
         fig = plt.figure(figsize=figsize)
         ax1 = fig.add_subplot(111)
@@ -1067,103 +998,180 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='f',plotsig = False
         ax1 = axs
 
     for i, c in enumerate(CB):
-        inds = np.where(cb==c)[0]
+        inds = np.where(cb == c)[0]
         if not inds.tolist():
             cb_label = ""
         else:
             cb_label = cb[inds][0]
         # Get plot preferences
-        if 'fmt' in kwargs.keys():
-            mkr = kwargs['fmt']
+        if "fmt" in kwargs.keys():
+            mkr = kwargs["fmt"]
         else:
             try:
                 mkr = markerscheme[cb_label]
-                if restype == 'both':
-                    mkr_pre = '.'
+                if restype == "both":
+                    mkr_pre = "."
             except Exception:
-                mkr = 'x'
-                if restype == 'both':
-                    mkr_pre = '.'
+                mkr = "x"
+                if restype == "both":
+                    mkr_pre = "."
log.log(1, "Color by Flag doesn't have a marker label!!") - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in kwargs.keys(): + clr = kwargs["color"] else: try: clr = colorscheme[cb_label] except Exception: - clr = 'k' + clr = "k" log.log(1, "Color by Flag doesn't have a color!!") - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 0.5 if avg and mixed_ecorr: if plotsig: - combo_sig = combo_res[inds]/combo_errs[inds] - ax1.errorbar(freqs[inds], combo_sig, yerr=len(combo_errs[inds])*[1], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha, picker=True) - if restype == 'both': - combo_sig_pre = combo_res_pre[inds]/combo_errs_pre[inds] - ax1.errorbar(freqs[inds], combo_sig_pre, yerr=len(combo_errs_pre[inds])*[1], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True) - else: - ax1.errorbar(freqs[inds], combo_res[inds], yerr=combo_errs[inds], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha, picker=True) - if restype == 'both': - ax1.errorbar(freqs[inds], combo_res_pre[inds], yerr=combo_errs_pre[inds], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True) + combo_sig = combo_res[inds] / combo_errs[inds] + ax1.errorbar( + freqs[inds], + combo_sig, + yerr=len(combo_errs[inds]) * [1], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + picker=True, + ) + if restype == "both": + combo_sig_pre = combo_res_pre[inds] / combo_errs_pre[inds] + ax1.errorbar( + freqs[inds], + combo_sig_pre, + yerr=len(combo_errs_pre[inds]) * [1], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + picker=True, + ) + else: + ax1.errorbar( + freqs[inds], + combo_res[inds], + yerr=combo_errs[inds], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + picker=True, + ) + if restype == "both": + ax1.errorbar( + freqs[inds], + combo_res_pre[inds], + yerr=combo_errs_pre[inds], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + picker=True, + ) else: if plotsig: - sig = res[inds]/errs[inds] - ax1.errorbar(freqs[inds], sig, yerr=len(errs[inds])*[1], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha, picker=True) - if restype == 'both': - sig_pre = res_pre[inds]/errs_pre[inds] - ax1.errorbar(freqs[inds], sig_pre, yerr=len(errs_pre[inds])*[1], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True) - else: - ax1.errorbar(freqs[inds], res[inds], yerr=errs[inds], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha, picker=True) - if restype == 'both': - ax1.errorbar(freqs[inds], res_pre[inds], yerr=errs_pre[inds], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True) + sig = res[inds] / errs[inds] + ax1.errorbar( + freqs[inds], + sig, + yerr=len(errs[inds]) * [1], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + picker=True, + ) + if restype == "both": + sig_pre = res_pre[inds] / errs_pre[inds] + ax1.errorbar( + freqs[inds], + sig_pre, + yerr=len(errs_pre[inds]) * [1], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + picker=True, + ) + else: + ax1.errorbar( + freqs[inds], + res[inds], + yerr=errs[inds], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + picker=True, + ) + if restype == "both": + ax1.errorbar( + freqs[inds], + res_pre[inds], + yerr=errs_pre[inds], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + picker=True, + ) # Set axis - ax1.set_xlabel(r'Frequency (MHz)') + 
ax1.set_xlabel(r"Frequency (MHz)") ax1.grid(True) if plotsig: if avg and whitened: - ax1.set_ylabel('Average Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual/Uncertainty \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual/Uncertainty') + ax1.set_ylabel("Average Residual/Uncertainty") elif whitened and not avg: - ax1.set_ylabel('Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Residual/Uncertainty \n (Whitened)", multialignment="center" + ) else: - ax1.set_ylabel('Residual/Uncertainty') + ax1.set_ylabel("Residual/Uncertainty") else: if avg and whitened: - ax1.set_ylabel('Average Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual ($\mu$s) \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual ($\mu$s)') + ax1.set_ylabel("Average Residual ($\mu$s)") elif whitened and not avg: - ax1.set_ylabel('Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel("Residual ($\mu$s) \n (Whitened)", multialignment="center") else: - ax1.set_ylabel('Residual ($\mu$s)') + ax1.set_ylabel("Residual ($\mu$s)") if legend: if len(CB) > 5: - ncol = int(np.ceil(len(CB)/2)) + ncol = int(np.ceil(len(CB) / 2)) y_offset = 1.15 else: ncol = len(CB) y_offset = 1.0 - ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol) + ax1.legend( + loc="upper center", + bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]), + ncol=ncol, + ) if title: if len(CB) > 5: y_offset = 1.1 else: y_offset = 1.0 - plt.title("%s %s frequency residuals" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1]) + plt.title( + "%s %s frequency residuals" % (fitter.model.PSR.value, restype), + y=y_offset + 1.0 / figsize[1], + ) if axs == None: plt.tight_layout() if save: @@ -1176,9 +1184,9 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='f',plotsig = False ext += "_NB" else: ext += "_WB" - if restype == 'prefit': + if restype == "prefit": ext += "_prefit" - elif restype == 'postfit': + elif restype == "postfit": ext += "_postfit" elif restype == "both": ext += "_pre_post_fit" @@ -1186,38 +1194,52 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='f',plotsig = False if axs == None: # Define clickable points - text = ax1.text(0,0,"") - stamp_color= "#FD9927" + text = ax1.text(0, 0, "") + stamp_color = "#FD9927" def onclick(event): # Get X and Y axis data xdata = freqs if plotsig: - ydata = (res/errs).decompose().value + ydata = (res / errs).decompose().value else: ydata = res.value # Get x and y data from click xclick = event.xdata yclick = event.ydata # Calculate scaled distance, find closest point index - d = np.sqrt(((xdata - xclick)/10.0)**2 + (ydata - yclick)**2) + d = np.sqrt(((xdata - xclick) / 10.0) ** 2 + (ydata - yclick) ** 2) ind_close = np.where(np.min(d) == d)[0] # highlight clicked point - ax1.scatter(xdata[ind_close], ydata[ind_close], marker = 'x', c = stamp_color) + ax1.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color) # Print point info text.set_position((xdata[ind_close], ydata[ind_close])) if plotsig: - text.set_text("TOA Params:\n Frequency: %s \n Res/Err: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n Frequency: %s \n Res/Err: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) else: - text.set_text("TOA 
 
-def plot_dmx_time(fitter, savedmx = False, save = False, legend = True,\
-                  axs = None, title = True, compare = False, **kwargs):
+
+def plot_dmx_time(
+    fitter,
+    savedmx=False,
+    save=False,
+    legend=True,
+    axs=None,
+    title=True,
+    compare=False,
+    **kwargs,
+):
     """
     Make a plot of DMX vs. time
 
     Parameters
@@ -1255,111 +1277,161 @@ def plot_dmx_time(fitter, savedmx = False, save = False, legend = True,\
         dmxname = "%s_dmxparse.nb.out" % (psrname)
 
     # Get plotting dmx and error values for WB
-    if 'dmx' in kwargs.keys():
-        DMXs = kwargs['dmx']
+    if "dmx" in kwargs.keys():
+        DMXs = kwargs["dmx"]
     else:
         # get dmx dictionary from pint dmxparse function
         dmx_dict = pu.dmxparse(fitter, save="dmxparse.out")
-        DMXs = dmx_dict['dmxs'].value
-        DMX_vErrs = dmx_dict['dmx_verrs'].value
-        DMX_center_MJD = dmx_dict['dmxeps'].value
-        DMX_center_Year = (DMX_center_MJD- 51544.0)/365.25 + 2000.0
+        DMXs = dmx_dict["dmxs"].value
+        DMX_vErrs = dmx_dict["dmx_verrs"].value
+        DMX_center_MJD = dmx_dict["dmxeps"].value
+        DMX_center_Year = (DMX_center_MJD - 51544.0) / 365.25 + 2000.0
         # move file name
         if savedmx:
             os.rename("dmxparse.out", dmxname)
 
     # Double check/overwrite errors if necessary
-    if 'errs' in kwargs.keys():
-        DMX_vErrs = kwargs['errs']
+    if "errs" in kwargs.keys():
+        DMX_vErrs = kwargs["errs"]
     # Double check/overwrite dmx mjd epochs if necessary
-    if 'mjds' in kwargs.keys():
-        DMX_center_MJD = kwargs['mjds']
-        DMX_center_Year = (DMX_center_MJD- 51544.0)/365.25 + 2000.0
+    if "mjds" in kwargs.keys():
+        DMX_center_MJD = kwargs["mjds"]
+        DMX_center_Year = (DMX_center_MJD - 51544.0) / 365.25 + 2000.0
 
     # If we want to compare WB to NB, we need to look for the right output file
     if compare == True:
         # Look for other dmx file
         if NB:
-            #log.log(1, "Searching for file: %s_dmxparse.wb.out" % (psrname))
-            if not os.path.isfile("%s_dmxparse.wb.out"%(psrname)):
+            # log.log(1, "Searching for file: %s_dmxparse.wb.out" % (psrname))
+            if not os.path.isfile("%s_dmxparse.wb.out" % (psrname)):
                 raise RuntimeError("Cannot find Wideband DMX parse output file.")
             else:
                 # Get the values from the DMX parse file
-                dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt("%s_dmxparse.wb.out"%(psrname),\
-                                                                                  unpack=True, usecols=(0,1,2,3,4))
+                dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt(
+                    "%s_dmxparse.wb.out" % (psrname),
+                    unpack=True,
+                    usecols=(0, 1, 2, 3, 4),
+                )
         else:
-            #log.log(1, "Searching for file: %s_dmxparse.nb.out" % (psrname))
-            if not os.path.isfile("%s_dmxparse.nb.out"%(psrname)):
+            # log.log(1, "Searching for file: %s_dmxparse.nb.out" % (psrname))
+            if not os.path.isfile("%s_dmxparse.nb.out" % (psrname)):
                 raise RuntimeError("Cannot find Narrowband DMX parse output file.")
             else:
                 # Get the values from the DMX parse file
-                dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt("%s_dmxparse.nb.out"%(psrname),\
-                                                                                  unpack=True, usecols=(0,1,2,3,4))
-        dmx_mid_yr = (dmx_epochs- 51544.0)/365.25 + 2000.0
+                dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt(
+                    "%s_dmxparse.nb.out" % (psrname),
+                    unpack=True,
+                    usecols=(0, 1, 2, 3, 4),
+                )
+        dmx_mid_yr = (dmx_epochs - 51544.0) / 365.25 + 2000.0
 
     # Define the plotting function
     if axs == None:
-        if 'figsize' in kwargs.keys():
-            figsize = kwargs['figsize']
+        if "figsize" in kwargs.keys():
+            figsize = kwargs["figsize"]
         else:
-            figsize = (10,4)
+            figsize = (10, 4)
         fig = plt.figure(figsize=figsize)
         ax1 = fig.add_subplot(111)
     else:
         ax1 = axs
 
     # Get plot preferences
-    if 'fmt' in kwargs.keys():
-        mkr = kwargs['fmt']
+    if "fmt" in kwargs.keys():
+        mkr = kwargs["fmt"]
     else:
-        mkr = 's'
+        mkr = "s"
         if compare:
-            mkr_nb = 'o'
-    if 'color' in kwargs.keys():
-        clr = kwargs['color']
+            mkr_nb = "o"
+    if "color" in kwargs.keys():
+        clr = kwargs["color"]
     else:
-        clr = 'gray'
+        clr = "gray"
        if compare:
-            clr_nb = 'k'
-    if 'alpha' in kwargs.keys():
-        alpha = kwargs['alpha']
+            clr_nb = "k"
+    if "alpha" in kwargs.keys():
+        alpha = kwargs["alpha"]
     else:
         alpha = 1.0
 
     # Now actually plot
     if NB and not compare:
-        ax1.errorbar(DMX_center_Year, DMXs*10**3, yerr=DMX_vErrs*10**3, fmt='.', c = clr, marker = mkr, \
-                     label="Narrowband")
+        ax1.errorbar(
+            DMX_center_Year,
+            DMXs * 10**3,
+            yerr=DMX_vErrs * 10**3,
+            fmt=".",
+            c=clr,
+            marker=mkr,
+            label="Narrowband",
+        )
     elif not NB and not compare:
-        ax1.errorbar(DMX_center_Year, DMXs*10**3, yerr=DMX_vErrs*10**3, fmt='.', c = clr, marker = mkr, \
-                     label="Wideband")
+        ax1.errorbar(
+            DMX_center_Year,
+            DMXs * 10**3,
+            yerr=DMX_vErrs * 10**3,
+            fmt=".",
+            c=clr,
+            marker=mkr,
+            label="Wideband",
+        )
     elif compare:
         if NB:
-            ax1.errorbar(DMX_center_Year, DMXs*10**3, yerr=DMX_vErrs*10**3, fmt='.', c = clr, marker = mkr, \
-                         label="Narrowband")
-            ax1.errorbar(dmx_mid_yr, nb_dmx*10**3, yerr = nb_dmx_var*10**3, fmt = '.', color = clr_nb, marker = mkr_nb, \
-                         label='Wideband')
+            ax1.errorbar(
+                DMX_center_Year,
+                DMXs * 10**3,
+                yerr=DMX_vErrs * 10**3,
+                fmt=".",
+                c=clr,
+                marker=mkr,
+                label="Narrowband",
+            )
+            ax1.errorbar(
+                dmx_mid_yr,
+                nb_dmx * 10**3,
+                yerr=nb_dmx_var * 10**3,
+                fmt=".",
+                color=clr_nb,
+                marker=mkr_nb,
+                label="Wideband",
+            )
         else:
-            ax1.errorbar(DMX_center_Year, DMXs*10**3, yerr=DMX_vErrs*10**3, fmt='.', c = clr, marker = mkr, \
-                         label="Wideband")
-            ax1.errorbar(dmx_mid_yr, nb_dmx*10**3, yerr = nb_dmx_var*10**3, fmt = '.', color = clr_nb, marker = mkr_nb, \
-                         label='Narrowband')
+            ax1.errorbar(
+                DMX_center_Year,
+                DMXs * 10**3,
+                yerr=DMX_vErrs * 10**3,
+                fmt=".",
+                c=clr,
+                marker=mkr,
+                label="Wideband",
+            )
+            ax1.errorbar(
+                dmx_mid_yr,
+                nb_dmx * 10**3,
+                yerr=nb_dmx_var * 10**3,
+                fmt=".",
+                color=clr_nb,
+                marker=mkr_nb,
+                label="Narrowband",
+            )
 
     # Set second axis
-    ax1.set_xlabel(r'Year')
+    ax1.set_xlabel(r"Year")
     ax1.grid(True)
     ax2 = ax1.twiny()
-    mjd0 = ((ax1.get_xlim()[0])-2004.0)*365.25+53005.
-    mjd1 = ((ax1.get_xlim()[1])-2004.0)*365.25+53005.
+    mjd0 = ((ax1.get_xlim()[0]) - 2004.0) * 365.25 + 53005.0
+    mjd1 = ((ax1.get_xlim()[1]) - 2004.0) * 365.25 + 53005.0
     ax2.set_xlim(mjd0, mjd1)
     ax1.set_ylabel(r"DMX ($10^{-3}$ pc cm$^{-3}$)")
     if legend:
-        ax1.legend(loc='best')
+        ax1.legend(loc="best")
     if title:
         if NB and not compare:
-            plt.title("%s narrowband dmx" % (psrname), y=1.0+1.0/figsize[1])
+            plt.title("%s narrowband dmx" % (psrname), y=1.0 + 1.0 / figsize[1])
         elif not NB and not compare:
-            plt.title("%s wideband dmx" % (psrname), y=1.0+1.0/figsize[1])
+            plt.title("%s wideband dmx" % (psrname), y=1.0 + 1.0 / figsize[1])
         elif compare:
-            plt.title("%s narrowband and wideband dmx" % (psrname), y=1.0+1.0/figsize[1])
+            plt.title(
+                "%s narrowband and wideband dmx" % (psrname), y=1.0 + 1.0 / figsize[1]
+            )
     if axs == None:
         plt.tight_layout()
     if save:
@@ -1374,32 +1446,36 @@ def plot_dmx_time(fitter, savedmx = False, save = False, legend = True,\
 
     if axs == None:
         # Define clickable points
-        text = ax1.text(0,0,"")
+        text = ax1.text(0, 0, "")
         # Define color for highlighting points
         stamp_color = "#FD9927"
 
         def onclick(event):
             # Get X and Y axis data
             xdata = DMX_center_Year
-            ydata = DMXs*10**3
+            ydata = DMXs * 10**3
             # Get x and y data from click
             xclick = event.xdata
             yclick = event.ydata
             # Calculate scaled distance, find closest point index
-            d = np.sqrt(((xdata - xclick)/1000.0)**2 + (ydata - yclick)**2)
+            d = np.sqrt(((xdata - xclick) / 1000.0) ** 2 + (ydata - yclick) ** 2)
             ind_close = np.where(np.min(d) == d)[0]
             # highlight clicked point
-            ax2.scatter(xdata[ind_close], ydata[ind_close], marker = 's', c = stamp_color)
+            ax2.scatter(xdata[ind_close], ydata[ind_close], marker="s", c=stamp_color)
             # Print point info
             text.set_position((xdata[ind_close], ydata[ind_close]))
-            text.set_text("DMX Params:\n MJD: %s \n DMX: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0]))
+            text.set_text(
+                "DMX Params:\n MJD: %s \n DMX: %.2f \n Index: %s"
+                % (xdata[ind_close][0], ydata[ind_close], ind_close[0])
+            )
 
-        fig.canvas.mpl_connect('button_press_event', onclick)
+        fig.canvas.mpl_connect("button_press_event", onclick)
 
     return
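A sketch of the compare workflow this function supports, assuming `fo_nb` and `fo_wb` are narrowband and wideband fitters built as in the earlier sketch:

    # Save the narrowband dmxparse output first, then overlay wideband values on it.
    plot_utils.plot_dmx_time(fo_nb, savedmx=True)                # writes <PSR>_dmxparse.nb.out
    plot_utils.plot_dmx_time(fo_wb, savedmx=True, compare=True)  # reads the .nb.out file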
 
-def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model = None):
-    """ Make simple dmx vs. time plot with dmxout file(s) as input
+
+def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model=None):
+    """Make simple dmx vs. time plot with dmxout file(s) as input
 
     Parameters
     ==========
@@ -1420,41 +1496,72 @@ def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model = None):
         dmxout information (mjd, val, err, r1, r2) for each label
     """
     from astropy.time import Time
-    if isinstance(dmxout_files, str): dmxout_files = [dmxout_files]
-    if isinstance(labels, str): labels = [labels]
-    figsize = (10,4)
+
+    if isinstance(dmxout_files, str):
+        dmxout_files = [dmxout_files]
+    if isinstance(labels, str):
+        labels = [labels]
+
+    figsize = (10, 4)
     fig = plt.figure(figsize=figsize)
     ax1 = fig.add_subplot(111)
-    ax1.set_xlabel(r'Year')
+    ax1.set_xlabel(r"Year")
     ax1.set_ylabel(r"DMX ($10^{-3}$ pc cm$^{-3}$)")
     ax1.grid(True)
     ax2 = ax1.twiny()
-    ax2.set_xlabel('MJD')
+    ax2.set_xlabel("MJD")
 
     dmxDict = {}
-    for ii,(df,lab) in enumerate(zip(dmxout_files,labels)):
-        dmxmjd, dmxval, dmxerr, dmxr1, dmxr2 = np.loadtxt(df, unpack=True, usecols=range(0,5))
-        idmxDict = {'mjd':dmxmjd,'val':dmxval,'err':dmxerr,'r1':dmxr1,'r2':dmxr2}
-        ax2.errorbar(dmxmjd, dmxval*10**3, yerr=dmxerr*10**3, label=lab, marker='o', ls='', markerfacecolor='none')
+    for ii, (df, lab) in enumerate(zip(dmxout_files, labels)):
+        dmxmjd, dmxval, dmxerr, dmxr1, dmxr2 = np.loadtxt(
+            df, unpack=True, usecols=range(0, 5)
+        )
+        idmxDict = {
+            "mjd": dmxmjd,
+            "val": dmxval,
+            "err": dmxerr,
+            "r1": dmxr1,
+            "r2": dmxr2,
+        }
+        ax2.errorbar(
+            dmxmjd,
+            dmxval * 10**3,
+            yerr=dmxerr * 10**3,
+            label=lab,
+            marker="o",
+            ls="",
+            markerfacecolor="none",
+        )
         dmxDict[lab] = idmxDict
 
     # set ax1 lims (year) based on ax2 lims (mjd)
     mjd_xlo, mjd_xhi = ax2.get_xlim()
-    dy_xlo = Time(mjd_xlo,format='mjd').decimalyear
-    dy_xhi = Time(mjd_xhi,format='mjd').decimalyear
-    ax1.set_xlim(dy_xlo,dy_xhi)
+    dy_xlo = Time(mjd_xlo, format="mjd").decimalyear
+    dy_xhi = Time(mjd_xhi, format="mjd").decimalyear
+    ax1.set_xlim(dy_xlo, dy_xhi)
 
     # capture ylim
     orig_ylim = ax2.get_ylim()
-    if psrname: ax1.text(0.975,0.05,psrname,transform=ax1.transAxes,size=18,c='lightgray',
-                         horizontalalignment='right', verticalalignment='bottom')
+    if psrname:
+        ax1.text(
+            0.975,
+            0.05,
+            psrname,
+            transform=ax1.transAxes,
+            size=18,
+            c="lightgray",
+            horizontalalignment="right",
+            verticalalignment="bottom",
+        )
 
     if model:
         from pint.simulation import make_fake_toas_fromMJDs
         from pint_pal.lite_utils import remove_noise
-        fake_mjds = np.linspace(np.min(dmxmjd),np.max(dmxmjd),num=int(np.max(dmxmjd)-np.min(dmxmjd)))
-        fake_mjdTime = Time(fake_mjds,format='mjd')
+
+        fake_mjds = np.linspace(
+            np.min(dmxmjd), np.max(dmxmjd), num=int(np.max(dmxmjd) - np.min(dmxmjd))
+        )
+        fake_mjdTime = Time(fake_mjds, format="mjd")
 
         # copy the model and add sw component
         mo_swm = copy.deepcopy(model)
@@ -1462,20 +1569,22 @@ def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model = None):
         mo_swm.NE_SW.value = 10.0
 
         # generate fake TOAs and calculate excess DM due to solar wind
-        fake_toas = make_fake_toas_fromMJDs(fake_mjdTime,mo_swm)
-        sun_dm_delays = mo_swm.solar_wind_dm(fake_toas)*10**3 # same scaling as above
-        ax2.plot(fake_mjds,sun_dm_delays,c='lightgray',label='Excess DM')
+        fake_toas = make_fake_toas_fromMJDs(fake_mjdTime, mo_swm)
+        sun_dm_delays = mo_swm.solar_wind_dm(fake_toas) * 10**3  # same scaling as above
+        ax2.plot(fake_mjds, sun_dm_delays, c="lightgray", label="Excess DM")
 
         # don't change ylim based on excess dm trace, if plotted
         ax2.set_ylim(orig_ylim)
 
-    ax2.legend(loc='best')
+    ax2.legend(loc="best")
     plt.tight_layout()
-    if outfile: plt.savefig(outfile)
+    if outfile:
+        plt.savefig(outfile)
 
     return dmxDict
+
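A sketch of a call that feeds the nb/wb comparison function below; the dmxout file names are hypothetical, but the "nb"/"wb" labels matter, since plot_dmx_diffs_nbwb() indexes the returned dictionary by those keys:

    dmxDict = plot_utils.plot_dmxout(
        ["J1234+5678.nb.dmxout", "J1234+5678.wb.dmxout"],  # hypothetical file names
        ["nb", "wb"],
        psrname="J1234+5678",
    )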
 def plot_dmx_diffs_nbwb(dmxDict, show_missing=True, psrname=None, outfile=None):
-    """ Uses output dmxDict from plot_dmxout() to plot diffs between simultaneous nb-wb values
+    """Uses output dmxDict from plot_dmxout() to plot diffs between simultaneous nb-wb values
 
     Parameters
     ==========
@@ -1493,78 +1602,126 @@ def plot_dmx_diffs_nbwb(dmxDict, show_missing=True, psrname=None, outfile=None):
         None
     """
     # should check that both nb/wb entries exist first...
-    nbmjd = dmxDict['nb']['mjd']
-    wbmjd = dmxDict['wb']['mjd']
+    nbmjd = dmxDict["nb"]["mjd"]
+    wbmjd = dmxDict["wb"]["mjd"]
     allmjds = set(list(nbmjd) + list(wbmjd))
 
     # May need slightly more curation if nb/wb mjds are *almost* identical
-    wbonly = allmjds-set(nbmjd)
-    nbonly = allmjds-set(wbmjd)
+    wbonly = allmjds - set(nbmjd)
+    nbonly = allmjds - set(wbmjd)
     both = set(nbmjd).intersection(set(wbmjd))
 
     # assemble arrays of common inds for plotting later; probably a better way to do this
     nb_common_inds = []
     wb_common_inds = []
     for b in both:
-        nb_common_inds.append(np.where(nbmjd==b)[0][0])
-        wb_common_inds.append(np.where(wbmjd==b)[0][0])
+        nb_common_inds.append(np.where(nbmjd == b)[0][0])
+        wb_common_inds.append(np.where(wbmjd == b)[0][0])
     nb_common_inds, wb_common_inds = np.array(nb_common_inds), np.array(wb_common_inds)
 
-    nbdmx,nbdmxerr = dmxDict['nb']['val'],dmxDict['nb']['err']
-    wbdmx,wbdmxerr = dmxDict['wb']['val'],dmxDict['wb']['err']
+    nbdmx, nbdmxerr = dmxDict["nb"]["val"], dmxDict["nb"]["err"]
+    wbdmx, wbdmxerr = dmxDict["wb"]["val"], dmxDict["wb"]["err"]
 
     # propagate errors as quadrature sum, though Michael thinks geometric mean might be better?
-    nbwb_dmx_diffs = nbdmx[nb_common_inds]-wbdmx[wb_common_inds]
-    nbwb_err_prop = np.sqrt(nbdmxerr[nb_common_inds]**2 + wbdmxerr[wb_common_inds]**2)
+    nbwb_dmx_diffs = nbdmx[nb_common_inds] - wbdmx[wb_common_inds]
+    nbwb_err_prop = np.sqrt(
+        nbdmxerr[nb_common_inds] ** 2 + wbdmxerr[wb_common_inds] ** 2
+    )
 
     # make the plot
     from astropy.time import Time
-    figsize = (10,4)
+
+    figsize = (10, 4)
     fig = plt.figure(figsize=figsize)
     ax1 = fig.add_subplot(111)
-    ax1.set_xlabel(r'Year')
+    ax1.set_xlabel(r"Year")
     ax1.set_ylabel(r"$\Delta$DMX ($10^{-3}$ pc cm$^{-3}$)")
     ax1.grid(True)
     ax2 = ax1.twiny()
-    ax2.set_xlabel('MJD')
+    ax2.set_xlabel("MJD")
 
     botharray = np.array(list(both))
-    mjdbothTime = Time(botharray,format='mjd')
+    mjdbothTime = Time(botharray, format="mjd")
     dybothTime = mjdbothTime.decimalyear
     minmjd, maxmjd = np.sort(botharray)[0], np.sort(botharray)[-1]
     ax2.set_xlim(minmjd, maxmjd)
 
-    ax1.errorbar(dybothTime,nbwb_dmx_diffs*1e3,yerr=nbwb_err_prop*1e3,
-                 marker='o', ls='', markerfacecolor='none',label='nb - wb')
+    ax1.errorbar(
+        dybothTime,
+        nbwb_dmx_diffs * 1e3,
+        yerr=nbwb_err_prop * 1e3,
+        marker="o",
+        ls="",
+        markerfacecolor="none",
+        label="nb - wb",
+    )
 
     # want arrows indicating missing nb/wb DMX values to difference
     if show_missing:
         stddiffs = np.std(nbwb_dmx_diffs)
-        mjdnbonlyTime = Time(np.array(list(nbonly)),format='mjd')
+        mjdnbonlyTime = Time(np.array(list(nbonly)), format="mjd")
         dynbonlyTime = mjdnbonlyTime.decimalyear
-        ax1.scatter(dynbonlyTime,np.zeros(len(nbonly))+stddiffs*1e3,marker='v',c='r',label='nb only')
+        ax1.scatter(
+            dynbonlyTime,
+            np.zeros(len(nbonly)) + stddiffs * 1e3,
+            marker="v",
+            c="r",
+            label="nb only",
+        )
         nbonlystr = [str(no) for no in nbonly]
-        if nbonlystr: log.warning(f"nb-only measurements available for MJDs: {', '.join(nbonlystr)}")
+        if nbonlystr:
+            log.warning(
+                f"nb-only measurements available for MJDs: {', '.join(nbonlystr)}"
+            )
-        mjdwbonlyTime = Time(np.array(list(wbonly)),format='mjd')
+        mjdwbonlyTime = Time(np.array(list(wbonly)), format="mjd")
         dywbonlyTime = mjdwbonlyTime.decimalyear
-        ax1.scatter(dywbonlyTime,np.zeros(len(wbonly))-stddiffs*1e3,marker='^',c='r',label='wb only')
+        ax1.scatter(
+            dywbonlyTime,
+            np.zeros(len(wbonly)) - stddiffs * 1e3,
+            marker="^",
+            c="r",
+            label="wb only",
+        )
         wbonlystr = [str(wo) for wo in wbonly]
-        if wbonlystr: log.warning(f"wb-only measurements available for MJDs: {', '.join(wbonlystr)}")
-
-    if psrname: ax1.text(0.975,0.05,psrname,transform=ax1.transAxes,size=18,c='lightgray',
-                         horizontalalignment='right', verticalalignment='bottom')
+        if wbonlystr:
+            log.warning(
+                f"wb-only measurements available for MJDs: {', '.join(wbonlystr)}"
+            )
+
+    if psrname:
+        ax1.text(
+            0.975,
+            0.05,
+            psrname,
+            transform=ax1.transAxes,
+            size=18,
+            c="lightgray",
+            horizontalalignment="right",
+            verticalalignment="bottom",
+        )
 
     plt.tight_layout()
-    ax1.legend(loc='best')
-    if outfile: plt.savefig(outfile)
+    ax1.legend(loc="best")
+    if outfile:
+        plt.savefig(outfile)
 
     return None
+
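Continuing the sketch above, differencing the simultaneous nb/wb DMX measurements is then a single call, reusing the dictionary returned by plot_dmxout():

    plot_utils.plot_dmx_diffs_nbwb(dmxDict, show_missing=True, psrname="J1234+5678")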
 # Now we want to make wideband DM vs. time plot, this uses the premade dm_resids from PINT
-def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False, legend = True, title = True,\
-                      axs = None, mean_sub = True, **kwargs):
+def plot_dm_residuals(
+    fitter,
+    restype="postfit",
+    plotsig=False,
+    save=False,
+    legend=True,
+    title=True,
+    axs=None,
+    mean_sub=True,
+    **kwargs,
+):
     """
     Make a plot of Wideband timing DM residuals v. time.
 
     Parameters
@@ -1601,60 +1758,64 @@ def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False
 
     # Check if wideband
     if not fitter.is_wideband:
-        raise RuntimeError("Error: Narrowband TOAs have no DM residuals, use `plot_dmx_time()` instead.")
+        raise RuntimeError(
+            "Error: Narrowband TOAs have no DM residuals, use `plot_dmx_time()` instead."
+        )
 
     # Get the DM residuals
-    if 'dmres' in kwargs.keys():
-        dm_resids = kwargs['dmres']
+    if "dmres" in kwargs.keys():
        dm_resids = kwargs["dmres"]
     else:
         if restype == "postfit":
-            dm_resids = fitter.resids.residual_objs['dm'].resids.value
-        elif restype == 'prefit':
-            dm_resids = fitter.resids_init.residual_objs['dm'].resids.value
-        elif restype == 'both':
-            dm_resids = fitter.resids.residual_objs['dm'].resids.value
-            dm_resids_init = fitter.resids_init.residual_objs['dm'].resids.value
+            dm_resids = fitter.resids.residual_objs["dm"].resids.value
+        elif restype == "prefit":
+            dm_resids = fitter.resids_init.residual_objs["dm"].resids.value
+        elif restype == "both":
+            dm_resids = fitter.resids.residual_objs["dm"].resids.value
+            dm_resids_init = fitter.resids_init.residual_objs["dm"].resids.value
 
     # Get the DM residual errors
     if "errs" in kwargs.keys():
-        dm_error = kwargs['errs']
+        dm_error = kwargs["errs"]
     else:
-        if restype == 'postfit':
-            dm_error = fitter.resids.residual_objs['dm'].get_data_error().value
-        elif restype == 'prefit':
-            dm_error = fitter.resids_init.residual_objs['dm'].get_data_error().value
-        elif restype == 'both':
-            dm_error = fitter.resids.residual_objs['dm'].get_data_error().value
-            dm_error_init = fitter.resids_init.residual_objs['dm'].get_data_error().value
+        if restype == "postfit":
+            dm_error = fitter.resids.residual_objs["dm"].get_data_error().value
+        elif restype == "prefit":
+            dm_error = fitter.resids_init.residual_objs["dm"].get_data_error().value
+        elif restype == "both":
+            dm_error = fitter.resids.residual_objs["dm"].get_data_error().value
+            dm_error_init = (
+                fitter.resids_init.residual_objs["dm"].get_data_error().value
+            )
 
     # Get the MJDs
-    if 'mjds' in kwargs.keys():
-        mjds = kwargs['mjds']
+    if "mjds" in kwargs.keys():
+        mjds = kwargs["mjds"]
     else:
         mjds = fitter.toas.get_mjds().value
-    years = (mjds - 51544.0)/365.25 + 2000.0
+    years = (mjds - 51544.0) / 365.25 + 2000.0
 
     # Get the receiver-backend combos
-    if 'rcvr_bcknds' in kwargs.keys():
-        rcvr_bcknds = kwargs['rcvr_bcknds']
+    if "rcvr_bcknds" in kwargs.keys():
+        rcvr_bcknds = kwargs["rcvr_bcknds"]
     else:
-        rcvr_bcknds = np.array(fitter.toas.get_flag_value('f')[0])
+        rcvr_bcknds = np.array(fitter.toas.get_flag_value("f")[0])
     # Get the set of unique receiver-backend combos
     RCVR_BCKNDS = set(rcvr_bcknds)
 
     # If we don't want mean subtracted data we add the mean
     if not mean_sub:
-        if 'dmres' in kwargs.keys():
+        if "dmres" in kwargs.keys():
             dm_avg = dm_resids
         else:
-            dm_avg = fitter.resids.residual_objs['dm'].dm_data
+            dm_avg = fitter.resids.residual_objs["dm"].dm_data
         if "errs" in kwargs.keys():
             dm_avg_err = dm_error
         else:
-            dm_avg_err = fitter.resids.residual_objs['dm'].get_data_error().value
-        DM0 = np.average(dm_avg, weights=(dm_avg_err)**-2)
+            dm_avg_err = fitter.resids.residual_objs["dm"].get_data_error().value
+        DM0 = np.average(dm_avg, weights=(dm_avg_err) ** -2)
         dm_resids += DM0.value
-        if restype == 'both':
+        if restype == "both":
             dm_resids_init += DM0.value
 
     if plotsig:
         ylabel = r"DM/Uncertainty"
@@ -1667,82 +1828,131 @@ def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False
         ylabel = r"$\Delta$DM [cm$^{-3}$ pc]"
 
     if axs == None:
-        if 'figsize' in kwargs.keys():
-            figsize = kwargs['figsize']
+        if "figsize" in kwargs.keys():
+            figsize = kwargs["figsize"]
         else:
-            figsize = (10,4)
+            figsize = (10, 4)
         fig = plt.figure(figsize=figsize)
         ax1 = fig.add_subplot(111)
     else:
         ax1 = axs
 
     for i, r_b in enumerate(RCVR_BCKNDS):
-        inds = np.where(rcvr_bcknds==r_b)[0]
+        inds = np.where(rcvr_bcknds == r_b)[0]
         if not inds.tolist():
             r_b_label = ""
         else:
             r_b_label = rcvr_bcknds[inds][0]
         # Get plot preferences
-        if 'fmt' in kwargs.keys():
-            mkr = kwargs['fmt']
+        if "fmt" in kwargs.keys():
+            mkr = kwargs["fmt"]
         else:
             mkr = markers[r_b_label]
-            if restype == 'both':
-                mkr_pre = '.'
-        if 'color' in kwargs.keys():
-            clr = kwargs['color']
+            if restype == "both":
+                mkr_pre = "."
+        if "color" in kwargs.keys():
+            clr = kwargs["color"]
         else:
             clr = colorscheme[r_b_label]
-        if 'alpha' in kwargs.keys():
-            alpha = kwargs['alpha']
+        if "alpha" in kwargs.keys():
+            alpha = kwargs["alpha"]
         else:
             alpha = 0.5
         # Do plotting command
-        if restype == 'both':
+        if restype == "both":
             if plotsig:
-                dm_sig = dm_resids[inds]/dm_error[inds]
-                dm_sig_pre = dm_resids_init[inds]/dm_error[inds]
-                ax1.errorbar(years[inds], dm_sig, yerr=len(dm_error[inds])*[1], fmt=markers[r_b_label], \
-                             color=colorscheme[r_b_label], label=r_b_label, alpha = 0.5)
-                ax1.errorbar(years[inds], dm_sig_pre, yerr=len(dm_error_init[inds])*[1], fmt=markers[r_b_label], \
-                             color=colorscheme[r_b_label], label=r_b_label+" Prefit", alpha = 0.5)
-            else:
-                ax1.errorbar(years[inds], dm_resids[inds], yerr=dm_error[inds], fmt=markers[r_b_label], \
-                             color=colorscheme[r_b_label], label=r_b_label, alpha = 0.5)
-                ax1.errorbar(years[inds], dm_resids_init[inds], yerr=dm_error_init[inds], fmt=markers[r_b_label], \
-                             color=colorscheme[r_b_label], label=r_b_label+" Prefit", alpha = 0.5)
+                dm_sig = dm_resids[inds] / dm_error[inds]
+                dm_sig_pre = dm_resids_init[inds] / dm_error[inds]
+                ax1.errorbar(
+                    years[inds],
+                    dm_sig,
+                    yerr=len(dm_error[inds]) * [1],
+                    fmt=markers[r_b_label],
+                    color=colorscheme[r_b_label],
+                    label=r_b_label,
+                    alpha=0.5,
+                )
+                ax1.errorbar(
+                    years[inds],
+                    dm_sig_pre,
+                    yerr=len(dm_error_init[inds]) * [1],
+                    fmt=markers[r_b_label],
+                    color=colorscheme[r_b_label],
+                    label=r_b_label + " Prefit",
+                    alpha=0.5,
+                )
+            else:
+                ax1.errorbar(
+                    years[inds],
+                    dm_resids[inds],
+                    yerr=dm_error[inds],
+                    fmt=markers[r_b_label],
+                    color=colorscheme[r_b_label],
+                    label=r_b_label,
+                    alpha=0.5,
+                )
+                ax1.errorbar(
+                    years[inds],
+                    dm_resids_init[inds],
+                    yerr=dm_error_init[inds],
+                    fmt=markers[r_b_label],
+                    color=colorscheme[r_b_label],
+                    label=r_b_label + " Prefit",
+                    alpha=0.5,
+                )
         else:
             if plotsig:
-                dm_sig = dm_resids[inds]/dm_error[inds]
-                ax1.errorbar(years[inds], dm_sig, yerr=len(dm_error[inds])*[1], fmt=markers[r_b_label], \
-                             color=colorscheme[r_b_label], label=r_b_label, alpha = 0.5)
+                dm_sig = dm_resids[inds] / dm_error[inds]
+                ax1.errorbar(
+                    years[inds],
+                    dm_sig,
+                    yerr=len(dm_error[inds]) * [1],
+                    fmt=markers[r_b_label],
+                    color=colorscheme[r_b_label],
+                    label=r_b_label,
+                    alpha=0.5,
+                )
             else:
-                ax1.errorbar(years[inds], dm_resids[inds], yerr=dm_error[inds], fmt=markers[r_b_label], \
-                             color=colorscheme[r_b_label], label=r_b_label, alpha = 0.5)
+                ax1.errorbar(
+                    years[inds],
+                    dm_resids[inds],
+                    yerr=dm_error[inds],
+                    fmt=markers[r_b_label],
+                    color=colorscheme[r_b_label],
+                    label=r_b_label,
+                    alpha=0.5,
+                )
 
     # Set second axis
     ax1.set_ylabel(ylabel)
-    ax1.set_xlabel(r'Year')
+    ax1.set_xlabel(r"Year")
     ax1.grid(True)
     ax2 = ax1.twiny()
-    mjd0 = ((ax1.get_xlim()[0])-2004.0)*365.25+53005.
-    mjd1 = ((ax1.get_xlim()[1])-2004.0)*365.25+53005.
+    mjd0 = ((ax1.get_xlim()[0]) - 2004.0) * 365.25 + 53005.0
+    mjd1 = ((ax1.get_xlim()[1]) - 2004.0) * 365.25 + 53005.0
     ax2.set_xlim(mjd0, mjd1)
 
     if legend:
         if len(RCVR_BCKNDS) > 5:
-            ncol = int(np.ceil(len(RCVR_BCKNDS)/2))
+            ncol = int(np.ceil(len(RCVR_BCKNDS) / 2))
             y_offset = 1.15
         else:
             ncol = len(RCVR_BCKNDS)
             y_offset = 1.0
-        ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol)
+        ax1.legend(
+            loc="upper center",
+            bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]),
+            ncol=ncol,
+        )
     if title:
         if len(RCVR_BCKNDS) > 5:
             y_offset = 1.1
         else:
             y_offset = 1.0
-        plt.title("%s %s DM residuals" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1])
+        plt.title(
+            "%s %s DM residuals" % (fitter.model.PSR.value, restype),
+            y=y_offset + 1.0 / figsize[1],
+        )
     if axs == None:
         plt.tight_layout()
     if save:
@@ -1757,7 +1967,7 @@ def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False
 
     if axs == None:
         # Define clickable points
-        text = ax2.text(0,0,"")
+        text = ax2.text(0, 0, "")
 
         # Define point highlight color
         if "430_ASP" in RCVR_BCKNDS or "430_PUPPI" in RCVR_BCKNDS:
@@ -1769,27 +1979,42 @@ def onclick(event):
             # Get X and Y axis data
             xdata = mjds
             if plotsig:
-                ydata = dm_resids/dm_error
+                ydata = dm_resids / dm_error
             else:
                 ydata = dm_resids
             # Get x and y data from click
             xclick = event.xdata
             yclick = event.ydata
             # Calculate scaled distance, find closest point index
-            d = np.sqrt(((xdata - xclick)/1000.0)**2 + (ydata - yclick)**2)
+            d = np.sqrt(((xdata - xclick) / 1000.0) ** 2 + (ydata - yclick) ** 2)
             ind_close = np.where(np.min(d) == d)[0]
             # highlight clicked point
-            ax2.scatter(xdata[ind_close], ydata[ind_close], marker = 'x', c = stamp_color)
+            ax2.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color)
             # Print point info
             text.set_position((xdata[ind_close], ydata[ind_close]))
-            text.set_text("DM Params:\n MJD: %s \n Res: %.6f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0]))
+            text.set_text(
+                "DM Params:\n MJD: %s \n Res: %.6f \n Index: %s"
+                % (xdata[ind_close][0], ydata[ind_close], ind_close[0])
+            )
 
-        fig.canvas.mpl_connect('button_press_event', onclick)
+        fig.canvas.mpl_connect("button_press_event", onclick)
 
     return
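A sketch of the wideband-only call, assuming `fo_wb` is the wideband fitter from the earlier sketch; mean_sub=False restores the weighted-mean DM rather than plotting mean-subtracted residuals:

    plot_utils.plot_dm_residuals(fo_wb, restype="postfit", plotsig=True, mean_sub=False)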
 
-def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = 50, avg = False, whitened = False, \
-                            save = False, legend = True, title = True, axs = None, **kwargs):
+
+def plot_measurements_v_res(
+    fitter,
+    restype="postfit",
+    plotsig=False,
+    nbin=50,
+    avg=False,
+    whitened=False,
+    save=False,
+    legend=True,
+    title=True,
+    axs=None,
+    **kwargs,
+):
     """
     Make a histogram of number of measurements v. residuals
 
@@ -1829,188 +2054,228 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin =
     if fitter.is_wideband:
         NB = False
         if avg == True:
-            raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.")
+            raise ValueError(
+                "Cannot epoch average wideband residuals, please change 'avg' to False."
+            )
     else:
         NB = True
 
     # Check if want epoch averaged residuals
-    if avg == True and restype == 'prefit':
+    if avg == True and restype == "prefit":
         avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
-    elif avg == True and restype == 'postfit':
+    elif avg == True and restype == "postfit":
         avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
-    elif avg == True and restype == 'both':
+    elif avg == True and restype == "both":
         avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
         avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True)
-
     # Get residuals
-    if 'res' in kwargs.keys():
-        res = kwargs['res']
+    if "res" in kwargs.keys():
        res = kwargs["res"]
     else:
-        if restype == 'prefit':
+        if restype == "prefit":
             if NB == True:
                 if avg == True:
-                    res = avg_dict['time_resids'].to(u.us)
+                    res = avg_dict["time_resids"].to(u.us)
                 else:
                     res = fitter.resids_init.time_resids.to(u.us)
             else:
-                res = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us)
-        elif restype == 'postfit':
+                res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us)
+        elif restype == "postfit":
             if NB == True:
                 if avg == True:
-                    res = avg_dict['time_resids'].to(u.us)
+                    res = avg_dict["time_resids"].to(u.us)
                 else:
                     res = fitter.resids.time_resids.to(u.us)
             else:
-                res = fitter.resids.residual_objs['toa'].time_resids.to(u.us)
-        elif restype == 'both':
+                res = fitter.resids.residual_objs["toa"].time_resids.to(u.us)
+        elif restype == "both":
             if NB == True:
                 if avg == True:
-                    res = avg_dict['time_resids'].to(u.us)
-                    res_pre = avg_dict_pre['time_resids'].to(u.us)
+                    res = avg_dict["time_resids"].to(u.us)
+                    res_pre = avg_dict_pre["time_resids"].to(u.us)
                 else:
                     res = fitter.resids.time_resids.to(u.us)
                     res_pre = fitter.resids_init.time_resids.to(u.us)
             else:
-                res = fitter.resids.residual_objs['toa'].time_resids.to(u.us)
-                res_pre = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us)
+                res = fitter.resids.residual_objs["toa"].time_resids.to(u.us)
+                res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us)
         else:
-            raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\
-                             %(restype))
+            raise ValueError(
+                "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."
+                % (restype)
+            )
 
     # Check if we want whitened residuals
-    if whitened == True and ('res' not in kwargs.keys()):
+    if whitened == True and ("res" not in kwargs.keys()):
         if avg == True:
-            if restype != 'both':
+            if restype != "both":
                 res = whiten_resids(avg_dict, restype=restype)
             else:
-                res = whiten_resids(avg_dict_pre, restype='prefit')
-                res_pre = whiten_resids(avg_dict, restype='postfit')
+                res = whiten_resids(avg_dict_pre, restype="prefit")
+                res_pre = whiten_resids(avg_dict, restype="postfit")
                 res_pre = res_pre.to(u.us)
             res = res.to(u.us)
         else:
-            if restype != 'both':
+            if restype != "both":
                 res = whiten_resids(fitter, restype=restype)
             else:
-                res = whiten_resids(fitter, restype='prefit')
-                res_pre = whiten_resids(fitter, restype='postfit')
+                res = whiten_resids(fitter, restype="prefit")
+                res_pre = whiten_resids(fitter, restype="postfit")
                 res_pre = res_pre.to(u.us)
             res = res.to(u.us)
 
     # Get errors
-    if 'errs' in kwargs.keys():
-        errs = kwargs['errs']
+    if "errs" in kwargs.keys():
        errs = kwargs["errs"]
     else:
-        if restype == 'prefit':
+        if restype == "prefit":
            if avg == True:
-                errs = avg_dict['errors'].to(u.us)
+                errs = avg_dict["errors"].to(u.us)
            else:
                 errs = fitter.toas.get_errors().to(u.us)
-        elif restype == 'postfit':
+        elif restype == "postfit":
             if NB == True:
                 if avg == True:
-                    errs = avg_dict['errors'].to(u.us)
+                    errs = avg_dict["errors"].to(u.us)
                 else:
                     errs = fitter.resids.get_data_error().to(u.us)
             else:
-                errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us)
-        elif restype == 'both':
+                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
+        elif restype == "both":
             if NB == True:
                 if avg == True:
-                    errs = avg_dict['errors'].to(u.us)
-                    errs_pre = avg_dict_pre['errors'].to(u.us)
+                    errs = avg_dict["errors"].to(u.us)
+                    errs_pre = avg_dict_pre["errors"].to(u.us)
                 else:
                     errs = fitter.resids.get_data_error().to(u.us)
                     errs_pre = fitter.toas.get_errors().to(u.us)
             else:
-                errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us)
+                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
                 errs_pre = fitter.toas.get_errors().to(u.us)
 
     # Get receiver backends
-    if 'rcvr_bcknds' in kwargs.keys():
-        rcvr_bcknds = kwargs['rcvr_bcknds']
+    if "rcvr_bcknds" in kwargs.keys():
        rcvr_bcknds = kwargs["rcvr_bcknds"]
     else:
-        rcvr_bcknds = np.array(fitter.toas.get_flag_value('f')[0])
+        rcvr_bcknds = np.array(fitter.toas.get_flag_value("f")[0])
     if avg == True:
         avg_rcvr_bcknds = []
-        for iis in avg_dict['indices']:
+        for iis in avg_dict["indices"]:
             avg_rcvr_bcknds.append(rcvr_bcknds[iis[0]])
         rcvr_bcknds = np.array(avg_rcvr_bcknds)
     # Get the set of unique receiver-backend combos
     RCVR_BCKNDS = set(rcvr_bcknds)
 
     if axs == None:
-        if 'figsize' in kwargs.keys():
-            figsize = kwargs['figsize']
+        if "figsize" in kwargs.keys():
            figsize = kwargs["figsize"]
         else:
-            figsize = (10,4)
+            figsize = (10, 4)
         fig = plt.figure(figsize=figsize)
         ax1 = fig.add_subplot(111)
     else:
         ax1 = axs
 
-    xmax=0
+    xmax = 0
     for i, r_b in enumerate(RCVR_BCKNDS):
-        inds = np.where(rcvr_bcknds==r_b)[0]
+        inds = np.where(rcvr_bcknds == r_b)[0]
         if not inds.tolist():
             r_b_label = ""
         else:
             r_b_label = rcvr_bcknds[inds][0]
         # Get plot preferences
-        if 'color' in kwargs.keys():
-            clr = kwargs['color']
+        if "color" in kwargs.keys():
            clr = kwargs["color"]
         else:
            clr = colorscheme[r_b_label]
         if plotsig:
-            sig = res[inds]/errs[inds]
-            ax1.hist(sig, nbin, histtype='step', color=colorscheme[r_b_label], label=r_b_label)
-            xmax = max(xmax,max(sig),max(-sig))
-            if restype == 'both':
-                sig_pre = res_pre[inds]/errs_pre[inds]
-                ax1.hist(sig_pre, nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\
-                         label=r_b_label+" Prefit")
-        else:
-            ax1.hist(res[inds], nbin, histtype='step', color=colorscheme[r_b_label], label=r_b_label)
-            xmax = max(xmax,max(res[inds]),max(-res[inds]))
-            if restype == 'both':
-                ax1.hist(res[inds], nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\
-                         label=r_b_label+" Prefit")
+            sig = res[inds] / errs[inds]
+            ax1.hist(
+                sig,
+                nbin,
+                histtype="step",
+                color=colorscheme[r_b_label],
+                label=r_b_label,
+            )
+            xmax = max(xmax, max(sig), max(-sig))
+            if restype == "both":
+                sig_pre = res_pre[inds] / errs_pre[inds]
+                ax1.hist(
+                    sig_pre,
+                    nbin,
+                    histtype="step",
+                    color=colorscheme[r_b_label],
+                    linestyle="--",
+                    label=r_b_label + " Prefit",
+                )
        else:
+            ax1.hist(
+                res[inds],
+                nbin,
+                histtype="step",
+                color=colorscheme[r_b_label],
+                label=r_b_label,
+            )
+            xmax = max(xmax, max(res[inds]), max(-res[inds]))
+            if restype == "both":
+                ax1.hist(
+                    res_pre[inds],
+                    nbin,
+                    histtype="step",
+                    color=colorscheme[r_b_label],
+                    linestyle="--",
+                    label=r_b_label + " Prefit",
+                )
 
     ax1.grid(True)
     ax1.set_ylabel("Number of measurements")
     if plotsig:
         if avg and whitened:
-            ax1.set_xlabel('Average Residual/Uncertainty \n (Whitened)', multialignment='center')
+            ax1.set_xlabel(
+                "Average Residual/Uncertainty \n (Whitened)", multialignment="center"
+            )
         elif avg and not whitened:
-            ax1.set_xlabel('Average Residual/Uncertainty')
+            ax1.set_xlabel("Average Residual/Uncertainty")
         elif whitened and not avg:
-            ax1.set_xlabel('Residual/Uncertainty \n (Whitened)', multialignment='center')
+            ax1.set_xlabel(
+                "Residual/Uncertainty \n (Whitened)", multialignment="center"
+            )
        else:
-            ax1.set_xlabel('Residual/Uncertainty')
+            ax1.set_xlabel("Residual/Uncertainty")
     else:
         if avg and whitened:
-            ax1.set_xlabel('Average Residual ($\mu$s) \n (Whitened)', multialignment='center')
+            ax1.set_xlabel(
+                "Average Residual ($\mu$s) \n (Whitened)", multialignment="center"
+            )
         elif avg and not whitened:
-            ax1.set_xlabel('Average Residual ($\mu$s)')
+            ax1.set_xlabel("Average Residual ($\mu$s)")
         elif whitened and not avg:
-            ax1.set_xlabel('Residual ($\mu$s) \n (Whitened)', multialignment='center')
+            ax1.set_xlabel("Residual ($\mu$s) \n (Whitened)", multialignment="center")
         else:
-            ax1.set_xlabel('Residual ($\mu$s)')
-    ax1.set_xlim(-1.1*xmax,1.1*xmax)
+            ax1.set_xlabel("Residual ($\mu$s)")
+    ax1.set_xlim(-1.1 * xmax, 1.1 * xmax)
 
     if legend:
         if len(RCVR_BCKNDS) > 5:
-            ncol = int(np.ceil(len(RCVR_BCKNDS)/2))
+            ncol = int(np.ceil(len(RCVR_BCKNDS) / 2))
             y_offset = 1.15
         else:
             ncol = len(RCVR_BCKNDS)
             y_offset = 1.0
-        ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol)
+        ax1.legend(
+            loc="upper center",
+            bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]),
+            ncol=ncol,
+        )
     if title:
         if len(RCVR_BCKNDS) > 5:
             y_offset = 1.1
         else:
             y_offset = 1.0
-        plt.title("%s %s residual measurements" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1])
+        plt.title(
+            "%s %s residual measurements" % (fitter.model.PSR.value, restype),
+            y=y_offset + 1.0 / figsize[1],
+        )
     if axs == None:
         plt.tight_layout()
     if save:
         ext = ""
         if NB:
             ext += "_NB"
         else:
             ext += "_WB"
-        if restype == 'prefit':
+        if restype == "prefit":
             ext += "_prefit"
-        elif restype == 'postfit':
+        elif restype == "postfit":
             ext += "_postfit"
         elif restype == "both":
             ext += "_pre_post_fit"
@@ -2033,8 +2298,19 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin =
 
     return
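For reference, a sketch of the histogram call, assuming `fo_nb` is a narrowband fitter (epoch averaging is rejected for wideband fitters above):

    plot_utils.plot_measurements_v_res(fo_nb, restype="postfit", nbin=50, avg=True, whitened=True)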
 
-def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin = 50, \
-                              save = False, legend = True, title = True, axs = None, mean_sub = True, **kwargs):
+
+def plot_measurements_v_dmres(
+    fitter,
+    restype="postfit",
+    plotsig=False,
+    nbin=50,
+    save=False,
+    legend=True,
+    title=True,
+    axs=None,
+    mean_sub=True,
+    **kwargs,
+):
     """
     Make a histogram of number of measurements v. DM residuals
 
@@ -2070,53 +2346,57 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin
 
     # Check if wideband
     if not fitter.is_wideband:
-        raise ValueError("Narrowband fitters have no DM residuals, please use `plot_measurements_v_res` instead.")
+        raise ValueError(
+            "Narrowband fitters have no DM residuals, please use `plot_measurements_v_res` instead."
+        )
 
     # Get the DM residuals
-    if 'dmres' in kwargs.keys():
-        dm_resids = kwargs['dmres']
+    if "dmres" in kwargs.keys():
        dm_resids = kwargs["dmres"]
     else:
         if restype == "postfit":
-            dm_resids = fitter.resids.residual_objs['dm'].resids.value
-        elif restype == 'prefit':
-            dm_resids = fitter.resids_init.residual_objs['dm'].resids.value
-        elif restype == 'both':
-            dm_resids = fitter.resids.residual_objs['dm'].resids.value
-            dm_resids_init = fitter.resids_init.residual_objs['dm'].resids.value
+            dm_resids = fitter.resids.residual_objs["dm"].resids.value
+        elif restype == "prefit":
+            dm_resids = fitter.resids_init.residual_objs["dm"].resids.value
+        elif restype == "both":
+            dm_resids = fitter.resids.residual_objs["dm"].resids.value
+            dm_resids_init = fitter.resids_init.residual_objs["dm"].resids.value
 
     # Get the DM residual errors
     if "errs" in kwargs.keys():
-        dm_error = kwargs['errs']
+        dm_error = kwargs["errs"]
     else:
-        if restype == 'postfit':
-            dm_error = fitter.resids.residual_objs['dm'].get_data_error().value
-        elif restype == 'prefit':
-            dm_error = fitter.resids_init.residual_objs['dm'].get_data_error().value
-        elif restype == 'both':
-            dm_error = fitter.resids.residual_objs['dm'].get_data_error().value
-            dm_error_init = fitter.resids_init.residual_objs['dm'].get_data_error().value
+        if restype == "postfit":
+            dm_error = fitter.resids.residual_objs["dm"].get_data_error().value
+        elif restype == "prefit":
+            dm_error = fitter.resids_init.residual_objs["dm"].get_data_error().value
+        elif restype == "both":
+            dm_error = fitter.resids.residual_objs["dm"].get_data_error().value
+            dm_error_init = (
+                fitter.resids_init.residual_objs["dm"].get_data_error().value
+            )
 
     # Get the receiver-backend combos
-    if 'rcvr_bcknds' in kwargs.keys():
-        rcvr_bcknds = kwargs['rcvr_bcknds']
+    if "rcvr_bcknds" in kwargs.keys():
        rcvr_bcknds = kwargs["rcvr_bcknds"]
     else:
-        rcvr_bcknds = np.array(fitter.toas.get_flag_value('f')[0])
+        rcvr_bcknds = np.array(fitter.toas.get_flag_value("f")[0])
     # Get the set of unique receiver-backend combos
     RCVR_BCKNDS = set(rcvr_bcknds)
 
     # If we don't want mean subtracted data we add the mean
     if not mean_sub:
-        if 'dmres' in kwargs.keys():
+        if "dmres" in kwargs.keys():
             dm_avg = dm_resids
         else:
-            dm_avg = fitter.resids.residual_objs['dm'].dm_data
+            dm_avg = fitter.resids.residual_objs["dm"].dm_data
         if "errs" in kwargs.keys():
             dm_avg_err = dm_error
         else:
-            dm_avg_err = fitter.resids.residual_objs['dm'].get_data_error().value
-        DM0 = np.average(dm_avg, weights=(dm_avg_err)**-2)
+            dm_avg_err = fitter.resids.residual_objs["dm"].get_data_error().value
+        DM0 = np.average(dm_avg, weights=(dm_avg_err) ** -2)
         dm_resids += DM0.value
-        if restype == 'both':
+        if restype == "both":
             dm_resids_init += DM0.value
 
     if plotsig:
         xlabel = r"DM/Uncertainty"
@@ -2129,63 +2409,94 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin
         xlabel = r"$\Delta$DM [cm$^{-3}$ pc]"
 
     if axs == None:
-        if 'figsize' in kwargs.keys():
-            figsize = kwargs['figsize']
+        if "figsize" in kwargs.keys():
            figsize = kwargs["figsize"]
         else:
-            figsize = (10,4)
+            figsize = (10, 4)
         fig = plt.figure(figsize=figsize)
         ax1 = fig.add_subplot(111)
     else:
         ax1 = axs
 
     for i, r_b in enumerate(RCVR_BCKNDS):
-        inds = np.where(rcvr_bcknds==r_b)[0]
+        inds = np.where(rcvr_bcknds == r_b)[0]
         if not inds.tolist():
             r_b_label = ""
         else:
             r_b_label = rcvr_bcknds[inds][0]
         # Get plot preferences
-        if 'color' in kwargs.keys():
-            clr = kwargs['color']
+        if "color" in kwargs.keys():
            clr = kwargs["color"]
         else:
            clr = colorscheme[r_b_label]
         if plotsig:
-            sig = dm_resids[inds]/dm_error[inds]
-            ax1.hist(sig, nbin, histtype='step', color=colorscheme[r_b_label], label=r_b_label)
-            if restype == 'both':
-                sig_pre = dm_resids_init[inds]/dm_error_init[inds]
-                ax1.hist(sig_pre, nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\
-                         label=r_b_label+" Prefit")
-        else:
-            ax1.hist(dm_resids[inds], nbin, histtype='step', color=colorscheme[r_b_label], label=r_b_label)
-            if restype == 'both':
-                ax1.hist(dm_resids_init[inds], nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\
-                         label=r_b_label+" Prefit")
+            sig = dm_resids[inds] / dm_error[inds]
+            ax1.hist(
+                sig,
+                nbin,
+                histtype="step",
+                color=colorscheme[r_b_label],
+                label=r_b_label,
+            )
+            if restype == "both":
+                sig_pre = dm_resids_init[inds] / dm_error_init[inds]
+                ax1.hist(
+                    sig_pre,
+                    nbin,
+                    histtype="step",
+                    color=colorscheme[r_b_label],
+                    linestyle="--",
+                    label=r_b_label + " Prefit",
+                )
        else:
+            ax1.hist(
+                dm_resids[inds],
+                nbin,
+                histtype="step",
+                color=colorscheme[r_b_label],
+                label=r_b_label,
+            )
+            if restype == "both":
+                ax1.hist(
+                    dm_resids_init[inds],
+                    nbin,
+                    histtype="step",
+                    color=colorscheme[r_b_label],
+                    linestyle="--",
+                    label=r_b_label + " Prefit",
+                )
 
     ax1.grid(True)
     ax1.set_ylabel("Number of measurements")
     ax1.set_xlabel(xlabel)
 
     if legend:
         if len(RCVR_BCKNDS) > 5:
-            ncol = int(np.ceil(len(RCVR_BCKNDS)/2))
+            ncol = int(np.ceil(len(RCVR_BCKNDS) / 2))
             y_offset = 1.15
         else:
             ncol = len(RCVR_BCKNDS)
             y_offset = 1.0
-        ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol)
+        ax1.legend(
+            loc="upper center",
+            bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]),
+            ncol=ncol,
+        )
     if title:
         if len(RCVR_BCKNDS) > 5:
             y_offset = 1.1
         else:
             y_offset = 1.0
-        plt.title("%s %s DM residual measurements" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1])
+        plt.title(
+            "%s %s DM residual measurements" % (fitter.model.PSR.value, restype),
+            y=y_offset + 1.0 / figsize[1],
+        )
     if axs == None:
         plt.tight_layout()
     if save:
         ext = ""
-        if restype == 'prefit':
+        if restype == "prefit":
             ext += "_prefit"
-        elif restype == 'postfit':
+        elif restype == "postfit":
             ext += "_postfit"
         elif restype == "both":
             ext += "_pre_post_fit"
@@ -2194,8 +2505,20 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin
 
     return
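A sketch of the orbital-phase plot defined next, assuming `fo` was built from a binary pulsar model (orbital_phase() requires binary parameters in the timing model):

    plot_utils.plot_residuals_orb(fo, restype="postfit", colorby="f", plotsig=False)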
 
-def plot_residuals_orb(fitter, restype = 'postfit', colorby='f', plotsig = False, avg = False, mixed_ecorr=False, \
-                       whitened = False, save = False, legend = True, title = True, axs = None, **kwargs):
+
+def plot_residuals_orb(
+    fitter,
+    restype="postfit",
+    colorby="f",
+    plotsig=False,
+    avg=False,
+    mixed_ecorr=False,
+    whitened=False,
+    save=False,
+    legend=True,
+    title=True,
+    axs=None,
+    **kwargs,
+):
     """
     Make a plot of the residuals vs. orbital phase.
 
@@ -2233,170 +2556,171 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='f', plotsig = False
     if fitter.is_wideband:
         NB = False
         if avg == True:
-            raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.")
+            raise ValueError(
+                "Cannot epoch average wideband residuals, please change 'avg' to False."
+            )
     else:
         NB = True
-
     # Check if want epoch averaged residuals
-    if avg == True and restype == 'prefit' and mixed_ecorr == True:
+    if avg == True and restype == "prefit" and mixed_ecorr == True:
         avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
-        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True)
-    elif avg == True and restype == 'postfit' and mixed_ecorr == True:
+        no_avg_dict = no_ecorr_average(
+            fitter.toas, fitter.resids_init, use_noise_model=True
+        )
+    elif avg == True and restype == "postfit" and mixed_ecorr == True:
         avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
-        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True)
-    elif avg == True and restype == 'both' and mixed_ecorr == True:
+        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True)
+    elif avg == True and restype == "both" and mixed_ecorr == True:
         avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
-        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True)
+        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True)
         avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True)
-        no_avg_dict_pre = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True)
-    elif avg == True and restype == 'prefit' and mixed_ecorr == False:
+        no_avg_dict_pre = no_ecorr_average(
+            fitter.toas, fitter.resids_init, use_noise_model=True
+        )
+    elif avg == True and restype == "prefit" and mixed_ecorr == False:
         avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
-    elif avg == True and restype == 'postfit' and mixed_ecorr==False:
+    elif avg == True and restype == "postfit" and mixed_ecorr == False:
         avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
-    elif avg == True and restype == 'both' and mixed_ecorr == False:
+    elif avg == True and restype == "both" and mixed_ecorr == False:
         avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
         avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True)
-
     # Get residuals
-    if 'res' in kwargs.keys():
-        res = kwargs['res']
+    if "res" in kwargs.keys():
        res = kwargs["res"]
     else:
-        if restype == 'prefit':
+        if restype == "prefit":
             if NB == True:
                 if avg == True and mixed_ecorr == True:
-                    res = avg_dict['time_resids'].to(u.us)
-                    res_no_avg = no_avg_dict['time_resids'].to(u.us)
-                elif avg==True and mixed_ecorr == False:
-                    res = avg_dict['time_resids'].to(u.us)
+                    res = avg_dict["time_resids"].to(u.us)
+                    res_no_avg = no_avg_dict["time_resids"].to(u.us)
+                elif avg == True and mixed_ecorr == False:
+                    res = avg_dict["time_resids"].to(u.us)
                 else:
                     res = fitter.resids_init.time_resids.to(u.us)
             else:
-                res = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us)
-        elif restype == 'postfit':
+                res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us)
+        elif restype == "postfit":
             if NB == True:
                 if avg == True and mixed_ecorr == True:
-                    res = avg_dict['time_resids'].to(u.us)
-                    res_no_avg = no_avg_dict['time_resids'].to(u.us)
+                    res = avg_dict["time_resids"].to(u.us)
+                    res_no_avg = no_avg_dict["time_resids"].to(u.us)
                 elif avg == True:
-                    res = avg_dict['time_resids'].to(u.us)
+                    res = avg_dict["time_resids"].to(u.us)
                 else:
                     res = fitter.resids.time_resids.to(u.us)
             else:
-                res = fitter.resids.residual_objs['toa'].time_resids.to(u.us)
-        elif restype == 'both':
+                res = fitter.resids.residual_objs["toa"].time_resids.to(u.us)
+        elif restype == "both":
             if NB == True:
                 if avg == True and mixed_ecorr == True:
-                    res = avg_dict['time_resids'].to(u.us)
-                    res_no_avg = no_avg_dict['time_resids'].to(u.us)
-                    res_pre = avg_dict_pre['time_resids'].to(u.us)
-                    res_pre_no_avg = no_avg_dict_pre['time_resids'].to(u.us)
+                    res = avg_dict["time_resids"].to(u.us)
+                    res_no_avg = no_avg_dict["time_resids"].to(u.us)
+                    res_pre = avg_dict_pre["time_resids"].to(u.us)
+                    res_pre_no_avg = no_avg_dict_pre["time_resids"].to(u.us)
                 elif avg == True and mixed_ecorr == False:
-                    res = avg_dict['time_resids'].to(u.us)
-                    res_pre = avg_dict_pre['time_resids'].to(u.us)
+                    res = avg_dict["time_resids"].to(u.us)
+                    res_pre = avg_dict_pre["time_resids"].to(u.us)
                 else:
                     res = fitter.resids.time_resids.to(u.us)
                     res_pre = fitter.resids_init.time_resids.to(u.us)
             else:
-                res = fitter.resids.residual_objs['toa'].time_resids.to(u.us)
-                res_pre = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us)
+                res = fitter.resids.residual_objs["toa"].time_resids.to(u.us)
+                res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us)
         else:
-            raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\
-                             %(restype))
-
-
+            raise ValueError(
+                "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."
+                % (restype)
+            )
 
     # Check if we want whitened residuals
-    if whitened == True and ('res' not in kwargs.keys()):
+    if whitened == True and ("res" not in kwargs.keys()):
         if avg == True and mixed_ecorr == True:
-            if restype != 'both':
+            if restype != "both":
                 res = whiten_resids(avg_dict, restype=restype)
                 res_no_avg = whiten_resids(no_avg_dict, restype=restype)
             else:
-                res = whiten_resids(avg_dict_pre, restype='prefit')
-                res_pre = whiten_resids(avg_dict, restype='postfit')
+                res = whiten_resids(avg_dict_pre, restype="prefit")
+                res_pre = whiten_resids(avg_dict, restype="postfit")
                 res_pre = res_pre.to(u.us)
-                res_no_avg = whiten_resids(avg_dict_pre, restype='prefit')
-                res_pre_no_avg = whiten_resids(avg_dict, restype='postfit')
+                res_no_avg = whiten_resids(avg_dict_pre, restype="prefit")
+                res_pre_no_avg = whiten_resids(avg_dict, restype="postfit")
                 res_pre_no_avg = res_pre_no_avg.to(u.us)
             res = res.to(u.us)
             res_no_avg = res_no_avg.to(u.us)
         elif avg == True and mixed_ecorr == False:
-            if restype != 'both':
+            if restype != "both":
                 res = whiten_resids(avg_dict, restype=restype)
             else:
-                res = whiten_resids(avg_dict_pre, restype='prefit')
-                res_pre = whiten_resids(avg_dict, restype='postfit')
+                res = whiten_resids(avg_dict_pre, restype="prefit")
+                res_pre = whiten_resids(avg_dict, restype="postfit")
                 res_pre = res_pre.to(u.us)
             res = res.to(u.us)
         else:
-            if restype != 'both':
+            if restype != "both":
                 res = whiten_resids(fitter, restype=restype)
             else:
-                res = whiten_resids(fitter, restype='prefit')
-                res_pre = whiten_resids(fitter, restype='postfit')
+                res = whiten_resids(fitter, restype="prefit")
+                res_pre = whiten_resids(fitter, restype="postfit")
                 res_pre = res_pre.to(u.us)
             res = res.to(u.us)
 
     # Get errors
in kwargs.keys():
+        errs = kwargs["errs"]
     else:
-        if restype == 'prefit':
+        if restype == "prefit":
             if avg == True and mixed_ecorr == True:
-                errs = avg_dict['errors'].to(u.us)
-                errs_no_avg = no_avg_dict['errors'].to(u.us)
+                errs = avg_dict["errors"].to(u.us)
+                errs_no_avg = no_avg_dict["errors"].to(u.us)
             elif avg == True and mixed_ecorr == False:
-                errs = avg_dict['errors'].to(u.us)
+                errs = avg_dict["errors"].to(u.us)
             else:
                 errs = fitter.toas.get_errors().to(u.us)
-        elif restype == 'postfit':
+        elif restype == "postfit":
             if NB == True:
                 if avg == True and mixed_ecorr == True:
-                    errs = avg_dict['errors'].to(u.us)
-                    errs_no_avg = no_avg_dict['errors'].to(u.us)
+                    errs = avg_dict["errors"].to(u.us)
+                    errs_no_avg = no_avg_dict["errors"].to(u.us)
                 elif avg == True and mixed_ecorr == False:
-                    errs = avg_dict['errors'].to(u.us)
+                    errs = avg_dict["errors"].to(u.us)
                 else:
                     errs = fitter.resids.get_data_error().to(u.us)
             else:
-                errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us)
-        elif restype == 'both':
+                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
+        elif restype == "both":
             if NB == True:
                 if avg == True and mixed_ecorr == True:
-                    errs = avg_dict['errors'].to(u.us)
-                    errs_pre = avg_dict_pre['errors'].to(u.us)
-                    errs_no_avg = no_avg_dict['errors'].to(u.us)
-                    errs_no_avg_pre = no_avg_dict_pre['errors'].to(u.us)
+                    errs = avg_dict["errors"].to(u.us)
+                    errs_pre = avg_dict_pre["errors"].to(u.us)
+                    errs_no_avg = no_avg_dict["errors"].to(u.us)
+                    errs_no_avg_pre = no_avg_dict_pre["errors"].to(u.us)
                 elif avg == True and mixed_ecorr == False:
-                    errs = avg_dict['errors'].to(u.us)
-                    errs_pre = avg_dict_pre['errors'].to(u.us)
+                    errs = avg_dict["errors"].to(u.us)
+                    errs_pre = avg_dict_pre["errors"].to(u.us)
                 else:
                     errs = fitter.resids.get_data_error().to(u.us)
                     errs_pre = fitter.toas.get_errors().to(u.us)
             else:
-                errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us)
+                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
                 errs_pre = fitter.toas.get_errors().to(u.us)
     # Get MJDs
-    if 'orbphase' not in kwargs.keys():
+    if "orbphase" not in kwargs.keys():
         mjds = fitter.toas.get_mjds().value
         if avg == True:
-            mjds = avg_dict['mjds'].value
+            mjds = avg_dict["mjds"].value
             if mixed_ecorr == True:
-                mjds_no_avg = no_avg_dict['mjds'].value
-
-
+                mjds_no_avg = no_avg_dict["mjds"].value
 
     # Now we need to get the orbital phases; start with binary model name
-    if 'orbphase' in kwargs.keys():
-        orbphase = kwargs['orbphase']
+    if "orbphase" in kwargs.keys():
+        orbphase = kwargs["orbphase"]
     else:
-        orbphase = fitter.model.orbital_phase(mjds, radians = False)
+        orbphase = fitter.model.orbital_phase(mjds, radians=False)
         if avg and mixed_ecorr:
-            no_avg_orbphase = fitter.model.orbital_phase(mjds_no_avg, radians = False)
-
+            no_avg_orbphase = fitter.model.orbital_phase(mjds_no_avg, radians=False)
 
     # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr.
     # Create combined arrays
@@ -2405,50 +2729,40 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='f', plotsig = False
         combo_res = np.hstack((res, res_no_avg))
         combo_errs = np.hstack((errs, errs_no_avg))
         combo_orbphase = np.hstack((orbphase, no_avg_orbphase))
-        if restype =='both':
+        if restype == "both":
             combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre))
             combo_res_pre = np.hstack((res_pre, res_no_avg_pre))
-
     # Get colorby flag values (obs, PTA, febe, etc.)
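+    # The grouping flag is taken from the `colorby` argument (default "f",
+    # the frontend/backend flag; "obs" and "pta" are also mapped by
+    # `set_color_and_marker`). Since `colorby` is a named parameter, the
+    # kwargs lookup below appears not to be reachable; the labels are
+    # normally read straight from `fitter.toas[colorby]`.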
- if 'colorby' in kwargs.keys(): - cb = kwargs['colorby'] + if "colorby" in kwargs.keys(): + cb = kwargs["colorby"] else: cb = np.array(fitter.toas[colorby]) - #. Seems to run a little faster but not robust to obs + # . Seems to run a little faster but not robust to obs # cb = np.array(fitter.toas.get_flag_value(colorby)[0]) if avg == True: avg_cb = [] - for iis in avg_dict['indices']: + for iis in avg_dict["indices"]: avg_cb.append(cb[iis[0]]) if mixed_ecorr == True: no_avg_cb = [] - for jjs in no_avg_dict['indices']: + for jjs in no_avg_dict["indices"]: no_avg_cb.append(cb[jjs]) no_ecorr_cb = np.array(no_avg_cb) cb = np.array(avg_cb) # Get the set of unique flag values - if avg==True and mixed_ecorr==True: - cb = np.hstack((cb,no_ecorr_cb)) - + if avg == True and mixed_ecorr == True: + cb = np.hstack((cb, no_ecorr_cb)) CB = set(cb) - if colorby== 'pta': - colorscheme = colorschemes['pta'] - markerscheme = markers['pta'] - elif colorby == 'obs': - colorscheme = colorschemes['observatories'] - markerscheme = markers['observatories'] - elif colorby == 'f': - colorscheme = colorschemes['febe'] - markerscheme = markers['febe'] + colorscheme, markerscheme = set_color_and_marker(colorby) - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (10,4) + figsize = (10, 4) if axs == None: fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) @@ -2456,99 +2770,168 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='f', plotsig = False fig = plt.gcf() ax1 = axs for i, c in enumerate(CB): - inds = np.where(cb==c)[0] + inds = np.where(cb == c)[0] if not inds.tolist(): cb_label = "" else: cb_label = cb[inds][0] # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: try: mkr = markerscheme[cb_label] - if restype == 'both': - mkr_pre = '.' + if restype == "both": + mkr_pre = "." 
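+                    # Prefit points in "both" mode get a plain "." marker so
+                    # they are distinguishable from the postfit symbols.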
except Exception: - mkr = 'x' + mkr = "x" log.log(1, "Color by flag value doesn't have a marker label!!") - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in kwargs.keys(): + clr = kwargs["color"] else: try: clr = colorscheme[cb_label] except Exception: - clr = 'k' + clr = "k" log.log(1, "Color by flag value doesn't have a color!!") - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 0.5 if avg and mixed_ecorr: if plotsig: - combo_sig = combo_res[inds]/combo_errs[inds] - ax1.errorbar(combo_orbphase[inds], combo_sig, yerr=len(combo_errs[inds])*[1], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha) - if restype == 'both': - combo_sig_pre = combo_res_pre[inds]/combo_errs_pre[inds] - ax1.errorbar(combo_orbphase[inds], combo_sig_pre, yerr=len(combo_errs_pre[inds])*[1], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha) - else: - ax1.errorbar(combo_orbphase[inds], combo_res[inds], yerr = combo_errs[inds], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha) - if restype == 'both': - ax1.errorbar(combo_orbphase[inds], combo_res_pre[inds], yerr=combo_errs_pre[inds], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha) + combo_sig = combo_res[inds] / combo_errs[inds] + ax1.errorbar( + combo_orbphase[inds], + combo_sig, + yerr=len(combo_errs[inds]) * [1], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + ) + if restype == "both": + combo_sig_pre = combo_res_pre[inds] / combo_errs_pre[inds] + ax1.errorbar( + combo_orbphase[inds], + combo_sig_pre, + yerr=len(combo_errs_pre[inds]) * [1], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + ) + else: + ax1.errorbar( + combo_orbphase[inds], + combo_res[inds], + yerr=combo_errs[inds], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + ) + if restype == "both": + ax1.errorbar( + combo_orbphase[inds], + combo_res_pre[inds], + yerr=combo_errs_pre[inds], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + ) else: if plotsig: - sig = res[inds]/errs[inds] - ax1.errorbar(orbphase[inds], sig, yerr=len(errs[inds])*[1], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha) - if restype == 'both': - sig_pre = res_pre[inds]/errs_pre[inds] - ax1.errorbar(orbphase[inds], sig_pre, yerr=len(errs_pre[inds])*[1], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha) - else: - ax1.errorbar(orbphase[inds], res[inds], yerr = errs[inds], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha) - if restype == 'both': - ax1.errorbar(orbphase[inds], res_pre[inds], yerr=errs_pre[inds], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha) + sig = res[inds] / errs[inds] + ax1.errorbar( + orbphase[inds], + sig, + yerr=len(errs[inds]) * [1], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + ) + if restype == "both": + sig_pre = res_pre[inds] / errs_pre[inds] + ax1.errorbar( + orbphase[inds], + sig_pre, + yerr=len(errs_pre[inds]) * [1], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + ) + else: + ax1.errorbar( + orbphase[inds], + res[inds], + yerr=errs[inds], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + ) + if restype == "both": + ax1.errorbar( + orbphase[inds], + res_pre[inds], + yerr=errs_pre[inds], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + ) # Set second axis - ax1.set_xlabel(r'Orbital Phase') + ax1.set_xlabel(r"Orbital Phase") ax1.grid(True) if plotsig: if avg and whitened: - 
ax1.set_ylabel('Average Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual/Uncertainty \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual/Uncertainty') + ax1.set_ylabel("Average Residual/Uncertainty") elif whitened and not avg: - ax1.set_ylabel('Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Residual/Uncertainty \n (Whitened)", multialignment="center" + ) else: - ax1.set_ylabel('Residual/Uncertainty') + ax1.set_ylabel("Residual/Uncertainty") else: if avg and whitened: - ax1.set_ylabel('Average Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual ($\mu$s) \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual ($\mu$s)') + ax1.set_ylabel("Average Residual ($\mu$s)") elif whitened and not avg: - ax1.set_ylabel('Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel("Residual ($\mu$s) \n (Whitened)", multialignment="center") else: - ax1.set_ylabel('Residual ($\mu$s)') + ax1.set_ylabel("Residual ($\mu$s)") if legend: if len(CB) > 5: - ncol = int(np.ceil(len(CB)/2)) + ncol = int(np.ceil(len(CB) / 2)) y_offset = 1.15 else: ncol = len(CB) y_offset = 1.0 - ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol) + ax1.legend( + loc="upper center", + bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]), + ncol=ncol, + ) if title: if len(CB) > 5: y_offset = 1.1 else: y_offset = 1.0 - plt.title("%s %s timing residuals" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1]) + plt.title( + "%s %s timing residuals" % (fitter.model.PSR.value, restype), + y=y_offset + 1.0 / figsize[1], + ) if axs == None: plt.tight_layout() if save: @@ -2561,9 +2944,9 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='f', plotsig = False ext += "_NB" else: ext += "_WB" - if restype == 'prefit': + if restype == "prefit": ext += "_prefit" - elif restype == 'postfit': + elif restype == "postfit": ext += "_postfit" elif restype == "both": ext += "_pre_post_fit" @@ -2571,44 +2954,59 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='f', plotsig = False if axs == None: # Define clickable points - text = ax1.text(0,0,"") + text = ax1.text(0, 0, "") stamp_color = "#FD9927" # Define color for highlighting points - #if "430_ASP" in RCVR_BCKNDS or "430_PUPPI" in RCVR_BCKNDS: + # if "430_ASP" in RCVR_BCKNDS or "430_PUPPI" in RCVR_BCKNDS: # stamp_color = "#61C853" - #else: + # else: # stamp_color = "#FD9927" def onclick(event): # Get X and Y axis data xdata = orbphase if plotsig: - ydata = (res/errs).decompose().value + ydata = (res / errs).decompose().value else: ydata = res.value # Get x and y data from click xclick = event.xdata yclick = event.ydata # Calculate scaled distance, find closest point index - d = np.sqrt((xdata - xclick)**2 + ((ydata - yclick)/100.0)**2) + d = np.sqrt((xdata - xclick) ** 2 + ((ydata - yclick) / 100.0) ** 2) ind_close = np.where(np.min(d) == d)[0] # highlight clicked point - ax1.scatter(xdata[ind_close], ydata[ind_close], marker = 'x', c = stamp_color) + ax1.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color) # Print point info text.set_position((xdata[ind_close], ydata[ind_close])) if plotsig: - text.set_text("TOA Params:\n Phase: %.5f \n Res/Err: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n Phase: %.5f \n 
Res/Err: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) else: - text.set_text("TOA Params:\n Phase: %.5f \n Res: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n Phase: %.5f \n Res: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) - fig.canvas.mpl_connect('button_press_event', onclick) + fig.canvas.mpl_connect("button_press_event", onclick) return - -def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whitened = False, save = False, \ - legend = True, title = True, axs = None, **kwargs): +def plot_fd_res_v_freq( + fitter, + plotsig=False, + comp_FD=True, + avg=False, + whitened=False, + save=False, + legend=True, + title=True, + axs=None, + **kwargs, +): """ Make a plot of the residuals vs. frequency, can do WB as well. Note, if WB fitter, comp_FD may not work. If comp_FD is True, the panels are organized as follows: @@ -2648,7 +3046,9 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi if fitter.is_wideband: NB = False if avg == True: - raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") + raise ValueError( + "Cannot epoch average wideband residuals, please change 'avg' to False." + ) else: NB = True @@ -2657,19 +3057,19 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi avg_dict = fitter.resids.ecorr_average(use_noise_model=True) # Get residuals - if 'res' in kwargs.keys(): - res = kwargs['res'] + if "res" in kwargs.keys(): + res = kwargs["res"] else: if NB == True: if avg == True: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) # Check if we want whitened residuals - if whitened == True and ('res' not in kwargs.keys()): + if whitened == True and ("res" not in kwargs.keys()): if avg == True: res = whiten_resids(avg_dict) res = res.to(u.us) @@ -2678,36 +3078,36 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi res = res.to(u.us) # Get errors - if 'errs' in kwargs.keys(): - errs = kwargs['errs'] + if "errs" in kwargs.keys(): + errs = kwargs["errs"] else: if NB == True: if avg == True: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) # Get receiver backends - if 'rcvr_bcknds' in kwargs.keys(): - rcvr_bcknds = kwargs['rcvr_bcknds'] + if "rcvr_bcknds" in kwargs.keys(): + rcvr_bcknds = kwargs["rcvr_bcknds"] else: - rcvr_bcknds = np.array(fitter.toas.get_flag_value('f')[0]) + rcvr_bcknds = np.array(fitter.toas.get_flag_value("f")[0]) if avg == True: avg_rcvr_bcknds = [] - for iis in avg_dict['indices']: + for iis in avg_dict["indices"]: avg_rcvr_bcknds.append(rcvr_bcknds[iis[0]]) rcvr_bcknds = np.array(avg_rcvr_bcknds) # Get the set of unique receiver-bandend combos RCVR_BCKNDS = set(rcvr_bcknds) # get frequencies - if 'freqs' in kwargs.keys(): - freqs = kwargs['freqs'] + if "freqs" in kwargs.keys(): + freqs = kwargs["freqs"] else: if avg == True: - freqs = avg_dict['freqs'].value + freqs = avg_dict["freqs"].value else: freqs = fitter.toas.get_freqs().value @@ -2716,19 +3116,19 
@@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi if axs != None: log.warn("Cannot do full comparison with three panels") axs = None - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (4,12) + figsize = (4, 12) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(313) ax2 = fig.add_subplot(312) ax3 = fig.add_subplot(311) else: - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (4,4) + figsize = (4, 4) if axs == None: fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) @@ -2737,52 +3137,66 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi # Make the plot of residual vs. frequency for i, r_b in enumerate(RCVR_BCKNDS): - inds = np.where(rcvr_bcknds==r_b)[0] + inds = np.where(rcvr_bcknds == r_b)[0] if not inds.tolist(): r_b_label = "" else: r_b_label = rcvr_bcknds[inds][0] # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: mkr = markers[r_b_label] - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in kwargs.keys(): + clr = kwargs["color"] else: clr = colorscheme[r_b_label] - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 1.0 if plotsig: - sig = res[inds]/errs[inds] - ax1.errorbar(freqs[inds], sig, yerr=len(errs[inds])*[1], fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) + sig = res[inds] / errs[inds] + ax1.errorbar( + freqs[inds], + sig, + yerr=len(errs[inds]) * [1], + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) else: - ax1.errorbar(freqs[inds], res[inds], yerr=errs[inds], fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) + ax1.errorbar( + freqs[inds], + res[inds], + yerr=errs[inds], + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) # assign axis labels - ax1.set_xlabel(r'Frequency (MHz)') + ax1.set_xlabel(r"Frequency (MHz)") ax1.grid(True) if plotsig: if avg and whitened: - ylabel = 'Average Residual/Uncertainty \n (Whitened)' + ylabel = "Average Residual/Uncertainty \n (Whitened)" elif avg and not whitened: - ylabel = 'Average Residual/Uncertainty' + ylabel = "Average Residual/Uncertainty" elif whitened and not avg: - ylabel ='Residual/Uncertainty \n (Whitened)' + ylabel = "Residual/Uncertainty \n (Whitened)" else: - ylabel ='Residual/Uncertainty' + ylabel = "Residual/Uncertainty" else: if avg and whitened: - ylabel = 'Average Residual ($\mu$s) \n (Whitened)' + ylabel = "Average Residual ($\mu$s) \n (Whitened)" elif avg and not whitened: - ylabel = 'Average Residual ($\mu$s)' + ylabel = "Average Residual ($\mu$s)" elif whitened and not avg: - ylabel = 'Residual ($\mu$s) \n (Whitened)' + ylabel = "Residual ($\mu$s) \n (Whitened)" else: - ylabel = 'Residual ($\mu$s)' + ylabel = "Residual ($\mu$s)" ax1.set_ylabel(ylabel) # Now if we want to show the other plots, we plot them @@ -2795,22 +3209,22 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi sorted_freqs = np.linspace(np.min(freqs), np.max(freqs), 1000) FD_line = np.zeros(np.size(sorted_freqs)) for i, fd in enumerate(cur_fd): - fd_val = getattr(fitter.model, fd).value * 10**6 # convert to microseconds - FD_offsets += fd_val * np.log(freqs/1000.0)**(i+1) - FD_line += fd_val * np.log(sorted_freqs/1000.0)**(i+1) + fd_val = 
getattr(fitter.model, fd).value * 10**6 # convert to microseconds + FD_offsets += fd_val * np.log(freqs / 1000.0) ** (i + 1) + FD_line += fd_val * np.log(sorted_freqs / 1000.0) ** (i + 1) # Now edit residuals fd_cor_res = res.value + FD_offsets # Now we need to redo the fit without the FD parameters psr_fitter_nofd = copy.deepcopy(fitter) try: - psr_fitter_nofd.model.remove_component('FD') + psr_fitter_nofd.model.remove_component("FD") except: log.warning("No FD parameters in the initial timing model...") # Check if fitter is wideband or not if psr_fitter_nofd.is_wideband: - resids = psr_fitter_nofd.resids.residual_objs['toa'] + resids = psr_fitter_nofd.resids.residual_objs["toa"] else: resids = psr_fitter_nofd.resids @@ -2825,7 +3239,7 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi res_nofd = wres_avg.to(u.us).value else: # need to average - res_nofd = avg['time_resids'].to(u.us).value + res_nofd = avg["time_resids"].to(u.us).value elif whitened: # Need to whiten wres_nofd = whiten_resids(psr_fitter_nofd) @@ -2835,55 +3249,93 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi # Now plot for i, r_b in enumerate(RCVR_BCKNDS): - inds = np.where(rcvr_bcknds==r_b)[0] + inds = np.where(rcvr_bcknds == r_b)[0] if not inds.tolist(): r_b_label = "" else: r_b_label = rcvr_bcknds[inds][0] # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: mkr = markers[r_b_label] - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in kwargs.keys(): + clr = kwargs["color"] else: clr = colorscheme[r_b_label] - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 1.0 if plotsig: - sig = fd_cor_res[inds]/errs[inds] - ax3.errorbar(freqs[inds], sig.value, yerr=len(errs[inds])*[1], fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) - - sig_nofd = res_nofd[inds]/errs[inds].value - ax2.errorbar(freqs[inds], sig_nofd, yerr=len(errs[inds])*[1], fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) + sig = fd_cor_res[inds] / errs[inds] + ax3.errorbar( + freqs[inds], + sig.value, + yerr=len(errs[inds]) * [1], + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) + + sig_nofd = res_nofd[inds] / errs[inds].value + ax2.errorbar( + freqs[inds], + sig_nofd, + yerr=len(errs[inds]) * [1], + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) else: - ax3.errorbar(freqs[inds], fd_cor_res[inds], yerr=errs[inds].value, fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) - - ax2.errorbar(freqs[inds], res_nofd[inds], yerr=errs[inds].value, fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) - - ax3.plot(sorted_freqs, FD_line, c = 'k', ls = '--') + ax3.errorbar( + freqs[inds], + fd_cor_res[inds], + yerr=errs[inds].value, + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) + + ax2.errorbar( + freqs[inds], + res_nofd[inds], + yerr=errs[inds].value, + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) + + ax3.plot(sorted_freqs, FD_line, c="k", ls="--") # assign axis labels - ax3.set_xlabel(r'Frequency (MHz)') + ax3.set_xlabel(r"Frequency (MHz)") ax3.set_ylabel(ylabel) ax3.grid(True) - ax2.set_xlabel(r'Frequency (MHz)') + ax2.set_xlabel(r"Frequency (MHz)") ax2.set_ylabel(ylabel) ax2.grid(True) if legend: if comp_FD: - ax3.legend(loc='upper center', bbox_to_anchor= (0.5, 1.0+1.0/figsize[1]), ncol=int(len(RCVR_BCKNDS)/2)) + ax3.legend( + loc="upper center", + 
bbox_to_anchor=(0.5, 1.0 + 1.0 / figsize[1]),
+                ncol=int(len(RCVR_BCKNDS) / 2),
+            )
         else:
-            ax1.legend(loc='upper center', bbox_to_anchor= (0.5, 1.0+1.0/figsize[1]), ncol=int(len(RCVR_BCKNDS)/2))
+            ax1.legend(
+                loc="upper center",
+                bbox_to_anchor=(0.5, 1.0 + 1.0 / figsize[1]),
+                ncol=int(len(RCVR_BCKNDS) / 2),
+            )
     if title:
-        plt.title("%s FD Paramter Check" % (fitter.model.PSR.value), y=1.0+1.0/figsize[1])
+        plt.title(
+            "%s FD Parameter Check" % (fitter.model.PSR.value), y=1.0 + 1.0 / figsize[1]
+        )
     plt.tight_layout()
     if save:
         ext = ""
@@ -2903,7 +3355,11 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi
 We also offer some options for convenience plotting functions, one that will show all possible summary plots,
 and another that will show just the summary plots that are typically created in finalize_timing.py in that order.
 """
-def summary_plots(fitter, title = None, legends = False, save = False, avg = True, whitened = True):
+
+
+def summary_plots(
+    fitter, title=None, legends=False, save=False, avg=True, whitened=True
+):
     """
     Function to make a composite set of summary plots for sets of TOAs.
     NOTE - This is not the same set of plots as will be in the pdf writer
@@ -2921,7 +3377,9 @@ def summary_plots(fitter, title = None, legends = False, save = False, avg = Tru
 
     if fitter.is_wideband:
         if avg == True:
-            raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.")
+            raise ValueError(
+                "Cannot epoch average wideband residuals, please change 'avg' to False."
+            )
     # Determine how long the figure size needs to be
     figlength = 18
     gs_rows = 6
@@ -2935,7 +3393,7 @@ def summary_plots(fitter, title = None, legends = False, save = False, avg = Tru
         figlength += 18
         gs_rows += 4
     # adjust size if not in a binary
-    if not hasattr(fitter.model, 'binary_model_name'):
+    if not hasattr(fitter.model, "binary_model_name"):
         sub_rows = 1
         sub_len = 3
         if whitened:
@@ -2950,126 +3408,241 @@ def summary_plots(fitter, title = None, legends = False, save = False, avg = Tru
         figlength -= sub_len
         gs_rows -= sub_rows
 
-    fig = plt.figure(figsize = (12,figlength)) # not sure what we'll need for a fig size
+    fig = plt.figure(figsize=(12, figlength))  # not sure what we'll need for a fig size
     if title != None:
-        plt.title(title, y = 1.015, size = 16)
+        plt.title(title, y=1.015, size=16)
     gs = fig.add_gridspec(gs_rows, 2)
     count = 0
     k = 0
 
     # First plot is all residuals vs. time.
     ax0 = fig.add_subplot(gs[count, :])
-    plot_residuals_time(fitter, title = False, axs = ax0, figsize=(12,3))
+    plot_residuals_time(fitter, title=False, axs=ax0, figsize=(12, 3))
     k += 1
 
     # Plot the residuals divided by uncertainty vs. time
-    ax1 = fig.add_subplot(gs[count+k, :])
-    plot_residuals_time(fitter, title = False, legend = False, plotsig = True, axs = ax1, figsize=(12,3))
+    ax1 = fig.add_subplot(gs[count + k, :])
+    plot_residuals_time(
+        fitter, title=False, legend=False, plotsig=True, axs=ax1, figsize=(12, 3)
+    )
     k += 1
 
     # Second plot is residual v. orbital phase
-    if hasattr(fitter.model, 'binary_model_name'):
-        ax2 = fig.add_subplot(gs[count+k, :])
-        plot_residuals_orb(fitter, title = False, legend = False, axs = ax2, figsize=(12,3))
+    if hasattr(fitter.model, "binary_model_name"):
+        ax2 = fig.add_subplot(gs[count + k, :])
+        plot_residuals_orb(fitter, title=False, legend=False, axs=ax2, figsize=(12, 3))
         k += 1
 
     # Now add the measurement vs. 
uncertainty - ax3_0 = fig.add_subplot(gs[count+k, 0]) - ax3_1 = fig.add_subplot(gs[count+k, 1]) - plot_measurements_v_res(fitter, nbin = 50, plotsig=False, title = False, legend = False, axs = ax3_0, \ - figsize=(6,3),) - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, axs = ax3_1, \ - figsize=(6,3),) + ax3_0 = fig.add_subplot(gs[count + k, 0]) + ax3_1 = fig.add_subplot(gs[count + k, 1]) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=False, + title=False, + legend=False, + axs=ax3_0, + figsize=(6, 3), + ) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + axs=ax3_1, + figsize=(6, 3), + ) k += 1 # and the DMX vs. time - ax4 = fig.add_subplot(gs[count+k, :]) - plot_dmx_time(fitter, savedmx = "dmxparse.out", legend = False, title = False, axs = ax4, figsize=(12,3)) + ax4 = fig.add_subplot(gs[count + k, :]) + plot_dmx_time( + fitter, + savedmx="dmxparse.out", + legend=False, + title=False, + axs=ax4, + figsize=(12, 3), + ) k += 1 # And residual vs. Frequency - ax5 = fig.add_subplot(gs[count+k, :]) - plot_residuals_freq(fitter, title = False, legend = False, axs =ax5, figsize=(12,3)) + ax5 = fig.add_subplot(gs[count + k, :]) + plot_residuals_freq(fitter, title=False, legend=False, axs=ax5, figsize=(12, 3)) k += 1 # Now if whitened add the whitened residual plots if whitened: - ax6 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, whitened = True, axs = ax6, figsize=(12,3)) + ax6 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, title=False, whitened=True, axs=ax6, figsize=(12, 3) + ) k += 1 # Plot the residuals divided by uncertainty vs. time - ax7 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, whitened = True, axs = ax7, figsize=(12,3)) + ax7 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + whitened=True, + axs=ax7, + figsize=(12, 3), + ) k += 1 # Second plot is residual v. orbital phase - if hasattr(fitter.model, 'binary_model_name'): - ax8 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, whitened = True, axs = ax8, figsize=(12,3)) + if hasattr(fitter.model, "binary_model_name"): + ax8 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, + title=False, + legend=False, + whitened=True, + axs=ax8, + figsize=(12, 3), + ) k += 1 # Now add the measurement vs. 
uncertainty - ax9_0 = fig.add_subplot(gs[count+k, 0]) - ax9_1 = fig.add_subplot(gs[count+k, 1]) - plot_measurements_v_res(fitter, nbin = 50, plotsig=False, title = False, legend = False, whitened = True,\ - axs = ax9_0, figsize=(6,3),) - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, whitened = True,\ - axs = ax9_1, figsize=(6,3),) + ax9_0 = fig.add_subplot(gs[count + k, 0]) + ax9_1 = fig.add_subplot(gs[count + k, 1]) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=False, + title=False, + legend=False, + whitened=True, + axs=ax9_0, + figsize=(6, 3), + ) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + whitened=True, + axs=ax9_1, + figsize=(6, 3), + ) k += 1 # Now plot the average residuals if avg: - ax10 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, avg = True, axs = ax10, figsize=(12,3)) + ax10 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time(fitter, title=False, avg=True, axs=ax10, figsize=(12, 3)) k += 1 # Plot the residuals divided by uncertainty vs. time - ax11 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, avg = True, axs = ax11, figsize=(12,3)) + ax11 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + avg=True, + axs=ax11, + figsize=(12, 3), + ) k += 1 # Second plot is residual v. orbital phase - if hasattr(fitter.model, 'binary_model_name'): - ax12 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, avg = True, axs = ax12, figsize=(12,3)) + if hasattr(fitter.model, "binary_model_name"): + ax12 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, title=False, legend=False, avg=True, axs=ax12, figsize=(12, 3) + ) k += 1 # Now add the measurement vs. uncertainty - ax13_0 = fig.add_subplot(gs[count+k, 0]) - ax13_1 = fig.add_subplot(gs[count+k, 1]) - plot_measurements_v_res(fitter, nbin = 50, plotsig=False, title = False, legend = False,\ - avg = True, axs = ax13_0, figsize=(6,3)) - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, \ - avg = True, axs = ax13_1, figsize=(6,3)) + ax13_0 = fig.add_subplot(gs[count + k, 0]) + ax13_1 = fig.add_subplot(gs[count + k, 1]) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=False, + title=False, + legend=False, + avg=True, + axs=ax13_0, + figsize=(6, 3), + ) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + avg=True, + axs=ax13_1, + figsize=(6, 3), + ) k += 1 # Now plot the whitened average residuals if avg and whitened: - ax14 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, avg = True, whitened = True, axs = ax14, figsize=(12,3)) + ax14 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time(fitter, avg=True, whitened=True, axs=ax14, figsize=(12, 3)) k += 1 # Plot the residuals divided by uncertainty vs. time - ax15 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, avg = True, whitened = True,\ - axs = ax15, figsize=(12,3)) + ax15 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + avg=True, + whitened=True, + axs=ax15, + figsize=(12, 3), + ) k += 1 # Second plot is residual v. 
orbital phase
-    if hasattr(fitter.model, 'binary_model_name'):
-        ax16 = fig.add_subplot(gs[count+k, :])
-        plot_residuals_orb(fitter, title = False, legend = False, avg = True, whitened = True, axs = ax16, \
-                           figsize=(12,3))
+    if hasattr(fitter.model, "binary_model_name"):
+        ax16 = fig.add_subplot(gs[count + k, :])
+        plot_residuals_orb(
+            fitter,
+            title=False,
+            legend=False,
+            avg=True,
+            whitened=True,
+            axs=ax16,
+            figsize=(12, 3),
+        )
         k += 1
 
     # Now add the measurement vs. uncertainty
-    ax17_0 = fig.add_subplot(gs[count+k, 0])
-    ax17_1 = fig.add_subplot(gs[count+k, 1])
-    plot_measurements_v_res(fitter, nbin = 50, plotsig=False, title = False, legend = False, avg = True, whitened = True, \
-                            axs = ax17_0, figsize=(6,3))
-    plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, avg = True, whitened = True, \
-                            axs = ax17_1, figsize=(6,3))
+    ax17_0 = fig.add_subplot(gs[count + k, 0])
+    ax17_1 = fig.add_subplot(gs[count + k, 1])
+    plot_measurements_v_res(
+        fitter,
+        nbin=50,
+        plotsig=False,
+        title=False,
+        legend=False,
+        avg=True,
+        whitened=True,
+        axs=ax17_0,
+        figsize=(6, 3),
+    )
+    plot_measurements_v_res(
+        fitter,
+        nbin=50,
+        plotsig=True,
+        title=False,
+        legend=False,
+        avg=True,
+        whitened=True,
+        axs=ax17_1,
+        figsize=(6, 3),
+    )
     k += 1
 
     plt.tight_layout()
@@ -3078,8 +3651,11 @@ def summary_plots(fitter, title = None, legends = False, save = False, avg = Tru
 
     return
 
+
 """We also define a function to output the summary plots exactly as is done in finalize_timing.py (for now)"""
-def summary_plots_ft(fitter, title = None, legends = False, save = False):
+
+
+def summary_plots_ft(fitter, title=None, legends=False, save=False):
     """
     Function to make a composite set of summary plots for sets of TOAs
     NOTE - This is not the same set of plots as will be in the pdf writer
@@ -3094,131 +3670,247 @@ def summary_plots_ft(fitter, title = None, legends = False, save = False):
     """
     # Define the figure
     # Determine how long the figure size needs to be
-    figlength = 18*3
+    figlength = 18 * 3
     gs_rows = 13
-    if not hasattr(fitter.model, 'binary_model_name'):
+    if not hasattr(fitter.model, "binary_model_name"):
         figlength -= 9
         gs_rows -= 3
     if fitter.is_wideband:
         figlength -= 9
         gs_rows -= 3
-    fig = plt.figure(figsize = (12,figlength)) # not sure what we'll need for a fig size
+    fig = plt.figure(figsize=(12, figlength))  # not sure what we'll need for a fig size
     if title != None:
-        plt.title(title, y = 1.015, size = 16)
+        plt.title(title, y=1.015, size=16)
     gs = fig.add_gridspec(gs_rows, 2)
     count = 0
     k = 0
 
     # First plot is all residuals vs. time.
     ax0 = fig.add_subplot(gs[count, :])
-    plot_residuals_time(fitter, title = False, axs = ax0, figsize=(12,3))
+    plot_residuals_time(fitter, title=False, axs=ax0, figsize=(12, 3))
     k += 1
 
     # Then the epoch averaged residuals v. time
     if not fitter.is_wideband:
-        ax10 = fig.add_subplot(gs[count+k, :])
-        plot_residuals_time(fitter, title = False, legend = False, avg = True, axs = ax10, figsize=(12,3))
+        ax10 = fig.add_subplot(gs[count + k, :])
+        plot_residuals_time(
+            fitter, title=False, legend=False, avg=True, axs=ax10, figsize=(12, 3)
+        )
        k += 1
 
     # Epoch averaged vs. 
orbital phase - if hasattr(fitter.model, 'binary_model_name'): + if hasattr(fitter.model, "binary_model_name"): if not fitter.is_wideband: - ax12 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, avg = True, axs = ax12, figsize=(12,3)) + ax12 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, title=False, legend=False, avg=True, axs=ax12, figsize=(12, 3) + ) k += 1 else: - ax12 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, axs = ax12, figsize=(12,3)) + ax12 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, title=False, legend=False, axs=ax12, figsize=(12, 3) + ) k += 1 # And DMX vs. time - ax4 = fig.add_subplot(gs[count+k, :]) - plot_dmx_time(fitter, savedmx = "dmxparse.out", legend = False, title = False, axs = ax4, figsize=(12,3)) + ax4 = fig.add_subplot(gs[count + k, :]) + plot_dmx_time( + fitter, + savedmx="dmxparse.out", + legend=False, + title=False, + axs=ax4, + figsize=(12, 3), + ) k += 1 # Whitened residuals v. time - ax6 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, whitened = True, axs = ax6, figsize=(12,3)) + ax6 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time(fitter, whitened=True, axs=ax6, figsize=(12, 3)) k += 1 # Whitened epoch averaged residuals v. time if not fitter.is_wideband: - ax15 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, legend = False, plotsig = False, avg = True, \ - whitened = True, axs = ax15, figsize=(12,3)) + ax15 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=False, + avg=True, + whitened=True, + axs=ax15, + figsize=(12, 3), + ) k += 1 # Whitened epoch averaged residuals v. orbital phase - if hasattr(fitter.model, 'binary_model_name'): + if hasattr(fitter.model, "binary_model_name"): if not fitter.is_wideband: - ax16 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, \ - avg = True, whitened = True, axs = ax16, figsize=(12,3)) + ax16 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax16, + figsize=(12, 3), + ) k += 1 else: - ax16 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, \ - avg = False, whitened = True, axs = ax16, figsize=(12,3)) + ax16 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=False, + whitened=True, + axs=ax16, + figsize=(12, 3), + ) k += 1 # Now add the measurement vs. 
uncertainty for both all residuals and epoch averaged
-    ax3_0 = fig.add_subplot(gs[count+k, 0])
-    ax3_1 = fig.add_subplot(gs[count+k, 1])
-    plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, plotsig=False, \
-                            whitened = True, axs = ax3_0, figsize=(6,3))
+    ax3_0 = fig.add_subplot(gs[count + k, 0])
+    ax3_1 = fig.add_subplot(gs[count + k, 1])
+    plot_measurements_v_res(
+        fitter,
+        nbin=50,
+        title=False,
+        legend=False,
+        plotsig=False,
+        whitened=True,
+        axs=ax3_0,
+        figsize=(6, 3),
+    )
     if not fitter.is_wideband:
-        plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, avg = True, \
-                                whitened = True, axs = ax3_1, figsize=(6,3))
+        plot_measurements_v_res(
+            fitter,
+            nbin=50,
+            title=False,
+            legend=False,
+            avg=True,
+            whitened=True,
+            axs=ax3_1,
+            figsize=(6, 3),
+        )
         k += 1
     else:
-        plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, avg = False, \
-                                whitened = False, axs = ax3_1, figsize=(6,3))
+        plot_measurements_v_res(
+            fitter,
+            nbin=50,
+            title=False,
+            legend=False,
+            avg=False,
+            whitened=False,
+            axs=ax3_1,
+            figsize=(6, 3),
+        )
         k += 1
 
     # Whitened residual/uncertainty v. time
-    ax26 = fig.add_subplot(gs[count+k, :])
-    plot_residuals_time(fitter, plotsig = True, title = False, legend = False, whitened = True,\
-                        axs = ax26, figsize=(12,3))
+    ax26 = fig.add_subplot(gs[count + k, :])
+    plot_residuals_time(
+        fitter,
+        plotsig=True,
+        title=False,
+        legend=False,
+        whitened=True,
+        axs=ax26,
+        figsize=(12, 3),
+    )
     k += 1
 
     # Epoch averaged Whitened residual/uncertainty v. time
     if not fitter.is_wideband:
-        ax25 = fig.add_subplot(gs[count+k, :])
-        plot_residuals_time(fitter, title = False, legend = False, plotsig = True, \
-                            avg = True, whitened = True, axs = ax25, figsize=(12,3))
+        ax25 = fig.add_subplot(gs[count + k, :])
+        plot_residuals_time(
+            fitter,
+            title=False,
+            legend=False,
+            plotsig=True,
+            avg=True,
+            whitened=True,
+            axs=ax25,
+            figsize=(12, 3),
+        )
         k += 1
 
     # Epoch averaged Whitened residual/uncertainty v. orbital phase
-    if hasattr(fitter.model, 'binary_model_name'):
+    if hasattr(fitter.model, "binary_model_name"):
         if not fitter.is_wideband:
-            ax36 = fig.add_subplot(gs[count+k, :])
-            plot_residuals_orb(fitter, title = False, legend = False, plotsig = True, avg = True, \
-                               whitened = True, axs = ax36, figsize=(12,3))
+            ax36 = fig.add_subplot(gs[count + k, :])
+            plot_residuals_orb(
+                fitter,
+                title=False,
+                legend=False,
+                plotsig=True,
+                avg=True,
+                whitened=True,
+                axs=ax36,
+                figsize=(12, 3),
+            )
            k += 1
         else:
-            ax36 = fig.add_subplot(gs[count+k, :])
-            plot_residuals_orb(fitter, title = False, legend = False, plotsig = True, avg = False, \
-                               whitened = True, axs = ax36, figsize=(12,3))
+            ax36 = fig.add_subplot(gs[count + k, :])
+            plot_residuals_orb(
+                fitter,
+                title=False,
+                legend=False,
+                plotsig=True,
+                avg=False,
+                whitened=True,
+                axs=ax36,
+                figsize=(12, 3),
+            )
             k += 1
 
     # Now add the measurement vs. uncertainty for both all residuals/uncertainty and epoch averaged/uncertainty
-    ax17_0 = fig.add_subplot(gs[count+k, 0])
-    ax17_1 = fig.add_subplot(gs[count+k, 1])
-    plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, whitened = True,\
-                            axs = ax17_0, figsize=(6,3))
+    ax17_0 = fig.add_subplot(gs[count + k, 0])
+    ax17_1 = fig.add_subplot(gs[count + k, 1])
+    plot_measurements_v_res(
+        fitter,
+        nbin=50,
+        plotsig=True,
+        title=False,
+        legend=False,
+        whitened=True,
+        axs=ax17_0,
+        figsize=(6, 3),
+    )
    if not fitter.is_wideband:
-        plot_measurements_v_res(fitter, nbin = 50, title = False, plotsig=True, \
-                                legend = False, avg = True, whitened = True, axs = ax17_1, figsize=(6,3))
+        plot_measurements_v_res(
+            fitter,
+            nbin=50,
+            title=False,
+            plotsig=True,
+            legend=False,
+            avg=True,
+            whitened=True,
+            axs=ax17_1,
+            figsize=(6, 3),
+        )
         k += 1
     else:
-        plot_measurements_v_res(fitter, nbin = 50, title = False, plotsig=True, \
-                                legend = False, avg = False, whitened =False, axs = ax17_1, figsize=(6,3))
+        plot_measurements_v_res(
+            fitter,
+            nbin=50,
+            title=False,
+            plotsig=True,
+            legend=False,
+            avg=False,
+            whitened=False,
+            axs=ax17_1,
+            figsize=(6, 3),
+        )
         k += 1
 
     # Now plot the frequencies of the TOAs vs. time
-    ax5 = fig.add_subplot(gs[count+k, :])
-    plot_residuals_freq(fitter, title = False, legend = False, axs =ax5, figsize=(12,3))
+    ax5 = fig.add_subplot(gs[count + k, :])
+    plot_residuals_freq(fitter, title=False, legend=False, axs=ax5, figsize=(12, 3))
     k += 1
 
     plt.tight_layout()
@@ -3227,8 +3919,9 @@ def summary_plots_ft(fitter, title = None, legends = False, save = False):
 
     return
 
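+# Example usage (an illustrative sketch, not part of the original module; the
+# fitter `fo` and the pulsar name are hypothetical, and `fo` is assumed to
+# have been built and fit already, e.g. via a pint_pal TimingConfiguration):
+#
+#     fo.fit_toas()
+#     summary_plots_ft(fo, title="PSR J1234+5678", save=True)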
+
 # JUST THE PLOTS FOR THE PDF WRITERS LEFT
-def plots_for_summary_pdf_nb(fitter, title = None, legends = False):
+def plots_for_summary_pdf_nb(fitter, title=None, legends=False):
     """
     Function to make a composite set of summary plots for sets of TOAs to be put into a summary pdf.
     This is for Narrowband timing only. For Wideband timing, use `plots_for_summary_pdf_wb`.
@@ -3243,137 +3936,297 @@ def plots_for_summary_pdf_nb(fitter, title = None, legends = False):
     """
 
     if fitter.is_wideband:
-        raise ValueError("Cannot use this function with WidebandTOAFitter, please use `plots_for_summary_pdf_wb` instead.")
+        raise ValueError(
+            "Cannot use this function with WidebandTOAFitter, please use `plots_for_summary_pdf_wb` instead."
+        )
     # Need to make four sets of plots
     for ii in range(4):
         if ii != 3:
-            fig = plt.figure(figsize=(8,10.0),dpi=100)
+            fig = plt.figure(figsize=(8, 10.0), dpi=100)
         else:
-            fig = plt.figure(figsize=(8,5),dpi=100)
+            fig = plt.figure(figsize=(8, 5), dpi=100)
         if title != None:
-            plt.title(title, y = 1.08, size = 14)
+            plt.title(title, y=1.08, size=14)
         if ii == 0:
-            gs = fig.add_gridspec(nrows = 4, ncols = 1)
+            gs = fig.add_gridspec(nrows=4, ncols=1)
 
-            ax0 = fig.add_subplot(gs[0,:])
-            ax1 = fig.add_subplot(gs[1,:])
-            ax2 = fig.add_subplot(gs[2,:])
-            ax3 = fig.add_subplot(gs[3,:])
+            ax0 = fig.add_subplot(gs[0, :])
+            ax1 = fig.add_subplot(gs[1, :])
+            ax2 = fig.add_subplot(gs[2, :])
+            ax3 = fig.add_subplot(gs[3, :])
             # Plot residuals v. time
-            plot_residuals_time(fitter, title = False, axs = ax0, figsize=(8, 2.5))
+            plot_residuals_time(fitter, title=False, axs=ax0, figsize=(8, 2.5))
             # Plot averaged residuals v. 
time - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_time(fitter, avg = True, axs = ax1, title = False, legend = False, figsize=(8,2.5)) + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_time( + fitter, + avg=True, + axs=ax1, + title=False, + legend=False, + figsize=(8, 2.5), + ) else: - log.warning("ECORR not in model, cannot generate epoch averaged residuals. Plots will show all residuals.") - plot_residuals_time(fitter, avg = False, axs = ax1, title = False, legend = False, figsize=(8,2.5)) + log.warning( + "ECORR not in model, cannot generate epoch averaged residuals. Plots will show all residuals." + ) + plot_residuals_time( + fitter, + avg=False, + axs=ax1, + title=False, + legend=False, + figsize=(8, 2.5), + ) # Plot residuals v orbital phase - if hasattr(fitter.model, 'binary_model_name'): - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_orb(fitter, title = False, legend = False, avg = True, axs = ax2, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=True, + axs=ax2, + figsize=(8, 2.5), + ) else: - plot_residuals_orb(fitter, title = False, legend = False, avg = False, axs = ax2, figsize=(8,2.5)) + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=False, + axs=ax2, + figsize=(8, 2.5), + ) # plot dmx v. time - if 'dispersion_dmx' in fitter.model.get_components_by_category().keys(): - plot_dmx_time(fitter, savedmx = "dmxparse.out", legend = False, title = False, axs = ax3, figsize=(8,2.5)) + if "dispersion_dmx" in fitter.model.get_components_by_category().keys(): + plot_dmx_time( + fitter, + savedmx="dmxparse.out", + legend=False, + title=False, + axs=ax3, + figsize=(8, 2.5), + ) else: log.warning("No DMX bins in timing model, cannot plot DMX v. 
Time.") plt.tight_layout() plt.savefig("%s_summary_plot_1_nb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 1: - if hasattr(fitter.model, 'binary_model_name'): - gs = fig.add_gridspec(4,2) - ax2 = fig.add_subplot(gs[2,:]) - ax3 = fig.add_subplot(gs[3,0]) - ax4 = fig.add_subplot(gs[3,1]) - else: - gs = fig.add_gridspec(3,2) - ax3 = fig.add_subplot(gs[2,0]) - ax4 = fig.add_subplot(gs[2,1]) - ax0 = fig.add_subplot(gs[0,:]) - ax1 = fig.add_subplot(gs[1,:]) + if hasattr(fitter.model, "binary_model_name"): + gs = fig.add_gridspec(4, 2) + ax2 = fig.add_subplot(gs[2, :]) + ax3 = fig.add_subplot(gs[3, 0]) + ax4 = fig.add_subplot(gs[3, 1]) + else: + gs = fig.add_gridspec(3, 2) + ax3 = fig.add_subplot(gs[2, 0]) + ax4 = fig.add_subplot(gs[2, 1]) + ax0 = fig.add_subplot(gs[0, :]) + ax1 = fig.add_subplot(gs[1, :]) # plot whitened residuals v time - plot_residuals_time(fitter, title = False, whitened = True, axs = ax0, figsize=(8,2.5)) + plot_residuals_time( + fitter, title=False, whitened=True, axs=ax0, figsize=(8, 2.5) + ) # plot whitened, epoch averaged residuals v time - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_time(fitter, title = False, legend = False, avg = True, \ - whitened = True, axs = ax1, figsize=(8,2.5)) + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_time( + fitter, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax1, + figsize=(8, 2.5), + ) else: - plot_residuals_time(fitter, title = False, legend = False, avg = False, \ - whitened = True, axs = ax1, figsize=(8,2.5)) + plot_residuals_time( + fitter, + title=False, + legend=False, + avg=False, + whitened=True, + axs=ax1, + figsize=(8, 2.5), + ) # Plot whitened, epoch averaged residuals v orbital phase - if hasattr(fitter.model, 'binary_model_name'): - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_orb(fitter, title = False, legend = False, avg = True, whitened = True, \ - axs = ax2, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) else: - plot_residuals_orb(fitter, title = False, legend = False, avg = False, whitened = True, \ - axs = ax2, figsize=(8,2.5)) + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=False, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) # plot number of whitened residuals histogram - plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, whitened = True,\ - axs = ax3, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + legend=False, + whitened=True, + axs=ax3, + figsize=(4, 2.5), + ) # plot number of whitened, epoch averaged residuals histogram - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, avg = True, whitened = True, \ - axs = ax4, figsize=(4,2.5)) + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax4, + figsize=(4, 2.5), + ) else: - plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, avg = False, whitened = True, \ - axs = ax4, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + legend=False, + 
avg=False, + whitened=True, + axs=ax4, + figsize=(4, 2.5), + ) plt.tight_layout() plt.savefig("%s_summary_plot_2_nb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 2: - if hasattr(fitter.model, 'binary_model_name'): - gs = fig.add_gridspec(4,2) - ax2 = fig.add_subplot(gs[2,:]) - ax3 = fig.add_subplot(gs[3,0]) - ax4 = fig.add_subplot(gs[3,1]) - else: - gs = fig.add_gridspec(3,2) - ax3 = fig.add_subplot(gs[2,0]) - ax4 = fig.add_subplot(gs[2,1]) - ax0 = fig.add_subplot(gs[0,:]) - ax1 = fig.add_subplot(gs[1,:]) + if hasattr(fitter.model, "binary_model_name"): + gs = fig.add_gridspec(4, 2) + ax2 = fig.add_subplot(gs[2, :]) + ax3 = fig.add_subplot(gs[3, 0]) + ax4 = fig.add_subplot(gs[3, 1]) + else: + gs = fig.add_gridspec(3, 2) + ax3 = fig.add_subplot(gs[2, 0]) + ax4 = fig.add_subplot(gs[2, 1]) + ax0 = fig.add_subplot(gs[0, :]) + ax1 = fig.add_subplot(gs[1, :]) # plot whitened residuals/uncertainty v. time - plot_residuals_time(fitter, plotsig = True, title = False, whitened = True, axs = ax0, figsize=(8,2.5)) + plot_residuals_time( + fitter, + plotsig=True, + title=False, + whitened=True, + axs=ax0, + figsize=(8, 2.5), + ) # plot whitened, epoch averaged residuals/uncertainty v. time - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, avg = True,\ - whitened = True, axs = ax1, figsize=(8,2.5)) + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + avg=True, + whitened=True, + axs=ax1, + figsize=(8, 2.5), + ) else: - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, avg = False,\ - whitened = True, axs = ax1, figsize=(8,2.5)) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + avg=False, + whitened=True, + axs=ax1, + figsize=(8, 2.5), + ) # plot whitened, epoch averaged residuals/uncertainty v. 
orbital phase - if hasattr(fitter.model, 'binary_model_name'): - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_orb(fitter, title = False, legend = False, plotsig = True, \ - avg = True, whitened = True, axs = ax2, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_orb( + fitter, + title=False, + legend=False, + plotsig=True, + avg=True, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) else: - plot_residuals_orb(fitter, title = False, legend = False, plotsig = True, \ - avg = False, whitened = True, axs = ax2, figsize=(8,2.5)) + plot_residuals_orb( + fitter, + title=False, + legend=False, + plotsig=True, + avg=False, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) # plot number of whitened residuals/uncertainty histogram - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, whitened = True,\ - axs = ax3, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + whitened=True, + axs=ax3, + figsize=(4, 2.5), + ) # plot number of whitened, epoch averaged residuals/uncertainties histogram - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, \ - avg = True, whitened = True, axs = ax4, figsize=(4,2.5)) + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax4, + figsize=(4, 2.5), + ) else: - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, \ - avg = False, whitened = True, axs = ax4, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + avg=False, + whitened=True, + axs=ax4, + figsize=(4, 2.5), + ) plt.tight_layout() plt.savefig("%s_summary_plot_3_nb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 3: - gs = fig.add_gridspec(1,1) + gs = fig.add_gridspec(1, 1) ax0 = fig.add_subplot(gs[0]) - plot_residuals_freq(fitter, title = False, legend = True, axs =ax0, figsize=(8,4)) + plot_residuals_freq( + fitter, title=False, legend=True, axs=ax0, figsize=(8, 4) + ) plt.tight_layout() plt.savefig("%s_summary_plot_4_nb.png" % (fitter.model.PSR.value)) plt.close() -def plots_for_summary_pdf_wb(fitter, title = None, legends = False): + +def plots_for_summary_pdf_wb(fitter, title=None, legends=False): """ Function to make a composite set of summary plots for sets of TOAs to be put into a summary pdf. This is for Wideband timing only. For Narrowband timing, use `plots_for_summary_pdf_nb`. @@ -3387,160 +4240,207 @@ def plots_for_summary_pdf_wb(fitter, title = None, legends = False): legend [boolean] : If True, will add legends to ALL plots [default: False]. """ if not fitter.is_wideband: - raise ValueError("Cannot use this function with non-WidebandTOAFitter, please use `plots_for_summary_pdf_nb` instead.") + raise ValueError( + "Cannot use this function with non-WidebandTOAFitter, please use `plots_for_summary_pdf_nb` instead." 
+ ) # Need to make four sets of plots for ii in range(4): if ii != 3: - fig = plt.figure(figsize=(8,10.0),dpi=100) + fig = plt.figure(figsize=(8, 10.0), dpi=100) else: - fig = plt.figure(figsize=(8,5),dpi=100) + fig = plt.figure(figsize=(8, 5), dpi=100) if title != None: - plt.title(title, y = 1.08, size = 14) + plt.title(title, y=1.08, size=14) if ii == 0: - if hasattr(fitter.model, 'binary_model_name'): - gs = fig.add_gridspec(nrows = 4, ncols = 1) - ax2 = fig.add_subplot(gs[2,:]) - ax3 = fig.add_subplot(gs[3,:]) - else: - gs = fig.add_gridspec(nrows = 3, ncols = 1) - ax3 = fig.add_subplot(gs[2,:]) - ax0 = fig.add_subplot(gs[0,:]) - ax1 = fig.add_subplot(gs[1,:]) + if hasattr(fitter.model, "binary_model_name"): + gs = fig.add_gridspec(nrows=4, ncols=1) + ax2 = fig.add_subplot(gs[2, :]) + ax3 = fig.add_subplot(gs[3, :]) + else: + gs = fig.add_gridspec(nrows=3, ncols=1) + ax3 = fig.add_subplot(gs[2, :]) + ax0 = fig.add_subplot(gs[0, :]) + ax1 = fig.add_subplot(gs[1, :]) # Plot time residuals v. time - plot_residuals_time(fitter, title = False, axs = ax0, figsize=(8, 2.5)) + plot_residuals_time(fitter, title=False, axs=ax0, figsize=(8, 2.5)) # Plot DM residuals v. time - plot_dm_residuals(fitter, save = False, legend = False, title = False, axs = ax1, figsize=(8, 2.5)) + plot_dm_residuals( + fitter, save=False, legend=False, title=False, axs=ax1, figsize=(8, 2.5) + ) # Plot time residuals v. orbital phase - if hasattr(fitter.model, 'binary_model_name'): - plot_residuals_orb(fitter, title = False, legend = False, axs = ax2, figsize=(8,2.5)) - plot_dmx_time(fitter, savedmx = "dmxparse.out", legend = False, title = False, axs = ax3, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + plot_residuals_orb( + fitter, title=False, legend=False, axs=ax2, figsize=(8, 2.5) + ) + plot_dmx_time( + fitter, + savedmx="dmxparse.out", + legend=False, + title=False, + axs=ax3, + figsize=(8, 2.5), + ) plt.tight_layout() plt.savefig("%s_summary_plot_1_wb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 1: - if hasattr(fitter.model, 'binary_model_name'): - gs = fig.add_gridspec(3,2) - ax2 = fig.add_subplot(gs[1,:]) - ax3 = fig.add_subplot(gs[2,0]) - ax4 = fig.add_subplot(gs[2,1]) - else: - gs = fig.add_gridspec(2,2) - ax3 = fig.add_subplot(gs[1,0]) - ax4 = fig.add_subplot(gs[1,1]) - ax0 = fig.add_subplot(gs[0,:]) - #ax1 = fig.add_subplot(gs[1,:]) + if hasattr(fitter.model, "binary_model_name"): + gs = fig.add_gridspec(3, 2) + ax2 = fig.add_subplot(gs[1, :]) + ax3 = fig.add_subplot(gs[2, 0]) + ax4 = fig.add_subplot(gs[2, 1]) + else: + gs = fig.add_gridspec(2, 2) + ax3 = fig.add_subplot(gs[1, 0]) + ax4 = fig.add_subplot(gs[1, 1]) + ax0 = fig.add_subplot(gs[0, :]) + # ax1 = fig.add_subplot(gs[1,:]) # Plot whitened time residuals v. time - plot_residuals_time(fitter, title = False, whitened = True, axs = ax0, figsize=(8,2.5)) + plot_residuals_time( + fitter, title=False, whitened=True, axs=ax0, figsize=(8, 2.5) + ) # Plot whitened time residuals v. 
time - if hasattr(fitter.model, 'binary_model_name'): - plot_residuals_orb(fitter, title = False, legend = False, whitened = True, axs = ax2, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + plot_residuals_orb( + fitter, + title=False, + legend=False, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) # Plot number of whitened residuals histograms - plot_measurements_v_res(fitter, nbin = 50, title = False, plotsig=False, legend = False, whitened = True,\ - axs = ax3, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + plotsig=False, + legend=False, + whitened=True, + axs=ax3, + figsize=(4, 2.5), + ) # plot number of DM residuals histograms - plot_measurements_v_dmres(fitter, nbin = 50, legend = False, title = False, axs = ax4) + plot_measurements_v_dmres( + fitter, nbin=50, legend=False, title=False, axs=ax4 + ) plt.tight_layout() plt.savefig("%s_summary_plot_2_wb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 2: - if hasattr(fitter.model, 'binary_model_name'): - gs = fig.add_gridspec(4,2) - ax2 = fig.add_subplot(gs[2,:]) - ax3 = fig.add_subplot(gs[3,0]) - ax4 = fig.add_subplot(gs[3,1]) - else: - gs = fig.add_gridspec(3,2) - ax3 = fig.add_subplot(gs[2,0]) - ax4 = fig.add_subplot(gs[2,1]) - ax0 = fig.add_subplot(gs[0,:]) - ax1 = fig.add_subplot(gs[1,:]) + if hasattr(fitter.model, "binary_model_name"): + gs = fig.add_gridspec(4, 2) + ax2 = fig.add_subplot(gs[2, :]) + ax3 = fig.add_subplot(gs[3, 0]) + ax4 = fig.add_subplot(gs[3, 1]) + else: + gs = fig.add_gridspec(3, 2) + ax3 = fig.add_subplot(gs[2, 0]) + ax4 = fig.add_subplot(gs[2, 1]) + ax0 = fig.add_subplot(gs[0, :]) + ax1 = fig.add_subplot(gs[1, :]) # plot whitened time residuals/uncertainty v time - plot_residuals_time(fitter, plotsig = True, title = False, whitened = True, axs = ax0, figsize=(8,2.5)) + plot_residuals_time( + fitter, + plotsig=True, + title=False, + whitened=True, + axs=ax0, + figsize=(8, 2.5), + ) # Plot DM residuals/uncertainty v. 
time - plot_dm_residuals(fitter, plotsig = True, save = False, legend = False, title = False, axs = ax1, figsize=(8, 2.5)) + plot_dm_residuals( + fitter, + plotsig=True, + save=False, + legend=False, + title=False, + axs=ax1, + figsize=(8, 2.5), + ) # Plot whitened time residuals/uncertainty v orbital phase - if hasattr(fitter.model, 'binary_model_name'): - plot_residuals_orb(fitter, title = False, legend = False, \ - plotsig = True, whitened = True, axs = ax2, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + plot_residuals_orb( + fitter, + title=False, + legend=False, + plotsig=True, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) # plot number of whitened time residuals/uncertainty histograms - plot_measurements_v_res(fitter, nbin = 50, title = False, plotsig=True, legend = False, whitened = True,\ - axs = ax3, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + plotsig=True, + legend=False, + whitened=True, + axs=ax3, + figsize=(4, 2.5), + ) # plot number of DM residuals/uncertainty histograms - plot_measurements_v_dmres(fitter, plotsig = True, nbin = 50, legend = False, title = False, \ - axs = ax4) + plot_measurements_v_dmres( + fitter, plotsig=True, nbin=50, legend=False, title=False, axs=ax4 + ) plt.tight_layout() plt.savefig("%s_summary_plot_3_wb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 3: - gs = fig.add_gridspec(1,1) + gs = fig.add_gridspec(1, 1) ax0 = fig.add_subplot(gs[0]) - plot_residuals_freq(fitter, title = False, legend = True, axs =ax0, figsize=(8,4)) + plot_residuals_freq( + fitter, title=False, legend=True, axs=ax0, figsize=(8, 4) + ) plt.tight_layout() plt.savefig("%s_summary_plot_4_wb.png" % (fitter.model.PSR.value)) plt.close() -def plot_settings(): + +def plot_settings(colorby="f"): """ Initialize plot rc params, define color scheme """ fig_width_pt = 620 - inches_per_pt = 1.0/72.27 # Convert pt to inches - golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio - fig_width = fig_width_pt*inches_per_pt # width in inches - fig_height = fig_width*golden_mean*2 # height in inches - fig_size = [fig_width,fig_height] + inches_per_pt = 1.0 / 72.27 # Convert pt to inches + golden_mean = (np.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio + fig_width = fig_width_pt * inches_per_pt # width in inches + fig_height = fig_width * golden_mean * 2 # height in inches + fig_size = [fig_width, fig_height] fontsize = 20 # for xlabel, backend labels - plotting_params = {'backend': 'pdf', 'axes.labelsize': 12, 'lines.markersize': 4, 'font.size': 12, 'xtick.major.size': 6, 'xtick.minor.size': 3, 'ytick.major.size': 6, 'ytick.minor.size': 3, 'xtick.major.width': 0.5, 'ytick.major.width': 0.5, 'xtick.minor.width': 0.5, 'ytick.minor.width': 0.5, 'lines.markeredgewidth': 1, 'axes.linewidth': 1.2, 'legend.fontsize': 10, 'xtick.labelsize': 12, 'ytick.labelsize': 10, 'savefig.dpi': 400, 'path.simplify': True, 'font.family': 'serif', 'font.serif': 'Times', 'text.usetex': True, 'figure.figsize': fig_size, 'text.latex.preamble': r'\usepackage{amsmath}'} + plotting_params = { + "backend": "pdf", + "axes.labelsize": 12, + "lines.markersize": 4, + "font.size": 12, + "xtick.major.size": 6, + "xtick.minor.size": 3, + "ytick.major.size": 6, + "ytick.minor.size": 3, + "xtick.major.width": 0.5, + "ytick.major.width": 0.5, + "xtick.minor.width": 0.5, + "ytick.minor.width": 0.5, + "lines.markeredgewidth": 1, + "axes.linewidth": 1.2, + "legend.fontsize": 10, + "xtick.labelsize": 12, + "ytick.labelsize": 10, + "savefig.dpi": 400, + "path.simplify": True, + 
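+        # The entries that follow switch matplotlib to a serif/LaTeX look;
+        # note "text.usetex": True below assumes a working LaTeX installation.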
"font.family": "serif", + "font.serif": "Times", + "text.usetex": True, + "figure.figsize": fig_size, + "text.latex.preamble": r"\usepackage{amsmath}", + } plt.rcParams.update(plotting_params) - # Color scheme for consistent reciever-backend combos, same as published 12.5 yr - colorschemes = {'thankful_2':{ - "327_ASP": "#BE0119", - "327_PUPPI": "#BE0119", - "430_ASP": "#FD9927", - "430_PUPPI": "#FD9927", - "L-wide_ASP": "#6BA9E2", - "L-wide_PUPPI": "#6BA9E2", - "Rcvr1_2_GASP": "#407BD5", - "Rcvr1_2_GUPPI": "#407BD5", - "Rcvr1_2_VEGAS": "#61C853", - "Rcvr_800_GASP": "#61C853", - "Rcvr_800_GUPPI": "#61C853", - "Rcvr_800_VEGAS": "#61C853", - "S-wide_ASP": "#855CA0", - "S-wide_PUPPI": "#855CA0", - "1.5GHz_YUPPI": "#45062E", - "3GHz_YUPPI": "#E5A4CB", - "6GHz_YUPPI": "#40635F", - "CHIME": "#ECE133", - }} - - # marker dictionary to be used if desired, currently all 'x' - markers = {"327_ASP": "x", - "327_PUPPI": "x", - "430_ASP": "x", - "430_PUPPI": "x", - "L-wide_ASP": "x", - "L-wide_PUPPI": "x", - "Rcvr1_2_GASP": "x", - "Rcvr1_2_GUPPI": "x", - "Rcvr1_2_VEGAS": "x", - "Rcvr_800_GASP": "x", - "Rcvr_800_GUPPI": "x", - "Rcvr_800_VEGAS": "x", - "S-wide_ASP": "x", - "S-wide_PUPPI": "x", - "1.5GHz_YUPPI": "x", - "3GHz_YUPPI": "x", - "6GHz_YUPPI": "x", - "CHIME": "x", - } - - # Define the color map option - colorscheme = colorschemes['thankful_2'] - - return markers, colorscheme + colorscheme, markerscheme = set_color_and_marker(colorby) + return markerscheme, colorscheme + def get_fitter(yaml): """ @@ -3556,13 +4456,14 @@ def get_fitter(yaml): mo, to = tc.get_model_and_toas(excised=True, usepickle=True) tc.manual_cuts(to) receivers = lu.get_receivers(to) - if tc.get_toa_type() == 'WB': + if tc.get_toa_type() == "WB": lu.add_feDMJumps(mo, receivers) else: lu.add_feJumps(mo, receivers) fo = tc.construct_fitter(to, mo) return fo, mo + def get_avg_years(fo_nb, fo_wb, avg_dict): """ Get MJDS for each data set in years @@ -3575,13 +4476,14 @@ def get_avg_years(fo_nb, fo_wb, avg_dict): """ mjd_nb = fo_nb.toas.get_mjds().value - years_nb = (mjd_nb - 51544.0)/365.25 + 2000.0 + years_nb = (mjd_nb - 51544.0) / 365.25 + 2000.0 mjd_wb = fo_wb.toas.get_mjds().value - years_wb = (mjd_wb - 51544.0)/365.25 + 2000.0 - mjds_avg = avg_dict['mjds'].value - years_avg = (mjds_avg - 51544.0)/365.25 + 2000.0 + years_wb = (mjd_wb - 51544.0) / 365.25 + 2000.0 + mjds_avg = avg_dict["mjds"].value + years_avg = (mjds_avg - 51544.0) / 365.25 + 2000.0 return years_nb, years_wb, years_avg + def get_backends(fo_nb, fo_wb, avg_dict): """ Grab backends via flags to make plotting easier @@ -3593,17 +4495,18 @@ def get_backends(fo_nb, fo_wb, avg_dict): avg_dict: from fo.resids.ecorr_average() """ - rcvr_bcknds_nb = np.array(fo_nb.toas.get_flag_value('f')[0]) + rcvr_bcknds_nb = np.array(fo_nb.toas.get_flag_value("f")[0]) rcvr_set_nb = set(rcvr_bcknds_nb) - rcvr_bcknds_wb = np.array(fo_wb.toas.get_flag_value('f')[0]) + rcvr_bcknds_wb = np.array(fo_wb.toas.get_flag_value("f")[0]) rcvr_set_wb = set(rcvr_bcknds_wb) avg_rcvr_bcknds = [] - for iis in avg_dict['indices']: + for iis in avg_dict["indices"]: avg_rcvr_bcknds.append(rcvr_bcknds_nb[iis[0]]) rcvr_bcknds_avg = np.array(avg_rcvr_bcknds) rcvr_set_avg = set(rcvr_bcknds_avg) return rcvr_bcknds_nb, rcvr_bcknds_wb, rcvr_bcknds_avg + def get_DMX_info(fo): """ Get DMX timeseries info from dmxparse @@ -3614,12 +4517,13 @@ def get_DMX_info(fo): """ dmx_dict = pint.utils.dmxparse(fo) - DMXs = dmx_dict['dmxs'].value - DMX_vErrs = dmx_dict['dmx_verrs'].value - DMX_center_MJD = 
dmx_dict['dmxeps'].value - DMX_center_Year = (DMX_center_MJD - 51544.0)/365.25 + 2000.0 + DMXs = dmx_dict["dmxs"].value + DMX_vErrs = dmx_dict["dmx_verrs"].value + DMX_center_MJD = dmx_dict["dmxeps"].value + DMX_center_Year = (DMX_center_MJD - 51544.0) / 365.25 + 2000.0 return DMXs, DMX_vErrs, DMX_center_Year + def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): """ Plot color-divided-by-receiver/BE points on any axis @@ -3636,47 +4540,44 @@ def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): """ markers, colorscheme = plot_settings() for i, r_b in enumerate(set(bknds)): - inds = np.where(bknds==r_b)[0] + inds = np.where(bknds == r_b)[0] if not inds.tolist(): r_b_label = "" else: r_b_label = bknds[inds][0] mkr = markers[r_b_label] clr = colorscheme[r_b_label] - ax.errorbar(x[inds], y[inds] - (rn_off * u.us), yerr=err[inds], fmt=mkr, color=clr, label=r_b_label, alpha=0.5) - - ylim = (max(np.abs(y - (rn_off * u.us))).value + 0.6 * max(np.abs(err)).value) + ax.errorbar( + x[inds], + y[inds] - (rn_off * u.us), + yerr=err[inds], + fmt=mkr, + color=clr, + label=r_b_label, + alpha=0.5, + ) + + ylim = max(np.abs(y - (rn_off * u.us))).value + 0.6 * max(np.abs(err)).value ax.set_ylim(-1 * ylim * 1.08, ylim * 1.08) if be_legend: handles, labels = ax.get_legend_handles_labels() labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0])) - label_names = {"327_ASP": "ASP 327 MHz", - "327_PUPPI": "PUPPI 327 MHz", - "430_ASP": "ASP 430 MHz", - "430_PUPPI": "PUPPI 430 MHz", - "L-wide_ASP": "ASP L-wide", - "L-wide_PUPPI": "PUPPI L-wide", - "Rcvr1_2_GASP": "GASP L-band", - "Rcvr1_2_GUPPI": "GUPPI L-band", - "Rcvr1_2_VEGAS": "VEGAS L-band", - "Rcvr_800_GASP": "GASP 820 MHz", - "Rcvr_800_GUPPI": "GUPPI 820 MHz", - "Rcvr_800_VEGAS": "VEGAS 820 MHz", - "S-wide_ASP": "ASP S-wide", - "S-wide_PUPPI": "PUPPI S-wide", - "1.5GHz_YUPPI": "YUPPI 1.5 GHz", - "3GHz_YUPPI": "YUPPI 3 GHz", - "6GHz_YUPPI": "YUPPI 6 GHz", - "CHIME": "CHIME", - } fixed_labels = [label_names[l] for l in labels] - if be_format == 'vert': + if be_format == "vert": plt.legend(handles, fixed_labels, loc=(1.005, 0), fontsize=12) - if be_format == 'horiz': - plt.legend(handles, fixed_labels, loc='lower left', ncol=len(fixed_labels), borderpad=0.1, columnspacing=0.1) + if be_format == "horiz": + plt.legend( + handles, + fixed_labels, + loc="lower left", + ncol=len(fixed_labels), + borderpad=0.1, + columnspacing=0.1, + ) ax.set_ylim(-1 * ylim * 1.2, ylim * 1.08) + def rec_labels(axs, bcknds, years_avg): """ Mark transitions between backends @@ -3703,64 +4604,135 @@ def rec_labels(axs, bcknds, years_avg): has_yuppi = False for r in bcknds: - if 'ASP' in r: + if "ASP" in r: has_asp = True - if 'PUPPI' in r: + if "PUPPI" in r: has_puppi = True - if 'GASP' in r: + if "GASP" in r: has_gasp = True - if 'GUPPI' in r: + if "GUPPI" in r: has_guppi = True - if 'YUPPI' in r: + if "YUPPI" in r: has_yuppi = True if has_asp and has_puppi: for a in axs: has_ao = True - a.axvline(puppi, linewidth=0.75, color='k', linestyle='--', alpha=0.6) + a.axvline(puppi, linewidth=0.75, color="k", linestyle="--", alpha=0.6) if has_gasp and has_guppi: for a in axs: has_gbt = True - a.axvline(guppi, linewidth=0.75, color='k', linestyle='--', alpha=0.6) + a.axvline(guppi, linewidth=0.75, color="k", linestyle="--", alpha=0.6) ycoord = 1.1 x_min_yr = min(years_avg) x_max_yr = max(years_avg) tform = axs[0].get_xaxis_transform() - va = ha = 'center' + va = ha = "center" if has_ao and has_gbt: if has_yuppi: - 
axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI/GUPPI/YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (puppi + x_max_yr) / 2.0, + ycoord, + "PUPPI/GUPPI/YUPPI", + transform=tform, + va=va, + ha=ha, + ) else: - axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI/GUPPI', transform=tform, va=va, ha=ha) - axs[0].text((guppi+x_min_yr)/2., ycoord, 'ASP/GASP', transform=tform, va=va, ha=ha) - axs[0].text((guppi+puppi)/2., ycoord, 'ASP/GUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (puppi + x_max_yr) / 2.0, + ycoord, + "PUPPI/GUPPI", + transform=tform, + va=va, + ha=ha, + ) + axs[0].text( + (guppi + x_min_yr) / 2.0, ycoord, "ASP/GASP", transform=tform, va=va, ha=ha + ) + axs[0].text( + (guppi + puppi) / 2.0, ycoord, "ASP/GUPPI", transform=tform, va=va, ha=ha + ) elif has_ao and not has_gbt: if has_yuppi: - axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI/YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (puppi + x_max_yr) / 2.0, + ycoord, + "PUPPI/YUPPI", + transform=tform, + va=va, + ha=ha, + ) else: - axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI', transform=tform, va=va, ha=ha) - axs[0].text((puppi+x_min_yr)/2. - 0.2, ycoord, 'ASP', transform=tform, va=va, ha=ha) + axs[0].text( + (puppi + x_max_yr) / 2.0, ycoord, "PUPPI", transform=tform, va=va, ha=ha + ) + axs[0].text( + (puppi + x_min_yr) / 2.0 - 0.2, ycoord, "ASP", transform=tform, va=va, ha=ha + ) elif not has_ao and has_gbt: if has_yuppi: - axs[0].text((puppi+x_max_yr)/2., ycoord, 'GUPPI/YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (puppi + x_max_yr) / 2.0, + ycoord, + "GUPPI/YUPPI", + transform=tform, + va=va, + ha=ha, + ) else: - axs[0].text((guppi+x_max_yr)/2., ycoord, 'GUPPI', transform=tform, va=va, ha=ha) - axs[0].text((guppi+x_min_yr)/2., ycoord, 'GASP', transform=tform, va=va, ha=ha) + axs[0].text( + (guppi + x_max_yr) / 2.0, ycoord, "GUPPI", transform=tform, va=va, ha=ha + ) + axs[0].text( + (guppi + x_min_yr) / 2.0, ycoord, "GASP", transform=tform, va=va, ha=ha + ) if has_puppi and not has_asp and not has_gasp and not has_guppi: if has_yuppi: - axs[0].text((x_min_yr+x_max_yr)/2., ycoord, 'PUPPI/YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (x_min_yr + x_max_yr) / 2.0, + ycoord, + "PUPPI/YUPPI", + transform=tform, + va=va, + ha=ha, + ) else: - axs[0].text((x_min_yr+x_max_yr)/2., ycoord, 'PUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (x_min_yr + x_max_yr) / 2.0, + ycoord, + "PUPPI", + transform=tform, + va=va, + ha=ha, + ) if has_guppi and not has_asp and not has_gasp and not has_puppi: if has_yuppi: - axs[0].text((x_min_yr+x_max_yr)/2., ycoord, 'GUPPI/YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (x_min_yr + x_max_yr) / 2.0, + ycoord, + "GUPPI/YUPPI", + transform=tform, + va=va, + ha=ha, + ) else: - axs[0].text((x_min_yr+x_max_yr)/2., ycoord, 'GUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (x_min_yr + x_max_yr) / 2.0, + ycoord, + "GUPPI", + transform=tform, + va=va, + ha=ha, + ) if has_yuppi and not has_guppi and not has_puppi: - axs[0].text((x_min_yr+x_max_yr)/2., ycoord, 'YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (x_min_yr + x_max_yr) / 2.0, ycoord, "YUPPI", transform=tform, va=va, ha=ha + ) + def rn_sub(testing, rn_subtract, fo_nb, fo_wb): if rn_subtract: @@ -3768,8 +4740,8 @@ def rn_sub(testing, rn_subtract, fo_nb, fo_wb): rn_nb = 0.0 rn_wb = 0.0 else: - rn_nb = fo_nb.current_state.xhat[0] * fo_nb.current_state.M[0,0] * 1e6 - rn_wb = fo_wb.current_state.xhat[0] * fo_wb.current_state.M[0,0] * 1e6 + rn_nb = fo_nb.current_state.xhat[0] * 
fo_nb.current_state.M[0, 0] * 1e6 + rn_wb = fo_wb.current_state.xhat[0] * fo_wb.current_state.M[0, 0] * 1e6 else: rn_nb = 0.0 rn_wb = 0.0 From 0d652e4560858939507937f34c863e4d106c6dd2 Mon Sep 17 00:00:00 2001 From: tcromartie Date: Wed, 4 Sep 2024 17:09:55 +0000 Subject: [PATCH 029/193] Removed commented entries from yaml --- src/pint_pal/plot_settings.yaml | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/src/pint_pal/plot_settings.yaml b/src/pint_pal/plot_settings.yaml index 9f67ae96..f8bb90f0 100644 --- a/src/pint_pal/plot_settings.yaml +++ b/src/pint_pal/plot_settings.yaml @@ -1,3 +1,6 @@ +# This YAML contains various marker colors and shapes for the three plotting schemes +# Changes here will be applied to notebook plotting immediately (after restarting the kernel) + obs_c: { "ao": "#6BA9E2", "arecibo": "#6BA9E2", @@ -111,25 +114,17 @@ ng20_c: { "430_PUPPI": "#FD9927", "L-wide_ASP": "#BDB6F6", "L-wide_PUPPI": "#BDB6F6", - # "L-wide_ASP": "#C3BEF7", - # "L-wide_PUPPI": "#A393BF", - # "Rcvr1_2_GASP": "#81BDEE", "Rcvr1_2_GASP": "#79A3E2", "Rcvr1_2_GUPPI": "#79A3E2", "Rcvr1_2_VEGAS": "#79A3E2", "Rcvr_800_GASP": "#8DD883", "Rcvr_800_GUPPI": "#8DD883", "Rcvr_800_VEGAS": "#8DD883", - # "VEGAS": "#465922", - # "S-wide_ASP": "#D81159", - # "S-wide_PUPPI": "#D81159", "S-wide_ASP": "#C4457A", "S-wide_PUPPI": "#C4457A", "1.5GHz_YUPPI": "#EBADCB", "3GHz_YUPPI": "#E79CC1", "6GHz_YUPPI": "#DB6BA1", - # "CHIME": "#F3689B", - # "Rcvr_CHIME": "#F3689B", } obs_m: { From f30616e3d6417edd652b724ebc012f2393950bda Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Wed, 11 Sep 2024 11:50:36 -0700 Subject: [PATCH 030/193] moving gibbs sampler to enterprise extensions; adding some initial discovery set up --- src/pint_pal/gibbs_sampler.py | 743 ---------------------------------- src/pint_pal/noise_utils.py | 46 ++- 2 files changed, 37 insertions(+), 752 deletions(-) delete mode 100644 src/pint_pal/gibbs_sampler.py diff --git a/src/pint_pal/gibbs_sampler.py b/src/pint_pal/gibbs_sampler.py deleted file mode 100644 index 8675ffdd..00000000 --- a/src/pint_pal/gibbs_sampler.py +++ /dev/null @@ -1,743 +0,0 @@ -import numpy as np -from tqdm import tqdm -import scipy.linalg as sl -from functools import cached_property -import os -import glob -import warnings -from enterprise_extensions import model_utils, blocks -from PTMCMCSampler.PTMCMCSampler import PTSampler as ptmcmc -from enterprise.signals import signal_base, gp_signals -from scipy.linalg import solve_triangular as st_solve -from scipy.linalg import cho_factor, cho_solve - - -class GibbsSampler(object): - - """ - The Gibbs Method class used for single-pulsar noise analyses. - - Based on: - - Article by van Haasteren & Vallisneri (2014), - "New advances in the Gaussian-process approach - to pulsar-timing data analysis", - Physical Review D, Volume 90, Issue 10, id.104012 - arXiv:1407.1838 - - Initial structure of the code is based on https://github.com/jellis18/gibbs_student_t - - Authors: - - S. R. Taylor - N. Laal - J. G. Baier - """ - - def __init__( - self, - psr=None, - Tspan=None, - select="backend", - vary_wn=False, - inc_ecorr=False, - ecorr_type="kernel", - noise_dict=None, - tm_marg=False, - vary_rn=True, - rn_components=30, - tnequad=True, - #log10rhomin=-9.0, i think these would only apply to HD correlations - #log10rhomax=-4.0, on gibbs sampling. IRN and DM/CHROM are diagonal ?? 
- vary_dm=False, - dm_components=50, - vary_chrom=False, - chrom_components=50, - include_quadratic=False, - ): - """ - Parameters - ----------- - - psr : object - instance of an ENTERPRISE psr object for a single pulsar - - Tspan: float (optional) - if given, the baseline of the pulsar is fixed to the input value. If not, - baseline is determined inetrnally - - select: str - the selection of backend ('backend' or 'none') for the white-noise parameters - - self.vary_wn: bool - whether to vary the white noise - - inc_ecorr: bool - whether to include ecorr - - ecorr_type: str - the type of ecorr to use. Choose between 'basis' or 'kernel' - - noise_dict: dict - white noise dictionary in case 'self.vary_wn' is set to False - - tm_marg: bool - whether to marginalize over timing model parameters (do not use this if you are varying the white noise!) - - rn_components: int - number of red noise Fourier modes to include - - dm_components: int - number of DM noise Fourier modes to include - - chrom_components: int - number of chromatic noise Fourier modes to include - - dm_var: bool - wheter to include a free spectrum gibbs dm_gp - - chrom_var: bool - whether to include a free spectrum gibbs chrom_gp - - include_quadratic: bool - whether or not to fit out a quadratic trend in chrom_gp (think DM2) - - log10rhomin: float - lower bound for the log10 of the rho parameter. - - log10rhomax: float - upper bound for the log10 of the rho parameter - - tnequad: string - whether to use the temponest convension of efac and equad - """ - - self.psr = [psr] - if Tspan: - self.Tspan = Tspan - else: - self.Tspan = model_utils.get_tspan(self.psr) - self.name = self.psr[0].name - self.inc_ecorr = inc_ecorr - self.ecorr_type = ecorr_type - self.vary_wn = vary_wn - self.tm_marg = tm_marg - self.wn_names = ["efac", "equad", "ecorr"] - self.rn_components = rn_components - self.dm_components = dm_components - self.chrom_components = chrom_components - self.vary_rn = vary_rn - self.vary_dm = vary_dm - self.vary_chrom = vary_chrom - self.include_quadratic = include_quadratic - #self.low = 10 ** (2 * self.rhomin) - #self.high = 10 ** (2 * self.rhomax) - # Making the pta object - # need to keep track of which parameters are being varied - # they appear alphebetically in signal_collections - # FIXME: this would probably break if you added a solar wind model - self.rn_idx = -1 - self.dm_idx = -1 - int(self.vary_rn) - self.chrom_idx = -1 - int(self.vary_rn) - int(self.vary_dm) - if self.tm_marg: - tm = gp_signals.MarginalizingTimingModel(use_svd=True) - if self.vary_wn: - warnings.warn( - "***FYI: the timing model is marginalized for. 
This will slow down the WN sampling!!***" - ) - else: - tm = gp_signals.TimingModel(use_svd=True) - - if self.ecorr_type == "basis": - wn = blocks.white_noise_block( - vary=self.vary_wn, - inc_ecorr=self.inc_ecorr, - gp_ecorr=True, - select=select, - tnequad=tnequad, - ) - else: - wn = blocks.white_noise_block( - vary=self.vary_wn, - inc_ecorr=self.inc_ecorr, - gp_ecorr=False, - select=select, - tnequad=tnequad, - ) - - if self.vary_rn: - rn = blocks.red_noise_block( - psd="spectrum", - prior="log-uniform", - Tspan=self.Tspan, - components=self.rn_components, - gamma_val=None, - ) - - if self.vary_dm: - dm = blocks.dm_noise_block( - gp_kernel='diag', - psd='spectrum', - prior='log-uniform', - Tspan=self.Tspan, - components=self.dm_components, - gamma_val=None, - coefficients=False - ) - - if self.vary_chrom: - chrom = blocks.chromatic_noise_block( - gp_kernel='diag', - psd='spectrum', - prior='log-uniform', - idx=4, - include_quadratic=self.include_quadratic, - Tspan=self.Tspan, - name='chrom', - components=self.chrom_components, - ) - - - s = tm + wn - - if self.vary_rn: - s += rn - if self.vary_dm: - s += dm - if self.vary_chrom: - s += chrom - - self.pta = signal_base.PTA( - [s(p) for p in self.psr], - lnlikelihood=signal_base.LogLikelihoodDenseCholesky, - ) - #print(self.pta.signals.keys()) - if not self.vary_wn: - self.pta.set_default_params(noise_dict) - self.Nmat = self.pta.get_ndiag(params={})[0] - self.TNr = self.pta.get_TNr(params={})[0] - self.TNT = self.pta.get_TNT(params={})[0] - else: - self.Nmat = None - - if self.inc_ecorr and "basis" in self.ecorr_type and self.vary_wn: - # grabbing priors on ECORR params - for ct, par in enumerate(self.pta.params): - if "ecorr" in str(par): - ind = ct - ecorr_priors = str(self.pta.params[ind].params[0]) - ecorr_priors = ecorr_priors.split("(")[1].split(")")[0].split(", ") - self.ecorrmin, self.ecorrmax = ( - 10 ** (2 * float(ecorr_priors[0].split("=")[1])), - 10 ** (2 * float(ecorr_priors[1].split("=")[1])), - ) - #print(self.ecorrmin, self.ecorrmax) - - # Getting residuals - self._residuals = self.pta.get_residuals()[0] - ## FIXME : maybe don't cache this -- could lead to memory issues. - # Intial guess for the model params - self._xs = np.array([p.sample() - for p in self.pta.params], dtype=object) - # Initializign the b-coefficients. - # The shape is 2*rn_comp+2*dm_comp+2*chrom_comp if tm_marg = True - # if tm_marg = False, - # then the shape is more because there are some tm params in there? 
- self._b = np.zeros(self.pta.get_basis(self._xs)[0].shape[1]) - # when including dm and chromatic models, the b's are - # the concantenation of the red noise, dm, and chromatic noise fourier coefficients - #print("len b: ", len(self._b)) - #print(self.pta.get_basis(self._xs)[0].shape) - self.Tmat = self.pta.get_basis(params={})[0] - self.phiinv = None - # print(self._xs.shape) - # print(self.pta.params) - # print("dm", self.get_dm_param_indices) - # print("chrom", self.get_chrom_param_indices) - # print("rn:", self.get_rn_param_indices) - # find basis indices of GW process - ### jeremy : changing the below from gwid to rn_id and adding dm_id and chrom_id - self.rn_id = [] - self.dm_id = [] - self.chrom_id = [] - ct = 0 - psigs = [sig for sig in self.pta.signals.keys() if self.name in sig] - for sig in psigs: - Fmat = self.pta.signals[sig].get_basis() - if "red_noise" in self.pta.signals[sig].name: - self.rn_id.append(ct + np.arange(0, Fmat.shape[1])) - ct+=Fmat.shape[1] - if "dm_gp" in self.pta.signals[sig].name: - self.dm_id.append(ct + np.arange(0, Fmat.shape[1])) - ct+=Fmat.shape[1] - if "chrom_gp" in self.pta.signals[sig].name: - self.chrom_id.append(ct + np.arange(0, Fmat.shape[1])) - ct+=Fmat.shape[1] - ### jeremy : chaning the above to red_noise and adding dm and chrom as well - # Avoid None-basis processes. - # Also assume red + GW signals share basis. - if Fmat is not None and "red" not in sig and 'dm_gp' not in sig and 'chrom_gp' not in sig: - ct += Fmat.shape[1] - #print(sig) - #print(ct) - #print("rn", self.rn_id) - #print("dm", self.dm_id) - #print("chrom", self.chrom_id) - - - @cached_property - def params(self): - return self.pta.params - - @cached_property - def param_names(self): - return self.pta.param_names - - def map_params(self, xs): - return self.pta.map_params(xs) - - @cached_property - def get_rn_param_indices(self): - ind = [] - for ct, par in enumerate(self.param_names): - if "red_noise" in par: - ind.append(ct) - return np.array(ind) - - @cached_property - def get_dm_param_indices(self): - ind = [] - for ct, par in enumerate(self.param_names): - if "dm_gp" in par: - ind.append(ct) - return np.array(ind) - - @cached_property - def get_chrom_param_indices(self): - ind = [] - for ct, par in enumerate(self.param_names): - if "chrom_gp" in par: - ind.append(ct) - return np.array(ind) - - @cached_property - def get_efacequad_indices(self): - ind = [] - if "basis" in self.ecorr_type: - for ct, par in enumerate(self.param_names): - if "efac" in par or "equad" in par: - ind.append(ct) - else: - for ct, par in enumerate(self.param_names): - if "ecorr" in par or "efac" in par or "equad" in par: - ind.append(ct) - return np.array(ind) - - @cached_property - def get_basis_ecorr_indices(self): - ind = [] - for ct, par in enumerate(self.param_names): - if "ecorr" in par: - ind.append(ct) - return np.array(ind) - - def update_red_params(self, xs): - """ - Function to perform red_noise_log10_rho updates given - the red noise Fourier coefficients. - """ - tau = self._b[tuple(self.rn_id)] ** 2 - tau = (tau[0::2] + tau[1::2]) / 2 - - Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) - x = np.random.default_rng().uniform(0, 1, size=tau.shape) - rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low)) - xs[self.rn_idx] = 0.5 * np.log10(rhonew) - return xs - - def update_dm_params(self, xs): - """ - Function to perform dm_gp_log10_rho updates given - the dm gp Fourier coefficients. 
- """ - tau = self._b[tuple(self.dm_id)] ** 2 - tau = (tau[0::2] + tau[1::2]) / 2 - - Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) - x = np.random.default_rng().uniform(0, 1, size=tau.shape) - rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low)) - xs[self.dm_idx] = 0.5 * np.log10(rhonew) - return xs - - def update_chrom_params(self, xs): - """ - Function to perform chrom_gp_log10_rho updates given - the chromatic gp Fourier coefficients. - """ - tau = self._b[tuple(self.chrom_id)] ** 2 - tau = (tau[0::2] + tau[1::2]) / 2 - - Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) - x = np.random.default_rng().uniform(0, 1, size=tau.shape) - rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low)) - xs[self.chrom_idx] = 0.5 * np.log10(rhonew) - return xs - - def update_b(self, xs): - """ - Function to perform updates on Fourier coefficients given other model parameters. - """ - params = self.pta.map_params(np.hstack(xs)) - self._phiinv = self.pta.get_phiinv(params, logdet=False)[0] - - try: - TNT = self.TNT.copy() - except BaseException: - T = self.Tmat - TNT = self.Nmat.solve(T, left_array=T) - try: - TNr = self.TNr.copy() - except BaseException: - T = self.Tmat - TNr = self.Nmat.solve(self._residuals, left_array=T) - - np.fill_diagonal(TNT, TNT.diagonal() + self._phiinv) - try: - chol = cho_factor( - TNT, - lower=True, - overwrite_a=False, - check_finite=False) - mean = cho_solve( - chol, - b=TNr, - overwrite_b=False, - check_finite=False) - self._b = mean + st_solve( - chol[0], - np.random.normal(loc=0, scale=1, size=TNT.shape[0]), - lower=True, - unit_diagonal=False, - overwrite_b=False, - check_finite=False, - trans=1, - ) - except np.linalg.LinAlgError: - print("oh sh******t; a spiiiiiddddeeeerrrrrr") - if self.bchain.any(): - self._b = self.bchain[ - np.random.default_rng().integers(0, len(self.bchain)) - ] - else: - bchain = np.memmap( - self._savepath + "/chain_1", - dtype="float32", - mode="r", - shape=(self.niter, self.len_x + self.len_b), - )[:, -len(self._b):] - self._b = bchain[np.random.default_rng().integers( - 0, len(bchain))] - - def update_white_params(self, xs, iters=10): - """ - Function to perform WN updates given other model parameters. - If kernel ecorr is chosen, WN includes ecorr as well. - """ - # get white noise parameter indices - wind = self.get_efacequad_indices - xnew = xs - x0 = xnew[wind].copy() - lnlike0, lnprior0 = self.get_lnlikelihood_white( - x0), self.get_wn_lnprior(x0) - lnprob0 = lnlike0 + lnprior0 - - for ii in range( - self.start_wn_iter + 1, - self.start_wn_iter + iters + 1): - x0, lnlike0, lnprob0 = self.sampler_wn.PTMCMCOneStep( - x0, lnlike0, lnprob0, ii - ) - xnew[wind] = x0 - self.start_wn_iter = ii - - # Do some caching of "later needed" parameters for improved performance - self.Nmat = self.pta.get_ndiag(self.map_params(xnew))[0] - Tmat = self.Tmat - if "basis" not in self.ecorr_type: - self.TNT = self.Nmat.solve(Tmat, left_array=Tmat) - else: - TN = Tmat / self.Nmat[:, None] - self.TNT = Tmat.T @ TN - residuals = self._residuals - self.rNr = np.sum(residuals**2 / self.Nmat) - self.logdet_N = np.sum(np.log(self.Nmat)) - self.d = TN.T @ residuals - - return xnew - - def update_basis_ecorr_params(self, xs, iters=10): - """ - Function to perform basis ecorr updates. 
- """ - # get white noise parameter indices - eind = self.get_basis_ecorr_indices - xnew = xs - x0 = xnew[eind].copy() - lnlike0, lnprior0 = self.get_basis_ecorr_lnlikelihood( - x0 - ), self.get_basis_ecorr_lnprior(x0) - lnprob0 = lnlike0 + lnprior0 - - for ii in range( - self.start_ec_iter + 1, - self.start_ec_iter + iters + 1): - x0, lnlike0, lnprob0 = self.sampler_ec.PTMCMCOneStep( - x0, lnlike0, lnprob0, ii - ) - xnew[eind] = x0 - self.start_ec_iter = ii - - return xnew - - def get_lnlikelihood_white(self, xs): - """ - Function to calculate WN log-liklihood. - """ - x0 = self._xs.copy() - x0[self.get_efacequad_indices] = xs - - params = self.map_params(x0) - Nmat = self.pta.get_ndiag(params)[0] - # whitened residuals - yred = self._residuals - self.Tmat @ self._b - try: - if "basis" not in self.ecorr_type: - rNr, logdet_N = Nmat.solve(yred, left_array=yred, logdet=True) - else: - rNr = np.sum(yred**2 / Nmat) - logdet_N = np.sum(np.log(Nmat)) - except BaseException: - return -np.inf - # first component of likelihood function - loglike = -0.5 * (logdet_N + rNr) - - return loglike - - def get_basis_ecorr_lnlikelihood(self, xs): - """ - Function to calculate basis ecorr log-liklihood. - """ - x0 = np.hstack(self._xs.copy()) - x0[self.get_basis_ecorr_indices] = xs - - params = self.map_params(x0) - # start likelihood calculations - loglike = 0 - # get auxiliaries - phiinv, logdet_phi = self.pta.get_phiinv(params, logdet=True)[0] - # first component of likelihood function - loglike += -0.5 * (self.logdet_N + self.rNr) - # Red noise piece - Sigma = self.TNT + np.diag(phiinv) - try: - cf = sl.cho_factor(Sigma) - expval = sl.cho_solve(cf, self.d) - except np.linalg.LinAlgError: - return -np.inf - - logdet_sigma = np.sum(2 * np.log(np.diag(cf[0]))) - loglike += 0.5 * (self.d @ expval - logdet_sigma - logdet_phi) - - return loglike - - def get_wn_lnprior(self, xs): - """ - Function to calculate WN log-prior. - """ - x0 = self._xs.copy() - x0[self.get_efacequad_indices] = xs - - return np.sum([p.get_logpdf(value=x0[ct]) - for ct, p in enumerate(self.params)]) - - def get_basis_ecorr_lnprior(self, xs): - """ - Function to calculate basis ecorr log-prior. 
- """ - x0 = self._xs.copy() - x0[self.get_basis_ecorr_indices] = xs - - return np.sum([p.get_logpdf(value=x0[ct]) - for ct, p in enumerate(self.params)]) - - def sample( - self, - niter=int(1e4), - wniters=100, - eciters=15, - savepath=None, - SCAMweight=30, - AMweight=15, - DEweight=50, - covUpdate=1000, - burn=10000, - **kwargs - ): - """ - Gibbs Sampling - - Parameters - ----------- - niter: integer - total number of Gibbs sampling iterations - - wniters: - number of white noise MCMC sampling iterations within each Gibbs step - - eciters: - number of basis ecorr MCMC sampling iterations within each Gibbs step - - savepath: str - the path to save the chains - - covUpdate: integer - Number of iterations between AM covariance updates - - SCAMweight: integer - Weight of SCAM jumps in overall jump cycle - - AMweight: integer - Weight of AM jumps in overall jump cycle - - DEweight: integer - Weight of DE jumps in overall jump cycle - - kwargs: dict - PTMCMC initialization settings not mentioned above - """ - self.start_wn_iter = 0 - self.start_ec_iter = 0 - - os.makedirs(savepath, exist_ok=True) - - if self.vary_wn: - # large number to avoid saving the white noise choice in a txt file - isave = int(4e9) - thin = 1 - Niter = int(niter * wniters + 1) - - x0 = self._xs[self.get_efacequad_indices] - ndim = len(x0) - cov = np.diag( - np.ones(ndim) * 0.01**2 - ) # helps to tune MCMC proposal distribution - self.sampler_wn = ptmcmc( - ndim, - self.get_lnlikelihood_white, - self.get_wn_lnprior, - cov, - outDir=savepath, - resume=False, - ) - self.sampler_wn.initialize( - Niter=Niter, - isave=isave, - thin=thin, - SCAMweight=SCAMweight, - AMweight=AMweight, - DEweight=DEweight, - covUpdate=covUpdate, - burn=burn, - **kwargs - ) - - if "basis" in self.ecorr_type and self.vary_wn and self.inc_ecorr: - x0 = self._xs[self.get_basis_ecorr_indices] - ndim = len(x0) - cov = np.diag(np.ones(ndim) * 0.01**2) - self.sampler_ec = ptmcmc( - ndim, - self.get_basis_ecorr_lnlikelihood, - self.get_basis_ecorr_lnprior, - cov, - outDir=savepath, - resume=False, - ) - self.sampler_ec.initialize( - Niter=Niter, - isave=isave, - thin=thin, - SCAMweight=SCAMweight, - AMweight=AMweight, - DEweight=DEweight, - covUpdate=covUpdate, - burn=burn, - **kwargs - ) - - np.savetxt(savepath + "/pars.txt", - list(map(str, self.pta.param_names)), fmt="%s") - np.savetxt( - savepath + "/priors.txt", - list(map(lambda x: str(x.__repr__()), self.pta.params)), - fmt="%s", - ) - if self.vary_rn: - rn_freqs = np.arange( - 1 / self.Tspan, - (self.rn_components + 0.001) / self.Tspan, - 1 / self.Tspan) - np.save(savepath + "/rn_freqs.npy", rn_freqs) - - if self.vary_dm: - dm_freqs = np.arange( - 1 / self.Tspan, - (self.dm_components + 0.001) / self.Tspan, - 1 / self.Tspan) - np.save(savepath + "/dm_freqs.npy", dm_freqs) - if self.vary_chrom: - chrom_freqs = np.arange( - 1 / self.Tspan, - (self.chrom_components + 0.001) / self.Tspan, - 1 / self.Tspan) - np.save(savepath + "/chrom_freqs.npy", chrom_freqs) - [os.remove(dpa) for dpa in glob.glob(savepath + "/*jump.txt")] - - xnew = self._xs.copy() - - len_b = len(self._b) - len_x = len(np.hstack(self._xs)) - self._savepath = savepath - - fp = np.lib.format.open_memmap( - savepath + "/chain_1.npy", - mode="w+", - dtype="float32", - shape=(niter, len_x + len_b), - fortran_order=False, - ) - - pbar = tqdm(range(niter), colour="GREEN") - pbar.set_description("Sampling %s" % self.name) -# num_gibbs = np.sum([int(self.vary_rn), int(self.vary_dm), int(self.vary_chrom)]) - for ii in pbar: - if 
self.vary_wn: - xnew = self.update_white_params(xnew, iters=wniters) - - if self.inc_ecorr and "basis" in self.ecorr_type: - xnew = self.update_basis_ecorr_params(xnew, iters=eciters) - -# turn = ii % num_gibbs - #if self.vary_rn and turn == 0: - self.update_b(xs=xnew) - xnew = self.update_red_params(xs=xnew) - #if self.vary_dm and turn == 1: - #self.update_b(xs=xnew) - xnew = self.update_dm_params(xs=xnew) - #if self.vary_chrom and turn == 2: - #self.update_b(xs=xnew) - xnew = self.update_chrom_params(xs=xnew) - - fp[ii, -len_b:] = self._b - fp[ii, 0:len_x] = np.hstack(xnew) - diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 0b573fc1..4e329ad1 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -8,7 +8,6 @@ import pint.models as pm from pint.models.parameter import maskParameter -from pint_pal.gibbs_sampler import GibbsSampler import matplotlib as mpl import matplotlib.pyplot as pl @@ -193,7 +192,7 @@ def model_noise(mo, to, which_sampler = 'PTMCMCSampler', ========== mo: PINT (or tempo2) timing model to: PINT (or tempo2) TOAs - sampler: either 'PTMCMCSampler' or 'gibbs' + sampler: either 'PTMCMCSampler' or 'GibbsSampler' or 'discovery' red_noise: include red noise in the model n_iter: number of MCMC iterations; Default: 1e5; Recommended > 5e4 using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False @@ -220,7 +219,7 @@ def model_noise(mo, to, which_sampler = 'PTMCMCSampler', #Ensure n_iter is an integer n_iter = int(n_iter) - + if n_iter < 1e4: log.warning("Such a small number of iterations is unlikely to yield accurate posteriors. STRONGLY recommend increasing the number of iterations to at least 5e4") @@ -228,7 +227,7 @@ def model_noise(mo, to, which_sampler = 'PTMCMCSampler', e_psr = Pulsar(mo, to) if which_sampler == 'PTMCMCSampler': - log.info(f"INFO: Running noise analysis with {sampler} for {e_psr.name}") + log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") #Setup a single pulsar PTA using enterprise_extensions if not using_wideband: pta = models.model_singlepsr_noise(e_psr, white_vary = True, red_var = vary_red_noise, is_wideband = False, use_dmdata = False, dmjump_var = False, wb_efac_sigma = wb_efac_sigma, **noise_kwargs) @@ -248,17 +247,20 @@ def model_noise(mo, to, which_sampler = 'PTMCMCSampler', #Initial sample x0 = np.hstack([p.sample() for p in pta.params]) - #Start sampling - samp.sample(x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs) - elif which_sampler == 'gibbs': - log.info(f"INFO: Running noise analysis with {sampler} for {e_psr.name}") + elif which_sampler == 'GibbsSampler': + log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") samp = GibbsSampler(e_psr, **noise_kwargs, ) samp.sample(niter=n_iter, save_path=outdir, **sampler_kwargs) - pass + pass + elif which_sampler == 'discovery': + log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") + pass + else: + log.error("Invalid sampler specified. 
Please use \'PTMCMCSampler\' or \'GibbsSampler\' or \'discovery\' ")
 
 def convert_to_RNAMP(value):
     """
     Utility function to convert enterprise RN amplitude to tempo2/PINT parfile RN amplitude
     """
@@ -458,6 +460,32 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl
 
     return model
 
+def setup_gibbs_sampler():
+    """
+    Set up the Gibbs sampler for noise analysis from enterprise_extensions
+    """
+    # check that a sufficiently up-to-date version of enterprise_extensions is installed
+    try:
+        from enterprise_extensions.gibbs_sampling import gibbs
+    except ImportError:
+        log.error("Please install the latest version of enterprise_extensions")
+        return None
+
+    pass
+
+def setup_discovery_sampler():
+    """
+    Set up the discovery sampler for noise analysis using the discovery package
+    """
+    # check that a sufficiently up-to-date version of discovery is installed
+    try:
+        import discovery as ds
+    except ImportError:
+        log.error("Please install the latest version of discovery")
+        return None
+
+    pass
+
 def test_equad_convention(pars_list):
     """
     If (t2/tn)equad present, report convention used.

From 8c6a839cdcbc1f55374363a386c573784621792b Mon Sep 17 00:00:00 2001
From: Jeremy Baier
Date: Thu, 17 Oct 2024 20:46:05 -0700
Subject: [PATCH 031/193] getting back onnnit

---
 src/pint_pal/noise_utils.py | 647 +++++++++++++++++++++++++-----------
 1 file changed, 454 insertions(+), 193 deletions(-)

diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py
index 4e329ad1..9ba3ae91 100644
--- a/src/pint_pal/noise_utils.py
+++ b/src/pint_pal/noise_utils.py
@@ -12,7 +12,11 @@
 import pint.models as pm
 from pint.models.parameter import maskParameter
 
 import matplotlib as mpl
 import matplotlib.pyplot as pl
 
+import la_forge.core as co
+import la_forge.diagnostics as dg
+import la_forge.utils as lu
+
 # Imports necessary for e_e noise modeling functions
 import functools
 from collections import OrderedDict
 
@@ -27,14 +31,22 @@
 from enterprise_extensions import model_utils
 from enterprise_extensions import deterministic
 from enterprise_extensions.timing import timing_block
-#from enterprise_extensions.blocks import (white_noise_block, red_noise_block)
+
+# from enterprise_extensions.blocks import (white_noise_block, red_noise_block)
 import types
 
 from enterprise.signals import utils
 from enterprise.signals import gp_priors as gpp
 
-def analyze_noise(chaindir = './noise_run_chains/', burn_frac = 0.25, save_corner = True, no_corner_plot = False, chaindir_compare=None):
+
+def analyze_noise(
+    chaindir="./noise_run_chains/",
+    burn_frac=0.25,
+    save_corner=True,
+    no_corner_plot=False,
+    chaindir_compare=None,
+):
     """
     Reads enterprise chain file; produces and saves corner plot; returns WN dictionary and RN (SD) BF
 
@@ -50,141 +62,201 @@
     wn_dict: Dictionary of maximum likelihood WN values
     rn_bf: Savage-Dickey BF for RN for given pulsar
     """
-    chainfile = chaindir + 'chain_1.txt'
-    chain = np.loadtxt(chainfile)
-    burn = int(burn_frac * chain.shape[0])
-    pars = np.loadtxt(chaindir + 'pars.txt', dtype = str)
-
-    psr_name = pars[0].split('_')[0]
-
+    # Replaced the direct chain-file loading below with la_forge, which is more flexible
+    # chainfile = chaindir + "chain_1.txt"
+    # chain = np.loadtxt(chainfile)
+    # burn = int(burn_frac * chain.shape[0])
+    # pars = np.loadtxt(chaindir + "pars.txt", dtype=str)
+    try:
+        noise_core = co.Core(chaindir=chaindir)
+    except Exception:
+        log.error(f"Could not load noise run from {chaindir}")
+        return None
+    noise_core.burn(burn_frac)
+    chain = noise_core.chain
+    psr_name = noise_core.pars[0].split("_")[0]
+    pars = 
noise_core.pars + # load in same for comparison noise model if chaindir_compare is not None: - chainfile_compare = chaindir_compare + 'chain_1.txt' + chainfile_compare = chaindir_compare + "chain_1.txt" chain_compare = np.loadtxt(chainfile_compare) burn_compare = int(burn_frac * chain_compare.shape[0]) - pars_compare = np.loadtxt(chaindir_compare + 'pars.txt', dtype = str) + pars_compare = np.loadtxt(chaindir_compare + "pars.txt", dtype=str) - psr_name_compare = pars_compare[0].split('_')[0] + psr_name_compare = pars_compare[0].split("_")[0] if psr_name_compare != psr_name: - log.warning(f"Pulsar name from {chaindir_compare} does not match. Will not plot comparison") + log.warning( + f"Pulsar name from {chaindir_compare} does not match. Will not plot comparison" + ) chaindir_compare = None if save_corner and not no_corner_plot: - pars_short = [p.split("_",1)[1] for p in pars] + pars_short = [p.split("_", 1)[1] for p in pars] log.info(f"Chain parameter names are {pars_short}") log.info(f"Chain parameter convention: {test_equad_convention(pars_short)}") if chaindir_compare is not None: # need to plot comparison corner plot first so it's underneath - compare_pars_short = [p.split("_",1)[1] for p in pars_compare] + compare_pars_short = [p.split("_", 1)[1] for p in pars_compare] log.info(f"Comparison chain parameter names are {compare_pars_short}") - log.info(f"Comparison chain parameter convention: {test_equad_convention(compare_pars_short)}") + log.info( + f"Comparison chain parameter convention: {test_equad_convention(compare_pars_short)}" + ) # don't plot comparison if the parameter names don't match if compare_pars_short != pars_short: - log.warning("Parameter names for comparison noise chains do not match, not plotting the compare-noise-dir chains") + log.warning( + "Parameter names for comparison noise chains do not match, not plotting the compare-noise-dir chains" + ) chaindir_compare = None else: - normalization_factor = np.ones(len(chain_compare[burn:, :-4]))*len(chain[burn:, :-4])/len(chain_compare[burn:, :-4]) - fig = corner.corner(chain_compare[burn:, :-4], color='orange', alpha=0.5, weights=normalization_factor, labels = compare_pars_short) + normalization_factor = ( + np.ones(len(chain_compare[:, :-4])) + * len(chain[:, :-4]) + / len(chain_compare[:, :-4]) + ) + fig = corner.corner( + chain_compare[:, :-4], + color="orange", + alpha=0.5, + weights=normalization_factor, + labels=compare_pars_short, + ) # normal corner plot - corner.corner(chain[burn:, :-4], fig=fig, color='black', labels = pars_short) + corner.corner( + chain[:, :-4], fig=fig, color="black", labels=pars_short + ) if chaindir_compare is None: - corner.corner(chain[burn:, :-4], labels = pars_short) + corner.corner(chain[:, :-4], labels=pars_short) - if '_wb' in chaindir: + if "_wb" in chaindir: figname = f"./{psr_name}_noise_corner_wb.pdf" - elif '_nb' in chaindir: + elif "_nb" in chaindir: figname = f"./{psr_name}_noise_corner_nb.pdf" else: figname = f"./{psr_name}_noise_corner.pdf" pl.savefig(figname) - pl.savefig(figname.replace(".pdf",".png"), dpi=300) + pl.savefig(figname.replace(".pdf", ".png"), dpi=300) pl.show() - + if no_corner_plot: - + from matplotlib.backends.backend_pdf import PdfPages - if '_wb' in chaindir: + + if "_wb" in chaindir: figbase = f"./{psr_name}_noise_posterior_wb" - elif '_nb' in chaindir: + elif "_nb" in chaindir: figbase = f"./{psr_name}_noise_posterior_nb" else: figbase = f"./{psr_name}_noise_posterior" - - pars_short = [p.split("_",1)[1] for p in pars] + + pars_short = [p.split("_", 
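+        # chain parameter names are "<psr>_<rest>"; splitting once on "_" keeps
+        # only the per-backend parameter label for the plot axes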
1)[1] for p in pars] log.info(f"Chain parameter names are {pars_short}") log.info(f"Chain parameter convention: {test_equad_convention(pars_short)}") if chaindir_compare is not None: # need to plot comparison corner plot first so it's underneath - compare_pars_short = [p.split("_",1)[1] for p in pars_compare] + compare_pars_short = [p.split("_", 1)[1] for p in pars_compare] log.info(f"Comparison chain parameter names are {compare_pars_short}") - log.info(f"Comparison chain parameter convention: {test_equad_convention(compare_pars_short)}") + log.info( + f"Comparison chain parameter convention: {test_equad_convention(compare_pars_short)}" + ) # don't plot comparison if the parameter names don't match if compare_pars_short != pars_short: - log.warning("Parameter names for comparison noise chains do not match, not plotting the compare-noise-dir chains") + log.warning( + "Parameter names for comparison noise chains do not match, not plotting the compare-noise-dir chains" + ) chaindir_compare = None else: - normalization_factor = np.ones(len(chain_compare[burn:, :-4]))*len(chain[burn:, :-4])/len(chain_compare[burn:, :-4]) - - #Set the shape of the subplots + normalization_factor = ( + np.ones(len(chain_compare[:, :-4])) + * len(chain[:, :-4]) + / len(chain_compare[:, :-4]) + ) + + # Set the shape of the subplots shape = pars.shape[0] - - if '_wb' in chaindir: - ncols = 4 # number of columns per page + + if "_wb" in chaindir: + ncols = 4 # number of columns per page else: ncols = 3 - - nrows = 5 # number of rows per page - mp_idx = np.argmax(chain[burn:, -4]) - if chaindir_compare is not None: mp_compare_idx = np.argmax(chain_compare[burn:, -4]) - + nrows = 5 # number of rows per page + + mp_idx = np.argmax(chain[:, -4]) + if chaindir_compare is not None: + mp_compare_idx = np.argmax(chain_compare[:, -4]) + nbins = 20 pp = 0 for idx, par in enumerate(pars_short): - j = idx % (nrows*ncols) + j = idx % (nrows * ncols) if j == 0: pp += 1 - fig = pl.figure(figsize=(8,11)) - - ax = fig.add_subplot(nrows, ncols, j+1) - ax.hist(chain[burn:, idx], bins = nbins, histtype = 'step', color='black', label = 'Current') - ax.axvline(chain[burn:, idx][mp_idx], ls = '--', color = 'black') + fig = pl.figure(figsize=(8, 11)) + + ax = fig.add_subplot(nrows, ncols, j + 1) + ax.hist( + chain[:, idx], + bins=nbins, + histtype="step", + color="black", + label="Current", + ) + ax.axvline(chain[:, idx][mp_idx], ls="--", color="black") if chaindir_compare is not None: - ax.hist(chain_compare[burn:, idx], bins = nbins, histtype = 'step', color='orange', label = 'Past') - ax.axvline(chain_compare[burn:, idx][mp_compare_idx], ls = '--', color = 'orange') - if '_wb' in chaindir: ax.set_xlabel(par, fontsize=8) - else: ax.set_xlabel(par, fontsize = 10) + ax.hist( + chain_compare[:, idx], + bins=nbins, + histtype="step", + color="orange", + label="Past", + ) + ax.axvline( + chain_compare[:, idx][mp_compare_idx], ls="--", color="orange" + ) + if "_wb" in chaindir: + ax.set_xlabel(par, fontsize=8) + else: + ax.set_xlabel(par, fontsize=10) ax.set_yticks([]) ax.set_yticklabels([]) - if j == (nrows*ncols)-1 or idx == len(pars_short)-1: + if j == (nrows * ncols) - 1 or idx == len(pars_short) - 1: pl.tight_layout() pl.savefig(f"{figbase}_{pp}.pdf") # Wasn't working before, but how do I implement a legend? 
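+                # One possible route (untested sketch): a single figure-level legend,
+                #     handles, labels = ax.get_legend_handles_labels()
+                #     fig.legend(handles, labels, loc="lower center", ncol=2)
+                # added before pl.savefig so it ends up in the saved page.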
- #ax[nr][nc].legend(loc = 'best') + # ax[nr][nc].legend(loc = 'best') pl.show() - - ml_idx = np.argmax(chain[burn:, -4]) - wn_vals = chain[burn:, :-4][ml_idx] + ml_idx = np.argmax(chain[:, -4]) + + wn_vals = chain[:, :-4][ml_idx] wn_dict = dict(zip(pars, wn_vals)) - #Print bayes factor for red noise in pulsar - rn_bf = model_utils.bayes_fac(chain[burn:, -5], ntol=1, logAmax=-11, logAmin=-20)[0] + # Print bayes factor for red noise in pulsar + rn_bf = model_utils.bayes_fac(chain[:, -5], ntol=1, logAmax=-11, logAmin=-20)[0] return wn_dict, rn_bf -def model_noise(mo, to, which_sampler = 'PTMCMCSampler', - vary_red_noise = True, n_iter = int(1e5), - using_wideband = False, resume = False, - run_noise_analysis = True, - wb_efac_sigma = 0.25, base_op_dir = "./", - noise_kwargs = {}, sampler_kwargs = {}, - ): + +def model_noise( + mo, + to, + which_sampler="PTMCMCSampler", + vary_red_noise=True, + n_iter=int(1e5), + using_wideband=False, + resume=False, + run_noise_analysis=True, + wb_efac_sigma=0.25, + base_op_dir="./", + noise_kwargs={}, + sampler_kwargs={}, +): """ Setup enterprise PTA and perform MCMC noise analysis @@ -192,11 +264,16 @@ def model_noise(mo, to, which_sampler = 'PTMCMCSampler', ========== mo: PINT (or tempo2) timing model to: PINT (or tempo2) TOAs - sampler: either 'PTMCMCSampler' or 'GibbsSampler' or 'discovery' + sampler: choose from ['PTMCMCSampler' or 'GibbsSampler' or 'discovery'] + PTMCMCSampler -- MCMC sampling with the Enterprise likelihood + GibbsSampler -- enterprise_extension's GibbsSampler with PTMCMC and Enterprise white noise + discovery -- blocked Gibbs-Hamiltonian MC in numpyro with a discovery likelihood red_noise: include red noise in the model n_iter: number of MCMC iterations; Default: 1e5; Recommended > 5e4 using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False run_noise_analysis: Flag to toggle execution of noise modeling; Default: True + noise_kwargs: dictionary of noise model parameters; Default: {} + sampler_kwargs: dictionary of sampler parameters; Default: {} Returns ======= @@ -204,71 +281,142 @@ def model_noise(mo, to, which_sampler = 'PTMCMCSampler', """ if not using_wideband: - outdir = base_op_dir + mo.PSR.value + '_nb/' + outdir = base_op_dir + mo.PSR.value + "_nb/" else: - outdir = base_op_dir + mo.PSR.value + '_wb/' + outdir = base_op_dir + mo.PSR.value + "_wb/" if os.path.exists(outdir) and (run_noise_analysis) and (not resume): - log.info("INFO: A noise directory for pulsar {} already exists! Re-running noise modeling from scratch".format(mo.PSR.value)) + log.info( + "INFO: A noise directory for pulsar {} already exists! Re-running noise modeling from scratch".format( + mo.PSR.value + ) + ) elif os.path.exists(outdir) and (run_noise_analysis) and (resume): - log.info("INFO: A noise directory for pulsar {} already exists! Re-running noise modeling starting from previous chain".format(mo.PSR.value)) + log.info( + "INFO: A noise directory for pulsar {} already exists! Re-running noise modeling starting from previous chain".format( + mo.PSR.value + ) + ) if not run_noise_analysis: - log.info("Skipping noise modeling. Change run_noise_analysis = True to run noise modeling.") + log.info( + "Skipping noise modeling. Change run_noise_analysis = True to run noise modeling." + ) return None - #Ensure n_iter is an integer + # Ensure n_iter is an integer n_iter = int(n_iter) - - if n_iter < 1e4: - log.warning("Such a small number of iterations is unlikely to yield accurate posteriors. 
STRONGLY recommend increasing the number of iterations to at least 5e4")
+        log.warning(
+            "Such a small number of iterations is unlikely to yield accurate posteriors. STRONGLY recommend increasing the number of iterations to at least 5e4"
+        )
 
-    #Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to)
-    e_psr = Pulsar(mo, to)
-
-    if which_sampler == 'PTMCMCSampler':
+    # Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to)
+    if which_sampler == "discovery":
+        # discovery requires feathered pulsars
+        f_psr = Pulsar(mo, to)
+    elif which_sampler == "GibbsSampler" or which_sampler == "PTMCMCSampler":
+        e_psr = Pulsar(mo, to)
+
+    if which_sampler == "PTMCMCSampler":
         log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}")
-        #Setup a single pulsar PTA using enterprise_extensions
+        # Setup a single pulsar PTA using enterprise_extensions
         if not using_wideband:
-            pta = models.model_singlepsr_noise(e_psr, white_vary = True, red_var = vary_red_noise, is_wideband = False, use_dmdata = False, dmjump_var = False, wb_efac_sigma = wb_efac_sigma, **noise_kwargs)
+            pta = models.model_singlepsr_noise(
+                e_psr,
+                white_vary=True,
+                red_var=vary_red_noise,
+                is_wideband=False,
+                use_dmdata=False,
+                dmjump_var=False,
+                wb_efac_sigma=wb_efac_sigma,
+                **noise_kwargs,
+            )
         else:
-            pta = models.model_singlepsr_noise(e_psr, is_wideband = True, use_dmdata = True, white_vary = True, red_var = vary_red_noise, dmjump_var = False, wb_efac_sigma = wb_efac_sigma, ng_twg_setup = True, **noise_kwargs)
+            pta = models.model_singlepsr_noise(
+                e_psr,
+                is_wideband=True,
+                use_dmdata=True,
+                white_vary=True,
+                red_var=vary_red_noise,
+                dmjump_var=False,
+                wb_efac_sigma=wb_efac_sigma,
+                ng_twg_setup=True,
+                **noise_kwargs,
+            )
         dmjump_params = {}
         for param in mo.params:
-            if param.startswith('DMJUMP'):
-                dmjump_param = getattr(mo,param)
-                dmjump_param_name = f"{pta.pulsars[0]}_{dmjump_param.key_value[0]}_dmjump"
+            if param.startswith("DMJUMP"):
+                dmjump_param = getattr(mo, param)
+                dmjump_param_name = (
+                    f"{pta.pulsars[0]}_{dmjump_param.key_value[0]}_dmjump"
+                )
                 dmjump_params[dmjump_param_name] = dmjump_param.value
         pta.set_default_params(dmjump_params)
 
         # FIXME: set groups here
         #######
 
-        #setup sampler using enterprise_extensions
-        samp = sampler.setup_sampler(pta, outdir = outdir, resume = resume)
+        # setup sampler using enterprise_extensions
+        samp = sampler.setup_sampler(pta, outdir=outdir, resume=resume)
 
-        #Initial sample
+        # Initial sample
         x0 = np.hstack([p.sample() for p in pta.params])
 
-        #Start sampling
-        samp.sample(x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs)
-    elif which_sampler == 'GibbsSampler':
+        # Start sampling
+        samp.sample(
+            x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs
+        )
+    elif which_sampler == "GibbsSampler":
         log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}")
-        samp = GibbsSampler(e_psr,
-                    **noise_kwargs,
-                    )
+        samp = GibbsSampler(
+            e_psr,
+            **noise_kwargs,
+        )
         samp.sample(niter=n_iter, save_path=outdir, **sampler_kwargs)
         pass
-    elif which_sampler == 'discovery':
+    elif which_sampler == "discovery":
         log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}")
-        pass
-    else:
-        log.error("Invalid sampler specified. Please use \'PTMCMCSampler\' or \'GibbsSampler\' or \'discovery\' ")
+        try:
+            import jax
+            import xarray as xr
+        except ImportError:
+            log.error("Please install the latest version of jax and/or xarray")
+            raise ValueError("Please install the latest version of jax and/or xarray")
+        samp = setup_discovery_noise(f_psr)
+        # run the sampler
+        samp.run(jax.random.key(42))
+        # Get samples
+        samples = samp.get_samples()
+
+        # Convert samples to xarray.Dataset
+        data = xr.Dataset(
+            {var: (["chain", "draw"], np.expand_dims(samples[var], axis=0)) for var in samples}
+        )
+
+        # Save to NetCDF file
+        data.to_netcdf(f"{base_op_dir}/discovery_chain.nc")
+    else:
+        log.error(
+            "Invalid sampler specified. 
Please use \'PTMCMCSampler\' or \'GibbsSampler\' or \'discovery\' ")
+        try:
+            import jax
+            import xarray as xr
+        except ImportError:
+            log.error("Please install latest version of jax and/or xarray")
+            raise ValueError("Please install latest version of jax and/or xarray")
+        samp = setup_discovery_noise(f_psr)
+        # run the sampler
+        samp.run(jax.random.key(42))
+        # Get samples
+        samples = samp.get_samples()
+
+        # Convert samples to xarray.Dataset
+        data = samp.Dataset({var: (["chain", "draw"], np.expand_dims(samples[var], axis=0)) for var in samples})
+
+        # Save to NetCDF file
+        data.to_netcdf(f"{base_op_dir}/discovery_chain.nc")
+    else:
+        log.error(
+            "Invalid sampler specified. Please use 'PTMCMCSampler' or 'GibbsSampler' or 'discovery' "
+        )
+
+
 def convert_to_RNAMP(value):
     """
     Utility function to convert enterprise RN amplitude to tempo2/PINT parfile RN amplitude
     """
-    return (86400.*365.24*1e6)/(2.0*np.pi*np.sqrt(3.0)) * 10 ** value
-
-def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_plot = False, ignore_red_noise = False, using_wideband = False, rn_bf_thres = 1e2, base_dir = None, compare_dir=None):
+    return (86400.0 * 365.24 * 1e6) / (2.0 * np.pi * np.sqrt(3.0)) * 10**value
+
+
+def add_noise_to_model(
+    model,
+    burn_frac=0.25,
+    save_corner=True,
+    no_corner_plot=False,
+    ignore_red_noise=False,
+    using_wideband=False,
+    rn_bf_thres=1e2,
+    base_dir=None,
+    compare_dir=None,
+):
     """
     Add WN and RN parameters to timing model.
 
@@ -291,29 +439,37 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl
 
     # Assume results are in current working directory if not specified
     if not base_dir:
-        base_dir = './'
+        base_dir = "./"
 
     chaindir_compare = compare_dir
     if not using_wideband:
-        chaindir = os.path.join(base_dir,f'{model.PSR.value}_nb/')
+        chaindir = os.path.join(base_dir, f"{model.PSR.value}_nb/")
         if compare_dir is not None:
-            chaindir_compare = os.path.join(compare_dir,f'{model.PSR.value}_nb/')
+            chaindir_compare = os.path.join(compare_dir, f"{model.PSR.value}_nb/")
     else:
-        chaindir = os.path.join(base_dir,f'{model.PSR.value}_wb/')
+        chaindir = os.path.join(base_dir, f"{model.PSR.value}_wb/")
         if compare_dir is not None:
-            chaindir_compare = os.path.join(compare_dir,f'{model.PSR.value}_wb/')
-
-    log.info(f'Using existing noise analysis results in {chaindir}')
-    log.info('Adding new noise parameters to model.')
-    wn_dict, rn_bf = analyze_noise(chaindir, burn_frac, save_corner, no_corner_plot, chaindir_compare=chaindir_compare)
-    chainfile = chaindir + 'chain_1.txt'
+            chaindir_compare = os.path.join(compare_dir, f"{model.PSR.value}_wb/")
+
+    log.info(f"Using existing noise analysis results in {chaindir}")
+    log.info("Adding new noise parameters to model.")
+    wn_dict, rn_bf = analyze_noise(
+        chaindir,
+        burn_frac,
+        save_corner,
+        no_corner_plot,
+        chaindir_compare=chaindir_compare,
+    )
+    chainfile = chaindir + "chain_1.txt"
     mtime = Time(os.path.getmtime(chainfile), format="unix")
     log.info(f"Noise chains loaded from {chainfile} created at {mtime.isot}")
 
-    #Create the maskParameter for EFACS
+    # Create the maskParameter for EFACS
     efac_params = []
     equad_params = []
     rn_params = []
+    dm_gp_params = []
+    chrom_gp_params = []
     ecorr_params = []
     dmefac_params = []
     dmequad_params = []
@@ -326,140 +482,205 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl
 
     for key, val in wn_dict.items():
 
-        psr_name = key.split('_')[0]
+        psr_name = key.split("_")[0]
 
-        if '_efac' in key:
+        if "_efac" in key:
 
-            param_name = 
key.split('_efac')[0].split(psr_name)[1][1:] + param_name = key.split("_efac")[0].split(psr_name)[1][1:] - tp = maskParameter(name = 'EFAC', index = efac_idx, key = '-f', key_value = param_name, - value = val, units = '', convert_tcb2tdb=False) + tp = maskParameter( + name="EFAC", + index=efac_idx, + key="-f", + key_value=param_name, + value=val, + units="", + convert_tcb2tdb=False, + ) efac_params.append(tp) efac_idx += 1 # See https://github.com/nanograv/enterprise/releases/tag/v3.3.0 # ..._t2equad uses PINT/Tempo2/Tempo convention, resulting in total variance EFAC^2 x (toaerr^2 + EQUAD^2) - elif '_t2equad' in key: - - param_name = key.split('_t2equad')[0].split(psr_name)[1].split('_log10')[0][1:] - - tp = maskParameter(name = 'EQUAD', index = equad_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) + elif "_t2equad" in key: + + param_name = ( + key.split("_t2equad")[0].split(psr_name)[1].split("_log10")[0][1:] + ) + + tp = maskParameter( + name="EQUAD", + index=equad_idx, + key="-f", + key_value=param_name, + value=10**val / 1e-6, + units="us", + convert_tcb2tdb=False, + ) equad_params.append(tp) equad_idx += 1 # ..._tnequad uses temponest convention, resulting in total variance EFAC^2 toaerr^2 + EQUAD^2 - elif '_tnequad' in key: - - param_name = key.split('_tnequad')[0].split(psr_name)[1].split('_log10')[0][1:] - - tp = maskParameter(name = 'EQUAD', index = equad_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) + elif "_tnequad" in key: + + param_name = ( + key.split("_tnequad")[0].split(psr_name)[1].split("_log10")[0][1:] + ) + + tp = maskParameter( + name="EQUAD", + index=equad_idx, + key="-f", + key_value=param_name, + value=10**val / 1e-6, + units="us", + convert_tcb2tdb=False, + ) equad_params.append(tp) equad_idx += 1 # ..._equad uses temponest convention; generated with enterprise pre-v3.3.0 - elif '_equad' in key: - - param_name = key.split('_equad')[0].split(psr_name)[1].split('_log10')[0][1:] - - tp = maskParameter(name = 'EQUAD', index = equad_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) + elif "_equad" in key: + + param_name = ( + key.split("_equad")[0].split(psr_name)[1].split("_log10")[0][1:] + ) + + tp = maskParameter( + name="EQUAD", + index=equad_idx, + key="-f", + key_value=param_name, + value=10**val / 1e-6, + units="us", + convert_tcb2tdb=False, + ) equad_params.append(tp) equad_idx += 1 - elif ('_ecorr' in key) and (not using_wideband): - - param_name = key.split('_ecorr')[0].split(psr_name)[1].split('_log10')[0][1:] - - tp = maskParameter(name = 'ECORR', index = ecorr_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) + elif ("_ecorr" in key) and (not using_wideband): + + param_name = ( + key.split("_ecorr")[0].split(psr_name)[1].split("_log10")[0][1:] + ) + + tp = maskParameter( + name="ECORR", + index=ecorr_idx, + key="-f", + key_value=param_name, + value=10**val / 1e-6, + units="us", + convert_tcb2tdb=False, + ) ecorr_params.append(tp) ecorr_idx += 1 - elif ('_dmefac' in key) and (using_wideband): + elif ("_dmefac" in key) and (using_wideband): - param_name = key.split('_dmefac')[0].split(psr_name)[1][1:] + param_name = key.split("_dmefac")[0].split(psr_name)[1][1:] - tp = maskParameter(name = 'DMEFAC', index = dmefac_idx, key = '-f', key_value = param_name, - value = val, units = '', convert_tcb2tdb=False) + tp = maskParameter( + 
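  # DMEFAC scales the wideband DM uncertainties, selected per -f (receiver/backend) flag
+                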
name="DMEFAC",
+                index=dmefac_idx,
+                key="-f",
+                key_value=param_name,
+                value=val,
+                units="",
+                convert_tcb2tdb=False,
+            )
             dmefac_params.append(tp)
             dmefac_idx += 1
 
-        elif ('_dmequad' in key) and (using_wideband):
-
-            param_name = key.split('_dmequad')[0].split(psr_name)[1].split('_log10')[0][1:]
-
-            tp = maskParameter(name = 'DMEQUAD', index = dmequad_idx, key = '-f', key_value = param_name,
-                               value = 10 ** val, units = 'pc/cm3', convert_tcb2tdb=False)
+        elif ("_dmequad" in key) and (using_wideband):
+
+            param_name = (
+                key.split("_dmequad")[0].split(psr_name)[1].split("_log10")[0][1:]
+            )
+
+            tp = maskParameter(
+                name="DMEQUAD",
+                index=dmequad_idx,
+                key="-f",
+                key_value=param_name,
+                value=10**val,
+                units="pc/cm3",
+                convert_tcb2tdb=False,
+            )
             dmequad_params.append(tp)
             dmequad_idx += 1
 
     # Test EQUAD convention and decide whether to convert
     convert_equad_to_t2 = False
-    if test_equad_convention(wn_dict.keys()) == 'tnequad':
-        log.info('WN paramaters use temponest convention; EQUAD values will be converted once added to model')
+    if test_equad_convention(wn_dict.keys()) == "tnequad":
+        log.info(
+            "WN parameters use temponest convention; EQUAD values will be converted once added to model"
+        )
         convert_equad_to_t2 = True
-        if np.any(['_equad' in p for p in wn_dict.keys()]):
-            log.info('WN parameters generated using enterprise pre-v3.3.0')
-    elif test_equad_convention(wn_dict.keys()) == 't2equad':
-        log.info('WN parameters use T2 convention; no conversion necessary')
+        if np.any(["_equad" in p for p in wn_dict.keys()]):
+            log.info("WN parameters generated using enterprise pre-v3.3.0")
+    elif test_equad_convention(wn_dict.keys()) == "t2equad":
+        log.info("WN parameters use T2 convention; no conversion necessary")
 
     # Create white noise components and add them to the model
     ef_eq_comp = pm.ScaleToaError()
-    ef_eq_comp.remove_param(param = 'EFAC1')
-    ef_eq_comp.remove_param(param = 'EQUAD1')
-    ef_eq_comp.remove_param(param = 'TNEQ1')
+    ef_eq_comp.remove_param(param="EFAC1")
+    ef_eq_comp.remove_param(param="EQUAD1")
+    ef_eq_comp.remove_param(param="TNEQ1")
     for efac_param in efac_params:
-        ef_eq_comp.add_param(param = efac_param, setup = True)
+        ef_eq_comp.add_param(param=efac_param, setup=True)
     for equad_param in equad_params:
-        ef_eq_comp.add_param(param = equad_param, setup = True)
-    model.add_component(ef_eq_comp, validate = True, force = True)
+        ef_eq_comp.add_param(param=equad_param, setup=True)
+    model.add_component(ef_eq_comp, validate=True, force=True)
 
     if len(dmefac_params) > 0 or len(dmequad_params) > 0:
         dm_comp = pm.noise_model.ScaleDmError()
-        dm_comp.remove_param(param = 'DMEFAC1')
-        dm_comp.remove_param(param = 'DMEQUAD1')
+        dm_comp.remove_param(param="DMEFAC1")
+        dm_comp.remove_param(param="DMEQUAD1")
         for dmefac_param in dmefac_params:
-            dm_comp.add_param(param = dmefac_param, setup = True)
+            dm_comp.add_param(param=dmefac_param, setup=True)
         for dmequad_param in dmequad_params:
-            dm_comp.add_param(param = dmequad_param, setup = True)
-        model.add_component(dm_comp, validate = True, force = True)
+            dm_comp.add_param(param=dmequad_param, setup=True)
+        model.add_component(dm_comp, validate=True, force=True)
 
     if len(ecorr_params) > 0:
         ec_comp = pm.EcorrNoise()
-        ec_comp.remove_param('ECORR1')
+        ec_comp.remove_param("ECORR1")
         for ecorr_param in ecorr_params:
-            ec_comp.add_param(param = ecorr_param, setup = True)
-            model.add_component(ec_comp, validate = True, force = True)
+            ec_comp.add_param(param=ecorr_param, setup=True)
+        model.add_component(ec_comp, validate=True, force=True)
 
     # 
Create red noise component and add it to the model log.info(f"The SD Bayes factor for red noise in this pulsar is: {rn_bf}") if (rn_bf >= rn_bf_thres or np.isnan(rn_bf)) and (not ignore_red_noise): log.info("Including red noise for this pulsar") - #Add the ML RN parameters to their component + # Add the ML RN parameters to their component rn_comp = pm.PLRedNoise() - rn_keys = np.array([key for key,val in wn_dict.items() if '_red_' in key]) - rn_comp.RNAMP.quantity = convert_to_RNAMP(wn_dict[psr_name + '_red_noise_log10_A']) - rn_comp.RNIDX.quantity = -1 * wn_dict[psr_name + '_red_noise_gamma'] + rn_keys = np.array([key for key, val in wn_dict.items() if "_red_" in key]) + rn_comp.RNAMP.quantity = convert_to_RNAMP( + wn_dict[psr_name + "_red_noise_log10_A"] + ) + rn_comp.RNIDX.quantity = -1 * wn_dict[psr_name + "_red_noise_gamma"] - #Add red noise to the timing model - model.add_component(rn_comp, validate = True, force = True) + # Add red noise to the timing model + model.add_component(rn_comp, validate=True, force=True) else: log.info("Not including red noise for this pulsar") - #Setup and validate the timing model to ensure things are correct + # Setup and validate the timing model to ensure things are correct model.setup() model.validate() model.noise_mtime = mtime.isot if convert_equad_to_t2: from pint_pal.lite_utils import convert_enterprise_equads + model = convert_enterprise_equads(model) return model + def setup_gibbs_sampler(): """ Setup the Gibbs sampler for noise analysis from enterprise extensions @@ -470,21 +691,59 @@ def setup_gibbs_sampler(): except ImportError: log.error("Please install the latest version of enterprise_extensions") return None - + pass -def setup_discovery_sampler(): + +def setup_discovery_noise(psr): """ Setup the discovery sampler for noise analysis from enterprise extensions """ # check that a sufficiently up-to-date version of enterprise_extensions is installed try: import discovery as ds + import jax + from jax import numpy as jnp + import numpyro + from numpyro import distributions as dist + from numpyro import infer + from discovery import prior + from discovery.prior import (makelogtransform_uniform, + makelogprior_uniform, + sample_uniform) + except ImportError: - log.error("Please install the latest version of discovery") - return None + log.error("Please install the latest version of discovery, numpyro, and/or jax") + ValueError("Please install the latest version of discovery, numpyro, and/or jax") - pass + time_span = ds.getspan([psr]) + args = ( + ds.makenoise_measurement(psr), + ds.makegp_ecorr(psr), + ds.makegp_timing(psr, svd=True), + ds.makegp_fourier(psr, ds.powerlaw, 30, T=time_span, name='red_noise'), + psr.residuals + ) + psl = ds.PulsarLikelihood(args) + prior = prior.makelogprior_uniform(psl.logL.params, {'(.*_)?extra_parameter': [9, 10]}) + log_x = makelogtransform_uniform(psl.logL) + x0 = sample_uniform(psl.logL.params) + def numpyro_model(): + params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) + numpyro.factor("ll", log_x(params)) + + sampler = infer.MCMC( + infer.NUTS(numpyro_model), + num_warmup=250, + num_samples=4096, + num_chains=4, + progress_bar=True, + chain_method='vectorized' + ) + + return sampler + + def test_equad_convention(pars_list): """ @@ -497,15 +756,17 @@ def test_equad_convention(pars_list): Returns ======= - convention_test: t2equad/tnequad/None + convention_test: t2equad/tnequad/None """ # Test equad convention - t2_test = np.any(['_t2equad' in p for p in pars_list]) - 
tn_test = np.any([('_tnequad' in p) or ('_equad' in p) for p in pars_list])
+    t2_test = np.any(["_t2equad" in p for p in pars_list])
+    tn_test = np.any([("_tnequad" in p) or ("_equad" in p) for p in pars_list])
     if t2_test and not tn_test:
-        return 't2equad'
+        return "t2equad"
     elif tn_test and not t2_test:
-        return 'tnequad'
+        return "tnequad"
     else:
-        log.warning('EQUADs not present in parameter list (or something strange is going on).')
+        log.warning(
+            "EQUADs not present in parameter list (or something strange is going on)."
+        )
         return None

From 86c8c74a8aea102e7afec04b1f65b3c0af4b14b3 Mon Sep 17 00:00:00 2001
From: Jeremy Baier
Date: Fri, 18 Oct 2024 00:53:04 -0700
Subject: [PATCH 032/193] adding discovery to noise utils

---
 src/pint_pal/noise_utils.py | 63 ++++++++++++++++++++-----------------
 1 file changed, 34 insertions(+), 29 deletions(-)

diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py
index 9ba3ae91..112e162c 100644
--- a/src/pint_pal/noise_utils.py
+++ b/src/pint_pal/noise_utils.py
@@ -1,4 +1,5 @@
 import numpy as np, os
+import arviz as az
 from astropy import log
 from astropy.time import Time
 
@@ -72,10 +73,14 @@ def analyze_noise(
     except:
         log.error(f"Could not load noise run from {chaindir}")
         return None
-    noise_core.burn(burn_frac)
+    noise_core.set_burn(burn_frac)
     chain = noise_core.chain
-    psr_name = noise_core.pars[0].split("_")[0]
-    pars = noise_core.pars
+    psr_name = noise_core.params[0].split("_")[0]
+    pars = np.array(noise_core.params)
+    if chain.shape[1] != len(pars):
+        a = -4
+    elif chain.shape[1] == len(pars):
+        a = chain.shape[1]
 
     # load in same for comparison noise model
     if chaindir_compare is not None:
@@ -110,12 +115,12 @@ def analyze_noise(
             chaindir_compare = None
         else:
             normalization_factor = (
-                np.ones(len(chain_compare[:, :-4]))
-                * len(chain[:, :-4])
-                / len(chain_compare[:, :-4])
+                np.ones(len(chain_compare[:, :a]))
+                * len(chain[:, :a])
+                / len(chain_compare[:, :a])
             )
             fig = corner.corner(
-                chain_compare[:, :-4],
+                chain_compare[:, :a],
                 color="orange",
                 alpha=0.5,
                 weights=normalization_factor,
             )
             # normal corner plot
             corner.corner(
-                chain[:, :-4], fig=fig, color="black", labels=pars_short
+                chain[:, :a], fig=fig, color="black", labels=pars_short
             )
         if chaindir_compare is None:
-            corner.corner(chain[:, :-4], labels=pars_short)
+            corner.corner(chain[:, :a], labels=pars_short)
 
         if "_wb" in chaindir:
             figname = f"./{psr_name}_noise_corner_wb.pdf"
@@ -169,9 +174,9 @@ def analyze_noise(
             chaindir_compare = None
         else:
             normalization_factor = (
-                np.ones(len(chain_compare[:, :-4]))
-                * len(chain[:, :-4])
-                / len(chain_compare[:, :-4])
+                np.ones(len(chain_compare[:, :a]))
+                * len(chain[:, :a])
+                / len(chain_compare[:, :a])
             )
 
     # Set the shape of the subplots
@@ -184,9 +189,9 @@ def analyze_noise(
     nrows = 5  # number of rows per page
 
-    mp_idx = np.argmax(chain[:, -4])
+    mp_idx = np.argmax(chain[:, a])
     if chaindir_compare is not None:
-        mp_compare_idx = np.argmax(chain_compare[:, -4])
+        mp_compare_idx = np.argmax(chain_compare[:, a])
 
     nbins = 20
     pp = 0
@@ -231,9 +236,9 @@ def analyze_noise(
         # ax[nr][nc].legend(loc = 'best')
     pl.show()
 
-    ml_idx = np.argmax(chain[:, -4])
+    ml_idx = np.argmax(chain[:, a])
 
-    wn_vals = chain[:, :-4][ml_idx]
+    wn_vals = chain[:, :a][ml_idx]
 
     wn_dict = dict(zip(pars, wn_vals))
 
@@ -381,18 +386,18 @@ def model_noise(
         except ImportError:
             log.error("Please install latest version of jax and/or xarray")
             raise ValueError("Please install latest version of jax and/or xarray")
-        samp = 
setup_discovery_noise(f_psr) + samp, log_x = setup_discovery_noise(f_psr) # run the sampler samp.run(jax.random.key(42)) - - # Get samples - samples = samp.get_samples() - - # Convert samples to xarray.Dataset - data = samp.Dataset({var: (["chain", "draw"], np.expand_dims(samples[var], axis=0)) for var in samples}) - - # Save to NetCDF file - data.to_netcdf(f"{base_op_dir}/discovery_chain.nc") + # convert to a DataFrame + df = log_x.to_df(samp.get_samples()['par']) + # convert DataFrame to dictionary + samples_dict = df.to_dict(orient='list') + # convert dictionary to ArviZ InferenceData object + inference_data = az.from_dict(samples_dict) + # Save to NetCDF file which can be loaded into la_forge + os.mkdir(outdir, parents=True, exist_ok=True) + inference_data.to_netcdf(outdir+"chain.nc") else: log.error( "Invalid sampler specified. Please use 'PTMCMCSampler' or 'GibbsSampler' or 'discovery' " @@ -725,9 +730,9 @@ def setup_discovery_noise(psr): psr.residuals ) psl = ds.PulsarLikelihood(args) - prior = prior.makelogprior_uniform(psl.logL.params, {'(.*_)?extra_parameter': [9, 10]}) + prior = prior.makelogprior_uniform(psl.logL.params, {}) log_x = makelogtransform_uniform(psl.logL) - x0 = sample_uniform(psl.logL.params) + # x0 = sample_uniform(psl.logL.params) def numpyro_model(): params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) numpyro.factor("ll", log_x(params)) @@ -741,7 +746,7 @@ def numpyro_model(): chain_method='vectorized' ) - return sampler + return sampler, log_x From 8fd746bd1580eee29b326bb825e52fa2cb0ff6a3 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Sat, 19 Oct 2024 15:28:10 -0700 Subject: [PATCH 033/193] commiting what i have rn --- src/pint_pal/noise_utils.py | 407 ++++++++++++++++++++++++++++-------- 1 file changed, 317 insertions(+), 90 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 112e162c..769560cb 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -1,4 +1,4 @@ -import numpy as np, os +import numpy as np, os, json import arviz as az from astropy import log from astropy.time import Time @@ -29,6 +29,7 @@ from enterprise.signals import deterministic_signals from enterprise import constants as const +from enterprise_extensions.sampler import group_from_params, get_parameter_groups from enterprise_extensions import model_utils from enterprise_extensions import deterministic from enterprise_extensions.timing import timing_block @@ -41,12 +42,82 @@ from enterprise.signals import gp_priors as gpp +def setup_sampling_groups(pta, + write_groups=True, + outdir='./'): + """ + Sets sampling groups for PTMCMCSampler. + The sampling groups can help ensure the sampler does not get stuck. + The idea is to group parameters which are more highly correlated. 
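For example, all white-noise parameters sharing a backend flag land in one
+    group, and chromatic terms (dm_gp, chrom_gp, solar wind) get joint groups.
+    Intended use, mirroring model_noise below (sketch):
+
+        groups = setup_sampling_groups(pta, outdir=outdir)
+        samp = sampler.setup_sampler(pta, outdir=outdir, groups=groups)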
+
+    Params
+    ------
+    pta: the enterprise PTA object
+    write_groups: bool, write the groups to a file
+    outdir: str, directory to write the groups to
+
+    Returns
+    -------
+    groups: list of lists of indices corresponding to parameter groups
+
+    """
+
+    # groups
+    pnames = pta.param_names
+    groups = get_parameter_groups(pta)
+    # add per-backend white noise
+    backends = np.unique([p[p.index('_')+1:p.index('efac')-1] for p in pnames if 'efac' in p])
+    for be in backends:
+        groups.append(group_from_params(pta,[be]))
+    # group red noise parameters
+    exclude = ['linear_timing_model','sw_r2','sw_4p39','measurement_noise',
+               'ecorr_sherman-morrison', 'ecorr_fast-sherman-morrison']
+    red_signals = [p[p.index('_')+1:] for p in list(pta.signals.keys())
+                   if not p[p.index('_')+1:] in exclude]
+    rn_ct = 0
+    for rs in red_signals:
+        if len(group_from_params(pta,[rs])) > 0:
+            rn_ct += 1
+            groups.append(group_from_params(pta,[rs]))
+    if rn_ct > 1:
+        groups.append(group_from_params(pta,red_signals))
+    # add cross chromatic groups
+    if 'n_earth' in pnames or 'log10_sigma_ne' in pnames:
+        # cross SW and chrom groups
+        dmgp_sw = [idx for idx, nm in enumerate(pnames)
+                   if any([flag in nm for flag in ['dm_gp','n_earth', 'log10_sigma_ne']])]
+        groups.append(dmgp_sw)
+        if np.any(['chrom' in param for param in pnames]):
+            chromgp_sw = [idx for idx, nm in enumerate(pnames)
+                          if any([flag in nm for flag in ['chrom_gp','n_earth', 'log10_sigma_ne']])]
+            dmgp_chromgp_sw = [idx for idx, nm in enumerate(pnames)
+                               if any([flag in nm for flag in ['dm_gp','chrom','n_earth', 'log10_sigma_ne']])]
+            groups.append(chromgp_sw)
+            groups.append(dmgp_chromgp_sw)
+    if np.any(['chrom' in param for param in pnames]):
+        # cross dmgp and chromgp group
+        dmgp_chromgp = [idx for idx, nm in enumerate(pnames)
+                        if any([flag in nm for flag in ['dm_gp','chrom']])]
+        groups.append(dmgp_chromgp)
+    # everything
+    groups.append([i for i in range(len(pnames))])
+    # save list of params corresponding to groups
+    if write_groups is True:
+        with open(f'{outdir}/groups.txt', 'w') as fi:
+            for group in groups:
+                line = np.array(pnames)[np.array(group)]
+                fi.write("[" + " ".join(line) + "]\n")
+    # return the groups to be passed to the sampler
+    return groups
+
+
 def analyze_noise(
     chaindir="./noise_run_chains/",
     burn_frac=0.25,
     save_corner=True,
     no_corner_plot=False,
     chaindir_compare=None,
+    which_sampler = 'PTMCMCSampler',
 ):
     """
     Reads enterprise chain file; produces and saves corner plot; returns WN dictionary and RN (SD) BF
 
     chaindir: path to enterprise noise run chain; Default: './noise_run_chains/'
     burn_frac: fraction of chain to use for burn-in; Default: 0.25
     save_corner: Flag to toggle saving of corner plots; Default: True
-    chaindir_compare: path to enterprise noise run chain wish to plot in corner plot for comparison; Default: None
+    no_corner_plot: Flag to skip generating the corner plot; Default: False
+    chaindir_compare: path to a noise run chain you wish to overplot in the corner plot for comparison; Default: None
+    which_sampler: choose from ['PTMCMCSampler', 'GibbsSampler', 'discovery']
 
     Returns
     =======
-    wn_dict: Dictionary of maximum likelihood WN values
-    rn_bf: Savage-Dickey BF for RN for given pulsar
+    noise_core: la_forge.core object which contains noise chains and run metadata
+    noise_dict: Dictionary of maximum a posteriori noise values
+    rn_bf: Savage-Dickey BF for achromatic RN for given pulsar
     """
-    #### replacing this with la_forge to be more flexible
-    # chainfile = chaindir + "chain_1.txt"
-    # chain = 
np.loadtxt(chainfile)
-    # burn = int(burn_frac * chain.shape[0])
-    # pars = np.loadtxt(chaindir + "pars.txt", dtype=str)
     try:
         noise_core = co.Core(chaindir=chaindir)
     except:
         log.error(f"Could not load noise run from {chaindir}")
-        return None
+        raise ValueError(f"Could not load noise run from {chaindir}")
+    if which_sampler == 'PTMCMCSampler':
+        noise_core.set_burn(burn_frac)
+    elif which_sampler == 'discovery':
+        noise_core.set_burn(0)
+    else:
+        noise_core.set_burn(burn_frac)
     chain = noise_core.chain
     psr_name = noise_core.params[0].split("_")[0]
-    pars = np.array(noise_core.params)
+    pars = np.array([p for p in noise_core.params if p not in ['lnlike', 'lnpost']])
+    if len(pars)+2 != chain.shape[1]:
+        chain = chain[:, :len(pars)+2]
 
     # load in same for comparison noise model
     if chaindir_compare is not None:
-        chainfile_compare = chaindir_compare + "chain_1.txt"
-        chain_compare = np.loadtxt(chainfile_compare)
-        burn_compare = int(burn_frac * chain_compare.shape[0])
-        pars_compare = np.loadtxt(chaindir_compare + "pars.txt", dtype=str)
+        compare_core = co.Core(chaindir=chaindir_compare)
+        compare_core.set_burn(noise_core.burn)
+        chain_compare = compare_core.chain
+        pars_compare = np.array([p for p in compare_core.params if p not in ['lnlike', 'lnpost']])
+        if len(pars_compare)+2 != chain_compare.shape[1]:
+            chain_compare = chain_compare[:, :len(pars_compare)+2]
 
         psr_name_compare = pars_compare[0].split("_")[0]
         if psr_name_compare != psr_name:
 
         compare_pars_short = [p.split("_", 1)[1] for p in pars_compare]
         log.info(f"Comparison chain parameter names are {compare_pars_short}")
         log.info(
-            f"Comparison chain parameter convention: {test_equad_convention(compare_pars_short)}"
+            f"Comparison chain parameter convention: {test_equad_convention(compare_pars_short)}"
         )
         # don't plot comparison if the parameter names don't match
         if compare_pars_short != pars_short:
             log.warning(
-                "Parameter names for comparison noise chains do not match, not plotting the compare-noise-dir chains"
+                "Parameter names for comparison noise chains do not match, not plotting the compare-noise-dir chains"
             )
             chaindir_compare = None
         else:
             normalization_factor = (
-                np.ones(len(chain_compare[:, :a]))
-                * len(chain[:, :a])
-                / len(chain_compare[:, :a])
+                np.ones(len(chain_compare))
+                * len(chain)
+                / len(chain_compare)
             )
             fig = corner.corner(
-                chain_compare[:, :a],
+                chain_compare,
                 color="orange",
                 alpha=0.5,
                 weights=normalization_factor,
             )
             # normal corner plot
             corner.corner(
-                chain[:, :a], fig=fig, color="black", labels=pars_short
+                chain, fig=fig, color="black", labels=pars_short
             )
         if chaindir_compare is None:
-            corner.corner(chain[:, :a], labels=pars_short)
+            corner.corner(chain, labels=pars_short)
 
         if "_wb" in chaindir:
             figname = f"./{psr_name}_noise_corner_wb.pdf"
 
             chaindir_compare = None
         else:
             normalization_factor = (
-                np.ones(len(chain_compare[:, :a]))
-                * len(chain[:, :a])
-                / len(chain_compare[:, :a])
+                np.ones(len(chain_compare))
+                * len(chain)
+                / len(chain_compare)
             )
 
     # Set the shape of the subplots
 
     nrows = 5  # number of rows per page
 
-    mp_idx = np.argmax(chain[:, a])
+    mp_idx = noise_core.map_idx
+    #mp_idx = np.argmax(chain[:, a])
     if chaindir_compare is not None:
-        mp_compare_idx = np.argmax(chain_compare[:, a])
+        mp_compare_idx = 
compare_core.map_idx nbins = 20 pp = 0 @@ -235,17 +310,14 @@ def analyze_noise( # Wasn't working before, but how do I implement a legend? # ax[nr][nc].legend(loc = 'best') pl.show() - - ml_idx = np.argmax(chain[:, a]) - - wn_vals = chain[:, :a][ml_idx] - - wn_dict = dict(zip(pars, wn_vals)) + + noise_dict = noise_core.get_map_dict() # Print bayes factor for red noise in pulsar - rn_bf = model_utils.bayes_fac(chain[:, -5], ntol=1, logAmax=-11, logAmin=-20)[0] + rn_amp_nm = psr_name+"_red_noise_log10_A" + rn_bf = model_utils.bayes_fac(noise_core(rn_amp_nm), ntol=1, logAmax=-11, logAmin=-20)[0] - return wn_dict, rn_bf + return noise_core, noise_dict, rn_bf def model_noise( @@ -261,6 +333,7 @@ def model_noise( base_op_dir="./", noise_kwargs={}, sampler_kwargs={}, + return_sampler=False, ): """ Setup enterprise PTA and perform MCMC noise analysis @@ -272,17 +345,19 @@ def model_noise( sampler: choose from ['PTMCMCSampler' or 'GibbsSampler' or 'discovery'] PTMCMCSampler -- MCMC sampling with the Enterprise likelihood GibbsSampler -- enterprise_extension's GibbsSampler with PTMCMC and Enterprise white noise - discovery -- blocked Gibbs-Hamiltonian MC in numpyro with a discovery likelihood + discovery -- various numpyro samplers with a discovery likelihood red_noise: include red noise in the model n_iter: number of MCMC iterations; Default: 1e5; Recommended > 5e4 using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False run_noise_analysis: Flag to toggle execution of noise modeling; Default: True noise_kwargs: dictionary of noise model parameters; Default: {} sampler_kwargs: dictionary of sampler parameters; Default: {} + return_sampler: Flag to return the sampler object; Default: False Returns ======= - None + None or + samp: sampler object """ if not using_wideband: @@ -318,11 +393,7 @@ def model_noise( ) # Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to) - if which_sampler == "discovery": - # discovery requires feathered pulsars - f_psr = Pulsar(mo, to) - elif which_sampler == "GibbsSampler" or which_sampler == "PTMCMCSampler": - e_psr = Pulsar(mo, to) + e_psr = Pulsar(mo, to) if which_sampler == "PTMCMCSampler": log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") @@ -359,11 +430,14 @@ def model_noise( ) dmjump_params[dmjump_param_name] = dmjump_param.value pta.set_default_params(dmjump_params) - # FIXME: set groups here + # set groups here + groups = setup_sampling_groups(pta, write_groups=True, outdir=outdir) ####### # setup sampler using enterprise_extensions - samp = sampler.setup_sampler(pta, outdir=outdir, resume=resume) - + samp = sampler.setup_sampler(pta, + outdir=outdir, + resume=resume, + groups=groups) # Initial sample x0 = np.hstack([p.sample() for p in pta.params]) # Start sampling @@ -371,6 +445,11 @@ def model_noise( x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs ) elif which_sampler == "GibbsSampler": + try: + from enterprise_extensions import GibbsSampler + except: + log.error("Please install the latest enterprise_extensions") + ValueError("Please install the latest enterprise_extensions") log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") samp = GibbsSampler( e_psr, @@ -386,22 +465,42 @@ def model_noise( except ImportError: log.error("Please install latest version of jax and/or xarray") ValueError("Please install lastest version of jax and/or xarray") - samp, log_x = setup_discovery_noise(f_psr) + # get the default settings + 
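# (caller-supplied noise_kwargs / sampler_kwargs are merged over these defaults below)
+        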
model_defaults, sampler_defaults = get_model_and_sampler_default_settings() + # update with args passed in + model_kwargs = model_defaults.update(noise_kwargs) + sampler_kwargs = sampler_defaults.update(sampler_kwargs) + os.mkdir(outdir, parents=True, exist_ok=True) + with open(outdir+"model_kwargs.json", "w") as f: + json.dump(model_kwargs, f) + with open(outdir+"sampler_kwargs.json", "w") as f: + json.dump(sampler_kwargs, f) + samp, log_x, numpyro_model = setup_discovery_noise(e_psr, model_kwargs, sampler_kwargs) # run the sampler samp.run(jax.random.key(42)) # convert to a DataFrame df = log_x.to_df(samp.get_samples()['par']) # convert DataFrame to dictionary samples_dict = df.to_dict(orient='list') + if sampler_kwargs['numpyro_sampler'] is not 'HMC_GIBBS': + ln_like = log_likelihood(numpyro_model, samp.get_samples())['ll'] + ln_prior = dist.Normal(0, 10).log_prob(samp.get_samples()['par']).sum(axis=-1) + ln_post = ln_like + ln_prior + samples_dict['lnlike'] = ln_like + samples_dict['lnpost'] = ln_post + else: + samples_dict['lnlike'] = None + samples_dict['lnpost'] = None # convert dictionary to ArviZ InferenceData object inference_data = az.from_dict(samples_dict) # Save to NetCDF file which can be loaded into la_forge - os.mkdir(outdir, parents=True, exist_ok=True) inference_data.to_netcdf(outdir+"chain.nc") else: log.error( "Invalid sampler specified. Please use 'PTMCMCSampler' or 'GibbsSampler' or 'discovery' " ) + if return_sampler: + return samp def convert_to_RNAMP(value): @@ -458,7 +557,7 @@ def add_noise_to_model( log.info(f"Using existing noise analysis results in {chaindir}") log.info("Adding new noise parameters to model.") - wn_dict, rn_bf = analyze_noise( + noise_dict, rn_bf = analyze_noise( chaindir, burn_frac, save_corner, @@ -472,9 +571,6 @@ def add_noise_to_model( # Create the maskParameter for EFACS efac_params = [] equad_params = [] - rn_params = [] - dm_gp_params = [] - chrom_gp_params = [] ecorr_params = [] dmefac_params = [] dmequad_params = [] @@ -485,7 +581,7 @@ def add_noise_to_model( dmefac_idx = 1 dmequad_idx = 1 - for key, val in wn_dict.items(): + for key, val in noise_dict.items(): psr_name = key.split("_")[0] @@ -617,14 +713,14 @@ def add_noise_to_model( # Test EQUAD convention and decide whether to convert convert_equad_to_t2 = False - if test_equad_convention(wn_dict.keys()) == "tnequad": + if test_equad_convention(noise_dict.keys()) == "tnequad": log.info( "WN paramaters use temponest convention; EQUAD values will be converted once added to model" ) convert_equad_to_t2 = True - if np.any(["_equad" in p for p in wn_dict.keys()]): + if np.any(["_equad" in p for p in noise_dict.keys()]): log.info("WN parameters generated using enterprise pre-v3.3.0") - elif test_equad_convention(wn_dict.keys()) == "t2equad": + elif test_equad_convention(noise_dict.keys()) == "t2equad": log.info("WN parameters use T2 convention; no conversion necessary") # Create white noise components and add them to the model @@ -662,11 +758,75 @@ def add_noise_to_model( # Add the ML RN parameters to their component rn_comp = pm.PLRedNoise() - rn_keys = np.array([key for key, val in wn_dict.items() if "_red_" in key]) + rn_keys = np.array([key for key, val in noise_dict.items() if "_red_" in key]) rn_comp.RNAMP.quantity = convert_to_RNAMP( - wn_dict[psr_name + "_red_noise_log10_A"] + noise_dict[psr_name + "_red_noise_log10_A"] ) - rn_comp.RNIDX.quantity = -1 * wn_dict[psr_name + "_red_noise_gamma"] + rn_comp.RNIDX.quantity = -1 * noise_dict[psr_name + "_red_noise_gamma"] + + # Add red 
noise to the timing model + model.add_component(rn_comp, validate=True, force=True) + else: + log.info("Not including red noise for this pulsar") + + # Check to see if dm noise is present + dm_pars = [key for key in list(noise_dict.keys()) if "_dm_gp" in key] + if len(dm_pars) > 0: + ###### POWERLAW DM NOISE ###### + if f'{psr_name}_dm_gp_log10_A' in dm_pars: + #dm_bf = model_utils.bayes_fac(noise_core(rn_amp_nm), ntol=1, logAmax=-11, logAmin=-20)[0] + #log.info(f"The SD Bayes factor for dm noise in this pulsar is: {dm_bf}") + log.info('Adding Powerlaw DM GP noise as PLDMNoise to par file') + # Add the ML RN parameters to their component + dm_comp = pm.PLDMNoise() + dm_keys = np.array([key for key, val in noise_dict.items() if "_red_" in key]) + dm_comp.TNDMAMP.quantity = convert_to_RNAMP( + noise_dict[psr_name + "_dm_gp_log10_A"] + ) + dm_comp.TNDMIDX.quantity = -1 * noise_dict[psr_name + "_dm_gp_gamma"] + ##### FIXMEEEEEEE : need to figure out some way to softcode this + dm_comp.TNDMC.quantitity = 100 + # Add red noise to the timing model + model.add_component(dm_comp, validate=True, force=True) + ###### FREE SPECTRAL (WaveX) DM NOISE ###### + elif f'{psr_name}_dm_gp_log10_rho_0' in dm_pars: + log.info('Adding Free Spectral DM GP as DMWaveXnoise to par file') + NotImplementedError('DMWaveXNoise not yet implemented') + + # Check to see if higher order chromatic noise is present + chrom_pars = [key for key in list(noise_dict.keys()) if "_chrom_gp" in key] + if len(chrom_pars) > 0: + ###### POWERLAW CHROMATIC NOISE ###### + if f'{psr_name}_chrom_gp_log10_A' in chrom_pars: + log.info('Adding Powerlaw CHROM GP noise as PLCMNoise to par file') + # Add the ML RN parameters to their component + chrom_comp = pm.PLCMNoise() + chrom_keys = np.array([key for key, val in noise_dict.items() if "_chrom_gp_" in key]) + dm_comp.TNDMAMP.quantity = convert_to_RNAMP( + noise_dict[psr_name + "_chrom_gp_log10_A"] + ) + chrom_comp.TNCMIDX.quantity = -1 * noise_dict[psr_name + "_dm_gp_gamma"] + ##### FIXMEEEEEEE : need to figure out some way to softcode this + chrom_comp.TNCMC.quantitity = 100 + # Add red noise to the timing model + model.add_component(dm_comp, validate=True, force=True) + ###### FREE SPECTRAL (WaveX) DM NOISE ###### + elif f'{psr_name}_chrom_gp_log10_rho_0' in chrom_pars: + log.info('Adding Free Spectral CHROM GP as CMWaveXnoise to par file') + NotImplementedError('CMWaveXNoise not yet implemented') + + log.info(f"The SD Bayes factor for dm noise in this pulsar is: {rn_bf}") + if (rn_bf >= rn_bf_thres or np.isnan(rn_bf)) and (not ignore_red_noise): + + log.info("Including red noise for this pulsar") + # Add the ML RN parameters to their component + rn_comp = pm.PLRedNoise() + + rn_keys = np.array([key for key, val in noise_dict.items() if "_red_" in key]) + rn_comp.RNAMP.quantity = convert_to_RNAMP( + noise_dict[psr_name + "_red_noise_log10_A"] + ) + rn_comp.RNIDX.quantity = -1 * noise_dict[psr_name + "_red_noise_gamma"] # Add red noise to the timing model model.add_component(rn_comp, validate=True, force=True) @@ -696,15 +856,16 @@ def setup_gibbs_sampler(): except ImportError: log.error("Please install the latest version of enterprise_extensions") return None - - pass + NotImplementedError("Gibbs sampler not yet implemented") -def setup_discovery_noise(psr): +def setup_discovery_noise(psr, + model_kwargs={}, + sampler_kwargs={}): """ - Setup the discovery sampler for noise analysis from enterprise extensions + Setup the discovery likelihood with numpyro sampling for noise analysis """ - # 
check that a sufficiently up-to-date version of enterprise_extensions is installed + # check that jax, numpyro and discovery are installed try: import discovery as ds import jax @@ -716,38 +877,78 @@ def setup_discovery_noise(psr): from discovery.prior import (makelogtransform_uniform, makelogprior_uniform, sample_uniform) + from discovery.gibbs import setup_single_psr_hmc_gibbs except ImportError: log.error("Please install the latest version of discovery, numpyro, and/or jax") ValueError("Please install the latest version of discovery, numpyro, and/or jax") - + # set up the model time_span = ds.getspan([psr]) - args = ( + model_components = [ + psr.residuals, + ds.makegp_timing(psr, svd=True), ds.makenoise_measurement(psr), ds.makegp_ecorr(psr), - ds.makegp_timing(psr, svd=True), - ds.makegp_fourier(psr, ds.powerlaw, 30, T=time_span, name='red_noise'), - psr.residuals - ) - psl = ds.PulsarLikelihood(args) + ] + if model_kwargs['inc_rn']: + if model_kwargs['rn_psd'] == 'powerlaw': + model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['rn_nfreqs'], T=time_span, name='red_noise')) + elif model_kwargs['rn_psd'] == 'free_spectral': + model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['rn_nfreqs'], T=time_span, name='red_noise')) + if model_kwargs['inc_dmgp']: + if model_kwargs['dmgp_psd'] == 'powerlaw': + model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['dmgp_nfreqs'], T=time_span, name='dm_gp')) + elif model_kwargs['dmgp_psd'] == 'free_spectral': + model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['dmgp_nfreqs'], T=time_span, name='dm_gp')) + if model_kwargs['inc_chrom']: + if model_kwargs['rn_psd'] == 'powerlaw': + model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['chromgp_nfreqs'], T=time_span, name='dm_gp')) + elif model_kwargs['rn_psd'] == 'free_spectral': + model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['chromgp_nfreqs'], T=time_span, name='dm_gp')) + psl = ds.PulsarLikelihood(model_components) prior = prior.makelogprior_uniform(psl.logL.params, {}) log_x = makelogtransform_uniform(psl.logL) # x0 = sample_uniform(psl.logL.params) - def numpyro_model(): - params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) - numpyro.factor("ll", log_x(params)) - - sampler = infer.MCMC( - infer.NUTS(numpyro_model), - num_warmup=250, - num_samples=4096, - num_chains=4, - progress_bar=True, - chain_method='vectorized' - ) + if sampler_kwargs['numpyro_sampler'] == 'HMC_Gibbs': + def numpyro_model(): + return None + gibbs_hmc_kernel = setup_single_psr_hmc_gibbs( + psrl=psl, psrs=psr, + priordict=ds.priordict_standard, + invhdorf=None, nuts_kwargs={}) + sampler = infer.MCMC(gibbs_hmc_kernel, + num_warmup=sampler_kwargs['num_warmup'], + num_samples=sampler_kwargs['num_warmup'], + num_chains=sampler_kwargs['num_chains'], + chain_method=sampler_kwargs['chain_method'], + progress_bar=True, + ) + elif sampler_kwargs['numpyro_sampler'] == 'NUTS': + def numpyro_model(): + params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) + numpyro.factor("ll", log_x(params)) + nuts_kernel = infer.NUTS(numpyro_model, num_steps=sampler_kwargs['num_steps']) + sampler = infer.MCMC(nuts_kernel, + num_warmup=sampler_kwargs['num_warmup'], + num_samples=sampler_kwargs['num_warmup'], + num_chains=sampler_kwargs['num_chains'], + chain_method=sampler_kwargs['chain_method'], + progress_bar=True, + ) + elif 
sampler_kwargs['numpyro_sampler'] == 'HMC': + def numpyro_model(): + params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) + numpyro.factor("ll", log_x(params)) + hmc_kernel = infer.HMC(numpyro_model, num_steps=sampler_kwargs['num_steps']) + sampler = infer.MCMC(hmc_kernel, + num_warmup=sampler_kwargs['num_warmup'], + num_samples=sampler_kwargs['num_warmup'], + num_chains=sampler_kwargs['num_chains'], + chain_method=sampler_kwargs['chain_method'], + progress_bar=True, + ) - return sampler, log_x - + return sampler, log_x, numpyro_model def test_equad_convention(pars_list): @@ -775,3 +976,29 @@ def test_equad_convention(pars_list): "EQUADs not present in parameter list (or something strange is going on)." ) return None + +def get_model_and_sampler_default_settings(): + model_defaults = { + 'inc_rn': True, + 'rn_psd': 'powerlaw', + 'rn_nfreqs': 30, + 'inc_dmgp': False, + 'dmgp_psd': 'powerlaw', + 'dmgp_nfreqs': 100, + 'inc_chromgp': False, + 'chromgp_psd': 'powerlaw', + 'chromgp_nfreqs': 100, + 'vary_chrom_idx': False, + 'inc_swgp': False, + 'swgp_psd': 'powerlaw', + 'swgp_nfreqs': 100, + } + sampler_defaults = { + 'numpyro_sampler': 'HMC', + 'num_steps': 5, + 'num_warmup': 500, + 'num_samples': 2500, + 'num_chains': 4, + 'chain_method': 'vectorized', + } + return model_defaults, sampler_defaults \ No newline at end of file From f0aa78da5cfd75925cf83e18a60f635b030c9055 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Sun, 20 Oct 2024 15:11:34 -0700 Subject: [PATCH 034/193] adding solar wind stuff --- src/pint_pal/noise_utils.py | 200 ++++++++++++++++++++++++++---------- 1 file changed, 148 insertions(+), 52 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 769560cb..56f4584f 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -9,6 +9,7 @@ import pint.models as pm from pint.models.parameter import maskParameter +from pint.models.timing_model import Component import matplotlib as mpl import matplotlib.pyplot as pl @@ -137,12 +138,19 @@ def analyze_noise( noise_dict: Dictionary of maximum a posterior noise values rn_bf: Savage-Dickey BF for achromatic RN for given pulsar """ - try: - noise_core = co.Core(chaindir=chaindir) - except: - log.error(f"Could not load noise run from {chaindir}") - ValueError(f"Could not load noise run from {chaindir}") if which_sampler == 'PTMCMCSampler': + try: + noise_core = co.Core(chaindir=chaindir) + except: + log.error(f"Could not load noise run from {chaindir}") + ValueError(f"Could not load noise run from {chaindir}") + elif which_sampler == 'GibbsSampler': + try: + noise_core = co.Core(corepath=chaindir+'/chain') + except: + log.error(f"Could not load noise run from {chaindir}") + ValueError(f"Could not load noise run from {chaindir}") + if which_sampler == 'PTMCMCSampler' or which_sampler == "GibbsSampler": noise_core.set_burn(burn_frac) elif which_sampler == 'discovery': noise_core.set_burn(0) @@ -359,7 +367,12 @@ def model_noise( None or samp: sampler object """ - + # get the default settings + model_defaults, sampler_defaults = get_model_and_sampler_default_settings() + # update with args passed in + model_kwargs = model_defaults.update(noise_kwargs) + sampler_kwargs = sampler_defaults.update(sampler_kwargs) + if not using_wideband: outdir = base_op_dir + mo.PSR.value + "_nb/" else: @@ -394,7 +407,9 @@ def model_noise( # Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to) e_psr = Pulsar(mo, to) - + 
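# dispatch on the requested sampler; each banner block below configures and runs one backend
+    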
########################################################## + ################ PTMCMCSampler ################## + ########################################################## if which_sampler == "PTMCMCSampler": log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") # Setup a single pulsar PTA using enterprise_extensions @@ -407,7 +422,17 @@ def model_noise( use_dmdata=False, dmjump_var=False, wb_efac_sigma=wb_efac_sigma, - **noise_kwargs, + # DM GP + dm_var=model_kwargs['inc_dmgp'], + dm_Nfreqs=model_kwargs['dmgp_nfreqs'], + # CHROM GP + chrom_gp=model_kwargs['inc_chromgp'], + chrom_Nfreqs=model_kwargs['chromgp_nfreqs'], + # DM SOLAR WIND + dm_sw_deter=model_kwargs['inc_sw_deter'], + ACE_prior=model_kwargs['ACE_prior'], + # can pass extra signals in here + extra_sigs=model_kwargs['extra_sigs'], ) else: pta = models.model_singlepsr_noise( @@ -419,7 +444,6 @@ def model_noise( dmjump_var=False, wb_efac_sigma=wb_efac_sigma, ng_twg_setup=True, - **noise_kwargs, ) dmjump_params = {} for param in mo.params: @@ -444,6 +468,9 @@ def model_noise( samp.sample( x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs ) + ############################################################## + ################## GibbsSampler ######################## + ############################################################## elif which_sampler == "GibbsSampler": try: from enterprise_extensions import GibbsSampler @@ -452,24 +479,39 @@ def model_noise( ValueError("Please install the latest enterprise_extensions") log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") samp = GibbsSampler( - e_psr, - **noise_kwargs, + e_psr, + vary_wn=True, + tm_marg=False, + inc_ecorr=True, + ecorr_type='kernel', + vary_rn=model_kwargs['inc_rn'], + rn_components=model_kwargs['rn_nfreqs'], + vary_dm=model_kwargs['inc_dmgp'], + dm_components=model_kwargs['dm_nfreqs'], + vary_chrom=model_kwargs['inc_chromgp'], + chrom_components=model_kwargs['chrom_nfreqs'], + noise_dict={}, + tnequad=True, + #**noise_kwargs, ) samp.sample(niter=n_iter, save_path=outdir, **sampler_kwargs) - pass + # sorta redundant to have both, but la_forge doesn't look for .npy files + chain = np.load(f'{outdir}/chain_1.npy') + np.savetxt(f'{outdir}/chain_1.txt', chain,) + ################################################################# + ################## discovery likelihood ################### + ################################################################# elif which_sampler == "discovery": log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") try: import jax import xarray as xr + from numpyro import distributions as dist + from numpyro.infer import log_likelihood + except ImportError: log.error("Please install latest version of jax and/or xarray") ValueError("Please install lastest version of jax and/or xarray") - # get the default settings - model_defaults, sampler_defaults = get_model_and_sampler_default_settings() - # update with args passed in - model_kwargs = model_defaults.update(noise_kwargs) - sampler_kwargs = sampler_defaults.update(sampler_kwargs) os.mkdir(outdir, parents=True, exist_ok=True) with open(outdir+"model_kwargs.json", "w") as f: json.dump(model_kwargs, f) @@ -522,7 +564,7 @@ def add_noise_to_model( compare_dir=None, ): """ - Add WN and RN parameters to timing model. + Add WN, RN, DMGP, and parameters to timing model. 
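Solar-wind and chromatic-GP terms are also added when matching parameters appear in the noise chains.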
Parameters ========== @@ -580,11 +622,12 @@ def add_noise_to_model( ecorr_idx = 1 dmefac_idx = 1 dmequad_idx = 1 - - for key, val in noise_dict.items(): - - psr_name = key.split("_")[0] - + + psr_name = list(noise_dict.keys())[0].split("_")[0] + noise_pars = np.array(list(noise_dict.keys())) + wn_dict = {key: val for key, val in noise_dict.items() if "efac" in key or "equad" in key or "ecorr" in key} + for key, val in wn_dict.items(): + if "_efac" in key: param_name = key.split("_efac")[0].split(psr_name)[1][1:] @@ -770,7 +813,7 @@ def add_noise_to_model( log.info("Not including red noise for this pulsar") # Check to see if dm noise is present - dm_pars = [key for key in list(noise_dict.keys()) if "_dm_gp" in key] + dm_pars = [key for key in noise_pars if "_dm_gp" in key] if len(dm_pars) > 0: ###### POWERLAW DM NOISE ###### if f'{psr_name}_dm_gp_log10_A' in dm_pars: @@ -794,7 +837,7 @@ def add_noise_to_model( NotImplementedError('DMWaveXNoise not yet implemented') # Check to see if higher order chromatic noise is present - chrom_pars = [key for key in list(noise_dict.keys()) if "_chrom_gp" in key] + chrom_pars = [key for key in noise_pars if "_chrom_gp" in key] if len(chrom_pars) > 0: ###### POWERLAW CHROMATIC NOISE ###### if f'{psr_name}_chrom_gp_log10_A' in chrom_pars: @@ -814,24 +857,19 @@ def add_noise_to_model( elif f'{psr_name}_chrom_gp_log10_rho_0' in chrom_pars: log.info('Adding Free Spectral CHROM GP as CMWaveXnoise to par file') NotImplementedError('CMWaveXNoise not yet implemented') - - log.info(f"The SD Bayes factor for dm noise in this pulsar is: {rn_bf}") - if (rn_bf >= rn_bf_thres or np.isnan(rn_bf)) and (not ignore_red_noise): - - log.info("Including red noise for this pulsar") - # Add the ML RN parameters to their component - rn_comp = pm.PLRedNoise() - - rn_keys = np.array([key for key, val in noise_dict.items() if "_red_" in key]) - rn_comp.RNAMP.quantity = convert_to_RNAMP( - noise_dict[psr_name + "_red_noise_log10_A"] - ) - rn_comp.RNIDX.quantity = -1 * noise_dict[psr_name + "_red_noise_gamma"] + + # Check to see if solar wind is present + sw_pars = [key for key in noise_pars if "sw_r2" in key] + if len(sw_pars) > 0: + log.info('Adding Solar Wind Dispersion to par file') + all_components = Component.component_types + noise_class = all_components["SolarWindDispersion"] + noise = noise_class() # Make the dispersion instance. 
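
+        # NE_SW is assigned the chains' maximum-a-posteriori value below and frozen (held fixed in fits)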
+ model.add_component(noise, validate=False) + # add parameters + model['NE_SW'].quantity = noise_dict[f'{psr_name}_NE_SW'] + model['NE_SW'].frozen = True - # Add red noise to the timing model - model.add_component(rn_comp, validate=True, force=True) - else: - log.info("Not including red noise for this pulsar") # Setup and validate the timing model to ensure things are correct model.setup() @@ -846,17 +884,67 @@ def add_noise_to_model( return model -def setup_gibbs_sampler(): +def plot_free_specs(sampler_kwargs={}, + model_kwargs={}, + noise_dict={}): """ Setup the Gibbs sampler for noise analysis from enterprise extensions """ # check that a sufficiently up-to-date version of enterprise_extensions is installed - try: - from enterprise_extensions.gibbs_sampling import gibbs - except ImportError: - log.error("Please install the latest version of enterprise_extensions") - return None - NotImplementedError("Gibbs sampler not yet implemented") + + + print("attempting to sample...") + savepath = f'/home/baierj/projects/ng20yr/noise_testing/test_J0613-0200/{psr_pkls[pidx].name}_prenoise/' + bps.sample(niter=30000, savepath = savepath,) + chain = np.load(f'{savepath}/chain_1.npy') + rn_freqs = np.load(f'{savepath}/rn_freqs.npy') + dm_freqs = np.load(f'{savepath}/dm_freqs.npy') + chrom_freqs = np.load(f'{savepath}/chrom_freqs.npy') + print(chain.shape) + outdir=savepath + np.savetxt(f'{savepath}/chain_1.txt', chain,) + c0 = co.Core(chaindir=savepath) + c0.chain = chain + + + wn_params = [par for par in c0.params if any([p in par for p in ['efac', 'equad', 'ecor']])] + if len(wn_params) > 0: + dg.plot_chains(c0, pars = wn_params) + plt.savefig(f'{outdir}/wn_hists.png') + plt.close() + dg.plot_grubin(c0) + plt.savefig(f'{outdir}/grubin.png') + plt.close() + + fig, axes = plt.subplots(1,1,figsize=(8,4)) + tspan = max(psr_pkls[pidx].toas)-min(psr_pkls[pidx].toas) + c0.rn_freqs = rn_freqs + rn.plot_free_spec(c0, axis=axes, parname_root=f'{psr_pkls[pidx].name}_red_noise_log10_rho', violin=True, Color='red',Tspan=tspan) + axes.set_xscale('log') + plt.title(f"{psr_pkls[pidx].name} | red noise | nfreqs={len(rn_freqs)}" ) + plt.savefig(f"{outdir}/rn.png") + plt.close() + + fig, axes = plt.subplots(1,1,figsize=(8,4)) + tspan = max(psr_pkls[pidx].toas)-min(psr_pkls[pidx].toas) + c0.rn_freqs = dm_freqs + rn.plot_free_spec(c0, axis=axes, parname_root=f'{psr_pkls[pidx].name}_dm_gp_log10_rho', + violin=True, Color='blue',Tspan=tspan) + axes.set_xscale('log') + plt.title(f"{psr_pkls[pidx].name} | DM GP | nfreqs={len(dm_freqs)} " ) + plt.savefig(f'{outdir}/dm_gp.png') + plt.close() + + fig, axes = plt.subplots(1,1,figsize=(8,4)) + c0.rn_freqs = chrom_freqs + tspan = max(psr_pkls[pidx].toas)-min(psr_pkls[pidx].toas) + rn.plot_free_spec(c0, axis=axes, parname_root=f'{psr_pkls[pidx].name}_chrom_gp_log10_rho', + violin=True, Color='orange',Tspan=tspan) + axes.set_xscale('log') + plt.title(f"{psr_pkls[pidx].name} | chrom gp | nfreqs={len(chrom_freqs)}" ) + plt.ylim(-9,-5) + plt.savefig(f'{outdir}/chrom_gp.png') + plt.close() def setup_discovery_noise(psr, @@ -979,19 +1067,27 @@ def test_equad_convention(pars_list): def get_model_and_sampler_default_settings(): model_defaults = { + # acrhomatic red noise 'inc_rn': True, 'rn_psd': 'powerlaw', 'rn_nfreqs': 30, + # dm gp 'inc_dmgp': False, 'dmgp_psd': 'powerlaw', 'dmgp_nfreqs': 100, + # higher order chromatic gp 'inc_chromgp': False, 'chromgp_psd': 'powerlaw', 'chromgp_nfreqs': 100, - 'vary_chrom_idx': False, + 'chrom_idx': 4, + 'chrom_quad': False, + # solar wind + 
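# toggles the deterministic solar-wind dispersion delay (passed as dm_sw_deter to enterprise_extensions)
+        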
'inc_sw_deter': False, + # GP perturbations ontop of the deterministic model 'inc_swgp': False, - 'swgp_psd': 'powerlaw', - 'swgp_nfreqs': 100, + 'ACE_prior': False, + # + 'extra_sigs': None, } sampler_defaults = { 'numpyro_sampler': 'HMC', From 7920c131d22c851b0700a91c943663df4521b91d Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Mon, 21 Oct 2024 03:31:34 +0000 Subject: [PATCH 035/193] bug fixes --- src/pint_pal/noise_utils.py | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 56f4584f..178b4e82 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -138,7 +138,7 @@ def analyze_noise( noise_dict: Dictionary of maximum a posterior noise values rn_bf: Savage-Dickey BF for achromatic RN for given pulsar """ - if which_sampler == 'PTMCMCSampler': + if which_sampler == 'PTMCMCSampler' or which_sampler == 'discovery': try: noise_core = co.Core(chaindir=chaindir) except: @@ -339,7 +339,7 @@ def model_noise( run_noise_analysis=True, wb_efac_sigma=0.25, base_op_dir="./", - noise_kwargs={}, + model_kwargs={}, sampler_kwargs={}, return_sampler=False, ): @@ -370,8 +370,12 @@ def model_noise( # get the default settings model_defaults, sampler_defaults = get_model_and_sampler_default_settings() # update with args passed in - model_kwargs = model_defaults.update(noise_kwargs) - sampler_kwargs = sampler_defaults.update(sampler_kwargs) + model_defaults.update(model_kwargs) + sampler_defaults.update(sampler_kwargs) + model_kwargs = model_defaults.copy() + sampler_kwargs = sampler_defaults.copy() + + if not using_wideband: outdir = base_op_dir + mo.PSR.value + "_nb/" @@ -512,7 +516,7 @@ def model_noise( except ImportError: log.error("Please install latest version of jax and/or xarray") ValueError("Please install lastest version of jax and/or xarray") - os.mkdir(outdir, parents=True, exist_ok=True) + os.makedirs(outdir, exist_ok=True) with open(outdir+"model_kwargs.json", "w") as f: json.dump(model_kwargs, f) with open(outdir+"sampler_kwargs.json", "w") as f: @@ -524,7 +528,7 @@ def model_noise( df = log_x.to_df(samp.get_samples()['par']) # convert DataFrame to dictionary samples_dict = df.to_dict(orient='list') - if sampler_kwargs['numpyro_sampler'] is not 'HMC_GIBBS': + if sampler_kwargs['numpyro_sampler'] != 'HMC_GIBBS': ln_like = log_likelihood(numpyro_model, samp.get_samples())['ll'] ln_prior = dist.Normal(0, 10).log_prob(samp.get_samples()['par']).sum(axis=-1) ln_post = ln_like + ln_prior @@ -562,6 +566,7 @@ def add_noise_to_model( rn_bf_thres=1e2, base_dir=None, compare_dir=None, + which_sampler='PTMCMCSampler' ): """ Add WN, RN, DMGP, and parameters to timing model. 
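Solar-wind and chromatic-GP terms are also added when matching parameters appear in the noise chains.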
@@ -599,12 +604,13 @@ def add_noise_to_model( log.info(f"Using existing noise analysis results in {chaindir}") log.info("Adding new noise parameters to model.") - noise_dict, rn_bf = analyze_noise( + noise_core, noise_dict, rn_bf = analyze_noise( chaindir, burn_frac, save_corner, no_corner_plot, chaindir_compare=chaindir_compare, + which_sampler=which_sampler, ) chainfile = chaindir + "chain_1.txt" mtime = Time(os.path.getmtime(chainfile), format="unix") @@ -988,7 +994,7 @@ def setup_discovery_noise(psr, model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['dmgp_nfreqs'], T=time_span, name='dm_gp')) elif model_kwargs['dmgp_psd'] == 'free_spectral': model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['dmgp_nfreqs'], T=time_span, name='dm_gp')) - if model_kwargs['inc_chrom']: + if model_kwargs['inc_chromgp']: if model_kwargs['rn_psd'] == 'powerlaw': model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['chromgp_nfreqs'], T=time_span, name='dm_gp')) elif model_kwargs['rn_psd'] == 'free_spectral': @@ -1006,7 +1012,7 @@ def numpyro_model(): invhdorf=None, nuts_kwargs={}) sampler = infer.MCMC(gibbs_hmc_kernel, num_warmup=sampler_kwargs['num_warmup'], - num_samples=sampler_kwargs['num_warmup'], + num_samples=sampler_kwargs['num_samples'], num_chains=sampler_kwargs['num_chains'], chain_method=sampler_kwargs['chain_method'], progress_bar=True, @@ -1018,7 +1024,7 @@ def numpyro_model(): nuts_kernel = infer.NUTS(numpyro_model, num_steps=sampler_kwargs['num_steps']) sampler = infer.MCMC(nuts_kernel, num_warmup=sampler_kwargs['num_warmup'], - num_samples=sampler_kwargs['num_warmup'], + num_samples=sampler_kwargs['num_samples'], num_chains=sampler_kwargs['num_chains'], chain_method=sampler_kwargs['chain_method'], progress_bar=True, From 7a0b14053e7582fae0a0ac2ab9134d2640d63d83 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Mon, 21 Oct 2024 06:57:48 +0000 Subject: [PATCH 036/193] more bug fix --- src/pint_pal/noise_utils.py | 64 ++++++++++++++++++++----------------- 1 file changed, 34 insertions(+), 30 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 178b4e82..fcce8b8d 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -34,6 +34,24 @@ from enterprise_extensions import model_utils from enterprise_extensions import deterministic from enterprise_extensions.timing import timing_block +try: + import xarray as xr + import jax + from jax import numpy as jnp + import numpyro + from numpyro.infer import log_likelihood + from numpyro import distributions as dist + from numpyro import infer + import discovery as ds + from discovery import prior as ds_prior + from discovery.prior import (makelogtransform_uniform, + makelogprior_uniform, + sample_uniform) + from discovery.gibbs import setup_single_psr_hmc_gibbs + +except ImportError: + log.error("Please install the latest version of discovery, numpyro, and/or jax") + ValueError("Please install the latest version of discovery, numpyro, and/or jax") # from enterprise_extensions.blocks import (white_noise_block, red_noise_block) @@ -507,15 +525,6 @@ def model_noise( ################################################################# elif which_sampler == "discovery": log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") - try: - import jax - import xarray as xr - from numpyro import distributions as dist - from numpyro.infer import log_likelihood - - except ImportError: - log.error("Please install latest version 
of jax and/or xarray") - ValueError("Please install lastest version of jax and/or xarray") os.makedirs(outdir, exist_ok=True) with open(outdir+"model_kwargs.json", "w") as f: json.dump(model_kwargs, f) @@ -585,7 +594,7 @@ def add_noise_to_model( Returns ======= - model: New timing model which includes WN and RN parameters + model: New timing model which includes WN and RN (and potentially dmgp, chrom_gp, and solar wind) parameters """ # Assume results are in current working directory if not specified @@ -959,25 +968,10 @@ def setup_discovery_noise(psr, """ Setup the discovery likelihood with numpyro sampling for noise analysis """ - # check that jax, numpyro and discovery are installed - try: - import discovery as ds - import jax - from jax import numpy as jnp - import numpyro - from numpyro import distributions as dist - from numpyro import infer - from discovery import prior - from discovery.prior import (makelogtransform_uniform, - makelogprior_uniform, - sample_uniform) - from discovery.gibbs import setup_single_psr_hmc_gibbs - - except ImportError: - log.error("Please install the latest version of discovery, numpyro, and/or jax") - ValueError("Please install the latest version of discovery, numpyro, and/or jax") # set up the model time_span = ds.getspan([psr]) + # this updates the ds.stand_priordict object + ds.priordict_standard.update(prior_dictionary_updates()) model_components = [ psr.residuals, ds.makegp_timing(psr, svd=True), @@ -1000,7 +994,7 @@ def setup_discovery_noise(psr, elif model_kwargs['rn_psd'] == 'free_spectral': model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['chromgp_nfreqs'], T=time_span, name='dm_gp')) psl = ds.PulsarLikelihood(model_components) - prior = prior.makelogprior_uniform(psl.logL.params, {}) + prior = ds_prior.makelogprior_uniform(psl.logL.params, ds.priordict_standard) log_x = makelogtransform_uniform(psl.logL) # x0 = sample_uniform(psl.logL.params) if sampler_kwargs['numpyro_sampler'] == 'HMC_Gibbs': @@ -1021,7 +1015,8 @@ def numpyro_model(): def numpyro_model(): params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) numpyro.factor("ll", log_x(params)) - nuts_kernel = infer.NUTS(numpyro_model, num_steps=sampler_kwargs['num_steps']) + nuts_kernel = infer.NUTS(numpyro_model, max_tree_depth=5, dense_mass=True, + forward_mode_differentiation=False, target_accept_prob=0.99) sampler = infer.MCMC(nuts_kernel, num_warmup=sampler_kwargs['num_warmup'], num_samples=sampler_kwargs['num_samples'], @@ -1036,7 +1031,7 @@ def numpyro_model(): hmc_kernel = infer.HMC(numpyro_model, num_steps=sampler_kwargs['num_steps']) sampler = infer.MCMC(hmc_kernel, num_warmup=sampler_kwargs['num_warmup'], - num_samples=sampler_kwargs['num_warmup'], + num_samples=sampler_kwargs['num_samples'], num_chains=sampler_kwargs['num_chains'], chain_method=sampler_kwargs['chain_method'], progress_bar=True, @@ -1071,6 +1066,15 @@ def test_equad_convention(pars_list): ) return None + +def prior_dictionary_updates(): + return { + '(.*_)?dm_gp_log10_A': [-20, -11], + '(.*_)?dm_gp_gamma': [0, 7], + '(.*_)?chrom_gp_log10_A': [-20, -11], + '(.*_)?chrom_gp_gamma': [0, 7], + } + def get_model_and_sampler_default_settings(): model_defaults = { # acrhomatic red noise From 59c890da0cb9c067ef29557db8ab3e62a595d386 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Tue, 22 Oct 2024 06:19:15 +0000 Subject: [PATCH 037/193] swapping some kwargs & adding some logs --- src/pint_pal/noise_utils.py | 141 +++++++++++++++++++++--------------- 1 file 
changed, 82 insertions(+), 59 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index fcce8b8d..c6f4257b 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -4,7 +4,8 @@ from astropy.time import Time from enterprise.pulsar import Pulsar -from enterprise_extensions import models, model_utils, sampler +from enterprise_extensions import models, model_utils +from enterprise_extensions import sampler as ee_sampler import corner import pint.models as pm @@ -52,6 +53,11 @@ except ImportError: log.error("Please install the latest version of discovery, numpyro, and/or jax") ValueError("Please install the latest version of discovery, numpyro, and/or jax") +try: + from enterprise_extensions.gibbs_sampling.gibbs_chromatic import GibbsSampler +except: + log.warning("Please upgrade to the latest version of enterprise_extensions to use GibbsSampler.") + ValueError("Please install the latest version of discovery, numpyro, and/or jax") # from enterprise_extensions.blocks import (white_noise_block, red_noise_block) @@ -136,7 +142,8 @@ def analyze_noise( save_corner=True, no_corner_plot=False, chaindir_compare=None, - which_sampler = 'PTMCMCSampler', + model_kwargs={}, + sampler_kwargs={}, ): """ Reads enterprise chain file; produces and saves corner plot; returns WN dictionary and RN (SD) BF @@ -148,7 +155,6 @@ def analyze_noise( save_corner: Flag to toggle saving of corner plots; Default: True no_corner_plot: Flag to toggle saving of corner plots; Default: False chaindir_compare: path to noise run chain wish to plot in corner plot for comparison; Default: None - which_sampler: choose from ['PTMCMCSampler' or 'GibbsSampler' or 'discovery'] Returns ======= @@ -156,21 +162,25 @@ def analyze_noise( noise_dict: Dictionary of maximum a posterior noise values rn_bf: Savage-Dickey BF for achromatic RN for given pulsar """ - if which_sampler == 'PTMCMCSampler' or which_sampler == 'discovery': - try: - noise_core = co.Core(chaindir=chaindir) - except: - log.error(f"Could not load noise run from {chaindir}") - ValueError(f"Could not load noise run from {chaindir}") - elif which_sampler == 'GibbsSampler': - try: - noise_core = co.Core(corepath=chaindir+'/chain') - except: - log.error(f"Could not load noise run from {chaindir}") - ValueError(f"Could not load noise run from {chaindir}") - if which_sampler == 'PTMCMCSampler' or which_sampler == "GibbsSampler": + # get the default settings + model_defaults, sampler_defaults = get_model_and_sampler_default_settings() + # update with args passed in + model_defaults.update(model_kwargs) + sampler_defaults.update(sampler_kwargs) + model_kwargs = model_defaults.copy() + sampler_kwargs = sampler_defaults.copy() + sampler = sampler_kwargs['sampler'] + likelihood = sampler_kwargs['likelihood'] + try: + noise_core = co.Core(chaindir=chaindir) + except: + log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct. Also make sure you have an up-to-date la_forge installation. 
") + ValueError(f"Could not load noise run from {chaindir}") + if sampler == 'PTMCMCSampler' or sampler == "GibbsSampler": + # standard burn ins noise_core.set_burn(burn_frac) - elif which_sampler == 'discovery': + elif likelihood == 'discovery': + # the numpyro sampler already deals with the burn in noise_core.set_burn(0) else: noise_core.set_burn(burn_frac) @@ -196,6 +206,7 @@ def analyze_noise( ) chaindir_compare = None + if save_corner and not no_corner_plot: pars_short = [p.split("_", 1)[1] for p in pars] log.info(f"Chain parameter names are {pars_short}") @@ -349,8 +360,6 @@ def analyze_noise( def model_noise( mo, to, - which_sampler="PTMCMCSampler", - vary_red_noise=True, n_iter=int(1e5), using_wideband=False, resume=False, @@ -362,17 +371,17 @@ def model_noise( return_sampler=False, ): """ - Setup enterprise PTA and perform MCMC noise analysis + Setup enterprise or discovery likelihood and perform Bayesian inference on noise model Parameters ========== mo: PINT (or tempo2) timing model to: PINT (or tempo2) TOAs - sampler: choose from ['PTMCMCSampler' or 'GibbsSampler' or 'discovery'] - PTMCMCSampler -- MCMC sampling with the Enterprise likelihood - GibbsSampler -- enterprise_extension's GibbsSampler with PTMCMC and Enterprise white noise + likelihood: choose from ['Enterprise', 'discovery'] + enterprise -- Enterprise likelihood discovery -- various numpyro samplers with a discovery likelihood - red_noise: include red noise in the model + sampler: for Enterprise choose from ['PTMCMCSampler','GibbsSampler'] + for discovery choose from ['HMC', 'NUTS', 'HMC-GIBBS'] n_iter: number of MCMC iterations; Default: 1e5; Recommended > 5e4 using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False run_noise_analysis: Flag to toggle execution of noise modeling; Default: True @@ -392,23 +401,23 @@ def model_noise( sampler_defaults.update(sampler_kwargs) model_kwargs = model_defaults.copy() sampler_kwargs = sampler_defaults.copy() - - + likelihood = sampler_kwargs['likelihood'] + sampler = sampler_kwargs['sampler'] if not using_wideband: outdir = base_op_dir + mo.PSR.value + "_nb/" else: outdir = base_op_dir + mo.PSR.value + "_wb/" - + os.makedirs(outdir, exits_ok=True) if os.path.exists(outdir) and (run_noise_analysis) and (not resume): log.info( - "INFO: A noise directory for pulsar {} already exists! Re-running noise modeling from scratch".format( + "A noise directory for pulsar {} already exists! Re-running noise modeling from scratch".format( mo.PSR.value ) ) elif os.path.exists(outdir) and (run_noise_analysis) and (resume): log.info( - "INFO: A noise directory for pulsar {} already exists! Re-running noise modeling starting from previous chain".format( + "A noise directory for pulsar {} already exists! 
Re-running noise modeling starting from previous chain".format( mo.PSR.value ) ) @@ -428,18 +437,19 @@ def model_noise( ) # Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to) + log.info(f"Creating Enterprise.Pulsar object from model with {mo.NTOA.value} toas...") e_psr = Pulsar(mo, to) ########################################################## ################ PTMCMCSampler ################## ########################################################## - if which_sampler == "PTMCMCSampler": - log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") + if likelihood == "Enterprise" and sampler == 'PTMCMCSampler': + log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") # Setup a single pulsar PTA using enterprise_extensions if not using_wideband: pta = models.model_singlepsr_noise( e_psr, white_vary=True, - red_var=vary_red_noise, + red_var=model_kwargs['inc_rn'], # defaults True is_wideband=False, use_dmdata=False, dmjump_var=False, @@ -450,9 +460,10 @@ def model_noise( # CHROM GP chrom_gp=model_kwargs['inc_chromgp'], chrom_Nfreqs=model_kwargs['chromgp_nfreqs'], + chrom_gp_kernel='diag', # Fourier basis chromg_gp # DM SOLAR WIND - dm_sw_deter=model_kwargs['inc_sw_deter'], - ACE_prior=model_kwargs['ACE_prior'], + #dm_sw_deter=model_kwargs['inc_sw_deter'], + #ACE_prior=model_kwargs['ACE_prior'], # can pass extra signals in here extra_sigs=model_kwargs['extra_sigs'], ) @@ -462,7 +473,7 @@ def model_noise( is_wideband=True, use_dmdata=True, white_vary=True, - red_var=vary_red_noise, + red_var=model_kwargs['inc_rn'], dmjump_var=False, wb_efac_sigma=wb_efac_sigma, ng_twg_setup=True, @@ -480,26 +491,23 @@ def model_noise( groups = setup_sampling_groups(pta, write_groups=True, outdir=outdir) ####### # setup sampler using enterprise_extensions - samp = sampler.setup_sampler(pta, + samp = ee_sampler.setup_sampler(pta, outdir=outdir, resume=resume, groups=groups) # Initial sample x0 = np.hstack([p.sample() for p in pta.params]) # Start sampling + log.info("Beginnning to sample...") samp.sample( - x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs + x0, 1_000_000, SCAMweight=30, AMweight=15, DEweight=50, #**sampler_kwargs ) + log.info("Finished sampling.") ############################################################## ################## GibbsSampler ######################## ############################################################## - elif which_sampler == "GibbsSampler": - try: - from enterprise_extensions import GibbsSampler - except: - log.error("Please install the latest enterprise_extensions") - ValueError("Please install the latest enterprise_extensions") - log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") + elif likelihood == "Enterprise" and sampler == "GibbsSampler": + log.info(f"INFO: Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") samp = GibbsSampler( e_psr, vary_wn=True, @@ -509,22 +517,24 @@ def model_noise( vary_rn=model_kwargs['inc_rn'], rn_components=model_kwargs['rn_nfreqs'], vary_dm=model_kwargs['inc_dmgp'], - dm_components=model_kwargs['dm_nfreqs'], + dm_components=model_kwargs['dmgp_nfreqs'], vary_chrom=model_kwargs['inc_chromgp'], - chrom_components=model_kwargs['chrom_nfreqs'], + chrom_components=model_kwargs['chromgp_nfreqs'], noise_dict={}, - tnequad=True, + tnequad=model_kwargs['tnequad'], #**noise_kwargs, ) - samp.sample(niter=n_iter, save_path=outdir, **sampler_kwargs) + 
log.info("Beginnning to sample...") + samp.sample(niter=n_iter, savepath=outdir) + log.info("Finished sampling.") # sorta redundant to have both, but la_forge doesn't look for .npy files chain = np.load(f'{outdir}/chain_1.npy') np.savetxt(f'{outdir}/chain_1.txt', chain,) ################################################################# ################## discovery likelihood ################### ################################################################# - elif which_sampler == "discovery": - log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") + elif likelihood == "discovery": + log.info(f"INFO: Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") os.makedirs(outdir, exist_ok=True) with open(outdir+"model_kwargs.json", "w") as f: json.dump(model_kwargs, f) @@ -532,12 +542,15 @@ def model_noise( json.dump(sampler_kwargs, f) samp, log_x, numpyro_model = setup_discovery_noise(e_psr, model_kwargs, sampler_kwargs) # run the sampler + log.info("Beginnning to sample...") samp.run(jax.random.key(42)) + log.info("Finished sampling.") # convert to a DataFrame df = log_x.to_df(samp.get_samples()['par']) # convert DataFrame to dictionary samples_dict = df.to_dict(orient='list') - if sampler_kwargs['numpyro_sampler'] != 'HMC_GIBBS': + if sampler_kwargs['sampler'] != 'HMC-GIBBS': + log.info("Reconstructing Log Likelihood and Posterior from samples...") ln_like = log_likelihood(numpyro_model, samp.get_samples())['ll'] ln_prior = dist.Normal(0, 10).log_prob(samp.get_samples()['par']).sum(axis=-1) ln_post = ln_like + ln_prior @@ -552,7 +565,8 @@ def model_noise( inference_data.to_netcdf(outdir+"chain.nc") else: log.error( - "Invalid sampler specified. Please use 'PTMCMCSampler' or 'GibbsSampler' or 'discovery' " + f"Invalid likelihood ({likelihood}) and sampler ({sampler}) combination." \ + + "\nCan only use Enterprise with PTMCMCSampler or GibbsSampler." ) if return_sampler: return samp @@ -575,10 +589,9 @@ def add_noise_to_model( rn_bf_thres=1e2, base_dir=None, compare_dir=None, - which_sampler='PTMCMCSampler' ): """ - Add WN, RN, DMGP, and parameters to timing model. + Add WN, RN, DMGP, ChromGP, and SW parameters to timing model. 
Parameters ========== @@ -619,7 +632,6 @@ def add_noise_to_model( save_corner, no_corner_plot, chaindir_compare=chaindir_compare, - which_sampler=which_sampler, ) chainfile = chaindir + "chain_1.txt" mtime = Time(os.path.getmtime(chainfile), format="unix") @@ -969,6 +981,7 @@ def setup_discovery_noise(psr, Setup the discovery likelihood with numpyro sampling for noise analysis """ # set up the model + sampler = sampler_kwargs['sampler'] time_span = ds.getspan([psr]) # this updates the ds.stand_priordict object ds.priordict_standard.update(prior_dictionary_updates()) @@ -997,7 +1010,7 @@ def setup_discovery_noise(psr, prior = ds_prior.makelogprior_uniform(psl.logL.params, ds.priordict_standard) log_x = makelogtransform_uniform(psl.logL) # x0 = sample_uniform(psl.logL.params) - if sampler_kwargs['numpyro_sampler'] == 'HMC_Gibbs': + if sampler == 'HMC-Gibbs': def numpyro_model(): return None gibbs_hmc_kernel = setup_single_psr_hmc_gibbs( @@ -1011,7 +1024,7 @@ def numpyro_model(): chain_method=sampler_kwargs['chain_method'], progress_bar=True, ) - elif sampler_kwargs['numpyro_sampler'] == 'NUTS': + elif sampler == 'NUTS': def numpyro_model(): params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) numpyro.factor("ll", log_x(params)) @@ -1024,7 +1037,7 @@ def numpyro_model(): chain_method=sampler_kwargs['chain_method'], progress_bar=True, ) - elif sampler_kwargs['numpyro_sampler'] == 'HMC': + elif sampler == 'HMC': def numpyro_model(): params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) numpyro.factor("ll", log_x(params)) @@ -1036,6 +1049,12 @@ def numpyro_model(): chain_method=sampler_kwargs['chain_method'], progress_bar=True, ) + else: + log.error( + f"Invalid likelihood ({likelihood}) and sampler ({sampler}) combination." \ + + "\nCan only use discovery with 'HMC', 'HMC-Gibbs', or 'NUTS'." + ) + return sampler, log_x, numpyro_model @@ -1077,6 +1096,9 @@ def prior_dictionary_updates(): def get_model_and_sampler_default_settings(): model_defaults = { + # white noise + 'inc_wn': True, + 'tnequad': True, # acrhomatic red noise 'inc_rn': True, 'rn_psd': 'powerlaw', @@ -1100,7 +1122,8 @@ def get_model_and_sampler_default_settings(): 'extra_sigs': None, } sampler_defaults = { - 'numpyro_sampler': 'HMC', + 'likelihood': 'Enterprise', + 'sampler': 'HMC', 'num_steps': 5, 'num_warmup': 500, 'num_samples': 2500, From 16bfa71b76d80eeaa9c4a6d0672165e5deb372f2 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Wed, 23 Oct 2024 19:05:03 +0000 Subject: [PATCH 038/193] random fixes --- src/pint_pal/noise_utils.py | 105 +++++++++++------------------------- 1 file changed, 32 insertions(+), 73 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index c6f4257b..0578e708 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -408,7 +408,7 @@ def model_noise( outdir = base_op_dir + mo.PSR.value + "_nb/" else: outdir = base_op_dir + mo.PSR.value + "_wb/" - os.makedirs(outdir, exits_ok=True) + os.makedirs(outdir, exist_ok=True) if os.path.exists(outdir) and (run_noise_analysis) and (not resume): log.info( "A noise directory for pulsar {} already exists! 
Re-running noise modeling from scratch".format( @@ -507,7 +507,7 @@ def model_noise( ################## GibbsSampler ######################## ############################################################## elif likelihood == "Enterprise" and sampler == "GibbsSampler": - log.info(f"INFO: Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") + log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") samp = GibbsSampler( e_psr, vary_wn=True, @@ -534,7 +534,7 @@ def model_noise( ################## discovery likelihood ################### ################################################################# elif likelihood == "discovery": - log.info(f"INFO: Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") + log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") os.makedirs(outdir, exist_ok=True) with open(outdir+"model_kwargs.json", "w") as f: json.dump(model_kwargs, f) @@ -551,7 +551,7 @@ def model_noise( samples_dict = df.to_dict(orient='list') if sampler_kwargs['sampler'] != 'HMC-GIBBS': log.info("Reconstructing Log Likelihood and Posterior from samples...") - ln_like = log_likelihood(numpyro_model, samp.get_samples())['ll'] + ln_like = log_likelihood(numpyro_model, samp.get_samples(), parallel=True)['ll'] ln_prior = dist.Normal(0, 10).log_prob(samp.get_samples()['par']).sum(axis=-1) ln_post = ln_like + ln_prior samples_dict['lnlike'] = ln_like @@ -634,8 +634,14 @@ def add_noise_to_model( chaindir_compare=chaindir_compare, ) chainfile = chaindir + "chain_1.txt" - mtime = Time(os.path.getmtime(chainfile), format="unix") - log.info(f"Noise chains loaded from {chainfile} created at {mtime.isot}") + try: + mtime = Time(os.path.getmtime(chainfile), format="unix") + log.info(f"Noise chains loaded from {chainfile} created at {mtime.isot}") + except: + chainfile = chaindir+"chain.nc" + mtime = Time(os.path.getmtime(chainfile), format="unix") + log.info(f"Noise chains loaded from {chainfile} created at {mtime.isot}") + # Create the maskParameter for EFACS efac_params = [] @@ -848,12 +854,12 @@ def add_noise_to_model( #log.info(f"The SD Bayes factor for dm noise in this pulsar is: {dm_bf}") log.info('Adding Powerlaw DM GP noise as PLDMNoise to par file') # Add the ML RN parameters to their component - dm_comp = pm.PLDMNoise() + dm_comp = pm.noise_model.PLDMNoise() dm_keys = np.array([key for key, val in noise_dict.items() if "_red_" in key]) dm_comp.TNDMAMP.quantity = convert_to_RNAMP( noise_dict[psr_name + "_dm_gp_log10_A"] ) - dm_comp.TNDMIDX.quantity = -1 * noise_dict[psr_name + "_dm_gp_gamma"] + dm_comp.TNDMGAM.quantity = -1 * noise_dict[psr_name + "_dm_gp_gamma"] ##### FIXMEEEEEEE : need to figure out some way to softcode this dm_comp.TNDMC.quantitity = 100 # Add red noise to the timing model @@ -870,16 +876,16 @@ def add_noise_to_model( if f'{psr_name}_chrom_gp_log10_A' in chrom_pars: log.info('Adding Powerlaw CHROM GP noise as PLCMNoise to par file') # Add the ML RN parameters to their component - chrom_comp = pm.PLCMNoise() + chrom_comp = pm.noise_model.PLCMNoise() chrom_keys = np.array([key for key, val in noise_dict.items() if "_chrom_gp_" in key]) - dm_comp.TNDMAMP.quantity = convert_to_RNAMP( + chrom_comp.TNCMAMP.quantity = convert_to_RNAMP( noise_dict[psr_name + "_chrom_gp_log10_A"] ) - chrom_comp.TNCMIDX.quantity = -1 * noise_dict[psr_name + "_dm_gp_gamma"] + chrom_comp.TNCMGAM.quantity = -1 * 
noise_dict[psr_name + "_chrom_gp_gamma"] ##### FIXMEEEEEEE : need to figure out some way to softcode this chrom_comp.TNCMC.quantitity = 100 # Add red noise to the timing model - model.add_component(dm_comp, validate=True, force=True) + model.add_component(chrom_comp, validate=True, force=True) ###### FREE SPECTRAL (WaveX) DM NOISE ###### elif f'{psr_name}_chrom_gp_log10_rho_0' in chrom_pars: log.info('Adding Free Spectral CHROM GP as CMWaveXnoise to par file') @@ -901,6 +907,7 @@ def add_noise_to_model( # Setup and validate the timing model to ensure things are correct model.setup() model.validate() + #FIXME:::not sure why this is broken model.noise_mtime = mtime.isot if convert_equad_to_t2: @@ -911,67 +918,12 @@ def add_noise_to_model( return model -def plot_free_specs(sampler_kwargs={}, - model_kwargs={}, - noise_dict={}): +def plot_free_specs(c0, freqs, fs_type='Red Noise'): """ - Setup the Gibbs sampler for noise analysis from enterprise extensions + Plot free specs when using free spectral model """ - # check that a sufficiently up-to-date version of enterprise_extensions is installed - - - print("attempting to sample...") - savepath = f'/home/baierj/projects/ng20yr/noise_testing/test_J0613-0200/{psr_pkls[pidx].name}_prenoise/' - bps.sample(niter=30000, savepath = savepath,) - chain = np.load(f'{savepath}/chain_1.npy') - rn_freqs = np.load(f'{savepath}/rn_freqs.npy') - dm_freqs = np.load(f'{savepath}/dm_freqs.npy') - chrom_freqs = np.load(f'{savepath}/chrom_freqs.npy') - print(chain.shape) - outdir=savepath - np.savetxt(f'{savepath}/chain_1.txt', chain,) - c0 = co.Core(chaindir=savepath) - c0.chain = chain - - - wn_params = [par for par in c0.params if any([p in par for p in ['efac', 'equad', 'ecor']])] - if len(wn_params) > 0: - dg.plot_chains(c0, pars = wn_params) - plt.savefig(f'{outdir}/wn_hists.png') - plt.close() - dg.plot_grubin(c0) - plt.savefig(f'{outdir}/grubin.png') - plt.close() - - fig, axes = plt.subplots(1,1,figsize=(8,4)) - tspan = max(psr_pkls[pidx].toas)-min(psr_pkls[pidx].toas) - c0.rn_freqs = rn_freqs - rn.plot_free_spec(c0, axis=axes, parname_root=f'{psr_pkls[pidx].name}_red_noise_log10_rho', violin=True, Color='red',Tspan=tspan) - axes.set_xscale('log') - plt.title(f"{psr_pkls[pidx].name} | red noise | nfreqs={len(rn_freqs)}" ) - plt.savefig(f"{outdir}/rn.png") - plt.close() - - fig, axes = plt.subplots(1,1,figsize=(8,4)) - tspan = max(psr_pkls[pidx].toas)-min(psr_pkls[pidx].toas) - c0.rn_freqs = dm_freqs - rn.plot_free_spec(c0, axis=axes, parname_root=f'{psr_pkls[pidx].name}_dm_gp_log10_rho', - violin=True, Color='blue',Tspan=tspan) - axes.set_xscale('log') - plt.title(f"{psr_pkls[pidx].name} | DM GP | nfreqs={len(dm_freqs)} " ) - plt.savefig(f'{outdir}/dm_gp.png') - plt.close() - - fig, axes = plt.subplots(1,1,figsize=(8,4)) - c0.rn_freqs = chrom_freqs - tspan = max(psr_pkls[pidx].toas)-min(psr_pkls[pidx].toas) - rn.plot_free_spec(c0, axis=axes, parname_root=f'{psr_pkls[pidx].name}_chrom_gp_log10_rho', - violin=True, Color='orange',Tspan=tspan) - axes.set_xscale('log') - plt.title(f"{psr_pkls[pidx].name} | chrom gp | nfreqs={len(chrom_freqs)}" ) - plt.ylim(-9,-5) - plt.savefig(f'{outdir}/chrom_gp.png') - plt.close() + ImpelmentationError("not yet implemented") + return None def setup_discovery_noise(psr, @@ -984,6 +936,8 @@ def setup_discovery_noise(psr, sampler = sampler_kwargs['sampler'] time_span = ds.getspan([psr]) # this updates the ds.stand_priordict object + # need 64-bit precision for PTA inference + numpyro.enable_x64() 
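A note on the enable_x64() call added above: JAX computes in single precision by default, and pulsar-timing likelihoods span enough dynamic range that float32 round-off visibly corrupts them. A minimal sketch of the effect (illustrative only, not part of the patch):

import numpyro
numpyro.enable_x64()   # ideally call before any JAX arrays are created

import jax.numpy as jnp

# With x64 enabled, new arrays default to float64; otherwise they are
# float32 and MJD-scale quantities lose sub-microsecond information.
x = jnp.array(51544.00000001)
print(x.dtype)    # float64
print(float(x))   # retains the 1e-8 day offset; float32 would round it away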
ds.priordict_standard.update(prior_dictionary_updates()) model_components = [ psr.residuals, @@ -1028,8 +982,11 @@ def numpyro_model(): def numpyro_model(): params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) numpyro.factor("ll", log_x(params)) - nuts_kernel = infer.NUTS(numpyro_model, max_tree_depth=5, dense_mass=True, - forward_mode_differentiation=False, target_accept_prob=0.99) + nuts_kernel = infer.NUTS(numpyro_model, + max_tree_depth=sampler_kwargs['max_tree_depth'], + dense_mass=sampler_kwargs['dense_mass'], + forward_mode_differentiation=False, + target_accept_prob=0.99) sampler = infer.MCMC(nuts_kernel, num_warmup=sampler_kwargs['num_warmup'], num_samples=sampler_kwargs['num_samples'], @@ -1129,5 +1086,7 @@ def get_model_and_sampler_default_settings(): 'num_samples': 2500, 'num_chains': 4, 'chain_method': 'vectorized', + 'max_tree_depth': 5, + 'dense_mass': False, } return model_defaults, sampler_defaults \ No newline at end of file From 70b846fba163e97ee28b20b786c96926e6157a3f Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Thu, 24 Oct 2024 19:11:13 +0000 Subject: [PATCH 039/193] Transfering new fix. --- .github/workflows/test_notebook.yml | 65 +++++++++++++++++++---------- 1 file changed, 42 insertions(+), 23 deletions(-) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index eddc18d6..1333e422 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -11,45 +11,64 @@ on: jobs: build: - - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10"] + os: [ubuntu-latest] # Once we get the tex packages changed, we should include "macos-13" + python-version: ["3.9", "3.10", "3.11", "3.12"] steps: - - name: Install pdflatex + - name: Install Required Ubuntu Packages run: | sudo apt-get update sudo apt-get install texlive-latex-base cm-super-minimal pdftk latex2html - - uses: actions/checkout@v3 - - uses: conda-incubator/setup-miniconda@v2 + - uses: actions/checkout@v4 + - uses: mamba-org/setup-micromamba@v1 with: - python-version: ${{ matrix.python-version }} - mamba-version: "*" - channels: conda-forge,defaults - channel-priority: true - - name: Conda info - shell: bash -el {0} - run: conda info - - name: Install Dependencies & Main Code + init-shell: bash + environment-name: pulsar + cache-environment: true + cache-downloads: true + create-args: >- + -c conda-forge + python=${{ matrix.python-version }} + pytest + cython + pint-pulsar + tempo2 + libstempo + enterprise-pulsar + enterprise_extensions + scikit-sparse + scikit-learn + ruamel.yaml + nbconvert + ipywidgets>=7.6.3 + weasyprint + pytest-xdist>=2.3.0 + jupyter + seaborn + gitpython + - name: Install Main Code shell: bash -el {0} run: | - mamba install -c conda-forge python=${{ matrix.python-version }} pytest cython=0.29.36 tempo2 enterprise-pulsar enterprise_extensions scikit-sparse pip install -e . 
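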
      - name: Test with Standard Pulsar
        shell: bash -el {0}
        run: |
          export PULSAR_NAME='J0605+3757'
-          export JUPYTER_PLATFORM_DIRS=1 && jupyter --paths
-          tree tests
-          pytest tests/test_run_notebook.py -n auto -k $PULSAR_NAME
-          mv tmp-* nb_outputs
+          export JUPYTER_PLATFORM_DIRS=1
+          pytest tests/test_run_notebook.py -k $PULSAR_NAME
+          ls -lah
+          mv tmp* nb_outputs
      - name: Archive Notebook Output Files
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
        with:
-          name: TestNB-OutputFiles
+          name: TestNB-OutputFiles_${{ matrix.python-version }}_${{ matrix.os }}_${{ steps.date.outputs.date }}
          path: |
-            nb_outputs
-
+            nb_outputs/*/*.pdf
+            nb_outputs/*/*.tim
+            nb_outputs/*/*.par
+          compression-level: 6
+          
\ No newline at end of file

From 48d323a4d5a622a755615d361034e8862824d685 Mon Sep 17 00:00:00 2001
From: Joseph Glaser
Date: Thu, 24 Oct 2024 19:12:05 +0000
Subject: [PATCH 040/193] Adding uploading to commits.

---
 .github/workflows/commenting_artifacts.yml | 50 ++++++++++++++++++++++
 1 file changed, 50 insertions(+)
 create mode 100644 .github/workflows/commenting_artifacts.yml

diff --git a/.github/workflows/commenting_artifacts.yml b/.github/workflows/commenting_artifacts.yml
new file mode 100644
index 00000000..bd7bc209
--- /dev/null
+++ b/.github/workflows/commenting_artifacts.yml
@@ -0,0 +1,50 @@
+name: Bind Reports to Pull-Request
+
+# Triggered by the name of the previous workflow
+on:
+  workflow_run:
+    workflows: ["Notebook Pipeline (Ubuntu)"]
+    types: [completed]
+
+# Enable the option to post a comment
+permissions:
+  pull-requests: write
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    # Check that the previous workflow succeeded
+    if: ${{ github.event.workflow_run.conclusion == 'success' }}
+    steps:
+      - name: Get Artifacts Link
+        env:
+          WORKFLOW_RUN_EVENT_OBJ: ${{ toJSON(github.event.workflow_run) }}
+          GH_TOKEN: ${{ github.token }}
+        run: |
+          PREVIOUS_JOB_ID=$(jq -r '.id' <<< "$WORKFLOW_RUN_EVENT_OBJ")
+          SUITE_ID=$(jq -r '.check_suite_id' <<< "$WORKFLOW_RUN_EVENT_OBJ")
+          # Sample for a single artifact; can be improved to handle multiple artifacts
+          ARTIFACT_ID=$(gh api "/repos/${{ github.repository }}/actions/artifacts" \
+            --jq ".artifacts.[] |
+            select(.workflow_run.id==${PREVIOUS_JOB_ID}) |
+            .id")
+          echo "ARTIFACT_URL=https://github.com/${{ github.repository }}/suites/${SUITE_ID}/artifacts/${ARTIFACT_ID}" >> $GITHUB_ENV
+          PR_NUMBER=$(jq -r '.pull_requests[0].number' <<< "$WORKFLOW_RUN_EVENT_OBJ")
+          echo "PR_NUMBER=${PR_NUMBER}" >> $GITHUB_ENV
+
+      - uses: actions/github-script@v6
+        env:
+          PR_NUMBER: ${{ env.PR_NUMBER }}
+          PR_NOTES: |
+            Build artifacts:
+            | Name | Link |
+            |------|------|
+            | NB/WB Pipeline | [Archived ZIP](${{ env.ARTIFACT_URL }}) |
+        with:
+          script: |
+            github.rest.issues.createComment({
+              issue_number: process.env.PR_NUMBER,
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              body: process.env.PR_NOTES
+            })
\ No newline at end of file

From 3218cc369138150b26918b381b927e85be024af4 Mon Sep 17 00:00:00 2001
From: Ross Jennings
Date: Thu, 24 Oct 2024 16:34:56 -0400
Subject: [PATCH 041/193] Fix setter function names

---
 src/pint_pal/timingconfiguration.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py
index 52ef70f8..c9851869 100644
--- a/src/pint_pal/timingconfiguration.py
+++ b/src/pint_pal/timingconfiguration.py
@@ -65,7 +65,7 @@ def tim_directory(self):
        )

    @tim_directory.setter
-    def set_tim_directory(self, tim_directory):
+    def tim_directory(self, tim_directory):
        """
        Set tim 
directory. If a relative path is supplied, it will be turned into an absolute path. @@ -83,7 +83,7 @@ def par_directory(self): ) @par_directory.setter - def set_par_directory(self, par_directory): + def par_directory(self, par_directory): """ Set par directory. If a relative path is supplied, it will be turned into an absolute path. From 60ce16dcf5370972c364a1bf111be0f350fe7c20 Mon Sep 17 00:00:00 2001 From: Michael Lam Date: Fri, 27 Sep 2024 17:02:59 +0000 Subject: [PATCH 042/193] fix list comprehension in apply_ignore() for poor-febe check --- src/pint_pal/timingconfiguration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index 52ef70f8..ba80fc58 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -785,7 +785,7 @@ def apply_ignore(self,toas,specify_keys=None,warn=False,model=None): if self.get_snr_cut() > 25.0 and self.get_toa_type() == 'WB': log.warning('snr-cut should be set to 25; try excising TOAs using other methods.') if 'poor-febe' in valid_valued: - fs = np.array([(f['f'] if 'f' in f else None) in toas.orig_table['flags']]) + fs = np.array([(f['f'] if 'f' in f else None) for f in toas.orig_table['flags']]) for febe in self.get_poor_febes(): febeinds = np.where(fs==febe)[0] apply_cut_flag(toas,febeinds,'poorfebe',warn=warn) From 5e7e41901d07a18dc920c216da6a29ad6b430972 Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Mon, 18 Nov 2024 16:56:45 +0000 Subject: [PATCH 043/193] Adjusting to deal with the `created_time` bug introduced in `pint@v1.1`. --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 0f91604b..6da7f0e3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,7 @@ readme = "README.md" requires-python = ">=3.8" dependencies = [ "ruamel.yaml", - "pint_pulsar>=0.9.1", + "pint_pulsar>=0.9.1<=1.0.1", "enterprise-pulsar>=3.3.2", "enterprise-extensions>=v2.4.1", "pytest", From 760f23019453a8025442476eb5249498758685c0 Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Mon, 18 Nov 2024 17:02:39 +0000 Subject: [PATCH 044/193] Fixing syntax error in pint_pulsar versioning by pinning the version at 1.0.1 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 6da7f0e3..594b7aee 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,7 @@ readme = "README.md" requires-python = ">=3.8" dependencies = [ "ruamel.yaml", - "pint_pulsar>=0.9.1<=1.0.1", + "pint_pulsar==1.0.1", "enterprise-pulsar>=3.3.2", "enterprise-extensions>=v2.4.1", "pytest", From 993ae5e7a5012196fc56fde6c52ffbfb5c9034df Mon Sep 17 00:00:00 2001 From: Golam Date: Thu, 5 Dec 2024 15:35:41 +0100 Subject: [PATCH 045/193] Update pyproject.toml to fix missing plot_settings.yaml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 594b7aee..b0eef5bc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,7 @@ classifiers = [ ] [tool.setuptools.package-data] -pint_pal = ["defaults.yaml"] +pint_pal = ["defaults.yaml", "plot_settings.yaml"] [project.urls] "Homepage" = "https://github.com/nanograv/pint_pal" From 6a64600fc35a0aa2b9f46847acb1e5bf47caf63e Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 11 Dec 2024 14:10:53 -0500 Subject: [PATCH 046/193] Avoid slicing toas objects --- src/pint_pal/dmx_utils.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 
9 deletions(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index 55a65383..3bb37bfb 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -460,27 +460,27 @@ def get_dmx_epoch(toas: pint.toa.TOAs, weighted_average: bool = True) -> float: return epoch -def get_dmx_freqs(toas: pint.toa.TOAs, allow_wideband: bool = True) -> Tuple[float, float]: +def get_dmx_freqs(toas: pint.toa.TOAs, mask: np.ndarray, allow_wideband: bool = True) -> Tuple[float, float]: """ Return the lowest and highest frequency of the TOAs in a DMX bin. - toas is a PINT TOA object of TOAs in the DMX bin. + toas is a PINT TOA object containing all the relevant TOAs. + mask is a boolean mask that identifies the TOAs in this DMX bin. allow_wideband=True will consider the -fratio and -bw flags in the determination of these frequencies, if toas contains wideband TOAs. """ - freqs = toas.get_freqs().value # MHz + freqs = toas.get_freqs()[mask].value # MHz high_freq = 0.0 low_freq = np.inf # indices of wideband TOAs - iwb = np.arange(len(toas))[np.array(toas.get_flag_value('pp_dm')[0]) \ - != None] + wb_mask = mask & (np.array(toas.get_flag_value('pp_dm')[0]) != None) if allow_wideband: # the following arrays will be empty if narrowband TOAs - fratios = toas[iwb].get_flag_value('fratio') # frequency ratio / WB TOA - fratios = np.array(fratios[0]) - bws = toas[iwb].get_flag_value('bw') # bandwidth [MHz] / WB TOA - bws = np.array(bws[0]) + fratios = toas.get_flag_value('fratio')[0] # frequency ratio / WB TOA + fratios = np.array(fratios[wb_mask]) + bws = toas.get_flag_value('bw')[0] # bandwidth [MHz] / WB TOA + bws = np.array(bws[wb_mask]) low_freqs = bws.astype('float32') / (fratios.astype('float32') - 1) high_freqs = bws.astype('float32') + low_freqs From c92a1753602d44a83155df7c5d849da04bde8c59 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 11 Dec 2024 14:13:11 -0500 Subject: [PATCH 047/193] Fix call sites --- src/pint_pal/dmx_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index 3bb37bfb..6638efa6 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -530,7 +530,7 @@ def check_frequency_ratio( low_mjd, high_mjd = dmx_range[0], dmx_range[1] mask = get_dmx_mask(toas, low_mjd, high_mjd, strict_inclusion=strict_inclusion) - low_freq, high_freq = get_dmx_freqs(toas[mask], + low_freq, high_freq = get_dmx_freqs(toas, mask, allow_wideband=allow_wideband) if high_freq / low_freq >= frequency_ratio: # passes toa_mask += mask @@ -612,7 +612,7 @@ def check_solar_wind( low_mjd, high_mjd = dmx_range[0], dmx_range[1] mask = get_dmx_mask(toas, low_mjd, high_mjd, strict_inclusion=strict_inclusion) - low_freq, high_freq = get_dmx_freqs(toas[mask], + low_freq, high_freq = get_dmx_freqs(toas, mask, allow_wideband=allow_wideband) # Convert to time delay, using calc from David's code (fixed) theta = np.pi - phis[mask] # rad @@ -911,7 +911,7 @@ def make_dmx( high_mjd = max(dmx_ranges[irange]) mask = get_dmx_mask(toas, low_mjd, high_mjd, strict_inclusion) epoch = get_dmx_epoch(toas[mask], weighted_average) - low_freq, high_freq = get_dmx_freqs(toas[mask], allow_wideband) + low_freq, high_freq = get_dmx_freqs(toas, mask, allow_wideband) dmx_parameter = DMXParameter() dmx_parameter.idx = idx dmx_parameter.val = dmx_vals[irange] From 997879d940dad4f83f3465480e6d577b64495f21 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 11 Dec 2024 14:22:00 -0500 Subject: [PATCH 048/193] Slice after array 
conversion --- src/pint_pal/dmx_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index 6638efa6..cf932296 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -478,9 +478,9 @@ def get_dmx_freqs(toas: pint.toa.TOAs, mask: np.ndarray, allow_wideband: bool = wb_mask = mask & (np.array(toas.get_flag_value('pp_dm')[0]) != None) if allow_wideband: # the following arrays will be empty if narrowband TOAs fratios = toas.get_flag_value('fratio')[0] # frequency ratio / WB TOA - fratios = np.array(fratios[wb_mask]) + fratios = np.array(fratios)[wb_mask] bws = toas.get_flag_value('bw')[0] # bandwidth [MHz] / WB TOA - bws = np.array(bws[wb_mask]) + bws = np.array(bws)[wb_mask] low_freqs = bws.astype('float32') / (fratios.astype('float32') - 1) high_freqs = bws.astype('float32') + low_freqs From 2520c2cfd0525d86709786044e7a91c1631d60df Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 11 Dec 2024 14:31:01 -0500 Subject: [PATCH 049/193] Let Numpy do the mins and maxes --- src/pint_pal/dmx_utils.py | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index cf932296..ab374250 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -471,26 +471,21 @@ def get_dmx_freqs(toas: pint.toa.TOAs, mask: np.ndarray, allow_wideband: bool = """ freqs = toas.get_freqs()[mask].value # MHz - high_freq = 0.0 - low_freq = np.inf + high_freq = np.max(freqs) + low_freq = np.min(freqs) - # indices of wideband TOAs - wb_mask = mask & (np.array(toas.get_flag_value('pp_dm')[0]) != None) - if allow_wideband: # the following arrays will be empty if narrowband TOAs + if allow_wideband: + # indices of wideband TOAs + wb_mask = mask & (np.array(toas.get_flag_value('pp_dm')[0]) != None) + # the following arrays will be empty if all TOAs are narrowband fratios = toas.get_flag_value('fratio')[0] # frequency ratio / WB TOA fratios = np.array(fratios)[wb_mask] bws = toas.get_flag_value('bw')[0] # bandwidth [MHz] / WB TOA bws = np.array(bws)[wb_mask] low_freqs = bws.astype('float32') / (fratios.astype('float32') - 1) + low_freq = min(low_freq, np.min(low_freqs)) high_freqs = bws.astype('float32') + low_freqs - - for itoa in range(len(toas)): - if itoa in iwb and allow_wideband: - if low_freqs[itoa] < low_freq: low_freq = low_freqs[itoa] - if high_freqs[itoa] > high_freq: high_freq = high_freqs[itoa] - else: - if freqs[itoa] < low_freq: low_freq = freqs[itoa] - if freqs[itoa] > high_freq: high_freq = freqs[itoa] + high_freq = max(high_freq, np.max(high_freqs)) return low_freq, high_freq From 54f964ee2cc76106f0ce42606ae9cc82ce819d46 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 11 Dec 2024 14:36:03 -0500 Subject: [PATCH 050/193] Add initial values --- src/pint_pal/dmx_utils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index ab374250..eac6432b 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -471,8 +471,8 @@ def get_dmx_freqs(toas: pint.toa.TOAs, mask: np.ndarray, allow_wideband: bool = """ freqs = toas.get_freqs()[mask].value # MHz - high_freq = np.max(freqs) - low_freq = np.min(freqs) + high_freq = np.max(freqs, initial=0.) 
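The initial= keywords introduced here guard the empty-selection case: np.min and np.max raise a ValueError on a zero-length array, while initial= supplies an identity value to fall back on. A small self-contained sketch (illustrative values only):

import numpy as np

empty = np.array([])
# np.min(empty)                       # would raise: zero-size array to reduction operation
print(np.min(empty, initial=np.inf))  # inf -- no lower bound found yet
print(np.max(empty, initial=0.))      # 0.0 -- no upper bound found yet

freqs = np.array([722.5, 820.0, 1410.0])
print(np.max(freqs, initial=0.))      # 1410.0 -- non-empty input is unchanged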
+    low_freq = np.min(freqs, initial=np.inf)
 
     if allow_wideband:
         # indices of wideband TOAs
@@ -483,9 +483,9 @@ def get_dmx_freqs(toas: pint.toa.TOAs, mask: np.ndarray, allow_wideband: bool =
         bws = toas.get_flag_value('bw')[0]  # bandwidth [MHz] / WB TOA
         bws = np.array(bws)[wb_mask]
         low_freqs = bws.astype('float32') / (fratios.astype('float32') - 1)
-        low_freq = min(low_freq, np.min(low_freqs))
+        low_freq = min(low_freq, np.min(low_freqs, initial=np.inf))
         high_freqs = bws.astype('float32') + low_freqs
-        high_freq = max(high_freq, np.max(high_freqs))
+        high_freq = max(high_freq, np.max(high_freqs, initial=0.))
 
     return low_freq, high_freq

From bf81a06aa9eabb5923bd92a496b732c3bb184346 Mon Sep 17 00:00:00 2001
From: Ross Jennings
Date: Fri, 13 Dec 2024 16:42:29 +0000
Subject: [PATCH 051/193] Also avoid slicing TOAs for log messages

---
 src/pint_pal/dmx_utils.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py
index eac6432b..2049674e 100644
--- a/src/pint_pal/dmx_utils.py
+++ b/src/pint_pal/dmx_utils.py
@@ -531,9 +531,9 @@
             toa_mask += mask
             dmx_range_mask[irange] = True
         else:  # fails
-            nfail_toas += len(toas[mask])
+            nfail_toas += np.sum(mask)
             if not quiet:
-                msg = f"DMX range with pythonic index {irange}, correponding to the DMX range {dmx_ranges[irange]}, contains TOAs that do not pass the frequency ratio test (TOAs with MJDs {toas[mask].get_mjds().value})."
+                msg = f"DMX range with pythonic index {irange}, corresponding to the DMX range {dmx_ranges[irange]}, contains TOAs that do not pass the frequency ratio test (TOAs with MJDs {toas.get_mjds()[mask].value})."
                 log.info(msg)
 
     nfail_ranges = sum(np.logical_not(dmx_range_mask))
@@ -621,7 +621,7 @@
             toa_mask += mask
             dmx_range_mask[irange] = True
             if not quiet:
-                msg = f"DMX range with pythonic index {irange}, correponding to the DMX range {dmx_ranges[irange]}, contains TOAs that are affected by the solar wind (TOAs with MJDs {toas[mask].get_mjds().value})."
+                msg = f"DMX range with pythonic index {irange}, corresponding to the DMX range {dmx_ranges[irange]}, contains TOAs that are affected by the solar wind (TOAs with MJDs {toas.get_mjds()[mask].value})." 
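The common thread in these dmx_utils patches is replacing toas[mask] slices with boolean-mask indexing of the arrays actually needed: constructing a sliced TOAs object is comparatively expensive, while masking a NumPy array is cheap and reports the same values. A schematic sketch with stand-in values:

import numpy as np

# stand-ins for toas.get_mjds().value and a DMX-bin membership mask
mjds = np.array([58000.1, 58003.4, 58010.9, 58020.0])
mask = np.array([True, False, True, False])

print(mjds[mask])    # [58000.1 58010.9] -- same MJDs a toas[mask] slice would report
print(np.sum(mask))  # 2 -- TOA count without building a sliced object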
log.info(msg) nsolar = sum(dmx_range_mask) if not quiet and nsolar: From 2597a7adaa66628cd40ec409a0651953d608893e Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Tue, 17 Dec 2024 18:28:43 +0000 Subject: [PATCH 052/193] move import try/except; add empirical distributions --- src/pint_pal/noise_utils.py | 118 +++++++++++++++++++++--------------- 1 file changed, 68 insertions(+), 50 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 0578e708..683cee3f 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -35,29 +35,6 @@ from enterprise_extensions import model_utils from enterprise_extensions import deterministic from enterprise_extensions.timing import timing_block -try: - import xarray as xr - import jax - from jax import numpy as jnp - import numpyro - from numpyro.infer import log_likelihood - from numpyro import distributions as dist - from numpyro import infer - import discovery as ds - from discovery import prior as ds_prior - from discovery.prior import (makelogtransform_uniform, - makelogprior_uniform, - sample_uniform) - from discovery.gibbs import setup_single_psr_hmc_gibbs - -except ImportError: - log.error("Please install the latest version of discovery, numpyro, and/or jax") - ValueError("Please install the latest version of discovery, numpyro, and/or jax") -try: - from enterprise_extensions.gibbs_sampling.gibbs_chromatic import GibbsSampler -except: - log.warning("Please upgrade to the latest version of enterprise_extensions to use GibbsSampler.") - ValueError("Please install the latest version of discovery, numpyro, and/or jax") # from enterprise_extensions.blocks import (white_noise_block, red_noise_block) @@ -175,7 +152,7 @@ def analyze_noise( noise_core = co.Core(chaindir=chaindir) except: log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct. Also make sure you have an up-to-date la_forge installation. ") - ValueError(f"Could not load noise run from {chaindir}") + raise ValueError(f"Could not load noise run from {chaindir}. Check path and la_forge installation.") if sampler == 'PTMCMCSampler' or sampler == "GibbsSampler": # standard burn ins noise_core.set_burn(burn_frac) @@ -360,7 +337,6 @@ def analyze_noise( def model_noise( mo, to, - n_iter=int(1e5), using_wideband=False, resume=False, run_noise_analysis=True, @@ -377,17 +353,21 @@ def model_noise( ========== mo: PINT (or tempo2) timing model to: PINT (or tempo2) TOAs - likelihood: choose from ['Enterprise', 'discovery'] - enterprise -- Enterprise likelihood - discovery -- various numpyro samplers with a discovery likelihood - sampler: for Enterprise choose from ['PTMCMCSampler','GibbsSampler'] - for discovery choose from ['HMC', 'NUTS', 'HMC-GIBBS'] - n_iter: number of MCMC iterations; Default: 1e5; Recommended > 5e4 using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False + resume: Flag to resume overwrite previous run or not. run_noise_analysis: Flag to toggle execution of noise modeling; Default: True noise_kwargs: dictionary of noise model parameters; Default: {} sampler_kwargs: dictionary of sampler parameters; Default: {} return_sampler: Flag to return the sampler object; Default: False + + Recommended to pass model_kwargs and sampler_kwargs from the config file. + Default kwargs given by function `get_model_and_sampler_default_settings`. 
+ Import configuration parameters: + likelihood: choose from ['Enterprise', 'discovery'] + enterprise -- Enterprise likelihood + discovery -- various numpyro samplers with a discovery likelihood + sampler: for Enterprise choose from ['PTMCMCSampler','GibbsSampler'] + for discovery choose from ['HMC', 'NUTS', 'HMC-GIBBS'] Returns ======= @@ -428,13 +408,6 @@ def model_noise( ) return None - # Ensure n_iter is an integer - n_iter = int(n_iter) - - if n_iter < 1e4: - log.warning( - "Such a small number of iterations is unlikely to yield accurate posteriors. STRONGLY recommend increasing the number of iterations to at least 5e4" - ) # Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to) log.info(f"Creating Enterprise.Pulsar object from model with {mo.NTOA.value} toas...") @@ -445,6 +418,13 @@ def model_noise( if likelihood == "Enterprise" and sampler == 'PTMCMCSampler': log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") # Setup a single pulsar PTA using enterprise_extensions + # Ensure n_iter is an integer + sampler_kwargs['n_iter'] = int(sampler_kwargs['n_iter']) + + if sampler_kwargs['n_iter'] < 1e4: + log.warning( + f"Such a small number of iterations with {sampler} is unlikely to yield accurate posteriors. STRONGLY recommend increasing the number of iterations to at least 5e4" + ) if not using_wideband: pta = models.model_singlepsr_noise( e_psr, @@ -467,6 +447,7 @@ def model_noise( # can pass extra signals in here extra_sigs=model_kwargs['extra_sigs'], ) + pta.set_default_params({}) else: pta = models.model_singlepsr_noise( e_psr, @@ -488,25 +469,37 @@ def model_noise( dmjump_params[dmjump_param_name] = dmjump_param.value pta.set_default_params(dmjump_params) # set groups here - groups = setup_sampling_groups(pta, write_groups=True, outdir=outdir) + groups = setup_sampling_groups(pta, write_groups=False, outdir=outdir) ####### # setup sampler using enterprise_extensions samp = ee_sampler.setup_sampler(pta, - outdir=outdir, - resume=resume, - groups=groups) + outdir=outdir, + resume=resume, + groups=groups, + empirical_distr = sampler_kwargs['empirical_distr'] + xx) + if sampler_kwargs['empirical_distr'] is not None: + try: + samp.addProposalToCycle(samp.jp.draw_from_empirical_distr, 50) + except: + log.warning("Failed to add draws from empirical distribution.") # Initial sample x0 = np.hstack([p.sample() for p in pta.params]) # Start sampling log.info("Beginnning to sample...") samp.sample( - x0, 1_000_000, SCAMweight=30, AMweight=15, DEweight=50, #**sampler_kwargs + x0, sampler_kwargs['n_iter'], SCAMweight=30, AMweight=15, DEweight=50, #**sampler_kwargs ) log.info("Finished sampling.") ############################################################## ################## GibbsSampler ######################## ############################################################## elif likelihood == "Enterprise" and sampler == "GibbsSampler": + try: + from enterprise_extensions.gibbs_sampling.gibbs_chromatic import GibbsSampler + except: + log.error("Please upgrade to the latest version of enterprise_extensions to use GibbsSampler.") + raise ValueError("Please install a version of enterprise extensions which contains the `gibbs_sampling` module.") log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") samp = GibbsSampler( e_psr, @@ -534,6 +527,22 @@ def model_noise( ################## discovery likelihood ################### 
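The try/except blocks moved in this patch follow a guarded-import pattern: heavy optional dependencies are imported inside the branch that needs them, and a failure raises an actionable error instead of breaking module import for everyone. A minimal sketch of the idiom (the import path is the one used above; the wrapper function name is illustrative):

def require_gibbs_sampler():
    # Import GibbsSampler lazily, failing with an actionable message.
    try:
        from enterprise_extensions.gibbs_sampling.gibbs_chromatic import GibbsSampler
    except ImportError as exc:
        raise ImportError(
            "enterprise_extensions with the gibbs_sampling module is required"
        ) from exc
    return GibbsSampler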
################################################################# elif likelihood == "discovery": + try: # make sure requisite packages are installed + import xarray as xr + import jax + from jax import numpy as jnp + import numpyro + from numpyro.infer import log_likelihood + from numpyro import distributions as dist + from numpyro import infer + import discovery as ds + from discovery import prior as ds_prior + from discovery.prior import (makelogtransform_uniform, + makelogprior_uniform, + sample_uniform) + except ImportError: + log.error("Please install the latest version of discovery, numpyro, and/or jax") + raise ValueError("Please install the latest version of discovery, numpyro, and/or jax") log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") os.makedirs(outdir, exist_ok=True) with open(outdir+"model_kwargs.json", "w") as f: @@ -935,9 +944,9 @@ def setup_discovery_noise(psr, # set up the model sampler = sampler_kwargs['sampler'] time_span = ds.getspan([psr]) - # this updates the ds.stand_priordict object # need 64-bit precision for PTA inference numpyro.enable_x64() + # this updates the ds.stand_priordict object ds.priordict_standard.update(prior_dictionary_updates()) model_components = [ psr.residuals, @@ -965,8 +974,12 @@ def setup_discovery_noise(psr, log_x = makelogtransform_uniform(psl.logL) # x0 = sample_uniform(psl.logL.params) if sampler == 'HMC-Gibbs': - def numpyro_model(): - return None + try: + from discovery.gibbs import setup_single_psr_hmc_gibbs + except ImportError: + log.error("Need to have most up-to-date version of discovery installed.") + raise ValueError("Make sure proper version of discovery is imported") + numpyro_model = None # this doesnt get used for HMC-Gibbs gibbs_hmc_kernel = setup_single_psr_hmc_gibbs( psrl=psl, psrs=psr, priordict=ds.priordict_standard, @@ -1008,7 +1021,7 @@ def numpyro_model(): ) else: log.error( - f"Invalid likelihood ({likelihood}) and sampler ({sampler}) combination." \ + f"Invalid likelihood ({sampler_kwargs['likelihood']}) and sampler ({sampler_kwargs['sampler']}) combination." \ + "\nCan only use discovery with 'HMC', 'HMC-Gibbs', or 'NUTS'." 
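The HMC/NUTS branches above share one small numpyro model: sample a flat parameter vector with a wide Normal prior in the transformed space, then inject the log-likelihood through numpyro.factor. A self-contained toy version of that pattern (the quadratic log_x is a stand-in for the real transformed discovery likelihood; all values are illustrative):

import jax
import jax.numpy as jnp
import numpyro
import numpyro.distributions as dist
from numpyro import infer

ndim = 3

def log_x(params):                      # toy stand-in likelihood
    return -0.5 * jnp.sum(params**2)

def numpyro_model():
    params = numpyro.sample("par", dist.Normal(0, 10).expand([ndim]))
    numpyro.factor("ll", log_x(params))

kernel = infer.NUTS(numpyro_model, max_tree_depth=5)
mcmc = infer.MCMC(kernel, num_warmup=100, num_samples=200, num_chains=1)
mcmc.run(jax.random.key(42))
print(mcmc.get_samples()["par"].shape)  # (200, 3)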
) @@ -1077,15 +1090,20 @@ def get_model_and_sampler_default_settings(): 'ACE_prior': False, # 'extra_sigs': None, + # path to empirical distribution } sampler_defaults = { 'likelihood': 'Enterprise', - 'sampler': 'HMC', - 'num_steps': 5, + 'sampler': 'PTMCMCSampler', + # ptmcmc kwargs + 'n_iter': 2e5, + 'empirical_distr': None, + # numpyro kwargs + 'num_steps': 25, 'num_warmup': 500, 'num_samples': 2500, 'num_chains': 4, - 'chain_method': 'vectorized', + 'chain_method': 'parallel', 'max_tree_depth': 5, 'dense_mass': False, } From 502902930492f746e30a0c603793a1cd15ed4062 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Tue, 17 Dec 2024 10:33:16 -0800 Subject: [PATCH 053/193] fix typo --- src/pint_pal/noise_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 683cee3f..6b425359 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -477,7 +477,7 @@ def model_noise( resume=resume, groups=groups, empirical_distr = sampler_kwargs['empirical_distr'] - xx) + ) if sampler_kwargs['empirical_distr'] is not None: try: samp.addProposalToCycle(samp.jp.draw_from_empirical_distr, 50) From e316f564e439c6feddda8da1e641b26de1cda8bf Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Tue, 17 Dec 2024 12:14:18 -0800 Subject: [PATCH 054/193] add laforge, arviz to pyproject.toml --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index b0eef5bc..07ae091f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,6 +13,7 @@ authors = [ { name="Anne Archibald", email="anne.archibald@nanograv.org" }, { name="Kevin Wilson", email="kevin.wilson@nanograv.org" }, { name="Ross Jennings", email="ross.jennings@nanograv.org" }, + { name="Jeremy Baier", email="jeremy.baier@nanograv.org"} ] description = "A long-lived repository for NANOGrav Pulsar Timing analysis work." readme = "README.md" @@ -32,6 +33,8 @@ dependencies = [ "jupyter", "seaborn", "gitpython", + "laforge", + "arviz", ] classifiers = [ "Programming Language :: Python :: 3", From e2dae64a7af66b565887910e86566a02ff492fca Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Tue, 17 Dec 2024 12:33:09 -0800 Subject: [PATCH 055/193] changes to noise utils --- src/pint_pal/noise_utils.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 6b425359..3fa93721 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -848,7 +848,6 @@ def add_noise_to_model( noise_dict[psr_name + "_red_noise_log10_A"] ) rn_comp.RNIDX.quantity = -1 * noise_dict[psr_name + "_red_noise_gamma"] - # Add red noise to the timing model model.add_component(rn_comp, validate=True, force=True) else: @@ -909,14 +908,18 @@ def add_noise_to_model( noise = noise_class() # Make the dispersion instance. 
model.add_component(noise, validate=False) # add parameters - model['NE_SW'].quantity = noise_dict[f'{psr_name}_NE_SW'] - model['NE_SW'].frozen = True + if f'{psr_name}_n_earth' in sw_pars: + model['NE_SW'].quantity = noise_dict[f'{psr_name}_n_earth'] + model['NE_SW'].frozen = True + elif f'{psr_name}_sw_gp_log10_A' in sw_pars: + raise NotImplementedError('Solar Wind Dispersion power-law GP not yet implemented') + elif f'{psr_name}_sw_gp_log10_rho' in sw_pars: + raise NotImplementedError('Solar Wind Dispersion free spec GP not yet implemented') # Setup and validate the timing model to ensure things are correct model.setup() model.validate() - #FIXME:::not sure why this is broken model.noise_mtime = mtime.isot if convert_equad_to_t2: @@ -931,7 +934,7 @@ def plot_free_specs(c0, freqs, fs_type='Red Noise'): """ Plot free specs when using free spectral model """ - ImpelmentationError("not yet implemented") + raise NotImplementedError("not yet implemented") return None From 6fbeef60e5719fcc65c56f9babc94b93547e592d Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Tue, 17 Dec 2024 13:36:34 -0800 Subject: [PATCH 056/193] fixing stuff in noise utils --- src/pint_pal/noise_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 3fa93721..54f7a726 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -518,7 +518,7 @@ def model_noise( #**noise_kwargs, ) log.info("Beginnning to sample...") - samp.sample(niter=n_iter, savepath=outdir) + samp.sample(niter=sampler_kwargs['n_iter'], savepath=outdir) log.info("Finished sampling.") # sorta redundant to have both, but la_forge doesn't look for .npy files chain = np.load(f'{outdir}/chain_1.npy') @@ -973,6 +973,7 @@ def setup_discovery_noise(psr, elif model_kwargs['rn_psd'] == 'free_spectral': model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['chromgp_nfreqs'], T=time_span, name='dm_gp')) psl = ds.PulsarLikelihood(model_components) + ## this prior transform is no longer required and should be removed prior = ds_prior.makelogprior_uniform(psl.logL.params, ds.priordict_standard) log_x = makelogtransform_uniform(psl.logL) # x0 = sample_uniform(psl.logL.params) From a5f421f7046be1d822b10a4c557939d843c473cf Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 18 Dec 2024 16:15:49 +0000 Subject: [PATCH 057/193] Moving "created_time" to the meta attribute of PINT.TimingModel --- src/pint_pal/timingconfiguration.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index e605b9e7..72a678b7 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -157,8 +157,8 @@ def get_model_and_toas(self,usepickle=True,print_all_ignores=False,apply_initial m = model.get_model(par_path,allow_name_mixing=True) match = re.search(r"#\s+Created:\s+(\S+)", open(par_path).read()) if match: - m.created_time = match.group(1) - log.info(f"Par file created: {m.created_time}") + m.meta["created_time"] = match.group(1) + log.info(f"Par file created: {m.meta["created_time"]}") m.file_mtime = Time(os.path.getmtime(par_path), format="unix").isot From f3f4c2e2b6a18a685a94d7725acc5212f9589ec3 Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 18 Dec 2024 16:18:41 +0000 Subject: [PATCH 058/193] Update to use new version of PINT. 
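The `meta` dictionary on `TimingModel` that the previous commit writes to is only
available in recent PINT releases, hence the version floor. A minimal sketch of the
pattern (the par file name here is only a placeholder):

    from pint.models import get_model

    m = get_model("J0605+3757.par")
    # TimingModel.meta is a plain dict in newer pint_pulsar, so bookkeeping
    # values can be attached without defining new model components.
    m.meta['created_time'] = '2024-12-18T16:15:49'
    print(m.meta.get('created_time'))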
--- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index b0eef5bc..7a2f57fd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,7 @@ readme = "README.md" requires-python = ">=3.8" dependencies = [ "ruamel.yaml", - "pint_pulsar==1.0.1", + "pint_pulsar>=1.1", "enterprise-pulsar>=3.3.2", "enterprise-extensions>=v2.4.1", "pytest", From 1eac987211a34175aca6899afe859c86d1ba2aaa Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 18 Dec 2024 16:25:26 +0000 Subject: [PATCH 059/193] Fix syntax to use ' instead of " --- src/pint_pal/timingconfiguration.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index 72a678b7..beac0265 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -157,8 +157,8 @@ def get_model_and_toas(self,usepickle=True,print_all_ignores=False,apply_initial m = model.get_model(par_path,allow_name_mixing=True) match = re.search(r"#\s+Created:\s+(\S+)", open(par_path).read()) if match: - m.meta["created_time"] = match.group(1) - log.info(f"Par file created: {m.meta["created_time"]}") + m.meta['created_time'] = match.group(1) + log.info(f"Par file created: {m.meta['created_time']}") m.file_mtime = Time(os.path.getmtime(par_path), format="unix").isot From 40162e5eb92926259177b892b81d46db3352b95b Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 18 Dec 2024 16:27:09 +0000 Subject: [PATCH 060/193] Switching m.file_mtime to be a key of m.meta --- src/pint_pal/timingconfiguration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index beac0265..1c7c7218 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -159,7 +159,7 @@ def get_model_and_toas(self,usepickle=True,print_all_ignores=False,apply_initial if match: m.meta['created_time'] = match.group(1) log.info(f"Par file created: {m.meta['created_time']}") - m.file_mtime = Time(os.path.getmtime(par_path), format="unix").isot + m.meta['file_mtime'] = Time(os.path.getmtime(par_path), format="unix").isot if m.PSR.value != self.get_source(): From a4c204acb0a906dd328f2881dc59f0fc6959c1fd Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 18 Dec 2024 16:31:16 +0000 Subject: [PATCH 061/193] Change model.noise_mtime to be a key inside of model.meta --- src/pint_pal/noise_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index bca27bdd..968b48a3 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -432,7 +432,7 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl #Setup and validate the timing model to ensure things are correct model.setup() model.validate() - model.noise_mtime = mtime.isot + model.meta['noise_mtime'] = mtime.isot if convert_equad_to_t2: from pint_pal.lite_utils import convert_enterprise_equads From 4b2c5cbb81cf6ac09201a8ee3bdbdfe1916315d5 Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 18 Dec 2024 18:04:48 +0000 Subject: [PATCH 062/193] Update to require PINT>=1.1.1 and use notebook over jupyter. 
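A quick guard for environments installed before this bump (a hypothetical check;
`packaging` is assumed to be available, as it is in any recent pip toolchain):

    import pint
    from packaging.version import Version

    # pyproject.toml now pins pint_pulsar >= 1.1.1, so fail fast on older
    # installs rather than erroring later inside the meta bookkeeping.
    if Version(pint.__version__) < Version("1.1.1"):
        raise RuntimeError(f"pint_pulsar {pint.__version__} is too old; need >= 1.1.1")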
--- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7a2f57fd..26079026 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,7 @@ readme = "README.md" requires-python = ">=3.8" dependencies = [ "ruamel.yaml", - "pint_pulsar>=1.1", + "pint_pulsar>=1.1.1", "enterprise-pulsar>=3.3.2", "enterprise-extensions>=v2.4.1", "pytest", @@ -29,7 +29,7 @@ dependencies = [ "numpy", "weasyprint", "pytest-xdist[psutil]>=2.3.0", - "jupyter", + "notebook", "seaborn", "gitpython", ] From 7d7fda66011557b07f6097f7266bb3122cada802 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Wed, 18 Dec 2024 11:32:27 -0800 Subject: [PATCH 063/193] updating to la-forge in the pyproject --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 07ae091f..8bc978bb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,7 @@ dependencies = [ "jupyter", "seaborn", "gitpython", - "laforge", + "la-forge", "arviz", ] classifiers = [ From e2077f00c705e3f1acf6f96e68ad510c2eed9e73 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Wed, 18 Dec 2024 11:41:05 -0800 Subject: [PATCH 064/193] changing test nb --- nb_templates/process_v1.2.ipynb | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nb_templates/process_v1.2.ipynb b/nb_templates/process_v1.2.ipynb index ae1871cd..446a3b5d 100644 --- a/nb_templates/process_v1.2.ipynb +++ b/nb_templates/process_v1.2.ipynb @@ -257,7 +257,12 @@ "if run_noise_analysis or use_existing_noise_dir:\n", " mo_new = copy.deepcopy(mo)\n", " lu.remove_noise(mo_new)\n", - " nu.model_noise(mo_new, to, using_wideband = using_wideband, run_noise_analysis = run_noise_analysis, n_iter = num_noise_iter)\n", + " nu.model_noise(mo_new, to,\n", + " using_wideband = using_wideband,\n", + " run_noise_analysis = run_noise_analysis,\n", + " model_kwargs=tc.config['noise_run']['model'],\n", + " sampler_kwargs=tc.config['noise_run']['inference'],\n", + " )\n", " try:\n", " mo_new = nu.add_noise_to_model(mo_new, using_wideband = using_wideband, base_dir=tc.get_noise_dir(), \n", " compare_dir=tc.get_compare_noise_dir(), no_corner_plot=tc.get_no_corner())\n", From ff32910cf637ff210dc8cc3213aea1dc28f133e3 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 19 Dec 2024 06:29:42 -0800 Subject: [PATCH 065/193] change enterprise to lowercase; add noise_run section to test config files; fix typo in setup discovery --- src/pint_pal/noise_utils.py | 24 ++++++++++++------------ tests/configs/J0605+3757.nb.yaml | 11 +++++++++++ tests/configs/J0605+3757.wb.yaml | 11 +++++++++++ 3 files changed, 34 insertions(+), 12 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 54f7a726..08a2ff5a 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -363,10 +363,10 @@ def model_noise( Recommended to pass model_kwargs and sampler_kwargs from the config file. Default kwargs given by function `get_model_and_sampler_default_settings`. 
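     For example, mirroring the notebook templates (a sketch, assuming the YAML
     config carries a `noise_run` section with `model` and `inference` blocks,
     as in the test configs touched later in this patch):
 
         nu.model_noise(mo, to,
                        using_wideband=False,
                        run_noise_analysis=True,
                        model_kwargs=tc.config['noise_run']['model'],
                        sampler_kwargs=tc.config['noise_run']['inference'])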
Import configuration parameters: - likelihood: choose from ['Enterprise', 'discovery'] - enterprise -- Enterprise likelihood + likelihood: choose from ['enterprise', 'discovery'] + enterprise -- enterprise likelihood discovery -- various numpyro samplers with a discovery likelihood - sampler: for Enterprise choose from ['PTMCMCSampler','GibbsSampler'] + sampler: for enterprise choose from ['PTMCMCSampler','GibbsSampler'] for discovery choose from ['HMC', 'NUTS', 'HMC-GIBBS'] Returns @@ -410,12 +410,12 @@ def model_noise( # Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to) - log.info(f"Creating Enterprise.Pulsar object from model with {mo.NTOA.value} toas...") + log.info(f"Creating enterprise.Pulsar object from model with {mo.NTOA.value} toas...") e_psr = Pulsar(mo, to) ########################################################## ################ PTMCMCSampler ################## ########################################################## - if likelihood == "Enterprise" and sampler == 'PTMCMCSampler': + if likelihood == "enterprise" and sampler == 'PTMCMCSampler': log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") # Setup a single pulsar PTA using enterprise_extensions # Ensure n_iter is an integer @@ -494,7 +494,7 @@ def model_noise( ############################################################## ################## GibbsSampler ######################## ############################################################## - elif likelihood == "Enterprise" and sampler == "GibbsSampler": + elif likelihood == "enterprise" and sampler == "GibbsSampler": try: from enterprise_extensions.gibbs_sampling.gibbs_chromatic import GibbsSampler except: @@ -575,7 +575,7 @@ def model_noise( else: log.error( f"Invalid likelihood ({likelihood}) and sampler ({sampler}) combination." \ - + "\nCan only use Enterprise with PTMCMCSampler or GibbsSampler." + + "\nCan only use enterprise with PTMCMCSampler or GibbsSampler." 
) if return_sampler: return samp @@ -968,10 +968,10 @@ def setup_discovery_noise(psr, elif model_kwargs['dmgp_psd'] == 'free_spectral': model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['dmgp_nfreqs'], T=time_span, name='dm_gp')) if model_kwargs['inc_chromgp']: - if model_kwargs['rn_psd'] == 'powerlaw': - model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['chromgp_nfreqs'], T=time_span, name='dm_gp')) - elif model_kwargs['rn_psd'] == 'free_spectral': - model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['chromgp_nfreqs'], T=time_span, name='dm_gp')) + if model_kwargs['chrom_psd'] == 'powerlaw': + model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['chromgp_nfreqs'], T=time_span, name='chrom_gp')) + elif model_kwargs['chrom_psd'] == 'free_spectral': + model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['chromgp_nfreqs'], T=time_span, name='chrom_gp')) psl = ds.PulsarLikelihood(model_components) ## this prior transform is no longer required and should be removed prior = ds_prior.makelogprior_uniform(psl.logL.params, ds.priordict_standard) @@ -1097,7 +1097,7 @@ def get_model_and_sampler_default_settings(): # path to empirical distribution } sampler_defaults = { - 'likelihood': 'Enterprise', + 'likelihood': 'enterprise', 'sampler': 'PTMCMCSampler', # ptmcmc kwargs 'n_iter': 2e5, diff --git a/tests/configs/J0605+3757.nb.yaml b/tests/configs/J0605+3757.nb.yaml index 949bd3f5..b08dae5b 100644 --- a/tests/configs/J0605+3757.nb.yaml +++ b/tests/configs/J0605+3757.nb.yaml @@ -25,6 +25,16 @@ outlier: # control outlier analysis runs n-burn: 1000 n-samples: 20000 +noise_run: + model: + inc_rn: true + inc_dmgp: false + inc_chromgp: false + inference: + likelihood: enterprise + sampler: PTMCMCSampler + n_iter: 200000 + intermediate-results: # use results from previous runs #noise-dir: /nanograv/share/15yr/timing/intermediate/20220301.Noise.nb.ac12e98/ #compare-noise-dir: /nanograv/share/15yr/timing/intermediate/20220222.Noise.nb.4e07003/ @@ -53,3 +63,4 @@ changelog: - '2021-09-24 joe.swiggum NOTE: updated AO/GBO coords (pint v0.8.3) and refit' - '2021-09-30 joe.swiggum NOTE: par file handed off to DWG for v1.0 is J0605+3757_PINT_20210928.nb.par' - '2022-03-08 joe.swiggum READY_FOR: v1.1' +- '2024-12-19 jeremy.baier NOTE: adding noise_run to config' diff --git a/tests/configs/J0605+3757.wb.yaml b/tests/configs/J0605+3757.wb.yaml index bf7b6d16..6734fa6b 100644 --- a/tests/configs/J0605+3757.wb.yaml +++ b/tests/configs/J0605+3757.wb.yaml @@ -23,6 +23,16 @@ dmx: # control dmx windowing/fixing max-sw-delay: 0.1 # finer binning when solar wind delay > threshold (us) custom-dmx: [] # designated by [mjd_low,mjd_hi,binsize] +noise_run: + model: + inc_rn: true + inc_dmgp: false + inc_chromgp: false + inference: + likelihood: enterprise + sampler: PTMCMCSampler + n_iter: 200000 + intermediate-results: # use results from previous runs #noise-dir: /nanograv/share/15yr/timing/intermediate/20221021.Noise.wb.a8ff4ddc/ #compare-noise-dir: /nanograv/share/15yr/timing/intermediate/20220822.Noise.wb.a77c37bb/ @@ -49,3 +59,4 @@ changelog: - '2021-09-24 joe.swiggum NOTE: updated AO/GBO coords (pint v0.8.3) and refit' - '2021-09-30 joe.swiggum NOTE: par file handed off to DWG for v1.0 is J0605+3757_PINT_20210928.wb.par' - '2022-08-24 thankful.cromartie NOISE: changed to 20220822.Noise.wb.a77c37bb' +- '2024-12-19 jeremy.baier NOTE: adding noise_run to config' From fe1aaf5338e19b98f9ea7ea173a7963aea5837c9 
Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 19 Dec 2024 08:52:27 -0800 Subject: [PATCH 066/193] delete 'num noise iter' from notebook --- nb_templates/process_v1.2.ipynb | 1 - 1 file changed, 1 deletion(-) diff --git a/nb_templates/process_v1.2.ipynb b/nb_templates/process_v1.2.ipynb index 446a3b5d..9f69b712 100644 --- a/nb_templates/process_v1.2.ipynb +++ b/nb_templates/process_v1.2.ipynb @@ -48,7 +48,6 @@ "autorun = False\n", "run_Ftest = True # Set to False if you don't need F-tests and want a faster notebook run!\n", "check_excision = True\n", - "num_noise_iter = 2e5\n", "\n", "if not autorun:\n", " run_noise_analysis = False \n", From 382110c8329d9ca937d7b207959ae30720ba4a2e Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 19 Dec 2024 13:48:47 -0800 Subject: [PATCH 067/193] fixing SW GP in addition to noise model --- src/pint_pal/noise_utils.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 08a2ff5a..5badbfd4 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -875,7 +875,7 @@ def add_noise_to_model( ###### FREE SPECTRAL (WaveX) DM NOISE ###### elif f'{psr_name}_dm_gp_log10_rho_0' in dm_pars: log.info('Adding Free Spectral DM GP as DMWaveXnoise to par file') - NotImplementedError('DMWaveXNoise not yet implemented') + raise NotImplementedError('DMWaveXNoise not yet implemented') # Check to see if higher order chromatic noise is present chrom_pars = [key for key in noise_pars if "_chrom_gp" in key] @@ -885,7 +885,7 @@ def add_noise_to_model( log.info('Adding Powerlaw CHROM GP noise as PLCMNoise to par file') # Add the ML RN parameters to their component chrom_comp = pm.noise_model.PLCMNoise() - chrom_keys = np.array([key for key, val in noise_dict.items() if "_chrom_gp_" in key]) + # chrom_keys = np.array([key for key, val in noise_dict.items() if "_chrom_gp_" in key]) chrom_comp.TNCMAMP.quantity = convert_to_RNAMP( noise_dict[psr_name + "_chrom_gp_log10_A"] ) @@ -897,7 +897,7 @@ def add_noise_to_model( ###### FREE SPECTRAL (WaveX) DM NOISE ###### elif f'{psr_name}_chrom_gp_log10_rho_0' in chrom_pars: log.info('Adding Free Spectral CHROM GP as CMWaveXnoise to par file') - NotImplementedError('CMWaveXNoise not yet implemented') + raise NotImplementedError('CMWaveXNoise not yet implemented') # Check to see if solar wind is present sw_pars = [key for key in noise_pars if "sw_r2" in key] @@ -906,14 +906,22 @@ def add_noise_to_model( all_components = Component.component_types noise_class = all_components["SolarWindDispersion"] noise = noise_class() # Make the dispersion instance. 
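 # The chains sample the deterministic solar wind as '{psr}_n_earth' (the
 # electron density at 1 AU); PINT stores the same quantity as NE_SW on its
 # SolarWindDispersion component, so the rework below copies it over and freezes it.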
- model.add_component(noise, validate=False) + model.add_component(noise, validate=False, force=False) # add parameters if f'{psr_name}_n_earth' in sw_pars: model['NE_SW'].quantity = noise_dict[f'{psr_name}_n_earth'] model['NE_SW'].frozen = True - elif f'{psr_name}_sw_gp_log10_A' in sw_pars: - raise NotImplementedError('Solar Wind Dispersion power-law GP not yet implemented') - elif f'{psr_name}_sw_gp_log10_rho' in sw_pars: + if f'{psr_name}_sw_gp_log10_A' in sw_pars: + sw_comp = pm.noise_model.PLSWNoise() + sw_comp.TNSWAMP.quantity = convert_to_RNAMP(noise_dict[f'{psr_name}_sw_gp_log10_A']) + sw_comp.TNSWAMP.frozen = True + sw_comp.TNSWGAM.quantity = -1.*noise_dict[f'{psr_name}_sw_gp_gamma'] + sw_comp.TNSWGAM.frozen = True + # FIXMEEEEEEE : need to figure out some way to softcode this + sw_comp.TNSWC.quantity = 10 + sw_comp.TNSWC.frozen = True + model.add_component(sw_comp, validate=False, force=True) + if f'{psr_name}_sw_gp_log10_rho' in sw_pars: raise NotImplementedError('Solar Wind Dispersion free spec GP not yet implemented') From 738a06e5174950063cc64189beecc625e8651027 Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Mon, 17 Feb 2025 23:47:01 +0000 Subject: [PATCH 068/193] Added a few frozen params to frozen ignore list --- src/pint_pal/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py index ba71d011..3ecfd28c 100644 --- a/src/pint_pal/utils.py +++ b/src/pint_pal/utils.py @@ -716,7 +716,7 @@ def alert(s): and pm.frozen and pm.value is not None and pm.value != 0): - if p in {"START", "FINISH", "POSEPOCH", "DMEPOCH", "PEPOCH", "TZRMJD", "DM", "DMX", "NTOA", "CHI2", "DMDATA", "TZRFRQ", "RNAMP", "RNIDX"}: + if p in {"START", "FINISH", "POSEPOCH", "DMEPOCH", "PEPOCH", "TZRMJD", "DM", "DMX", "NTOA", "CHI2", "DMDATA", "TZRFRQ", "RNAMP", "RNIDX", "CHI2R", "TRES", "SWP"}: ignoring.append(p) continue skip = False @@ -1348,4 +1348,4 @@ def no_ecorr_average(toas, resids, use_noise_model=True): return no_avg - \ No newline at end of file + From a79c1f70c768f0b0af0c3b3faddbc9c39d0581a3 Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Tue, 18 Feb 2025 01:28:58 +0000 Subject: [PATCH 069/193] Get rid of bad warnings for noise chain directories --- src/pint_pal/utils.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py index 3ecfd28c..8d4bbb9e 100644 --- a/src/pint_pal/utils.py +++ b/src/pint_pal/utils.py @@ -1240,12 +1240,10 @@ def check_recentness_noise(tc): return None, None d = os.path.abspath(tc.get_noise_dir()) - noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p))) - for p in sorted(glob.glob(os.path.join(d, - "..", - "*.Noise.*", - tc.get_source()+"_"+tc.get_toa_type().lower(), - "chain*.txt")))] + if os.path.isfile(os.path.join(d, "chain*.txt")): + noise_runs = glob.glob(os.path.join(d, "chain*.txt")) + else: + noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p))) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower(), "chain*.txt")))] used_chains = os.path.basename(d) available_chains = [os.path.basename(n) for n in noise_runs] log.info(f"Using: {used_chains}") From 7264d939de00d524e44b2d071941105261bd218e Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Wed, 19 Feb 2025 02:30:00 +0000 Subject: [PATCH 070/193] Reverted behavior of noise chain search, changed date format in PDF for range --- src/pint_pal/utils.py | 18 ++++++++++++------ 1 file changed, 12 
insertions(+), 6 deletions(-) diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py index 8d4bbb9e..1a97330c 100644 --- a/src/pint_pal/utils.py +++ b/src/pint_pal/utils.py @@ -544,7 +544,9 @@ def alert(s): # Get some values from the fitter start = fitter.toas.first_MJD.value + start_ymd = fitter.toas.first_MJD.to_value(format='iso') finish = fitter.toas.last_MJD.value + finish_ymd = fitter.toas.last_MJD.to_value(format='iso') span = finish - start label = f"{psr} {'narrowband' if NB else 'wideband'}" @@ -573,8 +575,8 @@ def alert(s): for tf in tim_files: fsum.write(r'\item ' + verb(tf.split('/')[-1]) + '\n') fsum.write(r'\end{itemize}' + "\n") - fsum.write('Span: %.1f years (%.1f -- %.1f)\\\\\n ' % (span/365.24, - year(float(start)), year(float(finish)))) + fsum.write('Span: %.1f years (%s -- %s)\\\\\n ' % (span/365.24, + str(start_ymd).split(' ')[0], str(finish_ymd).split(' ')[0])) if NB: try: @@ -1240,10 +1242,14 @@ def check_recentness_noise(tc): return None, None d = os.path.abspath(tc.get_noise_dir()) - if os.path.isfile(os.path.join(d, "chain*.txt")): - noise_runs = glob.glob(os.path.join(d, "chain*.txt")) - else: - noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p))) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower(), "chain*.txt")))] + if glob.glob(os.path.join(d,"chain*.txt")): + log.warning(f'Ignoring chains directly in {d}. Chains should be in a subdirectory of {os.path.split(d)[1]} called {tc.get_source()}_{tc.get_toa_type().lower()}') + noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p))) + for p in sorted(glob.glob(os.path.join(d, + "..", + "????-??-??", + tc.get_source()+"_"+tc.get_toa_type().lower(), + "chain*.txt")))] used_chains = os.path.basename(d) available_chains = [os.path.basename(n) for n in noise_runs] log.info(f"Using: {used_chains}") From a80a7c9b32cccadd79543250a4327d1062797e25 Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Thu, 20 Feb 2025 00:29:21 +0000 Subject: [PATCH 071/193] Allow for noise date directories of format YYYY-MM-DD(a,b,c,etc.). Disallow overwriting noise directories. --- src/pint_pal/noise_utils.py | 10 ++++++++-- src/pint_pal/utils.py | 2 +- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index bca27bdd..b29624ca 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -201,8 +201,14 @@ def model_noise(mo, to, vary_red_noise = True, n_iter = int(1e5), using_wideband else: outdir = base_op_dir + mo.PSR.value + '_wb/' - if os.path.exists(outdir) and (run_noise_analysis) and (not resume): - log.info("INFO: A noise directory for pulsar {} already exists! Re-running noise modeling from scratch".format(mo.PSR.value)) + if os.path.exists(outdir) and run_noise_analysis and not resume: + log.warning( + f"A noise directory for pulsar {mo.PSR.value} already exists! " + "Please rename the existing directory or specify a new location with " + "base_op_dir. If you're trying to resume noise modeling, use " + "resume=True with the existing directory. Skipping noise analysis." + ) + return None elif os.path.exists(outdir) and (run_noise_analysis) and (resume): log.info("INFO: A noise directory for pulsar {} already exists! 
Re-running noise modeling starting from previous chain".format(mo.PSR.value))

diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py
index 1a97330c..cc8d27ef 100644
--- a/src/pint_pal/utils.py
+++ b/src/pint_pal/utils.py
@@ -1247,7 +1247,7 @@ def check_recentness_noise(tc):
     noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p)))
                   for p in sorted(glob.glob(os.path.join(d,
                                                          "..",
-                                                         "????-??-??",
+                                                         "????-??-*",
                                                          tc.get_source()+"_"+tc.get_toa_type().lower(),
                                                          "chain*.txt")))]
     used_chains = os.path.basename(d)

From 8bf6c558f45c34871bc7082f66b952c8c1a6e955 Mon Sep 17 00:00:00 2001
From: Thankful Cromartie
Date: Tue, 25 Feb 2025 17:52:52 +0000
Subject: [PATCH 072/193] Support for home directory noise runs

---
 src/pint_pal/utils.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py
index cc8d27ef..d1429cdd 100644
--- a/src/pint_pal/utils.py
+++ b/src/pint_pal/utils.py
@@ -1252,6 +1252,15 @@ def check_recentness_noise(tc):
                                                          "chain*.txt")))]
     used_chains = os.path.basename(d)
     available_chains = [os.path.basename(n) for n in noise_runs]
+
+    if not noise_runs:
+        log.warning('Looking for noise chains in working directory.')
+        noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p))) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower()+"*", "chain*.txt")))]
+        if len(noise_runs) > 1:
+            log.warning(f'{len(noise_runs)} noise chains found in the working directory. Using first in sorted list.')
+        used_chains = os.path.basename(noise_runs[-1])
+        available_chains = [os.path.basename(n) for n in noise_runs]
+
     log.info(f"Using: {used_chains}")
     log.info(f"Available: {' '.join(available_chains)}")
     try:

From 8c63ad5b08cd70d18777b8f43ab36113e5def4ca Mon Sep 17 00:00:00 2001
From: Ross Jennings
Date: Sun, 2 Mar 2025 13:53:52 -0500
Subject: [PATCH 073/193] Add fastshermanmorrison to requirements

---
 pyproject.toml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pyproject.toml b/pyproject.toml
index 26079026..e82cf4f0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -32,6 +32,7 @@ dependencies = [
     "notebook",
     "seaborn",
     "gitpython",
+    "fastshermanmorrison-pulsar",
 ]
 classifiers = [
     "Programming Language :: Python :: 3",

From 9630b406a050392324fa6641a33476a71cba6790 Mon Sep 17 00:00:00 2001
From: Jeremy Baier
Date: Thu, 6 Mar 2025 11:33:05 -0800
Subject: [PATCH 074/193] yonking the discovery changes into another PR

---
 src/pint_pal/noise_utils.py | 318 ++++++++++--------------------------
 1 file changed, 82 insertions(+), 236 deletions(-)

diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py
index 28b608cb..446a009c 100644
--- a/src/pint_pal/noise_utils.py
+++ b/src/pint_pal/noise_utils.py
@@ -1,5 +1,4 @@
 import numpy as np, os, json
-import arviz as az
 
 from astropy import log
 from astropy.time import Time
@@ -12,36 +11,12 @@
 from pint.models.parameter import maskParameter
 from pint.models.timing_model import Component
 
-import matplotlib as mpl
 import matplotlib.pyplot as pl
 
 import la_forge.core as co
-import la_forge.diagnostics as dg
-import la_forge.utils as lu
-
-# Imports necessary for e_e noise modeling functions
-import functools
-from collections import OrderedDict
-
-from enterprise.signals import parameter
-from enterprise.signals import selections
-from enterprise.signals import signal_base
-from enterprise.signals import white_signals
-from enterprise.signals import gp_signals
-from enterprise.signals import deterministic_signals
-from enterprise import constants as const
 from
enterprise_extensions.sampler import group_from_params, get_parameter_groups from enterprise_extensions import model_utils -from enterprise_extensions import deterministic -from enterprise_extensions.timing import timing_block - -# from enterprise_extensions.blocks import (white_noise_block, red_noise_block) - -import types - -from enterprise.signals import utils -from enterprise.signals import gp_priors as gpp def setup_sampling_groups(pta, @@ -115,6 +90,7 @@ def setup_sampling_groups(pta, def analyze_noise( chaindir="./noise_run_chains/", + use_noise_point='MAP', burn_frac=0.25, save_corner=True, no_corner_plot=False, @@ -128,6 +104,9 @@ def analyze_noise( Parameters ========== chaindir: path to enterprise noise run chain; Default: './noise_run_chains/' + use_noise_point: point to use for noise analysis; Default: 'MAP'. + Options: 'MAP', 'median', + Note that the MAP is the the same as the maximum likelihood value when all the priors are uniform. burn_frac: fraction of chain to use for burn-in; Default: 0.25 save_corner: Flag to toggle saving of corner plots; Default: True no_corner_plot: Flag to toggle saving of corner plots; Default: False @@ -151,17 +130,15 @@ def analyze_noise( try: noise_core = co.Core(chaindir=chaindir) except: - log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct. Also make sure you have an up-to-date la_forge installation. ") + log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct. + Also make sure you have an up-to-date la_forge installation. ") raise ValueError(f"Could not load noise run from {chaindir}. Check path and la_forge installation.") - if sampler == 'PTMCMCSampler' or sampler == "GibbsSampler": + if sampler == 'PTMCMCSampler': # standard burn ins noise_core.set_burn(burn_frac) - elif likelihood == 'discovery': - # the numpyro sampler already deals with the burn in - noise_core.set_burn(0) else: noise_core.set_burn(burn_frac) - chain = noise_core.chain + chain = noise_core.chain[int(burn_frac * len(noise_core.chain)) :, :] psr_name = noise_core.params[0].split("_")[0] pars = np.array([p for p in noise_core.params if p not in ['lnlike', 'lnpost']]) if len(pars)+2 != chain.shape[1]: @@ -171,7 +148,7 @@ def analyze_noise( if chaindir_compare is not None: compare_core = co.Core(chaindir=chaindir) compare_core.set_burn(noise_core.burn) - chain_compare = compare_core.chain + chain_compare = compare_core.chain[int(burn_frac * len(noise_core.chain)) :, :] pars_compare = np.array([p for p in compare_core.params if p not in ['lnlike', 'lnpost']]) if len(pars_compare)+2 != chain_compare.shape[1]: chain_compare = chain_compare[:, :len(pars_compare)+2] @@ -278,6 +255,8 @@ def analyze_noise( nrows = 5 # number of rows per page mp_idx = noise_core.map_idx + param_medians = [noise_core.get_param_median(p) for p in noise_core.params if p not in ['lnlike', 'lnpost']] + param_medians_dict = {p: noise_core.get_param_median(p) for p in noise_core.params if p not in ['lnlike', 'lnpost']} #mp_idx = np.argmax(chain[:, a]) if chaindir_compare is not None: mp_compare_idx = compare_core.map_idx @@ -298,7 +277,8 @@ def analyze_noise( color="black", label="Current", ) - ax.axvline(chain[:, idx][mp_idx], ls="--", color="black") + ax.axvline(chain[:, idx][mp_idx], ls="--", color="black", label="MAP") + ax.axvline(param_medians[idx], ls="--", color="green", label="median") if chaindir_compare is not None: ax.hist( chain_compare[:, idx], @@ -325,7 +305,13 @@ def analyze_noise( # ax[nr][nc].legend(loc = 'best') pl.show() - 
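     # Collapse the posterior chain to a single representative point for the
     # par file: 'MAP' keeps the joint maximum-posterior draw, while 'median'
     # takes per-parameter medians (which need not coincide with any actual sample).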
noise_dict = noise_core.get_map_dict() + if use_noise_point == 'MAP': + noise_dict = noise_core.get_map_dict() + elif use_noise_point == 'median': + noise_dict = param_medians_dict + else: + log.error(f"Invalid noise point {use_noise_point}. Must be 'MAP' or 'median' ") + raise ValueError(f"Invalid noise point {use_noise_point}. Must be 'MAP' or 'median' ") # Print bayes factor for red noise in pulsar rn_amp_nm = psr_name+"_red_noise_log10_A" @@ -344,7 +330,7 @@ def model_noise( base_op_dir="./", model_kwargs={}, sampler_kwargs={}, - return_sampler=False, + return_sampler_without_sampling=False, ): """ Setup enterprise or discovery likelihood and perform Bayesian inference on noise model @@ -486,100 +472,22 @@ def model_noise( except: log.warning("Failed to add draws from empirical distribution.") # Initial sample - x0 = np.hstack([p.sample() for p in pta.params]) - # Start sampling - log.info("Beginnning to sample...") - samp.sample( - x0, sampler_kwargs['n_iter'], SCAMweight=30, AMweight=15, DEweight=50, #**sampler_kwargs - ) - log.info("Finished sampling.") - ############################################################## - ################## GibbsSampler ######################## - ############################################################## - elif likelihood == "enterprise" and sampler == "GibbsSampler": - try: - from enterprise_extensions.gibbs_sampling.gibbs_chromatic import GibbsSampler - except: - log.error("Please upgrade to the latest version of enterprise_extensions to use GibbsSampler.") - raise ValueError("Please install a version of enterprise extensions which contains the `gibbs_sampling` module.") - log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") - samp = GibbsSampler( - e_psr, - vary_wn=True, - tm_marg=False, - inc_ecorr=True, - ecorr_type='kernel', - vary_rn=model_kwargs['inc_rn'], - rn_components=model_kwargs['rn_nfreqs'], - vary_dm=model_kwargs['inc_dmgp'], - dm_components=model_kwargs['dmgp_nfreqs'], - vary_chrom=model_kwargs['inc_chromgp'], - chrom_components=model_kwargs['chromgp_nfreqs'], - noise_dict={}, - tnequad=model_kwargs['tnequad'], - #**noise_kwargs, - ) - log.info("Beginnning to sample...") - samp.sample(niter=sampler_kwargs['n_iter'], savepath=outdir) - log.info("Finished sampling.") - # sorta redundant to have both, but la_forge doesn't look for .npy files - chain = np.load(f'{outdir}/chain_1.npy') - np.savetxt(f'{outdir}/chain_1.txt', chain,) - ################################################################# - ################## discovery likelihood ################### - ################################################################# - elif likelihood == "discovery": - try: # make sure requisite packages are installed - import xarray as xr - import jax - from jax import numpy as jnp - import numpyro - from numpyro.infer import log_likelihood - from numpyro import distributions as dist - from numpyro import infer - import discovery as ds - from discovery import prior as ds_prior - from discovery.prior import (makelogtransform_uniform, - makelogprior_uniform, - sample_uniform) - except ImportError: - log.error("Please install the latest version of discovery, numpyro, and/or jax") - raise ValueError("Please install the latest version of discovery, numpyro, and/or jax") - log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") - os.makedirs(outdir, exist_ok=True) - with open(outdir+"model_kwargs.json", "w") as f: - json.dump(model_kwargs, f) - 
with open(outdir+"sampler_kwargs.json", "w") as f: - json.dump(sampler_kwargs, f) - samp, log_x, numpyro_model = setup_discovery_noise(e_psr, model_kwargs, sampler_kwargs) - # run the sampler - log.info("Beginnning to sample...") - samp.run(jax.random.key(42)) - log.info("Finished sampling.") - # convert to a DataFrame - df = log_x.to_df(samp.get_samples()['par']) - # convert DataFrame to dictionary - samples_dict = df.to_dict(orient='list') - if sampler_kwargs['sampler'] != 'HMC-GIBBS': - log.info("Reconstructing Log Likelihood and Posterior from samples...") - ln_like = log_likelihood(numpyro_model, samp.get_samples(), parallel=True)['ll'] - ln_prior = dist.Normal(0, 10).log_prob(samp.get_samples()['par']).sum(axis=-1) - ln_post = ln_like + ln_prior - samples_dict['lnlike'] = ln_like - samples_dict['lnpost'] = ln_post - else: - samples_dict['lnlike'] = None - samples_dict['lnpost'] = None - # convert dictionary to ArviZ InferenceData object - inference_data = az.from_dict(samples_dict) - # Save to NetCDF file which can be loaded into la_forge - inference_data.to_netcdf(outdir+"chain.nc") + # try to initialize the sampler to the maximum likelihood value from a previous run + # initialize to a random point if any points are missing + x0 = get_init_sample_from_chain_path(pta, chaindir=sampler_kwargs['empirical_distr']) + if not return_sampler_without_sampling: + # Start sampling + log.info("Beginnning to sample...") + samp.sample( + x0, sampler_kwargs['n_iter'], SCAMweight=30, AMweight=15, DEweight=50, #**sampler_kwargs + ) + log.info("Finished sampling.") else: log.error( f"Invalid likelihood ({likelihood}) and sampler ({sampler}) combination." \ + "\nCan only use enterprise with PTMCMCSampler or GibbsSampler." ) - if return_sampler: + if return_sampler_without_sampling: return samp @@ -592,6 +500,7 @@ def convert_to_RNAMP(value): def add_noise_to_model( model, + use_noise_point='MAP', burn_frac=0.25, save_corner=True, no_corner_plot=False, @@ -607,6 +516,9 @@ def add_noise_to_model( Parameters ========== model: PINT (or tempo2) timing model + use_noise_point: point to use for noise analysis; Default: 'MAP'. + Options: 'MAP', 'median', + Note that the MAP is the the same as the maximum likelihood value when all the priors are uniform. burn_frac: fraction of chain to use for burn-in; Default: 0.25 save_corner: Flag to toggle saving of corner plots; Default: True ignore_red_noise: Flag to manually force RN exclusion from timing model. 
When False, @@ -940,109 +852,6 @@ def add_noise_to_model( return model -def plot_free_specs(c0, freqs, fs_type='Red Noise'): - """ - Plot free specs when using free spectral model - """ - raise NotImplementedError("not yet implemented") - return None - - -def setup_discovery_noise(psr, - model_kwargs={}, - sampler_kwargs={}): - """ - Setup the discovery likelihood with numpyro sampling for noise analysis - """ - # set up the model - sampler = sampler_kwargs['sampler'] - time_span = ds.getspan([psr]) - # need 64-bit precision for PTA inference - numpyro.enable_x64() - # this updates the ds.stand_priordict object - ds.priordict_standard.update(prior_dictionary_updates()) - model_components = [ - psr.residuals, - ds.makegp_timing(psr, svd=True), - ds.makenoise_measurement(psr), - ds.makegp_ecorr(psr), - ] - if model_kwargs['inc_rn']: - if model_kwargs['rn_psd'] == 'powerlaw': - model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['rn_nfreqs'], T=time_span, name='red_noise')) - elif model_kwargs['rn_psd'] == 'free_spectral': - model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['rn_nfreqs'], T=time_span, name='red_noise')) - if model_kwargs['inc_dmgp']: - if model_kwargs['dmgp_psd'] == 'powerlaw': - model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['dmgp_nfreqs'], T=time_span, name='dm_gp')) - elif model_kwargs['dmgp_psd'] == 'free_spectral': - model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['dmgp_nfreqs'], T=time_span, name='dm_gp')) - if model_kwargs['inc_chromgp']: - if model_kwargs['chrom_psd'] == 'powerlaw': - model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['chromgp_nfreqs'], T=time_span, name='chrom_gp')) - elif model_kwargs['chrom_psd'] == 'free_spectral': - model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['chromgp_nfreqs'], T=time_span, name='chrom_gp')) - psl = ds.PulsarLikelihood(model_components) - ## this prior transform is no longer required and should be removed - prior = ds_prior.makelogprior_uniform(psl.logL.params, ds.priordict_standard) - log_x = makelogtransform_uniform(psl.logL) - # x0 = sample_uniform(psl.logL.params) - if sampler == 'HMC-Gibbs': - try: - from discovery.gibbs import setup_single_psr_hmc_gibbs - except ImportError: - log.error("Need to have most up-to-date version of discovery installed.") - raise ValueError("Make sure proper version of discovery is imported") - numpyro_model = None # this doesnt get used for HMC-Gibbs - gibbs_hmc_kernel = setup_single_psr_hmc_gibbs( - psrl=psl, psrs=psr, - priordict=ds.priordict_standard, - invhdorf=None, nuts_kwargs={}) - sampler = infer.MCMC(gibbs_hmc_kernel, - num_warmup=sampler_kwargs['num_warmup'], - num_samples=sampler_kwargs['num_samples'], - num_chains=sampler_kwargs['num_chains'], - chain_method=sampler_kwargs['chain_method'], - progress_bar=True, - ) - elif sampler == 'NUTS': - def numpyro_model(): - params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) - numpyro.factor("ll", log_x(params)) - nuts_kernel = infer.NUTS(numpyro_model, - max_tree_depth=sampler_kwargs['max_tree_depth'], - dense_mass=sampler_kwargs['dense_mass'], - forward_mode_differentiation=False, - target_accept_prob=0.99) - sampler = infer.MCMC(nuts_kernel, - num_warmup=sampler_kwargs['num_warmup'], - num_samples=sampler_kwargs['num_samples'], - num_chains=sampler_kwargs['num_chains'], - chain_method=sampler_kwargs['chain_method'], - progress_bar=True, - ) - elif 
sampler == 'HMC': - def numpyro_model(): - params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) - numpyro.factor("ll", log_x(params)) - hmc_kernel = infer.HMC(numpyro_model, num_steps=sampler_kwargs['num_steps']) - sampler = infer.MCMC(hmc_kernel, - num_warmup=sampler_kwargs['num_warmup'], - num_samples=sampler_kwargs['num_samples'], - num_chains=sampler_kwargs['num_chains'], - chain_method=sampler_kwargs['chain_method'], - progress_bar=True, - ) - else: - log.error( - f"Invalid likelihood ({sampler_kwargs['likelihood']}) and sampler ({sampler_kwargs['sampler']}) combination." \ - + "\nCan only use discovery with 'HMC', 'HMC-Gibbs', or 'NUTS'." - ) - - - return sampler, log_x, numpyro_model - - def test_equad_convention(pars_list): """ If (t2/tn)equad present, report convention used. @@ -1070,14 +879,51 @@ def test_equad_convention(pars_list): return None -def prior_dictionary_updates(): - return { - '(.*_)?dm_gp_log10_A': [-20, -11], - '(.*_)?dm_gp_gamma': [0, 7], - '(.*_)?chrom_gp_log10_A': [-20, -11], - '(.*_)?chrom_gp_gamma': [0, 7], - } - +def get_init_sample_from_chain_path(pta, chaindir=None, json_path=None): + """ + Get the initial sample from a chain directory or json file path. + If parameters are missing, draw randomly from the prior + Parameters + ========== + pta: enterprise PTA object + chaindir: path to chain directory + json_path: path to json file containing starting point + Returns + ======= + x0: initial sample + """ + try: + if chaindir is not None: + core = co.Core(chaindir) + starting_point = core.get_map_dict() + x0_dict = {} + for prior, par_name in zip(pta.params, pta.param_names): + if par_name in starting_point.keys(): + x0_dict.update({par_name: starting_point[par_name]}) + else: + x0_dict.update({par_name: prior.sample()}) + x0 = np.hstack([x0_dict[p] for p in pta.param_names]) + elif json_path is not None: + with open(json_path, 'r') as fin: + starting_point = json.load(fin) + x0_dict = {} + for prior, par_name in zip(pta.params, pta.param_names): + if par_name in starting_point.keys(): + x0_dict.update({par_name: starting_point[par_name]}) + else: + x0_dict.update({par_name: prior.sample()}) + x0 = np.hstack([x0_dict[p] for p in pta.param_names]) + else: + x0 = np.hstack([p.sample() for p in pta.params]) + except: + x0 = np.hstack([p.sample() for p in pta.params]) + x0_dict = None + log.warning( + f"Unable to initialize sampler from chain directory or json file. Drawing random initial sample." 
+ ) + return x0 + + def get_model_and_sampler_default_settings(): model_defaults = { # white noise @@ -1110,7 +956,7 @@ def get_model_and_sampler_default_settings(): 'likelihood': 'enterprise', 'sampler': 'PTMCMCSampler', # ptmcmc kwargs - 'n_iter': 2e5, + 'n_iter': 2.5e5, 'empirical_distr': None, # numpyro kwargs 'num_steps': 25, From c37dc3366665e41cd84585cdd160febfb3038d85 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 13:25:14 -0800 Subject: [PATCH 075/193] fix notebook; fix typo in nu --- nb_templates/process_v1.1.ipynb | 2 +- nb_templates/process_v1.2.ipynb | 2 +- src/pint_pal/noise_utils.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nb_templates/process_v1.1.ipynb b/nb_templates/process_v1.1.ipynb index bc1a16ad..87ba7441 100644 --- a/nb_templates/process_v1.1.ipynb +++ b/nb_templates/process_v1.1.ipynb @@ -262,7 +262,7 @@ " # to temporarily address current inconsistency between enterprise <= v3.1.0 and pint implementations\n", " mo_new = lu.convert_enterprise_equads(mo_new)\n", " \n", - " except OSError as e:\n", + " except (OSError, ValueError) as e:\n", " log.warning(f\"Unable to read noise chains from {tc.get_noise_dir()}: {e}\")\n", " else:\n", " mo = mo_new\n", diff --git a/nb_templates/process_v1.2.ipynb b/nb_templates/process_v1.2.ipynb index 9f69b712..ee97cf91 100644 --- a/nb_templates/process_v1.2.ipynb +++ b/nb_templates/process_v1.2.ipynb @@ -266,7 +266,7 @@ " mo_new = nu.add_noise_to_model(mo_new, using_wideband = using_wideband, base_dir=tc.get_noise_dir(), \n", " compare_dir=tc.get_compare_noise_dir(), no_corner_plot=tc.get_no_corner())\n", " \n", - " except OSError as e:\n", + " except (OSError, ValueError) as e:\n", " log.warning(f\"Unable to read noise chains from {tc.get_noise_dir()}: {e}\")\n", " else:\n", " mo = mo_new\n", diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 446a009c..979d21fa 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -130,8 +130,8 @@ def analyze_noise( try: noise_core = co.Core(chaindir=chaindir) except: - log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct. - Also make sure you have an up-to-date la_forge installation. ") + log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct." \ + +"Also make sure you have an up-to-date la_forge installation. ") raise ValueError(f"Could not load noise run from {chaindir}. Check path and la_forge installation.") if sampler == 'PTMCMCSampler': # standard burn ins From c3094f1048d10cd5a52b5d57a375ed57080c5ab7 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 14:10:34 -0800 Subject: [PATCH 076/193] add mean large likelihoods function; implementation errors --- src/pint_pal/noise_utils.py | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 979d21fa..9ed3e0b0 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -88,9 +88,30 @@ def setup_sampling_groups(pta, return groups +def get_mean_large_likelihoods(core, N=10): + ''' + Calculate the mean of the top N likelihood samples from the chain. + This is an alternate to fixing the noise values in the timing model to + the MAP or the median. 
+ + Params + ====== + core: la_forge.core object + N: int, number of top likelihood samples to average + Returns + ======= + mean_data: np.array, mean of the top N likelihood samples + ''' + chain = core.chain[core.burn:,:] + lnlike_idx = core.params.index('lnlike') + sorted_data = chain[chain[:, lnlike_idx].argsort()[::-1]] + return np.mean(sorted_data[:N,:],axis=0) + + def analyze_noise( chaindir="./noise_run_chains/", - use_noise_point='MAP', + use_noise_point='mean_large_likelihood', + likelihoods_to_average=10, burn_frac=0.25, save_corner=True, no_corner_plot=False, @@ -105,8 +126,10 @@ def analyze_noise( ========== chaindir: path to enterprise noise run chain; Default: './noise_run_chains/' use_noise_point: point to use for noise analysis; Default: 'MAP'. - Options: 'MAP', 'median', + Options: 'MAP', 'median', 'mean_large_likelihood', Note that the MAP is the the same as the maximum likelihood value when all the priors are uniform. + likelihoods_to_average: number of top likelihood samples to average; Default: 10 + Only applicable if use_noise_point is 'mean_large_likelihood'. burn_frac: fraction of chain to use for burn-in; Default: 0.25 save_corner: Flag to toggle saving of corner plots; Default: True no_corner_plot: Flag to toggle saving of corner plots; Default: False @@ -309,6 +332,8 @@ def analyze_noise( noise_dict = noise_core.get_map_dict() elif use_noise_point == 'median': noise_dict = param_medians_dict + elif use_noise_point == 'mean_large_likelihood': + noise_dict = get_mean_large_likelihoods(noise_core, N=likelihoods_to_average) else: log.error(f"Invalid noise point {use_noise_point}. Must be 'MAP' or 'median' ") raise ValueError(f"Invalid noise point {use_noise_point}. Must be 'MAP' or 'median' ") @@ -482,6 +507,12 @@ def model_noise( x0, sampler_kwargs['n_iter'], SCAMweight=30, AMweight=15, DEweight=50, #**sampler_kwargs ) log.info("Finished sampling.") + elif likelihood == "enterprise" and sampler == 'GibbsSampler': + log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") + raise NotImplementedError("GibbsSampler not yet implemented for enterprise likelihood") + elif likelihood == "discovery": + log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") + raise NotImplementedError("Discovery likelihood not yet implemented") else: log.error( f"Invalid likelihood ({likelihood}) and sampler ({sampler}) combination." \ From 7cac3ff9496fdfce4b5242c7443bee847a0e9a0c Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 14:28:34 -0800 Subject: [PATCH 077/193] 10->50 likelihoods to average --- src/pint_pal/noise_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 9ed3e0b0..73f4cf07 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -111,7 +111,7 @@ def get_mean_large_likelihoods(core, N=10): def analyze_noise( chaindir="./noise_run_chains/", use_noise_point='mean_large_likelihood', - likelihoods_to_average=10, + likelihoods_to_average=50, burn_frac=0.25, save_corner=True, no_corner_plot=False, @@ -128,7 +128,7 @@ def analyze_noise( use_noise_point: point to use for noise analysis; Default: 'MAP'. Options: 'MAP', 'median', 'mean_large_likelihood', Note that the MAP is the the same as the maximum likelihood value when all the priors are uniform. 
- likelihoods_to_average: number of top likelihood samples to average; Default: 10 + likelihoods_to_average: number of top likelihood samples to average; Default: 50 Only applicable if use_noise_point is 'mean_large_likelihood'. burn_frac: fraction of chain to use for burn-in; Default: 0.25 save_corner: Flag to toggle saving of corner plots; Default: True From 01b96e355eedc72b9afaf3100e70668370e913ac Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 16:09:03 -0800 Subject: [PATCH 078/193] remove dmgp compatibility :( --- src/pint_pal/noise_utils.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 73f4cf07..2841610d 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -448,17 +448,17 @@ def model_noise( dmjump_var=False, wb_efac_sigma=wb_efac_sigma, # DM GP - dm_var=model_kwargs['inc_dmgp'], - dm_Nfreqs=model_kwargs['dmgp_nfreqs'], + #dm_var=model_kwargs['inc_dmgp'], + #dm_Nfreqs=model_kwargs['dmgp_nfreqs'], # CHROM GP - chrom_gp=model_kwargs['inc_chromgp'], - chrom_Nfreqs=model_kwargs['chromgp_nfreqs'], - chrom_gp_kernel='diag', # Fourier basis chromg_gp + #chrom_gp=model_kwargs['inc_chromgp'], + #chrom_Nfreqs=model_kwargs['chromgp_nfreqs'], + #chrom_gp_kernel='diag', # Fourier basis chromg_gp # DM SOLAR WIND #dm_sw_deter=model_kwargs['inc_sw_deter'], #ACE_prior=model_kwargs['ACE_prior'], # can pass extra signals in here - extra_sigs=model_kwargs['extra_sigs'], + #extra_sigs=model_kwargs['extra_sigs'], ) pta.set_default_params({}) else: From 73db40c52b296b27ed65195f459b8c9d3648993f Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 17:35:54 -0800 Subject: [PATCH 079/193] add extra prints --- src/pint_pal/noise_utils.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 2841610d..0cc95314 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -447,6 +447,7 @@ def model_noise( use_dmdata=False, dmjump_var=False, wb_efac_sigma=wb_efac_sigma, + tm_svd=True, # DM GP #dm_var=model_kwargs['inc_dmgp'], #dm_Nfreqs=model_kwargs['dmgp_nfreqs'], @@ -485,13 +486,19 @@ def model_noise( groups = setup_sampling_groups(pta, write_groups=False, outdir=outdir) ####### # setup sampler using enterprise_extensions + if sampler_kwargs['empirical_distr'] is not None: + log.info(f"Attempting to set up sampler with empirical distribution from {sampler_kwargs['empirical_distr']}") + emp_dist = sampler_kwargs['empirical_distr'] + else: + log.warning("Setting up sampler without empirical distributions...consider adding one for faster sampling...") + emp_dist = None samp = ee_sampler.setup_sampler(pta, outdir=outdir, resume=resume, groups=groups, - empirical_distr = sampler_kwargs['empirical_distr'] + empirical_distr = emp_dist, ) - if sampler_kwargs['empirical_distr'] is not None: + if emp_dist is not None: try: samp.addProposalToCycle(samp.jp.draw_from_empirical_distr, 50) except: @@ -981,7 +988,8 @@ def get_model_and_sampler_default_settings(): 'ACE_prior': False, # 'extra_sigs': None, - # path to empirical distribution + # misc + 'tm_svd': True } sampler_defaults = { 'likelihood': 'enterprise', From 0ecc3aa6dd3314de62a88c3a4e45e9d51d36a675 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 17:51:21 -0800 Subject: [PATCH 080/193] turns out you cant make empirical distributions like that --- src/pint_pal/noise_utils.py | 54 
++++++++++++++++++++++++++++++++++--- 1 file changed, 51 insertions(+), 3 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 0cc95314..189c66e2 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -1,4 +1,4 @@ -import numpy as np, os, json +import numpy as np, os, json, itertools from astropy import log from astropy.time import Time @@ -17,6 +17,8 @@ from enterprise_extensions.sampler import group_from_params, get_parameter_groups from enterprise_extensions import model_utils +from enterprise_extensions.empirical_distr import (EmpiricalDistribution1D, + EmpiricalDistribution2D) def setup_sampling_groups(pta, @@ -487,8 +489,13 @@ def model_noise( ####### # setup sampler using enterprise_extensions if sampler_kwargs['empirical_distr'] is not None: - log.info(f"Attempting to set up sampler with empirical distribution from {sampler_kwargs['empirical_distr']}") - emp_dist = sampler_kwargs['empirical_distr'] + log.info(f"Attempting to create empirical distribution from {sampler_kwargs['empirical_distr']}") + try: + core = co.Core(chaindir=sampler_kwargs['empirical_distr']) + emp_dist = make_emp_distr(core) + except: + log.warning(f"Failed to create empirical distribution from {sampler_kwargs['empirical_distr']}... check path.") + emp_dist = None else: log.warning("Setting up sampler without empirical distributions...consider adding one for faster sampling...") emp_dist = None @@ -961,6 +968,47 @@ def get_init_sample_from_chain_path(pta, chaindir=None, json_path=None): ) return x0 +def make1d(par, samples, bins=None, nbins=81): + if bins is None: + bins = np.linspace(min(samples), max(samples), nbins) + + return EmpiricalDistribution1D(par, samples, bins) + +def make2d(pars, samples, bins=None, nbins=81): + idx = [0,1] + if bins is None: + bins = [np.linspace(min(samples[:, i]), max(samples[:, i]), nbins) for i in idx] + return EmpiricalDistribution2D(pars, samples.T, bins) + + +def make_emp_distr(core): + """ + Make empirical distributions for all parameters in core. + Parameters + ========== + core: enterprise_extensions.core.Core object + + Returns + ======= + dists: list of EmpiricalDistribution1D and EmpiricalDistribution2D objects + """ + types = ['dm_gp', 'chrom_gp', 'red_noise', 'ecorr', 'chrom_s1yr', 'dm_s1yr', 'exp',] + # made 1d hist for everything + dists = [make1d(par, core(par)) for par in core.params[:-4] if 'chrom_gp_idx' not in par] + # get list of parameters minus chrom_gp_idx cuz this prior is weird. 
+ params = [p for p in core.params if 'chrom_gp_idx' not in p] + groups = {ii: [par for par in params if ii in par] for ii in types} + # make 2ds for various related parameter subgroups + for group in groups.values(): + _ = [dists.append(make2d(pars,core(list(pars)))) for pars in list(itertools.combinations(group,2)) if len(group)>1] + # make 2d cross groups + _ = [[dists.append(make2d([ecr, dm], core([ecr, dm]))) for ecr in groups['ecorr']] for dm in groups['dm_gp']] + _ = [[dists.append(make2d([dm, chrom], core([dm, chrom]))) for dm in groups['dm_gp']] for chrom in groups['chrom_gp']] + + return dists + + + def get_model_and_sampler_default_settings(): model_defaults = { From 2ee428f1248b23564d2ffa44ec1af87406cdb159 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 19:39:42 -0800 Subject: [PATCH 081/193] removing chain accept --- src/pint_pal/noise_utils.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 189c66e2..78c02890 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -163,11 +163,11 @@ def analyze_noise( noise_core.set_burn(burn_frac) else: noise_core.set_burn(burn_frac) - chain = noise_core.chain[int(burn_frac * len(noise_core.chain)) :, :] + chain = noise_core.chain[int(burn_frac * len(noise_core.chain)) :, :-2] psr_name = noise_core.params[0].split("_")[0] - pars = np.array([p for p in noise_core.params if p not in ['lnlike', 'lnpost']]) - if len(pars)+2 != chain.shape[1]: - chain = chain[:, :len(pars)+2] + pars = np.array([p for p in noise_core.params if p not in ['lnlike', 'lnpost', 'chain_accept', 'pt_chain_accept']]) + # if len(pars)+2 != chain.shape[1]: + # chain = chain[:, :len(pars)+2] # load in same for comparison noise model if chaindir_compare is not None: @@ -489,12 +489,12 @@ def model_noise( ####### # setup sampler using enterprise_extensions if sampler_kwargs['empirical_distr'] is not None: - log.info(f"Attempting to create empirical distribution from {sampler_kwargs['empirical_distr']}") + log.info(f"Attempting to create empirical distributions from {sampler_kwargs['empirical_distr']}") try: core = co.Core(chaindir=sampler_kwargs['empirical_distr']) emp_dist = make_emp_distr(core) except: - log.warning(f"Failed to create empirical distribution from {sampler_kwargs['empirical_distr']}... check path.") + log.warning(f"Failed to create empirical distributions ... 
check path.") emp_dist = None else: log.warning("Setting up sampler without empirical distributions...consider adding one for faster sampling...") From 1502c84d80cad154cab8f31b2a0860a9fc7c9251 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 19:43:25 -0800 Subject: [PATCH 082/193] removing lnlike --- src/pint_pal/noise_utils.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 78c02890..9f94545b 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -163,7 +163,7 @@ def analyze_noise( noise_core.set_burn(burn_frac) else: noise_core.set_burn(burn_frac) - chain = noise_core.chain[int(burn_frac * len(noise_core.chain)) :, :-2] + chain = noise_core.chain[int(burn_frac * len(noise_core.chain)) :, :-4] psr_name = noise_core.params[0].split("_")[0] pars = np.array([p for p in noise_core.params if p not in ['lnlike', 'lnpost', 'chain_accept', 'pt_chain_accept']]) # if len(pars)+2 != chain.shape[1]: @@ -171,12 +171,12 @@ def analyze_noise( # load in same for comparison noise model if chaindir_compare is not None: - compare_core = co.Core(chaindir=chaindir) + compare_core = co.Core(chaindir=chaindir) compare_core.set_burn(noise_core.burn) - chain_compare = compare_core.chain[int(burn_frac * len(noise_core.chain)) :, :] + chain_compare = compare_core.chain[int(burn_frac * len(noise_core.chain)) :, :-4] pars_compare = np.array([p for p in compare_core.params if p not in ['lnlike', 'lnpost']]) - if len(pars_compare)+2 != chain_compare.shape[1]: - chain_compare = chain_compare[:, :len(pars_compare)+2] + # if len(pars_compare)+2 != chain_compare.shape[1]: + # chain_compare = chain_compare[:, :len(pars_compare)+2] psr_name_compare = pars_compare[0].split("_")[0] if psr_name_compare != psr_name: From 537040e7a416fcffd5f9c4f181b4666f6d94ee09 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 19:48:11 -0800 Subject: [PATCH 083/193] fix bug --- src/pint_pal/noise_utils.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 9f94545b..03c20997 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -595,10 +595,12 @@ def add_noise_to_model( log.info(f"Using existing noise analysis results in {chaindir}") log.info("Adding new noise parameters to model.") noise_core, noise_dict, rn_bf = analyze_noise( - chaindir, - burn_frac, - save_corner, - no_corner_plot, + chaindir=chaindir, + use_noise_point='mean_large_likelihood', + likelihoods_to_average=50, + burn_frac=0.25, + save_corner=save_corner, + no_corner_plot=no_corner_plot, chaindir_compare=chaindir_compare, ) chainfile = chaindir + "chain_1.txt" From 7e138715fcaf99a4b8034d51bfc64f6187fde279 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 19:54:41 -0800 Subject: [PATCH 084/193] fix bug --- src/pint_pal/noise_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 03c20997..b7ea976b 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -107,7 +107,8 @@ def get_mean_large_likelihoods(core, N=10): chain = core.chain[core.burn:,:] lnlike_idx = core.params.index('lnlike') sorted_data = chain[chain[:, lnlike_idx].argsort()[::-1]] - return np.mean(sorted_data[:N,:],axis=0) + vals = np.mean(sorted_data[:N,:],axis=0) + return {p: val[p] for p, par in enumerate(core.params)} def analyze_noise( 
From 9ef6bb333ca02d21be95dcb084c2180ba53e1d55 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 19:55:55 -0800 Subject: [PATCH 085/193] fix bug --- src/pint_pal/noise_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index b7ea976b..0a639207 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -108,7 +108,7 @@ def get_mean_large_likelihoods(core, N=10): lnlike_idx = core.params.index('lnlike') sorted_data = chain[chain[:, lnlike_idx].argsort()[::-1]] vals = np.mean(sorted_data[:N,:],axis=0) - return {p: val[p] for p, par in enumerate(core.params)} + return {p: vals[p] for p, par in enumerate(core.params)} def analyze_noise( From 036a167ae4552b17d338b6ef94225778923c5fc0 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 19:57:39 -0800 Subject: [PATCH 086/193] fix bug --- src/pint_pal/noise_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 0a639207..749ee6bc 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -108,7 +108,7 @@ def get_mean_large_likelihoods(core, N=10): lnlike_idx = core.params.index('lnlike') sorted_data = chain[chain[:, lnlike_idx].argsort()[::-1]] vals = np.mean(sorted_data[:N,:],axis=0) - return {p: vals[p] for p, par in enumerate(core.params)} + return {p: vals[par] for p, par in enumerate(core.params)} def analyze_noise( From b367d3cdc979db616b07eed429ed68bc8ae3425f Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 19:59:06 -0800 Subject: [PATCH 087/193] fix bug --- src/pint_pal/noise_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 749ee6bc..f134a615 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -108,7 +108,7 @@ def get_mean_large_likelihoods(core, N=10): lnlike_idx = core.params.index('lnlike') sorted_data = chain[chain[:, lnlike_idx].argsort()[::-1]] vals = np.mean(sorted_data[:N,:],axis=0) - return {p: vals[par] for p, par in enumerate(core.params)} + return {par: vals[p] for p, par in enumerate(core.params)} def analyze_noise( From 18b4a6c18524022e108a3d9890f39749ceb6af28 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Fri, 7 Mar 2025 07:11:53 -0800 Subject: [PATCH 088/193] extra logger statements --- src/pint_pal/noise_utils.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index f134a615..9f1ae529 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -546,7 +546,7 @@ def convert_to_RNAMP(value): def add_noise_to_model( model, - use_noise_point='MAP', + use_noise_point='mean_large_likelihood', burn_frac=0.25, save_corner=True, no_corner_plot=False, @@ -555,6 +555,7 @@ def add_noise_to_model( rn_bf_thres=1e2, base_dir=None, compare_dir=None, + return_noise_core=False, ): """ Add WN, RN, DMGP, ChromGP, and SW parameters to timing model. @@ -562,9 +563,10 @@ def add_noise_to_model( Parameters ========== model: PINT (or tempo2) timing model - use_noise_point: point to use for noise analysis; Default: 'MAP'. - Options: 'MAP', 'median', + use_noise_point: point to use for noise analysis; Default: 'mean_large_likelihood'. 
+ Options: 'MAP', 'median', 'mean_large_likelihood' Note that the MAP is the the same as the maximum likelihood value when all the priors are uniform. + Mean large likelihood takes N of the largest likelihood values and then takes the mean of those. (Recommended). burn_frac: fraction of chain to use for burn-in; Default: 0.25 save_corner: Flag to toggle saving of corner plots; Default: True ignore_red_noise: Flag to manually force RN exclusion from timing model. When False, @@ -573,10 +575,13 @@ def add_noise_to_model( using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False base_dir: directory containing {psr}_nb and {psr}_wb chains directories; if None, will check for results in the current working directory './'. + return_noise_core: Flag to return the la_forge.core object; Default: False Returns ======= model: New timing model which includes WN and RN (and potentially dmgp, chrom_gp, and solar wind) parameters + (optional) + noise_core: la_forge.core object which contains noise chains and run metadata """ # Assume results are in current working directory if not specified @@ -595,11 +600,17 @@ def add_noise_to_model( log.info(f"Using existing noise analysis results in {chaindir}") log.info("Adding new noise parameters to model.") + if use_noise_point == 'mean_large_likelihood': + log.info("Using mean of top 50 likelihood samples for noise parameters.") + elif use_noise_point == 'MAP': + log.info("Using maximum a posteriori values for noise parameters.") + elif use_noise_point == 'median': + log.info("Using median values for noise parameters.") noise_core, noise_dict, rn_bf = analyze_noise( chaindir=chaindir, - use_noise_point='mean_large_likelihood', + use_noise_point=use_noise_point, likelihoods_to_average=50, - burn_frac=0.25, + burn_frac=burn_frac, save_corner=save_corner, no_corner_plot=no_corner_plot, chaindir_compare=chaindir_compare, @@ -897,7 +908,10 @@ def add_noise_to_model( model = convert_enterprise_equads(model) - return model + if not return_noise_core: + return model + if return_noise_core: + return model, noise_core def test_equad_convention(pars_list): From ec807c3fe5d826ef381a397ff09572e81f5b5ace Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Fri, 7 Mar 2025 07:31:52 -0800 Subject: [PATCH 089/193] adding legend to fig --- src/pint_pal/noise_utils.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 9f1ae529..0cd7ae52 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -304,7 +304,13 @@ def analyze_noise( label="Current", ) ax.axvline(chain[:, idx][mp_idx], ls="--", color="black", label="MAP") - ax.axvline(param_medians[idx], ls="--", color="green", label="median") + if use_noise_point == 'mean_large_likelihood': + lbl = "mean of 50 MLVs" + if use_noise_point == 'MAP': + lbl = "MAP" + if use_noise_point == 'median': + lbl = "median" + ax.axvline(param_medians[idx], ls="--", color="green", label=lbl) if chaindir_compare is not None: ax.hist( chain_compare[:, idx], @@ -329,6 +335,7 @@ def analyze_noise( # Wasn't working before, but how do I implement a legend? # ax[nr][nc].legend(loc = 'best') + pl.legend(loc="best") pl.show() if use_noise_point == 'MAP': @@ -339,7 +346,7 @@ def analyze_noise( noise_dict = get_mean_large_likelihoods(noise_core, N=likelihoods_to_average) else: log.error(f"Invalid noise point {use_noise_point}. Must be 'MAP' or 'median' ") - raise ValueError(f"Invalid noise point {use_noise_point}. 
Must be 'MAP' or 'median' ") + raise ValueError(f"Invalid noise point {use_noise_point}. Must be 'MAP' or 'median' or 'mean_large_likelihood' ") # Print bayes factor for red noise in pulsar rn_amp_nm = psr_name+"_red_noise_log10_A" From 3cc6679d409e11ed19136d19d68e20521a63e639 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Fri, 7 Mar 2025 07:49:03 -0800 Subject: [PATCH 090/193] fix bug in chain comparison --- src/pint_pal/noise_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 0cd7ae52..47be3a1e 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -175,7 +175,7 @@ def analyze_noise( compare_core = co.Core(chaindir=chaindir) compare_core.set_burn(noise_core.burn) chain_compare = compare_core.chain[int(burn_frac * len(noise_core.chain)) :, :-4] - pars_compare = np.array([p for p in compare_core.params if p not in ['lnlike', 'lnpost']]) + pars_compare = np.array([p for p in compare_core.params if p not in ['lnlike', 'lnpost', 'chain_accept', 'pt_chain_accept']]) # if len(pars_compare)+2 != chain_compare.shape[1]: # chain_compare = chain_compare[:, :len(pars_compare)+2] From 85ec7d9e4cc37c266e86aef4525ea53f98a1e751 Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Mon, 10 Mar 2025 17:03:00 +0000 Subject: [PATCH 091/193] Quick fix for finding old noise runs --- src/pint_pal/noise_utils.py | 12 ++++++++---- src/pint_pal/utils.py | 4 ++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 47be3a1e..513cfd18 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -156,9 +156,13 @@ def analyze_noise( try: noise_core = co.Core(chaindir=chaindir) except: - log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct." \ - +"Also make sure you have an up-to-date la_forge installation. ") - raise ValueError(f"Could not load noise run from {chaindir}. Check path and la_forge installation.") + if os.path.isfile(chaindir): + log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct. " \ + +"Also make sure you have an up-to-date la_forge installation. ") + raise ValueError(f"Could not load noise run from {chaindir}. Check path and la_forge installation.") + else: + log.error(f"No noise runs found in {chaindir}. Make sure the path is correct.") + raise ValueError(f"Could not load noise run from {chaindir}. Check path.") if sampler == 'PTMCMCSampler': # standard burn ins noise_core.set_burn(burn_frac) @@ -1078,4 +1082,4 @@ def get_model_and_sampler_default_settings(): 'max_tree_depth': 5, 'dense_mass': False, } - return model_defaults, sampler_defaults \ No newline at end of file + return model_defaults, sampler_defaults diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py index d1429cdd..056bd1d3 100644 --- a/src/pint_pal/utils.py +++ b/src/pint_pal/utils.py @@ -1258,8 +1258,8 @@ def check_recentness_noise(tc): noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p))) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower()+"*", "chain*.txt")))] if len(noise_runs) > 1: log.warning(f'{len(noise_runs)} noise chains found in the working directory. 
Using first in sorted list.') - used_chains = os.path.basename(noise_runs[-1]) - available_chains = [os.path.basename(n) for n in noise_runs] + used_chains = os.path.basename(noise_runs[-1]) + available_chains = [os.path.basename(n) for n in noise_runs] log.info(f"Using: {used_chains}") log.info(f"Available: {' '.join(available_chains)}") From 68500e41c2b2d6c62d9601b0545728bd181f2f45 Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Mon, 10 Mar 2025 20:49:30 +0000 Subject: [PATCH 092/193] Fixed recentness checking to handle cases outside the expected noise-dir location, including home dir --- src/pint_pal/utils.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py index 056bd1d3..6459d882 100644 --- a/src/pint_pal/utils.py +++ b/src/pint_pal/utils.py @@ -1238,7 +1238,7 @@ def check_recentness_noise(tc): name of the most recent available set of chains """ if not tc.get_noise_dir(): - log.warning(f"Yaml file does not have a noise-dir field (or it is unset).") + log.warning(f"Yaml file does not have a noise-dir field (or it is unset). Will check working directory.") return None, None d = os.path.abspath(tc.get_noise_dir()) @@ -1254,12 +1254,18 @@ def check_recentness_noise(tc): available_chains = [os.path.basename(n) for n in noise_runs] if not noise_runs: - log.warning('Looking for noise chains in working directory.') - noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p))) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower()+"*", "chain*.txt")))] - if len(noise_runs) > 1: - log.warning(f'{len(noise_runs)} noise chains found in the working directory. Using first in sorted list.') - used_chains = os.path.basename(noise_runs[-1]) - available_chains = [os.path.basename(n) for n in noise_runs] + log.warning('Looking for noise chains in given noise-dir, but does not follow current conventions.') + noise_runs = [os.path.dirname(os.path.abspath(p)) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower()+"*", "chain*.txt")))] + if len(noise_runs) > 0: + if len(noise_runs) == 1: + log.warning(f'{len(noise_runs)} noise chain found in noise-dir.') + else: + log.warning(f'{len(noise_runs)} noise chains found in noise-dir. Using first in sorted list.') + used_chains = os.path.abspath(noise_runs[0]) + available_chains = [os.path.abspath(n) for n in noise_runs] + + if not noise_runs: + log.warning('No chains found. 
Will search working directory and apply if found.') log.info(f"Using: {used_chains}") log.info(f"Available: {' '.join(available_chains)}") From b5bde65ee9900ffea660d27fb17faf351a0991d0 Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Mon, 10 Mar 2025 21:14:51 +0000 Subject: [PATCH 093/193] Changed warning-->info for non-standard directory case --- src/pint_pal/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py index 6459d882..5621d959 100644 --- a/src/pint_pal/utils.py +++ b/src/pint_pal/utils.py @@ -1258,9 +1258,9 @@ def check_recentness_noise(tc): noise_runs = [os.path.dirname(os.path.abspath(p)) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower()+"*", "chain*.txt")))] if len(noise_runs) > 0: if len(noise_runs) == 1: - log.warning(f'{len(noise_runs)} noise chain found in noise-dir.') + log.info(f'{len(noise_runs)} noise chain found in noise-dir.') else: - log.warning(f'{len(noise_runs)} noise chains found in noise-dir. Using first in sorted list.') + log.info(f'{len(noise_runs)} noise chains found in noise-dir. Using first in sorted list.') used_chains = os.path.abspath(noise_runs[0]) available_chains = [os.path.abspath(n) for n in noise_runs] From f21d70904c45a05e5d204e02eae0498f18094446 Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Tue, 11 Mar 2025 16:09:04 +0000 Subject: [PATCH 094/193] Added path to noise-dir warnings. See simultaneous MR in NG20 repo (notebook changes) --- src/pint_pal/utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py index 5621d959..b2b940a9 100644 --- a/src/pint_pal/utils.py +++ b/src/pint_pal/utils.py @@ -1254,13 +1254,13 @@ def check_recentness_noise(tc): available_chains = [os.path.basename(n) for n in noise_runs] if not noise_runs: - log.warning('Looking for noise chains in given noise-dir, but does not follow current conventions.') + log.warning(f'Looking for noise chains in given noise-dir ({d}), but does not follow current conventions (shared chains in /nanograv/share/20yr/noise-chains///).') noise_runs = [os.path.dirname(os.path.abspath(p)) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower()+"*", "chain*.txt")))] if len(noise_runs) > 0: if len(noise_runs) == 1: - log.info(f'{len(noise_runs)} noise chain found in noise-dir.') + log.info(f'{len(noise_runs)} noise chain found in noise-dir ({d}).') else: - log.info(f'{len(noise_runs)} noise chains found in noise-dir. Using first in sorted list.') + log.info(f'{len(noise_runs)} noise chains found in noise-dir ({d}). 
Using first in sorted list.') used_chains = os.path.abspath(noise_runs[0]) available_chains = [os.path.abspath(n) for n in noise_runs] From d16e658a32197a03000ca992f15f6b56145ffae0 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 13 Mar 2025 22:55:55 -0700 Subject: [PATCH 095/193] fix integer bug --- src/pint_pal/noise_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 513cfd18..2d7cd4c6 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -446,7 +446,7 @@ def model_noise( log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") # Setup a single pulsar PTA using enterprise_extensions # Ensure n_iter is an integer - sampler_kwargs['n_iter'] = int(sampler_kwargs['n_iter']) + sampler_kwargs['n_iter'] = int(float(sampler_kwargs['n_iter'])) if sampler_kwargs['n_iter'] < 1e4: log.warning( From 1849ffd9d364db1393c2dd95f82c01ca98b3d7c7 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 13 Mar 2025 23:17:19 -0700 Subject: [PATCH 096/193] add print statements --- src/pint_pal/noise_utils.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 2d7cd4c6..b12d9d47 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -501,12 +501,19 @@ def model_noise( ####### # setup sampler using enterprise_extensions if sampler_kwargs['empirical_distr'] is not None: - log.info(f"Attempting to create empirical distributions from {sampler_kwargs['empirical_distr']}") try: + log.info(f"Attempting to load chains for an empirical distributions from {sampler_kwargs['empirical_distr']}") core = co.Core(chaindir=sampler_kwargs['empirical_distr']) - emp_dist = make_emp_distr(core) except: - log.warning(f"Failed to create empirical distributions ... check path.") + log.warning(f"Failed to load chains for empirical distributions from {sampler_kwargs['empirical_distr']}.\nCheck path. Need absolute path to chain directory with `pars.txt` and `chain_1.txt`. 
files") + core = None + try: + if core is not None: + emp_dist = make_emp_distr(core) + log.info(f"Successfully created empirical distributions !!") + log.info("Setting up sampler ...") + except: + log.warning(f"Failed to create empirical distributions from successfully loaded directory.") emp_dist = None else: log.warning("Setting up sampler without empirical distributions...consider adding one for faster sampling...") From de53cfd5054b358ffb7bbaf9cf8fc6857063949c Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 13 Mar 2025 23:49:52 -0700 Subject: [PATCH 097/193] adding single likelihood evaluation timer --- src/pint_pal/noise_utils.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index b12d9d47..693f9e70 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -1,4 +1,4 @@ -import numpy as np, os, json, itertools +import numpy as np, os, json, itertools, time from astropy import log from astropy.time import Time @@ -533,6 +533,10 @@ def model_noise( # try to initialize the sampler to the maximum likelihood value from a previous run # initialize to a random point if any points are missing x0 = get_init_sample_from_chain_path(pta, chaindir=sampler_kwargs['empirical_distr']) + try: + log_single_likelihood_evaluation_time(pta, sampler_kwargs) + except: + log.warning("Failed to time likelihood.") if not return_sampler_without_sampling: # Start sampling log.info("Beginnning to sample...") @@ -974,6 +978,7 @@ def get_init_sample_from_chain_path(pta, chaindir=None, json_path=None): """ try: if chaindir is not None: + log.info(f"Attempting to initialize sampler from MAP of chain directory {chaindir}") core = co.Core(chaindir) starting_point = core.get_map_dict() x0_dict = {} @@ -1015,7 +1020,6 @@ def make2d(pars, samples, bins=None, nbins=81): bins = [np.linspace(min(samples[:, i]), max(samples[:, i]), nbins) for i in idx] return EmpiricalDistribution2D(pars, samples.T, bins) - def make_emp_distr(core): """ Make empirical distributions for all parameters in core. @@ -1042,6 +1046,19 @@ def make_emp_distr(core): return dists +def log_single_likelihood_evaluation_time(pta, sampler_kwargs): + """ + Log the time it takes to evaluate the likelihood once. 
+ """ + log.info("Building the enterprise likelihood and estimating evaluation time...") + x1 = [[p.sample() for p in pta.params] for _ in range(11)] + pta.get_lnlikelihood(x1[0]) + start_time = time.time() + [pta.get_lnlikelihood(x1[i]) for i in range(1,11)] + end_time = time.time() + slet = (end_time-start_time)/10 + log.info(f"Single likelihood evaluation time is approximately {slet:.1e} seconds") + log.info(f"4 times {sampler_kwargs['n_iter']} likelihood evaluations will take approximately: {4*slet*float(sampler_kwargs['n_iter'])/3600/24:.2f} days") From e3e425d4bfbc152be07893210fdccaf26e44f129 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Fri, 14 Mar 2025 00:06:23 -0700 Subject: [PATCH 098/193] rename empirical distribuition key --- src/pint_pal/noise_utils.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 693f9e70..995b0700 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -500,12 +500,12 @@ def model_noise( groups = setup_sampling_groups(pta, write_groups=False, outdir=outdir) ####### # setup sampler using enterprise_extensions - if sampler_kwargs['empirical_distr'] is not None: + if sampler_kwargs['emp_distribution'] is not None: try: - log.info(f"Attempting to load chains for an empirical distributions from {sampler_kwargs['empirical_distr']}") - core = co.Core(chaindir=sampler_kwargs['empirical_distr']) + log.info(f"Attempting to load chains for an empirical distributions from {sampler_kwargs['emp_distribution']}") + core = co.Core(chaindir=sampler_kwargs['emp_distribution']) except: - log.warning(f"Failed to load chains for empirical distributions from {sampler_kwargs['empirical_distr']}.\nCheck path. Need absolute path to chain directory with `pars.txt` and `chain_1.txt`. files") + log.warning(f"Failed to load chains for empirical distributions from {sampler_kwargs['emp_distribution']}.\nCheck path. Need absolute path to chain directory with `pars.txt` and `chain_1.txt`. 
files") core = None try: if core is not None: @@ -532,7 +532,7 @@ def model_noise( # Initial sample # try to initialize the sampler to the maximum likelihood value from a previous run # initialize to a random point if any points are missing - x0 = get_init_sample_from_chain_path(pta, chaindir=sampler_kwargs['empirical_distr']) + x0 = get_init_sample_from_chain_path(pta, chaindir=sampler_kwargs['emp_distribution']) try: log_single_likelihood_evaluation_time(pta, sampler_kwargs) except: @@ -1096,7 +1096,7 @@ def get_model_and_sampler_default_settings(): 'sampler': 'PTMCMCSampler', # ptmcmc kwargs 'n_iter': 2.5e5, - 'empirical_distr': None, + 'emp_distribution': None, # numpyro kwargs 'num_steps': 25, 'num_warmup': 500, From 98175ec9bb684a2066034fea920aa9aaec6546e1 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Fri, 14 Mar 2025 00:15:16 -0700 Subject: [PATCH 099/193] add additional instructions for adding empirical distributions --- src/pint_pal/noise_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 995b0700..2233e6f1 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -516,7 +516,7 @@ def model_noise( log.warning(f"Failed to create empirical distributions from successfully loaded directory.") emp_dist = None else: - log.warning("Setting up sampler without empirical distributions...consider adding one for faster sampling...") + log.warning("Setting up sampler without empirical distributions... Consider adding one for faster sampling by adding `emp_distribution`: //_nb to the `noise_run`->`inference` section of the config file.") emp_dist = None samp = ee_sampler.setup_sampler(pta, outdir=outdir, From 4b74b216a9ee800bc0cb995f1daeb349a3871c04 Mon Sep 17 00:00:00 2001 From: tcromartie Date: Tue, 7 May 2024 21:17:04 +0000 Subject: [PATCH 100/193] Add PINT autofitter option --- src/pint_pal/timingconfiguration.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index 1ccf1ed5..185fc490 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -336,8 +336,11 @@ def get_fitter(self): def construct_fitter(self, to, mo): """ Return the fitter, tracking pulse numbers if available """ fitter_name = self.config['fitter'] - fitter_class = getattr(pint.fitter, fitter_name) - return fitter_class(to, mo) + if fitter_name == 'Auto': + return pint.fitter.Fitter.auto(to, mo) + else: + fitter_class = getattr(pint.fitter, fitter_name) + return fitter_class(to, mo) def get_toa_type(self): """ Return the toa-type string """ From d50b83aae4b8b7e2941bf0bdcaeaaa0dacd7b00f Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 8 May 2024 16:44:04 -0400 Subject: [PATCH 101/193] Add config module --- pyproject.toml | 3 ++ src/pint_pal/__init__.py | 2 + src/pint_pal/config.py | 61 +++++++++++++++++++++++++++++ src/pint_pal/defaults.py | 16 -------- src/pint_pal/defaults.yaml | 21 ++++++++++ src/pint_pal/par_checker.py | 8 +++- src/pint_pal/timingconfiguration.py | 2 +- src/pint_pal/yamlio.py | 2 +- 8 files changed, 96 insertions(+), 19 deletions(-) create mode 100644 src/pint_pal/config.py delete mode 100644 src/pint_pal/defaults.py create mode 100644 src/pint_pal/defaults.yaml diff --git a/pyproject.toml b/pyproject.toml index b6d1652d..185ae2cb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,6 +39,9 @@ classifiers = [ "Operating System :: OS Independent", ] 
+[tool.setuptools.package-data] +pint_pal = ["defaults.yaml"] + [project.urls] "Homepage" = "https://github.com/nanograv/pint_pal" "Bug Tracker" = "https://github.com/nanograv/pint_pal/issues" diff --git a/src/pint_pal/__init__.py b/src/pint_pal/__init__.py index af9b84cf..3429e639 100644 --- a/src/pint_pal/__init__.py +++ b/src/pint_pal/__init__.py @@ -1,4 +1,6 @@ import pint_pal.checkin +import pint_pal.config +from pint_pal.config import set_data_root from . import _version __version__ = _version.get_versions()['version'] diff --git a/src/pint_pal/config.py b/src/pint_pal/config.py new file mode 100644 index 00000000..0997474c --- /dev/null +++ b/src/pint_pal/config.py @@ -0,0 +1,61 @@ +from ruamel.yaml import YAML +import os.path +yaml = YAML(typ='safe') +PACKAGE_DIR = os.path.dirname(__file__) +DATA_ROOT = '.' + +def set_data_root(path): + """ + Set the root directory of the data repository to be used with PINT Pal. + PINT Pal will search this directory for a configuration file specifying settings + such as the appropriate JPL ephemeris and version of TT(BIPM) to check for when + validating timing models. + + It will also be treated as the base directory when resolving paths in YAML + configuration files. This allows notebooks (or scripts) using YAML files within + the data repository, which specify paths relative to the data root, to be run + from other locations. + + The default value of `data_root` is '.' (the current working directory), which + is sufficient in cases where either (1) no data repository is in use, or + (2) all scripts and notebooks are run from the root of the data repository. + """ + global DATA_ROOT + DATA_ROOT = path + try: + read_config_file(os.path.join(DATA_ROOT, 'pint_pal_project.yaml')) + except FileNotFoundError: + pass + +def read_config_file(config_file): + """ + Read a configuration file, along the lines of `defaults.yaml`, and load the results + into a location that can be accessed by other PINT Pal code. 
+ """ + with open(config_file, 'r') as f: + config = yaml.load(f) + + global LATEST_BIPM + global LATEST_EPHEM + global PLANET_SHAPIRO + global CORRECT_TROPOSPHERE + global FREQUENCY_RATIO + global MAX_SOLARWIND_DELAY + global LATEST_TOA_RELEASE + + if 'LATEST_BIPM' in config: + LATEST_BIPM = config['LATEST_BIPM'] + if 'LATEST_EPHEM' in config: + LATEST_EPHEM = config['LATEST_EPHEM'] + if 'PLANET_SHAPIRO' in config: + PLANET_SHAPIRO = config['PLANET_SHAPIRO'] + if 'CORRECT_TROPOSPHERE' in config: + CORRECT_TROPOSPHERE = config['CORRECT_TROPOSPHERE'] + if 'FREQUENCY_RATIO' in config: + FREQUENCY_RATIO = config['FREQUENCY_RATIO'] + if 'MAX_SOLARWIND_DELAY' in config: + MAX_SOLARWIND_DELAY = config['MAX_SOLARWIND_DELAY'] + if 'LATEST_TOA_RELEASE' in config: + LATEST_TOA_RELEASE = config['LATEST_TOA_RELEASE'] + +read_config_file(os.path.join(PACKAGE_DIR, 'defaults.yaml')) diff --git a/src/pint_pal/defaults.py b/src/pint_pal/defaults.py deleted file mode 100644 index 0bc6c674..00000000 --- a/src/pint_pal/defaults.py +++ /dev/null @@ -1,16 +0,0 @@ -# Here we keep track of global default settings - -# Choice of clock, SSE -LATEST_BIPM = "BIPM2021" # latest clock realization to use -LATEST_EPHEM = "DE440" # latest solar system ephemeris to use - -# Toggle various corrections -PLANET_SHAPIRO = True # correct for Shapiro delay from planets -CORRECT_TROPOSPHERE = True # correct for tropospheric delays - -# DMX model defaults -FREQUENCY_RATIO = 1.1 # set the high/low frequency ratio for DMX bins -MAX_SOLARWIND_DELAY = 0.1 # set the maximum permited 'delay' from SW [us] - -# Desired TOA release tag -LATEST_TOA_RELEASE = "2021.08.25-9d8d617" # current set of TOAs available diff --git a/src/pint_pal/defaults.yaml b/src/pint_pal/defaults.yaml new file mode 100644 index 00000000..6cfcc3d3 --- /dev/null +++ b/src/pint_pal/defaults.yaml @@ -0,0 +1,21 @@ +# Here we keep track of global default settings +# +# These can be overridden on a per-project basis by placing a file +# called `pint_pal_project.yaml` in the `data_root` location (this +# defaults to the current working directory, but can be configured +# with `pint_pal.set_data_root()`). 
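+#
+# A project override file uses the same keys as this one; a hypothetical
+# pint_pal_project.yaml overriding two settings might read:
+#
+#   LATEST_EPHEM: "DE441"
+#   LATEST_BIPM: "BIPM2022"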
+ +# Choice of clock, SSE +LATEST_BIPM: "BIPM2021" # latest clock realization to use +LATEST_EPHEM: "DE440" # latest solar system ephemeris to use + +# Toggle various corrections +PLANET_SHAPIRO: True # correct for Shapiro delay from planets +CORRECT_TROPOSPHERE: True # correct for tropospheric delays + +# DMX model defaults +FREQUENCY_RATIO: 1.1 # set the high/low frequency ratio for DMX bins +MAX_SOLARWIND_DELAY: 0.1 # set the maximum permited 'delay' from SW [us] + +# Desired TOA release tag +LATEST_TOA_RELEASE: "2021.08.25-9d8d617" # current set of TOAs available diff --git a/src/pint_pal/par_checker.py b/src/pint_pal/par_checker.py index 011c489a..eb23ec06 100644 --- a/src/pint_pal/par_checker.py +++ b/src/pint_pal/par_checker.py @@ -4,7 +4,13 @@ import copy from astropy import log import astropy.units as u -from pint_pal.defaults import * +from pint_pal.defaults import ( + LATEST_BIPM, + LATEST_EPHEM, + PLANET_SHAPIRO, + CORRECT_TROPOSPHERE, + LATEST_TOA_RELEASE, +) from pint.modelutils import model_equatorial_to_ecliptic def check_if_fit(model, *param): diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index 185fc490..dd44b7b3 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -21,7 +21,7 @@ from pint_pal.utils import write_if_changed, apply_cut_flag, apply_cut_select from pint_pal.lite_utils import new_changelog_entry from pint_pal.lite_utils import check_toa_version, check_tobs -from pint_pal.defaults import * +from pint_pal.config import PLANET_SHAPIRO, FREQUENCY_RATIO, MAX_SOLARWIND_DELAY class TimingConfiguration: """ diff --git a/src/pint_pal/yamlio.py b/src/pint_pal/yamlio.py index 25878847..5cfe6868 100644 --- a/src/pint_pal/yamlio.py +++ b/src/pint_pal/yamlio.py @@ -8,7 +8,7 @@ import glob from astropy import log import numpy as np -from pint_pal.defaults import * +from pint_pal.config import LATEST_TOA_RELEASE import os yaml = YAML() From 5c18585a708910601fe0e1259c140f1a519b8bf9 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 8 May 2024 17:08:30 -0400 Subject: [PATCH 102/193] Replace one last instance of pint_pal.defaults --- src/pint_pal/par_checker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/par_checker.py b/src/pint_pal/par_checker.py index eb23ec06..4128209e 100644 --- a/src/pint_pal/par_checker.py +++ b/src/pint_pal/par_checker.py @@ -4,7 +4,7 @@ import copy from astropy import log import astropy.units as u -from pint_pal.defaults import ( +from pint_pal.config import ( LATEST_BIPM, LATEST_EPHEM, PLANET_SHAPIRO, From 37d8c09f7c1ccef5bf03fe09d70242dda3c4f43c Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 8 May 2024 17:32:26 -0400 Subject: [PATCH 103/193] Actually use DATA_ROOT to resolve paths --- src/pint_pal/notebook_runner.py | 5 +++-- src/pint_pal/timingconfiguration.py | 16 +++++++++++++--- tests/test_run_notebook.py | 14 ++++++++------ 3 files changed, 24 insertions(+), 11 deletions(-) diff --git a/src/pint_pal/notebook_runner.py b/src/pint_pal/notebook_runner.py index 495f6fbd..2e2223ee 100644 --- a/src/pint_pal/notebook_runner.py +++ b/src/pint_pal/notebook_runner.py @@ -11,6 +11,7 @@ import pint_pal from pint_pal.notebook_templater import transform_notebook +from pint_pal.config import DATA_ROOT ansi_color = re.compile(r'\x1b\[([0-9]{1,3};)*[0-9]{1,3}m') @@ -36,8 +37,8 @@ def run_template_notebook(template_nb, config_file, output_nb=None, err_file=Non verbose: Print a description of replacements made in the template notebook. 
transformations: Transformations to apply to the notebook. """ - # base_dir = parent directory of directory containing config_file - base_dir = os.path.dirname(os.path.dirname(os.path.abspath(config_file))) + # base_dir = root of data repository + base_dir = DATA_ROOT nb_name = os.path.splitext(os.path.split(template_nb)[1])[0] cfg_name = os.path.splitext(os.path.split(config_file)[1])[0] diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index dd44b7b3..e505ab7f 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -21,7 +21,7 @@ from pint_pal.utils import write_if_changed, apply_cut_flag, apply_cut_select from pint_pal.lite_utils import new_changelog_entry from pint_pal.lite_utils import check_toa_version, check_tobs -from pint_pal.config import PLANET_SHAPIRO, FREQUENCY_RATIO, MAX_SOLARWIND_DELAY +from pint_pal.config import DATA_ROOT, PLANET_SHAPIRO, FREQUENCY_RATIO, MAX_SOLARWIND_DELAY class TimingConfiguration: """ @@ -48,8 +48,18 @@ def __init__(self, filename="config.yaml", tim_directory=None, par_directory=Non self.filename = filename with open(filename) as FILE: self.config = yaml.load(FILE, Loader=yaml.FullLoader) - self.tim_directory = self.config['tim-directory'] if tim_directory is None else tim_directory - self.par_directory = self.config['par-directory'] if par_directory is None else par_directory + if tim_directory is None: + self.tim_directory = os.path.realpath( + os.path.join(DATA_ROOT, self.config['tim-directory']) + ) + else: + self.tim_directory = tim_directory + if par_directory is None: + self.par_directory = os.path.realpath( + os.path.join(DATA_ROOT, self.config['par-directory']) + ) + else: + self.par_directory = par_directory self.skip_check = self.config['skip-check'] if 'skip-check' in self.config.keys() else '' def get_source(self): diff --git a/tests/test_run_notebook.py b/tests/test_run_notebook.py index 4f45fe77..b1f47262 100644 --- a/tests/test_run_notebook.py +++ b/tests/test_run_notebook.py @@ -3,15 +3,17 @@ from datetime import datetime from glob import glob import pytest +import pint_pal from pint_pal.notebook_runner import run_template_notebook - -base_dir = dirname(dirname(__file__)) +test_dir = dirname(__file__) +base_dir = dirname(test_dir) +pint_pal.set_data_root(test_dir) def config_files(): - config_files = (glob(join(base_dir, 'tests/configs/B*.nb.yaml')) - + glob(join(base_dir, 'tests/configs/J*.nb.yaml')) - + glob(join(base_dir, 'tests/configs/B*.wb.yaml')) - + glob(join(base_dir, 'tests/configs/J*.wb.yaml'))) + config_files = (glob(join(test_dir, 'configs', 'B*.nb.yaml')) + + glob(join(test_dir, 'configs', 'J*.nb.yaml')) + + glob(join(test_dir, 'configs', 'B*.wb.yaml')) + + glob(join(test_dir, 'configs', 'J*.wb.yaml'))) config_files = sorted(config_files) basenames = [splitext(split(filename)[1])[0] for filename in config_files] print(config_files) From f34f2816de4450e44f7d3d897382242dd69025b4 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Thu, 9 May 2024 12:57:44 -0400 Subject: [PATCH 104/193] Can't import config vars directly --- src/pint_pal/notebook_runner.py | 4 +-- src/pint_pal/par_checker.py | 40 ++++++++++++++--------------- src/pint_pal/timingconfiguration.py | 12 ++++----- src/pint_pal/yamlio.py | 4 +-- 4 files changed, 30 insertions(+), 30 deletions(-) diff --git a/src/pint_pal/notebook_runner.py b/src/pint_pal/notebook_runner.py index 2e2223ee..26db8126 100644 --- a/src/pint_pal/notebook_runner.py +++ b/src/pint_pal/notebook_runner.py @@ -10,8 +10,8 
@@ yaml = YAML(typ='safe') import pint_pal +import pint_pal.config from pint_pal.notebook_templater import transform_notebook -from pint_pal.config import DATA_ROOT ansi_color = re.compile(r'\x1b\[([0-9]{1,3};)*[0-9]{1,3}m') @@ -38,7 +38,7 @@ def run_template_notebook(template_nb, config_file, output_nb=None, err_file=Non transformations: Transformations to apply to the notebook. """ # base_dir = root of data repository - base_dir = DATA_ROOT + base_dir = pint_pal.config.DATA_ROOT nb_name = os.path.splitext(os.path.split(template_nb)[1])[0] cfg_name = os.path.splitext(os.path.split(config_file)[1])[0] diff --git a/src/pint_pal/par_checker.py b/src/pint_pal/par_checker.py index 4128209e..6ecf6856 100644 --- a/src/pint_pal/par_checker.py +++ b/src/pint_pal/par_checker.py @@ -4,13 +4,7 @@ import copy from astropy import log import astropy.units as u -from pint_pal.config import ( - LATEST_BIPM, - LATEST_EPHEM, - PLANET_SHAPIRO, - CORRECT_TROPOSPHERE, - LATEST_TOA_RELEASE, -) +import pint_pal.config from pint.modelutils import model_equatorial_to_ecliptic def check_if_fit(model, *param): @@ -290,8 +284,9 @@ def check_ephem(toa): UserWarning If ephemeris is not set to the latest version. """ - if toa.ephem != LATEST_EPHEM: - msg = f"Wrong Solar System ephemeris in use ({toa.ephem}); should be {LATEST_EPHEM}." + if toa.ephem != pint_pal.config.LATEST_EPHEM: + msg = (f"Wrong Solar System ephemeris in use ({toa.ephem});" + f" should be {pint_pal.config.LATEST_EPHEM}.") log.warning(msg) else: msg = f"Current Solar System ephemeris in use is {toa.ephem}." @@ -310,8 +305,9 @@ def check_bipm(toa): UserWarning If BIPM correction is not set to the latest version. """ - if toa.clock_corr_info['bipm_version'] != LATEST_BIPM: - msg = f"Wrong bipm_version ({toa.clock_corr_info['bipm_version']}); should be {LATEST_BIPM}." + if toa.clock_corr_info['bipm_version'] != pint_pal.config.LATEST_BIPM: + msg = (f"Wrong bipm_version ({toa.clock_corr_info['bipm_version']});" + f" should be {pint_pal.config.LATEST_BIPM}.") log.warning(msg) else: msg = f"BIPM version in use is {toa.clock_corr_info['bipm_version']}." @@ -362,9 +358,10 @@ def check_troposphere(model): msg = "Added TroposphereDelay to model components." log.warning(msg) tropo = model.components['TroposphereDelay'].CORRECT_TROPOSPHERE.value - if tropo != CORRECT_TROPOSPHERE: - model.components['TroposphereDelay'].CORRECT_TROPOSPHERE.set( \ - CORRECT_TROPOSPHERE) + if tropo != pint_pal.config.CORRECT_TROPOSPHERE: + model.components['TroposphereDelay'].CORRECT_TROPOSPHERE.set( + pint_pal.config.CORRECT_TROPOSPHERE + ) msg = "Switching CORRECT_TROPOSPHERE setting." log.warning(msg) tropo = model.components['TroposphereDelay'].CORRECT_TROPOSPHERE.value @@ -391,9 +388,10 @@ def check_planet_shapiro(model): msg = "Added SolarSystemShapiro to model components." log.warning(msg) sss = model.components['SolarSystemShapiro'].PLANET_SHAPIRO.value - if sss != PLANET_SHAPIRO: - model.components['SolarSystemShapiro'].PLANET_SHAPIRO.set( \ - PLANET_SHAPIRO) + if sss != pint_pal.config.PLANET_SHAPIRO: + model.components['SolarSystemShapiro'].PLANET_SHAPIRO.set( + pint_pal.config.PLANET_SHAPIRO + ) msg = "Switching PLANET_SHAPIRO setting." 
log.warning(msg) sss = model.components['SolarSystemShapiro'].PLANET_SHAPIRO.value @@ -455,7 +453,9 @@ def check_toa_release(toas): if len(set(release_flags)) > 1: log.error(f'TOAs from multiple releases should not be combined: {set(release_flags)}') else: - if release_flags[0] == LATEST_TOA_RELEASE: - log.info(f'All TOAs are from the latest release ({LATEST_TOA_RELEASE}).') + if release_flags[0] == pint_pal.config.LATEST_TOA_RELEASE: + log.info(f'All TOAs are from the latest release ({pint_pal.config.LATEST_TOA_RELEASE}).') else: - log.warning(f'TOAs in use are from an old release {release_flags[0]}, not {LATEST_TOA_RELEASE}; update tim-directory in the .yaml accordingly.') + log.warning(f'TOAs in use are from an old release {release_flags[0]}, ' + f'not {pint_pal.config.LATEST_TOA_RELEASE}; ' + f'update tim-directory in the .yaml accordingly.') diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index e505ab7f..3d8be07b 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -21,7 +21,7 @@ from pint_pal.utils import write_if_changed, apply_cut_flag, apply_cut_select from pint_pal.lite_utils import new_changelog_entry from pint_pal.lite_utils import check_toa_version, check_tobs -from pint_pal.config import DATA_ROOT, PLANET_SHAPIRO, FREQUENCY_RATIO, MAX_SOLARWIND_DELAY +import pint_pal.config class TimingConfiguration: """ @@ -50,13 +50,13 @@ def __init__(self, filename="config.yaml", tim_directory=None, par_directory=Non self.config = yaml.load(FILE, Loader=yaml.FullLoader) if tim_directory is None: self.tim_directory = os.path.realpath( - os.path.join(DATA_ROOT, self.config['tim-directory']) + os.path.join(pint_pal.config.DATA_ROOT, self.config['tim-directory']) ) else: self.tim_directory = tim_directory if par_directory is None: self.par_directory = os.path.realpath( - os.path.join(DATA_ROOT, self.config['par-directory']) + os.path.join(pint_pal.config.DATA_ROOT, self.config['par-directory']) ) else: self.par_directory = par_directory @@ -143,7 +143,7 @@ def get_model_and_toas(self,usepickle=True,print_all_ignores=False,apply_initial usepickle=usepickle, bipm_version=BIPM, ephem=EPHEM, - planets=PLANET_SHAPIRO, + planets=pint_pal.config.PLANET_SHAPIRO, model=m, picklefilename=picklefilename, include_pn=include_pn @@ -654,13 +654,13 @@ def get_fratio(self): """ Return desired frequency ratio """ if 'fratio' in self.config['dmx'].keys(): return self.config['dmx']['fratio'] - return FREQUENCY_RATIO + return pint_pal.config.FREQUENCY_RATIO def get_sw_delay(self): """ Return desired max(solar wind delay) threshold """ if 'max-sw-delay' in self.config['dmx'].keys(): return self.config['dmx']['max-sw-delay'] - return MAX_SOLARWIND_DELAY + return pint_pal.config.MAX_SOLARWIND_DELAY def get_custom_dmx(self): """ Return MJD/binning params for handling DM events, etc. """ diff --git a/src/pint_pal/yamlio.py b/src/pint_pal/yamlio.py index 5cfe6868..a5819f7f 100644 --- a/src/pint_pal/yamlio.py +++ b/src/pint_pal/yamlio.py @@ -8,11 +8,11 @@ import glob from astropy import log import numpy as np -from pint_pal.config import LATEST_TOA_RELEASE +from pint_pal import config import os yaml = YAML() -RELEASE = f'/nanograv/timing/releases/15y/toagen/releases/{LATEST_TOA_RELEASE}/' +RELEASE = f'/nanograv/timing/releases/15y/toagen/releases/{config.LATEST_TOA_RELEASE}/' def fix_toa_info(yaml_file,current_release=RELEASE,overwrite=True,extension='fix'): """Checks/fixes tim-directory, toas, toa-type from existing yaml; writes new one. 
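As a quick orientation for the module-attribute pattern above, a minimal usage
sketch (the repository path is hypothetical):

    import pint_pal
    import pint_pal.config

    # Point PINT Pal at a data repository; if it contains a
    # pint_pal_project.yaml, those settings override defaults.yaml.
    pint_pal.set_data_root("/data/ng20")

    # Read settings through the module rather than importing them by value,
    # so updates made by set_data_root() are visible everywhere.
    print(pint_pal.config.DATA_ROOT)
    print(pint_pal.config.LATEST_EPHEM)
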
From 2962884b08e238f1b3ce06af5a8170d5bdc446f7 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Thu, 9 May 2024 13:10:14 -0400 Subject: [PATCH 105/193] Resolve paths according to latest config --- src/pint_pal/timingconfiguration.py | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index 3d8be07b..5438246f 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -49,19 +49,35 @@ def __init__(self, filename="config.yaml", tim_directory=None, par_directory=Non with open(filename) as FILE: self.config = yaml.load(FILE, Loader=yaml.FullLoader) if tim_directory is None: - self.tim_directory = os.path.realpath( - os.path.join(pint_pal.config.DATA_ROOT, self.config['tim-directory']) - ) + self.config['tim-directory'] = tim_directory else: self.tim_directory = tim_directory if par_directory is None: - self.par_directory = os.path.realpath( - os.path.join(pint_pal.config.DATA_ROOT, self.config['par-directory']) - ) + self.config['par-directory'] = par_directory else: self.par_directory = par_directory self.skip_check = self.config['skip-check'] if 'skip-check' in self.config.keys() else '' + @property + def tim_directory(self): + """ + Location of tim files, as specified in the config. + This returns the absolute path to the tim directory. + """ + return os.path.realpath( + os.path.join(pint_pal.config.DATA_ROOT, self.config['tim-directory']) + ) + + @property + def par_directory(self): + """ + Location of par files, as specified in the config. + This returns the absolute path to the par directory. + """ + return os.path.realpath( + os.path.join(pint_pal.config.DATA_ROOT, self.config['par-directory']) + ) + def get_source(self): """ Return the source name """ return self.config['source'] From d0b63124c9dcbbb5206827eb048684c031d76e96 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Thu, 9 May 2024 13:21:37 -0400 Subject: [PATCH 106/193] Resolve paths using current config --- src/pint_pal/timingconfiguration.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index 5438246f..446568bd 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -48,14 +48,10 @@ def __init__(self, filename="config.yaml", tim_directory=None, par_directory=Non self.filename = filename with open(filename) as FILE: self.config = yaml.load(FILE, Loader=yaml.FullLoader) - if tim_directory is None: + if tim_directory is not None: self.config['tim-directory'] = tim_directory - else: - self.tim_directory = tim_directory - if par_directory is None: + if par_directory is not None: self.config['par-directory'] = par_directory - else: - self.par_directory = par_directory self.skip_check = self.config['skip-check'] if 'skip-check' in self.config.keys() else '' @property @@ -68,6 +64,14 @@ def tim_directory(self): os.path.join(pint_pal.config.DATA_ROOT, self.config['tim-directory']) ) + @tim_directory.setter + def set_tim_directory(self, tim_directory): + """ + Set tim directory. + If a relative path is supplied, it will be turned into an absolute path. 
+ """ + self.config['tim-directory'] = tim_directory + @property def par_directory(self): """ @@ -78,6 +82,14 @@ def par_directory(self): os.path.join(pint_pal.config.DATA_ROOT, self.config['par-directory']) ) + @par_directory.setter + def set_par_directory(self, par_directory): + """ + Set par directory. + If a relative path is supplied, it will be turned into an absolute path. + """ + self.config['par-directory'] = par_directory + def get_source(self): """ Return the source name """ return self.config['source'] From 87536aa5cb7be0129b6a4e968a894eafb018a718 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Thu, 9 May 2024 13:22:06 -0400 Subject: [PATCH 107/193] Use set_data_root() in tests --- tests/test_run_notebook.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_run_notebook.py b/tests/test_run_notebook.py index b1f47262..e61874f3 100644 --- a/tests/test_run_notebook.py +++ b/tests/test_run_notebook.py @@ -40,6 +40,7 @@ def test_run_notebook(config_file, output_dir): `pytest -n tests/test_run_notebook.py` is the number of worker processes to launch (e.g. 4 to use 4 CPU threads) """ + pint_pal.set_data_root(dirname(__file__)) global_log = join(output_dir, f'test-run-notebook.log') with open(global_log, 'a') as f: run_template_notebook( From 75d6db680cce907f8ef4a7a0ade4ec2c6628518e Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Thu, 9 May 2024 13:42:41 -0400 Subject: [PATCH 108/193] Convert DATA_ROOT into absolute path --- src/pint_pal/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/config.py b/src/pint_pal/config.py index 0997474c..c53a4e55 100644 --- a/src/pint_pal/config.py +++ b/src/pint_pal/config.py @@ -21,7 +21,7 @@ def set_data_root(path): (2) all scripts and notebooks are run from the root of the data repository. """ global DATA_ROOT - DATA_ROOT = path + DATA_ROOT = os.path.realpath(path) try: read_config_file(os.path.join(DATA_ROOT, 'pint_pal_project.yaml')) except FileNotFoundError: From 49db4eae5fa54500719379d258e60724936b86d7 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Thu, 9 May 2024 14:10:14 -0400 Subject: [PATCH 109/193] set_data_root(): add reset, allow ~ in paths --- src/pint_pal/__init__.py | 2 +- src/pint_pal/config.py | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/__init__.py b/src/pint_pal/__init__.py index 3429e639..ba6ea479 100644 --- a/src/pint_pal/__init__.py +++ b/src/pint_pal/__init__.py @@ -1,6 +1,6 @@ import pint_pal.checkin import pint_pal.config -from pint_pal.config import set_data_root +from pint_pal.config import set_data_root, reset_data_root from . import _version __version__ = _version.get_versions()['version'] diff --git a/src/pint_pal/config.py b/src/pint_pal/config.py index c53a4e55..faf79495 100644 --- a/src/pint_pal/config.py +++ b/src/pint_pal/config.py @@ -21,12 +21,20 @@ def set_data_root(path): (2) all scripts and notebooks are run from the root of the data repository. """ global DATA_ROOT - DATA_ROOT = os.path.realpath(path) + DATA_ROOT = os.path.realpath(os.path.expanduser(path)) try: read_config_file(os.path.join(DATA_ROOT, 'pint_pal_project.yaml')) except FileNotFoundError: pass +def reset_data_root(): + """ + Reset the data root and config variables to the default values. + """ + global DATA_ROOT + DATA_ROOT = '.' 
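+    # Re-reading the packaged defaults discards any project-specific overrides.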
+ read_config_file(os.path.join(PACKAGE_DIR, 'defaults.yaml')) + def read_config_file(config_file): """ Read a configuration file, along the lines of `defaults.yaml`, and load the results From 774580b1941ac9196a74ed2961ec796bb665b956 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Thu, 9 May 2024 14:27:12 -0400 Subject: [PATCH 110/193] Accept ~ in YAML path too --- src/pint_pal/timingconfiguration.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index 446568bd..952d0396 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -45,8 +45,8 @@ def __init__(self, filename="config.yaml", tim_directory=None, par_directory=Non tim_directory (optional) : override the tim directory specified in the config par_directory (optional) : override the par directory specified in the config """ - self.filename = filename - with open(filename) as FILE: + self.filename = os.path.realpath(os.path.expanduser(filename)) + with open(self.filename) as FILE: self.config = yaml.load(FILE, Loader=yaml.FullLoader) if tim_directory is not None: self.config['tim-directory'] = tim_directory From 4fbf2c7e1efc56dd1363e76c7b0eefc7a7ff1548 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Tue, 7 May 2024 18:07:22 -0400 Subject: [PATCH 111/193] Also run tests on PRs targeting NG20 branch --- .github/workflows/test_notebook.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index cd1c5f82..f52fc418 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -7,6 +7,7 @@ on: pull_request: branches: - main + - NG20 jobs: build: From cf52288f80f53afd297634db73ff9d978c40c298 Mon Sep 17 00:00:00 2001 From: tcromartie Date: Thu, 13 Jun 2024 17:46:51 +0000 Subject: [PATCH 112/193] Don't let tc barf if excise-tim is there but unset --- src/pint_pal/timingconfiguration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py index 952d0396..fe2d5f57 100644 --- a/src/pint_pal/timingconfiguration.py +++ b/src/pint_pal/timingconfiguration.py @@ -400,7 +400,7 @@ def get_niter(self): def get_excised(self): """ Return excised-tim file if set and exists""" - if 'excised-tim' in self.config['intermediate-results'].keys(): + if 'excised-tim' in self.config['intermediate-results'].keys() and self.config['intermediate-results']['excised-tim']: if os.path.exists(self.config['intermediate-results']['excised-tim']): return self.config['intermediate-results']['excised-tim'] return None From a51814d7cf75918fd70bdfd54ec96f1098ff487d Mon Sep 17 00:00:00 2001 From: Michael Lam Date: Sat, 10 Aug 2024 06:43:36 +0000 Subject: [PATCH 113/193] add convert_tcb2tdb=False flags --- src/pint_pal/noise_utils.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 930747b8..968b48a3 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -313,7 +313,7 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl param_name = key.split('_efac')[0].split(psr_name)[1][1:] tp = maskParameter(name = 'EFAC', index = efac_idx, key = '-f', key_value = param_name, - value = val, units = '') + value = val, units = '', convert_tcb2tdb=False) efac_params.append(tp) efac_idx += 1 @@ -324,7 +324,7 @@ def 
add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl param_name = key.split('_t2equad')[0].split(psr_name)[1].split('_log10')[0][1:] tp = maskParameter(name = 'EQUAD', index = equad_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us') + value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) equad_params.append(tp) equad_idx += 1 @@ -334,7 +334,7 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl param_name = key.split('_tnequad')[0].split(psr_name)[1].split('_log10')[0][1:] tp = maskParameter(name = 'EQUAD', index = equad_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us') + value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) equad_params.append(tp) equad_idx += 1 @@ -344,7 +344,7 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl param_name = key.split('_equad')[0].split(psr_name)[1].split('_log10')[0][1:] tp = maskParameter(name = 'EQUAD', index = equad_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us') + value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) equad_params.append(tp) equad_idx += 1 @@ -353,7 +353,7 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl param_name = key.split('_ecorr')[0].split(psr_name)[1].split('_log10')[0][1:] tp = maskParameter(name = 'ECORR', index = ecorr_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us') + value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) ecorr_params.append(tp) ecorr_idx += 1 @@ -362,7 +362,7 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl param_name = key.split('_dmefac')[0].split(psr_name)[1][1:] tp = maskParameter(name = 'DMEFAC', index = dmefac_idx, key = '-f', key_value = param_name, - value = val, units = '') + value = val, units = '', convert_tcb2tdb=False) dmefac_params.append(tp) dmefac_idx += 1 @@ -371,7 +371,7 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl param_name = key.split('_dmequad')[0].split(psr_name)[1].split('_log10')[0][1:] tp = maskParameter(name = 'DMEQUAD', index = dmequad_idx, key = '-f', key_value = param_name, - value = 10 ** val, units = 'pc/cm3') + value = 10 ** val, units = 'pc/cm3', convert_tcb2tdb=False) dmequad_params.append(tp) dmequad_idx += 1 From 2d28f50be333e3fa7dcf5ed1c4fa8a78a5ea3ac0 Mon Sep 17 00:00:00 2001 From: Michael Lam Date: Mon, 19 Aug 2024 19:08:14 +0000 Subject: [PATCH 114/193] tcb2tdb fix in lite_utils --- src/pint_pal/lite_utils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/pint_pal/lite_utils.py b/src/pint_pal/lite_utils.py index 96e8c884..bd549d88 100644 --- a/src/pint_pal/lite_utils.py +++ b/src/pint_pal/lite_utils.py @@ -410,7 +410,7 @@ def add_feJumps(mo,rcvrs): if len(missing_fe_jumps) > 1: for j in missing_fe_jumps[:-1]: log.info(f"Adding frontend JUMP {j}") - JUMPn = maskParameter('JUMP',key='-fe',key_value=[j],value=0.0,units=u.second) + JUMPn = maskParameter('JUMP',key='-fe',key_value=[j],value=0.0,units=u.second,convert_tcb2tdb=False) phasejump.add_param(JUMPn,setup=True) def add_feDMJumps(mo,rcvrs): @@ -447,7 +447,7 @@ def add_feDMJumps(mo,rcvrs): if len(missing_fe_dmjumps): for j in missing_fe_dmjumps: log.info(f"Adding frontend DMJUMP {j}") - DMJUMPn = maskParameter('DMJUMP',key='-fe',key_value=[j],value=0.0,units=u.pc*u.cm**-3) + DMJUMPn = 
maskParameter('DMJUMP',key='-fe',key_value=[j],value=0.0,units=u.pc*u.cm**-3,convert_tcb2tdb=False) dmjump.add_param(DMJUMPn,setup=True) def get_flag_val_list(toas, flag): @@ -516,7 +516,7 @@ def add_flag_jumps(mo,flag,flaglist,base=False): if len(missing_jumps) > 1: for j in missing_jumps[:-1]: log.info(f"Adding frontend JUMP {j}") - JUMPn = maskParameter('JUMP',key=flagval,key_value=[j],value=0.0,units=u.second) + JUMPn = maskParameter('JUMP',key=flagval,key_value=[j],value=0.0,units=u.second,convert_tcb2tdb=False) phasejump.add_param(JUMPn,setup=True) else: if len(missing_jumps): @@ -529,7 +529,7 @@ def add_flag_jumps(mo,flag,flaglist,base=False): if len(missing_jumps) >= 1: for j in missing_jumps[:-1]: log.info(f"Adding frontend JUMP {j}") - JUMPn = maskParameter('JUMP',key=flagval,key_value=[j],value=0.0,units=u.second) + JUMPn = maskParameter('JUMP',key=flagval,key_value=[j],value=0.0,units=u.second,convert_tcb2tdb=False) phasejump.add_param(JUMPn,setup=True) def large_residuals(fo,threshold_us,threshold_dm=None,*,n_sigma=None,max_sigma=None,prefit=False,ignore_ASP_dms=True,print_bad=True, check_jumps=False): From 2bdcc0583857fb9a91a960fb27a3c227ad1c7769 Mon Sep 17 00:00:00 2001 From: Michael Lam Date: Mon, 19 Aug 2024 21:40:07 -0400 Subject: [PATCH 115/193] add VEGAS to plot_utils --- src/pint_pal/plot_utils.py | 282 +++++++++++++++++++------------------ 1 file changed, 142 insertions(+), 140 deletions(-) diff --git a/src/pint_pal/plot_utils.py b/src/pint_pal/plot_utils.py index 225b3ef4..47ca5109 100644 --- a/src/pint_pal/plot_utils.py +++ b/src/pint_pal/plot_utils.py @@ -46,7 +46,7 @@ "meerkat": "#FD9927", "None": "#808080" }, - + 'pta':{ "InPTA": "#855CA0", "EPTA": "#407BD5", @@ -64,8 +64,10 @@ "L-wide_PUPPI": "#6BA9E2", "Rcvr1_2_GASP": "#61C853", "Rcvr1_2_GUPPI": "#61C853", + "Rcvr1_2_VEGAS": "#61C853", "Rcvr_800_GASP": "#61C853", "Rcvr_800_GUPPI": "#61C853", + "Rcvr_800_VEGAS": "#61C853", "S-wide_ASP": "#6BA9E2", "S-wide_PUPPI": "#6BA9E2", "1.5GHz_YUPPI": "#40635F", @@ -157,7 +159,7 @@ "PPTA": "x", "MPTA": "x", "None": "x" - }, + }, 'febe': {"327_ASP": "x", "327_PUPPI": "x", "430_ASP": "x", @@ -239,7 +241,7 @@ def call(x): subprocess.call(x,shell=True) - + def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = False, avg = False, whitened = False, \ save = False, legend = True, title = True, axs = None, mixed_ecorr=False, **kwargs): """ @@ -255,7 +257,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa 'both' - overplot both the pre and post-fit residuals. colorby ['string']: What to use to determine color/markers 'pta' - color residuals by PTA (default) - 'obs' - color residuals by telescope + 'obs' - color residuals by telescope 'f' - color residuals by frontend/backend pair (flag not used by all PTAs). plotsig [boolean] : If True plot number of measurements v. residuals/uncertainty, else v. residuals [default: False]. @@ -267,7 +269,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa title [boolean] : If False, will not print plot title [default: True]. axs [string] : If not None, should be defined subplot value and the figure will be used as part of a larger figure [default: None]. 
- + Optional Arguments: -------------------- @@ -288,7 +290,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") else: NB = True - + # Check if want epoch averaged residuals if avg == True and restype == 'prefit' and mixed_ecorr == True: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) @@ -358,7 +360,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa # Check if we want whitened residuals if whitened == True and ('res' not in kwargs.keys()): - if avg == True and mixed_ecorr == True: + if avg == True and mixed_ecorr == True: if restype != 'both': res = whiten_resids(avg_dict, restype=restype) res_no_avg = whiten_resids(no_avg_dict, restype=restype) @@ -371,14 +373,14 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa res_pre_no_avg = res_pre_no_avg.to(u.us) res = res.to(u.us) res_no_avg = res_no_avg.to(u.us) - elif avg == True and mixed_ecorr == False: + elif avg == True and mixed_ecorr == False: if restype != 'both': res = whiten_resids(avg_dict, restype=restype) else: res = whiten_resids(avg_dict_pre, restype='prefit') res_pre = whiten_resids(avg_dict, restype='postfit') res_pre = res_pre.to(u.us) - res = res.to(u.us) + res = res.to(u.us) else: if restype != 'both': res = whiten_resids(fitter, restype=restype) @@ -406,7 +408,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa errs = avg_dict['errors'].to(u.us) errs_no_avg = no_avg_dict['errors'].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict['errors'].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: @@ -441,18 +443,18 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa mjds = avg_dict['mjds'].value # Convert to years years = (mjds - 51544.0)/365.25 + 2000.0 - + # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr. # Create combined arrays - - if avg == True and mixed_ecorr == True: + + if avg == True and mixed_ecorr == True: combo_res = np.hstack((res, res_no_avg)) combo_errs = np.hstack((errs, errs_no_avg)) combo_years = np.hstack((years, years_no_avg)) if restype =='both': - combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) + combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) combo_res_pre = np.hstack((res_pre, res_no_avg_pre)) - + # Get colorby flag values (obs, PTA, febe, etc.) 
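    # (when avg=True, the per-TOA flag values are re-indexed below via
    # avg_dict['indices'] so they line up with the epoch-averaged residuals)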
if 'colorby' in kwargs.keys(): cb = kwargs['colorby'] @@ -469,23 +471,23 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa for jjs in no_avg_dict['indices']: no_avg_cb.append(cb[jjs]) no_ecorr_cb = np.array(no_avg_cb) - + cb = np.array(avg_cb) - + # Get the set of unique flag values if avg==True and mixed_ecorr==True: cb = np.hstack((cb,no_ecorr_cb)) - + CB = set(cb) - + if colorby== 'pta': colorscheme = colorschemes['pta'] elif colorby == 'obs': colorscheme = colorschemes['observatories'] elif colorby == 'f': colorscheme = colorschemes['febe'] - + if 'figsize' in kwargs.keys(): figsize = kwargs['figsize'] @@ -497,7 +499,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa else: fig = plt.gcf() ax1 = axs - + for i, c in enumerate(CB): inds = np.where(cb==c)[0] @@ -543,7 +545,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa if restype == 'both': ax1.errorbar(combo_years[inds], combo_res_rpe[inds], yerr=combo_errs_pre[inds], fmt=mkr_pre, \ color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True) - + else: if plotsig: sig = res[inds]/errs[inds] @@ -618,7 +620,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = Fa elif restype == "both": ext += "_pre_post_fit" plt.savefig("%s_resid_v_mjd%s.png" % (fitter.model.PSR.value, ext)) - + if axs == None: # Define clickable points text = ax2.text(0,0,"") @@ -654,25 +656,25 @@ def onclick(event): def plot_FD_delay(fitter = None, model_object = None, save = False, title= True, axs = None, legend=True, show_bin=True, **kwargs): """ - Make a plot of frequency (MHz) vs the time delay (us) implied by FD parameters. + Make a plot of frequency (MHz) vs the time delay (us) implied by FD parameters. Z. Arzoumanian, The NANOGrav Nine-year Data Set: Observations, Arrival Time Measurements, and Analysis of 37 Millisecond Pulsars, The Astrophysical Journal, Volume 813, Issue 1, article id. 65, 31 pp.(2015). Eq.(2): FDdelay = sum(c_i * (log(obs_freq/1GHz))^i) - - This can be run with EITHER a PINT fitter object OR PINT model object. If run with a model object, the user will need to specify which frequencies they would like to plot FD delays over. - + + This can be run with EITHER a PINT fitter object OR PINT model object. If run with a model object, the user will need to specify which frequencies they would like to plot FD delays over. + Arguments ---------- - + fitter[object] : The PINT fitter object. model[object] : The PINT model object. Can be used instead of fitter save [boolean] : If True will save plot with the name "FD_delay.png"[default: False]. title [boolean] : If False, will not print plot title [default: True]. axs [string] : If not None, should be defined subplot value and the figure will be used as part of a larger figure [default: None]. - + Optional Arguments: -------------------- freqs [list/array] : List or array of frequencies (MHz) to plot. Will override values from toa object. 
@@ -683,11 +685,11 @@ def plot_FD_delay(fitter = None, model_object = None, save = False, title= True, alpha [float] : matplotlib alpha options for error regions [default: 0.2] loc ['string'] : matplotlib legend location [default: 'upper right'] Only used when legend = True """ - - #Make sure that either a fitter or model object has been specified + + #Make sure that either a fitter or model object has been specified if fitter == None and model_object == None: raise Exception("Need to specify either a fitter or model object") - + #Get frequencies if 'freqs' in kwargs.keys(): freqs = kwargs['freqs'] @@ -696,7 +698,7 @@ def plot_FD_delay(fitter = None, model_object = None, save = False, title= True, else: freqs = fitter.toas.get_freqs().value freqs = np.sort(freqs) - + #Get FD delay in units of milliseconds as a function of frequency. This will eventually by available in PINT and become redundant. PINT version may need to be modified to allow for calculation of error regions def get_FD_delay(pint_model_object,freqs): FD_map = model.TimingModel.get_prefix_mapping(pint_model_object,"FD") @@ -719,7 +721,7 @@ def get_FD_delay(pint_model_object,freqs): else: FD_phrase = "FD1" return delay *1e6, delta_delay_plus * 1e6, delta_delay_minus * 1e6 , FD_phrase - + #Get FD params if fitter object is given if fitter is not None: #Check if the fitter object has FD parameters @@ -727,9 +729,9 @@ def get_FD_delay(pint_model_object,freqs): FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay(fitter.model, freqs*1e-3) #print(FD_delay) psr_name = fitter.model.PSR.value - """For when new version of PINT is default on pint_pal + """For when new version of PINT is default on pint_pal FD_delay = pint.models.frequency_dependent.FD.FD_delay(fitter.model,freqs) - + """ if show_bin: nbins = fitter.toas['nbin'].astype(int).min() @@ -738,26 +740,26 @@ def get_FD_delay(pint_model_object,freqs): except: print("No FD parameters in this model! Exitting...") #sys.exit() - - #Get FD params if model object is given + + #Get FD params if model object is given if model_object is not None: #Check if the model object has FD parameters try: FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay(model_object, freqs*1e-3) psr_name = model_object.PSR.value - """For when new version of PINT is default on pint_pal + """For when new version of PINT is default on pint_pal FD_delay = pint.models.frequency_dependent.FD.FD_delay(fitter.model,freqs) - + """ if show_bin: print("show_bin requires a fitter object, cannot be used with the model alone") show_bin = False except: print("No FD parameters in this model! Exitting...") - #sys.exit() - + #sys.exit() + - #Get plotting preferences. + #Get plotting preferences. if 'figsize' in kwargs.keys(): figsize = kwargs['figsize'] else: @@ -800,13 +802,13 @@ def get_FD_delay(pint_model_object,freqs): if title: ax1.set_title("%s FD Delay" % psr_name) if legend: - ax1.legend(loc=loc) + ax1.legend(loc=loc) if axs == None: plt.tight_layout() if save: plt.savefig("%s_fd_delay.png" % psr_name) - return + return def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = False, avg = False, mixed_ecorr=False,\ whitened = False, save = False, legend = True, title = True, axs = None, **kwargs): @@ -823,8 +825,8 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal 'both' - overplot both the pre and post-fit residuals. 
colorby ['string']: What to use to determine color/markers 'pta' - color residuals by PTA (default) - 'obs' - color residuals by telescope - 'f' - color residuals by frontend/backend pair (flag not used by all PTAs). + 'obs' - color residuals by telescope + 'f' - color residuals by frontend/backend pair (flag not used by all PTAs). plotsig [boolean] : If True plot number of measurements v. residuals/uncertainty, else v. residuals [default: False]. avg [boolean] : If True and not wideband fitter, will compute and plot epoch-average residuals [default: False]. @@ -923,11 +925,11 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\ %(restype)) - - + + # Check if we want whitened residuals if whitened == True and ('res' not in kwargs.keys()): - if avg == True and mixed_ecorr == True: + if avg == True and mixed_ecorr == True: if restype != 'both': res = whiten_resids(avg_dict, restype=restype) res_no_avg = whiten_resids(no_avg_dict, restype=restype) @@ -940,14 +942,14 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal res_pre_no_avg = res_pre_no_avg.to(u.us) res = res.to(u.us) res_no_avg = res_no_avg.to(u.us) - elif avg == True and mixed_ecorr == False: + elif avg == True and mixed_ecorr == False: if restype != 'both': res = whiten_resids(avg_dict, restype=restype) else: res = whiten_resids(avg_dict_pre, restype='prefit') res_pre = whiten_resids(avg_dict, restype='postfit') res_pre = res_pre.to(u.us) - res = res.to(u.us) + res = res.to(u.us) else: if restype != 'both': res = whiten_resids(fitter, restype=restype) @@ -975,7 +977,7 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal errs = avg_dict['errors'].to(u.us) errs_no_avg = no_avg_dict['errors'].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict['errors'].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: @@ -997,15 +999,15 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) - + # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr. # Create combined arrays - - if avg == True and mixed_ecorr == True: + + if avg == True and mixed_ecorr == True: combo_res = np.hstack((res, res_no_avg)) combo_errs = np.hstack((errs, errs_no_avg)) if restype =='both': - combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) + combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) combo_res_pre = np.hstack((res_pre, res_no_avg_pre)) # Get freqs @@ -1013,8 +1015,8 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal freqs = kwargs['freqs'] else: freqs = fitter.toas.get_freqs().value - - + + # Get colorby flag values (obs, PTA, febe, etc.) 
if 'colorby' in kwargs.keys(): cb = kwargs['colorby'] @@ -1031,15 +1033,15 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal for jjs in no_avg_dict['indices']: no_avg_cb.append(cb[jjs]) no_ecorr_cb = np.array(no_avg_cb) - + cb = np.array(avg_cb) - + # Get the set of unique flag values if avg==True and mixed_ecorr==True: cb = np.hstack((cb,no_ecorr_cb)) - + CB = set(cb) - + if colorby== 'pta': colorscheme = colorschemes['pta'] markerscheme = markers['pta'] @@ -1049,7 +1051,7 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal elif colorby == 'f': colorscheme = colorschemes['febe'] markerscheme = markers['febe'] - + if 'figsize' in kwargs.keys(): figsize = kwargs['figsize'] @@ -1093,7 +1095,7 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal alpha = kwargs['alpha'] else: alpha = 0.5 - + if avg and mixed_ecorr: if plotsig: combo_sig = combo_res[inds]/combo_errs[inds] @@ -1179,7 +1181,7 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = Fal elif restype == "both": ext += "_pre_post_fit" plt.savefig("%s_resid_v_freq%s.png" % (fitter.model.PSR.value, ext)) - + if axs == None: # Define clickable points text = ax1.text(0,0,"") @@ -1292,7 +1294,7 @@ def plot_dmx_time(fitter, savedmx = False, save = False, legend = True,\ dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt("%s_dmxparse.nb.out"%(psrname),\ unpack=True, usecols=(0,1,2,3,4)) dmx_mid_yr = (dmx_epochs- 51544.0)/365.25 + 2000.0 - + # Define the plotting function if axs == None: if 'figsize' in kwargs.keys(): @@ -1418,7 +1420,7 @@ def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model = None): from astropy.time import Time if isinstance(dmxout_files, str): dmxout_files = [dmxout_files] if isinstance(labels, str): labels = [labels] - + figsize = (10,4) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) @@ -1431,7 +1433,7 @@ def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model = None): dmxDict = {} for ii,(df,lab) in enumerate(zip(dmxout_files,labels)): dmxmjd, dmxval, dmxerr, dmxr1, dmxr2 = np.loadtxt(df, unpack=True, usecols=range(0,5)) - idmxDict = {'mjd':dmxmjd,'val':dmxval,'err':dmxerr,'r1':dmxr1,'r2':dmxr2} + idmxDict = {'mjd':dmxmjd,'val':dmxval,'err':dmxerr,'r1':dmxr1,'r2':dmxr2} ax2.errorbar(dmxmjd, dmxval*10**3, yerr=dmxerr*10**3, label=lab, marker='o', ls='', markerfacecolor='none') dmxDict[lab] = idmxDict @@ -1622,14 +1624,14 @@ def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False elif restype == 'both': dm_error = fitter.resids.residual_objs['dm'].get_data_error().value dm_error_init = fitter.resids_init.residual_objs['dm'].get_data_error().value - + # Get the MJDs if 'mjds' in kwargs.keys(): mjds = kwargs['mjds'] else: mjds = fitter.toas.get_mjds().value years = (mjds - 51544.0)/365.25 + 2000.0 - + # Get the receiver-backend combos if 'rcvr_bcknds' in kwargs.keys(): rcvr_bcknds = kwargs['rcvr_bcknds'] @@ -1661,7 +1663,7 @@ def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False ylabel = r"$\Delta$DM/Uncertainty" else: ylabel = r"$\Delta$DM [cm$^{-3}$ pc]" - + if axs == None: if 'figsize' in kwargs.keys(): figsize = kwargs['figsize'] @@ -1781,7 +1783,7 @@ def onclick(event): text.set_text("DM Params:\n MJD: %s \n Res: %.6f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) fig.canvas.mpl_connect('button_press_event', onclick) - + return def plot_measurements_v_res(fitter, 
restype = 'postfit', plotsig = False, nbin = 50, avg = False, whitened = False, \ @@ -1808,7 +1810,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = title [boolean] : If False, will not print plot title [default: True]. axs [string] : If not None, should be defined subplot value and the figure will be used as part of a larger figure [default: None]. - + Optional Arguments: -------------------- res [list/array] : List or array of residual values to plot. Will override values from fitter object. @@ -1828,7 +1830,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") else: NB = True - + # Check if want epoch averaged residuals if avg == True and restype == 'prefit': avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) @@ -1837,8 +1839,8 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = elif avg == True and restype == 'both': avg_dict = fitter.resids.ecorr_average(use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - - + + # Get residuals if 'res' in kwargs.keys(): res = kwargs['res'] @@ -1873,7 +1875,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = else: raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\ %(restype)) - + # Check if we want whitened residuals if whitened == True and ('res' not in kwargs.keys()): if avg == True: @@ -1883,7 +1885,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = res = whiten_resids(avg_dict_pre, restype='prefit') res_pre = whiten_resids(avg_dict, restype='postfit') res_pre = res_pre.to(u.us) - res = res.to(u.us) + res = res.to(u.us) else: if restype != 'both': res = whiten_resids(fitter, restype=restype) @@ -1892,7 +1894,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = res_pre = whiten_resids(fitter, restype='postfit') res_pre = res_pre.to(u.us) res = res.to(u.us) - + # Get errors if 'errs' in kwargs.keys(): errs = kwargs['errs'] @@ -1921,7 +1923,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = else: errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) - + # Get receiver backends if 'rcvr_bcknds' in kwargs.keys(): rcvr_bcknds = kwargs['rcvr_bcknds'] @@ -1934,7 +1936,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = rcvr_bcknds = np.array(avg_rcvr_bcknds) # Get the set of unique receiver-bandend combos RCVR_BCKNDS = set(rcvr_bcknds) - + if axs == None: if 'figsize' in kwargs.keys(): figsize = kwargs['figsize'] @@ -1944,7 +1946,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = ax1 = fig.add_subplot(111) else: ax1 = axs - + xmax=0 for i, r_b in enumerate(RCVR_BCKNDS): inds = np.where(rcvr_bcknds==r_b)[0] @@ -1971,7 +1973,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = if restype == 'both': ax1.hist(res[inds], nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\ label=r_b_label+" Prefit") - + ax1.grid(True) ax1.set_ylabel("Number of measurements") if plotsig: @@ -2026,7 +2028,7 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = elif restype == "both": ext += "_pre_post_fit" 
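        # e.g. "J1234+5678_resid_measurements_whitened.png" (illustrative pulsar name)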
plt.savefig("%s_resid_measurements%s.png" % (fitter.model.PSR.value, ext)) - + return def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin = 50, \ @@ -2052,7 +2054,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin axs [string] : If not None, should be defined subplot value and the figure will be used as part of a larger figure [default: None]. mean_sub [boolean] : If False, will not mean subtract the DM residuals to be centered on zero [default: True] - + Optional Arguments: -------------------- dmres [list/array] : List or array of residual values to plot. Will override values from fitter object. @@ -2067,7 +2069,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin # Check if wideband if not fitter.is_wideband: raise ValueError("Narrowband Fitters have have no DM residuals, please use `plot_measurements_v_dmres` instead.") - + # Get the DM residuals if 'dmres' in kwargs.keys(): dm_resids = kwargs['dmres'] @@ -2079,7 +2081,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin elif restype == 'both': dm_resids = fitter.resids.residual_objs['dm'].resids.value dm_resids_init = fitter.resids_init.residual_objs['dm'].resids.value - + # Get the DM residual errors if "errs" in kwargs.keys(): dm_error = kwargs['errs'] @@ -2091,7 +2093,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin elif restype == 'both': dm_error = fitter.resids.residual_objs['dm'].get_data_error().value dm_error_init = fitter.resids_init.residual_objs['dm'].get_data_error().value - + # Get the receiver-backend combos if 'rcvr_bcknds' in kwargs.keys(): rcvr_bcknds = kwargs['rcvr_bcknds'] @@ -2123,7 +2125,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin xlabel = r"$\Delta$DM/Uncertainty" else: xlabel = r"$\Delta$DM [cm$^{-3}$ pc]" - + if axs == None: if 'figsize' in kwargs.keys(): figsize = kwargs['figsize'] @@ -2144,7 +2146,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin clr = kwargs['color'] else: clr = colorscheme[r_b_label] - + if plotsig: sig = dm_resids[inds]/dm_error[inds] ax1.hist(sig, nbin, histtype='step', color=colorscheme[r_b_label], label=r_b_label) @@ -2157,7 +2159,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin if restype == 'both': ax1.hist(dm_resids_init[inds], nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\ label=r_b_label+" Prefit") - + ax1.grid(True) ax1.set_ylabel("Number of measurements") ax1.set_xlabel(xlabel) @@ -2186,7 +2188,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin elif restype == "both": ext += "_pre_post_fit" plt.savefig("%s_DM_resid_measurements%s.png" % (fitter.model.PSR.value, ext)) - + return @@ -2232,8 +2234,8 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") else: NB = True - - + + # Check if want epoch averaged residuals if avg == True and restype == 'prefit' and mixed_ecorr == True: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) @@ -2301,11 +2303,11 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal raise ValueError("Unrecognized residual type: %s. 
Please choose from 'prefit', 'postfit', or 'both'."\ %(restype)) - - + + # Check if we want whitened residuals if whitened == True and ('res' not in kwargs.keys()): - if avg == True and mixed_ecorr == True: + if avg == True and mixed_ecorr == True: if restype != 'both': res = whiten_resids(avg_dict, restype=restype) res_no_avg = whiten_resids(no_avg_dict, restype=restype) @@ -2318,14 +2320,14 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal res_pre_no_avg = res_pre_no_avg.to(u.us) res = res.to(u.us) res_no_avg = res_no_avg.to(u.us) - elif avg == True and mixed_ecorr == False: + elif avg == True and mixed_ecorr == False: if restype != 'both': res = whiten_resids(avg_dict, restype=restype) else: res = whiten_resids(avg_dict_pre, restype='prefit') res_pre = whiten_resids(avg_dict, restype='postfit') res_pre = res_pre.to(u.us) - res = res.to(u.us) + res = res.to(u.us) else: if restype != 'both': res = whiten_resids(fitter, restype=restype) @@ -2353,7 +2355,7 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal errs = avg_dict['errors'].to(u.us) errs_no_avg = no_avg_dict['errors'].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict['errors'].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: @@ -2383,7 +2385,7 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal if mixed_ecorr == True: mjds_no_avg = no_avg_dict['mjds'].value - + # Now we need to the orbital phases; start with binary model name if 'orbphase' in kwargs.keys(): @@ -2392,20 +2394,20 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal orbphase = fitter.model.orbital_phase(mjds, radians = False) if avg and mixed_ecorr: no_avg_orbphase = fitter.model.orbital_phase(mjds_no_avg, radians = False) - - + + # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr. # Create combined arrays - - if avg == True and mixed_ecorr == True: + + if avg == True and mixed_ecorr == True: combo_res = np.hstack((res, res_no_avg)) combo_errs = np.hstack((errs, errs_no_avg)) combo_orbphase = np.hstack((orbphase, no_avg_orbphase)) if restype =='both': - combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) + combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) combo_res_pre = np.hstack((res_pre, res_no_avg_pre)) - - + + # Get colorby flag values (obs, PTA, febe, etc.) if 'colorby' in kwargs.keys(): cb = kwargs['colorby'] @@ -2422,15 +2424,15 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal for jjs in no_avg_dict['indices']: no_avg_cb.append(cb[jjs]) no_ecorr_cb = np.array(no_avg_cb) - + cb = np.array(avg_cb) - + # Get the set of unique flag values if avg==True and mixed_ecorr==True: cb = np.hstack((cb,no_ecorr_cb)) - + CB = set(cb) - + if colorby== 'pta': colorscheme = colorschemes['pta'] markerscheme = markers['pta'] @@ -2440,7 +2442,7 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = Fal elif colorby == 'f': colorscheme = colorschemes['febe'] markerscheme = markers['febe'] - + if 'figsize' in kwargs.keys(): figsize = kwargs['figsize'] else: @@ -3058,7 +3060,7 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi Middle: Best fit residuals with no FD parameters. Bottom: Residuals with FD correction included. Note - This function may take a while to run if there are many TOAs. 
- + Arguments --------- fitter [object] : The PINT fitter object. @@ -3093,11 +3095,11 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") else: NB = True - + # Check if want epoch averaged residuals if avg: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - + # Get residuals if 'res' in kwargs.keys(): res = kwargs['res'] @@ -3109,7 +3111,7 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi res = fitter.resids.time_resids.to(u.us) else: res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - + # Check if we want whitened residuals if whitened == True and ('res' not in kwargs.keys()): if avg == True: @@ -3118,7 +3120,7 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi else: res = whiten_resids(fitter) res = res.to(u.us) - + # Get errors if 'errs' in kwargs.keys(): errs = kwargs['errs'] @@ -3143,7 +3145,7 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi rcvr_bcknds = np.array(avg_rcvr_bcknds) # Get the set of unique receiver-bandend combos RCVR_BCKNDS = set(rcvr_bcknds) - + # get frequencies if 'freqs' in kwargs.keys(): freqs = kwargs['freqs'] @@ -3152,7 +3154,7 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi freqs = avg_dict['freqs'].value else: freqs = fitter.toas.get_freqs().value - + # Check if comparing the FD parameters if comp_FD: if axs != None: @@ -3675,7 +3677,7 @@ def plots_for_summary_pdf_nb(fitter, title = None, legends = False): Function to make a composite set of summary plots for sets of TOAs to be put into a summary pdf. This is for Narrowband timing only. For Wideband timing, use `plots_for_summary_pdf_wb`. By definition, this function will save all plots as "psrname"_summary_plot_#.nb.png, where # is - and integer from 1-4. + and integer from 1-4. Arguments --------- @@ -3683,7 +3685,7 @@ def plots_for_summary_pdf_nb(fitter, title = None, legends = False): title [boolean] : If True, will add titles to ALL plots [default: False]. legend [boolean] : If True, will add legends to ALL plots [default: False]. """ - + if fitter.is_wideband: raise ValueError("Cannot use this function with WidebandTOAFitter, please use `plots_for_summary_pdf_wb` instead.") # Need to make four sets of plots @@ -3820,7 +3822,7 @@ def plots_for_summary_pdf_wb(fitter, title = None, legends = False): Function to make a composite set of summary plots for sets of TOAs to be put into a summary pdf. This is for Wideband timing only. For Narrowband timing, use `plots_for_summary_pdf_nb`. By definition, this function will save all plots as "psrname"_summary_plot_#.wb.png, where # is - and integer from 1-4. + and integer from 1-4. 
Arguments --------- @@ -3983,12 +3985,12 @@ def plot_settings(): def get_fitter(yaml): """ Get the fitter and model from a given YAML - + Parameters ========== yaml: str yaml to use for locating latest results - + """ tc = TimingConfiguration(yaml) mo, to = tc.get_model_and_toas(excised=True, usepickle=True) @@ -4004,17 +4006,17 @@ def get_fitter(yaml): def get_avg_years(fo_nb, fo_wb, avg_dict): """ Get MJDS for each data set in years - + Parameters ========== fo: fitter object mo: model object avg_dict: from fo.resids.ecorr_average() - + """ mjd_nb = fo_nb.toas.get_mjds().value years_nb = (mjd_nb - 51544.0)/365.25 + 2000.0 - mjd_wb = fo_wb.toas.get_mjds().value + mjd_wb = fo_wb.toas.get_mjds().value years_wb = (mjd_wb - 51544.0)/365.25 + 2000.0 mjds_avg = avg_dict['mjds'].value years_avg = (mjds_avg - 51544.0)/365.25 + 2000.0 @@ -4023,13 +4025,13 @@ def get_avg_years(fo_nb, fo_wb, avg_dict): def get_backends(fo_nb, fo_wb, avg_dict): """ Grab backends via flags to make plotting easier - + Parameters ========== fo: fitter object mo: model object avg_dict: from fo.resids.ecorr_average() - + """ rcvr_bcknds_nb = np.array(fo_nb.toas.get_flag_value('f')[0]) rcvr_set_nb = set(rcvr_bcknds_nb) @@ -4045,11 +4047,11 @@ def get_backends(fo_nb, fo_wb, avg_dict): def get_DMX_info(fo): """ Get DMX timeseries info from dmxparse - + Parameters ========== fo: fitter object - + """ dmx_dict = pint.utils.dmxparse(fo) DMXs = dmx_dict['dmxs'].value @@ -4061,7 +4063,7 @@ def get_DMX_info(fo): def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): """ Plot color-divided-by-receiver/BE points on any axis - + Parameters ========== ax: axis for plotting @@ -4070,7 +4072,7 @@ def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): err: error bars to plot bknds: list of backend flags associated with TOAs rn_off: the DC red noise offset to subtract (prior to PINT fix) - + """ markers, colorscheme = plot_settings() for i, r_b in enumerate(set(bknds)): @@ -4088,7 +4090,7 @@ def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): if be_legend: handles, labels = ax.get_legend_handles_labels() - labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0])) + labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0])) label_names = {"327_ASP": "ASP 327 MHz", "327_PUPPI": "PUPPI 327 MHz", "430_ASP": "ASP 430 MHz", @@ -4116,7 +4118,7 @@ def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): def rec_labels(axs, bcknds, years_avg): """ Mark transitions between backends - + Parameters ========== axs: axis for plotting @@ -4125,7 +4127,7 @@ def rec_labels(axs, bcknds, years_avg): err: error bars to plot bknds: list of backend flags associated with TOAs rn_off: the DC red noise offset to subtract (prior to PINT fix) - + """ guppi = 2010.1 puppi = 2012.1 @@ -4172,13 +4174,13 @@ def rec_labels(axs, bcknds, years_avg): else: axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI/GUPPI', transform=tform, va=va, ha=ha) axs[0].text((guppi+x_min_yr)/2., ycoord, 'ASP/GASP', transform=tform, va=va, ha=ha) - axs[0].text((guppi+puppi)/2., ycoord, 'ASP/GUPPI', transform=tform, va=va, ha=ha) + axs[0].text((guppi+puppi)/2., ycoord, 'ASP/GUPPI', transform=tform, va=va, ha=ha) elif has_ao and not has_gbt: if has_yuppi: axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI/YUPPI', transform=tform, va=va, ha=ha) else: axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI', transform=tform, va=va, ha=ha) - axs[0].text((puppi+x_min_yr)/2. 
- 0.2, ycoord, 'ASP', transform=tform, va=va, ha=ha) + axs[0].text((puppi+x_min_yr)/2. - 0.2, ycoord, 'ASP', transform=tform, va=va, ha=ha) elif not has_ao and has_gbt: if has_yuppi: axs[0].text((puppi+x_max_yr)/2., ycoord, 'GUPPI/YUPPI', transform=tform, va=va, ha=ha) From 726c7a0d3598fa974c8dd520945695023fa9dc75 Mon Sep 17 00:00:00 2001 From: Michael Lam Date: Mon, 19 Aug 2024 22:04:49 -0400 Subject: [PATCH 116/193] add VEGAS marker styles --- src/pint_pal/plot_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/pint_pal/plot_utils.py b/src/pint_pal/plot_utils.py index 47ca5109..22681b43 100644 --- a/src/pint_pal/plot_utils.py +++ b/src/pint_pal/plot_utils.py @@ -168,8 +168,10 @@ "L-wide_PUPPI": "x", "Rcvr1_2_GASP": "x", "Rcvr1_2_GUPPI": "x", + "Rcvr1_2_VEGAS": "x", "Rcvr_800_GASP": "o", "Rcvr_800_GUPPI": "o", + "Rcvr_800_VEGAS": "o", "S-wide_ASP": "o", "S-wide_PUPPI": "o", "1.5GHz_YUPPI": "x", From 76a0cf6536eb9c4c47d4174f0498bde0389b59c6 Mon Sep 17 00:00:00 2001 From: tcromartie Date: Thu, 22 Aug 2024 02:48:47 +0000 Subject: [PATCH 117/193] More VEGAS defaults, pta no longer default scheme --- src/pint_pal/plot_utils.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/pint_pal/plot_utils.py b/src/pint_pal/plot_utils.py index 22681b43..1291471c 100644 --- a/src/pint_pal/plot_utils.py +++ b/src/pint_pal/plot_utils.py @@ -244,7 +244,7 @@ def call(x): subprocess.call(x,shell=True) -def plot_residuals_time(fitter, restype = 'postfit', colorby='pta', plotsig = False, avg = False, whitened = False, \ +def plot_residuals_time(fitter, restype = 'postfit', colorby='f', plotsig = False, avg = False, whitened = False, \ save = False, legend = True, title = True, axs = None, mixed_ecorr=False, **kwargs): """ Make a plot of the residuals vs. time @@ -812,7 +812,7 @@ def get_FD_delay(pint_model_object,freqs): return -def plot_residuals_freq(fitter, restype = 'postfit', colorby='pta',plotsig = False, avg = False, mixed_ecorr=False,\ +def plot_residuals_freq(fitter, restype = 'postfit', colorby='f',plotsig = False, avg = False, mixed_ecorr=False,\ whitened = False, save = False, legend = True, title = True, axs = None, **kwargs): """ Make a plot of the residuals vs. frequency @@ -1960,6 +1960,8 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = if 'color' in kwargs.keys(): clr = kwargs['color'] else: + print(colorscheme) + print(r_b_label) clr = colorscheme[r_b_label] if plotsig: sig = res[inds]/errs[inds] @@ -2194,7 +2196,7 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin return -def plot_residuals_orb(fitter, restype = 'postfit', colorby='pta', plotsig = False, avg = False, mixed_ecorr=False, \ +def plot_residuals_orb(fitter, restype = 'postfit', colorby='f', plotsig = False, avg = False, mixed_ecorr=False, \ whitened = False, save = False, legend = True, title = True, axs = None, **kwargs): """ Make a plot of the residuals vs. orbital phase. 
@@ -3950,8 +3952,10 @@ def plot_settings(): "L-wide_PUPPI": "#6BA9E2", "Rcvr1_2_GASP": "#407BD5", "Rcvr1_2_GUPPI": "#407BD5", + "Rcvr1_2_VEGAS": "#61C853", "Rcvr_800_GASP": "#61C853", "Rcvr_800_GUPPI": "#61C853", + "Rcvr_800_VEGAS": "#61C853", "S-wide_ASP": "#855CA0", "S-wide_PUPPI": "#855CA0", "1.5GHz_YUPPI": "#45062E", @@ -3969,8 +3973,10 @@ def plot_settings(): "L-wide_PUPPI": "x", "Rcvr1_2_GASP": "x", "Rcvr1_2_GUPPI": "x", + "Rcvr1_2_VEGAS": "x", "Rcvr_800_GASP": "x", "Rcvr_800_GUPPI": "x", + "Rcvr_800_VEGAS": "x", "S-wide_ASP": "x", "S-wide_PUPPI": "x", "1.5GHz_YUPPI": "x", @@ -4101,8 +4107,10 @@ def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): "L-wide_PUPPI": "PUPPI L-wide", "Rcvr1_2_GASP": "GASP L-band", "Rcvr1_2_GUPPI": "GUPPI L-band", + "Rcvr1_2_VEGAS": "VEGAS L-band", "Rcvr_800_GASP": "GASP 820 MHz", "Rcvr_800_GUPPI": "GUPPI 820 MHz", + "Rcvr_800_VEGAS": "VEGAS 820 MHz", "S-wide_ASP": "ASP S-wide", "S-wide_PUPPI": "PUPPI S-wide", "1.5GHz_YUPPI": "YUPPI 1.5 GHz", From bdb8d8f17ed65444af6322be6f3176ea3c47da84 Mon Sep 17 00:00:00 2001 From: tcromartie Date: Thu, 22 Aug 2024 02:52:26 +0000 Subject: [PATCH 118/193] Take out print statements for testing --- src/pint_pal/plot_utils.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/pint_pal/plot_utils.py b/src/pint_pal/plot_utils.py index 1291471c..a32757cb 100644 --- a/src/pint_pal/plot_utils.py +++ b/src/pint_pal/plot_utils.py @@ -1960,8 +1960,6 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = if 'color' in kwargs.keys(): clr = kwargs['color'] else: - print(colorscheme) - print(r_b_label) clr = colorscheme[r_b_label] if plotsig: sig = res[inds]/errs[inds] From 21fab4e73c0ca8e9cbf8e539cd08d1555ffde810 Mon Sep 17 00:00:00 2001 From: Michael Lam Date: Fri, 23 Aug 2024 05:55:25 +0000 Subject: [PATCH 119/193] allow bin_width as argument to setup_dmx; introduce type hinting --- src/pint_pal/dmx_utils.py | 123 ++++++++++++++++++++++++++++---------- 1 file changed, 93 insertions(+), 30 deletions(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index 024f7583..2782e8f2 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -1,5 +1,7 @@ +from typing import Any, Optional, Tuple import numpy as np from astropy import log +import pint from pint_pal.utils import apply_cut_flag, apply_cut_select class DMXParameter: @@ -9,7 +11,7 @@ class DMXParameter: aliases = {'idx':'index', 'val':'dmx_val', 'err':'dmx_err', 'ep':'epoch', 'r1':'low_mjd', 'r2':'high_mjd', 'f1':'low_freq', 'f2':'high_freq', 'mask':'toa_mask'} - def __init__(self): + def __init__(self) -> None: """ """ self.idx = 0 # index label [int] @@ -22,17 +24,17 @@ def __init__(self): self.f2 = 0.0 # highest frequency [MHz] self.mask = [] # Boolean index array for selecting TOAs - def __setattr__(self, name, value): + def __setattr__(self, name: str, value: Any) -> None: name = self.aliases.get(name, name) object.__setattr__(self, name, value) - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: if name == 'aliases': raise AttributeError # http://nedbatchelder.com/blog/201010/surprising_getattr_recursion.html name = self.aliases.get(name, name) return object.__getattribute__(self, name) - def print_dmx(self, range_only=False, fit_flag=True, fortran=False): + def print_dmx(self, range_only: bool = False, fit_flag: bool = True, fortran: bool = False) -> None: """ Print TEMPO-style DMX parameter. 
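
        A full printout is a block of TEMPO-style par-file lines, one per
        attribute, along these (illustrative) lines, where the middle "1"
        is the fit flag:

            DMX_0001 -2.0508e-03 1 2.3932e-04
            DMXEP_0001 53358.7270
            DMXR1_0001 53358.5886
            DMXR2_0001 53358.8653
            DMXF1_0001 422.160
            DMXF2_0001 432.031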
@@ -60,7 +62,7 @@ def print_dmx(self, range_only=False, fit_flag=True, fortran=False): print(DMX_str) -def group_dates(toas, group_width=0.1): +def group_dates(toas: pint.toa.TOAs, group_width: float = 0.1) -> list: """ Returns MJDs of groups of TOAs no wider than a specified amount. @@ -93,8 +95,13 @@ def group_dates(toas, group_width=0.1): return group_mjds -def get_dmx_ranges(toas, bin_width=1.0, pad=0.0, strict_inclusion=True, - check=True): +def get_dmx_ranges( + toas: pint.toa.TOAs, + bin_width: float = 1.0, + pad: float = 0.0, + strict_inclusion: bool = True, + check: bool = True +) -> list: """ Returns a list of low and high MJDs defining DMX ranges, covering all TOAs. @@ -151,8 +158,14 @@ def get_dmx_ranges(toas, bin_width=1.0, pad=0.0, strict_inclusion=True, return dmx_ranges -def get_gasp_dmx_ranges(toas, group_width=0.1, bin_width=15.0, pad=0.0, - strict_inclusion=True, check=True): +def get_gasp_dmx_ranges( + toas: pint.toa.TOAs, + group_width: float = 0.1, + bin_width: float = 15.0, + pad: float = 0.0, + strict_inclusion: bool = True, + check: bool = True +) -> list: """ Return a list of DMX ranges that group GASP TOAs into bins. @@ -221,8 +234,15 @@ def get_gasp_dmx_ranges(toas, group_width=0.1, bin_width=15.0, pad=0.0, return dmx_ranges -def expand_dmx_ranges(toas, dmx_ranges, bin_width=1.0, pad=0.0, - strict_inclusion=True, add_new_ranges=False, check=True): +def expand_dmx_ranges( + toas: pint.toa.TOAs, + dmx_ranges: list, + bin_width: float = 1.0, + pad: float = 0.0, + strict_inclusion: bool = True, + add_new_ranges: bool = False, + check: bool = True +) -> list: """ Expands DMX ranges to accommodate new TOAs up to a maximum bin width. @@ -297,7 +317,12 @@ def expand_dmx_ranges(toas, dmx_ranges, bin_width=1.0, pad=0.0, return dmx_ranges -def check_dmx_ranges(toas, dmx_ranges, full_return=False, quiet=False): +def check_dmx_ranges( + toas: pint.toa.TOAs, + dmx_ranges: list, + full_return: bool = False, + quiet: bool = False +) -> Tuple[list, list, list, list, list, list] | None: """ Ensures all TOAs match only one DMX bin and all bins have at least one TOA. @@ -392,7 +417,7 @@ def check_dmx_ranges(toas, dmx_ranges, full_return=False, quiet=False): return masks, ibad, iover, iempty, inone, imult -def get_dmx_mask(toas, low_mjd, high_mjd, strict_inclusion=True): +def get_dmx_mask(toas: pint.toa.TOAs, low_mjd: float, high_mjd: float, strict_inclusion: bool = True) -> np.ndarray: """ Return a Boolean index array for selecting TOAs from toas in a DMX range. @@ -413,7 +438,7 @@ def get_dmx_mask(toas, low_mjd, high_mjd, strict_inclusion=True): return mask -def get_dmx_epoch(toas, weighted_average=True): +def get_dmx_epoch(toas: pint.toa.TOAs, weighted_average: bool = True) -> float: """ Return the epoch of a DMX bin. @@ -435,7 +460,7 @@ def get_dmx_epoch(toas, weighted_average=True): return epoch -def get_dmx_freqs(toas, allow_wideband=True): +def get_dmx_freqs(toas: pint.toa.TOAs, allow_wideband: bool = True) -> Tuple[float, float]: """ Return the lowest and highest frequency of the TOAs in a DMX bin. 
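
    For example (illustrative numbers), a bin whose TOAs span 722-919 MHz
    returns (722.0, 919.0); these are the frequencies tested by
    check_frequency_ratio(). A sketch, assuming `mask` is the Boolean
    array returned by get_dmx_mask() for the same range:

        low_freq, high_freq = get_dmx_freqs(toas[mask])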
@@ -470,8 +495,15 @@ def get_dmx_freqs(toas, allow_wideband=True): return low_freq, high_freq -def check_frequency_ratio(toas, dmx_ranges, frequency_ratio=1.1, - strict_inclusion=True, allow_wideband=True, invert=False, quiet=False): +def check_frequency_ratio( + toas: pint.toa.TOAs, + dmx_ranges: list, + frequency_ratio: float = 1.1, + strict_inclusion: bool = True, + allow_wideband: bool = True, + invert: bool = False, + quiet: bool = False +) -> Tuple[np.ndarray, np.ndarray]: """ Check that the TOAs in a DMX bin pass a frequency ratio criterion. @@ -522,9 +554,20 @@ def check_frequency_ratio(toas, dmx_ranges, frequency_ratio=1.1, np.arange(len(dmx_ranges))[np.logical_not(dmx_range_mask)] -def check_solar_wind(toas, dmx_ranges, model, max_delta_t=0.1, bin_width=1.0, - solar_n0=5.0, allow_wideband=True, strict_inclusion=True, pad=0.0, - check=True, return_only=False, quiet=False): +def check_solar_wind( + toas: pint.toa.TOAs, + dmx_ranges: list, + model: pint.models.timing_model.TimingModel, + max_delta_t: float = 0.1, + bin_width: float = 1.0, + solar_n0: float = 5.0, + allow_wideband: bool = True, + strict_inclusion: bool = True, + pad: float = 0.0, + check: bool = True, + return_only: bool = False, + quiet: bool = False +) -> list: """ Split DMX ranges based on influence of the solar wind. @@ -608,7 +651,7 @@ def check_solar_wind(toas, dmx_ranges, model, max_delta_t=0.1, bin_width=1.0, return dmx_ranges -def add_dmx(model, bin_width=1.0): +def add_dmx(model: pint.models.timing_model.TimingModel, bin_width: float = 1.0) -> None: """ Checks for DispersionDMX and ensures the bin width is the only parameter. @@ -628,7 +671,7 @@ def add_dmx(model, bin_width=1.0): dmx.DMX.set(bin_width) -def model_dmx_params(model): +def model_dmx_params(model: pint.models.timing_model.TimingModel) -> Tuple[list, np.ndarray, np.ndarray]: """ Get DMX ranges, values, and uncertainties from a PINT model object. @@ -655,7 +698,7 @@ def model_dmx_params(model): return dmx_ranges, dmx_vals, dmx_errs -def remove_all_dmx_ranges(model, quiet=False): +def remove_all_dmx_ranges(model: pint.models.timing_model.TimingModel, quiet: bool = False) -> None: """ Uses PINT to remove all DMX parameter ranges from a timing model. @@ -675,8 +718,15 @@ def remove_all_dmx_ranges(model, quiet=False): pass -def setup_dmx(model, toas, quiet=True, frequency_ratio=1.1, max_delta_t=0.1, - freeze_DM=True): +def setup_dmx( + model: pint.models.timing_model.TimingModel, + toas: pint.toa.TOAs, + quiet: bool = True, + frequency_ratio: float = 1.1, + max_delta_t: float = 0.1, + bin_width: Optional[float] = None, + freeze_DM: bool = True +) -> pint.toa.TOAs: """ Sets up and checks a DMX model using a number of defaults. @@ -688,6 +738,7 @@ def setup_dmx(model, toas, quiet=True, frequency_ratio=1.1, max_delta_t=0.1, the frequencies used are returned by get_dmx_freqs(). max_delta_t is the time delay [us] above which a DMX range will be split. quiet=True turns off some of the logged warnings and info. + bin_width=constant bin width if provided, otherwise use observatory defaults if None freeze_DM=True ensures the mean DM parameter is not fit. 
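
    A minimal call pattern (illustrative, assuming `model` and `toas` are an
    already-loaded PINT timing model and TOAs object):

        toas = setup_dmx(model, toas)                 # observatory defaults (0.5 d Arecibo-only, else 6.5 d)
        toas = setup_dmx(model, toas, bin_width=6.5)  # force a constant 6.5-day bin width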
""" @@ -714,8 +765,12 @@ def setup_dmx(model, toas, quiet=True, frequency_ratio=1.1, max_delta_t=0.1, adjust_old_dmx = False # Set up DMX model - if toas.observatories == set(['arecibo']): bin_width = 0.5 # day - else: bin_width = 6.5 #day + if bin_width is None: #use observatory defaults + if toas.observatories == set(['arecibo']): + bin_width = 0.5 # day + else: + bin_width = 6.5 #day + # Calculate GASP-era ranges, if applicable dmx_ranges = get_gasp_dmx_ranges(toas, group_width=0.1, bin_width=15.0, pad=0.05, check=False) @@ -813,9 +868,17 @@ def setup_dmx(model, toas, quiet=True, frequency_ratio=1.1, max_delta_t=0.1, return toas -def make_dmx(toas, dmx_ranges, dmx_vals=None, dmx_errs=None, - strict_inclusion=True, weighted_average=True, allow_wideband=True, - start_idx=1, print_dmx=False): +def make_dmx( + toas: pint.toa.TOAs, + dmx_ranges: list, + dmx_vals: Optional[np.ndarray] = None, + dmx_errs: Optional[np.ndarray] = None, + strict_inclusion: bool = True, + weighted_average: bool = True, + allow_wideband: bool = True, + start_idx: int = 1, + print_dmx: bool = False +): """ Uses convenience functions to assemble a TEMPO-style DMX parameters. From becf6a96410e22275d363a2dd10a177a9f6f8d19 Mon Sep 17 00:00:00 2001 From: Michael Lam Date: Fri, 23 Aug 2024 07:43:49 +0000 Subject: [PATCH 120/193] switch | to Union for earlier python support --- src/pint_pal/dmx_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index 2782e8f2..63554e9b 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -322,7 +322,7 @@ def check_dmx_ranges( dmx_ranges: list, full_return: bool = False, quiet: bool = False -) -> Tuple[list, list, list, list, list, list] | None: +) -> Union[Tuple[list, list, list, list, list, list],None]: """ Ensures all TOAs match only one DMX bin and all bins have at least one TOA. 
From a9ff97bde6061b7040e6a61113ba9998c65edc53 Mon Sep 17 00:00:00 2001 From: Michael Lam Date: Fri, 23 Aug 2024 08:01:27 +0000 Subject: [PATCH 121/193] switch | to Union for earlier python support --- src/pint_pal/dmx_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index 63554e9b..55a65383 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -1,4 +1,4 @@ -from typing import Any, Optional, Tuple +from typing import Any, Optional, Tuple, Union import numpy as np from astropy import log import pint From 1cdd9f7c85cafa31f6f5dd2c00fd3caea78424e3 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Sun, 25 Aug 2024 03:17:16 +0000 Subject: [PATCH 122/193] adding a gibbs sampler for noise analyses --- src/pint_pal/gibbs_sampler.py | 615 ++++++++++++++++++++++++++++++++++ 1 file changed, 615 insertions(+) create mode 100644 src/pint_pal/gibbs_sampler.py diff --git a/src/pint_pal/gibbs_sampler.py b/src/pint_pal/gibbs_sampler.py new file mode 100644 index 00000000..e33115a3 --- /dev/null +++ b/src/pint_pal/gibbs_sampler.py @@ -0,0 +1,615 @@ +import numpy as np +from tqdm import tqdm +import scipy.linalg as sl +from functools import cached_property +import os +import glob +import warnings +from enterprise_extensions import model_utils, blocks +from PTMCMCSampler.PTMCMCSampler import PTSampler as ptmcmc +from enterprise.signals import signal_base, gp_signals +from scipy.linalg import solve_triangular as st_solve +from scipy.linalg import cho_factor, cho_solve + + +class BayesPowerSingle(object): + + """ + The Gibbs Method class used for single-pulsar noise analyses. + + Based on: + + Article by van Haasteren & Vallisneri (2014), + "New advances in the Gaussian-process approach + to pulsar-timing data analysis", + Physical Review D, Volume 90, Issue 10, id.104012 + arXiv:1407.1838 + + Initial structure of the code is based on https://github.com/jellis18/gibbs_student_t + + Authors: + + S. R. Taylor + N. Laal + """ + + def __init__( + self, + psr=None, + Tspan=None, + select="backend", + white_vary=False, + inc_ecorr=False, + ecorr_type="kernel", + noise_dict=None, + tm_marg=False, + rn_components=30, + dm_components=None, + chrom_components=None, + dm_type = "gibbs", + chrom_type = "gibbs", + tnequad=True, + log10rhomin=-9.0, + log10rhomax=-4.0, + ): + """ + Parameters + ----------- + + psr : object + instance of an ENTERPRISE psr object for a single pulsar + + Tspan: float (optional) + if given, the baseline of the pulsar is fixed to the input value. If not, + baseline is determined inetrnally + + select: str + the selection of backend ('backend' or 'none') for the white-noise parameters + + white_vary: bool + whether to vary the white noise + + inc_ecorr: bool + whether to include ecorr + + ecorr_type: str + the type of ecorr to use. Choose between 'basis' or 'kernel' + + noise_dict: dict + white noise dictionary in case 'white_vary' is set to False + + tm_marg: bool + whether to marginalize over timing model parameters (do not use this if you are varying the white noise!) + + rn_components: int + number of red noise Fourier modes to include + + dm_components: int + number of DM noise Fourier modes to include + + chrom_components: int + number of chromatic noise Fourier modes to include + + dm_type: str + the type of DM noise to use. Choose between 'gibbs' or 'mcmc' or None (for DMX) + + chrom_type: str + the type of chromatic noise to use. 
Choose between 'gibbs' or 'mcmc' or None (for no chromatic noise) + + log10rhomin: float + lower bound for the log10 of the rho parameter. + + log10rhomax: float + upper bound for the log10 of the rho parameter + + tnequad: string + whether to use the temponest convension of efac and equad + """ + + self.psr = [psr] + if Tspan: + self.Tspan = Tspan + else: + self.Tspan = model_utils.get_tspan(self.psr) + self.name = self.psr[0].name + self.inc_ecorr = inc_ecorr + self.ecorr_type = ecorr_type + self.white_vary = white_vary + self.tm_marg = tm_marg + self.wn_names = ["efac", "equad", "ecorr"] + self.rhomin = log10rhomin + self.rhomax = log10rhomax + self.rn_components = rn_components + self.dm_components = dm_components + self.chrom_components = chrom_components + self.dm_type = dm_type + self.chrom_type = chrom_type + self.low = 10 ** (2 * self.rhomin) + self.high = 10 ** (2 * self.rhomax) + + # Making the pta object + if self.tm_marg: + tm = gp_signals.MarginalizingTimingModel(use_svd=True) + if self.white_vary: + warnings.warn( + "***FYI: the timing model is marginalized for. This will slow down the WN sampling!!***" + ) + else: + tm = gp_signals.TimingModel(use_svd=True) + + if self.ecorr_type == "basis": + wn = blocks.white_noise_block( + vary=self.white_vary, + inc_ecorr=self.inc_ecorr, + gp_ecorr=True, + select=select, + tnequad=tnequad, + ) + else: + wn = blocks.white_noise_block( + vary=self.white_vary, + inc_ecorr=self.inc_ecorr, + gp_ecorr=False, + select=select, + tnequad=tnequad, + ) + + rn = blocks.common_red_noise_block( + psd="spectrum", + prior="log-uniform", + Tspan=self.Tspan, + logmin=self.rhomin, + logmax=self.rhomax, + components=rn_components, + gamma_val=None, + name="gw", + ) + s = tm + wn + rn + self.pta = signal_base.PTA( + [s(p) for p in self.psr], + lnlikelihood=signal_base.LogLikelihoodDenseCholesky, + ) + if not white_vary: + self.pta.set_default_params(noise_dict) + self.Nmat = self.pta.get_ndiag(params={})[0] + self.TNr = self.pta.get_TNr(params={})[0] + self.TNT = self.pta.get_TNT(params={})[0] + else: + self.Nmat = None + + if self.inc_ecorr and "basis" in self.ecorr_type: + # grabbing priors on ECORR params + for ct, par in enumerate(self.pta.params): + if "ecorr" in str(par): + ind = ct + ecorr_priors = str(self.pta.params[ind].params[0]) + ecorr_priors = ecorr_priors.split("(")[1].split(")")[0].split(", ") + self.ecorrmin, self.ecorrmax = ( + 10 ** (2 * float(ecorr_priors[0].split("=")[1])), + 10 ** (2 * float(ecorr_priors[1].split("=")[1])), + ) + + # Getting residuals + self._residuals = self.pta.get_residuals()[0] + # Intial guess for the model params + self._xs = np.array([p.sample() + for p in self.pta.params], dtype=object) + # Initializign the b-coefficients. The shape is 2*freq_bins if tm_marg + # = True. + self._b = np.zeros(self.pta.get_basis(self._xs)[0].shape[1]) + self.Tmat = self.pta.get_basis(params={})[0] + self.phiinv = None + + # find basis indices of GW process + self.gwid = [] + ct = 0 + psigs = [sig for sig in self.pta.signals.keys() if self.name in sig] + for sig in psigs: + Fmat = self.pta.signals[sig].get_basis() + if "gw" in self.pta.signals[sig].name: + self.gwid.append(ct + np.arange(0, Fmat.shape[1])) + # Avoid None-basis processes. + # Also assume red + GW signals share basis. 
+ if Fmat is not None and "red" not in sig: + ct += Fmat.shape[1] + + @cached_property + def params(self): + return self.pta.params + + @cached_property + def param_names(self): + return self.pta.param_names + + def map_params(self, xs): + return self.pta.map_params(xs) + + @cached_property + def get_red_param_indices(self): + ind = [] + for ct, par in enumerate(self.param_names): + if "log10_A" in par or "gamma" in par or "rho" in par: + ind.append(ct) + return np.array(ind) + + @cached_property + def get_efacequad_indices(self): + ind = [] + if "basis" in self.ecorr_type: + for ct, par in enumerate(self.param_names): + if "efac" in par or "equad" in par: + ind.append(ct) + else: + for ct, par in enumerate(self.param_names): + if "ecorr" in par or "efac" in par or "equad" in par: + ind.append(ct) + return np.array(ind) + + @cached_property + def get_basis_ecorr_indices(self): + ind = [] + for ct, par in enumerate(self.param_names): + if "ecorr" in par: + ind.append(ct) + return np.array(ind) + + def update_red_params(self, xs): + """ + Function to perform log10_rho updates given the Fourier coefficients. + """ + tau = self._b[tuple(self.gwid)] ** 2 + tau = (tau[0::2] + tau[1::2]) / 2 + + Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) + x = np.random.default_rng().uniform(0, 1, size=tau.shape) + rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low)) + xs[-1] = 0.5 * np.log10(rhonew) + return xs + + def update_b(self, xs): + """ + Function to perform updates on Fourier coefficients given other model parameters. + """ + params = self.pta.map_params(np.hstack(xs)) + self._phiinv = self.pta.get_phiinv(params, logdet=False)[0] + + try: + TNT = self.TNT.copy() + except BaseException: + T = self.Tmat + TNT = self.Nmat.solve(T, left_array=T) + try: + TNr = self.TNr.copy() + except BaseException: + T = self.Tmat + TNr = self.Nmat.solve(self._residuals, left_array=T) + + np.fill_diagonal(TNT, TNT.diagonal() + self._phiinv) + try: + chol = cho_factor( + TNT, + lower=True, + overwrite_a=False, + check_finite=False) + mean = cho_solve( + chol, + b=TNr, + overwrite_b=False, + check_finite=False) + self._b = mean + st_solve( + chol[0], + np.random.normal(loc=0, scale=1, size=TNT.shape[0]), + lower=True, + unit_diagonal=False, + overwrite_b=False, + check_finite=False, + trans=1, + ) + except np.linalg.LinAlgError: + if self.bchain.any(): + self._b = self.bchain[ + np.random.default_rng().integers(0, len(self.bchain)) + ] + else: + bchain = np.memmap( + self._savepath + "/chain_1", + dtype="float32", + mode="r", + shape=(self.niter, self.len_x + self.len_b), + )[:, -len(self._b):] + self._b = bchain[np.random.default_rng().integers( + 0, len(bchain))] + + def update_white_params(self, xs, iters=10): + """ + Function to perform WN updates given other model parameters. + If kernel ecorr is chosen, WN includes ecorr as well. 
+ """ + # get white noise parameter indices + wind = self.get_efacequad_indices + xnew = xs + x0 = xnew[wind].copy() + lnlike0, lnprior0 = self.get_lnlikelihood_white( + x0), self.get_wn_lnprior(x0) + lnprob0 = lnlike0 + lnprior0 + + for ii in range( + self.start_wn_iter + 1, + self.start_wn_iter + iters + 1): + x0, lnlike0, lnprob0 = self.sampler_wn.PTMCMCOneStep( + x0, lnlike0, lnprob0, ii + ) + xnew[wind] = x0 + self.start_wn_iter = ii + + # Do some caching of "later needed" parameters for improved performance + self.Nmat = self.pta.get_ndiag(self.map_params(xnew))[0] + Tmat = self.Tmat + if "basis" not in self.ecorr_type: + self.TNT = self.Nmat.solve(Tmat, left_array=Tmat) + else: + TN = Tmat / self.Nmat[:, None] + self.TNT = Tmat.T @ TN + residuals = self._residuals + self.rNr = np.sum(residuals**2 / self.Nmat) + self.logdet_N = np.sum(np.log(self.Nmat)) + self.d = TN.T @ residuals + + return xnew + + def update_basis_ecorr_params(self, xs, iters=10): + """ + Function to perform basis ecorr updates. + """ + # get white noise parameter indices + eind = self.get_basis_ecorr_indices + xnew = xs + x0 = xnew[eind].copy() + lnlike0, lnprior0 = self.get_basis_ecorr_lnlikelihood( + x0 + ), self.get_basis_ecorr_lnprior(x0) + lnprob0 = lnlike0 + lnprior0 + + for ii in range( + self.start_ec_iter + 1, + self.start_ec_iter + iters + 1): + x0, lnlike0, lnprob0 = self.sampler_ec.PTMCMCOneStep( + x0, lnlike0, lnprob0, ii + ) + xnew[eind] = x0 + self.start_ec_iter = ii + + return xnew + + def get_lnlikelihood_white(self, xs): + """ + Function to calculate WN log-liklihood. + """ + x0 = self._xs.copy() + x0[self.get_efacequad_indices] = xs + + params = self.map_params(x0) + Nmat = self.pta.get_ndiag(params)[0] + # whitened residuals + yred = self._residuals - self.Tmat @ self._b + try: + if "basis" not in self.ecorr_type: + rNr, logdet_N = Nmat.solve(yred, left_array=yred, logdet=True) + else: + rNr = np.sum(yred**2 / Nmat) + logdet_N = np.sum(np.log(Nmat)) + except BaseException: + return -np.inf + # first component of likelihood function + loglike = -0.5 * (logdet_N + rNr) + + return loglike + + def get_basis_ecorr_lnlikelihood(self, xs): + """ + Function to calculate basis ecorr log-liklihood. + """ + x0 = np.hstack(self._xs.copy()) + x0[self.get_basis_ecorr_indices] = xs + + params = self.map_params(x0) + # start likelihood calculations + loglike = 0 + # get auxiliaries + phiinv, logdet_phi = self.pta.get_phiinv(params, logdet=True)[0] + # first component of likelihood function + loglike += -0.5 * (self.logdet_N + self.rNr) + # Red noise piece + Sigma = self.TNT + np.diag(phiinv) + try: + cf = sl.cho_factor(Sigma) + expval = sl.cho_solve(cf, self.d) + except np.linalg.LinAlgError: + return -np.inf + + logdet_sigma = np.sum(2 * np.log(np.diag(cf[0]))) + loglike += 0.5 * (self.d @ expval - logdet_sigma - logdet_phi) + + return loglike + + def get_wn_lnprior(self, xs): + """ + Function to calculate WN log-prior. + """ + x0 = self._xs.copy() + x0[self.get_efacequad_indices] = xs + + return np.sum([p.get_logpdf(value=x0[ct]) + for ct, p in enumerate(self.params)]) + + def get_basis_ecorr_lnprior(self, xs): + """ + Function to calculate basis ecorr log-prior. 
+ """ + x0 = self._xs.copy() + x0[self.get_basis_ecorr_indices] = xs + + return np.sum([p.get_logpdf(value=x0[ct]) + for ct, p in enumerate(self.params)]) + + def sample( + self, + niter=int(1e4), + wniters=30, + eciters=10, + savepath=None, + SCAMweight=30, + AMweight=15, + DEweight=50, + covUpdate=1000, + burn=10000, + **kwargs + ): + """ + Gibbs Sampling + + Parameters + ----------- + niter: integer + total number of Gibbs sampling iterations + + wniters: + number of white noise MCMC sampling iterations within each Gibbs step + + eciters: + number of basis ecorr MCMC sampling iterations within each Gibbs step + + savepath: str + the path to save the chains + + covUpdate: integer + Number of iterations between AM covariance updates + + SCAMweight: integer + Weight of SCAM jumps in overall jump cycle + + AMweight: integer + Weight of AM jumps in overall jump cycle + + DEweight: integer + Weight of DE jumps in overall jump cycle + + kwargs: dict + PTMCMC initialization settings not mentioned above + """ + self.start_wn_iter = 0 + self.start_ec_iter = 0 + + os.makedirs(savepath, exist_ok=True) + + if self.white_vary: + # large number to avoid saving the white noise choice in a txt file + isave = int(4e9) + thin = 1 + Niter = int(niter * wniters + 1) + + x0 = self._xs[self.get_efacequad_indices] + ndim = len(x0) + cov = np.diag( + np.ones(ndim) * 0.01**2 + ) # helps to tune MCMC proposal distribution + self.sampler_wn = ptmcmc( + ndim, + self.get_lnlikelihood_white, + self.get_wn_lnprior, + cov, + outDir=savepath, + resume=False, + ) + self.sampler_wn.initialize( + Niter=Niter, + isave=isave, + thin=thin, + SCAMweight=SCAMweight, + AMweight=AMweight, + DEweight=DEweight, + covUpdate=covUpdate, + burn=burn, + **kwargs + ) + + if "basis" in self.ecorr_type and self.white_vary: + x0 = self._xs[self.get_basis_ecorr_indices] + ndim = len(x0) + cov = np.diag(np.ones(ndim) * 0.01**2) + self.sampler_ec = ptmcmc( + ndim, + self.get_basis_ecorr_lnlikelihood, + self.get_basis_ecorr_lnprior, + cov, + outDir=savepath, + resume=False, + ) + self.sampler_ec.initialize( + Niter=Niter, + isave=isave, + thin=thin, + SCAMweight=SCAMweight, + AMweight=AMweight, + DEweight=DEweight, + covUpdate=covUpdate, + burn=burn, + **kwargs + ) + + np.savetxt(savepath + "/pars.txt", + list(map(str, self.pta.param_names)), fmt="%s") + np.savetxt( + savepath + "/priors.txt", + list(map(lambda x: str(x.__repr__()), self.pta.params)), + fmt="%s", + ) + rn_freqs = np.arange( + 1 / self.Tspan, + (self.rn_components + 0.001) / self.Tspan, + 1 / self.Tspan) + np.save(savepath + "/rn_freqs.npy", rn_freqs) + + if self.dm_components is not None: + dm_freqs = np.arange( + 1 / self.Tspan, + (self.dm_components + 0.001) / self.Tspan, + 1 / self.Tspan) + np.save(savepath + "/dm_freqs.npy", dm_freqs) + if self.chrom_components is not None: + chrom_freqs = np.arange( + 1 / self.Tspan, + (self.chrom_components + 0.001) / self.Tspan, + 1 / self.Tspan) + np.save(savepath + "/chrom_freqs.npy", chrom_freqs) + [os.remove(dpa) for dpa in glob.glob(savepath + "/*jump.txt")] + + xnew = self._xs.copy() + + len_b = len(self._b) + len_x = len(np.hstack(self._xs)) + self._savepath = savepath + + fp = np.lib.format.open_memmap( + savepath + "/chain_1.npy", + mode="w+", + dtype="float32", + shape=(niter, len_x + len_b), + fortran_order=False, + ) + + pbar = tqdm(range(niter), colour="GREEN") + pbar.set_description("Sampling %s" % self.name) + for ii in pbar: + if self.white_vary: + xnew = self.update_white_params(xnew, iters=wniters) + + if self.inc_ecorr 
and "basis" in self.ecorr_type: + xnew = self.update_basis_ecorr_params(xnew, iters=eciters) + + self.update_b(xs=xnew) + xnew = self.update_red_params(xs=xnew) + + fp[ii, -len_b:] = self._b + fp[ii, 0:len_x] = np.hstack(xnew) + From 39e72024698d316b31c642d9375e8ac26ebcaefa Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Sun, 25 Aug 2024 03:35:25 +0000 Subject: [PATCH 123/193] i initially added the wrong version of the gibbs sampler -- lol --- src/pint_pal/gibbs_sampler.py | 237 ++++++++++++++++++++++++++-------- 1 file changed, 181 insertions(+), 56 deletions(-) diff --git a/src/pint_pal/gibbs_sampler.py b/src/pint_pal/gibbs_sampler.py index e33115a3..b98e6c12 100644 --- a/src/pint_pal/gibbs_sampler.py +++ b/src/pint_pal/gibbs_sampler.py @@ -12,7 +12,7 @@ from scipy.linalg import cho_factor, cho_solve -class BayesPowerSingle(object): +class GibbsSampler(object): """ The Gibbs Method class used for single-pulsar noise analyses. @@ -31,6 +31,7 @@ class BayesPowerSingle(object): S. R. Taylor N. Laal + J. G. Baier """ def __init__( @@ -38,19 +39,21 @@ def __init__( psr=None, Tspan=None, select="backend", - white_vary=False, + vary_wn=False, inc_ecorr=False, ecorr_type="kernel", noise_dict=None, tm_marg=False, + vary_rn=True, rn_components=30, - dm_components=None, - chrom_components=None, - dm_type = "gibbs", - chrom_type = "gibbs", tnequad=True, log10rhomin=-9.0, log10rhomax=-4.0, + vary_dm=False, + dm_components=50, + vary_chrom=False, + chrom_components=50, + include_quadratic=False, ): """ Parameters @@ -66,7 +69,7 @@ def __init__( select: str the selection of backend ('backend' or 'none') for the white-noise parameters - white_vary: bool + self.vary_wn: bool whether to vary the white noise inc_ecorr: bool @@ -76,7 +79,7 @@ def __init__( the type of ecorr to use. Choose between 'basis' or 'kernel' noise_dict: dict - white noise dictionary in case 'white_vary' is set to False + white noise dictionary in case 'self.vary_wn' is set to False tm_marg: bool whether to marginalize over timing model parameters (do not use this if you are varying the white noise!) @@ -90,12 +93,15 @@ def __init__( chrom_components: int number of chromatic noise Fourier modes to include - dm_type: str - the type of DM noise to use. Choose between 'gibbs' or 'mcmc' or None (for DMX) + dm_var: bool + wheter to include a free spectrum gibbs dm_gp - chrom_type: str - the type of chromatic noise to use. Choose between 'gibbs' or 'mcmc' or None (for no chromatic noise) - + chrom_var: bool + whether to include a free spectrum gibbs chrom_gp + + include_quadratic: bool + whether or not to fit out a quadratic trend in chrom_gp (think DM2) + log10rhomin: float lower bound for the log10 of the rho parameter. 
@@ -114,7 +120,7 @@ def __init__( self.name = self.psr[0].name self.inc_ecorr = inc_ecorr self.ecorr_type = ecorr_type - self.white_vary = white_vary + self.vary_wn = vary_wn self.tm_marg = tm_marg self.wn_names = ["efac", "equad", "ecorr"] self.rhomin = log10rhomin @@ -122,15 +128,17 @@ def __init__( self.rn_components = rn_components self.dm_components = dm_components self.chrom_components = chrom_components - self.dm_type = dm_type - self.chrom_type = chrom_type + self.vary_rn = vary_rn + self.vary_dm = vary_dm + self.vary_chrom = vary_chrom + self.include_quadratic = include_quadratic self.low = 10 ** (2 * self.rhomin) self.high = 10 ** (2 * self.rhomax) # Making the pta object if self.tm_marg: tm = gp_signals.MarginalizingTimingModel(use_svd=True) - if self.white_vary: + if self.vary_wn: warnings.warn( "***FYI: the timing model is marginalized for. This will slow down the WN sampling!!***" ) @@ -139,7 +147,7 @@ def __init__( if self.ecorr_type == "basis": wn = blocks.white_noise_block( - vary=self.white_vary, + vary=self.vary_wn, inc_ecorr=self.inc_ecorr, gp_ecorr=True, select=select, @@ -147,29 +155,62 @@ def __init__( ) else: wn = blocks.white_noise_block( - vary=self.white_vary, + vary=self.vary_wn, inc_ecorr=self.inc_ecorr, gp_ecorr=False, select=select, tnequad=tnequad, ) - rn = blocks.common_red_noise_block( - psd="spectrum", - prior="log-uniform", - Tspan=self.Tspan, - logmin=self.rhomin, - logmax=self.rhomax, - components=rn_components, - gamma_val=None, - name="gw", - ) - s = tm + wn + rn + if self.vary_rn: + rn = blocks.red_noise_block( + psd="spectrum", + prior="log-uniform", + Tspan=self.Tspan, + #logmin=self.rhomin, + #logmax=self.rhomax, + components=self.rn_components, + gamma_val=None, + ) + + if self.vary_dm: + dm = blocks.dm_noise_block( + gp_kernel='diag', + psd='spectrum', + prior='log-uniform', + Tspan=self.Tspan, + components=self.dm_components, + gamma_val=None, + coefficients=False + ) + + if self.vary_chrom: + chrom = blocks.chromatic_noise_block( + gp_kernel='diag', + psd='spectrum', + prior='log-uniform', + idx=4, + include_quadratic=self.include_quadratic, + Tspan=self.Tspan, + name='chrom', + components=self.chrom_components, + ) + + s = tm + wn + + if self.vary_rn: + s += rn + if self.vary_dm: + s += dm + if self.vary_chrom: + s += chrom + self.pta = signal_base.PTA( [s(p) for p in self.psr], lnlikelihood=signal_base.LogLikelihoodDenseCholesky, ) - if not white_vary: + #print(self.pta.signals.keys()) + if not self.vary_wn: self.pta.set_default_params(noise_dict) self.Nmat = self.pta.get_ndiag(params={})[0] self.TNr = self.pta.get_TNr(params={})[0] @@ -177,7 +218,7 @@ def __init__( else: self.Nmat = None - if self.inc_ecorr and "basis" in self.ecorr_type: + if self.inc_ecorr and "basis" in self.ecorr_type and self.vary_wn: # grabbing priors on ECORR params for ct, par in enumerate(self.pta.params): if "ecorr" in str(par): @@ -188,30 +229,58 @@ def __init__( 10 ** (2 * float(ecorr_priors[0].split("=")[1])), 10 ** (2 * float(ecorr_priors[1].split("=")[1])), ) + #print(self.ecorrmin, self.ecorrmax) # Getting residuals self._residuals = self.pta.get_residuals()[0] + ## FIXME : maybe don't cache this -- could lead to memory issues. # Intial guess for the model params self._xs = np.array([p.sample() for p in self.pta.params], dtype=object) - # Initializign the b-coefficients. The shape is 2*freq_bins if tm_marg - # = True. + # Initializign the b-coefficients. 
+ # The shape is 2*rn_comp+2*dm_comp+2*chrom_comp if tm_marg = True + # if tm_marg = False, + # then the shape is more because there are some tm params in there? self._b = np.zeros(self.pta.get_basis(self._xs)[0].shape[1]) + # when including dm and chromatic models, the b's are + # the concantenation of the red noise, dm, and chromatic noise fourier coefficients + #print("len b: ", len(self._b)) + #print(self.pta.get_basis(self._xs)[0].shape) self.Tmat = self.pta.get_basis(params={})[0] self.phiinv = None - + # print(self._xs.shape) + # print(self.pta.params) + # print("dm", self.get_dm_param_indices) + # print("chrom", self.get_chrom_param_indices) + # print("rn:", self.get_rn_param_indices) # find basis indices of GW process - self.gwid = [] + ### jeremy : changing the below from gwid to rn_id and adding dm_id and chrom_id + self.rn_id = [] + self.dm_id = [] + self.chrom_id = [] ct = 0 psigs = [sig for sig in self.pta.signals.keys() if self.name in sig] for sig in psigs: Fmat = self.pta.signals[sig].get_basis() - if "gw" in self.pta.signals[sig].name: - self.gwid.append(ct + np.arange(0, Fmat.shape[1])) + if "red_noise" in self.pta.signals[sig].name: + self.rn_id.append(ct + np.arange(0, Fmat.shape[1])) + ct+=Fmat.shape[1] + if "dm_gp" in self.pta.signals[sig].name: + self.dm_id.append(ct + np.arange(0, Fmat.shape[1])) + ct+=Fmat.shape[1] + if "chrom_gp" in self.pta.signals[sig].name: + self.chrom_id.append(ct + np.arange(0, Fmat.shape[1])) + ct+=Fmat.shape[1] + ### jeremy : chaning the above to red_noise and adding dm and chrom as well # Avoid None-basis processes. # Also assume red + GW signals share basis. - if Fmat is not None and "red" not in sig: + if Fmat is not None and "red" not in sig and 'dm_gp' not in sig and 'chrom_gp' not in sig: ct += Fmat.shape[1] + #print(sig) + #print(ct) + #print("rn", self.rn_id) + #print("dm", self.dm_id) + #print("chrom", self.chrom_id) @cached_property def params(self): @@ -225,10 +294,26 @@ def map_params(self, xs): return self.pta.map_params(xs) @cached_property - def get_red_param_indices(self): + def get_rn_param_indices(self): + ind = [] + for ct, par in enumerate(self.param_names): + if "red_noise" in par: + ind.append(ct) + return np.array(ind) + + @cached_property + def get_dm_param_indices(self): + ind = [] + for ct, par in enumerate(self.param_names): + if "dm_gp" in par: + ind.append(ct) + return np.array(ind) + + @cached_property + def get_chrom_param_indices(self): ind = [] for ct, par in enumerate(self.param_names): - if "log10_A" in par or "gamma" in par or "rho" in par: + if "chrom_gp" in par: ind.append(ct) return np.array(ind) @@ -255,9 +340,10 @@ def get_basis_ecorr_indices(self): def update_red_params(self, xs): """ - Function to perform log10_rho updates given the Fourier coefficients. + Function to perform red_noise_log10_rho updates given + the red noise Fourier coefficients. """ - tau = self._b[tuple(self.gwid)] ** 2 + tau = self._b[tuple(self.rn_id)] ** 2 tau = (tau[0::2] + tau[1::2]) / 2 Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) @@ -266,6 +352,34 @@ def update_red_params(self, xs): xs[-1] = 0.5 * np.log10(rhonew) return xs + def update_dm_params(self, xs): + """ + Function to perform dm_gp_log10_rho updates given + the dm gp Fourier coefficients. 
+ """ + tau = self._b[tuple(self.dm_id)] ** 2 + tau = (tau[0::2] + tau[1::2]) / 2 + + Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) + x = np.random.default_rng().uniform(0, 1, size=tau.shape) + rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low)) + xs[-2] = 0.5 * np.log10(rhonew) + return xs + + def update_chrom_params(self, xs): + """ + Function to perform chrom_gp_log10_rho updates given + the chromatic gp Fourier coefficients. + """ + tau = self._b[tuple(self.chrom_id)] ** 2 + tau = (tau[0::2] + tau[1::2]) / 2 + + Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) + x = np.random.default_rng().uniform(0, 1, size=tau.shape) + rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low)) + xs[-3] = 0.5 * np.log10(rhonew) + return xs + def update_b(self, xs): """ Function to perform updates on Fourier coefficients given other model parameters. @@ -306,6 +420,7 @@ def update_b(self, xs): trans=1, ) except np.linalg.LinAlgError: + print("oh sh******t; a spiiiiiddddeeeerrrrrr") if self.bchain.any(): self._b = self.bchain[ np.random.default_rng().integers(0, len(self.bchain)) @@ -455,8 +570,8 @@ def get_basis_ecorr_lnprior(self, xs): def sample( self, niter=int(1e4), - wniters=30, - eciters=10, + wniters=100, + eciters=15, savepath=None, SCAMweight=30, AMweight=15, @@ -502,7 +617,7 @@ def sample( os.makedirs(savepath, exist_ok=True) - if self.white_vary: + if self.vary_wn: # large number to avoid saving the white noise choice in a txt file isave = int(4e9) thin = 1 @@ -533,7 +648,7 @@ def sample( **kwargs ) - if "basis" in self.ecorr_type and self.white_vary: + if "basis" in self.ecorr_type and self.vary_wn and self.inc_ecorr: x0 = self._xs[self.get_basis_ecorr_indices] ndim = len(x0) cov = np.diag(np.ones(ndim) * 0.01**2) @@ -564,19 +679,20 @@ def sample( list(map(lambda x: str(x.__repr__()), self.pta.params)), fmt="%s", ) - rn_freqs = np.arange( - 1 / self.Tspan, - (self.rn_components + 0.001) / self.Tspan, - 1 / self.Tspan) - np.save(savepath + "/rn_freqs.npy", rn_freqs) + if self.vary_rn: + rn_freqs = np.arange( + 1 / self.Tspan, + (self.rn_components + 0.001) / self.Tspan, + 1 / self.Tspan) + np.save(savepath + "/rn_freqs.npy", rn_freqs) - if self.dm_components is not None: + if self.vary_dm: dm_freqs = np.arange( 1 / self.Tspan, (self.dm_components + 0.001) / self.Tspan, 1 / self.Tspan) np.save(savepath + "/dm_freqs.npy", dm_freqs) - if self.chrom_components is not None: + if self.vary_chrom: chrom_freqs = np.arange( 1 / self.Tspan, (self.chrom_components + 0.001) / self.Tspan, @@ -600,16 +716,25 @@ def sample( pbar = tqdm(range(niter), colour="GREEN") pbar.set_description("Sampling %s" % self.name) +# num_gibbs = np.sum([int(self.vary_rn), int(self.vary_dm), int(self.vary_chrom)]) for ii in pbar: - if self.white_vary: + if self.vary_wn: xnew = self.update_white_params(xnew, iters=wniters) if self.inc_ecorr and "basis" in self.ecorr_type: xnew = self.update_basis_ecorr_params(xnew, iters=eciters) +# turn = ii % num_gibbs + #if self.vary_rn and turn == 0: self.update_b(xs=xnew) xnew = self.update_red_params(xs=xnew) - + #if self.vary_dm and turn == 1: + #self.update_b(xs=xnew) + xnew = self.update_dm_params(xs=xnew) + #if self.vary_chrom and turn == 2: + #self.update_b(xs=xnew) + xnew = self.update_chrom_params(xs=xnew) + fp[ii, -len_b:] = self._b fp[ii, 0:len_x] = np.hstack(xnew) From 1a9de5990b55cfb22fd9a3ed6e2e7fe3cb6daa26 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Sat, 24 Aug 2024 21:20:33 -0700 Subject: [PATCH 124/193] adding gibbs to 
noise_utils --- src/pint_pal/gibbs_sampler.py | 8 ++--- src/pint_pal/noise_utils.py | 64 ++++++++++++++++++++++------------- 2 files changed, 43 insertions(+), 29 deletions(-) diff --git a/src/pint_pal/gibbs_sampler.py b/src/pint_pal/gibbs_sampler.py index b98e6c12..ac8021f8 100644 --- a/src/pint_pal/gibbs_sampler.py +++ b/src/pint_pal/gibbs_sampler.py @@ -47,8 +47,8 @@ def __init__( vary_rn=True, rn_components=30, tnequad=True, - log10rhomin=-9.0, - log10rhomax=-4.0, + #log10rhomin=-9.0, i think these would only apply to HD correlations + #log10rhomax=-4.0, on gibbs sampling. IRN and DM/CHROM are diagonal ?? vary_dm=False, dm_components=50, vary_chrom=False, @@ -123,8 +123,6 @@ def __init__( self.vary_wn = vary_wn self.tm_marg = tm_marg self.wn_names = ["efac", "equad", "ecorr"] - self.rhomin = log10rhomin - self.rhomax = log10rhomax self.rn_components = rn_components self.dm_components = dm_components self.chrom_components = chrom_components @@ -167,8 +165,6 @@ def __init__( psd="spectrum", prior="log-uniform", Tspan=self.Tspan, - #logmin=self.rhomin, - #logmax=self.rhomax, components=self.rn_components, gamma_val=None, ) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 968b48a3..8d226563 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -8,6 +8,7 @@ import pint.models as pm from pint.models.parameter import maskParameter +from pint_pal.gibbs_sampler import GibbsSampler import matplotlib as mpl import matplotlib.pyplot as pl @@ -178,7 +179,13 @@ def analyze_noise(chaindir = './noise_run_chains/', burn_frac = 0.25, save_corne return wn_dict, rn_bf -def model_noise(mo, to, vary_red_noise = True, n_iter = int(1e5), using_wideband = False, resume = False, run_noise_analysis = True, wb_efac_sigma = 0.25, base_op_dir = "./"): +def model_noise(mo, to, sampler = 'PTMCMCSampler', + vary_red_noise = True, n_iter = int(1e5), + using_wideband = False, resume = False, + run_noise_analysis = True, + wb_efac_sigma = 0.25, base_op_dir = "./", + noise_kwargs = {}, sampler_kwargs = {}, + ): """ Setup enterprise PTA and perform MCMC noise analysis @@ -186,6 +193,7 @@ def model_noise(mo, to, vary_red_noise = True, n_iter = int(1e5), using_wideband ========== mo: PINT (or tempo2) timing model to: PINT (or tempo2) TOAs + sampler: either 'PTMCMCSampler' or 'gibbs' red_noise: include red noise in the model n_iter: number of MCMC iterations; Default: 1e5; Recommended > 5e4 using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False @@ -219,28 +227,38 @@ def model_noise(mo, to, vary_red_noise = True, n_iter = int(1e5), using_wideband #Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to) e_psr = Pulsar(mo, to) - #Setup a single pulsar PTA using enterprise_extensions - if not using_wideband: - pta = models.model_singlepsr_noise(e_psr, white_vary = True, red_var = vary_red_noise, is_wideband = False, use_dmdata = False, dmjump_var = False, wb_efac_sigma = wb_efac_sigma) - else: - pta = models.model_singlepsr_noise(e_psr, is_wideband = True, use_dmdata = True, white_vary = True, red_var = vary_red_noise, dmjump_var = False, wb_efac_sigma = wb_efac_sigma, ng_twg_setup = True) - dmjump_params = {} - for param in mo.params: - if param.startswith('DMJUMP'): - dmjump_param = getattr(mo,param) - dmjump_param_name = f"{pta.pulsars[0]}_{dmjump_param.key_value[0]}_dmjump" - dmjump_params[dmjump_param_name] = dmjump_param.value - pta.set_default_params(dmjump_params) - - #setup sampler using enterprise_extensions 
- samp = sampler.setup_sampler(pta, outdir = outdir, resume = resume) - - #Initial sample - x0 = np.hstack([p.sample() for p in pta.params]) - - #Start sampling - - samp.sample(x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50,) + if sampler == 'PTMCMCSampler': + log.info(f"INFO: Running noise analysis with {sampler} for {e_psr.name}") + #Setup a single pulsar PTA using enterprise_extensions + if not using_wideband: + pta = models.model_singlepsr_noise(e_psr, white_vary = True, red_var = vary_red_noise, is_wideband = False, use_dmdata = False, dmjump_var = False, wb_efac_sigma = wb_efac_sigma, **noise_kwargs) + else: + pta = models.model_singlepsr_noise(e_psr, is_wideband = True, use_dmdata = True, white_vary = True, red_var = vary_red_noise, dmjump_var = False, wb_efac_sigma = wb_efac_sigma, ng_twg_setup = True, **noise_kwargs) + dmjump_params = {} + for param in mo.params: + if param.startswith('DMJUMP'): + dmjump_param = getattr(mo,param) + dmjump_param_name = f"{pta.pulsars[0]}_{dmjump_param.key_value[0]}_dmjump" + dmjump_params[dmjump_param_name] = dmjump_param.value + pta.set_default_params(dmjump_params) + # FIXME: set groups here + ####### + #setup sampler using enterprise_extensions + samp = sampler.setup_sampler(pta, outdir = outdir, resume = resume) + + #Initial sample + x0 = np.hstack([p.sample() for p in pta.params]) + + #Start sampling + + samp.sample(x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs) + elif sampler == 'gibbs': + log.info(f"INFO: Running noise analysis with {sampler} for {e_psr.name}") + samp = GibbsSampler(e_psr, + **noise_kwargs, + ) + samp.sample(niter=n_iter, save_path=outdir, **sampler_kwargs) + pass def convert_to_RNAMP(value): """ From 44f63c3ed7b3a4693ef01492b6abb8d54240ac5a Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Sat, 24 Aug 2024 21:32:36 -0700 Subject: [PATCH 125/193] un-hard-coding model inclusion --- src/pint_pal/gibbs_sampler.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/gibbs_sampler.py b/src/pint_pal/gibbs_sampler.py index ac8021f8..8675ffdd 100644 --- a/src/pint_pal/gibbs_sampler.py +++ b/src/pint_pal/gibbs_sampler.py @@ -130,10 +130,15 @@ def __init__( self.vary_dm = vary_dm self.vary_chrom = vary_chrom self.include_quadratic = include_quadratic - self.low = 10 ** (2 * self.rhomin) - self.high = 10 ** (2 * self.rhomax) - + #self.low = 10 ** (2 * self.rhomin) + #self.high = 10 ** (2 * self.rhomax) # Making the pta object + # need to keep track of which parameters are being varied + # they appear alphebetically in signal_collections + # FIXME: this would probably break if you added a solar wind model + self.rn_idx = -1 + self.dm_idx = -1 - int(self.vary_rn) + self.chrom_idx = -1 - int(self.vary_rn) - int(self.vary_dm) if self.tm_marg: tm = gp_signals.MarginalizingTimingModel(use_svd=True) if self.vary_wn: @@ -191,6 +196,7 @@ def __init__( name='chrom', components=self.chrom_components, ) + s = tm + wn @@ -277,6 +283,7 @@ def __init__( #print("rn", self.rn_id) #print("dm", self.dm_id) #print("chrom", self.chrom_id) + @cached_property def params(self): @@ -345,7 +352,7 @@ def update_red_params(self, xs): Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) x = np.random.default_rng().uniform(0, 1, size=tau.shape) rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low)) - xs[-1] = 0.5 * np.log10(rhonew) + xs[self.rn_idx] = 0.5 * np.log10(rhonew) return xs def update_dm_params(self, xs): @@ -359,7 +366,7 @@ def update_dm_params(self, xs): Norm = 1 
/ (np.exp(-tau / self.high) - np.exp(-tau / self.low)) x = np.random.default_rng().uniform(0, 1, size=tau.shape) rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low)) - xs[-2] = 0.5 * np.log10(rhonew) + xs[self.dm_idx] = 0.5 * np.log10(rhonew) return xs def update_chrom_params(self, xs): @@ -373,7 +380,7 @@ def update_chrom_params(self, xs): Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) x = np.random.default_rng().uniform(0, 1, size=tau.shape) rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low)) - xs[-3] = 0.5 * np.log10(rhonew) + xs[self.chrom_idx] = 0.5 * np.log10(rhonew) return xs def update_b(self, xs): From 0e83379c23cc9fba8cb3d8c145a1be67a993268b Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Wed, 28 Aug 2024 12:02:18 -0700 Subject: [PATCH 126/193] fix sampler import bug --- src/pint_pal/noise_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 8d226563..7cab5e54 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -179,7 +179,7 @@ def analyze_noise(chaindir = './noise_run_chains/', burn_frac = 0.25, save_corne return wn_dict, rn_bf -def model_noise(mo, to, sampler = 'PTMCMCSampler', +def model_noise(mo, to, which_sampler = 'PTMCMCSampler', vary_red_noise = True, n_iter = int(1e5), using_wideband = False, resume = False, run_noise_analysis = True, @@ -227,7 +227,7 @@ def model_noise(mo, to, sampler = 'PTMCMCSampler', #Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to) e_psr = Pulsar(mo, to) - if sampler == 'PTMCMCSampler': + if which_sampler == 'PTMCMCSampler': log.info(f"INFO: Running noise analysis with {sampler} for {e_psr.name}") #Setup a single pulsar PTA using enterprise_extensions if not using_wideband: @@ -252,7 +252,7 @@ def model_noise(mo, to, sampler = 'PTMCMCSampler', #Start sampling samp.sample(x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs) - elif sampler == 'gibbs': + elif which_sampler == 'gibbs': log.info(f"INFO: Running noise analysis with {sampler} for {e_psr.name}") samp = GibbsSampler(e_psr, **noise_kwargs, From 841cb8edff0d1639a5dfc3a6a48573f8507a5c02 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Wed, 11 Sep 2024 11:50:36 -0700 Subject: [PATCH 127/193] moving gibbs sampler to enterprise extensions; adding some initial discovery set up --- src/pint_pal/gibbs_sampler.py | 743 ---------------------------------- src/pint_pal/noise_utils.py | 46 ++- 2 files changed, 37 insertions(+), 752 deletions(-) delete mode 100644 src/pint_pal/gibbs_sampler.py diff --git a/src/pint_pal/gibbs_sampler.py b/src/pint_pal/gibbs_sampler.py deleted file mode 100644 index 8675ffdd..00000000 --- a/src/pint_pal/gibbs_sampler.py +++ /dev/null @@ -1,743 +0,0 @@ -import numpy as np -from tqdm import tqdm -import scipy.linalg as sl -from functools import cached_property -import os -import glob -import warnings -from enterprise_extensions import model_utils, blocks -from PTMCMCSampler.PTMCMCSampler import PTSampler as ptmcmc -from enterprise.signals import signal_base, gp_signals -from scipy.linalg import solve_triangular as st_solve -from scipy.linalg import cho_factor, cho_solve - - -class GibbsSampler(object): - - """ - The Gibbs Method class used for single-pulsar noise analyses. 
- - Based on: - - Article by van Haasteren & Vallisneri (2014), - "New advances in the Gaussian-process approach - to pulsar-timing data analysis", - Physical Review D, Volume 90, Issue 10, id.104012 - arXiv:1407.1838 - - Initial structure of the code is based on https://github.com/jellis18/gibbs_student_t - - Authors: - - S. R. Taylor - N. Laal - J. G. Baier - """ - - def __init__( - self, - psr=None, - Tspan=None, - select="backend", - vary_wn=False, - inc_ecorr=False, - ecorr_type="kernel", - noise_dict=None, - tm_marg=False, - vary_rn=True, - rn_components=30, - tnequad=True, - #log10rhomin=-9.0, i think these would only apply to HD correlations - #log10rhomax=-4.0, on gibbs sampling. IRN and DM/CHROM are diagonal ?? - vary_dm=False, - dm_components=50, - vary_chrom=False, - chrom_components=50, - include_quadratic=False, - ): - """ - Parameters - ----------- - - psr : object - instance of an ENTERPRISE psr object for a single pulsar - - Tspan: float (optional) - if given, the baseline of the pulsar is fixed to the input value. If not, - baseline is determined inetrnally - - select: str - the selection of backend ('backend' or 'none') for the white-noise parameters - - self.vary_wn: bool - whether to vary the white noise - - inc_ecorr: bool - whether to include ecorr - - ecorr_type: str - the type of ecorr to use. Choose between 'basis' or 'kernel' - - noise_dict: dict - white noise dictionary in case 'self.vary_wn' is set to False - - tm_marg: bool - whether to marginalize over timing model parameters (do not use this if you are varying the white noise!) - - rn_components: int - number of red noise Fourier modes to include - - dm_components: int - number of DM noise Fourier modes to include - - chrom_components: int - number of chromatic noise Fourier modes to include - - dm_var: bool - wheter to include a free spectrum gibbs dm_gp - - chrom_var: bool - whether to include a free spectrum gibbs chrom_gp - - include_quadratic: bool - whether or not to fit out a quadratic trend in chrom_gp (think DM2) - - log10rhomin: float - lower bound for the log10 of the rho parameter. - - log10rhomax: float - upper bound for the log10 of the rho parameter - - tnequad: string - whether to use the temponest convension of efac and equad - """ - - self.psr = [psr] - if Tspan: - self.Tspan = Tspan - else: - self.Tspan = model_utils.get_tspan(self.psr) - self.name = self.psr[0].name - self.inc_ecorr = inc_ecorr - self.ecorr_type = ecorr_type - self.vary_wn = vary_wn - self.tm_marg = tm_marg - self.wn_names = ["efac", "equad", "ecorr"] - self.rn_components = rn_components - self.dm_components = dm_components - self.chrom_components = chrom_components - self.vary_rn = vary_rn - self.vary_dm = vary_dm - self.vary_chrom = vary_chrom - self.include_quadratic = include_quadratic - #self.low = 10 ** (2 * self.rhomin) - #self.high = 10 ** (2 * self.rhomax) - # Making the pta object - # need to keep track of which parameters are being varied - # they appear alphebetically in signal_collections - # FIXME: this would probably break if you added a solar wind model - self.rn_idx = -1 - self.dm_idx = -1 - int(self.vary_rn) - self.chrom_idx = -1 - int(self.vary_rn) - int(self.vary_dm) - if self.tm_marg: - tm = gp_signals.MarginalizingTimingModel(use_svd=True) - if self.vary_wn: - warnings.warn( - "***FYI: the timing model is marginalized for. 
This will slow down the WN sampling!!***" - ) - else: - tm = gp_signals.TimingModel(use_svd=True) - - if self.ecorr_type == "basis": - wn = blocks.white_noise_block( - vary=self.vary_wn, - inc_ecorr=self.inc_ecorr, - gp_ecorr=True, - select=select, - tnequad=tnequad, - ) - else: - wn = blocks.white_noise_block( - vary=self.vary_wn, - inc_ecorr=self.inc_ecorr, - gp_ecorr=False, - select=select, - tnequad=tnequad, - ) - - if self.vary_rn: - rn = blocks.red_noise_block( - psd="spectrum", - prior="log-uniform", - Tspan=self.Tspan, - components=self.rn_components, - gamma_val=None, - ) - - if self.vary_dm: - dm = blocks.dm_noise_block( - gp_kernel='diag', - psd='spectrum', - prior='log-uniform', - Tspan=self.Tspan, - components=self.dm_components, - gamma_val=None, - coefficients=False - ) - - if self.vary_chrom: - chrom = blocks.chromatic_noise_block( - gp_kernel='diag', - psd='spectrum', - prior='log-uniform', - idx=4, - include_quadratic=self.include_quadratic, - Tspan=self.Tspan, - name='chrom', - components=self.chrom_components, - ) - - - s = tm + wn - - if self.vary_rn: - s += rn - if self.vary_dm: - s += dm - if self.vary_chrom: - s += chrom - - self.pta = signal_base.PTA( - [s(p) for p in self.psr], - lnlikelihood=signal_base.LogLikelihoodDenseCholesky, - ) - #print(self.pta.signals.keys()) - if not self.vary_wn: - self.pta.set_default_params(noise_dict) - self.Nmat = self.pta.get_ndiag(params={})[0] - self.TNr = self.pta.get_TNr(params={})[0] - self.TNT = self.pta.get_TNT(params={})[0] - else: - self.Nmat = None - - if self.inc_ecorr and "basis" in self.ecorr_type and self.vary_wn: - # grabbing priors on ECORR params - for ct, par in enumerate(self.pta.params): - if "ecorr" in str(par): - ind = ct - ecorr_priors = str(self.pta.params[ind].params[0]) - ecorr_priors = ecorr_priors.split("(")[1].split(")")[0].split(", ") - self.ecorrmin, self.ecorrmax = ( - 10 ** (2 * float(ecorr_priors[0].split("=")[1])), - 10 ** (2 * float(ecorr_priors[1].split("=")[1])), - ) - #print(self.ecorrmin, self.ecorrmax) - - # Getting residuals - self._residuals = self.pta.get_residuals()[0] - ## FIXME : maybe don't cache this -- could lead to memory issues. - # Intial guess for the model params - self._xs = np.array([p.sample() - for p in self.pta.params], dtype=object) - # Initializign the b-coefficients. - # The shape is 2*rn_comp+2*dm_comp+2*chrom_comp if tm_marg = True - # if tm_marg = False, - # then the shape is more because there are some tm params in there? 
- self._b = np.zeros(self.pta.get_basis(self._xs)[0].shape[1]) - # when including dm and chromatic models, the b's are - # the concantenation of the red noise, dm, and chromatic noise fourier coefficients - #print("len b: ", len(self._b)) - #print(self.pta.get_basis(self._xs)[0].shape) - self.Tmat = self.pta.get_basis(params={})[0] - self.phiinv = None - # print(self._xs.shape) - # print(self.pta.params) - # print("dm", self.get_dm_param_indices) - # print("chrom", self.get_chrom_param_indices) - # print("rn:", self.get_rn_param_indices) - # find basis indices of GW process - ### jeremy : changing the below from gwid to rn_id and adding dm_id and chrom_id - self.rn_id = [] - self.dm_id = [] - self.chrom_id = [] - ct = 0 - psigs = [sig for sig in self.pta.signals.keys() if self.name in sig] - for sig in psigs: - Fmat = self.pta.signals[sig].get_basis() - if "red_noise" in self.pta.signals[sig].name: - self.rn_id.append(ct + np.arange(0, Fmat.shape[1])) - ct+=Fmat.shape[1] - if "dm_gp" in self.pta.signals[sig].name: - self.dm_id.append(ct + np.arange(0, Fmat.shape[1])) - ct+=Fmat.shape[1] - if "chrom_gp" in self.pta.signals[sig].name: - self.chrom_id.append(ct + np.arange(0, Fmat.shape[1])) - ct+=Fmat.shape[1] - ### jeremy : chaning the above to red_noise and adding dm and chrom as well - # Avoid None-basis processes. - # Also assume red + GW signals share basis. - if Fmat is not None and "red" not in sig and 'dm_gp' not in sig and 'chrom_gp' not in sig: - ct += Fmat.shape[1] - #print(sig) - #print(ct) - #print("rn", self.rn_id) - #print("dm", self.dm_id) - #print("chrom", self.chrom_id) - - - @cached_property - def params(self): - return self.pta.params - - @cached_property - def param_names(self): - return self.pta.param_names - - def map_params(self, xs): - return self.pta.map_params(xs) - - @cached_property - def get_rn_param_indices(self): - ind = [] - for ct, par in enumerate(self.param_names): - if "red_noise" in par: - ind.append(ct) - return np.array(ind) - - @cached_property - def get_dm_param_indices(self): - ind = [] - for ct, par in enumerate(self.param_names): - if "dm_gp" in par: - ind.append(ct) - return np.array(ind) - - @cached_property - def get_chrom_param_indices(self): - ind = [] - for ct, par in enumerate(self.param_names): - if "chrom_gp" in par: - ind.append(ct) - return np.array(ind) - - @cached_property - def get_efacequad_indices(self): - ind = [] - if "basis" in self.ecorr_type: - for ct, par in enumerate(self.param_names): - if "efac" in par or "equad" in par: - ind.append(ct) - else: - for ct, par in enumerate(self.param_names): - if "ecorr" in par or "efac" in par or "equad" in par: - ind.append(ct) - return np.array(ind) - - @cached_property - def get_basis_ecorr_indices(self): - ind = [] - for ct, par in enumerate(self.param_names): - if "ecorr" in par: - ind.append(ct) - return np.array(ind) - - def update_red_params(self, xs): - """ - Function to perform red_noise_log10_rho updates given - the red noise Fourier coefficients. - """ - tau = self._b[tuple(self.rn_id)] ** 2 - tau = (tau[0::2] + tau[1::2]) / 2 - - Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) - x = np.random.default_rng().uniform(0, 1, size=tau.shape) - rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low)) - xs[self.rn_idx] = 0.5 * np.log10(rhonew) - return xs - - def update_dm_params(self, xs): - """ - Function to perform dm_gp_log10_rho updates given - the dm gp Fourier coefficients. 
- """ - tau = self._b[tuple(self.dm_id)] ** 2 - tau = (tau[0::2] + tau[1::2]) / 2 - - Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) - x = np.random.default_rng().uniform(0, 1, size=tau.shape) - rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low)) - xs[self.dm_idx] = 0.5 * np.log10(rhonew) - return xs - - def update_chrom_params(self, xs): - """ - Function to perform chrom_gp_log10_rho updates given - the chromatic gp Fourier coefficients. - """ - tau = self._b[tuple(self.chrom_id)] ** 2 - tau = (tau[0::2] + tau[1::2]) / 2 - - Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low)) - x = np.random.default_rng().uniform(0, 1, size=tau.shape) - rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low)) - xs[self.chrom_idx] = 0.5 * np.log10(rhonew) - return xs - - def update_b(self, xs): - """ - Function to perform updates on Fourier coefficients given other model parameters. - """ - params = self.pta.map_params(np.hstack(xs)) - self._phiinv = self.pta.get_phiinv(params, logdet=False)[0] - - try: - TNT = self.TNT.copy() - except BaseException: - T = self.Tmat - TNT = self.Nmat.solve(T, left_array=T) - try: - TNr = self.TNr.copy() - except BaseException: - T = self.Tmat - TNr = self.Nmat.solve(self._residuals, left_array=T) - - np.fill_diagonal(TNT, TNT.diagonal() + self._phiinv) - try: - chol = cho_factor( - TNT, - lower=True, - overwrite_a=False, - check_finite=False) - mean = cho_solve( - chol, - b=TNr, - overwrite_b=False, - check_finite=False) - self._b = mean + st_solve( - chol[0], - np.random.normal(loc=0, scale=1, size=TNT.shape[0]), - lower=True, - unit_diagonal=False, - overwrite_b=False, - check_finite=False, - trans=1, - ) - except np.linalg.LinAlgError: - print("oh sh******t; a spiiiiiddddeeeerrrrrr") - if self.bchain.any(): - self._b = self.bchain[ - np.random.default_rng().integers(0, len(self.bchain)) - ] - else: - bchain = np.memmap( - self._savepath + "/chain_1", - dtype="float32", - mode="r", - shape=(self.niter, self.len_x + self.len_b), - )[:, -len(self._b):] - self._b = bchain[np.random.default_rng().integers( - 0, len(bchain))] - - def update_white_params(self, xs, iters=10): - """ - Function to perform WN updates given other model parameters. - If kernel ecorr is chosen, WN includes ecorr as well. - """ - # get white noise parameter indices - wind = self.get_efacequad_indices - xnew = xs - x0 = xnew[wind].copy() - lnlike0, lnprior0 = self.get_lnlikelihood_white( - x0), self.get_wn_lnprior(x0) - lnprob0 = lnlike0 + lnprior0 - - for ii in range( - self.start_wn_iter + 1, - self.start_wn_iter + iters + 1): - x0, lnlike0, lnprob0 = self.sampler_wn.PTMCMCOneStep( - x0, lnlike0, lnprob0, ii - ) - xnew[wind] = x0 - self.start_wn_iter = ii - - # Do some caching of "later needed" parameters for improved performance - self.Nmat = self.pta.get_ndiag(self.map_params(xnew))[0] - Tmat = self.Tmat - if "basis" not in self.ecorr_type: - self.TNT = self.Nmat.solve(Tmat, left_array=Tmat) - else: - TN = Tmat / self.Nmat[:, None] - self.TNT = Tmat.T @ TN - residuals = self._residuals - self.rNr = np.sum(residuals**2 / self.Nmat) - self.logdet_N = np.sum(np.log(self.Nmat)) - self.d = TN.T @ residuals - - return xnew - - def update_basis_ecorr_params(self, xs, iters=10): - """ - Function to perform basis ecorr updates. 
- """ - # get white noise parameter indices - eind = self.get_basis_ecorr_indices - xnew = xs - x0 = xnew[eind].copy() - lnlike0, lnprior0 = self.get_basis_ecorr_lnlikelihood( - x0 - ), self.get_basis_ecorr_lnprior(x0) - lnprob0 = lnlike0 + lnprior0 - - for ii in range( - self.start_ec_iter + 1, - self.start_ec_iter + iters + 1): - x0, lnlike0, lnprob0 = self.sampler_ec.PTMCMCOneStep( - x0, lnlike0, lnprob0, ii - ) - xnew[eind] = x0 - self.start_ec_iter = ii - - return xnew - - def get_lnlikelihood_white(self, xs): - """ - Function to calculate WN log-liklihood. - """ - x0 = self._xs.copy() - x0[self.get_efacequad_indices] = xs - - params = self.map_params(x0) - Nmat = self.pta.get_ndiag(params)[0] - # whitened residuals - yred = self._residuals - self.Tmat @ self._b - try: - if "basis" not in self.ecorr_type: - rNr, logdet_N = Nmat.solve(yred, left_array=yred, logdet=True) - else: - rNr = np.sum(yred**2 / Nmat) - logdet_N = np.sum(np.log(Nmat)) - except BaseException: - return -np.inf - # first component of likelihood function - loglike = -0.5 * (logdet_N + rNr) - - return loglike - - def get_basis_ecorr_lnlikelihood(self, xs): - """ - Function to calculate basis ecorr log-liklihood. - """ - x0 = np.hstack(self._xs.copy()) - x0[self.get_basis_ecorr_indices] = xs - - params = self.map_params(x0) - # start likelihood calculations - loglike = 0 - # get auxiliaries - phiinv, logdet_phi = self.pta.get_phiinv(params, logdet=True)[0] - # first component of likelihood function - loglike += -0.5 * (self.logdet_N + self.rNr) - # Red noise piece - Sigma = self.TNT + np.diag(phiinv) - try: - cf = sl.cho_factor(Sigma) - expval = sl.cho_solve(cf, self.d) - except np.linalg.LinAlgError: - return -np.inf - - logdet_sigma = np.sum(2 * np.log(np.diag(cf[0]))) - loglike += 0.5 * (self.d @ expval - logdet_sigma - logdet_phi) - - return loglike - - def get_wn_lnprior(self, xs): - """ - Function to calculate WN log-prior. - """ - x0 = self._xs.copy() - x0[self.get_efacequad_indices] = xs - - return np.sum([p.get_logpdf(value=x0[ct]) - for ct, p in enumerate(self.params)]) - - def get_basis_ecorr_lnprior(self, xs): - """ - Function to calculate basis ecorr log-prior. 
- """ - x0 = self._xs.copy() - x0[self.get_basis_ecorr_indices] = xs - - return np.sum([p.get_logpdf(value=x0[ct]) - for ct, p in enumerate(self.params)]) - - def sample( - self, - niter=int(1e4), - wniters=100, - eciters=15, - savepath=None, - SCAMweight=30, - AMweight=15, - DEweight=50, - covUpdate=1000, - burn=10000, - **kwargs - ): - """ - Gibbs Sampling - - Parameters - ----------- - niter: integer - total number of Gibbs sampling iterations - - wniters: - number of white noise MCMC sampling iterations within each Gibbs step - - eciters: - number of basis ecorr MCMC sampling iterations within each Gibbs step - - savepath: str - the path to save the chains - - covUpdate: integer - Number of iterations between AM covariance updates - - SCAMweight: integer - Weight of SCAM jumps in overall jump cycle - - AMweight: integer - Weight of AM jumps in overall jump cycle - - DEweight: integer - Weight of DE jumps in overall jump cycle - - kwargs: dict - PTMCMC initialization settings not mentioned above - """ - self.start_wn_iter = 0 - self.start_ec_iter = 0 - - os.makedirs(savepath, exist_ok=True) - - if self.vary_wn: - # large number to avoid saving the white noise choice in a txt file - isave = int(4e9) - thin = 1 - Niter = int(niter * wniters + 1) - - x0 = self._xs[self.get_efacequad_indices] - ndim = len(x0) - cov = np.diag( - np.ones(ndim) * 0.01**2 - ) # helps to tune MCMC proposal distribution - self.sampler_wn = ptmcmc( - ndim, - self.get_lnlikelihood_white, - self.get_wn_lnprior, - cov, - outDir=savepath, - resume=False, - ) - self.sampler_wn.initialize( - Niter=Niter, - isave=isave, - thin=thin, - SCAMweight=SCAMweight, - AMweight=AMweight, - DEweight=DEweight, - covUpdate=covUpdate, - burn=burn, - **kwargs - ) - - if "basis" in self.ecorr_type and self.vary_wn and self.inc_ecorr: - x0 = self._xs[self.get_basis_ecorr_indices] - ndim = len(x0) - cov = np.diag(np.ones(ndim) * 0.01**2) - self.sampler_ec = ptmcmc( - ndim, - self.get_basis_ecorr_lnlikelihood, - self.get_basis_ecorr_lnprior, - cov, - outDir=savepath, - resume=False, - ) - self.sampler_ec.initialize( - Niter=Niter, - isave=isave, - thin=thin, - SCAMweight=SCAMweight, - AMweight=AMweight, - DEweight=DEweight, - covUpdate=covUpdate, - burn=burn, - **kwargs - ) - - np.savetxt(savepath + "/pars.txt", - list(map(str, self.pta.param_names)), fmt="%s") - np.savetxt( - savepath + "/priors.txt", - list(map(lambda x: str(x.__repr__()), self.pta.params)), - fmt="%s", - ) - if self.vary_rn: - rn_freqs = np.arange( - 1 / self.Tspan, - (self.rn_components + 0.001) / self.Tspan, - 1 / self.Tspan) - np.save(savepath + "/rn_freqs.npy", rn_freqs) - - if self.vary_dm: - dm_freqs = np.arange( - 1 / self.Tspan, - (self.dm_components + 0.001) / self.Tspan, - 1 / self.Tspan) - np.save(savepath + "/dm_freqs.npy", dm_freqs) - if self.vary_chrom: - chrom_freqs = np.arange( - 1 / self.Tspan, - (self.chrom_components + 0.001) / self.Tspan, - 1 / self.Tspan) - np.save(savepath + "/chrom_freqs.npy", chrom_freqs) - [os.remove(dpa) for dpa in glob.glob(savepath + "/*jump.txt")] - - xnew = self._xs.copy() - - len_b = len(self._b) - len_x = len(np.hstack(self._xs)) - self._savepath = savepath - - fp = np.lib.format.open_memmap( - savepath + "/chain_1.npy", - mode="w+", - dtype="float32", - shape=(niter, len_x + len_b), - fortran_order=False, - ) - - pbar = tqdm(range(niter), colour="GREEN") - pbar.set_description("Sampling %s" % self.name) -# num_gibbs = np.sum([int(self.vary_rn), int(self.vary_dm), int(self.vary_chrom)]) - for ii in pbar: - if 
self.vary_wn: - xnew = self.update_white_params(xnew, iters=wniters) - - if self.inc_ecorr and "basis" in self.ecorr_type: - xnew = self.update_basis_ecorr_params(xnew, iters=eciters) - -# turn = ii % num_gibbs - #if self.vary_rn and turn == 0: - self.update_b(xs=xnew) - xnew = self.update_red_params(xs=xnew) - #if self.vary_dm and turn == 1: - #self.update_b(xs=xnew) - xnew = self.update_dm_params(xs=xnew) - #if self.vary_chrom and turn == 2: - #self.update_b(xs=xnew) - xnew = self.update_chrom_params(xs=xnew) - - fp[ii, -len_b:] = self._b - fp[ii, 0:len_x] = np.hstack(xnew) - diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 7cab5e54..b34bccc9 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -8,7 +8,6 @@ import pint.models as pm from pint.models.parameter import maskParameter -from pint_pal.gibbs_sampler import GibbsSampler import matplotlib as mpl import matplotlib.pyplot as pl @@ -193,7 +192,7 @@ def model_noise(mo, to, which_sampler = 'PTMCMCSampler', ========== mo: PINT (or tempo2) timing model to: PINT (or tempo2) TOAs - sampler: either 'PTMCMCSampler' or 'gibbs' + sampler: either 'PTMCMCSampler' or 'GibbsSampler' or 'discovery' red_noise: include red noise in the model n_iter: number of MCMC iterations; Default: 1e5; Recommended > 5e4 using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False @@ -220,7 +219,7 @@ def model_noise(mo, to, which_sampler = 'PTMCMCSampler', #Ensure n_iter is an integer n_iter = int(n_iter) - + if n_iter < 1e4: log.warning("Such a small number of iterations is unlikely to yield accurate posteriors. STRONGLY recommend increasing the number of iterations to at least 5e4") @@ -228,7 +227,7 @@ def model_noise(mo, to, which_sampler = 'PTMCMCSampler', e_psr = Pulsar(mo, to) if which_sampler == 'PTMCMCSampler': - log.info(f"INFO: Running noise analysis with {sampler} for {e_psr.name}") + log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") #Setup a single pulsar PTA using enterprise_extensions if not using_wideband: pta = models.model_singlepsr_noise(e_psr, white_vary = True, red_var = vary_red_noise, is_wideband = False, use_dmdata = False, dmjump_var = False, wb_efac_sigma = wb_efac_sigma, **noise_kwargs) @@ -248,17 +247,20 @@ def model_noise(mo, to, which_sampler = 'PTMCMCSampler', #Initial sample x0 = np.hstack([p.sample() for p in pta.params]) - #Start sampling - samp.sample(x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs) - elif which_sampler == 'gibbs': - log.info(f"INFO: Running noise analysis with {sampler} for {e_psr.name}") + elif which_sampler == 'GibbsSampler': + log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") samp = GibbsSampler(e_psr, **noise_kwargs, ) samp.sample(niter=n_iter, save_path=outdir, **sampler_kwargs) - pass + pass + elif which_sampler == 'discovery': + log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") + pass + else: + log.error("Invalid sampler specified. 
Please use \'PTMCMCSampler\' or \'GibbsSampler\' or \'discovery\' ") def convert_to_RNAMP(value): """ @@ -458,6 +460,32 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl return model +def setup_gibbs_sampler(): + """ + Setup the Gibbs sampler for noise analysis from enterprise extensions + """ + # check that a sufficiently up-to-date version of enterprise_extensions is installed + try: + from enterprise_extensions.gibbs_sampling import gibbs + except ImportError: + log.error("Please install the latest version of enterprise_extensions") + return None + + pass + +def setup_discovery_sampler(): + """ + Setup the discovery sampler for noise analysis from enterprise extensions + """ + # check that a sufficiently up-to-date version of enterprise_extensions is installed + try: + import discovery as ds + except ImportError: + log.error("Please install the latest version of discovery") + return None + + pass + def test_equad_convention(pars_list): """ If (t2/tn)equad present, report convention used. From 467b6faec4f4734ef301a0dbf153fac29bab871a Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 17 Oct 2024 20:46:05 -0700 Subject: [PATCH 128/193] getting back onnnit --- src/pint_pal/noise_utils.py | 647 +++++++++++++++++++++++++----------- 1 file changed, 454 insertions(+), 193 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index b34bccc9..7a3270f3 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -12,7 +12,11 @@ import matplotlib as mpl import matplotlib.pyplot as pl -#Imports necessary for e_e noise modeling functions +import la_forge.core as co +import la_forge.diagnostics as dg +import la_forge.utils as lu + +# Imports necessary for e_e noise modeling functions import functools from collections import OrderedDict @@ -27,14 +31,22 @@ from enterprise_extensions import model_utils from enterprise_extensions import deterministic from enterprise_extensions.timing import timing_block -#from enterprise_extensions.blocks import (white_noise_block, red_noise_block) + +# from enterprise_extensions.blocks import (white_noise_block, red_noise_block) import types from enterprise.signals import utils from enterprise.signals import gp_priors as gpp -def analyze_noise(chaindir = './noise_run_chains/', burn_frac = 0.25, save_corner = True, no_corner_plot = False, chaindir_compare=None): + +def analyze_noise( + chaindir="./noise_run_chains/", + burn_frac=0.25, + save_corner=True, + no_corner_plot=False, + chaindir_compare=None, +): """ Reads enterprise chain file; produces and saves corner plot; returns WN dictionary and RN (SD) BF @@ -50,141 +62,201 @@ def analyze_noise(chaindir = './noise_run_chains/', burn_frac = 0.25, save_corne wn_dict: Dictionary of maximum likelihood WN values rn_bf: Savage-Dickey BF for RN for given pulsar """ - chainfile = chaindir + 'chain_1.txt' - chain = np.loadtxt(chainfile) - burn = int(burn_frac * chain.shape[0]) - pars = np.loadtxt(chaindir + 'pars.txt', dtype = str) - - psr_name = pars[0].split('_')[0] - + #### replacing this with la_forge to be more flexible + # chainfile = chaindir + "chain_1.txt" + # chain = np.loadtxt(chainfile) + # burn = int(burn_frac * chain.shape[0]) + # pars = np.loadtxt(chaindir + "pars.txt", dtype=str) + try: + noise_core = co.Core(chaindir=chaindir) + except: + log.error(f"Could not load noise run from {chaindir}") + return None + noise_core.burn(burn_frac) + chain = noise_core.chain + psr_name = noise_core.pars[0].split("_")[0] + pars = 
noise_core.pars + # load in same for comparison noise model if chaindir_compare is not None: - chainfile_compare = chaindir_compare + 'chain_1.txt' + chainfile_compare = chaindir_compare + "chain_1.txt" chain_compare = np.loadtxt(chainfile_compare) burn_compare = int(burn_frac * chain_compare.shape[0]) - pars_compare = np.loadtxt(chaindir_compare + 'pars.txt', dtype = str) + pars_compare = np.loadtxt(chaindir_compare + "pars.txt", dtype=str) - psr_name_compare = pars_compare[0].split('_')[0] + psr_name_compare = pars_compare[0].split("_")[0] if psr_name_compare != psr_name: - log.warning(f"Pulsar name from {chaindir_compare} does not match. Will not plot comparison") + log.warning( + f"Pulsar name from {chaindir_compare} does not match. Will not plot comparison" + ) chaindir_compare = None if save_corner and not no_corner_plot: - pars_short = [p.split("_",1)[1] for p in pars] + pars_short = [p.split("_", 1)[1] for p in pars] log.info(f"Chain parameter names are {pars_short}") log.info(f"Chain parameter convention: {test_equad_convention(pars_short)}") if chaindir_compare is not None: # need to plot comparison corner plot first so it's underneath - compare_pars_short = [p.split("_",1)[1] for p in pars_compare] + compare_pars_short = [p.split("_", 1)[1] for p in pars_compare] log.info(f"Comparison chain parameter names are {compare_pars_short}") - log.info(f"Comparison chain parameter convention: {test_equad_convention(compare_pars_short)}") + log.info( + f"Comparison chain parameter convention: {test_equad_convention(compare_pars_short)}" + ) # don't plot comparison if the parameter names don't match if compare_pars_short != pars_short: - log.warning("Parameter names for comparison noise chains do not match, not plotting the compare-noise-dir chains") + log.warning( + "Parameter names for comparison noise chains do not match, not plotting the compare-noise-dir chains" + ) chaindir_compare = None else: - normalization_factor = np.ones(len(chain_compare[burn:, :-4]))*len(chain[burn:, :-4])/len(chain_compare[burn:, :-4]) - fig = corner.corner(chain_compare[burn:, :-4], color='orange', alpha=0.5, weights=normalization_factor, labels = compare_pars_short) + normalization_factor = ( + np.ones(len(chain_compare[:, :-4])) + * len(chain[:, :-4]) + / len(chain_compare[:, :-4]) + ) + fig = corner.corner( + chain_compare[:, :-4], + color="orange", + alpha=0.5, + weights=normalization_factor, + labels=compare_pars_short, + ) # normal corner plot - corner.corner(chain[burn:, :-4], fig=fig, color='black', labels = pars_short) + corner.corner( + chain[:, :-4], fig=fig, color="black", labels=pars_short + ) if chaindir_compare is None: - corner.corner(chain[burn:, :-4], labels = pars_short) + corner.corner(chain[:, :-4], labels=pars_short) - if '_wb' in chaindir: + if "_wb" in chaindir: figname = f"./{psr_name}_noise_corner_wb.pdf" - elif '_nb' in chaindir: + elif "_nb" in chaindir: figname = f"./{psr_name}_noise_corner_nb.pdf" else: figname = f"./{psr_name}_noise_corner.pdf" pl.savefig(figname) - pl.savefig(figname.replace(".pdf",".png"), dpi=300) + pl.savefig(figname.replace(".pdf", ".png"), dpi=300) pl.show() - + if no_corner_plot: - + from matplotlib.backends.backend_pdf import PdfPages - if '_wb' in chaindir: + + if "_wb" in chaindir: figbase = f"./{psr_name}_noise_posterior_wb" - elif '_nb' in chaindir: + elif "_nb" in chaindir: figbase = f"./{psr_name}_noise_posterior_nb" else: figbase = f"./{psr_name}_noise_posterior" - - pars_short = [p.split("_",1)[1] for p in pars] + + pars_short = [p.split("_", 
1)[1] for p in pars] log.info(f"Chain parameter names are {pars_short}") log.info(f"Chain parameter convention: {test_equad_convention(pars_short)}") if chaindir_compare is not None: # need to plot comparison corner plot first so it's underneath - compare_pars_short = [p.split("_",1)[1] for p in pars_compare] + compare_pars_short = [p.split("_", 1)[1] for p in pars_compare] log.info(f"Comparison chain parameter names are {compare_pars_short}") - log.info(f"Comparison chain parameter convention: {test_equad_convention(compare_pars_short)}") + log.info( + f"Comparison chain parameter convention: {test_equad_convention(compare_pars_short)}" + ) # don't plot comparison if the parameter names don't match if compare_pars_short != pars_short: - log.warning("Parameter names for comparison noise chains do not match, not plotting the compare-noise-dir chains") + log.warning( + "Parameter names for comparison noise chains do not match, not plotting the compare-noise-dir chains" + ) chaindir_compare = None else: - normalization_factor = np.ones(len(chain_compare[burn:, :-4]))*len(chain[burn:, :-4])/len(chain_compare[burn:, :-4]) - - #Set the shape of the subplots + normalization_factor = ( + np.ones(len(chain_compare[:, :-4])) + * len(chain[:, :-4]) + / len(chain_compare[:, :-4]) + ) + + # Set the shape of the subplots shape = pars.shape[0] - - if '_wb' in chaindir: - ncols = 4 # number of columns per page + + if "_wb" in chaindir: + ncols = 4 # number of columns per page else: ncols = 3 - - nrows = 5 # number of rows per page - mp_idx = np.argmax(chain[burn:, -4]) - if chaindir_compare is not None: mp_compare_idx = np.argmax(chain_compare[burn:, -4]) - + nrows = 5 # number of rows per page + + mp_idx = np.argmax(chain[:, -4]) + if chaindir_compare is not None: + mp_compare_idx = np.argmax(chain_compare[:, -4]) + nbins = 20 pp = 0 for idx, par in enumerate(pars_short): - j = idx % (nrows*ncols) + j = idx % (nrows * ncols) if j == 0: pp += 1 - fig = pl.figure(figsize=(8,11)) - - ax = fig.add_subplot(nrows, ncols, j+1) - ax.hist(chain[burn:, idx], bins = nbins, histtype = 'step', color='black', label = 'Current') - ax.axvline(chain[burn:, idx][mp_idx], ls = '--', color = 'black') + fig = pl.figure(figsize=(8, 11)) + + ax = fig.add_subplot(nrows, ncols, j + 1) + ax.hist( + chain[:, idx], + bins=nbins, + histtype="step", + color="black", + label="Current", + ) + ax.axvline(chain[:, idx][mp_idx], ls="--", color="black") if chaindir_compare is not None: - ax.hist(chain_compare[burn:, idx], bins = nbins, histtype = 'step', color='orange', label = 'Past') - ax.axvline(chain_compare[burn:, idx][mp_compare_idx], ls = '--', color = 'orange') - if '_wb' in chaindir: ax.set_xlabel(par, fontsize=8) - else: ax.set_xlabel(par, fontsize = 10) + ax.hist( + chain_compare[:, idx], + bins=nbins, + histtype="step", + color="orange", + label="Past", + ) + ax.axvline( + chain_compare[:, idx][mp_compare_idx], ls="--", color="orange" + ) + if "_wb" in chaindir: + ax.set_xlabel(par, fontsize=8) + else: + ax.set_xlabel(par, fontsize=10) ax.set_yticks([]) ax.set_yticklabels([]) - if j == (nrows*ncols)-1 or idx == len(pars_short)-1: + if j == (nrows * ncols) - 1 or idx == len(pars_short) - 1: pl.tight_layout() pl.savefig(f"{figbase}_{pp}.pdf") # Wasn't working before, but how do I implement a legend? 
- #ax[nr][nc].legend(loc = 'best') + # ax[nr][nc].legend(loc = 'best') pl.show() - - ml_idx = np.argmax(chain[burn:, -4]) - wn_vals = chain[burn:, :-4][ml_idx] + ml_idx = np.argmax(chain[:, -4]) + + wn_vals = chain[:, :-4][ml_idx] wn_dict = dict(zip(pars, wn_vals)) - #Print bayes factor for red noise in pulsar - rn_bf = model_utils.bayes_fac(chain[burn:, -5], ntol=1, logAmax=-11, logAmin=-20)[0] + # Print bayes factor for red noise in pulsar + rn_bf = model_utils.bayes_fac(chain[:, -5], ntol=1, logAmax=-11, logAmin=-20)[0] return wn_dict, rn_bf -def model_noise(mo, to, which_sampler = 'PTMCMCSampler', - vary_red_noise = True, n_iter = int(1e5), - using_wideband = False, resume = False, - run_noise_analysis = True, - wb_efac_sigma = 0.25, base_op_dir = "./", - noise_kwargs = {}, sampler_kwargs = {}, - ): + +def model_noise( + mo, + to, + which_sampler="PTMCMCSampler", + vary_red_noise=True, + n_iter=int(1e5), + using_wideband=False, + resume=False, + run_noise_analysis=True, + wb_efac_sigma=0.25, + base_op_dir="./", + noise_kwargs={}, + sampler_kwargs={}, +): """ Setup enterprise PTA and perform MCMC noise analysis @@ -192,11 +264,16 @@ def model_noise(mo, to, which_sampler = 'PTMCMCSampler', ========== mo: PINT (or tempo2) timing model to: PINT (or tempo2) TOAs - sampler: either 'PTMCMCSampler' or 'GibbsSampler' or 'discovery' + sampler: choose from ['PTMCMCSampler' or 'GibbsSampler' or 'discovery'] + PTMCMCSampler -- MCMC sampling with the Enterprise likelihood + GibbsSampler -- enterprise_extension's GibbsSampler with PTMCMC and Enterprise white noise + discovery -- blocked Gibbs-Hamiltonian MC in numpyro with a discovery likelihood red_noise: include red noise in the model n_iter: number of MCMC iterations; Default: 1e5; Recommended > 5e4 using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False run_noise_analysis: Flag to toggle execution of noise modeling; Default: True + noise_kwargs: dictionary of noise model parameters; Default: {} + sampler_kwargs: dictionary of sampler parameters; Default: {} Returns ======= @@ -204,71 +281,142 @@ def model_noise(mo, to, which_sampler = 'PTMCMCSampler', """ if not using_wideband: - outdir = base_op_dir + mo.PSR.value + '_nb/' + outdir = base_op_dir + mo.PSR.value + "_nb/" else: - outdir = base_op_dir + mo.PSR.value + '_wb/' + outdir = base_op_dir + mo.PSR.value + "_wb/" if os.path.exists(outdir) and (run_noise_analysis) and (not resume): - log.info("INFO: A noise directory for pulsar {} already exists! Re-running noise modeling from scratch".format(mo.PSR.value)) + log.info( + "INFO: A noise directory for pulsar {} already exists! Re-running noise modeling from scratch".format( + mo.PSR.value + ) + ) elif os.path.exists(outdir) and (run_noise_analysis) and (resume): - log.info("INFO: A noise directory for pulsar {} already exists! Re-running noise modeling starting from previous chain".format(mo.PSR.value)) + log.info( + "INFO: A noise directory for pulsar {} already exists! Re-running noise modeling starting from previous chain".format( + mo.PSR.value + ) + ) if not run_noise_analysis: - log.info("Skipping noise modeling. Change run_noise_analysis = True to run noise modeling.") + log.info( + "Skipping noise modeling. Change run_noise_analysis = True to run noise modeling." + ) return None - #Ensure n_iter is an integer + # Ensure n_iter is an integer n_iter = int(n_iter) - - if n_iter < 1e4: - log.warning("Such a small number of iterations is unlikely to yield accurate posteriors. 
STRONGLY recommend increasing the number of iterations to at least 5e4") - #Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to) - e_psr = Pulsar(mo, to) - - if which_sampler == 'PTMCMCSampler': + if n_iter < 1e4: + log.warning( + "Such a small number of iterations is unlikely to yield accurate posteriors. STRONGLY recommend increasing the number of iterations to at least 5e4" + ) + + # Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to) + if which_sampler == "discovery": + # discovery requires feathered pulsars + f_psr = Pulsar(mo, to) + elif which_sampler == "GibbsSampler" or which_sampler == "PTMCMCSampler": + e_psr = Pulsar(mo, to) + + if which_sampler == "PTMCMCSampler": log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") - #Setup a single pulsar PTA using enterprise_extensions + # Setup a single pulsar PTA using enterprise_extensions if not using_wideband: - pta = models.model_singlepsr_noise(e_psr, white_vary = True, red_var = vary_red_noise, is_wideband = False, use_dmdata = False, dmjump_var = False, wb_efac_sigma = wb_efac_sigma, **noise_kwargs) + pta = models.model_singlepsr_noise( + e_psr, + white_vary=True, + red_var=vary_red_noise, + is_wideband=False, + use_dmdata=False, + dmjump_var=False, + wb_efac_sigma=wb_efac_sigma, + **noise_kwargs, + ) else: - pta = models.model_singlepsr_noise(e_psr, is_wideband = True, use_dmdata = True, white_vary = True, red_var = vary_red_noise, dmjump_var = False, wb_efac_sigma = wb_efac_sigma, ng_twg_setup = True, **noise_kwargs) + pta = models.model_singlepsr_noise( + e_psr, + is_wideband=True, + use_dmdata=True, + white_vary=True, + red_var=vary_red_noise, + dmjump_var=False, + wb_efac_sigma=wb_efac_sigma, + ng_twg_setup=True, + **noise_kwargs, + ) dmjump_params = {} for param in mo.params: - if param.startswith('DMJUMP'): - dmjump_param = getattr(mo,param) - dmjump_param_name = f"{pta.pulsars[0]}_{dmjump_param.key_value[0]}_dmjump" + if param.startswith("DMJUMP"): + dmjump_param = getattr(mo, param) + dmjump_param_name = ( + f"{pta.pulsars[0]}_{dmjump_param.key_value[0]}_dmjump" + ) dmjump_params[dmjump_param_name] = dmjump_param.value pta.set_default_params(dmjump_params) # FIXME: set groups here ####### - #setup sampler using enterprise_extensions - samp = sampler.setup_sampler(pta, outdir = outdir, resume = resume) + # setup sampler using enterprise_extensions + samp = sampler.setup_sampler(pta, outdir=outdir, resume=resume) - #Initial sample + # Initial sample x0 = np.hstack([p.sample() for p in pta.params]) - #Start sampling - samp.sample(x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs) - elif which_sampler == 'GibbsSampler': + # Start sampling + samp.sample( + x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs + ) + elif which_sampler == "GibbsSampler": log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") - samp = GibbsSampler(e_psr, - **noise_kwargs, - ) + samp = GibbsSampler( + e_psr, + **noise_kwargs, + ) samp.sample(niter=n_iter, save_path=outdir, **sampler_kwargs) pass - elif which_sampler == 'discovery': + elif which_sampler == "discovery": log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") - pass - else: - log.error("Invalid sampler specified. 
Please use \'PTMCMCSampler\' or \'GibbsSampler\' or \'discovery\' ") + try: + import jax + import xarray as xr + except ImportError: + log.error("Please install latest version of jax and/or xarray") + ValueError("Please install lastest version of jax and/or xarray") + samp = setup_discovery_noise(f_psr) + # run the sampler + samp.run(jax.random.key(42)) + # Get samples + samples = samp.get_samples() + + # Convert samples to xarray.Dataset + data = samp.Dataset({var: (["chain", "draw"], np.expand_dims(samples[var], axis=0)) for var in samples}) + + # Save to NetCDF file + data.to_netcdf(f"{base_op_dir}/discovery_chain.nc") + else: + log.error( + "Invalid sampler specified. Please use 'PTMCMCSampler' or 'GibbsSampler' or 'discovery' " + ) + + def convert_to_RNAMP(value): """ Utility function to convert enterprise RN amplitude to tempo2/PINT parfile RN amplitude """ - return (86400.*365.24*1e6)/(2.0*np.pi*np.sqrt(3.0)) * 10 ** value - -def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_plot = False, ignore_red_noise = False, using_wideband = False, rn_bf_thres = 1e2, base_dir = None, compare_dir=None): + return (86400.0 * 365.24 * 1e6) / (2.0 * np.pi * np.sqrt(3.0)) * 10**value + + +def add_noise_to_model( + model, + burn_frac=0.25, + save_corner=True, + no_corner_plot=False, + ignore_red_noise=False, + using_wideband=False, + rn_bf_thres=1e2, + base_dir=None, + compare_dir=None, +): """ Add WN and RN parameters to timing model. @@ -291,29 +439,37 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl # Assume results are in current working directory if not specified if not base_dir: - base_dir = './' + base_dir = "./" chaindir_compare = compare_dir if not using_wideband: - chaindir = os.path.join(base_dir,f'{model.PSR.value}_nb/') + chaindir = os.path.join(base_dir, f"{model.PSR.value}_nb/") if compare_dir is not None: - chaindir_compare = os.path.join(compare_dir,f'{model.PSR.value}_nb/') + chaindir_compare = os.path.join(compare_dir, f"{model.PSR.value}_nb/") else: - chaindir = os.path.join(base_dir,f'{model.PSR.value}_wb/') + chaindir = os.path.join(base_dir, f"{model.PSR.value}_wb/") if compare_dir is not None: - chaindir_compare = os.path.join(compare_dir,f'{model.PSR.value}_wb/') - - log.info(f'Using existing noise analysis results in {chaindir}') - log.info('Adding new noise parameters to model.') - wn_dict, rn_bf = analyze_noise(chaindir, burn_frac, save_corner, no_corner_plot, chaindir_compare=chaindir_compare) - chainfile = chaindir + 'chain_1.txt' + chaindir_compare = os.path.join(compare_dir, f"{model.PSR.value}_wb/") + + log.info(f"Using existing noise analysis results in {chaindir}") + log.info("Adding new noise parameters to model.") + wn_dict, rn_bf = analyze_noise( + chaindir, + burn_frac, + save_corner, + no_corner_plot, + chaindir_compare=chaindir_compare, + ) + chainfile = chaindir + "chain_1.txt" mtime = Time(os.path.getmtime(chainfile), format="unix") log.info(f"Noise chains loaded from {chainfile} created at {mtime.isot}") - #Create the maskParameter for EFACS + # Create the maskParameter for EFACS efac_params = [] equad_params = [] rn_params = [] + dm_gp_params = [] + chrom_gp_params = [] ecorr_params = [] dmefac_params = [] dmequad_params = [] @@ -326,140 +482,205 @@ def add_noise_to_model(model, burn_frac = 0.25, save_corner = True, no_corner_pl for key, val in wn_dict.items(): - psr_name = key.split('_')[0] + psr_name = key.split("_")[0] - if '_efac' in key: + if "_efac" in key: - param_name = 
key.split('_efac')[0].split(psr_name)[1][1:] + param_name = key.split("_efac")[0].split(psr_name)[1][1:] - tp = maskParameter(name = 'EFAC', index = efac_idx, key = '-f', key_value = param_name, - value = val, units = '', convert_tcb2tdb=False) + tp = maskParameter( + name="EFAC", + index=efac_idx, + key="-f", + key_value=param_name, + value=val, + units="", + convert_tcb2tdb=False, + ) efac_params.append(tp) efac_idx += 1 # See https://github.com/nanograv/enterprise/releases/tag/v3.3.0 # ..._t2equad uses PINT/Tempo2/Tempo convention, resulting in total variance EFAC^2 x (toaerr^2 + EQUAD^2) - elif '_t2equad' in key: - - param_name = key.split('_t2equad')[0].split(psr_name)[1].split('_log10')[0][1:] - - tp = maskParameter(name = 'EQUAD', index = equad_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) + elif "_t2equad" in key: + + param_name = ( + key.split("_t2equad")[0].split(psr_name)[1].split("_log10")[0][1:] + ) + + tp = maskParameter( + name="EQUAD", + index=equad_idx, + key="-f", + key_value=param_name, + value=10**val / 1e-6, + units="us", + convert_tcb2tdb=False, + ) equad_params.append(tp) equad_idx += 1 # ..._tnequad uses temponest convention, resulting in total variance EFAC^2 toaerr^2 + EQUAD^2 - elif '_tnequad' in key: - - param_name = key.split('_tnequad')[0].split(psr_name)[1].split('_log10')[0][1:] - - tp = maskParameter(name = 'EQUAD', index = equad_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) + elif "_tnequad" in key: + + param_name = ( + key.split("_tnequad")[0].split(psr_name)[1].split("_log10")[0][1:] + ) + + tp = maskParameter( + name="EQUAD", + index=equad_idx, + key="-f", + key_value=param_name, + value=10**val / 1e-6, + units="us", + convert_tcb2tdb=False, + ) equad_params.append(tp) equad_idx += 1 # ..._equad uses temponest convention; generated with enterprise pre-v3.3.0 - elif '_equad' in key: - - param_name = key.split('_equad')[0].split(psr_name)[1].split('_log10')[0][1:] - - tp = maskParameter(name = 'EQUAD', index = equad_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) + elif "_equad" in key: + + param_name = ( + key.split("_equad")[0].split(psr_name)[1].split("_log10")[0][1:] + ) + + tp = maskParameter( + name="EQUAD", + index=equad_idx, + key="-f", + key_value=param_name, + value=10**val / 1e-6, + units="us", + convert_tcb2tdb=False, + ) equad_params.append(tp) equad_idx += 1 - elif ('_ecorr' in key) and (not using_wideband): - - param_name = key.split('_ecorr')[0].split(psr_name)[1].split('_log10')[0][1:] - - tp = maskParameter(name = 'ECORR', index = ecorr_idx, key = '-f', key_value = param_name, - value = 10 ** val / 1e-6, units = 'us', convert_tcb2tdb=False) + elif ("_ecorr" in key) and (not using_wideband): + + param_name = ( + key.split("_ecorr")[0].split(psr_name)[1].split("_log10")[0][1:] + ) + + tp = maskParameter( + name="ECORR", + index=ecorr_idx, + key="-f", + key_value=param_name, + value=10**val / 1e-6, + units="us", + convert_tcb2tdb=False, + ) ecorr_params.append(tp) ecorr_idx += 1 - elif ('_dmefac' in key) and (using_wideband): + elif ("_dmefac" in key) and (using_wideband): - param_name = key.split('_dmefac')[0].split(psr_name)[1][1:] + param_name = key.split("_dmefac")[0].split(psr_name)[1][1:] - tp = maskParameter(name = 'DMEFAC', index = dmefac_idx, key = '-f', key_value = param_name, - value = val, units = '', convert_tcb2tdb=False) + tp = maskParameter( + 
name="DMEFAC", + index=dmefac_idx, + key="-f", + key_value=param_name, + value=val, + units="", + convert_tcb2tdb=False, + ) dmefac_params.append(tp) dmefac_idx += 1 - elif ('_dmequad' in key) and (using_wideband): - - param_name = key.split('_dmequad')[0].split(psr_name)[1].split('_log10')[0][1:] - - tp = maskParameter(name = 'DMEQUAD', index = dmequad_idx, key = '-f', key_value = param_name, - value = 10 ** val, units = 'pc/cm3', convert_tcb2tdb=False) + elif ("_dmequad" in key) and (using_wideband): + + param_name = ( + key.split("_dmequad")[0].split(psr_name)[1].split("_log10")[0][1:] + ) + + tp = maskParameter( + name="DMEQUAD", + index=dmequad_idx, + key="-f", + key_value=param_name, + value=10**val, + units="pc/cm3", + convert_tcb2tdb=False, + ) dmequad_params.append(tp) dmequad_idx += 1 # Test EQUAD convention and decide whether to convert convert_equad_to_t2 = False - if test_equad_convention(wn_dict.keys()) == 'tnequad': - log.info('WN paramaters use temponest convention; EQUAD values will be converted once added to model') + if test_equad_convention(wn_dict.keys()) == "tnequad": + log.info( + "WN paramaters use temponest convention; EQUAD values will be converted once added to model" + ) convert_equad_to_t2 = True - if np.any(['_equad' in p for p in wn_dict.keys()]): - log.info('WN parameters generated using enterprise pre-v3.3.0') - elif test_equad_convention(wn_dict.keys()) == 't2equad': - log.info('WN parameters use T2 convention; no conversion necessary') + if np.any(["_equad" in p for p in wn_dict.keys()]): + log.info("WN parameters generated using enterprise pre-v3.3.0") + elif test_equad_convention(wn_dict.keys()) == "t2equad": + log.info("WN parameters use T2 convention; no conversion necessary") # Create white noise components and add them to the model ef_eq_comp = pm.ScaleToaError() - ef_eq_comp.remove_param(param = 'EFAC1') - ef_eq_comp.remove_param(param = 'EQUAD1') - ef_eq_comp.remove_param(param = 'TNEQ1') + ef_eq_comp.remove_param(param="EFAC1") + ef_eq_comp.remove_param(param="EQUAD1") + ef_eq_comp.remove_param(param="TNEQ1") for efac_param in efac_params: - ef_eq_comp.add_param(param = efac_param, setup = True) + ef_eq_comp.add_param(param=efac_param, setup=True) for equad_param in equad_params: - ef_eq_comp.add_param(param = equad_param, setup = True) - model.add_component(ef_eq_comp, validate = True, force = True) + ef_eq_comp.add_param(param=equad_param, setup=True) + model.add_component(ef_eq_comp, validate=True, force=True) if len(dmefac_params) > 0 or len(dmequad_params) > 0: dm_comp = pm.noise_model.ScaleDmError() - dm_comp.remove_param(param = 'DMEFAC1') - dm_comp.remove_param(param = 'DMEQUAD1') + dm_comp.remove_param(param="DMEFAC1") + dm_comp.remove_param(param="DMEQUAD1") for dmefac_param in dmefac_params: - dm_comp.add_param(param = dmefac_param, setup = True) + dm_comp.add_param(param=dmefac_param, setup=True) for dmequad_param in dmequad_params: - dm_comp.add_param(param = dmequad_param, setup = True) - model.add_component(dm_comp, validate = True, force = True) + dm_comp.add_param(param=dmequad_param, setup=True) + model.add_component(dm_comp, validate=True, force=True) if len(ecorr_params) > 0: ec_comp = pm.EcorrNoise() - ec_comp.remove_param('ECORR1') + ec_comp.remove_param("ECORR1") for ecorr_param in ecorr_params: - ec_comp.add_param(param = ecorr_param, setup = True) - model.add_component(ec_comp, validate = True, force = True) + ec_comp.add_param(param=ecorr_param, setup=True) + model.add_component(ec_comp, validate=True, force=True) # 
Create red noise component and add it to the model log.info(f"The SD Bayes factor for red noise in this pulsar is: {rn_bf}") if (rn_bf >= rn_bf_thres or np.isnan(rn_bf)) and (not ignore_red_noise): log.info("Including red noise for this pulsar") - #Add the ML RN parameters to their component + # Add the ML RN parameters to their component rn_comp = pm.PLRedNoise() - rn_keys = np.array([key for key,val in wn_dict.items() if '_red_' in key]) - rn_comp.RNAMP.quantity = convert_to_RNAMP(wn_dict[psr_name + '_red_noise_log10_A']) - rn_comp.RNIDX.quantity = -1 * wn_dict[psr_name + '_red_noise_gamma'] + rn_keys = np.array([key for key, val in wn_dict.items() if "_red_" in key]) + rn_comp.RNAMP.quantity = convert_to_RNAMP( + wn_dict[psr_name + "_red_noise_log10_A"] + ) + rn_comp.RNIDX.quantity = -1 * wn_dict[psr_name + "_red_noise_gamma"] - #Add red noise to the timing model - model.add_component(rn_comp, validate = True, force = True) + # Add red noise to the timing model + model.add_component(rn_comp, validate=True, force=True) else: log.info("Not including red noise for this pulsar") - #Setup and validate the timing model to ensure things are correct + # Setup and validate the timing model to ensure things are correct model.setup() model.validate() model.meta['noise_mtime'] = mtime.isot if convert_equad_to_t2: from pint_pal.lite_utils import convert_enterprise_equads + model = convert_enterprise_equads(model) return model + def setup_gibbs_sampler(): """ Setup the Gibbs sampler for noise analysis from enterprise extensions @@ -470,21 +691,59 @@ def setup_gibbs_sampler(): except ImportError: log.error("Please install the latest version of enterprise_extensions") return None - + pass -def setup_discovery_sampler(): + +def setup_discovery_noise(psr): """ Setup the discovery sampler for noise analysis from enterprise extensions """ # check that a sufficiently up-to-date version of enterprise_extensions is installed try: import discovery as ds + import jax + from jax import numpy as jnp + import numpyro + from numpyro import distributions as dist + from numpyro import infer + from discovery import prior + from discovery.prior import (makelogtransform_uniform, + makelogprior_uniform, + sample_uniform) + except ImportError: - log.error("Please install the latest version of discovery") - return None + log.error("Please install the latest version of discovery, numpyro, and/or jax") + ValueError("Please install the latest version of discovery, numpyro, and/or jax") - pass + time_span = ds.getspan([psr]) + args = ( + ds.makenoise_measurement(psr), + ds.makegp_ecorr(psr), + ds.makegp_timing(psr, svd=True), + ds.makegp_fourier(psr, ds.powerlaw, 30, T=time_span, name='red_noise'), + psr.residuals + ) + psl = ds.PulsarLikelihood(args) + prior = prior.makelogprior_uniform(psl.logL.params, {'(.*_)?extra_parameter': [9, 10]}) + log_x = makelogtransform_uniform(psl.logL) + x0 = sample_uniform(psl.logL.params) + def numpyro_model(): + params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) + numpyro.factor("ll", log_x(params)) + + sampler = infer.MCMC( + infer.NUTS(numpyro_model), + num_warmup=250, + num_samples=4096, + num_chains=4, + progress_bar=True, + chain_method='vectorized' + ) + + return sampler + + def test_equad_convention(pars_list): """ @@ -497,15 +756,17 @@ def test_equad_convention(pars_list): Returns ======= - convention_test: t2equad/tnequad/None + convention_test: t2equad/tnequad/None """ # Test equad convention - t2_test = np.any(['_t2equad' in p for p in 
pars_list]) - tn_test = np.any([('_tnequad' in p) or ('_equad' in p) for p in pars_list]) + t2_test = np.any(["_t2equad" in p for p in pars_list]) + tn_test = np.any([("_tnequad" in p) or ("_equad" in p) for p in pars_list]) if t2_test and not tn_test: - return 't2equad' + return "t2equad" elif tn_test and not t2_test: - return 'tnequad' + return "tnequad" else: - log.warning('EQUADs not present in parameter list (or something strange is going on).') + log.warning( + "EQUADs not present in parameter list (or something strange is going on)." + ) return None From 1579c7409d2fd219e87019e1e71c8e92cb888ccb Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Fri, 18 Oct 2024 00:53:04 -0700 Subject: [PATCH 129/193] adding discovery to noise utils --- src/pint_pal/noise_utils.py | 63 ++++++++++++++++++++----------------- 1 file changed, 34 insertions(+), 29 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 7a3270f3..c38cba8b 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -1,4 +1,5 @@ import numpy as np, os +import arviz as az from astropy import log from astropy.time import Time @@ -72,10 +73,14 @@ def analyze_noise( except: log.error(f"Could not load noise run from {chaindir}") return None - noise_core.burn(burn_frac) + noise_core.set_burn(burn_frac) chain = noise_core.chain - psr_name = noise_core.pars[0].split("_")[0] - pars = noise_core.pars + psr_name = noise_core.params[0].split("_")[0] + pars = np.array(noise_core.params) + if chain.shape[1] != len(pars): + a = -4 + elif chain.shape[1] == len(pars): + a = len(chain.shape[1]) # load in same for comparison noise model if chaindir_compare is not None: @@ -110,12 +115,12 @@ def analyze_noise( chaindir_compare = None else: normalization_factor = ( - np.ones(len(chain_compare[:, :-4])) - * len(chain[:, :-4]) - / len(chain_compare[:, :-4]) + np.ones(len(chain_compare[:, :a])) + * len(chain[:, :a]) + / len(chain_compare[:, :a]) ) fig = corner.corner( - chain_compare[:, :-4], + chain_compare[:, :a], color="orange", alpha=0.5, weights=normalization_factor, @@ -123,10 +128,10 @@ def analyze_noise( ) # normal corner plot corner.corner( - chain[:, :-4], fig=fig, color="black", labels=pars_short + chain[:, :a], fig=fig, color="black", labels=pars_short ) if chaindir_compare is None: - corner.corner(chain[:, :-4], labels=pars_short) + corner.corner(chain[:, :a], labels=pars_short) if "_wb" in chaindir: figname = f"./{psr_name}_noise_corner_wb.pdf" @@ -169,9 +174,9 @@ def analyze_noise( chaindir_compare = None else: normalization_factor = ( - np.ones(len(chain_compare[:, :-4])) - * len(chain[:, :-4]) - / len(chain_compare[:, :-4]) + np.ones(len(chain_compare[:, :a])) + * len(chain[:, :a]) + / len(chain_compare[:, :a]) ) # Set the shape of the subplots @@ -184,9 +189,9 @@ def analyze_noise( nrows = 5 # number of rows per page - mp_idx = np.argmax(chain[:, -4]) + mp_idx = np.argmax(chain[:, a]) if chaindir_compare is not None: - mp_compare_idx = np.argmax(chain_compare[:, -4]) + mp_compare_idx = np.argmax(chain_compare[:, a]) nbins = 20 pp = 0 @@ -231,9 +236,9 @@ def analyze_noise( # ax[nr][nc].legend(loc = 'best') pl.show() - ml_idx = np.argmax(chain[:, -4]) + ml_idx = np.argmax(chain[:, a]) - wn_vals = chain[:, :-4][ml_idx] + wn_vals = chain[:, :a][ml_idx] wn_dict = dict(zip(pars, wn_vals)) @@ -381,18 +386,18 @@ def model_noise( except ImportError: log.error("Please install latest version of jax and/or xarray") ValueError("Please install lastest version of jax and/or xarray") - samp = 
setup_discovery_noise(f_psr) + samp, log_x = setup_discovery_noise(f_psr) # run the sampler samp.run(jax.random.key(42)) - - # Get samples - samples = samp.get_samples() - - # Convert samples to xarray.Dataset - data = samp.Dataset({var: (["chain", "draw"], np.expand_dims(samples[var], axis=0)) for var in samples}) - - # Save to NetCDF file - data.to_netcdf(f"{base_op_dir}/discovery_chain.nc") + # convert to a DataFrame + df = log_x.to_df(samp.get_samples()['par']) + # convert DataFrame to dictionary + samples_dict = df.to_dict(orient='list') + # convert dictionary to ArviZ InferenceData object + inference_data = az.from_dict(samples_dict) + # Save to NetCDF file which can be loaded into la_forge + os.mkdir(outdir, parents=True, exist_ok=True) + inference_data.to_netcdf(outdir+"chain.nc") else: log.error( "Invalid sampler specified. Please use 'PTMCMCSampler' or 'GibbsSampler' or 'discovery' " @@ -725,9 +730,9 @@ def setup_discovery_noise(psr): psr.residuals ) psl = ds.PulsarLikelihood(args) - prior = prior.makelogprior_uniform(psl.logL.params, {'(.*_)?extra_parameter': [9, 10]}) + prior = prior.makelogprior_uniform(psl.logL.params, {}) log_x = makelogtransform_uniform(psl.logL) - x0 = sample_uniform(psl.logL.params) + # x0 = sample_uniform(psl.logL.params) def numpyro_model(): params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) numpyro.factor("ll", log_x(params)) @@ -741,7 +746,7 @@ def numpyro_model(): chain_method='vectorized' ) - return sampler + return sampler, log_x From df7c03cd19340e927ef6557c7f247e9d07ddf3e1 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Sat, 19 Oct 2024 15:28:10 -0700 Subject: [PATCH 130/193] commiting what i have rn --- src/pint_pal/noise_utils.py | 407 ++++++++++++++++++++++++++++-------- 1 file changed, 317 insertions(+), 90 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index c38cba8b..f125a314 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -1,4 +1,4 @@ -import numpy as np, os +import numpy as np, os, json import arviz as az from astropy import log from astropy.time import Time @@ -29,6 +29,7 @@ from enterprise.signals import deterministic_signals from enterprise import constants as const +from enterprise_extensions.sampler import group_from_params, get_parameter_groups from enterprise_extensions import model_utils from enterprise_extensions import deterministic from enterprise_extensions.timing import timing_block @@ -41,12 +42,82 @@ from enterprise.signals import gp_priors as gpp +def setup_sampling_groups(pta, + write_groups=True, + outdir='./'): + """ + Sets sampling groups for PTMCMCSampler. + The sampling groups can help ensure the sampler does not get stuck. + The idea is to group parameters which are more highly correlated. 
+ + Params + ------ + pta: the enterprise pta object + write_groups: bool, write the groups to a file + outdir: str, directory to write the groups to + + returns + ------- + groups: list of lists of indices corresponding to parameter groups + + """ + + # groups + pnames = pta.param_names + groups = get_parameter_groups(pta) + # add per-backend white noise + backends = np.unique([p[p.index('_')+1:p.index('efac')-1] for p in pnames if 'efac' in p]) + for be in backends: + groups.append(group_from_params(pta,[be])) + # group red noise parameters + exclude = ['linear_timing_model','sw_r2','sw_4p39','measurement_noise', + 'ecorr_sherman-morrison', 'ecorr_fast-sherman-morrison'] + red_signals = [p[p.index('_')+1:] for p in list(pta.signals.keys()) + if not p[p.index('_')+1:] in exclude] + rn_ct = 0 + for rs in red_signals: + if len(group_from_params(pta,[rs])) > 0: + rn_ct += 1 + groups.append(group_from_params(pta,[rs])) + if rn_ct > 1: + groups.append(group_from_params(pta,red_signals)) + # add cross chromatic groups + if 'n_earth' in pnames or 'log10_sigma_ne' in pnames: + # cross SW and chrom groups + dmgp_sw = [idx for idx, nm in enumerate(pnames) + if any([flag in nm for flag in ['dm_gp','n_earth', 'log10_sigma_ne']])] + groups.append(dmgp_sw) + if np.any(['chrom' in param for param in pnames]): + chromgp_sw = [idx for idx, nm in enumerate(pnames) + if any([flag in nm for flag in ['chrom_gp','n_earth', 'log10_sigma_ne']])] + dmgp_chromgp_sw = [idx for idx, nm in enumerate(pnames) + if any([flag in nm for flag in ['dm_gp','chrom','n_earth', 'log10_sigma_ne']])] + groups.append(chromgp_sw) + groups.append(dmgp_chromgp_sw) + if np.any(['chrom' in param for param in pnames]): + # cross dmgp and chromgp group + dmgp_chromgp = [idx for idx, nm in enumerate(pnames) + if any([flag in nm for flag in ['dm_gp','chrom']])] + groups.append(dmgp_chromgp) + # everything + groups.append([i for i in range(len(pnames))]) + # save list of params corresponding to groups + if write_groups is True: + with open(f'{outdir}/groups.txt', 'w') as fi: + for group in groups: + line = np.array(pnames)[np.array(group)] + fi.write("[" + " ".join(line) + "]\n") + # return the groups to be passed to the sampler + return groups + + def analyze_noise( chaindir="./noise_run_chains/", burn_frac=0.25, save_corner=True, no_corner_plot=False, chaindir_compare=None, + which_sampler = 'PTMCMCSampler', ): """ Reads enterprise chain file; produces and saves corner plot; returns WN dictionary and RN (SD) BF @@ -56,38 +127,41 @@ def analyze_noise( chaindir: path to enterprise noise run chain; Default: './noise_run_chains/' burn_frac: fraction of chain to use for burn-in; Default: 0.25 save_corner: Flag to toggle saving of corner plots; Default: True - chaindir_compare: path to enterprise noise run chain wish to plot in corner plot for comparison; Default: None + no_corner_plot: Flag to toggle saving of corner plots; Default: False + chaindir_compare: path to noise run chain wish to plot in corner plot for comparison; Default: None + which_sampler: choose from ['PTMCMCSampler' or 'GibbsSampler' or 'discovery'] Returns ======= - wn_dict: Dictionary of maximum likelihood WN values - rn_bf: Savage-Dickey BF for RN for given pulsar + noise_core: la_forge.core object which contains noise chains and run metadata + noise_dict: Dictionary of maximum a posterior noise values + rn_bf: Savage-Dickey BF for achromatic RN for given pulsar """ - #### replacing this with la_forge to be more flexible - # chainfile = chaindir + "chain_1.txt" - # chain = 
np.loadtxt(chainfile) - # burn = int(burn_frac * chain.shape[0]) - # pars = np.loadtxt(chaindir + "pars.txt", dtype=str) try: noise_core = co.Core(chaindir=chaindir) except: log.error(f"Could not load noise run from {chaindir}") - return None - noise_core.set_burn(burn_frac) + ValueError(f"Could not load noise run from {chaindir}") + if which_sampler == 'PTMCMCSampler': + noise_core.set_burn(burn_frac) + elif which_sampler == 'discovery': + noise_core.set_burn(0) + else: + noise_core.set_burn(burn_frac) chain = noise_core.chain psr_name = noise_core.params[0].split("_")[0] - pars = np.array(noise_core.params) - if chain.shape[1] != len(pars): - a = -4 - elif chain.shape[1] == len(pars): - a = len(chain.shape[1]) + pars = np.array([p for p in noise_core.params if p not in ['lnlike', 'lnpost']]) + if len(pars)+2 != chain.shape[1]: + chain = chain[:, :len(pars)+2] # load in same for comparison noise model if chaindir_compare is not None: - chainfile_compare = chaindir_compare + "chain_1.txt" - chain_compare = np.loadtxt(chainfile_compare) - burn_compare = int(burn_frac * chain_compare.shape[0]) - pars_compare = np.loadtxt(chaindir_compare + "pars.txt", dtype=str) + compare_core = co.Core(chaindir=chaindir) + compare_core.set_burn(noise_core.burn) + chain_compare = compare_core.chain + pars_compare = np.array([p for p in compare_core.params if p not in ['lnlike', 'lnpost']]) + if len(pars_compare)+2 != chain_compare.shape[1]: + chain_compare = chain_compare[:, :len(pars_compare)+2] psr_name_compare = pars_compare[0].split("_")[0] if psr_name_compare != psr_name: @@ -105,22 +179,22 @@ def analyze_noise( compare_pars_short = [p.split("_", 1)[1] for p in pars_compare] log.info(f"Comparison chain parameter names are {compare_pars_short}") log.info( - f"Comparison chain parameter convention: {test_equad_convention(compare_pars_short)}" + f"Comparison chain parameter convention: {test_equad_convention(compare_pars_short)}" ) # don't plot comparison if the parameter names don't match if compare_pars_short != pars_short: log.warning( - "Parameter names for comparison noise chains do not match, not plotting the compare-noise-dir chains" + "Parameter names for comparison noise chains do not match, not plotting the compare-noise-dir chains" ) chaindir_compare = None else: normalization_factor = ( - np.ones(len(chain_compare[:, :a])) - * len(chain[:, :a]) - / len(chain_compare[:, :a]) + np.ones(len(chain_compare)) + * len(chain) + / len(chain_compare) ) fig = corner.corner( - chain_compare[:, :a], + chain_compare, color="orange", alpha=0.5, weights=normalization_factor, @@ -128,10 +202,10 @@ def analyze_noise( ) # normal corner plot corner.corner( - chain[:, :a], fig=fig, color="black", labels=pars_short + chain, fig=fig, color="black", labels=pars_short ) if chaindir_compare is None: - corner.corner(chain[:, :a], labels=pars_short) + corner.corner(chain, labels=pars_short) if "_wb" in chaindir: figname = f"./{psr_name}_noise_corner_wb.pdf" @@ -174,9 +248,9 @@ def analyze_noise( chaindir_compare = None else: normalization_factor = ( - np.ones(len(chain_compare[:, :a])) - * len(chain[:, :a]) - / len(chain_compare[:, :a]) + np.ones(len(chain_compare)) + * len(chain) + / len(chain_compare) ) # Set the shape of the subplots @@ -189,9 +263,10 @@ def analyze_noise( nrows = 5 # number of rows per page - mp_idx = np.argmax(chain[:, a]) + mp_idx = noise_core.map_idx + #mp_idx = np.argmax(chain[:, a]) if chaindir_compare is not None: - mp_compare_idx = np.argmax(chain_compare[:, a]) + mp_compare_idx = 
compare_core.map_idx nbins = 20 pp = 0 @@ -235,17 +310,14 @@ def analyze_noise( # Wasn't working before, but how do I implement a legend? # ax[nr][nc].legend(loc = 'best') pl.show() - - ml_idx = np.argmax(chain[:, a]) - - wn_vals = chain[:, :a][ml_idx] - - wn_dict = dict(zip(pars, wn_vals)) + + noise_dict = noise_core.get_map_dict() # Print bayes factor for red noise in pulsar - rn_bf = model_utils.bayes_fac(chain[:, -5], ntol=1, logAmax=-11, logAmin=-20)[0] + rn_amp_nm = psr_name+"_red_noise_log10_A" + rn_bf = model_utils.bayes_fac(noise_core(rn_amp_nm), ntol=1, logAmax=-11, logAmin=-20)[0] - return wn_dict, rn_bf + return noise_core, noise_dict, rn_bf def model_noise( @@ -261,6 +333,7 @@ def model_noise( base_op_dir="./", noise_kwargs={}, sampler_kwargs={}, + return_sampler=False, ): """ Setup enterprise PTA and perform MCMC noise analysis @@ -272,17 +345,19 @@ def model_noise( sampler: choose from ['PTMCMCSampler' or 'GibbsSampler' or 'discovery'] PTMCMCSampler -- MCMC sampling with the Enterprise likelihood GibbsSampler -- enterprise_extension's GibbsSampler with PTMCMC and Enterprise white noise - discovery -- blocked Gibbs-Hamiltonian MC in numpyro with a discovery likelihood + discovery -- various numpyro samplers with a discovery likelihood red_noise: include red noise in the model n_iter: number of MCMC iterations; Default: 1e5; Recommended > 5e4 using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False run_noise_analysis: Flag to toggle execution of noise modeling; Default: True noise_kwargs: dictionary of noise model parameters; Default: {} sampler_kwargs: dictionary of sampler parameters; Default: {} + return_sampler: Flag to return the sampler object; Default: False Returns ======= - None + None or + samp: sampler object """ if not using_wideband: @@ -318,11 +393,7 @@ def model_noise( ) # Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to) - if which_sampler == "discovery": - # discovery requires feathered pulsars - f_psr = Pulsar(mo, to) - elif which_sampler == "GibbsSampler" or which_sampler == "PTMCMCSampler": - e_psr = Pulsar(mo, to) + e_psr = Pulsar(mo, to) if which_sampler == "PTMCMCSampler": log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") @@ -359,11 +430,14 @@ def model_noise( ) dmjump_params[dmjump_param_name] = dmjump_param.value pta.set_default_params(dmjump_params) - # FIXME: set groups here + # set groups here + groups = setup_sampling_groups(pta, write_groups=True, outdir=outdir) ####### # setup sampler using enterprise_extensions - samp = sampler.setup_sampler(pta, outdir=outdir, resume=resume) - + samp = sampler.setup_sampler(pta, + outdir=outdir, + resume=resume, + groups=groups) # Initial sample x0 = np.hstack([p.sample() for p in pta.params]) # Start sampling @@ -371,6 +445,11 @@ def model_noise( x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs ) elif which_sampler == "GibbsSampler": + try: + from enterprise_extensions import GibbsSampler + except: + log.error("Please install the latest enterprise_extensions") + ValueError("Please install the latest enterprise_extensions") log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") samp = GibbsSampler( e_psr, @@ -386,22 +465,42 @@ def model_noise( except ImportError: log.error("Please install latest version of jax and/or xarray") ValueError("Please install lastest version of jax and/or xarray") - samp, log_x = setup_discovery_noise(f_psr) + # get the default settings + 
model_defaults, sampler_defaults = get_model_and_sampler_default_settings()
+        # update defaults with args passed in (dict.update() mutates in place
+        # and returns None, so update first, then bind the names)
+        model_defaults.update(noise_kwargs)
+        sampler_defaults.update(sampler_kwargs)
+        model_kwargs = model_defaults
+        sampler_kwargs = sampler_defaults
+        os.makedirs(outdir, exist_ok=True)
+        with open(outdir+"model_kwargs.json", "w") as f:
+            json.dump(model_kwargs, f)
+        with open(outdir+"sampler_kwargs.json", "w") as f:
+            json.dump(sampler_kwargs, f)
+        samp, log_x, numpyro_model = setup_discovery_noise(e_psr, model_kwargs, sampler_kwargs)
         # run the sampler
         samp.run(jax.random.key(42))
         # convert to a DataFrame
         df = log_x.to_df(samp.get_samples()['par'])
         # convert DataFrame to dictionary
         samples_dict = df.to_dict(orient='list')
+        if sampler_kwargs['numpyro_sampler'] != 'HMC_Gibbs':
+            from numpyro.infer import log_likelihood
+            from numpyro import distributions as dist
+            ln_like = log_likelihood(numpyro_model, samp.get_samples())['ll']
+            ln_prior = dist.Normal(0, 10).log_prob(samp.get_samples()['par']).sum(axis=-1)
+            ln_post = ln_like + ln_prior
+            samples_dict['lnlike'] = ln_like
+            samples_dict['lnpost'] = ln_post
+        else:
+            samples_dict['lnlike'] = None
+            samples_dict['lnpost'] = None
         # convert dictionary to ArviZ InferenceData object
         inference_data = az.from_dict(samples_dict)
         # Save to NetCDF file which can be loaded into la_forge
         inference_data.to_netcdf(outdir+"chain.nc")
     else:
         log.error(
             "Invalid sampler specified. Please use 'PTMCMCSampler' or 'GibbsSampler' or 'discovery' "
         )
+    if return_sampler:
+        return samp
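# --- Editor's sketch (not part of the patch): the discovery branch above saves
# an ArviZ NetCDF file rather than a PTMCMC chain directory. A minimal, assumed
# round-trip for inspecting the result afterwards; `outdir` is the directory
# model_noise() wrote to, and the la_forge call assumes a recent version whose
# Core accepts in-memory chain/params arrays.
import arviz as az
import numpy as np
import la_forge.core as co

idata = az.from_netcdf(outdir + "chain.nc")            # posterior samples
pars = list(idata.posterior.data_vars)                 # parameter names
# stack each parameter's (chain, draw) samples into a flat chain array
chain = np.column_stack([idata.posterior[p].values.ravel() for p in pars])
core = co.Core(label="discovery run", chain=chain, params=pars)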
 
 
 def convert_to_RNAMP(value):
 
     log.info(f"Using existing noise analysis results in {chaindir}")
     log.info("Adding new noise parameters to model.")
-    wn_dict, rn_bf = analyze_noise(
+    noise_core, noise_dict, rn_bf = analyze_noise(
         chaindir,
         burn_frac,
         save_corner,
         no_corner_plot,
         chaindir_compare=chaindir_compare,
     )
     chainfile = chaindir + "chain_1.txt"
     mtime = Time(os.path.getmtime(chainfile), format="unix")
     log.info(f"Noise chains loaded from {chainfile} created at {mtime.isot}")
 
     # Create the maskParameter for EFACS
     efac_params = []
     equad_params = []
-    rn_params = []
-    dm_gp_params = []
-    chrom_gp_params = []
     ecorr_params = []
     dmefac_params = []
     dmequad_params = []
 
-    for key, val in wn_dict.items():
+    for key, val in noise_dict.items():
 
         psr_name = key.split("_")[0]
 
     # Test EQUAD convention and decide whether to convert
     convert_equad_to_t2 = False
-    if test_equad_convention(wn_dict.keys()) == "tnequad":
+    if test_equad_convention(noise_dict.keys()) == "tnequad":
         log.info(
             "WN paramaters use temponest convention; EQUAD values will be converted once added to model"
         )
         convert_equad_to_t2 = True
-        if np.any(["_equad" in p for p in wn_dict.keys()]):
+        if np.any(["_equad" in p for p in noise_dict.keys()]):
             log.info("WN parameters generated using enterprise pre-v3.3.0")
-    elif test_equad_convention(wn_dict.keys()) == "t2equad":
+    elif test_equad_convention(noise_dict.keys()) == "t2equad":
         log.info("WN parameters use T2 convention; no conversion necessary")
 
     log.info(f"The SD Bayes factor for red noise in this pulsar is: {rn_bf}")
     if (rn_bf >= rn_bf_thres or np.isnan(rn_bf)) and (not ignore_red_noise):
 
         log.info("Including red noise for this pulsar")
         # Add the ML RN parameters to their component
         rn_comp = pm.PLRedNoise()
 
-        rn_keys = np.array([key for key, val in wn_dict.items() if "_red_" in key])
+        rn_keys = np.array([key for key, val in noise_dict.items() if "_red_" in key])
         rn_comp.RNAMP.quantity = convert_to_RNAMP(
-            wn_dict[psr_name + "_red_noise_log10_A"]
+            noise_dict[psr_name + "_red_noise_log10_A"]
         )
-        rn_comp.RNIDX.quantity = -1 * wn_dict[psr_name + "_red_noise_gamma"]
+        rn_comp.RNIDX.quantity = -1 * noise_dict[psr_name + "_red_noise_gamma"]
 
         # Add red noise to the timing model
         model.add_component(rn_comp, validate=True, force=True)
     else:
         log.info("Not including red noise for this pulsar")
 
+    # Check to see if dm noise is present
+    dm_pars = [key for key in list(noise_dict.keys()) if "_dm_gp" in key]
+    if len(dm_pars) > 0:
+        ###### POWERLAW DM NOISE ######
+        if f'{psr_name}_dm_gp_log10_A' in dm_pars:
+            #dm_bf = model_utils.bayes_fac(noise_core(rn_amp_nm), ntol=1, logAmax=-11, logAmin=-20)[0]
+            #log.info(f"The SD Bayes factor for dm noise in this pulsar is: {dm_bf}")
+            log.info('Adding Powerlaw DM GP noise as PLDMNoise to par file')
+            # Add the MAP DM GP parameters to their component
+            dm_comp = pm.PLDMNoise()
+            dm_keys = np.array([key for key, val in noise_dict.items() if "_dm_gp_" in key])
+            dm_comp.TNDMAMP.quantity = convert_to_RNAMP(
+                noise_dict[psr_name + "_dm_gp_log10_A"]
+            )
+            dm_comp.TNDMIDX.quantity = -1 * noise_dict[psr_name + "_dm_gp_gamma"]
+            ##### FIXME: need to figure out some way to softcode this
+            dm_comp.TNDMC.quantity = 100
+            # Add DM noise to the timing model
+            model.add_component(dm_comp, validate=True, force=True)
+        ###### FREE SPECTRAL (WaveX) DM NOISE ######
+        elif f'{psr_name}_dm_gp_log10_rho_0' in dm_pars:
+            log.info('Adding Free Spectral DM GP as DMWaveXnoise to par file')
+            raise NotImplementedError('DMWaveXNoise not yet implemented')
+
+    # Check to see if higher order chromatic noise is present
+    chrom_pars = [key for key in list(noise_dict.keys()) if "_chrom_gp" in key]
+    if len(chrom_pars) > 0:
+        ###### POWERLAW CHROMATIC NOISE ######
+        if f'{psr_name}_chrom_gp_log10_A' in chrom_pars:
+            log.info('Adding Powerlaw CHROM GP noise as PLCMNoise to par file')
+            # Add the MAP chromatic GP parameters to their component
+            chrom_comp = pm.PLCMNoise()
+            chrom_keys = np.array([key for key, val in noise_dict.items() if "_chrom_gp_" in key])
+            chrom_comp.TNCMAMP.quantity = convert_to_RNAMP(
+                noise_dict[psr_name + "_chrom_gp_log10_A"]
+            )
+            chrom_comp.TNCMIDX.quantity = -1 * noise_dict[psr_name + "_chrom_gp_gamma"]
+            ##### FIXME: need to figure out some way to softcode this
+            chrom_comp.TNCMC.quantity = 100
+            # Add chromatic noise to the timing model
+            model.add_component(chrom_comp, validate=True, force=True)
+        ###### FREE SPECTRAL (WaveX) CHROMATIC NOISE ######
+        elif f'{psr_name}_chrom_gp_log10_rho_0' in chrom_pars:
+            log.info('Adding Free Spectral CHROM GP as CMWaveXnoise to par file')
+            raise NotImplementedError('CMWaveXNoise not yet implemented')
 
 def setup_gibbs_sampler():
     """
     Setup the Gibbs sampler for noise analysis from enterprise extensions
     """
     # check that a sufficiently up-to-date version of enterprise_extensions is installed
     try:
         from enterprise_extensions.gibbs_sampling import gibbs
     except ImportError:
         log.error("Please install the latest version of enterprise_extensions")
         return None
-
-    pass
+    raise NotImplementedError("Gibbs sampler not yet implemented")
 
 
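# --- Editor's sketch (not part of the patch): one way to address the FIXME
# above instead of hard-coding TNDMC/TNCMC = 100. The discovery branch of
# model_noise() dumps model_kwargs.json into the chain directory, so the number
# of Fourier modes can be read back. Assumes that json file sits next to the
# chain in `chaindir`; falls back to the current hard-coded value otherwise.
import json, os

def get_nfreqs(chaindir, key="dmgp_nfreqs", default=100):
    path = os.path.join(chaindir, "model_kwargs.json")
    if os.path.exists(path):
        with open(path) as f:
            return json.load(f).get(key, default)
    return default

# dm_comp.TNDMC.quantity = get_nfreqs(chaindir, "dmgp_nfreqs")
# chrom_comp.TNCMC.quantity = get_nfreqs(chaindir, "chromgp_nfreqs")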
check that a sufficiently up-to-date version of enterprise_extensions is installed + # check that jax, numpyro and discovery are installed try: import discovery as ds import jax @@ -716,38 +877,78 @@ def setup_discovery_noise(psr): from discovery.prior import (makelogtransform_uniform, makelogprior_uniform, sample_uniform) + from discovery.gibbs import setup_single_psr_hmc_gibbs except ImportError: log.error("Please install the latest version of discovery, numpyro, and/or jax") ValueError("Please install the latest version of discovery, numpyro, and/or jax") - + # set up the model time_span = ds.getspan([psr]) - args = ( + model_components = [ + psr.residuals, + ds.makegp_timing(psr, svd=True), ds.makenoise_measurement(psr), ds.makegp_ecorr(psr), - ds.makegp_timing(psr, svd=True), - ds.makegp_fourier(psr, ds.powerlaw, 30, T=time_span, name='red_noise'), - psr.residuals - ) - psl = ds.PulsarLikelihood(args) + ] + if model_kwargs['inc_rn']: + if model_kwargs['rn_psd'] == 'powerlaw': + model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['rn_nfreqs'], T=time_span, name='red_noise')) + elif model_kwargs['rn_psd'] == 'free_spectral': + model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['rn_nfreqs'], T=time_span, name='red_noise')) + if model_kwargs['inc_dmgp']: + if model_kwargs['dmgp_psd'] == 'powerlaw': + model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['dmgp_nfreqs'], T=time_span, name='dm_gp')) + elif model_kwargs['dmgp_psd'] == 'free_spectral': + model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['dmgp_nfreqs'], T=time_span, name='dm_gp')) + if model_kwargs['inc_chrom']: + if model_kwargs['rn_psd'] == 'powerlaw': + model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['chromgp_nfreqs'], T=time_span, name='dm_gp')) + elif model_kwargs['rn_psd'] == 'free_spectral': + model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['chromgp_nfreqs'], T=time_span, name='dm_gp')) + psl = ds.PulsarLikelihood(model_components) prior = prior.makelogprior_uniform(psl.logL.params, {}) log_x = makelogtransform_uniform(psl.logL) # x0 = sample_uniform(psl.logL.params) - def numpyro_model(): - params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) - numpyro.factor("ll", log_x(params)) - - sampler = infer.MCMC( - infer.NUTS(numpyro_model), - num_warmup=250, - num_samples=4096, - num_chains=4, - progress_bar=True, - chain_method='vectorized' - ) + if sampler_kwargs['numpyro_sampler'] == 'HMC_Gibbs': + def numpyro_model(): + return None + gibbs_hmc_kernel = setup_single_psr_hmc_gibbs( + psrl=psl, psrs=psr, + priordict=ds.priordict_standard, + invhdorf=None, nuts_kwargs={}) + sampler = infer.MCMC(gibbs_hmc_kernel, + num_warmup=sampler_kwargs['num_warmup'], + num_samples=sampler_kwargs['num_warmup'], + num_chains=sampler_kwargs['num_chains'], + chain_method=sampler_kwargs['chain_method'], + progress_bar=True, + ) + elif sampler_kwargs['numpyro_sampler'] == 'NUTS': + def numpyro_model(): + params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) + numpyro.factor("ll", log_x(params)) + nuts_kernel = infer.NUTS(numpyro_model, num_steps=sampler_kwargs['num_steps']) + sampler = infer.MCMC(nuts_kernel, + num_warmup=sampler_kwargs['num_warmup'], + num_samples=sampler_kwargs['num_warmup'], + num_chains=sampler_kwargs['num_chains'], + chain_method=sampler_kwargs['chain_method'], + progress_bar=True, + ) + elif 
sampler_kwargs['numpyro_sampler'] == 'HMC': + def numpyro_model(): + params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) + numpyro.factor("ll", log_x(params)) + hmc_kernel = infer.HMC(numpyro_model, num_steps=sampler_kwargs['num_steps']) + sampler = infer.MCMC(hmc_kernel, + num_warmup=sampler_kwargs['num_warmup'], + num_samples=sampler_kwargs['num_warmup'], + num_chains=sampler_kwargs['num_chains'], + chain_method=sampler_kwargs['chain_method'], + progress_bar=True, + ) - return sampler, log_x - + return sampler, log_x, numpyro_model def test_equad_convention(pars_list): @@ -775,3 +976,29 @@ def test_equad_convention(pars_list): "EQUADs not present in parameter list (or something strange is going on)." ) return None + +def get_model_and_sampler_default_settings(): + model_defaults = { + 'inc_rn': True, + 'rn_psd': 'powerlaw', + 'rn_nfreqs': 30, + 'inc_dmgp': False, + 'dmgp_psd': 'powerlaw', + 'dmgp_nfreqs': 100, + 'inc_chromgp': False, + 'chromgp_psd': 'powerlaw', + 'chromgp_nfreqs': 100, + 'vary_chrom_idx': False, + 'inc_swgp': False, + 'swgp_psd': 'powerlaw', + 'swgp_nfreqs': 100, + } + sampler_defaults = { + 'numpyro_sampler': 'HMC', + 'num_steps': 5, + 'num_warmup': 500, + 'num_samples': 2500, + 'num_chains': 4, + 'chain_method': 'vectorized', + } + return model_defaults, sampler_defaults \ No newline at end of file From 096ef57fdb879e4c10ed1c0c514d6b8402f256cd Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Sun, 20 Oct 2024 15:11:34 -0700 Subject: [PATCH 131/193] adding solar wind stuff --- src/pint_pal/noise_utils.py | 200 ++++++++++++++++++++++++++---------- 1 file changed, 148 insertions(+), 52 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index f125a314..d0f09ecb 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -9,6 +9,7 @@ import pint.models as pm from pint.models.parameter import maskParameter +from pint.models.timing_model import Component import matplotlib as mpl import matplotlib.pyplot as pl @@ -137,12 +138,19 @@ def analyze_noise( noise_dict: Dictionary of maximum a posterior noise values rn_bf: Savage-Dickey BF for achromatic RN for given pulsar """ - try: - noise_core = co.Core(chaindir=chaindir) - except: - log.error(f"Could not load noise run from {chaindir}") - ValueError(f"Could not load noise run from {chaindir}") if which_sampler == 'PTMCMCSampler': + try: + noise_core = co.Core(chaindir=chaindir) + except: + log.error(f"Could not load noise run from {chaindir}") + ValueError(f"Could not load noise run from {chaindir}") + elif which_sampler == 'GibbsSampler': + try: + noise_core = co.Core(corepath=chaindir+'/chain') + except: + log.error(f"Could not load noise run from {chaindir}") + ValueError(f"Could not load noise run from {chaindir}") + if which_sampler == 'PTMCMCSampler' or which_sampler == "GibbsSampler": noise_core.set_burn(burn_frac) elif which_sampler == 'discovery': noise_core.set_burn(0) @@ -359,7 +367,12 @@ def model_noise( None or samp: sampler object """ - + # get the default settings + model_defaults, sampler_defaults = get_model_and_sampler_default_settings() + # update with args passed in + model_kwargs = model_defaults.update(noise_kwargs) + sampler_kwargs = sampler_defaults.update(sampler_kwargs) + if not using_wideband: outdir = base_op_dir + mo.PSR.value + "_nb/" else: @@ -394,7 +407,9 @@ def model_noise( # Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to) e_psr = Pulsar(mo, to) - + 
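One wrinkle in the defaults merge a few lines above is worth spelling out: dict.update() mutates in place and returns None, so assigning its result (as done here) silently sets the kwargs to None. A small, standalone illustration of the pitfall and the safe copy-then-update pattern, using plain dicts:

defaults = {"inc_rn": True, "rn_nfreqs": 30}
user_kwargs = {"rn_nfreqs": 100}

merged = dict(defaults)       # work on a copy so the defaults stay pristine
merged.update(user_kwargs)    # update() mutates in place and returns None,
                              # so `merged = defaults.update(...)` would be None
assert merged == {"inc_rn": True, "rn_nfreqs": 100}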
########################################################## + ################ PTMCMCSampler ################## + ########################################################## if which_sampler == "PTMCMCSampler": log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") # Setup a single pulsar PTA using enterprise_extensions @@ -407,7 +422,17 @@ def model_noise( use_dmdata=False, dmjump_var=False, wb_efac_sigma=wb_efac_sigma, - **noise_kwargs, + # DM GP + dm_var=model_kwargs['inc_dmgp'], + dm_Nfreqs=model_kwargs['dmgp_nfreqs'], + # CHROM GP + chrom_gp=model_kwargs['inc_chromgp'], + chrom_Nfreqs=model_kwargs['chromgp_nfreqs'], + # DM SOLAR WIND + dm_sw_deter=model_kwargs['inc_sw_deter'], + ACE_prior=model_kwargs['ACE_prior'], + # can pass extra signals in here + extra_sigs=model_kwargs['extra_sigs'], ) else: pta = models.model_singlepsr_noise( @@ -419,7 +444,6 @@ def model_noise( dmjump_var=False, wb_efac_sigma=wb_efac_sigma, ng_twg_setup=True, - **noise_kwargs, ) dmjump_params = {} for param in mo.params: @@ -444,6 +468,9 @@ def model_noise( samp.sample( x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs ) + ############################################################## + ################## GibbsSampler ######################## + ############################################################## elif which_sampler == "GibbsSampler": try: from enterprise_extensions import GibbsSampler @@ -452,24 +479,39 @@ def model_noise( ValueError("Please install the latest enterprise_extensions") log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") samp = GibbsSampler( - e_psr, - **noise_kwargs, + e_psr, + vary_wn=True, + tm_marg=False, + inc_ecorr=True, + ecorr_type='kernel', + vary_rn=model_kwargs['inc_rn'], + rn_components=model_kwargs['rn_nfreqs'], + vary_dm=model_kwargs['inc_dmgp'], + dm_components=model_kwargs['dm_nfreqs'], + vary_chrom=model_kwargs['inc_chromgp'], + chrom_components=model_kwargs['chrom_nfreqs'], + noise_dict={}, + tnequad=True, + #**noise_kwargs, ) samp.sample(niter=n_iter, save_path=outdir, **sampler_kwargs) - pass + # sorta redundant to have both, but la_forge doesn't look for .npy files + chain = np.load(f'{outdir}/chain_1.npy') + np.savetxt(f'{outdir}/chain_1.txt', chain,) + ################################################################# + ################## discovery likelihood ################### + ################################################################# elif which_sampler == "discovery": log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") try: import jax import xarray as xr + from numpyro import distributions as dist + from numpyro.infer import log_likelihood + except ImportError: log.error("Please install latest version of jax and/or xarray") ValueError("Please install lastest version of jax and/or xarray") - # get the default settings - model_defaults, sampler_defaults = get_model_and_sampler_default_settings() - # update with args passed in - model_kwargs = model_defaults.update(noise_kwargs) - sampler_kwargs = sampler_defaults.update(sampler_kwargs) os.mkdir(outdir, parents=True, exist_ok=True) with open(outdir+"model_kwargs.json", "w") as f: json.dump(model_kwargs, f) @@ -522,7 +564,7 @@ def add_noise_to_model( compare_dir=None, ): """ - Add WN and RN parameters to timing model. + Add WN, RN, DMGP, and parameters to timing model. 
Parameters ========== @@ -580,11 +622,12 @@ def add_noise_to_model( ecorr_idx = 1 dmefac_idx = 1 dmequad_idx = 1 - - for key, val in noise_dict.items(): - - psr_name = key.split("_")[0] - + + psr_name = list(noise_dict.keys())[0].split("_")[0] + noise_pars = np.array(list(noise_dict.keys())) + wn_dict = {key: val for key, val in noise_dict.items() if "efac" in key or "equad" in key or "ecorr" in key} + for key, val in wn_dict.items(): + if "_efac" in key: param_name = key.split("_efac")[0].split(psr_name)[1][1:] @@ -770,7 +813,7 @@ def add_noise_to_model( log.info("Not including red noise for this pulsar") # Check to see if dm noise is present - dm_pars = [key for key in list(noise_dict.keys()) if "_dm_gp" in key] + dm_pars = [key for key in noise_pars if "_dm_gp" in key] if len(dm_pars) > 0: ###### POWERLAW DM NOISE ###### if f'{psr_name}_dm_gp_log10_A' in dm_pars: @@ -794,7 +837,7 @@ def add_noise_to_model( NotImplementedError('DMWaveXNoise not yet implemented') # Check to see if higher order chromatic noise is present - chrom_pars = [key for key in list(noise_dict.keys()) if "_chrom_gp" in key] + chrom_pars = [key for key in noise_pars if "_chrom_gp" in key] if len(chrom_pars) > 0: ###### POWERLAW CHROMATIC NOISE ###### if f'{psr_name}_chrom_gp_log10_A' in chrom_pars: @@ -814,24 +857,19 @@ def add_noise_to_model( elif f'{psr_name}_chrom_gp_log10_rho_0' in chrom_pars: log.info('Adding Free Spectral CHROM GP as CMWaveXnoise to par file') NotImplementedError('CMWaveXNoise not yet implemented') - - log.info(f"The SD Bayes factor for dm noise in this pulsar is: {rn_bf}") - if (rn_bf >= rn_bf_thres or np.isnan(rn_bf)) and (not ignore_red_noise): - - log.info("Including red noise for this pulsar") - # Add the ML RN parameters to their component - rn_comp = pm.PLRedNoise() - - rn_keys = np.array([key for key, val in noise_dict.items() if "_red_" in key]) - rn_comp.RNAMP.quantity = convert_to_RNAMP( - noise_dict[psr_name + "_red_noise_log10_A"] - ) - rn_comp.RNIDX.quantity = -1 * noise_dict[psr_name + "_red_noise_gamma"] + + # Check to see if solar wind is present + sw_pars = [key for key in noise_pars if "sw_r2" in key] + if len(sw_pars) > 0: + log.info('Adding Solar Wind Dispersion to par file') + all_components = Component.component_types + noise_class = all_components["SolarWindDispersion"] + noise = noise_class() # Make the dispersion instance. 
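The lookup just above uses PINT's component registry, the generic mechanism for attaching optional model components by name. The same pattern collected into one runnable sketch, assuming `model` is a loaded PINT TimingModel; the NE_SW value is a hypothetical maximum-posterior solar-wind electron density in cm^-3:

from pint.models.timing_model import Component

# look up the component class in PINT's registry and instantiate it
swd = Component.component_types["SolarWindDispersion"]()
model.add_component(swd, validate=False)

model["NE_SW"].quantity = 7.9   # hypothetical value from the noise run, cm^-3
model["NE_SW"].frozen = True    # hold it fixed in subsequent timing fits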
+ model.add_component(noise, validate=False) + # add parameters + model['NE_SW'].quantity = noise_dict[f'{psr_name}_NE_SW'] + model['NE_SW'].frozen = True - # Add red noise to the timing model - model.add_component(rn_comp, validate=True, force=True) - else: - log.info("Not including red noise for this pulsar") # Setup and validate the timing model to ensure things are correct model.setup() @@ -846,17 +884,67 @@ def add_noise_to_model( return model -def setup_gibbs_sampler(): +def plot_free_specs(sampler_kwargs={}, + model_kwargs={}, + noise_dict={}): """ Setup the Gibbs sampler for noise analysis from enterprise extensions """ # check that a sufficiently up-to-date version of enterprise_extensions is installed - try: - from enterprise_extensions.gibbs_sampling import gibbs - except ImportError: - log.error("Please install the latest version of enterprise_extensions") - return None - NotImplementedError("Gibbs sampler not yet implemented") + + + print("attempting to sample...") + savepath = f'/home/baierj/projects/ng20yr/noise_testing/test_J0613-0200/{psr_pkls[pidx].name}_prenoise/' + bps.sample(niter=30000, savepath = savepath,) + chain = np.load(f'{savepath}/chain_1.npy') + rn_freqs = np.load(f'{savepath}/rn_freqs.npy') + dm_freqs = np.load(f'{savepath}/dm_freqs.npy') + chrom_freqs = np.load(f'{savepath}/chrom_freqs.npy') + print(chain.shape) + outdir=savepath + np.savetxt(f'{savepath}/chain_1.txt', chain,) + c0 = co.Core(chaindir=savepath) + c0.chain = chain + + + wn_params = [par for par in c0.params if any([p in par for p in ['efac', 'equad', 'ecor']])] + if len(wn_params) > 0: + dg.plot_chains(c0, pars = wn_params) + plt.savefig(f'{outdir}/wn_hists.png') + plt.close() + dg.plot_grubin(c0) + plt.savefig(f'{outdir}/grubin.png') + plt.close() + + fig, axes = plt.subplots(1,1,figsize=(8,4)) + tspan = max(psr_pkls[pidx].toas)-min(psr_pkls[pidx].toas) + c0.rn_freqs = rn_freqs + rn.plot_free_spec(c0, axis=axes, parname_root=f'{psr_pkls[pidx].name}_red_noise_log10_rho', violin=True, Color='red',Tspan=tspan) + axes.set_xscale('log') + plt.title(f"{psr_pkls[pidx].name} | red noise | nfreqs={len(rn_freqs)}" ) + plt.savefig(f"{outdir}/rn.png") + plt.close() + + fig, axes = plt.subplots(1,1,figsize=(8,4)) + tspan = max(psr_pkls[pidx].toas)-min(psr_pkls[pidx].toas) + c0.rn_freqs = dm_freqs + rn.plot_free_spec(c0, axis=axes, parname_root=f'{psr_pkls[pidx].name}_dm_gp_log10_rho', + violin=True, Color='blue',Tspan=tspan) + axes.set_xscale('log') + plt.title(f"{psr_pkls[pidx].name} | DM GP | nfreqs={len(dm_freqs)} " ) + plt.savefig(f'{outdir}/dm_gp.png') + plt.close() + + fig, axes = plt.subplots(1,1,figsize=(8,4)) + c0.rn_freqs = chrom_freqs + tspan = max(psr_pkls[pidx].toas)-min(psr_pkls[pidx].toas) + rn.plot_free_spec(c0, axis=axes, parname_root=f'{psr_pkls[pidx].name}_chrom_gp_log10_rho', + violin=True, Color='orange',Tspan=tspan) + axes.set_xscale('log') + plt.title(f"{psr_pkls[pidx].name} | chrom gp | nfreqs={len(chrom_freqs)}" ) + plt.ylim(-9,-5) + plt.savefig(f'{outdir}/chrom_gp.png') + plt.close() def setup_discovery_noise(psr, @@ -979,19 +1067,27 @@ def test_equad_convention(pars_list): def get_model_and_sampler_default_settings(): model_defaults = { + # acrhomatic red noise 'inc_rn': True, 'rn_psd': 'powerlaw', 'rn_nfreqs': 30, + # dm gp 'inc_dmgp': False, 'dmgp_psd': 'powerlaw', 'dmgp_nfreqs': 100, + # higher order chromatic gp 'inc_chromgp': False, 'chromgp_psd': 'powerlaw', 'chromgp_nfreqs': 100, - 'vary_chrom_idx': False, + 'chrom_idx': 4, + 'chrom_quad': False, + # solar wind + 
'inc_sw_deter': False, + # GP perturbations ontop of the deterministic model 'inc_swgp': False, - 'swgp_psd': 'powerlaw', - 'swgp_nfreqs': 100, + 'ACE_prior': False, + # + 'extra_sigs': None, } sampler_defaults = { 'numpyro_sampler': 'HMC', From ebc6283a5f4a055f8d39191083d71e33aa81c26f Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Mon, 21 Oct 2024 03:31:34 +0000 Subject: [PATCH 132/193] bug fixes --- src/pint_pal/noise_utils.py | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index d0f09ecb..073dc4c5 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -138,7 +138,7 @@ def analyze_noise( noise_dict: Dictionary of maximum a posterior noise values rn_bf: Savage-Dickey BF for achromatic RN for given pulsar """ - if which_sampler == 'PTMCMCSampler': + if which_sampler == 'PTMCMCSampler' or which_sampler == 'discovery': try: noise_core = co.Core(chaindir=chaindir) except: @@ -339,7 +339,7 @@ def model_noise( run_noise_analysis=True, wb_efac_sigma=0.25, base_op_dir="./", - noise_kwargs={}, + model_kwargs={}, sampler_kwargs={}, return_sampler=False, ): @@ -370,8 +370,12 @@ def model_noise( # get the default settings model_defaults, sampler_defaults = get_model_and_sampler_default_settings() # update with args passed in - model_kwargs = model_defaults.update(noise_kwargs) - sampler_kwargs = sampler_defaults.update(sampler_kwargs) + model_defaults.update(model_kwargs) + sampler_defaults.update(sampler_kwargs) + model_kwargs = model_defaults.copy() + sampler_kwargs = sampler_defaults.copy() + + if not using_wideband: outdir = base_op_dir + mo.PSR.value + "_nb/" @@ -512,7 +516,7 @@ def model_noise( except ImportError: log.error("Please install latest version of jax and/or xarray") ValueError("Please install lastest version of jax and/or xarray") - os.mkdir(outdir, parents=True, exist_ok=True) + os.makedirs(outdir, exist_ok=True) with open(outdir+"model_kwargs.json", "w") as f: json.dump(model_kwargs, f) with open(outdir+"sampler_kwargs.json", "w") as f: @@ -524,7 +528,7 @@ def model_noise( df = log_x.to_df(samp.get_samples()['par']) # convert DataFrame to dictionary samples_dict = df.to_dict(orient='list') - if sampler_kwargs['numpyro_sampler'] is not 'HMC_GIBBS': + if sampler_kwargs['numpyro_sampler'] != 'HMC_GIBBS': ln_like = log_likelihood(numpyro_model, samp.get_samples())['ll'] ln_prior = dist.Normal(0, 10).log_prob(samp.get_samples()['par']).sum(axis=-1) ln_post = ln_like + ln_prior @@ -562,6 +566,7 @@ def add_noise_to_model( rn_bf_thres=1e2, base_dir=None, compare_dir=None, + which_sampler='PTMCMCSampler' ): """ Add WN, RN, DMGP, and parameters to timing model. 
@@ -599,12 +604,13 @@ def add_noise_to_model( log.info(f"Using existing noise analysis results in {chaindir}") log.info("Adding new noise parameters to model.") - noise_dict, rn_bf = analyze_noise( + noise_core, noise_dict, rn_bf = analyze_noise( chaindir, burn_frac, save_corner, no_corner_plot, chaindir_compare=chaindir_compare, + which_sampler=which_sampler, ) chainfile = chaindir + "chain_1.txt" mtime = Time(os.path.getmtime(chainfile), format="unix") @@ -988,7 +994,7 @@ def setup_discovery_noise(psr, model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['dmgp_nfreqs'], T=time_span, name='dm_gp')) elif model_kwargs['dmgp_psd'] == 'free_spectral': model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['dmgp_nfreqs'], T=time_span, name='dm_gp')) - if model_kwargs['inc_chrom']: + if model_kwargs['inc_chromgp']: if model_kwargs['rn_psd'] == 'powerlaw': model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['chromgp_nfreqs'], T=time_span, name='dm_gp')) elif model_kwargs['rn_psd'] == 'free_spectral': @@ -1006,7 +1012,7 @@ def numpyro_model(): invhdorf=None, nuts_kwargs={}) sampler = infer.MCMC(gibbs_hmc_kernel, num_warmup=sampler_kwargs['num_warmup'], - num_samples=sampler_kwargs['num_warmup'], + num_samples=sampler_kwargs['num_samples'], num_chains=sampler_kwargs['num_chains'], chain_method=sampler_kwargs['chain_method'], progress_bar=True, @@ -1018,7 +1024,7 @@ def numpyro_model(): nuts_kernel = infer.NUTS(numpyro_model, num_steps=sampler_kwargs['num_steps']) sampler = infer.MCMC(nuts_kernel, num_warmup=sampler_kwargs['num_warmup'], - num_samples=sampler_kwargs['num_warmup'], + num_samples=sampler_kwargs['num_samples'], num_chains=sampler_kwargs['num_chains'], chain_method=sampler_kwargs['chain_method'], progress_bar=True, From feeddc9673a98547f6dc36ddb42287091ae3c1ff Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Mon, 21 Oct 2024 06:57:48 +0000 Subject: [PATCH 133/193] more bug fix --- src/pint_pal/noise_utils.py | 64 ++++++++++++++++++++----------------- 1 file changed, 34 insertions(+), 30 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 073dc4c5..98e25213 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -34,6 +34,24 @@ from enterprise_extensions import model_utils from enterprise_extensions import deterministic from enterprise_extensions.timing import timing_block +try: + import xarray as xr + import jax + from jax import numpy as jnp + import numpyro + from numpyro.infer import log_likelihood + from numpyro import distributions as dist + from numpyro import infer + import discovery as ds + from discovery import prior as ds_prior + from discovery.prior import (makelogtransform_uniform, + makelogprior_uniform, + sample_uniform) + from discovery.gibbs import setup_single_psr_hmc_gibbs + +except ImportError: + log.error("Please install the latest version of discovery, numpyro, and/or jax") + ValueError("Please install the latest version of discovery, numpyro, and/or jax") # from enterprise_extensions.blocks import (white_noise_block, red_noise_block) @@ -507,15 +525,6 @@ def model_noise( ################################################################# elif which_sampler == "discovery": log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") - try: - import jax - import xarray as xr - from numpyro import distributions as dist - from numpyro.infer import log_likelihood - - except ImportError: - log.error("Please install latest version 
of jax and/or xarray") - ValueError("Please install lastest version of jax and/or xarray") os.makedirs(outdir, exist_ok=True) with open(outdir+"model_kwargs.json", "w") as f: json.dump(model_kwargs, f) @@ -585,7 +594,7 @@ def add_noise_to_model( Returns ======= - model: New timing model which includes WN and RN parameters + model: New timing model which includes WN and RN (and potentially dmgp, chrom_gp, and solar wind) parameters """ # Assume results are in current working directory if not specified @@ -959,25 +968,10 @@ def setup_discovery_noise(psr, """ Setup the discovery likelihood with numpyro sampling for noise analysis """ - # check that jax, numpyro and discovery are installed - try: - import discovery as ds - import jax - from jax import numpy as jnp - import numpyro - from numpyro import distributions as dist - from numpyro import infer - from discovery import prior - from discovery.prior import (makelogtransform_uniform, - makelogprior_uniform, - sample_uniform) - from discovery.gibbs import setup_single_psr_hmc_gibbs - - except ImportError: - log.error("Please install the latest version of discovery, numpyro, and/or jax") - ValueError("Please install the latest version of discovery, numpyro, and/or jax") # set up the model time_span = ds.getspan([psr]) + # this updates the ds.stand_priordict object + ds.priordict_standard.update(prior_dictionary_updates()) model_components = [ psr.residuals, ds.makegp_timing(psr, svd=True), @@ -1000,7 +994,7 @@ def setup_discovery_noise(psr, elif model_kwargs['rn_psd'] == 'free_spectral': model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['chromgp_nfreqs'], T=time_span, name='dm_gp')) psl = ds.PulsarLikelihood(model_components) - prior = prior.makelogprior_uniform(psl.logL.params, {}) + prior = ds_prior.makelogprior_uniform(psl.logL.params, ds.priordict_standard) log_x = makelogtransform_uniform(psl.logL) # x0 = sample_uniform(psl.logL.params) if sampler_kwargs['numpyro_sampler'] == 'HMC_Gibbs': @@ -1021,7 +1015,8 @@ def numpyro_model(): def numpyro_model(): params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) numpyro.factor("ll", log_x(params)) - nuts_kernel = infer.NUTS(numpyro_model, num_steps=sampler_kwargs['num_steps']) + nuts_kernel = infer.NUTS(numpyro_model, max_tree_depth=5, dense_mass=True, + forward_mode_differentiation=False, target_accept_prob=0.99) sampler = infer.MCMC(nuts_kernel, num_warmup=sampler_kwargs['num_warmup'], num_samples=sampler_kwargs['num_samples'], @@ -1036,7 +1031,7 @@ def numpyro_model(): hmc_kernel = infer.HMC(numpyro_model, num_steps=sampler_kwargs['num_steps']) sampler = infer.MCMC(hmc_kernel, num_warmup=sampler_kwargs['num_warmup'], - num_samples=sampler_kwargs['num_warmup'], + num_samples=sampler_kwargs['num_samples'], num_chains=sampler_kwargs['num_chains'], chain_method=sampler_kwargs['chain_method'], progress_bar=True, @@ -1071,6 +1066,15 @@ def test_equad_convention(pars_list): ) return None + +def prior_dictionary_updates(): + return { + '(.*_)?dm_gp_log10_A': [-20, -11], + '(.*_)?dm_gp_gamma': [0, 7], + '(.*_)?chrom_gp_log10_A': [-20, -11], + '(.*_)?chrom_gp_gamma': [0, 7], + } + def get_model_and_sampler_default_settings(): model_defaults = { # acrhomatic red noise From 583fe3e238d159da9315530c47560c7c4e566b26 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Tue, 22 Oct 2024 06:19:15 +0000 Subject: [PATCH 134/193] swapping some kwargs & adding some logs --- src/pint_pal/noise_utils.py | 141 +++++++++++++++++++++--------------- 1 file 
changed, 82 insertions(+), 59 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 98e25213..3b5c9f2e 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -4,7 +4,8 @@ from astropy.time import Time from enterprise.pulsar import Pulsar -from enterprise_extensions import models, model_utils, sampler +from enterprise_extensions import models, model_utils +from enterprise_extensions import sampler as ee_sampler import corner import pint.models as pm @@ -52,6 +53,11 @@ except ImportError: log.error("Please install the latest version of discovery, numpyro, and/or jax") ValueError("Please install the latest version of discovery, numpyro, and/or jax") +try: + from enterprise_extensions.gibbs_sampling.gibbs_chromatic import GibbsSampler +except: + log.warning("Please upgrade to the latest version of enterprise_extensions to use GibbsSampler.") + ValueError("Please install the latest version of discovery, numpyro, and/or jax") # from enterprise_extensions.blocks import (white_noise_block, red_noise_block) @@ -136,7 +142,8 @@ def analyze_noise( save_corner=True, no_corner_plot=False, chaindir_compare=None, - which_sampler = 'PTMCMCSampler', + model_kwargs={}, + sampler_kwargs={}, ): """ Reads enterprise chain file; produces and saves corner plot; returns WN dictionary and RN (SD) BF @@ -148,7 +155,6 @@ def analyze_noise( save_corner: Flag to toggle saving of corner plots; Default: True no_corner_plot: Flag to toggle saving of corner plots; Default: False chaindir_compare: path to noise run chain wish to plot in corner plot for comparison; Default: None - which_sampler: choose from ['PTMCMCSampler' or 'GibbsSampler' or 'discovery'] Returns ======= @@ -156,21 +162,25 @@ def analyze_noise( noise_dict: Dictionary of maximum a posterior noise values rn_bf: Savage-Dickey BF for achromatic RN for given pulsar """ - if which_sampler == 'PTMCMCSampler' or which_sampler == 'discovery': - try: - noise_core = co.Core(chaindir=chaindir) - except: - log.error(f"Could not load noise run from {chaindir}") - ValueError(f"Could not load noise run from {chaindir}") - elif which_sampler == 'GibbsSampler': - try: - noise_core = co.Core(corepath=chaindir+'/chain') - except: - log.error(f"Could not load noise run from {chaindir}") - ValueError(f"Could not load noise run from {chaindir}") - if which_sampler == 'PTMCMCSampler' or which_sampler == "GibbsSampler": + # get the default settings + model_defaults, sampler_defaults = get_model_and_sampler_default_settings() + # update with args passed in + model_defaults.update(model_kwargs) + sampler_defaults.update(sampler_kwargs) + model_kwargs = model_defaults.copy() + sampler_kwargs = sampler_defaults.copy() + sampler = sampler_kwargs['sampler'] + likelihood = sampler_kwargs['likelihood'] + try: + noise_core = co.Core(chaindir=chaindir) + except: + log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct. Also make sure you have an up-to-date la_forge installation. 
") + ValueError(f"Could not load noise run from {chaindir}") + if sampler == 'PTMCMCSampler' or sampler == "GibbsSampler": + # standard burn ins noise_core.set_burn(burn_frac) - elif which_sampler == 'discovery': + elif likelihood == 'discovery': + # the numpyro sampler already deals with the burn in noise_core.set_burn(0) else: noise_core.set_burn(burn_frac) @@ -196,6 +206,7 @@ def analyze_noise( ) chaindir_compare = None + if save_corner and not no_corner_plot: pars_short = [p.split("_", 1)[1] for p in pars] log.info(f"Chain parameter names are {pars_short}") @@ -349,8 +360,6 @@ def analyze_noise( def model_noise( mo, to, - which_sampler="PTMCMCSampler", - vary_red_noise=True, n_iter=int(1e5), using_wideband=False, resume=False, @@ -362,17 +371,17 @@ def model_noise( return_sampler=False, ): """ - Setup enterprise PTA and perform MCMC noise analysis + Setup enterprise or discovery likelihood and perform Bayesian inference on noise model Parameters ========== mo: PINT (or tempo2) timing model to: PINT (or tempo2) TOAs - sampler: choose from ['PTMCMCSampler' or 'GibbsSampler' or 'discovery'] - PTMCMCSampler -- MCMC sampling with the Enterprise likelihood - GibbsSampler -- enterprise_extension's GibbsSampler with PTMCMC and Enterprise white noise + likelihood: choose from ['Enterprise', 'discovery'] + enterprise -- Enterprise likelihood discovery -- various numpyro samplers with a discovery likelihood - red_noise: include red noise in the model + sampler: for Enterprise choose from ['PTMCMCSampler','GibbsSampler'] + for discovery choose from ['HMC', 'NUTS', 'HMC-GIBBS'] n_iter: number of MCMC iterations; Default: 1e5; Recommended > 5e4 using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False run_noise_analysis: Flag to toggle execution of noise modeling; Default: True @@ -392,23 +401,23 @@ def model_noise( sampler_defaults.update(sampler_kwargs) model_kwargs = model_defaults.copy() sampler_kwargs = sampler_defaults.copy() - - + likelihood = sampler_kwargs['likelihood'] + sampler = sampler_kwargs['sampler'] if not using_wideband: outdir = base_op_dir + mo.PSR.value + "_nb/" else: outdir = base_op_dir + mo.PSR.value + "_wb/" - + os.makedirs(outdir, exits_ok=True) if os.path.exists(outdir) and (run_noise_analysis) and (not resume): log.info( - "INFO: A noise directory for pulsar {} already exists! Re-running noise modeling from scratch".format( + "A noise directory for pulsar {} already exists! Re-running noise modeling from scratch".format( mo.PSR.value ) ) elif os.path.exists(outdir) and (run_noise_analysis) and (resume): log.info( - "INFO: A noise directory for pulsar {} already exists! Re-running noise modeling starting from previous chain".format( + "A noise directory for pulsar {} already exists! 
Re-running noise modeling starting from previous chain".format( mo.PSR.value ) ) @@ -428,18 +437,19 @@ def model_noise( ) # Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to) + log.info(f"Creating Enterprise.Pulsar object from model with {mo.NTOA.value} toas...") e_psr = Pulsar(mo, to) ########################################################## ################ PTMCMCSampler ################## ########################################################## - if which_sampler == "PTMCMCSampler": - log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") + if likelihood == "Enterprise" and sampler == 'PTMCMCSampler': + log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") # Setup a single pulsar PTA using enterprise_extensions if not using_wideband: pta = models.model_singlepsr_noise( e_psr, white_vary=True, - red_var=vary_red_noise, + red_var=model_kwargs['inc_rn'], # defaults True is_wideband=False, use_dmdata=False, dmjump_var=False, @@ -450,9 +460,10 @@ def model_noise( # CHROM GP chrom_gp=model_kwargs['inc_chromgp'], chrom_Nfreqs=model_kwargs['chromgp_nfreqs'], + chrom_gp_kernel='diag', # Fourier basis chromg_gp # DM SOLAR WIND - dm_sw_deter=model_kwargs['inc_sw_deter'], - ACE_prior=model_kwargs['ACE_prior'], + #dm_sw_deter=model_kwargs['inc_sw_deter'], + #ACE_prior=model_kwargs['ACE_prior'], # can pass extra signals in here extra_sigs=model_kwargs['extra_sigs'], ) @@ -462,7 +473,7 @@ def model_noise( is_wideband=True, use_dmdata=True, white_vary=True, - red_var=vary_red_noise, + red_var=model_kwargs['inc_rn'], dmjump_var=False, wb_efac_sigma=wb_efac_sigma, ng_twg_setup=True, @@ -480,26 +491,23 @@ def model_noise( groups = setup_sampling_groups(pta, write_groups=True, outdir=outdir) ####### # setup sampler using enterprise_extensions - samp = sampler.setup_sampler(pta, + samp = ee_sampler.setup_sampler(pta, outdir=outdir, resume=resume, groups=groups) # Initial sample x0 = np.hstack([p.sample() for p in pta.params]) # Start sampling + log.info("Beginnning to sample...") samp.sample( - x0, n_iter, SCAMweight=30, AMweight=15, DEweight=50, **sampler_kwargs + x0, 1_000_000, SCAMweight=30, AMweight=15, DEweight=50, #**sampler_kwargs ) + log.info("Finished sampling.") ############################################################## ################## GibbsSampler ######################## ############################################################## - elif which_sampler == "GibbsSampler": - try: - from enterprise_extensions import GibbsSampler - except: - log.error("Please install the latest enterprise_extensions") - ValueError("Please install the latest enterprise_extensions") - log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") + elif likelihood == "Enterprise" and sampler == "GibbsSampler": + log.info(f"INFO: Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") samp = GibbsSampler( e_psr, vary_wn=True, @@ -509,22 +517,24 @@ def model_noise( vary_rn=model_kwargs['inc_rn'], rn_components=model_kwargs['rn_nfreqs'], vary_dm=model_kwargs['inc_dmgp'], - dm_components=model_kwargs['dm_nfreqs'], + dm_components=model_kwargs['dmgp_nfreqs'], vary_chrom=model_kwargs['inc_chromgp'], - chrom_components=model_kwargs['chrom_nfreqs'], + chrom_components=model_kwargs['chromgp_nfreqs'], noise_dict={}, - tnequad=True, + tnequad=model_kwargs['tnequad'], #**noise_kwargs, ) - samp.sample(niter=n_iter, save_path=outdir, **sampler_kwargs) + 
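A few lines below, the chain written by the Gibbs run is duplicated as chain_1.txt because la_forge's Core reader only discovers plain-text chains, not the .npy file the sampler writes. A short sketch of that round trip, assuming `outdir` points at a finished run:

import numpy as np
import la_forge.core as co

chain = np.load(f"{outdir}/chain_1.npy")     # sampler output
np.savetxt(f"{outdir}/chain_1.txt", chain)   # text copy that la_forge can find

core = co.Core(chaindir=outdir)              # load the run for post-processing
core.set_burn(0.25)                          # discard the first 25% as burn-in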
log.info("Beginnning to sample...") + samp.sample(niter=n_iter, savepath=outdir) + log.info("Finished sampling.") # sorta redundant to have both, but la_forge doesn't look for .npy files chain = np.load(f'{outdir}/chain_1.npy') np.savetxt(f'{outdir}/chain_1.txt', chain,) ################################################################# ################## discovery likelihood ################### ################################################################# - elif which_sampler == "discovery": - log.info(f"INFO: Running noise analysis with {which_sampler} for {e_psr.name}") + elif likelihood == "discovery": + log.info(f"INFO: Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") os.makedirs(outdir, exist_ok=True) with open(outdir+"model_kwargs.json", "w") as f: json.dump(model_kwargs, f) @@ -532,12 +542,15 @@ def model_noise( json.dump(sampler_kwargs, f) samp, log_x, numpyro_model = setup_discovery_noise(e_psr, model_kwargs, sampler_kwargs) # run the sampler + log.info("Beginnning to sample...") samp.run(jax.random.key(42)) + log.info("Finished sampling.") # convert to a DataFrame df = log_x.to_df(samp.get_samples()['par']) # convert DataFrame to dictionary samples_dict = df.to_dict(orient='list') - if sampler_kwargs['numpyro_sampler'] != 'HMC_GIBBS': + if sampler_kwargs['sampler'] != 'HMC-GIBBS': + log.info("Reconstructing Log Likelihood and Posterior from samples...") ln_like = log_likelihood(numpyro_model, samp.get_samples())['ll'] ln_prior = dist.Normal(0, 10).log_prob(samp.get_samples()['par']).sum(axis=-1) ln_post = ln_like + ln_prior @@ -552,7 +565,8 @@ def model_noise( inference_data.to_netcdf(outdir+"chain.nc") else: log.error( - "Invalid sampler specified. Please use 'PTMCMCSampler' or 'GibbsSampler' or 'discovery' " + f"Invalid likelihood ({likelihood}) and sampler ({sampler}) combination." \ + + "\nCan only use Enterprise with PTMCMCSampler or GibbsSampler." ) if return_sampler: return samp @@ -575,10 +589,9 @@ def add_noise_to_model( rn_bf_thres=1e2, base_dir=None, compare_dir=None, - which_sampler='PTMCMCSampler' ): """ - Add WN, RN, DMGP, and parameters to timing model. + Add WN, RN, DMGP, ChromGP, and SW parameters to timing model. 
Parameters ========== @@ -619,7 +632,6 @@ def add_noise_to_model( save_corner, no_corner_plot, chaindir_compare=chaindir_compare, - which_sampler=which_sampler, ) chainfile = chaindir + "chain_1.txt" mtime = Time(os.path.getmtime(chainfile), format="unix") @@ -969,6 +981,7 @@ def setup_discovery_noise(psr, Setup the discovery likelihood with numpyro sampling for noise analysis """ # set up the model + sampler = sampler_kwargs['sampler'] time_span = ds.getspan([psr]) # this updates the ds.stand_priordict object ds.priordict_standard.update(prior_dictionary_updates()) @@ -997,7 +1010,7 @@ def setup_discovery_noise(psr, prior = ds_prior.makelogprior_uniform(psl.logL.params, ds.priordict_standard) log_x = makelogtransform_uniform(psl.logL) # x0 = sample_uniform(psl.logL.params) - if sampler_kwargs['numpyro_sampler'] == 'HMC_Gibbs': + if sampler == 'HMC-Gibbs': def numpyro_model(): return None gibbs_hmc_kernel = setup_single_psr_hmc_gibbs( @@ -1011,7 +1024,7 @@ def numpyro_model(): chain_method=sampler_kwargs['chain_method'], progress_bar=True, ) - elif sampler_kwargs['numpyro_sampler'] == 'NUTS': + elif sampler == 'NUTS': def numpyro_model(): params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) numpyro.factor("ll", log_x(params)) @@ -1024,7 +1037,7 @@ def numpyro_model(): chain_method=sampler_kwargs['chain_method'], progress_bar=True, ) - elif sampler_kwargs['numpyro_sampler'] == 'HMC': + elif sampler == 'HMC': def numpyro_model(): params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) numpyro.factor("ll", log_x(params)) @@ -1036,6 +1049,12 @@ def numpyro_model(): chain_method=sampler_kwargs['chain_method'], progress_bar=True, ) + else: + log.error( + f"Invalid likelihood ({likelihood}) and sampler ({sampler}) combination." \ + + "\nCan only use discovery with 'HMC', 'HMC-Gibbs', or 'NUTS'." + ) + return sampler, log_x, numpyro_model @@ -1077,6 +1096,9 @@ def prior_dictionary_updates(): def get_model_and_sampler_default_settings(): model_defaults = { + # white noise + 'inc_wn': True, + 'tnequad': True, # acrhomatic red noise 'inc_rn': True, 'rn_psd': 'powerlaw', @@ -1100,7 +1122,8 @@ def get_model_and_sampler_default_settings(): 'extra_sigs': None, } sampler_defaults = { - 'numpyro_sampler': 'HMC', + 'likelihood': 'Enterprise', + 'sampler': 'HMC', 'num_steps': 5, 'num_warmup': 500, 'num_samples': 2500, From 144f570aaa9d2e55ed58c71b755cd1316bf66379 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Wed, 23 Oct 2024 19:05:03 +0000 Subject: [PATCH 135/193] random fixes --- src/pint_pal/noise_utils.py | 107 +++++++++++------------------------- 1 file changed, 33 insertions(+), 74 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 3b5c9f2e..0578e708 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -408,7 +408,7 @@ def model_noise( outdir = base_op_dir + mo.PSR.value + "_nb/" else: outdir = base_op_dir + mo.PSR.value + "_wb/" - os.makedirs(outdir, exits_ok=True) + os.makedirs(outdir, exist_ok=True) if os.path.exists(outdir) and (run_noise_analysis) and (not resume): log.info( "A noise directory for pulsar {} already exists! 
Re-running noise modeling from scratch".format( @@ -507,7 +507,7 @@ def model_noise( ################## GibbsSampler ######################## ############################################################## elif likelihood == "Enterprise" and sampler == "GibbsSampler": - log.info(f"INFO: Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") + log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") samp = GibbsSampler( e_psr, vary_wn=True, @@ -534,7 +534,7 @@ def model_noise( ################## discovery likelihood ################### ################################################################# elif likelihood == "discovery": - log.info(f"INFO: Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") + log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") os.makedirs(outdir, exist_ok=True) with open(outdir+"model_kwargs.json", "w") as f: json.dump(model_kwargs, f) @@ -551,7 +551,7 @@ def model_noise( samples_dict = df.to_dict(orient='list') if sampler_kwargs['sampler'] != 'HMC-GIBBS': log.info("Reconstructing Log Likelihood and Posterior from samples...") - ln_like = log_likelihood(numpyro_model, samp.get_samples())['ll'] + ln_like = log_likelihood(numpyro_model, samp.get_samples(), parallel=True)['ll'] ln_prior = dist.Normal(0, 10).log_prob(samp.get_samples()['par']).sum(axis=-1) ln_post = ln_like + ln_prior samples_dict['lnlike'] = ln_like @@ -634,8 +634,14 @@ def add_noise_to_model( chaindir_compare=chaindir_compare, ) chainfile = chaindir + "chain_1.txt" - mtime = Time(os.path.getmtime(chainfile), format="unix") - log.info(f"Noise chains loaded from {chainfile} created at {mtime.isot}") + try: + mtime = Time(os.path.getmtime(chainfile), format="unix") + log.info(f"Noise chains loaded from {chainfile} created at {mtime.isot}") + except: + chainfile = chaindir+"chain.nc" + mtime = Time(os.path.getmtime(chainfile), format="unix") + log.info(f"Noise chains loaded from {chainfile} created at {mtime.isot}") + # Create the maskParameter for EFACS efac_params = [] @@ -848,12 +854,12 @@ def add_noise_to_model( #log.info(f"The SD Bayes factor for dm noise in this pulsar is: {dm_bf}") log.info('Adding Powerlaw DM GP noise as PLDMNoise to par file') # Add the ML RN parameters to their component - dm_comp = pm.PLDMNoise() + dm_comp = pm.noise_model.PLDMNoise() dm_keys = np.array([key for key, val in noise_dict.items() if "_red_" in key]) dm_comp.TNDMAMP.quantity = convert_to_RNAMP( noise_dict[psr_name + "_dm_gp_log10_A"] ) - dm_comp.TNDMIDX.quantity = -1 * noise_dict[psr_name + "_dm_gp_gamma"] + dm_comp.TNDMGAM.quantity = -1 * noise_dict[psr_name + "_dm_gp_gamma"] ##### FIXMEEEEEEE : need to figure out some way to softcode this dm_comp.TNDMC.quantitity = 100 # Add red noise to the timing model @@ -870,16 +876,16 @@ def add_noise_to_model( if f'{psr_name}_chrom_gp_log10_A' in chrom_pars: log.info('Adding Powerlaw CHROM GP noise as PLCMNoise to par file') # Add the ML RN parameters to their component - chrom_comp = pm.PLCMNoise() + chrom_comp = pm.noise_model.PLCMNoise() chrom_keys = np.array([key for key, val in noise_dict.items() if "_chrom_gp_" in key]) - dm_comp.TNDMAMP.quantity = convert_to_RNAMP( + chrom_comp.TNCMAMP.quantity = convert_to_RNAMP( noise_dict[psr_name + "_chrom_gp_log10_A"] ) - chrom_comp.TNCMIDX.quantity = -1 * noise_dict[psr_name + "_dm_gp_gamma"] + chrom_comp.TNCMGAM.quantity = -1 * 
noise_dict[psr_name + "_chrom_gp_gamma"] ##### FIXMEEEEEEE : need to figure out some way to softcode this chrom_comp.TNCMC.quantitity = 100 # Add red noise to the timing model - model.add_component(dm_comp, validate=True, force=True) + model.add_component(chrom_comp, validate=True, force=True) ###### FREE SPECTRAL (WaveX) DM NOISE ###### elif f'{psr_name}_chrom_gp_log10_rho_0' in chrom_pars: log.info('Adding Free Spectral CHROM GP as CMWaveXnoise to par file') @@ -901,7 +907,8 @@ def add_noise_to_model( # Setup and validate the timing model to ensure things are correct model.setup() model.validate() - model.meta['noise_mtime'] = mtime.isot + #FIXME:::not sure why this is broken + model.noise_mtime = mtime.isot if convert_equad_to_t2: from pint_pal.lite_utils import convert_enterprise_equads @@ -911,67 +918,12 @@ def add_noise_to_model( return model -def plot_free_specs(sampler_kwargs={}, - model_kwargs={}, - noise_dict={}): +def plot_free_specs(c0, freqs, fs_type='Red Noise'): """ - Setup the Gibbs sampler for noise analysis from enterprise extensions + Plot free specs when using free spectral model """ - # check that a sufficiently up-to-date version of enterprise_extensions is installed - - - print("attempting to sample...") - savepath = f'/home/baierj/projects/ng20yr/noise_testing/test_J0613-0200/{psr_pkls[pidx].name}_prenoise/' - bps.sample(niter=30000, savepath = savepath,) - chain = np.load(f'{savepath}/chain_1.npy') - rn_freqs = np.load(f'{savepath}/rn_freqs.npy') - dm_freqs = np.load(f'{savepath}/dm_freqs.npy') - chrom_freqs = np.load(f'{savepath}/chrom_freqs.npy') - print(chain.shape) - outdir=savepath - np.savetxt(f'{savepath}/chain_1.txt', chain,) - c0 = co.Core(chaindir=savepath) - c0.chain = chain - - - wn_params = [par for par in c0.params if any([p in par for p in ['efac', 'equad', 'ecor']])] - if len(wn_params) > 0: - dg.plot_chains(c0, pars = wn_params) - plt.savefig(f'{outdir}/wn_hists.png') - plt.close() - dg.plot_grubin(c0) - plt.savefig(f'{outdir}/grubin.png') - plt.close() - - fig, axes = plt.subplots(1,1,figsize=(8,4)) - tspan = max(psr_pkls[pidx].toas)-min(psr_pkls[pidx].toas) - c0.rn_freqs = rn_freqs - rn.plot_free_spec(c0, axis=axes, parname_root=f'{psr_pkls[pidx].name}_red_noise_log10_rho', violin=True, Color='red',Tspan=tspan) - axes.set_xscale('log') - plt.title(f"{psr_pkls[pidx].name} | red noise | nfreqs={len(rn_freqs)}" ) - plt.savefig(f"{outdir}/rn.png") - plt.close() - - fig, axes = plt.subplots(1,1,figsize=(8,4)) - tspan = max(psr_pkls[pidx].toas)-min(psr_pkls[pidx].toas) - c0.rn_freqs = dm_freqs - rn.plot_free_spec(c0, axis=axes, parname_root=f'{psr_pkls[pidx].name}_dm_gp_log10_rho', - violin=True, Color='blue',Tspan=tspan) - axes.set_xscale('log') - plt.title(f"{psr_pkls[pidx].name} | DM GP | nfreqs={len(dm_freqs)} " ) - plt.savefig(f'{outdir}/dm_gp.png') - plt.close() - - fig, axes = plt.subplots(1,1,figsize=(8,4)) - c0.rn_freqs = chrom_freqs - tspan = max(psr_pkls[pidx].toas)-min(psr_pkls[pidx].toas) - rn.plot_free_spec(c0, axis=axes, parname_root=f'{psr_pkls[pidx].name}_chrom_gp_log10_rho', - violin=True, Color='orange',Tspan=tspan) - axes.set_xscale('log') - plt.title(f"{psr_pkls[pidx].name} | chrom gp | nfreqs={len(chrom_freqs)}" ) - plt.ylim(-9,-5) - plt.savefig(f'{outdir}/chrom_gp.png') - plt.close() + ImpelmentationError("not yet implemented") + return None def setup_discovery_noise(psr, @@ -984,6 +936,8 @@ def setup_discovery_noise(psr, sampler = sampler_kwargs['sampler'] time_span = ds.getspan([psr]) # this updates the ds.stand_priordict 
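object

The priordict update below widens discovery's standard prior ranges using regex-keyed entries, one pattern per parameter family. A minimal sketch of that override step, with ranges mirroring prior_dictionary_updates in this module (illustrative, not authoritative):

import discovery as ds

# regex-keyed prior ranges: each key matches a family of parameter names
ds.priordict_standard.update({
    '(.*_)?dm_gp_log10_A': [-20, -11],
    '(.*_)?dm_gp_gamma': [0, 7],
    '(.*_)?chrom_gp_log10_A': [-20, -11],
    '(.*_)?chrom_gp_gamma': [0, 7],
})

# this updates the ds.stand_priordict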
object + # need 64-bit precision for PTA inference + numpyro.enable_x64() ds.priordict_standard.update(prior_dictionary_updates()) model_components = [ psr.residuals, @@ -1028,8 +982,11 @@ def numpyro_model(): def numpyro_model(): params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) numpyro.factor("ll", log_x(params)) - nuts_kernel = infer.NUTS(numpyro_model, max_tree_depth=5, dense_mass=True, - forward_mode_differentiation=False, target_accept_prob=0.99) + nuts_kernel = infer.NUTS(numpyro_model, + max_tree_depth=sampler_kwargs['max_tree_depth'], + dense_mass=sampler_kwargs['dense_mass'], + forward_mode_differentiation=False, + target_accept_prob=0.99) sampler = infer.MCMC(nuts_kernel, num_warmup=sampler_kwargs['num_warmup'], num_samples=sampler_kwargs['num_samples'], @@ -1129,5 +1086,7 @@ def get_model_and_sampler_default_settings(): 'num_samples': 2500, 'num_chains': 4, 'chain_method': 'vectorized', + 'max_tree_depth': 5, + 'dense_mass': False, } return model_defaults, sampler_defaults \ No newline at end of file From c4d3ab283d80be6d0302a855e06507b08737c60d Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Tue, 17 Dec 2024 18:28:43 +0000 Subject: [PATCH 136/193] move import try/except; add empirical distributions --- src/pint_pal/noise_utils.py | 118 +++++++++++++++++++++--------------- 1 file changed, 68 insertions(+), 50 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 0578e708..683cee3f 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -35,29 +35,6 @@ from enterprise_extensions import model_utils from enterprise_extensions import deterministic from enterprise_extensions.timing import timing_block -try: - import xarray as xr - import jax - from jax import numpy as jnp - import numpyro - from numpyro.infer import log_likelihood - from numpyro import distributions as dist - from numpyro import infer - import discovery as ds - from discovery import prior as ds_prior - from discovery.prior import (makelogtransform_uniform, - makelogprior_uniform, - sample_uniform) - from discovery.gibbs import setup_single_psr_hmc_gibbs - -except ImportError: - log.error("Please install the latest version of discovery, numpyro, and/or jax") - ValueError("Please install the latest version of discovery, numpyro, and/or jax") -try: - from enterprise_extensions.gibbs_sampling.gibbs_chromatic import GibbsSampler -except: - log.warning("Please upgrade to the latest version of enterprise_extensions to use GibbsSampler.") - ValueError("Please install the latest version of discovery, numpyro, and/or jax") # from enterprise_extensions.blocks import (white_noise_block, red_noise_block) @@ -175,7 +152,7 @@ def analyze_noise( noise_core = co.Core(chaindir=chaindir) except: log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct. Also make sure you have an up-to-date la_forge installation. ") - ValueError(f"Could not load noise run from {chaindir}") + raise ValueError(f"Could not load noise run from {chaindir}. 
Check path and la_forge installation.")
     if sampler == 'PTMCMCSampler' or sampler == "GibbsSampler":
         # standard burn ins
         noise_core.set_burn(burn_frac)
     elif likelihood == 'discovery':
         # the numpyro sampler already deals with the burn in
         noise_core.set_burn(0)
     else:
         noise_core.set_burn(burn_frac)
@@ -360,7 +337,6 @@ def analyze_noise(
 def model_noise(
     mo,
     to,
-    n_iter=int(1e5),
     using_wideband=False,
     resume=False,
     run_noise_analysis=True,
@@ -377,17 +353,21 @@
     ==========
     mo: PINT (or tempo2) timing model
     to: PINT (or tempo2) TOAs
-    likelihood: choose from ['Enterprise', 'discovery']
-        enterprise -- Enterprise likelihood
-        discovery -- various numpyro samplers with a discovery likelihood
-    sampler: for Enterprise choose from ['PTMCMCSampler','GibbsSampler']
-        for discovery choose from ['HMC', 'NUTS', 'HMC-GIBBS']
-    n_iter: number of MCMC iterations; Default: 1e5; Recommended > 5e4
     using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False
+    resume: Flag to resume a previous run rather than overwrite it; Default: False
     run_noise_analysis: Flag to toggle execution of noise modeling; Default: True
     noise_kwargs: dictionary of noise model parameters; Default: {}
     sampler_kwargs: dictionary of sampler parameters; Default: {}
     return_sampler: Flag to return the sampler object; Default: False
+
+    Recommended to pass model_kwargs and sampler_kwargs from the config file.
+    Default kwargs given by function `get_model_and_sampler_default_settings`.
+    Important configuration parameters:
+        likelihood: choose from ['Enterprise', 'discovery']
+            enterprise -- Enterprise likelihood
+            discovery -- various numpyro samplers with a discovery likelihood
+        sampler: for Enterprise choose from ['PTMCMCSampler','GibbsSampler']
+            for discovery choose from ['HMC', 'NUTS', 'HMC-GIBBS']

     Returns
     =======
@@ -428,13 +408,6 @@ def model_noise(
         )
         return None

-    # Ensure n_iter is an integer
-    n_iter = int(n_iter)
-
-    if n_iter < 1e4:
-        log.warning(
-            "Such a small number of iterations is unlikely to yield accurate posteriors. STRONGLY recommend increasing the number of iterations to at least 5e4"
-        )

     # Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to)
     log.info(f"Creating Enterprise.Pulsar object from model with {mo.NTOA.value} toas...")
     e_psr = Pulsar(mo, to)
     ##########################################################
     ################    PTMCMCSampler       ##################
     ##########################################################
     if likelihood == "Enterprise" and sampler == 'PTMCMCSampler':
         log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}")
         # Setup a single pulsar PTA using enterprise_extensions
+        # Ensure n_iter is an integer
+        sampler_kwargs['n_iter'] = int(sampler_kwargs['n_iter'])
+
+        if sampler_kwargs['n_iter'] < 1e4:
+            log.warning(
+                f"Such a small number of iterations with {sampler} is unlikely to yield accurate posteriors. 
STRONGLY recommend increasing the number of iterations to at least 5e4" + ) if not using_wideband: pta = models.model_singlepsr_noise( e_psr, @@ -467,6 +447,7 @@ def model_noise( # can pass extra signals in here extra_sigs=model_kwargs['extra_sigs'], ) + pta.set_default_params({}) else: pta = models.model_singlepsr_noise( e_psr, @@ -488,25 +469,37 @@ def model_noise( dmjump_params[dmjump_param_name] = dmjump_param.value pta.set_default_params(dmjump_params) # set groups here - groups = setup_sampling_groups(pta, write_groups=True, outdir=outdir) + groups = setup_sampling_groups(pta, write_groups=False, outdir=outdir) ####### # setup sampler using enterprise_extensions samp = ee_sampler.setup_sampler(pta, - outdir=outdir, - resume=resume, - groups=groups) + outdir=outdir, + resume=resume, + groups=groups, + empirical_distr = sampler_kwargs['empirical_distr'] + xx) + if sampler_kwargs['empirical_distr'] is not None: + try: + samp.addProposalToCycle(samp.jp.draw_from_empirical_distr, 50) + except: + log.warning("Failed to add draws from empirical distribution.") # Initial sample x0 = np.hstack([p.sample() for p in pta.params]) # Start sampling log.info("Beginnning to sample...") samp.sample( - x0, 1_000_000, SCAMweight=30, AMweight=15, DEweight=50, #**sampler_kwargs + x0, sampler_kwargs['n_iter'], SCAMweight=30, AMweight=15, DEweight=50, #**sampler_kwargs ) log.info("Finished sampling.") ############################################################## ################## GibbsSampler ######################## ############################################################## elif likelihood == "Enterprise" and sampler == "GibbsSampler": + try: + from enterprise_extensions.gibbs_sampling.gibbs_chromatic import GibbsSampler + except: + log.error("Please upgrade to the latest version of enterprise_extensions to use GibbsSampler.") + raise ValueError("Please install a version of enterprise extensions which contains the `gibbs_sampling` module.") log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") samp = GibbsSampler( e_psr, @@ -534,6 +527,22 @@ def model_noise( ################## discovery likelihood ################### ################################################################# elif likelihood == "discovery": + try: # make sure requisite packages are installed + import xarray as xr + import jax + from jax import numpy as jnp + import numpyro + from numpyro.infer import log_likelihood + from numpyro import distributions as dist + from numpyro import infer + import discovery as ds + from discovery import prior as ds_prior + from discovery.prior import (makelogtransform_uniform, + makelogprior_uniform, + sample_uniform) + except ImportError: + log.error("Please install the latest version of discovery, numpyro, and/or jax") + raise ValueError("Please install the latest version of discovery, numpyro, and/or jax") log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") os.makedirs(outdir, exist_ok=True) with open(outdir+"model_kwargs.json", "w") as f: @@ -935,9 +944,9 @@ def setup_discovery_noise(psr, # set up the model sampler = sampler_kwargs['sampler'] time_span = ds.getspan([psr]) - # this updates the ds.stand_priordict object # need 64-bit precision for PTA inference numpyro.enable_x64() + # this updates the ds.stand_priordict object ds.priordict_standard.update(prior_dictionary_updates()) model_components = [ psr.residuals, @@ -965,8 +974,12 @@ def setup_discovery_noise(psr, log_x = 
From d0f37b9f3928df3192b0e4f0efdb8464fae18c0b Mon Sep 17 00:00:00 2001
From: Joseph Glaser
Date: Thu, 24 Oct 2024 19:11:13 +0000
Subject: [PATCH 137/193] Transfering new fix.

---
 .github/workflows/test_notebook.yml | 39 ++++++++++++++++++++++++++++-
 1 file changed, 38 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml
index f52fc418..883d59c8 100644
--- a/.github/workflows/test_notebook.yml
+++ b/.github/workflows/test_notebook.yml
@@ -11,19 +11,25 @@ on:
 
 jobs:
   build:
     runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
       matrix:
         os: [ubuntu-latest] # Once we get the tex packages changed, we should include "macos-13"
         python-version: ["3.9", "3.10", "3.11", "3.12"]
     steps:
     - name: Install Required Ubuntu Packages
       run: |
         sudo apt-get update
         sudo apt-get install texlive-latex-base cm-super-minimal pdftk latex2html
     - uses: actions/checkout@v4
     - uses: mamba-org/setup-micromamba@v1
       with:
         init-shell: bash
         environment-name: pulsar
         cache-environment: true
         cache-downloads: true
         create-args: >-
           -c conda-forge
           python=${{ matrix.python-version }}
           pytest
           cython
           pint-pulsar
           tempo2
           libstempo
           enterprise-pulsar
           enterprise_extensions
           scikit-sparse
           scikit-learn
           ruamel.yaml
           nbconvert
           ipywidgets>=7.6.3
           weasyprint
           pytest-xdist>=2.3.0
           jupyter
           seaborn
           gitpython
     - name: Install Main Code
       shell: bash -el {0}
       run: |
         export JUPYTER_PLATFORM_DIRS=1
         pytest tests/test_run_notebook.py -k $PULSAR_NAME
         ls -lah
         mv tmp* nb_outputs
     - name: Archive Notebook Output Files
       uses: actions/upload-artifact@v4
       with:
         name: TestNB-OutputFiles_${{ matrix.python-version }}_${{ matrix.os }}_${{ steps.date.outputs.date }}
         path: |
           nb_outputs/*/*.pdf
           nb_outputs/*/*.tim
           nb_outputs/*/*.par
         compression-level: 6
-
+ 
\ No newline at end of file
From a689d2b1c375bcbaf494c045cbbaaaece7c037a4 Mon Sep 17 00:00:00 2001
From: Michael Lam
Date: Fri, 27 Sep 2024 17:02:59 +0000
Subject: [PATCH 138/193] fix list comprehension in apply_ignore() for poor-febe check

---
 src/pint_pal/timingconfiguration.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py
index fe2d5f57..4e645526 100644
--- a/src/pint_pal/timingconfiguration.py
+++ b/src/pint_pal/timingconfiguration.py
@@ -785,7 +785,7 @@ def apply_ignore(self,toas,specify_keys=None,warn=False,model=None):
         if self.get_snr_cut() > 25.0 and self.get_toa_type() == 'WB':
             log.warning('snr-cut should be set to 25; try excising TOAs using other methods.')
         if 'poor-febe' in valid_valued:
-            fs = np.array([(f['f'] if 'f' in f else None) in toas.orig_table['flags']])
+            fs = np.array([(f['f'] if 'f' in f else None) for f in toas.orig_table['flags']])
             for febe in self.get_poor_febes():
                 febeinds = np.where(fs==febe)[0]
                 apply_cut_flag(toas,febeinds,'poorfebe',warn=warn)

From 69a25d1e12e4f3b1e8d35ee553f978f82d3deff8 Mon Sep 17 00:00:00 2001
From: Ross Jennings
Date: Thu, 24 Oct 2024 16:34:56 -0400
Subject: [PATCH 139/193] Fix setter function names

---
 src/pint_pal/timingconfiguration.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/pint_pal/timingconfiguration.py b/src/pint_pal/timingconfiguration.py
index 4e645526..1c7c7218 100644
--- a/src/pint_pal/timingconfiguration.py
+++ b/src/pint_pal/timingconfiguration.py
@@ -65,7 +65,7 @@ def tim_directory(self):
         )
 
     @tim_directory.setter
-    def set_tim_directory(self, tim_directory):
+    def tim_directory(self, tim_directory):
         """
         Set tim directory. If a relative path is supplied,
         it will be turned into an absolute path.
@@ -83,7 +83,7 @@ def par_directory(self):
         )
 
     @par_directory.setter
-    def set_par_directory(self, par_directory):
+    def par_directory(self, par_directory):
         """
         Set par directory. If a relative path is supplied,
         it will be turned into an absolute path.
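Both of the small fixes above are easy to reintroduce, so two illustrative sketches follow (toy classes and data, not the real pint_pal objects). First, the list-comprehension fix from PATCH 138: without the `for f in`, the bracketed expression is a single membership test, so NumPy gets a one-element boolean array instead of one `-f` flag value per TOA:

    import numpy as np

    flags = [{"f": "Rcvr_800_GUPPI"}, {}, {"f": "L-wide_PUPPI"}]
    f = flags[-1]  # the broken form only ran at all because a stale `f` was in scope

    broken = np.array([(f["f"] if "f" in f else None) in flags])       # array([False]) -- length 1
    fixed = np.array([(f["f"] if "f" in f else None) for f in flags])  # one entry per TOA

Second, the setter renames from PATCH 139: a function decorated with `@<prop>.setter` is registered under whatever name it is given, so `def set_tim_directory` created a separate `set_tim_directory` property and left `tim_directory` itself read-only:

    class Config:
        @property
        def tim_directory(self):
            return self._tim_dir

        @tim_directory.setter
        def tim_directory(self, path):  # the name must match the property
            self._tim_dir = path

    cfg = Config()
    cfg.tim_directory = "/data/tim"  # with the old name this raised AttributeError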
From 00c2a4df5e63e948b1160010a9e94cd980a705b8 Mon Sep 17 00:00:00 2001
From: tcromartie
Date: Wed, 4 Sep 2024 16:49:39 +0000
Subject: [PATCH 140/193] initial changes to plot_utils incl. adding plot_settings.yaml

---
 src/pint_pal/plot_settings.yaml |  281 +++
 src/pint_pal/plot_utils.py      | 3982 +++++++++++++++++++------------
 2 files changed, 2758 insertions(+), 1505 deletions(-)
 create mode 100644 src/pint_pal/plot_settings.yaml

diff --git a/src/pint_pal/plot_settings.yaml b/src/pint_pal/plot_settings.yaml
new file mode 100644
index 00000000..9f67ae96
--- /dev/null
+++ b/src/pint_pal/plot_settings.yaml
@@ -0,0 +1,281 @@
obs_c: {
    "ao": "#6BA9E2",
    "arecibo": "#6BA9E2",
    "gbt": "#61C853",
    "vla": "#40635F",
    "CHIME": "#ECE133",
    "nancay": "#407BD5",
    "ncyobs": "#407BD5",
    "effelsberg_asterix": "#407BD5",
    "effelsberg": "#407BD5",
    "leap": "#ECE133",
    "jodrell": "#407BD5",
    "jbroach": "#407BD5",
    "wsrt": "#E5A4CB",
    "parkes": "#BE0119",
    "gmrt": "#855CA0",
    "meerkat": "#FD9927",
    "None": "#808080",
}

pta_c: {
    "InPTA": "#855CA0",
    "EPTA": "#407BD5",
    "NANOGrav": "#61C853",
    "PPTA": "#BE0119",
    "MPTA": "#FD9927",
    "None": "#808080",
}

febe_c: {
    "327_ASP": "#6BA9E2",
    "327_PUPPI": "#6BA9E2",
    "430_ASP": "#6BA9E2",
    "430_PUPPI": "#6BA9E2",
    "L-wide_ASP": "#6BA9E2",
    "L-wide_PUPPI": "#6BA9E2",
    "Rcvr1_2_GASP": "#61C853",
    "Rcvr1_2_GUPPI": "#61C853",
    "Rcvr1_2_VEGAS": "hotpink",
    "Rcvr_800_GASP": "#61C853",
    "Rcvr_800_GUPPI": "#61C853",
    "Rcvr_800_VEGAS": "violet",
    "S-wide_ASP": "#6BA9E2",
    "S-wide_PUPPI": "#6BA9E2",
    "1.5GHz_YUPPI": "#40635F",
    "3GHz_YUPPI": "#40635F",
    "6GHz_YUPPI": "#40635F",
    "CHIME": "#ECE133",
    "unknown_LEAP": "#FD9927",
    "NRT.BON.1600": "#FD9927",
    "NRT.BON.1400": "#FD9927",
    "NRT.BON.2000": "#FD9927",
    "NRT.NUPPI.1484": "#FD9927",
    "NRT.NUPPI.1854": "#FD9927",
    "NRT.NUPPI.2154": "#FD9927",
    "NRT.NUPPI.2539": "#FD9927",
    "EFF.EBPP.1360": "#855CA0",
    "EFF.EBPP.1410": "#855CA0",
    "EFF.EBPP.2639": "#855CA0",
    "S60-2_asterix": "#855CA0",
    "JBO.DFB.1400": "#407BD5",
    "JBO.DFB.1520": "#407BD5",
    "WSRT.P2.1380": "#E5A4CB",
    "WSRT.P1.1380.C": "#E5A4CB",
    "WSRT.P1.2273.C": "#E5A4CB",
    "WSRT.P1.323.C": "#40635F",
    "WSRT.P1.367.C": "#40635F",
    "P217-3_asterix": "#855CA0",
    "unknown_asterix": "#855CA0",
    "P200-3_asterix": "#855CA0",
    "P217-3_PuMa2": "#855CA0",
    "P217-6_LEAP": "#855CA0",
    "P217-3_LEAP": "#855CA0",
    "R217-3_LEAP": "#855CA0",
    "P200-3_LEAP": "#855CA0",
    "JBO.ROACH.1620": "#407BD5",
    "1050CM_PDFB4": "#BE0119",
    "1050CM_PDFB1": "#BE0119",
    "1050CM_PDFB2": "#BE0119",
    "1050CM_PDFB3": "#BE0119",
    "1050CM_WBCORR": "#BE0119",
    "1050CM_CPSR2": "#BE0119",
    "1050CM_CASPSR": "#BE0119",
    "MULTI_CPSR2m": "#BE0119",
    "MULTI_PDFB1": "#BE0119",
    "H-OH_PDFB1": "#BE0119",
    "H-OH_CPSR2n": "#BE0119",
    "H-OH_CPSR2m": "#BE0119",
    "H-OH_PDFB4": "#BE0119",
    "MULTI_CPSR2n": "#BE0119",
    "MULTI_WBCORR": "#BE0119",
    "MULTI_PDFB2": "#BE0119",
    "MULTI_PDFB3": "#BE0119",
    "MULTI_PDFB4": "#BE0119",
    "UWL_Medusa": "#BE0119",
    "UWL_CASPSR": "#BE0119",
    "UWL_PDFB4": "#BE0119",
    "UWL_PDFB4_10CM": "#BE0119",
    "UWL_PDFB4_40CM": "#BE0119",
    "None": "#808080",
}

ng20_c: {
    "CHIME": "#FFA733",
    "327_ASP": "#BE0119",
    "327_PUPPI": "#BE0119",
    "430_ASP": "#FD9927",
    "430_PUPPI": "#FD9927",
    "L-wide_ASP": "#BDB6F6",
    "L-wide_PUPPI": "#BDB6F6",
    # "L-wide_ASP": "#C3BEF7",
    # "L-wide_PUPPI": "#A393BF",
    # "Rcvr1_2_GASP": "#81BDEE",
    "Rcvr1_2_GASP": "#79A3E2",
    "Rcvr1_2_GUPPI": "#79A3E2",
    "Rcvr1_2_VEGAS": "#79A3E2",
    "Rcvr_800_GASP": "#8DD883",
    "Rcvr_800_GUPPI": "#8DD883",
    "Rcvr_800_VEGAS": 
"#8DD883", + # "VEGAS": "#465922", + # "S-wide_ASP": "#D81159", + # "S-wide_PUPPI": "#D81159", + "S-wide_ASP": "#C4457A", + "S-wide_PUPPI": "#C4457A", + "1.5GHz_YUPPI": "#EBADCB", + "3GHz_YUPPI": "#E79CC1", + "6GHz_YUPPI": "#DB6BA1", + # "CHIME": "#F3689B", + # "Rcvr_CHIME": "#F3689B", +} + +obs_m: { + "ao": "x", + "arecibo": "x", + "gbt": "x", + "vla": "x", + "CHIME": "x", + "leap": "x", + "nancay": "x", + "ncyobs": "x", + "effelsberg_asterix": "x", + "effelsberg": "x", + "jodrell": "x", + "jbroach": "x", + "wsrt": "x", + "parkes": "x", + "gmrt": "x", + "meerkat": "x", + "None": "x", +} + +pta_m: { + "InPTA": "x", + "EPTA": "x", + "NANOGrav": "x", + "PPTA": "x", + "MPTA": "x", + "None": "x", +} + +ng20_m: { + "327_ASP": "x", + "327_PUPPI": "x", + "430_ASP": "x", + "430_PUPPI": "x", + "L-wide_ASP": "x", + "L-wide_PUPPI": "x", + "Rcvr1_2_GASP": "x", + "Rcvr1_2_GUPPI": "x", + "Rcvr1_2_VEGAS": "x", + "Rcvr_800_GASP": "x", + "Rcvr_800_GUPPI": "x", + "Rcvr_800_VEGAS": "x", + "S-wide_ASP": "x", + "S-wide_PUPPI": "x", + "1.5GHz_YUPPI": "x", + "3GHz_YUPPI": "x", + "6GHz_YUPPI": "x", + "CHIME": "x", +} + +febe_m: { + "327_ASP": "x", + "327_PUPPI": "x", + "430_ASP": "x", + "430_PUPPI": "x", + "L-wide_ASP": "x", + "L-wide_PUPPI": "x", + "Rcvr1_2_GASP": "x", + "Rcvr1_2_GUPPI": "x", + "Rcvr1_2_VEGAS": "x", + "Rcvr_800_GASP": "o", + "Rcvr_800_GUPPI": "o", + "Rcvr_800_VEGAS": "o", + "S-wide_ASP": "o", + "S-wide_PUPPI": "o", + "1.5GHz_YUPPI": "x", + "3GHz_YUPPI": "o", + "6GHz_YUPPI": "^", + "CHIME": "x", + "NRT.BON.1600": "x", + "NRT.BON.1400": "o", + "NRT.BON.2000": "^", + "NRT.NUPPI.1484": "x", + "NRT.NUPPI.1854": "o", + "NRT.NUPPI.2154": "^", + "NRT.NUPPI.2539": "^", + "EFF.EBPP.1360": "o", + "EFF.EBPP.1410": "x", + "EFF.EBPP.2639": "^", + "S60-2_asterix": "v", + "P217-3_asterix": "x", + "P200-3_asterix": "v", + "unknown_asterix": "v", + "P217-3_PuMa2": "x", + "P200-3_LEAP": "v", + "P217-6_LEAP": "x", + "P217-3_LEAP": "x", + "R217-3_LEAP": "x", + "unknown_LEAP": "x", + "JBO.DFB.1400": "x", + "JBO.DFB.1520": "o", + "JBO.ROACH.1620": "^", + "WSRT.P2.1380": "v", + "WSRT.P1.1380.C": "x", + "WSRT.P1.2273.C": "o", + "WSRT.P1.323.C": "x", + "WSRT.P1.367.C": "x", + "1050CM_PDFB4": "x", + "1050CM_PDFB1": "x", + "1050CM_PDFB2": "x", + "1050CM_PDFB3": "x", + "1050CM_WBCORR": "x", + "1050CM_CPSR2": "x", + "1050CM_CPSR2m": "x", + "1050CM_CASPSR": "x", + "MULTI_CPSR2m": "o", + "MULTI_PDFB1": "o", + "H-OH_PDFB1": "^", + "H-OH_CPSR2m": "^", + "H-OH_CPSR2n": "^", + "H-OH_PDFB4": "^", + "MULTI_CPSR2n": "o", + "MULTI_WBCORR": "o", + "MULTI_PDFB2": "o", + "MULTI_PDFB3": "o", + "MULTI_PDFB4": "o", + "UWL_Medusa": "v", + "UWL_PDFB4": "v", + "UWL_PDFB4_10CM": "v", + "UWL_PDFB4_40CM": "v", + "UWL_CASPSR": "v", + "None": "x", + "3GHz_YUPPI": "x", + "6GHz_YUPPI": "x", + "CHIME": "x", +} + +label_names: { + "327_ASP": "ASP 327 MHz", + "327_PUPPI": "PUPPI 327 MHz", + "430_ASP": "ASP 430 MHz", + "430_PUPPI": "PUPPI 430 MHz", + "L-wide_ASP": "ASP L-wide", + "L-wide_PUPPI": "PUPPI L-wide", + "Rcvr1_2_GASP": "GASP L-band", + "Rcvr1_2_GUPPI": "GUPPI L-band", + "Rcvr1_2_VEGAS": "VEGAS L-band", + "Rcvr_800_GASP": "GASP 820 MHz", + "Rcvr_800_GUPPI": "GUPPI 820 MHz", + "Rcvr_800_VEGAS": "VEGAS 820 MHz", + "S-wide_ASP": "ASP S-wide", + "S-wide_PUPPI": "PUPPI S-wide", + "1.5GHz_YUPPI": "YUPPI 1.5 GHz", + "3GHz_YUPPI": "YUPPI 3 GHz", + "6GHz_YUPPI": "YUPPI 6 GHz", + "CHIME": "CHIME", +} diff --git a/src/pint_pal/plot_utils.py b/src/pint_pal/plot_utils.py index a32757cb..bb27d97e 100644 --- a/src/pint_pal/plot_utils.py +++ 
b/src/pint_pal/plot_utils.py @@ -2,250 +2,75 @@ # -*- coding: utf-8 -*- """ Created on Tue Feb 4 09:30:59 2020 - @author: bshapiroalbert +Code since butchered by many timers. """ import numpy as np import matplotlib.pyplot as plt -import sys, copy +import copy from astropy import log import astropy.units as u -# Import PINT +import yaml + import pint.toa as toa import pint.models as model import pint.fitter as fitter import pint.utils as pu import subprocess -# import extra util functions brent wrote + from pint_pal.utils import * import os from pint_pal.timingconfiguration import TimingConfiguration import pint_pal.lite_utils as lu -# color blind friends colors and markers? -#CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3', '#999999', '#e41a1c', '#dede00'] -#MARKERS = ['.', 'v', 's', 'x', '^', 'D', 'p', 'P', '*'] - -# Color scheme for consistent reciever-backend combos, same as published 12.5 yr -colorschemes = {'observatories':{ - "ao": "#6BA9E2", - "arecibo": "#6BA9E2", - "gbt": "#61C853", - "vla": "#40635F", - "CHIME": "#ECE133", - "nancay": "#407BD5", - "ncyobs": "#407BD5", - "effelsberg_asterix": "#407BD5", - "effelsberg": "#407BD5", - "leap": "#ECE133", - "jodrell": "#407BD5", - "jbroach": "#407BD5", - "wsrt": "#E5A4CB", - "parkes": "#BE0119", - "gmrt": "#855CA0", - "meerkat": "#FD9927", - "None": "#808080" - }, - - 'pta':{ - "InPTA": "#855CA0", - "EPTA": "#407BD5", - "NANOGrav": "#61C853", - "PPTA": "#BE0119", - "MPTA": "#FD9927", - "None": "#808080" - }, - 'febe':{ - "327_ASP": "#6BA9E2", - "327_PUPPI": "#6BA9E2", - "430_ASP": "#6BA9E2", - "430_PUPPI": "#6BA9E2", - "L-wide_ASP": "#6BA9E2", - "L-wide_PUPPI": "#6BA9E2", - "Rcvr1_2_GASP": "#61C853", - "Rcvr1_2_GUPPI": "#61C853", - "Rcvr1_2_VEGAS": "#61C853", - "Rcvr_800_GASP": "#61C853", - "Rcvr_800_GUPPI": "#61C853", - "Rcvr_800_VEGAS": "#61C853", - "S-wide_ASP": "#6BA9E2", - "S-wide_PUPPI": "#6BA9E2", - "1.5GHz_YUPPI": "#40635F", - "3GHz_YUPPI": "#40635F", - "6GHz_YUPPI": "#40635F", - "CHIME": "#ECE133", - "unknown_LEAP": "#FD9927", - "NRT.BON.1600": "#FD9927", - "NRT.BON.1400": "#FD9927", - "NRT.BON.2000": "#FD9927", - "NRT.NUPPI.1484": "#FD9927", - "NRT.NUPPI.1854": "#FD9927", - "NRT.NUPPI.2154": "#FD9927", - "NRT.NUPPI.2539": "#FD9927", - "EFF.EBPP.1360": "#855CA0", - "EFF.EBPP.1410": "#855CA0", - "EFF.EBPP.2639": "#855CA0", - "S60-2_asterix": "#855CA0", - "JBO.DFB.1400": "#407BD5", - "JBO.DFB.1520": "#407BD5", - "WSRT.P2.1380": "#E5A4CB", - "WSRT.P1.1380.C": "#E5A4CB", - "WSRT.P1.2273.C": "#E5A4CB", - "WSRT.P1.323.C": "#40635F", - "WSRT.P1.367.C": "#40635F", - "P217-3_asterix": "#855CA0", - "unknown_asterix": "#855CA0", - "P200-3_asterix": "#855CA0", - "P217-3_PuMa2": "#855CA0", - "P217-6_LEAP": "#855CA0", - "P217-3_LEAP": "#855CA0", - "R217-3_LEAP": "#855CA0", - "P200-3_LEAP": "#855CA0", - "JBO.ROACH.1620": "#407BD5", - "1050CM_PDFB4": "#BE0119", - "1050CM_PDFB1": "#BE0119", - "1050CM_PDFB2": "#BE0119", - "1050CM_PDFB3": "#BE0119", - "1050CM_WBCORR": "#BE0119", - "1050CM_CPSR2": "#BE0119", - "1050CM_CASPSR": "#BE0119", - "MULTI_CPSR2m": "#BE0119", - "MULTI_PDFB1": "#BE0119", - "H-OH_PDFB1": "#BE0119", - "H-OH_CPSR2n": "#BE0119", - "H-OH_CPSR2m": "#BE0119", - "H-OH_PDFB4": "#BE0119", - "MULTI_CPSR2m": "#BE0119", - "MULTI_CPSR2n": "#BE0119", - "MULTI_WBCORR": "#BE0119", - "MULTI_PDFB2": "#BE0119", - "MULTI_PDFB3": "#BE0119", - "MULTI_PDFB4": "#BE0119", - "UWL_Medusa": "#BE0119", - "UWL_CASPSR": "#BE0119", - "UWL_PDFB4": "#BE0119", - "UWL_PDFB4_10CM": "#BE0119", - "UWL_PDFB4_40CM": "#BE0119", - 
"None": "#808080", - "unknown_asterix": "#855CA0", - "CHIME": "#ECE133" - }} - - -# marker dictionary to be used if desired, currently all 'x' -markers = {'observatories':{ - "ao": "x", - "arecibo": "x", - "gbt": "x", - "vla": "x", - "CHIME": "x", - "leap": "x", - "nancay": "x", - "ncyobs": "x", - "effelsberg_asterix": "x", - "effelsberg": "x", - "jodrell": "x", - "jbroach": "x", - "wsrt": "x", - "parkes": "x", - "gmrt": "x", - "meerkat": "x", - "None": "x" - }, - 'pta':{ - "InPTA": "x", - "EPTA": "x", - "NANOGrav": "x", - "PPTA": "x", - "MPTA": "x", - "None": "x" - }, - 'febe': {"327_ASP": "x", - "327_PUPPI": "x", - "430_ASP": "x", - "430_PUPPI": "x", - "L-wide_ASP": "x", - "L-wide_PUPPI": "x", - "Rcvr1_2_GASP": "x", - "Rcvr1_2_GUPPI": "x", - "Rcvr1_2_VEGAS": "x", - "Rcvr_800_GASP": "o", - "Rcvr_800_GUPPI": "o", - "Rcvr_800_VEGAS": "o", - "S-wide_ASP": "o", - "S-wide_PUPPI": "o", - "1.5GHz_YUPPI": "x", - "3GHz_YUPPI": "o", - "6GHz_YUPPI": "^", - "CHIME": "x", - "NRT.BON.1600": "x", - "NRT.BON.1400": "o", - "NRT.BON.2000": "^", - "NRT.NUPPI.1484": "x", - "NRT.NUPPI.1854": "o", - "NRT.NUPPI.2154": "^", - "NRT.NUPPI.2539": "^", - "EFF.EBPP.1360": "o", - "EFF.EBPP.1410": "x", - "EFF.EBPP.2639": "^", - "S60-2_asterix": "v", - "P217-3_asterix": "x", - "P200-3_asterix": "v", - "unknown_asterix": "v", - "P217-3_PuMa2": "x", - "P200-3_LEAP": "v", - "P217-6_LEAP": "x", - "P217-3_LEAP": "x", - "R217-3_LEAP": "x", - "unknown_LEAP": "x", - "JBO.DFB.1400": "x", - "JBO.DFB.1520": "o", - "JBO.ROACH.1620": "^", - "WSRT.P2.1380": "v", - "WSRT.P1.1380.C": "x", - "WSRT.P1.2273.C": "o", - "WSRT.P1.323.C": "x", - "WSRT.P1.367.C": "x", - "1050CM_PDFB4": "x", - "1050CM_PDFB1": "x", - "1050CM_PDFB2": "x", - "1050CM_PDFB3": "x", - "1050CM_WBCORR": "x", - "1050CM_CPSR2": "x", - "1050CM_CPSR2m": "x", - "1050CM_CASPSR": "x", - "MULTI_CPSR2m": "o", - "MULTI_PDFB1": "o", - "H-OH_PDFB1": "^", - "H-OH_CPSR2m": "^", - "H-OH_CPSR2n": "^", - "H-OH_PDFB4": "^", - "MULTI_CPSR2n": "o", - "MULTI_WBCORR": "o", - "MULTI_PDFB2": "o", - "MULTI_PDFB3": "o", - "MULTI_PDFB4": "o", - "UWL_Medusa": "v", - "UWL_PDFB4": "v", - "UWL_PDFB4_10CM": "v", - "UWL_PDFB4_40CM": "v", - "UWL_CASPSR": "v", - "None": "x", - "3GHz_YUPPI": "x", - "6GHz_YUPPI": "x", - "CHIME": "x", - }} -# Define the color map option -#colorscheme = colorschemes['thankful_2'] -#colorscheme = thesis_colorschemes['thesis'] +PACKAGE_DIR = os.path.dirname(__file__) +with open(os.path.join(PACKAGE_DIR, "plot_settings.yaml"), "r") as cf: + config = yaml.safe_load(cf) +# plot_settings.yaml now has a NANOGrav 20-yr specific colorscheme (ng20_c). 
+
+# If you want to go back to the old colors (or are doing DR3), change this to
+# colorschemes["febe"] = config["febe_c"] AND markers["febe"] = config["febe_m"]
+colorschemes, markers, labels = {}, {}, {}
+colorschemes["obs"] = config["obs_c"]
+colorschemes["pta"] = config["pta_c"]
+colorschemes["febe"] = config["ng20_c"]
+markers["obs"] = config["obs_m"]
+markers["pta"] = config["pta_m"]
+markers["febe"] = config["ng20_m"]
+labels = config["label_names"]
 
 
-def call(x):
-    subprocess.call(x,shell=True)
-
-def plot_residuals_time(fitter, restype = 'postfit', colorby='f', plotsig = False, avg = False, whitened = False, \
-                        save = False, legend = True, title = True, axs = None, mixed_ecorr=False, **kwargs):
+def call(x):
+    subprocess.call(x, shell=True)
+
+
+def set_color_and_marker(colorby):
+    if colorby == "pta":
+        colorscheme = colorschemes["pta"]
+        markerscheme = markers["pta"]
+    elif colorby == "obs":
+        colorscheme = colorschemes["obs"]
+        markerscheme = markers["obs"]
+    elif colorby == "f":
+        colorscheme = colorschemes["febe"]
+        markerscheme = markers["febe"]
+    else:
+        raise ValueError(f"Unrecognized colorby value: {colorby}")
+    return colorscheme, markerscheme
+
+
+def plot_residuals_time(
+    fitter,
+    restype="postfit",
+    colorby="f",
+    plotsig=False,
+    avg=False,
+    whitened=False,
+    save=False,
+    legend=True,
+    title=True,
+    axs=None,
+    mixed_ecorr=False,
+    **kwargs,
+):
     """
     Make a plot of the residuals vs. time
 
@@ -289,162 +114,169 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='f', plotsig = Fals
     if fitter.is_wideband:
         NB = False
         if avg == True:
            raise ValueError(
                "Cannot epoch average wideband residuals, please change 'avg' to False."
            )
    else:
        NB = True

    # Check if want epoch averaged residuals
    if avg == True and restype == "prefit" and mixed_ecorr == True:
        avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
        no_avg_dict = no_ecorr_average(
            fitter.toas, fitter.resids_init, use_noise_model=True
        )
    elif avg == True and restype == "postfit" and mixed_ecorr == True:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True)
    elif avg == True and restype == "both" and mixed_ecorr == True:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True)
        avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True)
        no_avg_dict_pre = no_ecorr_average(
            fitter.toas, fitter.resids_init, use_noise_model=True
        )
    elif avg == True and restype == "prefit" and mixed_ecorr == False:
        avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
    elif avg == True and restype == "postfit" and mixed_ecorr == False:
        avg_dict = 
fitter.resids.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'both' and mixed_ecorr == False: + elif avg == True and restype == "both" and mixed_ecorr == False: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - # Get residuals - if 'res' in kwargs.keys(): - res = kwargs['res'] + if "res" in kwargs.keys(): + res = kwargs["res"] else: - if restype == 'prefit': + if restype == "prefit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) - elif avg==True and mixed_ecorr == False: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) + elif avg == True and mixed_ecorr == False: + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'postfit': + res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) + elif restype == "postfit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) elif avg == True: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'both': + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + elif restype == "both": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) - res_pre_no_avg = no_avg_dict_pre['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) + res_pre_no_avg = no_avg_dict_pre["time_resids"].to(u.us) elif avg == True and mixed_ecorr == False: - res = avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) res_pre = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - res_pre = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) else: - raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\ - %(restype)) + raise ValueError( + "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'." 
+                % (restype)
+            )
 
    # Check if we want whitened residuals
    if whitened == True and ("res" not in kwargs.keys()):
        if avg == True and mixed_ecorr == True:
            if restype != "both":
                res = whiten_resids(avg_dict, restype=restype)
                res_no_avg = whiten_resids(no_avg_dict, restype=restype)
            else:
                res = whiten_resids(avg_dict, restype="postfit")
                res_pre = whiten_resids(avg_dict_pre, restype="prefit")
                res_pre = res_pre.to(u.us)
                res_no_avg = whiten_resids(no_avg_dict, restype="postfit")
                res_pre_no_avg = whiten_resids(no_avg_dict_pre, restype="prefit")
                res_pre_no_avg = res_pre_no_avg.to(u.us)
            res = res.to(u.us)
            res_no_avg = res_no_avg.to(u.us)
        elif avg == True and mixed_ecorr == False:
            if restype != "both":
                res = whiten_resids(avg_dict, restype=restype)
            else:
                res = whiten_resids(avg_dict, restype="postfit")
                res_pre = whiten_resids(avg_dict_pre, restype="prefit")
                res_pre = res_pre.to(u.us)
            res = res.to(u.us)
        else:
            if restype != "both":
                res = whiten_resids(fitter, restype=restype)
            else:
                res = whiten_resids(fitter, restype="postfit")
                res_pre = whiten_resids(fitter, restype="prefit")
                res_pre = res_pre.to(u.us)
            res = res.to(u.us)
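    # Convention note: for restype == "both", `res`/`errs` hold the postfit values
    # and `res_pre`/`errs_pre` the prefit ones, matching the "<label>" and
    # "<label> Prefit" legend entries used when plotting below.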
    # Get errors
    if "errs" in kwargs.keys():
        errs = kwargs["errs"]
    else:
        if restype == "prefit":
            if avg == True and mixed_ecorr == True:
                errs = avg_dict["errors"].to(u.us)
                errs_no_avg = no_avg_dict["errors"].to(u.us)
            elif avg == True and mixed_ecorr == False:
                errs = avg_dict["errors"].to(u.us)
            else:
                errs = fitter.toas.get_errors().to(u.us)
        elif restype == "postfit":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    errs = avg_dict["errors"].to(u.us)
                    errs_no_avg = no_avg_dict["errors"].to(u.us)
                elif avg == True and mixed_ecorr == False:
                    errs = avg_dict["errors"].to(u.us)
                else:
                    errs = fitter.resids.get_data_error().to(u.us)
            else:
                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
        elif restype == "both":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    errs = avg_dict["errors"].to(u.us)
                    errs_pre = avg_dict_pre["errors"].to(u.us)
                    errs_no_avg = no_avg_dict["errors"].to(u.us)
                    errs_no_avg_pre = no_avg_dict_pre["errors"].to(u.us)
                elif avg == True and mixed_ecorr == False:
                    errs = avg_dict["errors"].to(u.us)
                    errs_pre = avg_dict_pre["errors"].to(u.us)
                else:
                    errs = fitter.resids.get_data_error().to(u.us)
                    errs_pre = fitter.toas.get_errors().to(u.us)
            else:
                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
                errs_pre = fitter.toas.get_errors().to(u.us)

    # Get MJDs
    if "mjds" in kwargs.keys():
        mjds = kwargs["mjds"]
    else:
        mjds = fitter.toas.get_mjds().value
    if avg == True and mixed_ecorr == True:
        mjds = avg_dict["mjds"].value
        mjds_no_avg = no_avg_dict["mjds"].value
        years_no_avg = (mjds_no_avg - 51544.0) / 365.25 + 2000.0
    elif avg == True and mixed_ecorr == False:
        mjds = avg_dict["mjds"].value
    # Convert to years
    years = (mjds - 51544.0) / 365.25 + 2000.0

    # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr.
    # Create combined arrays
    if avg == True and mixed_ecorr == True:
        combo_res = np.hstack((res, res_no_avg))
        combo_errs = np.hstack((errs, errs_no_avg))
        combo_years = np.hstack((years, years_no_avg))
        if restype == "both":
            combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre))
            combo_res_pre = np.hstack((res_pre, res_no_avg_pre))

    # Get colorby flag values (obs, PTA, febe, etc.)
    if "colorby" in kwargs.keys():
        cb = kwargs["colorby"]
    else:
        cb = np.array(fitter.toas[colorby])
    # . Seems to run a little faster but not robust to obs?
    # cb = np.array(fitter.toas.get_flag_value(colorby)[0])
    if avg == True:
        avg_cb = []
        for iis in avg_dict["indices"]:
            avg_cb.append(cb[iis[0]])
        if mixed_ecorr == True:
            no_avg_cb = []
            for jjs in no_avg_dict["indices"]:
                no_avg_cb.append(cb[jjs])
            no_ecorr_cb = np.array(no_avg_cb)
        cb = np.array(avg_cb)

    # Get the set of unique flag values
    if avg == True and mixed_ecorr == True:
        cb = np.hstack((cb, no_ecorr_cb))
    CB = set(cb)
    colorscheme, markerscheme = set_color_and_marker(colorby)

    if "figsize" in kwargs.keys():
        figsize = kwargs["figsize"]
    else:
        figsize = (10, 5)
    if axs == None:
        fig = plt.figure(figsize=figsize)
        ax1 = fig.add_subplot(111)
    else:
        fig = plt.gcf()
        ax1 = axs
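    # Plot each unique flag value (receiver/backend, observatory, or PTA) as its
    # own errorbar series so every class keeps a consistent color and marker.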
    for i, c in enumerate(CB):
        inds = np.where(cb == c)[0]
        if not inds.tolist():
            cb_label = ""
        else:
            cb_label = cb[inds][0]
        # Get plot preferences
        if "fmt" in kwargs.keys():
            mkr = kwargs["fmt"]
        else:
            try:
                mkr = markerscheme[cb_label]
                if restype == "both":
                    mkr_pre = "."
            except Exception:
                mkr = "x"
                if restype == "both":
                    mkr_pre = "."
                log.log(1, "Color by Flag doesn't have a marker label!!")
        if "color" in kwargs.keys():
            clr = kwargs["color"]
        else:
            try:
                clr = colorscheme[cb_label]
            except Exception:
                clr = "k"
                log.log(1, "Color by Flag doesn't have a color!!")
        if "alpha" in kwargs.keys():
            alpha = kwargs["alpha"]
        else:
            alpha = 0.5
        if avg == True and mixed_ecorr == True:
            if plotsig:
                combo_sig = combo_res[inds] / combo_errs[inds]
                ax1.errorbar(
                    combo_years[inds],
                    combo_sig,
                    yerr=len(combo_errs[inds]) * [1],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                    picker=True,
                )
                if restype == "both":
                    combo_sig_pre = combo_res_pre[inds] / combo_errs_pre[inds]
                    ax1.errorbar(
                        combo_years[inds],
                        combo_sig_pre,
                        yerr=len(combo_errs_pre[inds]) * [1],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
                        picker=True,
                    )
            else:
                ax1.errorbar(
                    combo_years[inds],
                    combo_res[inds],
                    yerr=combo_errs[inds],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                    picker=True,
                )
                if restype == "both":
                    ax1.errorbar(
                        combo_years[inds],
                        combo_res_pre[inds],
                        yerr=combo_errs_pre[inds],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
                        picker=True,
                    )
        else:
            if plotsig:
                sig = res[inds] / errs[inds]
                ax1.errorbar(
                    years[inds],
                    sig,
                    yerr=len(errs[inds]) * [1],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                    picker=True,
                )
                if restype == "both":
                    sig_pre = res_pre[inds] / errs_pre[inds]
                    ax1.errorbar(
                        years[inds],
                        sig_pre,
                        yerr=len(errs_pre[inds]) * [1],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
                        picker=True,
                    )
            else:
                ax1.errorbar(
                    years[inds],
                    res[inds],
                    yerr=errs[inds],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                    picker=True,
                )
                if restype == "both":
                    ax1.errorbar(
                        years[inds],
                        res_pre[inds],
                        yerr=errs_pre[inds],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
picker=True, + ) # Set second axis - ax1.set_xlabel(r'Year') + ax1.set_xlabel(r"Year") ax1.grid(True) ax2 = ax1.twiny() - mjd0 = ((ax1.get_xlim()[0])-2004.0)*365.25+53005. - mjd1 = ((ax1.get_xlim()[1])-2004.0)*365.25+53005. + mjd0 = ((ax1.get_xlim()[0]) - 2004.0) * 365.25 + 53005.0 + mjd1 = ((ax1.get_xlim()[1]) - 2004.0) * 365.25 + 53005.0 ax2.set_xlim(mjd0, mjd1) if plotsig: if avg and whitened: - ax1.set_ylabel('Average Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual/Uncertainty \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual/Uncertainty') + ax1.set_ylabel("Average Residual/Uncertainty") elif whitened and not avg: - ax1.set_ylabel('Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Residual/Uncertainty \n (Whitened)", multialignment="center" + ) else: - ax1.set_ylabel('Residual/Uncertainty') + ax1.set_ylabel("Residual/Uncertainty") else: if avg and whitened: - ax1.set_ylabel('Average Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual ($\mu$s) \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual ($\mu$s)') + ax1.set_ylabel("Average Residual ($\mu$s)") elif whitened and not avg: - ax1.set_ylabel('Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel("Residual ($\mu$s) \n (Whitened)", multialignment="center") else: - ax1.set_ylabel('Residual ($\mu$s)') + ax1.set_ylabel("Residual ($\mu$s)") if legend: if len(CB) > 5: - ncol = int(np.ceil(len(CB)/2)) + ncol = int(np.ceil(len(CB) / 2)) y_offset = 1.15 else: ncol = len(CB) y_offset = 1.0 - ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol) + ax1.legend( + loc="upper center", + bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]), + ncol=ncol, + ) if title: if len(CB) > 5: y_offset = 1.1 else: y_offset = 1.0 - plt.title("%s %s timing residuals" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1]) + plt.title( + "%s %s timing residuals" % (fitter.model.PSR.value, restype), + y=y_offset + 1.0 / figsize[1], + ) if axs == None: plt.tight_layout() if save: @@ -615,9 +516,9 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='f', plotsig = Fals ext += "_NB" else: ext += "_WB" - if restype == 'prefit': + if restype == "prefit": ext += "_prefit" - elif restype == 'postfit': + elif restype == "postfit": ext += "_postfit" elif restype == "both": ext += "_pre_post_fit" @@ -625,7 +526,7 @@ def plot_residuals_time(fitter, restype = 'postfit', colorby='f', plotsig = Fals if axs == None: # Define clickable points - text = ax2.text(0,0,"") + text = ax2.text(0, 0, "") # Define point highlight color stamp_color = "#FD9927" @@ -634,29 +535,45 @@ def onclick(event): # Get X and Y axis data xdata = mjds if plotsig: - ydata = (res/errs).decompose().value + ydata = (res / errs).decompose().value else: ydata = res.value # Get x and y data from click xclick = event.xdata yclick = event.ydata # Calculate scaled distance, find closest point index - d = np.sqrt(((xdata - xclick)/10.0)**2 + (ydata - yclick)**2) + d = np.sqrt(((xdata - xclick) / 10.0) ** 2 + (ydata - yclick) ** 2) ind_close = np.where(np.min(d) == d)[0] # highlight clicked point - ax2.scatter(xdata[ind_close], ydata[ind_close], marker = 'x', c = stamp_color) + ax2.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color) # Print point info text.set_position((xdata[ind_close], 
ydata[ind_close])) if plotsig: - text.set_text("TOA Params:\n MJD: %s \n Res/Err: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n MJD: %s \n Res/Err: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) else: - text.set_text("TOA Params:\n MJD: %s \n Res: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n MJD: %s \n Res: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) - fig.canvas.mpl_connect('button_press_event', onclick) + fig.canvas.mpl_connect("button_press_event", onclick) return -def plot_FD_delay(fitter = None, model_object = None, save = False, title= True, axs = None, legend=True, show_bin=True, **kwargs): + +def plot_FD_delay( + fitter=None, + model_object=None, + save=False, + title=True, + axs=None, + legend=True, + show_bin=True, + **kwargs, +): """ Make a plot of frequency (MHz) vs the time delay (us) implied by FD parameters. Z. Arzoumanian, The NANOGrav Nine-year Data Set: Observations, Arrival @@ -688,117 +605,124 @@ def plot_FD_delay(fitter = None, model_object = None, save = False, title= True, loc ['string'] : matplotlib legend location [default: 'upper right'] Only used when legend = True """ - #Make sure that either a fitter or model object has been specified + # Make sure that either a fitter or model object has been specified if fitter == None and model_object == None: raise Exception("Need to specify either a fitter or model object") - #Get frequencies - if 'freqs' in kwargs.keys(): - freqs = kwargs['freqs'] + # Get frequencies + if "freqs" in kwargs.keys(): + freqs = kwargs["freqs"] elif model_object is not None: - raise Exception("Using a PINT model object. Need to add list/array of frequencies to calculate FD delay over") + raise Exception( + "Using a PINT model object. Need to add list/array of frequencies to calculate FD delay over" + ) else: freqs = fitter.toas.get_freqs().value freqs = np.sort(freqs) - #Get FD delay in units of milliseconds as a function of frequency. This will eventually by available in PINT and become redundant. PINT version may need to be modified to allow for calculation of error regions - def get_FD_delay(pint_model_object,freqs): - FD_map = model.TimingModel.get_prefix_mapping(pint_model_object,"FD") + # Get FD delay in units of milliseconds as a function of frequency. This will eventually by available in PINT and become redundant. 
PINT version may need to be modified to allow for calculation of error regions
    def get_FD_delay(pint_model_object, freqs):
        FD_map = model.TimingModel.get_prefix_mapping(pint_model_object, "FD")
        FD_names = list(FD_map.values())
        FD_names.reverse()
        FD_vals = []
        FD_uncert = []
        for i in FD_names:
            FD_vals.append(
                pint_model_object.get_params_dict(which="all", kind="value")[i]
            )
            FD_uncert.append(
                pint_model_object.get_params_dict(which="all", kind="uncertainty")[i]
            )
        FD_vals.append(0.0)
        FD_uncert.append(0.0)
        FD_vals = np.array(FD_vals)
        FD_uncert = np.array(FD_uncert)
        # Each FDk coefficient contributes FDk * log10(freq/GHz)**k seconds of delay,
        # so the curve and a rough uncertainty band follow from np.polyval.
        delay = np.polyval(FD_vals, np.log10(freqs))
        delta_delay_plus = np.polyval(FD_uncert + FD_vals, np.log10(freqs))
        delta_delay_minus = np.polyval(FD_vals - FD_uncert, np.log10(freqs))
        if len(FD_vals) - 1 > 1:
            FD_phrase = "FD1-%s" % (len(FD_vals) - 1)
        else:
            FD_phrase = "FD1"
        return delay * 1e6, delta_delay_plus * 1e6, delta_delay_minus * 1e6, FD_phrase

    # Get FD params if fitter object is given
    if fitter is not None:
        # Check if the fitter object has FD parameters
        try:
-            FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay(fitter.model, freqs*1e-3)
-            #print(FD_delay)
+            FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay(
+                fitter.model, freqs * 1e-3
+            )
            psr_name = fitter.model.PSR.value
            """For when new version of PINT is default on pint_pal
            FD_delay = pint.models.frequency_dependent.FD.FD_delay(fitter.model,freqs)
            """
            if show_bin:
                nbins = fitter.toas["nbin"].astype(int).min()
                P0 = 1 / fitter.model.F0.value
                P0_bin_max = P0 / nbins
        except:
            print("No FD parameters in this model! Exiting...")

    # Get FD params if model object is given
    if model_object is not None:
        # Check if the model object has FD parameters
        try:
            FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay(
                model_object, freqs * 1e-3
            )
            psr_name = model_object.PSR.value
            """For when new version of PINT is default on pint_pal
            FD_delay = pint.models.frequency_dependent.FD.FD_delay(fitter.model,freqs)
            """
            if show_bin:
                print(
                    "show_bin requires a fitter object, cannot be used with the model alone"
                )
                show_bin = False
        except:
            print("No FD parameters in this model! Exiting...")

    # Get plotting preferences. 
+ if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (8,4) + figsize = (8, 4) if axs == None: fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) else: fig = plt.gcf() ax1 = axs - if 'ls' in kwargs.keys(): - linestyle = kwargs['ls'] + if "ls" in kwargs.keys(): + linestyle = kwargs["ls"] else: - linestyle = '-' - if 'color' in kwargs.keys(): - clr = kwargs['color'] + linestyle = "-" + if "color" in kwargs.keys(): + clr = kwargs["color"] else: clr = "green" - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 0.2 - if 'loc' in kwargs.keys(): - loc = kwargs['loc'] + if "loc" in kwargs.keys(): + loc = kwargs["loc"] else: loc = "upper right" - #Plot frequency (MHz) vs delay (microseconds) - ax1.plot(freqs,FD_delay,label = legend_text,color=clr,ls=linestyle) - ax1.fill_between(freqs, - FD_delay_err_plus, - FD_delay_err_minus, - color=clr,alpha=alpha) + # Plot frequency (MHz) vs delay (microseconds) + ax1.plot(freqs, FD_delay, label=legend_text, color=clr, ls=linestyle) + ax1.fill_between( + freqs, FD_delay_err_plus, FD_delay_err_minus, color=clr, alpha=alpha + ) if show_bin: if (FD_delay > 0).any(): - ax1.axhline(P0_bin_max*1E6, label="1 profile bin") + ax1.axhline(P0_bin_max * 1e6, label="1 profile bin") if (FD_delay < 0).any(): - ax1.axhline(-P0_bin_max*1E6, label="1 profile bin") + ax1.axhline(-P0_bin_max * 1e6, label="1 profile bin") ax1.set_xlabel("Frequency (MHz)") ax1.set_ylabel("Delay ($\mu$s)") if title: @@ -812,8 +736,21 @@ def get_FD_delay(pint_model_object,freqs): return -def plot_residuals_freq(fitter, restype = 'postfit', colorby='f',plotsig = False, avg = False, mixed_ecorr=False,\ - whitened = False, save = False, legend = True, title = True, axs = None, **kwargs): + +def plot_residuals_freq( + fitter, + restype="postfit", + colorby="f", + plotsig=False, + avg=False, + mixed_ecorr=False, + whitened=False, + save=False, + legend=True, + title=True, + axs=None, + **kwargs, +): """ Make a plot of the residuals vs. frequency @@ -856,209 +793,203 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='f',plotsig = False if fitter.is_wideband: NB = False if avg == True: - raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") + raise ValueError( + "Cannot epoch average wideband residuals, please change 'avg' to False." 
+ ) else: NB = True # Check if want epoch averaged residuals - if avg == True and restype == 'prefit' and mixed_ecorr == True: + if avg == True and restype == "prefit" and mixed_ecorr == True: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True) - elif avg == True and restype == 'postfit' and mixed_ecorr == True: + no_avg_dict = no_ecorr_average( + fitter.toas, fitter.resids_init, use_noise_model=True + ) + elif avg == True and restype == "postfit" and mixed_ecorr == True: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True) - elif avg == True and restype == 'both' and mixed_ecorr == True: + no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True) + elif avg == True and restype == "both" and mixed_ecorr == True: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True) + no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - no_avg_dict_pre = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True) - elif avg == True and restype == 'prefit' and mixed_ecorr == False: + no_avg_dict_pre = no_ecorr_average( + fitter.toas, fitter.resids_init, use_noise_model=True + ) + elif avg == True and restype == "prefit" and mixed_ecorr == False: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'postfit' and mixed_ecorr==False: + elif avg == True and restype == "postfit" and mixed_ecorr == False: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'both' and mixed_ecorr == False: + elif avg == True and restype == "both" and mixed_ecorr == False: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - # Get residuals - if 'res' in kwargs.keys(): - res = kwargs['res'] + if "res" in kwargs.keys(): + res = kwargs["res"] else: - if restype == 'prefit': + if restype == "prefit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) - elif avg==True and mixed_ecorr == False: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) + elif avg == True and mixed_ecorr == False: + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'postfit': + res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) + elif restype == "postfit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) elif avg == True: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'both': + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + elif restype == "both": if NB == True: if avg == True and mixed_ecorr == 
True:
                    res = avg_dict["time_resids"].to(u.us)
                    res_no_avg = no_avg_dict["time_resids"].to(u.us)
                    res_pre = avg_dict_pre["time_resids"].to(u.us)
                    res_pre_no_avg = no_avg_dict_pre["time_resids"].to(u.us)
                elif avg == True and mixed_ecorr == False:
                    res = avg_dict["time_resids"].to(u.us)
                    res_pre = avg_dict_pre["time_resids"].to(u.us)
                else:
                    res = fitter.resids.time_resids.to(u.us)
                    res_pre = fitter.resids_init.time_resids.to(u.us)
            else:
                res = fitter.resids.residual_objs["toa"].time_resids.to(u.us)
                res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us)
        else:
            raise ValueError(
                "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."
                % (restype)
            )

    # Check if we want whitened residuals
    if whitened == True and ("res" not in kwargs.keys()):
        if avg == True and mixed_ecorr == True:
            if restype != "both":
                res = whiten_resids(avg_dict, restype=restype)
                res_no_avg = whiten_resids(no_avg_dict, restype=restype)
            else:
                res = whiten_resids(avg_dict, restype="postfit")
                res_pre = whiten_resids(avg_dict_pre, restype="prefit")
                res_pre = res_pre.to(u.us)
                res_no_avg = whiten_resids(no_avg_dict, restype="postfit")
                res_pre_no_avg = whiten_resids(no_avg_dict_pre, restype="prefit")
                res_pre_no_avg = res_pre_no_avg.to(u.us)
            res = res.to(u.us)
            res_no_avg = res_no_avg.to(u.us)
        elif avg == True and mixed_ecorr == False:
            if restype != "both":
                res = whiten_resids(avg_dict, restype=restype)
            else:
                res = whiten_resids(avg_dict, restype="postfit")
                res_pre = whiten_resids(avg_dict_pre, restype="prefit")
                res_pre = res_pre.to(u.us)
            res = res.to(u.us)
        else:
            if restype != "both":
                res = whiten_resids(fitter, restype=restype)
            else:
                res = whiten_resids(fitter, restype="postfit")
                res_pre = whiten_resids(fitter, restype="prefit")
                res_pre = res_pre.to(u.us)
            res = res.to(u.us)

    # Get errors
    if "errs" in kwargs.keys():
        errs = kwargs["errs"]
    else:
        if restype == "prefit":
            if avg == True and mixed_ecorr == True:
                errs = avg_dict["errors"].to(u.us)
                errs_no_avg = no_avg_dict["errors"].to(u.us)
            elif avg == True and mixed_ecorr == False:
                errs = avg_dict["errors"].to(u.us)
            else:
                errs = fitter.toas.get_errors().to(u.us)
        elif restype == 
"postfit": if NB == True: if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) - elif restype == 'both': + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) + elif restype == "both": if NB == True: if avg == True and mixed_ecorr == True: - errs = avg_dict['errors'].to(u.us) - errs_pre = avg_dict_pre['errors'].to(u.us) - errs_no_avg = no_avg_dict['errors'].to(u.us) - errs_no_avg_pre = no_avg_dict_pre['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_pre = avg_dict_pre["errors"].to(u.us) + errs_no_avg = no_avg_dict["errors"].to(u.us) + errs_no_avg_pre = no_avg_dict_pre["errors"].to(u.us) elif avg == True and mixed_ecorr == False: - errs = avg_dict['errors'].to(u.us) - errs_pre = avg_dict_pre['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_pre = avg_dict_pre["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) - # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr. # Create combined arrays if avg == True and mixed_ecorr == True: combo_res = np.hstack((res, res_no_avg)) combo_errs = np.hstack((errs, errs_no_avg)) - if restype =='both': + if restype == "both": combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) combo_res_pre = np.hstack((res_pre, res_no_avg_pre)) # Get freqs - if 'freqs' in kwargs.keys(): - freqs = kwargs['freqs'] + if "freqs" in kwargs.keys(): + freqs = kwargs["freqs"] else: freqs = fitter.toas.get_freqs().value - # Get colorby flag values (obs, PTA, febe, etc.) - if 'colorby' in kwargs.keys(): - cb = kwargs['colorby'] + if "colorby" in kwargs.keys(): + cb = kwargs["colorby"] else: cb = np.array(fitter.toas[colorby]) -#. Seems to run a little faster but not robust to obs? -# cb = np.array(fitter.toas.get_flag_value(colorby)[0]) + # . Seems to run a little faster but not robust to obs? 
+ # cb = np.array(fitter.toas.get_flag_value(colorby)[0]) if avg == True: avg_cb = [] - for iis in avg_dict['indices']: + for iis in avg_dict["indices"]: avg_cb.append(cb[iis[0]]) if mixed_ecorr == True: no_avg_cb = [] - for jjs in no_avg_dict['indices']: + for jjs in no_avg_dict["indices"]: no_avg_cb.append(cb[jjs]) no_ecorr_cb = np.array(no_avg_cb) cb = np.array(avg_cb) # Get the set of unique flag values - if avg==True and mixed_ecorr==True: - cb = np.hstack((cb,no_ecorr_cb)) + if avg == True and mixed_ecorr == True: + cb = np.hstack((cb, no_ecorr_cb)) CB = set(cb) - if colorby== 'pta': - colorscheme = colorschemes['pta'] - markerscheme = markers['pta'] - elif colorby == 'obs': - colorscheme = colorschemes['observatories'] - markerscheme = markers['observatories'] - elif colorby == 'f': - colorscheme = colorschemes['febe'] - markerscheme = markers['febe'] - + colorscheme, markerscheme = set_color_and_marker(colorby) - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (10,4) + figsize = (10, 4) if axs == None: fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) @@ -1067,103 +998,180 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='f',plotsig = False ax1 = axs for i, c in enumerate(CB): - inds = np.where(cb==c)[0] + inds = np.where(cb == c)[0] if not inds.tolist(): cb_label = "" else: cb_label = cb[inds][0] # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: try: mkr = markerscheme[cb_label] - if restype == 'both': - mkr_pre = '.' + if restype == "both": + mkr_pre = "." except Exception: - mkr = 'x' - if restype == 'both': - mkr_pre = '.' + mkr = "x" + if restype == "both": + mkr_pre = "." 
log.log(1, "Color by Flag doesn't have a marker label!!") - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in kwargs.keys(): + clr = kwargs["color"] else: try: clr = colorscheme[cb_label] except Exception: - clr = 'k' + clr = "k" log.log(1, "Color by Flag doesn't have a color!!") - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 0.5 if avg and mixed_ecorr: if plotsig: - combo_sig = combo_res[inds]/combo_errs[inds] - ax1.errorbar(freqs[inds], combo_sig, yerr=len(combo_errs[inds])*[1], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha, picker=True) - if restype == 'both': - combo_sig_pre = combo_res_pre[inds]/combo_errs_pre[inds] - ax1.errorbar(freqs[inds], combo_sig_pre, yerr=len(combo_errs_pre[inds])*[1], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True) + combo_sig = combo_res[inds] / combo_errs[inds] + ax1.errorbar( + freqs[inds], + combo_sig, + yerr=len(combo_errs[inds]) * [1], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + picker=True, + ) + if restype == "both": + combo_sig_pre = combo_res_pre[inds] / combo_errs_pre[inds] + ax1.errorbar( + freqs[inds], + combo_sig_pre, + yerr=len(combo_errs_pre[inds]) * [1], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + picker=True, + ) else: - ax1.errorbar(freqs[inds], combo_res[inds], yerr=combo_errs[inds], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha, picker=True) - if restype == 'both': - ax1.errorbar(freqs[inds], combo_res_pre[inds], yerr=combo_errs_pre[inds], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True) + ax1.errorbar( + freqs[inds], + combo_res[inds], + yerr=combo_errs[inds], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + picker=True, + ) + if restype == "both": + ax1.errorbar( + freqs[inds], + combo_res_pre[inds], + yerr=combo_errs_pre[inds], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + picker=True, + ) else: if plotsig: - sig = res[inds]/errs[inds] - ax1.errorbar(freqs[inds], sig, yerr=len(errs[inds])*[1], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha, picker=True) - if restype == 'both': - sig_pre = res_pre[inds]/errs_pre[inds] - ax1.errorbar(freqs[inds], sig_pre, yerr=len(errs_pre[inds])*[1], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True) + sig = res[inds] / errs[inds] + ax1.errorbar( + freqs[inds], + sig, + yerr=len(errs[inds]) * [1], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + picker=True, + ) + if restype == "both": + sig_pre = res_pre[inds] / errs_pre[inds] + ax1.errorbar( + freqs[inds], + sig_pre, + yerr=len(errs_pre[inds]) * [1], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + picker=True, + ) else: - ax1.errorbar(freqs[inds], res[inds], yerr=errs[inds], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha, picker=True) - if restype == 'both': - ax1.errorbar(freqs[inds], res_pre[inds], yerr=errs_pre[inds], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha, picker=True) + ax1.errorbar( + freqs[inds], + res[inds], + yerr=errs[inds], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + picker=True, + ) + if restype == "both": + ax1.errorbar( + freqs[inds], + res_pre[inds], + yerr=errs_pre[inds], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + picker=True, + ) # Set axis - ax1.set_xlabel(r'Frequency (MHz)') + ax1.set_xlabel(r"Frequency (MHz)") 
ax1.grid(True) if plotsig: if avg and whitened: - ax1.set_ylabel('Average Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual/Uncertainty \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual/Uncertainty') + ax1.set_ylabel("Average Residual/Uncertainty") elif whitened and not avg: - ax1.set_ylabel('Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Residual/Uncertainty \n (Whitened)", multialignment="center" + ) else: - ax1.set_ylabel('Residual/Uncertainty') + ax1.set_ylabel("Residual/Uncertainty") else: if avg and whitened: - ax1.set_ylabel('Average Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual ($\mu$s) \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual ($\mu$s)') + ax1.set_ylabel("Average Residual ($\mu$s)") elif whitened and not avg: - ax1.set_ylabel('Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel("Residual ($\mu$s) \n (Whitened)", multialignment="center") else: - ax1.set_ylabel('Residual ($\mu$s)') + ax1.set_ylabel("Residual ($\mu$s)") if legend: if len(CB) > 5: - ncol = int(np.ceil(len(CB)/2)) + ncol = int(np.ceil(len(CB) / 2)) y_offset = 1.15 else: ncol = len(CB) y_offset = 1.0 - ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol) + ax1.legend( + loc="upper center", + bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]), + ncol=ncol, + ) if title: if len(CB) > 5: y_offset = 1.1 else: y_offset = 1.0 - plt.title("%s %s frequency residuals" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1]) + plt.title( + "%s %s frequency residuals" % (fitter.model.PSR.value, restype), + y=y_offset + 1.0 / figsize[1], + ) if axs == None: plt.tight_layout() if save: @@ -1176,9 +1184,9 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='f',plotsig = False ext += "_NB" else: ext += "_WB" - if restype == 'prefit': + if restype == "prefit": ext += "_prefit" - elif restype == 'postfit': + elif restype == "postfit": ext += "_postfit" elif restype == "both": ext += "_pre_post_fit" @@ -1186,38 +1194,52 @@ def plot_residuals_freq(fitter, restype = 'postfit', colorby='f',plotsig = False if axs == None: # Define clickable points - text = ax1.text(0,0,"") - stamp_color= "#FD9927" + text = ax1.text(0, 0, "") + stamp_color = "#FD9927" def onclick(event): # Get X and Y axis data xdata = freqs if plotsig: - ydata = (res/errs).decompose().value + ydata = (res / errs).decompose().value else: ydata = res.value # Get x and y data from click xclick = event.xdata yclick = event.ydata # Calculate scaled distance, find closest point index - d = np.sqrt(((xdata - xclick)/10.0)**2 + (ydata - yclick)**2) + d = np.sqrt(((xdata - xclick) / 10.0) ** 2 + (ydata - yclick) ** 2) ind_close = np.where(np.min(d) == d)[0] # highlight clicked point - ax1.scatter(xdata[ind_close], ydata[ind_close], marker = 'x', c = stamp_color) + ax1.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color) # Print point info text.set_position((xdata[ind_close], ydata[ind_close])) if plotsig: - text.set_text("TOA Params:\n Frequency: %s \n Res/Err: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n Frequency: %s \n Res/Err: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) else: - text.set_text("TOA Params:\n Frequency: %s \n Res: %.2f 
\n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n Frequency: %s \n Res: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) - fig.canvas.mpl_connect('button_press_event', onclick) + fig.canvas.mpl_connect("button_press_event", onclick) return -def plot_dmx_time(fitter, savedmx = False, save = False, legend = True,\ - axs = None, title = True, compare = False, **kwargs): +def plot_dmx_time( + fitter, + savedmx=False, + save=False, + legend=True, + axs=None, + title=True, + compare=False, + **kwargs, +): """ Make a plot of DMX vs. time @@ -1255,111 +1277,161 @@ def plot_dmx_time(fitter, savedmx = False, save = False, legend = True,\ dmxname = "%s_dmxparse.nb.out" % (psrname) # Get plotting dmx and error values for WB - if 'dmx' in kwargs.keys(): - DMXs = kwargs['dmx'] + if "dmx" in kwargs.keys(): + DMXs = kwargs["dmx"] else: # get dmx dictionary from pint dmxparse function dmx_dict = pu.dmxparse(fitter, save="dmxparse.out") - DMXs = dmx_dict['dmxs'].value - DMX_vErrs = dmx_dict['dmx_verrs'].value - DMX_center_MJD = dmx_dict['dmxeps'].value - DMX_center_Year = (DMX_center_MJD- 51544.0)/365.25 + 2000.0 + DMXs = dmx_dict["dmxs"].value + DMX_vErrs = dmx_dict["dmx_verrs"].value + DMX_center_MJD = dmx_dict["dmxeps"].value + DMX_center_Year = (DMX_center_MJD - 51544.0) / 365.25 + 2000.0 # move file name if savedmx: os.rename("dmxparse.out", dmxname) # Double check/overwrite errors if necessary - if 'errs' in kwargs.keys(): - DMX_vErrs = kwargs['errs'] + if "errs" in kwargs.keys(): + DMX_vErrs = kwargs["errs"] # Double check/overwrite dmx mjd epochs if necessary - if 'mjds' in kwargs.keys(): - DMX_center_MJD = kwargs['mjds'] - DMX_center_Year = (DMX_center_MJD- 51544.0)/365.25 + 2000.0 + if "mjds" in kwargs.keys(): + DMX_center_MJD = kwargs["mjds"] + DMX_center_Year = (DMX_center_MJD - 51544.0) / 365.25 + 2000.0 # If we want to compare WB to NB, we need to look for the right output file if compare == True: # Look for other dmx file if NB: - #log.log(1, "Searching for file: %s_dmxparse.wb.out" % (psrname)) - if not os.path.isfile("%s_dmxparse.wb.out"%(psrname)): + # log.log(1, "Searching for file: %s_dmxparse.wb.out" % (psrname)) + if not os.path.isfile("%s_dmxparse.wb.out" % (psrname)): raise RuntimeError("Cannot find Wideband DMX parse output file.") else: # Get the values from the DMX parse file - dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt("%s_dmxparse.wb.out"%(psrname),\ - unpack=True, usecols=(0,1,2,3,4)) + dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt( + "%s_dmxparse.wb.out" % (psrname), + unpack=True, + usecols=(0, 1, 2, 3, 4), + ) else: - #log.log(1, "Searching for file: %s_dmxparse.nb.out" % (psrname)) - if not os.path.isfile("%s_dmxparse.nb.out"%(psrname)): + # log.log(1, "Searching for file: %s_dmxparse.nb.out" % (psrname)) + if not os.path.isfile("%s_dmxparse.nb.out" % (psrname)): raise RuntimeError("Cannot find Narrowband DMX parse output file.") else: # Get the values from the DMX parse file - dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt("%s_dmxparse.nb.out"%(psrname),\ - unpack=True, usecols=(0,1,2,3,4)) - dmx_mid_yr = (dmx_epochs- 51544.0)/365.25 + 2000.0 + dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt( + "%s_dmxparse.nb.out" % (psrname), + unpack=True, + usecols=(0, 1, 2, 3, 4), + ) + dmx_mid_yr = (dmx_epochs - 51544.0) / 365.25 + 2000.0 # Define the plotting function if axs == None: - if 'figsize' in kwargs.keys(): - 
figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (10,4) + figsize = (10, 4) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) else: ax1 = axs # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: - mkr = 's' + mkr = "s" if compare: - mkr_nb = 'o' - if 'color' in kwargs.keys(): - clr = kwargs['color'] + mkr_nb = "o" + if "color" in kwargs.keys(): + clr = kwargs["color"] else: - clr = 'gray' + clr = "gray" if compare: - clr_nb = 'k' - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + clr_nb = "k" + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 1.0 # Not actually plot if NB and not compare: - ax1.errorbar(DMX_center_Year, DMXs*10**3, yerr=DMX_vErrs*10**3, fmt='.', c = clr, marker = mkr, \ - label="Narrowband") + ax1.errorbar( + DMX_center_Year, + DMXs * 10**3, + yerr=DMX_vErrs * 10**3, + fmt=".", + c=clr, + marker=mkr, + label="Narrowband", + ) elif not NB and not compare: - ax1.errorbar(DMX_center_Year, DMXs*10**3, yerr=DMX_vErrs*10**3, fmt='.', c = clr, marker = mkr, \ - label="Wideband") + ax1.errorbar( + DMX_center_Year, + DMXs * 10**3, + yerr=DMX_vErrs * 10**3, + fmt=".", + c=clr, + marker=mkr, + label="Wideband", + ) elif compare: if NB: - ax1.errorbar(DMX_center_Year, DMXs*10**3, yerr=DMX_vErrs*10**3, fmt='.', c = clr, marker = mkr, \ - label="Narrowband") - ax1.errorbar(dmx_mid_yr, nb_dmx*10**3, yerr = nb_dmx_var*10**3, fmt = '.', color = clr_nb, marker = mkr_nb, \ - label='Wideband') + ax1.errorbar( + DMX_center_Year, + DMXs * 10**3, + yerr=DMX_vErrs * 10**3, + fmt=".", + c=clr, + marker=mkr, + label="Narrowband", + ) + ax1.errorbar( + dmx_mid_yr, + nb_dmx * 10**3, + yerr=nb_dmx_var * 10**3, + fmt=".", + color=clr_nb, + marker=mkr_nb, + label="Wideband", + ) else: - ax1.errorbar(DMX_center_Year, DMXs*10**3, yerr=DMX_vErrs*10**3, fmt='.', c = clr, marker = mkr, \ - label="Wideband") - ax1.errorbar(dmx_mid_yr, nb_dmx*10**3, yerr = nb_dmx_var*10**3, fmt = '.', color = clr_nb, marker = mkr_nb, \ - label='Narrowband') + ax1.errorbar( + DMX_center_Year, + DMXs * 10**3, + yerr=DMX_vErrs * 10**3, + fmt=".", + c=clr, + marker=mkr, + label="Wideband", + ) + ax1.errorbar( + dmx_mid_yr, + nb_dmx * 10**3, + yerr=nb_dmx_var * 10**3, + fmt=".", + color=clr_nb, + marker=mkr_nb, + label="Narrowband", + ) # Set second axis - ax1.set_xlabel(r'Year') + ax1.set_xlabel(r"Year") ax1.grid(True) ax2 = ax1.twiny() - mjd0 = ((ax1.get_xlim()[0])-2004.0)*365.25+53005. - mjd1 = ((ax1.get_xlim()[1])-2004.0)*365.25+53005. 
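+ # Convert the year-axis limits to MJD for the twin axis; the linear map uses
+ # Julian years of 365.25 days anchored at MJD 53005.0 = 2004 Jan 1.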
+ mjd0 = ((ax1.get_xlim()[0]) - 2004.0) * 365.25 + 53005.0 + mjd1 = ((ax1.get_xlim()[1]) - 2004.0) * 365.25 + 53005.0 ax2.set_xlim(mjd0, mjd1) ax1.set_ylabel(r"DMX ($10^{-3}$ pc cm$^{-3}$)") if legend: - ax1.legend(loc='best') + ax1.legend(loc="best") if title: if NB and not compare: - plt.title("%s narrowband dmx" % (psrname), y=1.0+1.0/figsize[1]) + plt.title("%s narrowband dmx" % (psrname), y=1.0 + 1.0 / figsize[1]) elif not NB and not compare: - plt.title("%s wideband dmx" % (psrname), y=1.0+1.0/figsize[1]) + plt.title("%s wideband dmx" % (psrname), y=1.0 + 1.0 / figsize[1]) elif compare: - plt.title("%s narrowband and wideband dmx" % (psrname), y=1.0+1.0/figsize[1]) + plt.title( + "%s narrowband and wideband dmx" % (psrname), y=1.0 + 1.0 / figsize[1] + ) if axs == None: plt.tight_layout() if save: @@ -1374,32 +1446,36 @@ def plot_dmx_time(fitter, savedmx = False, save = False, legend = True,\ if axs == None: # Define clickable points - text = ax1.text(0,0,"") + text = ax1.text(0, 0, "") # Define color for highlighting points stamp_color = "#FD9927" def onclick(event): # Get X and Y axis data xdata = DMX_center_Year - ydata = DMXs*10**3 + ydata = DMXs * 10**3 # Get x and y data from click xclick = event.xdata yclick = event.ydata # Calculate scaled distance, find closest point index - d = np.sqrt(((xdata - xclick)/1000.0)**2 + (ydata - yclick)**2) + d = np.sqrt(((xdata - xclick) / 1000.0) ** 2 + (ydata - yclick) ** 2) ind_close = np.where(np.min(d) == d)[0] # highlight clicked point - ax2.scatter(xdata[ind_close], ydata[ind_close], marker = 's', c = stamp_color) + ax2.scatter(xdata[ind_close], ydata[ind_close], marker="s", c=stamp_color) # Print point info text.set_position((xdata[ind_close], ydata[ind_close])) - text.set_text("DMX Params:\n MJD: %s \n DMX: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "DMX Params:\n MJD: %s \n DMX: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) - fig.canvas.mpl_connect('button_press_event', onclick) + fig.canvas.mpl_connect("button_press_event", onclick) return -def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model = None): - """ Make simple dmx vs. time plot with dmxout file(s) as input + +def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model=None): + """Make simple dmx vs. 
time plot with dmxout file(s) as input Parameters ========== @@ -1420,41 +1496,72 @@ def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model = None): dmxout information (mjd, val, err, r1, r2) for each label """ from astropy.time import Time - if isinstance(dmxout_files, str): dmxout_files = [dmxout_files] - if isinstance(labels, str): labels = [labels] - figsize = (10,4) + if isinstance(dmxout_files, str): + dmxout_files = [dmxout_files] + if isinstance(labels, str): + labels = [labels] + + figsize = (10, 4) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) - ax1.set_xlabel(r'Year') + ax1.set_xlabel(r"Year") ax1.set_ylabel(r"DMX ($10^{-3}$ pc cm$^{-3}$)") ax1.grid(True) ax2 = ax1.twiny() - ax2.set_xlabel('MJD') + ax2.set_xlabel("MJD") dmxDict = {} - for ii,(df,lab) in enumerate(zip(dmxout_files,labels)): - dmxmjd, dmxval, dmxerr, dmxr1, dmxr2 = np.loadtxt(df, unpack=True, usecols=range(0,5)) - idmxDict = {'mjd':dmxmjd,'val':dmxval,'err':dmxerr,'r1':dmxr1,'r2':dmxr2} - ax2.errorbar(dmxmjd, dmxval*10**3, yerr=dmxerr*10**3, label=lab, marker='o', ls='', markerfacecolor='none') + for ii, (df, lab) in enumerate(zip(dmxout_files, labels)): + dmxmjd, dmxval, dmxerr, dmxr1, dmxr2 = np.loadtxt( + df, unpack=True, usecols=range(0, 5) + ) + idmxDict = { + "mjd": dmxmjd, + "val": dmxval, + "err": dmxerr, + "r1": dmxr1, + "r2": dmxr2, + } + ax2.errorbar( + dmxmjd, + dmxval * 10**3, + yerr=dmxerr * 10**3, + label=lab, + marker="o", + ls="", + markerfacecolor="none", + ) dmxDict[lab] = idmxDict # set ax1 lims (year) based on ax2 lims (mjd) mjd_xlo, mjd_xhi = ax2.get_xlim() - dy_xlo = Time(mjd_xlo,format='mjd').decimalyear - dy_xhi = Time(mjd_xhi,format='mjd').decimalyear - ax1.set_xlim(dy_xlo,dy_xhi) + dy_xlo = Time(mjd_xlo, format="mjd").decimalyear + dy_xhi = Time(mjd_xhi, format="mjd").decimalyear + ax1.set_xlim(dy_xlo, dy_xhi) # capture ylim orig_ylim = ax2.get_ylim() - if psrname: ax1.text(0.975,0.05,psrname,transform=ax1.transAxes,size=18,c='lightgray', - horizontalalignment='right', verticalalignment='bottom') + if psrname: + ax1.text( + 0.975, + 0.05, + psrname, + transform=ax1.transAxes, + size=18, + c="lightgray", + horizontalalignment="right", + verticalalignment="bottom", + ) if model: from pint.simulation import make_fake_toas_fromMJDs from pint_pal.lite_utils import remove_noise - fake_mjds = np.linspace(np.min(dmxmjd),np.max(dmxmjd),num=int(np.max(dmxmjd)-np.min(dmxmjd))) - fake_mjdTime = Time(fake_mjds,format='mjd') + + fake_mjds = np.linspace( + np.min(dmxmjd), np.max(dmxmjd), num=int(np.max(dmxmjd) - np.min(dmxmjd)) + ) + fake_mjdTime = Time(fake_mjds, format="mjd") # copy the model and add sw component mo_swm = copy.deepcopy(model) @@ -1462,20 +1569,22 @@ def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model = None): mo_swm.NE_SW.value = 10.0 # generate fake TOAs and calculate excess DM due to solar wind - fake_toas = make_fake_toas_fromMJDs(fake_mjdTime,mo_swm) - sun_dm_delays = mo_swm.solar_wind_dm(fake_toas)*10**3 # same scaling as above - ax2.plot(fake_mjds,sun_dm_delays,c='lightgray',label='Excess DM') + fake_toas = make_fake_toas_fromMJDs(fake_mjdTime, mo_swm) + sun_dm_delays = mo_swm.solar_wind_dm(fake_toas) * 10**3 # same scaling as above + ax2.plot(fake_mjds, sun_dm_delays, c="lightgray", label="Excess DM") # don't change ylim based on excess dm trace, if plotted ax2.set_ylim(orig_ylim) - ax2.legend(loc='best') + ax2.legend(loc="best") plt.tight_layout() - if outfile: plt.savefig(outfile) + if outfile: + plt.savefig(outfile) return 
dmxDict + def plot_dmx_diffs_nbwb(dmxDict, show_missing=True, psrname=None, outfile=None): - """ Uses output dmxDict from plot_dmxout() to plot diffs between simultaneous nb-wb values + """Uses output dmxDict from plot_dmxout() to plot diffs between simultaneous nb-wb values Parameters ========== @@ -1493,78 +1602,126 @@ def plot_dmx_diffs_nbwb(dmxDict, show_missing=True, psrname=None, outfile=None): None? """ # should check that both nb/wb entries exist first... - nbmjd = dmxDict['nb']['mjd'] - wbmjd = dmxDict['wb']['mjd'] + nbmjd = dmxDict["nb"]["mjd"] + wbmjd = dmxDict["wb"]["mjd"] allmjds = set(list(nbmjd) + list(wbmjd)) # May need slightly more curation if nb/wb mjds are *almost* identical - wbonly = allmjds-set(nbmjd) - nbonly = allmjds-set(wbmjd) + wbonly = allmjds - set(nbmjd) + nbonly = allmjds - set(wbmjd) both = set(nbmjd).intersection(set(wbmjd)) # assemble arrays of common inds for plotting later; probably a better way to do this nb_common_inds = [] wb_common_inds = [] for b in both: - nb_common_inds.append(np.where(nbmjd==b)[0][0]) - wb_common_inds.append(np.where(wbmjd==b)[0][0]) + nb_common_inds.append(np.where(nbmjd == b)[0][0]) + wb_common_inds.append(np.where(wbmjd == b)[0][0]) nb_common_inds, wb_common_inds = np.array(nb_common_inds), np.array(wb_common_inds) - nbdmx,nbdmxerr = dmxDict['nb']['val'],dmxDict['nb']['err'] - wbdmx,wbdmxerr = dmxDict['wb']['val'],dmxDict['wb']['err'] + nbdmx, nbdmxerr = dmxDict["nb"]["val"], dmxDict["nb"]["err"] + wbdmx, wbdmxerr = dmxDict["wb"]["val"], dmxDict["wb"]["err"] # propagate errors as quadrature sum, though Michael thinks geometric mean might be better? - nbwb_dmx_diffs = nbdmx[nb_common_inds]-wbdmx[wb_common_inds] - nbwb_err_prop = np.sqrt(nbdmxerr[nb_common_inds]**2 + wbdmxerr[wb_common_inds]**2) + nbwb_dmx_diffs = nbdmx[nb_common_inds] - wbdmx[wb_common_inds] + nbwb_err_prop = np.sqrt( + nbdmxerr[nb_common_inds] ** 2 + wbdmxerr[wb_common_inds] ** 2 + ) # make the plot from astropy.time import Time - figsize = (10,4) + + figsize = (10, 4) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) - ax1.set_xlabel(r'Year') + ax1.set_xlabel(r"Year") ax1.set_ylabel(r"$\Delta$DMX ($10^{-3}$ pc cm$^{-3}$)") ax1.grid(True) ax2 = ax1.twiny() - ax2.set_xlabel('MJD') + ax2.set_xlabel("MJD") botharray = np.array(list(both)) - mjdbothTime = Time(botharray,format='mjd') + mjdbothTime = Time(botharray, format="mjd") dybothTime = mjdbothTime.decimalyear minmjd, maxmjd = np.sort(botharray)[0], np.sort(botharray)[-1] ax2.set_xlim(minmjd, maxmjd) - ax1.errorbar(dybothTime,nbwb_dmx_diffs*1e3,yerr=nbwb_err_prop*1e3, - marker='o', ls='', markerfacecolor='none',label='nb - wb') + ax1.errorbar( + dybothTime, + nbwb_dmx_diffs * 1e3, + yerr=nbwb_err_prop * 1e3, + marker="o", + ls="", + markerfacecolor="none", + label="nb - wb", + ) # want arrows indicating missing nb/wb DMX values to difference if show_missing: stddiffs = np.std(nbwb_dmx_diffs) - mjdnbonlyTime = Time(np.array(list(nbonly)),format='mjd') + mjdnbonlyTime = Time(np.array(list(nbonly)), format="mjd") dynbonlyTime = mjdnbonlyTime.decimalyear - ax1.scatter(dynbonlyTime,np.zeros(len(nbonly))+stddiffs*1e3,marker='v',c='r',label='nb only') + ax1.scatter( + dynbonlyTime, + np.zeros(len(nbonly)) + stddiffs * 1e3, + marker="v", + c="r", + label="nb only", + ) nbonlystr = [str(no) for no in nbonly] - if nbonlystr: log.warning(f"nb-only measurements available for MJDs: {', '.join(nbonlystr)}") + if nbonlystr: + log.warning( + f"nb-only measurements available for MJDs: {', '.join(nbonlystr)}" + ) - 
mjdwbonlyTime = Time(np.array(list(wbonly)),format='mjd')
+ mjdwbonlyTime = Time(np.array(list(wbonly)), format="mjd")
dywbonlyTime = mjdwbonlyTime.decimalyear
- ax1.scatter(dywbonlyTime,np.zeros(len(wbonly))-stddiffs*1e3,marker='^',c='r',label='wb only')
+ ax1.scatter(
+ dywbonlyTime,
+ np.zeros(len(wbonly)) - stddiffs * 1e3,
+ marker="^",
+ c="r",
+ label="wb only",
+ )
wbonlystr = [str(wo) for wo in wbonly]
- if wbonlystr: log.warning(f"wb-only measurements available for MJDs: {', '.join(wbonlystr)}")
-
- if psrname: ax1.text(0.975,0.05,psrname,transform=ax1.transAxes,size=18,c='lightgray',
- horizontalalignment='right', verticalalignment='bottom')
+ if wbonlystr:
+ log.warning(
+ f"wb-only measurements available for MJDs: {', '.join(wbonlystr)}"
+ )
+
+ if psrname:
+ ax1.text(
+ 0.975,
+ 0.05,
+ psrname,
+ transform=ax1.transAxes,
+ size=18,
+ c="lightgray",
+ horizontalalignment="right",
+ verticalalignment="bottom",
+ )
plt.tight_layout()
- ax1.legend(loc='best')
- if outfile: plt.savefig(outfile)
+ ax1.legend(loc="best")
+ if outfile:
+ plt.savefig(outfile)
return None
+
# Now we want to make wideband DM vs. time plot, this uses the premade dm_resids from PINT
-def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False, legend = True, title = True,\
- axs = None, mean_sub = True, **kwargs):
+def plot_dm_residuals(
+ fitter,
+ restype="postfit",
+ plotsig=False,
+ save=False,
+ legend=True,
+ title=True,
+ axs=None,
+ mean_sub=True,
+ **kwargs,
+):
"""
Make a plot of Wideband timing DM residuals v. time.
@@ -1601,60 +1758,64 @@ def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False
# Check if wideband
if not fitter.is_wideband:
- raise RuntimeError("Error: Narrowband TOAs have no DM residuals, use `plot_dmx_time() instead.")
+ raise RuntimeError(
+ "Error: Narrowband TOAs have no DM residuals, use `plot_dmx_time()` instead."
+ ) # Get the DM residuals - if 'dmres' in kwargs.keys(): - dm_resids = kwargs['dmres'] + if "dmres" in kwargs.keys(): + dm_resids = kwargs["dmres"] else: if restype == "postfit": - dm_resids = fitter.resids.residual_objs['dm'].resids.value - elif restype == 'prefit': - dm_resids = fitter.resids_init.residual_objs['dm'].resids.value - elif restype == 'both': - dm_resids = fitter.resids.residual_objs['dm'].resids.value - dm_resids_init = fitter.resids_init.residual_objs['dm'].resids.value + dm_resids = fitter.resids.residual_objs["dm"].resids.value + elif restype == "prefit": + dm_resids = fitter.resids_init.residual_objs["dm"].resids.value + elif restype == "both": + dm_resids = fitter.resids.residual_objs["dm"].resids.value + dm_resids_init = fitter.resids_init.residual_objs["dm"].resids.value # Get the DM residual errors if "errs" in kwargs.keys(): - dm_error = kwargs['errs'] + dm_error = kwargs["errs"] else: - if restype == 'postfit': - dm_error = fitter.resids.residual_objs['dm'].get_data_error().value - elif restype == 'prefit': - dm_error = fitter.resids_init.residual_objs['dm'].get_data_error().value - elif restype == 'both': - dm_error = fitter.resids.residual_objs['dm'].get_data_error().value - dm_error_init = fitter.resids_init.residual_objs['dm'].get_data_error().value + if restype == "postfit": + dm_error = fitter.resids.residual_objs["dm"].get_data_error().value + elif restype == "prefit": + dm_error = fitter.resids_init.residual_objs["dm"].get_data_error().value + elif restype == "both": + dm_error = fitter.resids.residual_objs["dm"].get_data_error().value + dm_error_init = ( + fitter.resids_init.residual_objs["dm"].get_data_error().value + ) # Get the MJDs - if 'mjds' in kwargs.keys(): - mjds = kwargs['mjds'] + if "mjds" in kwargs.keys(): + mjds = kwargs["mjds"] else: mjds = fitter.toas.get_mjds().value - years = (mjds - 51544.0)/365.25 + 2000.0 + years = (mjds - 51544.0) / 365.25 + 2000.0 # Get the receiver-backend combos - if 'rcvr_bcknds' in kwargs.keys(): - rcvr_bcknds = kwargs['rcvr_bcknds'] + if "rcvr_bcknds" in kwargs.keys(): + rcvr_bcknds = kwargs["rcvr_bcknds"] else: - rcvr_bcknds = np.array(fitter.toas.get_flag_value('f')[0]) + rcvr_bcknds = np.array(fitter.toas.get_flag_value("f")[0]) # Get the set of unique receiver-bandend combos RCVR_BCKNDS = set(rcvr_bcknds) # If we don't want mean subtraced data we add the mean if not mean_sub: - if 'dmres' in kwargs.keys(): + if "dmres" in kwargs.keys(): dm_avg = dm_resids else: - dm_avg = fitter.resids.residual_objs['dm'].dm_data + dm_avg = fitter.resids.residual_objs["dm"].dm_data if "errs" in kwargs.keys(): dm_avg_err = dm_error else: - dm_avg_err = fitter.resids.residual_objs['dm'].get_data_error().value - DM0 = np.average(dm_avg, weights=(dm_avg_err)**-2) + dm_avg_err = fitter.resids.residual_objs["dm"].get_data_error().value + DM0 = np.average(dm_avg, weights=(dm_avg_err) ** -2) dm_resids += DM0.value - if restype == 'both': + if restype == "both": dm_resids_init += DM0.value if plotsig: ylabel = r"DM/Uncertainty" @@ -1667,82 +1828,131 @@ def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False ylabel = r"$\Delta$DM [cm$^{-3}$ pc]" if axs == None: - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (10,4) + figsize = (10, 4) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) else: ax1 = axs for i, r_b in enumerate(RCVR_BCKNDS): - inds = np.where(rcvr_bcknds==r_b)[0] + inds = np.where(rcvr_bcknds 
== r_b)[0] if not inds.tolist(): r_b_label = "" else: r_b_label = rcvr_bcknds[inds][0] # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: mkr = markers[r_b_label] - if restype == 'both': - mkr_pre = '.' - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if restype == "both": + mkr_pre = "." + if "color" in kwargs.keys(): + clr = kwargs["color"] else: clr = colorscheme[r_b_label] - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 0.5 # Do plotting command - if restype == 'both': + if restype == "both": if plotsig: - dm_sig = dm_resids[inds]/dm_error[inds] - dm_sig_pre = dm_resids_init[inds]/dm_error[inds] - ax1.errorbar(years[inds], dm_sig, yerr=len(dm_error[inds])*[1], fmt=markers[r_b_label], \ - color=colorscheme[r_b_label], label=r_b_label, alpha = 0.5) - ax1.errorbar(years[inds], dm_sig_pre, yerr=len(dm_error_init[inds])*[1], fmt=markers[r_b_label], \ - color=colorscheme[r_b_label], label=r_b_label+" Prefit", alpha = 0.5) + dm_sig = dm_resids[inds] / dm_error[inds] + dm_sig_pre = dm_resids_init[inds] / dm_error[inds] + ax1.errorbar( + years[inds], + dm_sig, + yerr=len(dm_error[inds]) * [1], + fmt=markers[r_b_label], + color=colorscheme[r_b_label], + label=r_b_label, + alpha=0.5, + ) + ax1.errorbar( + years[inds], + dm_sig_pre, + yerr=len(dm_error_init[inds]) * [1], + fmt=markers[r_b_label], + color=colorscheme[r_b_label], + label=r_b_label + " Prefit", + alpha=0.5, + ) else: - ax1.errorbar(years[inds], dm_resids[inds], yerr=dm_error[inds], fmt=markers[r_b_label], \ - color=colorscheme[r_b_label], label=r_b_label, alpha = 0.5) - ax1.errorbar(years[inds], dm_resids_init[inds], yerr=dm_error_init[inds], fmt=markers[r_b_label], \ - color=colorscheme[r_b_label], label=r_b_label+" Prefit", alpha = 0.5) + ax1.errorbar( + years[inds], + dm_resids[inds], + yerr=dm_error[inds], + fmt=markers[r_b_label], + color=colorscheme[r_b_label], + label=r_b_label, + alpha=0.5, + ) + ax1.errorbar( + years[inds], + dm_resids_init[inds], + yerr=dm_error_init[inds], + fmt=markers[r_b_label], + color=colorscheme[r_b_label], + label=r_b_label + " Prefit", + alpha=0.5, + ) else: if plotsig: - dm_sig = dm_resids[inds]/dm_error[inds] - ax1.errorbar(years[inds], dm_sig, yerr=len(dm_error[inds])*[1], fmt=markers[r_b_label], \ - color=colorscheme[r_b_label], label=r_b_label, alpha = 0.5) + dm_sig = dm_resids[inds] / dm_error[inds] + ax1.errorbar( + years[inds], + dm_sig, + yerr=len(dm_error[inds]) * [1], + fmt=markers[r_b_label], + color=colorscheme[r_b_label], + label=r_b_label, + alpha=0.5, + ) else: - ax1.errorbar(years[inds], dm_resids[inds], yerr=dm_error[inds], fmt=markers[r_b_label], \ - color=colorscheme[r_b_label], label=r_b_label, alpha = 0.5) + ax1.errorbar( + years[inds], + dm_resids[inds], + yerr=dm_error[inds], + fmt=markers[r_b_label], + color=colorscheme[r_b_label], + label=r_b_label, + alpha=0.5, + ) # Set second axis ax1.set_ylabel(ylabel) - ax1.set_xlabel(r'Year') + ax1.set_xlabel(r"Year") ax1.grid(True) ax2 = ax1.twiny() - mjd0 = ((ax1.get_xlim()[0])-2004.0)*365.25+53005. - mjd1 = ((ax1.get_xlim()[1])-2004.0)*365.25+53005. 
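+ # Mirror the year-axis limits on the MJD axis (MJD 53005.0 corresponds to 2004.0).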
+ mjd0 = ((ax1.get_xlim()[0]) - 2004.0) * 365.25 + 53005.0 + mjd1 = ((ax1.get_xlim()[1]) - 2004.0) * 365.25 + 53005.0 ax2.set_xlim(mjd0, mjd1) if legend: if len(RCVR_BCKNDS) > 5: - ncol = int(np.ceil(len(RCVR_BCKNDS)/2)) + ncol = int(np.ceil(len(RCVR_BCKNDS) / 2)) y_offset = 1.15 else: ncol = len(RCVR_BCKNDS) y_offset = 1.0 - ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol) + ax1.legend( + loc="upper center", + bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]), + ncol=ncol, + ) if title: if len(RCVR_BCKNDS) > 5: y_offset = 1.1 else: y_offset = 1.0 - plt.title("%s %s DM residuals" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1]) + plt.title( + "%s %s DM residuals" % (fitter.model.PSR.value, restype), + y=y_offset + 1.0 / figsize[1], + ) if axs == None: plt.tight_layout() if save: @@ -1757,7 +1967,7 @@ def plot_dm_residuals(fitter, restype = 'postfit', plotsig = False, save = False if axs == None: # Define clickable points - text = ax2.text(0,0,"") + text = ax2.text(0, 0, "") # Define point highlight color if "430_ASP" in RCVR_BCKNDS or "430_PUPPI" in RCVR_BCKNDS: @@ -1769,27 +1979,42 @@ def onclick(event): # Get X and Y axis data xdata = mjds if plotsig: - ydata = dm_resids/dm_error + ydata = dm_resids / dm_error else: ydata = dm_resids # Get x and y data from click xclick = event.xdata yclick = event.ydata # Calculate scaled distance, find closest point index - d = np.sqrt(((xdata - xclick)/1000.0)**2 + (ydata - yclick)**2) + d = np.sqrt(((xdata - xclick) / 1000.0) ** 2 + (ydata - yclick) ** 2) ind_close = np.where(np.min(d) == d)[0] # highlight clicked point - ax2.scatter(xdata[ind_close], ydata[ind_close], marker = 'x', c = stamp_color) + ax2.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color) # Print point info text.set_position((xdata[ind_close], ydata[ind_close])) - text.set_text("DM Params:\n MJD: %s \n Res: %.6f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "DM Params:\n MJD: %s \n Res: %.6f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) - fig.canvas.mpl_connect('button_press_event', onclick) + fig.canvas.mpl_connect("button_press_event", onclick) return -def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = 50, avg = False, whitened = False, \ - save = False, legend = True, title = True, axs = None, **kwargs): + +def plot_measurements_v_res( + fitter, + restype="postfit", + plotsig=False, + nbin=50, + avg=False, + whitened=False, + save=False, + legend=True, + title=True, + axs=None, + **kwargs, +): """ Make a histogram of number of measurements v. residuals @@ -1829,188 +2054,228 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin = if fitter.is_wideband: NB = False if avg == True: - raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") + raise ValueError( + "Cannot epoch average wideband residuals, please change 'avg' to False." 
+ ) else: NB = True # Check if want epoch averaged residuals - if avg == True and restype == 'prefit': + if avg == True and restype == "prefit": avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'postfit': + elif avg == True and restype == "postfit": avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'both': + elif avg == True and restype == "both": avg_dict = fitter.resids.ecorr_average(use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - # Get residuals - if 'res' in kwargs.keys(): - res = kwargs['res'] + if "res" in kwargs.keys(): + res = kwargs["res"] else: - if restype == 'prefit': + if restype == "prefit": if NB == True: if avg == True: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'postfit': + res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) + elif restype == "postfit": if NB == True: if avg == True: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'both': + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + elif restype == "both": if NB == True: if avg == True: - res = avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) res_pre = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - res_pre = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) else: - raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\ - %(restype)) + raise ValueError( + "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'." 
+ % (restype) + ) # Check if we want whitened residuals - if whitened == True and ('res' not in kwargs.keys()): + if whitened == True and ("res" not in kwargs.keys()): if avg == True: - if restype != 'both': + if restype != "both": res = whiten_resids(avg_dict, restype=restype) else: - res = whiten_resids(avg_dict_pre, restype='prefit') - res_pre = whiten_resids(avg_dict, restype='postfit') + res = whiten_resids(avg_dict_pre, restype="prefit") + res_pre = whiten_resids(avg_dict, restype="postfit") res_pre = res_pre.to(u.us) res = res.to(u.us) else: - if restype != 'both': + if restype != "both": res = whiten_resids(fitter, restype=restype) else: - res = whiten_resids(fitter, restype='prefit') - res_pre = whiten_resids(fitter, restype='postfit') + res = whiten_resids(fitter, restype="prefit") + res_pre = whiten_resids(fitter, restype="postfit") res_pre = res_pre.to(u.us) res = res.to(u.us) # Get errors - if 'errs' in kwargs.keys(): - errs = kwargs['errs'] + if "errs" in kwargs.keys(): + errs = kwargs["errs"] else: - if restype == 'prefit': + if restype == "prefit": if avg == True: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.toas.get_errors().to(u.us) - elif restype == 'postfit': + elif restype == "postfit": if NB == True: if avg == True: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) - elif restype == 'both': + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) + elif restype == "both": if NB == True: if avg == True: - errs = avg_dict['errors'].to(u.us) - errs_pre = avg_dict_pre['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) + errs_pre = avg_dict_pre["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) errs_pre = fitter.toas.get_errors().to(u.us) # Get receiver backends - if 'rcvr_bcknds' in kwargs.keys(): - rcvr_bcknds = kwargs['rcvr_bcknds'] + if "rcvr_bcknds" in kwargs.keys(): + rcvr_bcknds = kwargs["rcvr_bcknds"] else: - rcvr_bcknds = np.array(fitter.toas.get_flag_value('f')[0]) + rcvr_bcknds = np.array(fitter.toas.get_flag_value("f")[0]) if avg == True: avg_rcvr_bcknds = [] - for iis in avg_dict['indices']: + for iis in avg_dict["indices"]: avg_rcvr_bcknds.append(rcvr_bcknds[iis[0]]) rcvr_bcknds = np.array(avg_rcvr_bcknds) # Get the set of unique receiver-bandend combos RCVR_BCKNDS = set(rcvr_bcknds) if axs == None: - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (10,4) + figsize = (10, 4) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) else: ax1 = axs - xmax=0 + xmax = 0 for i, r_b in enumerate(RCVR_BCKNDS): - inds = np.where(rcvr_bcknds==r_b)[0] + inds = np.where(rcvr_bcknds == r_b)[0] if not inds.tolist(): r_b_label = "" else: r_b_label = rcvr_bcknds[inds][0] # Get plot preferences - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in kwargs.keys(): + clr = kwargs["color"] else: clr = colorscheme[r_b_label] if plotsig: - sig = res[inds]/errs[inds] - ax1.hist(sig, nbin, histtype='step', color=colorscheme[r_b_label], label=r_b_label) - xmax = max(xmax,max(sig),max(-sig)) - if restype == 'both': - sig_pre = 
res_pre[inds]/errs_pre[inds]
- ax1.hist(sig_pre, nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\
- label=r_b_label+" Prefit")
+ sig = res[inds] / errs[inds]
+ ax1.hist(
+ sig,
+ nbin,
+ histtype="step",
+ color=colorscheme[r_b_label],
+ label=r_b_label,
+ )
+ xmax = max(xmax, max(sig), max(-sig))
+ if restype == "both":
+ sig_pre = res_pre[inds] / errs_pre[inds]
+ ax1.hist(
+ sig_pre,
+ nbin,
+ histtype="step",
+ color=colorscheme[r_b_label],
+ linestyle="--",
+ label=r_b_label + " Prefit",
+ )
else:
- ax1.hist(res[inds], nbin, histtype='step', color=colorscheme[r_b_label], label=r_b_label)
- xmax = max(xmax,max(res[inds]),max(-res[inds]))
- if restype == 'both':
- ax1.hist(res[inds], nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\
- label=r_b_label+" Prefit")
+ ax1.hist(
+ res[inds],
+ nbin,
+ histtype="step",
+ color=colorscheme[r_b_label],
+ label=r_b_label,
+ )
+ xmax = max(xmax, max(res[inds]), max(-res[inds]))
+ if restype == "both":
+ ax1.hist(
+ res_pre[inds],
+ nbin,
+ histtype="step",
+ color=colorscheme[r_b_label],
+ linestyle="--",
+ label=r_b_label + " Prefit",
+ )
ax1.grid(True)
ax1.set_ylabel("Number of measurements")
if plotsig:
if avg and whitened:
- ax1.set_xlabel('Average Residual/Uncertainty \n (Whitened)', multialignment='center')
+ ax1.set_xlabel(
+ "Average Residual/Uncertainty \n (Whitened)", multialignment="center"
+ )
elif avg and not whitened:
- ax1.set_xlabel('Average Residual/Uncertainty')
+ ax1.set_xlabel("Average Residual/Uncertainty")
elif whitened and not avg:
- ax1.set_xlabel('Residual/Uncertainty \n (Whitened)', multialignment='center')
+ ax1.set_xlabel(
+ "Residual/Uncertainty \n (Whitened)", multialignment="center"
+ )
else:
- ax1.set_xlabel('Residual/Uncertainty')
+ ax1.set_xlabel("Residual/Uncertainty")
else:
if avg and whitened:
- ax1.set_xlabel('Average Residual ($\mu$s) \n (Whitened)', multialignment='center')
+ ax1.set_xlabel(
+ "Average Residual ($\mu$s) \n (Whitened)", multialignment="center"
+ )
elif avg and not whitened:
- ax1.set_xlabel('Average Residual ($\mu$s)')
+ ax1.set_xlabel("Average Residual ($\mu$s)")
elif whitened and not avg:
- ax1.set_xlabel('Residual ($\mu$s) \n (Whitened)', multialignment='center')
+ ax1.set_xlabel("Residual ($\mu$s) \n (Whitened)", multialignment="center")
else:
- ax1.set_xlabel('Residual ($\mu$s)')
- ax1.set_xlim(-1.1*xmax,1.1*xmax)
+ ax1.set_xlabel("Residual ($\mu$s)")
+ ax1.set_xlim(-1.1 * xmax, 1.1 * xmax)
if legend:
if len(RCVR_BCKNDS) > 5:
- ncol = int(np.ceil(len(RCVR_BCKNDS)/2))
+ ncol = int(np.ceil(len(RCVR_BCKNDS) / 2))
y_offset = 1.15
else:
ncol = len(RCVR_BCKNDS)
y_offset = 1.0
- ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol)
+ ax1.legend(
+ loc="upper center",
+ bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]),
+ ncol=ncol,
+ )
if title:
if len(RCVR_BCKNDS) > 5:
y_offset = 1.1
else:
y_offset = 1.0
- plt.title("%s %s residual measurements" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1])
+ plt.title(
+ "%s %s residual measurements" % (fitter.model.PSR.value, restype),
+ y=y_offset + 1.0 / figsize[1],
+ )
if axs == None:
plt.tight_layout()
if save:
@@ -2023,9 +2288,9 @@ def plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin =
ext += "_NB"
else:
ext += "_WB"
- if restype == 'prefit':
+ if restype == "prefit":
ext += "_prefit"
- elif restype == 'postfit':
+ elif restype == "postfit":
ext += "_postfit"
elif restype == "both":
ext += "_pre_post_fit"
@@ -2033,8 +2298,19 @@ def 
plot_measurements_v_res(fitter, restype = 'postfit', plotsig = False, nbin =
return
-def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin = 50, \
- save = False, legend = True, title = True, axs = None, mean_sub = True, **kwargs):
+
+def plot_measurements_v_dmres(
+ fitter,
+ restype="postfit",
+ plotsig=False,
+ nbin=50,
+ save=False,
+ legend=True,
+ title=True,
+ axs=None,
+ mean_sub=True,
+ **kwargs,
+):
"""
Make a histogram of number of measurements v. residuals
@@ -2070,53 +2346,57 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin
# Check if wideband
if not fitter.is_wideband:
- raise ValueError("Narrowband Fitters have have no DM residuals, please use `plot_measurements_v_dmres` instead.")
+ raise ValueError(
+ "Narrowband fitters have no DM residuals, please use `plot_measurements_v_res` instead."
+ )
# Get the DM residuals
- if 'dmres' in kwargs.keys():
- dm_resids = kwargs['dmres']
+ if "dmres" in kwargs.keys():
+ dm_resids = kwargs["dmres"]
else:
if restype == "postfit":
- dm_resids = fitter.resids.residual_objs['dm'].resids.value
- elif restype == 'prefit':
- dm_resids = fitter.resids_init.residual_objs['dm'].resids.value
- elif restype == 'both':
- dm_resids = fitter.resids.residual_objs['dm'].resids.value
- dm_resids_init = fitter.resids_init.residual_objs['dm'].resids.value
+ dm_resids = fitter.resids.residual_objs["dm"].resids.value
+ elif restype == "prefit":
+ dm_resids = fitter.resids_init.residual_objs["dm"].resids.value
+ elif restype == "both":
+ dm_resids = fitter.resids.residual_objs["dm"].resids.value
+ dm_resids_init = fitter.resids_init.residual_objs["dm"].resids.value
# Get the DM residual errors
if "errs" in kwargs.keys():
- dm_error = kwargs['errs']
+ dm_error = kwargs["errs"]
else:
- if restype == 'postfit':
- dm_error = fitter.resids.residual_objs['dm'].get_data_error().value
- elif restype == 'prefit':
- dm_error = fitter.resids_init.residual_objs['dm'].get_data_error().value
- elif restype == 'both':
- dm_error = fitter.resids.residual_objs['dm'].get_data_error().value
- dm_error_init = fitter.resids_init.residual_objs['dm'].get_data_error().value
+ if restype == "postfit":
+ dm_error = fitter.resids.residual_objs["dm"].get_data_error().value
+ elif restype == "prefit":
+ dm_error = fitter.resids_init.residual_objs["dm"].get_data_error().value
+ elif restype == "both":
+ dm_error = fitter.resids.residual_objs["dm"].get_data_error().value
+ dm_error_init = (
+ fitter.resids_init.residual_objs["dm"].get_data_error().value
+ )
# Get the receiver-backend combos
- if 'rcvr_bcknds' in kwargs.keys():
- rcvr_bcknds = kwargs['rcvr_bcknds']
+ if "rcvr_bcknds" in kwargs.keys():
+ rcvr_bcknds = kwargs["rcvr_bcknds"]
else:
- rcvr_bcknds = np.array(fitter.toas.get_flag_value('f')[0])
+ rcvr_bcknds = np.array(fitter.toas.get_flag_value("f")[0])
# Get the set of unique receiver-bandend combos
RCVR_BCKNDS = set(rcvr_bcknds)
# If we don't want mean subtraced data we add the mean
if not mean_sub:
- if 'dmres' in kwargs.keys():
+ if "dmres" in kwargs.keys():
dm_avg = dm_resids
else:
- dm_avg = fitter.resids.residual_objs['dm'].dm_data
+ dm_avg = fitter.resids.residual_objs["dm"].dm_data
if "errs" in kwargs.keys():
dm_avg_err = dm_error
else:
- dm_avg_err = fitter.resids.residual_objs['dm'].get_data_error().value
- DM0 = np.average(dm_avg, weights=(dm_avg_err)**-2)
+ dm_avg_err = fitter.resids.residual_objs["dm"].get_data_error().value
+ DM0 = np.average(dm_avg, weights=(dm_avg_err) ** -2)
dm_resids += 
DM0.value - if restype == 'both': + if restype == "both": dm_resids_init += DM0.value if plotsig: xlabel = r"DM/Uncertainty" @@ -2129,63 +2409,94 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin xlabel = r"$\Delta$DM [cm$^{-3}$ pc]" if axs == None: - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (10,4) + figsize = (10, 4) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) else: ax1 = axs for i, r_b in enumerate(RCVR_BCKNDS): - inds = np.where(rcvr_bcknds==r_b)[0] + inds = np.where(rcvr_bcknds == r_b)[0] if not inds.tolist(): r_b_label = "" else: r_b_label = rcvr_bcknds[inds][0] # Get plot preferences - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in kwargs.keys(): + clr = kwargs["color"] else: clr = colorscheme[r_b_label] if plotsig: - sig = dm_resids[inds]/dm_error[inds] - ax1.hist(sig, nbin, histtype='step', color=colorscheme[r_b_label], label=r_b_label) - if restype == 'both': - sig_pre = dm_resids_init[inds]/dm_error_init[inds] - ax1.hist(sig_pre, nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\ - label=r_b_label+" Prefit") + sig = dm_resids[inds] / dm_error[inds] + ax1.hist( + sig, + nbin, + histtype="step", + color=colorscheme[r_b_label], + label=r_b_label, + ) + if restype == "both": + sig_pre = dm_resids_init[inds] / dm_error_init[inds] + ax1.hist( + sig_pre, + nbin, + histtype="step", + color=colorscheme[r_b_label], + linestyle="--", + label=r_b_label + " Prefit", + ) else: - ax1.hist(dm_resids[inds], nbin, histtype='step', color=colorscheme[r_b_label], label=r_b_label) - if restype == 'both': - ax1.hist(dm_resids_init[inds], nbin, histtype='step', color=colorscheme[r_b_label], linestyle = '--',\ - label=r_b_label+" Prefit") + ax1.hist( + dm_resids[inds], + nbin, + histtype="step", + color=colorscheme[r_b_label], + label=r_b_label, + ) + if restype == "both": + ax1.hist( + dm_resids_init[inds], + nbin, + histtype="step", + color=colorscheme[r_b_label], + linestyle="--", + label=r_b_label + " Prefit", + ) ax1.grid(True) ax1.set_ylabel("Number of measurements") ax1.set_xlabel(xlabel) if legend: if len(RCVR_BCKNDS) > 5: - ncol = int(np.ceil(len(RCVR_BCKNDS)/2)) + ncol = int(np.ceil(len(RCVR_BCKNDS) / 2)) y_offset = 1.15 else: ncol = len(RCVR_BCKNDS) y_offset = 1.0 - ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol) + ax1.legend( + loc="upper center", + bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]), + ncol=ncol, + ) if title: if len(RCVR_BCKNDS) > 5: y_offset = 1.1 else: y_offset = 1.0 - plt.title("%s %s DM residual measurements" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1]) + plt.title( + "%s %s DM residual measurements" % (fitter.model.PSR.value, restype), + y=y_offset + 1.0 / figsize[1], + ) if axs == None: plt.tight_layout() if save: ext = "" - if restype == 'prefit': + if restype == "prefit": ext += "_prefit" - elif restype == 'postfit': + elif restype == "postfit": ext += "_postfit" elif restype == "both": ext += "_pre_post_fit" @@ -2194,8 +2505,20 @@ def plot_measurements_v_dmres(fitter, restype = 'postfit', plotsig = False, nbin return -def plot_residuals_orb(fitter, restype = 'postfit', colorby='f', plotsig = False, avg = False, mixed_ecorr=False, \ - whitened = False, save = False, legend = True, title = True, axs = None, **kwargs): +def plot_residuals_orb( + fitter, + restype="postfit", + colorby="f", + plotsig=False, + avg=False, 
+ mixed_ecorr=False, + whitened=False, + save=False, + legend=True, + title=True, + axs=None, + **kwargs, +): """ Make a plot of the residuals vs. orbital phase. @@ -2233,170 +2556,171 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='f', plotsig = False if fitter.is_wideband: NB = False if avg == True: - raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") + raise ValueError( + "Cannot epoch average wideband residuals, please change 'avg' to False." + ) else: NB = True - # Check if want epoch averaged residuals - if avg == True and restype == 'prefit' and mixed_ecorr == True: + if avg == True and restype == "prefit" and mixed_ecorr == True: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True) - elif avg == True and restype == 'postfit' and mixed_ecorr == True: + no_avg_dict = no_ecorr_average( + fitter.toas, fitter.resids_init, use_noise_model=True + ) + elif avg == True and restype == "postfit" and mixed_ecorr == True: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True) - elif avg == True and restype == 'both' and mixed_ecorr == True: + no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True) + elif avg == True and restype == "both" and mixed_ecorr == True: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True) + no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - no_avg_dict_pre = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True) - elif avg == True and restype == 'prefit' and mixed_ecorr == False: + no_avg_dict_pre = no_ecorr_average( + fitter.toas, fitter.resids_init, use_noise_model=True + ) + elif avg == True and restype == "prefit" and mixed_ecorr == False: avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'postfit' and mixed_ecorr==False: + elif avg == True and restype == "postfit" and mixed_ecorr == False: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) - elif avg == True and restype == 'both' and mixed_ecorr == False: + elif avg == True and restype == "both" and mixed_ecorr == False: avg_dict = fitter.resids.ecorr_average(use_noise_model=True) avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) - # Get residuals - if 'res' in kwargs.keys(): - res = kwargs['res'] + if "res" in kwargs.keys(): + res = kwargs["res"] else: - if restype == 'prefit': + if restype == "prefit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) - elif avg==True and mixed_ecorr == False: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) + elif avg == True and mixed_ecorr == False: + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'postfit': + res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) + elif restype == "postfit": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = 
no_avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) elif avg == True: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - elif restype == 'both': + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + elif restype == "both": if NB == True: if avg == True and mixed_ecorr == True: - res = avg_dict['time_resids'].to(u.us) - res_no_avg = no_avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) - res_pre_no_avg = no_avg_dict_pre['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_no_avg = no_avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) + res_pre_no_avg = no_avg_dict_pre["time_resids"].to(u.us) elif avg == True and mixed_ecorr == False: - res = avg_dict['time_resids'].to(u.us) - res_pre = avg_dict_pre['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) + res_pre = avg_dict_pre["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) res_pre = fitter.resids_init.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) - res_pre = fitter.resids_init.residual_objs['toa'].time_resids.to(u.us) + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) + res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us) else: - raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\ - %(restype)) - - + raise ValueError( + "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'." + % (restype) + ) # Check if we want whitened residuals - if whitened == True and ('res' not in kwargs.keys()): + if whitened == True and ("res" not in kwargs.keys()): if avg == True and mixed_ecorr == True: - if restype != 'both': + if restype != "both": res = whiten_resids(avg_dict, restype=restype) res_no_avg = whiten_resids(no_avg_dict, restype=restype) else: - res = whiten_resids(avg_dict_pre, restype='prefit') - res_pre = whiten_resids(avg_dict, restype='postfit') + res = whiten_resids(avg_dict_pre, restype="prefit") + res_pre = whiten_resids(avg_dict, restype="postfit") res_pre = res_pre.to(u.us) - res_no_avg = whiten_resids(avg_dict_pre, restype='prefit') - res_pre_no_avg = whiten_resids(avg_dict, restype='postfit') + res_no_avg = whiten_resids(avg_dict_pre, restype="prefit") + res_pre_no_avg = whiten_resids(avg_dict, restype="postfit") res_pre_no_avg = res_pre_no_avg.to(u.us) res = res.to(u.us) res_no_avg = res_no_avg.to(u.us) elif avg == True and mixed_ecorr == False: - if restype != 'both': + if restype != "both": res = whiten_resids(avg_dict, restype=restype) else: - res = whiten_resids(avg_dict_pre, restype='prefit') - res_pre = whiten_resids(avg_dict, restype='postfit') + res = whiten_resids(avg_dict_pre, restype="prefit") + res_pre = whiten_resids(avg_dict, restype="postfit") res_pre = res_pre.to(u.us) res = res.to(u.us) else: - if restype != 'both': + if restype != "both": res = whiten_resids(fitter, restype=restype) else: - res = whiten_resids(fitter, restype='prefit') - res_pre = whiten_resids(fitter, restype='postfit') + res = whiten_resids(fitter, restype="prefit") + res_pre = whiten_resids(fitter, restype="postfit") res_pre = res_pre.to(u.us) res = res.to(u.us) # Get errors - if 'errs' in kwargs.keys(): - errs = kwargs['errs'] + if "errs" in 
kwargs.keys():
+        errs = kwargs["errs"]
     else:
-        if restype == 'prefit':
+        if restype == "prefit":
             if avg == True and mixed_ecorr == True:
-                errs = avg_dict['errors'].to(u.us)
-                errs_no_avg = no_avg_dict['errors'].to(u.us)
+                errs = avg_dict["errors"].to(u.us)
+                errs_no_avg = no_avg_dict["errors"].to(u.us)
             elif avg == True and mixed_ecorr == False:
-                errs = avg_dict['errors'].to(u.us)
+                errs = avg_dict["errors"].to(u.us)
             else:
                 errs = fitter.toas.get_errors().to(u.us)
-        elif restype == 'postfit':
+        elif restype == "postfit":
             if NB == True:
                 if avg == True and mixed_ecorr == True:
-                    errs = avg_dict['errors'].to(u.us)
-                    errs_no_avg = no_avg_dict['errors'].to(u.us)
+                    errs = avg_dict["errors"].to(u.us)
+                    errs_no_avg = no_avg_dict["errors"].to(u.us)
                 elif avg == True and mixed_ecorr == False:
-                    errs = avg_dict['errors'].to(u.us)
+                    errs = avg_dict["errors"].to(u.us)
                 else:
                     errs = fitter.resids.get_data_error().to(u.us)
             else:
-                errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us)
-        elif restype == 'both':
+                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
+        elif restype == "both":
             if NB == True:
                 if avg == True and mixed_ecorr == True:
-                    errs = avg_dict['errors'].to(u.us)
-                    errs_pre = avg_dict_pre['errors'].to(u.us)
-                    errs_no_avg = no_avg_dict['errors'].to(u.us)
-                    errs_no_avg_pre = no_avg_dict_pre['errors'].to(u.us)
+                    errs = avg_dict["errors"].to(u.us)
+                    errs_pre = avg_dict_pre["errors"].to(u.us)
+                    errs_no_avg = no_avg_dict["errors"].to(u.us)
+                    errs_no_avg_pre = no_avg_dict_pre["errors"].to(u.us)
                 elif avg == True and mixed_ecorr == False:
-                    errs = avg_dict['errors'].to(u.us)
-                    errs_pre = avg_dict_pre['errors'].to(u.us)
+                    errs = avg_dict["errors"].to(u.us)
+                    errs_pre = avg_dict_pre["errors"].to(u.us)
                 else:
                     errs = fitter.resids.get_data_error().to(u.us)
                     errs_pre = fitter.toas.get_errors().to(u.us)
             else:
-                errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us)
+                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
                 errs_pre = fitter.toas.get_errors().to(u.us)

     # Get MJDs
-    if 'orbphase' not in kwargs.keys():
+    if "orbphase" not in kwargs.keys():
         mjds = fitter.toas.get_mjds().value
         if avg == True:
-            mjds = avg_dict['mjds'].value
+            mjds = avg_dict["mjds"].value
             if mixed_ecorr == True:
-                mjds_no_avg = no_avg_dict['mjds'].value
-
-
+                mjds_no_avg = no_avg_dict["mjds"].value
 
     # Now we need to get the orbital phases; start with binary model name
-    if 'orbphase' in kwargs.keys():
-        orbphase = kwargs['orbphase']
+    if "orbphase" in kwargs.keys():
+        orbphase = kwargs["orbphase"]
     else:
-        orbphase = fitter.model.orbital_phase(mjds, radians = False)
+        orbphase = fitter.model.orbital_phase(mjds, radians=False)
         if avg and mixed_ecorr:
-            no_avg_orbphase = fitter.model.orbital_phase(mjds_no_avg, radians = False)
-
+            no_avg_orbphase = fitter.model.orbital_phase(mjds_no_avg, radians=False)
 
     # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr.
     # Create combined arrays
@@ -2405,50 +2729,40 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='f', plotsig = False
         combo_res = np.hstack((res, res_no_avg))
         combo_errs = np.hstack((errs, errs_no_avg))
         combo_orbphase = np.hstack((orbphase, no_avg_orbphase))
-        if restype =='both':
+        if restype == "both":
             combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre))
             combo_res_pre = np.hstack((res_pre, res_no_avg_pre))
-
     # Get colorby flag values (obs, PTA, febe, etc.) 
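# (Context note, hedged: the call to `set_color_and_marker` in the new code
# below replaces the removed 'pta'/'obs'/'f' if/elif chain. Judging from that
# removed mapping, the helper presumably amounts to a dictionary lookup over
# the module-level `colorschemes` and `markers` dicts, roughly:
#
#     def set_color_and_marker(colorby):
#         # Map a colorby flag to its scheme key ('obs' -> 'observatories',
#         # 'f' -> 'febe'); a KeyError signals an unsupported flag.
#         key = {"pta": "pta", "obs": "observatories", "f": "febe"}[colorby]
#         return colorschemes[key], markers[key]
#
# The actual helper is defined elsewhere in pint_pal and may differ.)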
- if 'colorby' in kwargs.keys(): - cb = kwargs['colorby'] + if "colorby" in kwargs.keys(): + cb = kwargs["colorby"] else: cb = np.array(fitter.toas[colorby]) - #. Seems to run a little faster but not robust to obs + # . Seems to run a little faster but not robust to obs # cb = np.array(fitter.toas.get_flag_value(colorby)[0]) if avg == True: avg_cb = [] - for iis in avg_dict['indices']: + for iis in avg_dict["indices"]: avg_cb.append(cb[iis[0]]) if mixed_ecorr == True: no_avg_cb = [] - for jjs in no_avg_dict['indices']: + for jjs in no_avg_dict["indices"]: no_avg_cb.append(cb[jjs]) no_ecorr_cb = np.array(no_avg_cb) cb = np.array(avg_cb) # Get the set of unique flag values - if avg==True and mixed_ecorr==True: - cb = np.hstack((cb,no_ecorr_cb)) - + if avg == True and mixed_ecorr == True: + cb = np.hstack((cb, no_ecorr_cb)) CB = set(cb) - if colorby== 'pta': - colorscheme = colorschemes['pta'] - markerscheme = markers['pta'] - elif colorby == 'obs': - colorscheme = colorschemes['observatories'] - markerscheme = markers['observatories'] - elif colorby == 'f': - colorscheme = colorschemes['febe'] - markerscheme = markers['febe'] + colorscheme, markerscheme = set_color_and_marker(colorby) - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (10,4) + figsize = (10, 4) if axs == None: fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) @@ -2456,99 +2770,168 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='f', plotsig = False fig = plt.gcf() ax1 = axs for i, c in enumerate(CB): - inds = np.where(cb==c)[0] + inds = np.where(cb == c)[0] if not inds.tolist(): cb_label = "" else: cb_label = cb[inds][0] # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: try: mkr = markerscheme[cb_label] - if restype == 'both': - mkr_pre = '.' + if restype == "both": + mkr_pre = "." 
except Exception: - mkr = 'x' + mkr = "x" log.log(1, "Color by flag value doesn't have a marker label!!") - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in kwargs.keys(): + clr = kwargs["color"] else: try: clr = colorscheme[cb_label] except Exception: - clr = 'k' + clr = "k" log.log(1, "Color by flag value doesn't have a color!!") - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 0.5 if avg and mixed_ecorr: if plotsig: - combo_sig = combo_res[inds]/combo_errs[inds] - ax1.errorbar(combo_orbphase[inds], combo_sig, yerr=len(combo_errs[inds])*[1], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha) - if restype == 'both': - combo_sig_pre = combo_res_pre[inds]/combo_errs_pre[inds] - ax1.errorbar(combo_orbphase[inds], combo_sig_pre, yerr=len(combo_errs_pre[inds])*[1], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha) + combo_sig = combo_res[inds] / combo_errs[inds] + ax1.errorbar( + combo_orbphase[inds], + combo_sig, + yerr=len(combo_errs[inds]) * [1], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + ) + if restype == "both": + combo_sig_pre = combo_res_pre[inds] / combo_errs_pre[inds] + ax1.errorbar( + combo_orbphase[inds], + combo_sig_pre, + yerr=len(combo_errs_pre[inds]) * [1], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + ) else: - ax1.errorbar(combo_orbphase[inds], combo_res[inds], yerr = combo_errs[inds], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha) - if restype == 'both': - ax1.errorbar(combo_orbphase[inds], combo_res_pre[inds], yerr=combo_errs_pre[inds], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha) + ax1.errorbar( + combo_orbphase[inds], + combo_res[inds], + yerr=combo_errs[inds], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + ) + if restype == "both": + ax1.errorbar( + combo_orbphase[inds], + combo_res_pre[inds], + yerr=combo_errs_pre[inds], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + ) else: if plotsig: - sig = res[inds]/errs[inds] - ax1.errorbar(orbphase[inds], sig, yerr=len(errs[inds])*[1], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha) - if restype == 'both': - sig_pre = res_pre[inds]/errs_pre[inds] - ax1.errorbar(orbphase[inds], sig_pre, yerr=len(errs_pre[inds])*[1], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha) + sig = res[inds] / errs[inds] + ax1.errorbar( + orbphase[inds], + sig, + yerr=len(errs[inds]) * [1], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + ) + if restype == "both": + sig_pre = res_pre[inds] / errs_pre[inds] + ax1.errorbar( + orbphase[inds], + sig_pre, + yerr=len(errs_pre[inds]) * [1], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + ) else: - ax1.errorbar(orbphase[inds], res[inds], yerr = errs[inds], fmt=mkr, \ - color=clr, label=cb_label, alpha = alpha) - if restype == 'both': - ax1.errorbar(orbphase[inds], res_pre[inds], yerr=errs_pre[inds], fmt=mkr_pre, \ - color=clr, label=cb_label+" Prefit", alpha = alpha) + ax1.errorbar( + orbphase[inds], + res[inds], + yerr=errs[inds], + fmt=mkr, + color=clr, + label=cb_label, + alpha=alpha, + ) + if restype == "both": + ax1.errorbar( + orbphase[inds], + res_pre[inds], + yerr=errs_pre[inds], + fmt=mkr_pre, + color=clr, + label=cb_label + " Prefit", + alpha=alpha, + ) # Set second axis - ax1.set_xlabel(r'Orbital Phase') + ax1.set_xlabel(r"Orbital Phase") ax1.grid(True) if plotsig: if avg and whitened: - 
ax1.set_ylabel('Average Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual/Uncertainty \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual/Uncertainty') + ax1.set_ylabel("Average Residual/Uncertainty") elif whitened and not avg: - ax1.set_ylabel('Residual/Uncertainty \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Residual/Uncertainty \n (Whitened)", multialignment="center" + ) else: - ax1.set_ylabel('Residual/Uncertainty') + ax1.set_ylabel("Residual/Uncertainty") else: if avg and whitened: - ax1.set_ylabel('Average Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel( + "Average Residual ($\mu$s) \n (Whitened)", multialignment="center" + ) elif avg and not whitened: - ax1.set_ylabel('Average Residual ($\mu$s)') + ax1.set_ylabel("Average Residual ($\mu$s)") elif whitened and not avg: - ax1.set_ylabel('Residual ($\mu$s) \n (Whitened)', multialignment='center') + ax1.set_ylabel("Residual ($\mu$s) \n (Whitened)", multialignment="center") else: - ax1.set_ylabel('Residual ($\mu$s)') + ax1.set_ylabel("Residual ($\mu$s)") if legend: if len(CB) > 5: - ncol = int(np.ceil(len(CB)/2)) + ncol = int(np.ceil(len(CB) / 2)) y_offset = 1.15 else: ncol = len(CB) y_offset = 1.0 - ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol) + ax1.legend( + loc="upper center", + bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]), + ncol=ncol, + ) if title: if len(CB) > 5: y_offset = 1.1 else: y_offset = 1.0 - plt.title("%s %s timing residuals" % (fitter.model.PSR.value, restype), y=y_offset+1.0/figsize[1]) + plt.title( + "%s %s timing residuals" % (fitter.model.PSR.value, restype), + y=y_offset + 1.0 / figsize[1], + ) if axs == None: plt.tight_layout() if save: @@ -2561,9 +2944,9 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='f', plotsig = False ext += "_NB" else: ext += "_WB" - if restype == 'prefit': + if restype == "prefit": ext += "_prefit" - elif restype == 'postfit': + elif restype == "postfit": ext += "_postfit" elif restype == "both": ext += "_pre_post_fit" @@ -2571,37 +2954,43 @@ def plot_residuals_orb(fitter, restype = 'postfit', colorby='f', plotsig = False if axs == None: # Define clickable points - text = ax1.text(0,0,"") + text = ax1.text(0, 0, "") stamp_color = "#FD9927" # Define color for highlighting points - #if "430_ASP" in RCVR_BCKNDS or "430_PUPPI" in RCVR_BCKNDS: + # if "430_ASP" in RCVR_BCKNDS or "430_PUPPI" in RCVR_BCKNDS: # stamp_color = "#61C853" - #else: + # else: # stamp_color = "#FD9927" def onclick(event): # Get X and Y axis data xdata = orbphase if plotsig: - ydata = (res/errs).decompose().value + ydata = (res / errs).decompose().value else: ydata = res.value # Get x and y data from click xclick = event.xdata yclick = event.ydata # Calculate scaled distance, find closest point index - d = np.sqrt((xdata - xclick)**2 + ((ydata - yclick)/100.0)**2) + d = np.sqrt((xdata - xclick) ** 2 + ((ydata - yclick) / 100.0) ** 2) ind_close = np.where(np.min(d) == d)[0] # highlight clicked point - ax1.scatter(xdata[ind_close], ydata[ind_close], marker = 'x', c = stamp_color) + ax1.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color) # Print point info text.set_position((xdata[ind_close], ydata[ind_close])) if plotsig: - text.set_text("TOA Params:\n Phase: %.5f \n Res/Err: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n Phase: %.5f \n 
Res/Err: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) else: - text.set_text("TOA Params:\n Phase: %.5f \n Res: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0])) + text.set_text( + "TOA Params:\n Phase: %.5f \n Res: %.2f \n Index: %s" + % (xdata[ind_close][0], ydata[ind_close], ind_close[0]) + ) - fig.canvas.mpl_connect('button_press_event', onclick) + fig.canvas.mpl_connect("button_press_event", onclick) return @@ -3052,9 +3441,18 @@ def onclick(event): return - -def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whitened = False, save = False, \ - legend = True, title = True, axs = None, **kwargs): +def plot_fd_res_v_freq( + fitter, + plotsig=False, + comp_FD=True, + avg=False, + whitened=False, + save=False, + legend=True, + title=True, + axs=None, + **kwargs, +): """ Make a plot of the residuals vs. frequency, can do WB as well. Note, if WB fitter, comp_FD may not work. If comp_FD is True, the panels are organized as follows: @@ -3094,7 +3492,9 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi if fitter.is_wideband: NB = False if avg == True: - raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") + raise ValueError( + "Cannot epoch average wideband residuals, please change 'avg' to False." + ) else: NB = True @@ -3103,19 +3503,19 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi avg_dict = fitter.resids.ecorr_average(use_noise_model=True) # Get residuals - if 'res' in kwargs.keys(): - res = kwargs['res'] + if "res" in kwargs.keys(): + res = kwargs["res"] else: if NB == True: if avg == True: - res = avg_dict['time_resids'].to(u.us) + res = avg_dict["time_resids"].to(u.us) else: res = fitter.resids.time_resids.to(u.us) else: - res = fitter.resids.residual_objs['toa'].time_resids.to(u.us) + res = fitter.resids.residual_objs["toa"].time_resids.to(u.us) # Check if we want whitened residuals - if whitened == True and ('res' not in kwargs.keys()): + if whitened == True and ("res" not in kwargs.keys()): if avg == True: res = whiten_resids(avg_dict) res = res.to(u.us) @@ -3124,36 +3524,36 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi res = res.to(u.us) # Get errors - if 'errs' in kwargs.keys(): - errs = kwargs['errs'] + if "errs" in kwargs.keys(): + errs = kwargs["errs"] else: if NB == True: if avg == True: - errs = avg_dict['errors'].to(u.us) + errs = avg_dict["errors"].to(u.us) else: errs = fitter.resids.get_data_error().to(u.us) else: - errs = fitter.resids.residual_objs['toa'].get_data_error().to(u.us) + errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us) # Get receiver backends - if 'rcvr_bcknds' in kwargs.keys(): - rcvr_bcknds = kwargs['rcvr_bcknds'] + if "rcvr_bcknds" in kwargs.keys(): + rcvr_bcknds = kwargs["rcvr_bcknds"] else: - rcvr_bcknds = np.array(fitter.toas.get_flag_value('f')[0]) + rcvr_bcknds = np.array(fitter.toas.get_flag_value("f")[0]) if avg == True: avg_rcvr_bcknds = [] - for iis in avg_dict['indices']: + for iis in avg_dict["indices"]: avg_rcvr_bcknds.append(rcvr_bcknds[iis[0]]) rcvr_bcknds = np.array(avg_rcvr_bcknds) # Get the set of unique receiver-bandend combos RCVR_BCKNDS = set(rcvr_bcknds) # get frequencies - if 'freqs' in kwargs.keys(): - freqs = kwargs['freqs'] + if "freqs" in kwargs.keys(): + freqs = kwargs["freqs"] else: if avg == True: - freqs = avg_dict['freqs'].value + freqs = avg_dict["freqs"].value else: freqs = 
fitter.toas.get_freqs().value @@ -3162,19 +3562,19 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi if axs != None: log.warn("Cannot do full comparison with three panels") axs = None - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (4,12) + figsize = (4, 12) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(313) ax2 = fig.add_subplot(312) ax3 = fig.add_subplot(311) else: - if 'figsize' in kwargs.keys(): - figsize = kwargs['figsize'] + if "figsize" in kwargs.keys(): + figsize = kwargs["figsize"] else: - figsize = (4,4) + figsize = (4, 4) if axs == None: fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(111) @@ -3183,52 +3583,66 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi # Make the plot of residual vs. frequency for i, r_b in enumerate(RCVR_BCKNDS): - inds = np.where(rcvr_bcknds==r_b)[0] + inds = np.where(rcvr_bcknds == r_b)[0] if not inds.tolist(): r_b_label = "" else: r_b_label = rcvr_bcknds[inds][0] # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: mkr = markers[r_b_label] - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in kwargs.keys(): + clr = kwargs["color"] else: clr = colorscheme[r_b_label] - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 1.0 if plotsig: - sig = res[inds]/errs[inds] - ax1.errorbar(freqs[inds], sig, yerr=len(errs[inds])*[1], fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) + sig = res[inds] / errs[inds] + ax1.errorbar( + freqs[inds], + sig, + yerr=len(errs[inds]) * [1], + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) else: - ax1.errorbar(freqs[inds], res[inds], yerr=errs[inds], fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) + ax1.errorbar( + freqs[inds], + res[inds], + yerr=errs[inds], + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) # assign axis labels - ax1.set_xlabel(r'Frequency (MHz)') + ax1.set_xlabel(r"Frequency (MHz)") ax1.grid(True) if plotsig: if avg and whitened: - ylabel = 'Average Residual/Uncertainty \n (Whitened)' + ylabel = "Average Residual/Uncertainty \n (Whitened)" elif avg and not whitened: - ylabel = 'Average Residual/Uncertainty' + ylabel = "Average Residual/Uncertainty" elif whitened and not avg: - ylabel ='Residual/Uncertainty \n (Whitened)' + ylabel = "Residual/Uncertainty \n (Whitened)" else: - ylabel ='Residual/Uncertainty' + ylabel = "Residual/Uncertainty" else: if avg and whitened: - ylabel = 'Average Residual ($\mu$s) \n (Whitened)' + ylabel = "Average Residual ($\mu$s) \n (Whitened)" elif avg and not whitened: - ylabel = 'Average Residual ($\mu$s)' + ylabel = "Average Residual ($\mu$s)" elif whitened and not avg: - ylabel = 'Residual ($\mu$s) \n (Whitened)' + ylabel = "Residual ($\mu$s) \n (Whitened)" else: - ylabel = 'Residual ($\mu$s)' + ylabel = "Residual ($\mu$s)" ax1.set_ylabel(ylabel) # Now if we want to show the other plots, we plot them @@ -3241,22 +3655,22 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi sorted_freqs = np.linspace(np.min(freqs), np.max(freqs), 1000) FD_line = np.zeros(np.size(sorted_freqs)) for i, fd in enumerate(cur_fd): - fd_val = getattr(fitter.model, fd).value * 10**6 # convert to microseconds - FD_offsets += fd_val * np.log(freqs/1000.0)**(i+1) - FD_line += fd_val * 
np.log(sorted_freqs/1000.0)**(i+1) + fd_val = getattr(fitter.model, fd).value * 10**6 # convert to microseconds + FD_offsets += fd_val * np.log(freqs / 1000.0) ** (i + 1) + FD_line += fd_val * np.log(sorted_freqs / 1000.0) ** (i + 1) # Now edit residuals fd_cor_res = res.value + FD_offsets # Now we need to redo the fit without the FD parameters psr_fitter_nofd = copy.deepcopy(fitter) try: - psr_fitter_nofd.model.remove_component('FD') + psr_fitter_nofd.model.remove_component("FD") except: log.warning("No FD parameters in the initial timing model...") # Check if fitter is wideband or not if psr_fitter_nofd.is_wideband: - resids = psr_fitter_nofd.resids.residual_objs['toa'] + resids = psr_fitter_nofd.resids.residual_objs["toa"] else: resids = psr_fitter_nofd.resids @@ -3271,7 +3685,7 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi res_nofd = wres_avg.to(u.us).value else: # need to average - res_nofd = avg['time_resids'].to(u.us).value + res_nofd = avg["time_resids"].to(u.us).value elif whitened: # Need to whiten wres_nofd = whiten_resids(psr_fitter_nofd) @@ -3281,55 +3695,93 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi # Now plot for i, r_b in enumerate(RCVR_BCKNDS): - inds = np.where(rcvr_bcknds==r_b)[0] + inds = np.where(rcvr_bcknds == r_b)[0] if not inds.tolist(): r_b_label = "" else: r_b_label = rcvr_bcknds[inds][0] # Get plot preferences - if 'fmt' in kwargs.keys(): - mkr = kwargs['fmt'] + if "fmt" in kwargs.keys(): + mkr = kwargs["fmt"] else: mkr = markers[r_b_label] - if 'color' in kwargs.keys(): - clr = kwargs['color'] + if "color" in kwargs.keys(): + clr = kwargs["color"] else: clr = colorscheme[r_b_label] - if 'alpha' in kwargs.keys(): - alpha = kwargs['alpha'] + if "alpha" in kwargs.keys(): + alpha = kwargs["alpha"] else: alpha = 1.0 if plotsig: - sig = fd_cor_res[inds]/errs[inds] - ax3.errorbar(freqs[inds], sig.value, yerr=len(errs[inds])*[1], fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) - - sig_nofd = res_nofd[inds]/errs[inds].value - ax2.errorbar(freqs[inds], sig_nofd, yerr=len(errs[inds])*[1], fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) + sig = fd_cor_res[inds] / errs[inds] + ax3.errorbar( + freqs[inds], + sig.value, + yerr=len(errs[inds]) * [1], + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) + + sig_nofd = res_nofd[inds] / errs[inds].value + ax2.errorbar( + freqs[inds], + sig_nofd, + yerr=len(errs[inds]) * [1], + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) else: - ax3.errorbar(freqs[inds], fd_cor_res[inds], yerr=errs[inds].value, fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) - - ax2.errorbar(freqs[inds], res_nofd[inds], yerr=errs[inds].value, fmt=mkr, \ - color=clr, label=r_b_label, alpha = alpha) - - ax3.plot(sorted_freqs, FD_line, c = 'k', ls = '--') + ax3.errorbar( + freqs[inds], + fd_cor_res[inds], + yerr=errs[inds].value, + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) + + ax2.errorbar( + freqs[inds], + res_nofd[inds], + yerr=errs[inds].value, + fmt=mkr, + color=clr, + label=r_b_label, + alpha=alpha, + ) + + ax3.plot(sorted_freqs, FD_line, c="k", ls="--") # assign axis labels - ax3.set_xlabel(r'Frequency (MHz)') + ax3.set_xlabel(r"Frequency (MHz)") ax3.set_ylabel(ylabel) ax3.grid(True) - ax2.set_xlabel(r'Frequency (MHz)') + ax2.set_xlabel(r"Frequency (MHz)") ax2.set_ylabel(ylabel) ax2.grid(True) if legend: if comp_FD: - ax3.legend(loc='upper center', bbox_to_anchor= (0.5, 1.0+1.0/figsize[1]), 
ncol=int(len(RCVR_BCKNDS)/2))
+            ax3.legend(
+                loc="upper center",
+                bbox_to_anchor=(0.5, 1.0 + 1.0 / figsize[1]),
+                ncol=int(len(RCVR_BCKNDS) / 2),
+            )
         else:
-            ax1.legend(loc='upper center', bbox_to_anchor= (0.5, 1.0+1.0/figsize[1]), ncol=int(len(RCVR_BCKNDS)/2))
+            ax1.legend(
+                loc="upper center",
+                bbox_to_anchor=(0.5, 1.0 + 1.0 / figsize[1]),
+                ncol=int(len(RCVR_BCKNDS) / 2),
+            )
     if title:
-        plt.title("%s FD Paramter Check" % (fitter.model.PSR.value), y=1.0+1.0/figsize[1])
+        plt.title(
+            "%s FD Parameter Check" % (fitter.model.PSR.value), y=1.0 + 1.0 / figsize[1]
+        )
     plt.tight_layout()
     if save:
         ext = ""
@@ -3349,7 +3801,11 @@ def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whi
 We also offer some options for convenience plotting functions, one that will show all possible summary plots, and another that will show just the summary plots that are typically created in finalize_timing.py in that order.
 """
-def summary_plots(fitter, title = None, legends = False, save = False, avg = True, whitened = True):
+
+
+def summary_plots(
+    fitter, title=None, legends=False, save=False, avg=True, whitened=True
+):
     """
     Function to make a composite set of summary plots for sets of TOAs.
     NOTE - This is not the same set of plots as will be in the pdf writer
@@ -3367,7 +3823,9 @@ def summary_plots(fitter, title = None, legends = False, save = False, avg = Tru
 
     if fitter.is_wideband:
         if avg == True:
-            raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.")
+            raise ValueError(
+                "Cannot epoch average wideband residuals, please change 'avg' to False."
+            )
     # Determine how long the figure size needs to be
     figlength = 18
     gs_rows = 6
@@ -3381,7 +3839,7 @@ def summary_plots(fitter, title = None, legends = False, save = False, avg = Tru
         figlength += 18
         gs_rows += 4
     # adjust size if not in a binary
-    if not hasattr(fitter.model, 'binary_model_name'):
+    if not hasattr(fitter.model, "binary_model_name"):
         sub_rows = 1
         sub_len = 3
         if whitened:
@@ -3396,126 +3854,241 @@ def summary_plots(fitter, title = None, legends = False, save = False, avg = Tru
         figlength -= sub_len
         gs_rows -= sub_rows

-    fig = plt.figure(figsize = (12,figlength)) # not sure what we'll need for a fig size
+    fig = plt.figure(figsize=(12, figlength))  # not sure what we'll need for a fig size
     if title != None:
-        plt.title(title, y = 1.015, size = 16)
+        plt.title(title, y=1.015, size=16)
     gs = fig.add_gridspec(gs_rows, 2)
     count = 0
     k = 0
     # First plot is all residuals vs. time.
     ax0 = fig.add_subplot(gs[count, :])
-    plot_residuals_time(fitter, title = False, axs = ax0, figsize=(12,3))
+    plot_residuals_time(fitter, title=False, axs=ax0, figsize=(12, 3))
     k += 1
     # Plot the residuals divided by uncertainty vs. time
-    ax1 = fig.add_subplot(gs[count+k, :])
-    plot_residuals_time(fitter, title = False, legend = False, plotsig = True, axs = ax1, figsize=(12,3))
+    ax1 = fig.add_subplot(gs[count + k, :])
+    plot_residuals_time(
+        fitter, title=False, legend=False, plotsig=True, axs=ax1, figsize=(12, 3)
+    )
     k += 1
     # Second plot is residual v. orbital phase
-    if hasattr(fitter.model, 'binary_model_name'):
-        ax2 = fig.add_subplot(gs[count+k, :])
-        plot_residuals_orb(fitter, title = False, legend = False, axs = ax2, figsize=(12,3))
+    if hasattr(fitter.model, "binary_model_name"):
+        ax2 = fig.add_subplot(gs[count + k, :])
+        plot_residuals_orb(fitter, title=False, legend=False, axs=ax2, figsize=(12, 3))
         k += 1
     # Now add the measurement vs. 
uncertainty - ax3_0 = fig.add_subplot(gs[count+k, 0]) - ax3_1 = fig.add_subplot(gs[count+k, 1]) - plot_measurements_v_res(fitter, nbin = 50, plotsig=False, title = False, legend = False, axs = ax3_0, \ - figsize=(6,3),) - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, axs = ax3_1, \ - figsize=(6,3),) + ax3_0 = fig.add_subplot(gs[count + k, 0]) + ax3_1 = fig.add_subplot(gs[count + k, 1]) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=False, + title=False, + legend=False, + axs=ax3_0, + figsize=(6, 3), + ) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + axs=ax3_1, + figsize=(6, 3), + ) k += 1 # and the DMX vs. time - ax4 = fig.add_subplot(gs[count+k, :]) - plot_dmx_time(fitter, savedmx = "dmxparse.out", legend = False, title = False, axs = ax4, figsize=(12,3)) + ax4 = fig.add_subplot(gs[count + k, :]) + plot_dmx_time( + fitter, + savedmx="dmxparse.out", + legend=False, + title=False, + axs=ax4, + figsize=(12, 3), + ) k += 1 # And residual vs. Frequency - ax5 = fig.add_subplot(gs[count+k, :]) - plot_residuals_freq(fitter, title = False, legend = False, axs =ax5, figsize=(12,3)) + ax5 = fig.add_subplot(gs[count + k, :]) + plot_residuals_freq(fitter, title=False, legend=False, axs=ax5, figsize=(12, 3)) k += 1 # Now if whitened add the whitened residual plots if whitened: - ax6 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, whitened = True, axs = ax6, figsize=(12,3)) + ax6 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, title=False, whitened=True, axs=ax6, figsize=(12, 3) + ) k += 1 # Plot the residuals divided by uncertainty vs. time - ax7 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, whitened = True, axs = ax7, figsize=(12,3)) + ax7 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + whitened=True, + axs=ax7, + figsize=(12, 3), + ) k += 1 # Second plot is residual v. orbital phase - if hasattr(fitter.model, 'binary_model_name'): - ax8 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, whitened = True, axs = ax8, figsize=(12,3)) + if hasattr(fitter.model, "binary_model_name"): + ax8 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, + title=False, + legend=False, + whitened=True, + axs=ax8, + figsize=(12, 3), + ) k += 1 # Now add the measurement vs. 
uncertainty - ax9_0 = fig.add_subplot(gs[count+k, 0]) - ax9_1 = fig.add_subplot(gs[count+k, 1]) - plot_measurements_v_res(fitter, nbin = 50, plotsig=False, title = False, legend = False, whitened = True,\ - axs = ax9_0, figsize=(6,3),) - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, whitened = True,\ - axs = ax9_1, figsize=(6,3),) + ax9_0 = fig.add_subplot(gs[count + k, 0]) + ax9_1 = fig.add_subplot(gs[count + k, 1]) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=False, + title=False, + legend=False, + whitened=True, + axs=ax9_0, + figsize=(6, 3), + ) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + whitened=True, + axs=ax9_1, + figsize=(6, 3), + ) k += 1 # Now plot the average residuals if avg: - ax10 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, avg = True, axs = ax10, figsize=(12,3)) + ax10 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time(fitter, title=False, avg=True, axs=ax10, figsize=(12, 3)) k += 1 # Plot the residuals divided by uncertainty vs. time - ax11 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, avg = True, axs = ax11, figsize=(12,3)) + ax11 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + avg=True, + axs=ax11, + figsize=(12, 3), + ) k += 1 # Second plot is residual v. orbital phase - if hasattr(fitter.model, 'binary_model_name'): - ax12 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, avg = True, axs = ax12, figsize=(12,3)) + if hasattr(fitter.model, "binary_model_name"): + ax12 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, title=False, legend=False, avg=True, axs=ax12, figsize=(12, 3) + ) k += 1 # Now add the measurement vs. uncertainty - ax13_0 = fig.add_subplot(gs[count+k, 0]) - ax13_1 = fig.add_subplot(gs[count+k, 1]) - plot_measurements_v_res(fitter, nbin = 50, plotsig=False, title = False, legend = False,\ - avg = True, axs = ax13_0, figsize=(6,3)) - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, \ - avg = True, axs = ax13_1, figsize=(6,3)) + ax13_0 = fig.add_subplot(gs[count + k, 0]) + ax13_1 = fig.add_subplot(gs[count + k, 1]) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=False, + title=False, + legend=False, + avg=True, + axs=ax13_0, + figsize=(6, 3), + ) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + avg=True, + axs=ax13_1, + figsize=(6, 3), + ) k += 1 # Now plot the whitened average residuals if avg and whitened: - ax14 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, avg = True, whitened = True, axs = ax14, figsize=(12,3)) + ax14 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time(fitter, avg=True, whitened=True, axs=ax14, figsize=(12, 3)) k += 1 # Plot the residuals divided by uncertainty vs. time - ax15 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, avg = True, whitened = True,\ - axs = ax15, figsize=(12,3)) + ax15 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + avg=True, + whitened=True, + axs=ax15, + figsize=(12, 3), + ) k += 1 # Second plot is residual v. 
orbital phase
-        if hasattr(fitter.model, 'binary_model_name'):
-            ax16 = fig.add_subplot(gs[count+k, :])
-            plot_residuals_orb(fitter, title = False, legend = False, avg = True, whitened = True, axs = ax16, \
-                      figsize=(12,3))
+        if hasattr(fitter.model, "binary_model_name"):
+            ax16 = fig.add_subplot(gs[count + k, :])
+            plot_residuals_orb(
+                fitter,
+                title=False,
+                legend=False,
+                avg=True,
+                whitened=True,
+                axs=ax16,
+                figsize=(12, 3),
+            )
             k += 1
         # Now add the measurement vs. uncertainty
-        ax17_0 = fig.add_subplot(gs[count+k, 0])
-        ax17_1 = fig.add_subplot(gs[count+k, 1])
-        plot_measurements_v_res(fitter, nbin = 50, plotsig=False, title = False, legend = False, avg = True, whitened = True, \
-                       axs = ax17_0, figsize=(6,3))
-        plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, avg = True, whitened = True, \
-                       axs = ax17_1, figsize=(6,3))
+        ax17_0 = fig.add_subplot(gs[count + k, 0])
+        ax17_1 = fig.add_subplot(gs[count + k, 1])
+        plot_measurements_v_res(
+            fitter,
+            nbin=50,
+            plotsig=False,
+            title=False,
+            legend=False,
+            avg=True,
+            whitened=True,
+            axs=ax17_0,
+            figsize=(6, 3),
+        )
+        plot_measurements_v_res(
+            fitter,
+            nbin=50,
+            plotsig=True,
+            title=False,
+            legend=False,
+            avg=True,
+            whitened=True,
+            axs=ax17_1,
+            figsize=(6, 3),
+        )
         k += 1

     plt.tight_layout()
@@ -3524,8 +4097,11 @@ def summary_plots(fitter, title = None, legends = False, save = False, avg = Tru
 
     return
 
+
 """We also define a function to output the summary plots exactly as is done in finalize_timing.py (for now)"""
-def summary_plots_ft(fitter, title = None, legends = False, save = False):
+
+
+def summary_plots_ft(fitter, title=None, legends=False, save=False):
     """
     Function to make a composite set of summary plots for sets of TOAs
     NOTE - This is not the same set of plots as will be in the pdf writer
@@ -3540,131 +4116,247 @@ def summary_plots_ft(fitter, title = None, legends = False, save = False):
     """
     # Define the figure
     # Determine how long the figure size needs to be
-    figlength = 18*3
+    figlength = 18 * 3
     gs_rows = 13
-    if not hasattr(fitter.model, 'binary_model_name'):
+    if not hasattr(fitter.model, "binary_model_name"):
         figlength -= 9
         gs_rows -= 3
     if fitter.is_wideband:
         figlength -= 9
         gs_rows -= 3

-    fig = plt.figure(figsize = (12,figlength)) # not sure what we'll need for a fig size
+    fig = plt.figure(figsize=(12, figlength))  # not sure what we'll need for a fig size
     if title != None:
-        plt.title(title, y = 1.015, size = 16)
+        plt.title(title, y=1.015, size=16)
     gs = fig.add_gridspec(gs_rows, 2)
     count = 0
     k = 0

     # First plot is all residuals vs. time.
     ax0 = fig.add_subplot(gs[count, :])
-    plot_residuals_time(fitter, title = False, axs = ax0, figsize=(12,3))
+    plot_residuals_time(fitter, title=False, axs=ax0, figsize=(12, 3))
     k += 1
     # Then the epoch averaged residuals v. time
     if not fitter.is_wideband:
-        ax10 = fig.add_subplot(gs[count+k, :])
-        plot_residuals_time(fitter, title = False, legend = False, avg = True, axs = ax10, figsize=(12,3))
+        ax10 = fig.add_subplot(gs[count + k, :])
+        plot_residuals_time(
+            fitter, title=False, legend=False, avg=True, axs=ax10, figsize=(12, 3)
+        )
        k += 1
     # Epoch averaged vs. 
orbital phase - if hasattr(fitter.model, 'binary_model_name'): + if hasattr(fitter.model, "binary_model_name"): if not fitter.is_wideband: - ax12 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, avg = True, axs = ax12, figsize=(12,3)) + ax12 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, title=False, legend=False, avg=True, axs=ax12, figsize=(12, 3) + ) k += 1 else: - ax12 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, axs = ax12, figsize=(12,3)) + ax12 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, title=False, legend=False, axs=ax12, figsize=(12, 3) + ) k += 1 # And DMX vs. time - ax4 = fig.add_subplot(gs[count+k, :]) - plot_dmx_time(fitter, savedmx = "dmxparse.out", legend = False, title = False, axs = ax4, figsize=(12,3)) + ax4 = fig.add_subplot(gs[count + k, :]) + plot_dmx_time( + fitter, + savedmx="dmxparse.out", + legend=False, + title=False, + axs=ax4, + figsize=(12, 3), + ) k += 1 # Whitened residuals v. time - ax6 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, whitened = True, axs = ax6, figsize=(12,3)) + ax6 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time(fitter, whitened=True, axs=ax6, figsize=(12, 3)) k += 1 # Whitened epoch averaged residuals v. time if not fitter.is_wideband: - ax15 = fig.add_subplot(gs[count+k, :]) - plot_residuals_time(fitter, title = False, legend = False, plotsig = False, avg = True, \ - whitened = True, axs = ax15, figsize=(12,3)) + ax15 = fig.add_subplot(gs[count + k, :]) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=False, + avg=True, + whitened=True, + axs=ax15, + figsize=(12, 3), + ) k += 1 # Whitened epoch averaged residuals v. orbital phase - if hasattr(fitter.model, 'binary_model_name'): + if hasattr(fitter.model, "binary_model_name"): if not fitter.is_wideband: - ax16 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, \ - avg = True, whitened = True, axs = ax16, figsize=(12,3)) + ax16 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax16, + figsize=(12, 3), + ) k += 1 else: - ax16 = fig.add_subplot(gs[count+k, :]) - plot_residuals_orb(fitter, title = False, legend = False, \ - avg = False, whitened = True, axs = ax16, figsize=(12,3)) + ax16 = fig.add_subplot(gs[count + k, :]) + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=False, + whitened=True, + axs=ax16, + figsize=(12, 3), + ) k += 1 # Now add the measurement vs. 
uncertainty for both all residuals and epoch averaged
-    ax3_0 = fig.add_subplot(gs[count+k, 0])
-    ax3_1 = fig.add_subplot(gs[count+k, 1])
-    plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, plotsig=False, \
-                       whitened = True, axs = ax3_0, figsize=(6,3))
+    ax3_0 = fig.add_subplot(gs[count + k, 0])
+    ax3_1 = fig.add_subplot(gs[count + k, 1])
+    plot_measurements_v_res(
+        fitter,
+        nbin=50,
+        title=False,
+        legend=False,
+        plotsig=False,
+        whitened=True,
+        axs=ax3_0,
+        figsize=(6, 3),
+    )
     if not fitter.is_wideband:
-        plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, avg = True, \
-                       whitened = True, axs = ax3_1, figsize=(6,3))
+        plot_measurements_v_res(
+            fitter,
+            nbin=50,
+            title=False,
+            legend=False,
+            avg=True,
+            whitened=True,
+            axs=ax3_1,
+            figsize=(6, 3),
+        )
         k += 1
     else:
-        plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, avg = False, \
-                       whitened = False, axs = ax3_1, figsize=(6,3))
+        plot_measurements_v_res(
+            fitter,
+            nbin=50,
+            title=False,
+            legend=False,
+            avg=False,
+            whitened=False,
+            axs=ax3_1,
+            figsize=(6, 3),
+        )
         k += 1

     # Whitened residual/uncertainty v. time
-    ax26 = fig.add_subplot(gs[count+k, :])
-    plot_residuals_time(fitter, plotsig = True, title = False, legend = False, whitened = True,\
-                   axs = ax26, figsize=(12,3))
+    ax26 = fig.add_subplot(gs[count + k, :])
+    plot_residuals_time(
+        fitter,
+        plotsig=True,
+        title=False,
+        legend=False,
+        whitened=True,
+        axs=ax26,
+        figsize=(12, 3),
+    )
     k += 1
     # Epoch averaged Whitened residual/uncertainty v. time
     if not fitter.is_wideband:
-        ax25 = fig.add_subplot(gs[count+k, :])
-        plot_residuals_time(fitter, title = False, legend = False, plotsig = True, \
-                       avg = True, whitened = True, axs = ax25, figsize=(12,3))
+        ax25 = fig.add_subplot(gs[count + k, :])
+        plot_residuals_time(
+            fitter,
+            title=False,
+            legend=False,
+            plotsig=True,
+            avg=True,
+            whitened=True,
+            axs=ax25,
+            figsize=(12, 3),
+        )
         k += 1
     # Epoch averaged Whitened residual/uncertainty v. orbital phase
-    if hasattr(fitter.model, 'binary_model_name'):
+    if hasattr(fitter.model, "binary_model_name"):
         if not fitter.is_wideband:
-            ax36 = fig.add_subplot(gs[count+k, :])
-            plot_residuals_orb(fitter, title = False, legend = False, plotsig = True, avg = True, \
-                       whitened = True, axs = ax36, figsize=(12,3))
+            ax36 = fig.add_subplot(gs[count + k, :])
+            plot_residuals_orb(
+                fitter,
+                title=False,
+                legend=False,
+                plotsig=True,
+                avg=True,
+                whitened=True,
+                axs=ax36,
+                figsize=(12, 3),
+            )
            k += 1
         else:
-            ax36 = fig.add_subplot(gs[count+k, :])
-            plot_residuals_orb(fitter, title = False, legend = False, plotsig = True, avg = False, \
-                       whitened = True, axs = ax36, figsize=(12,3))
+            ax36 = fig.add_subplot(gs[count + k, :])
+            plot_residuals_orb(
+                fitter,
+                title=False,
+                legend=False,
+                plotsig=True,
+                avg=False,
+                whitened=True,
+                axs=ax36,
+                figsize=(12, 3),
+            )
            k += 1
     # Now add the measurement vs. 
uncertainty for both all residuals/uncertainty and epoch averaged/uncertainty
-    ax17_0 = fig.add_subplot(gs[count+k, 0])
-    ax17_1 = fig.add_subplot(gs[count+k, 1])
-    plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, whitened = True,\
-                   axs = ax17_0, figsize=(6,3))
+    ax17_0 = fig.add_subplot(gs[count + k, 0])
+    ax17_1 = fig.add_subplot(gs[count + k, 1])
+    plot_measurements_v_res(
+        fitter,
+        nbin=50,
+        plotsig=True,
+        title=False,
+        legend=False,
+        whitened=True,
+        axs=ax17_0,
+        figsize=(6, 3),
+    )
     if not fitter.is_wideband:
-        plot_measurements_v_res(fitter, nbin = 50, title = False, plotsig=True, \
-                       legend = False, avg = True, whitened = True, axs = ax17_1, figsize=(6,3))
+        plot_measurements_v_res(
+            fitter,
+            nbin=50,
+            title=False,
+            plotsig=True,
+            legend=False,
+            avg=True,
+            whitened=True,
+            axs=ax17_1,
+            figsize=(6, 3),
+        )
         k += 1
     else:
-        plot_measurements_v_res(fitter, nbin = 50, title = False, plotsig=True, \
-                       legend = False, avg = False, whitened =False, axs = ax17_1, figsize=(6,3))
+        plot_measurements_v_res(
+            fitter,
+            nbin=50,
+            title=False,
+            plotsig=True,
+            legend=False,
+            avg=False,
+            whitened=False,
+            axs=ax17_1,
+            figsize=(6, 3),
+        )
         k += 1

     # Now plot the frequencies of the TOAs vs. time
-    ax5 = fig.add_subplot(gs[count+k, :])
-    plot_residuals_freq(fitter, title = False, legend = False, axs =ax5, figsize=(12,3))
+    ax5 = fig.add_subplot(gs[count + k, :])
+    plot_residuals_freq(fitter, title=False, legend=False, axs=ax5, figsize=(12, 3))
     k += 1

     plt.tight_layout()
@@ -3673,8 +4365,9 @@ def summary_plots_ft(fitter, title = None, legends = False, save = False):
 
     return
 
+
 # JUST THE PLOTS FOR THE PDF WRITERS LEFT
-def plots_for_summary_pdf_nb(fitter, title = None, legends = False):
+def plots_for_summary_pdf_nb(fitter, title=None, legends=False):
     """
     Function to make a composite set of summary plots for sets of TOAs to be put into a summary pdf.
     This is for Narrowband timing only. For Wideband timing, use `plots_for_summary_pdf_wb`.
@@ -3689,137 +4382,297 @@ def plots_for_summary_pdf_nb(fitter, title = None, legends = False):
     """
 
     if fitter.is_wideband:
-        raise ValueError("Cannot use this function with WidebandTOAFitter, please use `plots_for_summary_pdf_wb` instead.")
+        raise ValueError(
+            "Cannot use this function with WidebandTOAFitter, please use `plots_for_summary_pdf_wb` instead."
+        )

     # Need to make four sets of plots
     for ii in range(4):
         if ii != 3:
-            fig = plt.figure(figsize=(8,10.0),dpi=100)
+            fig = plt.figure(figsize=(8, 10.0), dpi=100)
         else:
-            fig = plt.figure(figsize=(8,5),dpi=100)
+            fig = plt.figure(figsize=(8, 5), dpi=100)
         if title != None:
-            plt.title(title, y = 1.08, size = 14)
+            plt.title(title, y=1.08, size=14)
         if ii == 0:
-            gs = fig.add_gridspec(nrows = 4, ncols = 1)
+            gs = fig.add_gridspec(nrows=4, ncols=1)

-            ax0 = fig.add_subplot(gs[0,:])
-            ax1 = fig.add_subplot(gs[1,:])
-            ax2 = fig.add_subplot(gs[2,:])
-            ax3 = fig.add_subplot(gs[3,:])
+            ax0 = fig.add_subplot(gs[0, :])
+            ax1 = fig.add_subplot(gs[1, :])
+            ax2 = fig.add_subplot(gs[2, :])
+            ax3 = fig.add_subplot(gs[3, :])
             # Plot residuals v. time
-            plot_residuals_time(fitter, title = False, axs = ax0, figsize=(8, 2.5))
+            plot_residuals_time(fitter, title=False, axs=ax0, figsize=(8, 2.5))
             # Plot averaged residuals v. 
time - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_time(fitter, avg = True, axs = ax1, title = False, legend = False, figsize=(8,2.5)) + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_time( + fitter, + avg=True, + axs=ax1, + title=False, + legend=False, + figsize=(8, 2.5), + ) else: - log.warning("ECORR not in model, cannot generate epoch averaged residuals. Plots will show all residuals.") - plot_residuals_time(fitter, avg = False, axs = ax1, title = False, legend = False, figsize=(8,2.5)) + log.warning( + "ECORR not in model, cannot generate epoch averaged residuals. Plots will show all residuals." + ) + plot_residuals_time( + fitter, + avg=False, + axs=ax1, + title=False, + legend=False, + figsize=(8, 2.5), + ) # Plot residuals v orbital phase - if hasattr(fitter.model, 'binary_model_name'): - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_orb(fitter, title = False, legend = False, avg = True, axs = ax2, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=True, + axs=ax2, + figsize=(8, 2.5), + ) else: - plot_residuals_orb(fitter, title = False, legend = False, avg = False, axs = ax2, figsize=(8,2.5)) + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=False, + axs=ax2, + figsize=(8, 2.5), + ) # plot dmx v. time - if 'dispersion_dmx' in fitter.model.get_components_by_category().keys(): - plot_dmx_time(fitter, savedmx = "dmxparse.out", legend = False, title = False, axs = ax3, figsize=(8,2.5)) + if "dispersion_dmx" in fitter.model.get_components_by_category().keys(): + plot_dmx_time( + fitter, + savedmx="dmxparse.out", + legend=False, + title=False, + axs=ax3, + figsize=(8, 2.5), + ) else: log.warning("No DMX bins in timing model, cannot plot DMX v. 
Time.") plt.tight_layout() plt.savefig("%s_summary_plot_1_nb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 1: - if hasattr(fitter.model, 'binary_model_name'): - gs = fig.add_gridspec(4,2) - ax2 = fig.add_subplot(gs[2,:]) - ax3 = fig.add_subplot(gs[3,0]) - ax4 = fig.add_subplot(gs[3,1]) + if hasattr(fitter.model, "binary_model_name"): + gs = fig.add_gridspec(4, 2) + ax2 = fig.add_subplot(gs[2, :]) + ax3 = fig.add_subplot(gs[3, 0]) + ax4 = fig.add_subplot(gs[3, 1]) else: - gs = fig.add_gridspec(3,2) - ax3 = fig.add_subplot(gs[2,0]) - ax4 = fig.add_subplot(gs[2,1]) - ax0 = fig.add_subplot(gs[0,:]) - ax1 = fig.add_subplot(gs[1,:]) + gs = fig.add_gridspec(3, 2) + ax3 = fig.add_subplot(gs[2, 0]) + ax4 = fig.add_subplot(gs[2, 1]) + ax0 = fig.add_subplot(gs[0, :]) + ax1 = fig.add_subplot(gs[1, :]) # plot whitened residuals v time - plot_residuals_time(fitter, title = False, whitened = True, axs = ax0, figsize=(8,2.5)) + plot_residuals_time( + fitter, title=False, whitened=True, axs=ax0, figsize=(8, 2.5) + ) # plot whitened, epoch averaged residuals v time - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_time(fitter, title = False, legend = False, avg = True, \ - whitened = True, axs = ax1, figsize=(8,2.5)) + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_time( + fitter, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax1, + figsize=(8, 2.5), + ) else: - plot_residuals_time(fitter, title = False, legend = False, avg = False, \ - whitened = True, axs = ax1, figsize=(8,2.5)) + plot_residuals_time( + fitter, + title=False, + legend=False, + avg=False, + whitened=True, + axs=ax1, + figsize=(8, 2.5), + ) # Plot whitened, epoch averaged residuals v orbital phase - if hasattr(fitter.model, 'binary_model_name'): - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_orb(fitter, title = False, legend = False, avg = True, whitened = True, \ - axs = ax2, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) else: - plot_residuals_orb(fitter, title = False, legend = False, avg = False, whitened = True, \ - axs = ax2, figsize=(8,2.5)) + plot_residuals_orb( + fitter, + title=False, + legend=False, + avg=False, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) # plot number of whitened residuals histogram - plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, whitened = True,\ - axs = ax3, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + legend=False, + whitened=True, + axs=ax3, + figsize=(4, 2.5), + ) # plot number of whitened, epoch averaged residuals histogram - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, avg = True, whitened = True, \ - axs = ax4, figsize=(4,2.5)) + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax4, + figsize=(4, 2.5), + ) else: - plot_measurements_v_res(fitter, nbin = 50, title = False, legend = False, avg = False, whitened = True, \ - axs = ax4, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + legend=False, + avg=False, + 
whitened=True, + axs=ax4, + figsize=(4, 2.5), + ) plt.tight_layout() plt.savefig("%s_summary_plot_2_nb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 2: - if hasattr(fitter.model, 'binary_model_name'): - gs = fig.add_gridspec(4,2) - ax2 = fig.add_subplot(gs[2,:]) - ax3 = fig.add_subplot(gs[3,0]) - ax4 = fig.add_subplot(gs[3,1]) + if hasattr(fitter.model, "binary_model_name"): + gs = fig.add_gridspec(4, 2) + ax2 = fig.add_subplot(gs[2, :]) + ax3 = fig.add_subplot(gs[3, 0]) + ax4 = fig.add_subplot(gs[3, 1]) else: - gs = fig.add_gridspec(3,2) - ax3 = fig.add_subplot(gs[2,0]) - ax4 = fig.add_subplot(gs[2,1]) - ax0 = fig.add_subplot(gs[0,:]) - ax1 = fig.add_subplot(gs[1,:]) + gs = fig.add_gridspec(3, 2) + ax3 = fig.add_subplot(gs[2, 0]) + ax4 = fig.add_subplot(gs[2, 1]) + ax0 = fig.add_subplot(gs[0, :]) + ax1 = fig.add_subplot(gs[1, :]) # plot whitened residuals/uncertainty v. time - plot_residuals_time(fitter, plotsig = True, title = False, whitened = True, axs = ax0, figsize=(8,2.5)) + plot_residuals_time( + fitter, + plotsig=True, + title=False, + whitened=True, + axs=ax0, + figsize=(8, 2.5), + ) # plot whitened, epoch averaged residuals/uncertainty v. time - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, avg = True,\ - whitened = True, axs = ax1, figsize=(8,2.5)) + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + avg=True, + whitened=True, + axs=ax1, + figsize=(8, 2.5), + ) else: - plot_residuals_time(fitter, title = False, legend = False, plotsig = True, avg = False,\ - whitened = True, axs = ax1, figsize=(8,2.5)) + plot_residuals_time( + fitter, + title=False, + legend=False, + plotsig=True, + avg=False, + whitened=True, + axs=ax1, + figsize=(8, 2.5), + ) # plot whitened, epoch averaged residuals/uncertainty v. 
orbital phase - if hasattr(fitter.model, 'binary_model_name'): - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_residuals_orb(fitter, title = False, legend = False, plotsig = True, \ - avg = True, whitened = True, axs = ax2, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_residuals_orb( + fitter, + title=False, + legend=False, + plotsig=True, + avg=True, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) else: - plot_residuals_orb(fitter, title = False, legend = False, plotsig = True, \ - avg = False, whitened = True, axs = ax2, figsize=(8,2.5)) + plot_residuals_orb( + fitter, + title=False, + legend=False, + plotsig=True, + avg=False, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) # plot number of whitened residuals/uncertainty histogram - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, whitened = True,\ - axs = ax3, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + whitened=True, + axs=ax3, + figsize=(4, 2.5), + ) # plot number of whitened, epoch averaged residuals/uncertainties histogram - if 'ecorr_noise' in fitter.model.get_components_by_category().keys(): - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, \ - avg = True, whitened = True, axs = ax4, figsize=(4,2.5)) + if "ecorr_noise" in fitter.model.get_components_by_category().keys(): + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + avg=True, + whitened=True, + axs=ax4, + figsize=(4, 2.5), + ) else: - plot_measurements_v_res(fitter, nbin = 50, plotsig=True, title = False, legend = False, \ - avg = False, whitened = True, axs = ax4, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + plotsig=True, + title=False, + legend=False, + avg=False, + whitened=True, + axs=ax4, + figsize=(4, 2.5), + ) plt.tight_layout() plt.savefig("%s_summary_plot_3_nb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 3: - gs = fig.add_gridspec(1,1) + gs = fig.add_gridspec(1, 1) ax0 = fig.add_subplot(gs[0]) - plot_residuals_freq(fitter, title = False, legend = True, axs =ax0, figsize=(8,4)) + plot_residuals_freq( + fitter, title=False, legend=True, axs=ax0, figsize=(8, 4) + ) plt.tight_layout() plt.savefig("%s_summary_plot_4_nb.png" % (fitter.model.PSR.value)) plt.close() -def plots_for_summary_pdf_wb(fitter, title = None, legends = False): + +def plots_for_summary_pdf_wb(fitter, title=None, legends=False): """ Function to make a composite set of summary plots for sets of TOAs to be put into a summary pdf. This is for Wideband timing only. For Narrowband timing, use `plots_for_summary_pdf_nb`. @@ -3833,160 +4686,207 @@ def plots_for_summary_pdf_wb(fitter, title = None, legends = False): legend [boolean] : If True, will add legends to ALL plots [default: False]. """ if not fitter.is_wideband: - raise ValueError("Cannot use this function with non-WidebandTOAFitter, please use `plots_for_summary_pdf_nb` instead.") + raise ValueError( + "Cannot use this function with non-WidebandTOAFitter, please use `plots_for_summary_pdf_nb` instead." 
+ ) # Need to make four sets of plots for ii in range(4): if ii != 3: - fig = plt.figure(figsize=(8,10.0),dpi=100) + fig = plt.figure(figsize=(8, 10.0), dpi=100) else: - fig = plt.figure(figsize=(8,5),dpi=100) + fig = plt.figure(figsize=(8, 5), dpi=100) if title != None: - plt.title(title, y = 1.08, size = 14) + plt.title(title, y=1.08, size=14) if ii == 0: - if hasattr(fitter.model, 'binary_model_name'): - gs = fig.add_gridspec(nrows = 4, ncols = 1) - ax2 = fig.add_subplot(gs[2,:]) - ax3 = fig.add_subplot(gs[3,:]) + if hasattr(fitter.model, "binary_model_name"): + gs = fig.add_gridspec(nrows=4, ncols=1) + ax2 = fig.add_subplot(gs[2, :]) + ax3 = fig.add_subplot(gs[3, :]) else: - gs = fig.add_gridspec(nrows = 3, ncols = 1) - ax3 = fig.add_subplot(gs[2,:]) - ax0 = fig.add_subplot(gs[0,:]) - ax1 = fig.add_subplot(gs[1,:]) + gs = fig.add_gridspec(nrows=3, ncols=1) + ax3 = fig.add_subplot(gs[2, :]) + ax0 = fig.add_subplot(gs[0, :]) + ax1 = fig.add_subplot(gs[1, :]) # Plot time residuals v. time - plot_residuals_time(fitter, title = False, axs = ax0, figsize=(8, 2.5)) + plot_residuals_time(fitter, title=False, axs=ax0, figsize=(8, 2.5)) # Plot DM residuals v. time - plot_dm_residuals(fitter, save = False, legend = False, title = False, axs = ax1, figsize=(8, 2.5)) + plot_dm_residuals( + fitter, save=False, legend=False, title=False, axs=ax1, figsize=(8, 2.5) + ) # Plot time residuals v. orbital phase - if hasattr(fitter.model, 'binary_model_name'): - plot_residuals_orb(fitter, title = False, legend = False, axs = ax2, figsize=(8,2.5)) - plot_dmx_time(fitter, savedmx = "dmxparse.out", legend = False, title = False, axs = ax3, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + plot_residuals_orb( + fitter, title=False, legend=False, axs=ax2, figsize=(8, 2.5) + ) + plot_dmx_time( + fitter, + savedmx="dmxparse.out", + legend=False, + title=False, + axs=ax3, + figsize=(8, 2.5), + ) plt.tight_layout() plt.savefig("%s_summary_plot_1_wb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 1: - if hasattr(fitter.model, 'binary_model_name'): - gs = fig.add_gridspec(3,2) - ax2 = fig.add_subplot(gs[1,:]) - ax3 = fig.add_subplot(gs[2,0]) - ax4 = fig.add_subplot(gs[2,1]) + if hasattr(fitter.model, "binary_model_name"): + gs = fig.add_gridspec(3, 2) + ax2 = fig.add_subplot(gs[1, :]) + ax3 = fig.add_subplot(gs[2, 0]) + ax4 = fig.add_subplot(gs[2, 1]) else: - gs = fig.add_gridspec(2,2) - ax3 = fig.add_subplot(gs[1,0]) - ax4 = fig.add_subplot(gs[1,1]) - ax0 = fig.add_subplot(gs[0,:]) - #ax1 = fig.add_subplot(gs[1,:]) + gs = fig.add_gridspec(2, 2) + ax3 = fig.add_subplot(gs[1, 0]) + ax4 = fig.add_subplot(gs[1, 1]) + ax0 = fig.add_subplot(gs[0, :]) + # ax1 = fig.add_subplot(gs[1,:]) # Plot whitened time residuals v. time - plot_residuals_time(fitter, title = False, whitened = True, axs = ax0, figsize=(8,2.5)) + plot_residuals_time( + fitter, title=False, whitened=True, axs=ax0, figsize=(8, 2.5) + ) # Plot whitened time residuals v. 
time - if hasattr(fitter.model, 'binary_model_name'): - plot_residuals_orb(fitter, title = False, legend = False, whitened = True, axs = ax2, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + plot_residuals_orb( + fitter, + title=False, + legend=False, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) # Plot number of whitened residuals histograms - plot_measurements_v_res(fitter, nbin = 50, title = False, plotsig=False, legend = False, whitened = True,\ - axs = ax3, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + plotsig=False, + legend=False, + whitened=True, + axs=ax3, + figsize=(4, 2.5), + ) # plot number of DM residuals histograms - plot_measurements_v_dmres(fitter, nbin = 50, legend = False, title = False, axs = ax4) + plot_measurements_v_dmres( + fitter, nbin=50, legend=False, title=False, axs=ax4 + ) plt.tight_layout() plt.savefig("%s_summary_plot_2_wb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 2: - if hasattr(fitter.model, 'binary_model_name'): - gs = fig.add_gridspec(4,2) - ax2 = fig.add_subplot(gs[2,:]) - ax3 = fig.add_subplot(gs[3,0]) - ax4 = fig.add_subplot(gs[3,1]) + if hasattr(fitter.model, "binary_model_name"): + gs = fig.add_gridspec(4, 2) + ax2 = fig.add_subplot(gs[2, :]) + ax3 = fig.add_subplot(gs[3, 0]) + ax4 = fig.add_subplot(gs[3, 1]) else: - gs = fig.add_gridspec(3,2) - ax3 = fig.add_subplot(gs[2,0]) - ax4 = fig.add_subplot(gs[2,1]) - ax0 = fig.add_subplot(gs[0,:]) - ax1 = fig.add_subplot(gs[1,:]) + gs = fig.add_gridspec(3, 2) + ax3 = fig.add_subplot(gs[2, 0]) + ax4 = fig.add_subplot(gs[2, 1]) + ax0 = fig.add_subplot(gs[0, :]) + ax1 = fig.add_subplot(gs[1, :]) # plot whitened time residuals/uncertainty v time - plot_residuals_time(fitter, plotsig = True, title = False, whitened = True, axs = ax0, figsize=(8,2.5)) + plot_residuals_time( + fitter, + plotsig=True, + title=False, + whitened=True, + axs=ax0, + figsize=(8, 2.5), + ) # Plot DM residuals/uncertainty v. 
time - plot_dm_residuals(fitter, plotsig = True, save = False, legend = False, title = False, axs = ax1, figsize=(8, 2.5)) + plot_dm_residuals( + fitter, + plotsig=True, + save=False, + legend=False, + title=False, + axs=ax1, + figsize=(8, 2.5), + ) # Plot whitened time residuals/uncertainty v orbital phase - if hasattr(fitter.model, 'binary_model_name'): - plot_residuals_orb(fitter, title = False, legend = False, \ - plotsig = True, whitened = True, axs = ax2, figsize=(8,2.5)) + if hasattr(fitter.model, "binary_model_name"): + plot_residuals_orb( + fitter, + title=False, + legend=False, + plotsig=True, + whitened=True, + axs=ax2, + figsize=(8, 2.5), + ) # plot number of whitened time residuals/uncertainty histograms - plot_measurements_v_res(fitter, nbin = 50, title = False, plotsig=True, legend = False, whitened = True,\ - axs = ax3, figsize=(4,2.5)) + plot_measurements_v_res( + fitter, + nbin=50, + title=False, + plotsig=True, + legend=False, + whitened=True, + axs=ax3, + figsize=(4, 2.5), + ) # plot number of DM residuals/uncertainty histograms - plot_measurements_v_dmres(fitter, plotsig = True, nbin = 50, legend = False, title = False, \ - axs = ax4) + plot_measurements_v_dmres( + fitter, plotsig=True, nbin=50, legend=False, title=False, axs=ax4 + ) plt.tight_layout() plt.savefig("%s_summary_plot_3_wb.png" % (fitter.model.PSR.value)) plt.close() elif ii == 3: - gs = fig.add_gridspec(1,1) + gs = fig.add_gridspec(1, 1) ax0 = fig.add_subplot(gs[0]) - plot_residuals_freq(fitter, title = False, legend = True, axs =ax0, figsize=(8,4)) + plot_residuals_freq( + fitter, title=False, legend=True, axs=ax0, figsize=(8, 4) + ) plt.tight_layout() plt.savefig("%s_summary_plot_4_wb.png" % (fitter.model.PSR.value)) plt.close() -def plot_settings(): + +def plot_settings(colorby="f"): """ Initialize plot rc params, define color scheme """ fig_width_pt = 620 - inches_per_pt = 1.0/72.27 # Convert pt to inches - golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio - fig_width = fig_width_pt*inches_per_pt # width in inches - fig_height = fig_width*golden_mean*2 # height in inches - fig_size = [fig_width,fig_height] + inches_per_pt = 1.0 / 72.27 # Convert pt to inches + golden_mean = (np.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio + fig_width = fig_width_pt * inches_per_pt # width in inches + fig_height = fig_width * golden_mean * 2 # height in inches + fig_size = [fig_width, fig_height] fontsize = 20 # for xlabel, backend labels - plotting_params = {'backend': 'pdf', 'axes.labelsize': 12, 'lines.markersize': 4, 'font.size': 12, 'xtick.major.size': 6, 'xtick.minor.size': 3, 'ytick.major.size': 6, 'ytick.minor.size': 3, 'xtick.major.width': 0.5, 'ytick.major.width': 0.5, 'xtick.minor.width': 0.5, 'ytick.minor.width': 0.5, 'lines.markeredgewidth': 1, 'axes.linewidth': 1.2, 'legend.fontsize': 10, 'xtick.labelsize': 12, 'ytick.labelsize': 10, 'savefig.dpi': 400, 'path.simplify': True, 'font.family': 'serif', 'font.serif': 'Times', 'text.usetex': True, 'figure.figsize': fig_size, 'text.latex.preamble': r'\usepackage{amsmath}'} + plotting_params = { + "backend": "pdf", + "axes.labelsize": 12, + "lines.markersize": 4, + "font.size": 12, + "xtick.major.size": 6, + "xtick.minor.size": 3, + "ytick.major.size": 6, + "ytick.minor.size": 3, + "xtick.major.width": 0.5, + "ytick.major.width": 0.5, + "xtick.minor.width": 0.5, + "ytick.minor.width": 0.5, + "lines.markeredgewidth": 1, + "axes.linewidth": 1.2, + "legend.fontsize": 10, + "xtick.labelsize": 12, + "ytick.labelsize": 10, + "savefig.dpi": 400, + "path.simplify": True, + 
"font.family": "serif", + "font.serif": "Times", + "text.usetex": True, + "figure.figsize": fig_size, + "text.latex.preamble": r"\usepackage{amsmath}", + } plt.rcParams.update(plotting_params) - # Color scheme for consistent reciever-backend combos, same as published 12.5 yr - colorschemes = {'thankful_2':{ - "327_ASP": "#BE0119", - "327_PUPPI": "#BE0119", - "430_ASP": "#FD9927", - "430_PUPPI": "#FD9927", - "L-wide_ASP": "#6BA9E2", - "L-wide_PUPPI": "#6BA9E2", - "Rcvr1_2_GASP": "#407BD5", - "Rcvr1_2_GUPPI": "#407BD5", - "Rcvr1_2_VEGAS": "#61C853", - "Rcvr_800_GASP": "#61C853", - "Rcvr_800_GUPPI": "#61C853", - "Rcvr_800_VEGAS": "#61C853", - "S-wide_ASP": "#855CA0", - "S-wide_PUPPI": "#855CA0", - "1.5GHz_YUPPI": "#45062E", - "3GHz_YUPPI": "#E5A4CB", - "6GHz_YUPPI": "#40635F", - "CHIME": "#ECE133", - }} - - # marker dictionary to be used if desired, currently all 'x' - markers = {"327_ASP": "x", - "327_PUPPI": "x", - "430_ASP": "x", - "430_PUPPI": "x", - "L-wide_ASP": "x", - "L-wide_PUPPI": "x", - "Rcvr1_2_GASP": "x", - "Rcvr1_2_GUPPI": "x", - "Rcvr1_2_VEGAS": "x", - "Rcvr_800_GASP": "x", - "Rcvr_800_GUPPI": "x", - "Rcvr_800_VEGAS": "x", - "S-wide_ASP": "x", - "S-wide_PUPPI": "x", - "1.5GHz_YUPPI": "x", - "3GHz_YUPPI": "x", - "6GHz_YUPPI": "x", - "CHIME": "x", - } - - # Define the color map option - colorscheme = colorschemes['thankful_2'] - - return markers, colorscheme + colorscheme, markerscheme = set_color_and_marker(colorby) + return markerscheme, colorscheme + def get_fitter(yaml): """ @@ -4002,13 +4902,14 @@ def get_fitter(yaml): mo, to = tc.get_model_and_toas(excised=True, usepickle=True) tc.manual_cuts(to) receivers = lu.get_receivers(to) - if tc.get_toa_type() == 'WB': + if tc.get_toa_type() == "WB": lu.add_feDMJumps(mo, receivers) else: lu.add_feJumps(mo, receivers) fo = tc.construct_fitter(to, mo) return fo, mo + def get_avg_years(fo_nb, fo_wb, avg_dict): """ Get MJDS for each data set in years @@ -4021,13 +4922,14 @@ def get_avg_years(fo_nb, fo_wb, avg_dict): """ mjd_nb = fo_nb.toas.get_mjds().value - years_nb = (mjd_nb - 51544.0)/365.25 + 2000.0 + years_nb = (mjd_nb - 51544.0) / 365.25 + 2000.0 mjd_wb = fo_wb.toas.get_mjds().value - years_wb = (mjd_wb - 51544.0)/365.25 + 2000.0 - mjds_avg = avg_dict['mjds'].value - years_avg = (mjds_avg - 51544.0)/365.25 + 2000.0 + years_wb = (mjd_wb - 51544.0) / 365.25 + 2000.0 + mjds_avg = avg_dict["mjds"].value + years_avg = (mjds_avg - 51544.0) / 365.25 + 2000.0 return years_nb, years_wb, years_avg + def get_backends(fo_nb, fo_wb, avg_dict): """ Grab backends via flags to make plotting easier @@ -4039,17 +4941,18 @@ def get_backends(fo_nb, fo_wb, avg_dict): avg_dict: from fo.resids.ecorr_average() """ - rcvr_bcknds_nb = np.array(fo_nb.toas.get_flag_value('f')[0]) + rcvr_bcknds_nb = np.array(fo_nb.toas.get_flag_value("f")[0]) rcvr_set_nb = set(rcvr_bcknds_nb) - rcvr_bcknds_wb = np.array(fo_wb.toas.get_flag_value('f')[0]) + rcvr_bcknds_wb = np.array(fo_wb.toas.get_flag_value("f")[0]) rcvr_set_wb = set(rcvr_bcknds_wb) avg_rcvr_bcknds = [] - for iis in avg_dict['indices']: + for iis in avg_dict["indices"]: avg_rcvr_bcknds.append(rcvr_bcknds_nb[iis[0]]) rcvr_bcknds_avg = np.array(avg_rcvr_bcknds) rcvr_set_avg = set(rcvr_bcknds_avg) return rcvr_bcknds_nb, rcvr_bcknds_wb, rcvr_bcknds_avg + def get_DMX_info(fo): """ Get DMX timeseries info from dmxparse @@ -4060,12 +4963,13 @@ def get_DMX_info(fo): """ dmx_dict = pint.utils.dmxparse(fo) - DMXs = dmx_dict['dmxs'].value - DMX_vErrs = dmx_dict['dmx_verrs'].value - DMX_center_MJD = 
dmx_dict['dmxeps'].value - DMX_center_Year = (DMX_center_MJD - 51544.0)/365.25 + 2000.0 + DMXs = dmx_dict["dmxs"].value + DMX_vErrs = dmx_dict["dmx_verrs"].value + DMX_center_MJD = dmx_dict["dmxeps"].value + DMX_center_Year = (DMX_center_MJD - 51544.0) / 365.25 + 2000.0 return DMXs, DMX_vErrs, DMX_center_Year + def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): """ Plot color-divided-by-receiver/BE points on any axis @@ -4082,47 +4986,44 @@ def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format): """ markers, colorscheme = plot_settings() for i, r_b in enumerate(set(bknds)): - inds = np.where(bknds==r_b)[0] + inds = np.where(bknds == r_b)[0] if not inds.tolist(): r_b_label = "" else: r_b_label = bknds[inds][0] mkr = markers[r_b_label] clr = colorscheme[r_b_label] - ax.errorbar(x[inds], y[inds] - (rn_off * u.us), yerr=err[inds], fmt=mkr, color=clr, label=r_b_label, alpha=0.5) - - ylim = (max(np.abs(y - (rn_off * u.us))).value + 0.6 * max(np.abs(err)).value) + ax.errorbar( + x[inds], + y[inds] - (rn_off * u.us), + yerr=err[inds], + fmt=mkr, + color=clr, + label=r_b_label, + alpha=0.5, + ) + + ylim = max(np.abs(y - (rn_off * u.us))).value + 0.6 * max(np.abs(err)).value ax.set_ylim(-1 * ylim * 1.08, ylim * 1.08) if be_legend: handles, labels = ax.get_legend_handles_labels() labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0])) - label_names = {"327_ASP": "ASP 327 MHz", - "327_PUPPI": "PUPPI 327 MHz", - "430_ASP": "ASP 430 MHz", - "430_PUPPI": "PUPPI 430 MHz", - "L-wide_ASP": "ASP L-wide", - "L-wide_PUPPI": "PUPPI L-wide", - "Rcvr1_2_GASP": "GASP L-band", - "Rcvr1_2_GUPPI": "GUPPI L-band", - "Rcvr1_2_VEGAS": "VEGAS L-band", - "Rcvr_800_GASP": "GASP 820 MHz", - "Rcvr_800_GUPPI": "GUPPI 820 MHz", - "Rcvr_800_VEGAS": "VEGAS 820 MHz", - "S-wide_ASP": "ASP S-wide", - "S-wide_PUPPI": "PUPPI S-wide", - "1.5GHz_YUPPI": "YUPPI 1.5 GHz", - "3GHz_YUPPI": "YUPPI 3 GHz", - "6GHz_YUPPI": "YUPPI 6 GHz", - "CHIME": "CHIME", - } fixed_labels = [label_names[l] for l in labels] - if be_format == 'vert': + if be_format == "vert": plt.legend(handles, fixed_labels, loc=(1.005, 0), fontsize=12) - if be_format == 'horiz': - plt.legend(handles, fixed_labels, loc='lower left', ncol=len(fixed_labels), borderpad=0.1, columnspacing=0.1) + if be_format == "horiz": + plt.legend( + handles, + fixed_labels, + loc="lower left", + ncol=len(fixed_labels), + borderpad=0.1, + columnspacing=0.1, + ) ax.set_ylim(-1 * ylim * 1.2, ylim * 1.08) + def rec_labels(axs, bcknds, years_avg): """ Mark transitions between backends @@ -4149,64 +5050,135 @@ def rec_labels(axs, bcknds, years_avg): has_yuppi = False for r in bcknds: - if 'ASP' in r: + if "ASP" in r: has_asp = True - if 'PUPPI' in r: + if "PUPPI" in r: has_puppi = True - if 'GASP' in r: + if "GASP" in r: has_gasp = True - if 'GUPPI' in r: + if "GUPPI" in r: has_guppi = True - if 'YUPPI' in r: + if "YUPPI" in r: has_yuppi = True if has_asp and has_puppi: for a in axs: has_ao = True - a.axvline(puppi, linewidth=0.75, color='k', linestyle='--', alpha=0.6) + a.axvline(puppi, linewidth=0.75, color="k", linestyle="--", alpha=0.6) if has_gasp and has_guppi: for a in axs: has_gbt = True - a.axvline(guppi, linewidth=0.75, color='k', linestyle='--', alpha=0.6) + a.axvline(guppi, linewidth=0.75, color="k", linestyle="--", alpha=0.6) ycoord = 1.1 x_min_yr = min(years_avg) x_max_yr = max(years_avg) tform = axs[0].get_xaxis_transform() - va = ha = 'center' + va = ha = "center" if has_ao and has_gbt: if has_yuppi: - 
axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI/GUPPI/YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (puppi + x_max_yr) / 2.0, + ycoord, + "PUPPI/GUPPI/YUPPI", + transform=tform, + va=va, + ha=ha, + ) else: - axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI/GUPPI', transform=tform, va=va, ha=ha) - axs[0].text((guppi+x_min_yr)/2., ycoord, 'ASP/GASP', transform=tform, va=va, ha=ha) - axs[0].text((guppi+puppi)/2., ycoord, 'ASP/GUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (puppi + x_max_yr) / 2.0, + ycoord, + "PUPPI/GUPPI", + transform=tform, + va=va, + ha=ha, + ) + axs[0].text( + (guppi + x_min_yr) / 2.0, ycoord, "ASP/GASP", transform=tform, va=va, ha=ha + ) + axs[0].text( + (guppi + puppi) / 2.0, ycoord, "ASP/GUPPI", transform=tform, va=va, ha=ha + ) elif has_ao and not has_gbt: if has_yuppi: - axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI/YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (puppi + x_max_yr) / 2.0, + ycoord, + "PUPPI/YUPPI", + transform=tform, + va=va, + ha=ha, + ) else: - axs[0].text((puppi+x_max_yr)/2., ycoord, 'PUPPI', transform=tform, va=va, ha=ha) - axs[0].text((puppi+x_min_yr)/2. - 0.2, ycoord, 'ASP', transform=tform, va=va, ha=ha) + axs[0].text( + (puppi + x_max_yr) / 2.0, ycoord, "PUPPI", transform=tform, va=va, ha=ha + ) + axs[0].text( + (puppi + x_min_yr) / 2.0 - 0.2, ycoord, "ASP", transform=tform, va=va, ha=ha + ) elif not has_ao and has_gbt: if has_yuppi: - axs[0].text((puppi+x_max_yr)/2., ycoord, 'GUPPI/YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (puppi + x_max_yr) / 2.0, + ycoord, + "GUPPI/YUPPI", + transform=tform, + va=va, + ha=ha, + ) else: - axs[0].text((guppi+x_max_yr)/2., ycoord, 'GUPPI', transform=tform, va=va, ha=ha) - axs[0].text((guppi+x_min_yr)/2., ycoord, 'GASP', transform=tform, va=va, ha=ha) + axs[0].text( + (guppi + x_max_yr) / 2.0, ycoord, "GUPPI", transform=tform, va=va, ha=ha + ) + axs[0].text( + (guppi + x_min_yr) / 2.0, ycoord, "GASP", transform=tform, va=va, ha=ha + ) if has_puppi and not has_asp and not has_gasp and not has_guppi: if has_yuppi: - axs[0].text((x_min_yr+x_max_yr)/2., ycoord, 'PUPPI/YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (x_min_yr + x_max_yr) / 2.0, + ycoord, + "PUPPI/YUPPI", + transform=tform, + va=va, + ha=ha, + ) else: - axs[0].text((x_min_yr+x_max_yr)/2., ycoord, 'PUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (x_min_yr + x_max_yr) / 2.0, + ycoord, + "PUPPI", + transform=tform, + va=va, + ha=ha, + ) if has_guppi and not has_asp and not has_gasp and not has_puppi: if has_yuppi: - axs[0].text((x_min_yr+x_max_yr)/2., ycoord, 'GUPPI/YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (x_min_yr + x_max_yr) / 2.0, + ycoord, + "GUPPI/YUPPI", + transform=tform, + va=va, + ha=ha, + ) else: - axs[0].text((x_min_yr+x_max_yr)/2., ycoord, 'GUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (x_min_yr + x_max_yr) / 2.0, + ycoord, + "GUPPI", + transform=tform, + va=va, + ha=ha, + ) if has_yuppi and not has_guppi and not has_puppi: - axs[0].text((x_min_yr+x_max_yr)/2., ycoord, 'YUPPI', transform=tform, va=va, ha=ha) + axs[0].text( + (x_min_yr + x_max_yr) / 2.0, ycoord, "YUPPI", transform=tform, va=va, ha=ha + ) + def rn_sub(testing, rn_subtract, fo_nb, fo_wb): if rn_subtract: @@ -4214,8 +5186,8 @@ def rn_sub(testing, rn_subtract, fo_nb, fo_wb): rn_nb = 0.0 rn_wb = 0.0 else: - rn_nb = fo_nb.current_state.xhat[0] * fo_nb.current_state.M[0,0] * 1e6 - rn_wb = fo_wb.current_state.xhat[0] * fo_wb.current_state.M[0,0] * 1e6 + rn_nb = fo_nb.current_state.xhat[0] * 
fo_nb.current_state.M[0, 0] * 1e6 + rn_wb = fo_wb.current_state.xhat[0] * fo_wb.current_state.M[0, 0] * 1e6 else: rn_nb = 0.0 rn_wb = 0.0 From 99b7a69058351ce16d417c31b7704d4c29afd0b5 Mon Sep 17 00:00:00 2001 From: tcromartie Date: Wed, 4 Sep 2024 17:09:55 +0000 Subject: [PATCH 141/193] Removed commented entries from yaml --- src/pint_pal/plot_settings.yaml | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/src/pint_pal/plot_settings.yaml b/src/pint_pal/plot_settings.yaml index 9f67ae96..f8bb90f0 100644 --- a/src/pint_pal/plot_settings.yaml +++ b/src/pint_pal/plot_settings.yaml @@ -1,3 +1,6 @@ +# This YAML contains various marker colors and shapes for the three plotting schemes +# Changes here will be applied to notebook plotting immediately (after restarting the kernel) + obs_c: { "ao": "#6BA9E2", "arecibo": "#6BA9E2", @@ -111,25 +114,17 @@ ng20_c: { "430_PUPPI": "#FD9927", "L-wide_ASP": "#BDB6F6", "L-wide_PUPPI": "#BDB6F6", - # "L-wide_ASP": "#C3BEF7", - # "L-wide_PUPPI": "#A393BF", - # "Rcvr1_2_GASP": "#81BDEE", "Rcvr1_2_GASP": "#79A3E2", "Rcvr1_2_GUPPI": "#79A3E2", "Rcvr1_2_VEGAS": "#79A3E2", "Rcvr_800_GASP": "#8DD883", "Rcvr_800_GUPPI": "#8DD883", "Rcvr_800_VEGAS": "#8DD883", - # "VEGAS": "#465922", - # "S-wide_ASP": "#D81159", - # "S-wide_PUPPI": "#D81159", "S-wide_ASP": "#C4457A", "S-wide_PUPPI": "#C4457A", "1.5GHz_YUPPI": "#EBADCB", "3GHz_YUPPI": "#E79CC1", "6GHz_YUPPI": "#DB6BA1", - # "CHIME": "#F3689B", - # "Rcvr_CHIME": "#F3689B", } obs_m: { From ec37c29dc3e031fc3ed75d4c9fe5d50ddb2da29d Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Mon, 18 Nov 2024 17:02:39 +0000 Subject: [PATCH 142/193] Fixing syntax error in pint_pulsar versioning by pinning the version at 1.0.1 --- pyproject.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 185ae2cb..fcc78a5e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,11 @@ readme = "README.md" requires-python = ">=3.8" dependencies = [ "ruamel.yaml", +<<<<<<< HEAD "pint_pulsar>=1.1.1", +======= + "pint_pulsar==1.0.1", +>>>>>>> 760f230 (Fixing syntax error in pint_pulsar versioning by pinning the version at 1.0.1) "enterprise-pulsar>=3.3.2", "enterprise-extensions>=v2.4.1", "pytest", From 861534b9e4cf47d6fd7c3bcaf52fbab387c3495b Mon Sep 17 00:00:00 2001 From: Golam Date: Thu, 5 Dec 2024 15:35:41 +0100 Subject: [PATCH 143/193] Update pyproject.toml to fix missing plot_settings.yaml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index fcc78a5e..e01a8090 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,7 +44,7 @@ classifiers = [ ] [tool.setuptools.package-data] -pint_pal = ["defaults.yaml"] +pint_pal = ["defaults.yaml", "plot_settings.yaml"] [project.urls] "Homepage" = "https://github.com/nanograv/pint_pal" From a321ac1a8f7517e98a9383c9c2ef23a5a29c74d5 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Tue, 17 Dec 2024 10:33:16 -0800 Subject: [PATCH 144/193] fix typo --- src/pint_pal/noise_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 683cee3f..6b425359 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -477,7 +477,7 @@ def model_noise( resume=resume, groups=groups, empirical_distr = sampler_kwargs['empirical_distr'] - xx) + ) if sampler_kwargs['empirical_distr'] is not None: try: samp.addProposalToCycle(samp.jp.draw_from_empirical_distr, 50) From 
329d0fd5b446a706e17de213585d26583c39b0f4 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Tue, 17 Dec 2024 12:14:18 -0800 Subject: [PATCH 145/193] add laforge, arviz to pyproject.toml --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index e01a8090..71cc9916 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,6 +13,7 @@ authors = [ { name="Anne Archibald", email="anne.archibald@nanograv.org" }, { name="Kevin Wilson", email="kevin.wilson@nanograv.org" }, { name="Ross Jennings", email="ross.jennings@nanograv.org" }, + { name="Jeremy Baier", email="jeremy.baier@nanograv.org"} ] description = "A long-lived repository for NANOGrav Pulsar Timing analysis work." readme = "README.md" @@ -36,6 +37,8 @@ dependencies = [ "notebook", "seaborn", "gitpython", + "laforge", + "arviz", ] classifiers = [ "Programming Language :: Python :: 3", From 34464477e909842c82994f57439c553af4660332 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Tue, 17 Dec 2024 12:33:09 -0800 Subject: [PATCH 146/193] changes to noise utils --- src/pint_pal/noise_utils.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 6b425359..3fa93721 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -848,7 +848,6 @@ def add_noise_to_model( noise_dict[psr_name + "_red_noise_log10_A"] ) rn_comp.RNIDX.quantity = -1 * noise_dict[psr_name + "_red_noise_gamma"] - # Add red noise to the timing model model.add_component(rn_comp, validate=True, force=True) else: @@ -909,14 +908,18 @@ def add_noise_to_model( noise = noise_class() # Make the dispersion instance. model.add_component(noise, validate=False) # add parameters - model['NE_SW'].quantity = noise_dict[f'{psr_name}_NE_SW'] - model['NE_SW'].frozen = True + if f'{psr_name}_n_earth' in sw_pars: + model['NE_SW'].quantity = noise_dict[f'{psr_name}_n_earth'] + model['NE_SW'].frozen = True + elif f'{psr_name}_sw_gp_log10_A' in sw_pars: + raise NotImplementedError('Solar Wind Dispersion power-law GP not yet implemented') + elif f'{psr_name}_sw_gp_log10_rho' in sw_pars: + raise NotImplementedError('Solar Wind Dispersion free spec GP not yet implemented') # Setup and validate the timing model to ensure things are correct model.setup() model.validate() - #FIXME:::not sure why this is broken model.noise_mtime = mtime.isot if convert_equad_to_t2: @@ -931,7 +934,7 @@ def plot_free_specs(c0, freqs, fs_type='Red Noise'): """ Plot free specs when using free spectral model """ - ImpelmentationError("not yet implemented") + raise NotImplementedError("not yet implemented") return None From edb1f1c840dd3eef8bb0683683c2f636aef0b790 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Tue, 17 Dec 2024 13:36:34 -0800 Subject: [PATCH 147/193] fixing stuff in noise utils --- src/pint_pal/noise_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 3fa93721..54f7a726 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -518,7 +518,7 @@ def model_noise( #**noise_kwargs, ) log.info("Beginnning to sample...") - samp.sample(niter=n_iter, savepath=outdir) + samp.sample(niter=sampler_kwargs['n_iter'], savepath=outdir) log.info("Finished sampling.") # sorta redundant to have both, but la_forge doesn't look for .npy files chain = np.load(f'{outdir}/chain_1.npy') @@ -973,6 +973,7 @@ def setup_discovery_noise(psr, elif model_kwargs['rn_psd'] == 
'free_spectral': model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['chromgp_nfreqs'], T=time_span, name='dm_gp')) psl = ds.PulsarLikelihood(model_components) + ## this prior transform is no longer required and should be removed prior = ds_prior.makelogprior_uniform(psl.logL.params, ds.priordict_standard) log_x = makelogtransform_uniform(psl.logL) # x0 = sample_uniform(psl.logL.params) From 5f39b56ea0e5534a2397df1ca22b85e0cf6d1def Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Wed, 18 Dec 2024 11:32:27 -0800 Subject: [PATCH 148/193] updating to la-forge in the pyproject --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 71cc9916..f979eaae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,7 +37,7 @@ dependencies = [ "notebook", "seaborn", "gitpython", - "laforge", + "la-forge", "arviz", ] classifiers = [ From 91c535022c34c971949c4244d6decfc9480cc14b Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Wed, 18 Dec 2024 11:41:05 -0800 Subject: [PATCH 149/193] changing test nb --- nb_templates/process_v1.2.ipynb | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nb_templates/process_v1.2.ipynb b/nb_templates/process_v1.2.ipynb index ae1871cd..446a3b5d 100644 --- a/nb_templates/process_v1.2.ipynb +++ b/nb_templates/process_v1.2.ipynb @@ -257,7 +257,12 @@ "if run_noise_analysis or use_existing_noise_dir:\n", " mo_new = copy.deepcopy(mo)\n", " lu.remove_noise(mo_new)\n", - " nu.model_noise(mo_new, to, using_wideband = using_wideband, run_noise_analysis = run_noise_analysis, n_iter = num_noise_iter)\n", + " nu.model_noise(mo_new, to,\n", + " using_wideband = using_wideband,\n", + " run_noise_analysis = run_noise_analysis,\n", + " model_kwargs=tc.config['noise_run']['model'],\n", + " sampler_kwargs=tc.config['noise_run']['inference'],\n", + " )\n", " try:\n", " mo_new = nu.add_noise_to_model(mo_new, using_wideband = using_wideband, base_dir=tc.get_noise_dir(), \n", " compare_dir=tc.get_compare_noise_dir(), no_corner_plot=tc.get_no_corner())\n", From 7728611c3b6a2ab5c8c6df51aa4aeee1bfbb526e Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 19 Dec 2024 06:29:42 -0800 Subject: [PATCH 150/193] change enterprise to lowercase; add noise_run section to test config files; fix typo in setup discovery --- src/pint_pal/noise_utils.py | 24 ++++++++++++------------ tests/configs/J0605+3757.nb.yaml | 11 +++++++++++ tests/configs/J0605+3757.wb.yaml | 11 +++++++++++ 3 files changed, 34 insertions(+), 12 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 54f7a726..08a2ff5a 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -363,10 +363,10 @@ def model_noise( Recommended to pass model_kwargs and sampler_kwargs from the config file. Default kwargs given by function `get_model_and_sampler_default_settings`. 
Important configuration parameters:
-    likelihood: choose from ['Enterprise', 'discovery']
-        enterprise -- Enterprise likelihood
+    likelihood: choose from ['enterprise', 'discovery']
+        enterprise -- enterprise likelihood
         discovery -- various numpyro samplers with a discovery likelihood
-    sampler: for Enterprise choose from ['PTMCMCSampler','GibbsSampler']
+    sampler: for enterprise choose from ['PTMCMCSampler','GibbsSampler']
              for discovery choose from ['HMC', 'NUTS', 'HMC-GIBBS']

     Returns
@@ -410,12 +410,12 @@ def model_noise(

     # Create enterprise Pulsar object for supplied pulsar timing model (mo) and toas (to)

-    log.info(f"Creating Enterprise.Pulsar object from model with {mo.NTOA.value} toas...")
+    log.info(f"Creating enterprise.Pulsar object from model with {mo.NTOA.value} toas...")
     e_psr = Pulsar(mo, to)
     ##########################################################
     ################       PTMCMCSampler       ##################
     ##########################################################
-    if likelihood == "Enterprise" and sampler == 'PTMCMCSampler':
+    if likelihood == "enterprise" and sampler == 'PTMCMCSampler':
         log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}")
         # Setup a single pulsar PTA using enterprise_extensions
         # Ensure n_iter is an integer
@@ -494,7 +494,7 @@ def model_noise(
     ##############################################################
     ##################        GibbsSampler      ########################
     ##############################################################
-    elif likelihood == "Enterprise" and sampler == "GibbsSampler":
+    elif likelihood == "enterprise" and sampler == "GibbsSampler":
         try:
             from enterprise_extensions.gibbs_sampling.gibbs_chromatic import GibbsSampler
         except:
             log.error("Please upgrade to the latest version of enterprise_extensions to use GibbsSampler.")
             raise ValueError("Please install a version of enterprise extensions which contains the `gibbs_sampling` module.")
@@ -575,7 +575,7 @@ def model_noise(
     else:
         log.error(
             f"Invalid likelihood ({likelihood}) and sampler ({sampler}) combination." \
-            + "\nCan only use Enterprise with PTMCMCSampler or GibbsSampler."
+            + "\nCan only use enterprise with PTMCMCSampler or GibbsSampler."
) if return_sampler: return samp @@ -968,10 +968,10 @@ def setup_discovery_noise(psr, elif model_kwargs['dmgp_psd'] == 'free_spectral': model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['dmgp_nfreqs'], T=time_span, name='dm_gp')) if model_kwargs['inc_chromgp']: - if model_kwargs['rn_psd'] == 'powerlaw': - model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['chromgp_nfreqs'], T=time_span, name='dm_gp')) - elif model_kwargs['rn_psd'] == 'free_spectral': - model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['chromgp_nfreqs'], T=time_span, name='dm_gp')) + if model_kwargs['chrom_psd'] == 'powerlaw': + model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['chromgp_nfreqs'], T=time_span, name='chrom_gp')) + elif model_kwargs['chrom_psd'] == 'free_spectral': + model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['chromgp_nfreqs'], T=time_span, name='chrom_gp')) psl = ds.PulsarLikelihood(model_components) ## this prior transform is no longer required and should be removed prior = ds_prior.makelogprior_uniform(psl.logL.params, ds.priordict_standard) @@ -1097,7 +1097,7 @@ def get_model_and_sampler_default_settings(): # path to empirical distribution } sampler_defaults = { - 'likelihood': 'Enterprise', + 'likelihood': 'enterprise', 'sampler': 'PTMCMCSampler', # ptmcmc kwargs 'n_iter': 2e5, diff --git a/tests/configs/J0605+3757.nb.yaml b/tests/configs/J0605+3757.nb.yaml index 949bd3f5..b08dae5b 100644 --- a/tests/configs/J0605+3757.nb.yaml +++ b/tests/configs/J0605+3757.nb.yaml @@ -25,6 +25,16 @@ outlier: # control outlier analysis runs n-burn: 1000 n-samples: 20000 +noise_run: + model: + inc_rn: true + inc_dmgp: false + inc_chromgp: false + inference: + likelihood: enterprise + sampler: PTMCMCSampler + n_iter: 200000 + intermediate-results: # use results from previous runs #noise-dir: /nanograv/share/15yr/timing/intermediate/20220301.Noise.nb.ac12e98/ #compare-noise-dir: /nanograv/share/15yr/timing/intermediate/20220222.Noise.nb.4e07003/ @@ -53,3 +63,4 @@ changelog: - '2021-09-24 joe.swiggum NOTE: updated AO/GBO coords (pint v0.8.3) and refit' - '2021-09-30 joe.swiggum NOTE: par file handed off to DWG for v1.0 is J0605+3757_PINT_20210928.nb.par' - '2022-03-08 joe.swiggum READY_FOR: v1.1' +- '2024-12-19 jeremy.baier NOTE: adding noise_run to config' diff --git a/tests/configs/J0605+3757.wb.yaml b/tests/configs/J0605+3757.wb.yaml index bf7b6d16..6734fa6b 100644 --- a/tests/configs/J0605+3757.wb.yaml +++ b/tests/configs/J0605+3757.wb.yaml @@ -23,6 +23,16 @@ dmx: # control dmx windowing/fixing max-sw-delay: 0.1 # finer binning when solar wind delay > threshold (us) custom-dmx: [] # designated by [mjd_low,mjd_hi,binsize] +noise_run: + model: + inc_rn: true + inc_dmgp: false + inc_chromgp: false + inference: + likelihood: enterprise + sampler: PTMCMCSampler + n_iter: 200000 + intermediate-results: # use results from previous runs #noise-dir: /nanograv/share/15yr/timing/intermediate/20221021.Noise.wb.a8ff4ddc/ #compare-noise-dir: /nanograv/share/15yr/timing/intermediate/20220822.Noise.wb.a77c37bb/ @@ -49,3 +59,4 @@ changelog: - '2021-09-24 joe.swiggum NOTE: updated AO/GBO coords (pint v0.8.3) and refit' - '2021-09-30 joe.swiggum NOTE: par file handed off to DWG for v1.0 is J0605+3757_PINT_20210928.wb.par' - '2022-08-24 thankful.cromartie NOISE: changed to 20220822.Noise.wb.a77c37bb' +- '2024-12-19 jeremy.baier NOTE: adding noise_run to config' From 010a2632f24db550580bc013265a4d05ea4c101f 
Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 19 Dec 2024 08:52:27 -0800 Subject: [PATCH 151/193] delete 'num noise iter' from notebook --- nb_templates/process_v1.2.ipynb | 1 - 1 file changed, 1 deletion(-) diff --git a/nb_templates/process_v1.2.ipynb b/nb_templates/process_v1.2.ipynb index 446a3b5d..9f69b712 100644 --- a/nb_templates/process_v1.2.ipynb +++ b/nb_templates/process_v1.2.ipynb @@ -48,7 +48,6 @@ "autorun = False\n", "run_Ftest = True # Set to False if you don't need F-tests and want a faster notebook run!\n", "check_excision = True\n", - "num_noise_iter = 2e5\n", "\n", "if not autorun:\n", " run_noise_analysis = False \n", From 8f4275b38198918a2393b4dabb7e01bc657c6eb3 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 19 Dec 2024 13:48:47 -0800 Subject: [PATCH 152/193] fixing SW GP in addition to noise model --- src/pint_pal/noise_utils.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 08a2ff5a..5badbfd4 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -875,7 +875,7 @@ def add_noise_to_model( ###### FREE SPECTRAL (WaveX) DM NOISE ###### elif f'{psr_name}_dm_gp_log10_rho_0' in dm_pars: log.info('Adding Free Spectral DM GP as DMWaveXnoise to par file') - NotImplementedError('DMWaveXNoise not yet implemented') + raise NotImplementedError('DMWaveXNoise not yet implemented') # Check to see if higher order chromatic noise is present chrom_pars = [key for key in noise_pars if "_chrom_gp" in key] @@ -885,7 +885,7 @@ def add_noise_to_model( log.info('Adding Powerlaw CHROM GP noise as PLCMNoise to par file') # Add the ML RN parameters to their component chrom_comp = pm.noise_model.PLCMNoise() - chrom_keys = np.array([key for key, val in noise_dict.items() if "_chrom_gp_" in key]) + # chrom_keys = np.array([key for key, val in noise_dict.items() if "_chrom_gp_" in key]) chrom_comp.TNCMAMP.quantity = convert_to_RNAMP( noise_dict[psr_name + "_chrom_gp_log10_A"] ) @@ -897,7 +897,7 @@ def add_noise_to_model( ###### FREE SPECTRAL (WaveX) DM NOISE ###### elif f'{psr_name}_chrom_gp_log10_rho_0' in chrom_pars: log.info('Adding Free Spectral CHROM GP as CMWaveXnoise to par file') - NotImplementedError('CMWaveXNoise not yet implemented') + raise NotImplementedError('CMWaveXNoise not yet implemented') # Check to see if solar wind is present sw_pars = [key for key in noise_pars if "sw_r2" in key] @@ -906,14 +906,22 @@ def add_noise_to_model( all_components = Component.component_types noise_class = all_components["SolarWindDispersion"] noise = noise_class() # Make the dispersion instance. 
- model.add_component(noise, validate=False) + model.add_component(noise, validate=False, force=False) # add parameters if f'{psr_name}_n_earth' in sw_pars: model['NE_SW'].quantity = noise_dict[f'{psr_name}_n_earth'] model['NE_SW'].frozen = True - elif f'{psr_name}_sw_gp_log10_A' in sw_pars: - raise NotImplementedError('Solar Wind Dispersion power-law GP not yet implemented') - elif f'{psr_name}_sw_gp_log10_rho' in sw_pars: + if f'{psr_name}_sw_gp_log10_A' in sw_pars: + sw_comp = pm.noise_model.PLSWNoise() + sw_comp.TNSWAMP.quantity = convert_to_RNAMP(noise_dict[f'{psr_name}_sw_gp_log10_A']) + sw_comp.TNSWAMP.frozen = True + sw_comp.TNSWGAM.quantity = -1.*noise_dict[f'{psr_name}_sw_gp_gamma'] + sw_comp.TNSWGAM.frozen = True + # FIXMEEEEEEE : need to figure out some way to softcode this + sw_comp.TNSWC.quantity = 10 + sw_comp.TNSWC.frozen = True + model.add_component(sw_comp, validate=False, force=True) + if f'{psr_name}_sw_gp_log10_rho' in sw_pars: raise NotImplementedError('Solar Wind Dispersion free spec GP not yet implemented') From bb34a953ccc5d11dc12c95836abff41dfe78c77b Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 11 Dec 2024 14:10:53 -0500 Subject: [PATCH 153/193] Avoid slicing toas objects --- src/pint_pal/dmx_utils.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index 55a65383..3bb37bfb 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -460,27 +460,27 @@ def get_dmx_epoch(toas: pint.toa.TOAs, weighted_average: bool = True) -> float: return epoch -def get_dmx_freqs(toas: pint.toa.TOAs, allow_wideband: bool = True) -> Tuple[float, float]: +def get_dmx_freqs(toas: pint.toa.TOAs, mask: np.ndarray, allow_wideband: bool = True) -> Tuple[float, float]: """ Return the lowest and highest frequency of the TOAs in a DMX bin. - toas is a PINT TOA object of TOAs in the DMX bin. + toas is a PINT TOA object containing all the relevant TOAs. + mask is a boolean mask that identifies the TOAs in this DMX bin. allow_wideband=True will consider the -fratio and -bw flags in the determination of these frequencies, if toas contains wideband TOAs. 
""" - freqs = toas.get_freqs().value # MHz + freqs = toas.get_freqs()[mask].value # MHz high_freq = 0.0 low_freq = np.inf # indices of wideband TOAs - iwb = np.arange(len(toas))[np.array(toas.get_flag_value('pp_dm')[0]) \ - != None] + wb_mask = mask & (np.array(toas.get_flag_value('pp_dm')[0]) != None) if allow_wideband: # the following arrays will be empty if narrowband TOAs - fratios = toas[iwb].get_flag_value('fratio') # frequency ratio / WB TOA - fratios = np.array(fratios[0]) - bws = toas[iwb].get_flag_value('bw') # bandwidth [MHz] / WB TOA - bws = np.array(bws[0]) + fratios = toas.get_flag_value('fratio')[0] # frequency ratio / WB TOA + fratios = np.array(fratios[wb_mask]) + bws = toas.get_flag_value('bw')[0] # bandwidth [MHz] / WB TOA + bws = np.array(bws[wb_mask]) low_freqs = bws.astype('float32') / (fratios.astype('float32') - 1) high_freqs = bws.astype('float32') + low_freqs From e22bbe24c9d79a2cddafa9884a65e12cbe94c813 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 11 Dec 2024 14:13:11 -0500 Subject: [PATCH 154/193] Fix call sites --- src/pint_pal/dmx_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index 3bb37bfb..6638efa6 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -530,7 +530,7 @@ def check_frequency_ratio( low_mjd, high_mjd = dmx_range[0], dmx_range[1] mask = get_dmx_mask(toas, low_mjd, high_mjd, strict_inclusion=strict_inclusion) - low_freq, high_freq = get_dmx_freqs(toas[mask], + low_freq, high_freq = get_dmx_freqs(toas, mask, allow_wideband=allow_wideband) if high_freq / low_freq >= frequency_ratio: # passes toa_mask += mask @@ -612,7 +612,7 @@ def check_solar_wind( low_mjd, high_mjd = dmx_range[0], dmx_range[1] mask = get_dmx_mask(toas, low_mjd, high_mjd, strict_inclusion=strict_inclusion) - low_freq, high_freq = get_dmx_freqs(toas[mask], + low_freq, high_freq = get_dmx_freqs(toas, mask, allow_wideband=allow_wideband) # Convert to time delay, using calc from David's code (fixed) theta = np.pi - phis[mask] # rad @@ -911,7 +911,7 @@ def make_dmx( high_mjd = max(dmx_ranges[irange]) mask = get_dmx_mask(toas, low_mjd, high_mjd, strict_inclusion) epoch = get_dmx_epoch(toas[mask], weighted_average) - low_freq, high_freq = get_dmx_freqs(toas[mask], allow_wideband) + low_freq, high_freq = get_dmx_freqs(toas, mask, allow_wideband) dmx_parameter = DMXParameter() dmx_parameter.idx = idx dmx_parameter.val = dmx_vals[irange] From 3d521521ca84fc0afd4def92005a7a8bab72c962 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 11 Dec 2024 14:22:00 -0500 Subject: [PATCH 155/193] Slice after array conversion --- src/pint_pal/dmx_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index 6638efa6..cf932296 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -478,9 +478,9 @@ def get_dmx_freqs(toas: pint.toa.TOAs, mask: np.ndarray, allow_wideband: bool = wb_mask = mask & (np.array(toas.get_flag_value('pp_dm')[0]) != None) if allow_wideband: # the following arrays will be empty if narrowband TOAs fratios = toas.get_flag_value('fratio')[0] # frequency ratio / WB TOA - fratios = np.array(fratios[wb_mask]) + fratios = np.array(fratios)[wb_mask] bws = toas.get_flag_value('bw')[0] # bandwidth [MHz] / WB TOA - bws = np.array(bws[wb_mask]) + bws = np.array(bws)[wb_mask] low_freqs = bws.astype('float32') / (fratios.astype('float32') - 1) high_freqs = bws.astype('float32') + 
low_freqs From 573e7f71d803bbe8a6d0752903ec17e3d2a85352 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 11 Dec 2024 14:31:01 -0500 Subject: [PATCH 156/193] Let Numpy do the mins and maxes --- src/pint_pal/dmx_utils.py | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index cf932296..ab374250 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -471,26 +471,21 @@ def get_dmx_freqs(toas: pint.toa.TOAs, mask: np.ndarray, allow_wideband: bool = """ freqs = toas.get_freqs()[mask].value # MHz - high_freq = 0.0 - low_freq = np.inf + high_freq = np.max(freqs) + low_freq = np.min(freqs) - # indices of wideband TOAs - wb_mask = mask & (np.array(toas.get_flag_value('pp_dm')[0]) != None) - if allow_wideband: # the following arrays will be empty if narrowband TOAs + if allow_wideband: + # indices of wideband TOAs + wb_mask = mask & (np.array(toas.get_flag_value('pp_dm')[0]) != None) + # the following arrays will be empty if all TOAs are narrowband fratios = toas.get_flag_value('fratio')[0] # frequency ratio / WB TOA fratios = np.array(fratios)[wb_mask] bws = toas.get_flag_value('bw')[0] # bandwidth [MHz] / WB TOA bws = np.array(bws)[wb_mask] low_freqs = bws.astype('float32') / (fratios.astype('float32') - 1) + low_freq = min(low_freq, np.min(low_freqs)) high_freqs = bws.astype('float32') + low_freqs - - for itoa in range(len(toas)): - if itoa in iwb and allow_wideband: - if low_freqs[itoa] < low_freq: low_freq = low_freqs[itoa] - if high_freqs[itoa] > high_freq: high_freq = high_freqs[itoa] - else: - if freqs[itoa] < low_freq: low_freq = freqs[itoa] - if freqs[itoa] > high_freq: high_freq = freqs[itoa] + high_freq = max(high_freq, np.max(high_freqs)) return low_freq, high_freq From 76603ae83a47927b90974f896c2c141e29547eb0 Mon Sep 17 00:00:00 2001 From: Ross Jennings Date: Wed, 11 Dec 2024 14:36:03 -0500 Subject: [PATCH 157/193] Add initial values --- src/pint_pal/dmx_utils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py index ab374250..eac6432b 100644 --- a/src/pint_pal/dmx_utils.py +++ b/src/pint_pal/dmx_utils.py @@ -471,8 +471,8 @@ def get_dmx_freqs(toas: pint.toa.TOAs, mask: np.ndarray, allow_wideband: bool = """ freqs = toas.get_freqs()[mask].value # MHz - high_freq = np.max(freqs) - low_freq = np.min(freqs) + high_freq = np.max(freqs, initial=0.) 
+    low_freq = np.min(freqs, initial=np.inf)

     if allow_wideband:
         # indices of wideband TOAs
         wb_mask = mask & (np.array(toas.get_flag_value('pp_dm')[0]) != None)
         # the following arrays will be empty if all TOAs are narrowband
         fratios = toas.get_flag_value('fratio')[0] # frequency ratio / WB TOA
         fratios = np.array(fratios)[wb_mask]
         bws = toas.get_flag_value('bw')[0] # bandwidth [MHz] / WB TOA
         bws = np.array(bws)[wb_mask]
         low_freqs = bws.astype('float32') / (fratios.astype('float32') - 1)
-        low_freq = min(low_freq, np.min(low_freqs))
+        low_freq = min(low_freq, np.min(low_freqs, initial=np.inf))
         high_freqs = bws.astype('float32') + low_freqs
-        high_freq = max(high_freq, np.max(high_freqs))
+        high_freq = max(high_freq, np.max(high_freqs, initial=0.))

     return low_freq, high_freq

From 1e93d15b873f5906bb12da71a34fd5d9e3c18416 Mon Sep 17 00:00:00 2001
From: Ross Jennings
Date: Fri, 13 Dec 2024 16:42:29 +0000
Subject: [PATCH 158/193] Also avoid slicing TOAs for log messages

---
 src/pint_pal/dmx_utils.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/pint_pal/dmx_utils.py b/src/pint_pal/dmx_utils.py
index eac6432b..2049674e 100644
--- a/src/pint_pal/dmx_utils.py
+++ b/src/pint_pal/dmx_utils.py
@@ -531,9 +531,9 @@
             toa_mask += mask
             dmx_range_mask[irange] = True
         else: # fails
-            nfail_toas += len(toas[mask])
+            nfail_toas += np.sum(mask)
             if not quiet:
-                msg = f"DMX range with pythonic index {irange}, correponding to the DMX range {dmx_ranges[irange]}, contains TOAs that do not pass the frequency ratio test (TOAs with MJDs {toas[mask].get_mjds().value})."
+                msg = f"DMX range with pythonic index {irange}, corresponding to the DMX range {dmx_ranges[irange]}, contains TOAs that do not pass the frequency ratio test (TOAs with MJDs {toas.get_mjds()[mask].value})."
                 log.info(msg)

     nfail_ranges = sum(np.logical_not(dmx_range_mask))
@@ -621,7 +621,7 @@
             toa_mask += mask
             dmx_range_mask[irange] = True
             if not quiet:
-                msg = f"DMX range with pythonic index {irange}, correponding to the DMX range {dmx_ranges[irange]}, contains TOAs that are affected by the solar wind (TOAs with MJDs {toas[mask].get_mjds().value})."
+                msg = f"DMX range with pythonic index {irange}, corresponding to the DMX range {dmx_ranges[irange]}, contains TOAs that are affected by the solar wind (TOAs with MJDs {toas.get_mjds()[mask].value})."
                 log.info(msg)
     nsolar = sum(dmx_range_mask)
     if not quiet and nsolar:
From da3b9afb0c30c1a5d21781f535492c8c8129a133 Mon Sep 17 00:00:00 2001
From: Joseph Glaser
Date: Wed, 18 Dec 2024 16:18:41 +0000
Subject: [PATCH 159/193] Update to use new version of PINT.
--- pyproject.toml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index f979eaae..e475bdea 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,11 +20,7 @@ readme = "README.md" requires-python = ">=3.8" dependencies = [ "ruamel.yaml", -<<<<<<< HEAD "pint_pulsar>=1.1.1", -======= - "pint_pulsar==1.0.1", ->>>>>>> 760f230 (Fixing syntax error in pint_pulsar versioning by pinning the version at 1.0.1) "enterprise-pulsar>=3.3.2", "enterprise-extensions>=v2.4.1", "pytest", From 57703510a97f3190419acfd8333af946a69031a6 Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Mon, 17 Feb 2025 23:47:01 +0000 Subject: [PATCH 160/193] Added a few frozen params to frozen ignore list --- src/pint_pal/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py index ba71d011..3ecfd28c 100644 --- a/src/pint_pal/utils.py +++ b/src/pint_pal/utils.py @@ -716,7 +716,7 @@ def alert(s): and pm.frozen and pm.value is not None and pm.value != 0): - if p in {"START", "FINISH", "POSEPOCH", "DMEPOCH", "PEPOCH", "TZRMJD", "DM", "DMX", "NTOA", "CHI2", "DMDATA", "TZRFRQ", "RNAMP", "RNIDX"}: + if p in {"START", "FINISH", "POSEPOCH", "DMEPOCH", "PEPOCH", "TZRMJD", "DM", "DMX", "NTOA", "CHI2", "DMDATA", "TZRFRQ", "RNAMP", "RNIDX", "CHI2R", "TRES", "SWP"}: ignoring.append(p) continue skip = False @@ -1348,4 +1348,4 @@ def no_ecorr_average(toas, resids, use_noise_model=True): return no_avg - \ No newline at end of file + From 84184565d405e1a351629173bb44499092b084b3 Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Tue, 18 Feb 2025 01:28:58 +0000 Subject: [PATCH 161/193] Get rid of bad warnings for noise chain directories --- src/pint_pal/utils.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py index 3ecfd28c..8d4bbb9e 100644 --- a/src/pint_pal/utils.py +++ b/src/pint_pal/utils.py @@ -1240,12 +1240,10 @@ def check_recentness_noise(tc): return None, None d = os.path.abspath(tc.get_noise_dir()) - noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p))) - for p in sorted(glob.glob(os.path.join(d, - "..", - "*.Noise.*", - tc.get_source()+"_"+tc.get_toa_type().lower(), - "chain*.txt")))] + if os.path.isfile(os.path.join(d, "chain*.txt")): + noise_runs = glob.glob(os.path.join(d, "chain*.txt")) + else: + noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p))) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower(), "chain*.txt")))] used_chains = os.path.basename(d) available_chains = [os.path.basename(n) for n in noise_runs] log.info(f"Using: {used_chains}") From 9091b6c3ddb9ec818a70dc7a4cb759213c2589b3 Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Wed, 19 Feb 2025 02:30:00 +0000 Subject: [PATCH 162/193] Reverted behavior of noise chain search, changed date format in PDF for range --- src/pint_pal/utils.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py index 8d4bbb9e..1a97330c 100644 --- a/src/pint_pal/utils.py +++ b/src/pint_pal/utils.py @@ -544,7 +544,9 @@ def alert(s): # Get some values from the fitter start = fitter.toas.first_MJD.value + start_ymd = fitter.toas.first_MJD.to_value(format='iso') finish = fitter.toas.last_MJD.value + finish_ymd = fitter.toas.last_MJD.to_value(format='iso') span = finish - start label = f"{psr} {'narrowband' if NB else 'wideband'}" @@ -573,8 +575,8 @@ def alert(s): 
for tf in tim_files: fsum.write(r'\item ' + verb(tf.split('/')[-1]) + '\n') fsum.write(r'\end{itemize}' + "\n") - fsum.write('Span: %.1f years (%.1f -- %.1f)\\\\\n ' % (span/365.24, - year(float(start)), year(float(finish)))) + fsum.write('Span: %.1f years (%s -- %s)\\\\\n ' % (span/365.24, + str(start_ymd).split(' ')[0], str(finish_ymd).split(' ')[0])) if NB: try: @@ -1240,10 +1242,14 @@ def check_recentness_noise(tc): return None, None d = os.path.abspath(tc.get_noise_dir()) - if os.path.isfile(os.path.join(d, "chain*.txt")): - noise_runs = glob.glob(os.path.join(d, "chain*.txt")) - else: - noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p))) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower(), "chain*.txt")))] + if glob.glob(os.path.join(d,"chain*.txt")): + log.warning(f'Ignoring chains directly in {d}. Chains should be in a subdirectory of {os.path.split(d)[1]} called {tc.get_source()}_{tc.get_toa_type().lower()}') + noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p))) + for p in sorted(glob.glob(os.path.join(d, + "..", + "????-??-??", + tc.get_source()+"_"+tc.get_toa_type().lower(), + "chain*.txt")))] used_chains = os.path.basename(d) available_chains = [os.path.basename(n) for n in noise_runs] log.info(f"Using: {used_chains}") From 010c8ddeff0391b454096629b65bf25f7eab8b9d Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Thu, 20 Feb 2025 00:29:21 +0000 Subject: [PATCH 163/193] Allow for noise date directories of format YYYY-MM-DD(a,b,c,etc.). Disallow overwriting noise directories. --- src/pint_pal/noise_utils.py | 16 +++++++++------- src/pint_pal/utils.py | 2 +- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 5badbfd4..e1d2bedd 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -387,14 +387,16 @@ def model_noise( if not using_wideband: outdir = base_op_dir + mo.PSR.value + "_nb/" else: - outdir = base_op_dir + mo.PSR.value + "_wb/" - os.makedirs(outdir, exist_ok=True) - if os.path.exists(outdir) and (run_noise_analysis) and (not resume): - log.info( - "A noise directory for pulsar {} already exists! Re-running noise modeling from scratch".format( - mo.PSR.value - ) + outdir = base_op_dir + mo.PSR.value + '_wb/' + + if os.path.exists(outdir) and run_noise_analysis and not resume: + log.warning( + f"A noise directory for pulsar {mo.PSR.value} already exists! " + "Please rename the existing directory or specify a new location with " + "base_op_dir. If you're trying to resume noise modeling, use " + "resume=True with the existing directory. Skipping noise analysis." ) + return None elif os.path.exists(outdir) and (run_noise_analysis) and (resume): log.info( "A noise directory for pulsar {} already exists! 
Re-running noise modeling starting from previous chain".format(
                 mo.PSR.value
             )
         )
diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py
index 1a97330c..cc8d27ef 100644
--- a/src/pint_pal/utils.py
+++ b/src/pint_pal/utils.py
@@ -1247,7 +1247,7 @@ def check_recentness_noise(tc):
     noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p)))
                   for p in sorted(glob.glob(os.path.join(d,
                                                          "..",
-                                                         "????-??-??",
+                                                         "????-??-*",
                                                          tc.get_source()+"_"+tc.get_toa_type().lower(),
                                                          "chain*.txt")))]
     used_chains = os.path.basename(d)
From dc0039e08f444ed8a3198dc574e515e5f862d553 Mon Sep 17 00:00:00 2001
From: Thankful Cromartie
Date: Tue, 25 Feb 2025 17:52:52 +0000
Subject: [PATCH 164/193] Support for home directory noise runs

---
 src/pint_pal/utils.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py
index cc8d27ef..d1429cdd 100644
--- a/src/pint_pal/utils.py
+++ b/src/pint_pal/utils.py
@@ -1252,6 +1252,15 @@ def check_recentness_noise(tc):
                                                          "chain*.txt")))]
     used_chains = os.path.basename(d)
     available_chains = [os.path.basename(n) for n in noise_runs]
+
+    if not noise_runs:
+        log.warning('Looking for noise chains in working directory.')
+        noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p))) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower()+"*", "chain*.txt")))]
+        if len(noise_runs) > 1:
+            log.warning(f'{len(noise_runs)} noise chains found in the working directory. Using first in sorted list.')
+        used_chains = os.path.basename(noise_runs[-1])
+        available_chains = [os.path.basename(n) for n in noise_runs]
+
     log.info(f"Using: {used_chains}")
     log.info(f"Available: {' '.join(available_chains)}")
     try:
From eb0c8e9cb9da70d37c4a4ebce3f988b22014e025 Mon Sep 17 00:00:00 2001
From: Ross Jennings
Date: Sun, 2 Mar 2025 13:53:52 -0500
Subject: [PATCH 165/193] Add fastshermanmorrison to requirements

---
 pyproject.toml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/pyproject.toml b/pyproject.toml
index e475bdea..a48cbc97 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -33,8 +33,12 @@ dependencies = [
     "notebook",
     "seaborn",
     "gitpython",
+<<<<<<< HEAD
     "la-forge",
     "arviz",
+=======
+    "fastshermanmorrison-pulsar",
+>>>>>>> 8c63ad5 (Add fastshermanmorrison to requirements)
 ]
 classifiers = [
     "Programming Language :: Python :: 3",
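[Editor's note: the next patch strips the discovery-likelihood pieces back out of
noise_utils.py, so this is a convenient point to recap how the config-driven noise
run introduced in PATCH 149/150 is invoked. The sketch below is illustrative only
and is not part of any patch; it assumes the notebook template's module aliases
(lite_utils as lu, noise_utils as nu), the TimingConfiguration entry point, and the
test config extended in PATCH 150, and otherwise reuses only calls that appear
verbatim elsewhere in this series.]

    # Minimal sketch of a narrowband enterprise + PTMCMCSampler noise run,
    # mirroring the notebook cell patched in PATCH 149. The config path and
    # module aliases are assumptions, not part of any patch.
    import copy
    import pint_pal.lite_utils as lu
    import pint_pal.noise_utils as nu
    from pint_pal.timingconfiguration import TimingConfiguration

    tc = TimingConfiguration("tests/configs/J0605+3757.nb.yaml")
    mo, to = tc.get_model_and_toas(excised=True, usepickle=True)

    mo_new = copy.deepcopy(mo)
    lu.remove_noise(mo_new)  # strip old noise parameters before re-modeling them
    nu.model_noise(mo_new, to,
                   using_wideband=False,
                   run_noise_analysis=True,
                   model_kwargs=tc.config['noise_run']['model'],
                   sampler_kwargs=tc.config['noise_run']['inference'])

[With base_op_dir left at "./", the chains land in ./J0605+3757_nb/, after which
nu.add_noise_to_model() folds the recovered parameters back into the timing model,
as in the notebook template.]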
From b373889cca683e5973d3f0d94d7401adf879c578 Mon Sep 17 00:00:00 2001
From: Jeremy Baier
Date: Thu, 6 Mar 2025 11:33:05 -0800
Subject: [PATCH 166/193] yonking the discovery changes into another PR

---
 src/pint_pal/noise_utils.py | 318 ++++++++++--------------------------
 1 file changed, 82 insertions(+), 236 deletions(-)

diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py
index e1d2bedd..2ecf91b3 100644
--- a/src/pint_pal/noise_utils.py
+++ b/src/pint_pal/noise_utils.py
@@ -1,5 +1,4 @@
 import numpy as np, os, json
-import arviz as az

 from astropy import log
 from astropy.time import Time
@@ -12,36 +11,12 @@
 from pint.models.parameter import maskParameter
 from pint.models.timing_model import Component

-import matplotlib as mpl
 import matplotlib.pyplot as pl

 import la_forge.core as co
-import la_forge.diagnostics as dg
-import la_forge.utils as lu
-
-# Imports necessary for e_e noise modeling functions
-import functools
-from collections import OrderedDict
-
-from enterprise.signals import parameter
-from enterprise.signals import selections
-from enterprise.signals import signal_base
-from enterprise.signals import white_signals
-from enterprise.signals import gp_signals
-from enterprise.signals import deterministic_signals
-from enterprise import constants as const

 from enterprise_extensions.sampler import group_from_params, get_parameter_groups
 from enterprise_extensions import model_utils
-from enterprise_extensions import deterministic
-from enterprise_extensions.timing import timing_block
-
-# from enterprise_extensions.blocks import (white_noise_block, red_noise_block)
-
-import types
-
-from enterprise.signals import utils
-from enterprise.signals import gp_priors as gpp


 def setup_sampling_groups(pta,
@@ -115,6 +90,7 @@ def setup_sampling_groups(pta,

 def analyze_noise(
     chaindir="./noise_run_chains/",
+    use_noise_point='MAP',
     burn_frac=0.25,
     save_corner=True,
     no_corner_plot=False,
@@ -128,6 +104,9 @@ def analyze_noise(
     Parameters
     ==========
     chaindir: path to enterprise noise run chain; Default: './noise_run_chains/'
+    use_noise_point: point to use for noise analysis; Default: 'MAP'.
+        Options: 'MAP', 'median'.
+        Note that the MAP is the same as the maximum likelihood value when all the priors are uniform.
     burn_frac: fraction of chain to use for burn-in; Default: 0.25
     save_corner: Flag to toggle saving of corner plots; Default: True
     no_corner_plot: Flag to toggle saving of corner plots; Default: False
@@ -151,17 +130,15 @@ def analyze_noise(
     try:
         noise_core = co.Core(chaindir=chaindir)
     except:
-        log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct. Also make sure you have an up-to-date la_forge installation. ")
+        log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct. "
+                  "Also make sure you have an up-to-date la_forge installation.")
         raise ValueError(f"Could not load noise run from {chaindir}. Check path and la_forge installation.")
-    if sampler == 'PTMCMCSampler' or sampler == "GibbsSampler":
+    if sampler == 'PTMCMCSampler':
         # standard burn ins
         noise_core.set_burn(burn_frac)
-    elif likelihood == 'discovery':
-        # the numpyro sampler already deals with the burn in
-        noise_core.set_burn(0)
     else:
         noise_core.set_burn(burn_frac)
-    chain = noise_core.chain
+    chain = noise_core.chain[int(burn_frac * len(noise_core.chain)) :, :]
     psr_name = noise_core.params[0].split("_")[0]
     pars = np.array([p for p in noise_core.params if p not in ['lnlike', 'lnpost']])
     if len(pars)+2 != chain.shape[1]:
@@ -171,7 +148,7 @@ def analyze_noise(
     if chaindir_compare is not None:
         compare_core = co.Core(chaindir=chaindir)
         compare_core.set_burn(noise_core.burn)
-        chain_compare = compare_core.chain
+        chain_compare = compare_core.chain[int(burn_frac * len(noise_core.chain)) :, :]
         pars_compare = np.array([p for p in compare_core.params if p not in ['lnlike', 'lnpost']])
         if len(pars_compare)+2 != chain_compare.shape[1]:
             chain_compare = chain_compare[:, :len(pars_compare)+2]
@@ -278,6 +255,8 @@ def analyze_noise(
     nrows = 5 # number of rows per page

     mp_idx = noise_core.map_idx
+    param_medians = [noise_core.get_param_median(p) for p in noise_core.params if p not in ['lnlike', 'lnpost']]
+    param_medians_dict = {p: noise_core.get_param_median(p) for p in noise_core.params if p not in ['lnlike', 'lnpost']}
     #mp_idx = np.argmax(chain[:, a])
     if chaindir_compare is not None:
         mp_compare_idx = compare_core.map_idx
@@ -298,7 +277,8 @@ def analyze_noise(
                 color="black",
                 label="Current",
             )
-            ax.axvline(chain[:, idx][mp_idx], ls="--", color="black")
+            ax.axvline(chain[:, idx][mp_idx], ls="--", color="black", label="MAP")
+            ax.axvline(param_medians[idx], ls="--", color="green", label="median")
             if chaindir_compare is not None:
                 ax.hist(
chain_compare[:, idx], @@ -325,7 +305,13 @@ def analyze_noise( # ax[nr][nc].legend(loc = 'best') pl.show() - noise_dict = noise_core.get_map_dict() + if use_noise_point == 'MAP': + noise_dict = noise_core.get_map_dict() + elif use_noise_point == 'median': + noise_dict = param_medians_dict + else: + log.error(f"Invalid noise point {use_noise_point}. Must be 'MAP' or 'median' ") + raise ValueError(f"Invalid noise point {use_noise_point}. Must be 'MAP' or 'median' ") # Print bayes factor for red noise in pulsar rn_amp_nm = psr_name+"_red_noise_log10_A" @@ -344,7 +330,7 @@ def model_noise( base_op_dir="./", model_kwargs={}, sampler_kwargs={}, - return_sampler=False, + return_sampler_without_sampling=False, ): """ Setup enterprise or discovery likelihood and perform Bayesian inference on noise model @@ -486,100 +472,22 @@ def model_noise( except: log.warning("Failed to add draws from empirical distribution.") # Initial sample - x0 = np.hstack([p.sample() for p in pta.params]) - # Start sampling - log.info("Beginnning to sample...") - samp.sample( - x0, sampler_kwargs['n_iter'], SCAMweight=30, AMweight=15, DEweight=50, #**sampler_kwargs - ) - log.info("Finished sampling.") - ############################################################## - ################## GibbsSampler ######################## - ############################################################## - elif likelihood == "enterprise" and sampler == "GibbsSampler": - try: - from enterprise_extensions.gibbs_sampling.gibbs_chromatic import GibbsSampler - except: - log.error("Please upgrade to the latest version of enterprise_extensions to use GibbsSampler.") - raise ValueError("Please install a version of enterprise extensions which contains the `gibbs_sampling` module.") - log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") - samp = GibbsSampler( - e_psr, - vary_wn=True, - tm_marg=False, - inc_ecorr=True, - ecorr_type='kernel', - vary_rn=model_kwargs['inc_rn'], - rn_components=model_kwargs['rn_nfreqs'], - vary_dm=model_kwargs['inc_dmgp'], - dm_components=model_kwargs['dmgp_nfreqs'], - vary_chrom=model_kwargs['inc_chromgp'], - chrom_components=model_kwargs['chromgp_nfreqs'], - noise_dict={}, - tnequad=model_kwargs['tnequad'], - #**noise_kwargs, - ) - log.info("Beginnning to sample...") - samp.sample(niter=sampler_kwargs['n_iter'], savepath=outdir) - log.info("Finished sampling.") - # sorta redundant to have both, but la_forge doesn't look for .npy files - chain = np.load(f'{outdir}/chain_1.npy') - np.savetxt(f'{outdir}/chain_1.txt', chain,) - ################################################################# - ################## discovery likelihood ################### - ################################################################# - elif likelihood == "discovery": - try: # make sure requisite packages are installed - import xarray as xr - import jax - from jax import numpy as jnp - import numpyro - from numpyro.infer import log_likelihood - from numpyro import distributions as dist - from numpyro import infer - import discovery as ds - from discovery import prior as ds_prior - from discovery.prior import (makelogtransform_uniform, - makelogprior_uniform, - sample_uniform) - except ImportError: - log.error("Please install the latest version of discovery, numpyro, and/or jax") - raise ValueError("Please install the latest version of discovery, numpyro, and/or jax") - log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") - 
os.makedirs(outdir, exist_ok=True)
-        with open(outdir+"model_kwargs.json", "w") as f:
-            json.dump(model_kwargs, f)
-        with open(outdir+"sampler_kwargs.json", "w") as f:
-            json.dump(sampler_kwargs, f)
-        samp, log_x, numpyro_model = setup_discovery_noise(e_psr, model_kwargs, sampler_kwargs)
-        # run the sampler
-        log.info("Beginnning to sample...")
-        samp.run(jax.random.key(42))
-        log.info("Finished sampling.")
-        # convert to a DataFrame
-        df = log_x.to_df(samp.get_samples()['par'])
-        # convert DataFrame to dictionary
-        samples_dict = df.to_dict(orient='list')
-        if sampler_kwargs['sampler'] != 'HMC-GIBBS':
-            log.info("Reconstructing Log Likelihood and Posterior from samples...")
-            ln_like = log_likelihood(numpyro_model, samp.get_samples(), parallel=True)['ll']
-            ln_prior = dist.Normal(0, 10).log_prob(samp.get_samples()['par']).sum(axis=-1)
-            ln_post = ln_like + ln_prior
-            samples_dict['lnlike'] = ln_like
-            samples_dict['lnpost'] = ln_post
-        else:
-            samples_dict['lnlike'] = None
-            samples_dict['lnpost'] = None
-        # convert dictionary to ArviZ InferenceData object
-        inference_data = az.from_dict(samples_dict)
-        # Save to NetCDF file which can be loaded into la_forge
-        inference_data.to_netcdf(outdir+"chain.nc")
+        # try to initialize the sampler to the maximum likelihood value from a previous run
+        # initialize to a random point if any points are missing
+        x0 = get_init_sample_from_chain_path(pta, chaindir=sampler_kwargs['empirical_distr'])
+        if not return_sampler_without_sampling:
+            # Start sampling
+            log.info("Beginnning to sample...")
+            samp.sample(
+                x0, sampler_kwargs['n_iter'], SCAMweight=30, AMweight=15, DEweight=50, #**sampler_kwargs
+            )
+            log.info("Finished sampling.")
     else:
         log.error(
             f"Invalid likelihood ({likelihood}) and sampler ({sampler}) combination." \
             + "\nCan only use enterprise with PTMCMCSampler or GibbsSampler."
         )
 
-    if return_sampler:
+    if return_sampler_without_sampling:
         return samp
 
 
@@ -592,6 +500,7 @@ def convert_to_RNAMP(value):
 
 def add_noise_to_model(
     model,
+    use_noise_point='MAP',
     burn_frac=0.25,
     save_corner=True,
     no_corner_plot=False,
@@ -607,6 +516,9 @@ def add_noise_to_model(
     Parameters
     ==========
     model: PINT (or tempo2) timing model
+    use_noise_point: point to use for noise analysis; Default: 'MAP'.
+        Options: 'MAP', 'median',
+        Note that the MAP is the same as the maximum likelihood value when all the priors are uniform.
     burn_frac: fraction of chain to use for burn-in; Default: 0.25
     save_corner: Flag to toggle saving of corner plots; Default: True
     ignore_red_noise: Flag to manually force RN exclusion from timing model. 
When False, @@ -940,109 +852,6 @@ def add_noise_to_model( return model -def plot_free_specs(c0, freqs, fs_type='Red Noise'): - """ - Plot free specs when using free spectral model - """ - raise NotImplementedError("not yet implemented") - return None - - -def setup_discovery_noise(psr, - model_kwargs={}, - sampler_kwargs={}): - """ - Setup the discovery likelihood with numpyro sampling for noise analysis - """ - # set up the model - sampler = sampler_kwargs['sampler'] - time_span = ds.getspan([psr]) - # need 64-bit precision for PTA inference - numpyro.enable_x64() - # this updates the ds.stand_priordict object - ds.priordict_standard.update(prior_dictionary_updates()) - model_components = [ - psr.residuals, - ds.makegp_timing(psr, svd=True), - ds.makenoise_measurement(psr), - ds.makegp_ecorr(psr), - ] - if model_kwargs['inc_rn']: - if model_kwargs['rn_psd'] == 'powerlaw': - model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['rn_nfreqs'], T=time_span, name='red_noise')) - elif model_kwargs['rn_psd'] == 'free_spectral': - model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['rn_nfreqs'], T=time_span, name='red_noise')) - if model_kwargs['inc_dmgp']: - if model_kwargs['dmgp_psd'] == 'powerlaw': - model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['dmgp_nfreqs'], T=time_span, name='dm_gp')) - elif model_kwargs['dmgp_psd'] == 'free_spectral': - model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['dmgp_nfreqs'], T=time_span, name='dm_gp')) - if model_kwargs['inc_chromgp']: - if model_kwargs['chrom_psd'] == 'powerlaw': - model_components.append(ds.makegp_fourier(psr, ds.powerlaw, model_kwargs['chromgp_nfreqs'], T=time_span, name='chrom_gp')) - elif model_kwargs['chrom_psd'] == 'free_spectral': - model_components.append(ds.makegp_fourier(psr, ds.free_spectral, model_kwargs['chromgp_nfreqs'], T=time_span, name='chrom_gp')) - psl = ds.PulsarLikelihood(model_components) - ## this prior transform is no longer required and should be removed - prior = ds_prior.makelogprior_uniform(psl.logL.params, ds.priordict_standard) - log_x = makelogtransform_uniform(psl.logL) - # x0 = sample_uniform(psl.logL.params) - if sampler == 'HMC-Gibbs': - try: - from discovery.gibbs import setup_single_psr_hmc_gibbs - except ImportError: - log.error("Need to have most up-to-date version of discovery installed.") - raise ValueError("Make sure proper version of discovery is imported") - numpyro_model = None # this doesnt get used for HMC-Gibbs - gibbs_hmc_kernel = setup_single_psr_hmc_gibbs( - psrl=psl, psrs=psr, - priordict=ds.priordict_standard, - invhdorf=None, nuts_kwargs={}) - sampler = infer.MCMC(gibbs_hmc_kernel, - num_warmup=sampler_kwargs['num_warmup'], - num_samples=sampler_kwargs['num_samples'], - num_chains=sampler_kwargs['num_chains'], - chain_method=sampler_kwargs['chain_method'], - progress_bar=True, - ) - elif sampler == 'NUTS': - def numpyro_model(): - params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) - numpyro.factor("ll", log_x(params)) - nuts_kernel = infer.NUTS(numpyro_model, - max_tree_depth=sampler_kwargs['max_tree_depth'], - dense_mass=sampler_kwargs['dense_mass'], - forward_mode_differentiation=False, - target_accept_prob=0.99) - sampler = infer.MCMC(nuts_kernel, - num_warmup=sampler_kwargs['num_warmup'], - num_samples=sampler_kwargs['num_samples'], - num_chains=sampler_kwargs['num_chains'], - chain_method=sampler_kwargs['chain_method'], - progress_bar=True, - ) - elif 
sampler == 'HMC': - def numpyro_model(): - params = jnp.array(numpyro.sample("par", dist.Normal(0,10).expand([len(log_x.params)]))) - numpyro.factor("ll", log_x(params)) - hmc_kernel = infer.HMC(numpyro_model, num_steps=sampler_kwargs['num_steps']) - sampler = infer.MCMC(hmc_kernel, - num_warmup=sampler_kwargs['num_warmup'], - num_samples=sampler_kwargs['num_samples'], - num_chains=sampler_kwargs['num_chains'], - chain_method=sampler_kwargs['chain_method'], - progress_bar=True, - ) - else: - log.error( - f"Invalid likelihood ({sampler_kwargs['likelihood']}) and sampler ({sampler_kwargs['sampler']}) combination." \ - + "\nCan only use discovery with 'HMC', 'HMC-Gibbs', or 'NUTS'." - ) - - - return sampler, log_x, numpyro_model - - def test_equad_convention(pars_list): """ If (t2/tn)equad present, report convention used. @@ -1070,14 +879,51 @@ def test_equad_convention(pars_list): return None -def prior_dictionary_updates(): - return { - '(.*_)?dm_gp_log10_A': [-20, -11], - '(.*_)?dm_gp_gamma': [0, 7], - '(.*_)?chrom_gp_log10_A': [-20, -11], - '(.*_)?chrom_gp_gamma': [0, 7], - } - +def get_init_sample_from_chain_path(pta, chaindir=None, json_path=None): + """ + Get the initial sample from a chain directory or json file path. + If parameters are missing, draw randomly from the prior + Parameters + ========== + pta: enterprise PTA object + chaindir: path to chain directory + json_path: path to json file containing starting point + Returns + ======= + x0: initial sample + """ + try: + if chaindir is not None: + core = co.Core(chaindir) + starting_point = core.get_map_dict() + x0_dict = {} + for prior, par_name in zip(pta.params, pta.param_names): + if par_name in starting_point.keys(): + x0_dict.update({par_name: starting_point[par_name]}) + else: + x0_dict.update({par_name: prior.sample()}) + x0 = np.hstack([x0_dict[p] for p in pta.param_names]) + elif json_path is not None: + with open(json_path, 'r') as fin: + starting_point = json.load(fin) + x0_dict = {} + for prior, par_name in zip(pta.params, pta.param_names): + if par_name in starting_point.keys(): + x0_dict.update({par_name: starting_point[par_name]}) + else: + x0_dict.update({par_name: prior.sample()}) + x0 = np.hstack([x0_dict[p] for p in pta.param_names]) + else: + x0 = np.hstack([p.sample() for p in pta.params]) + except: + x0 = np.hstack([p.sample() for p in pta.params]) + x0_dict = None + log.warning( + f"Unable to initialize sampler from chain directory or json file. Drawing random initial sample." 
+        )
+    return x0
+
+
 def get_model_and_sampler_default_settings():
     model_defaults = {
         # white noise
@@ -1110,7 +956,7 @@ def get_model_and_sampler_default_settings():
         'likelihood': 'enterprise',
         'sampler': 'PTMCMCSampler',
         # ptmcmc kwargs
-        'n_iter': 2e5,
+        'n_iter': 2.5e5,
         'empirical_distr': None,
         # numpyro kwargs
         'num_steps': 25,

From 5fbf664212736a69e8261f82bff4b9e1a1d268c2 Mon Sep 17 00:00:00 2001
From: Jeremy Baier
Date: Thu, 6 Mar 2025 13:25:14 -0800
Subject: [PATCH 167/193] fix notebook; fix typo in nu

---
 nb_templates/process_v1.1.ipynb | 2 +-
 nb_templates/process_v1.2.ipynb | 2 +-
 src/pint_pal/noise_utils.py     | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/nb_templates/process_v1.1.ipynb b/nb_templates/process_v1.1.ipynb
index bc1a16ad..87ba7441 100644
--- a/nb_templates/process_v1.1.ipynb
+++ b/nb_templates/process_v1.1.ipynb
@@ -262,7 +262,7 @@
     "    # to temporarily address current inconsistency between enterprise <= v3.1.0 and pint implementations\n",
     "    mo_new = lu.convert_enterprise_equads(mo_new)\n",
     "    \n",
-    "    except OSError as e:\n",
+    "    except (OSError, ValueError) as e:\n",
     "        log.warning(f\"Unable to read noise chains from {tc.get_noise_dir()}: {e}\")\n",
     "    else:\n",
     "        mo = mo_new\n",
diff --git a/nb_templates/process_v1.2.ipynb b/nb_templates/process_v1.2.ipynb
index 9f69b712..ee97cf91 100644
--- a/nb_templates/process_v1.2.ipynb
+++ b/nb_templates/process_v1.2.ipynb
@@ -266,7 +266,7 @@
     "    mo_new = nu.add_noise_to_model(mo_new, using_wideband = using_wideband, base_dir=tc.get_noise_dir(), \n",
     "                                   compare_dir=tc.get_compare_noise_dir(), no_corner_plot=tc.get_no_corner())\n",
     "    \n",
-    "    except OSError as e:\n",
+    "    except (OSError, ValueError) as e:\n",
     "        log.warning(f\"Unable to read noise chains from {tc.get_noise_dir()}: {e}\")\n",
     "    else:\n",
     "        mo = mo_new\n",
diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py
index 2ecf91b3..366b299c 100644
--- a/src/pint_pal/noise_utils.py
+++ b/src/pint_pal/noise_utils.py
@@ -130,8 +130,8 @@ def analyze_noise(
     try:
         noise_core = co.Core(chaindir=chaindir)
     except:
-        log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct. 
-            Also make sure you have an up-to-date la_forge installation. ")
+        log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct." \
+            +"Also make sure you have an up-to-date la_forge installation. ")
         raise ValueError(f"Could not load noise run from {chaindir}. Check path and la_forge installation.")
     if sampler == 'PTMCMCSampler':
         # standard burn ins

From 68ec8e68e4ef7e2e5bb4e0c4faeb8868ac6787f1 Mon Sep 17 00:00:00 2001
From: Jeremy Baier
Date: Thu, 6 Mar 2025 14:10:34 -0800
Subject: [PATCH 168/193] add mean large likelihoods function; implementation
 errors

---
 src/pint_pal/noise_utils.py | 35 +++++++++++++++++++++++++++++++++--
 1 file changed, 33 insertions(+), 2 deletions(-)

diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py
index 366b299c..7522d196 100644
--- a/src/pint_pal/noise_utils.py
+++ b/src/pint_pal/noise_utils.py
@@ -88,9 +88,30 @@ def setup_sampling_groups(pta,
     return groups
 
 
+def get_mean_large_likelihoods(core, N=10):
+    '''
+    Calculate the mean of the top N likelihood samples from the chain.
+    This is an alternative to fixing the noise values in the timing model to
+    the MAP or the median.
+
+    Params
+    ======
+    core: la_forge.core object
+    N: int, number of top likelihood samples to average
+    Returns
+    =======
+    mean_data: np.array, mean of the top N likelihood samples
+    '''
+    chain = core.chain[core.burn:,:]
+    lnlike_idx = core.params.index('lnlike')
+    sorted_data = chain[chain[:, lnlike_idx].argsort()[::-1]]
+    return np.mean(sorted_data[:N,:],axis=0)
+
+
 def analyze_noise(
     chaindir="./noise_run_chains/",
-    use_noise_point='MAP',
+    use_noise_point='mean_large_likelihood',
+    likelihoods_to_average=10,
     burn_frac=0.25,
     save_corner=True,
     no_corner_plot=False,
@@ -104,7 +125,9 @@ def analyze_noise(
     chaindir: path to enterprise noise run chain; Default: './noise_run_chains/'
     use_noise_point: point to use for noise analysis; Default: 'MAP'.
-        Options: 'MAP', 'median',
+        Options: 'MAP', 'median', 'mean_large_likelihood',
         Note that the MAP is the same as the maximum likelihood value when all the priors are uniform.
+    likelihoods_to_average: number of top likelihood samples to average; Default: 10
+        Only applicable if use_noise_point is 'mean_large_likelihood'.
     burn_frac: fraction of chain to use for burn-in; Default: 0.25
     save_corner: Flag to toggle saving of corner plots; Default: True
     no_corner_plot: Flag to toggle saving of corner plots; Default: False
@@ -309,7 +331,9 @@ def analyze_noise(
     if use_noise_point == 'MAP':
         noise_dict = noise_core.get_map_dict()
     elif use_noise_point == 'median':
         noise_dict = param_medians_dict
+    elif use_noise_point == 'mean_large_likelihood':
+        noise_dict = get_mean_large_likelihoods(noise_core, N=likelihoods_to_average)
     else:
         log.error(f"Invalid noise point {use_noise_point}. Must be 'MAP' or 'median' ")
         raise ValueError(f"Invalid noise point {use_noise_point}. Must be 'MAP' or 'median' ")
@@ -482,6 +507,12 @@ def model_noise(
             x0, sampler_kwargs['n_iter'], SCAMweight=30, AMweight=15, DEweight=50, #**sampler_kwargs
         )
         log.info("Finished sampling.")
+    elif likelihood == "enterprise" and sampler == 'GibbsSampler':
+        log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}")
+        raise NotImplementedError("GibbsSampler not yet implemented for enterprise likelihood")
+    elif likelihood == "discovery":
+        log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}")
+        raise NotImplementedError("Discovery likelihood not yet implemented")
     else:
         log.error(
             f"Invalid likelihood ({likelihood}) and sampler ({sampler}) combination." \

From 67e88e728d69e4e7e293f57c7333331741fa80e1 Mon Sep 17 00:00:00 2001
From: Jeremy Baier
Date: Thu, 6 Mar 2025 14:28:34 -0800
Subject: [PATCH 169/193] 10->50 likelihoods to average

---
 src/pint_pal/noise_utils.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py
index 7522d196..539b52e6 100644
--- a/src/pint_pal/noise_utils.py
+++ b/src/pint_pal/noise_utils.py
@@ -111,7 +111,7 @@ def analyze_noise(
 def analyze_noise(
     chaindir="./noise_run_chains/",
     use_noise_point='mean_large_likelihood',
-    likelihoods_to_average=10,
+    likelihoods_to_average=50,
     burn_frac=0.25,
     save_corner=True,
     no_corner_plot=False,
@@ -128,7 +128,7 @@ def analyze_noise(
     use_noise_point: point to use for noise analysis; Default: 'MAP'.
         Options: 'MAP', 'median', 'mean_large_likelihood',
         Note that the MAP is the same as the maximum likelihood value when all the priors are uniform.
- likelihoods_to_average: number of top likelihood samples to average; Default: 10 + likelihoods_to_average: number of top likelihood samples to average; Default: 50 Only applicable if use_noise_point is 'mean_large_likelihood'. burn_frac: fraction of chain to use for burn-in; Default: 0.25 save_corner: Flag to toggle saving of corner plots; Default: True From 3603c4fade3de4c793fbf67b7c77c5047beaea69 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 16:09:03 -0800 Subject: [PATCH 170/193] remove dmgp compatibility :( --- src/pint_pal/noise_utils.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 539b52e6..edf76fbd 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -448,17 +448,17 @@ def model_noise( dmjump_var=False, wb_efac_sigma=wb_efac_sigma, # DM GP - dm_var=model_kwargs['inc_dmgp'], - dm_Nfreqs=model_kwargs['dmgp_nfreqs'], + #dm_var=model_kwargs['inc_dmgp'], + #dm_Nfreqs=model_kwargs['dmgp_nfreqs'], # CHROM GP - chrom_gp=model_kwargs['inc_chromgp'], - chrom_Nfreqs=model_kwargs['chromgp_nfreqs'], - chrom_gp_kernel='diag', # Fourier basis chromg_gp + #chrom_gp=model_kwargs['inc_chromgp'], + #chrom_Nfreqs=model_kwargs['chromgp_nfreqs'], + #chrom_gp_kernel='diag', # Fourier basis chromg_gp # DM SOLAR WIND #dm_sw_deter=model_kwargs['inc_sw_deter'], #ACE_prior=model_kwargs['ACE_prior'], # can pass extra signals in here - extra_sigs=model_kwargs['extra_sigs'], + #extra_sigs=model_kwargs['extra_sigs'], ) pta.set_default_params({}) else: From a7bc1285631f28b40209c497e276ee9975b715ee Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 17:35:54 -0800 Subject: [PATCH 171/193] add extra prints --- src/pint_pal/noise_utils.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index edf76fbd..36010a85 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -447,6 +447,7 @@ def model_noise( use_dmdata=False, dmjump_var=False, wb_efac_sigma=wb_efac_sigma, + tm_svd=True, # DM GP #dm_var=model_kwargs['inc_dmgp'], #dm_Nfreqs=model_kwargs['dmgp_nfreqs'], @@ -485,13 +486,19 @@ def model_noise( groups = setup_sampling_groups(pta, write_groups=False, outdir=outdir) ####### # setup sampler using enterprise_extensions + if sampler_kwargs['empirical_distr'] is not None: + log.info(f"Attempting to set up sampler with empirical distribution from {sampler_kwargs['empirical_distr']}") + emp_dist = sampler_kwargs['empirical_distr'] + else: + log.warning("Setting up sampler without empirical distributions...consider adding one for faster sampling...") + emp_dist = None samp = ee_sampler.setup_sampler(pta, outdir=outdir, resume=resume, groups=groups, - empirical_distr = sampler_kwargs['empirical_distr'] + empirical_distr = emp_dist, ) - if sampler_kwargs['empirical_distr'] is not None: + if emp_dist is not None: try: samp.addProposalToCycle(samp.jp.draw_from_empirical_distr, 50) except: @@ -981,7 +988,8 @@ def get_model_and_sampler_default_settings(): 'ACE_prior': False, # 'extra_sigs': None, - # path to empirical distribution + # misc + 'tm_svd': True } sampler_defaults = { 'likelihood': 'enterprise', From 819f9f6a7a6301f3284ba81e2656ca82cdb5b5b5 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 17:51:21 -0800 Subject: [PATCH 172/193] turns out you cant make empirical distributions like that --- src/pint_pal/noise_utils.py | 54 
++++++++++++++++++++++++++++++++++--- 1 file changed, 51 insertions(+), 3 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 36010a85..6dc57f11 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -1,4 +1,4 @@ -import numpy as np, os, json +import numpy as np, os, json, itertools from astropy import log from astropy.time import Time @@ -17,6 +17,8 @@ from enterprise_extensions.sampler import group_from_params, get_parameter_groups from enterprise_extensions import model_utils +from enterprise_extensions.empirical_distr import (EmpiricalDistribution1D, + EmpiricalDistribution2D) def setup_sampling_groups(pta, @@ -487,8 +489,13 @@ def model_noise( ####### # setup sampler using enterprise_extensions if sampler_kwargs['empirical_distr'] is not None: - log.info(f"Attempting to set up sampler with empirical distribution from {sampler_kwargs['empirical_distr']}") - emp_dist = sampler_kwargs['empirical_distr'] + log.info(f"Attempting to create empirical distribution from {sampler_kwargs['empirical_distr']}") + try: + core = co.Core(chaindir=sampler_kwargs['empirical_distr']) + emp_dist = make_emp_distr(core) + except: + log.warning(f"Failed to create empirical distribution from {sampler_kwargs['empirical_distr']}... check path.") + emp_dist = None else: log.warning("Setting up sampler without empirical distributions...consider adding one for faster sampling...") emp_dist = None @@ -961,6 +968,47 @@ def get_init_sample_from_chain_path(pta, chaindir=None, json_path=None): ) return x0 +def make1d(par, samples, bins=None, nbins=81): + if bins is None: + bins = np.linspace(min(samples), max(samples), nbins) + + return EmpiricalDistribution1D(par, samples, bins) + +def make2d(pars, samples, bins=None, nbins=81): + idx = [0,1] + if bins is None: + bins = [np.linspace(min(samples[:, i]), max(samples[:, i]), nbins) for i in idx] + return EmpiricalDistribution2D(pars, samples.T, bins) + + +def make_emp_distr(core): + """ + Make empirical distributions for all parameters in core. + Parameters + ========== + core: enterprise_extensions.core.Core object + + Returns + ======= + dists: list of EmpiricalDistribution1D and EmpiricalDistribution2D objects + """ + types = ['dm_gp', 'chrom_gp', 'red_noise', 'ecorr', 'chrom_s1yr', 'dm_s1yr', 'exp',] + # made 1d hist for everything + dists = [make1d(par, core(par)) for par in core.params[:-4] if 'chrom_gp_idx' not in par] + # get list of parameters minus chrom_gp_idx cuz this prior is weird. 
+ params = [p for p in core.params if 'chrom_gp_idx' not in p] + groups = {ii: [par for par in params if ii in par] for ii in types} + # make 2ds for various related parameter subgroups + for group in groups.values(): + _ = [dists.append(make2d(pars,core(list(pars)))) for pars in list(itertools.combinations(group,2)) if len(group)>1] + # make 2d cross groups + _ = [[dists.append(make2d([ecr, dm], core([ecr, dm]))) for ecr in groups['ecorr']] for dm in groups['dm_gp']] + _ = [[dists.append(make2d([dm, chrom], core([dm, chrom]))) for dm in groups['dm_gp']] for chrom in groups['chrom_gp']] + + return dists + + + def get_model_and_sampler_default_settings(): model_defaults = { From a9f85c57a666194436f5a46e598609418008c3dc Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 19:39:42 -0800 Subject: [PATCH 173/193] removing chain accept --- src/pint_pal/noise_utils.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 6dc57f11..3f7de998 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -163,11 +163,11 @@ def analyze_noise( noise_core.set_burn(burn_frac) else: noise_core.set_burn(burn_frac) - chain = noise_core.chain[int(burn_frac * len(noise_core.chain)) :, :] + chain = noise_core.chain[int(burn_frac * len(noise_core.chain)) :, :-2] psr_name = noise_core.params[0].split("_")[0] - pars = np.array([p for p in noise_core.params if p not in ['lnlike', 'lnpost']]) - if len(pars)+2 != chain.shape[1]: - chain = chain[:, :len(pars)+2] + pars = np.array([p for p in noise_core.params if p not in ['lnlike', 'lnpost', 'chain_accept', 'pt_chain_accept']]) + # if len(pars)+2 != chain.shape[1]: + # chain = chain[:, :len(pars)+2] # load in same for comparison noise model if chaindir_compare is not None: @@ -489,12 +489,12 @@ def model_noise( ####### # setup sampler using enterprise_extensions if sampler_kwargs['empirical_distr'] is not None: - log.info(f"Attempting to create empirical distribution from {sampler_kwargs['empirical_distr']}") + log.info(f"Attempting to create empirical distributions from {sampler_kwargs['empirical_distr']}") try: core = co.Core(chaindir=sampler_kwargs['empirical_distr']) emp_dist = make_emp_distr(core) except: - log.warning(f"Failed to create empirical distribution from {sampler_kwargs['empirical_distr']}... check path.") + log.warning(f"Failed to create empirical distributions ... 
check path.") emp_dist = None else: log.warning("Setting up sampler without empirical distributions...consider adding one for faster sampling...") From acabed9c8650f6dad38629f8358c04b5237873cd Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 19:43:25 -0800 Subject: [PATCH 174/193] removing lnlike --- src/pint_pal/noise_utils.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 3f7de998..390f692c 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -163,7 +163,7 @@ def analyze_noise( noise_core.set_burn(burn_frac) else: noise_core.set_burn(burn_frac) - chain = noise_core.chain[int(burn_frac * len(noise_core.chain)) :, :-2] + chain = noise_core.chain[int(burn_frac * len(noise_core.chain)) :, :-4] psr_name = noise_core.params[0].split("_")[0] pars = np.array([p for p in noise_core.params if p not in ['lnlike', 'lnpost', 'chain_accept', 'pt_chain_accept']]) # if len(pars)+2 != chain.shape[1]: @@ -171,12 +171,12 @@ def analyze_noise( # load in same for comparison noise model if chaindir_compare is not None: - compare_core = co.Core(chaindir=chaindir) + compare_core = co.Core(chaindir=chaindir) compare_core.set_burn(noise_core.burn) - chain_compare = compare_core.chain[int(burn_frac * len(noise_core.chain)) :, :] + chain_compare = compare_core.chain[int(burn_frac * len(noise_core.chain)) :, :-4] pars_compare = np.array([p for p in compare_core.params if p not in ['lnlike', 'lnpost']]) - if len(pars_compare)+2 != chain_compare.shape[1]: - chain_compare = chain_compare[:, :len(pars_compare)+2] + # if len(pars_compare)+2 != chain_compare.shape[1]: + # chain_compare = chain_compare[:, :len(pars_compare)+2] psr_name_compare = pars_compare[0].split("_")[0] if psr_name_compare != psr_name: From 08d0751a1f6485bfc15e3c78383d6187b286b51f Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 19:48:11 -0800 Subject: [PATCH 175/193] fix bug --- src/pint_pal/noise_utils.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 390f692c..5a3c3c9d 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -595,10 +595,12 @@ def add_noise_to_model( log.info(f"Using existing noise analysis results in {chaindir}") log.info("Adding new noise parameters to model.") noise_core, noise_dict, rn_bf = analyze_noise( - chaindir, - burn_frac, - save_corner, - no_corner_plot, + chaindir=chaindir, + use_noise_point='mean_large_likelihood', + likelihoods_to_average=50, + burn_frac=0.25, + save_corner=save_corner, + no_corner_plot=no_corner_plot, chaindir_compare=chaindir_compare, ) chainfile = chaindir + "chain_1.txt" From 2ba70e3157ebea170b43a4e43d6b6adb13788f28 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 19:54:41 -0800 Subject: [PATCH 176/193] fix bug --- src/pint_pal/noise_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 5a3c3c9d..05313e97 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -107,7 +107,8 @@ def get_mean_large_likelihoods(core, N=10): chain = core.chain[core.burn:,:] lnlike_idx = core.params.index('lnlike') sorted_data = chain[chain[:, lnlike_idx].argsort()[::-1]] - return np.mean(sorted_data[:N,:],axis=0) + vals = np.mean(sorted_data[:N,:],axis=0) + return {p: val[p] for p, par in enumerate(core.params)} def analyze_noise( 
From 5caf7dd769a6cad2091fd9060bd991b606544ec7 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 19:55:55 -0800 Subject: [PATCH 177/193] fix bug --- src/pint_pal/noise_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 05313e97..c70f066c 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -108,7 +108,7 @@ def get_mean_large_likelihoods(core, N=10): lnlike_idx = core.params.index('lnlike') sorted_data = chain[chain[:, lnlike_idx].argsort()[::-1]] vals = np.mean(sorted_data[:N,:],axis=0) - return {p: val[p] for p, par in enumerate(core.params)} + return {p: vals[p] for p, par in enumerate(core.params)} def analyze_noise( From 48fafbf2e9cc1ebe613f0683edbfa8bc1decd1f8 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 19:57:39 -0800 Subject: [PATCH 178/193] fix bug --- src/pint_pal/noise_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index c70f066c..63d54cb2 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -108,7 +108,7 @@ def get_mean_large_likelihoods(core, N=10): lnlike_idx = core.params.index('lnlike') sorted_data = chain[chain[:, lnlike_idx].argsort()[::-1]] vals = np.mean(sorted_data[:N,:],axis=0) - return {p: vals[p] for p, par in enumerate(core.params)} + return {p: vals[par] for p, par in enumerate(core.params)} def analyze_noise( From c186e32f0d50118f5e28fbbc7f7cecf1205fc185 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 6 Mar 2025 19:59:06 -0800 Subject: [PATCH 179/193] fix bug --- src/pint_pal/noise_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 63d54cb2..32e74c57 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -108,7 +108,7 @@ def get_mean_large_likelihoods(core, N=10): lnlike_idx = core.params.index('lnlike') sorted_data = chain[chain[:, lnlike_idx].argsort()[::-1]] vals = np.mean(sorted_data[:N,:],axis=0) - return {p: vals[par] for p, par in enumerate(core.params)} + return {par: vals[p] for p, par in enumerate(core.params)} def analyze_noise( From 2e8bc47bd13f2dbc4698851647f6a663dc8e01c3 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Fri, 7 Mar 2025 07:11:53 -0800 Subject: [PATCH 180/193] extra logger statements --- src/pint_pal/noise_utils.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 32e74c57..d3286e17 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -546,7 +546,7 @@ def convert_to_RNAMP(value): def add_noise_to_model( model, - use_noise_point='MAP', + use_noise_point='mean_large_likelihood', burn_frac=0.25, save_corner=True, no_corner_plot=False, @@ -555,6 +555,7 @@ def add_noise_to_model( rn_bf_thres=1e2, base_dir=None, compare_dir=None, + return_noise_core=False, ): """ Add WN, RN, DMGP, ChromGP, and SW parameters to timing model. @@ -562,9 +563,10 @@ def add_noise_to_model( Parameters ========== model: PINT (or tempo2) timing model - use_noise_point: point to use for noise analysis; Default: 'MAP'. - Options: 'MAP', 'median', + use_noise_point: point to use for noise analysis; Default: 'mean_large_likelihood'. 
+        Options: 'MAP', 'median', 'mean_large_likelihood'
         Note that the MAP is the same as the maximum likelihood value when all the priors are uniform.
+        Mean large likelihood takes N of the largest likelihood values and then takes the mean of those. (Recommended).
     burn_frac: fraction of chain to use for burn-in; Default: 0.25
     save_corner: Flag to toggle saving of corner plots; Default: True
     ignore_red_noise: Flag to manually force RN exclusion from timing model. When False,
@@ -575,10 +578,13 @@ def add_noise_to_model(
     using_wideband: Flag to toggle between narrowband and wideband datasets; Default: False
     base_dir: directory containing {psr}_nb and {psr}_wb chains directories; if None,
         will check for results in the current working directory './'.
+    return_noise_core: Flag to return the la_forge.core object; Default: False
 
     Returns
     =======
     model: New timing model which includes WN and RN (and potentially dmgp, chrom_gp, and solar wind) parameters
+    (optional)
+    noise_core: la_forge.core object which contains noise chains and run metadata
     """
 
     # Assume results are in current working directory if not specified
@@ -595,11 +601,17 @@ def add_noise_to_model(
 
     log.info(f"Using existing noise analysis results in {chaindir}")
     log.info("Adding new noise parameters to model.")
+    if use_noise_point == 'mean_large_likelihood':
+        log.info("Using mean of top 50 likelihood samples for noise parameters.")
+    elif use_noise_point == 'MAP':
+        log.info("Using maximum a posteriori values for noise parameters.")
+    elif use_noise_point == 'median':
+        log.info("Using median values for noise parameters.")
     noise_core, noise_dict, rn_bf = analyze_noise(
         chaindir=chaindir,
-        use_noise_point='mean_large_likelihood',
+        use_noise_point=use_noise_point,
         likelihoods_to_average=50,
-        burn_frac=0.25,
+        burn_frac=burn_frac,
         save_corner=save_corner,
         no_corner_plot=no_corner_plot,
         chaindir_compare=chaindir_compare,
     )

From 819f9f6a7a6301f3284ba81e2656ca82cdb5b5b5 Mon Sep 17 00:00:00 2001
From: Jeremy Baier
Date: Fri, 7 Mar 2025 07:31:52 -0800
Subject: [PATCH 181/193] adding legend to fig

---
 src/pint_pal/noise_utils.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py
index d3286e17..a974bd3e 100644
--- a/src/pint_pal/noise_utils.py
+++ b/src/pint_pal/noise_utils.py
@@ -304,7 +304,13 @@ def analyze_noise(
             label="Current",
         )
         ax.axvline(chain[:, idx][mp_idx], ls="--", color="black", label="MAP")
+        if use_noise_point == 'mean_large_likelihood':
+            lbl = "mean of 50 MLVs"
+        if use_noise_point == 'MAP':
+            lbl = "MAP"
+        if use_noise_point == 'median':
+            lbl = "median"
-        ax.axvline(param_medians[idx], ls="--", color="green", label="median")
+        ax.axvline(param_medians[idx], ls="--", color="green", label=lbl)
         if chaindir_compare is not None:
             ax.hist(
@@ -327,14 +333,15 @@ def analyze_noise(
 
     # Wasn't working before, but how do I implement a legend?
     # ax[nr][nc].legend(loc = 'best')
+    pl.legend(loc="best")
     pl.show()
 
     if use_noise_point == 'MAP':
         noise_dict = noise_core.get_map_dict()
     elif use_noise_point == 'median':
         noise_dict = param_medians_dict
     elif use_noise_point == 'mean_large_likelihood':
         noise_dict = get_mean_large_likelihoods(noise_core, N=likelihoods_to_average)
     else:
         log.error(f"Invalid noise point {use_noise_point}. Must be 'MAP' or 'median' ")
-        raise ValueError(f"Invalid noise point {use_noise_point}. Must be 'MAP' or 'median' ")
+        raise ValueError(f"Invalid noise point {use_noise_point}. 
Must be 'MAP' or 'median' ") + raise ValueError(f"Invalid noise point {use_noise_point}. Must be 'MAP' or 'median' or 'mean_large_likelihood' ") # Print bayes factor for red noise in pulsar rn_amp_nm = psr_name+"_red_noise_log10_A" From d47da300cace17c4b354c3e635561e99c155e621 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Fri, 7 Mar 2025 07:49:03 -0800 Subject: [PATCH 182/193] fix bug in chain comparison --- src/pint_pal/noise_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index a974bd3e..15d64c08 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -175,7 +175,7 @@ def analyze_noise( compare_core = co.Core(chaindir=chaindir) compare_core.set_burn(noise_core.burn) chain_compare = compare_core.chain[int(burn_frac * len(noise_core.chain)) :, :-4] - pars_compare = np.array([p for p in compare_core.params if p not in ['lnlike', 'lnpost']]) + pars_compare = np.array([p for p in compare_core.params if p not in ['lnlike', 'lnpost', 'chain_accept', 'pt_chain_accept']]) # if len(pars_compare)+2 != chain_compare.shape[1]: # chain_compare = chain_compare[:, :len(pars_compare)+2] From 1ec7a3a8ca2d95e35bb8ecd01ada5dae7069920b Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Mon, 10 Mar 2025 17:03:00 +0000 Subject: [PATCH 183/193] Quick fix for finding old noise runs --- src/pint_pal/noise_utils.py | 12 ++++++++---- src/pint_pal/utils.py | 4 ++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 15d64c08..d2afbffd 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -156,9 +156,13 @@ def analyze_noise( try: noise_core = co.Core(chaindir=chaindir) except: - log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct." \ - +"Also make sure you have an up-to-date la_forge installation. ") - raise ValueError(f"Could not load noise run from {chaindir}. Check path and la_forge installation.") + if os.path.isfile(chaindir): + log.error(f"Could not load noise run from {chaindir}. Make sure the path is correct. " \ + +"Also make sure you have an up-to-date la_forge installation. ") + raise ValueError(f"Could not load noise run from {chaindir}. Check path and la_forge installation.") + else: + log.error(f"No noise runs found in {chaindir}. Make sure the path is correct.") + raise ValueError(f"Could not load noise run from {chaindir}. Check path.") if sampler == 'PTMCMCSampler': # standard burn ins noise_core.set_burn(burn_frac) @@ -1078,4 +1082,4 @@ def get_model_and_sampler_default_settings(): 'max_tree_depth': 5, 'dense_mass': False, } - return model_defaults, sampler_defaults \ No newline at end of file + return model_defaults, sampler_defaults diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py index d1429cdd..056bd1d3 100644 --- a/src/pint_pal/utils.py +++ b/src/pint_pal/utils.py @@ -1258,8 +1258,8 @@ def check_recentness_noise(tc): noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p))) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower()+"*", "chain*.txt")))] if len(noise_runs) > 1: log.warning(f'{len(noise_runs)} noise chains found in the working directory. 
Using first in sorted list.') - used_chains = os.path.basename(noise_runs[-1]) - available_chains = [os.path.basename(n) for n in noise_runs] + used_chains = os.path.basename(noise_runs[-1]) + available_chains = [os.path.basename(n) for n in noise_runs] log.info(f"Using: {used_chains}") log.info(f"Available: {' '.join(available_chains)}") From 9e1ac7ca6c018ee205b90d85df8065376d10d4de Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Mon, 10 Mar 2025 20:49:30 +0000 Subject: [PATCH 184/193] Fixed recentness checking to handle cases outside the expected noise-dir location, including home dir --- src/pint_pal/utils.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py index 056bd1d3..6459d882 100644 --- a/src/pint_pal/utils.py +++ b/src/pint_pal/utils.py @@ -1238,7 +1238,7 @@ def check_recentness_noise(tc): name of the most recent available set of chains """ if not tc.get_noise_dir(): - log.warning(f"Yaml file does not have a noise-dir field (or it is unset).") + log.warning(f"Yaml file does not have a noise-dir field (or it is unset). Will check working directory.") return None, None d = os.path.abspath(tc.get_noise_dir()) @@ -1254,12 +1254,18 @@ def check_recentness_noise(tc): available_chains = [os.path.basename(n) for n in noise_runs] if not noise_runs: - log.warning('Looking for noise chains in working directory.') - noise_runs = [os.path.dirname(os.path.dirname(os.path.abspath(p))) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower()+"*", "chain*.txt")))] - if len(noise_runs) > 1: - log.warning(f'{len(noise_runs)} noise chains found in the working directory. Using first in sorted list.') - used_chains = os.path.basename(noise_runs[-1]) - available_chains = [os.path.basename(n) for n in noise_runs] + log.warning('Looking for noise chains in given noise-dir, but does not follow current conventions.') + noise_runs = [os.path.dirname(os.path.abspath(p)) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower()+"*", "chain*.txt")))] + if len(noise_runs) > 0: + if len(noise_runs) == 1: + log.warning(f'{len(noise_runs)} noise chain found in noise-dir.') + else: + log.warning(f'{len(noise_runs)} noise chains found in noise-dir. Using first in sorted list.') + used_chains = os.path.abspath(noise_runs[0]) + available_chains = [os.path.abspath(n) for n in noise_runs] + + if not noise_runs: + log.warning('No chains found. 
Will search working directory and apply if found.') log.info(f"Using: {used_chains}") log.info(f"Available: {' '.join(available_chains)}") From 9e9abd2a499a956fda3168d8ad87a4100f455dc7 Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Mon, 10 Mar 2025 21:14:51 +0000 Subject: [PATCH 185/193] Changed warning-->info for non-standard directory case --- src/pint_pal/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py index 6459d882..5621d959 100644 --- a/src/pint_pal/utils.py +++ b/src/pint_pal/utils.py @@ -1258,9 +1258,9 @@ def check_recentness_noise(tc): noise_runs = [os.path.dirname(os.path.abspath(p)) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower()+"*", "chain*.txt")))] if len(noise_runs) > 0: if len(noise_runs) == 1: - log.warning(f'{len(noise_runs)} noise chain found in noise-dir.') + log.info(f'{len(noise_runs)} noise chain found in noise-dir.') else: - log.warning(f'{len(noise_runs)} noise chains found in noise-dir. Using first in sorted list.') + log.info(f'{len(noise_runs)} noise chains found in noise-dir. Using first in sorted list.') used_chains = os.path.abspath(noise_runs[0]) available_chains = [os.path.abspath(n) for n in noise_runs] From 1781e1e0223c48ae05a35bf75434d3484e21f81f Mon Sep 17 00:00:00 2001 From: Thankful Cromartie Date: Tue, 11 Mar 2025 16:09:04 +0000 Subject: [PATCH 186/193] Added path to noise-dir warnings. See simultaneous MR in NG20 repo (notebook changes) --- src/pint_pal/utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pint_pal/utils.py b/src/pint_pal/utils.py index 5621d959..b2b940a9 100644 --- a/src/pint_pal/utils.py +++ b/src/pint_pal/utils.py @@ -1254,13 +1254,13 @@ def check_recentness_noise(tc): available_chains = [os.path.basename(n) for n in noise_runs] if not noise_runs: - log.warning('Looking for noise chains in given noise-dir, but does not follow current conventions.') + log.warning(f'Looking for noise chains in given noise-dir ({d}), but does not follow current conventions (shared chains in /nanograv/share/20yr/noise-chains///).') noise_runs = [os.path.dirname(os.path.abspath(p)) for p in sorted(glob.glob(os.path.join(d, tc.get_source()+"_"+tc.get_toa_type().lower()+"*", "chain*.txt")))] if len(noise_runs) > 0: if len(noise_runs) == 1: - log.info(f'{len(noise_runs)} noise chain found in noise-dir.') + log.info(f'{len(noise_runs)} noise chain found in noise-dir ({d}).') else: - log.info(f'{len(noise_runs)} noise chains found in noise-dir. Using first in sorted list.') + log.info(f'{len(noise_runs)} noise chains found in noise-dir ({d}). 
Using first in sorted list.') used_chains = os.path.abspath(noise_runs[0]) available_chains = [os.path.abspath(n) for n in noise_runs] From 0854e3ed3d0d49ec6285f924bbb5815ae75a64dd Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 13 Mar 2025 22:55:55 -0700 Subject: [PATCH 187/193] fix integer bug --- src/pint_pal/noise_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index d2afbffd..f8d10549 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -446,7 +446,7 @@ def model_noise( log.info(f"Setting up noise analysis with {likelihood} likelihood and {sampler} sampler for {e_psr.name}") # Setup a single pulsar PTA using enterprise_extensions # Ensure n_iter is an integer - sampler_kwargs['n_iter'] = int(sampler_kwargs['n_iter']) + sampler_kwargs['n_iter'] = int(float(sampler_kwargs['n_iter'])) if sampler_kwargs['n_iter'] < 1e4: log.warning( From 269fc964dd955c1b2ac207b590dcb745530393b5 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 13 Mar 2025 23:17:19 -0700 Subject: [PATCH 188/193] add print statements --- src/pint_pal/noise_utils.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index f8d10549..22f8d30a 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -501,12 +501,19 @@ def model_noise( ####### # setup sampler using enterprise_extensions if sampler_kwargs['empirical_distr'] is not None: - log.info(f"Attempting to create empirical distributions from {sampler_kwargs['empirical_distr']}") try: + log.info(f"Attempting to load chains for an empirical distributions from {sampler_kwargs['empirical_distr']}") core = co.Core(chaindir=sampler_kwargs['empirical_distr']) - emp_dist = make_emp_distr(core) except: - log.warning(f"Failed to create empirical distributions ... check path.") + log.warning(f"Failed to load chains for empirical distributions from {sampler_kwargs['empirical_distr']}.\nCheck path. Need absolute path to chain directory with `pars.txt` and `chain_1.txt`. 
files") + core = None + try: + if core is not None: + emp_dist = make_emp_distr(core) + log.info(f"Successfully created empirical distributions !!") + log.info("Setting up sampler ...") + except: + log.warning(f"Failed to create empirical distributions from successfully loaded directory.") emp_dist = None else: log.warning("Setting up sampler without empirical distributions...consider adding one for faster sampling...") From 538a57903175eebd59a60720ae0f99ba586a8ccb Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Thu, 13 Mar 2025 23:49:52 -0700 Subject: [PATCH 189/193] adding single likelihood evaluation timer --- src/pint_pal/noise_utils.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index 22f8d30a..b6ec0287 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -1,4 +1,4 @@ -import numpy as np, os, json, itertools +import numpy as np, os, json, itertools, time from astropy import log from astropy.time import Time @@ -533,6 +533,10 @@ def model_noise( # try to initialize the sampler to the maximum likelihood value from a previous run # initialize to a random point if any points are missing x0 = get_init_sample_from_chain_path(pta, chaindir=sampler_kwargs['empirical_distr']) + try: + log_single_likelihood_evaluation_time(pta, sampler_kwargs) + except: + log.warning("Failed to time likelihood.") if not return_sampler_without_sampling: # Start sampling log.info("Beginnning to sample...") @@ -974,6 +978,7 @@ def get_init_sample_from_chain_path(pta, chaindir=None, json_path=None): """ try: if chaindir is not None: + log.info(f"Attempting to initialize sampler from MAP of chain directory {chaindir}") core = co.Core(chaindir) starting_point = core.get_map_dict() x0_dict = {} @@ -1015,7 +1020,6 @@ def make2d(pars, samples, bins=None, nbins=81): bins = [np.linspace(min(samples[:, i]), max(samples[:, i]), nbins) for i in idx] return EmpiricalDistribution2D(pars, samples.T, bins) - def make_emp_distr(core): """ Make empirical distributions for all parameters in core. @@ -1042,6 +1046,19 @@ def make_emp_distr(core): return dists +def log_single_likelihood_evaluation_time(pta, sampler_kwargs): + """ + Log the time it takes to evaluate the likelihood once. 
+    """
+    log.info("Building the enterprise likelihood and estimating evaluation time...")
+    x1 = [[p.sample() for p in pta.params] for _ in range(11)]
+    pta.get_lnlikelihood(x1[0])
+    start_time = time.time()
+    [pta.get_lnlikelihood(x1[i]) for i in range(1,11)]
+    end_time = time.time()
+    slet = (end_time-start_time)/10
+    log.info(f"Single likelihood evaluation time is approximately {slet:.1e} seconds")
+    log.info(f"4 times {sampler_kwargs['n_iter']} likelihood evaluations will take approximately: {4*slet*float(sampler_kwargs['n_iter'])/3600/24:.2f} days")

From 755ba75e02490d54e29abd22ca04dca1ef13c414 Mon Sep 17 00:00:00 2001
From: Jeremy Baier
Date: Fri, 14 Mar 2025 00:06:23 -0700
Subject: [PATCH 190/193] rename empirical distribution key

---
 src/pint_pal/noise_utils.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py
index b6ec0287..d08776fd 100644
--- a/src/pint_pal/noise_utils.py
+++ b/src/pint_pal/noise_utils.py
@@ -500,12 +500,12 @@ def model_noise(
     groups = setup_sampling_groups(pta, write_groups=False, outdir=outdir)
     #######
     # setup sampler using enterprise_extensions
-    if sampler_kwargs['empirical_distr'] is not None:
+    if sampler_kwargs['emp_distribution'] is not None:
         try:
-            log.info(f"Attempting to load chains for an empirical distributions from {sampler_kwargs['empirical_distr']}")
-            core = co.Core(chaindir=sampler_kwargs['empirical_distr'])
+            log.info(f"Attempting to load chains for an empirical distributions from {sampler_kwargs['emp_distribution']}")
+            core = co.Core(chaindir=sampler_kwargs['emp_distribution'])
         except:
-            log.warning(f"Failed to load chains for empirical distributions from {sampler_kwargs['empirical_distr']}.\nCheck path. Need absolute path to chain directory with `pars.txt` and `chain_1.txt`. 
files") core = None try: if core is not None: @@ -532,7 +532,7 @@ def model_noise( # Initial sample # try to initialize the sampler to the maximum likelihood value from a previous run # initialize to a random point if any points are missing - x0 = get_init_sample_from_chain_path(pta, chaindir=sampler_kwargs['empirical_distr']) + x0 = get_init_sample_from_chain_path(pta, chaindir=sampler_kwargs['emp_distribution']) try: log_single_likelihood_evaluation_time(pta, sampler_kwargs) except: @@ -1096,7 +1096,7 @@ def get_model_and_sampler_default_settings(): 'sampler': 'PTMCMCSampler', # ptmcmc kwargs 'n_iter': 2.5e5, - 'empirical_distr': None, + 'emp_distribution': None, # numpyro kwargs 'num_steps': 25, 'num_warmup': 500, From a85430267fca6b87ac2e8704ab47c091864751a0 Mon Sep 17 00:00:00 2001 From: Jeremy Baier Date: Fri, 14 Mar 2025 00:15:16 -0700 Subject: [PATCH 191/193] add additional instructions for adding empirical distributions --- src/pint_pal/noise_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pint_pal/noise_utils.py b/src/pint_pal/noise_utils.py index d08776fd..a4796999 100644 --- a/src/pint_pal/noise_utils.py +++ b/src/pint_pal/noise_utils.py @@ -516,7 +516,7 @@ def model_noise( log.warning(f"Failed to create empirical distributions from successfully loaded directory.") emp_dist = None else: - log.warning("Setting up sampler without empirical distributions...consider adding one for faster sampling...") + log.warning("Setting up sampler without empirical distributions... Consider adding one for faster sampling by adding `emp_distribution`: //_nb to the `noise_run`->`inference` section of the config file.") emp_dist = None samp = ee_sampler.setup_sampler(pta, outdir=outdir, From 51b47be02b0f6acd8b56fa86cd93665d939dd7d1 Mon Sep 17 00:00:00 2001 From: "deborah.good" Date: Thu, 17 Jul 2025 14:43:24 -0600 Subject: [PATCH 192/193] Added LOFAR, NenuFaR, CPTA colors. 
Changed CHIME color --- src/pint_pal/plot_settings.yaml | 34 +++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/src/pint_pal/plot_settings.yaml b/src/pint_pal/plot_settings.yaml index f8bb90f0..3bc2e260 100644 --- a/src/pint_pal/plot_settings.yaml +++ b/src/pint_pal/plot_settings.yaml @@ -2,22 +2,25 @@ # Changes here will be applied to notebook plotting immediately (after restarting the kernel) obs_c: { - "ao": "#6BA9E2", - "arecibo": "#6BA9E2", + "ao": "#40C3C3", + "arecibo": "#40C3C3" , "gbt": "#61C853", "vla": "#40635F", - "CHIME": "#ECE133", + "CHIME": "#A3DB8B", "nancay": "#407BD5", "ncyobs": "#407BD5", - "effelsberg_asterix": "#407BD5", - "effelsberg": "#407BD5", - "leap": "#ECE133", - "jodrell": "#407BD5", - "jbroach": "#407BD5", - "wsrt": "#E5A4CB", + "effelsberg_asterix": "#00A9E2", + "effelsberg": "#00A9E2", + "leap": "#004B97", + "jodrell": "#404BD5", + "jbroach": "#404BD5", + "wsrt": "#404B97", "parkes": "#BE0119", "gmrt": "#855CA0", "meerkat": "#FD9927", + "fast": "#FD99CA", + "nenufar": "#E0DEFF", + "lofar": "#E0EAFF", "None": "#808080", } @@ -27,6 +30,7 @@ pta_c: { "NANOGrav": "#61C853", "PPTA": "#BE0119", "MPTA": "#FD9927", + "CPTA": "#FD99CA", "None": "#808080", } @@ -103,7 +107,12 @@ febe_c: { "UWL_PDFB4_40CM": "#BE0119", "None": "#808080", "unknown_asterix": "#855CA0", - "CHIME": "#ECE133", + "CHIME": "#A3DB8B", + "unknown_LuMP": "#E0EAFF", + "unknown_COBALT": "#E0EAFF", + "unknown_LOFAR": "#E0EAFF", + "LaNewBa_LUPPI": "#E0DEFF", + "19BEAM_MB4K": "#FD99CA", } ng20_c: { @@ -144,6 +153,9 @@ obs_m: { "parkes": "x", "gmrt": "x", "meerkat": "x", + "fast": "x", + "lofar": "x", + "nenufar": "x", "None": "x", } @@ -153,6 +165,7 @@ pta_m: { "NANOGrav": "x", "PPTA": "x", "MPTA": "x", + "CPTA": "x", "None": "x", } @@ -252,6 +265,7 @@ febe_m: { "3GHz_YUPPI": "x", "6GHz_YUPPI": "x", "CHIME": "x", + "19BEAM_MB4K": "x", } label_names: { From f95c71cf9002dc01469c8a154601c2153607bc19 Mon Sep 17 00:00:00 2001 From: "deborah.good" Date: Tue, 29 Jul 2025 11:32:42 -0600 Subject: [PATCH 193/193] Resolved additional merge conflict in pyproject.toml --- .DS_Store | Bin 0 -> 6148 bytes pyproject.toml | 10 --------- src/.DS_Store | Bin 0 -> 6148 bytes src/pint_pal/plot_settings.yaml | 35 -------------------------------- tests/.DS_Store | Bin 0 -> 6148 bytes 5 files changed, 45 deletions(-) create mode 100644 .DS_Store create mode 100644 src/.DS_Store create mode 100644 tests/.DS_Store diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..6fc8a45be0aa1d79ecf6d18a038965acc52c2f6b GIT binary patch literal 6148 zcmeHKu}T9$5S=wA1{FC_(DD#V71LRWoN{)8fqZ~^i6lbe1(VcXV;!;Z7p&5Yje=ll zA%cbefVGw)&g^cIT`vh1A|f*|^Y(UTcIF=3?kxa_^+q8B-~vDzmCzYQ(?iHlnvfwq zGleK8nIR5SF75=hA1bR%Y(z zcFpz|2KuWdV{cfv##9(-*M$-^AhfE( zZ~!4}Lml>@0#zu(t{m5_&cyP?pjflV=Evpntm3Kf+Ge-kiMRR|a;r^24Gtl1bvbdh ziCt=s+0*^=>%&P?^49To`AT}NdakTDtXM&7GqY8vnugb`d%F7Gu4_efYxj<@>Swc9 zfaQl6z%!d9J*JQ?Gr$Zm104ovfACNVeT}g~xpknCR{%r@r={T7r2(R+(bpI&L=TEk zsfa37=qrX$={WD{IA3F|P^E*=mk*&U3w=WovO2Es$#4+9Lbl8RGhj22Qjad3{~P`9 z|8^2TF$2uNzhXc*K`vOwSJG!|;^pY9^-yC{xhr<5&*pD4s!;fH SA=2.3.0", "notebook", "seaborn", - "gitpython", -<<<<<<< HEAD "la-forge", "arviz", "fastshermanmorrison-pulsar", -======= -<<<<<<< HEAD - "la-forge", - "arviz", -======= - "fastshermanmorrison-pulsar", ->>>>>>> 8c63ad5 (Add fastshermanmorrison to requirements) ->>>>>>> merge_pint_v1.1_fix ] classifiers = [ "Programming Language :: Python :: 3", diff --git a/src/.DS_Store b/src/.DS_Store new file 
mode 100644 index 0000000000000000000000000000000000000000..a43e719ca326b77ad7820be7c296ed61b9f9c2de GIT binary patch literal 6148 zcmeHKu}T9$5Pcg{L573{nuoV0kdq2Q8 zGs~GX5n`hvGsn!ko1MAceS6&O0tnsfK?i69Xi)_l%QSOD+C^ui5k1lv^c_biaDvfj z*y+tgyW=l1V2fMe5))iuYW+_0aiIfVeRw3cxU#jFah_%UQQnufo4u#It>>5h`Ei4V z;);t-uj*4iM_}!9pV-A2hP-xkEa%AOORKW5e!p1O^w{4f+ncYqI`#$n8|NRh`Cevi zemK+#x^V`a0cYTs7+}p7X{{K#bq1UPXP{z0{tpRNFpXFm>aT-Fj{w9b-737-%^NWh z2QiIU8qz}vmP)kL5RVwa(&B>wsk~EG#nu{bn6T_19b+L z?6EEN|KRKMe?7^coB?OxUojAxSug8yO0l-iOipTTLOrI6NL*=HrSL{eF=M3^Z%|d} XPvnM}Ml22Kq4-8XX>j8V{3rwOqHAC< literal 0 HcmV?d00001 diff --git a/src/pint_pal/plot_settings.yaml b/src/pint_pal/plot_settings.yaml index c9221ba1..3bc2e260 100644 --- a/src/pint_pal/plot_settings.yaml +++ b/src/pint_pal/plot_settings.yaml @@ -2,24 +2,6 @@ # Changes here will be applied to notebook plotting immediately (after restarting the kernel) obs_c: { -<<<<<<< HEAD - "ao": "#6BA9E2", - "arecibo": "#6BA9E2", - "gbt": "#61C853", - "vla": "#40635F", - "CHIME": "#ECE133", - "nancay": "#407BD5", - "ncyobs": "#407BD5", - "effelsberg_asterix": "#407BD5", - "effelsberg": "#407BD5", - "leap": "#ECE133", - "jodrell": "#407BD5", - "jbroach": "#407BD5", - "wsrt": "#E5A4CB", - "parkes": "#BE0119", - "gmrt": "#855CA0", - "meerkat": "#FD9927", -======= "ao": "#40C3C3", "arecibo": "#40C3C3" , "gbt": "#61C853", @@ -39,7 +21,6 @@ obs_c: { "fast": "#FD99CA", "nenufar": "#E0DEFF", "lofar": "#E0EAFF", ->>>>>>> merge_pint_v1.1_fix "None": "#808080", } @@ -49,10 +30,7 @@ pta_c: { "NANOGrav": "#61C853", "PPTA": "#BE0119", "MPTA": "#FD9927", -<<<<<<< HEAD -======= "CPTA": "#FD99CA", ->>>>>>> merge_pint_v1.1_fix "None": "#808080", } @@ -129,16 +107,12 @@ febe_c: { "UWL_PDFB4_40CM": "#BE0119", "None": "#808080", "unknown_asterix": "#855CA0", -<<<<<<< HEAD - "CHIME": "#ECE133", -======= "CHIME": "#A3DB8B", "unknown_LuMP": "#E0EAFF", "unknown_COBALT": "#E0EAFF", "unknown_LOFAR": "#E0EAFF", "LaNewBa_LUPPI": "#E0DEFF", "19BEAM_MB4K": "#FD99CA", ->>>>>>> merge_pint_v1.1_fix } ng20_c: { @@ -179,12 +153,9 @@ obs_m: { "parkes": "x", "gmrt": "x", "meerkat": "x", -<<<<<<< HEAD -======= "fast": "x", "lofar": "x", "nenufar": "x", ->>>>>>> merge_pint_v1.1_fix "None": "x", } @@ -194,10 +165,7 @@ pta_m: { "NANOGrav": "x", "PPTA": "x", "MPTA": "x", -<<<<<<< HEAD -======= "CPTA": "x", ->>>>>>> merge_pint_v1.1_fix "None": "x", } @@ -297,10 +265,7 @@ febe_m: { "3GHz_YUPPI": "x", "6GHz_YUPPI": "x", "CHIME": "x", -<<<<<<< HEAD -======= "19BEAM_MB4K": "x", ->>>>>>> merge_pint_v1.1_fix } label_names: { diff --git a/tests/.DS_Store b/tests/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..55c6c62b3d54c9fb69e1e619540e74353715f851 GIT binary patch literal 6148 zcmeHKu};G<5IxgMtzhW@f-&L`I^*-wfc1AuAGqb5KD0P5(3)m>~h7{$pw+mRkBHg1d&3}H4N zHoJqw%hnE6Ko$6H3edM(g8?LP3)fKie&Y#GwXyGK#7P+T$4MU}^^c3i!}0U-bW>LJ zjjYKH?G_oE#bKZV6S#ya?;}}c5#^C0|I)r4zTP{2sf&}G*N>L@R>W6>DO|w}<}il4 zZ2!u5J5PQ_k^iN$=KnHIaUR>V%Jb>bd@4AL5yYkSw+~i%q{x5r_@1xd)fIXDXo;_V zPJC+rQvp1)$?AKI+NuJofGV(7fcA#~ozM!VjBe||#$5prtJv)fpXrx^Y63wkm@;An z&2T9qmm0ZZ7%s>DK=Q0$%E;wlUa$1N0s`8Reynt^^x7j@<(t#YgDQFc#1Qq7_UT Ru?J0m1e^@or~*H#z$dO$i$?$e literal 0 HcmV?d00001
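
A closing aside on patch 189's runtime estimator: its pattern — one warm-up likelihood call, a short timing loop, then extrapolation using the factor-of-four heuristic the patch applies (roughly four likelihood evaluations per PTMCMC iteration) — works for any expensive likelihood. A minimal standalone sketch; the function and the toy Gaussian log-likelihood below are illustrative, not pint_pal API:

import time
import numpy as np

def estimate_sampling_time(lnlike, draw_sample, n_iter, n_trials=10):
    xs = [draw_sample() for _ in range(n_trials + 1)]
    lnlike(xs[0])                 # warm-up call: fill caches, trigger any JIT
    t0 = time.time()
    for x in xs[1:]:              # time n_trials real evaluations
        lnlike(x)
    per_call = (time.time() - t0) / n_trials
    days = 4 * per_call * n_iter / 86400   # ~4 evaluations per iteration
    print(f"~{per_call:.1e} s per evaluation; ~{days:.2f} days for {n_iter:.0e} iterations")

rng = np.random.default_rng(1)
estimate_sampling_time(lambda x: float(-0.5 * np.sum(x**2)),  # toy Gaussian log-likelihood
                       lambda: rng.normal(size=20),
                       n_iter=250_000)

Printing this estimate before sampling starts, as patch 189 does, lets a user abort a multi-day run early rather than discovering the cost after the fact.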