From 860b541da8238d5b253962c0e655367b23151cfa Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 16 Oct 2024 16:19:26 +0000 Subject: [PATCH 01/16] Converting to setup-micromamba@v1 --- .github/workflows/test_notebook.yml | 34 ++++++++++++++++++----------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index b430d58..cfff2ff 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -10,12 +10,12 @@ on: jobs: build: - - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10"] + os: [ubuntu-latest, macos-13] + python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] steps: - name: Install pdflatex @@ -23,22 +23,30 @@ jobs: sudo apt-get update sudo apt-get install texlive-latex-base cm-super-minimal pdftk latex2html - uses: actions/checkout@v3 - - uses: conda-incubator/setup-miniconda@v2 + - uses: mamba-org/setup-micromamba@v1 with: + init-shell: bash python-version: ${{ matrix.python-version }} - mamba-version: "*" - channels: conda-forge,defaults + mamba-version: "latest" + environment-name: pulsar + channels: conda-forge channel-priority: true - - name: Conda info - shell: bash -el {0} - run: conda info - - name: Install Dependencies & Main Code - shell: bash -el {0} + create-args: >- + python=${{ matrix.python-version }} + pytest + cython=0.29.36 + tempo2 + pint + enterprise-pulsar + enterprise_extensions + enterprise_outliers + scikit-sparse + - name: Install Main Code + shell: micromamba shell {0} run: | - mamba install -c conda-forge python=${{ matrix.python-version }} pytest cython=0.29.36 tempo2 enterprise-pulsar enterprise_extensions scikit-sparse pip install -e . - name: Test with Standard Pulsar - shell: bash -el {0} + shell: micromamba shell {0} run: | export PULSAR_NAME='J0605+3757' export JUPYTER_PLATFORM_DIRS=1 && jupyter --paths From a96b0ebf790b46940b3cd3336c0cd8bd72d9fe8a Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 16 Oct 2024 16:25:15 +0000 Subject: [PATCH 02/16] Decreasing tests for bugfixing and removing old variables. --- .github/workflows/test_notebook.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index cfff2ff..fbd01c0 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -15,7 +15,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-13] - python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] + python-version: ["3.10"] #["3.8", "3.9", "3.10", "3.11", "3.12"] steps: - name: Install pdflatex @@ -26,15 +26,13 @@ jobs: - uses: mamba-org/setup-micromamba@v1 with: init-shell: bash - python-version: ${{ matrix.python-version }} mamba-version: "latest" environment-name: pulsar - channels: conda-forge - channel-priority: true create-args: >- + -c conda-forge python=${{ matrix.python-version }} pytest - cython=0.29.36 + cython tempo2 pint enterprise-pulsar From aeb69eb2ca9cd8e88aa567a915d355989991d751 Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 16 Oct 2024 16:30:20 +0000 Subject: [PATCH 03/16] Dealing with the enterprise_outlier issue and fixing a typo. Remove macos test for now. 
--- .github/workflows/test_notebook.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index fbd01c0..16d1cec 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -14,7 +14,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-13] + os: [ubuntu-latest] #macos-13] python-version: ["3.10"] #["3.8", "3.9", "3.10", "3.11", "3.12"] steps: @@ -33,11 +33,10 @@ jobs: python=${{ matrix.python-version }} pytest cython + pint-pulsar tempo2 - pint enterprise-pulsar enterprise_extensions - enterprise_outliers scikit-sparse - name: Install Main Code shell: micromamba shell {0} From e92c6ceec03bb52d720651a4e5ee8b6d04999630 Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 16 Oct 2024 16:47:43 +0000 Subject: [PATCH 04/16] Trying to find the artifacts. --- .github/workflows/test_notebook.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index 16d1cec..d85af33 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -49,6 +49,7 @@ jobs: export JUPYTER_PLATFORM_DIRS=1 && jupyter --paths tree tests pytest tests/test_run_notebook.py -n auto -k $PULSAR_NAME + ls -lah mv tmp-* nb_outputs - name: Archive Notebook Output Files uses: actions/upload-artifact@v3 From 3f1d89022dd5a7c4469fd13f96651446d34ff2ff Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 16 Oct 2024 16:56:44 +0000 Subject: [PATCH 05/16] Locating archive files ... --- .github/workflows/test_notebook.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index d85af33..287eab5 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -26,7 +26,6 @@ jobs: - uses: mamba-org/setup-micromamba@v1 with: init-shell: bash - mamba-version: "latest" environment-name: pulsar create-args: >- -c conda-forge @@ -50,7 +49,9 @@ jobs: tree tests pytest tests/test_run_notebook.py -n auto -k $PULSAR_NAME ls -lah - mv tmp-* nb_outputs + mv temp* nb_outputs + - name: Find Files + run: ls -lah - name: Archive Notebook Output Files uses: actions/upload-artifact@v3 with: From a1137036dd48f4fdf8ee78cb4d429bb7a1173387 Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 16 Oct 2024 17:02:05 +0000 Subject: [PATCH 06/16] Still searching. --- .github/workflows/test_notebook.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index 287eab5..387555c 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -42,11 +42,10 @@ jobs: run: | pip install -e . - name: Test with Standard Pulsar - shell: micromamba shell {0} + shell: bash {0} run: | export PULSAR_NAME='J0605+3757' - export JUPYTER_PLATFORM_DIRS=1 && jupyter --paths - tree tests + export JUPYTER_PLATFORM_DIRS=1 pytest tests/test_run_notebook.py -n auto -k $PULSAR_NAME ls -lah mv temp* nb_outputs From d8a75f99e8784ffa5dd756cb44e566863f6963ee Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 16 Oct 2024 17:06:42 +0000 Subject: [PATCH 07/16] Fixing pytest? 
--- .github/workflows/test_notebook.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index 387555c..5657e25 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -42,13 +42,14 @@ jobs: run: | pip install -e . - name: Test with Standard Pulsar - shell: bash {0} + shell: bash -el {0} run: | export PULSAR_NAME='J0605+3757' export JUPYTER_PLATFORM_DIRS=1 + tree tests pytest tests/test_run_notebook.py -n auto -k $PULSAR_NAME ls -lah - mv temp* nb_outputs + #mv temp* nb_outputs - name: Find Files run: ls -lah - name: Archive Notebook Output Files From c68925b31ae5889f005a38112e182313e7e254ee Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 16 Oct 2024 17:07:57 +0000 Subject: [PATCH 08/16] Updateing artifact and checkout to v4. --- .github/workflows/test_notebook.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index 5657e25..c5321bf 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -22,7 +22,7 @@ jobs: run: | sudo apt-get update sudo apt-get install texlive-latex-base cm-super-minimal pdftk latex2html - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: mamba-org/setup-micromamba@v1 with: init-shell: bash @@ -53,7 +53,7 @@ jobs: - name: Find Files run: ls -lah - name: Archive Notebook Output Files - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: TestNB-OutputFiles path: | From 1467a7c69d2452bef5feefa8671e8086bf11a557 Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 16 Oct 2024 17:13:22 +0000 Subject: [PATCH 09/16] removing "-n auto" from pytest. --- .github/workflows/test_notebook.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index c5321bf..4a2083c 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -47,7 +47,7 @@ jobs: export PULSAR_NAME='J0605+3757' export JUPYTER_PLATFORM_DIRS=1 tree tests - pytest tests/test_run_notebook.py -n auto -k $PULSAR_NAME + pytest tests/test_run_notebook.py -k $PULSAR_NAME ls -lah #mv temp* nb_outputs - name: Find Files From f945a0fa3e5d96d568a9fdb194c01d5d39470901 Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 16 Oct 2024 17:16:56 +0000 Subject: [PATCH 10/16] Switching back to bash -el for pip install. --- .github/workflows/test_notebook.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index 4a2083c..c479700 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -38,7 +38,7 @@ jobs: enterprise_extensions scikit-sparse - name: Install Main Code - shell: micromamba shell {0} + shell: bash -el {0} run: | pip install -e . - name: Test with Standard Pulsar From 8c17b60005a464e87c6e370b7c83babc7d072f56 Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 16 Oct 2024 17:34:40 +0000 Subject: [PATCH 11/16] Update to include pint-pal requirements and cache conda env. 
--- .github/workflows/test_notebook.yml | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index c479700..82b462f 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -18,7 +18,7 @@ jobs: python-version: ["3.10"] #["3.8", "3.9", "3.10", "3.11", "3.12"] steps: - - name: Install pdflatex + - name: Install Required Ubuntu Packages run: | sudo apt-get update sudo apt-get install texlive-latex-base cm-super-minimal pdftk latex2html @@ -27,6 +27,8 @@ jobs: with: init-shell: bash environment-name: pulsar + cache-environment: true + cache-downloads: true create-args: >- -c conda-forge python=${{ matrix.python-version }} @@ -37,6 +39,14 @@ jobs: enterprise-pulsar enterprise_extensions scikit-sparse + ruamel.yaml + nbconvert + ipywidgets>=7.6.3 + weasyprint + pytest-xdist>=2.3.0 + jupyter + seaborn + gitpython - name: Install Main Code shell: bash -el {0} run: | @@ -46,12 +56,9 @@ jobs: run: | export PULSAR_NAME='J0605+3757' export JUPYTER_PLATFORM_DIRS=1 - tree tests pytest tests/test_run_notebook.py -k $PULSAR_NAME ls -lah - #mv temp* nb_outputs - - name: Find Files - run: ls -lah + mv tmp* nb_outputs - name: Archive Notebook Output Files uses: actions/upload-artifact@v4 with: From dcd4a390761f9a898f8230f17cabe8d649342fa9 Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 16 Oct 2024 18:03:27 +0000 Subject: [PATCH 12/16] Include libstempo and scikit learn. --- .github/workflows/test_notebook.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index 82b462f..3f7c684 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -36,9 +36,11 @@ jobs: cython pint-pulsar tempo2 + libstempo enterprise-pulsar enterprise_extensions scikit-sparse + scikit-learn ruamel.yaml nbconvert ipywidgets>=7.6.3 From 507419ce21af9a6e47db95bcd57832727d2b0cc1 Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 16 Oct 2024 18:04:55 +0000 Subject: [PATCH 13/16] Update to include comment regarding macos and include more python versions. --- .github/workflows/test_notebook.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index 3f7c684..a7b5f54 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -14,8 +14,8 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest] #macos-13] - python-version: ["3.10"] #["3.8", "3.9", "3.10", "3.11", "3.12"] + os: [ubuntu-latest] # Once we get the tex packages changed, we should include "macos-13" + python-version: ["3.9", "3.10", "3.11", "3.12"] steps: - name: Install Required Ubuntu Packages From 441af4d09f489fd40cbab577cfac2de34d456873 Mon Sep 17 00:00:00 2001 From: Joseph Glaser Date: Wed, 16 Oct 2024 19:55:06 +0000 Subject: [PATCH 14/16] Fixing archive name and testing archive commenting. 
--- .github/workflows/commenting_artifacts.yml | 50 ++++++++++++++++++++++ .github/workflows/test_notebook.yml | 7 ++- 2 files changed, 55 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/commenting_artifacts.yml diff --git a/.github/workflows/commenting_artifacts.yml b/.github/workflows/commenting_artifacts.yml new file mode 100644 index 0000000..bd7bc20 --- /dev/null +++ b/.github/workflows/commenting_artifacts.yml @@ -0,0 +1,50 @@ +name: Bind Reports to Pull-Request + +# Triggered by the name of the previos +on: + workflow_run: + workflows: ["Notebook Pipeline (Ubuntu)"] + types: [completed] + +# Enable the option to post a comment +permissions: + pull-requests: write + +jobs: + build: + runs-on: ubuntu-latest + # Check that the previos workflow succeeded + if: ${{ github.event.workflow_run.conclusion == 'success' }} + steps: + - name: Get Artifacts Link + env: + WORKFLOW_RUN_EVENT_OBJ: ${{ toJSON(github.event.workflow_run) }} + GH_TOKEN: ${{ github.token }} + run: | + PREVIOUS_JOB_ID=$(jq -r '.id' <<< "$WORKFLOW_RUN_EVENT_OBJ") + SUITE_ID=$(jq -r '.check_suite_id' <<< "$WORKFLOW_RUN_EVENT_OBJ") + # Sample for a single artifact, can be improved for a multiple artifacts + ARTIFACT_ID=$(gh api "/repos/${{ github.repository }}/actions/artifacts" \ + --jq ".artifacts.[] | + select(.workflow_run.id==${PREVIOUS_JOB_ID}) | + .id") + echo "ARTIFACT_URL=https://github.com/${{ github.repository }}/suites/${SUITE_ID}/artifacts/${ARTIFACT_ID}" >> $GITHUB_ENV + PR_NUMBER=$(jq -r '.pull_requests[0].number' <<< "$WORKFLOW_RUN_EVENT_OBJ") + echo "PR_NUMBER=${PR_NUMBER}" >> $GITHUB_ENV + + - uses: actions/github-script@v6 + env: + PR_NUMBER: ${{ env.PR_NUMBER }} + PR_NOTES: | + Build artifacts: + | Name | Link | + |------|------| + | NB/WB Pipeline | [Archived ZIP](${{ env.ARTIFACT_URL }}) | + with: + script: | + github.rest.issues.createComment({ + issue_number: process.env.PR_NUMBER, + owner: context.repo.owner, + repo: context.repo.repo, + body: process.env.PR_NOTES + }) \ No newline at end of file diff --git a/.github/workflows/test_notebook.yml b/.github/workflows/test_notebook.yml index a7b5f54..cd1c5f8 100644 --- a/.github/workflows/test_notebook.yml +++ b/.github/workflows/test_notebook.yml @@ -64,7 +64,10 @@ jobs: - name: Archive Notebook Output Files uses: actions/upload-artifact@v4 with: - name: TestNB-OutputFiles + name: TestNB-OutputFiles_${{ matrix.python-version }}_${{ matrix.os }}_${{ steps.date.outputs.date }} path: | - nb_outputs + nb_outputs/*/*.pdf + nb_outputs/*/*.tim + nb_outputs/*/*.par + compression-level: 6 From 9977c6d9ac0a0a2ed5e5dd96d71ef284b6bf45d8 Mon Sep 17 00:00:00 2001 From: Kathryn Crowter Date: Fri, 18 Oct 2024 11:49:24 -0700 Subject: [PATCH 15/16] Add plot_residuals_serial --- src/pint_pal/plot_utils.py | 440 +++++++++++++++++++++++++++++++++++++ 1 file changed, 440 insertions(+) diff --git a/src/pint_pal/plot_utils.py b/src/pint_pal/plot_utils.py index d46b346..3d0a93f 100644 --- a/src/pint_pal/plot_utils.py +++ b/src/pint_pal/plot_utils.py @@ -2601,6 +2601,446 @@ def onclick(event): return +def plot_residuals_serial(fitter, restype = 'postfit', colorby='pta', plotsig = False, avg = False, whitened = False, \ + save = False, legend = True, title = True, axs = None, mixed_ecorr=False, epoch_lines=False, milli=False, **kwargs): + """ + Make a plot of the residuals vs. time + + + Arguments + --------- + fitter [object] : The PINT fitter object. + restype ['string'] : Type of residuals, pre or post fit, to plot from fitter object. 
Options are: + 'prefit' - plot the prefit residuals. + 'postfit' - plot the postfit residuals (default) + 'both' - overplot both the pre and post-fit residuals. + colorby ['string']: What to use to determine color/markers + 'pta' - color residuals by PTA (default) + 'obs' - color residuals by telescope + 'f' - color residuals by frontend/backend pair (flag not used by all PTAs). + plotsig [boolean] : If True plot number of measurements v. residuals/uncertainty, else v. residuals + [default: False]. + avg [boolean] : If True and not wideband fitter, will compute and plot epoch-average residuals [default: False]. + whitened [boolean] : If True will compute and plot whitened residuals [default: False]. + save [boolean] : If True will save plot with the name "resid_v_mjd.png" Will add averaged/whitened + as necessary [default: False]. + legend [boolean] : If False, will not print legend with plot [default: True]. + title [boolean] : If False, will not print plot title [default: True]. + axs [string] : If not None, should be defined subplot value and the figure will be used as part of a + larger figure [default: None]. + + + Optional Arguments: + -------------------- + res [list/array] : List or array of residual values to plot. Will override values from fitter object. + errs [list/array] : List or array of residual error values to plot. Will override values from fitter object. + mjds [list/array] : List or array of TOA MJDs to plot. Will override values from toa object. + obs[list/array] : List or array of TOA observatories combinations. Will override values from toa object. + figsize [tuple] : Size of the figure passed to matplotlib [default: (10,4)]. + fmt ['string'] : matplotlib format option for markers [default: ('x')] + color ['string'] : matplotlib color option for plot [default: color dictionary in plot_utils.py file] + alpha [float] : matplotlib alpha options for plot points [default: 0.5] + mixed_ecorr [boolean]: If True, allows avging with mixed ecorr/no ecorr TOAs. + epoch_lines [boolean]: If True, plot a vertical line at the first TOA of each observation file. + milli [boolean]: If True, plot y-axis in milliseconds rather than microseconds. 
+ """ + # Check if wideband + if fitter.is_wideband: + NB = False + if avg == True: + raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.") + else: + NB = True + + # Check if want epoch averaged residuals + if avg == True and restype == 'prefit' and mixed_ecorr == True: + avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) + no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True) + elif avg == True and restype == 'postfit' and mixed_ecorr == True: + avg_dict = fitter.resids.ecorr_average(use_noise_model=True) + no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True) + elif avg == True and restype == 'both' and mixed_ecorr == True: + avg_dict = fitter.resids.ecorr_average(use_noise_model=True) + no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids,use_noise_model=True) + avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) + no_avg_dict_pre = no_ecorr_average(fitter.toas, fitter.resids_init,use_noise_model=True) + elif avg == True and restype == 'prefit' and mixed_ecorr == False: + avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True) + elif avg == True and restype == 'postfit' and mixed_ecorr==False: + avg_dict = fitter.resids.ecorr_average(use_noise_model=True) + elif avg == True and restype == 'both' and mixed_ecorr == False: + avg_dict = fitter.resids.ecorr_average(use_noise_model=True) + avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True) + + + # adjust to millisec + if milli: + unit = u.ms + unitstr = "ms" + else: + unit = u.us + unitstr = "$\mu$s" + + # Get residuals + if 'res' in kwargs.keys(): + res = kwargs['res'] + else: + if restype == 'prefit': + if NB == True: + if avg == True and mixed_ecorr == True: + res = avg_dict['time_resids'].to(unit) + res_no_avg = no_avg_dict['time_resids'].to(unit) + elif avg==True and mixed_ecorr == False: + res = avg_dict['time_resids'].to(unit) + else: + res = fitter.resids_init.time_resids.to(unit) + else: + res = fitter.resids_init.residual_objs['toa'].time_resids.to(unit) + elif restype == 'postfit': + if NB == True: + if avg == True and mixed_ecorr == True: + res = avg_dict['time_resids'].to(unit) + res_no_avg = no_avg_dict['time_resids'].to(unit) + elif avg == True: + res = avg_dict['time_resids'].to(unit) + else: + res = fitter.resids.time_resids.to(unit) + else: + res = fitter.resids.residual_objs['toa'].time_resids.to(unit) + elif restype == 'both': + if NB == True: + if avg == True and mixed_ecorr == True: + res = avg_dict['time_resids'].to(unit) + res_no_avg = no_avg_dict['time_resids'].to(unit) + res_pre = avg_dict_pre['time_resids'].to(unit) + res_pre_no_avg = no_avg_dict_pre['time_resids'].to(unit) + elif avg == True and mixed_ecorr == False: + res = avg_dict['time_resids'].to(unit) + res_pre = avg_dict_pre['time_resids'].to(unit) + else: + res = fitter.resids.time_resids.to(unit) + res_pre = fitter.resids_init.time_resids.to(unit) + else: + res = fitter.resids.residual_objs['toa'].time_resids.to(unit) + res_pre = fitter.resids_init.residual_objs['toa'].time_resids.to(unit) + else: + raise ValueError("Unrecognized residual type: %s. 
Please choose from 'prefit', 'postfit', or 'both'."\ + %(restype)) + + # Check if we want whitened residuals + if whitened == True and ('res' not in kwargs.keys()): + if avg == True and mixed_ecorr == True: + if restype != 'both': + res = whiten_resids(avg_dict, restype=restype) + res_no_avg = whiten_resids(no_avg_dict, restype=restype) + else: + res = whiten_resids(avg_dict_pre, restype='prefit') + res_pre = whiten_resids(avg_dict, restype='postfit') + res_pre = res_pre.to(unit) + res_no_avg = whiten_resids(avg_dict_pre, restype='prefit') + res_pre_no_avg = whiten_resids(avg_dict, restype='postfit') + res_pre_no_avg = res_pre_no_avg.to(unit) + res = res.to(unit) + res_no_avg = res_no_avg.to(unit) + elif avg == True and mixed_ecorr == False: + if restype != 'both': + res = whiten_resids(avg_dict, restype=restype) + else: + res = whiten_resids(avg_dict_pre, restype='prefit') + res_pre = whiten_resids(avg_dict, restype='postfit') + res_pre = res_pre.to(unit) + res = res.to(unit) + else: + if restype != 'both': + res = whiten_resids(fitter, restype=restype) + else: + res = whiten_resids(fitter, restype='prefit') + res_pre = whiten_resids(fitter, restype='postfit') + res_pre = res_pre.to(unit) + res = res.to(unit) + + # Get errors + if 'errs' in kwargs.keys(): + errs = kwargs['errs'] + else: + if restype == 'prefit': + if avg == True and mixed_ecorr == True: + errs = avg_dict['errors'].to(unit) + errs_no_avg = no_avg_dict['errors'].to(unit) + elif avg == True and mixed_ecorr == False: + errs = avg_dict['errors'].to(unit) + else: + errs = fitter.toas.get_errors().to(unit) + elif restype == 'postfit': + if NB == True: + if avg == True and mixed_ecorr == True: + errs = avg_dict['errors'].to(unit) + errs_no_avg = no_avg_dict['errors'].to(unit) + elif avg == True and mixed_ecorr == False: + errs = avg_dict['errors'].to(unit) + else: + errs = fitter.resids.get_data_error().to(unit) + else: + errs = fitter.resids.residual_objs['toa'].get_data_error().to(unit) + elif restype == 'both': + if NB == True: + if avg == True and mixed_ecorr == True: + errs = avg_dict['errors'].to(unit) + errs_pre = avg_dict_pre['errors'].to(unit) + errs_no_avg = no_avg_dict['errors'].to(unit) + errs_no_avg_pre = no_avg_dict_pre['errors'].to(unit) + elif avg == True and mixed_ecorr == False: + errs = avg_dict['errors'].to(unit) + errs_pre = avg_dict_pre['errors'].to(unit) + else: + errs = fitter.resids.get_data_error().to(unit) + errs_pre = fitter.toas.get_errors().to(unit) + else: + errs = fitter.resids.residual_objs['toa'].get_data_error().to(unit) + errs_pre = fitter.toas.get_errors().to(unit) + # Get MJDs + if 'mjds' in kwargs.keys(): + mjds = kwargs['mjds'] + else: + mjds = fitter.toas.get_mjds().value + if avg == True and mixed_ecorr == True : + mjds = avg_dict['mjds'].value + mjds_no_avg = no_avg_dict['mjds'].value + years_no_avg = (mjds_no_avg - 51544.0)/365.25 + 2000.0 + + elif avg == True and mixed_ecorr == False: + mjds = avg_dict['mjds'].value + # Convert to years + years = (mjds - 51544.0)/365.25 + 2000.0 + + # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr. + # Create combined arrays + + if avg == True and mixed_ecorr == True: + combo_res = np.hstack((res, res_no_avg)) + combo_errs = np.hstack((errs, errs_no_avg)) + combo_years = np.hstack((years, years_no_avg)) + if restype =='both': + combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre)) + combo_res_pre = np.hstack((res_pre, res_no_avg_pre)) + + # Get colorby flag values (obs, PTA, febe, etc.) 
+ if 'colorby' in kwargs.keys(): + cb = kwargs['colorby'] + else: + cb = np.array(fitter.toas[colorby]) +#. Seems to run a little faster but not robust to obs? +# cb = np.array(fitter.toas.get_flag_value(colorby)[0]) + if avg == True: + avg_cb = [] + for iis in avg_dict['indices']: + avg_cb.append(cb[iis[0]]) + if mixed_ecorr == True: + no_avg_cb = [] + for jjs in no_avg_dict['indices']: + no_avg_cb.append(cb[jjs]) + no_ecorr_cb = np.array(no_avg_cb) + + cb = np.array(avg_cb) + + # Get the set of unique flag values + if avg==True and mixed_ecorr==True: + cb = np.hstack((cb,no_ecorr_cb)) + + CB = set(cb) + + + if colorby== 'pta': + colorscheme = colorschemes['pta'] + elif colorby == 'obs': + colorscheme = colorschemes['observatories'] + elif colorby == 'f': + colorscheme = colorschemes['febe'] + + + if 'figsize' in kwargs.keys(): + figsize = kwargs['figsize'] + else: + figsize = (10,5) + if axs == None: + fig = plt.figure(figsize=figsize) + ax1 = fig.add_subplot(111) + else: + fig = plt.gcf() + ax1 = axs + + # if want tempo2 version, where serial = order in tim file + #x = range(len(inds)) + # if want serial = order in mjd + x = np.argsort(mjds) + + # plot vertical line at the first TOA of each observation file + if epoch_lines and not avg: + names = fitter.toas["name"] + for nm in np.unique(names): + inds_name = np.where(names==nm)[0] + x_nm = x[inds_name] + ax1.axvline(min(x_nm), c="k", alpha=0.1) + + for i, c in enumerate(CB): + inds = np.where(cb==c)[0] + if not inds.tolist(): + cb_label = "" + else: + cb_label = cb[inds][0] + # Get plot preferences + if 'fmt' in kwargs.keys(): + mkr = kwargs['fmt'] + else: + try: + mkr = markers[cb_label] + if restype == 'both': + mkr_pre = '.' + except Exception: + mkr = 'x' + log.log(1, "Color by Flag doesn't have a marker label!!") + if 'color' in kwargs.keys(): + clr = kwargs['color'] + else: + try: + clr = colorscheme[cb_label] + except Exception: + clr = 'k' + log.log(1, "Color by Flag doesn't have a color!!") + if 'alpha' in kwargs.keys(): + alpha = kwargs['alpha'] + else: + alpha = 0.5 + + if 'label' in kwargs.keys(): + label = kwargs['label'] + else: + label = cb_label + + x_subsec = x[inds] + if avg == True and mixed_ecorr == True: + if plotsig: + combo_sig = combo_res[inds]/combo_errs[inds] + ax1.errorbar(x_subsec, combo_sig, yerr=len(combo_errs[inds])*[1], fmt=mkr, \ + color=clr, label=label, alpha = alpha, picker=True) + if restype == 'both': + combo_sig_pre = combo_res_pre[inds]/combo_errs_pre[inds] + ax1.errorbar(x_subsec, combo_sig_pre, yerr=len(combo_errs_pre[inds])*[1], fmt=mkr_pre, \ + color=clr, label=label+" Prefit", alpha = alpha, picker=True) + else: + ax1.errorbar(x_subsec, combo_res[inds], yerr=combo_errs[inds], fmt=mkr, \ + color=clr, label=label, alpha = alpha, picker=True) + if restype == 'both': + ax1.errorbar(x_subsec, combo_res_pre[inds], yerr=combo_errs_pre[inds], fmt=mkr_pre, \ + color=clr, label=label+" Prefit", alpha = alpha, picker=True) + + else: + if plotsig: + sig = res[inds]/errs[inds] + ax1.errorbar(x_subsec, sig, yerr=len(errs[inds])*[1], fmt=mkr, \ + color=clr, label=label, alpha = alpha, picker=True) + if restype == 'both': + sig_pre = res_pre[inds]/errs_pre[inds] + ax1.errorbar(x_subsec, sig_pre, yerr=len(errs_pre[inds])*[1], fmt=mkr_pre, \ + color=clr, label=label+" Prefit", alpha = alpha, picker=True) + else: + ax1.errorbar(x_subsec, res[inds], yerr=errs[inds], fmt=mkr, \ + color=clr, label=label, alpha = alpha, picker=True) + if restype == 'both': + ax1.errorbar(x_subsec, res_pre[inds], 
yerr=errs_pre[inds], fmt=mkr_pre, \
+                                color=clr, label=label+" Prefit", alpha = alpha, picker=True)
+
+    # Set second axis
+    ax1.set_xlabel(r'TOA Number')
+
+    if plotsig:
+        if avg and whitened:
+            ax1.set_ylabel('Average Residual/Uncertainty \n (Whitened)', multialignment='center')
+        elif avg and not whitened:
+            ax1.set_ylabel('Average Residual/Uncertainty')
+        elif whitened and not avg:
+            ax1.set_ylabel('Residual/Uncertainty \n (Whitened)', multialignment='center')
+        else:
+            ax1.set_ylabel('Residual/Uncertainty')
+    else:
+        if avg and whitened:
+            ax1.set_ylabel(f'Average Residual ({unitstr}) \n (Whitened)', multialignment='center')
+        elif avg and not whitened:
+            ax1.set_ylabel(f'Average Residual ({unitstr})')
+        elif whitened and not avg:
+            ax1.set_ylabel(f'Residual ({unitstr}) \n (Whitened)', multialignment='center')
+        else:
+            ax1.set_ylabel(f'Residual ({unitstr})')
+    if legend:
+        if len(CB) > 5:
+            ncol = int(np.ceil(len(CB)/2))
+            y_offset = 1.15
+        else:
+            ncol = len(CB)
+            y_offset = 1.0
+        ax1.legend(loc='upper center', bbox_to_anchor= (0.5, y_offset+1.0/figsize[1]), ncol=ncol)
+    if title:
+        if len(CB) > 5:
+            y_offset = 1.1
+        else:
+            y_offset = 1.0
+        if isinstance(title, str):
+            title_str = title
+        else:
+            title_str = "%s %s timing residuals" % (fitter.model.PSR.value, restype)
+        plt.title(title_str, y=y_offset+1.0/figsize[1])
+    if axs == None:
+        plt.tight_layout()
+    if save:
+        ext = ""
+        if whitened:
+            ext += "_whitened"
+        if avg:
+            ext += "_averaged"
+        if NB:
+            ext += "_NB"
+        else:
+            ext += "_WB"
+        if restype == 'prefit':
+            ext += "_prefit"
+        elif restype == 'postfit':
+            ext += "_postfit"
+        elif restype == "both":
+            ext += "_pre_post_fit"
+        plt.savefig("%s_resid_v_mjd%s.png" % (fitter.model.PSR.value, ext))
+
+    if axs == None:
+        # Define clickable points
+        text = ax1.text(0,0,"")
+
+        # Define point highlight color
+        stamp_color = "#FD9927"
+
+        def onclick(event):
+            # Get X and Y axis data
+            xdata = x
+            if plotsig:
+                ydata = (res/errs).decompose().value
+            else:
+                ydata = res.value
+            # Get x and y data from click
+            xclick = event.xdata
+            yclick = event.ydata
+            # Calculate scaled distance, find closest point index
+            d = np.sqrt(((xdata - xclick)/10.0)**2 + (ydata - yclick)**2)
+            ind_close = np.where(np.min(d) == d)[0]
+            # highlight clicked point
+            ax1.scatter(xdata[ind_close], ydata[ind_close], marker = 'x', c = stamp_color)
+            # Print point info
+            text.set_position((xdata[ind_close], ydata[ind_close]))
+            if plotsig:
+                text.set_text("TOA Params:\n MJD: %s \n Res/Err: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0]))
+            else:
+                text.set_text("TOA Params:\n MJD: %s \n Res: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0]))
+
+        fig.canvas.mpl_connect('button_press_event', onclick)
+
+    return
 
 
 def plot_fd_res_v_freq(fitter, plotsig = False, comp_FD = True, avg = False, whitened = False, save = False, \
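
For reviewers who want to exercise the plot_residuals_serial() function added in PATCH 15 outside the notebook CI, a minimal usage sketch follows. It only relies on the function signature and docstring from the patch plus standard PINT calls; the par/tim file names are placeholders, not files shipped with this change.

    # Minimal sketch, assuming a fitted PINT model/TOA pair on disk.
    # "J0605+3757.par" / "J0605+3757.tim" are placeholder file names.
    import pint.models
    import pint.fitter
    from pint_pal.plot_utils import plot_residuals_serial

    model, toas = pint.models.get_model_and_toas("J0605+3757.par", "J0605+3757.tim")
    fitter = pint.fitter.GLSFitter(toas, model)
    fitter.fit_toas()

    # Post-fit residuals ordered by TOA number (MJD-sorted), colored by
    # telescope, with a vertical line at the first TOA of each observation file.
    plot_residuals_serial(fitter, restype="postfit", colorby="obs",
                          epoch_lines=True, legend=True, save=False)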