Easy review changes
Michael Wilensky committed Sep 26, 2024
1 parent e52f86d commit 3add143
Showing 1 changed file with 6 additions and 21 deletions.
27 changes: 6 additions & 21 deletions notebooks/single_baseline_postprocessing_and_pspec.ipynb
@@ -7,7 +7,7 @@
"source": [
"# Single Baseline Filtering and Power Spectrum Estimation\n",
"\n",
- "**by Josh Dillon**, last updated August 21, 2024\n",
+ "**by Josh Dillon and Mike Wilensky**, last updated September 26, 2024\n",
"\n",
"This notebook is designed to take a single redundantly-averaged unique baseline (typically after LST-binning) and push it through all the way to the power spectrum. It operates on single files that contain a single baseline for all LSTs and both `'ee'` and `'nn'` polarizations. It then can:\n",
"* Throw out highly flagged times and/or channels\n",
@@ -69,6 +69,7 @@
"from functools import reduce\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib\n",
+ "from mpl_toolkits.axes_grid1 import make_axes_locatable\n",
"import copy\n",
"import warnings\n",
"from astropy import units\n",
@@ -149,6 +150,7 @@
"FR_QUANTILE_HIGH = float(os.environ.get(\"FR_QUANTILE_HIGH\", 0.95))\n",
"FR_EIGENVAL_CUTOFF = float(os.environ.get(\"FR_EIGENVAL_CUTOFF\", 1e-12))\n",
"TARGET_AVERAGING_TIME = 300 # coherent integration time in seconds. Actual time might be less so that all interleaves have the same number of samples averaged\n",
+ "USE_CORR_MATRIX = True # Whether to use the correlation matrix to compute the noise statistics correction factor\n",
"\n",
"# Power spectrum settings\n",
"EFIELD_HEALPIX_BEAM_FILE = os.environ.get(\"EFIELD_HEALPIX_BEAM_FILE\", \"/lustre/aoc/projects/hera/h6c-analysis/IDR2/beams/NF_HERA_Vivaldi_efield_beam_healpix.fits\")\n",
@@ -232,26 +234,14 @@
"AUTO_BL_FILE = sorted([f for f in all_files if len(set(re.search(r'\\d+_\\d+', f).group().split('_'))) == 1])[0]"
]
},
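The `AUTO_BL_FILE` selection above relies on the antenna pair appearing in each filename as `<ant1>_<ant2>`: for an autocorrelation the two numbers are equal, so the set of them has length 1. A quick sketch with hypothetical filenames:

```python
import re

# Hypothetical filenames following the <ant1>_<ant2> convention the
# cell above assumes; real files live in the configured data directory.
all_files = [
    "zen.LST.baseline.0_4.sum.uvh5",    # cross baseline (0, 4)
    "zen.LST.baseline.12_12.sum.uvh5",  # autocorrelation (12, 12)
]

autos = [f for f in all_files
         if len(set(re.search(r"\d+_\d+", f).group().split("_"))) == 1]
print(autos)  # ['zen.LST.baseline.12_12.sum.uvh5']
```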
- {
- "cell_type": "markdown",
- "id": "c1db0856",
- "metadata": {},
- "source": [
- "# Added a four_pol option here"
- ]
- },
{
"cell_type": "code",
"execution_count": null,
"id": "561e6dbe",
"metadata": {},
"outputs": [],
"source": [
- "polarizations = [\"ee\", \"nn\"]\n",
- "\n",
- "FOUR_POL = True\n",
- "if FOUR_POL: \n",
- " polarizations += [\"en\", \"ne\"]\n",
+ "polarizations = [\"ee\", \"nn\", \"en\", \"ne\"]\n",
"# load data for both crosses and autos with times corresponding only to those in the crosses\n",
"single_bl_times = np.array(io.HERAData(SINGLE_BL_FILE).times)\n",
"hd = io.HERAData([AUTO_BL_FILE, SINGLE_BL_FILE])\n",
@@ -902,8 +892,6 @@
" for ap in data.antpairs():\n",
" # loop over pseudo-stokes parameters\n",
" iter_list = [('ee', 'nn', 'pI'), ('ee', 'nn', 'pQ'), ('en', 'ne', 'pU'), ('en', 'ne', 'pV')]\n",
- " if not FOUR_POL:\n",
- " iter_list = iter_list[:2]\n",
" for pol1, pol2, pstokes in iter_list:\n",
" bl1 = ap + (pol1,)\n",
" bl2 = ap + (pol2,)\n",
@@ -1315,7 +1303,7 @@
},
"outputs": [],
"source": [
- "from mpl_toolkits.axes_grid1 import make_axes_locatable\n",
+ "\n",
"\n",
"def get_frop_wrapper(pol=\"nn\", stream_ind=0, band_ind=0, t_avg=AVERAGING_TIME, \n",
" rephase=True, wgt_tavg_by_nsample=True, nsamples=None, bl_vec=None,\n",
@@ -1360,6 +1348,7 @@
" return deint_frf_data_test, alt_frf_data\n",
"\n",
"def plot_streams(test_data, alt_data, band_ind=0, amp=True, linthresh=1e-6):\n",
+ "\n",
" band_slice = band_slices[band_ind]\n",
"\n",
" fig, ax = plt.subplots(nrows=1, ncols=4, figsize=[12, 8])\n",
@@ -2092,9 +2081,6 @@
"source": [
"# TODO: graduate this code into hera_pspec\n",
"\n",
- "# Whether or not to use the correlation matrix to do the noise statistics correction factor\n",
- "USE_CORR_MATRIX = True\n",
- "\n",
"# Apply coherent_avg_correction_factor and compute P_SN\n",
"coherent_avg_correction_factors = []\n",
"for spw, band in enumerate(bands):\n",
@@ -2206,7 +2192,6 @@
" for i, uvp in enumerate(uvps):\n",
" high_dlys = np.abs(uvp.get_dlys(key[0]) * 1e9) > 1000\n",
" SNRs.append(np.ravel((uvp.get_data(key) / uvp.get_stats('P_N', key).real)[:, high_dlys]))\n",
- " #print((uvp.get_data(key) / uvp.get_stats('P_N', key).real)[:, high_dlys].shape)\n",
" for i, uvp in enumerate(uvps_time_avg):\n",
" high_dlys = np.abs(uvp.get_dlys(key[0]) * 1e9) > 1000\n",
" tavg_SNRs.append(np.ravel((uvp.get_data(key) / uvp.get_stats('P_N', key).real)[:, high_dlys]))\n"