Skip to content

Commit

Permalink
Merge pull request #19 from LCOGT/feature/traces
Browse files Browse the repository at this point in the history
Merge in traces and extraction.
  • Loading branch information
cmccully authored Mar 23, 2020
2 parents d486ba6 + 8cd47dc commit 4f307a6
Show file tree
Hide file tree
Showing 31 changed files with 1,100 additions and 1,148 deletions.
4 changes: 4 additions & 0 deletions .dockerignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
build
dist
*.egg*
test_data
5 changes: 5 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
.DS_Store
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
Expand Down Expand Up @@ -105,3 +106,7 @@ venv.bak/

# Jetbrains
.idea

.vscode

test_data/
4 changes: 2 additions & 2 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
FROM docker.lco.global/banzai:0.27.5-90-g3fdc726
FROM docker.lco.global/banzai:0.28.9-166-g95161e4

USER root

RUN conda install -y coveralls sphinx
RUN conda install -y coveralls sphinx statsmodels docutils=0.15

COPY --chown=10087:10000 . /lco/banzai-nres

Expand Down
28 changes: 28 additions & 0 deletions Jenkinsfile
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,34 @@ pipeline {
}
}
}
}
}
stage('Test-Science-Frame-Creation') {
when {
anyOf {
branch 'PR-*'
expression { return params.forceEndToEnd }
}
}
steps {
script {
withKubeConfig([credentialsId: "dev-kube-config"]) {
sh("kubectl exec -n dev ${podName} -c banzai-nres-listener -- " +
"pytest -s --durations=0 --junitxml=/home/archive/pytest-science-frames.xml " +
"-m science_frames /lco/banzai-nres/")
}
}
}
post {
always {
script {
withKubeConfig([credentialsId: "dev-kube-config"]) {
sh("kubectl cp -n dev -c banzai-nres-listener ${podName}:/home/archive/pytest-science-frames.xml " +
"pytest-science-frames.xml")
junit "pytest-science-frames.xml"
}
}
}
success {
script {
withKubeConfig([credentialsId: "dev-kube-config"]) {
Expand Down
13 changes: 13 additions & 0 deletions banzai_nres/background.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
from banzai.stages import Stage
import numpy as np
import sep


class BackgroundSubtractor(Stage):
    """Estimate the 2d scattered-light background of a frame.

    Pixels lying on a spectral trace, or flagged in the bad-pixel mask, are
    excluded from the estimate and interpolated over by ``sep.Background``.
    The result is stored on ``image.background``.
    """

    def do_stage(self, image):
        # TODO: Try LOWESS for hole filling
        # TODO: fix bug that background is subtracting
        if image.traces is None:
            return image
        excluded_pixels = np.logical_or(image.traces > 0, image.mask)
        image.background = sep.Background(image.data, mask=excluded_pixels).back()
        return image
84 changes: 84 additions & 0 deletions banzai_nres/extract.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
import numpy as np
from astropy.table import Table

from banzai.stages import Stage
from banzai_nres.frames import EchelleSpectralCCDData
from banzai_nres.utils.trace_utils import get_trace_region
import logging

logger = logging.getLogger('banzai')


class WeightedExtract(Stage):
    """Extract a 1d spectrum per trace by a weighted vertical sum of the 2d frame."""

    def do_stage(self, image: EchelleSpectralCCDData):
        if image.weights is None:
            logger.error('Extraction weights missing. Rejecting image.', image=image)
            return None
        n_traces, n_columns = image.num_traces, image.data.shape[1]
        flux = np.zeros((n_traces, n_columns), dtype=float)
        variance = np.zeros((n_traces, n_columns), dtype=float)

        trace_ids = np.arange(1, n_traces + 1)
        for row, trace_id in enumerate(trace_ids):
            region = get_trace_region(np.isclose(image.traces, trace_id))
            # Horizontal (x) extent of this trace on the detector.
            columns = slice(np.min(region[1]), np.max(region[1]) + 1)
            flux[row, columns] = self.extract_order(image.data[region], image.weights[region])
            variance[row, columns] = self.extract_order(image.uncertainty[region] ** 2,
                                                        image.weights[region] ** 2)

        image.spectrum = Table({'id': trace_ids, 'flux': flux, 'uncertainty': np.sqrt(variance)})
        return image

    @staticmethod
    def extract_order(values, weights=1):
        """Weighted sum of ``values`` down the rows (along axis 0).

        :param values: ndarray of shape (N, M); the values to sum vertically.
        :param weights: float, or ndarray of the same shape as ``values``.
            A scalar applies the same weight to every element.
        :return: ndarray of shape (M,), the column-wise weighted sums.
        """
        return (values * weights).sum(axis=0)


class GetOptimalExtractionWeights(WeightedExtract):
    """Compute per-pixel optimal-extraction weights (Horne 1986) for every trace."""

    def do_stage(self, image: EchelleSpectralCCDData):
        if image.profile is None:
            logger.error('Profile missing. Rejecting image.', image=image)
            return None
        image.weights = np.zeros_like(image.data, dtype=float)
        trace_ids = np.arange(1, image.num_traces + 1)
        for trace_id in trace_ids:
            yx = get_trace_region(np.isclose(image.traces, trace_id))
            image.weights[yx] = self.weights(image.profile[yx], image.uncertainty[yx]**2, image.mask[yx])
        return image

    def weights(self, profile_im, var_im, mask):
        # NOTE: this docstring must be a raw string: the original non-raw version
        # contained \frac (where \f is a form-feed escape) and other invalid
        # escape sequences, which corrupted the rendered documentation.
        r"""
        Calculate the optimal extraction weights for a single order
        per Horne (1986, 1986PASP...98..609H) Equation 8.
        The optimally extracted spectrum is then the sum of data * weights.
        The associated variance is by definition var * weights**2, which agrees with
        Horne (1986) equation 9.
        Note: Our conventions differ slightly with that of Horne 1986.
        The weights by which the spectrum is extracted in Equation 8 of Horne are:
        .. math:: W_{x, \lambda} = \frac{M_{x, \lambda} P_{x, \lambda} / V_{x, \lambda}}{\sum_x M_{x, \lambda} P^2_{x, \lambda} / V_{x, \lambda}}
        These quantities are labelled here as:
        .. math:: profile\_im = P_{x, \lambda}
        .. math:: var\_im = V_{x, \lambda}
        .. math:: mask = M_{x, \lambda}
        Note that in Horne, x is the cross-dispersed direction, which for us is the
        vertical (y) pixels.
        :param profile_im: ndarray. 2d image of the profile.
        :param var_im: ndarray. 2d image of the variance per pixel.
        :param mask: ndarray. 2d image where 0 is a good pixel and 1 is a masked, bad pixel.
        :return: ndarray. Has same shape as profile_im, var_im and mask.
        """
        invmask = np.invert(mask.astype(bool))  # array such that pixels to ignore have value 0
        normalization = self.extract_order(invmask * profile_im ** 2 / var_im)
        weights = np.divide(invmask * profile_im / var_im, normalization)
        weights[~np.isfinite(weights)] = 0  # remove infinities and nans from division by zero.
        return weights
41 changes: 41 additions & 0 deletions banzai_nres/fitting.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
import numpy as np
from scipy.interpolate import SmoothBivariateSpline, UnivariateSpline
from statsmodels.robust.norms import HuberT


def fit_smooth_spline(y, error, mask=None, n_iter=1, x=None, sigma=5):
    """Robustly fit a smoothing spline to 1d or 2d data.

    The fit is iteratively reweighted with a Huber-T norm so that outliers
    are progressively down-weighted.

    :param y: ndarray, 1d or 2d; the data to fit.
    :param error: ndarray, same shape as y; 1-sigma uncertainties per point.
    :param mask: optional boolean ndarray, same shape as y; True entries are excluded.
    :param n_iter: int, number of fit/reweight iterations (must be >= 1).
    :param x: optional coordinates. For a 2d fit, a 2-tuple of coordinate arrays;
        for a 1d fit, a single coordinate array. If None, pixel indices are used.
    :param sigma: Huber-T tuning constant (roughly an outlier-rejection threshold).
    :return: the fitted spline object (callable).
    """
    if mask is None:
        mask = np.zeros(y.shape, dtype=bool)
    good = np.logical_not(mask)
    y_to_fit = y[good]
    error_to_fit = error[good]
    # Note that from the docs, the weights should be about 1 over the standard
    # deviation for the smoothing to work well.
    weights = 1.0 / error_to_fit
    huber_t = HuberT(t=sigma)
    if len(y.shape) == 2 and x is None:
        spline = SmoothBivariateSpline
        # Bug fix: np.float has been removed from NumPy; use the builtin float.
        x1d, y1d = np.arange(y.shape[0], dtype=float), np.arange(y.shape[1], dtype=float)
        x2d, y2d = np.meshgrid(x1d, y1d)
        x_to_fit = x2d.T[good], y2d.T[good]
        kwargs = {'grid': False}

    # Bug fix: guard len(x) with x is not None; the original called len(None)
    # (TypeError) whenever y was 1d and no coordinates were supplied.
    elif len(y.shape) == 2 or (x is not None and len(x) == 2):
        spline = SmoothBivariateSpline
        x_to_fit = (x[0][good], x[1][good])
        kwargs = {'grid': False}

    elif x is not None:
        spline = UnivariateSpline
        x_to_fit = (x[good], )
        kwargs = {}
    else:
        # 1d data with no coordinates: fit against pixel index.
        # (The original subscripted x here, which was None and crashed.)
        spline = UnivariateSpline
        x_to_fit = (np.arange(y.shape[0], dtype=float)[good], )
        kwargs = {}

    for i in range(n_iter):
        best_fit = spline(*x_to_fit, y_to_fit, w=weights)

        # Iteratively reweight to reject outliers
        weights = huber_t.weights(np.abs(y_to_fit - best_fit(*x_to_fit, **kwargs)) / error_to_fit) / error_to_fit
    return best_fit
22 changes: 21 additions & 1 deletion banzai_nres/flats.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
from banzai.calibrations import CalibrationStacker
from banzai.calibrations import CalibrationStacker, CalibrationUser
from banzai.stages import Stage
import numpy as np


class FlatStacker(CalibrationStacker):
Expand All @@ -8,3 +10,21 @@ def __init__(self, runtime_context):
    @property
    def calibration_type(self):
        # The calibration frames stacked by this stage are lamp flats.
        return 'LAMPFLAT'


class FlatLoader(CalibrationUser):
    """Load the master LAMPFLAT and attach its data products to the image."""

    @property
    def calibration_type(self):
        return 'LAMPFLAT'

    def on_missing_master_calibration(self, image):
        # A LAMPFLAT frame may proceed without a master flat (it may be one of
        # the frames the master is built from); everything else defers to the
        # base-class policy.
        if image.obstype.upper() == 'LAMPFLAT':
            return image
        else:
            # Bug fix: the superclass result was previously discarded (no
            # return), so non-flat frames always fell through to an implicit
            # None regardless of what the base class decided.
            return super().on_missing_master_calibration(image)

    def apply_master_calibration(self, image, master_calibration_image):
        """Copy the trace, profile, and blaze products from the master flat onto the image."""
        image.traces = master_calibration_image.traces
        image.profile = master_calibration_image.profile
        image.blaze = master_calibration_image.blaze
        return image
Loading

0 comments on commit 4f307a6

Please sign in to comment.