diff --git a/ctapipe/irf/__init__.py b/ctapipe/irf/__init__.py new file mode 100644 index 00000000000..e6d04e2cff0 --- /dev/null +++ b/ctapipe/irf/__init__.py @@ -0,0 +1,15 @@ +from .irf_classes import ( + CutOptimizer, + DataBinning, + EventPreProcessor, + OutputEnergyBinning, + ThetaCutsCalculator, +) + +__all__ = [ + "CutOptimizer", + "DataBinning", + "OutputEnergyBinning", + "EventPreProcessor", + "ThetaCutsCalculator", +] diff --git a/ctapipe/irf/irf_classes.py b/ctapipe/irf/irf_classes.py new file mode 100644 index 00000000000..bd7d99ff4f5 --- /dev/null +++ b/ctapipe/irf/irf_classes.py @@ -0,0 +1,414 @@ +""" +Define a parent IrfTool class to hold all the options +""" +import operator + +import astropy.units as u +import numpy as np +from astropy.table import QTable +from pyirf.binning import create_bins_per_decade +from pyirf.cut_optimization import optimize_gh_cut +from pyirf.cuts import calculate_percentile_cut, evaluate_binned_cut + +from ..core import Component, QualityQuery +from ..core.traits import Float, Integer, List, Unicode + + +class CutOptimizer(Component): + """Performs cut optimisation""" + + initial_gh_cut_efficency = Float( + default_value=0.4, help="Start value of gamma efficiency before optimisation" + ).tag(config=True) + + max_gh_cut_efficiency = Float( + default_value=0.8, help="Maximum gamma efficiency requested" + ).tag(config=True) + + gh_cut_efficiency_step = Float( + default_value=0.1, + help="Stepsize used for scanning after optimal gammaness cut", + ).tag(config=True) + + reco_energy_min = Float( + help="Minimum value for Reco Energy bins in TeV units", + default_value=0.005, + ).tag(config=True) + + reco_energy_max = Float( + help="Maximum value for Reco Energy bins in TeV units", + default_value=200, + ).tag(config=True) + + reco_energy_n_bins_per_decade = Float( + help="Number of edges per decade for Reco Energy bins", + default_value=5, + ).tag(config=True) + + def reco_energy_bins(self): + """ + Creates bins per decade for 
reconstructed MC energy using pyirf function. + """ + reco_energy = create_bins_per_decade( + self.reco_energy_min * u.TeV, + self.reco_energy_max * u.TeV, + self.reco_energy_n_bins_per_decade, + ) + return reco_energy + + def optimise_gh_cut( + self, signal, background, alpha, min_fov_radius, max_fov_radius, theta + ): + initial_gh_cuts = calculate_percentile_cut( + signal["gh_score"], + signal["reco_energy"], + bins=self.reco_energy_bins(), + fill_value=0.0, + percentile=100 * (1 - self.initial_gh_cut_efficency), + min_events=25, + smoothing=1, + ) + + initial_gh_mask = evaluate_binned_cut( + signal["gh_score"], + signal["reco_energy"], + initial_gh_cuts, + op=operator.gt, + ) + + theta_cuts = theta.calculate_theta_cuts( + signal["theta"][initial_gh_mask], + signal["reco_energy"][initial_gh_mask], + self.reco_energy_bins(), + ) + + self.log.info("Optimizing G/H separation cut for best sensitivity") + gh_cut_efficiencies = np.arange( + self.gh_cut_efficiency_step, + self.max_gh_cut_efficiency + self.gh_cut_efficiency_step / 2, + self.gh_cut_efficiency_step, + ) + + sens2, gh_cuts = optimize_gh_cut( + signal, + background, + reco_energy_bins=self.reco_energy_bins(), + gh_cut_efficiencies=gh_cut_efficiencies, + op=operator.ge, + theta_cuts=theta_cuts, + alpha=alpha, + fov_offset_max=max_fov_radius * u.deg, + fov_offset_min=min_fov_radius * u.deg, + ) + + # now that we have the optimized gh cuts, we recalculate the theta + # cut as 68 percent containment on the events surviving these cuts. 
+ for tab in (signal, background): + tab["selected_gh"] = evaluate_binned_cut( + tab["gh_score"], tab["reco_energy"], gh_cuts, operator.ge + ) + self.log.info("Recalculating theta cut for optimized GH Cuts") + + theta_cuts = theta.calculate_theta_cuts( + signal[signal["selected_gh"]]["theta"], + signal[signal["selected_gh"]]["reco_energy"], + self.reco_energy_bins(), + ) + + return gh_cuts, theta_cuts, sens2 + + +class ThetaCutsCalculator(Component): + theta_min_angle = Float( + default_value=-1, help="Smallest angular cut value allowed (-1 means no cut)" + ).tag(config=True) + + theta_max_angle = Float( + default_value=0.32, help="Largest angular cut value allowed" + ).tag(config=True) + + theta_min_counts = Integer( + default_value=10, + help="Minimum number of events in a bin to attempt to find a cut value", + ).tag(config=True) + + theta_fill_value = Float( + default_value=0.32, help="Angular cut value used for bins with too few events" + ).tag(config=True) + + theta_smoothing = Float( + default_value=-1, + help="When given, the width (in units of bins) of gaussian smoothing applied (-1)", + ).tag(config=True) + + target_percentile = Float( + default_value=68, + help="Percent of events in each energy bin keep after the theta cut", + ).tag(config=True) + + def calculate_theta_cuts(self, theta, reco_energy, energy_bins): + theta_min_angle = ( + None if self.theta_min_angle < 0 else self.theta_min_angle * u.deg + ) + theta_max_angle = ( + None if self.theta_max_angle < 0 else self.theta_max_angle * u.deg + ) + theta_smoothing = None if self.theta_smoothing < 0 else self.theta_smoothing + + return calculate_percentile_cut( + theta, + reco_energy, + energy_bins, + min_value=theta_min_angle, + max_value=theta_max_angle, + smoothing=theta_smoothing, + percentile=self.target_percentile, + fill_value=self.theta_fill_value * u.deg, + min_events=self.theta_min_counts, + ) + + +class EventPreProcessor(QualityQuery): + """Defines preselection cuts and the necessary renaming 
of columns""" + + energy_reconstructor = Unicode( + default_value="RandomForestRegressor", + help="Prefix of the reco `_energy` column", + ).tag(config=True) + geometry_reconstructor = Unicode( + default_value="HillasReconstructor", + help="Prefix of the `_alt` and `_az` reco geometry columns", + ).tag(config=True) + gammaness_classifier = Unicode( + default_value="RandomForestClassifier", + help="Prefix of the classifier `_prediction` column", + ).tag(config=True) + + quality_criteria = List( + default_value=[ + ("multiplicity 4", "np.count_nonzero(tels_with_trigger,axis=1) >= 4"), + ("valid classifier", "RandomForestClassifier_is_valid"), + ("valid geom reco", "HillasReconstructor_is_valid"), + ("valid energy reco", "RandomForestRegressor_is_valid"), + ], + help=QualityQuery.quality_criteria.help, + ).tag(config=True) + + rename_columns = List( + help="List containing translation pairs new and old column names" + "used when processing input with names differing from the CTA prod5b format" + "Ex: [('valid_geom','HillasReconstructor_is_valid')]", + default_value=[], + ).tag(config=True) + + def normalise_column_names(self, events): + keep_columns = [ + "obs_id", + "event_id", + "true_energy", + "true_az", + "true_alt", + ] + rename_from = [ + f"{self.energy_reconstructor}_energy", + f"{self.geometry_reconstructor}_az", + f"{self.geometry_reconstructor}_alt", + f"{self.gammaness_classifier}_prediction", + ] + rename_to = ["reco_energy", "reco_az", "reco_alt", "gh_score"] + + # We never enter the loop if rename_columns is empty + for new, old in self.rename_columns: + rename_from.append(old) + rename_to.append(new) + + keep_columns.extend(rename_from) + events = QTable(events[keep_columns], copy=False) + events.rename_columns(rename_from, rename_to) + return events + + def make_empty_table(self): + """This function defines the columns later functions expect to be present in the event table""" + columns = [ + "obs_id", + "event_id", + "true_energy", + "true_az", + 
"true_alt", + "reco_energy", + "reco_az", + "reco_alt", + "gh_score", + "pointing_az", + "pointing_alt", + "theta", + "true_source_fov_offset", + "reco_source_fov_offset", + "weight", + ] + units = { + "true_energy": u.TeV, + "true_az": u.deg, + "true_alt": u.deg, + "reco_energy": u.TeV, + "reco_az": u.deg, + "reco_alt": u.deg, + "pointing_az": u.deg, + "pointing_alt": u.deg, + "theta": u.deg, + "true_source_fov_offset": u.deg, + "reco_source_fov_offset": u.deg, + } + + return QTable(names=columns, units=units) + + +class OutputEnergyBinning(Component): + """Collects energy binning settings""" + + true_energy_min = Float( + help="Minimum value for True Energy bins in TeV units", + default_value=0.005, + ).tag(config=True) + + true_energy_max = Float( + help="Maximum value for True Energy bins in TeV units", + default_value=200, + ).tag(config=True) + + true_energy_n_bins_per_decade = Float( + help="Number of edges per decade for True Energy bins", + default_value=10, + ).tag(config=True) + + reco_energy_min = Float( + help="Minimum value for Reco Energy bins in TeV units", + default_value=0.006, + ).tag(config=True) + + reco_energy_max = Float( + help="Maximum value for Reco Energy bins in TeV units", + default_value=190, + ).tag(config=True) + + reco_energy_n_bins_per_decade = Float( + help="Number of edges per decade for Reco Energy bins", + default_value=5, + ).tag(config=True) + + energy_migration_min = Float( + help="Minimum value of Energy Migration matrix", + default_value=0.2, + ).tag(config=True) + + energy_migration_max = Float( + help="Maximum value of Energy Migration matrix", + default_value=5, + ).tag(config=True) + + energy_migration_n_bins = Integer( + help="Number of bins in log scale for Energy Migration matrix", + default_value=31, + ).tag(config=True) + + def true_energy_bins(self): + """ + Creates bins per decade for true MC energy using pyirf function. 
+ """ + true_energy = create_bins_per_decade( + self.true_energy_min * u.TeV, + self.true_energy_max * u.TeV, + self.true_energy_n_bins_per_decade, + ) + return true_energy + + def reco_energy_bins(self): + """ + Creates bins per decade for reconstructed MC energy using pyirf function. + """ + reco_energy = create_bins_per_decade( + self.reco_energy_min * u.TeV, + self.reco_energy_max * u.TeV, + self.reco_energy_n_bins_per_decade, + ) + return reco_energy + + def energy_migration_bins(self): + """ + Creates bins for energy migration. + """ + energy_migration = np.geomspace( + self.energy_migration_min, + self.energy_migration_max, + self.energy_migration_n_bins, + ) + return energy_migration + + +class DataBinning(Component): + """ + Collects information on generating energy and angular bins for + generating IRFs as per pyIRF requirements. + + Stolen from LSTChain + """ + + fov_offset_min = Float( + help="Minimum value for FoV Offset bins in degrees", + default_value=0.0, + ).tag(config=True) + + fov_offset_max = Float( + help="Maximum value for FoV offset bins in degrees", + default_value=5.0, + ).tag(config=True) + + fov_offset_n_edges = Integer( + help="Number of edges for FoV offset bins", + default_value=2, + ).tag(config=True) + + source_offset_min = Float( + help="Minimum value for Source offset for PSF IRF", + default_value=0, + ).tag(config=True) + + source_offset_max = Float( + help="Maximum value for Source offset for PSF IRF", + default_value=1, + ).tag(config=True) + + source_offset_n_edges = Integer( + help="Number of edges for Source offset for PSF IRF", + default_value=101, + ).tag(config=True) + + def fov_offset_bins(self): + """ + Creates bins for single/multiple FoV offset. + """ + fov_offset = ( + np.linspace( + self.fov_offset_min, + self.fov_offset_max, + self.fov_offset_n_edges, + ) + * u.deg + ) + return fov_offset + + def source_offset_bins(self): + """ + Creates bins for source offset for generating PSF IRF. 
+ Using the same binning as in pyirf example. + """ + + source_offset = ( + np.linspace( + self.source_offset_min, + self.source_offset_max, + self.source_offset_n_edges, + ) + * u.deg + ) + return source_offset diff --git a/ctapipe/tools/info.py b/ctapipe/tools/info.py index 8ded56de360..73f2d9f9998 100644 --- a/ctapipe/tools/info.py +++ b/ctapipe/tools/info.py @@ -26,6 +26,7 @@ "iminuit", "tables", "eventio", + "pyirf", ] ) diff --git a/ctapipe/tools/make_irf.py b/ctapipe/tools/make_irf.py new file mode 100644 index 00000000000..1c168c4ac4b --- /dev/null +++ b/ctapipe/tools/make_irf.py @@ -0,0 +1,441 @@ +"""Tool to generate IRFs""" +import operator +from enum import Enum + +import astropy.units as u +import numpy as np +from astropy.io import fits +from astropy.table import vstack +from pyirf.benchmarks import angular_resolution, energy_bias_resolution +from pyirf.binning import create_histogram_table +from pyirf.cuts import evaluate_binned_cut +from pyirf.io import ( + create_aeff2d_hdu, + create_background_2d_hdu, + create_energy_dispersion_hdu, + create_psf_table_hdu, + create_rad_max_hdu, +) +from pyirf.irf import ( + background_2d, + effective_area_per_energy_and_fov, + energy_dispersion, + psf_table, +) +from pyirf.sensitivity import calculate_sensitivity, estimate_background +from pyirf.simulations import SimulatedEventsInfo +from pyirf.spectral import ( + CRAB_HEGRA, + IRFDOC_ELECTRON_SPECTRUM, + IRFDOC_PROTON_SPECTRUM, + PowerLaw, + calculate_event_weights, +) +from pyirf.utils import calculate_source_fov_offset, calculate_theta + +from ..core import Provenance, Tool, traits +from ..core.traits import Bool, Float, Integer, Unicode +from ..io import TableLoader +from ..irf import ( + CutOptimizer, + DataBinning, + EventPreProcessor, + OutputEnergyBinning, + ThetaCutsCalculator, +) + + +class Spectra(Enum): + CRAB_HEGRA = 1 + IRFDOC_ELECTRON_SPECTRUM = 2 + IRFDOC_PROTON_SPECTRUM = 3 + + +PYIRF_SPECTRA = { + Spectra.CRAB_HEGRA: CRAB_HEGRA, + 
Spectra.IRFDOC_ELECTRON_SPECTRUM: IRFDOC_ELECTRON_SPECTRUM, + Spectra.IRFDOC_PROTON_SPECTRUM: IRFDOC_PROTON_SPECTRUM, +} + + +class IrfTool(Tool): + name = "ctapipe-make-irfs" + description = "Tool to create IRF files in GADF format" + + gamma_file = traits.Path( + default_value=None, directory_ok=False, help="Gamma input filename and path" + ).tag(config=True) + gamma_sim_spectrum = traits.UseEnum( + Spectra, + default_value=Spectra.CRAB_HEGRA, + help="Name of the pyirf spectra used for the simulated gamma spectrum", + ).tag(config=True) + proton_file = traits.Path( + default_value=None, directory_ok=False, help="Proton input filename and path" + ).tag(config=True) + proton_sim_spectrum = traits.UseEnum( + Spectra, + default_value=Spectra.IRFDOC_PROTON_SPECTRUM, + help="Name of the pyirf spectra used for the simulated proton spectrum", + ).tag(config=True) + electron_file = traits.Path( + default_value=None, directory_ok=False, help="Electron input filename and path" + ).tag(config=True) + electron_sim_spectrum = traits.UseEnum( + Spectra, + default_value=Spectra.IRFDOC_ELECTRON_SPECTRUM, + help="Name of the pyirf spectra used for the simulated electron spectrum", + ).tag(config=True) + + chunk_size = Integer( + default_value=100000, + allow_none=True, + help="How many subarray events to load at once for making predictions.", + ).tag(config=True) + + output_path = traits.Path( + default_value="./IRF.fits.gz", + allow_none=False, + directory_ok=False, + help="Output file", + ).tag(config=True) + + overwrite = Bool( + False, + help="Overwrite the output file if it exists", + ).tag(config=True) + + obs_time = Float(default_value=50.0, help="Observation time").tag(config=True) + obs_time_unit = Unicode( + default_value="hour", + help="Unit used to specify observation time as an astropy unit string.", + ).tag(config=True) + + alpha = Float( + default_value=0.2, help="Ratio between size of on and off regions" + ).tag(config=True) + # ON_radius = Float(default_value=1.0,
help="Radius of ON region in degrees").tag( + # config=True + # ) + max_bg_radius = Float( + default_value=3.0, help="Radius used to calculate background rate in degrees" + ).tag(config=True) + + classes = [CutOptimizer, DataBinning, OutputEnergyBinning, EventPreProcessor] + + def make_derived_columns(self, kind, events, spectrum, target_spectrum, obs_conf): + if obs_conf["subarray_pointing_lat"].std() < 1e-3: + assert all(obs_conf["subarray_pointing_frame"] == 0) + # Lets suppose 0 means ALTAZ + events["pointing_alt"] = obs_conf["subarray_pointing_lat"][0] * u.deg + events["pointing_az"] = obs_conf["subarray_pointing_lon"][0] * u.deg + else: + raise NotImplementedError( + "No support for making irfs from varying pointings yet" + ) + + events["theta"] = calculate_theta( + events, + assumed_source_az=events["true_az"], + assumed_source_alt=events["true_alt"], + ) + events["true_source_fov_offset"] = calculate_source_fov_offset( + events, prefix="true" + ) + events["reco_source_fov_offset"] = calculate_source_fov_offset( + events, prefix="reco" + ) + # TODO: Honestly not sure why this integral is needed, nor what + # are correct bounds + if kind == "gamma": + spectrum = spectrum.integrate_cone( + self.bins.fov_offset_min * u.deg, self.bins.fov_offset_max * u.deg + ) + events["weight"] = calculate_event_weights( + events["true_energy"], + target_spectrum=target_spectrum, + simulated_spectrum=spectrum, + ) + + return events + + def get_metadata(self, loader): + obs = loader.read_observation_information() + sim = loader.read_simulation_configuration() + show = loader.read_shower_distribution() + + for itm in ["spectral_index", "energy_range_min", "energy_range_max"]: + if len(np.unique(sim[itm])) > 1: + raise NotImplementedError( + f"Unsupported: '{itm}' differs across simulation runs" + ) + + sim_info = SimulatedEventsInfo( + n_showers=show["n_entries"].sum(), + energy_min=sim["energy_range_min"].quantity[0], + energy_max=sim["energy_range_max"].quantity[0], + 
max_impact=sim["max_scatter_range"].quantity[0], + spectral_index=sim["spectral_index"][0], + viewcone_max=sim["max_viewcone_radius"].quantity[0], + viewcone_min=sim["min_viewcone_radius"].quantity[0], + ) + + return ( + sim_info, + PowerLaw.from_simulation( + sim_info, obstime=self.obs_time * u.Unit(self.obs_time_unit) + ), + obs, + ) + + def load_preselected_events(self): + opts = dict(load_dl2=True, load_simulated=True, load_dl1_parameters=False) + reduced_events = dict() + for kind, file, target_spectrum in [ + ("gamma", self.gamma_file, PYIRF_SPECTRA[self.gamma_sim_spectrum]), + ("proton", self.proton_file, PYIRF_SPECTRA[self.proton_sim_spectrum]), + ( + "electron", + self.electron_file, + PYIRF_SPECTRA[self.electron_sim_spectrum], + ), + ]: + with TableLoader(file, **opts) as load: + Provenance().add_input_file(file) + header = self.epp.make_empty_table() + sim_info, spectrum, obs_conf = self.get_metadata(load) + if kind == "gamma": + self.sim_info = sim_info + self.spectrum = spectrum + bits = [header] + n_raw_events = 0 + for start, stop, events in load.read_subarray_events_chunked( + self.chunk_size + ): + selected = events[self.epp.get_table_mask(events)] + selected = self.epp.normalise_column_names(selected) + selected = self.make_derived_columns( + kind, selected, spectrum, target_spectrum, obs_conf + ) + bits.append(selected) + n_raw_events += len(events) + + table = vstack(bits, join_type="exact") + reduced_events[kind] = table + reduced_events[f"{kind}_count"] = n_raw_events + + self.log.debug( + "Loaded %d gammas, %d protons, %d electrons" + % ( + reduced_events["gamma_count"], + reduced_events["proton_count"], + reduced_events["electron_count"], + ) + ) + self.log.debug( + "Keeping %d gammas, %d protons, %d electrons" + % ( + len(reduced_events["gamma"]), + len(reduced_events["proton"]), + len(reduced_events["electron"]), + ) + ) + select_fov = ( + reduced_events["gamma"]["true_source_fov_offset"] + <= self.bins.fov_offset_max * u.deg + ) + # TODO: 
verify that this fov cut on only gamma is ok + self.signal_events = reduced_events["gamma"][select_fov] + self.background_events = vstack( + [reduced_events["proton"], reduced_events["electron"]] + ) + + def setup(self): + self.epp = EventPreProcessor(parent=self) + self.co = CutOptimizer(parent=self) + self.theta = ThetaCutsCalculator(parent=self) + self.e_bins = OutputEnergyBinning(parent=self) + self.bins = DataBinning(parent=self) + + self.reco_energy_bins = self.e_bins.reco_energy_bins() + self.true_energy_bins = self.e_bins.true_energy_bins() + self.energy_migration_bins = self.e_bins.energy_migration_bins() + + self.source_offset_bins = self.bins.source_offset_bins() + self.fov_offset_bins = self.bins.fov_offset_bins() + + def start(self): + self.load_preselected_events() + self.log.info( + "Optimising cuts using %d signal and %d background events" + % (len(self.signal_events), len(self.background_events)), + ) + self.gh_cuts, self.theta_cuts_opt, self.sens2 = self.co.optimise_gh_cut( + self.signal_events, + self.background_events, + self.alpha, + self.bins.fov_offset_min, + self.bins.fov_offset_max, + self.theta, + ) + + self.signal_events["selected_theta"] = evaluate_binned_cut( + self.signal_events["theta"], + self.signal_events["reco_energy"], + self.theta_cuts_opt, + operator.le, + ) + self.signal_events["selected"] = ( + self.signal_events["selected_theta"] & self.signal_events["selected_gh"] + ) + self.background_events["selected_theta"] = evaluate_binned_cut( + self.background_events["theta"], + self.background_events["reco_energy"], + self.theta_cuts_opt, + operator.le, + ) + # calculate sensitivity + signal_hist = create_histogram_table( + self.signal_events[self.signal_events["selected"]], + bins=self.reco_energy_bins, + ) + + background_hist = estimate_background( + self.background_events[self.background_events["selected_gh"]], + reco_energy_bins=self.reco_energy_bins, + theta_cuts=self.theta_cuts_opt, + alpha=self.alpha, + 
fov_offset_min=self.bins.fov_offset_min * u.deg, + fov_offset_max=self.bins.fov_offset_max * u.deg, + ) + self.sensitivity = calculate_sensitivity( + signal_hist, background_hist, alpha=self.alpha + ) + + # scale relative sensitivity by Crab flux to get the flux sensitivity + for s in (self.sens2, self.sensitivity): + s["flux_sensitivity"] = s["relative_sensitivity"] * self.spectrum( + s["reco_energy_center"] + ) + + def finish(self): + masks = { + "": self.signal_events["selected"], + "_NO_CUTS": slice(None), + "_ONLY_GH": self.signal_events["selected_gh"], + "_ONLY_THETA": self.signal_events["selected_theta"], + } + hdus = [ + fits.PrimaryHDU(), + fits.BinTableHDU(self.sensitivity, name="SENSITIVITY"), + fits.BinTableHDU(self.sens2, name="SENSITIVITY_STEP_2"), + fits.BinTableHDU(self.theta_cuts_opt, name="THETA_CUTS_OPT"), + fits.BinTableHDU(self.gh_cuts, name="GH_CUTS"), + ] + + self.log.debug("True Energy bins: %s" % str(self.true_energy_bins.value)) + self.log.debug("FoV offset bins: %s" % str(self.fov_offset_bins.value)) + for label, mask in masks.items(): + effective_area = effective_area_per_energy_and_fov( + self.signal_events[mask], + self.sim_info, + true_energy_bins=self.true_energy_bins, + fov_offset_bins=self.fov_offset_bins, + ) + hdus.append( + create_aeff2d_hdu( + effective_area[..., np.newaxis], # +1 dimension for FOV offset + self.true_energy_bins, + self.fov_offset_bins, + extname="EFFECTIVE AREA" + label, + ) + ) + edisp = energy_dispersion( + self.signal_events[mask], + true_energy_bins=self.true_energy_bins, + fov_offset_bins=self.fov_offset_bins, + migration_bins=self.energy_migration_bins, + ) + hdus.append( + create_energy_dispersion_hdu( + edisp, + true_energy_bins=self.true_energy_bins, + migration_bins=self.energy_migration_bins, + fov_offset_bins=self.fov_offset_bins, + extname="ENERGY_DISPERSION" + label, + ) + ) + # Here we use reconstructed energy instead of true energy for the sake of + # current pipelines comparisons + 
bias_resolution = energy_bias_resolution( + self.signal_events[self.signal_events["selected"]], + self.true_energy_bins, + bias_function=np.mean, + energy_type="true", + ) + hdus.append(fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION")) + + # Here we use reconstructed energy instead of true energy for the sake of + # current pipelines comparisons + ang_res = angular_resolution( + self.signal_events[self.signal_events["selected_gh"]], + self.reco_energy_bins, + energy_type="reco", + ) + hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION")) + + sel = self.background_events["selected_gh"] + self.log.debug("%d background events selected" % sel.sum()) + self.log.debug("%f obs time" % self.obs_time) + background_rate = background_2d( + self.background_events[sel], + self.reco_energy_bins, + fov_offset_bins=self.fov_offset_bins, + t_obs=self.obs_time * u.Unit(self.obs_time_unit), + ) + hdus.append( + create_background_2d_hdu( + background_rate, + self.reco_energy_bins, + fov_offset_bins=self.fov_offset_bins, + ) + ) + + psf = psf_table( + self.signal_events[self.signal_events["selected_gh"]], + self.true_energy_bins, + fov_offset_bins=self.fov_offset_bins, + source_offset_bins=self.source_offset_bins, + ) + hdus.append( + create_psf_table_hdu( + psf, + self.true_energy_bins, + self.source_offset_bins, + self.fov_offset_bins, + ) + ) + + hdus.append( + create_rad_max_hdu( + self.theta_cuts_opt["cut"].reshape(-1, 1), + self.true_energy_bins, + self.fov_offset_bins, + ) + ) + + self.log.info("Writing output file '%s'" % self.output_path) + fits.HDUList(hdus).writeto( + self.output_path, + overwrite=self.overwrite, + ) + Provenance().add_output_file(self.output_path, role="IRF") + + +def main(): + tool = IrfTool() + tool.run() + + +if __name__ == "__main__": + main() diff --git a/docs/changes/2315.irf-maker.rst new file mode 100644 index 00000000000..37a898ba437 --- /dev/null +++ b/docs/changes/2315.irf-maker.rst @@ -0,0
+1 @@ +Add a `ctapipe-make-irfs` tool able to produce IRFs given gamma, proton and electron DL2 input files. diff --git a/docs/conf.py b/docs/conf.py index 29d5cdee0c5..790d74e5cc2 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -130,7 +130,13 @@ def setup(app): ("py:class", "traitlets.traitlets.Int"), ("py:class", "traitlets.config.application.Application"), ("py:class", "traitlets.utils.sentinel.Sentinel"), + ("py:class", "traitlets.traitlets.T"), + ("py:class", "re.Pattern[t.Any]"), + ("py:class", "Sentinel"), + ("py:class", "ObserveHandler"), ("py:class", "traitlets.traitlets.ObserveHandler"), + ("py:obj", "traitlets.traitlets.G"), + ("py:obj", "traitlets.traitlets.S"), ("py:obj", "traitlets.config.boolean_flag"), ("py:obj", "traitlets.TraitError"), ("py:obj", "-v"), # fix for wrong syntax in a traitlets docstring diff --git a/environment.yml b/environment.yml index 7f0f0dadeed..f8f2d9087fb 100644 --- a/environment.yml +++ b/environment.yml @@ -25,6 +25,7 @@ dependencies: - pypandoc - pre-commit - psutil + - pyirf - pytables - pytest - pytest-cov diff --git a/setup.cfg b/setup.cfg index dbd35656b60..eda6b89f790 100644 --- a/setup.cfg +++ b/setup.cfg @@ -35,6 +35,7 @@ install_requires= numba >=0.56 numpy ~=1.16 psutil + pyirf pyyaml >=5.1 requests scikit-learn @@ -93,6 +94,7 @@ console_scripts = ctapipe-display-dl1 = ctapipe.tools.display_dl1:main ctapipe-process = ctapipe.tools.process:main ctapipe-merge = ctapipe.tools.merge:main + ctapipe-make-irfs = ctapipe.tools.make_irf:main ctapipe-fileinfo = ctapipe.tools.fileinfo:main ctapipe-quickstart = ctapipe.tools.quickstart:main ctapipe-train-energy-regressor = ctapipe.tools.train_energy_regressor:main