updated model cookbook
Jammy2211 committed Jul 2, 2024
1 parent 322ac25 commit da49de0
Showing 17 changed files with 225 additions and 106 deletions.
6 changes: 3 additions & 3 deletions README.rst
@@ -97,7 +97,7 @@ We define our model, a 1D Gaussian by writing a Python class using the format be
This method will be used to fit the model to data and compute a likelihood.
"""
def model_data_1d_via_xvalues_from(self, xvalues):
def model_data_from(self, xvalues):
transformed_xvalues = xvalues - self.centre
@@ -133,12 +133,12 @@ To fit this Gaussian to the ``data`` we create an Analysis object, which gives *
"""
We fit the ``data`` with the Gaussian instance, using its
"model_data_1d_via_xvalues_from" function to create the model data.
"model_data_from" function to create the model data.
"""
xvalues = np.arange(self.data.shape[0])
model_data = instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
model_data = instance.model_data_from(xvalues=xvalues)
residual_map = self.data - model_data
chi_squared_map = (residual_map / self.noise_map) ** 2.0
log_likelihood = -0.5 * sum(chi_squared_map)
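For reference, a minimal sketch of the renamed method on the readme's ``Gaussian`` class is shown below. It assumes the standard 1D Gaussian profile used throughout these examples; the exact body in the repository may differ slightly.

.. code-block:: python

    import numpy as np

    class Gaussian:
        def __init__(self, centre: float = 30.0, normalization: float = 1.0, sigma: float = 5.0):
            self.centre = centre
            self.normalization = normalization
            self.sigma = sigma

        def model_data_from(self, xvalues: np.ndarray) -> np.ndarray:
            """
            Evaluate the 1D Gaussian profile on a grid of x coordinates.
            """
            transformed_xvalues = xvalues - self.centre
            return np.multiply(
                np.divide(self.normalization, self.sigma * np.sqrt(2.0 * np.pi)),
                np.exp(-0.5 * np.square(np.divide(transformed_xvalues, self.sigma))),
            )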
7 changes: 0 additions & 7 deletions autofit/config/priors/Gaussian.yaml
@@ -1,10 +1,3 @@
GaussianPrior:
lower_limit:
type: Constant
value: -inf
upper_limit:
type: Constant
value: inf
centre:
gaussian_limits:
lower: -inf
40 changes: 40 additions & 0 deletions autofit/config/priors/Gaussian2D.yaml
@@ -0,0 +1,40 @@
centre_0:
gaussian_limits:
lower: -inf
upper: inf
lower_limit: 0.0
type: Uniform
upper_limit: 100.0
width_modifier:
type: Absolute
value: 20.0
centre_1:
gaussian_limits:
lower: -inf
upper: inf
lower_limit: 0.0
type: Uniform
upper_limit: 100.0
width_modifier:
type: Absolute
value: 20.0
normalization:
gaussian_limits:
lower: 0.0
upper: inf
lower_limit: 1.0e-06
type: LogUniform
upper_limit: 1000000.0
width_modifier:
type: Relative
value: 0.5
sigma:
gaussian_limits:
lower: 0.0
upper: inf
lower_limit: 0.0
type: Uniform
upper_limit: 25.0
width_modifier:
type: Relative
value: 0.5
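As a brief illustration of how this new configuration file is used (assuming a ``Gaussian2D`` class defined as in the model cookbook further down this commit), composing a model reads these defaults as the priors of each parameter:

.. code-block:: python

    import autofit as af

    model = af.Model(Gaussian2D)

    # The priors below come from Gaussian2D.yaml.
    print(model.centre_0)       # UniformPrior, lower_limit=0.0, upper_limit=100.0
    print(model.centre_1)       # UniformPrior, lower_limit=0.0, upper_limit=100.0
    print(model.normalization)  # LogUniformPrior, lower_limit=1e-06, upper_limit=1000000.0
    print(model.sigma)          # UniformPrior, lower_limit=0.0, upper_limit=25.0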
8 changes: 4 additions & 4 deletions autofit/example/analysis.py
@@ -83,8 +83,8 @@ def model_data_1d_from(self, instance: af.ModelInstance) -> np.ndarray:
The way this is generated changes depending on whether the model is a `Model` (and therefore has only one profile)
or a `Collection` (and therefore has multiple profiles).
If it's a model, the model component's `model_data_1d_via_xvalues_from` is called and the output returned.
For a collection, each component's `model_data_1d_via_xvalues_from` is called in turn and the results summed
If it's a model, the model component's `model_data_from` is called and the output returned.
For a collection, each component's `model_data_from` is called in turn and the results summed
to return the combined model data.
Parameters
@@ -103,13 +103,13 @@ def model_data_1d_from(self, instance: af.ModelInstance) -> np.ndarray:
try:
for profile in instance:
try:
model_data_1d += profile.model_data_1d_via_xvalues_from(
model_data_1d += profile.model_data_from(
xvalues=xvalues
)
except AttributeError:
pass
except TypeError:
model_data_1d += instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
model_data_1d += instance.model_data_from(xvalues=xvalues)

return model_data_1d

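A short usage sketch of the updated ``model_data_1d_from`` method follows. It assumes the example ``Analysis`` takes ``data`` and ``noise_map`` as in the readme, and that ``Gaussian`` and ``Exponential`` are the example profiles in ``autofit/example/model.py``; treat the exact constructor signatures as assumptions.

.. code-block:: python

    import numpy as np
    import autofit as af
    from autofit.example.analysis import Analysis
    from autofit.example.model import Gaussian, Exponential

    data = np.ones(100)       # placeholder dataset
    noise_map = np.ones(100)
    analysis = Analysis(data=data, noise_map=noise_map)

    # A single model component: the `except TypeError` branch is used.
    instance = af.Model(Gaussian).instance_from_prior_medians()
    model_data = analysis.model_data_1d_from(instance=instance)

    # A collection of components: each profile's `model_data_from` is summed.
    collection = af.Collection(gaussian=af.Model(Gaussian), exponential=af.Model(Exponential))
    instance = collection.instance_from_prior_medians()
    model_data = analysis.model_data_1d_from(instance=instance)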
10 changes: 5 additions & 5 deletions autofit/example/model.py
@@ -9,7 +9,7 @@
The log_likelihood_function in the Analysis class receives an instance of these classes, where the values of its
parameters have been set up according to the non-linear search. Because instances of the classes are used, this means
their methods (e.g. model_data_1d_via_xvalues_from) can be used in the log likelihood function.
their methods (e.g. model_data_from) can be used in the log likelihood function.
"""


@@ -56,7 +56,7 @@ def __eq__(self, other):
and self.sigma == other.sigma
)

def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray) -> np.ndarray:
def model_data_from(self, xvalues: np.ndarray) -> np.ndarray:
"""
Calculate the normalization of the profile on a 1D grid of Cartesian x coordinates.
@@ -91,7 +91,7 @@ def __call__(self, xvalues: np.ndarray) -> np.ndarray:
xvalues
The x coordinates in the original reference frame of the grid.
"""
return self.model_data_1d_via_xvalues_from(xvalues=xvalues)
return self.model_data_from(xvalues=xvalues)

def inverse(self, y):
"""
@@ -129,7 +129,7 @@ def __init__(
self.normalization = normalization
self.rate = rate

def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray) -> np.ndarray:
def model_data_from(self, xvalues: np.ndarray) -> np.ndarray:
"""
Calculate the 1D Gaussian profile on a 1D grid of Cartesian x coordinates.
@@ -156,7 +156,7 @@ def __call__(self, xvalues: np.ndarray) -> np.ndarray:
values
The x coordinates in the original reference frame of the grid.
"""
return self.model_data_1d_via_xvalues_from(xvalues=xvalues)
return self.model_data_from(xvalues=xvalues)


class PhysicalNFW:
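Because both profiles now expose the same ``model_data_from`` / ``__call__`` interface, they can be evaluated and summed generically. A minimal sketch (the ``Exponential`` constructor defaults are assumed):

.. code-block:: python

    import numpy as np
    from autofit.example.model import Gaussian, Exponential

    xvalues = np.arange(100.0)

    gaussian = Gaussian(centre=50.0, normalization=1.0, sigma=5.0)
    exponential = Exponential()  # constructor defaults assumed

    # Explicit call to the renamed method...
    model_data = gaussian.model_data_from(xvalues=xvalues) + exponential.model_data_from(xvalues=xvalues)

    # ...or via __call__, which simply forwards to model_data_from.
    model_data = gaussian(xvalues) + exponential(xvalues)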
4 changes: 2 additions & 2 deletions autofit/example/visualize.py
@@ -100,11 +100,11 @@ def visualize(
try:
for profile in instance:
try:
model_data_1d += profile.model_data_1d_via_xvalues_from(xvalues=xvalues)
model_data_1d += profile.model_data_from(xvalues=xvalues)
except AttributeError:
pass
except TypeError:
model_data_1d += instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
model_data_1d += instance.model_data_from(xvalues=xvalues)

plt.errorbar(
x=xvalues,
18 changes: 9 additions & 9 deletions docs/cookbooks/analysis.rst
@@ -56,13 +56,13 @@ This can be adapted for your use case.
Returns the log likelihood of a fit of a 1D Gaussian to the dataset.
The data is fitted using an `instance` of the `Gaussian` class where
its `model_data_1d_via_xvalues_from` is called in order to create a
its `model_data_from` is called in order to create a
model data representation of the Gaussian that is fitted to the data.
"""
xvalues = np.arange(self.data.shape[0])
model_data = instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
model_data = instance.model_data_from(xvalues=xvalues)
residual_map = self.data - model_data
chi_squared_map = (residual_map / self.noise_map) ** 2.0
@@ -255,7 +255,7 @@ Function", are also automatically output during the model-fit on the fly.
"""
xvalues = np.arange(analysis.data.shape[0])
model_data = instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
model_data = instance.model_data_from(xvalues=xvalues)
residual_map = analysis.data - model_data
"""
@@ -326,7 +326,7 @@ overwritten with the `Visualizer` class above.
"""
xvalues = np.arange(self.data.shape[0])
model_data = instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
model_data = instance.model_data_from(xvalues=xvalues)
residual_map = self.data - model_data
chi_squared_map = (residual_map / self.noise_map) ** 2.0
chi_squared = sum(chi_squared_map)
@@ -372,7 +372,7 @@ The custom result API allows us to do this. First, we define a custom ``Result``
"""
xvalues = np.arange(self.analysis.data.shape[0])
return self.instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
return self.instance.model_data_from(xvalues=xvalues)
The custom result has access to the analysis class, meaning that we can use any of its methods or properties to
compute custom result properties.
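Pieced together, the custom ``Result`` sketched in this hunk might look as follows (the class name and ``analysis`` attribute are taken from the surrounding cookbook text and should be treated as illustrative):

.. code-block:: python

    import numpy as np
    import autofit as af

    class ResultExample(af.Result):

        @property
        def model_data(self) -> np.ndarray:
            """
            The model data of the maximum log likelihood fit, computed from the
            result's instance and the analysis' dataset.
            """
            xvalues = np.arange(self.analysis.data.shape[0])
            return self.instance.model_data_from(xvalues=xvalues)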
@@ -404,7 +404,7 @@ of the ``Analysis`` and define a ``make_result`` object describing what we want
"""
xvalues = np.arange(self.data.shape[0])
model_data = instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
model_data = instance.model_data_from(xvalues=xvalues)
residual_map = self.data - model_data
chi_squared_map = (residual_map / self.noise_map) ** 2.0
chi_squared = sum(chi_squared_map)
@@ -518,7 +518,7 @@ contains settings customizing what files are output and how often.
"""
xvalues = np.arange(self.data.shape[0])
model_data = instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
model_data = instance.model_data_from(xvalues=xvalues)
residual_map = self.data - model_data
chi_squared_map = (residual_map / self.noise_map) ** 2.0
chi_squared = sum(chi_squared_map)
@@ -613,7 +613,7 @@ These files can then also be loaded via the database, as described in the databa
xvalues = np.arange(self.data.shape[0])
model_data = instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
model_data = instance.model_data_from(xvalues=xvalues)
residual_map = self.data - model_data
chi_squared_map = (residual_map / self.noise_map) ** 2.0
@@ -685,7 +685,7 @@ These files can then also be loaded via the database, as described in the databa
instance = result.max_log_likelihood_instance
model_data = instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
model_data = instance.model_data_from(xvalues=xvalues)
# The path where model_data.json is saved, e.g. output/dataset_name/unique_id/files/model_data.json
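Since the cookbook ends by pointing to the ``model_data.json`` output, a short sketch of loading it back is given here (the path is the placeholder used above; substitute your fit's unique identifier):

.. code-block:: python

    import json
    import numpy as np

    with open("output/dataset_name/unique_id/files/model_data.json") as f:
        model_data = np.asarray(json.load(f))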
2 changes: 1 addition & 1 deletion docs/cookbooks/configs.rst
@@ -48,7 +48,7 @@ not have a config file.
self.normalization = normalization
self.sigma = sigma
def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray) -> np.ndarray:
def model_data_from(self, xvalues: np.ndarray) -> np.ndarray:
"""
The usual method that returns the 1D data of the `Gaussian` profile.
"""
4 changes: 2 additions & 2 deletions docs/cookbooks/database.rst
@@ -361,7 +361,7 @@ as 1D numpy arrays, are converted to a suitable dictionary output format. This u
xvalues = np.arange(self.data.shape[0])
model_data = instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
model_data = instance.model_data_from(xvalues=xvalues)
residual_map = self.data - model_data
chi_squared_map = (residual_map / self.noise_map) ** 2.0
@@ -417,7 +417,7 @@ as 1D numpy arrays, are converted to a suitable dictionary output format. This u
instance = result.max_log_likelihood_instance
model_data = instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
model_data = instance.model_data_from(xvalues=xvalues)
# The path where model_data.json is saved, e.g. output/dataset_name/unique_id/files/model_data.json
73 changes: 70 additions & 3 deletions docs/cookbooks/model.rst
@@ -18,6 +18,7 @@ Python classes, with the following sections:
- **Priors (Model)**: How the default priors of a model are set and how to customize them.
- **Instances (Model)**: Creating an instance of a model via input parameters.
- **Model Customization (Model)**: Customizing a model (e.g. fixing parameters or linking them to one another).
- **Tuple Parameters (Model)**: Defining model components with parameters that are tuples.
- **Json Output (Model)**: Outputting a model in human-readable text via a .json file and loading it back again.

It then describes how to use the ``af.Collection`` object to define models with many model components from multiple
@@ -48,9 +49,9 @@ We define a 1D Gaussian model component to illustrate model composition in PyAut
class Gaussian:
def __init__(
self,
centre=30.0, # <- **PyAutoFit** recognises these constructor arguments
normalization=1.0, # <- are the Gaussian``s model parameters.
sigma=5.0,
centre: float = 30.0, # <- **PyAutoFit** recognises these constructor arguments
normalization: float = 1.0, # <- are the ``Gaussian``'s model parameters.
sigma: float = 5.0,
):
self.centre = centre
self.normalization = normalization
@@ -291,6 +292,72 @@ This API can also be used for fixing a parameter to a certain value:
model = af.Model(Gaussian, centre=0.0)
Tuple Parameters (Model)
------------------------

The `Gaussian` model component above only has parameters that are single-valued floats.

Parameters can also be tuples, which is useful for defining model components where certain parameters are naturally
grouped together.

For example, we can define a 2D Gaussian whose centre has two coordinates, (x, y), and therefore two free parameters,
which we define using a tuple.

.. code-block:: python
from typing import Tuple

class Gaussian2D:
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0), # <- **PyAutoFit** recognises these constructor arguments
normalization: float = 0.1, # <- are the ``Gaussian2D``'s model parameters.
sigma: float = 1.0,
):
self.centre = centre
self.normalization = normalization
self.sigma = sigma
The model's `total_free_parameters` attribute now counts 4 free parameters, as the tuple `centre` parameter accounts
for 2 of them.

.. code-block:: python
model = af.Model(Gaussian2D)
print(f"Model Total Free Parameters = {model.total_free_parameters}")
This information is again displayed in the `info` attribute:

.. code-block:: python
print(model.info)
This gives the following output:

.. code-block:: bash
Total Free Parameters = 4
model Gaussian2D (N=4)
centre
centre_0 UniformPrior [3], lower_limit = 0.0, upper_limit = 100.0
centre_1 UniformPrior [4], lower_limit = 0.0, upper_limit = 100.0
normalization LogUniformPrior [5], lower_limit = 1e-06, upper_limit = 1000000.0
sigma UniformPrior [6], lower_limit = 0.0, upper_limit = 25.0
Here are examples of how model customization can be applied to a model with tuple parameters:

.. code-block:: python
model = af.Model(Gaussian2D)
model.centre = (0.0, 0.0)
model.centre_0 = model.normalization
model.centre_1 = model.normalization + model.sigma
model.add_assertion(model.centre_0 > model.normalization)
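A sketch of how an instance of the tuple-parameter model is created, assuming the vector follows the parameter order shown in the ``info`` output above (``centre_0``, ``centre_1``, ``normalization``, ``sigma``):

.. code-block:: python

    model = af.Model(Gaussian2D)

    instance = model.instance_from_vector(vector=[30.0, 50.0, 1.0, 5.0])

    print(instance.centre)         # (30.0, 50.0) -- reassembled into a tuple
    print(instance.normalization)  # 1.0
    print(instance.sigma)          # 5.0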
Json Outputs (Model)
--------------------

2 changes: 1 addition & 1 deletion docs/cookbooks/multiple_datasets.rst
@@ -207,7 +207,7 @@ We can plot the model-fit to each dataset by iterating over the results:
for data, result in zip(data_list, result_list):
instance = result.max_log_likelihood_instance
model_data = instance.model_data_1d_via_xvalues_from(
model_data = instance.model_data_from(
xvalues=np.arange(data.shape[0])
)
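The loop above is truncated by the diff; a sketch of how it might continue, assuming ``matplotlib`` is imported as elsewhere in the cookbooks:

.. code-block:: python

    import matplotlib.pyplot as plt
    import numpy as np

    for data, result in zip(data_list, result_list):
        instance = result.max_log_likelihood_instance

        xvalues = np.arange(data.shape[0])
        model_data = instance.model_data_from(xvalues=xvalues)

        plt.plot(xvalues, data)
        plt.plot(xvalues, model_data)
        plt.show()
        plt.close()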
2 changes: 1 addition & 1 deletion docs/cookbooks/result.rst
@@ -265,7 +265,7 @@ This makes it straightforward to plot the median PDF model:

.. code-block:: python
model_data = instance.model_data_1d_via_xvalues_from(xvalues=np.arange(data.shape[0]))
model_data = instance.model_data_from(xvalues=np.arange(data.shape[0]))
plt.plot(range(data.shape[0]), data)
plt.plot(range(data.shape[0]), model_data)
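For context, the ``instance`` in this snippet is the median-PDF model of the fit's samples; a hedged sketch of how it might be obtained (the exact accessor may differ between PyAutoFit versions):

.. code-block:: python

    samples = result.samples

    # Assumed accessor: a model instance at the median of the posterior.
    instance = samples.median_pdf()

    model_data = instance.model_data_from(xvalues=np.arange(data.shape[0]))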
2 changes: 1 addition & 1 deletion docs/features/sensitivity_mapping.rst
@@ -164,7 +164,7 @@ gaussian features.
print(instance.perturb.normalization)
print(instance.perturb.sigma)
model_line = instance.gaussian_main.model_data_1d_via_xvalues_from(xvalues=xvalues) + instance.perturb.model_data_1d_via_xvalues_from(xvalues=xvalues)
model_line = instance.gaussian_main.model_data_from(xvalues=xvalues) + instance.perturb.model_data_from(xvalues=xvalues)
"""Determine the noise (at a specified signal to noise level) in every pixel of our model profile."""
signal_to_noise_ratio = 25.0
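The noise step described in this hunk is cut off by the diff; a sketch of one common way to complete it, using the ``model_line`` and ``signal_to_noise_ratio`` defined above (the feature guide's exact implementation may differ):

.. code-block:: python

    # Noise level chosen so every pixel of the model profile has the specified
    # signal-to-noise ratio.
    noise_map = model_line / signal_to_noise_ratio

    # Simulated data for sensitivity mapping: the model plus Gaussian noise.
    data = model_line + np.random.normal(loc=0.0, scale=noise_map)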