Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Checked #66

Open
wants to merge 3 commits into
base: dev
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions .idea/.gitignore

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 6 additions & 0 deletions .idea/inspectionProfiles/profiles_settings.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 8 additions & 0 deletions .idea/modules.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

12 changes: 12 additions & 0 deletions .idea/pygom.iml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 6 additions & 0 deletions .idea/vcs.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

5 changes: 5 additions & 0 deletions pygom/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,8 @@
# -*- coding: utf-8 -*-
# @Author: Martin Grunnill
# @Date: 2020-07-03 07:28:58
# @Last Modified by: Martin Grunnill
# @Last Modified time: 2022-01-18 16:42:11
''' pygom

.. moduleauthor:: Edwin Tye <[email protected]>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -159,10 +159,10 @@ def create_loss(loss_type, parameters, ode, x0, t0, t, y, state_name,
return SquareLoss(theta, ode, x0, t0, t, y, state_name, state_weight, target_param, target_state)

elif loss_type == NormalLoss:
return NormalLoss(theta, ode, x0, t0, t, y, state_name, sigma, target_param, target_state)
return NormalLoss(theta, ode, x0, t0, t, y, state_name, state_weight, sigma, target_param, target_state)

elif loss_type == PoissonLoss:
return PoissonLoss(theta, ode, x0, t0, t, y, state_name, target_param, target_state)
return PoissonLoss(theta, ode, x0, t0, t, y, state_name, state_weight, target_param, target_state)

#%%
""" ABC class and methods for obtaining an approximate posterior sample/plotting the results """
Expand Down
5 changes: 5 additions & 0 deletions pygom/loss/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,8 @@
# -*- coding: utf-8 -*-
# @Author: Martin Grunnill
# @Date: 2020-07-03 07:28:58
# @Last Modified by: Martin Grunnill
# @Last Modified time: 2022-01-18 16:42:40
'''
.. moduleauthor:: Edwin Tye <[email protected]>

Expand Down
123 changes: 82 additions & 41 deletions pygom/loss/base_loss.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,8 @@
# -*- coding: utf-8 -*-
# @Author: Martin Grunnill
# @Date: 2020-07-03 07:28:58
# @Last Modified by: Martin Grunnill
# @Last Modified time: 2022-01-18 16:45:07
"""

.. moduleauthor:: Edwin Tye <[email protected]>
Expand Down Expand Up @@ -44,8 +49,12 @@ class BaseLoss(object):
observations
state_name: str
the state which the observations came from
state_weight: array like
state_weight: array like or none
weight for the observations
spread_param: array like or none
spread parameter for observations
(e.g. normal, negative binomial and gamma distributions' sigma, k and
shape, respectively).
target_param: str or array like
parameters that are not fixed
target_state: str or array like
Expand All @@ -55,7 +64,7 @@ class BaseLoss(object):
def __init__(self, theta, ode,
x0, t0,
t, y,
state_name, state_weight=None,
state_name, state_weight=None,spread_param=None,
target_param=None, target_state=None):

### Execute all the checks first
Expand Down Expand Up @@ -136,7 +145,11 @@ def __init__(self, theta, ode,
# then if
if solution.shape[1] == p:
state_name = [str(i) for i in self._ode._iterStateList()]
self._setWeight(n, p, state_weight)
self._weight = self._setWeight_or_spread(n, p, state_weight,is_weights= True)
if spread_param is not None:
self._spread_param = self._setWeight_or_spread(n, p, spread_param,is_weights= False)
else:
self._spread_param = None
else:
raise InputError("Expecting the name of states " +
"for the observations")
Expand All @@ -145,13 +158,14 @@ def __init__(self, theta, ode,
state_name = [state_name]

assert p == len(state_name), "len(state_name) and len(y[0]) not equal"
self._setWeight(n, p, state_weight)
self._weight = self._setWeight_or_spread(n, p, state_weight,is_weights= True)
if spread_param is not None:
self._spread_param = self._setWeight_or_spread(n, p, spread_param,is_weights= False)
else:
self._spread_param = None
else:
raise InputError("State name should be str or of type list/tuple")

# if self._stateWeight is not None:
if np.any(self._stateWeight <= 0):
raise InputError("Weights should be strictly positive")

# finish ordering information
# now get the index of target states
Expand Down Expand Up @@ -202,8 +216,8 @@ def _get_model_str(self):
self._observeT.tolist(),
self._y.tolist(),
self._stateName)
if self._stateWeight is not None:
model_str += ", %s" % self._stateWeight.tolist()
if self._weight is not None:
model_str += ", %s" % self._weight.tolist()
if self._targetParam is not None:
model_str += ", %s" % self._targetParam
if self._targetState is not None:
Expand Down Expand Up @@ -918,7 +932,7 @@ def fisher_information(self, theta=None, full_output=False, method=None):
#
############################################################

def cost(self, theta=None):
def cost(self, theta=None, apply_weighting = True):
"""
Find the cost/loss given time points and the corresponding
observations.
Expand All @@ -927,6 +941,9 @@ def cost(self, theta=None):
----------
theta: array like
input value of the parameters
apply_weighting: boolean
If True multiplies array of residuals by weightings, else raw
residuals are used.

Returns
-------
Expand All @@ -943,11 +960,11 @@ def cost(self, theta=None):

"""
yhat = self._getSolution(theta)
c = self._lossObj.loss(yhat)
c = self._lossObj.loss(yhat,apply_weighting = apply_weighting)

return np.nan_to_num(c) if c == np.inf else c

def diff_loss(self, theta=None):
def diff_loss(self, theta=None, apply_weighting = True):
"""
Find the derivative of the loss function given time points
and the corresponding observations, with initial conditions
Expand All @@ -956,6 +973,9 @@ def diff_loss(self, theta=None):
----------
theta: array like
input value of the parameters
apply_weighting: boolean
If True multiplies array of residuals by weightings, else raw
residuals are used.

Returns
-------
Expand All @@ -969,13 +989,13 @@ def diff_loss(self, theta=None):
try:
# the solution does not include the origin
solution = self._getSolution(theta)
return self._lossObj.diff_loss(solution)
return self._lossObj.diff_loss(solution,apply_weighting = apply_weighting)
except Exception as e:
# print(e)
# print("parameters = " +str(theta))
return np.nan_to_num((np.ones(self._y.shape)*np.inf))

def residual(self, theta=None):
def residual(self, theta=None, apply_weighting = True):
"""
Find the residuals given time points and the corresponding
observations, with initial conditions
Expand All @@ -984,6 +1004,9 @@ def residual(self, theta=None):
----------
theta: array like
input value of the parameters
apply_weighting: boolean
If True multiplies array of residuals by weightings, else raw
residuals returned.

Returns
-------
Expand All @@ -1003,7 +1026,7 @@ def residual(self, theta=None):
try:
# the solution does not include the origin
solution = self._getSolution(theta)
return self._lossObj.residual(solution)
return self._lossObj.residual(solution, apply_weighting = apply_weighting)
except Exception as e:
# print(e)
return np.nan_to_num((np.ones(self._y.shape)*np.inf))
Expand All @@ -1014,7 +1037,7 @@ def residual(self, theta=None):
#
############################################################

def costIV(self, theta=None):
def costIV(self, theta=None, apply_weighting = True):
"""
Find the cost/loss given the parameters. The input theta
here is assumed to include both the parameters as well as the
Expand All @@ -1024,6 +1047,10 @@ def costIV(self, theta=None):
----------
theta: array like
parameters and guess of initial values of the states
apply_weighting: boolean
If True multiplies array of residuals by weightings, else raw
residuals returned.


Returns
-------
Expand All @@ -1039,9 +1066,9 @@ def costIV(self, theta=None):
self._setParamStateInput(theta)

solution = self._getSolution()
return self._lossObj.loss(solution)
return self._lossObj.loss(solution, apply_weighting = apply_weighting)

def diff_lossIV(self, theta=None):
def diff_lossIV(self, theta=None, apply_weighting = True):
"""
Find the derivative of the loss function w.r.t. the parameters
given time points and the corresponding observations, with
Expand All @@ -1051,6 +1078,9 @@ def diff_lossIV(self, theta=None):
----------
theta: array like
parameters and initial values of the states
apply_weighting: boolean
If True multiplies array of residuals by weightings, else raw
residuals returned.

Returns
-------
Expand All @@ -1068,13 +1098,13 @@ def diff_lossIV(self, theta=None):
try:
# the solution does not include the origin
solution = self._getSolution()
return self._lossObj.diff_loss(solution)
return self._lossObj.diff_loss(solution, apply_weighting = apply_weighting)
except Exception as e:
# print(e)
# print("parameters = " + str(theta))
return np.nan_to_num((np.ones(self._y.shape)*np.inf))

def residualIV(self, theta=None):
def residualIV(self, theta=None, apply_weighting = True):
"""
Find the residuals given time points and the corresponding
observations, with initial conditions.
Expand All @@ -1083,6 +1113,9 @@ def residualIV(self, theta=None):
----------
theta: array like
parameters and initial values of the states
apply_weighting: boolean
If True multiplies array of residuals by weightings, else raw
residuals returned.

Returns
-------
Expand All @@ -1105,7 +1138,7 @@ def residualIV(self, theta=None):
try:
# the solution does not include the origin
solution = self._getSolution()
return self._lossObj.residual(solution)
return self._lossObj.residual(solution, apply_weighting = apply_weighting)
except Exception as e:
# print(e)
return np.nan_to_num((np.ones(self._y.shape)*np.inf))
Expand Down Expand Up @@ -1145,7 +1178,7 @@ def sens_to_grad(self, sens, diff_loss):

sens = np.reshape(sens, (n, num_s, num_out), 'F')
for j in range(num_out):
sens[:, :, j] *= self._stateWeight
sens[:, :, j] *= self._weight

grad = functools.reduce(np.add,map(np.dot, diff_loss, sens)).ravel()

Expand Down Expand Up @@ -1185,7 +1218,7 @@ def sens_to_jtj(self, sens, resid=None):
sens = np.reshape(sens, (n, num_s, num_out), 'F')

for j in range(num_out):
sens[:,:,j] *= self._stateWeight
sens[:,:,j] *= self._weight

for i, s in enumerate(sens):
if resid is None:
Expand Down Expand Up @@ -1554,37 +1587,45 @@ def _setParam(self, theta):
theta = ode_utils.check_array_type(theta)
self._theta = np.copy(theta)

def _setWeight(self, n, p, w):
def _setWeight_or_spread(self, n, p, x,is_weights):
# note that we NEVER scale the weights
# also note that we can use the weights as a control
# with normalized input

w = ode_utils.check_array_type(w)
if len(w) == w.size:
m, q = len(w), 1
# with normalized input
x = ode_utils.check_array_type(x,accept_booleans=is_weights)
if is_weights== True:
object_contents='weights'
else:
object_contents='spread parameter values'

if len(x) == x.size:
m, q = len(x), 1
else:
m, q = w.shape
m, q = x.shape

if p == q:
if n == m:
self._stateWeight = w
x = x
elif m == 1:
self._stateWeight = np.ones((n, p))*w
x = np.ones((n, p))*x
else:
raise InputError("Number of input weights is not equal " +
"to the number of observations")
raise AssertionError("Number of input " + object_contents +
" is not equal " +
"to the number of observations")
elif p == m:
if q == 1:
self._stateWeight = np.ones((n, p))*w
x = np.ones((n, p))*x
else:
raise InputError("Number of input weights is not equal " +
"to number of states")
raise AssertionError("Number of input " + object_contents +
" is not equal " +
"to number of states")
else:
if q == 1 and m == 1:
self._stateWeight = np.ones((n, p))*w
x = np.ones((n, p))*x
else:
raise InputError("Number of input weights differs from " +
"the number of observations")
raise AssertionError("Number of input " + object_contents +
" differs from " +
"the number of observations")
return x

def _setX0(self, x0):
"""
Expand All @@ -1600,7 +1641,7 @@ def _setLossType(self):
be override in the module odeLoss. Basically, all other
operations remains but this will change.
"""
self._lossObj = Square(self._y, self._stateWeight)
self._lossObj = Square(self._y, self._weight)
return self._lossObj

def _unrollParam(self, theta):
Expand Down
Loading