diff --git a/nistats/contrasts.py b/nistats/contrasts.py
index f7e0ecae..b4339711 100644
--- a/nistats/contrasts.py
+++ b/nistats/contrasts.py
@@ -90,7 +90,7 @@ def compute_contrast(labels, regression_result, con_val, contrast_type=None):
             effect_[:, label_mask] = wcbeta
             var_[label_mask] = rss
 
-    dof_ = regression_result[label_].df_resid
+    dof_ = regression_result[label_].df_residuals
     return Contrast(effect=effect_, variance=var_, dim=dim, dof=dof_,
                     contrast_type=contrast_type)
diff --git a/nistats/first_level_model.py b/nistats/first_level_model.py
index 643f3041..11004d62 100644
--- a/nistats/first_level_model.py
+++ b/nistats/first_level_model.py
@@ -133,8 +133,8 @@ def run_glm(Y, X, noise_model='ar1', bins=100, n_jobs=1, verbose=0):
 
     if noise_model == 'ar1':
         # compute and discretize the AR1 coefs
-        ar1 = ((ols_result.resid[1:] * ols_result.resid[:-1]).sum(axis=0) /
-               (ols_result.resid ** 2).sum(axis=0))
+        ar1 = ((ols_result.residuals[1:] * ols_result.residuals[:-1]).sum(axis=0) /
+               (ols_result.residuals ** 2).sum(axis=0))
         del ols_result
         ar1 = (ar1 * bins).astype(np.int) * 1. / bins
         # Fit the AR model according to current AR(1) estimates
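A note on the run_glm hunk above: the ar1 expression is the sample lag-1 autocorrelation of the OLS residuals, computed per column and then snapped onto a 1/bins grid so that voxels with the same discretized value can share a single AR fit. A quick standalone check of the formula on toy data (shapes and values are illustrative only):

import numpy as np

rng = np.random.RandomState(0)
residuals = rng.standard_normal((200, 3))  # toy (n_scans, n_voxels) residuals

# Same expression as in run_glm: lag-1 autocorrelation, one value per column.
ar1 = ((residuals[1:] * residuals[:-1]).sum(axis=0) /
       (residuals ** 2).sum(axis=0))
assert np.all(np.abs(ar1) <= 1)  # Cauchy-Schwarz bounds the ratio by 1

# Discretization step: truncate each coefficient onto a 1/bins grid
# (np.int in the diff is just the builtin int).
bins = 100
ar1_discretized = (ar1 * bins).astype(int) * 1. / bins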
diff --git a/nistats/model.py b/nistats/model.py
index fba86006..63b14a84 100644
--- a/nistats/model.py
+++ b/nistats/model.py
@@ -3,6 +3,7 @@
 
 Author: Bertrand Thirion, 2011--2015
 """
+import warnings
 
 import numpy as np
 
@@ -72,8 +73,16 @@ def __init__(self, theta, Y, model, cov=None, dispersion=1., nuisance=None,
         self.df_total = Y.shape[0]
         self.df_model = model.df_model
         # put this as a parameter of LikelihoodModel
-        self.df_resid = self.df_total - self.df_model
-
+        self.df_residuals = self.df_total - self.df_model
+
+    @setattr_on_read
+    def df_resid(self):
+        warnings.warn("'df_resid' from LikelihoodModelResults "
+                      "has been deprecated and will be removed. "
+                      "Please use 'df_residuals'.",
+                      FutureWarning)
+        return self.df_residuals
+
     @setattr_on_read
     def logL(self):
         """
@@ -200,7 +209,7 @@ def Tcontrast(self, matrix, store=('t', 'effect', 'sd'), dispersion=None):
         if 't' in store:
             st_t = np.squeeze(effect * positive_reciprocal(sd))
         return TContrastResults(effect=st_effect, t=st_t, sd=st_sd,
-                                df_den=self.df_resid)
+                                df_den=self.df_residuals)
 
     def Fcontrast(self, matrix, dispersion=None, invcov=None):
         """ Compute an Fcontrast for a contrast matrix `matrix`.
@@ -262,7 +271,7 @@ def Fcontrast(self, matrix, dispersion=None, invcov=None):
         return FContrastResults(
             effect=ctheta, covariance=self.vcov(
                 matrix=matrix, dispersion=dispersion[np.newaxis]),
-            F=F, df_den=self.df_resid, df_num=invcov.shape[0])
+            F=F, df_den=self.df_residuals, df_num=invcov.shape[0])
 
     def conf_int(self, alpha=.05, cols=None, dispersion=None):
         ''' The confidence interval of the specified theta estimates.
@@ -306,18 +315,18 @@ def conf_int(self, alpha=.05, cols=None, dispersion=None):
         '''
         if cols is None:
-            lower = self.theta - inv_t_cdf(1 - alpha / 2, self.df_resid) *\
+            lower = self.theta - inv_t_cdf(1 - alpha / 2, self.df_residuals) * \
                 np.sqrt(np.diag(self.vcov(dispersion=dispersion)))
-            upper = self.theta + inv_t_cdf(1 - alpha / 2, self.df_resid) *\
+            upper = self.theta + inv_t_cdf(1 - alpha / 2, self.df_residuals) * \
                 np.sqrt(np.diag(self.vcov(dispersion=dispersion)))
         else:
             lower, upper = [], []
             for i in cols:
                 lower.append(
-                    self.theta[i] - inv_t_cdf(1 - alpha / 2, self.df_resid) *
+                    self.theta[i] - inv_t_cdf(1 - alpha / 2, self.df_residuals) *
                     np.sqrt(self.vcov(column=i, dispersion=dispersion)))
                 upper.append(
-                    self.theta[i] + inv_t_cdf(1 - alpha / 2, self.df_resid) *
+                    self.theta[i] + inv_t_cdf(1 - alpha / 2, self.df_residuals) *
                     np.sqrt(self.vcov(column=i, dispersion=dispersion)))
         return np.asarray(list(zip(lower, upper)))
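The deprecation shims introduced here, and the matching ones in regression.py below, all follow one pattern: the new attribute is assigned directly, while the old name survives as a read-only alias that warns when accessed. nibabel's setattr_on_read is a caching one-shot property, so each alias warns on first access only. A minimal self-contained sketch of the pattern, using a plain property in place of the nibabel decorator (class and values are illustrative):

import warnings


class ResultsSketch(object):
    """Illustrative stand-in for LikelihoodModelResults."""

    def __init__(self, df_total, df_model):
        # New canonical name, assigned directly.
        self.df_residuals = df_total - df_model

    @property
    def df_resid(self):
        # Deprecated alias: still readable, but nags callers to migrate.
        warnings.warn("'df_resid' has been deprecated and will be removed. "
                      "Please use 'df_residuals'.",
                      FutureWarning)
        return self.df_residuals


results = ResultsSketch(df_total=40, df_model=10)
assert results.df_residuals == 30  # silent
assert results.df_resid == 30      # emits a FutureWarning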
diff --git a/nistats/regression.py b/nistats/regression.py
index 37e64657..8f772691 100644
--- a/nistats/regression.py
+++ b/nistats/regression.py
@@ -18,6 +18,8 @@
 
 __docformat__ = 'restructuredtext en'
 
+import warnings
+
 import numpy as np
 
 from nibabel.onetime import setattr_on_read
@@ -25,6 +27,7 @@
 import scipy.linalg as spl
 
 from .model import LikelihoodModelResults
+from nistats._utils.helpers import replace_parameters
 from .utils import positive_reciprocal
 
@@ -47,8 +50,8 @@ class OLSModel(object):
     design : ndarray
         This is the design, or X, matrix.
 
-    wdesign : ndarray
-        This is the whitened design matrix. `design` == `wdesign` by default
+    whitened_design : ndarray
+        This is the whitened design matrix. `design` == `whitened_design` by default
         for the OLSModel, though models that inherit from the OLSModel will
         whiten the design.
 
@@ -58,7 +61,7 @@ class OLSModel(object):
     normalized_cov_beta : ndarray
         ``np.dot(calc_beta, calc_beta.T)``
 
-    df_resid : scalar
+    df_residuals : scalar
         Degrees of freedom of the residuals. Number of observations less the
         rank of the design.
 
@@ -82,15 +85,31 @@ def initialize(self, design):
         # PLEASE don't assume we have a constant...
         # TODO: handle case for noconstant regression
         self.design = design
-        self.wdesign = self.whiten(self.design)
-        self.calc_beta = spl.pinv(self.wdesign)
+        self.whitened_design = self.whiten(self.design)
+        self.calc_beta = spl.pinv(self.whitened_design)
         self.normalized_cov_beta = np.dot(self.calc_beta,
                                           np.transpose(self.calc_beta))
-        self.df_total = self.wdesign.shape[0]
+        self.df_total = self.whitened_design.shape[0]
 
         eps = np.abs(self.design).sum() * np.finfo(np.float).eps
         self.df_model = matrix_rank(self.design, eps)
-        self.df_resid = self.df_total - self.df_model
+        self.df_residuals = self.df_total - self.df_model
+
+    @setattr_on_read
+    def df_resid(self):
+        warnings.warn("'df_resid' from OLSModel "
+                      "has been deprecated and will be removed. "
+                      "Please use 'df_residuals'.",
+                      FutureWarning)
+        return self.df_residuals
+
+    @setattr_on_read
+    def wdesign(self):
+        warnings.warn("'wdesign' from OLSModel "
+                      "has been deprecated and will be removed. "
+                      "Please use 'whitened_design'.",
+                      FutureWarning)
+        return self.whitened_design
 
     def logL(self, beta, Y, nuisance=None):
         r''' Returns the value of the loglikelihood function at beta.
@@ -145,7 +164,7 @@ def logL(self, beta, Y, nuisance=None):
     .. [1] W. Green. "Econometric Analysis," 5th ed., Pearson, 2003.
         '''
         # This is overwriting an abstract method of LikelihoodModel
-        X = self.wdesign
+        X = self.whitened_design
         wY = self.whiten(Y)
         r = wY - np.dot(X, beta)
         n = self.df_total
@@ -167,7 +186,7 @@ def whiten(self, X):
 
         Returns
         -------
-        wX : array
+        whitened_X : array
             This matrix is the matrix whose pseudoinverse is ultimately
             used in estimating the coefficients. For OLSModel, it
             does nothing. For WLSmodel, ARmodel, it pre-applies
@@ -195,9 +214,9 @@ def fit(self, Y):
         # squares models assume covariance is diagonal, i.e. heteroscedastic).
         wY = self.whiten(Y)
         beta = np.dot(self.calc_beta, wY)
-        wresid = wY - np.dot(self.wdesign, beta)
-        dispersion = np.sum(wresid ** 2, 0) / (self.wdesign.shape[0] -
-                                               self.wdesign.shape[1])
+        wresid = wY - np.dot(self.whitened_design, beta)
+        dispersion = np.sum(wresid ** 2, 0) / (self.whitened_design.shape[0] -
+                                               self.whitened_design.shape[1])
         lfit = RegressionResults(beta, Y, self, wY, wresid,
                                  dispersion=dispersion,
                                  cov=self.normalized_cov_beta)
@@ -248,14 +267,14 @@ def whiten(self, X):
 
         Returns
         -------
-        wX : ndarray
+        whitened_X : ndarray
             X whitened with order self.order AR
 
         """
         X = np.asarray(X, np.float64)
-        _X = X.copy()
+        whitened_X = X.copy()
         for i in range(self.order):
-            _X[(i + 1):] = _X[(i + 1):] - self.rho[i] * X[0: - (i + 1)]
-        return _X
+            whitened_X[(i + 1):] = whitened_X[(i + 1):] - self.rho[i] * X[0: - (i + 1)]
+        return whitened_X
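For intuition about ARModel.whiten: with an order-1 model it leaves the first row untouched and maps each later row x_t to x_t - rho * x_(t-1), always subtracting the original rows rather than the already-updated ones. A standalone check on a toy design:

import numpy as np
from nistats.regression import ARModel

rho = 0.4
X = np.arange(10, dtype=np.float64).reshape(5, 2)  # toy design matrix

whitened = ARModel(design=X, rho=rho).whiten(X)

# Row 0 is kept as-is; row t becomes X[t] - rho * X[t - 1].
expected = X.copy()
expected[1:] -= rho * X[:-1]
assert np.allclose(whitened, expected)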
" + "Please use 'normalized_residuals' instead.", + FutureWarning, + ) + return self.normalized_residuals + + @setattr_on_read + def normalized_residuals(self): """ Residuals, normalized to have unit length. @@ -303,7 +367,7 @@ def norm_resid(self): See: Montgomery and Peck 3.2.1 p. 68 Davidson and MacKinnon 15.2 p 662 """ - return self.resid * positive_reciprocal(np.sqrt(self.dispersion)) + return self.residuals * positive_reciprocal(np.sqrt(self.dispersion)) @setattr_on_read def predicted(self): @@ -311,26 +375,26 @@ def predicted(self): """ beta = self.theta # the LikelihoodModelResults has parameters named 'theta' - X = self.wdesign + X = self.whitened_design return np.dot(X, beta) @setattr_on_read def SSE(self): """Error sum of squares. If not from an OLS model this is "pseudo"-SSE. """ - return (self.wresid ** 2).sum(0) + return (self.whitened_residuals ** 2).sum(0) @setattr_on_read def r_square(self): """Proportion of explained variance. If not from an OLS model this is "pseudo"-R2. """ - return np.var(self.predicted, 0) / np.var(self.wY, 0) + return np.var(self.predicted, 0) / np.var(self.whitened_Y, 0) @setattr_on_read def MSE(self): """ Mean square (error) """ - return self.SSE / self.df_resid + return self.SSE / self.df_residuals class SimpleRegressionResults(LikelihoodModelResults): @@ -353,7 +417,7 @@ def __init__(self, results): self.df_total = results.Y.shape[0] self.df_model = results.model.df_model # put this as a parameter of LikelihoodModel - self.df_resid = self.df_total - self.df_model + self.df_residuals = self.df_total - self.df_model def logL(self, Y): """ @@ -362,12 +426,36 @@ def logL(self, Y): raise ValueError('can not use this method for simple results') def resid(self, Y): + warnings.warn("'resid()' from SimpleRegressionResults" + " has been deprecated and will be removed. " + "Please use 'residuals()'.", + FutureWarning, + ) + return self.residuals(Y) + + def residuals(self, Y): """ Residuals from the fit. """ return Y - self.predicted + @setattr_on_read + def df_resid(self): + warnings.warn("The attribute 'df_resid' from OLSModel" + "has been deprecated and will be removed. " + "Please use 'df_residuals'.", + FutureWarning) + return self.df_residuals + def norm_resid(self, Y): + warnings.warn("'SimpleRegressionResults.norm_resid' method " + "has been deprecated and will be removed. " + "Please use 'normalized_residuals'.", + FutureWarning, + ) + return self.normalized_residuals(Y) + + def normalized_residuals(self, Y): """ Residuals, normalized to have unit length. @@ -384,7 +472,7 @@ def norm_resid(self, Y): See: Montgomery and Peck 3.2.1 p. 68 Davidson and MacKinnon 15.2 p 662 """ - return self.resid(Y) * positive_reciprocal(np.sqrt(self.dispersion)) + return self.residuals(Y) * positive_reciprocal(np.sqrt(self.dispersion)) def predicted(self): """ Return linear predictor values from a design matrix. 
diff --git a/nistats/tests/test_first_level_model.py b/nistats/tests/test_first_level_model.py
index 8275da8b..e9906196 100644
--- a/nistats/tests/test_first_level_model.py
+++ b/nistats/tests/test_first_level_model.py
@@ -627,9 +627,9 @@ def test_first_level_model_residuals():
                            noise_model='ols')
     model.fit(fmri_data, design_matrices=design_matrices)
 
-    resid = model.residuals[0]
-    mean_resids = model.masker_.transform(resid).mean(0)
-    assert_array_almost_equal(mean_resids, 0)
+    residuals = model.residuals[0]
+    mean_residuals = model.masker_.transform(residuals).mean(0)
+    assert_array_almost_equal(mean_residuals, 0)
 
 
 def test_first_level_model_predictions_r_square():
diff --git a/nistats/tests/test_regression.py b/nistats/tests/test_regression.py
index b96bcba1..f1d27f48 100644
--- a/nistats/tests/test_regression.py
+++ b/nistats/tests/test_regression.py
@@ -3,6 +3,7 @@
 """
 
 import numpy as np
+import pytest
 
 from numpy.testing import (assert_array_almost_equal,
                            assert_almost_equal,
@@ -20,16 +21,16 @@ def test_OLS():
     model = OLSModel(design=X)
     results = model.fit(Y)
-    assert results.df_resid == 30
-    assert results.resid.shape[0] == 40
+    assert results.df_residuals == 30
+    assert results.residuals.shape[0] == 40
     assert results.predicted.shape[0] == 40
 
 
 def test_AR():
     model = ARModel(design=X, rho=0.4)
     results = model.fit(Y)
-    assert results.df_resid == 30
-    assert results.resid.shape[0] == 40
+    assert results.df_residuals == 30
+    assert results.residuals.shape[0] == 40
     assert results.predicted.shape[0] == 40
 
@@ -42,7 +43,8 @@ def test_residuals():
     Xintercept[:, 0] = 1
     model = OLSModel(design=Xintercept)
     results = model.fit(Y)
-    assert_almost_equal(results.resid.mean(), 0)
+    assert_almost_equal(results.residuals.mean(), 0)
+    assert len(results.whitened_residuals) == 40
 
 
 def test_predicted_r_square():
@@ -54,7 +56,7 @@ def test_predicted_r_square():
     # rounding errors)
     model = OLSModel(design=Xshort)
     results = model.fit(Yshort)
-    assert_almost_equal(results.resid.sum(), 0)
+    assert_almost_equal(results.residuals.sum(), 0)
     assert_array_almost_equal(results.predicted, Yshort)
     assert_almost_equal(results.r_square, 1.0)
 
@@ -64,7 +66,7 @@ def test_OLS_degenerate():
     Xd[:, 0] = Xd[:, 1] + Xd[:, 2]
     model = OLSModel(design=Xd)
     results = model.fit(Y)
-    assert results.df_resid == 31
+    assert results.df_residuals == 31
 
 
 def test_AR_degenerate():
@@ -72,4 +74,29 @@ def test_AR_degenerate():
     Xd[:, 0] = Xd[:, 1] + Xd[:, 2]
     model = ARModel(design=Xd, rho=0.9)
     results = model.fit(Y)
-    assert results.df_resid == 31
+    assert results.df_residuals == 31
+
+
+def test_resid_rename_warnings_ols():
+    model = OLSModel(design=X)
+    results_ols = model.fit(Y)
+    with pytest.warns(FutureWarning, match="'df_resid'"):
+        assert_array_equal(results_ols.df_resid, results_ols.df_residuals)
+    with pytest.warns(FutureWarning, match="'resid'"):
+        assert_array_equal(results_ols.resid, results_ols.residuals)
+    with pytest.warns(FutureWarning, match="'wY'"):
+        assert_array_equal(results_ols.wY, results_ols.whitened_Y)
+    with pytest.warns(FutureWarning, match="'wdesign'"):
+        assert_array_equal(results_ols.wdesign, results_ols.whitened_design)
+    with pytest.warns(FutureWarning, match="'wdesign'"):
+        assert_array_equal(model.wdesign, model.whitened_design)
+
+
+def test_resid_rename_warnings_ar():
+    model = ARModel(design=X, rho=0.4)
+    results_ar = model.fit(Y)
+    with pytest.warns(FutureWarning, match="'resid'"):
+        assert_array_equal(results_ar.resid, results_ar.residuals)
+    with pytest.warns(FutureWarning, match="'wresid'"):
+        assert_array_equal(results_ar.wresid, results_ar.whitened_residuals)
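Taken together, both spellings stay usable for the deprecation window. A quick downstream check, assuming the patch is applied (toy data shaped like the module-level X and Y in the tests above):

import warnings

import numpy as np
from nistats.regression import OLSModel

rng = np.random.RandomState(20)
X = rng.standard_normal((40, 10))  # toy design
Y = rng.standard_normal((40,))     # toy data

results = OLSModel(design=X).fit(Y)

print(results.df_residuals)  # new name: silent
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    print(results.df_resid)  # old name: same value, plus a FutureWarning
assert any(issubclass(w.category, FutureWarning) for w in caught)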