diff --git a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_001.png b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_001.png index 529e65a3..8e78acfb 100644 Binary files a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_001.png and b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_001.png differ diff --git a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_002.png b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_002.png index f9d2eae7..28ae543b 100644 Binary files a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_002.png and b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_002.png differ diff --git a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_003.png b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_003.png index 88c53f11..3b87be04 100644 Binary files a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_003.png and b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_003.png differ diff --git a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_004.png b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_004.png index b9af2888..856c01b4 100644 Binary files a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_004.png and b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_004.png differ diff --git a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_005.png b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_005.png index 648e38a3..57179857 100644 Binary files a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_005.png and b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_005.png differ diff --git a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_006.png b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_006.png index 5b1f8bac..0b05b74d 100644 Binary files a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_006.png and b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_006.png differ diff --git a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_007.png b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_007.png index 6e385435..a3a6f3f4 100644 Binary files a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_007.png and b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_007.png differ diff --git a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_008.png b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_008.png index 97a04148..bf6245a1 100644 Binary files a/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_008.png and b/docs/auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_008.png differ diff --git a/docs/auto_examples/01_DataOperations/images/thumb/sphx_glr_plot_design_matrix_thumb.png b/docs/auto_examples/01_DataOperations/images/thumb/sphx_glr_plot_design_matrix_thumb.png index f256f40b..3e6d9cb6 100644 Binary files a/docs/auto_examples/01_DataOperations/images/thumb/sphx_glr_plot_design_matrix_thumb.png and 
b/docs/auto_examples/01_DataOperations/images/thumb/sphx_glr_plot_design_matrix_thumb.png differ diff --git a/docs/auto_examples/01_DataOperations/plot_design_matrix.ipynb b/docs/auto_examples/01_DataOperations/plot_design_matrix.ipynb index 19820921..6d348e83 100644 --- a/docs/auto_examples/01_DataOperations/plot_design_matrix.ipynb +++ b/docs/auto_examples/01_DataOperations/plot_design_matrix.ipynb @@ -15,7 +15,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "\nDesign Matrix Creation\n======================\n\nThis tutorial illustrates how to use the Design_Matrix class to flexibly create design matrices that can then be used with the Brain_Data class to perform univariate regression.\n\nDesign Matrices can be thought of as \"enhanced\" pandas dataframes; they can do everything a pandas dataframe is capable of, with some added features. Design Matrices follow a data organization format common in many machine learning applications such as the sci-kit learn API: 2d tables organized as observations by features. In the context of neuro-imaging this often translates to TRs by conditions of interest + nuisance covariates (1st level analysis), or participants by conditions/groups (2nd level analysis).\n\n\n" + "\nDesign Matrix\n==============\n\nThis tutorial illustrates how to use the Design_Matrix class to flexibly create design matrices that can then be used with the Brain_Data class to perform univariate regression.\n\nDesign Matrices can be thought of as \"enhanced\" pandas dataframes; they can do everything a pandas dataframe is capable of, with some added features. Design Matrices follow a data organization format common in many machine learning applications such as the scikit-learn API: 2d tables organized as observations by features. In the context of neuroimaging this often translates to TRs by conditions of interest + nuisance covariates (1st level analysis), or participants by conditions/groups (2nd level analysis).\n\n\n" ] }, { @@ -33,7 +33,7 @@ }, "outputs": [], "source": [ - "from nltools.data import Design_Matrix\nimport numpy as np\n\ndm = Design_Matrix(np.array([\n [1,0,0,0],\n [1,0,0,0],\n [0,0,0,0],\n [0,1,0,0],\n [0,1,0,0],\n [0,0,0,0],\n [0,0,1,0],\n [0,0,1,0],\n [0,0,0,0],\n [0,0,0,1],\n [0,0,0,1]\n ]),\n sampling_rate = 1.5,\n columns=['stim_A','stim_B','stim_C','stim_D']\n )" + "from nltools.data import Design_Matrix\nimport numpy as np\n\nTR = 1.5 # Design Matrices take a sampling_freq argument specified in hertz, which can be computed as 1./TR\n\ndm = Design_Matrix(np.array([\n [0,0,0,0],\n [0,0,0,0],\n [1,0,0,0],\n [1,0,0,0],\n [0,0,0,0],\n [0,1,0,0],\n [0,1,0,0],\n [0,0,0,0],\n [0,0,1,0],\n [0,0,1,0],\n [0,0,0,0],\n [0,0,0,1],\n [0,0,0,1],\n [0,0,0,0],\n [0,0,0,0],\n [0,0,0,0],\n [0,0,0,0],\n [0,0,0,0],\n [0,0,0,0],\n [0,0,0,0],\n [0,0,0,0],\n [0,0,0,0]\n ]),\n sampling_freq = 1./TR,\n columns=['face_A','face_B','house_A','house_B']\n )" ] }, { @@ -94,7 +94,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "A common operation might include adding an intercept and polynomial trend terms (e.g. linear and quadtratic) as nuisance regressors. This is easy to do. Note that polynomial terms are normalized to unit variance (i.e. mean = 0, std = 1) before inclusion to keep values on approximately the same scale.\n\n" + "Adding nuisance covariates\n---------------------------\n\nLegendre Polynomials\n********************\n\nA common operation is adding an intercept and polynomial trend terms (e.g. linear and quadratic) as nuisance regressors. 
This is easy to do. Consistent with other software packages, these are orthogonal Legendre polynomials on the scale -1 to 1.\n\n" ] }, { @@ -105,7 +105,7 @@ }, "outputs": [], "source": [ - "# with include_lower = True (default), 1 here means: 0-intercept, 1-linear-trend, 2-quadtratic-trend\ndm_with_nuissance = dm.add_poly(2,include_lower=True)\ndm_with_nuissance.heatmap()" + "# with include_lower = True (default), 2 here means: 0-intercept, 1-linear-trend, 2-quadratic-trend\ndm_with_nuissance = dm.add_poly(2,include_lower=True)\ndm_with_nuissance.heatmap()" ] }, { @@ -130,7 +130,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Polynomial variables are not the only type of nuisance covariates that can be generate for you. Design Matrix also supports the creation of discrete-cosine basis functions ala SPM. This will create a series of filters added as new columns based on a specified duration, defaulting to 180s.\n\n" + "Discrete Cosine Basis Functions\n*******************************\n\nPolynomial variables are not the only type of nuisance covariates that can be generated for you. Design Matrix also supports the creation of discrete-cosine basis functions à la SPM. This will create a series of filters added as new columns based on a specified duration, defaulting to 180s. Let's create DCT filters with a 20s duration for our toy data.\n\n" ] }, { @@ -141,14 +141,14 @@ }, "outputs": [], "source": [ - "# Short filter duration for our simple example\ndm_with_cosine = dm.add_dct_basis(duration=5)\nprint(dm_with_cosine.details())" + "# Short filter duration for our simple example\ndm_with_cosine = dm.add_dct_basis(duration=20)\ndm_with_cosine.heatmap()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Load and Manipulate an Onsets File\n-----------------------------------\n\nNltools provides basic file-reading support for 2 or 3 column formatted onset files. Users can look at the onsets_to_dm function as a template to build more complex file readers if desired or to see additional features. Nltools includes an example onsets file where each event lasted exactly 1 TR. Lets use that to create a design matrix with an intercept and linear trend\n\n" + "Data operations\n---------------\n\nPerforming convolution\n**********************\n\nDesign Matrix makes it easy to perform convolution and will auto-ignore all columns that are considered polynomials. The default convolution kernel is the Glover (1999) HRF, as parameterized by the glover_hrf implementation in nipy (see nltools.externals.hrf for details). However, any arbitrary kernel can be passed as a 1d numpy array, or multiple kernels can be passed as a 2d numpy array for highly flexible convolution across many types of data (e.g. SCR).\n\n" ] }, { @@ -159,32 +159,21 @@ }, "outputs": [], "source": [ - "from nltools.utils import get_resource_path\nfrom nltools.file_reader import onsets_to_dm\nfrom nltools.data import Design_Matrix\nimport os\n\nonsetsFile = os.path.join(get_resource_path(),'onsets_example.txt')\ndm = onsets_to_dm(onsetsFile, TR=2.0, runLength=160, sort=True,add_poly=1)\ndm.heatmap()" + "dm_with_nuissance_c = dm_with_nuissance.convolve()\nprint(dm_with_nuissance_c.details())\ndm_with_nuissance_c.heatmap()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Design Matrix makes it easy to perform convolution and will auto-ignore all columns that are consider polynomials. By default it will use the one-parameter glover_hrf kernel (see nipy for details). 
However, any kernel can be passed as an argument, including a list of different kernels for highly flexible convolution across many types of data (e.g. SCR).\n\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ - "dm = dm.convolve()\nprint(dm.details())\ndm.heatmap()" + "Design Matrix can do many different data operations in addition to convolution such as upsampling and downsampling to different frequencies, zscoring, etc. Check out the API documentation for how to use these methods.\n\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Load and Z-score a Covariates File\n----------------------------------\n\nNow we're going to handle a covariates file that's been generated by a preprocessing routine. First we'll read in the text file using pandas and convert it to a design matrix.\n\n" + "File Reading\n------------\n\nCreating a Design Matrix from an onsets file\n********************************************\n\nNltools provides basic file-reading support for 2 or 3 column formatted onset files. Users can look at the onsets_to_dm function as a template to build more complex file readers if desired or to see additional features. Nltools includes an example onsets file where each event lasted exactly 1 TR and TR = 2s. Let's use that to create a design matrix with an intercept and linear trend.\n\n" ] }, { @@ -195,14 +184,14 @@ }, "outputs": [], "source": [ - "import pandas as pd\n\ncovariatesFile = os.path.join(get_resource_path(),'covariates_example.csv')\ncov = pd.read_csv(covariatesFile)\ncov = Design_Matrix(cov, sampling_rate = 2.0)\n# Design matrix uses seaborn's heatmap for plotting so excepts all keyword arguments\n# We're just adjusting colors here to visualize things a bit more nicely\ncov.heatmap(vmin=-1,vmax=1)" + "from nltools.utils import get_resource_path\nfrom nltools.file_reader import onsets_to_dm\nfrom nltools.data import Design_Matrix\nimport os\n\nTR = 2.0\nsampling_freq = 1./TR\nonsetsFile = os.path.join(get_resource_path(),'onsets_example.txt')\ndm = onsets_to_dm(onsetsFile, sampling_freq=sampling_freq, run_length=160, sort=True,add_poly=1)\ndm.heatmap()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Similar to adding polynomial terms, Design Matrix has multiple methods for data processing and transformation such as downsampling, upsampling, and z-scoring. Let's use the z-score method to normalize the covariates we just loaded.\n\n" + "Creating a Design Matrix from a generic csv file\n************************************************\n\nAlternatively you can read a generic csv file and transform it into a Design Matrix using pandas' file-reading capabilities. Here we'll read in an example covariates file that contains the output of motion realignment estimated during an fMRI preprocessing pipeline.\n\n" ] }, { @@ -213,14 +202,14 @@ }, "outputs": [], "source": [ - "# Use pandas built-in fillna to fill NaNs in the covariates files introduced during the pre-processing pipeline, before z-scoring\n# Z-score takes an optional argument of which columns to z-score. 
Since we don't want to z-score any spikes, so let's select everything except that column\ncov = cov.fillna(0).zscore(cov.columns[:-1])\ncov.heatmap(vmin=-1,vmax=1)" + "import pandas as pd\n\ncovariatesFile = os.path.join(get_resource_path(),'covariates_example.csv')\ncov = pd.read_csv(covariatesFile)\ncov = Design_Matrix(cov, sampling_freq=sampling_freq)\ncov.heatmap(vmin=-1,vmax=1) # alter plot to scale of covs; heatmap takes Seaborn heatmap arguments" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Concatenate Multiple Design Matrices\n------------------------------------\n\nA really nice feature of Design Matrix is simplified, but intelligent matrix concatentation. Here it's trivial to horizontally concatenate our convolved onsets and covariates, while keeping our column names and order.\n\n" + "Working with multiple Design Matrices\n-------------------------------------\n\nVertically \"stacking\" Design Matrices\n*************************************\nA common task is creating a separate design matrix for multiple runs of an experiment (or multiple subjects) and vertically appending them to each other so that regression can be performed across all runs of an experiment. However, in order to account for run differences it's important (and common practice) to include separate run-wise polynomials (e.g. intercepts). Design Matrix's append method is intelligent and flexible enough to keep columns separated during appending automatically.\n\n" ] }, { @@ -231,14 +220,14 @@ }, "outputs": [], "source": [ - "full = dm.append(cov,axis=1)\nfull.heatmap(vmin=-1,vmax=1)" + "# Let's use the design matrix with polynomials from above\n# Stack \"run 1\" on top of \"run 2\"\nruns_1_and_2 = dm_with_nuissance.append(dm_with_nuissance,axis=0)\nruns_1_and_2.heatmap()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "But we can also intelligently vertically concatenate design matrices to handle say, different experimental runs, or participants. The method enables the user to indicate which columns to keep separated (if any) during concatenation or which to treat as extensions along the first dimension. By default the class will keep all polylnomial terms separated. This is extremely useful when building 1 large design matrix composed of several runs or participants with separate means.\n\n" + "Separating columns during append operations\n*******************************************\nNotice that all polynomials have been kept separated for you automatically and have been renamed to reflect the fact that they come from different runs. But Design Matrix is even more flexible. Let's say you want to estimate separate run-wise coefficients for all house stimuli too. Simply pass that into the `unique_cols` parameter of append.\n\n" ] }, { @@ -249,14 +238,21 @@ }, "outputs": [], "source": [ - "dm2 = dm.append(dm, axis=0)\ndm2.heatmap(vmin=-1,vmax=1)" + "runs_1_and_2 = dm_with_nuissance.append(dm_with_nuissance,unique_cols=['house*'],axis=0)\nruns_1_and_2.heatmap()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now notice how all stimuli that begin with 'house' have been made into separate columns for each run. In general `unique_cols` can take a list of columns to keep separated or simple wildcards that either begin with a term (e.g. \"house*\") or end with one (e.g. \"*house\").\n\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Specific columns of interest can also be kept separate during concatenation (e.g. keeping run-wise spikes separate). 
As an example, we treat our first experimental regressor as different across our two design matrices. Notice that the class also preserves (as best as possible) column ordering.\n\n" + "Putting it all together\n-----------------------\n\nA realistic workflow\n********************\nLet's combine all the examples above to build a workflow for a realistic first-level fMRI analysis. This will include loading onsets from multiple experimental runs and concatenating them into a large multi-run design matrix where we estimate a single set of coefficients for our variables of interest, while making sure we account for run-wise differences in nuisance covariates (e.g. motion), baselines, trends, etc. For simplicity we'll just reuse the same onsets and covariates file multiple times.\n\n" ] }, { @@ -267,14 +263,21 @@ }, "outputs": [], "source": [ - "dm2 = dm.append(dm, axis=0, unique_cols=['BillyRiggins'])\ndm2.heatmap(vmin=-1,vmax=1)" + "num_runs = 4\nTR = 2.0\nsampling_freq = 1./TR\nall_runs = Design_Matrix(sampling_freq = sampling_freq)\nfor i in range(num_runs):\n\n # 1) Load in onsets for this run\n onsetsFile = os.path.join(get_resource_path(),'onsets_example.txt')\n dm = onsets_to_dm(onsetsFile, sampling_freq=sampling_freq,run_length=160,sort=True)\n\n # 2) Convolve them with the hrf\n dm = dm.convolve()\n\n # 3) Load in covariates for this run\n covariatesFile = os.path.join(get_resource_path(),'covariates_example.csv')\n cov = pd.read_csv(covariatesFile)\n cov = Design_Matrix(cov, sampling_freq = sampling_freq)\n\n # 4) In the covariates, fill any NaNs with 0, then add an intercept, a linear trend, and DCT basis functions\n cov = cov.fillna(0)\n\n # Retain a list of nuisance covariates (e.g. motion and spikes) that we'll want to keep separate for each run\n cov_columns = cov.columns\n cov = cov.add_poly(1).add_dct_basis()\n\n # 5) Join the onsets and covariates together\n full = dm.append(cov,axis=1)\n\n # 6) Append it to the master Design Matrix keeping things separated by run\n all_runs = all_runs.append(full,axis=0,unique_cols=cov.columns)\n\nall_runs.heatmap(vmin=-1,vmax=1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We can see the leftmost columns of our multi-run design matrix contain our conditions of interest (stacked across all runs), the middle columns include separate run-wise nuisance covariates (motion, spikes), and the rightmost columns contain run-specific polynomials (intercept, trends, etc.).\n\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Design Matrix can also create polynomial terms and intelligently keep them separate during concatenation. For example lets concatenate 4 design matrices and create separate 2nd order polynomials for all of them\n\n" + "Data Diagnostics\n----------------\n\nLet's actually check if our design is estimable. Design Matrix provides a few tools for cleaning up highly correlated columns (which would cause regression to fail), replacing data, and computing collinearity. 
By default the `clean` method will drop any columns correlated at r >= .95\n\n" ] }, { @@ -285,14 +288,21 @@ }, "outputs": [], "source": [ - "# Notice that append can take a list of Design Matrices in addition to just a single one\ndm_all = dm.append([dm,dm,dm], axis=0, add_poly=2)\ndm_all.heatmap(vmin=-1,vmax=1)" + "all_runs_cleaned = all_runs.clean(verbose=True)\nall_runs_cleaned.heatmap(vmin=-1,vmax=1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Whoops, it looks like some of our polynomials and DCT basis functions are highly correlated, but the clean method detected that and dropped them for us. In practice you'll often include either polynomials or DCT basis functions rather than both, but this was just an illustrative example.\n\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Data Diagnostics\n----------------\n\nDesign Matrix also provides a few tools for cleaning up perfectly correlated columns (resulting in failure if trying to perform regression), replacing data, and computing collinearity.\n\n" + "Estimating a first-level model\n------------------------------\n\nYou can now set this multi-run Design Matrix as the `X` attribute of a Brain_Data object containing EPI data for these four runs and estimate a regression in just a few lines of code.\n\n" ] }, { @@ -303,7 +313,7 @@ }, "outputs": [], "source": [ - "# We have a good design here so no problems\ndm_all.clean(verbose=False)\ndm_all.vif()" + "# This code is commented because we don't actually have niftis loaded for the purposes of this tutorial\n# See the other tutorials for more details on working with nifti files and Brain_Data objects\n\n# Assuming you already loaded up Nifti images like this\n# list_of_niftis = ['run_1.nii.gz','run_2.nii.gz','run_3.nii.gz','run_4.nii.gz']\n# all_run_data = Brain_Data(list_of_niftis)\n\n# Set our Design Matrix to the X attribute of Brain_Data object\n# all_run_data.X = all_runs_cleaned\n\n# Run the regression\n# results = all_run_data.regress()\n\n# This will produce N beta, t, and p images\n# where N is the number of columns in the design matrix" ] } ], diff --git a/docs/auto_examples/01_DataOperations/plot_design_matrix.py b/docs/auto_examples/01_DataOperations/plot_design_matrix.py index c22947d9..cc4ac352 100644 --- a/docs/auto_examples/01_DataOperations/plot_design_matrix.py +++ b/docs/auto_examples/01_DataOperations/plot_design_matrix.py @@ -1,6 +1,6 @@ """ -Design Matrix Creation -====================== +Design Matrix +============== This tutorial illustrates how to use the Design_Matrix class to flexibly create design matrices that can then be used with the Brain_Data class to perform univariate regression. @@ -17,7 +17,11 @@ from nltools.data import Design_Matrix import numpy as np +TR = 1.5 # Design Matrices take a sampling_freq argument specified in hertz, which can be computed as 1./TR + dm = Design_Matrix(np.array([ + [0,0,0,0], + [0,0,0,0], [1,0,0,0], [1,0,0,0], [0,0,0,0], @@ -28,10 +32,19 @@ [0,0,1,0], [0,0,0,0], [0,0,0,1], - [0,0,0,1] + [0,0,0,1], + [0,0,0,0], + [0,0,0,0], + [0,0,0,0], + [0,0,0,0], + [0,0,0,0], + [0,0,0,0], + [0,0,0,0], + [0,0,0,0], + [0,0,0,0] ]), - sampling_rate = 1.5, - columns=['stim_A','stim_B','stim_C','stim_D'] + sampling_freq = 1./TR, + columns=['face_A','face_B','house_A','house_B'] ) ######################################################################### # Notice how this look exactly like a pandas dataframe. 
That's because design matrices are *subclasses* of dataframes with some extra attributes and methods. @@ -48,10 +61,17 @@ dm.heatmap() + ######################################################################### -# A common operation might include adding an intercept and polynomial trend terms (e.g. linear and quadtratic) as nuisance regressors. This is easy to do. Note that polynomial terms are normalized to unit variance (i.e. mean = 0, std = 1) before inclusion to keep values on approximately the same scale. +# Adding nuisance covariates +# --------------------------- +# +# Legendre Polynomials +# ******************** +# +# A common operation is adding an intercept and polynomial trend terms (e.g. linear and quadratic) as nuisance regressors. This is easy to do. Consistent with other software packages, these are orthogonal Legendre polynomials on the scale -1 to 1. -# with include_lower = True (default), 1 here means: 0-intercept, 1-linear-trend, 2-quadtratic-trend +# with include_lower = True (default), 2 here means: 0-intercept, 1-linear-trend, 2-quadratic-trend dm_with_nuissance = dm.add_poly(2,include_lower=True) dm_with_nuissance.heatmap() @@ -61,91 +81,162 @@ print(dm_with_nuissance.details()) ######################################################################### -# Polynomial variables are not the only type of nuisance covariates that can be generate for you. Design Matrix also supports the creation of discrete-cosine basis functions ala SPM. This will create a series of filters added as new columns based on a specified duration, defaulting to 180s. +# Discrete Cosine Basis Functions +# ******************************* +# +# Polynomial variables are not the only type of nuisance covariates that can be generated for you. Design Matrix also supports the creation of discrete-cosine basis functions à la SPM. This will create a series of filters added as new columns based on a specified duration, defaulting to 180s. Let's create DCT filters with a 20s duration for our toy data. # Short filter duration for our simple example -dm_with_cosine = dm.add_dct_basis(duration=5) -print(dm_with_cosine.details()) +dm_with_cosine = dm.add_dct_basis(duration=20) +dm_with_cosine.heatmap() ######################################################################### -# Load and Manipulate an Onsets File -# ----------------------------------- +# Data operations +# --------------- +# +# Performing convolution +# ********************** # -# Nltools provides basic file-reading support for 2 or 3 column formatted onset files. Users can look at the onsets_to_dm function as a template to build more complex file readers if desired or to see additional features. Nltools includes an example onsets file where each event lasted exactly 1 TR. Lets use that to create a design matrix with an intercept and linear trend +# Design Matrix makes it easy to perform convolution and will auto-ignore all columns that are considered polynomials. The default convolution kernel is the Glover (1999) HRF, as parameterized by the glover_hrf implementation in nipy (see nltools.externals.hrf for details). However, any arbitrary kernel can be passed as a 1d numpy array, or multiple kernels can be passed as a 2d numpy array for highly flexible convolution across many types of data (e.g. SCR). 
+ +dm_with_nuissance_c = dm_with_nuissance.convolve() +print(dm_with_nuissance_c.details()) +dm_with_nuissance_c.heatmap() + +######################################################################### +# Design Matrix can do many different data operations in addition to convolution such as upsampling and downsampling to different frequencies, zscoring, etc. Check out the API documentation for how to use these methods. + +######################################################################### +# File Reading +# ------------ +# +# Creating a Design Matrix from an onsets file +# ******************************************** +# +# Nltools provides basic file-reading support for 2 or 3 column formatted onset files. Users can look at the onsets_to_dm function as a template to build more complex file readers if desired or to see additional features. Nltools includes an example onsets file where each event lasted exactly 1 TR and TR = 2s. Let's use that to create a design matrix with an intercept and linear trend. from nltools.utils import get_resource_path from nltools.file_reader import onsets_to_dm from nltools.data import Design_Matrix import os +TR = 2.0 +sampling_freq = 1./TR onsetsFile = os.path.join(get_resource_path(),'onsets_example.txt') -dm = onsets_to_dm(onsetsFile, TR=2.0, runLength=160, sort=True,add_poly=1) -dm.heatmap() - -######################################################################### -# Design Matrix makes it easy to perform convolution and will auto-ignore all columns that are consider polynomials. By default it will use the one-parameter glover_hrf kernel (see nipy for details). However, any kernel can be passed as an argument, including a list of different kernels for highly flexible convolution across many types of data (e.g. SCR). - -dm = dm.convolve() -print(dm.details()) +dm = onsets_to_dm(onsetsFile, sampling_freq=sampling_freq, run_length=160, sort=True,add_poly=1) dm.heatmap() ######################################################################### -# Load and Z-score a Covariates File -# ---------------------------------- +# Creating a Design Matrix from a generic csv file +# ************************************************ # -# Now we're going to handle a covariates file that's been generated by a preprocessing routine. First we'll read in the text file using pandas and convert it to a design matrix. +# Alternatively you can read a generic csv file and transform it into a Design Matrix using pandas' file-reading capabilities. Here we'll read in an example covariates file that contains the output of motion realignment estimated during an fMRI preprocessing pipeline. import pandas as pd covariatesFile = os.path.join(get_resource_path(),'covariates_example.csv') cov = pd.read_csv(covariatesFile) -cov = Design_Matrix(cov, sampling_rate = 2.0) -# Design matrix uses seaborn's heatmap for plotting so excepts all keyword arguments -# We're just adjusting colors here to visualize things a bit more nicely -cov.heatmap(vmin=-1,vmax=1) +cov = Design_Matrix(cov, sampling_freq=sampling_freq) +cov.heatmap(vmin=-1,vmax=1) # alter plot to scale of covs; heatmap takes Seaborn heatmap arguments ######################################################################### -# Similar to adding polynomial terms, Design Matrix has multiple methods for data processing and transformation such as downsampling, upsampling, and z-scoring. Let's use the z-score method to normalize the covariates we just loaded. 
+# Working with multiple Design Matrices +# ------------------------------------- +# +# Vertically "stacking" Design Matrices +# ************************************* +# A common task is creating a separate design matrix for multiple runs of an experiment (or multiple subjects) and vertically appending them to each other so that regression can be performed across all runs of an experiment. However, in order to account for run differences it's important (and common practice) to include separate run-wise polynomials (e.g. intercepts). Design Matrix's append method is intelligent and flexible enough to keep columns separated during appending automatically. -# Use pandas built-in fillna to fill NaNs in the covariates files introduced during the pre-processing pipeline, before z-scoring -# Z-score takes an optional argument of which columns to z-score. Since we don't want to z-score any spikes, so let's select everything except that column -cov = cov.fillna(0).zscore(cov.columns[:-1]) -cov.heatmap(vmin=-1,vmax=1) +# Let's use the design matrix with polynomials from above +# Stack "run 1" on top of "run 2" +runs_1_and_2 = dm_with_nuissance.append(dm_with_nuissance,axis=0) +runs_1_and_2.heatmap() ######################################################################### -# Concatenate Multiple Design Matrices -# ------------------------------------ -# -# A really nice feature of Design Matrix is simplified, but intelligent matrix concatentation. Here it's trivial to horizontally concatenate our convolved onsets and covariates, while keeping our column names and order. +# Separating columns during append operations +# ******************************************* +# Notice that all polynomials have been kept separated for you automatically and have been renamed to reflect the fact that they come from different runs. But Design Matrix is even more flexible. Let's say you want to estimate separate run-wise coefficients for all house stimuli too. Simply pass that into the `unique_cols` parameter of append. -full = dm.append(cov,axis=1) -full.heatmap(vmin=-1,vmax=1) +runs_1_and_2 = dm_with_nuissance.append(dm_with_nuissance,unique_cols=['house*'],axis=0) +runs_1_and_2.heatmap() ######################################################################### -# But we can also intelligently vertically concatenate design matrices to handle say, different experimental runs, or participants. The method enables the user to indicate which columns to keep separated (if any) during concatenation or which to treat as extensions along the first dimension. By default the class will keep all polylnomial terms separated. This is extremely useful when building 1 large design matrix composed of several runs or participants with separate means. - -dm2 = dm.append(dm, axis=0) -dm2.heatmap(vmin=-1,vmax=1) +# Now notice how all stimuli that begin with 'house' have been made into separate columns for each run. In general `unique_cols` can take a list of columns to keep separated or simple wildcards that either begin with a term (e.g. "house*") or end with one (e.g. "*house"). ######################################################################### -# Specific columns of interest can also be kept separate during concatenation (e.g. keeping run-wise spikes separate). As an example, we treat our first experimental regressor as different across our two design matrices. Notice that the class also preserves (as best as possible) column ordering. 
+# Putting it all together +# ----------------------- +# +# A realistic workflow +# ******************** +# Let's combine all the examples above to build a workflow for a realistic first-level fMRI analysis. This will include loading onsets from multiple experimental runs and concatenating them into a large multi-run design matrix where we estimate a single set of coefficients for our variables of interest, while making sure we account for run-wise differences in nuisance covariates (e.g. motion), baselines, trends, etc. For simplicity we'll just reuse the same onsets and covariates file multiple times. -dm2 = dm.append(dm, axis=0, unique_cols=['BillyRiggins']) -dm2.heatmap(vmin=-1,vmax=1) +num_runs = 4 +TR = 2.0 +sampling_freq = 1./TR +all_runs = Design_Matrix(sampling_freq = sampling_freq) +for i in range(num_runs): -######################################################################### -# Design Matrix can also create polynomial terms and intelligently keep them separate during concatenation. For example lets concatenate 4 design matrices and create separate 2nd order polynomials for all of them + # 1) Load in onsets for this run + onsetsFile = os.path.join(get_resource_path(),'onsets_example.txt') + dm = onsets_to_dm(onsetsFile, sampling_freq=sampling_freq,run_length=160,sort=True) + + # 2) Convolve them with the hrf + dm = dm.convolve() + + # 3) Load in covariates for this run + covariatesFile = os.path.join(get_resource_path(),'covariates_example.csv') + cov = pd.read_csv(covariatesFile) + cov = Design_Matrix(cov, sampling_freq = sampling_freq) + + # 4) In the covariates, fill any NaNs with 0, then add an intercept, a linear trend, and DCT basis functions + cov = cov.fillna(0) + + # Retain a list of nuisance covariates (e.g. motion and spikes) that we'll want to keep separate for each run + cov_columns = cov.columns + cov = cov.add_poly(1).add_dct_basis() + + # 5) Join the onsets and covariates together + full = dm.append(cov,axis=1) -# Notice that append can take a list of Design Matrices in addition to just a single one -dm_all = dm.append([dm,dm,dm], axis=0, add_poly=2) -dm_all.heatmap(vmin=-1,vmax=1) + # 6) Append it to the master Design Matrix keeping things separated by run + all_runs = all_runs.append(full,axis=0,unique_cols=cov.columns) + +all_runs.heatmap(vmin=-1,vmax=1) + +######################################################################### +# We can see the leftmost columns of our multi-run design matrix contain our conditions of interest (stacked across all runs), the middle columns include separate run-wise nuisance covariates (motion, spikes), and the rightmost columns contain run-specific polynomials (intercept, trends, etc.). ######################################################################### # Data Diagnostics # ---------------- # -# Design Matrix also provides a few tools for cleaning up perfectly correlated columns (resulting in failure if trying to perform regression), replacing data, and computing collinearity. +# Let's actually check if our design is estimable. Design Matrix provides a few tools for cleaning up highly correlated columns (which would cause regression to fail), replacing data, and computing collinearity. 
By default the `clean` method will drop any columns correlated at r >= .95 + +all_runs_cleaned = all_runs.clean(verbose=True) +all_runs_cleaned.heatmap(vmin=-1,vmax=1) + +######################################################################### +# Whoops, it looks like some of our polynomials and DCT basis functions are highly correlated, but the clean method detected that and dropped them for us. In practice you'll often include either polynomials or DCT basis functions rather than both, but this was just an illustrative example. + +######################################################################### +# Estimating a first-level model +# ------------------------------ +# +# You can now set this multi-run Design Matrix as the `X` attribute of a Brain_Data object containing EPI data for these four runs and estimate a regression in just a few lines of code. + +# This code is commented because we don't actually have niftis loaded for the purposes of this tutorial +# See the other tutorials for more details on working with nifti files and Brain_Data objects + +# Assuming you already loaded up Nifti images like this +# list_of_niftis = ['run_1.nii.gz','run_2.nii.gz','run_3.nii.gz','run_4.nii.gz'] +# all_run_data = Brain_Data(list_of_niftis) + +# Set our Design Matrix to the X attribute of Brain_Data object +# all_run_data.X = all_runs_cleaned + +# Run the regression +# results = all_run_data.regress() + +# This will produce N beta, t, and p images +# where N is the number of columns in the design matrix diff --git a/docs/auto_examples/01_DataOperations/plot_design_matrix.py.md5 b/docs/auto_examples/01_DataOperations/plot_design_matrix.py.md5 index 91f275b9..869cb99a 100644 --- a/docs/auto_examples/01_DataOperations/plot_design_matrix.py.md5 +++ b/docs/auto_examples/01_DataOperations/plot_design_matrix.py.md5 @@ -1 +1 @@ -674c1c7e55385d8ebc2e109812f5c3b8 \ No newline at end of file +d25dc9c6791965dca7df05760d6549bf \ No newline at end of file diff --git a/docs/auto_examples/01_DataOperations/plot_design_matrix.rst b/docs/auto_examples/01_DataOperations/plot_design_matrix.rst index f4cd77d0..bd4abb81 100644 --- a/docs/auto_examples/01_DataOperations/plot_design_matrix.rst +++ b/docs/auto_examples/01_DataOperations/plot_design_matrix.rst @@ -3,8 +3,8 @@ .. _sphx_glr_auto_examples_01_DataOperations_plot_design_matrix.py: -Design Matrix Creation -====================== +Design Matrix +============== This tutorial illustrates how to use the Design_Matrix class to flexibly create design matrices that can then be used with the Brain_Data class to perform univariate regression. 
@@ -25,7 +25,11 @@ Lets just create a basic toy design matrix by hand corresponding to a single par from nltools.data import Design_Matrix import numpy as np + TR = 1.5 # Design Matrices take a sampling_freq argument specified in hertz, which can be computed as 1./TR + dm = Design_Matrix(np.array([ + [0,0,0,0], + [0,0,0,0], [1,0,0,0], [1,0,0,0], [0,0,0,0], @@ -36,10 +40,19 @@ Lets just create a basic toy design matrix by hand corresponding to a single par [0,0,1,0], [0,0,0,0], [0,0,0,1], - [0,0,0,1] + [0,0,0,1], + [0,0,0,0], + [0,0,0,0], + [0,0,0,0], + [0,0,0,0], + [0,0,0,0], + [0,0,0,0], + [0,0,0,0], + [0,0,0,0], + [0,0,0,0] ]), - sampling_rate = 1.5, - columns=['stim_A','stim_B','stim_C','stim_D'] + sampling_freq = 1./TR, + columns=['face_A','face_B','house_A','house_B'] ) @@ -64,18 +77,29 @@ Notice how this look exactly like a pandas dataframe. That's because design matr Out:: - stim_A stim_B stim_C stim_D - 0 1 0 0 0 - 1 1 0 0 0 - 2 0 0 0 0 - 3 0 1 0 0 - 4 0 1 0 0 - 5 0 0 0 0 - 6 0 0 1 0 - 7 0 0 1 0 - 8 0 0 0 0 - 9 0 0 0 1 - 10 0 0 0 1 + face_A face_B house_A house_B + 0 0 0 0 0 + 1 0 0 0 0 + 2 1 0 0 0 + 3 1 0 0 0 + 4 0 0 0 0 + 5 0 1 0 0 + 6 0 1 0 0 + 7 0 0 0 0 + 8 0 0 1 0 + 9 0 0 1 0 + 10 0 0 0 0 + 11 0 0 0 1 + 12 0 0 0 1 + 13 0 0 0 0 + 14 0 0 0 0 + 15 0 0 0 0 + 16 0 0 0 0 + 17 0 0 0 0 + 18 0 0 0 0 + 19 0 0 0 0 + 20 0 0 0 0 + 21 0 0 0 0 Let's take a look at some of that meta-data. We can see that no columns have been convolved as of yet and this design matrix has no polynomial terms (e.g. such as an intercept or linear trend). @@ -95,7 +119,7 @@ Let's take a look at some of that meta-data. We can see that no columns have bee Out:: - nltools.data.design_matrix.Design_Matrix(sampling_rate=1.5, shape=(11, 4), convolved=[], polynomials=[]) + nltools.data.design_matrix.Design_Matrix(sampling_freq=0.6666666666666666 (hz), shape=(22, 4), multi=False, convolved=[], polynomials=[]) We can also easily visualize the design matrix using an SPM/AFNI/FSL style heatmap @@ -110,20 +134,27 @@ We can also easily visualize the design matrix using an SPM/AFNI/FSL style heatm + .. image:: /auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_001.png :align: center -A common operation might include adding an intercept and polynomial trend terms (e.g. linear and quadtratic) as nuisance regressors. This is easy to do. Note that polynomial terms are normalized to unit variance (i.e. mean = 0, std = 1) before inclusion to keep values on approximately the same scale. +Adding nuisance covariates +--------------------------- + +Legendre Polynomials +******************** + +A common operation is adding an intercept and polynomial trend terms (e.g. linear and quadratic) as nuisance regressors. This is easy to do. Consistent with other software packages, these are orthogonal Legendre polynomials on the scale -1 to 1. .. code-block:: python - # with include_lower = True (default), 1 here means: 0-intercept, 1-linear-trend, 2-quadtratic-trend + # with include_lower = True (default), 2 here means: 0-intercept, 1-linear-trend, 2-quadratic-trend dm_with_nuissance = dm.add_poly(2,include_lower=True) dm_with_nuissance.heatmap() @@ -153,10 +184,13 @@ We can see that 3 new columns were added to the design matrix. 
We can also inspe Out:: - nltools.data.design_matrix.Design_Matrix(sampling_rate=1.5, shape=(11, 7), convolved=[], polynomials=['intercept', 'poly_1', 'poly_2']) + nltools.data.design_matrix.Design_Matrix(sampling_freq=0.6666666666666666 (hz), shape=(22, 7), multi=False, convolved=[], polynomials=['poly_0', 'poly_1', 'poly_2']) -Polynomial variables are not the only type of nuisance covariates that can be generate for you. Design Matrix also supports the creation of discrete-cosine basis functions ala SPM. This will create a series of filters added as new columns based on a specified duration, defaulting to 180s. +Discrete Cosine Basis Functions +******************************* + +Polynomial variables are not the only type of nuisance covariates that can be generated for you. Design Matrix also supports the creation of discrete-cosine basis functions à la SPM. This will create a series of filters added as new columns based on a specified duration, defaulting to 180s. Let's create DCT filters with a 20s duration for our toy data. .. code-block:: python # Short filter duration for our simple example - dm_with_cosine = dm.add_dct_basis(duration=5) - print(dm_with_cosine.details()) + dm_with_cosine = dm.add_dct_basis(duration=20) + dm_with_cosine.heatmap() +.. image:: /auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_003.png + :align: center -.. rst-class:: sphx-glr-script-out - Out:: - nltools.data.design_matrix.Design_Matrix(sampling_rate=1.5, shape=(11, 10), convolved=[], polynomials=['cosine_1', 'cosine_2', 'cosine_3', 'cosine_4', 'cosine_5', 'cosine_6']) +Data operations +--------------- -Load and Manipulate an Onsets File ------------------------------------ +Performing convolution +********************** -Nltools provides basic file-reading support for 2 or 3 column formatted onset files. Users can look at the onsets_to_dm function as a template to build more complex file readers if desired or to see additional features. Nltools includes an example onsets file where each event lasted exactly 1 TR. Lets use that to create a design matrix with an intercept and linear trend +Design Matrix makes it easy to perform convolution and will auto-ignore all columns that are considered polynomials. The default convolution kernel is the Glover (1999) HRF, as parameterized by the glover_hrf implementation in nipy (see nltools.externals.hrf for details). However, any arbitrary kernel can be passed as a 1d numpy array, or multiple kernels can be passed as a 2d numpy array for highly flexible convolution across many types of data (e.g. SCR). .. code-block:: python - from nltools.utils import get_resource_path - from nltools.file_reader import onsets_to_dm - from nltools.data import Design_Matrix - import os - - onsetsFile = os.path.join(get_resource_path(),'onsets_example.txt') - dm = onsets_to_dm(onsetsFile, TR=2.0, runLength=160, sort=True,add_poly=1) - dm.heatmap() + dm_with_nuissance_c = dm_with_nuissance.convolve() + print(dm_with_nuissance_c.details()) + dm_with_nuissance_c.heatmap() -.. image:: /auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_003.png +.. image:: /auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_004.png :align: center +.. 
rst-class:: sphx-glr-script-out + + Out:: + + nltools.data.design_matrix.Design_Matrix(sampling_freq=0.6666666666666666 (hz), shape=(22, 7), multi=False, convolved=['face_A', 'face_B', 'house_A', 'house_B'], polynomials=['poly_0', 'poly_1', 'poly_2']) + + +Design Matrix can do many different data operations in addition to convolution such as upsampling and downsampling to different frequencies, zscoring, etc. Check out the API documentation for how to use these methods. -Design Matrix makes it easy to perform convolution and will auto-ignore all columns that are consider polynomials. By default it will use the one-parameter glover_hrf kernel (see nipy for details). However, any kernel can be passed as an argument, including a list of different kernels for highly flexible convolution across many types of data (e.g. SCR). +File Reading +------------ + +Creating a Design Matrix from an onsets file +******************************************** + +Nltools provides basic file-reading support for 2 or 3 column formatted onset files. Users can look at the onsets_to_dm function as a template to build more complex file readers if desired or to see additional features. Nltools includes an example onsets file where each event lasted exactly 1 TR and TR = 2s. Let's use that to create a design matrix with an intercept and linear trend. .. code-block:: python - dm = dm.convolve() - print(dm.details()) + from nltools.utils import get_resource_path + from nltools.file_reader import onsets_to_dm + from nltools.data import Design_Matrix + import os + + TR = 2.0 + sampling_freq = 1./TR + onsetsFile = os.path.join(get_resource_path(),'onsets_example.txt') + dm = onsets_to_dm(onsetsFile, sampling_freq=sampling_freq, run_length=160, sort=True,add_poly=1) dm.heatmap() -.. image:: /auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_004.png +.. image:: /auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_005.png :align: center -.. rst-class:: sphx-glr-script-out - - Out:: - - nltools.data.design_matrix.Design_Matrix(sampling_rate=2.0, shape=(160, 15), convolved=['BillyRiggins', 'BuddyGarrity', 'CoachTaylor', 'GrandmaSaracen', 'JasonStreet', 'JulieTaylor', 'LandryClarke', 'LylaGarrity', 'MattSaracen', 'SmashWilliams', 'TamiTaylor', 'TimRiggins', 'TyraCollette'], polynomials=['intercept', 'poly_1']) -Load and Z-score a Covariates File ----------------------------------- +Creating a Design Matrix from a generic csv file +************************************************ -Now we're going to handle a covariates file that's been generated by a preprocessing routine. First we'll read in the text file using pandas and convert it to a design matrix. +Alternatively you can read a generic csv file and transform it into a Design Matrix using pandas' file-reading capabilities. Here we'll read in an example covariates file that contains the output of motion realignment estimated during an fMRI preprocessing pipeline. 
@@ -245,91 +291,112 @@ Now we're going to handle a covariates file that's been generated by a preproces covariatesFile = os.path.join(get_resource_path(),'covariates_example.csv') cov = pd.read_csv(covariatesFile) - cov = Design_Matrix(cov, sampling_rate = 2.0) - # Design matrix uses seaborn's heatmap for plotting so excepts all keyword arguments - # We're just adjusting colors here to visualize things a bit more nicely - cov.heatmap(vmin=-1,vmax=1) + cov = Design_Matrix(cov, sampling_freq=sampling_freq) + cov.heatmap(vmin=-1,vmax=1) # alter plot to scale of covs; heatmap takes Seaborn heatmap arguments -.. image:: /auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_005.png +.. image:: /auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_006.png :align: center -Similar to adding polynomial terms, Design Matrix has multiple methods for data processing and transformation such as downsampling, upsampling, and z-scoring. Let's use the z-score method to normalize the covariates we just loaded. +Working with multiple Design Matrices +------------------------------------- + +Vertically "stacking" Design Matrices +************************************* +A common task is creating a separate design matrix for multiple runs of an experiment (or multiple subjects) and vertically appending them to each other so that regression can be performed across all runs of an experiment. However, in order to account for run differences it's important (and common practice) to include separate run-wise polynomials (e.g. intercepts). Design Matrix's append method is intelligent and flexible enough to keep columns separated during appending automatically. .. code-block:: python - # Use pandas built-in fillna to fill NaNs in the covariates files introduced during the pre-processing pipeline, before z-scoring - # Z-score takes an optional argument of which columns to z-score. Since we don't want to z-score any spikes, so let's select everything except that column - cov = cov.fillna(0).zscore(cov.columns[:-1]) - cov.heatmap(vmin=-1,vmax=1) + # Let's use the design matrix with polynomials from above + # Stack "run 1" on top of "run 2" + runs_1_and_2 = dm_with_nuissance.append(dm_with_nuissance,axis=0) + runs_1_and_2.heatmap() -.. image:: /auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_006.png +.. image:: /auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_007.png :align: center -Concatenate Multiple Design Matrices ------------------------------------- - -A really nice feature of Design Matrix is simplified, but intelligent matrix concatentation. Here it's trivial to horizontally concatenate our convolved onsets and covariates, while keeping our column names and order. +Separating columns during append operations +******************************************* +Notice that all polynomials have been kept separated for you automatically and have been renamed to reflect the fact that they come from different runs. But Design Matrix is even more flexible. Let's say you want to estimate separate run-wise coefficients for all house stimuli too. Simply pass that into the `unique_cols` parameter of append. .. code-block:: python - full = dm.append(cov,axis=1) - full.heatmap(vmin=-1,vmax=1) + runs_1_and_2 = dm_with_nuissance.append(dm_with_nuissance,unique_cols=['house*'],axis=0) + runs_1_and_2.heatmap() -.. image:: /auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_007.png +.. 
image:: /auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_008.png :align: center -But we can also intelligently vertically concatenate design matrices to handle say, different experimental runs, or participants. The method enables the user to indicate which columns to keep separated (if any) during concatenation or which to treat as extensions along the first dimension. By default the class will keep all polylnomial terms separated. This is extremely useful when building 1 large design matrix composed of several runs or participants with separate means. +Now notice how all stimuli that begin with 'house' have been made into separate columns for each run. In general `unique_cols` can take a list of columns to keep separated or simple wildcards that either begin with a term (e.g. "house*") or end with one (e.g. "*house"). +Putting it all together +----------------------- -.. code-block:: python - - dm2 = dm.append(dm, axis=0) - dm2.heatmap(vmin=-1,vmax=1) +A realistic workflow +******************** +Let's combine all the examples above to build a workflow for a realistic first-level fMRI analysis. This will include loading onsets from multiple experimental runs and concatenating them into a large multi-run design matrix where we estimate a single set of coefficients for our variables of interest, while making sure we account for run-wise differences in nuisance covariates (e.g. motion), baselines, trends, etc. For simplicity we'll just reuse the same onsets and covariates file multiple times. +.. code-block:: python -.. image:: /auto_examples/01_DataOperations/images/sphx_glr_plot_design_matrix_008.png - :align: center + num_runs = 4 + TR = 2.0 + sampling_freq = 1./TR + all_runs = Design_Matrix(sampling_freq = sampling_freq) + for i in range(num_runs): -Specific columns of interest can also be kept separate during concatenation (e.g. keeping run-wise spikes separate). As an example, we treat our first experimental regressor as different across our two design matrices. Notice that the class also preserves (as best as possible) column ordering. + # 1) Load in onsets for this run + onsetsFile = os.path.join(get_resource_path(),'onsets_example.txt') + dm = onsets_to_dm(onsetsFile, sampling_freq=sampling_freq,run_length=160,sort=True) + # 2) Convolve them with the hrf + dm = dm.convolve() -.. code-block:: python + # 3) Load in covariates for this run + covariatesFile = os.path.join(get_resource_path(),'covariates_example.csv') + cov = pd.read_csv(covariatesFile) + cov = Design_Matrix(cov, sampling_freq = sampling_freq) + # 4) In the covariates, fill any NaNs with 0, then add an intercept, a linear trend, and DCT basis functions + cov = cov.fillna(0) + # Retain a list of nuisance covariates (e.g. motion and spikes) that we'll want to keep separate for each run + cov_columns = cov.columns + cov = cov.add_poly(1).add_dct_basis() -.. code-block:: python - dm2 = dm.append(dm, axis=0, unique_cols=['BillyRiggins']) - dm2.heatmap(vmin=-1,vmax=1) + # 5) Join the onsets and covariates together + full = dm.append(cov,axis=1) + # 6) Append it to the master Design Matrix keeping things separated by run + all_runs = all_runs.append(full,axis=0,unique_cols=cov.columns) - all_runs.heatmap(vmin=-1,vmax=1) @@ -340,16 +407,21 @@ Specific columns of interest can also be kept separate during concatenation (e.g -Design Matrix can also create polynomial terms and intelligently keep them separate during concatenation. 
For example lets concatenate 4 design matrices and create separate 2nd order polynomials for all of them +We can see the leftmost columns of our multi-run design matrix contain our conditions of interest (stacked across all runs), the middle columns include separate run-wise nuisance covariates (motion, spikes), and the rightmost columns contain run-specific polynomials (intercept, trends, etc.). + + +Data Diagnostics +---------------- + +Let's actually check if our design is estimable. Design Matrix provides a few tools for cleaning up highly correlated columns (which would cause regression to fail), replacing data, and computing collinearity. By default the `clean` method will drop any columns correlated at r >= .95 .. code-block:: python - # Notice that append can take a list of Design Matrices in addition to just a single one - dm_all = dm.append([dm,dm,dm], axis=0, add_poly=2) - dm_all.heatmap(vmin=-1,vmax=1) + all_runs_cleaned = all_runs.clean(verbose=True) + all_runs_cleaned.heatmap(vmin=-1,vmax=1) @@ -362,41 +434,47 @@ Design Matrix can also create polynomial terms and intelligently keep them separ Out:: - Design Matrix already has intercept...skipping - Design Matrix already has 1th order polynomial...skipping - Design Matrix already has intercept...skipping - Design Matrix already has 1th order polynomial...skipping - Design Matrix already has intercept...skipping - Design Matrix already has 1th order polynomial...skipping - Design Matrix already has intercept...skipping - Design Matrix already has 1th order polynomial...skipping + 0_poly_1 and 0_cosine_1 correlated at 0.99 which is >= threshold of 0.95. Dropping 0_cosine_1 + 1_poly_1 and 1_cosine_1 correlated at 0.99 which is >= threshold of 0.95. Dropping 1_cosine_1 + 2_poly_1 and 2_cosine_1 correlated at 0.99 which is >= threshold of 0.95. Dropping 2_cosine_1 + 3_poly_1 and 3_cosine_1 correlated at 0.99 which is >= threshold of 0.95. Dropping 3_cosine_1 -Data Diagnostics ----------------- +Whoops, it looks like some of our polynomials and DCT basis functions are highly correlated, but the clean method detected that and dropped them for us. In practice you'll often include either polynomials or DCT basis functions rather than both, but this was just an illustrative example. + + +Estimating a first-level model +------------------------------ -Design Matrix also provides a few tools for cleaning up perfectly correlated columns (resulting in failure if trying to perform regression), replacing data, and computing collinearity. +You can now set this multi-run Design Matrix as the `X` attribute of a Brain_Data object containing EPI data for these four runs and estimate a regression in just a few lines of code. .. code-block:: python - # We have a good design here so no problems - dm_all.clean(verbose=False) - dm_all.vif() + # This code is commented because we don't actually have niftis loaded for the purposes of this tutorial + # See the other tutorials for more details on working with nifti files and Brain_Data objects + # Assuming you already loaded up Nifti images like this + # list_of_niftis = ['run_1.nii.gz','run_2.nii.gz','run_3.nii.gz','run_4.nii.gz'] + # all_run_data = Brain_Data(list_of_niftis) + # Set our Design Matrix to the X attribute of Brain_Data object + # all_run_data.X = all_runs_cleaned + + # Run the regression + # results = all_run_data.regress() + + # This will produce N beta, t, and p images + # where N is the number of columns in the design matrix -.. 
-.. rst-class:: sphx-glr-script-out

- Out::

-    Dropping columns not needed...skipping

-**Total running time of the script:** ( 0 minutes 1.402 seconds)
+**Total running time of the script:** ( 0 minutes 2.540 seconds)

diff --git a/docs/auto_examples/01_DataOperations/plot_design_matrix_codeobj.pickle b/docs/auto_examples/01_DataOperations/plot_design_matrix_codeobj.pickle
index ea62ebef..280911a6 100644
Binary files a/docs/auto_examples/01_DataOperations/plot_design_matrix_codeobj.pickle and b/docs/auto_examples/01_DataOperations/plot_design_matrix_codeobj.pickle differ
diff --git a/docs/auto_examples/01_DataOperations/plot_mask_codeobj.pickle b/docs/auto_examples/01_DataOperations/plot_mask_codeobj.pickle
index a0f37557..fc7a7e28 100644
Binary files a/docs/auto_examples/01_DataOperations/plot_mask_codeobj.pickle and b/docs/auto_examples/01_DataOperations/plot_mask_codeobj.pickle differ
diff --git a/docs/auto_examples/01_DataOperations/plot_neurovault_io_codeobj.pickle b/docs/auto_examples/01_DataOperations/plot_neurovault_io_codeobj.pickle
index 6c283062..3852af7b 100644
Binary files a/docs/auto_examples/01_DataOperations/plot_neurovault_io_codeobj.pickle and b/docs/auto_examples/01_DataOperations/plot_neurovault_io_codeobj.pickle differ
diff --git a/docs/auto_examples/02_Analysis/plot_decomposition_codeobj.pickle b/docs/auto_examples/02_Analysis/plot_decomposition_codeobj.pickle
index 2ed98b88..f44a13cd 100644
Binary files a/docs/auto_examples/02_Analysis/plot_decomposition_codeobj.pickle and b/docs/auto_examples/02_Analysis/plot_decomposition_codeobj.pickle differ
diff --git a/docs/auto_examples/02_Analysis/plot_multivariate_classification_codeobj.pickle b/docs/auto_examples/02_Analysis/plot_multivariate_classification_codeobj.pickle
index cb5c5a1f..e8dffee5 100644
Binary files a/docs/auto_examples/02_Analysis/plot_multivariate_classification_codeobj.pickle and b/docs/auto_examples/02_Analysis/plot_multivariate_classification_codeobj.pickle differ
diff --git a/docs/auto_examples/02_Analysis/plot_similarity_example_codeobj.pickle b/docs/auto_examples/02_Analysis/plot_similarity_example_codeobj.pickle
index 465f2e72..a5e3374d 100644
Binary files a/docs/auto_examples/02_Analysis/plot_similarity_example_codeobj.pickle and b/docs/auto_examples/02_Analysis/plot_similarity_example_codeobj.pickle differ
diff --git a/docs/auto_examples/02_Analysis/plot_univariate_regression_codeobj.pickle b/docs/auto_examples/02_Analysis/plot_univariate_regression_codeobj.pickle
index 25181d70..92b31f04 100644
Binary files a/docs/auto_examples/02_Analysis/plot_univariate_regression_codeobj.pickle and b/docs/auto_examples/02_Analysis/plot_univariate_regression_codeobj.pickle differ
diff --git a/docs/auto_examples/auto_examples_jupyter.zip b/docs/auto_examples/auto_examples_jupyter.zip
index adf0a30b..48a457b7 100644
Binary files a/docs/auto_examples/auto_examples_jupyter.zip and b/docs/auto_examples/auto_examples_jupyter.zip differ
diff --git a/docs/auto_examples/auto_examples_python.zip b/docs/auto_examples/auto_examples_python.zip
index 6b09ddff..4862341f 100644
Binary files a/docs/auto_examples/auto_examples_python.zip and b/docs/auto_examples/auto_examples_python.zip differ
diff --git a/docs/auto_examples/index.rst b/docs/auto_examples/index.rst
index 8b1af970..5ff409e2 100644
--- a/docs/auto_examples/index.rst
+++ b/docs/auto_examples/index.rst
@@ -235,13 +235,13 @@ Neuroimaging Analysis Examples
 .. container:: sphx-glr-download

-    :download:`Download all examples in Python source code: auto_examples_python.zip `
+    :download:`Download all examples in Python source code: auto_examples_python.zip `

 .. container:: sphx-glr-download

-    :download:`Download all examples in Jupyter notebooks: auto_examples_jupyter.zip `
+    :download:`Download all examples in Jupyter notebooks: auto_examples_jupyter.zip `

 .. only:: html

diff --git a/examples/01_DataOperations/plot_design_matrix.py b/examples/01_DataOperations/plot_design_matrix.py
index c22947d9..cc4ac352 100644
--- a/examples/01_DataOperations/plot_design_matrix.py
+++ b/examples/01_DataOperations/plot_design_matrix.py
@@ -1,6 +1,6 @@
 """
-Design Matrix Creation
-======================
+Design Matrix
+==============

 This tutorial illustrates how to use the Design_Matrix class to flexibly create design matrices that can then be used with the Brain_Data class to perform univariate regression.

@@ -17,7 +17,11 @@
 from nltools.data import Design_Matrix
 import numpy as np

+TR = 1.5 # Design Matrices take a sampling_freq argument specified in hertz, which can be computed as 1./TR
+
 dm = Design_Matrix(np.array([
+                        [0,0,0,0],
+                        [0,0,0,0],
                         [1,0,0,0],
                         [1,0,0,0],
                         [0,0,0,0],
@@ -28,10 +32,19 @@
                         [0,0,1,0],
                         [0,0,1,0],
                         [0,0,0,0],
                         [0,0,0,1],
-                        [0,0,0,1]
+                        [0,0,0,1],
+                        [0,0,0,0],
+                        [0,0,0,0],
+                        [0,0,0,0],
+                        [0,0,0,0],
+                        [0,0,0,0],
+                        [0,0,0,0],
+                        [0,0,0,0],
+                        [0,0,0,0],
+                        [0,0,0,0]
                         ]),
-                        sampling_rate = 1.5,
-                        columns=['stim_A','stim_B','stim_C','stim_D']
+                        sampling_freq = 1./TR,
+                        columns=['face_A','face_B','house_A','house_B']
                         )

 #########################################################################
 # Notice how this looks exactly like a pandas dataframe. That's because design matrices are *subclasses* of dataframes with some extra attributes and methods.

@@ -48,10 +61,17 @@
 dm.heatmap()
+

 #########################################################################
-# A common operation might include adding an intercept and polynomial trend terms (e.g. linear and quadtratic) as nuisance regressors. This is easy to do. Note that polynomial terms are normalized to unit variance (i.e. mean = 0, std = 1) before inclusion to keep values on approximately the same scale.
+# Adding nuisance covariates
+# ---------------------------
+#
+# Legendre Polynomials
+# ********************
+#
+# A common operation is adding an intercept and polynomial trend terms (e.g. linear and quadratic) as nuisance regressors. This is easy to do. Consistent with other software packages, these are orthogonal Legendre polynomials on the scale -1 to 1.

-# with include_lower = True (default), 1 here means: 0-intercept, 1-linear-trend, 2-quadtratic-trend
+# with include_lower = True (default), 2 here means: 0-intercept, 1-linear-trend, 2-quadratic-trend
 dm_with_nuissance = dm.add_poly(2,include_lower=True)
 dm_with_nuissance.heatmap()

@@ -61,91 +81,162 @@
 print(dm_with_nuissance.details())
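+
+#########################################################################
+# A quick aside (a minimal editorial sketch, not part of the original
+# tutorial output): passing include_lower=False adds only the requested
+# order, e.g. just a quadratic trend without the intercept and linear terms.
+
+dm_quad_only = dm.add_poly(2, include_lower=False)
+print(dm_quad_only.details())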
 #########################################################################
-# Polynomial variables are not the only type of nuisance covariates that can be generate for you. Design Matrix also supports the creation of discrete-cosine basis functions ala SPM. This will create a series of filters added as new columns based on a specified duration, defaulting to 180s.
+# Discrete Cosine Basis Functions
+# *******************************
+#
+# Polynomial variables are not the only type of nuisance covariates that can be generated for you. Design Matrix also supports the creation of discrete-cosine basis functions à la SPM. This will create a series of filters added as new columns based on a specified duration, defaulting to 180s. Let's create DCT filters with a 20s duration for our toy data.

 # Short filter duration for our simple example
-dm_with_cosine = dm.add_dct_basis(duration=5)
-print(dm_with_cosine.details())
+dm_with_cosine = dm.add_dct_basis(duration=20)
+dm_with_cosine.heatmap()

 #########################################################################
-# Load and Manipulate an Onsets File
-# -----------------------------------
+# Data operations
+# ---------------
+#
+# Performing convolution
+# **********************
 #
-# Nltools provides basic file-reading support for 2 or 3 column formatted onset files. Users can look at the onsets_to_dm function as a template to build more complex file readers if desired or to see additional features. Nltools includes an example onsets file where each event lasted exactly 1 TR. Lets use that to create a design matrix with an intercept and linear trend
+# Design Matrix makes it easy to perform convolution and will auto-ignore all columns that are considered polynomials. The default convolution kernel is the Glover (1999) HRF parameterized by the glover_hrf implementation in nipy (see nltools.externals.hrf for details). However, any arbitrary kernel can be passed as a 1d numpy array, or multiple kernels can be passed as a 2d numpy array for highly flexible convolution across many types of data (e.g. SCR).
+
+dm_with_nuissance_c = dm_with_nuissance.convolve()
+print(dm_with_nuissance_c.details())
+dm_with_nuissance_c.heatmap()
+
+#########################################################################
+# Design Matrix can do many different data operations in addition to convolution, such as upsampling and downsampling to different frequencies, zscoring, etc. Check out the API documentation for how to use these methods.
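+
+#########################################################################
+# For example (a minimal editorial sketch, not part of the original gallery
+# output): a custom 1d kernel can be passed to convolve in place of the
+# default HRF, and a subset of columns can be z-scored. The 5-sample boxcar
+# below is a made-up stand-in for something like an SCR kernel.
+
+boxcar = np.ones(5) / 5.
+dm_custom_c = dm_with_nuissance.convolve(conv_func=boxcar)
+print(dm_custom_c.details())
+
+# z-scoring a column subset (same column-list style as the covariates
+# example this changeset removes)
+dm_z = dm_with_nuissance.zscore(dm_with_nuissance.columns[:2])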
+
+#########################################################################
+# File Reading
+# ------------
+#
+# Creating a Design Matrix from an onsets file
+# ********************************************
+#
+# Nltools provides basic file-reading support for 2 or 3 column formatted onset files. Users can look at the onsets_to_dm function as a template to build more complex file readers if desired or to see additional features. Nltools includes an example onsets file where each event lasted exactly 1 TR and TR = 2s. Let's use that to create a design matrix with an intercept and linear trend

 from nltools.utils import get_resource_path
 from nltools.file_reader import onsets_to_dm
 from nltools.data import Design_Matrix
 import os

+TR = 2.0
+sampling_freq = 1./TR
 onsetsFile = os.path.join(get_resource_path(),'onsets_example.txt')
-dm = onsets_to_dm(onsetsFile, TR=2.0, runLength=160, sort=True,add_poly=1)
-dm.heatmap()
-
-#########################################################################
-# Design Matrix makes it easy to perform convolution and will auto-ignore all columns that are consider polynomials. By default it will use the one-parameter glover_hrf kernel (see nipy for details). However, any kernel can be passed as an argument, including a list of different kernels for highly flexible convolution across many types of data (e.g. SCR).
-
-dm = dm.convolve()
-print(dm.details())
+dm = onsets_to_dm(onsetsFile, sampling_freq=sampling_freq, run_length=160, sort=True, add_poly=1)
 dm.heatmap()

 #########################################################################
-# Load and Z-score a Covariates File
-# ----------------------------------
+# Creating a Design Matrix from a generic csv file
+# ************************************************
 #
-# Now we're going to handle a covariates file that's been generated by a preprocessing routine. First we'll read in the text file using pandas and convert it to a design matrix.
+# Alternatively you can read a generic csv file and transform it into a Design Matrix using pandas' file-reading capability. Here we'll read in an example covariates file that contains the output of motion realignment estimated during an fMRI preprocessing pipeline.

 import pandas as pd

 covariatesFile = os.path.join(get_resource_path(),'covariates_example.csv')
 cov = pd.read_csv(covariatesFile)
-cov = Design_Matrix(cov, sampling_rate = 2.0)
-# Design matrix uses seaborn's heatmap for plotting so excepts all keyword arguments
-# We're just adjusting colors here to visualize things a bit more nicely
-cov.heatmap(vmin=-1,vmax=1)
+cov = Design_Matrix(cov, sampling_freq=sampling_freq)
+cov.heatmap(vmin=-1,vmax=1) # alter plot to scale of covs; heatmap takes Seaborn heatmap arguments

 #########################################################################
-# Similar to adding polynomial terms, Design Matrix has multiple methods for data processing and transformation such as downsampling, upsampling, and z-scoring. Let's use the z-score method to normalize the covariates we just loaded.
+# Working with multiple Design Matrices
+# -------------------------------------
+#
+# Vertically "stacking" Design Matrices
+# *************************************
+# A common task is creating a separate design matrix for each of multiple runs of an experiment (or multiple subjects) and vertically appending them to each other so that regression can be performed across all runs. However, in order to account for run differences it's important (and common practice) to include separate run-wise polynomials (e.g. intercepts). Design Matrix's append method is intelligent and flexible enough to keep columns separated during appending automatically.

-# Use pandas built-in fillna to fill NaNs in the covariates files introduced during the pre-processing pipeline, before z-scoring
-# Z-score takes an optional argument of which columns to z-score. Since we don't want to z-score any spikes, so let's select everything except that column
-cov = cov.fillna(0).zscore(cov.columns[:-1])
-cov.heatmap(vmin=-1,vmax=1)
+# Let's use the design matrix with polynomials from above
+# Stack "run 1" on top of "run 2"
+runs_1_and_2 = dm_with_nuissance.append(dm_with_nuissance, axis=0)
+runs_1_and_2.heatmap()

 #########################################################################
-# Concatenate Multiple Design Matrices
-# ------------------------------------
-#
-# A really nice feature of Design Matrix is simplified, but intelligent matrix concatentation. Here it's trivial to horizontally concatenate our convolved onsets and covariates, while keeping our column names and order.
+# Separating columns during append operations
+# *******************************************
+# Notice that all polynomials have been kept separated for you automatically and have been renamed to reflect the fact that they come from different runs.
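+
+# A quick editorial sketch: the run-wise renaming can be inspected directly in
+# the column names; separated columns acquire a numeric run prefix (e.g.
+# '0_poly_0' is the first run's intercept)
+print(runs_1_and_2.columns)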
+
+#########################################################################
+# But Design Matrix is even more flexible. Let's say you want to estimate separate run-wise coefficients for all house stimuli too. Simply pass that into the `unique_cols` parameter of append.

-full = dm.append(cov,axis=1)
-full.heatmap(vmin=-1,vmax=1)
+runs_1_and_2 = dm_with_nuissance.append(dm_with_nuissance, unique_cols=['house*'], axis=0)
+runs_1_and_2.heatmap()

 #########################################################################
-# But we can also intelligently vertically concatenate design matrices to handle say, different experimental runs, or participants. The method enables the user to indicate which columns to keep separated (if any) during concatenation or which to treat as extensions along the first dimension. By default the class will keep all polylnomial terms separated. This is extremely useful when building 1 large design matrix composed of several runs or participants with separate means.
-
-dm2 = dm.append(dm, axis=0)
-dm2.heatmap(vmin=-1,vmax=1)
+# Now notice how all stimuli that begin with 'house' have been made into separate columns for each run. In general `unique_cols` can take a list of columns to keep separated or simple wildcards that either begin with a term (e.g. "house*") or end with one (e.g. "*house").
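+
+#########################################################################
+# A minimal editorial sketch (mirroring the package's own tests): wildcards
+# and explicit column names can be mixed in a single call, here keeping all
+# 'face' stimuli and only house_A separate for each run.
+
+runs_mixed = dm_with_nuissance.append(dm_with_nuissance, unique_cols=['face*','house_A'], axis=0)
+runs_mixed.heatmap()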
 #########################################################################
-# Specific columns of interest can also be kept separate during concatenation (e.g. keeping run-wise spikes separate). As an example, we treat our first experimental regressor as different across our two design matrices. Notice that the class also preserves (as best as possible) column ordering.
+# Putting it all together
+# -----------------------
+#
+# A realistic workflow
+# ********************
+# Let's combine all the examples above to build a workflow for a realistic first-level fMRI analysis. This will include loading onsets from multiple experimental runs and concatenating them into a large multi-run design matrix, where we estimate a single set of coefficients for our variables of interest while making sure we account for run-wise differences in nuisance covariates (e.g. motion), baselines, trends, etc. For simplicity we'll just reuse the same onsets and covariates file multiple times.

-dm2 = dm.append(dm, axis=0, unique_cols=['BillyRiggins'])
-dm2.heatmap(vmin=-1,vmax=1)
+num_runs = 4
+TR = 2.0
+sampling_freq = 1./TR
+all_runs = Design_Matrix(sampling_freq = sampling_freq)
+for i in range(num_runs):

-#########################################################################
-# Design Matrix can also create polynomial terms and intelligently keep them separate during concatenation. For example lets concatenate 4 design matrices and create separate 2nd order polynomials for all of them
+    # 1) Load in onsets for this run
+    onsetsFile = os.path.join(get_resource_path(),'onsets_example.txt')
+    dm = onsets_to_dm(onsetsFile, sampling_freq=sampling_freq, run_length=160, sort=True)
+
+    # 2) Convolve them with the hrf
+    dm = dm.convolve()
+
+    # 3) Load in covariates for this run
+    covariatesFile = os.path.join(get_resource_path(),'covariates_example.csv')
+    cov = pd.read_csv(covariatesFile)
+    cov = Design_Matrix(cov, sampling_freq = sampling_freq)
+
+    # 4) In the covariates, fill any NaNs with 0, then add an intercept, a linear trend, and dct basis functions
+    cov = cov.fillna(0)
+
+    # Retain a list of nuisance covariates (e.g. motion and spikes) which we'll want to keep separate for each run
+    cov_columns = cov.columns
+    cov = cov.add_poly(1).add_dct_basis()
+
+    # 5) Join the onsets and covariates together
+    full = dm.append(cov, axis=1)

+    # 6) Append it to the master Design Matrix keeping things separated by run
+    all_runs = all_runs.append(full, axis=0, unique_cols=cov_columns)
+
+all_runs.heatmap(vmin=-1,vmax=1)
+
+#########################################################################
+# We can see that the left-most columns of our multi-run design matrix contain our conditions of interest (stacked across all runs), the middle columns include the separate run-wise nuisance covariates (motion, spikes), and the right-most columns contain the run-specific polynomials (intercept, trends, etc.).

 #########################################################################
 # Data Diagnostics
 # ----------------
 #
-# Design Matrix also provides a few tools for cleaning up perfectly correlated columns (resulting in failure if trying to perform regression), replacing data, and computing collinearity.
+# Let's actually check if our design is estimable. Design Matrix provides a few tools for cleaning up highly correlated columns (which would cause failure when trying to perform regression), replacing data, and computing collinearity. By default the `clean` method will drop any columns correlated at r >= .95.
+
+all_runs_cleaned = all_runs.clean(verbose=True)
+all_runs_cleaned.heatmap(vmin=-1,vmax=1)
+
+#########################################################################
+# Whoops, it looks like some of our polynomials and DCT basis functions above are highly correlated, but the clean method detected that and dropped them for us. In practice you'll often include either polynomials or DCT basis functions rather than both, but this was just an illustrative example.
+
+#########################################################################
+# Estimating a first-level model
+# ------------------------------
+#
+# You can now set this multi-run Design Matrix as the `X` attribute of a Brain_Data object containing EPI data for these four runs and estimate a regression in just a few lines of code.
+
+# This code is commented because we don't actually have niftis loaded for the purposes of this tutorial
+# See the other tutorials for more details on working with nifti files and Brain_Data objects
+
+# Assuming you already loaded up Nifti images like this
+# list_of_niftis = ['run_1.nii.gz','run_2.nii.gz','run_3.nii.gz','run_4.nii.gz']
+# all_run_data = Brain_Data(list_of_niftis)
+
+# Set our Design Matrix to the X attribute of Brain_Data object
+# all_run_data.X = all_runs_cleaned
+
+# Run the regression
+# results = all_run_data.regress()
-# We have a good design here so no problems
-dm_all.clean(verbose=False)
-dm_all.vif()
+
+# This will produce N beta, t, and p images
+# where N is the number of columns in the design matrix

diff --git a/nltools/data/design_matrix.py b/nltools/data/design_matrix.py
index b30b033c..3a513ada 100644
--- a/nltools/data/design_matrix.py
+++ b/nltools/data/design_matrix.py
@@ -105,6 +105,14 @@ def _inherit_attributes(self,
             setattr(dm_out, item, getattr(self,item))
         return dm_out

+    def _sort_cols(self):
+        """
+        This is a helper function that tries to ensure that columns of a Design Matrix are sorted according to: a) those not separated during append operations, b) those separated during append operations, c) polynomials. Called primarily during vertical concatenation and cleaning.
+        """
+        data_cols = [elem for elem in self.columns if not elem.split('_')[0].isdigit() and elem not in self.polys]
+        separated_cols = [elem for elem in self.columns if elem.split('_')[0].isdigit() and elem not in self.polys]
+        return self[data_cols + separated_cols + self.polys]
+
     def details(self):
         """Print class meta data.

@@ -144,7 +152,7 @@ def append(self, dm, axis=0, keep_separate = True, unique_cols = [], fill_na=0,
         if not all([isinstance(elem,self.__class__) for elem in to_append]):
             raise TypeError("Each object to be appended must be a Design_Matrix!")
         if not all([elem.sampling_freq == self.sampling_freq for elem in to_append]):
-            raise ValueError("All Design Matrices must have the same sampling rate!")
+            raise ValueError("All Design Matrices must have the same sampling frequency!")

         if axis == 1:
             if any([not set(self.columns).isdisjoint(elem.columns) for elem in to_append]):
@@ -190,6 +198,7 @@ def _vertcat(self, df, keep_separate, unique_cols, fill_na, verbose):
         modify_to_append = []
         all_polys = []
         cols_to_separate = []
+        all_separated = []

         if len(unique_cols):
             if not keep_separate:
@@ -216,7 +225,10 @@ def _vertcat(self, df, keep_separate, unique_cols, fill_na, verbose):
                         count = c.split('_')[0]
                         unique_count.append(int(count))
                     else:
-                        to_rename[c] = '0_' + c
+                        new_name = '0_' + c
+                        all_separated.append(new_name)
+                        to_rename[c] = new_name
+                        all_separated.append(new_name)
                 cols_to_separate.append(searchstr)

         if to_rename:
@@ -256,10 +268,12 @@ def _vertcat(self, df, keep_separate, unique_cols, fill_na, verbose):
                             count = int(c.split('_')[0])
                             name = '_'.join(c.split('_')[1:])
                             count += max_unique_count + 1
-                            to_rename[c] = str(count) + '_' + name
+                            new_name = str(count) + '_' + name
+                            to_rename[c] = new_name
                         else:
-                            to_rename[c] = str(max_unique_count + 1) + '_' + c
-
+                            new_name = str(max_unique_count + 1) + '_' + c
+                            to_rename[c] = new_name
+                            all_separated.append(new_name)
                     modify_to_append.append(dm.rename(columns=to_rename))
                     max_unique_count += 1
                 else:
@@ -282,9 +296,12 @@ def _vertcat(self, df, keep_separate, unique_cols, fill_na, verbose):
                             count = int(c.split('_')[0])
                             name = '_'.join(c.split('_')[1:])
                             count += max_unique_count + 1
-                            to_rename[c] = str(count) + '_' + name
+                            new_name = str(count) + '_' + name
+                            to_rename[c] = new_name
                         else:
-                            to_rename[c] = str(max_unique_count + 1) + '_' + c
+                            new_name = str(max_unique_count + 1) + '_' + c
+                            to_rename[c] = new_name
+                            all_separated.append(new_name)
                     modify_to_append.append(dm.rename(columns=to_rename))
                     max_unique_count += 1
                 else:
@@ -339,7 +356,6 @@ def _vertcat(self, df, keep_separate, unique_cols, fill_na, verbose):
                     current_poly_max += 1
                 all_polys += list(to_rename.values())

-
         # Handle renaming additional unique cols to keep separate
         if cols_to_separate:
             if verbose:
@@ -353,9 +369,12 @@ def _vertcat(self, df, keep_separate, unique_cols, fill_na, verbose):
                         count = int(c.split('_')[0])
                         name = '_'.join(c.split('_')[1:])
                         count += max_unique_count + 1
-                        to_rename[c] = str(count) + '_' + name
+                        new_name = str(count) + '_' + name
+                        to_rename[c] = new_name
                     else:
-                        to_rename[c] = str(max_unique_count + 1) + '_' + c
+                        new_name = str(max_unique_count + 1) + '_' + c
+                        to_rename[c] = new_name
+                        all_separated.append(new_name)

                 # Combine renamed polynomials and renamed uniqu_cols
                 modify_to_append.append(temp_dm.rename(columns=to_rename))
@@ -382,10 +401,12 @@ def _vertcat(self, df, keep_separate, unique_cols, fill_na, verbose):
                         count = int(c.split('_')[0])
                         name = '_'.join(c.split('_')[1:])
                         count += max_unique_count + 1
-                        to_rename[c] = str(count) + '_' + name
+                        new_name = str(count) + '_' + name
+                        to_rename[c] = new_name
                     else:
-                        to_rename[c] = str(max_unique_count + 1) + '_' + c
-
+                        new_name = str(max_unique_count + 1) + '_' + c
+                        to_rename[c] = new_name
+                        all_separated.append(new_name)
                 modify_to_append.append(dm.rename(to_rename))
                 max_unique_count += 1
             else:
@@ -403,10 +424,8 @@ def _vertcat(self, df, keep_separate, unique_cols, fill_na, verbose):
         out.convolved = self.convolved
         out.multi = True
         out.polys = all_polys
-        data_cols = [elem for elem in out.columns if elem not in out.polys]
-        out = out[data_cols + out.polys]
-        return out
+        return out._sort_cols()

     def vif(self,exclude_polys=True):
         """Compute variance inflation factor amongst columns of design matrix,
@@ -472,7 +491,7 @@ def convolve(self, conv_func='hrf', columns=None):
             assert len(conv_func.shape) <= 2, "2d conv_func must be formatted as samplex X kernals!"
         elif isinstance(conv_func, six.string_types):
             assert conv_func == 'hrf',"Did you mean 'hrf'? 'hrf' can generate a kernel for you, otherwise custom kernels should be passed in as 1d or 2d arrays."
-            conv_func = glover_hrf(1. / self.sampling_freq, oversampling=1)
+            conv_func = glover_hrf(1. / self.sampling_freq, oversampling=1.)
         else:
             raise TypeError("conv_func must be a 1d or 2d numpy array organized as samples x kernels, or the string 'hrf' for the canonical glover hrf")

@@ -611,7 +630,7 @@ def add_dct_basis(self,duration=180,drop=0):
         if any([elem.count('_') == 2 and 'cosine' in elem for elem in self.polys]):
             raise AmbiguityError("It appears that this Design Matrix contains cosine bases that were kept seperate from a previous append operation. This makes it ambiguous for adding polynomials terms. Try calling .add_dct_basis() on each separate Design Matrix before appending them instead.")

-        basis_mat = make_cosine_basis(self.shape[0],self.sampling_freq,duration,drop=drop)
+        basis_mat = make_cosine_basis(self.shape[0],1./self.sampling_freq,duration,drop=drop)

         basis_frame = Design_Matrix(basis_mat,
                                     sampling_freq=self.sampling_freq,columns = [str(elem) for elem in range(basis_mat.shape[1])])

@@ -688,6 +707,8 @@ def clean(self,fill_na=0,exclude_polys=False,thresh=.95,verbose=True):
                         remove.append(j)
         if remove:
             out = out.drop(remove, axis=1)
+            out.polys = [elem for elem in out.polys if elem not in remove]
+            out = out._sort_cols()
         else:
             print("Dropping columns not needed...skipping")
         np.seterr(**old_settings)

diff --git a/nltools/file_reader.py b/nltools/file_reader.py
index 8303f047..8d809e34 100644
--- a/nltools/file_reader.py
+++ b/nltools/file_reader.py
@@ -75,7 +75,7 @@ def onsets_to_dm(F, sampling_freq, run_length, header='infer', sort=False, keep_
     df['Onset'] = df['Onset'].apply(lambda x: int(np.floor(x/TR)))

     #Build dummy codes
-    X = Design_Matrix(np.zeros([run_length,len(df['Stim'].unique())]),columns=df['Stim'].unique(),sampling_rate=TR)
+    X = Design_Matrix(np.zeros([run_length,len(df['Stim'].unique())]),columns=df['Stim'].unique(),sampling_freq=sampling_freq)
     for i, row in df.iterrows():
         if df.shape[1] == 3:
             dur = np.ceil(row['Duration']/TR)

diff --git a/nltools/stats.py b/nltools/stats.py
index 4ef6daa2..0fa0517e 100644
--- a/nltools/stats.py
+++ b/nltools/stats.py
@@ -563,14 +563,15 @@ def make_cosine_basis(nsamples, sampling_freq, filter_length, unit_scale=True, d
     # Drop intercept ala SPM
     C = C[:,1:]

+    if C.size == 0:
+        raise ValueError('Basis function creation failed! nsamples is too small for requested filter_length.')
+
     if unit_scale:
         C *= 1. / C[0,0]

     C = C[:, drop:]

-    if C.size == 0:
-        raise ValueError('Basis function creation failed! nsamples is too small for requested filter_length.')
-    else:
-        return C
+
+    return C

 def transform_pairwise(X, y):
     '''Transforms data into pairs with balanced labels for ranking

diff --git a/nltools/tests/test_data.py b/nltools/tests/test_data.py
index a4a9638f..e2fab982 100644
--- a/nltools/tests/test_data.py
+++ b/nltools/tests/test_data.py
@@ -617,13 +617,13 @@ def test_designmat(tmpdir):
     assert mat.add_poly(2,include_lower=False).shape[1] == 5

     matpd = matp.add_dct_basis()
-    assert matpd.shape[1] == 9
+    assert matpd.shape[1] == 18

     assert all(matpd.vif() < 2.0)
     assert not all(matpd.vif(exclude_polys=False) < 2.0)

     matc = matpd.clean()
-    assert matc.shape[1] == 7
+    assert matc.shape[1] == 16

     # Standard convolve
     assert matpd.convolve().shape == matpd.shape

@@ -653,18 +653,17 @@ def test_designmat(tmpdir):
     # Otherwise stack them
     assert matpd.append(matpd,keep_separate=False).shape[1] == matpd.shape[1]
     # Keep a single stimulus column separate
-    assert matpd.append(matpd,unique_cols=['face_A']).shape[1] == 15
+    assert matpd.append(matpd,unique_cols=['face_A']).shape[1] == 33
     # Keep a common stimulus class separate
-    assert matpd.append(matpd,unique_cols=['face*']).shape[1] == 16
+    assert matpd.append(matpd,unique_cols=['face*']).shape[1] == 34
     # Keep a common stimulus class and a different single stim separate
-    assert matpd.append(matpd,unique_cols=['face*','house_A']).shape[1] == 17
+    assert matpd.append(matpd,unique_cols=['face*','house_A']).shape[1] == 35
     # Keep multiple stimulus class separate
-    assert matpd.append(matpd,unique_cols=['face*','house*']).shape[1] == 18
+    assert matpd.append(matpd,unique_cols=['face*','house*']).shape[1] == 36

     # Growing a multi-run design matrix; keeping things separate
     num_runs = 4
     all_runs = Design_Matrix(sampling_freq=.5)
-    run_list = []
     for i in range(num_runs):
         run = Design_Matrix(np.array([
                                     [1,0,0,0],
                                     [1,0,0,0],
                                     [0,0,0,0],
                                     [0,1,0,0],
                                     [0,1,0,0],
                                     [0,0,0,0],
                                     [0,0,1,0],
                                     [0,0,1,0],
                                     [0,0,0,0],
                                     [0,0,0,1],
                                     [0,0,0,1]
                                     ]),
                             sampling_freq=.5,
                             columns=['stim_A','stim_B','cond_C','cond_D']
                             )
         run = run.add_poly(2)
-        run_list.append(run)
         all_runs = all_runs.append(run,unique_cols=['stim*','cond*'])

     assert all_runs.shape == (44, 28)