
Commit

Adding supp to readme
daniel1noble committed Oct 20, 2021
1 parent 0578b73 commit 645cef3
Showing 2 changed files with 162 additions and 1 deletion.
158 changes: 158 additions & 0 deletions .Rhistory
@@ -0,0 +1,158 @@
set.seed(123)
# Parameters
no.paper = 30 # Number of unique papers
n.effects = 5 # Number of effects per paper. We will keep it simple and balanced
rho.e = 0.8 # Correlation in sampling variances
study_id = rep(1:no.paper, each = n.effects) # Study IDs
var_paper = 0.5 # Between-study variance (i.e., tau2)
var_effect = 0.2 # Effect size (or within study) variance
mu = 0.4 # Average, or overall, effect size
rho = c(0.2, 0.4, 0.6, 0.8) # Candidate within-study correlations among effect sizes (one sampled per study below)
# Add sampling variance
# First, sample per-study sample sizes from a negative binomial (the + 4 enforces a minimum n of 4)
n.sample <- rnbinom(no.paper, mu = 40, size = 0.5) + 4
# Assume effect sizes are logRR, so the sampling variance can be approximated from the coefficient of variation (CV) and sample size as 2*CV^2/n
cv <- sample(seq(0.3, 0.35, by = 0.001), no.paper, replace = TRUE)
# Assuming sample size is the same for all effects within study (pretty sensible)
rep_sample <- rep(n.sample, each = n.effects)
# Sampling variances
vi <- (2*rep(cv, each = n.effects)^2)/rep_sample # repeat cv with each = n.effects so each CV lines up with its own study
# Create the sampling variance matrix, which accounts for the correlation between effect sizes within study
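# clubSandwich::impute_covariance_matrix() builds one covariance block per study from vi, assuming a common correlation (rho.e) among sampling errors within a study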
sigma_vi <- as.matrix(Matrix::bdiag(clubSandwich::impute_covariance_matrix(vi = vi, cluster = study_id, r = rho.e)))
# Now create the effect-size-level covariance matrix. This simulates a situation where, for example, you have effect sizes on reading and maths that are clearly correlated, but to varying extents across studies
# Create list of matrices, 1 for each study
matrices <- list()
for(i in 1:no.paper){
r <- sample(rho, size = 1, replace = TRUE)
tmp <- matrix(r, nrow = n.effects, ncol = n.effects)
diag(tmp) <- 1
matrices[[i]] <- tmp
}
cor_yi <- as.matrix(Matrix::bdiag(matrices))
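# Scale the correlation matrix by the effect-level SDs to get a covariance matrix; all SDs equal sqrt(var_effect) here, so this is simply cor_yi * var_effect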
cov_yi <- cor_yi * sqrt(var_effect) * sqrt(var_effect)
# Derive data from simulation
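# yi = overall mean + between-study deviation + correlated within-study (effect-level) deviation + correlated sampling error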
yi <- mu + rep(rnorm(no.paper, 0, sqrt(var_paper)), each = n.effects) + MASS::mvrnorm(n = 1, mu = rep(0, n.effects*no.paper), Sigma = cov_yi) + MASS::mvrnorm(n = 1, mu = rep(0, n.effects*no.paper), Sigma = sigma_vi)
# Create the data. We'll just assume that meta-analysts have already derived their effect size and sampling variance
data <- data.frame(study_id = study_id,
yi = yi,
vi = vi,
obs = c(1:(n.effects*no.paper)))
pacman::p_load(tidyverse, MASS, kableExtra, gridExtra, MCMCglmm, brms, metafor, robumeta, clubSandwich, pander)
# Simulate a dataset composed of 30 papers, each having 5 effects from the same study.
# (The same simulation code as above was re-run here, unchanged, after loading the packages.)
mod_multilevel = metafor::rma.mv(yi = yi, V = vi, mods = ~1,
random=list(~1|study_id,~1|obs),
data=data, test="t")
summary(mod_multilevel)
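# Refit with the containment method for the t-test degrees of freedom (dfs = "contain"): df are based on the number of studies rather than the number of effect sizes, guarding against inflated Type I error under non-independence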
mod_multilevel_pdf = metafor::rma.mv(yi = yi, V = vi, mods = ~1,
random=list(~1|study_id,~1|obs),
data=data, test="t", dfs = "contain")
summary(mod_multilevel_pdf)
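# dfs = "contain" is only available in newer versions of metafor, which is presumably why the development version is installed from GitHub below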
remotes::install_github("wviechtb/metafor")
mod_multilevel_pdf = metafor::rma.mv(yi = yi, V = vi, mods = ~1,
random=list(~1|study_id,~1|obs),
data=data, test="t", dfs = "contain")
summary(mod_multilevel_pdf)
library(metafor)
mod_multilevel_pdf = metafor::rma.mv(yi = yi, V = vi, mods = ~1,
random=list(~1|study_id,~1|obs),
data=data, test="t", dfs = "contain")
?metafor::rma.mv
library(metafor)
mod_multilevel_pdf = rma.mv(yi = yi, V = vi, mods = ~1,
random=list(~1|study_id,~1|obs),
data=data, test="t", dfs = "contain")
summary(mod_multilevel_pdf)
# Clean workspace
rm(list=ls())
# Loading packages & Functions
remotes::install_github("wviechtb/metafor"); library(metafor)
pacman::p_load(tidyverse, MASS, kableExtra, gridExtra, MCMCglmm, brms, robumeta, clubSandwich, pander)
# Simulate a dataset composed of 30 papers, each having 5 effects from the same study.
# (The same simulation code as above was re-run after the fresh package load.)
mod_multilevel = metafor::rma.mv(yi = yi, V = vi, mods = ~1,
random=list(~1|study_id,~1|obs),
data=data, test="t")
summary(mod_multilevel)
mod_multilevel_pdf = rma.mv(yi = yi, V = vi, mods = ~1,
random=list(~1|study_id,~1|obs),
data=data, test="t", dfs = "contain")
summary(mod_multilevel_pdf)
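# A complementary correction (a sketch, not part of the recorded session): cluster-robust
# (CR2) standard errors with Satterthwaite degrees of freedom via clubSandwich, clustering
# effect sizes by study; coef_test() is applied to the multilevel model fitted above
clubSandwich::coef_test(mod_multilevel, vcov = "CR2", cluster = data$study_id)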
remotes::install_github("rlesur/klippy")
klippy::klippy(tooltip_message = 'Click to Copy Code', tooltip_success = 'Done', position = 'right', color = "red")
5 changes: 4 additions & 1 deletion README.md
@@ -6,12 +6,15 @@ This paper has now been accepted for publication in *Ecology*:

Shinichi Nakagawa, Alistair M. Senior, Wolfgang Viechtbauer, and Daniel W. A. Noble. An assessment of statistical methods for non-independent data in ecological meta-analyses. Ecology, accepted 20 May 2021.

To access the Supplemental Material for implementing corrections, click [here](https://daniel1noble.github.io/ecology_comment/).

## Introduction
This repository houses the code and supplementary tutorial used to demonstrate how multi-level meta-analytic models from `metafor` can be corrected to avoid inflated Type I error in the presence of non-independent effect sizes. The commentary is a response to Song et al. (2020), showing how a few simple corrections can help resolve the problems identified in their very thorough simulations.
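
As a minimal sketch of the kind of correction covered there (assuming a data frame `data` with effect sizes `yi`, sampling variances `vi`, a study identifier `study_id`, and an observation-level identifier `obs`):

```r
library(metafor)

# Multilevel model with study- and effect-level random intercepts;
# dfs = "contain" bases the t-test degrees of freedom on the number of
# studies rather than the number of effect sizes, which protects against
# inflated Type I error when effects within studies are non-independent
mod <- rma.mv(yi = yi, V = vi, mods = ~1,
              random = list(~1 | study_id, ~1 | obs),
              data = data, test = "t", dfs = "contain")
summary(mod)
```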

*Just want to know how to apply corrections?* Users who are interested in learning more about how they can correct for non-independence can read the [Supplemental Material](https://daniel1noble.github.io/ecology_comment/)

*Reproducing the simulations?* Users wanting to reproduce Song et al.'s (2020) simulations, along with the corrections we implemented, can find all the R code in the `R/` directory.

## References
Song, C., S. D. Peacor, C. W. Osenberg, and J. R. Bence. 2020. An assessment of statistical methods for nonindependent data in ecological meta-analyses. Ecology online: e03184.
