Commit: Fixing the number of categories issue
ManuelRausch committed Apr 12, 2024
1 parent 5d06339 commit b9812b3
Showing 7 changed files with 183 additions and 20 deletions.
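The hunks below all apply the same pattern: fitConf() now computes the total trial count and hands it to the internal fitting functions, which previously derived it from a variable `ratings` that is not defined in their scope. A minimal sketch of the before/after pattern (toy functions, not package code):

# Before: N is looked up outside the function's arguments, so the call either
# errors ("object 'ratings' not found") or picks up whatever object of that
# name happens to be visible in the calling workspace.
oldN <- function() {
  N <- length(ratings)   # `ratings` is not an argument here
  N
}

# After: the caller computes the trial count once and passes it down explicitly.
newN <- function(nTrials) {
  nTrials
}

df <- data.frame(rating = factor(sample(1:4, 120, replace = TRUE)))
nTrials <- length(df$rating)   # one value per trial, as in fitConf()
newN(nTrials)                  # 120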
3 changes: 2 additions & 1 deletion R/fitConf.R
@@ -225,6 +225,7 @@ fitConf <- function(data, model = "SDT",
B <- levels(data$stimulus)[2]
nCond <- length(levels(data$diffCond))
nRatings <- length(levels(data$rating))
+  nTrials <- length(data$rating)
abj_f <- 1 /(nRatings*2)

N_SA_RA <-
@@ -263,6 +264,6 @@ fitConf <- function(data, model = "SDT",
fit <- fitting_fct(N_SA_RA = N_SA_RA,N_SA_RB = N_SA_RB,
N_SB_RA = N_SB_RA,N_SB_RB = N_SB_RB,
nInits = nInits, nRestart = nRestart,
-                     nRatings = nRatings, nCond = nCond)
+                     nRatings = nRatings, nCond = nCond, nTrials = nTrials)
return(fit)
}
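The new nTrials line sits directly next to the nRatings line, and the distinction between the two is presumably the "number of categories issue" named in the commit title. A toy illustration (made-up factor, not package data):

x <- factor(c(1, 3, 3, 2, 4, 1), levels = 1:4)
length(levels(x))   # 4 -> nRatings: number of rating categories
length(x)           # 6 -> nTrials:  number of trials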
6 changes: 4 additions & 2 deletions R/fitConfModels.R
@@ -212,8 +212,10 @@


#' @export
- fitConfModels <- function(data, models="all", nInits = 5, nRestart = 4, #var="constant",
-                           .parallel=FALSE, n.cores=NULL) {
+ fitConfModels <- function(data, models="all",
+                           diffCond = NULL, stimulus = NULL, correct = NULL, rating = NULL,
+                           nInits = 5, nRestart = 4,
+                           .parallel=FALSE, n.cores=NULL) {
AllModels <- c('WEV', 'SDT','IG','ITGc',
'ITGcm', 'GN', 'PDA', 'logN', 'logWEV') # if you implement additional models, add them here!
if (identical(models,"all")) models <- AllModels
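For context, a hypothetical call using the extended signature. The semantics of the new diffCond, stimulus, correct, and rating arguments are assumed from their names (presumably the names of the corresponding columns in data); the diff does not show their documentation, so treat this only as a sketch. The new correct argument is left at its default here.

fits <- fitConfModels(myTrials,                 # assumed: data frame of trials
                      models   = c("SDT", "WEV"),
                      diffCond = "contrast",    # assumed: name of the difficulty column
                      stimulus = "targetSide",  # assumed: name of the stimulus column
                      rating   = "confidence",  # assumed: name of the confidence column
                      nInits = 5, nRestart = 4,
                      .parallel = FALSE)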
9 changes: 4 additions & 5 deletions R/int_fitCEV.R
@@ -2,7 +2,7 @@
### Model version described by (Rausch et al., 2023)
fitCEV <-
function(N_SA_RA, N_SA_RB, N_SB_RA, N_SB_RB,
-           nInits, nRestart, nRatings, nCond){
+           nInits, nRestart, nRatings, nCond, nTrials){

# coarse grid search to find promising initial values

@@ -77,7 +77,6 @@ fitCEV <-
res <- data.frame(matrix(nrow=1, ncol=0))
if(!inherits(fit, "try-error")){
k <- length(fit$par)
-    N <- length(ratings)

res[paste("d_",1:nCond, sep="")] <- as.vector(cumsum(exp(fit$par[1:(nCond)])))
res$c <- as.vector(fit$par[nCond+nRatings])
@@ -94,10 +93,10 @@ fitCEV <-
res$w <- exp(fit$par[nCond + nRatings*2+1])/(1+exp(fit$par[nCond + nRatings*2+1]))

res$negLogLik <- fit$value
-    res$N <- N
+    res$N <- nTrials
res$k <- k
-    res$BIC <- 2 * fit$value + k * log(N)
-    res$AICc <- 2 * fit$value + k * 2 + 2*k*(k-1)/(N-k-1)
+    res$BIC <- 2 * fit$value + k * log(nTrials)
+    res$AICc <- 2 * fit$value + k * 2 + 2*k*(k-1)/(nTrials-k-1)
res$AIC <- 2 * fit$value + k * 2
}
res
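The same three information criteria are computed in each of the internal fitting functions touched by this commit; only the trial count they receive changes. A standalone illustration using the formulas above with made-up values (negLogLik, k, and nTrials are hypothetical):

negLogLik <- 1234.5   # minimised negative log-likelihood (fit$value)
k         <- 9        # number of free parameters (length(fit$par))
nTrials   <- 1200     # total number of trials, now passed in explicitly

BIC  <- 2 * negLogLik + k * log(nTrials)
AIC  <- 2 * negLogLik + 2 * k
AICc <- 2 * negLogLik + 2 * k + 2 * k * (k - 1) / (nTrials - k - 1)
round(c(BIC = BIC, AIC = AIC, AICc = AICc), 1)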
10 changes: 4 additions & 6 deletions R/int_fitNoisy.R
@@ -5,7 +5,7 @@

fitNoisy <-
function(N_SA_RA, N_SA_RB, N_SB_RA, N_SB_RB,
-           nInits, nRestart, nRatings, nCond){
+           nInits, nRestart, nRatings, nCond, nTrials){

# coarse grid search to find promising initial values

@@ -84,8 +84,6 @@ fitNoisy <-
if(!inherits(fit, "try-error")){

k <- length(fit$par)
-    N <- length(ratings)

res[paste("d_",1:nCond, sep="")] <- as.vector(cumsum(exp(fit$par[1:(nCond)])))
res$c <- as.vector(fit$par[nCond+nRatings])
res[,paste("theta_minus.",(nRatings-1):1, sep="")] <-
@@ -100,10 +98,10 @@ fitNoisy <-
res$sigma <- exp(fit$par[nCond + nRatings*2])

res$negLogLik <- fit$value
-    res$N <- N
+    res$N <- nTrials
res$k <- k
-    res$BIC <- 2 * fit$value + k * log(N)
-    res$AICc <- 2 * fit$value + k * 2 + 2*k*(k-1)/(N-k-1)
+    res$BIC <- 2 * fit$value + k * log(nTrials)
+    res$AICc <- 2 * fit$value + k * 2 + 2*k*(k-1)/(nTrials-k-1)
res$AIC <- 2 * fit$value + k * 2
}
res
164 changes: 164 additions & 0 deletions R/int_fitRCE.R
@@ -0,0 +1,164 @@
### Functions to fit the RCE (response-congruent evidence) model
### Model based on Peters et al. (2017)
### the original model is extended to include a choice bias parameter so that
### the cognitive architecture underlying the decision is equivalent to SDT.

fitHeuris <-
function(N_SA_RA, N_SA_RB, N_SB_RA, N_SB_RB,
           nInits, nRestart, nRatings, nCond, nTrials){

# coarse grid search to find promising initial values

temp <- expand.grid(minD = seq(-.3, .8, length.out=4),
maxD = c(.6, 1.2, 1.4, 1.9),
                    b = c(-.6, -.2, .2, .6), # bias in favor of option "B"
                    tauMin = seq(-1.1, 2.4, length.out=4), # position of the most conservative confidence criterion related to stimulus A
                    tauRange = seq(.1,3.8, length.out=4)) # range of rating criteria for stimulus B

inits <- data.frame(matrix(data=NA, nrow= nrow(temp),
ncol = nCond + nRatings*2 -1))
if(nCond==1) {
inits[,1] <- log(temp$maxD) }
else{
inits[,1:(nCond)] <- log(t(mapply(function(maxD) diff(seq(0, maxD, length.out = nCond+1)), temp$maxD)))
}
inits[,nCond +1] <- temp$tauMin
inits[,(nCond+2):(nCond+nRatings-1)] <-
log(t(mapply(function(tauRange) rep(tauRange/(nRatings-1), nRatings-2),
temp$tauRange)))
# inits[,nCond+(nRatings-1)] <- temp$tauMin
  inits[,nCond+nRatings] <- temp$b # b, the choice bias
inits[,nCond+(nRatings+1)] <- temp$tauMin
inits[,(nCond+nRatings+2):(nCond + nRatings*2-1)] <-
log(t(mapply(function(tauRange) rep(tauRange/(nRatings-1), nRatings-2),
temp$tauRange)))

logL <- apply(inits, MARGIN = 1,
function(p) try(llHeuris(p, N_SA_RA, N_SA_RB, N_SB_RA,N_SB_RB, nRatings, nCond), silent = TRUE))
logL <- as.numeric(logL)
nAttempts <- 5
nRestart <- 4
inits <- inits[order(logL),][1:nAttempts,]
nIter <- 10^6
noFitYet <- TRUE
#print(paste("Initial grid search took...",as.character(round(as.double(difftime(Sys.time(),t00,units = "mins")), 2))," mins"))
#print("Start fitting ... ")
start <- inits[1,]

try(fit <- optim(par = start,
                   f = llHeuris, gr = NULL,
N_SA_RA = N_SA_RA,N_SA_RB = N_SA_RB,
N_SB_RA = N_SB_RA,N_SB_RB = N_SB_RB, nRatings = nRatings, nCond = nCond,
control = list(maxit = 10^6, reltol = 10^-8)))
if (!exists("fit") || class(fit) == "try-error"){
i <- 2
while(noFitYet && (i <= nAttempts)){
start <- inits[i,]
try(fit <- optim(par = start,
f = llHeuris, gr = NULL,
N_SA_RA = N_SA_RA,N_SA_RB = N_SA_RB,
N_SB_RA = N_SB_RA,N_SB_RB = N_SB_RB, nRatings = nRatings, nCond = nCond,
control = list(maxit = 10^6, reltol = 10^-8)))
if (exists("fit") && class(fit) == "list") noFitYet <- FALSE
i <- i+1
}
}

if (exists("fit") && class(fit) == "list"){
for (i in 1:nRestart){
try(fit <- optim(par = fit$par,
f = llHeuris, gr = NULL,
N_SA_RA = N_SA_RA,N_SA_RB = N_SA_RB,
N_SB_RA = N_SB_RA,N_SB_RB = N_SB_RB, nRatings = nRatings, nCond = nCond,
control = list(maxit = 10^6, reltol = 10^-8)))
}
}


res <- data.frame(matrix(nrow=1, ncol=0))
  if(!inherits(fit, "try-error")){
    k <- length(fit$par)
for (i in 1:nCond){
res[paste("d",i, sep="")] <- as.vector(fit$par[i])
}
res$b <- as.vector(fit$par[nCond+nRatings])
res[,paste("cA",1:(nRatings-1), sep="")] <-
c(as.vector(fit$par[nCond+1]),
as.vector(fit$par[nCond+1]) +
as.vector(cumsum(c(exp(fit$par[(nCond+2):(nCond + nRatings-1)])))))

res[,paste("cB",1:(nRatings-1), sep="")] <-
c(as.vector(fit$par[nCond+nRatings+1]),
as.vector(fit$par[nCond+nRatings+1]) +
as.vector(cumsum(c(exp(fit$par[(nCond+nRatings+2):(nCond + nRatings*2-1)])))))


res$negLogLik <- fit$value
    res$N <- nTrials
res$k <- k
    res$BIC <- 2 * fit$value + k * log(nTrials)
    res$AICc <- 2 * fit$value + k * 2 + 2*k*(k-1)/(nTrials-k-1)
res$AIC <- 2 * fit$value + k * 2
}
res
}
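fitHeuris() follows the same fitting strategy as the other internal fitting functions in this commit: evaluate a coarse grid of starting values, run optim() from the most promising candidate, and restart a few times from the returned optimum. A generic sketch of that strategy with a toy objective (not the model likelihood):

obj  <- function(p) sum((p - c(1, -2))^2)           # toy objective to minimise
grid <- expand.grid(p1 = seq(-3, 3, by = 1),        # coarse grid of start values
                    p2 = seq(-3, 3, by = 1))
vals  <- apply(grid, 1, obj)                        # grid search
start <- unlist(grid[which.min(vals), ])            # most promising start value
fit <- optim(start, obj, control = list(maxit = 10^6, reltol = 10^-8))
for (i in 1:4) {                                    # polish by restarting from the optimum
  fit <- optim(fit$par, obj, control = list(maxit = 10^6, reltol = 10^-8))
}
fit$par                                             # close to c(1, -2)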

llHeuris <-
function(p, N_SA_RA,N_SA_RB, N_SB_RA, N_SB_RB, nRatings, nCond){
p <- c(t(p))

ds <- p[1:nCond]
b <- p[nCond+nRatings]
c_RA <- c(-Inf, p[nCond+1], p[nCond+1] +
cumsum(c(exp(p[(nCond+2):(nCond+nRatings-1)]))), Inf)
# c_RA <- c(-Inf, p[nCond+nRatings-1], rev(cumsum(c(exp(p[(nCond+1):(nCond+nRatings-2)])))), p[nCond+nRatings-1], Inf)
c_RB <- c(-Inf, p[nCond+nRatings+1], p[nCond+nRatings+1] +
cumsum(c(exp(p[(nCond+nRatings+2):(nCond + nRatings*2-1)]))), Inf)

p_SA_RA <- matrix(NA, nrow=nCond, ncol = nRatings)
p_SA_RB <- matrix(NA, nrow=nCond, ncol = nRatings)
p_SB_RA <- matrix(NA, nrow=nCond, ncol = nRatings)
p_SB_RB <- matrix(NA, nrow=nCond, ncol = nRatings)

P_SBRB <- Vectorize(function(j,i){
integrate(function(x) dnorm(x, mean=ds[j]+b) * pnorm(x, mean=-b),
lower = c_RB[i],
upper = c_RB[i+1],
rel.tol = 10^-8)$value
})
P_SARB <- Vectorize(function(j,i){
integrate(function(x) dnorm(x, mean = b) * pnorm(x, mean=ds[j]-b), # dnorm(x, mean=) * (1-pnorm(x, mean=b))
lower = c_RB[i],
upper = c_RB[i+1],
rel.tol = 10^-8)$value
})


P_SBRA <- Vectorize(function(j,i){
integrate(function(x) dnorm(x, mean=-b) * pnorm(x, mean=ds[j]+b) , # dnorm(x, mean=ds[j]+b) * (1 - pnorm(x, mean=-b))
lower = c_RA[i],
upper = c_RA[i+1],
rel.tol = 10^-8)$value
})
P_SARA <- Vectorize(function(j,i){
integrate(function(x) dnorm(x, mean= ds[j]-b) * pnorm(x, mean=b),
lower = c_RA[i],
upper = c_RA[i+1],
rel.tol = 10^-8)$value
})

p_SB_RB <- outer(1:nCond, 1:nRatings, P_SBRB)
p_SB_RA <- outer(1:nCond, 1:nRatings, P_SBRA)
p_SA_RA <- outer(1:nCond, 1:nRatings, P_SARA)
p_SA_RB <- outer(1:nCond, 1:nRatings, P_SARB)

p_SB_RB[(is.na(p_SB_RB))| is.nan(p_SB_RB)| p_SB_RB < 1e-20] <- 1e-20
p_SB_RA[(is.na(p_SB_RA))| is.nan(p_SB_RA)| p_SB_RA < 1e-20] <- 1e-20
p_SA_RB[(is.na(p_SA_RB))| is.nan(p_SA_RB)| p_SA_RB < 1e-20] <- 1e-20
p_SA_RA[(is.na(p_SA_RA))| is.nan(p_SA_RA)| p_SA_RA < 1e-20] <- 1e-20
negLogL <- - sum (c(log(p_SB_RB) * N_SB_RB, log(p_SB_RA) * N_SB_RA,
log(p_SA_RB) * N_SA_RB, log(p_SA_RA) * N_SA_RA))

negLogL
}
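Because llHeuris() only needs the four response-count matrices and a parameter vector, it can be evaluated directly. A minimal sketch with two difficulty conditions, three rating categories, and made-up counts; the parameter layout follows the indexing above (sensitivities per condition, first stimulus-A criterion and its log-spacing, the bias b, then the first stimulus-B criterion and its log-spacing):

nCond <- 2; nRatings <- 3                          # two difficulty levels, three ratings
toyCounts <- function(x) matrix(x, nrow = nCond, byrow = TRUE)
N_SA_RA <- toyCounts(c(30, 20, 10,  40, 25,  8))   # stimulus A, response A, by rating
N_SA_RB <- toyCounts(c(10,  8,  5,   6,  4,  2))   # stimulus A, response B
N_SB_RA <- toyCounts(c(12,  7,  4,   5,  3,  2))   # stimulus B, response A
N_SB_RB <- toyCounts(c(28, 22, 12,  42, 30, 10))   # stimulus B, response B

p <- c(0.5, 1.5,       # d per condition
       -0.5, log(1),   # first stimulus-A criterion and log-spacing to the next one
        0.1,           # b, the choice bias
       -0.5, log(1))   # first stimulus-B criterion and log-spacing to the next one
llHeuris(p, N_SA_RA, N_SA_RB, N_SB_RA, N_SB_RB, nRatings, nCond)   # negative log-likelihood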
9 changes: 4 additions & 5 deletions R/int_fitSDT.R
@@ -5,7 +5,7 @@

fitSDT <-
function(N_SA_RA, N_SA_RB, N_SB_RA, N_SB_RB,
-           nInits, nRestart, nRatings, nCond){
+           nInits, nRestart, nRatings, nCond, nTrials){

# coarse grid search to find promising initial values

@@ -75,7 +75,6 @@ fitSDT <-
res <- data.frame(matrix(nrow=1, ncol=0))
if(!inherits(fit, "try-error")){
k <- length(fit$par)
-    N <- length(ratings)

res[paste("d_",1:nCond, sep="")] <- as.vector(cumsum(exp(fit$par[1:(nCond)])))
res$c <- as.vector(fit$par[nCond+nRatings])
@@ -86,10 +85,10 @@ fitSDT <-


res$negLogLik <- fit$value
-    res$N <- N
+    res$N <- nTrials
res$k <- k
-    res$BIC <- 2 * fit$value + k * log(N)
-    res$AICc <- 2 * fit$value + k * 2 + 2*k*(k-1)/(N-k-1)
+    res$BIC <- 2 * fit$value + k * log(nTrials)
+    res$AICc <- 2 * fit$value + k * 2 + 2*k*(k-1)/(nTrials-k-1)
res$AIC <- 2 * fit$value + k * 2
}
res
2 changes: 1 addition & 1 deletion paper.md
@@ -29,7 +29,7 @@ bibliography: paper.bib

# Summary

- We present the statConfR package for R, which allows researchers to conveniently fit and compare nine different static models of decision confidence applicable to binary discrimination tasks with confidence ratings: the signal detection rating model [@Green1966], the Gaussian noise model[@Maniscalco2016], the independent Gaussian model [@Rausch2017], the weighted evidence and visibility model [@Rausch2018], the lognormal noise model [@Shekhar2020a], the lognormal weighted evidence and visibility model [@shekhar_how_2023], the independent truncated Gaussian model [@rausch_measures_2023] based on the meta-d$^\prime$/d$^\prime$ method [@Maniscalco2012; @Maniscalco2014], and the independent truncated Gaussian model based on the Hmetad method [@Fleming2017a]. In addition, the statConfR package provides functions for estimating meta-d$^\prime$/d$^\prime$, the most widely-used measure of metacognitive efficiency, allowing both @Maniscalco2012's and @Fleming2017a's model specification.
+ We present the `statConfR` package for R, which allows researchers to conveniently fit and compare nine different static models of decision confidence applicable to binary discrimination tasks with confidence ratings: the signal detection rating model [@Green1966], the Gaussian noise model[@Maniscalco2016], the independent Gaussian model [@Rausch2017], the weighted evidence and visibility model [@Rausch2018], the lognormal noise model [@Shekhar2020a], the lognormal weighted evidence and visibility model [@shekhar_how_2023], the independent truncated Gaussian model [@rausch_measures_2023] based on the meta-d$^\prime$/d$^\prime$ method [@Maniscalco2012; @Maniscalco2014], and the independent truncated Gaussian model based on the Hmetad method [@Fleming2017a]. In addition, the statConfR package provides functions for estimating meta-d$^\prime$/d$^\prime$, the most widely-used measure of metacognitive efficiency, allowing both @Maniscalco2012's and @Fleming2017a's model specification.

# Statement of need
