Adding checkmate to all functions. Applying styler
Admin_mschuemi authored and Admin_mschuemi committed Apr 13, 2023
1 parent 290f248 commit 01ed5ca
Showing 49 changed files with 2,105 additions and 1,189 deletions.
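The pattern this commit applies across the package collects every argument check into a single checkmate assertion collection and reports them together, so a caller sees all invalid arguments at once instead of only the first. A minimal sketch of the pattern follows; the function name and arguments are hypothetical, not from the package.

library(checkmate)

# Hypothetical function illustrating the commit's validation pattern:
# gather every argument check, then report all failures in one error.
validateExample <- function(cdmDatabaseSchema, maxCores = 1) {
  errorMessages <- checkmate::makeAssertCollection()
  checkmate::assertCharacter(cdmDatabaseSchema, len = 1, add = errorMessages)
  checkmate::assertInt(maxCores, lower = 1, add = errorMessages)
  checkmate::reportAssertions(collection = errorMessages)
  invisible(NULL)
}

# Both problems below would be reported in a single message:
# validateExample(cdmDatabaseSchema = 123, maxCores = 0)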
4 changes: 2 additions & 2 deletions DESCRIPTION
@@ -2,7 +2,7 @@ Package: MethodEvaluation
Type: Package
Title: Package for Evaluation of Estimation Methods
Version: 2.3.0
Date: 2032-03-31
Date: 2023-04-13
Authors@R: c(
person("Martijn", "Schuemie", , "[email protected]", role = c("aut", "cre"))
)
@@ -21,7 +21,7 @@ URL: https://github.com/OHDSI/MethodEvaluation
BugReports: https://github.com/OHDSI/MethodEvaluation/issues
Depends:
R (>= 3.5.0),
DatabaseConnector (>= 4.0.2),
DatabaseConnector (>= 6.0.0),
FeatureExtraction (>= 3.0.0),
Cyclops (>= 3.0.0)
Imports:
8 changes: 1 addition & 7 deletions NEWS.md
@@ -7,13 +7,7 @@ Changes:

2. Adding `packageCustomBenchmarkResults()` to support custom methods benchmarks.


MethodEvaluation 2.2.1
======================

Changes:

1. Fixing seeds and setting `resetCoefficients = TRUE` to ensure reproducibility of positive control synthesis.
3. Fixing seeds and setting `resetCoefficients = TRUE` to ensure reproducibility of positive control synthesis.


MethodEvaluation 2.2.0
644 changes: 375 additions & 269 deletions R/CreateReferenceSetCohorts.R

Large diffs are not rendered by default.

20 changes: 13 additions & 7 deletions R/Mdrr.R
@@ -85,9 +85,9 @@
#' }
#' @export
computeMdrr <- function(connectionDetails,
cdmDatabaseSchema,
oracleTempSchema = NULL,
tempEmulationSchema = getOption("sqlRenderTempEmulationSchema"),
cdmDatabaseSchema,
exposureOutcomePairs,
exposureDatabaseSchema = cdmDatabaseSchema,
exposureTable = "drug_era",
@@ -101,12 +101,18 @@ computeMdrr <- function(connectionDetails,
if (is.null(exposureOutcomePairs$exposureId) && !is.null(exposureOutcomePairs$targetId)) {
exposureOutcomePairs$exposureId <- exposureOutcomePairs$targetId
}
if (is.null(exposureOutcomePairs$exposureId)) {
stop("exposureOutcomePairs is missing exposureId and targetId column")
}
if (is.null(exposureOutcomePairs$outcomeId)) {
stop("exposureOutcomePairs is missing outcomeId column")
}
errorMessages <- checkmate::makeAssertCollection()
checkmate::assertClass(connectionDetails, "ConnectionDetails", add = errorMessages)
checkmate::assertCharacter(tempEmulationSchema, len = 1, null.ok = TRUE, add = errorMessages)
checkmate::assertCharacter(cdmDatabaseSchema, len = 1, add = errorMessages)
checkmate::assertDataFrame(exposureOutcomePairs, add = errorMessages)
checkmate::assertNames(colnames(exposureOutcomePairs), must.include = c("exposureId", "outcomeId"), add = errorMessages)
checkmate::assertCharacter(exposureDatabaseSchema, len = 1, add = errorMessages)
checkmate::assertCharacter(exposureTable, len = 1, add = errorMessages)
checkmate::assertCharacter(outcomeDatabaseSchema, len = 1, add = errorMessages)
checkmate::assertCharacter(outcomeTable, len = 1, add = errorMessages)
checkmate::assertCharacter(cdmVersion, len = 1, add = errorMessages)
checkmate::reportAssertions(collection = errorMessages)
exposureTable <- tolower(exposureTable)
outcomeTable <- tolower(outcomeTable)
if (exposureTable == "drug_era") {
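The hunk above replaces oracleTempSchema with tempEmulationSchema and validates the required exposureId (or targetId) and outcomeId columns up front. To illustrate the updated interface, a hedged usage sketch; the connection values and IDs below are placeholders, not values from the package.

# Placeholder connection; adjust dbms/server/credentials for your environment.
connectionDetails <- DatabaseConnector::createConnectionDetails(
  dbms = "postgresql",
  server = "localhost/ohdsi",
  user = "user",
  password = "secret"
)

# exposureId (or targetId) and outcomeId columns are required by the new checks.
exposureOutcomePairs <- data.frame(
  exposureId = c(1, 1),  # hypothetical cohort/concept IDs
  outcomeId = c(10, 11)  # hypothetical cohort/concept IDs
)

# tempEmulationSchema defaults to getOption("sqlRenderTempEmulationSchema").
mdrr <- computeMdrr(
  connectionDetails = connectionDetails,
  cdmDatabaseSchema = "cdm",
  exposureOutcomePairs = exposureOutcomePairs
)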
8 changes: 4 additions & 4 deletions R/MethodEvaluation.R
@@ -3,13 +3,13 @@
# Copyright 2023 Observational Health Data Sciences and Informatics
#
# This file is part of MethodEvaluation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -114,7 +114,7 @@ NULL

#' The OHDSI Development Set - Negative Controls
#' A set of 76 negative control outcomes, all for the exposures of ACE inhibitors (compared to
#' thiazides and thiazide-like diuretics). This set is a much smaller set than the OHDSI Method
#' Evaluation Benchmark, but follows the same principles. It is intended to be used when developing
#' methods, leaving the Methods Benchmark untouched until a final evaluation of the method, thus preventing
#' 'training' on the evaluation set. The negative controls are borrowed from the LEGEND Hypertension study.
49 changes: 33 additions & 16 deletions R/Metrics.R
@@ -44,7 +44,7 @@
computeMetrics <- function(logRr, seLogRr = NULL, ci95Lb = NULL, ci95Ub = NULL, p = NULL, trueLogRr) {
# data <- EmpiricalCalibration::simulateControls(n = 50 * 3, mean = 0.25, sd = 0.25, trueLogRr =
# log(c(1, 2, 4))); logRr <- data$logRr; seLogRr <- data$seLogRr; trueLogRr <- data$trueLogRr

errorMessages <- checkmate::makeAssertCollection()
checkmate::assertNumeric(logRr, min.len = 1, add = errorMessages)
checkmate::assertNumeric(seLogRr, len = length(logRr), null.ok = TRUE, add = errorMessages)
checkmate::assertNumeric(ci95Lb, len = length(logRr), null.ok = TRUE, add = errorMessages)
checkmate::assertNumeric(ci95Ub, len = length(logRr), null.ok = TRUE, add = errorMessages)
checkmate::assertNumeric(p, len = length(logRr), null.ok = TRUE, add = errorMessages)
checkmate::assertNumeric(trueLogRr, len = length(logRr), add = errorMessages)
checkmate::reportAssertions(collection = errorMessages)

if (is.null(seLogRr) && is.null(ci95Lb)) {
stop("Must specify either standard error or confidence interval")
}
@@ -70,14 +78,14 @@ computeMetrics <- function(logRr, seLogRr = NULL, ci95Lb = NULL, ci95Ub = NULL,
} else {
data$p <- p
}

idx <- is.na(data$logRr) | is.infinite(data$logRr) | is.na(data$seLogRr) | is.infinite(data$seLogRr)
data$logRr[idx] <- 0
data$seLogRr[idx] <- 999
data$ci95Lb[idx] <- 0
data$ci95Ub[idx] <- 999
data$p[idx] <- 1

nonEstimable <- round(mean(data$seLogRr >= 99), 2)
roc <- pROC::roc(data$trueLogRr > 0, data$logRr, algorithm = 3)
auc <- round(pROC::auc(roc), 2)
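The new length assertions make computeMetrics fail fast on ragged inputs. A quick check, reusing the simulation recipe from the comment at the top of the function; this is illustrative only.

# Simulate negative and positive controls, as in the comment above.
data <- EmpiricalCalibration::simulateControls(
  n = 50 * 3, mean = 0.25, sd = 0.25, trueLogRr = log(c(1, 2, 4))
)
metrics <- computeMetrics(
  logRr = data$logRr,
  seLogRr = data$seLogRr,
  trueLogRr = data$trueLogRr
)

# A seLogRr vector of the wrong length is now rejected up front by checkmate:
# computeMetrics(logRr = data$logRr, seLogRr = data$seLogRr[-1],
#                trueLogRr = data$trueLogRr)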
@@ -198,19 +206,19 @@ packageOhdsiBenchmarkResults <- function(estimates,
checkmate::assertCharacter(databaseName, len = 1, add = errorMessages)
checkmate::assertCharacter(exportFolder, len = 1, add = errorMessages)
checkmate::reportAssertions(collection = errorMessages)

if (!file.exists(exportFolder)) {
dir.create(exportFolder, recursive = TRUE)
}

# Create full grid of controls (including those that did not make it into the database):
if (referenceSet == "ohdsiMethodsBenchmark") {
ohdsiNegativeControls <- readRDS(system.file("ohdsiNegativeControls.rds",
package = "MethodEvaluation"
package = "MethodEvaluation"
))
} else {
ohdsiNegativeControls <- readRDS(system.file("ohdsiDevelopmentNegativeControls.rds",
package = "MethodEvaluation"
package = "MethodEvaluation"
))
}
ohdsiNegativeControls$oldOutcomeId <- ohdsiNegativeControls$outcomeId
@@ -238,7 +246,7 @@ packageOhdsiBenchmarkResults <- function(estimates,
fullGrid$targetEffectSize[idx]
)
allControls <- merge(controlSummary, fullGrid, all.y = TRUE)

.packageBenchmarkResults(
allControls = allControls,
analysisRef = analysisRef,
@@ -274,7 +282,7 @@ packageOhdsiBenchmarkResults <- function(estimates,
by = join_by("analysisId")
) %>%
mutate(database = databaseName)

# Perform empirical calibration:
# subset = subsets[[2]]
calibrate <- function(subset) {
@@ -304,7 +312,7 @@ packageOhdsiBenchmarkResults <- function(estimates,
model = model
)
null <- EmpiricalCalibration::fitNull(logRr = subsetMinusOne$logRr[subsetMinusOne$targetEffectSize ==
1], seLogRr = subsetMinusOne$seLogRr[subsetMinusOne$targetEffectSize == 1])
caliP <- EmpiricalCalibration::calibrateP(
null = null,
logRr = one$logRr,
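The calibration step above fits an empirical null on the negative-control estimates (targetEffectSize == 1) and uses it to compute calibrated p-values for the positive controls. A simplified, self-contained sketch with simulated data, omitting the leave-one-out handling visible above:

# Simulated negative controls only (true log RR of 0); not benchmark output.
ncs <- EmpiricalCalibration::simulateControls(n = 50, trueLogRr = 0)
null <- EmpiricalCalibration::fitNull(logRr = ncs$logRr, seLogRr = ncs$seLogRr)
calibratedP <- EmpiricalCalibration::calibrateP(
  null = null,
  logRr = ncs$logRr,
  seLogRr = ncs$seLogRr
)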
@@ -433,7 +441,7 @@ packageCustomBenchmarkResults <- function(estimates,
checkmate::assertCharacter(databaseName, len = 1, add = errorMessages)
checkmate::assertCharacter(exportFolder, len = 1, add = errorMessages)
checkmate::reportAssertions(collection = errorMessages)

trueEffectSizes <- c(1, unique(synthesisSummary$targetEffectSize))
negativeControls <- negativeControls %>%
mutate(stratum = if_else(.data$type == "Outcome control", .data$targetId, .data$outcomeId)) %>%
@@ -489,6 +497,15 @@ computeOhdsiBenchmarkMetrics <- function(exportFolder,
trueEffectSize = "Overall",
calibrated = FALSE,
comparative = FALSE) {
errorMessages <- checkmate::makeAssertCollection()
checkmate::assertCharacter(exportFolder, len = 1, add = errorMessages)
checkmate::assertAtomic(mdrr, len = 1, add = errorMessages)
checkmate::assertCharacter(stratum, len = 1, add = errorMessages)
checkmate::assertAtomic(trueEffectSize, len = 1, add = errorMessages)
checkmate::assertLogical(calibrated, len = 1, add = errorMessages)
checkmate::assertLogical(comparative, len = 1, add = errorMessages)
checkmate::reportAssertions(collection = errorMessages)

# Load and prepare estimates of all methods
files <- list.files(exportFolder, "estimates.*csv", full.names = TRUE)
estimates <- lapply(files, read.csv)
@@ -510,12 +527,12 @@ computeOhdsiBenchmarkMetrics <- function(exportFolder,
estimates$calCi95Lb[idx] <- 0
estimates$calCi95Ub[idx] <- 999
estimates$calP[is.na(estimates$calP)] <- 1

# Load and prepare analysis refs
files <- list.files(exportFolder, "analysisRef.*csv", full.names = TRUE)
analysisRef <- lapply(files, read.csv)
analysisRef <- do.call("rbind", analysisRef)

# Apply selection criteria
subset <- estimates
if (mdrr != "All") {
@@ -534,7 +551,7 @@ computeOhdsiBenchmarkMetrics <- function(exportFolder,
subset$ci95Ub <- subset$calCi95Ub
subset$p <- subset$calP
}

# Compute metrics
combis <- unique(subset[, c("database", "method", "analysisId")])
if (trueEffectSize == "Overall") {
@@ -566,7 +583,7 @@ computeOhdsiBenchmarkMetrics <- function(exportFolder,
# trueRr <- input$trueRr
computeMetrics <- function(i) {
forEval <- subset[subset$method == combis$method[i] & subset$analysisId == combis$analysisId[i] &
subset$targetEffectSize == trueEffectSize, ]
mse <- round(mean((forEval$logRr - log(forEval$trueEffectSize))^2), 2)
coverage <- round(
mean(forEval$ci95Lb < forEval$trueEffectSize & forEval$ci95Ub > forEval$trueEffectSize),
@@ -580,7 +597,7 @@ computeOhdsiBenchmarkMetrics <- function(exportFolder,
nonEstimable <- round(mean(forEval$seLogRr == 999), 2)
} else {
negAndPos <- subset[subset$method == combis$method[i] & subset$analysisId == combis$analysisId[i] &
(subset$targetEffectSize == trueEffectSize | subset$targetEffectSize == 1), ]
roc <- pROC::roc(negAndPos$targetEffectSize > 1, negAndPos$logRr, algorithm = 3)
auc <- round(pROC::auc(roc), 2)
type1 <- NA
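The AUC computed in this last hunk measures how well an analysis separates positive controls (true effect size > 1) from negative controls using the point estimate alone. A self-contained illustration with simulated estimates, not benchmark data:

set.seed(123)
# 50 negative controls (true RR = 1) and 50 positive controls (true RR = 2).
trueRr <- c(rep(1, 50), rep(2, 50))
logRr <- log(trueRr) + rnorm(100, sd = 0.3)

# Same call pattern as in the diff: response is "is a positive control",
# predictor is the estimated log relative risk.
roc <- pROC::roc(trueRr > 1, logRr, algorithm = 3)
round(pROC::auc(roc), 2)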