Merge branch 'master' into chilampoon-improve-documentation
stemangiola authored Oct 16, 2024
2 parents 6ba2954 + a0bdd3f commit 6f2b55a
Showing 7 changed files with 45 additions and 37 deletions.
2 changes: 1 addition & 1 deletion DESCRIPTION
@@ -1,7 +1,7 @@
Type: Package
Package: tidybulk
Title: Brings transcriptomics to the tidyverse
-Version: 1.17.3
+Version: 1.17.4
Authors@R: c(person("Stefano", "Mangiola", email = "[email protected]",
role = c("aut", "cre")),
person("Maria", "Doyle", email = "[email protected]",
21 changes: 13 additions & 8 deletions R/functions_SE.R
@@ -794,7 +794,7 @@ get_differential_transcript_abundance_bulk_SE <- function(.data,
"with `___` in the design matrix, in order to work with edgeR.")
colnames(design) = design |> colnames() |> str_replace(":", "___")
}

# Print the design column names in case I want contrasts
message(
sprintf(
@@ -1137,6 +1137,7 @@ get_differential_transcript_abundance_bulk_voom_SE <- function(.data,
#' "edgeR_likelihood_ratio" (i.e., LRT)
#' @param scaling_method A character string. The scaling method passed to the
#' backend function (i.e., edgeR::calcNormFactors; "TMM","TMMwsp","RLE","upperquartile")
+#' @param .scaling_factor A tidyeval (column name) for the precalculated TMM scaling
#' @param omit_contrast_in_colnames If just one contrast is specified you can
#' choose to omit the contrast label in the colnames.
#' @param ... Additional arguments for glmmSeq
@@ -1151,15 +1152,16 @@ get_differential_transcript_abundance_glmmSeq_SE <- function(.data,
method,

test_above_log2_fold_change = NULL,

-scaling_method = "TMM",
-omit_contrast_in_colnames = FALSE,
-prefix = "",
-.dispersion = NULL,
-...) {
+scaling_method = "TMM",
+.scaling_factor = NULL,
+omit_contrast_in_colnames = FALSE,
+prefix = "",
+.dispersion = NULL,
+...) {

.abundance = enquo(.abundance)
.dispersion = enquo(.dispersion)
+.scaling_factor = enquo(.scaling_factor)

# Check if contrasts are of the same form
if(
@@ -1233,7 +1235,10 @@ get_differential_transcript_abundance_glmmSeq_SE <- function(.data,
dispersion = dispersion[rownames(counts)]

# Scaling
-sizeFactors <- counts |> edgeR::calcNormFactors(method = scaling_method)
+if(.scaling_factor |> quo_is_symbolic())
+sizeFactors = .data |> pivot_sample() |> pull(!!.scaling_factor)
+else
+sizeFactors <- counts |> edgeR::calcNormFactors(method = scaling_method)


glmmSeq_object =
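The new branch above lets a precalculated per-sample scaling-factor column bypass edgeR's TMM computation. A minimal standalone sketch of that selection logic, assuming a SummarizedExperiment `se` whose colData carries a hypothetical `my_tmm` column; the helper name and the use of colData() in place of tidybulk's pivot_sample() are illustrative only, not the package's code:

    library(rlang)
    library(SummarizedExperiment)

    # Pick per-sample size factors: use a user-supplied colData column if one
    # was passed as a bare column name, otherwise compute them from the counts.
    pick_size_factors <- function(se, counts, .scaling_factor = NULL,
                                  scaling_method = "TMM") {
      .scaling_factor <- enquo(.scaling_factor)
      if (quo_is_symbolic(.scaling_factor)) {
        # Precalculated factors stored per sample in colData
        colData(se) |> as.data.frame() |> dplyr::pull(!!.scaling_factor)
      } else {
        # Fall back to edgeR's normalisation factors (TMM by default)
        edgeR::calcNormFactors(counts, method = scaling_method)
      }
    }

    # e.g. pick_size_factors(se, counts, .scaling_factor = my_tmm)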
13 changes: 7 additions & 6 deletions R/methods.R
@@ -627,14 +627,14 @@ setMethod("scale_abundance", "tidybulk", .scale_abundance)
#' The scaling inference is then applied back to all unfiltered data.
#'
#' Underlying method
#'
#'
#' If `limma_normalize_quantiles` is chosen
#'
#'
#' .data |>limma::normalizeQuantiles()
#'
#'
#' If `preprocesscore_normalize_quantiles_use_target` is chosen
#'
#' .data |>
#'
#' .data |>
#' preprocessCore::normalize.quantiles.use.target(
#' target = preprocessCore::normalize.quantiles.determine.target(.data)
#' )
@@ -675,6 +675,7 @@ setGeneric("quantile_normalise_abundance", function(
target_distribution = NULL,
action = "add") {


# Fix NOTEs
. = NULL

@@ -727,7 +728,7 @@ setGeneric("quantile_normalise_abundance", function(
BiocManager::install("preprocessCore", ask = FALSE)
}
if(is.null(target_distribution)) target_distribution = preprocessCore::normalize.quantiles.determine.target(.data_norm)

.data_norm_quant =
.data_norm |>
preprocessCore::normalize.quantiles.use.target(
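The quantile-normalisation docstring above names two backends. A rough sketch of calling both directly on a counts matrix (the toy matrix and its dimensions are made up; tidybulk itself drives these calls through quantile_normalise_abundance()):

    library(limma)
    library(preprocessCore)

    # Toy counts: 100 genes x 4 samples
    set.seed(1)
    counts <- matrix(rpois(400, lambda = 50), nrow = 100,
                     dimnames = list(paste0("g", 1:100), paste0("s", 1:4)))
    storage.mode(counts) <- "double"   # precaution: work on a double matrix

    # Backend 1: limma
    norm_limma <- normalizeQuantiles(counts)

    # Backend 2: preprocessCore, normalising towards a reference distribution
    target   <- normalize.quantiles.determine.target(counts)
    norm_ppc <- normalize.quantiles.use.target(counts, target = target)
    dimnames(norm_ppc) <- dimnames(counts)   # dimnames may be dropped; restore them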
17 changes: 9 additions & 8 deletions R/methods_SE.R
@@ -186,17 +186,16 @@ setMethod("tidybulk", "RangedSummarizedExperiment", .tidybulk_se)

my_counts_scaled =
list(
-assays(.data) %>%
-as.list() %>%
-.[[1]] %>%
-multiply_by(
-rep(multiplier, rep(nrow(.),length(multiplier)))
-)
+assay(.data) %*%
+diag(multiplier)

) %>%
setNames(value_scaled)
+colnames(my_counts_scaled[[1]]) = assay(.data) |> colnames()


# Add the assay
-assays(.data) = assays(.data) %>% c(my_counts_scaled)
+assays(.data, withDimnames=FALSE) = assays(.data) %>% c(my_counts_scaled)

.data %>%

@@ -309,7 +308,7 @@ setMethod("scale_abundance",
as.matrix()

if(is.null(target_distribution)) target_distribution = preprocessCore::normalize.quantiles.determine.target(.data_norm)

.data_norm =
.data_norm |>
preprocessCore::normalize.quantiles.use.target(
@@ -2922,6 +2921,8 @@ setMethod("describe_transcript", "RangedSummarizedExperiment", .describe_transcr
combination_of_factors_of_NON_interest =
# Factors
se[1,1, drop=FALSE] |>
+colData() |>
+as_tibble(rownames = ".sample") |>
select(...) |>
suppressWarnings() |>
colnames() |>
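The scale_abundance hunk above replaces the rep()-based element-wise multiplication with a matrix product against diag(multiplier); both scale each sample's column by its own factor, but the matrix product drops column names, which is why the diff reassigns colnames afterwards. A small equivalence check with made-up numbers:

    set.seed(1)
    counts     <- matrix(rpois(12, 100), nrow = 4,
                         dimnames = list(paste0("g", 1:4), paste0("s", 1:3)))
    multiplier <- c(0.9, 1.1, 1.3)   # one scaling factor per sample (column)

    # Former approach: recycle each factor down its column
    scaled_rep  <- counts * rep(multiplier, rep(nrow(counts), length(multiplier)))

    # Current approach: right-multiply by a diagonal matrix of the factors
    scaled_diag <- counts %*% diag(multiplier)
    colnames(scaled_diag) <- colnames(counts)   # %*% loses the column names

    all.equal(scaled_rep, scaled_diag)   # TRUE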
6 changes: 3 additions & 3 deletions man/quantile_normalise_abundance-methods.Rd

Some generated files are not rendered by default.

13 changes: 6 additions & 7 deletions man/test_differential_abundance-methods.Rd

Some generated files are not rendered by default.

10 changes: 6 additions & 4 deletions tests/testthat/test-bulk_methods_SummarizedExperiment.R
@@ -747,7 +747,7 @@ test_that("gene over representation",{
species="Homo sapiens"
)

-expect_equal( ncol(res), 10 )
+expect_equal( ncol(res), 13 )



@@ -854,8 +854,8 @@ test_that("Only reduced dimensions UMAP - no object",{
test_that("resolve_complete_confounders_of_non_interest",{


-library(tidySummarizedExperiment)
-library(tidybulk)
+#library(tidySummarizedExperiment)
+library(SummarizedExperiment)

# Sample annotations
sample_annotations <- data.frame(
@@ -890,7 +890,9 @@

se |>
resolve_complete_confounders_of_non_interest(A, B, C) |>
-distinct(.sample, A, B, C) |>
+colData() |>
+_[, c("A", "B", "C")] |>
+as_tibble(rownames = ".sample") |>
expect_identical(expected_tibble )


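The reworked assertion at the end reads the resolved annotations straight from colData() instead of going through tidySummarizedExperiment's distinct(). The same extraction pattern in isolation, on a made-up SummarizedExperiment; the _ placeholder inside an extraction call needs R >= 4.3, and the explicit as.data.frame() step is added here for safety rather than taken from the test:

    library(SummarizedExperiment)
    library(tibble)

    # Toy object: 2 genes x 4 samples with three annotation columns
    se <- SummarizedExperiment(
      assays  = list(counts = matrix(1:8, nrow = 2,
                                     dimnames = list(c("g1", "g2"), paste0("sample", 1:4)))),
      colData = DataFrame(A = c(1, 1, 2, 2),
                          B = c("a", "a", "b", "b"),
                          C = c("x", "y", "x", "y"),
                          row.names = paste0("sample", 1:4))
    )

    # Selected sample annotations as a tibble keyed by sample name
    # (the test pipes the S4 DataFrame to as_tibble() directly)
    se |>
      colData() |>
      _[, c("A", "B", "C")] |>
      as.data.frame() |>
      as_tibble(rownames = ".sample")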
