Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Amplicon Illumina nextflow conversion: Added Python wrapper script #133

Open
wants to merge 34 commits into
base: DEV_Amplicon_Illumina_NF_conversion
Choose a base branch
from
Open
Changes from 1 commit
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
1702918
Fixed group means and stds
olabiyi Nov 18, 2024
8a7bbaa
set ANCOMBC image to 2.6.0
olabiyi Nov 18, 2024
a4d1505
added run workflow script
olabiyi Nov 27, 2024
7a97404
Edited README
olabiyi Nov 27, 2024
f7df02f
Edited README
olabiyi Nov 27, 2024
addf4d6
Fixed no extra argument to nextflow bug
olabiyi Nov 28, 2024
7a9c637
updated UNITE_DB link
olabiyi Dec 5, 2024
10afc84
added deseq
olabiyi Dec 24, 2024
b8ea788
Fixed beta diversity
olabiyi Dec 24, 2024
51bdac9
Updated pipeline document
olabiyi Jan 2, 2025
0f68848
Added more comments to pipeline document
olabiyi Jan 3, 2025
d1670ff
Fixed taxize's api rate error
olabiyi Jan 6, 2025
e579cba
fixed post processing bug
olabiyi Jan 6, 2025
e48589a
Added remove rare functionality
olabiyi Jan 8, 2025
b61a9b5
updated nextflow version
olabiyi Jan 8, 2025
1998ed2
Fixed reads count column bug
olabiyi Jan 8, 2025
e11e03f
Updated readme and variables
olabiyi Jan 9, 2025
4c44697
Fixed software versions redundancy bug
olabiyi Jan 10, 2025
39bd0f2
Added diversity and differential abundance testing to README post-pro…
olabiyi Jan 13, 2025
409b6f8
updated pipeline document
olabiyi Jan 15, 2025
5b74691
updated pipeline document
olabiyi Jan 15, 2025
3657ba5
Documented functions
olabiyi Jan 18, 2025
7cc24dc
assigned a default value to params.help
olabiyi Jan 21, 2025
6923766
Added some more comments to preprocessing
olabiyi Jan 23, 2025
9a7726a
Formatting changes
bnovak32 Feb 21, 2025
8e94dc9
Added structural zeros removal functionality
olabiyi Feb 22, 2025
d53e371
Renamed volcano plots
olabiyi Feb 24, 2025
e5b6882
Merge pull request #1 from bnovak32/DEV_Amplicon_Illumina_NF_conversi…
olabiyi Feb 25, 2025
b707346
Updated the pipeline document
olabiyi Feb 27, 2025
483b939
Updated the pipeline document
olabiyi Feb 27, 2025
d48eb0f
Updated the pipeline document
olabiyi Feb 27, 2025
b202b79
Updated volcano plots file name
olabiyi Feb 28, 2025
e92025e
fixed minor bugs
olabiyi Feb 28, 2025
6c6bfef
Edited launchDir for post-processing
olabiyi Feb 28, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
Fixed group means and stds
olabiyi committed Nov 18, 2024
commit 1702918a3723e10410cdb6aad7be98eac74df1bd
Original file line number Diff line number Diff line change
@@ -105,7 +105,7 @@ nextflow run main.nf --help
```
> Note: Nextflow commands use both single hyphen arguments (e.g. -help) that denote general nextflow arguments and double hyphen arguments (e.g. --input_file) that denote workflow specific parameters. Take care to use the proper number of hyphens for each argument.
> Please Note: This workflow assumes that all your raw reads end with the same suffix. If they don't, please modify your input read filenames to have the same suffix as shown in [SE_file.csv](workflow_code/SE_file.csv) and [PE_file.csv](workflow_code/PE_file.csv).
<br>
#### 4a. Approach 1: Run slurm jobs in singularity containers with OSD or GLDS accession as input
Original file line number Diff line number Diff line change
@@ -148,7 +148,6 @@ library(ANCOMBC)
library(DescTools)
library(taxize)
library(glue)
library(here)
library(mia)
library(phyloseq)
library(utils)
@@ -390,7 +389,7 @@ pairwise_comp.m <- utils::combn(metadata[,group] %>% unique, 2)
pairwise_comp_df <- pairwise_comp.m %>% as.data.frame

colnames(pairwise_comp_df) <- map_chr(pairwise_comp_df,
\(col) str_c(col, collapse = ".vs."))
\(col) str_c(col, collapse = "v"))
comparisons <- colnames(pairwise_comp_df)
names(comparisons) <- comparisons

@@ -443,7 +442,7 @@ final_results_bc1 <- map(pairwise_comp_df, function(col){
select(-contains("Intercept")) %>%
set_names(
c("taxon",
glue("lfc_({group2}).vs.({group1})"))
glue("logFC_({group2})v({group1})"))
)

# SE
@@ -452,7 +451,7 @@ final_results_bc1 <- map(pairwise_comp_df, function(col){
select(-contains("Intercept")) %>%
set_names(
c("taxon",
glue("se_({group2}).vs.({group1})"))
glue("lfcSE_({group2})v({group1})"))
)

# W
@@ -461,7 +460,7 @@ final_results_bc1 <- map(pairwise_comp_df, function(col){
select(-contains("Intercept")) %>%
set_names(
c("taxon",
glue("W_({group2}).vs.({group1})"))
glue("Wstat_({group2})v({group1})"))
)

# p_val
@@ -470,7 +469,7 @@ final_results_bc1 <- map(pairwise_comp_df, function(col){
select(-contains("Intercept")) %>%
set_names(
c("taxon",
glue("p_({group2}).vs.({group1})"))
glue("pvalue_({group2})v({group1})"))
)

# q_val
@@ -479,7 +478,7 @@ final_results_bc1 <- map(pairwise_comp_df, function(col){
select(-contains("Intercept")) %>%
set_names(
c("taxon",
glue("q_({group2}).vs.({group1})"))
glue("qvalue_({group2})v({group1})"))
)


@@ -489,7 +488,7 @@ final_results_bc1 <- map(pairwise_comp_df, function(col){
select(-contains("Intercept")) %>%
set_names(
c("taxon",
glue("diff_({group2}).vs.({group1})"))
glue("diff_({group2})v({group1})"))
)


@@ -524,26 +523,25 @@ walk(comparisons[names(final_results_bc1)], .f = function(comparison){
# Sort ASVs in ascending order
merged_stats_df <- merged_stats_df %>%
rename(!!feature := taxon) %>%
#filter(str_detect(ASV, "ASV")) %>%
mutate(!!feature := SortMixed(!!sym(feature)))



comp_names <- merged_stats_df %>%
select(starts_with("lfc")) %>%
colnames() %>% str_remove_all("lfc_")
select(starts_with("logFC")) %>%
colnames() %>% str_remove_all("logFC_")
names(comp_names) <- comp_names

message("Making volcano plots...")
# -------------- Make volcano plots ------------------ #
volcano_plots <- map(comp_names, function(comparison){

comp_col <- c(
glue("lfc_{comparison}"),
glue("se_{comparison}"),
glue("W_{comparison}"),
glue("p_{comparison}"),
glue("q_{comparison}"),
glue("logFC_{comparison}"),
glue("lfcSE_{comparison}"),
glue("Wstat_{comparison}"),
glue("pvalue_{comparison}"),
glue("qvalue_{comparison}"),
glue("diff_{comparison}")
)

@@ -554,16 +552,16 @@ volcano_plots <- map(comp_names, function(comparison){
pattern = "(.+)_.+",
replacement = "\\1")

p <- ggplot(sub_res_df, aes(x=lfc, y=-log10(p), color=diff, label=!!sym(feature))) +
geom_point(size=4) + geom_point(size=4) +
p <- ggplot(sub_res_df, aes(x=logFC, y=-log10(pvalue), color=diff, label=!!sym(feature))) +
geom_point(size=4) +
scale_color_manual(values=c("TRUE"="cyan2", "FALSE"="red")) +
geom_hline(yintercept = -log10(0.05), linetype = "dashed") +
ggrepel::geom_text_repel() +
labs(x="logFC", y="-log10(Pvalue)",
title = comparison, color="Significant") + publication_format

ggsave(filename = glue("{output_prefix}{comparison}_volcano{assay_suffix}.png"), plot = p, device = "png",
width = 6, height = 8, units = "in", dpi = 300, path=diff_abund_out_dir)
width = 6, height = 8, units = "in", dpi = 300, path = diff_abund_out_dir)

return(p)
})
@@ -580,7 +578,7 @@ p <- wrap_plots(volcano_plots, ncol = 2)
try(
ggsave(filename = glue("{output_prefix}{feature}_volcano{assay_suffix}.png"), plot = p, device = "png",
width = 16, height = fig_height, units = "in", dpi = 300,
path=diff_abund_out_dir, limitsize = FALSE)
path = diff_abund_out_dir, limitsize = FALSE)
)

# Add NCBI id to feature i.e. ASV
@@ -602,25 +600,37 @@ normalized_table <- as.data.frame(feature_table + 1) %>%
mutate(across( where(is.numeric), log ) )


group_means_df <- normalized_table[feature]
samples <- metadata[[samples_column]]
samplesdropped <- setdiff(x = samples, y = colnames(normalized_table)[-1])
missing_df <- data.frame(ASV=normalized_table[[feature]],
matrix(data = NA,
nrow = nrow(normalized_table),
ncol = length(samplesdropped)
)
)
colnames(missing_df) <- c(feature,samplesdropped)


walk(pairwise_comp_df, function(col){
group_levels <- metadata[, group] %>% unique() %>% sort()
group_means_df <- normalized_table[feature]
walk(group_levels, function(group_level){

group1 <- col[1]
group2 <- col[2]

mean_col <- glue("Group.Mean_({group2}).vs.({group1})")
std_col <- glue("Group.Stdev_({group2}).vs.({group1})")
mean_col <- glue("Group.Mean_({group_level})")
std_col <- glue("Group.Stdev_({group_level})")

# Samples that belong to the current group
Samples <- metadata %>%
filter(!!sym(group) %in% c(group1, group2)) %>%
pull(!!samples_column)
filter(!!sym(group) == group_level) %>%
pull(!!sym(samples_column))
# Samples that belong to the current group that are in the normalized table
Samples <- intersect(colnames(normalized_table), Samples)

temp_df <- normalized_table %>% select(!!feature, !!Samples) %>%
temp_df <- normalized_table %>% select(!!feature, all_of(Samples)) %>%
rowwise() %>%
mutate(!!mean_col := mean(c_across(where(is.numeric))),
!!std_col := sd(c_across(where(is.numeric))) ) %>%
select(!!feature, !!sym(mean_col), !!sym(std_col))
select(!!feature,!!sym(mean_col), !!sym(std_col))

group_means_df <<- group_means_df %>% left_join(temp_df)

@@ -631,7 +641,9 @@ walk(pairwise_comp_df, function(col){
normalized_table <- normalized_table %>%
rowwise() %>%
mutate(All.Mean=mean(c_across(where(is.numeric))),
All.Stdev=sd(c_across(where(is.numeric))) )
All.Stdev=sd(c_across(where(is.numeric))) )%>%
left_join(missing_df, by = feature) %>%
select(!!feature, all_of(samples), All.Mean, All.Stdev)


merged_df <- df %>%
@@ -643,9 +655,9 @@ merged_df <- df %>%

merged_df <- merged_df %>%
select(!!sym(feature):NCBI_id) %>%
left_join(normalized_table) %>%
left_join(normalized_table, by = feature) %>%
left_join(merged_df) %>%
left_join(group_means_df) %>%
left_join(group_means_df, by = feature) %>%
mutate(across(where(is.numeric), ~round(.x, digits=3))) %>%
mutate(across(where(is.matrix), as.numeric))

@@ -697,8 +709,4 @@ ggsave(filename = glue("{output_prefix}{feature}_boxplots{assay_suffix}.png"), p

)

# Error: One or both dimensions exceed the maximum (50000px).
# - Use `options(ragg.max_dim = ...)` to change the max
# Warning: May cause the R session to crash
# Execution halted
message("Run completed successfully.")
Original file line number Diff line number Diff line change
@@ -149,7 +149,6 @@ library(ANCOMBC)
library(DescTools)
library(taxize)
library(glue)
library(here)
library(mia)
library(phyloseq)
library(utils)
@@ -474,13 +473,25 @@ new_colnames <- map_chr(output$res_pair %>% colnames,
if(str_count(colname,group) == 1){
str_replace_all(string=colname,
pattern=glue("(.+)_{group}(.+)"),
replacement=glue("\\1_(\\2).vs.({ref_group})"))
replacement=glue("\\1_(\\2)v({ref_group})")) %>%
str_replace(pattern = "^lfc_", replacement = "logFC_") %>%
str_replace(pattern = "^se_", replacement = "lfcSE_") %>%
str_replace(pattern = "^W_", replacement = "Wstat_") %>%
str_replace(pattern = "^p_", replacement = "pvalue_") %>%
str_replace(pattern = "^q_", replacement = "qvalue_")

# Columns with normal two groups comparison
} else if(str_count(colname,group) == 2){

str_replace_all(string=colname,
pattern=glue("(.+)_{group}(.+)_{group}(.+)"),
replacement=glue("\\1_(\\2).vs.(\\3)"))
replacement=glue("\\1_(\\2)v(\\3)")) %>%
str_replace(pattern = "^lfc_", replacement = "logFC_") %>%
str_replace(pattern = "^se_", replacement = "lfcSE_") %>%
str_replace(pattern = "^W_", replacement = "Wstat_") %>%
str_replace(pattern = "^p_", replacement = "pvalue_") %>%
str_replace(pattern = "^q_", replacement = "qvalue_")

# Feature/ ASV column
} else{

@@ -489,7 +500,6 @@ new_colnames <- map_chr(output$res_pair %>% colnames,
} )



# Change the column named taxon to the feature name e.g. ASV
new_colnames[match("taxon", new_colnames)] <- feature

@@ -539,20 +549,29 @@ normalized_table <- output$bias_correct_log_table %>%
mutate(across(where(is.numeric), ~replace_na(.x, replace=0)))


samples <- metadata[[samples_column]]
samplesdropped <- setdiff(x = samples, y = colnames(normalized_table)[-1])
missing_df <- data.frame(ASV=normalized_table[[feature]],
matrix(data = NA,
nrow = nrow(normalized_table),
ncol = length(samplesdropped)
)
)
colnames(missing_df) <- c(feature,samplesdropped)


group_means_df <- normalized_table[feature]
walk(uniq_comps, function(comp){
walk(group_levels, function(group_level){

group1 <- str_replace(comp, "\\((.+)\\).vs.\\((.+)\\)", "\\1")
group2 <- str_replace(comp, "\\((.+)\\).vs.\\((.+)\\)", "\\2")

mean_col <- glue("Group.Mean_({group1}).vs.({group2})")
std_col <- glue("Group.Stdev_({group1}).vs.({group2})")
mean_col <- glue("Group.Mean_({group_level})")
std_col <- glue("Group.Stdev_({group_level})")

# Samples that belong to the current group
Samples <- metadata %>%
filter(!!sym(group) %in% c(group1, group2)) %>%
filter(!!sym(group) == group_level) %>%
pull(!!sym(samples_column))

# Samples that belong to the current group that are in the normalized table
Samples <- intersect(colnames(normalized_table), Samples)

temp_df <- normalized_table %>% select(!!feature, all_of(Samples)) %>%
@@ -570,7 +589,9 @@ walk(uniq_comps, function(comp){
normalized_table <- normalized_table %>%
rowwise() %>%
mutate(All.Mean=mean(c_across(where(is.numeric))),
All.Stdev=sd(c_across(where(is.numeric))) )
All.Stdev=sd(c_across(where(is.numeric))) ) %>%
left_join(missing_df, by = feature) %>%
select(!!feature, all_of(samples), All.Mean, All.Stdev)

# Append the taxonomy table to the ncbi and stats table
merged_df <- df %>%
@@ -598,11 +619,11 @@ message("Making volcano plots...")
volcano_plots <- map(uniq_comps, function(comparison){

comp_col <- c(
glue("lfc_{comparison}"),
glue("se_{comparison}"),
glue("W_{comparison}"),
glue("p_{comparison}"),
glue("q_{comparison}"),
glue("logFC_{comparison}"),
glue("lfcSE_{comparison}"),
glue("Wstat_{comparison}"),
glue("pvalue_{comparison}"),
glue("qvalue_{comparison}"),
glue("diff_{comparison}"),
glue("passed_ss_{comparison}")
)
@@ -614,16 +635,17 @@ volcano_plots <- map(uniq_comps, function(comparison){
pattern = "(.+)_.+",
replacement = "\\1")

p <- ggplot(sub_res_df, aes(x=lfc, y=-log10(p), color=diff, label=!!sym(feature))) +
p <- ggplot(sub_res_df, aes(x=logFC, y=-log10(pvalue), color=diff, label=!!sym(feature))) +
geom_point(size=4) + geom_point(size=4) +
scale_color_manual(values=c("TRUE"="cyan2", "FALSE"="red")) +
geom_hline(yintercept = -log10(0.05), linetype = "dashed") +
ggrepel::geom_text_repel() +
labs(x="logFC", y="-log10(Pvalue)",
title = comparison, color="Significant") + publication_format



ggsave(filename = glue("{output_prefix}{comparison}_volcano{assay_suffix}.png"), plot = p, device = "png",
width = 6, height = 8, units = "in", dpi = 300, path=diff_abund_out_dir)
width = 6, height = 8, units = "in", dpi = 300, path = diff_abund_out_dir)

return(p)

@@ -664,7 +686,7 @@ boxplots <- map(res_df[[feature]], function(feature){
legend.title = element_text(face = "bold", size=12))

ggsave(filename = glue("{output_prefix}{feature}_boxplot{assay_suffix}.png"), plot = p, device = "png",
width = 8, height = 5, units = "in", dpi = 300, path =diff_abund_out_dir)
width = 8, height = 5, units = "in", dpi = 300, path = diff_abund_out_dir)

return(p)
})

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
@@ -24,20 +24,20 @@ if (params.help) {
println(" > nextflow run main.nf -resume -profile conda --accession GLDS-487 --target_region 16S --conda.qc <path/to/existing/conda/environment>")
println()
println("Required arguments:")
println("""-profile [STRING] What profile should be used to run the workflow. Options are [singularity, docker, conda, slurm].
singularity, docker and conda will run the pipeline locally using singularity, docker, and conda, respectively.
To combine profiles, pass them together separated by comma. For example, to run jobs using slurm in singularity containers use 'slurm,singularity' . """)
println("--input_file [PATH] A 4-column (single-end) or 5-column (paired-end) input file (sample_id, forward, [reverse,] paired, groups). Mandatory if a GLDS accession is not provided.")
println(" Please see the files: SE_file.csv and PE_file.csv for single-end and paired-end examples, respectively.")
println(" The sample_id column should contain unique sample ids.")
println(" The forward and reverse columns should contain the absolute or relative path to the sample's forward and reverse reads.")
println(" The paired column should be true for paired-end or anything else for single-end reads.")
println(" The groups column contains the group levels / treatments to be compared during diversity and differential abundance testing analysis.")
println("--target_region [STRING] What is the amplicon target region to be analyzed. Options are one of [16S, 18S, ITS]. Default: 16S.")
println("--trim_primers [BOOLEAN] Should primers be trimmed? true or false. Default: true.")
println(""" -profile [STRING] What profile should be used to run the workflow. Options are [singularity, docker, conda, slurm].
singularity, docker and conda will run the pipeline locally using singularity, docker, and conda, respectively.
To combine profiles, pass them together separated by comma. For example, to run jobs using slurm in singularity containers use 'slurm,singularity' . """)
println(" --input_file [PATH] A 4-column (single-end) or 5-column (paired-end) input file (sample_id, forward, [reverse,] paired, groups). Mandatory if a GLDS or OSD accession is not provided.")
println(" Please see the files: SE_file.csv and PE_file.csv for single-end and paired-end examples, respectively.")
println(" The sample_id column should contain unique sample ids.")
println(" The forward and reverse columns should contain the absolute or relative path to the sample's forward and reverse reads.")
println(" The paired column should be true for paired-end or anything else for single-end reads.")
println(" The groups column contains the group levels / treatments to be compared during diversity and differential abundance testing analysis.")
println(" --target_region [STRING] What is the amplicon target region to be analyzed. Options are one of [16S, 18S, ITS]. Default: 16S.")
println(" --trim_primers [BOOLEAN] Should primers be trimmed? true or false. Default: true.")
println("PLEASE NOTE: This workflow assumes that all your raw reads end with the same suffix. If they don't please modify your filenames to have the same suffix as shown below.")
println("--raw_R1_suffix [STRING] Raw forward reads suffix (region following the unique part of the sample names). e.g. _R1_raw.fastq.gz.")
println("--raw_R2_suffix [STRING] Raw reverse reads suffix (region following the unique part of the sample names). e.g. _R2_raw.fastq.gz.")
println(" --raw_R1_suffix [STRING] Raw forward reads suffix (region following the unique part of the sample names). e.g. _R1_raw.fastq.gz.")
println(" --raw_R2_suffix [STRING] Raw reverse reads suffix (region following the unique part of the sample names). e.g. _R2_raw.fastq.gz.")
println()
println("Cutadapt (trimming) parameters:")
println(" --F_primer [STRING] Forward primer sequence e.g. AGAGTTTGATCCTGGCTCAG. Default: empty string.")
@@ -50,8 +50,6 @@ if (params.help) {
println(" --help Print this help message and exit.")
println(" --publishDir_mode [STRING] How should nextflow publish file outputs. Options can be found here https://www.nextflow.io/docs/latest/process.html#publishdir. Default: link.")
println(" --errorStrategy [STRING] How should nextflow handle errors. Options can be found here https://www.nextflow.io/docs/latest/process.html#errorstrategy. Default: terminate")
println(" --enable_visualizations [BOOLEAN] Should ASV plots be made? true or false. If true, supply a path to the runsheet for plotting to the --runsheet option. Default: false.")
//println(" --runsheet [PATH] A 4-column file with these exact headers [Sample Name, read1_path, raw_R1_suffix, groups] for plotting. Only relevant if --enable_visualizations is true. Default: null.")
println(" --multiqc_config [PATH] Path to a custom multiqc config file. Default: config/multiqc.config.")
println()
println("Dada2 parameters passed to filterAndTrim() function:")
@@ -95,6 +93,7 @@ if (params.help) {
println(" --conda.genelab [PATH] Path to a conda environment containing genlab-utils. Default: null.")
println(" --conda.cutadapt [PATH] Path to a conda environment containing cutadapt. Default: null.")
println(" --conda.diversity [PATH] Path to a conda environment containing R packages required for diversity and differential abundance testing. Default: null.")
println()
print("Advanced users can edit the nextflow.config file for more control over default settings such container choice, number of cpus, memory per task etc.")
exit 0
}
@@ -106,7 +105,7 @@ log.info """
You have set the following parameters:
Input csv file : ${params.input_file}
GLDS_accession : ${params.accession}
GLDS or OSD accession : ${params.accession}
Amplicon target region : ${params.target_region}
Nextflow Directory publishing mode: ${params.publishDir_mode}
Trim Primers: ${params.trim_primers}
@@ -171,7 +170,7 @@ include { FASTQC as RAW_FASTQC ; MULTIQC as RAW_MULTIQC } from './modules/quali
include { CUTADAPT; COMBINE_CUTADAPT_LOGS_AND_SUMMARIZE } from './modules/quality_assessment.nf'
include { FASTQC as TRIMMED_FASTQC ; MULTIQC as TRIMMED_MULTIQC } from './modules/quality_assessment.nf'

// Cluster ASvs
// Cluster ASVs
include { RUN_R_TRIM; RUN_R_NOTRIM } from './modules/run_dada.nf'
include { ZIP_BIOM } from './modules/zip_biom.nf'

@@ -313,7 +312,7 @@ workflow {
ZIP_BIOM.out.version | mix(software_versions_ch) | set{software_versions_ch}


// Diversity, diffrential abundance testing and their corresponding visualizations
// Diversity, differential abundance testing and their corresponding visualizations
if(params.accession){
meta = Channel.of(["samples": "Sample Name",
"group" : "groups",
Original file line number Diff line number Diff line change
@@ -37,15 +37,11 @@ process ANCOMBC {
--assay-suffix '${meta.assay_suffix}' \\
--output-prefix '${meta.output_prefix}' \\
--cpus ${task.cpus}
Rscript -e "VERSION=sprintf('ANCOMBC %s', packageVersion('ANCOMBC')); \\
write(x=VERSION, file='versions.txt', append=TRUE)"
Rscript -e "VERSIONS=sprintf('tidyverse %s\\nglue %s\\nANCOMBC %s\\nhere %s\\nphyloseq %s\\nmia %s\\ntaxize %s\\nDescTools %s\\npatchwork %s\\nggrepel %s\\n', \\
Rscript -e "VERSIONS=sprintf('tidyverse %s\\nglue %s\\nANCOMBC %s\\nphyloseq %s\\nmia %s\\ntaxize %s\\nDescTools %s\\npatchwork %s\\nggrepel %s\\n', \\
packageVersion('tidyverse'), \\
packageVersion('glue'), \\
packageVersion('ANCOMBC'), \\
packageVersion('here'), \\
packageVersion('phyloseq'), \\
packageVersion('mia'), \\
packageVersion('taxize'), \\