
Commit

ALPHA RELEASE
SeverinBang committed Apr 28, 2021
1 parent 57c46d9 commit fd754e8
Showing 12 changed files with 225 additions and 133 deletions.
Binary file added .Readme.md.kate-swp
Binary file not shown.
12 changes: 10 additions & 2 deletions R/01_helper_functions.R
@@ -750,6 +750,7 @@ scale_target <- function(current_data,
uncertainties might be underestimated.")
}

# * parameter table -------------------------------------------------------
# Generate parameter table
parameter_table <- data.frame(
name = current_name,
@@ -798,6 +799,8 @@ scale_target <- function(current_data,
attr(parameter_table, "df") <- nrow(current_data) + normalize -
length(fit_result$argument)


# * out_prediction --------------------------------------------------------
# Predicted data
out_prediction <- current_data
out_prediction$sigma <- fit_result$sigma * bessel
@@ -810,6 +813,7 @@ scale_target <- function(current_data,
}


# * out_scaled ------------------------------------------------------------
values_scaled <- try(
# my_multiroot
rootSolve::multiroot(
@@ -856,7 +860,8 @@ scale_target <- function(current_data,
out_scaled$sigma <- sigmas_scaled
}

# Aligned

# * out_aligned -----------------------------------------------------------
no_initial <- length(levels_list[[1]])

# Use one datapoint per unique set of fixed parameters
@@ -885,7 +890,7 @@ scale_target <- function(current_data,
}


# Get the original data
# * out_orig_w_parameters -------------------------------------------------
out_orig_w_parameters <- current_data
for (k in seq_along(parameters)) {
effect <- names(parameters)[k]
@@ -906,6 +911,9 @@ scale_target <- function(current_data,
parameter_table
)




return(out)
}

2 changes: 1 addition & 1 deletion R/02_exported_functions.R
@@ -88,7 +88,7 @@ read_wide <- function(file, description = NULL, time = 1, header = TRUE, ...) {



#' All-in-one plot function for blotIt()
#' All-in-one plot function for blotIt3
#'
#' Takes the output of \link{align_me} and generates graphs. Which data will be
#' plotted can then be specified separately.
43 changes: 22 additions & 21 deletions R/03_align_me.R
@@ -70,33 +70,34 @@
#' alignment result containing an attribute "outputs":
#' a list of data frames
#' \describe{
#' \item{aligned}{the average_techn_repd data with the fixed effects and their
#' uncertainty, only. The result of the alignment
#' algorithm.}
#' \item{prediction}{original data with value and sigma replaced by
#' the predicted values and sigmas}
#' \item{scaled}{original data with the values transformed according
#' to the inverse model, i.e. \code{model} solved for
#' the first parameter in \code{fixed}, e.g. "ys".
#' Sigma values are computed by error propagation
#' from the inverse model equation.}
#' \item{aligned}{data.frame with the original column names plus the column
#' \code{sigma}. Each set of unique distinguish effects, i.e. biologically
#' different conditions (e.g. time point, target and treatment), has one set
#' of \code{value} and \code{sigma}. The values are the estimated true
#' values, i.e. the determined distinguish parameters. The errors in the
#' \code{sigma} column are estimated by employing the Fisher information
#' to quantify the uncertainty of the respective fit. Both the value and
#' its error are on the common scale.}
#' \item{scaled}{The original measurements scaled to common scale by applying
#' the inverse model. The errors are the result of evaluating the error
#' model and are then also scaled to common scale by use of Gaussian error
#' propagation.}
#' \item{prediction}{Original data with \code{value} replaced by the prediction
#' (evaluation of the model with the fitted parameters), and \code{sigma}
#' from the evaluation of the error model. Both are on the original scale.
#' }
#'
#' \item{original}{the original data}
#' \item{original_with_parameters}{the original data but with added columns
#' containing the estimated parameters}
#' \item{parameter}{original data augmented by parameter columns.
#' Parameters in each row correspond to the levels of
#' fixed, latent or error as passed to \code{align_me()}.
#' Used for initialization or parameter values when
#' refitting with modified model.}
#' \item{distinguish}{names of the columns containing the distinguish effects}
#' \item{scaling}{names of the columns containing the scaling effects}
#' \item{original}{The original data as passed as \code{data}.}
#' \item{original_with_parameters}{The original data but with added columns
#' containing the estimated parameters}
#' \item{distinguish}{Names of the columns containing the distinguish effects}
#' \item{scaling}{Names of the columns containing the scaling effects}
#' }
#'
#' The estimated parameters are returned by the attribute "parameters".
#' @example inst/examples/example_align_me.R
#' @seealso \link{read_wide} to read data in a wide column format and
#' get it in the right format for \code{align_me()}.
#' get it in the right format for \code{align_me}.
#'
#' @importFrom stats D
#'
43 changes: 40 additions & 3 deletions Readme.md
@@ -31,8 +31,8 @@ This reads out the provided example file, transfers it to a temporary location
The example file is structured as follows (a minimal read-in sketch follows the table)
|time| condition| ID| pAKT| pEPOR| pJAK2|...|
|--- | --- | --- | --- | ---|--- | ---|
|0 |0Uml Epo |1 |116.838271399017| 295.836863524109| |...
|5 |0Uml Epo |1 |138.808500374087| 245.229971713582| |...
|0 |0Uml Epo |1.1 |116.838271399017| 295.836863524109| |...
|5 |0Uml Epo |1.1 |138.808500374087| 245.229971713582| |...
|...|...|...|...|...|...|...
|0 |0Uml Epo |2 |94.4670174938645| |293.604761934545| ...
|5 |0Uml Epo |2 | | |398.958892340432| ...
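
A minimal read-in sketch, mirroring the example script `inst/examples/example_align_me.R` added in this commit (the example file ships with the package and is located via `system.file()`):
```r
## Locate the example file shipped with the package
example_data_file <- system.file(
  "extdata", "sim_data_wide.csv",
  package = "blotIt3"
)

## Read it in; 'description = seq_len(3)' marks the first three columns
## (time, condition, ID) as descriptive, the remaining columns as targets
example_data <- read_wide(example_data_file, description = seq_len(3))
```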
@@ -95,4 +95,41 @@ In short: we state that the entries "name", "time" and "condition" contain _real_
- `normalize_input` If set to `TRUE`, the data will be scaled before the actual scaling: the raw input is brought to a common order of magnitude before the scaling parameters are calculated (see the sketch below). This is only a computational aid to eliminate a rare failure of convergence when the values differ by many orders of magnitude. Setting this to `TRUE` only makes sense (and is only supported) for `input_scale = "linear"`.
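
For intuition only, a conceptual sketch of such a pre-normalization (this is not the actual `blotIt3` implementation; a long-format `data.frame` with `value` and `ID` columns is assumed):
```r
## Illustrative only: bring each scaling group (here: per ID) to a common
## order of magnitude by dividing by its group mean, on linear scale
pre_normalize <- function(df) {
  group_means <- tapply(df$value, df$ID, mean, na.rm = TRUE)
  df$value <- df$value / group_means[as.character(df$ID)]
  df
}
```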

The result of `align_me()` is a list with the following entries (a short access sketch follows the list):
- `aligned`
- `aligned` A `data.frame` with the columns containing the distinguish effects as well as the columns `value`, containing the "estimated true values", and `sigma`, containing the uncertainty of the fits. Both are on common scale.
- `scaled` The original data but with the values scaled to common scale and errors from the evaluation of the error model, also scaled to common scale (obeying Gaussian error propagation).
- `prediction` The original data with `value` replaced by the model prediction and `sigma` from the evaluation of the error model, both on the original scale.
- `original` Just the original data, as passed via `data`.
- `original_with_parameters` As above but with additional columns for the estimated parameters.
- `distinguish` Names of the columns defined to contain the `distinguish` effects.
- `scaling` Names of the columns defined to contain the `scaling` effects.
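
Assuming the returned object exposes these entries as a named list (the roxygen documentation also mentions an `outputs` attribute), a minimal access sketch could look like this, with `out` being the return value of `align_me()` as in the example script:
```r
## Hypothetical access to the individual result tables
aligned_data <- out$aligned   # estimated true values on common scale
scaled_data  <- out$scaled    # measurements scaled to common scale

## If the tables are instead stored in the "outputs" attribute:
# aligned_data <- attr(out, "outputs")$aligned

head(aligned_data)
```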

### Plot Data
`blotIt3` provides _one_ plotting function, `plot_align_me()`. Which data set will be plotted can be specified per parameter:
```r
plot_align_me(
out_list = blotIt_test3,
plot_points = "aligned",
plot_line = "aligned",
spline = FALSE,
scales = "free",
align_zeros = TRUE,
plot_caption = TRUE,
ncol = NULL,
my_colors = NULL,
duplicate_zero_points = FALSE,
my_order = NULL
)
```
The parameters again are:
- `out_list` the result of `align_me()`
- `plot_points` It can be specified separately which data set should be plotted as dots and which as a line. Here the data set for the dots is defined. It can be either of `original`, `scaled`, `prediction` or `aligned`.
- `plot_line` Same as above but for the line.
- `spline` Logical parameter; if set to `TRUE`, the plotted line will not be straight lines connecting points but a smooth spline.
- `scales` String passed as `scales` argument to `facet_wrap`.
- `align_zeros` Logical parameter; if set to `TRUE`, the zero ticks will be aligned throughout all subplots, although the axes can have different scales.
- `plot_caption` Logical parameter, indicating if a caption describing which data is plotted should be added to the plot.
- `ncol` Numerical value passed as `ncol` argument to `facet_wrap`.
- `my_colors` List of custom color values as taken by the `values` argument of `scale_color_manual` for `ggplot` objects; if not set, the default `ggplot` color scheme is used.
- `duplicate_zero_points` Logical; if set to `TRUE`, all zero time points are assumed to belong to the first condition, e.g. when the different conditions consist of treatments added at time zero. Default is `FALSE`.
- `my_order` Optional list of target names in the custom order that will be used for faceting.
- `...` Logical expression used for subsetting the data frames, e.g. `name == "pAKT" & time < 60` (see the usage sketch below).
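
A usage sketch combining these parameters with the subsetting expression from above; `out` is assumed to hold the result of `align_me()`:
```r
## Plot the aligned data as points and connecting lines, restricted to
## pAKT measurements before 60 minutes via the '...' subsetting expression
plot_align_me(
  out_list = out,
  plot_points = "aligned",
  plot_line = "aligned",
  name == "pAKT" & time < 60
)
```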
23 changes: 23 additions & 0 deletions inst/examples/example_align_me.R
@@ -0,0 +1,23 @@
## Get example dataset
example_data_file <- system.file(
"extdata", "sim_data_wide.csv",
package = "blotIt3"
)

## read in example data by use of 'read_wide()'
example_data <- read_wide(example_data_file, description = seq_len(3))

## execute align_me
out <- align_me(
data = example_data,
model = "yi / sj",
error_model = "value * sigmaR",
distinguish = yi ~ name + time + condition,
scaling = sj ~ name + ID,
error = sigmaR ~ name + 1,
input_scale = "linear",
normalize = TRUE,
average_techn_rep = FALSE,
verbose = FALSE,
normalize_input = TRUE
)
2 changes: 1 addition & 1 deletion inst/extdata/.~lock.sim_data_wide.csv#
@@ -1 +1 @@
,severin,baum,27.04.2021 20:20,file:///home/severin/.config/libreoffice/4;
,severin,baum,28.04.2021 10:31,file:///home/severin/.config/libreoffice/4;
14 changes: 7 additions & 7 deletions inst/extdata/sim_data_wide.csv
@@ -1,11 +1,11 @@
time,condition,ID,pAKT,pEPOR,pJAK2,pMEK,ppERK,pSTAT5,tAKT,tEPOR,tERK,tJAK2,tMEK,tSTAT5
0,0Uml Epo,1,116.838271399017,295.836863524109,,,,,,,,,,
5,0Uml Epo,1,138.808500374087,245.229971713582,,,,,,,,,,
10,0Uml Epo,1,99.0906786883278,282.267681770956,,,,,,,,,,
20,0Uml Epo,1,106.685837903319,268.804162583856,,,,,,,,,,
30,0Uml Epo,1,115.028050064248,340.926275016381,,,,,,,,,,
60,0Uml Epo,1,111.913230066738,284.478960254812,,,,,,,,,,
240,0Uml Epo,1,132.566184846348,213.441067871856,,,,,,,,,,
0,0Uml Epo,1.1,116.838271399017,295.836863524109,,,,,,,,,,
5,0Uml Epo,1.1,138.808500374087,245.229971713582,,,,,,,,,,
10,0Uml Epo,1.1,99.0906786883278,282.267681770956,,,,,,,,,,
20,0Uml Epo,1.1,106.685837903319,268.804162583856,,,,,,,,,,
30,0Uml Epo,1.1,115.028050064248,340.926275016381,,,,,,,,,,
60,0Uml Epo,1.1,111.913230066738,284.478960254812,,,,,,,,,,
240,0Uml Epo,1.1,132.566184846348,213.441067871856,,,,,,,,,,
0,0Uml Epo,2,94.4670174938645,,293.604761934545,80.3605337171492,138.138177381568,3.97231585579595,776.283100190451,1078.75252380959,856.00478247304,678.443168410604,815.851106681355,892.59419078509
5,0Uml Epo,2,,,398.958892340432,106.402751987615,130.794465246105,5.05338698795638,839.075540504417,1085.82146322883,880.155347344083,771.353527473159,694.670584340057,759.818709231466
10,0Uml Epo,2,,,311.149898510556,62.3212348345122,131.894013544118,7.21999169403525,836.022510489417,1163.23336505246,644.425468757479,764.195982508808,674.523526828773,832.586720317394
Expand Down
65 changes: 44 additions & 21 deletions man/align_me.Rd


2 changes: 1 addition & 1 deletion man/plot_align_me.Rd


76 changes: 0 additions & 76 deletions tests/testthat/test_exported_functions.R
@@ -34,79 +34,3 @@ test_that("read_wide works properly", {
})


# split_for_scaling() -----------------------------------------------------

test_that("split_for_scaling()", {
sim_data_wide_file <- system.file(
"extdata", "sim_data_wide.csv",
package = "blotIt3"
)
sim_data_long <- read_wide(sim_data_wide_file, description = seq_len(3))

effects_values <- list(
distinguish_values = c("name", "time", "condition"),
scaling_values = c("name", "ID"),
error_values = "name"
)

effects_values_1 <- list(
distinguish_values = c("name", "time", "condition"),
scaling_values = c("name"),
error_values = "name"
)

effects_values_2 <- list(
distinguish_values = c("name", "time", "condition"),
scaling_values = c("ID"),
error_values = "name"
)

expect_equal(
length(
split_for_scaling(
data = sim_data_long,
effects_values,
input_scale = "linear",
normalize_input = TRUE
)
),
14
)

expect_equal(
length(
split_for_scaling(
data = sim_data_long,
effects_values_1,
input_scale = "linear",
normalize_input = TRUE
)
),
nrow(unique(sim_data_long["name"]))
)

expect_equal(
length(
split_for_scaling(
data = sim_data_long,
effects_values_2,
input_scale = "linear",
normalize_input = TRUE
)
),
1
)

expect_warning(
split_for_scaling(
data = sim_data_long,
effects_values_2,
input_scale = "log2",
normalize_input = TRUE
),
paste0(
"'normalize_input == TRUE' is only competable with ",
"'input_scale == linear'. 'normalize_input' was ignored."
)
)
})

