diff --git a/.Rbuildignore b/.Rbuildignore
index 7884524..dd595a8 100644
--- a/.Rbuildignore
+++ b/.Rbuildignore
@@ -1,3 +1,6 @@
+^css$
+^js$
+^index\.html$
 ^.*\.Rproj$
 ^\.Rproj\.user$
 ^CONVENTIONS\.md$
diff --git a/DESCRIPTION b/DESCRIPTION
index fd5d38f..8835590 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -19,6 +19,7 @@ Imports:
     graphics (>= 3.2.3),
     keras (>= 2.2.4),
     purrr (>= 0.2.4),
+    R.utils (>= 2.7.0),
     stats (>= 3.2.3),
     utils
 Suggests:
@@ -30,6 +31,6 @@ VignetteBuilder: knitr
 Encoding: UTF-8
 LazyData: true
 Roxygen: list(markdown = TRUE)
-RoxygenNote: 6.1.0
+RoxygenNote: 6.1.1
 SystemRequirements: Python (>= 2.7); keras (>= 2.1)
diff --git a/NAMESPACE b/NAMESPACE
index 1c9e050..d11ea41 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -90,5 +90,6 @@ export(save_as)
 export(sparsity)
 export(train)
 export(variational_block)
+import(R.utils)
 import(graphics)
 import(purrr)
diff --git a/R/00classes.R b/R/00classes.R
index c391451..a650c6c 100644
--- a/R/00classes.R
+++ b/R/00classes.R
@@ -27,5 +27,3 @@ ruta_loss_named <- "ruta_loss_named"
 ruta_loss_contraction <- "ruta_loss_contraction"
 ruta_loss_correntropy <- "ruta_loss_correntropy"
 ruta_loss_variational <- "ruta_loss_variational"
-
-ruta_arg <- "ruta_arg"
diff --git a/R/args.R b/R/args.R
deleted file mode 100644
index 620e118..0000000
--- a/R/args.R
+++ /dev/null
@@ -1,208 +0,0 @@
-# %to% operator. Defines intervals for argument checks
-`%to%` <- function(min, max) list(min = min, max = max)
-
-# Constructor for argument descriptors
-arg_constructor <- function(..., .default = NULL, .required = FALSE) {
-  # A function which returns a default value for a parameter. If the `get`
-  # argument is set to TRUE, a machine-readable description of
-  # the parameter is returned instead.
-  structure(function(default = .default, get = FALSE) {
-    if (get) {
-      list(
-        classes = list(...),
-        required = .required
-      )
-    } else {
-      default
-    }
-  }, class = ruta_arg)
-}
-
-as_arg <- function(x) UseMethod("as_arg", x)
-as_arg.ruta_arg <- function(x) x
-
-# Any value can be used to generate an argument descriptor for its class
-as_arg.default <- function(x) {
-  if (is.name(x) && x == "")
-    arg_constructor(.required = TRUE)
-  else {
-    args <- list(NULL)
-    names(args) <- class(x)
-    do.call(arg_constructor, c(args, list(.default = x)))
-  }
-}
-
-# Argument descriptor for a neural network object
-arg_network <- arg_constructor(ruta_network = NULL, numeric = NULL, .required = TRUE)
-
-# Argument descriptor for a loss function
-arg_loss <- arg_constructor(
-  ruta_loss = NULL,
-  character = list_keras_objects("loss"),
-  .default = "mean_squared_error"
-)
-
-arg_activation <- arg_constructor(
-  character = list_keras_objects("activation"),
-  .default = "linear"
-)
-
-which_functions <- function() as.character(lsf.str("package:ruta"))
-
-which_args <- function(f) {
-  # Gets formal arguments for the function
-  defaults <- formals(f)
-  get_checks(defaults)
-}
-
-get_checks <- function(formal_args) {
-  formal_args$... <- NULL
-
-  checks <- lapply(formal_args, function(arg) {
-    # Retrieves the argument descriptor and calls it
-    if (class(arg) == "call") {
-      get(as.character(arg))(get = TRUE)
-    } else {
-      as_arg(arg)(get = TRUE)
-    }
-  })
-
-  names(checks) <- names(formal_args)
-  # Return descriptions for each argument
-  structure(checks, class = "ruta_args")
-}
-
-print.ruta_args <- function(checks) {
-  cat("Usage: \n")
-  for (arg in names(checks)) {
-    types <- checks[[arg]]$classes
-    str_types <- if (length(types) == 0)
-      "unknown"
-    else
-      paste0(names(types), collapse = " or ")
-
-    # values <- if (!is.null(checks[[arg]]$classes))
-    #   c("allowed values {\n  ", paste0(checks[[arg]]$values, collapse = "\n  "), "\n }, ")
-    # else
-    #   ""
-
-    required <- if (checks[[arg]]$required) "" else "not "
-
-    cat(
-      " ", arg, ": type ", str_types, ", ", required, "required\n",
-      sep = ""
-    )
-  }
-}
-
-# check_args_internal <- function() {
-#   # Which function was called and with what arguments?
-#   call_l <- as.list(sys.call(sys.parent(1)))
-#   # return(args(as.function(call_l)))
-#   # What are the argument descriptions for this function?
-#   checks <- which_args(as.character(call_l[[1]]))
-#   #print(call_l)
-#
-#   args <- call_l[-1]
-#   # print(args) #############
-#   # print(checks) ############
-#
-#   check_args(args, checks)
-# }
-
-# formal_args - call to formals()
-# arguments - call to environment()
-check_args <- function(formal_args, arguments) {
-  checks <- get_checks(formal_args)
-  validate_call(as.list(arguments), checks)
-}
-
-validate_call <- function(args, checks) {
-  # Detect positional arguments
-  unnamed_args <- if (is.null(names(args))) seq_along(args) else which(names(args) == "")
-  # Detect remaining named arguments
-  remaining_args <- setdiff(names(checks), names(args)[-unnamed_args])
-  # Pair positional arguments with missing named arguments
-  names(args)[unnamed_args] <- remaining_args[1:length(unnamed_args)]
-
-  # Are there formals which are not provided as arguments?
-  missing_args <- if (length(remaining_args) > length(unnamed_args))
-    remaining_args[(length(unnamed_args) + 1):length(remaining_args)]
-  else
-    character(0)
-
-  for (name in c(names(args), missing_args)) {
-    check <- checks[[name]]
-
-    if (!is.null(check)) {
-      val <- eval(args[[name]])
-      validate_arg(name, val, check)
-    }
-  }
-
-  invisible(TRUE)
-}
-
-validate_arg <- function(name, val, check) {
-  if ("call" %in% class(val)) {
-    val <- call(val)
-  }
-
-  # Check type: mandatory argument
-  if (is.null(val)) {
-    if (check$required) {
-      stop(paste0(name, " is a required argument"), call. = F)
-    } else {
-      # nothing else to check if argument was not provided and not required
-      return()
-    }
-  }
-
-  # Check type: class
-  if (length(check$classes) > 0) {
-    identified_class <- intersect(class(val), names(check$classes))
-
-    if (length(identified_class) == 0) {
-      stop(paste0(name, " does not have any allowed class (", paste(names(check$classes), collapse = ", "), "), found class: ", paste(class(val), collapse = " ")), call. = F)
-    }
-
-    # Check type: values
-    messages <- list()
-    for (klass in identified_class) {
-      values <- check$classes[[klass]]
-
-      if (!is.null(values)) {
-        if (is.atomic(values)) {
-          if (val %in% values) {
-            return(invisible(TRUE))
-          } else {
-            messages <- append(messages, paste0(name, " does not equal any allowed value (", paste(values, collapse = ", "), ")"))
-          }
-        }
-        if (is.list(values)) {
-          check_min <- is.null(values$min) || val >= values$min
-          check_max <- is.null(values$max) || val <= values$max
-
-          if (check_min && check_max) {
-            return(invisible(TRUE))
-          } else {
-            messages <- append(messages, paste0(name, " is outside allowed range (", paste(values$min %||% "-infinity", values$max %||% "infinity", sep = "-"), ")"))
-          }
-        }
-      }
-    }
-
-    if (length(messages) > 0) {
-      stop("Couldn't find a matching allowed value for ", name, ". Warnings:\n", paste(messages, collapse = "\n"))
-    }
-  }
-}
-
-
-.test_function <- function(network = arg_network(), loss = arg_loss(), activation = arg_activation(), weight = 2e-4) {
-  check_args(formals(), environment())
-}
-.test_function2 <- function(network = arg_network(), loss = arg_loss("binary_crossentropy"), activation = arg_activation("elu"), weight = 2e-4) {
-  check_args(formals(), environment())
-}
-
diff --git a/R/autoencoder.R b/R/autoencoder.R
index 55c447b..ab1f26a 100644
--- a/R/autoencoder.R
+++ b/R/autoencoder.R
@@ -50,8 +50,7 @@ new_autoencoder <- function(network, loss, extra_class = NULL) {
 #' )
 #'
 #' @export
-autoencoder <- function(network = arg_network(), loss = arg_loss("mean_squared_error")) {
-  check_args(formals(), environment())
+autoencoder <- function(network, loss = "mean_squared_error") {
   new_autoencoder(network, loss)
 }
 
@@ -208,7 +207,7 @@ train.ruta_autoencoder <- function(
   data,
   validation_data = NULL,
   metrics = NULL,
-  epochs = arg_positive(20),
+  epochs = 20,
   optimizer = keras::optimizer_rmsprop(),
   ...) {
   learner$input_shape <- dim(data)[-1]
diff --git a/R/autoencoder_orthonormal.R b/R/autoencoder_orthonormal.R
deleted file mode 100644
index 61b6a55..0000000
--- a/R/autoencoder_orthonormal.R
+++ /dev/null
@@ -1,112 +0,0 @@
-#' Create a contractive autoencoder
-#'
-#' A contractive autoencoder adds a penalty term to the loss
-#' function of a basic autoencoder which attempts to induce a contraction of
-#' data in the latent space.
-#'
-#' @param network Layer construct of class \code{"ruta_network"}
-#' @param loss Character string specifying the reconstruction error part of the loss function
-#' @param weight Weight assigned to the contractive loss
-#'
-#' @return A construct of class \code{"ruta_autoencoder"}
-#'
-#' @references
-#' - [A practical tutorial on autoencoders for nonlinear feature fusion](https://arxiv.org/abs/1801.01586)
-#'
-#' @family autoencoder variants
-#' @import purrr
-#' @export
-autoencoder_orthonormal <- function(network, loss = "mean_squared_error", weight = 1e-3) {
-  autoencoder(network, loss) %>%
-    make_orthonormal(weight)
-}
-
-#' Contractive loss
-#'
-#' @description This is a wrapper for a loss which induces a contraction in the
-#' latent space.
-#'
-#' @param reconstruction_loss Original reconstruction error to be combined with the
-#' contractive loss (e.g. `"binary_crossentropy"`)
-#' @param weight Weight assigned to the contractive loss
-#' @return A loss object which can be converted into a Keras loss
-#'
-#' @seealso `\link{autoencoder_contractive}`
-#' @family loss functions
-#' @export
-orthonormal_loss <- function(reconstruction_loss = "mean_squared_error", weight = 1e-3) {
-  structure(
-    list(
-      reconstruction_loss = reconstruction_loss,
-      weight = weight
-    ),
-    class = c("ruta_loss_orthonormal", ruta_loss)
-  )
-}
-
-#' Add contractive behavior to any autoencoder
-#'
-#' @description Converts an autoencoder into a contractive one by assigning a
-#' contractive loss to it
-#'
-#' @param learner The \code{"ruta_autoencoder"} object
-#' @param weight Weight assigned to the contractive loss
-#'
-#' @return An autoencoder object which contains the contractive loss
-#'
-#' @seealso `\link{autoencoder_contractive}`
-#' @export
-make_orthonormal <- function(learner, weight = 2e-4) {
-  if (!is_orthonormal(learner)) {
-    learner$loss = orthonormal_loss(learner$loss, weight)
-  }
-
-  learner
-}
-
-#' Detect whether an autoencoder is contractive
-#' @param learner A \code{"ruta_autoencoder"} object
-#' @return Logical value indicating if a contractive loss was found
-#' @seealso `\link{contraction}`, `\link{autoencoder_contractive}`, `\link{make_contractive}`
-#' @export
-is_orthonormal <- function(learner) {
-  "ruta_loss_orthonormal" %in% class(learner$loss)
-}
-
-#' @rdname to_keras.ruta_loss_named
-#' @param learner The learner object including the keras model which will use the loss
-#' function
-#' @references
-#' - Contractive loss: \href{https://wiseodd.github.io/techblog/2016/12/05/contractive-autoencoder/}{Deriving Contractive Autoencoder and Implementing it in Keras}
-#' @import purrr
-#' @export
-to_keras.ruta_loss_orthonormal <- function(x, learner, ...) {
-  rec_err <- x$reconstruction_loss %>% as_loss() %>% to_keras()
-
-  keras_model <- learner$models$autoencoder
-  input_x <- keras::get_layer(keras_model, index = 0)$output
-  encoding_h <- keras::get_layer(keras_model, name = "encoding")$output
-  encoding_len <- learner$network[[learner$network %@% "encoding"]]$units
-  # Identity matrix
-  # shape = (encoding_size, encoding_size)
-  id <- keras::k_eye(size = as.integer(encoding_len))
-
-  # contractive loss
-  orthonormal <- function(y_true, y_pred) {
-    reconstruction <- rec_err(y_true, y_pred)
-
-    # Compute the Jacobian matrix of the encoding with respect to the inputs
-    # shape = (batch_size, encoding_size, input_size)
-    reg <- jacobian(encoding_h, input_x) %>%
-      # Matrix product Jf Jf^T
-      # shape = (batch_size, encoding_size, encoding_size)
-      keras::k_batch_dot(., ., axes = 3) %>%
-      # (Jf Jf^T) - I
-      `-`(keras::k_expand_dims(id, axis = 1)) %>%
-      # Compute square values and sum the matrix for each instance in the batch
-      keras::k_square() %>%
-      keras::k_sum(axis = list(2, 3))
-
-    reconstruction + x$weight * reg
-  }
-}
diff --git a/R/derivative.R b/R/derivative.R
index 6c34a43..4754ab1 100644
--- a/R/derivative.R
+++ b/R/derivative.R
@@ -1,3 +1,5 @@
+# Functions in this file are no longer used, and only kept for backward compatibility.
+
 # This file defines the derivatives of activation functions available in Keras
 # (except softmax). They are not exported since this computation could change
 # in the future, in case Keras can automatically calculate derivatives for activations.
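With the argument-descriptor machinery (`arg_network()`, `arg_loss()`, `check_args()`) removed, the constructors above take plain R defaults. A minimal sketch of the resulting call pattern (the network layout below is illustrative, not taken from this diff):

```r
library(ruta)

# Plain defaults now stand in for the removed descriptors:
# autoencoder(network, loss = "mean_squared_error")
network <- input() + dense(36, "tanh") + output("sigmoid")
learner <- autoencoder(network, loss = "binary_crossentropy")
```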
diff --git a/R/evaluate.R b/R/evaluate.R
index bdf90f7..246df5f 100644
--- a/R/evaluate.R
+++ b/R/evaluate.R
@@ -28,6 +28,7 @@ evaluation_metric <- function(evaluate_f) function(learner, data, ...) {
 #'
 #' @param learner A trained learner object
 #' @param data Test data for evaluation
+#' @param ... Additional parameters passed to `keras::\link[keras]{evaluate}`.
 #' @return A named list with the autoencoder training loss and evaluation metric for the
 #' given data
 #' @examples
diff --git a/R/filter.R b/R/filter.R
index 6e6dd04..c6fadbb 100644
--- a/R/filter.R
+++ b/R/filter.R
@@ -162,9 +162,16 @@ apply_filter.ruta_noise_cauchy <- function(filter, data, ...) {
   data + term
 }
 
+#' Get a Keras generator from a data filter
+#'
+#' Noise filters can be applied during training (in denoising autoencoders),
+#' for this a generator is used to get data batches.
+#'
 #' @import R.utils
-#' @param data
-#' @param batch_size
+#' @param x Filter object
+#' @param data Matrix where the filter will be applied
+#' @param batch_size Size of the sample (for the training stage)
+#' @param ... Additional parameters, currently unused
 to_keras.ruta_filter <- function(x, data, batch_size, ...) {
   limit <- dim(data)[1]
   order <- sample.int(limit)
@@ -176,7 +183,7 @@ to_keras.ruta_filter <- function(x, data, batch_size, ...) {
       start <- 1
     } else {
       idx <- order[start:(start + batch_size - 1)]
-      start <- start + batch_size
+      # start <- start + batch_size
     }
     original <- R.utils::extract(data, "1" = idx)
     noisy <- apply_filter(x, original)
diff --git a/R/jacobian.R b/R/jacobian.R
index 8d9430e..537b253 100644
--- a/R/jacobian.R
+++ b/R/jacobian.R
@@ -9,5 +9,3 @@ gradients <- reticulate::import("tensorflow.python.ops.parallel_for.gradients",
 jacobian <- function(y, x) {
   gradients$batch_jacobian(y, x)
 }
-
-# jacobian <- gradients$batch_jacobian
diff --git a/R/layers.R b/R/layers.R
index a8cadd4..6010d3e 100644
--- a/R/layers.R
+++ b/R/layers.R
@@ -70,7 +70,6 @@ to_keras.ruta_layer_input <- function(x, input_shape, ...) {
 #' @family neural layers
 #' @export
 output <- function(activation = "linear") {
-  check_args(formals(), environment())
   make_atomic_network(ruta_layer_dense, activation = activation)
 }
 
@@ -84,8 +83,7 @@ output <- function(activation = "linear") {
 #' dense(30, "tanh")
 #' @family neural layers
 #' @export
-dense <- function(units, activation = arg_activation("linear")) {
-  check_args(formals(), environment())
+dense <- function(units, activation = "linear") {
   make_atomic_network(ruta_layer_dense, units = units, activation = activation)
 }
diff --git a/R/penalties.R b/R/penalties.R
deleted file mode 100644
index 852195e..0000000
--- a/R/penalties.R
+++ /dev/null
@@ -1,65 +0,0 @@
-add_custom_penalty <- function(learner, weight, func) {
-  old_loss <- learner$loss
-  learner$loss <- structure(list(
-    reconstruction = old_loss,
-    f = func
-  ), class = "ruta_loss_penalty")
-  learner
-}
-
-to_keras.ruta_loss_penalty <- function(loss, learner) {
-  old_loss <- loss$reconstruction %>% as_loss() %>% to_keras()
-  penalty <- loss$f(learner)
-
-  function(y_true, y_pred) {
-    rec_err <- old_loss(y_true, y_pred)
-    rec_err + weight * penalty(y_true, y_pred)
-  }
-}
-
-add_penalty_entropy <- function(learner, weight = 1) {
-  encoding_layer <- learner$network[[learner$network %@% "encoding"]]
-
-  if (!(encoding_layer$activation %in% c("tanh", "sigmoid", "softsign", "hard_sigmoid"))) {
-    message("This regularization is better defined for bounded activation functions (with an infimum and a supremum) in the encoding layer. Performance could be affected by this.")
-  }
-
-  learner$network[[learner$network %@% "encoding"]]$activity_regularizer <- penalty_entropy(weight)
-
-  learner
-}
-
-penalty_entropy <- function(weight) {
-  structure(list(weight = weight), class = "ruta_penalty_entropy")
-}
-
-to_keras.ruta_penalty_entropy <- function(x, activation) {
-  # This regularization only makes sense for bounded activation functions, but we
-  # adapt it to any other activation by defining high value as > 1 and low value
-  # as < -1
-  low_v = switch(activation,
-    sigmoid = 0,
-    hard_sigmoid = 0,
-    relu = 0,
-    softplus = 0,
-    selu = - 1.7581,
-    -1
-  )
-  high_v = 1
-
-
-  function(observed_activations) {
-    observed <- observed_activations %>%
-      keras::k_mean(axis = 1) %>%
-      keras::k_clip(low_v + keras::k_epsilon(), high_v - keras::k_epsilon())
-
-    # rescale means: what we want to calculate is the probability of a high value
-    q_high <- (observed - low_v) / (high_v - low_v)
-
-    # Max entropy = Min (1 - entropy)
-    1 + keras::k_mean(
-      q_high * keras::k_log(q_high) +
-      (1 - q_high) * keras::k_log(1 - q_high)
-    )
-  }
-}
diff --git a/R/tied_layer.R b/R/tied_layer.R
deleted file mode 100644
index 981acb4..0000000
--- a/R/tied_layer.R
+++ /dev/null
@@ -1,87 +0,0 @@
-DenseTied <- R6::R6Class("DenseTied",
-  inherit = KerasLayer,
-
-  public = list(
-    output_dim = NULL,
-    tied_to = NULL,
-    tied_weights = NULL,
-    activation = NULL,
-
-    initialize = function(tied_to, activation) {
-      self$tied_to <- tied_to
-      self$tied_weights <- tied_to$weights
-      self$output_dim <- self$tied_weights[[1]]$shape[0]
-      self$activation <- activation
-      if (is.character(self$activation)) {
-        self$activation <- get(paste0("activation_", self$activation), pos = "package:keras")
-      }
-    },
-
-    build = function(input_shape) {
-      # self$kernel <- self$add_weight(
-      #   name = 'kernel',
-      #   shape = list(input_shape[[2]], self$output_dim),
-      #   initializer = initializer_random_normal(),
-      #   trainable = TRUE
-      # )
-    },
-
-    call = function(x, mask = NULL) {
-      # k_dot(x, self$kernel)
-      # Return the transpose layer mapping using the explicit weight matrices
-      output <- keras::k_dot(x - self$tied_weights[[2]], keras::k_transpose(self$tied_weights[[1]]))
-      if (!is.null(self$activation)) {
-        output <- self$activation(output)
-      }
-      output
-    },
-
-    compute_output_shape = function(input_shape) {
-      list(input_shape[[1]], self$output_dim)
-    }
-  )
-)
-
-layer_dense_tied <- function(object, tied_to, name = NULL, activation = NULL) {
-  create_layer(DenseTied, object, list(
-    name = name,
-    tied_to = tied_to,
-    activation = activation,
-    trainable = FALSE
-  ))
-}
-
-
-mytest <- function() {
-  test <- keras_model_sequential()
-  encoder1 <-
-    layer_dense(
-      test,
-      units = 100,
-      input_shape = list(784)
-    )
-  encoder2 <-
-    layer_dense(
-      encoder1,
-      units = 36,
-      activation = "hard_sigmoid",
-      name = "encoding"
-    )
-  decoder2 <-
-    layer_dense_tied(encoder2, tied_to = encoder2)
-  decoder1 <-
-    layer_dense_tied(decoder2, tied_to = encoder1, activation = "hard_sigmoid")
-
-  contractive_loss <-
-    contraction("binary_crossentropy") %>% to_keras.ruta_loss_contraction(list(models = list(autoencoder = decoder)))
-  compile(
-    decoder1,
-    optimizer = "rmsprop",
-    loss = contractive_loss,
-    metrics = list("mean_squared_error")
-  )
-  fit(decoder, x_train, x_train, batch_size = 32)
-  decode <- predict(decoder, x_test)
-
-  plot_sample(x_test, baseline$decode, decode, 11:20)
-}
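The new `R.utils` dependency exists for the `R.utils::extract(data, "1" = idx)` call in `to_keras.ruta_filter()` above: unlike `[`, `extract()` can subset the first dimension of an array with any number of dimensions. A small sketch of that behavior on a toy array (the array itself is an assumption, not package data):

```r
library(R.utils)

# A 4x3x2 array standing in for a data tensor
x <- array(seq_len(24), dim = c(4, 3, 2))

# Select observations 1 and 3 along dimension "1", keeping the remaining
# dimensions intact; equivalent to x[c(1, 3), , ] here, but it works
# without knowing how many dimensions x has
batch <- R.utils::extract(x, "1" = c(1, 3))
dim(batch)  # 2 3 2
```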
diff --git a/cran-comments.md b/cran-comments.md
index fc3d806..6ed6eca 100644
--- a/cran-comments.md
+++ b/cran-comments.md
@@ -1,21 +1,8 @@
-## Resubmission
-
-This is a resubmission. Changes from the original:
-
-- Removed file left by accident
-- Fixed title (omitted redundant "for R")
-- Improved description (added implemented neural networks and a reference).
-- Replaced \dontrun by \donttest in examples.
-- Prevented writing to the user's filespace by default in functions/examples.
-
 ## Test environments
-* local Linux Mint install, R 3.4.4
+* local Arch Linux install, R 3.5.3
 * ubuntu 14.04 (on travis-ci), R 3.5.0 and devel
 * win-builder (devel and release)
 
 ## R CMD check results
-0 errors | 0 warnings | 1 note
-
-* This is a new release.
-* Some types of auto-encoders are considered misspelled.
+0 errors | 0 warnings | 0 notes
diff --git a/man/evaluate.Rd b/man/evaluate.Rd
index 26da28c..4fe5477 100644
--- a/man/evaluate.Rd
+++ b/man/evaluate.Rd
@@ -8,20 +8,22 @@
 \alias{evaluate_kullback_leibler_divergence}
 \title{Evaluation metrics}
 \usage{
-evaluate_mean_squared_error(learner, data)
+evaluate_mean_squared_error(learner, data, ...)
 
-evaluate_mean_absolute_error(learner, data)
+evaluate_mean_absolute_error(learner, data, ...)
 
-evaluate_binary_crossentropy(learner, data)
+evaluate_binary_crossentropy(learner, data, ...)
 
-evaluate_binary_accuracy(learner, data)
+evaluate_binary_accuracy(learner, data, ...)
 
-evaluate_kullback_leibler_divergence(learner, data)
+evaluate_kullback_leibler_divergence(learner, data, ...)
 }
 \arguments{
 \item{learner}{A trained learner object}
 
 \item{data}{Test data for evaluation}
+
+\item{...}{Additional parameters passed to \code{keras::\link[keras]{evaluate}}.}
 }
 \value{
 A named list with the autoencoder training loss and evaluation metric for the
diff --git a/man/to_keras.ruta_filter.Rd b/man/to_keras.ruta_filter.Rd
new file mode 100644
index 0000000..5455ba0
--- /dev/null
+++ b/man/to_keras.ruta_filter.Rd
@@ -0,0 +1,21 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/filter.R
+\name{to_keras.ruta_filter}
+\alias{to_keras.ruta_filter}
+\title{Get a Keras generator from a data filter}
+\usage{
+\method{to_keras}{ruta_filter}(x, data, batch_size, ...)
+}
+\arguments{
+\item{x}{Filter object}
+
+\item{data}{Matrix where the filter will be applied}
+
+\item{batch_size}{Size of the sample (for the training stage)}
+
+\item{...}{Additional parameters, currently unused}
+}
+\description{
+Noise filters can be applied during training (in denoising autoencoders),
+for this a generator is used to get data batches.
+}
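The `...` added to the evaluation metrics above is forwarded to `keras::evaluate()`, so Keras-side options can now be passed through. A hedged usage sketch, where `learner` and `x_test` are placeholders for a trained ruta model and a test matrix:

```r
# batch_size is consumed by keras::evaluate(), not by ruta itself
evaluate_mean_squared_error(learner, x_test, batch_size = 128)
```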
diff --git a/tests/testthat/test-args.R b/tests/testthat/test-args.R
deleted file mode 100644
index 52c551d..0000000
--- a/tests/testthat/test-args.R
+++ /dev/null
@@ -1,28 +0,0 @@
-context("test-args.R")
-
-test_that("args can be validated", {
-  expect_silent(.test_function(network = input() + output()))
-  expect_silent(.test_function(network = input() + output(), loss = "binary_crossentropy"))
-  expect_silent(.test_function(network = input() + output(), loss = "binary_crossentropy", activation = "elu"))
-  expect_silent(.test_function(network = input() + output(), loss = "binary_crossentropy", activation = "elu", weight = 1))
-  expect_silent(.test_function2(network = input() + output()))
-  expect_silent(.test_function2(network = input() + output(), "binary_crossentropy"))
-  expect_silent(.test_function2(network = input() + output(), "binary_crossentropy", "elu"))
-  expect_silent(.test_function2(network = input() + output(), "binary_crossentropy", "elu", 1))
-  expect_silent(purrr::map(list(input() + output()), .test_function))
-  expect_silent(purrr::map(list(input() + output()), .test_function2))
-  expect_silent(purrr::map(list(input() + output()), ~ .test_function(.)))
-  expect_silent(purrr::map(list(input() + output()), ~ .test_function2(.)))
-
-  expect_is(purrr::map(list(input, output), which_args), "list")
-  expect_is(purrr::map(list(input, output), ~ which_args(.)), "list")
-})
-
-test_that("args can error", {
-  expect_error(.test_function())
-  expect_error(.test_function(network = input() + output(), loss = 3))
-  expect_error(.test_function(network = input() + output(), loss = "hinge", activation = "unknown"))
-  expect_error(.test_function2())
-  expect_error(.test_function2(input() + output(), 3))
-  expect_error(.test_function2(input() + output(), "hinge", "unknown"))
-})
diff --git a/vignettes/troubleshooting.Rmd b/vignettes/troubleshooting.Rmd
index 63e2f41..f8f39ed 100644
--- a/vignettes/troubleshooting.Rmd
+++ b/vignettes/troubleshooting.Rmd
@@ -2,7 +2,7 @@
 title: "Troubleshooting"
 output: rmarkdown::html_vignette
 vignette: >
-  %\VignetteIndexEntry{Building neural architectures}
+  %\VignetteIndexEntry{Troubleshooting}
   %\VignetteEngine{knitr::rmarkdown}
   %\VignetteEncoding{UTF-8}
 ---
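For context on the new `to_keras.ruta_filter()` generator documented above: in a denoising autoencoder it yields (noisy, clean) batch pairs during training. A sketch of the user-facing flow, assuming ruta's `autoencoder_denoising()` constructor and placeholder training data `x_train`:

```r
library(ruta)

network <- input() + dense(36) + output("sigmoid")

# autoencoder_denoising() attaches a noise filter to the learner; during
# train(), to_keras.ruta_filter() turns that filter into a batch generator
learner <- autoencoder_denoising(network, loss = "binary_crossentropy",
                                 noise_type = "zeros")
trained <- train(learner, x_train, epochs = 20)
```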