Switch to side-by-side view

--- a
+++ b/man/create_model_lstm_cnn.Rd
@@ -0,0 +1,152 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/create_model_lstm_cnn.R
+\name{create_model_lstm_cnn}
+\alias{create_model_lstm_cnn}
+\title{Create LSTM/CNN network}
+\usage{
+create_model_lstm_cnn(
+  maxlen = 50,
+  dropout_lstm = 0,
+  recurrent_dropout_lstm = 0,
+  layer_lstm = NULL,
+  layer_dense = c(4),
+  dropout_dense = NULL,
+  kernel_size = NULL,
+  filters = NULL,
+  strides = NULL,
+  pool_size = NULL,
+  solver = "adam",
+  learning_rate = 0.001,
+  vocabulary_size = 4,
+  bidirectional = FALSE,
+  stateful = FALSE,
+  batch_size = NULL,
+  compile = TRUE,
+  padding = "same",
+  dilation_rate = NULL,
+  gap = FALSE,
+  use_bias = TRUE,
+  residual_block = FALSE,
+  residual_block_length = 1,
+  size_reduction_1Dconv = FALSE,
+  label_input = NULL,
+  zero_mask = FALSE,
+  label_smoothing = 0,
+  label_noise_matrix = NULL,
+  last_layer_activation = "softmax",
+  loss_fn = "categorical_crossentropy",
+  num_output_layers = 1,
+  auc_metric = FALSE,
+  f1_metric = FALSE,
+  bal_acc = FALSE,
+  verbose = TRUE,
+  batch_norm_momentum = 0.99,
+  model_seed = NULL,
+  mixed_precision = FALSE,
+  mirrored_strategy = NULL
+)
+}
+\arguments{
+\item{maxlen}{Length of predictor sequence.}
+
+\item{dropout_lstm}{Fraction of the units to drop for inputs.}
+
+\item{recurrent_dropout_lstm}{Fraction of the units to drop for recurrent state.}
+
+\item{layer_lstm}{Number of cells per network layer. Can be a scalar or vector.}
+
+\item{layer_dense}{Vector specifying number of neurons per dense layer after last LSTM or CNN layer (if no LSTM used).}
+
+\item{dropout_dense}{Dropout rates between dense layers. No dropout if \code{NULL}.}
+
+\item{kernel_size}{Size of 1d convolutional layers. For multiple layers, assign a vector. (e.g, \code{rep(3,2)} for two layers and kernel size 3)}
+
+\item{filters}{Number of filters. For multiple layers, assign a vector.}
+
+\item{strides}{Stride values. For multiple layers, assign a vector.}
+
+\item{pool_size}{Integer, size of the max pooling windows. For multiple layers, assign a vector.}
+
+\item{solver}{Optimization method, options are \verb{"adam", "adagrad", "rmsprop"} or \code{"sgd"}.}
+
+\item{learning_rate}{Learning rate for optimizer.}
+
+\item{vocabulary_size}{Number of unique characters in vocabulary.}
+
+\item{bidirectional}{Use bidirectional wrapper for lstm layers.}
+
+\item{stateful}{Boolean. Whether to use stateful LSTM layer.}
+
+\item{batch_size}{Number of samples that are used for one network update. Only used if \code{stateful = TRUE}.}
+
+\item{compile}{Whether to compile the model.}
+
+\item{padding}{Padding of CNN layers, e.g. \verb{"same", "valid"} or \code{"causal"}.}
+
+\item{dilation_rate}{Integer, the dilation rate to use for dilated convolution.}
+
+\item{gap}{Whether to apply global average pooling after last CNN layer.}
+
+\item{use_bias}{Boolean. Usage of bias for CNN layers.}
+
+\item{residual_block}{Boolean. If \code{TRUE}, residual connections are used in the CNN. They are not used in the first convolutional layer.}
+
+\item{residual_block_length}{Integer. Determines how many convolutional layers (or triplets when \code{size_reduction_1D_conv} is \code{TRUE}) exist in one residual block.}
+
+\item{size_reduction_1Dconv}{Boolean. When \code{TRUE}, the number of filters in the convolutional layers is reduced to 1/4 of the number of filters of the original layer.}
+
+\item{label_input}{Integer or \code{NULL}. If not \code{NULL}, adds additional input layer of \code{label_input} size.}
+
+\item{zero_mask}{Boolean, whether to apply zero masking before LSTM layer. Only used if model does not use any CNN layers.}
+
+\item{label_smoothing}{Float in [0, 1]. If 0, no smoothing is applied. If > 0, the loss is computed between the predicted
+labels and a smoothed version of the true labels, where the smoothing squeezes the labels towards 0.5.
+The closer the argument is to 1, the more the labels get smoothed.}
+
+\item{label_noise_matrix}{Matrix of label noise. Every row stands for one class and every column for the percentage of labels in that class.
+For example, if the first class contains 5 percent wrong labels and the second class no noise, then
+
+\code{label_noise_matrix <- matrix(c(0.95, 0.05, 0, 1), nrow = 2, byrow = TRUE )}}
+
+\item{last_layer_activation}{Activation function of output layer(s). For example \code{"sigmoid"} or \code{"softmax"}.}
+
+\item{loss_fn}{Either \code{"categorical_crossentropy"} or \code{"binary_crossentropy"}. If \code{label_noise_matrix} given, will use custom \code{"noisy_loss"}.}
+
+\item{num_output_layers}{Number of output layers.}
+
+\item{auc_metric}{Whether to add AUC metric.}
+
+\item{f1_metric}{Whether to add F1 metric.}
+
+\item{bal_acc}{Whether to add balanced accuracy.}
+
+\item{verbose}{Boolean.}
+
+\item{batch_norm_momentum}{Momentum for the moving mean and the moving variance.}
+
+\item{model_seed}{Set seed for model parameters in tensorflow if not \code{NULL}.}
+
+\item{mixed_precision}{Whether to use mixed precision (https://www.tensorflow.org/guide/mixed_precision).}
+
+\item{mirrored_strategy}{Whether to use distributed mirrored strategy. If \code{NULL}, will use distributed mirrored strategy only if more than 1 GPU is available.}
+}
+\value{
+A keras model that stacks CNN, LSTM and dense layers.
+}
+\description{
+Creates a network consisting of an arbitrary number of CNN, LSTM and dense layers.
+Last layer is a dense layer.
+}
+\examples{
+\dontshow{if (reticulate::py_module_available("tensorflow")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+create_model_lstm_cnn(
+  maxlen = 500,
+  vocabulary_size = 4,
+  kernel_size = c(8, 8, 8),
+  filters = c(16, 32, 64),
+  pool_size = c(3, 3, 3),
+  layer_lstm = c(32, 64),
+  layer_dense = c(128, 4),
+  learning_rate = 0.001)
+\dontshow{\}) # examplesIf}
+}