% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_model_twin_network.R
\name{create_model_twin_network}
\alias{create_model_twin_network}
\title{Create twin network}
\usage{
create_model_twin_network(
maxlen = 50,
dropout_lstm = 0,
recurrent_dropout_lstm = 0,
layer_lstm = NULL,
layer_dense = c(4),
dropout_dense = NULL,
kernel_size = NULL,
filters = NULL,
strides = NULL,
pool_size = NULL,
solver = "adam",
learning_rate = 0.001,
vocabulary_size = 4,
bidirectional = FALSE,
compile = TRUE,
padding = "same",
dilation_rate = NULL,
gap_inputs = NULL,
use_bias = TRUE,
residual_block = FALSE,
residual_block_length = 1,
size_reduction_1Dconv = FALSE,
zero_mask = FALSE,
verbose = TRUE,
batch_norm_momentum = 0.99,
distance_method = "euclidean",
last_layer_activation = "sigmoid",
loss_fn = loss_cl(margin = 1),
metrics = "acc",
model_seed = NULL,
mixed_precision = FALSE,
mirrored_strategy = NULL
)
}
\arguments{
\item{maxlen}{Length of predictor sequence.}
\item{dropout_lstm}{Fraction of the units to drop for inputs.}
\item{recurrent_dropout_lstm}{Fraction of the units to drop for recurrent state.}
\item{layer_lstm}{Number of cells per network layer. Can be a scalar or vector.}
\item{layer_dense}{Vector containing number of neurons per dense layer, before euclidean distance layer.}
\item{dropout_dense}{Dropout rates between dense layers. No dropout if \code{NULL}.}
\item{kernel_size}{Size of 1d convolutional layers. For multiple layers, assign a vector. (e.g., \code{rep(3,2)} for two layers and kernel size 3)}
\item{filters}{Number of filters. For multiple layers, assign a vector.}
\item{strides}{Stride values. For multiple layers, assign a vector.}
\item{pool_size}{Integer, size of the max pooling windows. For multiple layers, assign a vector.}
\item{solver}{Optimization method, options are \verb{"adam", "adagrad", "rmsprop"} or \code{"sgd"}.}
\item{learning_rate}{Learning rate for optimizer.}
\item{vocabulary_size}{Number of unique characters in vocabulary.}
\item{bidirectional}{Use bidirectional wrapper for lstm layers.}
\item{compile}{Whether to compile the model.}
\item{padding}{Padding of CNN layers, e.g. \verb{"same", "valid"} or \code{"causal"}.}
\item{dilation_rate}{Integer, the dilation rate to use for dilated convolution.}
\item{gap_inputs}{Global pooling method to apply. Same options as for \code{flatten_method} argument
in \link{create_model_transformer} function.}
\item{use_bias}{Boolean. Usage of bias for CNN layers.}
\item{residual_block}{Boolean. If true, the residual connections are used in CNN. It is not used in the first convolutional layer.}
\item{residual_block_length}{Integer. Determines how many convolutional layers (or triplets when \code{size_reduction_1Dconv} is \code{TRUE}) exist within one residual block.}
\item{size_reduction_1Dconv}{Boolean. When \code{TRUE}, the number of filters in the convolutional layers is reduced to 1/4 of the number of filters of the original layer.}
\item{zero_mask}{Boolean, whether to apply zero masking before LSTM layer. Only used if model does not use any CNN layers.}
\item{verbose}{Boolean.}
\item{batch_norm_momentum}{Momentum for the moving mean and the moving variance.}
\item{distance_method}{Either "euclidean" or "cosine".}
\item{last_layer_activation}{Activation function of output layer(s). For example \code{"sigmoid"} or \code{"softmax"}.}
\item{loss_fn}{Loss function. Default is the contrastive loss \code{loss_cl(margin = 1)}.}
\item{metrics}{Vector or list of metrics.}
\item{model_seed}{Set seed for model parameters in tensorflow if not \code{NULL}.}
\item{mixed_precision}{Whether to use mixed precision (https://www.tensorflow.org/guide/mixed_precision).}
\item{mirrored_strategy}{Whether to use distributed mirrored strategy. If NULL, will use distributed mirrored strategy only if >1 GPU available.}
}
\value{
A keras model implementing twin network architecture.
}
\description{
A twin network can be trained to maximize the distance
between embeddings of inputs.
Implements approach as described \href{https://keras.io/examples/vision/siamese_contrastive/}{here}.
}
\examples{
\dontshow{if (reticulate::py_module_available("tensorflow")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
maxlen <- 50
\donttest{
library(keras)
model <- create_model_twin_network(
maxlen = maxlen,
layer_dense = 16,
kernel_size = 12,
filters = 4,
pool_size = 3,
learning_rate = 0.001)
}
\dontshow{\}) # examplesIf}
}