% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/custom_layers.R
\name{layer_transformer_block_wrapper}
\alias{layer_transformer_block_wrapper}
\title{Transformer block}
\usage{
layer_transformer_block_wrapper(
num_heads = 2,
head_size = 4,
dropout_rate = 0,
ff_dim = 64,
vocabulary_size = 4,
load_r6 = FALSE,
embed_dim = 64
)
}
\arguments{
\item{num_heads}{Number of attention heads.}
\item{head_size}{Dimension of the attention key.}
\item{dropout_rate}{Rate to randomly drop out connections.}
\item{ff_dim}{Number of units in the first dense layer after the attention block.}
\item{vocabulary_size}{Number of unique characters in the vocabulary.}
\item{load_r6}{Whether to return the underlying R6 layer class instead of a layer instance.}
\item{embed_dim}{Dimension of the token embedding. If set to 0, no embedding is applied; use a value greater than 0 when the input is an integer sequence rather than one-hot encoded.}
}
\value{
A keras layer implementing a transformer block.
}
\description{
Create a transformer block consisting of self-attention, dense layers, layer normalization, residual connections and dropout.
}
\examples{
\donttest{
library(keras)
l <- layer_transformer_block_wrapper()
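# A hedged usage sketch: with the default embed_dim > 0 the block embeds
# integer-encoded tokens internally, so the input can be a plain integer
# sequence. The sequence length (100) and the downstream layers are
# illustrative assumptions, not part of this function's interface.
inputs <- layer_input(shape = c(100))
x <- l(inputs)
outputs <- x \%>\%
  layer_global_average_pooling_1d() \%>\%
  layer_dense(units = 1, activation = "sigmoid")
model <- keras_model(inputs, outputs)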
}
}