man/layer_transformer_block_wrapper.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/custom_layers.R
\name{layer_transformer_block_wrapper}
\alias{layer_transformer_block_wrapper}
\title{Transformer block}
\usage{
layer_transformer_block_wrapper(
  num_heads = 2,
  head_size = 4,
  dropout_rate = 0,
  ff_dim = 64,
  vocabulary_size = 4,
  load_r6 = FALSE,
  embed_dim = 64
)
}
\arguments{
\item{num_heads}{Number of attention heads.}

\item{head_size}{Dimension of the attention key.}

\item{dropout_rate}{Rate at which to randomly drop connections.}

\item{ff_dim}{Units of the first dense layer after the attention blocks.}

\item{vocabulary_size}{Number of unique characters in the vocabulary.}

\item{load_r6}{Whether to return the layer class.}

\item{embed_dim}{Dimension of the token embedding. No embedding if set to 0. Should be used when the input
is an integer sequence rather than one-hot encoded.}
}
\value{
A keras layer implementing a transformer block.
}
\description{
Creates a transformer block consisting of self-attention, dense layers, layer normalization, residual connections and dropout.
}
\examples{
\donttest{
library(keras)
l <- layer_transformer_block_wrapper()
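
# A sketch of a block configured for integer-encoded input (embed_dim > 0);
# the argument values below are illustrative, not package defaults.
l2 <- layer_transformer_block_wrapper(
  num_heads = 4,
  head_size = 8,
  dropout_rate = 0.1,
  ff_dim = 128,
  vocabulary_size = 4,
  embed_dim = 32
)

# The returned keras layer could then be applied to an input tensor, e.g.
# (assuming an integer sequence of length 100):
# input <- layer_input(shape = 100)
# output <- l2(input)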
}
}