% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/train.R
\name{resume_training_from_model_card}
\alias{resume_training_from_model_card}
\title{Continue training from model card}
\usage{
resume_training_from_model_card(
path_model_card,
seed = NULL,
epoch = NULL,
new_run_name = NULL,
new_args = NULL,
new_compile = NULL,
use_mirrored_strategy = NULL,
unfreeze = FALSE,
verbose = FALSE
)
}
\arguments{
\item{path_model_card}{Path to model card to resume training from.}
\item{seed}{Seed for reproducible results. If \code{NULL}, a random seed is chosen.}
\item{epoch}{Epoch to resume training from. If \code{NULL}, resume from the last epoch.}
\item{new_run_name}{Name of the new run. If \code{NULL}, the new run name is the old run name with '_cont' appended.}
\item{new_args}{Named list of training arguments to overwrite; arguments not listed here keep their previous values from the model card.
For example, to change the batch size and the padding option:
\code{new_args = list(batch_size = 6, padding = TRUE)}.}
\item{new_compile}{Named list of arguments for recompiling the model. If \code{NULL}, the compiled model from the checkpoint is used as is.
Example: \code{new_compile = list(loss = 'binary_crossentropy', metrics = 'acc', optimizer = keras::optimizer_adam())}}
\item{use_mirrored_strategy}{Whether to use a distributed mirrored strategy.
If \code{NULL}, the mirrored strategy is used only if more than one GPU is available.}
\item{unfreeze}{If \code{TRUE}, set the model's trainable attribute to \code{TRUE} (unfreeze the weights).}
\item{verbose}{Whether to print all training arguments.}
}
\value{
A list of training metrics.
}
\description{
Use the information stored in a model card to resume training from the corresponding checkpoint, reusing the same training arguments unless they are explicitly overwritten.
}
\examples{
\donttest{
library(keras)
# create dummy data and temp directories
path_train_1 <- tempfile()
path_train_2 <- tempfile()
path_val_1 <- tempfile()
path_val_2 <- tempfile()
path_checkpoint <- tempfile()
dir.create(path_checkpoint)
path_model_card <- tempfile()
dir.create(path_model_card)
for (current_path in c(path_train_1, path_train_2,
path_val_1, path_val_2)) {
dir.create(current_path)
create_dummy_data(file_path = current_path,
num_files = 3,
seq_length = 10,
num_seq = 5,
vocabulary = c("a", "c", "g", "t"))
}
# create model
model <- create_model_lstm_cnn(layer_lstm = 8, layer_dense = 2, maxlen = 5)
# train model
run_name <- 'test_run_1'
hist <- train_model(train_type = "label_folder",
run_name = run_name,
path_checkpoint = path_checkpoint,
model_card = list(path_model_card = path_model_card, description = 'test run'),
model = model,
path = c(path_train_1, path_train_2),
path_val = c(path_val_1, path_val_2),
batch_size = 8,
epochs = 3,
steps_per_epoch = 6,
vocabulary_label = c("label_1", "label_2"))
# resume training
resume_training_from_model_card(path_model_card = file.path(path_model_card, run_name))
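# optionally, resume once more while overwriting selected arguments
# (a sketch; the run name, batch size and compile settings below are illustrative)
resume_training_from_model_card(
  path_model_card = file.path(path_model_card, run_name),
  new_run_name = 'test_run_1_cont2',
  new_args = list(batch_size = 4),
  new_compile = list(loss = 'categorical_crossentropy',
                     metrics = 'acc',
                     optimizer = keras::optimizer_adam()),
  unfreeze = TRUE,
  verbose = TRUE)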
}
}