% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generator_csv.R
\name{generator_fasta_label_header_csv}
\alias{generator_fasta_label_header_csv}
\title{Data generator for fasta/fastq files and label targets}
\usage{
generator_fasta_label_header_csv(
  path_corpus,
  format = "fasta",
  batch_size = 256,
  maxlen = 250,
  max_iter = 10000,
  vocabulary = c("a", "c", "g", "t"),
  verbose = FALSE,
  shuffle_file_order = FALSE,
  step = 1,
  seed = 1234,
  shuffle_input = FALSE,
  file_limit = NULL,
  path_file_log = NULL,
  vocabulary_label = c("x", "y", "z"),
  reverse_complement = TRUE,
  ambiguous_nuc = "zero",
  proportion_per_seq = NULL,
  read_data = FALSE,
  use_quality_score = FALSE,
  padding = TRUE,
  skip_amb_nuc = NULL,
  max_samples = NULL,
  concat_seq = NULL,
  added_label_path = NULL,
  add_input_as_seq = NULL,
  target_from_csv = NULL,
  target_split = NULL,
  file_filter = NULL,
  use_coverage = NULL,
  proportion_entries = NULL,
  sample_by_file_size = FALSE,
  reverse_complement_encoding = FALSE,
  n_gram = NULL,
  n_gram_stride = 1,
  add_noise = NULL,
  return_int = FALSE,
  reshape_xy = NULL
)
}
\arguments{
\item{path_corpus}{Input directory where fasta files are located or path to a single file ending with fasta or fastq
(as specified in the \code{format} argument). Can also be a list of directories and/or files.}
\item{format}{File format, either \code{"fasta"} or \code{"fastq"}.}
\item{batch_size}{Number of samples in one batch.}
\item{maxlen}{Length of predictor sequence.}
\item{max_iter}{Stop after \code{max_iter} iterations have failed to produce a new batch.}
\item{vocabulary}{Vector of allowed characters. Characters outside vocabulary get encoded as specified in \code{ambiguous_nuc}.}
\item{verbose}{Whether to show messages.}
\item{shuffle_file_order}{Logical, whether to go through files randomly or sequentially.}
\item{step}{How often to take a sample, i.e. the distance between the start positions of two samples. For example, \code{step = 5} takes a sample every 5 positions.}
\item{seed}{Sets seed for the \code{set.seed} function to get reproducible results.}
\item{shuffle_input}{Whether to shuffle entries in every fasta/fastq file before extracting samples.}
\item{file_limit}{Integer or \code{NULL}. If an integer, use only that number of randomly sampled files for training. Ignored if greater than the number of files in \code{path_corpus}.}
\item{path_file_log}{If a path is specified, write the names of the processed files to this csv file.}
\item{vocabulary_label}{Character vector of possible targets. Targets outside \code{vocabulary_label} will get discarded.}
\item{reverse_complement}{Boolean; for every new file, decide randomly whether to use the original data or its reverse complement.}
\item{ambiguous_nuc}{How to handle nucleotides outside vocabulary, either \code{"zero"}, \code{"discard"}, \code{"empirical"} or \code{"equal"}.
\itemize{
\item If \code{"zero"}, input gets encoded as zero vector.
\item If \code{"equal"}, input is repetition of \code{1/length(vocabulary)}.
\item If \code{"discard"}, samples containing nucleotides outside vocabulary get discarded.
\item If \code{"empirical"}, use nucleotide distribution of current file.
}}
\item{proportion_per_seq}{Numerical value between 0 and 1. Proportion of sequence to take samples from (use random subsequence).}
\item{read_data}{If \code{TRUE}, the first element of the input is a list of length 2, each entry containing one part of a paired read. \code{maxlen} should be twice the length of one read.}
\item{use_quality_score}{Whether to use fastq quality scores. If \code{TRUE}, the input is not one-hot encoded but corresponds to probabilities,
for example (0.97, 0.01, 0.01, 0.01) instead of (1, 0, 0, 0).}
\item{padding}{Whether to pad sequences too short for one sample with zeros.}
\item{skip_amb_nuc}{Threshold of ambiguous nucleotides to accept in a fasta entry; entries exceeding the threshold get discarded completely.}
\item{max_samples}{Maximum number of samples to use from one file. If not \code{NULL} and file has more than \code{max_samples} samples, will randomly choose a
subset of \code{max_samples} samples.}
\item{concat_seq}{Character string or \code{NULL}. If not \code{NULL}, all entries from a file get concatenated to one sequence with the \code{concat_seq} string between them.
Example: if the first entry is AACC, the second entry is TTTG and \code{concat_seq = "ZZZ"}, the concatenated sequence is AACCZZZTTTG.}
\item{added_label_path}{Path to file with additional input labels. Should be a csv file with one column named "file". Other columns should correspond to labels.}
\item{add_input_as_seq}{Boolean vector specifying for each entry in \code{added_label_path} whether rows from the csv should be encoded as a sequence or used directly.
If a row in your csv file is a sequence, this should be \code{TRUE}. For example, you may want to add another sequence, say ACCGT; this corresponds to 1,2,2,3,4 in the
csv file (if \code{vocabulary = c("A", "C", "G", "T")}). If \code{add_input_as_seq} is \code{TRUE}, 1,2,2,3,4 gets one-hot encoded, so the added input is a 3D tensor. If \code{add_input_as_seq} is
\code{FALSE}, the network is fed just the raw data (a 2D tensor).}
\item{target_from_csv}{Path to csv file with target mapping. One column should be called "file"; the other columns contain the targets.}
\item{target_split}{If the target gets read from a csv file, a list of column names used to divide the target tensor into a list of tensors.
Example: if the csv file has header names \verb{"file", "label_1", "label_2", "label_3"} and \code{target_split = list(c("label_1", "label_2"), "label_3")},
this will divide the target matrix into a list of length 2, where the first element contains the columns named \code{"label_1"} and \code{"label_2"} and the
second element contains the column named \code{"label_3"}.}
\item{file_filter}{Vector of file names to use from \code{path_corpus}.}
\item{use_coverage}{Integer or \code{NULL}. If not \code{NULL}, use coverage as encoding rather than one-hot encoding and normalize.
Coverage information must be contained in fasta header: there must be a string \code{"cov_n"} in the header, where \code{n} is some integer.}
\item{proportion_entries}{Proportion of fasta entries to keep. For example, if fasta file has 50 entries and \code{proportion_entries = 0.1},
will randomly select 5 entries.}
\item{sample_by_file_size}{Sample new file weighted by file size (bigger files more likely).}
\item{reverse_complement_encoding}{Whether to use both original sequence and reverse complement as two input sequences.}
\item{n_gram}{Integer, encode target not nucleotide-wise but combine n nucleotides at once. For example, for \code{n = 2}: \verb{"AA" -> (1, 0, ..., 0)},
\verb{"AC" -> (0, 1, 0, ..., 0)}, \verb{"TT" -> (0, ..., 0, 1)}, where the one-hot vectors have length \code{length(vocabulary)^n}.}
\item{n_gram_stride}{Step size for n-gram encoding. For AACCGGTT with \code{n_gram = 4} and \code{n_gram_stride = 2}, generator encodes
\verb{(AACC), (CCGG), (GGTT)}; for \code{n_gram_stride = 4} generator encodes \verb{(AACC), (GGTT)}.}
\item{add_noise}{\code{NULL} or list of arguments. If not \code{NULL}, the list must contain \code{noise_type}, which can be \code{"normal"} or \code{"uniform"};
optional arguments are \code{sd} or \code{mean} if \code{noise_type} is \code{"normal"} (default is \code{sd = 1} and \code{mean = 0}) or \code{min}, \code{max} if \code{noise_type} is \code{"uniform"}
(default is \code{min = 0}, \code{max = 1}).}
\item{return_int}{Whether to return integer encoding or one-hot encoding.}
\item{reshape_xy}{Can be a list of functions to apply to input and/or target. List elements (containing the reshape functions)
must be named \code{x} for the input or \code{y} for the target, and each function must have arguments called \code{x} and \code{y}. For example:
\code{reshape_xy = list(x = function(x, y) {return(x+1)}, y = function(x, y) {return(x+y)})}.
For the rds generator, the functions need an additional argument called \code{sw}.}
}
\value{
A generator function.
}
\description{
Iterates over a folder containing fasta/fastq files and produces encodings of the predictor sequences
and target variables. Targets are read from the fasta headers or from a separate csv file.
}
\examples{
\dontshow{if (reticulate::py_module_available("tensorflow")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
path_input <- tempfile()
dir.create(path_input)
# create 2 fasta files called 'file_1.fasta', 'file_2.fasta'
create_dummy_data(file_path = path_input,
                  num_files = 2,
                  seq_length = 5,
                  num_seq = 1,
                  vocabulary = c("a", "c", "g", "t"))
dummy_labels <- data.frame(file = c('file_1.fasta', 'file_2.fasta'), # dummy labels
                           label1 = c(0, 1),
                           label2 = c(1, 0))
target_from_csv <- tempfile(fileext = '.csv')
write.csv(dummy_labels, target_from_csv, row.names = FALSE)
gen <- generator_fasta_label_header_csv(path_corpus = path_input, batch_size = 2,
                                        maxlen = 5, target_from_csv = target_from_csv)
z <- gen()
dim(z[[1]])
z[[2]]
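# A minimal sketch of 'target_split' (an assumption for illustration: the column
# names 'label1' and 'label2' from the dummy csv created above are used to split
# the target matrix into a list with one tensor per label column).
gen_split <- generator_fasta_label_header_csv(path_corpus = path_input, batch_size = 2,
                                              maxlen = 5, target_from_csv = target_from_csv,
                                              target_split = list("label1", "label2"))
z_split <- gen_split()
length(z_split[[2]]) # list of 2 target tensors, one per label column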
\dontshow{\}) # examplesIf}
}