% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IL_Utilities.R
\name{SL.LASSO}
\alias{SL.LASSO}
\title{Elastic net regression, including lasso and ridge with a fixed alpha}
\usage{
SL.LASSO(
  Y,
  X,
  newX,
  family,
  obsWeights,
  id,
  alpha = 1,
  nfolds = 10,
  nlambda = 100,
  useMin = TRUE,
  loss = "deviance",
  ...
)
}
\arguments{
\item{Y}{Outcome variable.}

\item{X}{Covariate dataframe.}

\item{newX}{Dataframe of observations at which to predict the outcome.}

\item{family}{"gaussian" for regression, "binomial" for binary
classification. Untested options: "multinomial" for multi-class
classification, "mgaussian" for multiple responses, "poisson" for a
non-negative outcome with proportional mean and variance, and "cox".}

\item{obsWeights}{Optional observation-level weights.}

\item{id}{Optional id to group observations from the same unit (not
currently used).}

\item{alpha}{Elastic net mixing parameter in [0, 1]; 0 = ridge regression
and 1 = lasso.}

\item{nfolds}{Number of folds for the internal cross-validation used to
select lambda.}

\item{nlambda}{Number of lambda values to check; 100 or more is
recommended.}

\item{useMin}{If TRUE, use the lambda that minimizes cross-validated risk;
otherwise apply the 1 standard-error rule, which chooses a larger penalty
whose performance is within one standard error of the minimum (see Breiman
et al. 1984 on CART for background).}

\item{loss}{Loss function: "deviance", "mse", or "mae". If family =
"binomial", it can also be "auc" or "class" (misclassification error).}

\item{...}{Any additional arguments are passed through to
\code{cv.glmnet}.}
}
\description{
Penalized regression using elastic net. Alpha = 0 corresponds to ridge
regression and alpha = 1 corresponds to the lasso.
See \code{vignette("glmnet_beta", package = "glmnet")} for a nice tutorial
on glmnet.
}
\examples{
# Load a test dataset.
data(PimaIndiansDiabetes2, package = "mlbench")
data = PimaIndiansDiabetes2

# Omit observations with missing data.
data = na.omit(data)

Y = as.numeric(data$diabetes == "pos")
X = subset(data, select = -diabetes)

set.seed(1, "L'Ecuyer-CMRG")

sl = SuperLearner(Y, X, family = binomial(),
                  SL.library = c("SL.mean", "SL.glm", "SL.LASSO"))
sl
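
# A direct call to the wrapper (a sketch, assuming SL.LASSO follows the
# standard SuperLearner wrapper convention of returning list(pred, fit)).
# alpha = 0 would give ridge regression; useMin = FALSE would apply the
# 1 standard-error rule when selecting lambda.
fit = SL.LASSO(Y = Y, X = X, newX = X, family = binomial(),
               obsWeights = rep(1, nrow(X)), id = seq_len(nrow(X)),
               alpha = 1, nfolds = 10, useMin = TRUE)
summary(fit$pred)

# Hypothetical ridge variant: fix alpha = 0 and pass everything else through.
SL.ridge0 = function(...) SL.LASSO(..., alpha = 0)
sl2 = SuperLearner(Y, X, family = binomial(),
                   SL.library = c("SL.mean", "SL.LASSO", "SL.ridge0"))
sl2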
}
\references{
Friedman, J., Hastie, T., & Tibshirani, R. (2010). Regularization paths for
generalized linear models via coordinate descent. Journal of Statistical
Software, 33(1), 1-22.

Hoerl, A. E., & Kennard, R. W. (1970). Ridge regression: Biased estimation
for nonorthogonal problems. Technometrics, 12(1), 55-67.

Tibshirani, R. (1996). Regression shrinkage and selection via the lasso.
Journal of the Royal Statistical Society. Series B (Methodological), 58(1),
267-288.

Zou, H., & Hastie, T. (2005). Regularization and variable selection via the
elastic net. Journal of the Royal Statistical Society: Series B (Statistical
Methodology), 67(2), 301-320.
}
\seealso{
\code{\link{predict.SL.glmnet}} \code{\link[glmnet]{cv.glmnet}}
\code{\link[glmnet]{glmnet}}
}