man/reducedDims.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scAI_model.R
\name{reducedDims}
\alias{reducedDims}
\title{Perform dimensional reduction}
\usage{
reducedDims(object, data.use = object@fit$H, do.scale = TRUE,
  do.center = TRUE, return.object = TRUE, method = "umap",
  dim.embed = 2, dim.use = NULL, perplexity = 30, theta = 0.5,
  check_duplicates = F, rand.seed = 42L, FItsne.path = NULL,
  dimPC = 40, do.fast = TRUE, weight.by.var = TRUE,
  n.neighbors = 30L, n.components = 2L, distance = "correlation",
  n.epochs = NULL, learning.rate = 1, min.dist = 0.3, spread = 1,
  set.op.mix.ratio = 1, local.connectivity = 1L,
  repulsion.strength = 1, negative.sample.rate = 5, a = NULL,
  b = NULL)
}
|
|
\arguments{
\item{object}{an scAI object}

\item{data.use}{data used for dimension reduction; defaults to the learned factor matrix \code{object@fit$H}}

\item{do.scale}{whether to scale the data}

\item{do.center}{whether to center the data}

\item{return.object}{whether to return the updated scAI object}

\item{method}{Method of dimensional reduction, one of "tsne", "FItsne" and "umap"}

\item{dim.embed}{number of dimensions of the t-SNE embedding}

\item{dim.use}{number of PCs used for t-SNE}

\item{perplexity}{perplexity parameter for t-SNE}

\item{theta}{speed/accuracy trade-off parameter for Barnes-Hut t-SNE}

\item{check_duplicates}{whether to check for duplicate rows before running t-SNE}

\item{rand.seed}{Set a random seed. By default, sets the seed to 42.}
|
|
\item{FItsne.path}{file path of the FIt-SNE executable}

\item{dimPC}{the number of components to keep in PCA}

\item{do.fast}{whether to perform fast PCA}

\item{weight.by.var}{whether to weight the PC scores by the variance of each PC}
|
|
\item{n.neighbors}{This determines the number of neighboring points used in
local approximations of manifold structure. Larger values will result in more
global structure being preserved at the loss of detailed local structure. In general this parameter should often be in the range 5 to 50.}

\item{n.components}{The dimension of the space to embed into.}

\item{distance}{This determines the choice of metric used to measure distance in the input space.}

\item{n.epochs}{the number of training epochs to be used in optimizing the low dimensional embedding. Larger values result in more accurate embeddings. If NULL is specified, a value will be selected based on the size of the input dataset (200 for large datasets, 500 for small).}

\item{learning.rate}{The initial learning rate for the embedding optimization.}
|
|
\item{min.dist}{This controls how tightly the embedding is allowed to compress points together.
Larger values ensure embedded points are more evenly distributed, while smaller values allow the
algorithm to optimise more accurately with regard to local structure. Sensible values are in the range 0.001 to 0.5.}

\item{spread}{The effective scale of embedded points. In combination with min.dist this determines how clustered/clumped the embedded points are.}

\item{set.op.mix.ratio}{Interpolate between (fuzzy) union and intersection as the set operation used to combine local fuzzy simplicial sets to obtain a global fuzzy simplicial set.}

\item{local.connectivity}{The local connectivity required - i.e. the number of nearest neighbors
that should be assumed to be connected at a local level. The higher this value the more connected
the manifold becomes locally. In practice this should not be more than the local intrinsic dimension of the manifold.}
|
|
\item{repulsion.strength}{Weighting applied to negative samples in low dimensional embedding
optimization. Values higher than one will result in greater weight being given to negative samples.}

\item{negative.sample.rate}{The number of negative samples to select per positive sample in the
optimization process. Increasing this value will result in greater repulsive force being applied, greater optimization cost, but slightly more accuracy.}
|
|
\item{a}{More specific parameters controlling the embedding. If NULL, these values are set automatically as determined by min.dist and spread.}

\item{b}{More specific parameters controlling the embedding. If NULL, these values are set automatically as determined by min.dist and spread.}
}
|
|
\description{
Dimension reduction by PCA, t-SNE or UMAP
}
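% A minimal usage sketch, not part of the generated file: `scAI_obj` is a
% hypothetical scAI object that has already been fitted, so that
% `scAI_obj@fit$H` exists. The calls are wrapped in \dontrun{} and only
% illustrate how the documented arguments might be supplied.
\examples{
\dontrun{
# Embed cells with UMAP using the learned factor matrix H (the default data.use)
scAI_obj <- reducedDims(scAI_obj, method = "umap", n.neighbors = 30L, min.dist = 0.3)

# Alternatively, run t-SNE on the same factors with a custom perplexity
scAI_obj <- reducedDims(scAI_obj, method = "tsne", perplexity = 30, rand.seed = 42L)
}
}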