From cdd9ab94b417db38a5345cb3c6aca9359f9578df Mon Sep 17 00:00:00 2001
From: Andrea Raithel
Date: Fri, 18 Oct 2024 13:20:12 +0200
Subject: [PATCH] update vignette

---
 vignettes/FRASER.Rnw       | 20 ++++++++++++--------
 vignettes/bibliography.bib |  6 ++++++
 2 files changed, 18 insertions(+), 8 deletions(-)

diff --git a/vignettes/FRASER.Rnw b/vignettes/FRASER.Rnw
index d3fa5837..bd3b92c0 100644
--- a/vignettes/FRASER.Rnw
+++ b/vignettes/FRASER.Rnw
@@ -686,17 +686,21 @@ fds <- fit(fds, q=3, type="jaccard", implementation="PCA-BB-Decoder",
 
 For the previous call, the dimension $q$ of the latent space has been fixed.
 Since working with the correct $q$ is very important, the \fraser{}
-package also provides the function \Rfunction{optimHyperParams} that can be
-used to estimate the dimension $q$ of the latent space of the data. It works by
-artificially injecting outliers into the data and then comparing the AUC of
-recalling these outliers for different values of $q$. Since this hyperparameter
-optimization step can take some time for the full dataset, we only show it here
-for a subset of the dataset:
+package also provides the function \Rfunction{estimateBestQ} that can be
+used to estimate the dimension $q$ of the latent space of the data. By default,
+it uses the deterministic Optimal Hard Thresholding (OHT) method established by
+Gavish and Donoho\cite{Gavish2013}. Alternatively, a hyperparameter optimization
+can be performed that works by artificially injecting outliers into the data and
+then comparing the AUC of recalling these outliers for different values of $q$.
+Since this hyperparameter optimization step is quite CPU-intensive, we only show
+it here for a subset of the dataset and recommend using the faster OHT approach:
 
 <<>>=
 set.seed(42)
-# hyperparameter opimization
-fds <- optimHyperParams(fds, type="jaccard", plot=FALSE)
+# Optimal Hard Thresholding
+fds <- estimateBestQ(fds, type="jaccard", useOHT=TRUE, plot=FALSE)
+# hyperparameter optimization
+fds <- estimateBestQ(fds, type="jaccard", useOHT=FALSE, plot=FALSE)
 
 # retrieve the estimated optimal dimension of the latent space
 bestQ(fds, type="jaccard")
diff --git a/vignettes/bibliography.bib b/vignettes/bibliography.bib
index 4c16df61..b7937c8e 100644
--- a/vignettes/bibliography.bib
+++ b/vignettes/bibliography.bib
@@ -56,4 +56,10 @@ @article{MarcoSola2012
     author = {Santiago Marco-Sola and Michael Sammeth and Roderic Guig{\'{o}} and Paolo Ribeca},
     title = {The {GEM} mapper: fast, accurate and versatile alignment by filtration},
     journal = {Nature Methods}
+}
+@article{Gavish2013,
+    author = {Gavish, Matan and Donoho, David L.},
+    title = {The Optimal Hard Threshold for Singular Values is $4/\sqrt{3}$},
+    year = {2013},
+    url = {http://arxiv.org/pdf/1305.5870}
 }
\ No newline at end of file
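
Note: for readers unfamiliar with OHT, below is a minimal R sketch of the
singular-value hard-thresholding rule from Gavish and Donoho, assuming a square
data matrix and an unknown noise level, in which case singular values below
2.858 times the median singular value are discarded. The function name ohtBestQ
and its input are hypothetical illustrations, not the actual code path of
estimateBestQ:

    # Count the singular values above the optimal hard threshold and use that
    # count as the latent space dimension q (square matrix, unknown noise).
    ohtBestQ <- function(X) {
        sv  <- svd(X)$d             # singular values, largest first
        tau <- 2.858 * median(sv)   # Gavish/Donoho threshold, sigma unknown
        sum(sv > tau)               # estimated optimal q
    }

For non-square matrices, the 2.858 coefficient is replaced by a constant that
depends on the aspect ratio of the matrix, as given in the cited paper.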