Skip to content

Commit

Permalink
Add files via upload
Browse files Browse the repository at this point in the history
  • Loading branch information
allenzhuaz authored May 1, 2017
1 parent 996221d commit 26f42d5
Show file tree
Hide file tree
Showing 27 changed files with 1,444 additions and 0 deletions.
19 changes: 19 additions & 0 deletions DESCRIPTION
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
Package: MHTdiscrete
Type: Package
Title: Multiple Hypotheses Testing for Discrete Data
Version: 0.1.3
Suggests: multcomp, FixSeqMTP
Author: Yalin Zhu, Wenge Guo
Maintainer: Yalin Zhu <yalin.zhu@outlook.com>
BugReports: https://github.com/allenzhuaz/MHTdiscrete/issues
URL: https://allen.shinyapps.io/MTPs/
Description: A comprehensive tool for almost all existing multiple testing
methods for discrete data. The package also provides some novel multiple testing
procedures controlling FWER/FDR for discrete data. Given discrete p-values
and their domains, the [method].p.adjust function returns adjusted p-values,
which can be used to compare with the nominal significant level alpha and make
decisions. For users' convenience, the functions also provide the output option
for printing decision rules.
License: GPL (>= 2)
LazyData: TRUE
NeedsCompilation: yes
21 changes: 21 additions & 0 deletions MHTdiscrete.Rproj
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
Version: 1.0

RestoreWorkspace: Default
SaveWorkspace: Default
AlwaysSaveHistory: Default

EnableCodeIndexing: Yes
UseSpacesForTab: Yes
NumSpacesForTab: 2
Encoding: UTF-8

RnwWeave: Sweave
LaTeX: pdfLaTeX

AutoAppendNewline: Yes
StripTrailingWhitespace: Yes

BuildType: Package
PackageUseDevtools: Yes
PackageInstallArgs: --no-multiarch --with-keep.source
PackageRoxygenize: rd,collate,namespace
17 changes: 17 additions & 0 deletions NAMESPACE
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Generated by roxygen2: do not edit by hand

export(GTBH.p.adjust)
export(GTBY.p.adjust)
export(MBH.p.adjust)
export(MBL.p.adjust)
export(MBY.p.adjust)
export(MBonf.p.adjust)
export(MHoch.p.adjust)
export(MHolm.p.adjust)
export(MixBonf.p.adjust)
export(Roth.p.adjust)
export(Roth.rej)
export(Sidak.p.adjust)
export(TH.p.adjust)
export(Tarone.p.adjust)
export(getPval)
42 changes: 42 additions & 0 deletions R/FDRSD.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
#' The adjusted p-values for Modified Benjamini-Liu (BL) step-down FDR controlling procedure.
#'
#' The function for calculating the adjusted p-values based on original available p-values and all attainable p-values.
#'
#' @usage
#' MBL.p.adjust(p, p.set, alpha, make.decision)
#' @param p numeric vector of p-values (possibly with \code{\link[base]{NA}}s). Any other R object is coerced by \code{\link[base]{as.numeric}}. Same as in \code{\link[stats]{p.adjust}}.
#' @param p.set a list of numeric vectors, where each vector is the vector of all attainable p-values containing the available p-value for the corresponding hypothesis.
#' @param alpha significance level used to compare with adjusted p-values to make decisions; the default value is 0.05.
#' @param make.decision logical; if \code{TRUE}, then the output includes the decision rules obtained by comparing adjusted p-values with the significance level \eqn{\alpha}
#' @return
#' A numeric vector of the adjusted p-values (of the same length as \code{p}).
#' @seealso \code{\link{MBH.p.adjust}}, \code{\link{MBY.p.adjust}}
#' @author Yalin Zhu
#' @references
#' Benjamini, Y., and Liu, W. (1999).
#' A step-down multiple hypotheses testing procedure that controls the false discovery rate under independence.
#' \emph{Journal of Statistical Planning and Inference}, \strong{82}: 163-170.
#' @note The MBL procedure for discrete data controls FDR under the specific dependence assumption where the joint distribution of statistics from true nulls are independent of the joint distribution of statistics from false nulls.
#' @examples
#' p <- c(pbinom(1,8,0.5),pbinom(1,5,0.75),pbinom(1,6,0.6))
#' p.set <-list(pbinom(0:8,8,0.5),pbinom(0:5,5,0.75),pbinom(0:6,6,0.6))
#' MBL.p.adjust(p,p.set)
#' @export

MBL.p.adjust <- function(p, p.set, alpha = 0.05, make.decision = FALSE){
  # Sort the raw p-values; `unsort` maps sorted positions back to input order.
  ord <- order(p)
  unsort <- order(ord)
  m <- length(p)
  ps <- p[ord]
  ps.set <- p.set[ord]
  adj <- numeric(m)
  # Step-down: walk from the smallest sorted p-value upward, enforcing
  # monotonicity adj[i] >= adj[i-1].
  for (i in seq_len(m)) {
    # For every hypothesis still in play (positions i..m), the largest
    # attainable p-value not exceeding ps[i] (0 when none exists) -- the
    # discrete CDF of that hypothesis evaluated at ps[i].
    cdf <- vapply(ps.set[i:m],
                  function(s) max(s[s <= ps[i]], 0),
                  numeric(1))
    crit <- (m - i + 1) / m * (1 - prod(1 - cdf))
    adj[i] <- if (i == 1) crit else max(adj[i - 1], crit)
  }
  if (make.decision == FALSE) {
    return(adj[unsort])
  } else {
    return(list(method= "Modified Benjamini-Liu (BL)", significant.level = alpha, Result = data.frame(raw.p = p, adjust.p=adj[unsort], decision=ifelse(adj[unsort]<=alpha, "reject","accept"))))
  }
}
182 changes: 182 additions & 0 deletions R/FDRSU.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,182 @@
#' The adjusted p-values for Modified Benjamini-Hochberg (BH) step-up FDR controlling procedure.
#'
#' The function for calculating the adjusted p-values based on original available p-values and all attainable p-values.
#'
#' @usage
#' MBH.p.adjust(p, p.set, alpha, make.decision)
#' @param p numeric vector of p-values (possibly with \code{\link[base]{NA}}s). Any other R object is coerced by \code{\link[base]{as.numeric}}. Same as in \code{\link[stats]{p.adjust}}.
#' @param p.set a list of numeric vectors, where each vector is the vector of all attainable p-values containing the available p-value for the corresponding hypothesis.
#' @param alpha significance level used to compare with adjusted p-values to make decisions; the default value is 0.05.
#' @param make.decision logical; if \code{TRUE}, then the output includes the decision rules obtained by comparing adjusted p-values with the significance level \eqn{\alpha}
#' @return
#' A numeric vector of the adjusted p-values (of the same length as \code{p}).
#' @seealso \code{\link{MBY.p.adjust}}, \code{\link{MBL.p.adjust}}
#' @author Yalin Zhu
#' @references
#' Benjamini, Y., and Hochberg, Y. (1995).
#' Controlling the false discovery rate: a practical and powerful approach to multiple testing.
#' \emph{Journal of the Royal Statistical Society Series B}, \strong{57}: 289-300.
#' @examples
#' p <- c(pbinom(1,8,0.5),pbinom(1,5,0.75),pbinom(1,6,0.6))
#' p.set <-list(pbinom(0:8,8,0.5),pbinom(0:5,5,0.75),pbinom(0:6,6,0.6))
#' MBH.p.adjust(p,p.set)
#' @export

MBH.p.adjust <- function(p, p.set, alpha = 0.05, make.decision = FALSE){
  # Order the raw p-values; `ro` maps sorted positions back to input order.
  o <- order(p); ro <- order(o); m <- length(p)
  sort.p <- p[o]; sort.p.set <- p.set[o]
  # Preallocate (the original grew adjP from length 0 inside the loop).
  adjP <- numeric(m)
  # Step-up: walk from the largest sorted p-value down to the smallest,
  # enforcing monotonicity adjP[i] <= adjP[i+1].
  for (i in m:1){
    # For each hypothesis j, the largest attainable p-value of H_j that does
    # not exceed the current sorted p-value sort.p[i] (0 when none exists) --
    # the discrete CDF of H_j evaluated at sort.p[i].  Only the current
    # "row" is needed, so a vector replaces the original m x m matrix.
    pCDF <- vapply(sort.p.set,
                   function(s) max(s[s <= sort.p[i]], 0),
                   numeric(1))
    crit <- sum(pCDF)/i
    # Scalar branch: plain if/else instead of ifelse() on a length-1 test.
    adjP[i] <- if (i == m) crit else min(adjP[i+1], crit)
  }
  if (make.decision == FALSE){
    return(adjP[ro])
  } else{
    return(list(method= "Modified Benjamini-Hochberg (BH)", significant.level = alpha, Result = data.frame(raw.p = p, adjust.p=adjP[ro], decision=ifelse(adjP[ro]<=alpha, "reject","accept"))))
  }
}

#' The adjusted p-values for Gilbert-Tarone-BH step-up FDR controlling procedure.
#'
#' The function for calculating the adjusted p-values based on original available p-values and all attainable p-values.
#'
#' @usage
#' GTBH.p.adjust(p, p.set, alpha, make.decision)
#' @param p numeric vector of p-values (possibly with \code{\link[base]{NA}}s). Any other R object is coerced by \code{\link[base]{as.numeric}}. Same as in \code{\link[stats]{p.adjust}}.
#' @param p.set a list of numeric vectors, where each vector is the vector of all attainable p-values containing the available p-value for the corresponding hypothesis.
#' @param alpha significance level used to compare with adjusted p-values to make decisions; the default value is 0.05.
#' @param make.decision logical; if \code{TRUE}, then the output includes the decision rules obtained by comparing adjusted p-values with the significance level \eqn{\alpha}
#' @return
#' A numeric vector of the adjusted p-values (of the same length as \code{p}).
#' @seealso \code{\link{GTBY.p.adjust}}, \code{\link{MBH.p.adjust}}, \code{\link{MBY.p.adjust}}
#' @author Yalin Zhu
#' @references
#' Gilbert, P. B. (2005).
#' A modified false discovery rate multiple-comparisons procedure for discrete data, applied to human immunodeficiency virus genetics.
#' \emph{Journal of the Royal Statistical Society: Series C (Applied Statistics)}, \strong{54}: 143-158.
#'
#' Benjamini, Y., and Hochberg, Y. (1995).
#' Controlling the false discovery rate: a practical and powerful approach to multiple testing.
#' \emph{Journal of the Royal Statistical Society Series B}, \strong{57}: 289-300.
#'
#' @examples
#' p <- c(pbinom(1,8,0.5),pbinom(1,5,0.75),pbinom(1,6,0.6))
#' p.set <-list(pbinom(0:8,8,0.5),pbinom(0:5,5,0.75),pbinom(0:6,6,0.6))
#' GTBH.p.adjust(p,p.set)
#' @export

GTBH.p.adjust <- function(p, p.set, alpha = 0.05, make.decision = FALSE){
  o <- order(p); ro <- order(o); m <- length(p); adjP <- numeric(m)
  sort.p <- p[o]; sort.p.set <- p.set[o]
  # Smallest attainable p-value of each hypothesis, in increasing order.
  minP <- sort(vapply(sort.p.set, min, numeric(1)))
  # Step-up from the largest sorted p-value.
  for (j in m:1){
    # Tarone-type count q: number of hypotheses whose attainable p-values can
    # reach sort.p[j] or below.  Equivalent to the original interval scan for
    # the i with minP[i] <= sort.p[j] < minP[i+1], but without indexing one
    # past the end of minP at i == m.
    q <- sum(minP <= sort.p[j])
    # When j == m we necessarily have q == m (each hypothesis' minimum is at
    # most its own p-value), so adjP[j+1] is never read out of range.
    adjP[j] <- if (j == q) sort.p[j] else min(adjP[j+1], q*sort.p[j]/j)
  }
  if (make.decision == FALSE){
    return(adjP[ro])
  } else{
    return(list(method= "Gilbert-Tarone-BH", significant.level = alpha, Result = data.frame(raw.p = p, adjust.p=adjP[ro], decision=ifelse(adjP[ro]<=alpha, "reject","accept"))))
  }
}



#' The adjusted p-values for Modified Benjamini-Yekutieli (BY) step-up FDR controlling procedure.
#'
#' The function for calculating the adjusted p-values based on original available p-values and all attainable p-values.
#'
#' @usage
#' MBY.p.adjust(p, p.set, alpha, make.decision)
#' @param p numeric vector of p-values (possibly with \code{\link[base]{NA}}s). Any other R object is coerced by \code{\link[base]{as.numeric}}. Same as in \code{\link[stats]{p.adjust}}.
#' @param p.set a list of numeric vectors, where each vector is the vector of all attainable p-values containing the available p-value for the corresponding hypothesis.
#' @param alpha significance level used to compare with adjusted p-values to make decisions; the default value is 0.05.
#' @param make.decision logical; if \code{TRUE}, then the output includes the decision rules obtained by comparing adjusted p-values with the significance level \eqn{\alpha}
#' @return
#' A numeric vector of the adjusted p-values (of the same length as \code{p}).
#' @seealso \code{\link{MBH.p.adjust}}, \code{\link{MBL.p.adjust}}
#' @author Yalin Zhu
#' @references
#' Benjamini, Y., and Yekutieli, D. (2001).
#' The control of the false discovery rate in multiple testing under dependency.
#' \emph{Annals of Statistics}, \strong{29}: 1165-1188.
#' @examples
#' p <- c(pbinom(1,8,0.5),pbinom(1,5,0.75),pbinom(1,6,0.6))
#' p.set <-list(pbinom(0:8,8,0.5),pbinom(0:5,5,0.75),pbinom(0:6,6,0.6))
#' MBY.p.adjust(p,p.set)
#' @export


MBY.p.adjust <- function(p, p.set, alpha = 0.05, make.decision = FALSE){
  o <- order(p); ro <- order(o); m <- length(p)
  # Benjamini-Yekutieli harmonic correction factor sum_{k=1}^m 1/k.
  C <- sum(1/seq_len(m))
  sort.p <- p[o]; sort.p.set <- p.set[o]
  # Preallocate (the original grew adjP from length 0 inside the loop).
  adjP <- numeric(m)
  # Step-up: from the largest sorted p-value down, keeping adjP monotone.
  for (i in m:1){
    # Discrete CDF of each hypothesis evaluated at sort.p[i]: the largest
    # attainable p-value not exceeding it (0 if none).  A vector replaces the
    # original m x m matrix, of which only one row was used per iteration.
    pCDF <- vapply(sort.p.set,
                   function(s) max(s[s <= sort.p[i]], 0),
                   numeric(1))
    crit <- min(1, sum(pCDF)*C/i)
    # Scalar branch: plain if/else instead of ifelse() on a length-1 test.
    adjP[i] <- if (i == m) crit else min(adjP[i+1], crit)
  }
  if (make.decision == FALSE){
    return(adjP[ro])
  } else{
    return(list(method= "Modified Benjamini-Yekutieli (BY)", significant.level = alpha, Result = data.frame(raw.p = p, adjust.p=adjP[ro], decision=ifelse(adjP[ro]<=alpha, "reject","accept"))))
  }
}


#' The adjusted p-values for Gilbert-Tarone-BY step-up FDR controlling procedure.
#'
#' The function for calculating the adjusted p-values based on original available p-values and all attainable p-values.
#'
#' @usage
#' GTBY.p.adjust(p, p.set, alpha, make.decision)
#' @param p numeric vector of p-values (possibly with \code{\link[base]{NA}}s). Any other R object is coerced by \code{\link[base]{as.numeric}}. Same as in \code{\link[stats]{p.adjust}}.
#' @param p.set a list of numeric vectors, where each vector is the vector of all attainable p-values containing the available p-value for the corresponding hypothesis.
#' @param alpha significance level used to compare with adjusted p-values to make decisions; the default value is 0.05.
#' @param make.decision logical; if \code{TRUE}, then the output includes the decision rules obtained by comparing adjusted p-values with the significance level \eqn{\alpha}
#' @return
#' A numeric vector of the adjusted p-values (of the same length as \code{p}).
#' @seealso \code{\link{GTBH.p.adjust}}, \code{\link{MBH.p.adjust}}, \code{\link{MBY.p.adjust}}
#' @author Yalin Zhu
#' @references
#' Gilbert, P. B. (2005).
#' A modified false discovery rate multiple-comparisons procedure for discrete data, applied to human immunodeficiency virus genetics.
#' \emph{Journal of the Royal Statistical Society: Series C (Applied Statistics)}, \strong{54}: 143-158.
#'
#' Benjamini, Y., and Yekutieli, D. (2001).
#' The control of the false discovery rate in multiple testing under dependency.
#' \emph{Annals of Statistics}, \strong{29}: 1165-1188.
#'
#' @examples
#' p <- c(pbinom(1,8,0.5),pbinom(1,5,0.75),pbinom(1,6,0.6))
#' p.set <-list(pbinom(0:8,8,0.5),pbinom(0:5,5,0.75),pbinom(0:6,6,0.6))
#' GTBY.p.adjust(p,p.set)
#' @export

GTBY.p.adjust <- function(p, p.set, alpha = 0.05, make.decision = FALSE){
  o <- order(p); ro <- order(o); m <- length(p); adjP <- numeric(m)
  sort.p <- p[o]; sort.p.set <- p.set[o]
  # Smallest attainable p-value of each hypothesis, in increasing order.
  minP <- sort(vapply(sort.p.set, min, numeric(1)))
  # Step-up from the largest sorted p-value.
  for (j in m:1){
    # Tarone-type count q: number of hypotheses whose attainable p-values can
    # reach sort.p[j] or below.  Equivalent to the original interval scan for
    # the i with minP[i] <= sort.p[j] < minP[i+1], but without indexing one
    # past the end of minP at i == m.
    q <- sum(minP <= sort.p[j])
    # BY harmonic correction over the q "effective" hypotheses.
    C <- sum(1/seq_len(q))
    # When j == m we necessarily have q == m, so adjP[j+1] is never read.
    adjP[j] <- if (j == q) sort.p[j] else min(adjP[j+1], q*C*sort.p[j]/j)
  }
  if (make.decision == FALSE){
    return(adjP[ro])
  } else{
    return(list(method= "Gilbert-Tarone-BY", significant.level = alpha, Result = data.frame(raw.p = p, adjust.p=adjP[ro], decision=ifelse(adjP[ro]<=alpha, "reject","accept"))))
  }
}
95 changes: 95 additions & 0 deletions R/FWERSD.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
#' The adjusted p-values for Modified Holm step-down FWER controlling procedure.
#'
#' The function for calculating the adjusted p-values based on original available p-values and all attainable p-values.
#'
#' @usage
#' MHolm.p.adjust(p, p.set, alpha, make.decision)
#' @param p numeric vector of p-values (possibly with \code{\link[base]{NA}}s). Any other R object is coerced by \code{\link[base]{as.numeric}}. Same as in \code{\link[stats]{p.adjust}}.
#' @param p.set a list of numeric vectors, where each vector is the vector of all attainable p-values containing the available p-value for the corresponding hypothesis.
#' @param alpha significance level used to compare with adjusted p-values to make decisions; the default value is 0.05.
#' @param make.decision logical; if \code{TRUE}, then the output includes the decision rules obtained by comparing adjusted p-values with the significance level \eqn{\alpha}
#' @return
#' A numeric vector of the adjusted p-values (of the same length as \code{p}).
#' @seealso \code{\link{TH.p.adjust}}, \code{\link[stats]{p.adjust}}.
#' @author Yalin Zhu
#' @references
#' Holm, S. (1979).
#' A simple sequentially rejective multiple test procedure.
#' \emph{Scandinavian Journal of Statistics}, \strong{6}: 65-70.
#' @examples
#' p <- c(pbinom(1,8,0.5),pbinom(1,5,0.75),pbinom(1,6,0.6))
#' p.set <-list(pbinom(0:8,8,0.5),pbinom(0:5,5,0.75),pbinom(0:6,6,0.6))
#' MHolm.p.adjust(p,p.set)
#' @export

MHolm.p.adjust <- function(p, p.set, alpha = 0.05, make.decision = FALSE){
  # Sort the raw p-values; `unsort` maps sorted positions back to input order.
  ord <- order(p)
  unsort <- order(ord)
  m <- length(p)
  ps <- p[ord]
  ps.set <- p.set[ord]
  adj <- numeric(m)
  # Step-down: smallest sorted p-value first, keeping adj non-decreasing.
  for (i in seq_len(m)) {
    # For every hypothesis not yet handled (positions i..m), the largest
    # attainable p-value not exceeding ps[i] (0 when none exists).
    tails <- vapply(ps.set[i:m],
                    function(s) max(s[s <= ps[i]], 0),
                    numeric(1))
    crit <- min(1, sum(tails))
    adj[i] <- if (i == 1) crit else max(adj[i - 1], crit)
  }
  if (make.decision == FALSE) {
    return(adj[unsort])
  } else {
    return(list(method= "Modified Holm", significant.level = alpha, Result = data.frame(raw.p = p, adjust.p=adj[unsort], decision=ifelse(adj[unsort]<=alpha, "reject","accept"))))
  }
}

#' The adjusted p-values for Tarone-Holm step-down FWER controlling procedure.
#'
#' The function for calculating the adjusted p-values based on original available p-values and all attainable p-values.
#'
#' @usage
#' TH.p.adjust(p, p.set, alpha, make.decision)
#' @param p numeric vector of p-values (possibly with \code{\link[base]{NA}}s). Any other R object is coerced by \code{\link[base]{as.numeric}}. Same as in \code{\link[stats]{p.adjust}}.
#' @param p.set a list of numeric vectors, where each vector is the vector of all attainable p-values containing the available p-value for the corresponding hypothesis.
#' @param alpha significance level used to compare with adjusted p-values to make decisions; the default value is 0.05.
#' @param make.decision logical; if \code{TRUE}, then the output includes the decision rules obtained by comparing adjusted p-values with the significance level \eqn{\alpha}
#' @return
#' A numeric vector of the adjusted p-values (of the same length as \code{p}).
#' @seealso \code{\link{MHolm.p.adjust}}, \code{\link[stats]{p.adjust}}.
#' @author Yalin Zhu
#' @references
#' Hommel, G., & Krummenauer, F. (1998).
#' Improvements and modifications of Tarone's multiple test procedure for discrete data.
#' \emph{Biometrics}, \strong{54}: 673-681.
#'
#' Holm, S. (1979).
#' A simple sequentially rejective multiple test procedure.
#' \emph{Scandinavian Journal of Statistics}, \strong{6}: 65-70.
#'
#' @examples
#' p <- c(pbinom(1,8,0.5),pbinom(1,5,0.75),pbinom(1,6,0.6))
#' p.set <-list(pbinom(0:8,8,0.5),pbinom(0:5,5,0.75),pbinom(0:6,6,0.6))
#' TH.p.adjust(p,p.set)
#' @export


TH.p.adjust <- function(p, p.set, alpha = 0.05, make.decision = FALSE){
  o <- order(p); ro <- order(o); m <- length(p)
  sort.p <- p[o]; sort.p.set <- p.set[o]; adjP <- numeric(m)
  # Step-down: smallest sorted p-value first (for-loop replaces the original
  # manual while-loop with an external counter).
  for (j in seq_len(m)){
    # Smallest attainable p-value of each hypothesis still in play (j..m).
    minP <- vapply(sort.p.set[j:m], min, numeric(1))
    # Tarone-type count q among the remaining hypotheses: how many can attain
    # a p-value of sort.p[j] or below.  Equivalent to the original interval
    # scan, but without indexing one past the end of minP.
    q <- sum(minP <= sort.p[j])
    crit <- min(1, q*sort.p[j])
    # Scalar branch: plain if/else instead of ifelse() on a length-1 test.
    adjP[j] <- if (j == 1) crit else max(adjP[j-1], crit)
  }
  if (make.decision == FALSE){
    return(adjP[ro])
  } else{
    return(list(method= "Tarone-Holm", significant.level = alpha, Result = data.frame(raw.p = p, adjust.p=adjP[ro], decision=ifelse(adjP[ro]<=alpha, "reject","accept"))))
  }
}



Loading

0 comments on commit 26f42d5

Please sign in to comment.