content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
#' Calculate mitigation costs
#'
#' Calculate mitigation costs
#'
#'
#' @param data quitte object
#' @param scenBau baseline scenario name
#' @param scenPol policy scenario name
#' @param yearFrom the startyear
#' @param yearTo the endyear
#' @param nameVar Name of the variable containing consumption. Defaults to
#' "Consumption"
#' @param nameDisrate Name of the variable for the discount rate, only needed
#' if discount=endo. Defaults to "Interest Rate t/(t-1)|Real"
#' @param discount discount rate - exogenous only for now
#' @return regional mitigation costs (quitte object)
#' @author Anselm Schultes
#' @examples
#'
#' \dontrun{
#' calcMitigationCost(qd,"BAU","POL")
#' }
#'
#' @export
calcMitigationCost <- function(data, scenBau, scenPol,
                               yearFrom = 2010, yearTo = 2100,
                               nameVar = 'Consumption',
                               nameDisrate = 'Interest Rate t/(t-1)|Real',
                               discount = 0.05) {
  # Fail early if the consumption variable is absent.
  # FIX: stop() concatenates its arguments directly; the original
  # stop(paste('Variable ', ...)) produced doubled spaces in the message.
  if (!nameVar %in% unique(data$variable))
    stop('Variable ', nameVar, ' not found in data.', call. = FALSE)

  # Restrict to the two scenarios, the two variables of interest and the
  # requested time window.
  data <- data[data$scenario %in% c(scenBau, scenPol)
               & data$variable %in% c(nameVar, nameDisrate)
               & data$period >= yearFrom
               & data$period <= yearTo, ]

  # Endogenous discounting: the policy scenario must be discounted with the
  # *baseline* interest rate, so overwrite POL rates with BAU rates.
  if (!is.numeric(discount)) {
    # FIX: "Stoping." -> "Stopping." in the error message.
    if (!nameDisrate %in% unique(data$variable)) {
      stop('Variable ', nameDisrate, ' not found in data. Stopping.', call. = FALSE)
    }
    data[data$variable == nameDisrate & data$scenario == scenPol, 'value'] <-
      data[data$variable == nameDisrate & data$scenario == scenBau, 'value']
  }

  # Cumulated, discounted values per model/scenario/region.
  tmp <- data.frame(calcCumulatedDiscount(data, nameVar = nameVar,
                                          nameDisrate = nameDisrate,
                                          discount = discount))

  # Mitigation cost = percentage loss of cumulated discounted consumption in
  # the policy scenario relative to the baseline, evaluated at yearTo.
  # arrange() sorts FALSE before TRUE, so value[1] is BAU and value[2] is POL.
  res <- tmp %>%
    filter(!!sym('period') == yearTo) %>%
    group_by(!!sym('model'), !!sym('region')) %>%
    arrange(!!sym('scenario') == scenPol) %>%
    summarise(
      !!sym('value') := 100 * (1 - !!sym('value')[2] / !!sym('value')[1])) %>%
    ungroup()

  res$scenario <- scenPol
  res$variable <- 'Mitigation cost'
  res$unit <- 'pp'
  as.quitte(data.frame(res))
}
|
/R/calcMitigationCost.R
|
no_license
|
0UmfHxcvx5J7JoaOhFSs5mncnisTJJ6q/quitte
|
R
| false
| false
| 2,346
|
r
|
#' Calculate mitigation costs
#'
#' Calculate mitigation costs
#'
#'
#' @param data quitte object
#' @param scenBau baseline scenario name
#' @param scenPol policy scenario name
#' @param yearFrom the startyear
#' @param yearTo the endyear
#' @param nameVar Name of the variable containing consumption. Defaults to
#' "Consumption"
#' @param nameDisrate Name of the variable for the discount rate, only needed
#' if discount=endo. Defaults to "Interest Rate t/(t-1)|Real"
#' @param discount discount rate - exogenous only for now
#' @return regional mitigation costs (quitte object)
#' @author Anselm Schultes
#' @examples
#'
#' \dontrun{
#' calcMitigationCost(qd,"BAU","POL")
#' }
#'
#' @export
calcMitigationCost <- function(data, scenBau, scenPol,
                               yearFrom = 2010, yearTo = 2100,
                               nameVar = 'Consumption',
                               nameDisrate = 'Interest Rate t/(t-1)|Real',
                               discount = 0.05) {
  # Fail early if the consumption variable is absent.
  # FIX: stop() concatenates its arguments directly; the original
  # stop(paste('Variable ', ...)) produced doubled spaces in the message.
  if (!nameVar %in% unique(data$variable))
    stop('Variable ', nameVar, ' not found in data.', call. = FALSE)

  # Restrict to the two scenarios, the two variables of interest and the
  # requested time window.
  data <- data[data$scenario %in% c(scenBau, scenPol)
               & data$variable %in% c(nameVar, nameDisrate)
               & data$period >= yearFrom
               & data$period <= yearTo, ]

  # Endogenous discounting: the policy scenario must be discounted with the
  # *baseline* interest rate, so overwrite POL rates with BAU rates.
  if (!is.numeric(discount)) {
    # FIX: "Stoping." -> "Stopping." in the error message.
    if (!nameDisrate %in% unique(data$variable)) {
      stop('Variable ', nameDisrate, ' not found in data. Stopping.', call. = FALSE)
    }
    data[data$variable == nameDisrate & data$scenario == scenPol, 'value'] <-
      data[data$variable == nameDisrate & data$scenario == scenBau, 'value']
  }

  # Cumulated, discounted values per model/scenario/region.
  tmp <- data.frame(calcCumulatedDiscount(data, nameVar = nameVar,
                                          nameDisrate = nameDisrate,
                                          discount = discount))

  # Mitigation cost = percentage loss of cumulated discounted consumption in
  # the policy scenario relative to the baseline, evaluated at yearTo.
  # arrange() sorts FALSE before TRUE, so value[1] is BAU and value[2] is POL.
  res <- tmp %>%
    filter(!!sym('period') == yearTo) %>%
    group_by(!!sym('model'), !!sym('region')) %>%
    arrange(!!sym('scenario') == scenPol) %>%
    summarise(
      !!sym('value') := 100 * (1 - !!sym('value')[2] / !!sym('value')[1])) %>%
    ungroup()

  res$scenario <- scenPol
  res$variable <- 'Mitigation cost'
  res$unit <- 'pp'
  as.quitte(data.frame(res))
}
|
rm(list=ls())
set.seed(75)
xVec<-sample(0:9,size=10,replace=T)
tmpFn1 <- function(x)
{
  # Raise each element to the power of its position: c(x1^1, x2^2, ..., xn^n).
  x^seq_along(x)
}
tmpFn2 <- function(x)
{
  # Term-by-term x[i]^i / i, e.g. the partial terms of a power series.
  idx <- seq_along(x)
  (x^idx) / idx
}
tmpFn <- function(x)
{
  # Moving average of every three consecutive elements of x.
  m <- length(x) - 2
  first  <- x[1:m]
  second <- x[2:(m + 1)]
  third  <- x[3:(m + 2)]
  (first + second + third) / 3
}
tmpFn(c(1:5,6:1))
tmpFn <- function(x)
{
  # Piecewise polynomial: x^2+2x+3 for x<0; x+3 for 0<=x<2; x^2+4x-7 otherwise.
  neg_branch <- x^2 + 2 * x + 3
  mid_branch <- x + 3
  top_branch <- x^2 + 4 * x - 7
  ifelse(x < 0, neg_branch, ifelse(x >= 0 & x < 2, mid_branch, top_branch))
}
tmp <- seq(-3,3,len=100)
plot(tmp, tmpFn(tmp), type="l")
tmpFn <- function(x)
{
  # Same piecewise polynomial; the inner test can drop the x >= 0 clause
  # because the outer ifelse has already handled all x < 0.
  lower <- x^2 + 2 * x + 3
  middle <- x + 3
  upper <- x^2 + 4 * x - 7
  ifelse(x < 0, lower, ifelse(x < 2, middle, upper))
}
tmp <- seq(-3,3,len=100)
plot(tmp, tmpFn(tmp), type="l")
evenFn <- function(mat)
{
  # Double every odd entry; even entries pass through unchanged.
  is_odd <- mat %% 2 == 1
  ifelse(is_odd, 2 * mat, mat)
}
evenFn(mat)
tmpFn <- function(mat)
{
  # Double every odd entry of mat and return the modified matrix.
  odd <- mat %% 2 == 1
  mat[odd] <- 2 * mat[odd]
  # FIX: the original had this return commented out, so the function
  # returned the RHS of the subscript-assignment (a bare vector of the
  # doubled odd values) invisibly instead of the whole matrix.
  mat
}
evenFn(mat)
mat
tmpFn <- function(n, k)
{
  # n x n tridiagonal matrix: k on the diagonal, 1 on both off-diagonals.
  result <- matrix(0, nrow = n, ncol = n)
  rows <- row(result)
  cols <- col(result)
  result[rows == cols] <- k
  result[abs(rows - cols) == 1] <- 1
  result
}
tmpFn <- function(n, k)
{
  # Same tridiagonal matrix, built from diag() instead of a zero matrix.
  band <- diag(k, nrow = n, ncol = n)
  off_diagonal <- abs(row(band) - col(band)) == 1
  band[off_diagonal] <- 1
  band
}
tmpFn(5,2)
tmpFn(6,7)
quadrant <- function(alpha)
{
  # Quadrant (1..4) of an angle in degrees; angles wrap modulo 360.
  wrapped <- alpha %% 360
  as.integer(wrapped / 90) + 1
}
m <- seq(0,430,by=10)
matrix(c(m,quadrant(m)),ncol=2,byrow=F)
day = 27
month = 2
year = 1997
k <- day
k
y <- year %% 100
y
c <- year %/% 100
c
m <- ((month - 3) %% 12) + 1
m
floor(2.6 * m - 0.2)
dow <- as.integer(( floor(2.6 * m - 0.26) + k + y + (y%/%4) + (c%/%4) - 2*c)%%7)
weekday <- function(day, month, year)
{
  # Day-of-week name via a Zeller-style congruence. March maps to month 1,
  # ..., February to month 12; vectorized over day/month/year.
  yy <- year %% 100
  cc <- year %/% 100
  mm <- ((month - 3) %% 12) + 1
  dow <- as.integer((floor(2.6 * mm - 0.26) + day + yy + (yy %/% 4) + (cc %/% 4) - 2 * cc) %% 7)
  day_names <- c("Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday")
  day_names[dow + 1]
}
weekday(10,2,2015)
weekday(10,7,1969)
weekday( c(27,18,21), c(2,2,1), c(1997,1940,1963) )
testLoop <- function(n)
{
  # Build the length n-1 sequence x1=1, x2=2, x[j] = x[j-1] + 2/x[j-1].
  if (n < 4) stop("The argument n must be an integer which is at least 4.\n")
  seq_out <- rep(NA, n - 1)
  seq_out[1] <- 1
  seq_out[2] <- 2
  for (idx in 3:(n - 1)) {
    prev <- seq_out[idx - 1]
    seq_out[idx] <- prev + 2 / prev
  }
  seq_out
}
testLoop(8)
testLoop2 <- function(yVec)
{
  # Sum of e^1 + e^2 + ... + e^n for n = length(yVec); 0 for an empty vector.
  sum(exp(seq_along(yVec)))
}
yVec <- vector(mode="numeric", length=0)
#yVec <- c(4,1,7,2)
seq(along=yVec)
length(yVec)
testLoop2(yVec)
quadmap <- function(start, rho, niter)
{
  # Iterate the logistic map x[k] = rho * x[k-1] * (1 - x[k-1]) for niter
  # steps and return the full trajectory.
  trajectory <- rep(NA, niter)
  trajectory[1] <- start
  for (step in 2:niter) {
    prev <- trajectory[step - 1]
    trajectory[step] <- rho * prev * (1 - prev)
  }
  trajectory
}
tmp <- quadmap(start=0.95, rho=2, niter=500)
plot(tmp, type = "l")
tmp <- quadmap(start=0.95, rho=2.99, niter=500)
plot(tmp, type = "l")
plot(tmp[300:500], type = "l")
quadmap2 <- function(start, rho)
{
  # Count logistic-map iterations until two successive values differ by
  # less than 0.02.
  n_iter <- 1
  pair <- c(start, rho * start * (1 - start))
  while (abs(pair[2] - pair[1]) >= 0.02) {
    pair[1] <- pair[2]
    pair[2] <- rho * pair[1] * (1 - pair[1])
    n_iter <- n_iter + 1
  }
  n_iter
}
quadmap2(start = 0.95, rho = 2.99)
tmp
x <- seq(2,56,by=3)
tmpFn <- function(xVec)
{
  # Sample lag-1 and lag-2 autocorrelation coefficients of xVec.
  centered <- xVec - mean(xVec)
  denom <- sum(centered^2)
  len <- length(xVec)
  lag1 <- sum(centered[2:len] * centered[1:(len - 1)]) / denom
  lag2 <- sum(centered[3:len] * centered[1:(len - 2)]) / denom
  c(lag1, lag2)
}
tmpFn(xVec)
tmpAcf <- function(xVec)
{
  # Sample lag-1 and lag-2 autocorrelations of xVec, returned as a list.
  xc <- xVec - mean(xVec)
  denom <- sum(xc^2)
  # FIX: the original read length(x), silently picking up a global `x`
  # instead of the function argument; use the argument's own length.
  n <- length(xVec)
  r1 <- sum( xc[2:n] * xc[1:(n-1)] )/denom
  r2 <- sum( xc[3:n] * xc[1:(n-2)] )/denom
  list(r1 = r1, r2 = r2)
}
tmpAcf <- function(x, k)
{
  # Autocorrelations of x at lags 0..k (lag 0 is always 1).
  xc <- x - mean(x)
  denom <- sum(xc^2)
  n <- length(x)
  acf_at_lag <- function(lag) sum(xc[(lag + 1):n] * xc[1:(n - lag)]) / denom
  c(1, vapply(1:k, acf_at_lag, numeric(1)))
}
|
/Ex3Functions.R
|
no_license
|
KrishnaGMohan/RExercises
|
R
| false
| false
| 3,509
|
r
|
rm(list=ls())
set.seed(75)
xVec<-sample(0:9,size=10,replace=T)
tmpFn1 <- function(x)
{
x^(1:length(x))
}
tmpFn2 <- function(x)
{
n <- length(x)
(x^(1:n))/(1:n)
}
tmpFn <- function(x)
{
n<-length(x)-2
(x[1:n] + x[2:(n+1)] + x[3:(n+2)])/3
}
tmpFn(c(1:5,6:1))
tmpFn <- function(x)
{
ifelse(x < 0, x^2 + 2*x + 3, ifelse(x >=0 & x < 2, x+3, x^2 + 4*x - 7))
}
tmp <- seq(-3,3,len=100)
plot(tmp, tmpFn(tmp), type="l")
tmpFn <- function(x)
{
ifelse(x < 0, x^2 + 2*x + 3, ifelse(x < 2, x+3, x^2 + 4*x - 7))
}
tmp <- seq(-3,3,len=100)
plot(tmp, tmpFn(tmp), type="l")
evenFn <- function(mat)
{
ifelse(mat %% 2 == 1, mat * 2, mat)
}
evenFn(mat)
tmpFn <- function(mat)
{
  # Double every odd entry of mat and return the modified matrix.
  odd <- mat %% 2 == 1
  mat[odd] <- 2 * mat[odd]
  # FIX: the original had this return commented out, so the function
  # returned the RHS of the subscript-assignment (a bare vector of the
  # doubled odd values) invisibly instead of the whole matrix.
  mat
}
evenFn(mat)
mat
tmpFn <- function(n,k)
{
m <- matrix(0, nrow=n, ncol=n)
m[ row(m) == col(m) ] <- k
m[ abs( row(m) - col(m) ) == 1 ] <- 1
m
}
tmpFn <- function(n,k)
{
m <- diag(k, nrow=n, ncol=n)
m[ abs( row(m) - col(m) ) == 1 ] <- 1
m
}
tmpFn(5,2)
tmpFn(6,7)
quadrant <- function(alpha)
{
as.integer(( alpha %% 360) / 90) +1
}
m <- seq(0,430,by=10)
matrix(c(m,quadrant(m)),ncol=2,byrow=F)
day = 27
month = 2
year = 1997
k <- day
k
y <- year %% 100
y
c <- year %/% 100
c
m <- ((month - 3) %% 12) + 1
m
floor(2.6 * m - 0.2)
dow <- as.integer(( floor(2.6 * m - 0.26) + k + y + (y%/%4) + (c%/%4) - 2*c)%%7)
weekday <- function(day, month, year)
{
k <- day
y <- year %% 100
c <- year %/% 100
m <- ((month - 3) %% 12) + 1
dow <- as.integer(( floor(2.6 * m - 0.26) + k + y + (y%/%4) + (c%/%4) - 2*c)%%7)
c("Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday")[dow +1]
}
weekday(10,2,2015)
weekday(10,7,1969)
weekday( c(27,18,21), c(2,2,1), c(1997,1940,1963) )
testLoop <- function(n)
{
if (n < 4) stop("The argument n must be an integer which is at least 4.\n")
x <- rep(NA, n-1)
x[1] <- 1
x[2] <- 2
for (j in (3:(n-1)))
{
x[j] <- x[j-1] + 2/x[j-1]
}
x
}
testLoop(8)
testLoop2 <- function(yVec)
{
sum(exp(seq(along=yVec)))
}
yVec <- vector(mode="numeric", length=0)
#yVec <- c(4,1,7,2)
seq(along=yVec)
length(yVec)
testLoop2(yVec)
quadmap <- function( start, rho, niter)
{
x <- rep(NA, niter)
x[1] <- start
for (k in 2:niter)
{
x[k] <- rho * x[k - 1] * (1 - x[k - 1])
}
x
}
tmp <- quadmap(start=0.95, rho=2, niter=500)
plot(tmp, type = "l")
tmp <- quadmap(start=0.95, rho=2.99, niter=500)
plot(tmp, type = "l")
plot(tmp[300:500], type = "l")
quadmap2 <- function( start, rho)
{
niter <- 1
x <- c(start, rho * start * (1 - start))
while ( abs(x[2] - x[1]) >= 0.02 )
{
x[1] <- x[2]
x[2] = rho * x[1] * (1 - x[1])
niter <- niter + 1
}
niter
}
quadmap2(start = 0.95, rho = 2.99)
tmp
x <- seq(2,56,by=3)
tmpFn <- function(xVec)
{
r <- rep(NA, 2)
r[1] <- sum((xVec[2:length(xVec)] - mean(xVec)) * (xVec[1:(length(xVec)-1)] - mean(xVec)))/sum((xVec - mean(xVec)) ^ 2)
r[2] <- sum((xVec[3:length(xVec)] - mean(xVec)) * (xVec[1:(length(xVec)-2)] - mean(xVec)))/sum((xVec - mean(xVec)) ^ 2)
r
}
tmpFn(xVec)
tmpAcf <- function(xVec)
{
  # Sample lag-1 and lag-2 autocorrelations of xVec, returned as a list.
  xc <- xVec - mean(xVec)
  denom <- sum(xc^2)
  # FIX: the original read length(x), silently picking up a global `x`
  # instead of the function argument; use the argument's own length.
  n <- length(xVec)
  r1 <- sum( xc[2:n] * xc[1:(n-1)] )/denom
  r2 <- sum( xc[3:n] * xc[1:(n-2)] )/denom
  list(r1 = r1, r2 = r2)
}
tmpAcf <- function(x, k)
{
xc <- x - mean(x)
denom <- sum(xc^2)
n <- length(x)
tmpFn <- function(j){ sum( xc[(j+1):n] * xc[1:(n-j)] )/denom }
c(1, sapply(1:k, tmpFn))
}
|
# test_igraph_inv.R
# Created by Disa Mhembere on 2013-12-31.
# Email: disa@jhu.edu
# Copyright (c) 2013. All rights reserved.
#
# Compute and time five graph invariants (degree, scan statistic,
# transitivity, triangle count, spectral embedding) on a graph read from
# disk, mirroring the invariants computed by MROCP for comparison.
# Usage: Rscript test_igraph_inv.R <graph file> [-f <format>]
#
# NOTE(review): igraph::scan1(), read.graph() and adjacent.triangles() come
# from 2013-era igraph; newer releases renamed these (local_scan, read_graph,
# count_triangles) — confirm the pinned igraph version before running.
require(igraph)
require(argparse)
parser <- ArgumentParser(description="Run same invariants as MROCP on igraphs")
parser$add_argument("gfn", help="The graph file name")
parser$add_argument("-f", "--graph_format", default="gml", help="The graph format e.g gml, graphml, pajek, dot etc..")
result <- parser$parse_args()
# Fail fast with a clear message if the input file is missing.
if (!file.exists(result$gfn)){
stop(paste("File", result$gfn, "does not exist!!\n"))
}
g <- igraph::read.graph(result$gfn, format=result$graph_format) # Throws an exception if the graph format is unknown
# Wall-clock start time (elapsed seconds) for the total-time report below.
begin <- proc.time()[3]
# Degree
cat("Processing Degree Vector...\n")
system.time( igraph::degree(g, mode="total") )
# Scan Stat 1
cat("Processing Scan Statistic...\n")
system.time( igraph::scan1(g) )
# Clustering Coefficient
cat("Processing Transitivity (i.e. Clustering Coefficient) ...\n")
system.time( igraph::transitivity(g, "local") )
# Triangles
cat("Processing Triangle count ...\n")
system.time( igraph::adjacent.triangles(g) )
# Eigendecomposition
cat("Spectral decomposition ...\n")
# Cap the embedding dimension at 100; for small graphs use vcount - 2 so the
# requested number of eigenpairs stays below the matrix size.
if (igraph::vcount(g) >= 102){
eigs <- 100
} else {
eigs <- igraph::vcount(g)-2
}
system.time( igraph::adjacency.spectral.embedding(g, eigs ))
cat("Total time for the 5 invariants = ", (proc.time()[3]-begin), " ...\n")
|
/MR-OCP/MROCPdjango/computation/tests/test_igraph_inv.R
|
permissive
|
gkiar/ndgrutedb
|
R
| false
| false
| 1,387
|
r
|
# test_igraph_inv.R
# Created by Disa Mhembere on 2013-12-31.
# Email: disa@jhu.edu
# Copyright (c) 2013. All rights reserved.
require(igraph)
require(argparse)
parser <- ArgumentParser(description="Run same invariants as MROCP on igraphs")
parser$add_argument("gfn", help="The graph file name")
parser$add_argument("-f", "--graph_format", default="gml", help="The graph format e.g gml, graphml, pajek, dot etc..")
result <- parser$parse_args()
if (!file.exists(result$gfn)){
stop(paste("File", result$gfn, "does not exist!!\n"))
}
g <- igraph::read.graph(result$gfn, format=result$graph_format) # Throws an exception if the graph format is unknown
begin <- proc.time()[3]
# Degree
cat("Processing Degree Vector...\n")
system.time( igraph::degree(g, mode="total") )
# Scan Stat 1
cat("Processing Scan Statistic...\n")
system.time( igraph::scan1(g) )
# Clustering Coefficient
cat("Processing Transitivity (i.e. Clustering Coefficient) ...\n")
system.time( igraph::transitivity(g, "local") )
# Triangles
cat("Processing Triangle count ...\n")
system.time( igraph::adjacent.triangles(g) )
# Eigendecomposition
cat("Spectral decomposition ...\n")
if (igraph::vcount(g) >= 102){
eigs <- 100
} else {
eigs <- igraph::vcount(g)-2
}
system.time( igraph::adjacency.spectral.embedding(g, eigs ))
cat("Total time for the 5 invariants = ", (proc.time()[3]-begin), " ...\n")
|
#' Submit an expression to be evaluated to multiple jobs.
#' @param expr An expression to be passed to Slurm.
#' @template slurm
#' @template job_name-tmp_path
#' @template sbatch_opt
#' @template rscript_opt
#' @template njobs
#' @return A list of length `njobs`.
#' @export
# NOTE(review): this function relies on package-internal helpers (the_plan,
# check_full_path, check_sbatch_opt, new_rscript, new_bash, snames,
# new_slurm_job, sbatch, Slurm_collect, opts_slurmR) that are not visible in
# this file; comments about their behavior below are inferences to confirm
# against their definitions.
Slurm_EvalQ <- function(
expr,
njobs = 2L,
job_name = opts_slurmR$get_job_name(),
tmp_path = opts_slurmR$get_tmp_path(),
plan = "collect",
sbatch_opt = list(),
rscript_opt = list(),
seeds = NULL,
compress = TRUE,
export = NULL,
export_env = NULL,
libPaths = .libPaths(),
hooks = NULL,
overwrite = TRUE
) {
# Figuring out what are we doing.
# plan is translated into submit/wait/collect flags used at the end.
plan <- the_plan(plan)
# Checking the path
check_full_path(
tmp_path = tmp_path, job_name = job_name, overwrite = overwrite
)
# Checking job name
sbatch_opt <- check_sbatch_opt(sbatch_opt, job_name = job_name, ntasks = 1L)
# Setting the job name
# Persist path/name in the package-level options so helpers pick them up.
opts_slurmR$set_tmp_path(tmp_path)
opts_slurmR$set_job_name(job_name)
# Parsing expression ---------------------------------------------------------
# Capture the unevaluated expression as source text so it can be embedded
# verbatim in the generated per-job R script.
sexpr <- deparse(substitute(expr))
# RSCRIPT --------------------------------------------------------------------
# Default to the caller's environment when looking up exported objects.
if (is.null(export_env))
export_env <- parent.frame()
rscript <- new_rscript(
njobs,
libPaths = libPaths,
tmp_path = tmp_path,
job_name = job_name
)
# Serialize the requested objects so each job can load them from disk.
if (length(export)) {
rscript$add_rds(
mget(export, envir = export_env), compress = compress, index = FALSE)
}
# Setting the seeds
rscript$set_seed(seeds)
# Wrap the user expression in tryCatch so a job-level error comes back as a
# condition object in the result list instead of killing the job.
rscript$append(
paste0(
"ans <- list(tryCatch({\n",
paste0(gsub("^", " ", sexpr), collapse = "\n"),
"\n}, error = function(e) e))"
)
)
# Finalizing and writing it out
rscript$finalize("ans", compress = compress)
rscript$write()
# BASH script ----------------------------------------------------------------
# Generate the sbatch submission script that invokes the R script above.
bash <- new_bash(
njobs = njobs,
job_name = job_name,
output = snames("out", job_name = job_name, tmp_path = tmp_path),
filename = snames("sh", job_name = job_name, tmp_path = tmp_path)
)
bash$add_SBATCH(sbatch_opt)
bash$Rscript(
file = snames("r", job_name = job_name, tmp_path = tmp_path),
flags = rscript_opt
)
bash$write()
# Returning ------------------------------------------------------------------
# Bundle everything into a slurm_job object; submission and collection
# behavior depends on the requested plan.
ans <- new_slurm_job(
call = match.call(),
rscript = snames("r", job_name = job_name, tmp_path = tmp_path),
bashfile = snames("sh", job_name = job_name, tmp_path = tmp_path),
robjects = NULL,
njobs = njobs,
opts_job = sbatch_opt,
opts_r = opts_slurmR$get_opts_r(),
hooks = hooks
)
if (plan$collect)
return(Slurm_collect(sbatch(ans, wait = plan$wait, submit = plan$submit)))
else
return(sbatch(ans, wait = plan$wait, submit = plan$submit))
}
|
/R/Slurm_EvalQ.R
|
permissive
|
josezea/slurmR
|
R
| false
| false
| 2,899
|
r
|
#' Submit an expression to be evaluated to multiple jobs.
#' @param expr An expression to be passed to Slurm.
#' @template slurm
#' @template job_name-tmp_path
#' @template sbatch_opt
#' @template rscript_opt
#' @template njobs
#' @return A list of length `njobs`.
#' @export
Slurm_EvalQ <- function(
expr,
njobs = 2L,
job_name = opts_slurmR$get_job_name(),
tmp_path = opts_slurmR$get_tmp_path(),
plan = "collect",
sbatch_opt = list(),
rscript_opt = list(),
seeds = NULL,
compress = TRUE,
export = NULL,
export_env = NULL,
libPaths = .libPaths(),
hooks = NULL,
overwrite = TRUE
) {
# Figuring out what are we doing.
plan <- the_plan(plan)
# Checking the path
check_full_path(
tmp_path = tmp_path, job_name = job_name, overwrite = overwrite
)
# Checking job name
sbatch_opt <- check_sbatch_opt(sbatch_opt, job_name = job_name, ntasks = 1L)
# Setting the job name
opts_slurmR$set_tmp_path(tmp_path)
opts_slurmR$set_job_name(job_name)
# Parsing expression ---------------------------------------------------------
sexpr <- deparse(substitute(expr))
# RSCRIPT --------------------------------------------------------------------
if (is.null(export_env))
export_env <- parent.frame()
rscript <- new_rscript(
njobs,
libPaths = libPaths,
tmp_path = tmp_path,
job_name = job_name
)
if (length(export)) {
rscript$add_rds(
mget(export, envir = export_env), compress = compress, index = FALSE)
}
# Setting the seeds
rscript$set_seed(seeds)
rscript$append(
paste0(
"ans <- list(tryCatch({\n",
paste0(gsub("^", " ", sexpr), collapse = "\n"),
"\n}, error = function(e) e))"
)
)
# Finalizing and writing it out
rscript$finalize("ans", compress = compress)
rscript$write()
# BASH script ----------------------------------------------------------------
bash <- new_bash(
njobs = njobs,
job_name = job_name,
output = snames("out", job_name = job_name, tmp_path = tmp_path),
filename = snames("sh", job_name = job_name, tmp_path = tmp_path)
)
bash$add_SBATCH(sbatch_opt)
bash$Rscript(
file = snames("r", job_name = job_name, tmp_path = tmp_path),
flags = rscript_opt
)
bash$write()
# Returning ------------------------------------------------------------------
ans <- new_slurm_job(
call = match.call(),
rscript = snames("r", job_name = job_name, tmp_path = tmp_path),
bashfile = snames("sh", job_name = job_name, tmp_path = tmp_path),
robjects = NULL,
njobs = njobs,
opts_job = sbatch_opt,
opts_r = opts_slurmR$get_opts_r(),
hooks = hooks
)
if (plan$collect)
return(Slurm_collect(sbatch(ans, wait = plan$wait, submit = plan$submit)))
else
return(sbatch(ans, wait = plan$wait, submit = plan$submit))
}
|
##' Additional Themes and Theme Components for 'ggplot2' based on OCHA graphic styles
##'
##' A compilation of extra themes and theme components for 'ggplot2'.
##' The core theme is `theme_ocha`.
##'
##' This is the package-level documentation stub; the `NULL` below is the
##' conventional roxygen2 target for package docs.
##' @name ochathemes-package
##' @aliases ochathemes
##' @docType package
##' @author \email{mail@ahmadoudicko.com}
##' @keywords package
NULL
|
/R/ochathemes-package.R
|
permissive
|
mmusori/ochathemes
|
R
| false
| false
| 344
|
r
|
##' Additional Themes and Theme Components for 'ggplot2' based on OCHA graphic styles
##'
##' A compilation of extra themes and theme components for 'ggplot2'
##' The core theme: `theme_ocha`
##'
##'
##' @name ochathemes-package
##' @aliases ochathemes
##' @docType package
##' @author \email{mail@ahmadoudicko.com}
##' @keywords package
NULL
|
# Query the miRDB target-prediction web service (http://mirdb.org) by POSTing
# its search form and scraping the first result table from the returned HTML.
#
# Args:
#   searchBox: the query string — a miRNA name (e.g. "hsa-miR-145-3p") or a
#     gene identifier, depending on searchType.
#   searchType: "miRNA" or "gene" (matched via match.arg).
#   Species: "Human" or "Mouse" (matched via match.arg).
# Returns: a data.frame of the scraped result table.
#
# NOTE(review): depends on httr/curl/rlist/tidyverse/rvest and on the exact
# layout of miRDB's HTML; any change to the site silently breaks the parsing.
# Loading libraries inside the function (and suppressing their messages) is
# a side effect on the caller's search path — consider requireNamespace().
miRDB <- function(searchBox = NA, searchType = c("miRNA", "gene"), Species = c("Human", "Mouse")){
suppressWarnings(suppressPackageStartupMessages({
library(httr)
library(curl)
library(rlist)
library(tidyverse)
library(rvest)} ))
# Browser-like headers; the Referer/User-Agent mimic a real form submission.
headers <- c('Accept'='text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Content-Type'='application/x-www-form-urlencoded',
'User-Agent'='Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
'Referer'='http://mirdb.org/index.html',
'Connection'='keep-alive'
)
# Form fields expected by miRDB's search CGI.
payload<-list(
Species = match.arg(Species),
searchBox = searchBox,
searchType = match.arg(searchType),
submitButton = "Go",
geneChoice = "geneID"
)
url <- "http://mirdb.org/cgi-bin/search.cgi"
# POST the form, parse the HTML, split each table row on newlines, drop the
# header row, and name the six data columns.
# NOTE(review): the trailing .[,-7] presumably removes an empty seventh
# column produced by the row split — confirm against the live table layout.
POST(url,add_headers(.headers =headers),body = payload, encode= "form" ) %>%
read_html() %>%
html_nodes("table#table1 tr") %>% html_text() %>% str_split("\\n") %>%
do.call(rbind,.) %>% .[-1,] %>%
data.frame(stringsAsFactors = FALSE) %>%
setNames(c("Target Detail", "Target Rank", "Target Score", "miRNA Name", "Gene Symbol", "Gene Description")) %>%
.[,-7]
}
df <- miRDB(searchBox = "hsa-miR-145-3p",
searchType = "miRNA",
Species = "Human")
df2 <- miRDB(searchBox = "7157",
searchType = "gene",
Species = "Human")
|
/R/miRDB.R
|
no_license
|
shitiezhu/BioMedR
|
R
| false
| false
| 1,530
|
r
|
miRDB <- function(searchBox = NA, searchType = c("miRNA", "gene"), Species = c("Human", "Mouse")){
suppressWarnings(suppressPackageStartupMessages({
library(httr)
library(curl)
library(rlist)
library(tidyverse)
library(rvest)} ))
headers <- c('Accept'='text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Content-Type'='application/x-www-form-urlencoded',
'User-Agent'='Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
'Referer'='http://mirdb.org/index.html',
'Connection'='keep-alive'
)
payload<-list(
Species = match.arg(Species),
searchBox = searchBox,
searchType = match.arg(searchType),
submitButton = "Go",
geneChoice = "geneID"
)
url <- "http://mirdb.org/cgi-bin/search.cgi"
POST(url,add_headers(.headers =headers),body = payload, encode= "form" ) %>%
read_html() %>%
html_nodes("table#table1 tr") %>% html_text() %>% str_split("\\n") %>%
do.call(rbind,.) %>% .[-1,] %>%
data.frame(stringsAsFactors = FALSE) %>%
setNames(c("Target Detail", "Target Rank", "Target Score", "miRNA Name", "Gene Symbol", "Gene Description")) %>%
.[,-7]
}
df <- miRDB(searchBox = "hsa-miR-145-3p",
searchType = "miRNA",
Species = "Human")
df2 <- miRDB(searchBox = "7157",
searchType = "gene",
Species = "Human")
|
# install.packages('RSQLite')
library(RSQLite)
# Companion script to nettoyage_equipes_1-7.r: loads the cleaned CSV tables
# and injects them into a local SQLite database (tables.db).
bd_cours <- read.csv2('bd_cours_1-7.csv', header = TRUE)
bd_liens <- read.csv2('bd_liens_1-7.csv', header = TRUE)
bd_etudiants <- read.csv2('bd_etudiants_1-7.csv', header = TRUE)
tables.db <- dbConnect(SQLite(), dbname = "tables.db")

# Create the 'cours' table.
# FIX: 'BOLEAN' -> 'BOOLEAN' (typos in the original column types).
cours_sql <- "CREATE TABLE cours (sigle CHAR(6),pratique BOOLEAN(1),credits INTEGER(1),concentration BOOLEAN(1),option BOOLEAN(1),PRIMARY KEY(sigle));"
dbSendQuery(tables.db, cours_sql)
dbWriteTable(tables.db, append = TRUE, name = "cours", value = bd_cours, row.names = FALSE)

# Create the 'etudiants' table.
# FIX: a comma was missing between the PRIMARY KEY and FOREIGN KEY table
# constraints (a SQL syntax error), plus the BOLEAN typos.
etudiants_sql <- "CREATE TABLE etudiants (id VARCHAR,prenom VARCHAR,nom VARCHAR,sexe CHAR(1),naissance INTEGER(4),faune BOOLEAN(1),programme BOOLEAN(1),pays_naissance CHAR(2),region_naissance INTEGER(2),diete CHAR(2),PRIMARY KEY(id), FOREIGN KEY(id) REFERENCES liens(etudiant1));"
dbSendQuery(tables.db, etudiants_sql)
dbWriteTable(tables.db, append = TRUE, name = "etudiants", value = bd_etudiants, row.names = FALSE)

# Create the 'liens' table.
# FIX: 'VARCCHAR' -> 'VARCHAR' and the missing comma after PRIMARY KEY(id).
liens_sql <- "CREATE TABLE liens (sigle CHAR(6),etudiant1 VARCHAR,etudiant2 VARCHAR,session CHAR(3),id VARCHAR,PRIMARY KEY(id), FOREIGN KEY(sigle) REFERENCES cours(sigle), FOREIGN KEY(etudiant1) REFERENCES etudiants (id) ON DELETE CASCADE, FOREIGN KEY(etudiant2) REFERENCES etudiants (id) ON DELETE CASCADE);"
dbSendQuery(tables.db, liens_sql)
dbWriteTable(tables.db, append = TRUE, name = "liens", value = bd_liens, row.names = FALSE)

# Create the 'liensfreq' (link frequency) table — same fixes as above.
liens_freq_sql <- "CREATE TABLE liensfreq (Var1 VARCHAR,Var2 VARCHAR,Freq INTEGER(2),id VARCHAR,PRIMARY KEY(id), FOREIGN KEY(Var1) REFERENCES etudiants (id) ON DELETE CASCADE, FOREIGN KEY(Var2) REFERENCES etudiants (id) ON DELETE CASCADE);"
dbSendQuery(tables.db, liens_freq_sql)

# Build the link-frequency table: cross-tabulate student pairs, keep only
# rows whose first student is a known id, then drop self-links.
mat_liens <- table(bd_liens$etudiant1, bd_liens$etudiant2)
mat_liens <- as.matrix(mat_liens)
bd_liens_freq <- data.frame(mat_liens)
bd_liens_freq <- subset(bd_liens_freq, bd_liens_freq$Var1 %in% bd_etudiants$id)
# FIX: the original removed diagonal rows one at a time inside a for-loop
# indexed on a parallel data frame; after the first removal the row indices
# shift, so the wrong rows were dropped. Filter Var1 == Var2 out in a single
# vectorized step instead (factors compared as character).
bd_liens_freq <- subset(bd_liens_freq, as.character(bd_liens_freq$Var1) != as.character(bd_liens_freq$Var2))

# Add a unique 'Var1-Var2' id column.
bd_liens_freq[, 4] <- paste0(bd_liens_freq$Var1, '-', bd_liens_freq$Var2)
names(bd_liens_freq)[4] <- 'id'
dbWriteTable(tables.db, append = TRUE, name = "liensfreq", value = bd_liens_freq, row.names = FALSE)
|
/injection_données.R
|
no_license
|
elysepaquette/lesreseaux
|
R
| false
| false
| 2,642
|
r
|
# install.packages('RSQLite')
library(RSQLite)
# Companion script to nettoyage_equipes_1-7.r: loads the cleaned CSV tables
# and injects them into a local SQLite database (tables.db).
bd_cours <- read.csv2('bd_cours_1-7.csv', header = TRUE)
bd_liens <- read.csv2('bd_liens_1-7.csv', header = TRUE)
bd_etudiants <- read.csv2('bd_etudiants_1-7.csv', header = TRUE)
tables.db <- dbConnect(SQLite(), dbname = "tables.db")

# Create the 'cours' table.
# FIX: 'BOLEAN' -> 'BOOLEAN' (typos in the original column types).
cours_sql <- "CREATE TABLE cours (sigle CHAR(6),pratique BOOLEAN(1),credits INTEGER(1),concentration BOOLEAN(1),option BOOLEAN(1),PRIMARY KEY(sigle));"
dbSendQuery(tables.db, cours_sql)
dbWriteTable(tables.db, append = TRUE, name = "cours", value = bd_cours, row.names = FALSE)

# Create the 'etudiants' table.
# FIX: a comma was missing between the PRIMARY KEY and FOREIGN KEY table
# constraints (a SQL syntax error), plus the BOLEAN typos.
etudiants_sql <- "CREATE TABLE etudiants (id VARCHAR,prenom VARCHAR,nom VARCHAR,sexe CHAR(1),naissance INTEGER(4),faune BOOLEAN(1),programme BOOLEAN(1),pays_naissance CHAR(2),region_naissance INTEGER(2),diete CHAR(2),PRIMARY KEY(id), FOREIGN KEY(id) REFERENCES liens(etudiant1));"
dbSendQuery(tables.db, etudiants_sql)
dbWriteTable(tables.db, append = TRUE, name = "etudiants", value = bd_etudiants, row.names = FALSE)

# Create the 'liens' table.
# FIX: 'VARCCHAR' -> 'VARCHAR' and the missing comma after PRIMARY KEY(id).
liens_sql <- "CREATE TABLE liens (sigle CHAR(6),etudiant1 VARCHAR,etudiant2 VARCHAR,session CHAR(3),id VARCHAR,PRIMARY KEY(id), FOREIGN KEY(sigle) REFERENCES cours(sigle), FOREIGN KEY(etudiant1) REFERENCES etudiants (id) ON DELETE CASCADE, FOREIGN KEY(etudiant2) REFERENCES etudiants (id) ON DELETE CASCADE);"
dbSendQuery(tables.db, liens_sql)
dbWriteTable(tables.db, append = TRUE, name = "liens", value = bd_liens, row.names = FALSE)

# Create the 'liensfreq' (link frequency) table — same fixes as above.
liens_freq_sql <- "CREATE TABLE liensfreq (Var1 VARCHAR,Var2 VARCHAR,Freq INTEGER(2),id VARCHAR,PRIMARY KEY(id), FOREIGN KEY(Var1) REFERENCES etudiants (id) ON DELETE CASCADE, FOREIGN KEY(Var2) REFERENCES etudiants (id) ON DELETE CASCADE);"
dbSendQuery(tables.db, liens_freq_sql)

# Build the link-frequency table: cross-tabulate student pairs, keep only
# rows whose first student is a known id, then drop self-links.
mat_liens <- table(bd_liens$etudiant1, bd_liens$etudiant2)
mat_liens <- as.matrix(mat_liens)
bd_liens_freq <- data.frame(mat_liens)
bd_liens_freq <- subset(bd_liens_freq, bd_liens_freq$Var1 %in% bd_etudiants$id)
# FIX: the original removed diagonal rows one at a time inside a for-loop
# indexed on a parallel data frame; after the first removal the row indices
# shift, so the wrong rows were dropped. Filter Var1 == Var2 out in a single
# vectorized step instead (factors compared as character).
bd_liens_freq <- subset(bd_liens_freq, as.character(bd_liens_freq$Var1) != as.character(bd_liens_freq$Var2))

# Add a unique 'Var1-Var2' id column.
bd_liens_freq[, 4] <- paste0(bd_liens_freq$Var1, '-', bd_liens_freq$Var2)
names(bd_liens_freq)[4] <- 'id'
dbWriteTable(tables.db, append = TRUE, name = "liensfreq", value = bd_liens_freq, row.names = FALSE)
|
# Plot Functionality for package bpwpm
#-------------------------------------------------------------------------------
#' Generic bpwpm plotting
#'
#' Once a bpwpm has been run using the function \code{\link{bpwpm_gibbs}}, the
#' chains can be plotted and hence evaluated. This generic function builds sets
#' of plots for each parameter of the model, \eqn{\beta}, \eqn{w_1}, \eqn{w_2},
#' etc.
#'
#' @param object An object of the class bpwpm.
#' @param n Number of draws to plot.
#' @param ... Additional parameters (currently unused by this method).
#'
#' @return Called for its side effect of printing a series of line plots and
#'   histograms, pausing for user input between them.
#' @export
#'
plot.bpwpm <- function(object, n = 100, ...) {
  # Fail fast on the wrong class. The original called the non-existent
  # function `error()`, which aborted with a misleading "could not find
  # function" message; stop() raises the intended error.
  if (!inherits(object, "bpwpm")) {
    stop("Object not of the class bpwpm")
  }

  # Trace plot and histogram for the beta chain.
  p <- plot_chains(object$betas, n, title = "Betas")
  print(p)
  readline(prompt = "Press [enter] to view next plot")
  p <- plot_hist(object$betas, n, title = "Betas")
  print(p)

  # Trace plot and histogram for each w_i chain.
  # seq_along() is safe when object$w is empty (seq(1, 0) would iterate twice).
  for (i in seq_along(object$w)) {
    readline(prompt = "Press [enter] to view next plot")
    p <- plot_chains(object$w[[i]], n, title = paste("w_", i))
    print(p)
    readline(prompt = "Press [enter] to view next plot")
    p <- plot_hist(object$w[[i]], n, title = paste("w_", i))
    print(p)
  }
}
#-------------------------------------------------------------------------------
#' Plot MCMC Chains
#'
#' Plots the last n draws of an MCMC chain, one coloured line per parameter.
#'
#' @inheritParams plot.bpwpm
#' @param mcmc_chain An MCMC Chain matrix. (draws * number of params)
#' @param title Title for the plot
#'
#' @return A ggplot2 lines plot
#' @export
#'
plot_chains <- function(mcmc_chain, n = 100, title = ""){
# Clamp the window to the number of available draws.
dim_mcmc <- dim(mcmc_chain)
n <- min(n, dim_mcmc[1])
# Reshape the last n rows to long format: one (Parameters, value) pair per
# cell. NOTE(review): tidyr::gather() is superseded by pivot_longer() and
# expects a data frame -- presumably mcmc_chain is a data.frame here; confirm
# against callers.
mcmc_temp <- tidyr::gather(mcmc_chain[seq(dim_mcmc[1] - n + 1,dim_mcmc[1]),],
key = Parameters)
# x is the within-window draw index 1..n, repeated once per parameter column
# (gather stacks the columns, so the index recycles dim_mcmc[2] times).
ggplot2::ggplot(mcmc_temp, aes(x = rep(seq(1,n),dim_mcmc[2]),
y = value, group = Parameters,
colour = Parameters)) +
geom_line() + xlab("Index") + ylab("Value") + ggtitle(title)
}
#-------------------------------------------------------------------------------
#' Plot MCMC Chains histograms
#'
#' Plots histograms of the last draws of an MCMC chain, one fill colour per
#' parameter, to help assess convergence.
#'
#' @inheritParams plot.bpwpm
#' @inheritParams plot_chains
#' @param number Number of draws to include. NOTE(review): plot_chains calls
#'   the equivalent argument `n`; callers in this file pass it positionally,
#'   so unifying the name would be backward compatible for them.
#'
#' @return A histogram for the n draws and parameters of the chain
#' @export
#'
plot_hist <- function(mcmc_chain, number = 100, title = "", ...){
# Clamp the window to the number of available draws.
dim_mcmc <- dim(mcmc_chain)
n <- min(number, dim_mcmc[1])
# Reshape the last n rows to long format (one Parameters/value pair per
# cell). NOTE(review): tidyr::gather() is superseded by pivot_longer() and
# expects a data frame -- presumably mcmc_chain is a data.frame here.
beta_temp <- tidyr::gather(mcmc_chain[seq(dim_mcmc[1] - n + 1,dim_mcmc[1]),],
key = Parameters)
# `...` is forwarded to geom_histogram (e.g. bins, binwidth).
ggplot2::ggplot(beta_temp, aes(x = value, fill = Parameters)) +
geom_histogram(..., position = "dodge") + xlab("Value") +
ggtitle(title)
}
#-------------------------------------------------------------------------------
#' Generic function for plotting bpwpm_predictions objects
#'
#' Once a model has been run and evaluated, a prediction can be made using the
#' function \code{\link{predict.bpwpm}}. The Input \code{X} and output \code{Y}
#' are saved on the prediction object and can be plotted against the final PWP
#' expansion for the model.
#'
#' @param object An object of the class bpwpm_prediction.
#' @param ... Other arguments (currently unused by this method).
#'
#' @return A series of plots from ggplot2, produced as a side effect.
#' @export
#'
plot.bpwpm_prediction <- function(object, ...) {
  # Validate the class up front. The original called the non-existent
  # `error()` function; stop() raises the intended error.
  if (!inherits(object, "bpwpm_prediction")) {
    stop("Object not of the class bpwpm_prediction")
  }

  # Delegate to the per-dimension plotter with the stored data and parameters.
  plot_each_F(object$Y, object$X, object$bpwpm_params)
}
#-------------------------------------------------------------------------------
#' Plots each dimension f(x)
#'
#' With the posterior \code{w} parameters calculated from the Gibbs run, and
#' \code{\link{posterior_params}}, a final F matrix can be calculated and
#' hence plotted against every input X to see how the PWP expansion looks
#' for the specified set of parameters.
#'
#' @param Y A vector of binary response. Can be encoded as either a factor
#' vector or as a numeric one.
#' @param X A data frame or matrix containing the original Inputs for the model.
#' @param F_mat The F matrix calculated via \code{\link{calculate_F}} or,
#' alternatively, the parameters calculated by function
#' \code{\link{posterior_params}}.
#'
#' @return d plots, one for each dimension, created using ggplot2.
#' @export
#'
plot_each_F <- function(Y, X, F_mat) {
  # If a bpwpm_params object was passed instead of an F matrix, either reuse
  # its stored F (when it matches the number of rows in X) or recompute F for
  # the new X. inherits() replaces the fragile class(x) == "..." comparison.
  if (inherits(F_mat, "bpwpm_params")) {
    if (dim(X)[1] == dim(F_mat$estimated_F)[1]) {
      cat("Old F is being used")
      F_mat <- F_mat$estimated_F
    } else {
      cat("Calculating new F")
      M <- F_mat$M
      J <- F_mat$J
      K <- F_mat$K
      d <- F_mat$d
      tau <- F_mat$tau
      Phi <- calculate_Phi(X, M, J, K, d, tau, indep_terms = F_mat$indep_terms)
      F_mat <- calculate_F(Phi, F_mat$w, d)
    }
  }

  d <- dim(X)[2]

  # Coerce a numeric response to a factor so ggplot colours discretely.
  # (is.numeric() is TRUE for integer vectors too.)
  if (is.numeric(Y)) {
    Y <- as.factor(Y)
  }

  # One scatter plot per input dimension: X_i against F_i(X_i).
  # The i + 1 column offset skips F_mat's first column -- presumably a
  # constant/intercept term; confirm against calculate_F().
  for (i in seq_len(d)) {
    p <- ggplot2::qplot(x = X[, i], y = F_mat[, i + 1],
                        color = Y) +
      xlab(paste("X_", i, sep = "")) +
      ylab(paste("F_", i, "(X_", i, ")", sep = ""))
    print(p)

    # Pause between plots, but not after the last one. (The original compared
    # i to d + 1, which never matched, so it always prompted at the end.)
    if (i != d) {
      readline(prompt = "Press [enter] to view next plot")
    }
  }
}
# Methods for ploting 2D Graphs
#-------------------------------------------------------------------------------
#' Wrapper Function for 2D Input Plots
#'
#' To better understand the model, we can visualize it; however, plots are
#' only available for X inputs of 2 dimensions, i.e. only two factors
#' included in the regression.
#'
#' @param Y A response vector of size n.
#' @param X An input matrix of size n*2.
#' @param bpwpm_params An object of the class bpwpm_params or bpwpm_prediction
#' created by the functions \code{\link{posterior_params}} or
#' \code{\link{predict.bpwpm}} respectively, containing all the info about the
#' posterior parameters of the model.
#' @param n Thinness of grid for 2D and 3D projection.
#' @param alpha Numeric - level of transparency for the 2D projection.
#' @param f_of_0 Logical - whether the constant function 0 is to be plotted.
#'
#' @return A series of 3 plots to help illustrate the model.
#' @export
#'
plot_2D <- function(Y, X, bpwpm_params, n = 10, alpha = 0.6, f_of_0 = TRUE) {
  # --- Sanitizing inputs ---------------------------------------------------
  # The original called the non-existent `error()` function in each branch;
  # stop() raises the intended messages. is.matrix()/is.numeric() replace
  # class(x) == "..." comparisons: class(matrix) is c("matrix", "array") in
  # R >= 4.0, which errors as an if() condition in R >= 4.2.
  if (dim(X)[2] != 2) {
    stop("Only a 2D plot can be made. X matrix has diferent dimensions")
  }
  if (is.numeric(Y)) {
    Y <- factor(Y)
  }
  if (is.matrix(X)) {
    X <- data.frame(X)
  }
  if (length(Y) != dim(X)[1]) {
    stop("Y and X have a diferent number of observations")
  }

  # Accept either a prediction wrapper (unwrap it) or the params themselves.
  if (inherits(bpwpm_params, "bpwpm_prediction")) {
    bpwpm_params <- bpwpm_params$bpwpm_params
  } else if (!inherits(bpwpm_params, "bpwpm_params")) {
    stop("bpwpm_params or bpwpm_prediction objects requiered to print the plots")
  }

  # 1. Raw data scatter plot.
  p <- plot_2D_data(Y, X)
  print(p)

  # 2. 2D projection of the classification regions.
  readline(prompt = "Press [enter] to view next plot")
  p <- plot_2D_proj(Y, X, bpwpm_params, n, alpha)
  print(p)

  # 3. 3D wireframe of the projection function, on a coarser grid.
  readline(prompt = "Press [enter] to view next plot")
  p <- plot_3D_proj(X, bpwpm_params, n - 5, f_of_0)
  print(p)
}
#-------------------------------------------------------------------------------
#' Scatter Plot of 2D data
#'
#' Scatter plot to visualize the data and its corresponding groups.
#'
#' @inheritParams plot_2D
#'
#' @return A ggplot2 scatter plot.
#' @export
#'
#' @examples
#' \dontrun{
#' plot_2D_data(Y = rbinom(100, 1, 0.4), X = cbind(rnorm(100), rnorm(100)))
#' }
plot_2D_data <- function(Y, X) {
  # Only two input dimensions can be drawn. The original called the
  # non-existent `error()` function; stop() raises the intended error.
  if (dim(X)[2] != 2) {
    stop("Only a 2D plot can be made. X matrix has diferent dimensions")
  }

  # A numeric response is coerced to a factor for discrete colouring.
  if (is.numeric(Y)) {
    Y <- factor(Y)
  }

  # ggplot needs a data frame. is.matrix() replaces class(X) == "matrix",
  # which is length 2 in R >= 4.0 and errors as an if() condition in R >= 4.2.
  if (is.matrix(X)) {
    X <- data.frame(X)
  }

  ggplot2::ggplot(data = X, aes(x = X[, 1], y = X[, 2], col = Y)) +
    geom_point() + xlab("X_1") + ylab("X_2")
}
#-------------------------------------------------------------------------------
#' Plot 2D projection of the Model
#'
#' 2D projection of both the inputs and the posterior classification regions.
#' Useful to evaluate the corresponding binary outcomes. Instead of plotting
#' the contour of the 3D function drawn by \code{\link{plot_3D_proj}}, the
#' projection function is mapped to its corresponding binary output and
#' plotted behind the regular data.
#'
#' @inheritParams plot_2D
#'
#' @return A ggplot2 scatter plot.
#' @export
#'
plot_2D_proj <- function(Y, X, bpwpm_params, n = 15, alpha = 0.6) {
  # Input sanitation. stop() replaces the non-existent error() call of the
  # original, and is.matrix()/is.numeric() replace fragile class() == "..."
  # comparisons (class(matrix) has length 2 in R >= 4.0).
  if (dim(X)[2] != 2) {
    stop("Only a 2D plot can be made. X matrix has diferent dimensions")
  }
  if (is.numeric(Y)) {
    Y <- factor(Y)
  }
  if (is.matrix(X)) {
    X <- data.frame(X)
  }

  # Regular grid covering the data range, padded by 0.2 on every side.
  mins <- apply(X, 2, min)
  maxs <- apply(X, 2, max)
  linspace <- expand.grid(X1 = seq(mins[1] - 0.2, maxs[1] + 0.2, by = 1/n),
                          X2 = seq(mins[2] - 0.2, maxs[2] + 0.2, by = 1/n))

  # Evaluate the model's projection on the grid and threshold at 0 to get the
  # predicted class for each grid point.
  linspace$Y <- model_projection(new_X = linspace,
                                 bpwpm_params = bpwpm_params)
  linspace$Y <- as.factor(as.integer(linspace$Y >= 0))

  # Grid points are drawn semi-transparent (alpha); observed data is opaque.
  linspace$a <- rep(alpha, times = dim(linspace)[1])
  data <- data.frame(cbind(X, Y), a = rep(1, times = dim(X)[1]))
  colnames(data) <- c("X1", "X2", "Y", "a")
  data <- data.frame(rbind(data, linspace))

  ggplot2::ggplot(data = data) +
    geom_point(aes(x = X1, y = X2, col = Y, alpha = a), show.legend = FALSE) +
    xlab("X_1") + ylab("X_2")
}
#-------------------------------------------------------------------------------
#' Plots the 3D representation of the projection function
#'
#' Given the set of parameters and the input data in 2D, this function
#' calculates and plots the wireframe on a 3D linear space defined by the
#' input matrix X.
#'
#' @inheritParams plot_2D
#'
#' @return A 3D wireframe lattice plot.
#' @export
#'
plot_3D_proj <- function(X, bpwpm_params, n, f_of_0 = TRUE) {
  # Only 2D inputs can be rendered. stop() replaces the non-existent error()
  # call of the original (which aborted with a misleading message).
  if (dim(X)[2] != 2) {
    stop("Only a 2D plot can be made. X matrix has diferent dimensions")
  }

  # Regular grid spanning the observed data range.
  mins <- apply(X, 2, min)
  maxs <- apply(X, 2, max)
  linspace <- expand.grid(X1 = seq(mins[1], maxs[1], by = 1/n),
                          X2 = seq(mins[2], maxs[2], by = 1/n))

  # Projection function evaluated on the grid.
  linspace$f <- model_projection(linspace,
                                 bpwpm_params = bpwpm_params)

  if (f_of_0) {
    # Stack a second copy of the grid at f = 0 so the constant-zero decision
    # surface is drawn as its own group.
    m <- dim(linspace)[1]
    linspace$g <- rep(1, times = m)
    linspace <- rbind(linspace, data.frame(X1 = linspace[, 1], X2 = linspace[, 2],
                                           f = rep(0, times = m),
                                           g = rep(0, times = m)))
  }

  # NOTE(review): when f_of_0 = FALSE the `g` column is never created, yet
  # `group = g` below references it -- confirm lattice tolerates the missing
  # grouping variable, or guard the argument.
  lattice::wireframe(f ~ X1 * X2, data = linspace, group = g,
                     drape = TRUE,
                     aspect = c(1, 1),
                     main = paste("3D plot for: M = ", bpwpm_params$M,
                                  ", J = ", bpwpm_params$J, ", K = ", bpwpm_params$K),
                     frame.plot = FALSE,
                     colorkey = FALSE,
                     scales = list(arrows = FALSE))
}
#-------------------------------------------------------------------------------
#' Plot Ergodic Mean
#'
#' Plots the ergodic (running) mean of the chains of an object of class
#' \code{bpwpm}.
#'
#' @inheritParams plot.bpwpm
#' @inheritParams thin_chain
#'
#' @return Called for its side effect of printing a series of plots for the
#'   ergodic mean of the parameters.
#' @export
#'
plot_ergodic_mean <- function(object, thin = 0, burn_in = 0, ...) {
  # Validate the class. stop() replaces the non-existent error() call of the
  # original (which aborted with a misleading message).
  if (!inherits(object, "bpwpm")) {
    stop("Object not of the class bpwpm")
  }

  # Thin and burn the chains before computing running means.
  thin_object <- thin_bpwpm(object, thin, burn_in)

  # Ergodic mean of the beta chain over the remaining draws.
  n <- dim(thin_object$betas)[1]
  em_temp <- ergodic_mean(thin_object$betas)
  p <- plot_chains(data.frame(em_temp), n, title = "Betas - Ergodic Mean")
  print(p)

  # One plot per w_i chain; seq_along() is safe when w is empty.
  for (i in seq_along(thin_object$w)) {
    readline(prompt = "Press [enter] to view next plot")
    em_temp <- ergodic_mean(thin_object$w[[i]])
    p <- plot_chains(data.frame(em_temp), n, title = paste("w_", i, " - Ergodic Mean", sep = ""))
    print(p)
  }
}
|
/R/plot_funcs.R
|
no_license
|
PaoloLuciano/bpwpm
|
R
| false
| false
| 13,027
|
r
|
# Plot Functionality for package bpwpm
#-------------------------------------------------------------------------------
#' Generic bpwpm plotting
#'
#' Once a bpwpm has been run using the function \code{\link{bpwpm_gibbs}}, the
#' chains can be plotted and hence evaluated. This generic function builds sets
#' of plots for each parameter of the model, \eqn{\beta}, \eqn{w_1}, \eqn{w_2},
#' etc.
#'
#' @param object An object of the class bpwpm.
#' @param n Number of draws to plot.
#' @param ... Additional parameters (currently unused by this method).
#'
#' @return Called for its side effect of printing a series of line plots and
#'   histograms, pausing for user input between them.
#' @export
#'
plot.bpwpm <- function(object, n = 100, ...) {
  # Fail fast on the wrong class. The original called the non-existent
  # function `error()`, which aborted with a misleading "could not find
  # function" message; stop() raises the intended error.
  if (!inherits(object, "bpwpm")) {
    stop("Object not of the class bpwpm")
  }

  # Trace plot and histogram for the beta chain.
  p <- plot_chains(object$betas, n, title = "Betas")
  print(p)
  readline(prompt = "Press [enter] to view next plot")
  p <- plot_hist(object$betas, n, title = "Betas")
  print(p)

  # Trace plot and histogram for each w_i chain.
  # seq_along() is safe when object$w is empty (seq(1, 0) would iterate twice).
  for (i in seq_along(object$w)) {
    readline(prompt = "Press [enter] to view next plot")
    p <- plot_chains(object$w[[i]], n, title = paste("w_", i))
    print(p)
    readline(prompt = "Press [enter] to view next plot")
    p <- plot_hist(object$w[[i]], n, title = paste("w_", i))
    print(p)
  }
}
#-------------------------------------------------------------------------------
#' Plot MCMC Chains
#'
#' Plots the last n draws of an MCMC chain, one coloured line per parameter.
#'
#' @inheritParams plot.bpwpm
#' @param mcmc_chain An MCMC Chain matrix. (draws * number of params)
#' @param title Title for the plot
#'
#' @return A ggplot2 lines plot
#' @export
#'
plot_chains <- function(mcmc_chain, n = 100, title = ""){
# Clamp the window to the number of available draws.
dim_mcmc <- dim(mcmc_chain)
n <- min(n, dim_mcmc[1])
# Reshape the last n rows to long format: one (Parameters, value) pair per
# cell. NOTE(review): tidyr::gather() is superseded by pivot_longer() and
# expects a data frame -- presumably mcmc_chain is a data.frame here; confirm
# against callers.
mcmc_temp <- tidyr::gather(mcmc_chain[seq(dim_mcmc[1] - n + 1,dim_mcmc[1]),],
key = Parameters)
# x is the within-window draw index 1..n, repeated once per parameter column
# (gather stacks the columns, so the index recycles dim_mcmc[2] times).
ggplot2::ggplot(mcmc_temp, aes(x = rep(seq(1,n),dim_mcmc[2]),
y = value, group = Parameters,
colour = Parameters)) +
geom_line() + xlab("Index") + ylab("Value") + ggtitle(title)
}
#-------------------------------------------------------------------------------
#' Plot MCMC Chains histograms
#'
#' Plots histograms of the last draws of an MCMC chain, one fill colour per
#' parameter, to help assess convergence.
#'
#' @inheritParams plot.bpwpm
#' @inheritParams plot_chains
#' @param number Number of draws to include. NOTE(review): plot_chains calls
#'   the equivalent argument `n`; callers in this file pass it positionally,
#'   so unifying the name would be backward compatible for them.
#'
#' @return A histogram for the n draws and parameters of the chain
#' @export
#'
plot_hist <- function(mcmc_chain, number = 100, title = "", ...){
# Clamp the window to the number of available draws.
dim_mcmc <- dim(mcmc_chain)
n <- min(number, dim_mcmc[1])
# Reshape the last n rows to long format (one Parameters/value pair per
# cell). NOTE(review): tidyr::gather() is superseded by pivot_longer() and
# expects a data frame -- presumably mcmc_chain is a data.frame here.
beta_temp <- tidyr::gather(mcmc_chain[seq(dim_mcmc[1] - n + 1,dim_mcmc[1]),],
key = Parameters)
# `...` is forwarded to geom_histogram (e.g. bins, binwidth).
ggplot2::ggplot(beta_temp, aes(x = value, fill = Parameters)) +
geom_histogram(..., position = "dodge") + xlab("Value") +
ggtitle(title)
}
#-------------------------------------------------------------------------------
#' Generic function for plotting bpwpm_predictions objects
#'
#' Once a model has been run and evaluated, a prediction can be made using the
#' function \code{\link{predict.bpwpm}}. The Input \code{X} and output \code{Y}
#' are saved on the prediction object and can be plotted against the final PWP
#' expansion for the model.
#'
#' @param object An object of the class bpwpm_prediction.
#' @param ... Other arguments (currently unused by this method).
#'
#' @return A series of plots from ggplot2, produced as a side effect.
#' @export
#'
plot.bpwpm_prediction <- function(object, ...) {
  # Validate the class up front. The original called the non-existent
  # `error()` function; stop() raises the intended error.
  if (!inherits(object, "bpwpm_prediction")) {
    stop("Object not of the class bpwpm_prediction")
  }

  # Delegate to the per-dimension plotter with the stored data and parameters.
  plot_each_F(object$Y, object$X, object$bpwpm_params)
}
#-------------------------------------------------------------------------------
#' Plots each dimension f(x)
#'
#' With the posterior \code{w} parameters calculated from the Gibbs run, and
#' \code{\link{posterior_params}}, a final F matrix can be calculated and
#' hence plotted against every input X to see how the PWP expansion looks
#' for the specified set of parameters.
#'
#' @param Y A vector of binary response. Can be encoded as either a factor
#' vector or as a numeric one.
#' @param X A data frame or matrix containing the original Inputs for the model.
#' @param F_mat The F matrix calculated via \code{\link{calculate_F}} or,
#' alternatively, the parameters calculated by function
#' \code{\link{posterior_params}}.
#'
#' @return d plots, one for each dimension, created using ggplot2.
#' @export
#'
plot_each_F <- function(Y, X, F_mat) {
  # If a bpwpm_params object was passed instead of an F matrix, either reuse
  # its stored F (when it matches the number of rows in X) or recompute F for
  # the new X. inherits() replaces the fragile class(x) == "..." comparison.
  if (inherits(F_mat, "bpwpm_params")) {
    if (dim(X)[1] == dim(F_mat$estimated_F)[1]) {
      cat("Old F is being used")
      F_mat <- F_mat$estimated_F
    } else {
      cat("Calculating new F")
      M <- F_mat$M
      J <- F_mat$J
      K <- F_mat$K
      d <- F_mat$d
      tau <- F_mat$tau
      Phi <- calculate_Phi(X, M, J, K, d, tau, indep_terms = F_mat$indep_terms)
      F_mat <- calculate_F(Phi, F_mat$w, d)
    }
  }

  d <- dim(X)[2]

  # Coerce a numeric response to a factor so ggplot colours discretely.
  # (is.numeric() is TRUE for integer vectors too.)
  if (is.numeric(Y)) {
    Y <- as.factor(Y)
  }

  # One scatter plot per input dimension: X_i against F_i(X_i).
  # The i + 1 column offset skips F_mat's first column -- presumably a
  # constant/intercept term; confirm against calculate_F().
  for (i in seq_len(d)) {
    p <- ggplot2::qplot(x = X[, i], y = F_mat[, i + 1],
                        color = Y) +
      xlab(paste("X_", i, sep = "")) +
      ylab(paste("F_", i, "(X_", i, ")", sep = ""))
    print(p)

    # Pause between plots, but not after the last one. (The original compared
    # i to d + 1, which never matched, so it always prompted at the end.)
    if (i != d) {
      readline(prompt = "Press [enter] to view next plot")
    }
  }
}
# Methods for ploting 2D Graphs
#-------------------------------------------------------------------------------
#' Wrapper Function for 2D Input Plots
#'
#' To better understand the model, we can visualize it; however, plots are
#' only available for X inputs of 2 dimensions, i.e. only two factors
#' included in the regression.
#'
#' @param Y A response vector of size n.
#' @param X An input matrix of size n*2.
#' @param bpwpm_params An object of the class bpwpm_params or bpwpm_prediction
#' created by the functions \code{\link{posterior_params}} or
#' \code{\link{predict.bpwpm}} respectively, containing all the info about the
#' posterior parameters of the model.
#' @param n Thinness of grid for 2D and 3D projection.
#' @param alpha Numeric - level of transparency for the 2D projection.
#' @param f_of_0 Logical - whether the constant function 0 is to be plotted.
#'
#' @return A series of 3 plots to help illustrate the model.
#' @export
#'
plot_2D <- function(Y, X, bpwpm_params, n = 10, alpha = 0.6, f_of_0 = TRUE) {
  # --- Sanitizing inputs ---------------------------------------------------
  # The original called the non-existent `error()` function in each branch;
  # stop() raises the intended messages. is.matrix()/is.numeric() replace
  # class(x) == "..." comparisons: class(matrix) is c("matrix", "array") in
  # R >= 4.0, which errors as an if() condition in R >= 4.2.
  if (dim(X)[2] != 2) {
    stop("Only a 2D plot can be made. X matrix has diferent dimensions")
  }
  if (is.numeric(Y)) {
    Y <- factor(Y)
  }
  if (is.matrix(X)) {
    X <- data.frame(X)
  }
  if (length(Y) != dim(X)[1]) {
    stop("Y and X have a diferent number of observations")
  }

  # Accept either a prediction wrapper (unwrap it) or the params themselves.
  if (inherits(bpwpm_params, "bpwpm_prediction")) {
    bpwpm_params <- bpwpm_params$bpwpm_params
  } else if (!inherits(bpwpm_params, "bpwpm_params")) {
    stop("bpwpm_params or bpwpm_prediction objects requiered to print the plots")
  }

  # 1. Raw data scatter plot.
  p <- plot_2D_data(Y, X)
  print(p)

  # 2. 2D projection of the classification regions.
  readline(prompt = "Press [enter] to view next plot")
  p <- plot_2D_proj(Y, X, bpwpm_params, n, alpha)
  print(p)

  # 3. 3D wireframe of the projection function, on a coarser grid.
  readline(prompt = "Press [enter] to view next plot")
  p <- plot_3D_proj(X, bpwpm_params, n - 5, f_of_0)
  print(p)
}
#-------------------------------------------------------------------------------
#' Scatter Plot of 2D data
#'
#' Scatter plot to visualize the data and its corresponding groups.
#'
#' @inheritParams plot_2D
#'
#' @return A ggplot2 scatter plot.
#' @export
#'
#' @examples
#' \dontrun{
#' plot_2D_data(Y = rbinom(100, 1, 0.4), X = cbind(rnorm(100), rnorm(100)))
#' }
plot_2D_data <- function(Y, X) {
  # Only two input dimensions can be drawn. The original called the
  # non-existent `error()` function; stop() raises the intended error.
  if (dim(X)[2] != 2) {
    stop("Only a 2D plot can be made. X matrix has diferent dimensions")
  }

  # A numeric response is coerced to a factor for discrete colouring.
  if (is.numeric(Y)) {
    Y <- factor(Y)
  }

  # ggplot needs a data frame. is.matrix() replaces class(X) == "matrix",
  # which is length 2 in R >= 4.0 and errors as an if() condition in R >= 4.2.
  if (is.matrix(X)) {
    X <- data.frame(X)
  }

  ggplot2::ggplot(data = X, aes(x = X[, 1], y = X[, 2], col = Y)) +
    geom_point() + xlab("X_1") + ylab("X_2")
}
#-------------------------------------------------------------------------------
#' Plot 2D projection of the Model
#'
#' 2D projection of both the inputs and the posterior classification regions.
#' Useful to evaluate the corresponding binary outcomes. Instead of plotting
#' the contour of the 3D function drawn by \code{\link{plot_3D_proj}}, the
#' projection function is mapped to its corresponding binary output and
#' plotted behind the regular data.
#'
#' @inheritParams plot_2D
#'
#' @return A ggplot2 scatter plot.
#' @export
#'
plot_2D_proj <- function(Y, X, bpwpm_params, n = 15, alpha = 0.6) {
  # Input sanitation. stop() replaces the non-existent error() call of the
  # original, and is.matrix()/is.numeric() replace fragile class() == "..."
  # comparisons (class(matrix) has length 2 in R >= 4.0).
  if (dim(X)[2] != 2) {
    stop("Only a 2D plot can be made. X matrix has diferent dimensions")
  }
  if (is.numeric(Y)) {
    Y <- factor(Y)
  }
  if (is.matrix(X)) {
    X <- data.frame(X)
  }

  # Regular grid covering the data range, padded by 0.2 on every side.
  mins <- apply(X, 2, min)
  maxs <- apply(X, 2, max)
  linspace <- expand.grid(X1 = seq(mins[1] - 0.2, maxs[1] + 0.2, by = 1/n),
                          X2 = seq(mins[2] - 0.2, maxs[2] + 0.2, by = 1/n))

  # Evaluate the model's projection on the grid and threshold at 0 to get the
  # predicted class for each grid point.
  linspace$Y <- model_projection(new_X = linspace,
                                 bpwpm_params = bpwpm_params)
  linspace$Y <- as.factor(as.integer(linspace$Y >= 0))

  # Grid points are drawn semi-transparent (alpha); observed data is opaque.
  linspace$a <- rep(alpha, times = dim(linspace)[1])
  data <- data.frame(cbind(X, Y), a = rep(1, times = dim(X)[1]))
  colnames(data) <- c("X1", "X2", "Y", "a")
  data <- data.frame(rbind(data, linspace))

  ggplot2::ggplot(data = data) +
    geom_point(aes(x = X1, y = X2, col = Y, alpha = a), show.legend = FALSE) +
    xlab("X_1") + ylab("X_2")
}
#-------------------------------------------------------------------------------
#' Plots the 3D representation of the projection function
#'
#' Given the set of parameters and the input data in 2D, this function
#' calculates and plots the wireframe on a 3D linear space defined by the
#' input matrix X.
#'
#' @inheritParams plot_2D
#'
#' @return A 3D wireframe lattice plot.
#' @export
#'
plot_3D_proj <- function(X, bpwpm_params, n, f_of_0 = TRUE) {
  # Only 2D inputs can be rendered. stop() replaces the non-existent error()
  # call of the original (which aborted with a misleading message).
  if (dim(X)[2] != 2) {
    stop("Only a 2D plot can be made. X matrix has diferent dimensions")
  }

  # Regular grid spanning the observed data range.
  mins <- apply(X, 2, min)
  maxs <- apply(X, 2, max)
  linspace <- expand.grid(X1 = seq(mins[1], maxs[1], by = 1/n),
                          X2 = seq(mins[2], maxs[2], by = 1/n))

  # Projection function evaluated on the grid.
  linspace$f <- model_projection(linspace,
                                 bpwpm_params = bpwpm_params)

  if (f_of_0) {
    # Stack a second copy of the grid at f = 0 so the constant-zero decision
    # surface is drawn as its own group.
    m <- dim(linspace)[1]
    linspace$g <- rep(1, times = m)
    linspace <- rbind(linspace, data.frame(X1 = linspace[, 1], X2 = linspace[, 2],
                                           f = rep(0, times = m),
                                           g = rep(0, times = m)))
  }

  # NOTE(review): when f_of_0 = FALSE the `g` column is never created, yet
  # `group = g` below references it -- confirm lattice tolerates the missing
  # grouping variable, or guard the argument.
  lattice::wireframe(f ~ X1 * X2, data = linspace, group = g,
                     drape = TRUE,
                     aspect = c(1, 1),
                     main = paste("3D plot for: M = ", bpwpm_params$M,
                                  ", J = ", bpwpm_params$J, ", K = ", bpwpm_params$K),
                     frame.plot = FALSE,
                     colorkey = FALSE,
                     scales = list(arrows = FALSE))
}
#-------------------------------------------------------------------------------
#' Plot Ergodic Mean
#'
#' Plots the ergodic (running) mean of the chains of an object of class
#' \code{bpwpm}.
#'
#' @inheritParams plot.bpwpm
#' @inheritParams thin_chain
#'
#' @return Called for its side effect of printing a series of plots for the
#'   ergodic mean of the parameters.
#' @export
#'
plot_ergodic_mean <- function(object, thin = 0, burn_in = 0, ...) {
  # Validate the class. stop() replaces the non-existent error() call of the
  # original (which aborted with a misleading message).
  if (!inherits(object, "bpwpm")) {
    stop("Object not of the class bpwpm")
  }

  # Thin and burn the chains before computing running means.
  thin_object <- thin_bpwpm(object, thin, burn_in)

  # Ergodic mean of the beta chain over the remaining draws.
  n <- dim(thin_object$betas)[1]
  em_temp <- ergodic_mean(thin_object$betas)
  p <- plot_chains(data.frame(em_temp), n, title = "Betas - Ergodic Mean")
  print(p)

  # One plot per w_i chain; seq_along() is safe when w is empty.
  for (i in seq_along(thin_object$w)) {
    readline(prompt = "Press [enter] to view next plot")
    em_temp <- ergodic_mean(thin_object$w[[i]])
    p <- plot_chains(data.frame(em_temp), n, title = paste("w_", i, " - Ergodic Mean", sep = ""))
    print(p)
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grcm38_tx2gene.R
\docType{data}
\name{grcm38_tx2gene}
\alias{grcm38_tx2gene}
\title{Mouse transcripts to genes}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 129826 rows and 2 columns.}
\source{
\url{http://ensembl.org/mus_musculus}
}
\usage{
grcm38_tx2gene
}
\description{
Lookup table for converting Mouse (\emph{Mus musculus})
Ensembl transcript IDs to gene IDs based on genome assembly
GRCm38 from Ensembl.
}
\details{
Variables:
\itemize{
\item enstxp
\item ensgene
}
}
\examples{
head(grcm38_tx2gene)
}
\keyword{datasets}
|
/man/grcm38_tx2gene.Rd
|
no_license
|
aaronwolen/annotables
|
R
| false
| true
| 652
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grcm38_tx2gene.R
\docType{data}
\name{grcm38_tx2gene}
\alias{grcm38_tx2gene}
\title{Mouse transcripts to genes}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 129826 rows and 2 columns.}
\source{
\url{http://ensembl.org/mus_musculus}
}
\usage{
grcm38_tx2gene
}
\description{
Lookup table for converting Mouse (\emph{Mus musculus})
Ensembl transcript IDs to gene IDs based on genome assembly
GRCm38 from Ensembl.
}
\details{
Variables:
\itemize{
\item enstxp
\item ensgene
}
}
\examples{
head(grcm38_tx2gene)
}
\keyword{datasets}
|
#' Produce a calibration plot for a set of predicted probabilities for a binary classifier.
#' @export
#' @import ggplot2
#' @import data.table
#' @importFrom patchwork plot_layout
#' @importFrom Hmisc binconf
#'
#' @param form A formula where the left-hand side is the variable representing the observed outcome, 0 or 1, and the right-hand side represents the column names of the different model probabilities.
#' @param data A data frame that contains at least two columns, one of which is the observed outcome and the others that are predicted probabilities.
#' @param cuts The number of bins of probabilities. Default = 10.
#' @param refline Whether or not to include a 45 degree reference line. Default = TRUE.
#' @param smooth Whether or not to include a smoothed loess curve for each models' probabilities. Default = FALSE.
#' @param fitline Whether or not to include a best-fit line for each models' probabilities. Default = FALSE.
#' @param rug Whether or not to include a rug plot of the observed probabilities. Usually works best with only one model. Default = FALSE.
#' @return A ggplot object (or a patchwork composition when \code{rug = TRUE}).
#' @examples
#' m1 <- glm(mpg > 20 ~ cyl + disp + hp, family = 'binomial', data = mtcars)
#' results <- data.frame(outcome = mtcars$mpg > 20, lr_1 = predict(m1, type = 'response'))
#' calib_plot(outcome ~ lr_1, data = results, cuts = 5)
calib_plot <- function(form, data, cuts = 10, refline = TRUE,
smooth = FALSE, fitline = FALSE,
rug = FALSE) {
# Work on a data.table copy so `:=` below does not modify the caller's data.
data <- as.data.table(data)
# Identify vars: LHS = observed outcome, RHS = one column per model.
.y <- all.vars(form)[1]
.mods <- all.vars(form)[-1]
# Inspired by Darren Dahly: https://darrendahly.github.io/post/homr/
# For each model: bin its predicted probabilities into `cuts` equal-width
# bins, then per bin compute the mean predicted probability, the observed
# event rate, and a binomial confidence interval for the observed rate.
# Hmisc::binconf() returns (point estimate, lower, upper); [2] and [3]
# extract the interval bounds.
dt <- lapply(.mods, function(m) {
data[,c(m,.y), with = FALSE][, bin := cut(get(m), breaks = cuts,
labels = FALSE)][,
.(Model = m,
Predicted = mean(get(m)),
Observed = mean(get(.y)),
ci_lo = binconf(sum(get(.y)),.N)[2],
ci_hi = binconf(sum(get(.y)),.N)[3]),
by = bin]
})
dt_all <- rbindlist(dt)
# Calibration curve: predicted vs observed with error bars, square aspect.
# NOTE(review): `size` for lines is deprecated in favour of `linewidth`
# since ggplot2 3.4 -- still functional, but warns on newer versions.
p <- ggplot(dt_all, aes(Predicted, Observed, color = Model)) +
geom_point(size = 0.3) + geom_line(size = 0.3) +
geom_errorbar(aes(ymin = ci_lo, ymax = ci_hi), width = 0.03, size = 0.3) +
xlim(0, 1) + ylim(0, 1) +
theme_bw() +
coord_fixed()
# Prepend the 45-degree reference layer so it is drawn *under* the data.
if (refline) p$layers <- c(geom_abline(slope = 1, intercept = 0, size = 0.5, color = 'lightgray'), p$layers)
# Optional through-the-origin linear fit and loess smooth.
# NOTE(review): the `-1` (no intercept) formula term is meaningful for
# method = 'lm' but presumably not honoured by loess -- confirm.
if (fitline) p <- p + geom_smooth(method = 'lm', se = FALSE,
lty = 5, formula = y ~ -1 + x, size = 0.3)
if (smooth) p <- p + geom_smooth(method = 'loess', se = FALSE,
lty = 10, formula = y ~ -1 + x, size = 0.3)
if (rug) {
# Despite the name, this adds a histogram panel of the predicted
# probabilities beneath the calibration plot, composed via patchwork.
dt_preds <- data[, .mods, with = FALSE]
dt_preds_melt <- melt(dt_preds, measure.vars = .mods)
dist_plot <- ggplot(dt_preds_melt, aes(x = value, fill = variable, color = variable)) +
geom_histogram(bins = 100) +
scale_x_continuous('Predicted probability', limits = c(0,1)) +
scale_y_continuous('', n.breaks = 2) +
theme_minimal() +
theme(legend.position = 'none')
p <- p + xlab('')
p <- p + dist_plot +
plot_layout(ncol = 1,
widths = unit(c(6,6), c('cm', 'cm')),
heights = unit(c(6,1), c('cm', 'cm')))
}
return(p)
}
|
/R/calib_plot.R
|
no_license
|
gweissman/gmish
|
R
| false
| false
| 3,508
|
r
|
#' Produce a calibration plot for a set of predicted probabilities for a binary classifier.
#' @export
#' @import ggplot2
#' @import data.table
#' @importFrom patchwork plot_layout
#' @importFrom Hmisc binconf
#'
#' @param form A formula where the left-hand side is the variable representing the observed outcome, 0 or 1, and the right-hand side represents the column names of the different model probabilities.
#' @param data A data frame that contains at least two columns, one of which is the observed outcome and the others that are predicted probabilities.
#' @param cuts The number of bins of probabilities. Default = 10.
#' @param refline Whether or not to include a 45 degree reference line. Default = TRUE.
#' @param smooth Whether or not to include a smoothed loess curve for each models' probabilities. Default = FALSE.
#' @param fitline Whether or not to include a best-fit line for each models' probabilities. Default = FALSE.
#' @param rug Whether or not to include a rug plot of the observed probabilities. Usually works best with only one model. Default = FALSE.
#' @return A ggplot object (or a patchwork composition when \code{rug = TRUE}).
#' @examples
#' m1 <- glm(mpg > 20 ~ cyl + disp + hp, family = 'binomial', data = mtcars)
#' results <- data.frame(outcome = mtcars$mpg > 20, lr_1 = predict(m1, type = 'response'))
#' calib_plot(outcome ~ lr_1, data = results, cuts = 5)
calib_plot <- function(form, data, cuts = 10, refline = TRUE,
smooth = FALSE, fitline = FALSE,
rug = FALSE) {
# Work on a data.table copy so `:=` below does not modify the caller's data.
data <- as.data.table(data)
# Identify vars: LHS = observed outcome, RHS = one column per model.
.y <- all.vars(form)[1]
.mods <- all.vars(form)[-1]
# Inspired by Darren Dahly: https://darrendahly.github.io/post/homr/
# For each model: bin its predicted probabilities into `cuts` equal-width
# bins, then per bin compute the mean predicted probability, the observed
# event rate, and a binomial confidence interval for the observed rate.
# Hmisc::binconf() returns (point estimate, lower, upper); [2] and [3]
# extract the interval bounds.
dt <- lapply(.mods, function(m) {
data[,c(m,.y), with = FALSE][, bin := cut(get(m), breaks = cuts,
labels = FALSE)][,
.(Model = m,
Predicted = mean(get(m)),
Observed = mean(get(.y)),
ci_lo = binconf(sum(get(.y)),.N)[2],
ci_hi = binconf(sum(get(.y)),.N)[3]),
by = bin]
})
dt_all <- rbindlist(dt)
# Calibration curve: predicted vs observed with error bars, square aspect.
# NOTE(review): `size` for lines is deprecated in favour of `linewidth`
# since ggplot2 3.4 -- still functional, but warns on newer versions.
p <- ggplot(dt_all, aes(Predicted, Observed, color = Model)) +
geom_point(size = 0.3) + geom_line(size = 0.3) +
geom_errorbar(aes(ymin = ci_lo, ymax = ci_hi), width = 0.03, size = 0.3) +
xlim(0, 1) + ylim(0, 1) +
theme_bw() +
coord_fixed()
# Prepend the 45-degree reference layer so it is drawn *under* the data.
if (refline) p$layers <- c(geom_abline(slope = 1, intercept = 0, size = 0.5, color = 'lightgray'), p$layers)
# Optional through-the-origin linear fit and loess smooth.
# NOTE(review): the `-1` (no intercept) formula term is meaningful for
# method = 'lm' but presumably not honoured by loess -- confirm.
if (fitline) p <- p + geom_smooth(method = 'lm', se = FALSE,
lty = 5, formula = y ~ -1 + x, size = 0.3)
if (smooth) p <- p + geom_smooth(method = 'loess', se = FALSE,
lty = 10, formula = y ~ -1 + x, size = 0.3)
if (rug) {
# Despite the name, this adds a histogram panel of the predicted
# probabilities beneath the calibration plot, composed via patchwork.
dt_preds <- data[, .mods, with = FALSE]
dt_preds_melt <- melt(dt_preds, measure.vars = .mods)
dist_plot <- ggplot(dt_preds_melt, aes(x = value, fill = variable, color = variable)) +
geom_histogram(bins = 100) +
scale_x_continuous('Predicted probability', limits = c(0,1)) +
scale_y_continuous('', n.breaks = 2) +
theme_minimal() +
theme(legend.position = 'none')
p <- p + xlab('')
p <- p + dist_plot +
plot_layout(ncol = 1,
widths = unit(c(6,6), c('cm', 'cm')),
heights = unit(c(6,1), c('cm', 'cm')))
}
return(p)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitted.FPCAder.R
\name{fitted.FPCAder}
\alias{fitted.FPCAder}
\title{Fitted functional sample from FPCAder object}
\usage{
\method{fitted}{FPCAder}(object, K = NULL, ...)
}
\arguments{
\item{object}{An object of class FPCAder.}
\item{K}{The integer number of the first K components used for the representation. (default: length(derObj$lambda ))}
\item{...}{Additional arguments}
}
\value{
An \code{n} by \code{length(workGrid)} matrix, each row of which contains a sample.
}
\description{
Combine the zero-meaned fitted values and the mean derivative to get the fitted values for the derivatives trajectories
Estimates are given on the work-grid, not on the observation grid. Use ConvertSupport to map the estimates to your desired domain.
}
\examples{
set.seed(1)
n <- 20
pts <- seq(0, 1, by=0.05)
sampWiener <- Wiener(n, pts)
sampWiener <- Sparsify(sampWiener, pts, 10)
}
\references{
\cite{Liu, Bitao, and Hans-Georg Mueller. "Estimating derivatives for samples of sparsely observed functions, with application to online auction dynamics." Journal of the American Statistical Association 104, no. 486 (2009): 704-717. (Sparse data FPCA)}
}
|
/man/fitted.FPCAder.Rd
|
no_license
|
rogersguo/tPACE
|
R
| false
| true
| 1,252
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitted.FPCAder.R
\name{fitted.FPCAder}
\alias{fitted.FPCAder}
\title{Fitted functional sample from FPCAder object}
\usage{
\method{fitted}{FPCAder}(object, K = NULL, ...)
}
\arguments{
\item{object}{An object of class FPCAder.}
\item{K}{The integer number of the first K components used for the representation. (default: length(derObj$lambda ))}
\item{...}{Additional arguments}
}
\value{
An \code{n} by \code{length(workGrid)} matrix, each row of which contains a sample.
}
\description{
Combine the zero-meaned fitted values and the mean derivative to get the fitted values for the derivatives trajectories
Estimates are given on the work-grid, not on the observation grid. Use ConvertSupport to map the estimates to your desired domain.
}
\examples{
set.seed(1)
n <- 20
pts <- seq(0, 1, by=0.05)
sampWiener <- Wiener(n, pts)
sampWiener <- Sparsify(sampWiener, pts, 10)
}
\references{
\cite{Liu, Bitao, and Hans-Georg Mueller. "Estimating derivatives for samples of sparsely observed functions, with application to online auction dynamics." Journal of the American Statistical Association 104, no. 486 (2009): 704-717. (Sparse data FPCA)}
}
|
## Read in the data
# Data are at http://www.stat.columbia.edu/~gelman/arm/examples/arsenic
library("arm")
wells <- read.table ("~/Dropbox/Work/Harvard/Wolkovich Lab/Gelman_Hill/ARM_Data/arsenic/wells.dat")
# attach.all() (from arm) puts the columns of `wells` (dist, arsenic,
# switch, ...) on the search path; the bare column names below rely on it.
# NOTE(review): this also masks base::switch -- attaching is generally
# discouraged; consider with() or data= arguments instead.
attach.all (wells)
## Histogram on distance (Figure 5.8)
hist (dist, breaks=seq(0,10+max(dist[!is.na(dist)]),10),
xlab="Distance (in meters) to the nearest safe well",
ylab="", main="", mgp=c(2,.5,0))
## Logistic regression with one predictor: Pr(switch) vs distance in meters
fit.1 <- glm (switch ~ dist, family=binomial(link="logit"))
display (fit.1)
## Repeat the regression above with distance in 100-meter units
# Rescaling gives a per-100m coefficient, which is easier to interpret.
dist100 <- dist/100
fit.2 <- glm (switch ~ dist100, family=binomial(link="logit"))
display (fit.2)
## Graphing the fitted model with one predictor (Figure 5.9)
# Jitter a binary vector away from 0/1 for plotting: zeros map into
# [0, jitt] and ones into [1 - jitt, 1], so overlapping points separate.
jitter.binary <- function(a, jitt=.05){
  near_zero <- runif(length(a), 0, jitt)
  near_one <- runif(length(a), 1 - jitt, 1)
  ifelse(a == 0, near_zero, near_one)
}
# Jittered 0/1 outcomes so overlapping points are visible when plotted.
switch.jitter <- jitter.binary(switch)
plot(dist, switch.jitter, xlab="Distance (in meters) to nearest safe well", ylab="Pr (switching)", type="n", xaxs="i", yaxs="i", mgp=c(2,.5,0))
# invlogit() (from arm) maps the linear predictor onto a probability.
curve (invlogit(coef(fit.1)[1]+coef(fit.1)[2]*x), lwd=1, add=TRUE)
points (dist, jitter.binary(switch), pch=20, cex=.1)
## Histogram on arsenic levels (Figure 5.10)
hist (arsenic, breaks=seq(0,.25+max(arsenic[!is.na(arsenic)]),.25), freq=TRUE, xlab="Arsenic concentration in well water", ylab="", main="", mgp=c(2,.5,0))
## Logistic regression with second input variable
fit.3 <- glm (switch ~ dist100 + arsenic, family=binomial(link="logit"))
display (fit.3)
## Graphing the fitted model with two predictors (Figure 5.11)
# Probability curves vs distance at two fixed arsenic levels;
# cbind(1, x/100, level) %*% coef(fit.3) builds the linear predictor.
plot(dist, switch.jitter, xlim=c(0,max(dist)), xlab="Distance (in meters) to nearest safe well", ylab="Pr (switching)", type="n", xaxs="i", yaxs="i", mgp=c(2,.5,0))
curve (invlogit(cbind (1, x/100, .5) %*% coef(fit.3)), lwd=.5, add=TRUE)
curve (invlogit(cbind (1, x/100, 1.0) %*% coef(fit.3)), lwd=.5, add=TRUE)
points (dist, jitter.binary(switch), pch=20, cex=.1)
text (50, .27, "if As = 0.5", adj=0, cex=.8)
text (75, .50, "if As = 1.0", adj=0, cex=.8)
# Probability curves vs arsenic at two fixed distances
# (0 m and 50 m, i.e. dist100 = 0 and 0.5).
plot(arsenic, switch.jitter, xlim=c(0,max(arsenic)), xlab="Arsenic concentration in well water", ylab="Pr (switching)", type="n", xaxs="i", yaxs="i", mgp=c(2,.5,0))
curve (invlogit(cbind (1, 0, x) %*% coef(fit.3)), lwd=.5, add=TRUE)
curve (invlogit(cbind (1, 0.5, x) %*% coef(fit.3)), lwd=.5, add=TRUE)
points (arsenic, jitter.binary(switch), pch=20, cex=.1)
text (1.5, .78, "if dist = 0", adj=0, cex=.8)
text (2.2, .6, "if dist = 50", adj=0, cex=.8)
# Equivalently, written out with explicit coefficient arithmetic instead of
# the matrix form above.
plot(dist, switch.jitter, xlim=c(0,max(dist)), xlab="Distance (in meters) to nearest safe well", ylab="Pr (switching)", type="n", xaxs="i", yaxs="i", mgp=c(2,.5,0))
curve (invlogit(coef(fit.3)[1]+coef(fit.3)[2]*x/100+coef(fit.3)[3]*.50), lwd=.5, add=TRUE)
curve (invlogit(coef(fit.3)[1]+coef(fit.3)[2]*x/100+coef(fit.3)[3]*1.00), lwd=.5, add=TRUE)
points (dist, jitter.binary(switch), pch=20, cex=.1)
text (50, .27, "if As = 0.5", adj=0, cex=.8)
text (75, .50, "if As = 1.0", adj=0, cex=.8)
plot(arsenic, switch.jitter, xlim=c(0,max(arsenic)), xlab="Arsenic concentration in well water", ylab="Pr (switching)", type="n", xaxs="i", yaxs="i", mgp=c(2,.5,0))
curve (invlogit(coef(fit.3)[1]+coef(fit.3)[2]*0+coef(fit.3)[3]*x), from=0.5, lwd=.5, add=TRUE)
curve (invlogit(coef(fit.3)[1]+coef(fit.3)[2]*0.5+coef(fit.3)[3]*x), from=0.5, lwd=.5, add=TRUE)
points (arsenic, jitter.binary(switch), pch=20, cex=.1)
text (1.5, .78, "if dist = 0", adj=0, cex=.8)
text (2.2, .6, "if dist = 50", adj=0, cex=.8)
|
/gelmanhill_stuff/Book_Codes/Ch.5/5.4_Logistic regression_wells in Bangladesh.R
|
no_license
|
lizzieinvancouver/gelmanhill
|
R
| false
| false
| 3,600
|
r
|
## Read in the data
# Data are at http://www.stat.columbia.edu/~gelman/arm/examples/arsenic
library("arm")
# NOTE(review): hard-coded personal path -- this only runs on the original
# author's machine; replace with a portable path before reuse.
wells <- read.table ("~/Dropbox/Work/Harvard/Wolkovich Lab/Gelman_Hill/ARM_Data/arsenic/wells.dat")
# attach.all (from arm) -- the rest of the script refers to the wells columns
# (switch, dist, arsenic, ...) as bare names, so presumably this attaches them.
attach.all (wells)
## Histogram on distance (Figure 5.8)
hist (dist, breaks=seq(0,10+max(dist[!is.na(dist)]),10),
    xlab="Distance (in meters) to the nearest safe well",
    ylab="", main="", mgp=c(2,.5,0))
## Logistic regression with one predictor
fit.1 <- glm (switch ~ dist, family=binomial(link="logit"))
display (fit.1)
## Repeat the regression above with distance in 100-meter units
# Rescaling distance to 100 m units makes the slope coefficient easier to read.
dist100 <- dist/100
fit.2 <- glm (switch ~ dist100, family=binomial(link="logit"))
display (fit.2)
## Graphing the fitted model with one predictor (Figure 5.9)
# Jitter a 0/1 vector for plotting: zeros are drawn uniformly from [0, jitt]
# and ones from [1 - jitt, 1], so overplotted points spread out while staying
# visually anchored to their true class.
jitter.binary <- function(a, jitt = .05) {
  n <- length(a)
  zeros <- runif(n, 0, jitt)     # candidate values where a == 0
  ones <- runif(n, 1 - jitt, 1)  # candidate values where a == 1
  ifelse(a == 0, zeros, ones)
}
# Jittered 0/1 response used as the y-axis in the probability plots below.
switch.jitter <- jitter.binary(switch)
plot(dist, switch.jitter, xlab="Distance (in meters) to nearest safe well", ylab="Pr (switching)", type="n", xaxs="i", yaxs="i", mgp=c(2,.5,0))
# Fitted Pr(switch) as a function of distance, from fit.1.
curve (invlogit(coef(fit.1)[1]+coef(fit.1)[2]*x), lwd=1, add=TRUE)
points (dist, jitter.binary(switch), pch=20, cex=.1)
## Histogram on arsenic levels (Figure 5.10)
hist (arsenic, breaks=seq(0,.25+max(arsenic[!is.na(arsenic)]),.25), freq=TRUE, xlab="Arsenic concentration in well water", ylab="", main="", mgp=c(2,.5,0))
## Logistic regression with second input variable
fit.3 <- glm (switch ~ dist100 + arsenic, family=binomial(link="logit"))
display (fit.3)
## Graphing the fitted model with two predictors (Figure 5.11)
# Curves hold arsenic fixed at 0.5 and 1.0 while distance varies.
plot(dist, switch.jitter, xlim=c(0,max(dist)), xlab="Distance (in meters) to nearest safe well", ylab="Pr (switching)", type="n", xaxs="i", yaxs="i", mgp=c(2,.5,0))
curve (invlogit(cbind (1, x/100, .5) %*% coef(fit.3)), lwd=.5, add=TRUE)
curve (invlogit(cbind (1, x/100, 1.0) %*% coef(fit.3)), lwd=.5, add=TRUE)
points (dist, jitter.binary(switch), pch=20, cex=.1)
text (50, .27, "if As = 0.5", adj=0, cex=.8)
text (75, .50, "if As = 1.0", adj=0, cex=.8)
# Companion plot: hold distance fixed (0 m and 50 m) while arsenic varies.
plot(arsenic, switch.jitter, xlim=c(0,max(arsenic)), xlab="Arsenic concentration in well water", ylab="Pr (switching)", type="n", xaxs="i", yaxs="i", mgp=c(2,.5,0))
curve (invlogit(cbind (1, 0, x) %*% coef(fit.3)), lwd=.5, add=TRUE)
curve (invlogit(cbind (1, 0.5, x) %*% coef(fit.3)), lwd=.5, add=TRUE)
points (arsenic, jitter.binary(switch), pch=20, cex=.1)
text (1.5, .78, "if dist = 0", adj=0, cex=.8)
text (2.2, .6, "if dist = 50", adj=0, cex=.8)
#equivalently
# Same two figures, written with explicit coefficient arithmetic instead of
# the matrix product above (dist100 = 0.5 corresponds to dist = 50 m).
plot(dist, switch.jitter, xlim=c(0,max(dist)), xlab="Distance (in meters) to nearest safe well", ylab="Pr (switching)", type="n", xaxs="i", yaxs="i", mgp=c(2,.5,0))
curve (invlogit(coef(fit.3)[1]+coef(fit.3)[2]*x/100+coef(fit.3)[3]*.50), lwd=.5, add=TRUE)
curve (invlogit(coef(fit.3)[1]+coef(fit.3)[2]*x/100+coef(fit.3)[3]*1.00), lwd=.5, add=TRUE)
points (dist, jitter.binary(switch), pch=20, cex=.1)
text (50, .27, "if As = 0.5", adj=0, cex=.8)
text (75, .50, "if As = 1.0", adj=0, cex=.8)
plot(arsenic, switch.jitter, xlim=c(0,max(arsenic)), xlab="Arsenic concentration in well water", ylab="Pr (switching)", type="n", xaxs="i", yaxs="i", mgp=c(2,.5,0))
curve (invlogit(coef(fit.3)[1]+coef(fit.3)[2]*0+coef(fit.3)[3]*x), from=0.5, lwd=.5, add=TRUE)
curve (invlogit(coef(fit.3)[1]+coef(fit.3)[2]*0.5+coef(fit.3)[3]*x), from=0.5, lwd=.5, add=TRUE)
points (arsenic, jitter.binary(switch), pch=20, cex=.1)
text (1.5, .78, "if dist = 0", adj=0, cex=.8)
text (2.2, .6, "if dist = 50", adj=0, cex=.8)
|
## Test file for the IRSDataLoader object
library(QFFixedIncome)
## Exercises the IRSDataLoader accessors: invalid-input handling plus spot
## checks of getCurves/getSpreads against their one-date variants.
testIRSDataLoader <- function()
{
    # Fixtures: a default loader and a generic swap defaulted to the 5y tenor.
    loader <- IRSDataLoader()
    irs <- GenericIRS()
    irs$setDefault(tenor = "5y")
    # getSpreads must reject missing/invalid arguments and reversed date ranges.
    shouldBomb(loader$getSpreads())
    shouldBomb(loader$getSpreads(irsObj = IRS(),source = "internal",startDate = "2007-05-03",endDate = "2007-07-03"))
    shouldBomb(loader$getSpreads(irsObj = irs,source = "int",startDate = "2007-05-03",endDate = "2007-07-03"))
    shouldBomb(loader$getSpreads(irsObj = irs,source = "internal",startDate = TRUE,endDate = "2007-07-03"))
    shouldBomb(loader$getSpreads(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = FALSE))
    shouldBomb(loader$getSpreads(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = "2000-07-03"))
    # getOneDateSpread must reject missing/invalid arguments.
    shouldBomb(loader$getOneDateSpread())
    shouldBomb(loader$getOneDateSpread(irsObj = IRS(),source = "internal",myDate = "2007-07-03",defaultLimit = 3))
    shouldBomb(loader$getOneDateSpread(irsObj = irs,source = "int",myDate = "2007-07-03",defaultLimit = 3))
    shouldBomb(loader$getOneDateSpread(irsObj = irs,source = "internal",myDate = FALSE,defaultLimit = 3))
    shouldBomb(loader$getOneDateSpread(irsObj = irs,source = "internal",myDate = "2007-07-03",defaultLimit = -1))
    # getCurves must reject missing/invalid arguments and reversed date ranges.
    shouldBomb(loader$getCurves())
    shouldBomb(loader$getCurves(irsObj = IRS(),source = "internal",startDate = "2007-05-03",endDate = "2007-07-03"))
    shouldBomb(loader$getCurves(irsObj = irs,source = "int",startDate = "2007-05-03",endDate = "2007-07-03"))
    shouldBomb(loader$getCurves(irsObj = irs,source = "internal",startDate = TRUE,endDate = "2007-07-03"))
    shouldBomb(loader$getCurves(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = FALSE))
    shouldBomb(loader$getCurves(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = "2000-07-03"))
    # getOneDateCurve must reject missing/invalid arguments.
    shouldBomb(loader$getOneDateCurve())
    shouldBomb(loader$getOneDateCurve(irsObj = IRS(),source = "internal",myDate = "2007-07-03",defaultLimit = 3))
    shouldBomb(loader$getOneDateCurve(irsObj = irs,source = "int",myDate = "2007-07-03",defaultLimit = 3))
    shouldBomb(loader$getOneDateCurve(irsObj = irs,source = "internal",myDate = FALSE,defaultLimit = 3))
    shouldBomb(loader$getOneDateCurve(irsObj = irs,source = "internal",myDate = "2007-07-03",defaultLimit = -1))
    # getCurves: a single-day window returns the full 16-tenor curve.
    expected <- getZooDataFrame(zoo(
        matrix(c(
            5.20674,5.10598,5.04051,5.04224,5.06248,5.08716,5.11933,5.14901,
            5.17993,5.20836,5.26586,5.33181,5.38778,5.40622,5.40592,5.39342),nrow = 1, ncol = 16),
        order.by = "2007-05-03"))
    colnames(expected) <- c("18m", "2y", "3y", "4y", "5y", "6y", "7y", "8y", "9y", "10y", "12y", "15y", "20y", "25y", "30y", "40y")
    checkEquals(expected,
        loader$getCurves(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = "2007-05-03")
    )
    # getOneDateCurve must agree with a single-day getCurves window.
    checkEquals(loader$getCurves(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = "2007-05-03"),
        loader$getOneDateCurve(irsObj = irs,source = "internal",myDate = "2007-05-03",defaultLimit = 0)
    )
    # getOneDateSpread must agree with a single-day getSpreads window.
    checkEquals(loader$getSpreads(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = "2007-05-03"),
        loader$getOneDateSpread(irsObj = irs,source = "internal",myDate = "2007-05-03",defaultLimit = 0)
    )
}
## Spreads retrieved with the loader's default filter must match the known
## 5y-tenor values for 2007-05-03 and 2007-05-04.
testIRSDataLoaderGetSpreadsWithDefaultFilter <- function()
{
    loader <- IRSDataLoader()
    irs <- GenericIRS()
    irs$setDefault(tenor = "5y")
    expected <- getZooDataFrame(zoo(c(5.06248,5.02598),order.by = c("2007-05-03","2007-05-04")))
    colnames(expected) <- "5y"
    checkEquals(expected,
        loader$getSpreads(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = "2007-05-04")
    )
}
## A filter time for which no data exists must make every accessor fail.
testIRSDataLoaderWhenInvalidFilterSupplied <- function() {
    loader <- IRSDataLoader(filter = "13:00:00") # not the NY close that data exists for
    irs <- GenericIRS()
    irs$setDefault(tenor="5y")
    shouldBomb(loader$getSpreads(irsObj = irs, source = "internal", startDate = "2007-05-03",endDate = "2007-05-04"))
    shouldBomb(loader$getCurves(irsObj = irs, source = "internal", startDate = "2007-05-03",endDate = "2007-05-04"))
    # BUG FIX: the one-date accessors take `myDate` (see testIRSDataLoader
    # above), not startDate/endDate. With the wrong argument names these calls
    # bombed on the signature mismatch rather than on the invalid filter, so
    # the filter path was never actually exercised here.
    shouldBomb(loader$getOneDateSpread(irsObj = irs, source = "internal", myDate = "2007-05-03", defaultLimit = 0))
    shouldBomb(loader$getOneDateCurve(irsObj = irs, source = "internal", myDate = "2007-05-03", defaultLimit = 0))
}
|
/R/src/QFFixedIncome/tests/testIRSDataLoader.R
|
no_license
|
rsheftel/ratel
|
R
| false
| false
| 5,049
|
r
|
## Test file for the IRSDataLoader object
library(QFFixedIncome)
## Exercises the IRSDataLoader accessors: invalid-input handling plus spot
## checks of getCurves/getSpreads against their one-date variants.
testIRSDataLoader <- function()
{
    # Fixtures: a default loader and a generic swap defaulted to the 5y tenor.
    loader <- IRSDataLoader()
    irs <- GenericIRS()
    irs$setDefault(tenor = "5y")
    # getSpreads must reject missing/invalid arguments and reversed date ranges.
    shouldBomb(loader$getSpreads())
    shouldBomb(loader$getSpreads(irsObj = IRS(),source = "internal",startDate = "2007-05-03",endDate = "2007-07-03"))
    shouldBomb(loader$getSpreads(irsObj = irs,source = "int",startDate = "2007-05-03",endDate = "2007-07-03"))
    shouldBomb(loader$getSpreads(irsObj = irs,source = "internal",startDate = TRUE,endDate = "2007-07-03"))
    shouldBomb(loader$getSpreads(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = FALSE))
    shouldBomb(loader$getSpreads(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = "2000-07-03"))
    # getOneDateSpread must reject missing/invalid arguments.
    shouldBomb(loader$getOneDateSpread())
    shouldBomb(loader$getOneDateSpread(irsObj = IRS(),source = "internal",myDate = "2007-07-03",defaultLimit = 3))
    shouldBomb(loader$getOneDateSpread(irsObj = irs,source = "int",myDate = "2007-07-03",defaultLimit = 3))
    shouldBomb(loader$getOneDateSpread(irsObj = irs,source = "internal",myDate = FALSE,defaultLimit = 3))
    shouldBomb(loader$getOneDateSpread(irsObj = irs,source = "internal",myDate = "2007-07-03",defaultLimit = -1))
    # getCurves must reject missing/invalid arguments and reversed date ranges.
    shouldBomb(loader$getCurves())
    shouldBomb(loader$getCurves(irsObj = IRS(),source = "internal",startDate = "2007-05-03",endDate = "2007-07-03"))
    shouldBomb(loader$getCurves(irsObj = irs,source = "int",startDate = "2007-05-03",endDate = "2007-07-03"))
    shouldBomb(loader$getCurves(irsObj = irs,source = "internal",startDate = TRUE,endDate = "2007-07-03"))
    shouldBomb(loader$getCurves(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = FALSE))
    shouldBomb(loader$getCurves(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = "2000-07-03"))
    # getOneDateCurve must reject missing/invalid arguments.
    shouldBomb(loader$getOneDateCurve())
    shouldBomb(loader$getOneDateCurve(irsObj = IRS(),source = "internal",myDate = "2007-07-03",defaultLimit = 3))
    shouldBomb(loader$getOneDateCurve(irsObj = irs,source = "int",myDate = "2007-07-03",defaultLimit = 3))
    shouldBomb(loader$getOneDateCurve(irsObj = irs,source = "internal",myDate = FALSE,defaultLimit = 3))
    shouldBomb(loader$getOneDateCurve(irsObj = irs,source = "internal",myDate = "2007-07-03",defaultLimit = -1))
    # getCurves: a single-day window returns the full 16-tenor curve.
    expected <- getZooDataFrame(zoo(
        matrix(c(
            5.20674,5.10598,5.04051,5.04224,5.06248,5.08716,5.11933,5.14901,
            5.17993,5.20836,5.26586,5.33181,5.38778,5.40622,5.40592,5.39342),nrow = 1, ncol = 16),
        order.by = "2007-05-03"))
    colnames(expected) <- c("18m", "2y", "3y", "4y", "5y", "6y", "7y", "8y", "9y", "10y", "12y", "15y", "20y", "25y", "30y", "40y")
    checkEquals(expected,
        loader$getCurves(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = "2007-05-03")
    )
    # getOneDateCurve must agree with a single-day getCurves window.
    checkEquals(loader$getCurves(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = "2007-05-03"),
        loader$getOneDateCurve(irsObj = irs,source = "internal",myDate = "2007-05-03",defaultLimit = 0)
    )
    # getOneDateSpread must agree with a single-day getSpreads window.
    checkEquals(loader$getSpreads(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = "2007-05-03"),
        loader$getOneDateSpread(irsObj = irs,source = "internal",myDate = "2007-05-03",defaultLimit = 0)
    )
}
## Spreads retrieved with the loader's default filter must match the known
## 5y-tenor values for 2007-05-03 and 2007-05-04.
testIRSDataLoaderGetSpreadsWithDefaultFilter <- function()
{
    loader <- IRSDataLoader()
    irs <- GenericIRS()
    irs$setDefault(tenor = "5y")
    expected <- getZooDataFrame(zoo(c(5.06248,5.02598),order.by = c("2007-05-03","2007-05-04")))
    colnames(expected) <- "5y"
    checkEquals(expected,
        loader$getSpreads(irsObj = irs,source = "internal",startDate = "2007-05-03",endDate = "2007-05-04")
    )
}
## A filter time for which no data exists must make every accessor fail.
testIRSDataLoaderWhenInvalidFilterSupplied <- function() {
    loader <- IRSDataLoader(filter = "13:00:00") # not the NY close that data exists for
    irs <- GenericIRS()
    irs$setDefault(tenor="5y")
    shouldBomb(loader$getSpreads(irsObj = irs, source = "internal", startDate = "2007-05-03",endDate = "2007-05-04"))
    shouldBomb(loader$getCurves(irsObj = irs, source = "internal", startDate = "2007-05-03",endDate = "2007-05-04"))
    # BUG FIX: the one-date accessors take `myDate` (see testIRSDataLoader
    # above), not startDate/endDate. With the wrong argument names these calls
    # bombed on the signature mismatch rather than on the invalid filter, so
    # the filter path was never actually exercised here.
    shouldBomb(loader$getOneDateSpread(irsObj = irs, source = "internal", myDate = "2007-05-03", defaultLimit = 0))
    shouldBomb(loader$getOneDateCurve(irsObj = irs, source = "internal", myDate = "2007-05-03", defaultLimit = 0))
}
|
#' getVariantsColors
#'
#' @description
#'
#' Given the reference and alternative for a set of variants, assigns a color
#' to each of them
#'
#' @details
#'
#' The function creates a nucleotide substitution identifier for each
#' variant and uses it to query the color.table lookup table. If color.table
#' is NULL, a color.table based on the selected color.schema is used. All
#' unknown nucleotide substitutions are assigned a gray color. The color table
#' needs to have entries for C>A, C>G, C>T, T>A, T>C and T>G (and optionally
#' "other"), since other changes can be reverse complemented to these.
#'
#' @usage getVariantsColors(ref, alt, color.table=NULL, color.schema=c("cell21breast"))
#'
#' @param ref (character vector) The reference nucleotides of the variants. It has to have the same length as \code{alt}.
#' @param alt (character vector) The alternative nucleotides of the variants. It has to have the same length as \code{ref}
#' @param color.table (named character vector) if present, it is used to assign colors to the nucleotide substitutions.
#' @param color.schema (character) The name of the color schema to use: \code{cell21breast} (the color schema used in "Mutational Processes Molding the Genomes of 21 Breast Cancers" by S. Nik-Zainal, Cell, 2012). (defaults to \code{cell21breast})
#'
#' @return
#' a named character vector with the colors associated to each variant
#'
#' @seealso \code{\link{plotKaryotype}}, \code{\link{kpPlotRainfall}}
#'
#' @examples
#'
#' ref <- c("A", "A", "C", "T", "G", "A")
#' alt <- c("G", "C", "T", "A", "A", "-")
#' getVariantsColors(ref, alt)
#'
#' col.table <- c("C>A"="#FF0000", "C>G"="#000000", "C>T"="#00FF00", "T>A"="#0000FF", "T>C"="#BB00BB", "T>G"="#00BBBB", "other"="#888888")
#' getVariantsColors(ref, alt, col.table)
#'
#'
#' @export getVariantsColors
#'
getVariantsColors <- function(ref, alt, color.table=NULL, color.schema=c("cell21breast")) {
  # Validate inputs: two character vectors of equal length.
  if(!methods::is(ref, "character")) stop(paste0("In getVariantsColors: 'ref' must be a valid character object"))
  if(!methods::is(alt, "character")) stop(paste0("In getVariantsColors: 'alt' must be a valid character object"))
  if(length(ref) != length(alt)) stop(paste0("In getVariantsColors: 'ref' and 'alt' must have the same length"))
  color.schema <- match.arg(color.schema)
  # Build the default lookup table for the selected schema if none was given.
  if(is.null(color.table)) {
    if(color.schema=="cell21breast") {
      color.table <- c("C>A"="#4c64ae",
                       "C>G"="#000000",
                       "C>T"="#e40611",
                       "T>A"="#bf4a96",
                       "T>C"="#fbe800",
                       "T>G"="#6eb529",
                       "other"="#888888")
    }
  }
  # Reverse complement substitutions with an A or G reference so every change
  # is expressed with a C or T reference (the 6 canonical classes).
  comp <- c(G="C", A="T", C="G", T="A")
  to.comp <- which(ref=="A" | ref=="G")
  ref[to.comp] <- comp[ref[to.comp]]
  alt[to.comp] <- comp[alt[to.comp]]
  # Look up each "ref>alt" identifier; unmatched ones come back as NA.
  var.cols <- color.table[paste0(ref, ">", alt)]
  # BUG FIX: indexing a named vector with a missing name yields NA, never
  # NULL, so the original `!is.null(color.table["other"])` test was always
  # TRUE -- when color.table lacked an "other" entry, unknown substitutions
  # were silently assigned NA instead of gray. Test membership by name.
  if("other" %in% names(color.table)) {
    var.cols[which(is.na(var.cols))] <- color.table[["other"]]
  } else {
    var.cols[which(is.na(var.cols))] <- "#888888"
  }
  return(var.cols)
}
|
/R/getVariantsColors.R
|
no_license
|
jing-wan/karyoploteR
|
R
| false
| false
| 3,151
|
r
|
#' getVariantsColors
#'
#' @description
#'
#' Given the reference and alternative for a set of variants, assigns a color
#' to each of them
#'
#' @details
#'
#' The function creates a nucleotide substitution identifier for each
#' variant and uses it to query the color.table lookup table. If color.table
#' is NULL, a color.table based on the selected color.schema is used. All
#' unknown nucleotide substitutions are assigned a gray color. The color table
#' needs to have entries for C>A, C>G, C>T, T>A, T>C and T>G (and optionally
#' "other"), since other changes can be reverse complemented to these.
#'
#' @usage getVariantsColors(ref, alt, color.table=NULL, color.schema=c("cell21breast"))
#'
#' @param ref (character vector) The reference nucleotides of the variants. It has to have the same length as \code{alt}.
#' @param alt (character vector) The alternative nucleotides of the variants. It has to have the same length as \code{ref}
#' @param color.table (named character vector) if present, it is used to assign colors to the nucleotide substitutions.
#' @param color.schema (character) The name of the color schema to use: \code{cell21breast} (the color schema used in "Mutational Processes Molding the Genomes of 21 Breast Cancers" by S. Nik-Zainal, Cell, 2012). (defaults to \code{cell21breast})
#'
#' @return
#' a named character vector with the colors associated to each variant
#'
#' @seealso \code{\link{plotKaryotype}}, \code{\link{kpPlotRainfall}}
#'
#' @examples
#'
#' ref <- c("A", "A", "C", "T", "G", "A")
#' alt <- c("G", "C", "T", "A", "A", "-")
#' getVariantsColors(ref, alt)
#'
#' col.table <- c("C>A"="#FF0000", "C>G"="#000000", "C>T"="#00FF00", "T>A"="#0000FF", "T>C"="#BB00BB", "T>G"="#00BBBB", "other"="#888888")
#' getVariantsColors(ref, alt, col.table)
#'
#'
#' @export getVariantsColors
#'
getVariantsColors <- function(ref, alt, color.table=NULL, color.schema=c("cell21breast")) {
  # Validate inputs: two character vectors of equal length.
  if(!methods::is(ref, "character")) stop(paste0("In getVariantsColors: 'ref' must be a valid character object"))
  if(!methods::is(alt, "character")) stop(paste0("In getVariantsColors: 'alt' must be a valid character object"))
  if(length(ref) != length(alt)) stop(paste0("In getVariantsColors: 'ref' and 'alt' must have the same length"))
  color.schema <- match.arg(color.schema)
  # Build the default lookup table for the selected schema if none was given.
  if(is.null(color.table)) {
    if(color.schema=="cell21breast") {
      color.table <- c("C>A"="#4c64ae",
                       "C>G"="#000000",
                       "C>T"="#e40611",
                       "T>A"="#bf4a96",
                       "T>C"="#fbe800",
                       "T>G"="#6eb529",
                       "other"="#888888")
    }
  }
  # Reverse complement substitutions with an A or G reference so every change
  # is expressed with a C or T reference (the 6 canonical classes).
  comp <- c(G="C", A="T", C="G", T="A")
  to.comp <- which(ref=="A" | ref=="G")
  ref[to.comp] <- comp[ref[to.comp]]
  alt[to.comp] <- comp[alt[to.comp]]
  # Look up each "ref>alt" identifier; unmatched ones come back as NA.
  var.cols <- color.table[paste0(ref, ">", alt)]
  # BUG FIX: indexing a named vector with a missing name yields NA, never
  # NULL, so the original `!is.null(color.table["other"])` test was always
  # TRUE -- when color.table lacked an "other" entry, unknown substitutions
  # were silently assigned NA instead of gray. Test membership by name.
  if("other" %in% names(color.table)) {
    var.cols[which(is.na(var.cols))] <- color.table[["other"]]
  } else {
    var.cols[which(is.na(var.cols))] <- "#888888"
  }
  return(var.cols)
}
|
\name{max.subtree.rfsrc}
\alias{max.subtree.rfsrc}
\alias{max.subtree}
\title{Acquire Maximal Subtree Information}
\description{
Extract maximal subtree information from a RF-SRC object. Used for
variable selection and identifying interactions between variables.
}
\usage{\method{max.subtree}{rfsrc}(object,
max.order = 2, sub.order = FALSE, conservative = FALSE, ...)
}
\arguments{
\item{object}{An object of class \code{(rfsrc, grow)} or \code{(rfsrc,
forest).}}
\item{max.order}{Non-negative integer specifying the target number
of order depths. Default is to return the first and second order
depths. Used to identify predictive variables. Setting
\option{max.order=0} returns the first order depth for each
variable by tree. A side effect is that \option{conservative} is
automatically set to \code{FALSE}.}
\item{sub.order}{Set this value to \code{TRUE} to return the
minimal depth of each variable relative to another variable.
Used to identify interrelationship between variables. See
details below.}
\item{conservative}{If \code{TRUE}, the threshold value for selecting
variables is calculated using a conservative marginal
approximation to the minimal depth distribution (the method used
in Ishwaran et al. 2010). Otherwise, the minimal depth
distribution is the tree-averaged distribution. The latter method
tends to give larger threshold values and discovers more
variables, especially in high-dimensions.}
\item{...}{Further arguments passed to or from other methods.}
}
\details{
The maximal subtree for a variable \emph{x} is the largest subtree
whose root node splits on \emph{x}. Thus, all parent nodes of
\emph{x}'s maximal subtree have nodes that split on variables other
than \emph{x}. The largest maximal subtree possible is the root
node. In general, however, there can be more than one maximal
subtree for a variable. A maximal subtree may also not exist if
there are no splits on the variable. See Ishwaran et al. (2010,
2011) for details.
The minimal depth of a maximal subtree (the first order depth)
measures predictiveness of a variable \emph{x}. It equals the
shortest distance (the depth) from the root node to the parent node
of the maximal subtree (zero is the smallest value possible). The
smaller the minimal depth, the more impact \emph{x} has on
prediction. The mean of the minimal depth distribution is used as
the threshold value for deciding whether a variable's minimal depth
value is small enough for the variable to be classified as strong.
The second order depth is the distance from the root node to the
second closest maximal subtree of \emph{x}. To specify the target
order depth, use the \code{max.order} option (e.g., setting
\option{max.order=2} returns the first and second order depths).
Setting \option{max.order=0} returns the first order depth for each
variable for each tree.
Set \option{sub.order=TRUE} to obtain the minimal depth of a
variable relative to another variable. This returns a
\code{p}x\code{p} matrix, where \code{p} is the number of variables,
and entries [i][j] are the normalized relative minimal depth of a
variable [j] within the maximal subtree for variable [i], where
normalization adjusts for the size of [i]'s maximal subtree. Entry
[i][i] is the normalized minimal depth of i relative to the root
node. The matrix should be read by looking across rows (not down
columns) and identifies interrelationship between variables. Small
[i][j] entries indicate interactions. See
\command{find.interaction} for related details.
For competing risk data, maximal subtree analyses are unconditional
(i.e., they are non-event specific).
}
\value{
Invisibly, a list with the following components:
\item{order}{Order depths for a given variable up to \code{max.order}
averaged over a tree and the forest. Matrix of dimension
\code{p}x\code{max.order}. If \option{max.order=0}, a matrix of
\code{p}x\code{ntree} is returned containing the first order depth
for each variable by tree.}
\item{count}{Averaged number of maximal subtrees, normalized by
the size of a tree, for each variable.}
\item{nodes.at.depth}{Number of non-terminal nodes by depth for each tree.}
\item{sub.order}{Average minimal depth of a variable relative to another
variable. Can be \code{NULL}.}
\item{threshold}{Threshold value (the mean minimal depth) used to
select variables.}
\item{threshold.1se}{Mean minimal depth plus one standard error.}
\item{topvars}{Character vector of names of the final selected
variables.}
\item{topvars.1se}{Character vector of names of the final selected
variables using the 1se threshold rule.}
\item{percentile}{Minimal depth percentile for each variable.}
\item{density}{Estimated minimal depth density.}
\item{second.order.threshold}{Threshold for second order depth.}
}
\author{
Hemant Ishwaran and Udaya B. Kogalur
}
\references{
Ishwaran H., Kogalur U.B., Gorodeski E.Z, Minn A.J. and
Lauer M.S. (2010). High-dimensional variable selection for survival
data. \emph{J. Amer. Statist. Assoc.}, 105:205-217.
Ishwaran H., Kogalur U.B., Chen X. and Minn A.J. (2011). Random
survival forests for high-dimensional data. \emph{Statist. Anal. Data
Mining}, 4:115-132.
}
\seealso{
\command{\link{holdout.vimp.rfsrc}},
\command{\link{var.select.rfsrc}},
\command{\link{vimp.rfsrc}}
}
\examples{
\donttest{
## ------------------------------------------------------------
## survival analysis
## first and second order depths for all variables
## ------------------------------------------------------------
data(veteran, package = "randomForestSRC")
v.obj <- rfsrc(Surv(time, status) ~ . , data = veteran)
v.max <- max.subtree(v.obj)
# first and second order depths
print(round(v.max$order, 3))
# the minimal depth is the first order depth
print(round(v.max$order[, 1], 3))
# strong variables have minimal depth less than or equal
# to the following threshold
print(v.max$threshold)
# this corresponds to the set of variables
print(v.max$topvars)
## ------------------------------------------------------------
## regression analysis
## try different levels of conservativeness
## ------------------------------------------------------------
mtcars.obj <- rfsrc(mpg ~ ., data = mtcars)
max.subtree(mtcars.obj)$topvars
max.subtree(mtcars.obj, conservative = TRUE)$topvars
}
}
\keyword{variable selection}
|
/man/max.subtree.rfsrc.Rd
|
no_license
|
nkuwangkai/randomForestSRC
|
R
| false
| false
| 6,560
|
rd
|
\name{max.subtree.rfsrc}
\alias{max.subtree.rfsrc}
\alias{max.subtree}
\title{Acquire Maximal Subtree Information}
\description{
Extract maximal subtree information from a RF-SRC object. Used for
variable selection and identifying interactions between variables.
}
\usage{\method{max.subtree}{rfsrc}(object,
max.order = 2, sub.order = FALSE, conservative = FALSE, ...)
}
\arguments{
\item{object}{An object of class \code{(rfsrc, grow)} or \code{(rfsrc,
forest).}}
\item{max.order}{Non-negative integer specifying the target number
of order depths. Default is to return the first and second order
depths. Used to identify predictive variables. Setting
\option{max.order=0} returns the first order depth for each
variable by tree. A side effect is that \option{conservative} is
automatically set to \code{FALSE}.}
\item{sub.order}{Set this value to \code{TRUE} to return the
minimal depth of each variable relative to another variable.
Used to identify interrelationship between variables. See
details below.}
\item{conservative}{If \code{TRUE}, the threshold value for selecting
variables is calculated using a conservative marginal
approximation to the minimal depth distribution (the method used
in Ishwaran et al. 2010). Otherwise, the minimal depth
distribution is the tree-averaged distribution. The latter method
tends to give larger threshold values and discovers more
variables, especially in high-dimensions.}
\item{...}{Further arguments passed to or from other methods.}
}
\details{
The maximal subtree for a variable \emph{x} is the largest subtree
whose root node splits on \emph{x}. Thus, all parent nodes of
\emph{x}'s maximal subtree have nodes that split on variables other
than \emph{x}. The largest maximal subtree possible is the root
node. In general, however, there can be more than one maximal
subtree for a variable. A maximal subtree may also not exist if
there are no splits on the variable. See Ishwaran et al. (2010,
2011) for details.
The minimal depth of a maximal subtree (the first order depth)
measures predictiveness of a variable \emph{x}. It equals the
shortest distance (the depth) from the root node to the parent node
of the maximal subtree (zero is the smallest value possible). The
smaller the minimal depth, the more impact \emph{x} has on
prediction. The mean of the minimal depth distribution is used as
the threshold value for deciding whether a variable's minimal depth
value is small enough for the variable to be classified as strong.
The second order depth is the distance from the root node to the
second closest maximal subtree of \emph{x}. To specify the target
order depth, use the \code{max.order} option (e.g., setting
\option{max.order=2} returns the first and second order depths).
Setting \option{max.order=0} returns the first order depth for each
variable for each tree.
Set \option{sub.order=TRUE} to obtain the minimal depth of a
variable relative to another variable. This returns a
\code{p}x\code{p} matrix, where \code{p} is the number of variables,
and entries [i][j] are the normalized relative minimal depth of a
variable [j] within the maximal subtree for variable [i], where
normalization adjusts for the size of [i]'s maximal subtree. Entry
[i][i] is the normalized minimal depth of i relative to the root
node. The matrix should be read by looking across rows (not down
columns) and identifies interrelationship between variables. Small
[i][j] entries indicate interactions. See
\command{find.interaction} for related details.
For competing risk data, maximal subtree analyses are unconditional
(i.e., they are non-event specific).
}
\value{
Invisibly, a list with the following components:
\item{order}{Order depths for a given variable up to \code{max.order}
averaged over a tree and the forest. Matrix of dimension
\code{p}x\code{max.order}. If \option{max.order=0}, a matrix of
\code{p}x\code{ntree} is returned containing the first order depth
for each variable by tree.}
\item{count}{Averaged number of maximal subtrees, normalized by
the size of a tree, for each variable.}
\item{nodes.at.depth}{Number of non-terminal nodes by depth for each tree.}
\item{sub.order}{Average minimal depth of a variable relative to another
variable. Can be \code{NULL}.}
\item{threshold}{Threshold value (the mean minimal depth) used to
select variables.}
\item{threshold.1se}{Mean minimal depth plus one standard error.}
\item{topvars}{Character vector of names of the final selected
variables.}
\item{topvars.1se}{Character vector of names of the final selected
variables using the 1se threshold rule.}
\item{percentile}{Minimal depth percentile for each variable.}
\item{density}{Estimated minimal depth density.}
\item{second.order.threshold}{Threshold for second order depth.}
}
\author{
Hemant Ishwaran and Udaya B. Kogalur
}
\references{
Ishwaran H., Kogalur U.B., Gorodeski E.Z, Minn A.J. and
Lauer M.S. (2010). High-dimensional variable selection for survival
data. \emph{J. Amer. Statist. Assoc.}, 105:205-217.
Ishwaran H., Kogalur U.B., Chen X. and Minn A.J. (2011). Random
survival forests for high-dimensional data. \emph{Statist. Anal. Data
Mining}, 4:115-132.
}
\seealso{
\command{\link{holdout.vimp.rfsrc}},
\command{\link{var.select.rfsrc}},
\command{\link{vimp.rfsrc}}
}
\examples{
\donttest{
## ------------------------------------------------------------
## survival analysis
## first and second order depths for all variables
## ------------------------------------------------------------
data(veteran, package = "randomForestSRC")
v.obj <- rfsrc(Surv(time, status) ~ . , data = veteran)
v.max <- max.subtree(v.obj)
# first and second order depths
print(round(v.max$order, 3))
# the minimal depth is the first order depth
print(round(v.max$order[, 1], 3))
# strong variables have minimal depth less than or equal
# to the following threshold
print(v.max$threshold)
# this corresponds to the set of variables
print(v.max$topvars)
## ------------------------------------------------------------
## regression analysis
## try different levels of conservativeness
## ------------------------------------------------------------
mtcars.obj <- rfsrc(mpg ~ ., data = mtcars)
max.subtree(mtcars.obj)$topvars
max.subtree(mtcars.obj, conservative = TRUE)$topvars
}
}
\keyword{variable selection}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/link_time_performance_integrated.R
\name{link_time_performance_integrated}
\alias{link_time_performance_integrated}
\title{Integration of link time performance}
\usage{
link_time_performance_integrated(flow, t0, capacity, alpha = 0.15, beta = 4)
}
\arguments{
\item{flow}{Vector with current traffic flow}
\item{t0}{Link free-flow times}
\item{capacity}{Link capacity}
\item{alpha}{alpha BPR function parameter}
\item{beta}{beta BPR function parameter}
}
\value{
Vector with integrated values from performance function
}
\description{
The integrated (with respect to link flow) form of
the aforementioned performance function.
Some optimization should be implemented for avoiding overflow
}
|
/man/link_time_performance_integrated.Rd
|
no_license
|
douglascm/trafficr
|
R
| false
| true
| 769
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/link_time_performance_integrated.R
\name{link_time_performance_integrated}
\alias{link_time_performance_integrated}
\title{Integration of link time performance}
\usage{
link_time_performance_integrated(flow, t0, capacity, alpha = 0.15, beta = 4)
}
\arguments{
\item{flow}{Vector with current traffic flow}
\item{t0}{Link free-flow times}
\item{capacity}{Link capacity}
\item{alpha}{alpha BPR function parameter}
\item{beta}{beta BPR function parameter}
}
\value{
Vector with integrated values from performance function
}
\description{
The integrated (with respect to link flow) form of
the aforementioned performance function.
Some optimization should be implemented for avoiding overflow
}
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Shiny server function: renders a birth-date message in output$dateText,
# comparing the selected date against today's date.
function(input, output, session) {
  # input$date and others are Date objects. When outputting
  # text, we need to convert to character; otherwise it will
  # print an integer rather than a date.
  output$dateText <- renderText({
    if (input$date < Sys.Date()) {
      # Past date: report the weekday of birth.
      # (Fixed grammar: "You have born on" -> "You were born on".)
      paste("You were born on", as.character(weekdays(input$date)))
    }
    else if (input$date > Sys.Date()) {
      # Future date: report the expected weekday of birth.
      paste("The baby will be born on", enc2native(as.character(weekdays(input$date))))
    }
    else {
      "Congratulations on the new baby"
    }
  })
}
|
/server.R
|
no_license
|
HossamMHassan/App
|
R
| false
| false
| 803
|
r
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Shiny server function: renders a birth-date message in output$dateText,
# comparing the selected date against today's date.
function(input, output, session) {
  # input$date and others are Date objects. When outputting
  # text, we need to convert to character; otherwise it will
  # print an integer rather than a date.
  output$dateText <- renderText({
    if (input$date < Sys.Date()) {
      # Past date: report the weekday of birth.
      # (Fixed grammar: "You have born on" -> "You were born on".)
      paste("You were born on", as.character(weekdays(input$date)))
    }
    else if (input$date > Sys.Date()) {
      # Future date: report the expected weekday of birth.
      paste("The baby will be born on", enc2native(as.character(weekdays(input$date))))
    }
    else {
      "Congratulations on the new baby"
    }
  })
}
|
# Connection lifecycle test: rws_connect(exists = TRUE) must fail for a
# non-existent file, and vld_sqlite_conn() must track the connected state
# across connect/disconnect.
test_that("rws_connect", {
  # Requesting an existing file that isn't there is an error.
  expect_error(
    rws_connect(":memory:", exists = TRUE),
    "File ':memory:' must already exist."
  )
  # A fresh in-memory connection validates as connected ...
  conn <- rws_connect(":memory:")
  expect_true(vld_sqlite_conn(conn, connected = TRUE))
  # ... and as disconnected after rws_disconnect().
  rws_disconnect(conn)
  expect_true(vld_sqlite_conn(conn, connected = FALSE))
})
|
/tests/testthat/test-connection.R
|
permissive
|
poissonconsulting/readwritesqlite
|
R
| false
| false
| 304
|
r
|
# Connection lifecycle test: rws_connect(exists = TRUE) must fail for a
# non-existent file, and vld_sqlite_conn() must track the connected state
# across connect/disconnect.
test_that("rws_connect", {
  # Requesting an existing file that isn't there is an error.
  expect_error(
    rws_connect(":memory:", exists = TRUE),
    "File ':memory:' must already exist."
  )
  # A fresh in-memory connection validates as connected ...
  conn <- rws_connect(":memory:")
  expect_true(vld_sqlite_conn(conn, connected = TRUE))
  # ... and as disconnected after rws_disconnect().
  rws_disconnect(conn)
  expect_true(vld_sqlite_conn(conn, connected = FALSE))
})
|
# Plot 3: total PM2.5 emissions in Baltimore City (fips 24510) by source type.
# library() errors immediately when a dependency is missing; require() only
# returns FALSE and lets the script fail later.
library(lubridate)
library(ggplot2)
# Download and unpack the NEI data if not already present.
if (!(file.exists("Source_Classification_Code.rds"))) {
  download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip", "dataset.zip")
  unzip("dataset.zip")
  file.remove("dataset.zip")
}
# Variable creation and general cleanup
SCC <- readRDS(file = "Source_Classification_Code.rds")
# Make every SCC column a factor and recode empty / blank strings as NA.
for (i in names(SCC)) {
  SCC[[i]] <- as.factor(SCC[[i]])
  SCC[[i]][SCC[[i]] == ""] <- NA
  SCC[[i]][SCC[[i]] == " "] <- NA
}
SCC <- droplevels(SCC)
SCC$Created_Date <- mdy_hms(SCC$Created_Date)
SCC$Revised_Date <- mdy_hms(SCC$Revised_Date)
NEI <- readRDS(file = "summarySCC_PM25.rds")
NEI$Pollutant <- NULL
# All NEI columns except the numeric Emissions become factors.
for (i in names(NEI)) {
  if (i != "Emissions") {
    NEI[[i]] <- as.factor(NEI[[i]])
  }
}
NEI$fips[NEI$fips == " NA"] <- NA
NEI$fips <- droplevels(NEI$fips)
rm(i)
#### Cleanup ends here ####
baltimore.fips <- "24510"
baltimore <- NEI[NEI$fips == baltimore.fips, ]
baltimore.bytype <- aggregate(Emissions ~ year + type, data = baltimore, sum)
# NOTE(review): the labels below assume aggregate() returns exactly 16 rows
# in a fixed year-within-type order -- confirm against the dataset before
# reusing this script elsewhere.
baltimore.bytype$Category <- c(rep("POINT", 4), rep("ROAD", 8), rep("POINT", 4))
baltimore.bytype$OnNon <- c(rep("NON", 8), rep("ON", 8))
png(filename = "plot3.png", width = 480, height = 480, units = "px")
ggplot(baltimore.bytype, aes(year, Emissions, fill=type)) +
  geom_bar(stat = "identity") + facet_grid(Category~OnNon) +
  ggtitle("Total Baltimore Emissions by Type") +
  ylab("Emissions (Tons of PM2.5)") + xlab("Year") + theme(legend.position = "bottom")
dev.off()
|
/plot3.R
|
no_license
|
joaoclemencio/EDA-Course-Project-2
|
R
| false
| false
| 1,501
|
r
|
# Plot 3: total PM2.5 emissions in Baltimore City (fips 24510) by source type.
# library() errors immediately when a dependency is missing; require() only
# returns FALSE and lets the script fail later.
library(lubridate)
library(ggplot2)
# Download and unpack the NEI data if not already present.
if (!(file.exists("Source_Classification_Code.rds"))) {
  download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip", "dataset.zip")
  unzip("dataset.zip")
  file.remove("dataset.zip")
}
# Variable creation and general cleanup
SCC <- readRDS(file = "Source_Classification_Code.rds")
# Make every SCC column a factor and recode empty / blank strings as NA.
for (i in names(SCC)) {
  SCC[[i]] <- as.factor(SCC[[i]])
  SCC[[i]][SCC[[i]] == ""] <- NA
  SCC[[i]][SCC[[i]] == " "] <- NA
}
SCC <- droplevels(SCC)
SCC$Created_Date <- mdy_hms(SCC$Created_Date)
SCC$Revised_Date <- mdy_hms(SCC$Revised_Date)
NEI <- readRDS(file = "summarySCC_PM25.rds")
NEI$Pollutant <- NULL
# All NEI columns except the numeric Emissions become factors.
for (i in names(NEI)) {
  if (i != "Emissions") {
    NEI[[i]] <- as.factor(NEI[[i]])
  }
}
NEI$fips[NEI$fips == " NA"] <- NA
NEI$fips <- droplevels(NEI$fips)
rm(i)
#### Cleanup ends here ####
baltimore.fips <- "24510"
baltimore <- NEI[NEI$fips == baltimore.fips, ]
baltimore.bytype <- aggregate(Emissions ~ year + type, data = baltimore, sum)
# NOTE(review): the labels below assume aggregate() returns exactly 16 rows
# in a fixed year-within-type order -- confirm against the dataset before
# reusing this script elsewhere.
baltimore.bytype$Category <- c(rep("POINT", 4), rep("ROAD", 8), rep("POINT", 4))
baltimore.bytype$OnNon <- c(rep("NON", 8), rep("ON", 8))
png(filename = "plot3.png", width = 480, height = 480, units = "px")
ggplot(baltimore.bytype, aes(year, Emissions, fill=type)) +
  geom_bar(stat = "identity") + facet_grid(Category~OnNon) +
  ggtitle("Total Baltimore Emissions by Type") +
  ylab("Emissions (Tons of PM2.5)") + xlab("Year") + theme(legend.position = "bottom")
dev.off()
|
## ---------- dataprep.R ----------- ##
# #
# fastaconc #
# df2fasta #
# d.phy2df #
# #
## --------------------------------- ##
## ----------------------------------------------------------- ##
# fastaconc(otus, inputdir,out.file) #
## ----------------------------------------------------------- ##
#' Concatenate Fasta Files in a Single Multispecies Fasta File
#' @description Concatenate fasta files from different species in a single multispecies fasta file.
#' @usage fastaconc(otus, inputdir = ".", out.file = "./concatenated_multispecies.fasta")
#' @param otus a character vector giving the otus' names.
#' @param inputdir path to the directory containing the individual fasta files.
#' @param out.file path and name of output file.
#' @details When we have fasta files (extension should be '.fasta'), each one for a species containing different sequences of the given species, this function concatenates the different sequences of the same species and writes it as a single sequence in a single multispecies fasta file. If the individual fasta files are found in the working directory, the inputdir argument don't need to be passed. The names of the individual fasta files must match the otus' names.
#' @return A single multispecies fasta file with the sequences of each species spliced in a single sequence.
#' @seealso df2fasta()
#' @examples \dontrun{fastaconc(otus = c('Glis_glis', 'Ovis_aries', 'Sus_scrofa'))}
#' @importFrom seqinr read.fasta
#' @importFrom seqinr write.fasta
#' @export
fastaconc <- function(otus, inputdir = ".", out.file = "./concatenated_multispecies.fasta"){
  seqs <- character(length(otus))
  # seq_along() (not 1:length()) so an empty `otus` runs zero iterations
  # instead of indexing with c(1, 0).
  for (i in seq_along(otus)){
    # Read every sequence of this species and splice them into one
    # upper-case string.
    f <- unlist(seqinr::read.fasta(file.path(inputdir, paste0(otus[i], ".fasta"))))
    seqs[i] <- paste(toupper(f), collapse = "")
  }
  seqinr::write.fasta(sequences = as.list(seqs),
                      names = otus,
                      file.out = out.file,
                      as.string = TRUE)
  print(paste("Work finished. Fasta file saved at", out.file))
}
## ---------------------------------------------------- ##
## df2fasta(df, out.file) ##
## ---------------------------------------------------- ##
#' Convert Dataframe into Fasta File
#' @description Converts a dataframe into a fasta file.
#' @usage df2fasta(df, out.file)
#' @param df a named (both rows and cols) dataframe (see details).
#' @param out.file path and name of output file.
#' @details The format of the df should be as follows. Each row represents a protein sequence and each column a species.
#' @return A fasta file that is saved in the specified path.
#' @seealso fastaconc()
#' @examples \dontrun{df2fasta(df = bovids, out.file = "./example.fasta")}
#' @importFrom seqinr write.fasta
#' @export
df2fasta <- function(df, out.file){
  otus <- names(df)
  # Collapse each column (one species) into a single sequence string.
  seqs <- unlist(lapply(seq_along(otus), function(i) paste(df[, i], collapse = "")))
  # The original call ended in "as.string = TRUE,)" -- the trailing comma
  # passed an extra empty (missing) argument to write.fasta; removed.
  seqinr::write.fasta(sequences = as.list(seqs),
                      names = otus,
                      file.out = out.file,
                      as.string = TRUE)
  print(paste("Work finished. Files saved at", out.file))
}
## ---------------------------------------------------- ##
## d.phy2df(phyfile, as) ##
## ---------------------------------------------------- ##
#' Convert a Phylip Distance Matrix into a DataFrame
#' @description Converts a phylip distance matrix into either an R dataframe or matrix.
#' @usage d.phy2df(phyfile, as = 'matrix')
#' @param phyfile path to the file containing the distances in phylip format.
#' @param as class of the output R data. It should be either 'dataframe' or 'matrix'.
#' @return Either a dataframe or a matrix containing the distances read from the phy file.
#' @seealso d.df2pny()
#' @examples \dontrun{d.phy2df(phyfile = "./data_t/d_dummy.txt")}
#' @importFrom utils read.csv
#' @export
d.phy2df <- function(phyfile, as = 'matrix'){
  # Fail fast on a mistyped 'as' instead of silently returning a dataframe.
  if (!as %in% c('matrix', 'dataframe')) {
    stop("'as' should be either 'matrix' or 'dataframe'")
  }
  # The first line of a phylip distance file holds the taxa count; skip it.
  # sep = "" splits on any run of whitespace.
  d <- read.csv(phyfile, sep = "", header = FALSE, skip = 1)
  # First column carries the taxa labels; use them for both dimensions.
  # (renamed from `names` to avoid shadowing base::names)
  labels <- as.character(d[, 1])
  d <- d[, -1]
  names(d) <- labels
  rownames(d) <- labels
  if (as == 'matrix') {
    d <- as.matrix(d)
  }
  return(d)
}
|
/R/dataprep.R
|
no_license
|
cran/EnvNJ
|
R
| false
| false
| 4,428
|
r
|
## ---------- dataprep.R ----------- ##
# #
# fastaconc #
# df2fasta #
# d.phy2df #
# #
## --------------------------------- ##
## ----------------------------------------------------------- ##
# fastaconc(otus, inputdir,out.file) #
## ----------------------------------------------------------- ##
#' Concatenate Fasta Files in a Single Multispecies Fasta File
#' @description Concatenate fasta files from different species in a single multispecies fasta file.
#' @usage fastaconc(otus, inputdir = ".", out.file = "./concatenated_multispecies.fasta")
#' @param otus a character vector giving the otus' names.
#' @param inputdir path to the directory containing the individual fasta files.
#' @param out.file path and name of output file.
#' @details When we have fasta files (extension should be '.fasta'), each one for a species containing different sequences of the given species, this function concatenates the different sequences of the same species and writes it as a single sequence in a single multispecies fasta file. If the individual fasta files are found in the working directory, the inputdir argument don't need to be passed. The names of the individual fasta files must match the otus' names.
#' @return A single multispecies fasta file with the sequences of each species spliced in a single sequence.
#' @seealso df2fasta()
#' @examples \dontrun{fastaconc(otus = c('Glis_glis', 'Ovis_aries', 'Sus_scrofa'))}
#' @importFrom seqinr read.fasta
#' @importFrom seqinr write.fasta
#' @export
fastaconc <- function(otus, inputdir = ".", out.file = "./concatenated_multispecies.fasta"){
  seqs <- character(length(otus))
  # seq_along() (not 1:length()) so an empty `otus` runs zero iterations
  # instead of indexing with c(1, 0).
  for (i in seq_along(otus)){
    # Read every sequence of this species and splice them into one
    # upper-case string.
    f <- unlist(seqinr::read.fasta(file.path(inputdir, paste0(otus[i], ".fasta"))))
    seqs[i] <- paste(toupper(f), collapse = "")
  }
  seqinr::write.fasta(sequences = as.list(seqs),
                      names = otus,
                      file.out = out.file,
                      as.string = TRUE)
  print(paste("Work finished. Fasta file saved at", out.file))
}
## ---------------------------------------------------- ##
## df2fasta(df, out.file) ##
## ---------------------------------------------------- ##
#' Convert Dataframe into Fasta File
#' @description Converts a dataframe into a fasta file.
#' @usage df2fasta(df, out.file)
#' @param df a named (both rows and cols) dataframe (see details).
#' @param out.file path and name of output file.
#' @details The format of the df should be as follows. Each row represents a protein sequence and each column a species.
#' @return A fasta file that is saved in the specified path.
#' @seealso fastaconc()
#' @examples \dontrun{df2fasta(df = bovids, out.file = "./example.fasta")}
#' @importFrom seqinr write.fasta
#' @export
df2fasta <- function(df, out.file){
  otus <- names(df)
  # Collapse each column (one species) into a single sequence string.
  seqs <- unlist(lapply(seq_along(otus), function(i) paste(df[, i], collapse = "")))
  # The original call ended in "as.string = TRUE,)" -- the trailing comma
  # passed an extra empty (missing) argument to write.fasta; removed.
  seqinr::write.fasta(sequences = as.list(seqs),
                      names = otus,
                      file.out = out.file,
                      as.string = TRUE)
  print(paste("Work finished. Files saved at", out.file))
}
## ---------------------------------------------------- ##
## d.phy2df(phyfile, as) ##
## ---------------------------------------------------- ##
#' Convert a Phylip Distance Matrix into a DataFrame
#' @description Converts a phylip distance matrix into either an R dataframe or matrix.
#' @usage d.phy2df(phyfile, as = 'matrix')
#' @param phyfile path to the file containing the distances in phylip format.
#' @param as class of the output R data. It should be either 'dataframe' or 'matrix'.
#' @return Either a dataframe or a matrix containing the distances read from the phy file.
#' @seealso d.df2pny()
#' @examples \dontrun{d.phy2df(phyfile = "./data_t/d_dummy.txt")}
#' @importFrom utils read.csv
#' @export
d.phy2df <- function(phyfile, as = 'matrix'){
  # Fail fast on a mistyped 'as' instead of silently returning a dataframe.
  if (!as %in% c('matrix', 'dataframe')) {
    stop("'as' should be either 'matrix' or 'dataframe'")
  }
  # The first line of a phylip distance file holds the taxa count; skip it.
  # sep = "" splits on any run of whitespace.
  d <- read.csv(phyfile, sep = "", header = FALSE, skip = 1)
  # First column carries the taxa labels; use them for both dimensions.
  # (renamed from `names` to avoid shadowing base::names)
  labels <- as.character(d[, 1])
  d <- d[, -1]
  names(d) <- labels
  rownames(d) <- labels
  if (as == 'matrix') {
    d <- as.matrix(d)
  }
  return(d)
}
|
## Plot 1: histogram of Global Active Power for 1-2 Feb 2007.
# Read the full household power dataset, then keep only the two target days.
power_data <- read.table("./household_power_consumption.txt",
                         header = TRUE, sep = ";",
                         stringsAsFactors = FALSE, dec = ".")
two_days <- power_data[power_data$Date %in% c("1/2/2007", "2/2/2007"), ]
## Plot the histogram to a 480x480 PNG.
active_power <- as.numeric(two_days$Global_active_power)
png("plot1.png", width = 480, height = 480)
hist(active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
/plot1.R
|
no_license
|
kellywale/Exploratorydataplotting
|
R
| false
| false
| 477
|
r
|
## Plot 1: histogram of Global Active Power for 1-2 Feb 2007.
# Read the full household power dataset, then keep only the two target days.
power_data <- read.table("./household_power_consumption.txt",
                         header = TRUE, sep = ";",
                         stringsAsFactors = FALSE, dec = ".")
two_days <- power_data[power_data$Date %in% c("1/2/2007", "2/2/2007"), ]
## Plot the histogram to a 480x480 PNG.
active_power <- as.numeric(two_days$Global_active_power)
png("plot1.png", width = 480, height = 480)
hist(active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
### 6.4
# [for the sake of practice, here I use a different approach than in the previous exercise.
# this approach is more general as it is not limited to two states as before]
# Each walk draws one of three outcomes per step from a multinomial:
# probability p -> step +1, q -> step -1, r -> stay put.
# random walk parameters:
p <- .3
q <- .3
r <- .4
p1<- .2
q1<- .2
r1<- .6
p2<- .4
q2<- .3
r2<- .3
p3<- .3
q3<- .4
r3<- .3
# set repetitions
N <- 500
# Preallocate the four walks (avoids growing vectors inside the loop);
# numeric(N) also sets the initial state of each walk to 0.
s <- t <- u <- v <- numeric(N)
# Advance a walk one step: roll a 3-sided die with probabilities `probs`
# and move +1, -1 or 0 accordingly.
step_walk <- function(prev, probs) {
  roll <- rmultinom(1, 1, p = probs)
  if (roll[1] == 1) {
    prev + 1
  } else if (roll[2] == 1) {
    prev - 1
  } else {
    prev
  }
}
for (k in 2:N) {
  # One multinomial draw per walk per step, in the same order as before.
  s[k] <- step_walk(s[k - 1], c(p,  q,  r))
  t[k] <- step_walk(t[k - 1], c(p1, q1, r1))
  u[k] <- step_walk(u[k - 1], c(p2, q2, r2))
  v[k] <- step_walk(v[k - 1], c(p3, q3, r3))
}
# plotting random walks:
par(mfrow=c(2,2))
plot(s, type = "l",
     main = "p=.3 q=.3, r=.4",
     xlab = "N")
plot(t, type = "l",
     main = "p=.2, q=.2, r=.6",
     xlab = "N")
plot(u, type = "l",
     main = "p=.4, q=.3, r=.3",
     xlab = "N")
# Title fixed: this walk uses p3=.3, q3=.4, r3=.3 (old label said q=.3).
plot(v, type = "l",
     main = "p=.3, q=.4, r=.3",
     xlab = "N")
|
/hw2/hw2_6.4_sb.R
|
no_license
|
vhurtadol/POLI574
|
R
| false
| false
| 1,820
|
r
|
### 6.4
# [for the sake of practice, here I use a different approach than in the previous exercise.
# this approach is more general as it is not limited to two states as before]
# Each walk draws one of three outcomes per step from a multinomial:
# probability p -> step +1, q -> step -1, r -> stay put.
# random walk parameters:
p <- .3
q <- .3
r <- .4
p1<- .2
q1<- .2
r1<- .6
p2<- .4
q2<- .3
r2<- .3
p3<- .3
q3<- .4
r3<- .3
# set repetitions
N <- 500
# Preallocate the four walks (avoids growing vectors inside the loop);
# numeric(N) also sets the initial state of each walk to 0.
s <- t <- u <- v <- numeric(N)
# Advance a walk one step: roll a 3-sided die with probabilities `probs`
# and move +1, -1 or 0 accordingly.
step_walk <- function(prev, probs) {
  roll <- rmultinom(1, 1, p = probs)
  if (roll[1] == 1) {
    prev + 1
  } else if (roll[2] == 1) {
    prev - 1
  } else {
    prev
  }
}
for (k in 2:N) {
  # One multinomial draw per walk per step, in the same order as before.
  s[k] <- step_walk(s[k - 1], c(p,  q,  r))
  t[k] <- step_walk(t[k - 1], c(p1, q1, r1))
  u[k] <- step_walk(u[k - 1], c(p2, q2, r2))
  v[k] <- step_walk(v[k - 1], c(p3, q3, r3))
}
# plotting random walks:
par(mfrow=c(2,2))
plot(s, type = "l",
     main = "p=.3 q=.3, r=.4",
     xlab = "N")
plot(t, type = "l",
     main = "p=.2, q=.2, r=.6",
     xlab = "N")
plot(u, type = "l",
     main = "p=.4, q=.3, r=.3",
     xlab = "N")
# Title fixed: this walk uses p3=.3, q3=.4, r3=.3 (old label said q=.3).
plot(v, type = "l",
     main = "p=.3, q=.4, r=.3",
     xlab = "N")
|
# Cylindrical bar chart of population counts by age bracket (Azcapotzalco).
library(plotrix)
library(readxl)
pob <- read_xlsx("azcapob1.xlsx")
# attach(pob) removed: no attached column name was ever used below, and
# attach() only pollutes the search path.
pob1 <- pob[, -1]   # drop the first (label) column
pob2 <- t(pob1)     # one row per series, one column per age bracket
colnames(pob2) <- c("40-44", "45 a 49", "50 a 54", "55 a 59")
pob2
barp(pob2, names.arg = colnames(pob2), cex.axis = 0.7,
     col=rainbow(2), cylindrical = TRUE, ylim = c(0,19000),
     shadow = FALSE, staxx = TRUE, staxy = TRUE,
     legend.pos = list(x=0.10, y=0.30), xlab = "", ylab = "", border = TRUE)
mtext("rango de edades", 1, line = 2.8, font = 2, cex = 1.6)
mtext("número de habitantes", 2, line = 2.6, font = 2, cex = 1.6)
legend("topleft", colnames(pob1), bty="n", cex = 0.5, fill=c("red","lightskyblue"))
|
/Rstudioazcapotzalco/poblacion/R/azcagrafo3.R
|
no_license
|
davidlechuga/OPCD
|
R
| false
| false
| 635
|
r
|
# Cylindrical bar chart of population counts by age bracket (Azcapotzalco).
library(plotrix)
library(readxl)
pob <- read_xlsx("azcapob1.xlsx")
# attach(pob) removed: no attached column name was ever used below, and
# attach() only pollutes the search path.
pob1 <- pob[, -1]   # drop the first (label) column
pob2 <- t(pob1)     # one row per series, one column per age bracket
colnames(pob2) <- c("40-44", "45 a 49", "50 a 54", "55 a 59")
pob2
barp(pob2, names.arg = colnames(pob2), cex.axis = 0.7,
     col=rainbow(2), cylindrical = TRUE, ylim = c(0,19000),
     shadow = FALSE, staxx = TRUE, staxy = TRUE,
     legend.pos = list(x=0.10, y=0.30), xlab = "", ylab = "", border = TRUE)
mtext("rango de edades", 1, line = 2.8, font = 2, cex = 1.6)
mtext("número de habitantes", 2, line = 2.6, font = 2, cex = 1.6)
legend("topleft", colnames(pob1), bty="n", cex = 0.5, fill=c("red","lightskyblue"))
|
# Platform Repository Service
#
# Platform Repository Service - Sage Bionetworks Platform
#
# The version of the OpenAPI document: develop-SNAPSHOT
# Contact: thomas.yu@sagebionetworks.org
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title SubmissionStatusEnum
#'
#' @description SubmissionStatusEnum Class
#'
#' @format An \code{R6Class} generator object
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
SubmissionStatusEnum <- R6::R6Class(
  "SubmissionStatusEnum",
  public = list(
    # Validate `...` as a single enum value; errors unless it collapses to
    # exactly one of the strings returned by .parse_SubmissionStatusEnum().
    initialize = function(...) {
      local.optional.var <- list(...)
      val <- unlist(local.optional.var)
      enumvec <- .parse_SubmissionStatusEnum()
      stopifnot(length(val) == 1L)
      if (!val %in% enumvec)
        stop("Use one of the valid values: ",
             paste0(enumvec, collapse = ", "))
      private$value <- val
    },
    # Serialize the stored value as a JSON scalar (not a 1-element array).
    toJSON = function() {
      jsonlite::toJSON(private$value, auto_unbox = TRUE)
    },
    # Replace the stored value from parsed JSON; returns self for chaining.
    fromJSON = function(SubmissionStatusEnumJson) {
      private$value <- jsonlite::fromJSON(SubmissionStatusEnumJson,
                                          simplifyVector = FALSE)
      self
    },
    # Like toJSON(), but returns a plain character string.
    toJSONString = function() {
      as.character(jsonlite::toJSON(private$value,
                                    auto_unbox = TRUE))
    },
    # Same behavior as fromJSON(); kept for generator API symmetry.
    fromJSONString = function(SubmissionStatusEnumJson) {
      private$value <- jsonlite::fromJSON(SubmissionStatusEnumJson,
                                          simplifyVector = FALSE)
      self
    }
  ),
  private = list(
    # The single validated enum string.
    value = NULL
  )
)
# add to utils.R
# Return the character vector of valid SubmissionStatusEnum values.
# The bracketed string is emitted by the OpenAPI generator: strip the
# surrounding brackets and split on ", ".
# `vals` was a required-but-unused parameter (the only call site passes no
# arguments, which only worked thanks to lazy evaluation); it now defaults
# to NULL and remains accepted for backward compatibility.
.parse_SubmissionStatusEnum <- function(vals = NULL) {
    res <- gsub("^\\[|\\]$", "",
        "[OPEN, CLOSED, SCORED, INVALID, VALIDATED, EVALUATION_IN_PROGRESS, RECEIVED, REJECTED, ACCEPTED]"
    )
    unlist(strsplit(res, ", "))
}
|
/R/submission_status_enum.R
|
no_license
|
thomasyu888/synr-sdk-client
|
R
| false
| false
| 1,905
|
r
|
# Platform Repository Service
#
# Platform Repository Service - Sage Bionetworks Platform
#
# The version of the OpenAPI document: develop-SNAPSHOT
# Contact: thomas.yu@sagebionetworks.org
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title SubmissionStatusEnum
#'
#' @description SubmissionStatusEnum Class
#'
#' @format An \code{R6Class} generator object
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
SubmissionStatusEnum <- R6::R6Class(
  "SubmissionStatusEnum",
  public = list(
    # Validate `...` as a single enum value; errors unless it collapses to
    # exactly one of the strings returned by .parse_SubmissionStatusEnum().
    initialize = function(...) {
      local.optional.var <- list(...)
      val <- unlist(local.optional.var)
      enumvec <- .parse_SubmissionStatusEnum()
      stopifnot(length(val) == 1L)
      if (!val %in% enumvec)
        stop("Use one of the valid values: ",
             paste0(enumvec, collapse = ", "))
      private$value <- val
    },
    # Serialize the stored value as a JSON scalar (not a 1-element array).
    toJSON = function() {
      jsonlite::toJSON(private$value, auto_unbox = TRUE)
    },
    # Replace the stored value from parsed JSON; returns self for chaining.
    fromJSON = function(SubmissionStatusEnumJson) {
      private$value <- jsonlite::fromJSON(SubmissionStatusEnumJson,
                                          simplifyVector = FALSE)
      self
    },
    # Like toJSON(), but returns a plain character string.
    toJSONString = function() {
      as.character(jsonlite::toJSON(private$value,
                                    auto_unbox = TRUE))
    },
    # Same behavior as fromJSON(); kept for generator API symmetry.
    fromJSONString = function(SubmissionStatusEnumJson) {
      private$value <- jsonlite::fromJSON(SubmissionStatusEnumJson,
                                          simplifyVector = FALSE)
      self
    }
  ),
  private = list(
    # The single validated enum string.
    value = NULL
  )
)
# add to utils.R
# Return the character vector of valid SubmissionStatusEnum values.
# The bracketed string is emitted by the OpenAPI generator: strip the
# surrounding brackets and split on ", ".
# `vals` was a required-but-unused parameter (the only call site passes no
# arguments, which only worked thanks to lazy evaluation); it now defaults
# to NULL and remains accepted for backward compatibility.
.parse_SubmissionStatusEnum <- function(vals = NULL) {
    res <- gsub("^\\[|\\]$", "",
        "[OPEN, CLOSED, SCORED, INVALID, VALIDATED, EVALUATION_IN_PROGRESS, RECEIVED, REJECTED, ACCEPTED]"
    )
    unlist(strsplit(res, ", "))
}
|
## Copyright 2013-2015 Stefan Widgren and Maria Noremark,
## National Veterinary Institute, Sweden
##
## Licensed under the EUPL, Version 1.1 or - as soon they
## will be approved by the European Commission - subsequent
## versions of the EUPL (the "Licence");
## You may not use this work except in compliance with the
## Licence.
## You may obtain a copy of the Licence at:
##
## http://ec.europa.eu/idabc/eupl
##
## Unless required by applicable law or agreed to in
## writing, software distributed under the Licence is
## distributed on an "AS IS" basis,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
## express or implied.
## See the Licence for the specific language governing
## permissions and limitations under the Licence.
##' \code{InDegree}
##'
##' The number of herds with direct movements of animals to the root herd
##' during the defined time window used for tracing.
##'
##'
##' The time period used for \code{InDegree} can either be specified
##' using \code{tEnd} and \code{days} or \code{inBegin} and \code{inEnd}.
##'
##' If using \code{tEnd} and \code{days}, the time period for ingoing
##' contacts ends at \code{tEnd} and starts at \code{days} prior to
##' \code{tEnd}. The indegree will be calculated for each combination
##' of \code{root}, \code{tEnd} and \code{days}.
##'
##' An alternative way is to use \code{inBegin} and \code{inEnd}. The
##' time period for ingoing contacts starts at inBegin and ends at
##' inEndDate. The vectors \code{root} \code{inBegin}, \code{inEnd}
##' must have the same lengths and the indegree will be calculated for
##' each index of them.
##'
##' The movements in \code{InDegree} is a \code{data.frame}
##' with the following columns:
##' \describe{
##'
##' \item{source}{
##' an integer or character identifier of the source holding.
##' }
##'
##' \item{destination}{
##' an integer or character identifier of the destination holding.
##' }
##'
##' \item{t}{
##' the Date of the transfer
##' }
##'
##' \item{id}{
##' an optional character vector with the identity of the animal.
##' }
##'
##' \item{n}{
##' an optional numeric vector with the number of animals moved.
##' }
##'
##' \item{category}{
##' an optional character or factor with category of the animal e.g. Cattle.
##' }
##' }
##'
##' @rdname InDegree-methods
##' @docType methods
##' @keywords methods
##' @include Contacts.r
##' @include ContactTrace.r
##' @section Methods:
##' \describe{
##' \item{\code{signature(x = "ContactTrace")}}{
##' Get the InDegree of a \code{ContactTrace} object.
##' }
##'
##' \item{\code{signature(x = "data.frame")}}{
##' Get the InDegree for a data.frame with movements, see details and examples.
##' }
##' }
##' @seealso \code{\link{NetworkSummary}}
##' @param x a ContactTrace object, or a list of ContactTrace objects
##' or a \code{data.frame} with movements of animals between holdings,
##' see \code{\link{Trace}} for details.
##' @param ... Additional arguments to the method
##' @param root vector of roots to calculate indegree for.
##' @param tEnd the last date to include ingoing movements. Defaults
##' to \code{NULL}
##' @param days the number of previous days before tEnd to include
##' ingoing movements. Defaults to \code{NULL}
##' @param inBegin the first date to include ingoing
##' movements. Defaults to \code{NULL}
##' @param inEnd the last date to include ingoing movements. Defaults
##' to \code{NULL}
##' @return A \code{data.frame} with the following columns:
##' \describe{
##' \item{root}{
##' The root of the contact tracing
##' }
##'
##' \item{inBegin}{
##' The first date to include ingoing movements
##' }
##'
##' \item{inEnd}{
##' The last date to include ingoing movements
##' }
##'
##' \item{inDays}{
##' The number of days in the interval inBegin to inEnd
##' }
##'
##' \item{inDegree}{
##' The \code{\link{InDegree}} of the root within the time-interval
##' }
##' }
##'
##' @references \itemize{
##' \item Dube, C., et al., A review of network analysis terminology
##' and its application to foot-and-mouth disease modelling and policy
##' development. Transbound Emerg Dis 56 (2009) 73-85, doi:
##' 10.1111/j.1865-1682.2008.01064.x
##'
##' \item Noremark, M., et al., Network analysis
##' of cattle and pig movements in Sweden: Measures relevant for
##' disease control and riskbased surveillance. Preventive Veterinary
##' Medicine 99 (2011) 78-90, doi: 10.1016/j.prevetmed.2010.12.009
##' }
##' @examples
##' \dontrun{
##'
##' ## Load data
##' data(transfers)
##'
##' ## Perform contact tracing using tEnd and days
##' contactTrace <- Trace(movements=transfers,
##' root=2645,
##' tEnd='2005-10-31',
##' days=91)
##'
##' ## Calculate indegree from a ContactTrace object
##' id.1 <- InDegree(contactTrace)
##'
##' ## Calculate indegree using tEnd and days
##' id.2 <- InDegree(transfers,
##' root=2645,
##' tEnd='2005-10-31',
##' days=91)
##'
##' ## Check that the result is identical
##' identical(id.1, id.2)
##'
##' ## Calculate indegree for all included herds
##' ## First extract all source and destination from the dataset
##' root <- sort(unique(c(transfers$source,
##' transfers$destination)))
##'
##' ## Calculate indegree
##' result <- InDegree(transfers,
##' root=root,
##' tEnd='2005-10-31',
##' days=91)
##' }
## S4 generic for InDegree; dispatches on the class of `x`
## (Contacts, ContactTrace or data.frame methods are defined below).
setGeneric("InDegree",
           signature = "x",
           function(x, ...) standardGeneric("InDegree"))
##' @rdname InDegree-methods
##' @export
setMethod("InDegree",
          signature(x = "Contacts"),
          function(x) {
              ## InDegree is only defined for ingoing contact objects.
              if (!identical(x@direction, "in")) {
                  stop("Unable to determine InDegree for outgoing contacts")
              }
              ## Number of distinct source holdings with movements into the root.
              length(unique(x@source[x@destination == x@root]))
          }
)
##' @rdname InDegree-methods
##' @export
setMethod("InDegree",
          signature(x = "ContactTrace"),
          function(x) {
              ## Delegate to NetworkSummary and keep only the ingoing columns.
              ingoing.cols <- c("root", "inBegin", "inEnd", "inDays", "inDegree")
              NetworkSummary(x)[, ingoing.cols]
          }
)
##' @rdname InDegree-methods
##' @export
setMethod("InDegree",
signature(x = "data.frame"),
function(x,
root,
tEnd = NULL,
days = NULL,
inBegin = NULL,
inEnd = NULL)
{
## InDegree from raw movement data: delegate tracing to NetworkSummary()
## and keep only the ingoing-contact columns. 'root' is mandatory; the
## time window is given either as tEnd+days or as inBegin+inEnd.
if(missing(root)) {
stop("Missing parameters in call to InDegree")
}
## When the window comes as inBegin/inEnd (neither tEnd nor days given),
## pass a degenerate outgoing window (outEnd == outBegin == inBegin) —
## presumably because only the ingoing results are retained below, so the
## outgoing trace is irrelevant; confirm against NetworkSummary. Otherwise
## leave both NULL so NetworkSummary derives the windows from tEnd/days.
if(all(is.null(tEnd), is.null(days))) {
outBegin <- inBegin
outEnd <- outBegin
} else {
outBegin <- NULL
outEnd <- NULL
}
## NetworkSummary is called positionally:
## (x, root, tEnd, days, inBegin, inEnd, outBegin, outEnd).
return(NetworkSummary(x,
root,
tEnd,
days,
inBegin,
inEnd,
outBegin,
outEnd)[, c("root",
"inBegin",
"inEnd",
"inDays",
"inDegree")])
}
)
|
/EpiContactTrace/R/in-degree.r
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 7,556
|
r
|
## Copyright 2013-2015 Stefan Widgren and Maria Noremark,
## National Veterinary Institute, Sweden
##
## Licensed under the EUPL, Version 1.1 or - as soon they
## will be approved by the European Commission - subsequent
## versions of the EUPL (the "Licence");
## You may not use this work except in compliance with the
## Licence.
## You may obtain a copy of the Licence at:
##
## http://ec.europa.eu/idabc/eupl
##
## Unless required by applicable law or agreed to in
## writing, software distributed under the Licence is
## distributed on an "AS IS" basis,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
## express or implied.
## See the Licence for the specific language governing
## permissions and limitations under the Licence.
##' \code{InDegree}
##'
##' The number of herds with direct movements of animals to the root herd
##' during the defined time window used for tracing.
##'
##'
##' The time period used for \code{InDegree} can either be specified
##' using \code{tEnd} and \code{days} or \code{inBegin} and \code{inEnd}.
##'
##' If using \code{tEnd} and \code{days}, the time period for ingoing
##' contacts ends at \code{tEnd} and starts at \code{days} prior to
##' \code{tEnd}. The indegree will be calculated for each combination
##' of \code{root}, \code{tEnd} and \code{days}.
##'
##' An alternative way is to use \code{inBegin} and \code{inEnd}. The
##' time period for ingoing contacts starts at \code{inBegin} and ends at
##' \code{inEnd}. The vectors \code{root}, \code{inBegin}, \code{inEnd}
##' must have the same lengths and the indegree will be calculated for
##' each index of them.
##'
##' The movements in \code{InDegree} is a \code{data.frame}
##' with the following columns:
##' \describe{
##'
##' \item{source}{
##' an integer or character identifier of the source holding.
##' }
##'
##' \item{destination}{
##' an integer or character identifier of the destination holding.
##' }
##'
##' \item{t}{
##' the Date of the transfer
##' }
##'
##' \item{id}{
##' an optional character vector with the identity of the animal.
##' }
##'
##' \item{n}{
##' an optional numeric vector with the number of animals moved.
##' }
##'
##' \item{category}{
##' an optional character or factor with category of the animal e.g. Cattle.
##' }
##' }
##'
##' @rdname InDegree-methods
##' @docType methods
##' @keywords methods
##' @include Contacts.r
##' @include ContactTrace.r
##' @section Methods:
##' \describe{
##' \item{\code{signature(x = "ContactTrace")}}{
##' Get the InDegree of a \code{ContactTrace} object.
##' }
##'
##' \item{\code{signature(x = "data.frame")}}{
##' Get the InDegree for a data.frame with movements, see details and examples.
##' }
##' }
##' @seealso \code{\link{NetworkSummary}}
##' @param x a ContactTrace object, or a list of ContactTrace objects
##' or a \code{data.frame} with movements of animals between holdings,
##' see \code{\link{Trace}} for details.
##' @param ... Additional arguments to the method
##' @param root vector of roots to calculate indegree for.
##' @param tEnd the last date to include ingoing movements. Defaults
##' to \code{NULL}
##' @param days the number of previous days before tEnd to include
##' ingoing movements. Defaults to \code{NULL}
##' @param inBegin the first date to include ingoing
##' movements. Defaults to \code{NULL}
##' @param inEnd the last date to include ingoing movements. Defaults
##' to \code{NULL}
##' @return A \code{data.frame} with the following columns:
##' \describe{
##' \item{root}{
##' The root of the contact tracing
##' }
##'
##' \item{inBegin}{
##' The first date to include ingoing movements
##' }
##'
##' \item{inEnd}{
##' The last date to include ingoing movements
##' }
##'
##' \item{inDays}{
##' The number of days in the interval inBegin to inEnd
##' }
##'
##' \item{inDegree}{
##' The \code{\link{InDegree}} of the root within the time-interval
##' }
##' }
##'
##' @references \itemize{
##' \item Dube, C., et al., A review of network analysis terminology
##' and its application to foot-and-mouth disease modelling and policy
##' development. Transbound Emerg Dis 56 (2009) 73-85, doi:
##' 10.1111/j.1865-1682.2008.01064.x
##'
##' \item Noremark, M., et al., Network analysis
##' of cattle and pig movements in Sweden: Measures relevant for
##' disease control and riskbased surveillance. Preventive Veterinary
##' Medicine 99 (2011) 78-90, doi: 10.1016/j.prevetmed.2010.12.009
##' }
##' @examples
##' \dontrun{
##'
##' ## Load data
##' data(transfers)
##'
##' ## Perform contact tracing using tEnd and days
##' contactTrace <- Trace(movements=transfers,
##' root=2645,
##' tEnd='2005-10-31',
##' days=91)
##'
##' ## Calculate indegree from a ContactTrace object
##' id.1 <- InDegree(contactTrace)
##'
##' ## Calculate indegree using tEnd and days
##' id.2 <- InDegree(transfers,
##' root=2645,
##' tEnd='2005-10-31',
##' days=91)
##'
##' ## Check that the result is identical
##' identical(id.1, id.2)
##'
##' ## Calculate indegree for all included herds
##' ## First extract all source and destination from the dataset
##' root <- sort(unique(c(transfers$source,
##' transfers$destination)))
##'
##' ## Calculate indegree
##' result <- InDegree(transfers,
##' root=root,
##' tEnd='2005-10-31',
##' days=91)
##' }
## S4 generic: indegree of one or more root holdings; dispatch on 'x'.
setGeneric(
    "InDegree",
    signature = "x",
    function(x, ...) {
        standardGeneric("InDegree")
    }
)
##' @rdname InDegree-methods
##' @export
setMethod("InDegree",
          signature(x = "Contacts"),
          function(x) {
              ## Indegree is only defined for a Contacts object holding
              ## ingoing movements.
              if (!identical(x@direction, "in")) {
                  stop("Unable to determine InDegree for outgoing contacts")
              }
              ## Count the distinct source holdings that moved animals
              ## directly to the root holding.
              length(unique(x@source[x@destination == x@root]))
          })
##' @rdname InDegree-methods
##' @export
setMethod("InDegree",
          signature(x = "ContactTrace"),
          function(x) {
              ## Delegate to NetworkSummary and keep only the columns
              ## describing the ingoing contacts.
              keep <- c("root", "inBegin", "inEnd", "inDays", "inDegree")
              NetworkSummary(x)[, keep]
          })
##' @rdname InDegree-methods
##' @export
setMethod("InDegree",
signature(x = "data.frame"),
function(x,
root,
tEnd = NULL,
days = NULL,
inBegin = NULL,
inEnd = NULL)
{
## InDegree from raw movement data: delegate tracing to NetworkSummary()
## and keep only the ingoing-contact columns. 'root' is mandatory; the
## time window is given either as tEnd+days or as inBegin+inEnd.
if(missing(root)) {
stop("Missing parameters in call to InDegree")
}
## When the window comes as inBegin/inEnd (neither tEnd nor days given),
## pass a degenerate outgoing window (outEnd == outBegin == inBegin) —
## presumably because only the ingoing results are retained below, so the
## outgoing trace is irrelevant; confirm against NetworkSummary. Otherwise
## leave both NULL so NetworkSummary derives the windows from tEnd/days.
if(all(is.null(tEnd), is.null(days))) {
outBegin <- inBegin
outEnd <- outBegin
} else {
outBegin <- NULL
outEnd <- NULL
}
## NetworkSummary is called positionally:
## (x, root, tEnd, days, inBegin, inEnd, outBegin, outEnd).
return(NetworkSummary(x,
root,
tEnd,
days,
inBegin,
inEnd,
outBegin,
outEnd)[, c("root",
"inBegin",
"inEnd",
"inDays",
"inDegree")])
}
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drug_targets_node_parser.R
\name{parse_drug_targets_polypeptides_pfams}
\alias{parse_drug_targets_polypeptides_pfams}
\title{Extracts the drug targets polypeptides pfams
element and return data as data frame.}
\usage{
parse_drug_targets_polypeptides_pfams(save_table = FALSE,
save_csv = FALSE, csv_path = ".", override_csv = FALSE)
}
\arguments{
\item{save_table}{boolean, save table in database if true.}
\item{save_csv}{boolean, save csv version of parsed dataframe if true}
\item{csv_path}{location to save csv files into it, default is current location, save_csv must be true}
\item{override_csv}{override existing csv, if any, in case it is true in the new parse operation}
}
\value{
drug targets polypeptides pfams node attributes data frame
}
\description{
\code{parse_drug_targets_polypeptides_pfams} returns data frame of
drug targets polypeptides pfams elements.
}
\details{
This function extracts the targets polypeptides pfams element of the drug node in the drug bank
xml database with the option to save it in a predefined database via
\code{\link{open_db}} method. It takes one single optional argument to
save the returned dataframe in the database.
It must be called after \code{\link{get_xml_db_rows}} function like
any other parser function.
If \code{\link{get_xml_db_rows}} is called before for any reason, so
no need to call it again before calling this function.
}
\examples{
\donttest{
# return only the parsed dataframe
parse_drug_targets_polypeptides_pfams()
# save in database and return parsed dataframe
parse_drug_targets_polypeptides_pfams(save_table = TRUE)
# save parsed dataframe as csv if it does not exist in current location
# and return parsed dataframe.
# If the csv exist before read it and return its data.
parse_drug_targets_polypeptides_pfams(save_csv = TRUE)
# save in database, save parsed dataframe as csv if it does not exist in current
# location and return parsed dataframe.
# If the csv exist before read it and return its data.
parse_drug_targets_polypeptides_pfams(save_table = TRUE, save_csv = TRUE)
# save parsed dataframe as csv if it does not exist in given location and
# return parsed dataframe.
# If the csv exist before read it and return its data.
parse_drug_targets_polypeptides_pfams(save_csv = TRUE, csv_path = "save_location")
# save parsed dataframe as csv if it does not exist in current location
# and return parsed dataframe.
# If the csv exist override it and return it.
parse_drug_targets_polypeptides_pfams(save_csv = TRUE, override_csv = TRUE)
}
}
|
/man/parse_drug_targets_polypeptides_pfams.Rd
|
no_license
|
Sparklingredstar/dbparser
|
R
| false
| true
| 2,606
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drug_targets_node_parser.R
\name{parse_drug_targets_polypeptides_pfams}
\alias{parse_drug_targets_polypeptides_pfams}
\title{Extracts the drug targets polypeptides pfams
element and return data as data frame.}
\usage{
parse_drug_targets_polypeptides_pfams(save_table = FALSE,
save_csv = FALSE, csv_path = ".", override_csv = FALSE)
}
\arguments{
\item{save_table}{boolean, save table in database if true.}
\item{save_csv}{boolean, save csv version of parsed dataframe if true}
\item{csv_path}{location to save csv files into it, default is current location, save_csv must be true}
\item{override_csv}{override existing csv, if any, in case it is true in the new parse operation}
}
\value{
drug targets polypeptides pfams node attributes data frame
}
\description{
\code{parse_drug_targets_polypeptides_pfams} returns data frame of
drug targets polypeptides pfams elements.
}
\details{
This function extracts the targets polypeptides pfams element of the drug node in the drug bank
xml database with the option to save it in a predefined database via
\code{\link{open_db}} method. It takes one single optional argument to
save the returned dataframe in the database.
It must be called after \code{\link{get_xml_db_rows}} function like
any other parser function.
If \code{\link{get_xml_db_rows}} is called before for any reason, so
no need to call it again before calling this function.
}
\examples{
\donttest{
# return only the parsed dataframe
parse_drug_targets_polypeptides_pfams()
# save in database and return parsed dataframe
parse_drug_targets_polypeptides_pfams(save_table = TRUE)
# save parsed dataframe as csv if it does not exist in current location
# and return parsed dataframe.
# If the csv exist before read it and return its data.
parse_drug_targets_polypeptides_pfams(save_csv = TRUE)
# save in database, save parsed dataframe as csv if it does not exist in current
# location and return parsed dataframe.
# If the csv exist before read it and return its data.
parse_drug_targets_polypeptides_pfams(save_table = TRUE, save_csv = TRUE)
# save parsed dataframe as csv if it does not exist in given location and
# return parsed dataframe.
# If the csv exist before read it and return its data.
parse_drug_targets_polypeptides_pfams(save_csv = TRUE, csv_path = "save_location")
# save parsed dataframe as csv if it does not exist in current location
# and return parsed dataframe.
# If the csv exist override it and return it.
parse_drug_targets_polypeptides_pfams(save_csv = TRUE, override_csv = TRUE)
}
}
|
## S3 generic for Duncan's multiple range test; dispatches on 'anova'.
`LDuncan` <- function(anova, which = "", conf.level = 0.95) {
    UseMethod("LDuncan")
}
|
/R/LDuncan.R
|
no_license
|
cran/laercio
|
R
| false
| false
| 75
|
r
|
## S3 generic for Duncan's multiple range test; dispatches on 'anova'.
`LDuncan` <- function(anova, which = "", conf.level = 0.95) {
    UseMethod("LDuncan")
}
|
##' Calculate Mohn's Rho values for select quantities
##'
##' Function calculates:
##' (1) a rho value for the ending year for each retrospective relative to the reference model
##' as in Mohn (1999),
##' (2) a "Wood's Hole Mohn's Rho", which is a rho value averaged across all years for each
##' retrospective relative to the reference model, and
##' (3) an "Alaska Fisheries Science Center and Hurtado-Ferro et al. (2015) Mohn's rho,
##' which is the average rho per retrospective "peel".
##'
##'
##' @param summaryoutput List created by `SSsummarize`. The expected order for the
##' models are the full reference model, the retro -1, retro -2, and so forth.
##' @param endyrvec Single year or vector of years representing the
##' final year of values to show for each model.
##' @param startyr Single year used to calculate the start of the Wood's Hole
##' Mohn's Rho value across all years. Defaults to startyr of reference model.
##' @param verbose Print messages when running the function?
##'
##' @author Chantel R. Wetzel and Carey McGilliard
##' @references Hurtado-Ferro et al. 2015. Looking in the rear-view mirror: bias
##' and retrospective patterns in integrated, age-structured stock assessment
##' models. ICES J. Mar. Sci Volume 72, Issue 1, 1 January 2015,
##' Pages 99-110, https://doi.org/10.1093/icesjms/fsu198
##' Mohn, R. 1999. The retrospective problem in sequential population analysis:
##' An investigation using cod fishery and simulated data. ICES J. Mar. Sci
##' Volume 56, Pages 473-488
##'
##' @export
SSmohnsrho <- function(summaryoutput,
                       endyrvec,
                       startyr,
                       verbose = TRUE) {
  # Compute Mohn's rho for SSB, recruitment, Bratio and F three ways:
  # (1) classic terminal-year rho per retrospective peel (Mohn 1999),
  # (2) an all-year "Wood's Hole" rho scaled by the number of years, and
  # (3) the AFSC / Hurtado-Ferro et al. (2015) average rho per peel.
  # Fix: corrected typos in the two warning messages ("Mohns" -> "Mohn's",
  # "ecause" -> "because"). See the roxygen header for parameter docs.
  if (verbose) {
    message(
      "The expected order of models in the summary output are the\n",
      "reference model followed by retro -1, retro -2, and so forth."
    )
  }
  N <- summaryoutput[["n"]]
  # Default terminal years: final year of the reference model, stepping
  # back one year per retrospective peel
  if (missing(endyrvec)) {
    endyrvec <- rev((summaryoutput[["endyrs"]][N] - N + 1):summaryoutput[["endyrs"]][N])
  }
  # Default start year for the all-year (Wood's Hole) calculation
  if (missing(startyr)) {
    startyr <- summaryoutput[["startyrs"]][1]
  }
  mohnSSB <- mohnRec <- mohnBratio <- mohnF <- numeric()
  mohnSSB.all <- mohnRec.all <- mohnBratio.all <- mohnF.all <- numeric()
  # Mohn's rho for the terminal year of each retrospective relative to
  # the reference model (column 1 of each summary table):
  # Rho <- sum over y [ (X_y,retro - X_y,ref) / X_y,ref ]
  for (i in 1:(N - 1)) {
    ind <- which(summaryoutput[["SpawnBio"]][["Yr"]] == endyrvec[i + 1])
    mohnSSB[i] <- (summaryoutput[["SpawnBio"]][ind, i + 1] -
      summaryoutput[["SpawnBio"]][ind, 1]) /
      summaryoutput[["SpawnBio"]][ind, 1]
    ind <- which(summaryoutput[["recruits"]][["Yr"]] == endyrvec[i + 1])
    mohnRec[i] <- (summaryoutput[["recruits"]][ind, i + 1] -
      summaryoutput[["recruits"]][ind, 1]) /
      summaryoutput[["recruits"]][ind, 1]
    ind <- which(summaryoutput[["Bratio"]][["Yr"]] == endyrvec[i + 1])
    mohnBratio[i] <- (summaryoutput[["Bratio"]][ind, i + 1] -
      summaryoutput[["Bratio"]][ind, 1]) /
      summaryoutput[["Bratio"]][ind, 1]
    ind <- which(summaryoutput[["Fvalue"]][["Yr"]] == endyrvec[i + 1])
    mohnF[i] <- (summaryoutput[["Fvalue"]][ind, i + 1] -
      summaryoutput[["Fvalue"]][ind, 1]) /
      summaryoutput[["Fvalue"]][ind, 1]
  }
  # Wood's Hole Mohn's rho: summed over all years from startyr to the
  # peel's terminal year, then scaled by the number of years so it is
  # comparable with the single-year calculation:
  # Rho <- (sum over y [ (X_y,retro - X_y,ref) / X_y,ref ]) / Number of Years
  for (i in 1:(N - 1)) {
    ind <- which(summaryoutput[["SpawnBio"]][["Yr"]] == startyr):which(summaryoutput[["SpawnBio"]][["Yr"]] == endyrvec[i + 1])
    mohnSSB.all[i] <-
      sum((summaryoutput[["SpawnBio"]][ind, i + 1] - summaryoutput[["SpawnBio"]][ind, 1]) /
        summaryoutput[["SpawnBio"]][ind, 1]) / length(ind)
    ind <- which(summaryoutput[["recruits"]][["Yr"]] == startyr):which(summaryoutput[["recruits"]][["Yr"]] == endyrvec[i + 1])
    mohnRec.all[i] <-
      sum((summaryoutput[["recruits"]][ind, i + 1] - summaryoutput[["recruits"]][ind, 1]) /
        summaryoutput[["recruits"]][ind, 1]) / length(ind)
    # Bratio is only defined from the year after the model start year
    if (length(which(summaryoutput[["Bratio"]][["Yr"]] == startyr + 1)) != 0) {
      ind <- which(summaryoutput[["Bratio"]][["Yr"]] == startyr + 1):which(summaryoutput[["Bratio"]][["Yr"]] == endyrvec[i + 1])
      mohnBratio.all[i] <-
        sum((summaryoutput[["Bratio"]][ind, i + 1] - summaryoutput[["Bratio"]][ind, 1]) /
          summaryoutput[["Bratio"]][ind, 1]) / length(ind)
    } else {
      warning("Skipping Wood's Hole Mohn's Rho on Bratio, as Bratio is not available for year after the first model year.")
      mohnBratio.all[i] <- NA
    }
    if (length(which(summaryoutput[["Fvalue"]][["Yr"]] == startyr)) != 0) {
      ind <- which(summaryoutput[["Fvalue"]][["Yr"]] == startyr):which(summaryoutput[["Fvalue"]][["Yr"]] == endyrvec[i + 1])
      mohnF.all[i] <-
        sum((summaryoutput[["Fvalue"]][ind, i + 1] - summaryoutput[["Fvalue"]][ind, 1]) /
          summaryoutput[["Fvalue"]][ind, 1]) / length(ind)
    } else {
      warning("Skipping Wood's Hole Mohn's Rho on Fvalue, because Fvalue is not available for first model year.")
      mohnF.all[i] <- NA
    }
  }
  mohn.out <- list()
  mohn.out[["SSB"]] <- sum(mohnSSB)
  mohn.out[["Rec"]] <- sum(mohnRec)
  mohn.out[["Bratio"]] <- sum(mohnBratio)
  mohn.out[["F"]] <- sum(mohnF)
  mohn.out[["WoodHole_SSB.all"]] <- sum(mohnSSB.all)
  mohn.out[["WoodHole_Rec.all"]] <- sum(mohnRec.all)
  mohn.out[["WoodHole_Bratio.all"]] <- sum(mohnBratio.all)
  mohn.out[["WoodHole_F.all"]] <- sum(mohnF.all)
  # Alaska Fisheries Science Center and Hurtado-Ferro et al. (2015) Mohn's rho
  # https://www.afsc.noaa.gov/REFM/stocks/Plan_Team/2013/Sept/Retrospectives_2013_final3.pdf
  # Equation 1: Rho <- (sum over p [ (X_y-p,p - X_y-p,0) / X_y-p,0 ]) / P
  mohn.out[["AFSC_Hurtado_SSB"]] <- sum(mohnSSB) / length(mohnSSB)
  mohn.out[["AFSC_Hurtado_Rec"]] <- sum(mohnRec) / length(mohnRec)
  mohn.out[["AFSC_Hurtado_F"]] <- sum(mohnF) / length(mohnF)
  mohn.out[["AFSC_Hurtado_Bratio"]] <- sum(mohnBratio) / length(mohnBratio)
  return(mohn.out)
}
|
/R/SSmohnsrho.R
|
no_license
|
cran/r4ss
|
R
| false
| false
| 6,555
|
r
|
##' Calculate Mohn's Rho values for select quantities
##'
##' Function calculates:
##' (1) a rho value for the ending year for each retrospective relative to the reference model
##' as in Mohn (1999),
##' (2) a "Wood's Hole Mohn's Rho", which is a rho value averaged across all years for each
##' retrospective relative to the reference model, and
##' (3) an "Alaska Fisheries Science Center and Hurtado-Ferro et al. (2015) Mohn's rho,
##' which is the average rho per retrospective "peel".
##'
##'
##' @param summaryoutput List created by `SSsummarize`. The expected order for the
##' models are the full reference model, the retro -1, retro -2, and so forth.
##' @param endyrvec Single year or vector of years representing the
##' final year of values to show for each model.
##' @param startyr Single year used to calculate the start of the Wood's Hole
##' Mohn's Rho value across all years. Defaults to startyr of reference model.
##' @param verbose Print messages when running the function?
##'
##' @author Chantel R. Wetzel and Carey McGilliard
##' @references Hurtado-Ferro et al. 2015. Looking in the rear-view mirror: bias
##' and retrospective patterns in integrated, age-structured stock assessment
##' models. ICES J. Mar. Sci Volume 72, Issue 1, 1 January 2015,
##' Pages 99-110, https://doi.org/10.1093/icesjms/fsu198
##' Mohn, R. 1999. The retrospective problem in sequential population analysis:
##' An investigation using cod fishery and simulated data. ICES J. Mar. Sci
##' Volume 56, Pages 473-488
##'
##' @export
SSmohnsrho <- function(summaryoutput,
                       endyrvec,
                       startyr,
                       verbose = TRUE) {
  # Compute Mohn's rho for SSB, recruitment, Bratio and F three ways:
  # (1) classic terminal-year rho per retrospective peel (Mohn 1999),
  # (2) an all-year "Wood's Hole" rho scaled by the number of years, and
  # (3) the AFSC / Hurtado-Ferro et al. (2015) average rho per peel.
  # Fix: corrected typos in the two warning messages ("Mohns" -> "Mohn's",
  # "ecause" -> "because"). See the roxygen header for parameter docs.
  if (verbose) {
    message(
      "The expected order of models in the summary output are the\n",
      "reference model followed by retro -1, retro -2, and so forth."
    )
  }
  N <- summaryoutput[["n"]]
  # Default terminal years: final year of the reference model, stepping
  # back one year per retrospective peel
  if (missing(endyrvec)) {
    endyrvec <- rev((summaryoutput[["endyrs"]][N] - N + 1):summaryoutput[["endyrs"]][N])
  }
  # Default start year for the all-year (Wood's Hole) calculation
  if (missing(startyr)) {
    startyr <- summaryoutput[["startyrs"]][1]
  }
  mohnSSB <- mohnRec <- mohnBratio <- mohnF <- numeric()
  mohnSSB.all <- mohnRec.all <- mohnBratio.all <- mohnF.all <- numeric()
  # Mohn's rho for the terminal year of each retrospective relative to
  # the reference model (column 1 of each summary table):
  # Rho <- sum over y [ (X_y,retro - X_y,ref) / X_y,ref ]
  for (i in 1:(N - 1)) {
    ind <- which(summaryoutput[["SpawnBio"]][["Yr"]] == endyrvec[i + 1])
    mohnSSB[i] <- (summaryoutput[["SpawnBio"]][ind, i + 1] -
      summaryoutput[["SpawnBio"]][ind, 1]) /
      summaryoutput[["SpawnBio"]][ind, 1]
    ind <- which(summaryoutput[["recruits"]][["Yr"]] == endyrvec[i + 1])
    mohnRec[i] <- (summaryoutput[["recruits"]][ind, i + 1] -
      summaryoutput[["recruits"]][ind, 1]) /
      summaryoutput[["recruits"]][ind, 1]
    ind <- which(summaryoutput[["Bratio"]][["Yr"]] == endyrvec[i + 1])
    mohnBratio[i] <- (summaryoutput[["Bratio"]][ind, i + 1] -
      summaryoutput[["Bratio"]][ind, 1]) /
      summaryoutput[["Bratio"]][ind, 1]
    ind <- which(summaryoutput[["Fvalue"]][["Yr"]] == endyrvec[i + 1])
    mohnF[i] <- (summaryoutput[["Fvalue"]][ind, i + 1] -
      summaryoutput[["Fvalue"]][ind, 1]) /
      summaryoutput[["Fvalue"]][ind, 1]
  }
  # Wood's Hole Mohn's rho: summed over all years from startyr to the
  # peel's terminal year, then scaled by the number of years so it is
  # comparable with the single-year calculation:
  # Rho <- (sum over y [ (X_y,retro - X_y,ref) / X_y,ref ]) / Number of Years
  for (i in 1:(N - 1)) {
    ind <- which(summaryoutput[["SpawnBio"]][["Yr"]] == startyr):which(summaryoutput[["SpawnBio"]][["Yr"]] == endyrvec[i + 1])
    mohnSSB.all[i] <-
      sum((summaryoutput[["SpawnBio"]][ind, i + 1] - summaryoutput[["SpawnBio"]][ind, 1]) /
        summaryoutput[["SpawnBio"]][ind, 1]) / length(ind)
    ind <- which(summaryoutput[["recruits"]][["Yr"]] == startyr):which(summaryoutput[["recruits"]][["Yr"]] == endyrvec[i + 1])
    mohnRec.all[i] <-
      sum((summaryoutput[["recruits"]][ind, i + 1] - summaryoutput[["recruits"]][ind, 1]) /
        summaryoutput[["recruits"]][ind, 1]) / length(ind)
    # Bratio is only defined from the year after the model start year
    if (length(which(summaryoutput[["Bratio"]][["Yr"]] == startyr + 1)) != 0) {
      ind <- which(summaryoutput[["Bratio"]][["Yr"]] == startyr + 1):which(summaryoutput[["Bratio"]][["Yr"]] == endyrvec[i + 1])
      mohnBratio.all[i] <-
        sum((summaryoutput[["Bratio"]][ind, i + 1] - summaryoutput[["Bratio"]][ind, 1]) /
          summaryoutput[["Bratio"]][ind, 1]) / length(ind)
    } else {
      warning("Skipping Wood's Hole Mohn's Rho on Bratio, as Bratio is not available for year after the first model year.")
      mohnBratio.all[i] <- NA
    }
    if (length(which(summaryoutput[["Fvalue"]][["Yr"]] == startyr)) != 0) {
      ind <- which(summaryoutput[["Fvalue"]][["Yr"]] == startyr):which(summaryoutput[["Fvalue"]][["Yr"]] == endyrvec[i + 1])
      mohnF.all[i] <-
        sum((summaryoutput[["Fvalue"]][ind, i + 1] - summaryoutput[["Fvalue"]][ind, 1]) /
          summaryoutput[["Fvalue"]][ind, 1]) / length(ind)
    } else {
      warning("Skipping Wood's Hole Mohn's Rho on Fvalue, because Fvalue is not available for first model year.")
      mohnF.all[i] <- NA
    }
  }
  mohn.out <- list()
  mohn.out[["SSB"]] <- sum(mohnSSB)
  mohn.out[["Rec"]] <- sum(mohnRec)
  mohn.out[["Bratio"]] <- sum(mohnBratio)
  mohn.out[["F"]] <- sum(mohnF)
  mohn.out[["WoodHole_SSB.all"]] <- sum(mohnSSB.all)
  mohn.out[["WoodHole_Rec.all"]] <- sum(mohnRec.all)
  mohn.out[["WoodHole_Bratio.all"]] <- sum(mohnBratio.all)
  mohn.out[["WoodHole_F.all"]] <- sum(mohnF.all)
  # Alaska Fisheries Science Center and Hurtado-Ferro et al. (2015) Mohn's rho
  # https://www.afsc.noaa.gov/REFM/stocks/Plan_Team/2013/Sept/Retrospectives_2013_final3.pdf
  # Equation 1: Rho <- (sum over p [ (X_y-p,p - X_y-p,0) / X_y-p,0 ]) / P
  mohn.out[["AFSC_Hurtado_SSB"]] <- sum(mohnSSB) / length(mohnSSB)
  mohn.out[["AFSC_Hurtado_Rec"]] <- sum(mohnRec) / length(mohnRec)
  mohn.out[["AFSC_Hurtado_F"]] <- sum(mohnF) / length(mohnF)
  mohn.out[["AFSC_Hurtado_Bratio"]] <- sum(mohnBratio) / length(mohnBratio)
  return(mohn.out)
}
#' @export
Ops.optmatch.dlist <- function (e1, e2=NULL)
{
## Group-generic ("Ops") method for the optmatch.dlist distance
## representation: a list of per-subproblem distance matrices carrying a
## "row.names" attribute. Applies the operator named by .Generic
## elementwise to each subproblem, after aligning the operands.
## Integer-division style operators make no sense for distances.
ok <- switch(.Generic, "%%" = , "%/%" = FALSE, TRUE)
if (!ok) {
warning(.Generic, " not meaningful for matching distances; returning 1st arg")
return(e1)
}
## nargs() == 1 marks a unary call (e.g. -x, !x).
unary <- nargs() == 1
## .Method[k] is non-empty exactly when operand k dispatched as an
## optmatch.dlist. For each such operand: stash its row.names, split off
## zero-length ("null") subproblem entries, and remember the full and
## non-null subproblem names so the null entries can be spliced back in.
if (nchar(.Method[1])) {
rn1 <- attr(e1, "row.names")
nne <- unlist(as.logical(lapply(e1, length)))
e1.nullentries <- e1[!nne]
full.sc1 <- names(e1)
e1 <- e1[nne]
sc1 <- names(e1)
} else {rn1 <- NULL}
if (nchar(.Method[2])) {
rn2 <- attr(e2, "row.names")
nne <- unlist(as.logical(lapply(e2, length)))
e2.nullentries <- e2[!nne]
full.sc2 <- names(e2)
e2 <- e2[nne]
sc2 <- names(e2)
} else {rn2 <- NULL}
## Both operands are dlists: verify they describe the same units and the
## same subproblem structure before combining.
if (!unary && all(nchar(.Method)))
{
rn12rn2 <- match(rn1, rn2)
rn22rn1 <- match(rn2, rn1)
if (any(is.na(rn12rn2)) && any(is.na(rn22rn1))) stop("arguments\' row names attributes don't match")
if (!any(is.na(rn12rn2)) && any(diff(rn12rn2)<0)) stop("arguments\' row names inconsistently ordered")
if (!any(is.na(rn22rn1)) && any(diff(rn22rn1)<0)) stop("arguments\' row names inconsistently ordered")
# the proper behavior is:
# - make sure the two objects have same length
# - in each item, make sure the row and column names are the same
# if either is not met, fail
if (setequal(sc1,sc2)) {
# if they have the same names, great. proceed, perhaps reordering e2
e2 <- e2[sc1]
} else {
if (length(sc1) != length(sc2)) {
stop("arguments must have equal number of subproblems")
}
k <- length(sc1)
for (i in 1:k) {
if (!identical(dimnames(e1[[i]]), dimnames(e2[[i]]))) {
stop("arguments must have identically named subproblem matrices")
}
}
}
## Per-subproblem dimension checks (a bare vector counts as one column).
## NOTE(review): dm12 is computed from e2, not e1, so dm12 and dm22 are
## always identical and e1's column counts are never compared against
## e2's -- likely should be lapply(e1, ...); confirm before changing.
dm11 <- lapply(e1, function(x) {if (is.null(dim(x))) {length(x)} else {dim(x)[1]}})
dm11 <- unlist(dm11)
dm12 <- lapply(e2, function(x) {if (is.null(dim(x))) {1} else {dim(x)[2]}})
dm12 <- unlist(dm12)
dm21 <- lapply(e2, function(x) {if (is.null(dim(x))) {length(x)} else {dim(x)[1]}})
dm21 <- unlist(dm21)
dm22 <- lapply(e2, function(x) {if (is.null(dim(x))) {1} else {dim(x)[2]}})
dm22 <- unlist(dm22)
if (any(dm11!=dm21) || any(dm12!=dm22))
stop("dimensions of distlist arguments don\'t match")
}
## Look up the operator function named by .Generic and build the call
## evaluated once per subproblem via 'left'/'right' locals.
value <- list()
FUN <- get(.Generic, envir = parent.frame(), mode = "function")
f <- if (unary)
quote(FUN(left))
else quote(FUN(left, right))
if (nchar(.Method[1]) )
{
## e1 is a dlist: iterate its subproblems; 'right' is either the
## matching subproblem (when e2 is also a dlist) or e2 itself.
for (j in seq_along(e1))
{
left <- e1[[j]]
if (!unary) {
if (nchar(.Method[2])) {
right <- e2[[j]] } else {
right <- e2}
}
value[[j]] <- eval(f)
}
names(value) <- sc1
if (length(e1.nullentries))
{
value <- c(value, e1.nullentries)
value <- value[full.sc1]
}
} else
{
## Only e2 is a dlist (e.g. scalar <op> dlist): iterate e2 instead.
if (nchar(.Method[2]))
{
for (j in seq_along(e2))
{
right <- e2[[j]]
left <- e1
value[[j]] <- eval(f)
}
names(value) <- sc2
if (length(e2.nullentries))
{
value <- c(value, e2.nullentries)
value <- value[full.sc2]
}
}
}
## Reassemble as an optmatch.dlist, carrying over whichever operand had
## the longer row.names attribute.
class(value) <- c('optmatch.dlist', 'list')
if (length(rn1)>length(rn2))
{
attr(value, "row.names") <- rn1
} else {
attr(value, "row.names") <- rn2
}
value
}
###### Other optmatch.dlist common methods #####
#' @export
dim.optmatch.dlist <- function(x) {
    ## Overall dimensions: elementwise sum of the subproblem matrices'
    ## dimensions (total treated rows, total control columns).
    total <- c(0, 0)
    for (d in lapply(x, dim)) {
        total <- c(total[1] + d[1], total[2] + d[2])
    }
    total
}
#' @export
dimnames.optmatch.dlist <- function(x) {
    ## Concatenate treated (row) and control (column) names across all
    ## subproblem matrices, preserving subproblem order.
    treated <- c()
    control <- c()
    for (dn in lapply(x, dimnames)) {
        treated <- c(treated, dn[[1]])
        control <- c(control, dn[[2]])
    }
    list(treated = treated, control = control)
}
#' @export
as.matrix.optmatch.dlist <- function(x, ...) {
    ## Flatten the list of subproblem matrices into one dense matrix;
    ## treated/control pairs that appear in no subproblem remain Inf.
    xdim <- dim(x)
    out <- matrix(Inf, nrow = xdim[1], ncol = xdim[2], dimnames = dimnames(x))
    for (sub in x) {
        out[rownames(sub), colnames(sub)] <- sub
    }
    out
}
#' @export
subset.optmatch.dlist <- function(x, subset, select, ...) {
    ## Flatten to a plain matrix first, then defer to subset()'s
    ## matrix handling.
    m <- as.matrix(x)
    subset(m, subset, select, ...)
}
#' @rdname subdim-methods
#' @export
subdim.optmatch.dlist <- function(x) {
    ## Dimensions of each subproblem matrix, keeping subproblem names.
    out <- vector("list", length(x))
    names(out) <- names(x)
    for (i in seq_along(x)) {
        out[[i]] <- dim(x[[i]])
    }
    out
}
|
/R/Ops.optmatch.dlist.R
|
permissive
|
markmfredrickson/optmatch
|
R
| false
| false
| 4,573
|
r
|
#' @export
Ops.optmatch.dlist <- function (e1, e2=NULL)
{
## Group-generic ("Ops") method for the optmatch.dlist distance
## representation: a list of per-subproblem distance matrices carrying a
## "row.names" attribute. Applies the operator named by .Generic
## elementwise to each subproblem, after aligning the operands.
## Integer-division style operators make no sense for distances.
ok <- switch(.Generic, "%%" = , "%/%" = FALSE, TRUE)
if (!ok) {
warning(.Generic, " not meaningful for matching distances; returning 1st arg")
return(e1)
}
## nargs() == 1 marks a unary call (e.g. -x, !x).
unary <- nargs() == 1
## .Method[k] is non-empty exactly when operand k dispatched as an
## optmatch.dlist. For each such operand: stash its row.names, split off
## zero-length ("null") subproblem entries, and remember the full and
## non-null subproblem names so the null entries can be spliced back in.
if (nchar(.Method[1])) {
rn1 <- attr(e1, "row.names")
nne <- unlist(as.logical(lapply(e1, length)))
e1.nullentries <- e1[!nne]
full.sc1 <- names(e1)
e1 <- e1[nne]
sc1 <- names(e1)
} else {rn1 <- NULL}
if (nchar(.Method[2])) {
rn2 <- attr(e2, "row.names")
nne <- unlist(as.logical(lapply(e2, length)))
e2.nullentries <- e2[!nne]
full.sc2 <- names(e2)
e2 <- e2[nne]
sc2 <- names(e2)
} else {rn2 <- NULL}
## Both operands are dlists: verify they describe the same units and the
## same subproblem structure before combining.
if (!unary && all(nchar(.Method)))
{
rn12rn2 <- match(rn1, rn2)
rn22rn1 <- match(rn2, rn1)
if (any(is.na(rn12rn2)) && any(is.na(rn22rn1))) stop("arguments\' row names attributes don't match")
if (!any(is.na(rn12rn2)) && any(diff(rn12rn2)<0)) stop("arguments\' row names inconsistently ordered")
if (!any(is.na(rn22rn1)) && any(diff(rn22rn1)<0)) stop("arguments\' row names inconsistently ordered")
# the proper behavior is:
# - make sure the two objects have same length
# - in each item, make sure the row and column names are the same
# if either is not met, fail
if (setequal(sc1,sc2)) {
# if they have the same names, great. proceed, perhaps reordering e2
e2 <- e2[sc1]
} else {
if (length(sc1) != length(sc2)) {
stop("arguments must have equal number of subproblems")
}
k <- length(sc1)
for (i in 1:k) {
if (!identical(dimnames(e1[[i]]), dimnames(e2[[i]]))) {
stop("arguments must have identically named subproblem matrices")
}
}
}
## Per-subproblem dimension checks (a bare vector counts as one column).
## NOTE(review): dm12 is computed from e2, not e1, so dm12 and dm22 are
## always identical and e1's column counts are never compared against
## e2's -- likely should be lapply(e1, ...); confirm before changing.
dm11 <- lapply(e1, function(x) {if (is.null(dim(x))) {length(x)} else {dim(x)[1]}})
dm11 <- unlist(dm11)
dm12 <- lapply(e2, function(x) {if (is.null(dim(x))) {1} else {dim(x)[2]}})
dm12 <- unlist(dm12)
dm21 <- lapply(e2, function(x) {if (is.null(dim(x))) {length(x)} else {dim(x)[1]}})
dm21 <- unlist(dm21)
dm22 <- lapply(e2, function(x) {if (is.null(dim(x))) {1} else {dim(x)[2]}})
dm22 <- unlist(dm22)
if (any(dm11!=dm21) || any(dm12!=dm22))
stop("dimensions of distlist arguments don\'t match")
}
## Look up the operator function named by .Generic and build the call
## evaluated once per subproblem via 'left'/'right' locals.
value <- list()
FUN <- get(.Generic, envir = parent.frame(), mode = "function")
f <- if (unary)
quote(FUN(left))
else quote(FUN(left, right))
if (nchar(.Method[1]) )
{
## e1 is a dlist: iterate its subproblems; 'right' is either the
## matching subproblem (when e2 is also a dlist) or e2 itself.
for (j in seq_along(e1))
{
left <- e1[[j]]
if (!unary) {
if (nchar(.Method[2])) {
right <- e2[[j]] } else {
right <- e2}
}
value[[j]] <- eval(f)
}
names(value) <- sc1
if (length(e1.nullentries))
{
value <- c(value, e1.nullentries)
value <- value[full.sc1]
}
} else
{
## Only e2 is a dlist (e.g. scalar <op> dlist): iterate e2 instead.
if (nchar(.Method[2]))
{
for (j in seq_along(e2))
{
right <- e2[[j]]
left <- e1
value[[j]] <- eval(f)
}
names(value) <- sc2
if (length(e2.nullentries))
{
value <- c(value, e2.nullentries)
value <- value[full.sc2]
}
}
}
## Reassemble as an optmatch.dlist, carrying over whichever operand had
## the longer row.names attribute.
class(value) <- c('optmatch.dlist', 'list')
if (length(rn1)>length(rn2))
{
attr(value, "row.names") <- rn1
} else {
attr(value, "row.names") <- rn2
}
value
}
###### Other optmatch.dlist common methods #####
#' @export
dim.optmatch.dlist <- function(x) {
    ## Overall dimensions: elementwise sum of the subproblem matrices'
    ## dimensions (total treated rows, total control columns).
    total <- c(0, 0)
    for (d in lapply(x, dim)) {
        total <- c(total[1] + d[1], total[2] + d[2])
    }
    total
}
#' @export
dimnames.optmatch.dlist <- function(x) {
    ## Concatenate treated (row) and control (column) names across all
    ## subproblem matrices, preserving subproblem order.
    treated <- c()
    control <- c()
    for (dn in lapply(x, dimnames)) {
        treated <- c(treated, dn[[1]])
        control <- c(control, dn[[2]])
    }
    list(treated = treated, control = control)
}
#' @export
as.matrix.optmatch.dlist <- function(x, ...) {
    ## Flatten the list of subproblem matrices into one dense matrix;
    ## treated/control pairs that appear in no subproblem remain Inf.
    xdim <- dim(x)
    out <- matrix(Inf, nrow = xdim[1], ncol = xdim[2], dimnames = dimnames(x))
    for (sub in x) {
        out[rownames(sub), colnames(sub)] <- sub
    }
    out
}
#' @export
subset.optmatch.dlist <- function(x, subset, select, ...) {
    ## Flatten to a plain matrix first, then defer to subset()'s
    ## matrix handling.
    m <- as.matrix(x)
    subset(m, subset, select, ...)
}
#' @rdname subdim-methods
#' @export
subdim.optmatch.dlist <- function(x) {
    ## Dimensions of each subproblem matrix, keeping subproblem names.
    out <- vector("list", length(x))
    names(out) <- names(x)
    for (i in seq_along(x)) {
        out[[i]] <- dim(x[[i]])
    }
    out
}
|
### Compute starting values and box constraints for fitting a generalized
### logistic ('genlog') curve to interrupted time series data.
###
### Arguments:
###   data                 - data frame holding the time series.
###   timeVar, yVar        - column name or index of the time and outcome
###                          variables.
###   phaseVar             - optional column name or index of the variable
###                          indicating the phase (baseline vs intervention).
###   baselineMeasurements - number of pre-intervention measurements; required
###                          when phaseVar is not supplied.
###   yRange               - range of possible y values; derived from the
###                          data when NULL.
###   startInflection, startBase, startTop, startGrowthRate, startV -
###                          starting values for the sigmoid parameters
###                          (defaults are derived from the data).
###   inflectionPointBounds, growthRateBounds, baseBounds, topBounds, vBounds -
###                          box constraints for the optimizer; base/top
###                          bounds default to the y range shifted by
###                          baseMargin and topMargin.
###   changeDelay          - number of measurements after the last baseline
###                          measurement where the inflection point is first
###                          expected.
###   returnFullObject     - if TRUE, return the full input/intermediate/
###                          output object instead of only the output list.
###
### Returns a list with startingValues, lowerBounds and upperBounds (or the
### full result object when returnFullObject = TRUE).
genlogCompleteStartValues <- function(data,
                                      timeVar = 1,
                                      yVar = 2,
                                      phaseVar = NULL,
                                      baselineMeasurements = NULL, ### Was nA
                                      yRange = NULL,
                                      startInflection = NULL,      ### Was Xs
                                      startBase = NULL,            ### Was ABs
                                      startTop = NULL,             ### Was ATs
                                      startGrowthRate = NULL,      ### Was Bs
                                      startV = 1,
                                      inflectionPointBounds = NULL,
                                      growthRateBounds = c(-2, 2),
                                      baseMargin = c(0, 3),
                                      topMargin = c(-3, 0),
                                      baseBounds = NULL,
                                      topBounds = NULL,
                                      vBounds = c(1, 1),
                                      changeDelay = 4,
                                      returnFullObject = FALSE,
                                      ...) {

  res <- list(input = as.list(environment()),
              intermediate = list(),
              output = list());

  ### Resolve numeric column indices to column names for easy access later on.
  res$intermediate$yVarName <- yVar <-
    ifelse(is.numeric(yVar),
           names(data)[yVar],
           yVar);
  res$intermediate$timeVarName <- timeVar <-
    ifelse(is.numeric(timeVar),
           names(data)[timeVar],
           timeVar);
  ### Bug fix: the original tested is.numeric(timeVar) here, which is always
  ### FALSE at this point (timeVar was just resolved to a name above), so a
  ### numeric phaseVar index was never resolved to a column name.
  res$intermediate$phaseVarName <- phaseVar <-
    ifelse(is.null(phaseVar),
           "none",
           ifelse(is.numeric(phaseVar),
                  names(data)[phaseVar],
                  phaseVar));

  ### Keep only the columns we need.
  if (phaseVar=="none") {
    data <- data[, c(timeVar, yVar)];
  } else {
    data <- data[, c(timeVar, yVar, phaseVar)];
  }

  ### Remove cases with missing values.
  res$intermediate$originalCases <- nrow(data);
  data <- data[complete.cases(data), ];
  res$intermediate$usedCases <- nrow(data);
  res$intermediate$omittedCases <-
    res$intermediate$originalCases - res$intermediate$usedCases;

  ### If the time variable is provided as dates/timestamps instead of
  ### indices/ranks, convert it to (fractional) days since the first
  ### measurement.
  if (!is.numeric(data[, timeVar])) {
    if (inherits(data[, timeVar], c('Date', 'POSIXct', 'POSIXt'))) {
      res$intermediate$day0 <- min(data[, timeVar], na.rm=TRUE);
      res$intermediate$day0.formatted <- as.character(res$intermediate$day0);
      ### Bug fix: use difftime() with units = "days" so that both Date
      ### (stored as days) and POSIXct (stored as seconds) are converted
      ### correctly; the original always divided by 86400, which is only
      ### correct for POSIXct input.
      data[, timeVar] <- as.numeric(difftime(data[, timeVar],
                                             res$intermediate$day0,
                                             units = "days"));
    } else {
      stop("The timeVar variable does not have a class I can work with (numeric or date): instead it has class ",
           vecTxtQ(class(data[, timeVar])), ".");
    }
  }

  ### Number of measurements in the pre-intervention phase.
  ### Bug fix: the original tested is.null(phaseVar), which can never be TRUE
  ### at this point because a NULL phaseVar was replaced by "none" above;
  ### the guard (and its error message) was therefore unreachable.
  if (is.null(baselineMeasurements) && (phaseVar == "none")) {
    stop("Provide number of measurements in pre-intervention phase, either by ",
         "specifying the variable indicating the phase in 'phaseVar', or by ",
         "specifying the number of baseline measurements in 'baselineMeasurements'.");
  } else {
    res$intermediate$baselineMeasurements <-
      baselineMeasurements <-
      ifelse(is.null(baselineMeasurements),
             sum(data[, phaseVar] == min(data[, phaseVar])),
             baselineMeasurements);
  }

  ### Starting values for estimating the sigmoid parameters. The inflection
  ### point starts 'changeDelay' measurements after the last baseline
  ### measurement; base and top start at the observed extremes.
  res$intermediate$startInflection <-
    startInflection <-
    ifelse(is.null(startInflection),
           data[order(data[, timeVar],
                      decreasing=FALSE)[baselineMeasurements+changeDelay],
                timeVar],
           startInflection);
  res$intermediate$startGrowthRate <-
    startGrowthRate <-
    ifelse(is.null(startGrowthRate),
           0,
           startGrowthRate);
  res$intermediate$startBase <-
    startBase <-
    ifelse(is.null(startBase),
           min(data[, yVar]),
           startBase);
  res$intermediate$startTop <-
    startTop <-
    ifelse(is.null(startTop),
           max(data[, yVar]),
           startTop);

  ######################################################################
  ### Prepare starting values and parameter bounds
  ######################################################################

  ### Get specified yRange or derive the range from the observations.
  res$intermediate$yRange <-
    yRange <- ifelseObj(is.null(yRange),
                        range(data[, yVar], na.rm=TRUE),
                        yRange);

  ### Same for the bounds on the inflection point location.
  res$intermediate$inflectionPointBounds <-
    inflectionPointBounds <- ifelseObj(is.null(inflectionPointBounds),
                                       c(### Second-to-last baseline measurement
                                         data[order(data[, timeVar],
                                                    decreasing=FALSE)[baselineMeasurements-1],
                                              timeVar],
                                         ### Fifth last element
                                         data[order(data[, timeVar],
                                                    decreasing=TRUE)[5],
                                              timeVar]),
                                       inflectionPointBounds);

  ### And the base (floor) and top (ceiling) bounds/constraints.
  res$intermediate$baseBounds <-
    baseBounds <- ifelseObj(is.null(baseBounds),
                            c(min(yRange) + baseMargin[1],
                              min(yRange) + baseMargin[2]),
                            baseBounds);
  res$intermediate$topBounds <-
    topBounds <- ifelseObj(is.null(topBounds),
                           c(max(yRange) + topMargin[1],
                             max(yRange) + topMargin[2]),
                           topBounds);

  ### Store in named vectors for convenient passing to the optimizer.
  res$output$startingValues <-
    startingValues <- c(inflectionPoint = startInflection,
                        growthRate = startGrowthRate,
                        base = startBase,
                        top = startTop,
                        v = startV);
  res$output$lowerBounds <-
    lowerBounds <- c(inflectionPoint = inflectionPointBounds[1],
                     growthRate = growthRateBounds[1],
                     base = baseBounds[1],
                     top = topBounds[1],
                     v = vBounds[1]);
  res$output$upperBounds <-
    upperBounds <- c(inflectionPoint = inflectionPointBounds[2],
                     growthRate = growthRateBounds[2],
                     base = baseBounds[2],
                     top = topBounds[2],
                     v = vBounds[2]);

  if (returnFullObject) {
    return(res);
  } else {
    return(res$output);
  }
}
|
/R/genlogCompleteStartValues.R
|
no_license
|
DBoegner/userfriendlyscience
|
R
| false
| false
| 7,274
|
r
|
### Compute starting values and box constraints for fitting a generalized
### logistic ('genlog') curve to interrupted time series data.
###
### Arguments:
###   data                 - data frame holding the time series.
###   timeVar, yVar        - column name or index of the time and outcome
###                          variables.
###   phaseVar             - optional column name or index of the variable
###                          indicating the phase (baseline vs intervention).
###   baselineMeasurements - number of pre-intervention measurements; required
###                          when phaseVar is not supplied.
###   yRange               - range of possible y values; derived from the
###                          data when NULL.
###   startInflection, startBase, startTop, startGrowthRate, startV -
###                          starting values for the sigmoid parameters
###                          (defaults are derived from the data).
###   inflectionPointBounds, growthRateBounds, baseBounds, topBounds, vBounds -
###                          box constraints for the optimizer; base/top
###                          bounds default to the y range shifted by
###                          baseMargin and topMargin.
###   changeDelay          - number of measurements after the last baseline
###                          measurement where the inflection point is first
###                          expected.
###   returnFullObject     - if TRUE, return the full input/intermediate/
###                          output object instead of only the output list.
###
### Returns a list with startingValues, lowerBounds and upperBounds (or the
### full result object when returnFullObject = TRUE).
genlogCompleteStartValues <- function(data,
                                      timeVar = 1,
                                      yVar = 2,
                                      phaseVar = NULL,
                                      baselineMeasurements = NULL, ### Was nA
                                      yRange = NULL,
                                      startInflection = NULL,      ### Was Xs
                                      startBase = NULL,            ### Was ABs
                                      startTop = NULL,             ### Was ATs
                                      startGrowthRate = NULL,      ### Was Bs
                                      startV = 1,
                                      inflectionPointBounds = NULL,
                                      growthRateBounds = c(-2, 2),
                                      baseMargin = c(0, 3),
                                      topMargin = c(-3, 0),
                                      baseBounds = NULL,
                                      topBounds = NULL,
                                      vBounds = c(1, 1),
                                      changeDelay = 4,
                                      returnFullObject = FALSE,
                                      ...) {

  res <- list(input = as.list(environment()),
              intermediate = list(),
              output = list());

  ### Resolve numeric column indices to column names for easy access later on.
  res$intermediate$yVarName <- yVar <-
    ifelse(is.numeric(yVar),
           names(data)[yVar],
           yVar);
  res$intermediate$timeVarName <- timeVar <-
    ifelse(is.numeric(timeVar),
           names(data)[timeVar],
           timeVar);
  ### Bug fix: the original tested is.numeric(timeVar) here, which is always
  ### FALSE at this point (timeVar was just resolved to a name above), so a
  ### numeric phaseVar index was never resolved to a column name.
  res$intermediate$phaseVarName <- phaseVar <-
    ifelse(is.null(phaseVar),
           "none",
           ifelse(is.numeric(phaseVar),
                  names(data)[phaseVar],
                  phaseVar));

  ### Keep only the columns we need.
  if (phaseVar=="none") {
    data <- data[, c(timeVar, yVar)];
  } else {
    data <- data[, c(timeVar, yVar, phaseVar)];
  }

  ### Remove cases with missing values.
  res$intermediate$originalCases <- nrow(data);
  data <- data[complete.cases(data), ];
  res$intermediate$usedCases <- nrow(data);
  res$intermediate$omittedCases <-
    res$intermediate$originalCases - res$intermediate$usedCases;

  ### If the time variable is provided as dates/timestamps instead of
  ### indices/ranks, convert it to (fractional) days since the first
  ### measurement.
  if (!is.numeric(data[, timeVar])) {
    if (inherits(data[, timeVar], c('Date', 'POSIXct', 'POSIXt'))) {
      res$intermediate$day0 <- min(data[, timeVar], na.rm=TRUE);
      res$intermediate$day0.formatted <- as.character(res$intermediate$day0);
      ### Bug fix: use difftime() with units = "days" so that both Date
      ### (stored as days) and POSIXct (stored as seconds) are converted
      ### correctly; the original always divided by 86400, which is only
      ### correct for POSIXct input.
      data[, timeVar] <- as.numeric(difftime(data[, timeVar],
                                             res$intermediate$day0,
                                             units = "days"));
    } else {
      stop("The timeVar variable does not have a class I can work with (numeric or date): instead it has class ",
           vecTxtQ(class(data[, timeVar])), ".");
    }
  }

  ### Number of measurements in the pre-intervention phase.
  ### Bug fix: the original tested is.null(phaseVar), which can never be TRUE
  ### at this point because a NULL phaseVar was replaced by "none" above;
  ### the guard (and its error message) was therefore unreachable.
  if (is.null(baselineMeasurements) && (phaseVar == "none")) {
    stop("Provide number of measurements in pre-intervention phase, either by ",
         "specifying the variable indicating the phase in 'phaseVar', or by ",
         "specifying the number of baseline measurements in 'baselineMeasurements'.");
  } else {
    res$intermediate$baselineMeasurements <-
      baselineMeasurements <-
      ifelse(is.null(baselineMeasurements),
             sum(data[, phaseVar] == min(data[, phaseVar])),
             baselineMeasurements);
  }

  ### Starting values for estimating the sigmoid parameters. The inflection
  ### point starts 'changeDelay' measurements after the last baseline
  ### measurement; base and top start at the observed extremes.
  res$intermediate$startInflection <-
    startInflection <-
    ifelse(is.null(startInflection),
           data[order(data[, timeVar],
                      decreasing=FALSE)[baselineMeasurements+changeDelay],
                timeVar],
           startInflection);
  res$intermediate$startGrowthRate <-
    startGrowthRate <-
    ifelse(is.null(startGrowthRate),
           0,
           startGrowthRate);
  res$intermediate$startBase <-
    startBase <-
    ifelse(is.null(startBase),
           min(data[, yVar]),
           startBase);
  res$intermediate$startTop <-
    startTop <-
    ifelse(is.null(startTop),
           max(data[, yVar]),
           startTop);

  ######################################################################
  ### Prepare starting values and parameter bounds
  ######################################################################

  ### Get specified yRange or derive the range from the observations.
  res$intermediate$yRange <-
    yRange <- ifelseObj(is.null(yRange),
                        range(data[, yVar], na.rm=TRUE),
                        yRange);

  ### Same for the bounds on the inflection point location.
  res$intermediate$inflectionPointBounds <-
    inflectionPointBounds <- ifelseObj(is.null(inflectionPointBounds),
                                       c(### Second-to-last baseline measurement
                                         data[order(data[, timeVar],
                                                    decreasing=FALSE)[baselineMeasurements-1],
                                              timeVar],
                                         ### Fifth last element
                                         data[order(data[, timeVar],
                                                    decreasing=TRUE)[5],
                                              timeVar]),
                                       inflectionPointBounds);

  ### And the base (floor) and top (ceiling) bounds/constraints.
  res$intermediate$baseBounds <-
    baseBounds <- ifelseObj(is.null(baseBounds),
                            c(min(yRange) + baseMargin[1],
                              min(yRange) + baseMargin[2]),
                            baseBounds);
  res$intermediate$topBounds <-
    topBounds <- ifelseObj(is.null(topBounds),
                           c(max(yRange) + topMargin[1],
                             max(yRange) + topMargin[2]),
                           topBounds);

  ### Store in named vectors for convenient passing to the optimizer.
  res$output$startingValues <-
    startingValues <- c(inflectionPoint = startInflection,
                        growthRate = startGrowthRate,
                        base = startBase,
                        top = startTop,
                        v = startV);
  res$output$lowerBounds <-
    lowerBounds <- c(inflectionPoint = inflectionPointBounds[1],
                     growthRate = growthRateBounds[1],
                     base = baseBounds[1],
                     top = topBounds[1],
                     v = vBounds[1]);
  res$output$upperBounds <-
    upperBounds <- c(inflectionPoint = inflectionPointBounds[2],
                     growthRate = growthRateBounds[2],
                     base = baseBounds[2],
                     top = topBounds[2],
                     v = vBounds[2]);

  if (returnFullObject) {
    return(res);
  } else {
    return(res$output);
  }
}
|
#' combine two gg_partial objects
#'
#' @description
#' The \code{combine.gg_partial} function assumes the two \code{\link{gg_partial}}
#' objects were generated from the same \code{\link[randomForestSRC]{rfsrc}}
#' object. So, the function joins along the \code{\link{gg_partial}} list item
#' names (one per partial plot variable). Further, we combine the two
#' \code{\link{gg_partial}} objects along the group variable.
#'
#' Hence, to join three \code{\link{gg_partial}} objects together (i.e. for
#' three different time points from a survival random forest) would require
#' two \code{combine.gg_partial} calls: One to join the first two
#' \code{\link{gg_partial}} object, and one to append the third
#' \code{\link{gg_partial}} object to the output from the first call.
#' The second call will append a single \code{lbls} label to the
#' \code{\link{gg_partial}} object.
#'
#' @param x \code{\link{gg_partial}} object
#' @param y \code{\link{gg_partial}} object
#' @param lbls vector of 2 strings to label the combined data.
#' @param ... not used
#'
#' @return \code{\link{gg_partial}} or \code{gg_partial_list} based on
#' class of x and y.
#'
#' @aliases combine.gg_partial combine.gg_partial_list
#'
#' @importFrom parallel mclapply
#'
#' @examples
#' \dontrun{
#' # Load a set of plot.variable partial plot data
#' data(partial_pbc)
#'
#' # A list of 2 plot.variable objects
#' length(partial_pbc)
#' class(partial_pbc)
#'
#' class(partial_pbc[[1]])
#' class(partial_pbc[[2]])
#'
#' # Create gg_partial objects
#' ggPrtl.1 <- gg_partial(partial_pbc[[1]])
#' ggPrtl.2 <- gg_partial(partial_pbc[[2]])
#'
#' # Combine the objects to get multiple time curves
#' # along variables on a single figure.
#' ggpart <- combine.gg_partial(ggPrtl.1, ggPrtl.2,
#' lbls = c("1 year", "3 years"))
#'
#' # Plot each figure separately
#' plot(ggpart)
#'
#' # Get the continuous data for a panel of continuous plots.
#' ggcont <- ggpart
#' ggcont$edema <- ggcont$ascites <- ggcont$stage <- NULL
#' plot(ggcont, panel=TRUE)
#'
#' # And the categorical for a panel of categorical plots.
#' nms <- colnames(sapply(ggcont, function(st){st}))
#' for(ind in nms){
#' ggpart[[ind]] <- NULL
#' }
#' plot(ggpart, panel=TRUE)
#' }
#'
#' @export
combine.gg_partial <- function(x, y, lbls, ...){
  # Convenience front end: all of the work happens in
  # combine.gg_partial_list(), which handles both single gg_partial
  # objects and gg_partial_list collections.
  combine.gg_partial_list(x, y, lbls, ...)
}
combine.gg_partial_list <- function(x, y, lbls, ...){
  # Merge two gg_partial(_list) objects item by item (one item per partial
  # plot variable), tagging each source with a group label from `lbls` so
  # the combined data can be plotted as grouped curves.
  #
  # Accept raw randomForestSRC plot.variable objects by converting first.
  if(inherits(x,"plot.variable"))
    x <- gg_partial(x)
  if(inherits(y,"plot.variable"))
    y <- gg_partial(y)
  # Bug fix: the original used the elementwise operator '&' inside this
  # scalar if() condition; '&&' is the correct short-circuiting scalar
  # operator (and errors on non-scalar input in R >= 4.3).
  if((!inherits(x,"gg_partial_list") && !inherits(x,"gg_partial")) &&
     (!inherits(y,"gg_partial_list") && !inherits(y,"gg_partial")) ){
    stop(paste("combine.gg_partial expects either a",
               "ggRandomForests::gg_partial or ",
               "randomForestSRC::plot.variable object"))
  }
  # Default group labels when none are supplied.
  if(missing(lbls)){
    lbls <- c("x1", "x2")
  }
  ### !!TODO!! check for lbls length
  cls <- class(x)
  # Only add a group column when x/y do not already carry one (i.e. when
  # they are not themselves the result of an earlier combine call).
  if(is.null(x[[1]]$group))
    x <- parallel::mclapply(x, function(st){
      st$group <- lbls[1]
      st
    })
  if(is.null(y[[1]]$group)){
    # y gets the *last* label, so a second combine call only needs one new
    # label appended at the end of lbls.
    ind.l <- length(lbls)
    y <- parallel::mclapply(y, function(st){
      st$group <- lbls[ind.l]
      st
    })
  }
  # Row-bind matching list items by name, then restore names and class.
  nm <- names(x)
  gg_dta <- parallel::mclapply(nm, function(ind){
    rbind(x[[ind]], y[[ind]])
  })
  names(gg_dta) <- names(x)
  class(gg_dta) <- cls
  return(gg_dta)
}
|
/R/combine.gg_partial.R
|
no_license
|
mingrisch/ggRandomForests
|
R
| false
| false
| 3,600
|
r
|
#' combine two gg_partial objects
#'
#' @description
#' The \code{combine.gg_partial} function assumes the two \code{\link{gg_partial}}
#' objects were generated from the same \code{\link[randomForestSRC]{rfsrc}}
#' object. So, the function joins along the \code{\link{gg_partial}} list item
#' names (one per partial plot variable). Further, we combine the two
#' \code{\link{gg_partial}} objects along the group variable.
#'
#' Hence, to join three \code{\link{gg_partial}} objects together (i.e. for
#' three different time points from a survival random forest) would require
#' two \code{combine.gg_partial} calls: One to join the first two
#' \code{\link{gg_partial}} object, and one to append the third
#' \code{\link{gg_partial}} object to the output from the first call.
#' The second call will append a single \code{lbls} label to the
#' \code{\link{gg_partial}} object.
#'
#' @param x \code{\link{gg_partial}} object
#' @param y \code{\link{gg_partial}} object
#' @param lbls vector of 2 strings to label the combined data.
#' @param ... not used
#'
#' @return \code{\link{gg_partial}} or \code{gg_partial_list} based on
#' class of x and y.
#'
#' @aliases combine.gg_partial combine.gg_partial_list
#'
#' @importFrom parallel mclapply
#'
#' @examples
#' \dontrun{
#' # Load a set of plot.variable partial plot data
#' data(partial_pbc)
#'
#' # A list of 2 plot.variable objects
#' length(partial_pbc)
#' class(partial_pbc)
#'
#' class(partial_pbc[[1]])
#' class(partial_pbc[[2]])
#'
#' # Create gg_partial objects
#' ggPrtl.1 <- gg_partial(partial_pbc[[1]])
#' ggPrtl.2 <- gg_partial(partial_pbc[[2]])
#'
#' # Combine the objects to get multiple time curves
#' # along variables on a single figure.
#' ggpart <- combine.gg_partial(ggPrtl.1, ggPrtl.2,
#' lbls = c("1 year", "3 years"))
#'
#' # Plot each figure separately
#' plot(ggpart)
#'
#' # Get the continuous data for a panel of continuous plots.
#' ggcont <- ggpart
#' ggcont$edema <- ggcont$ascites <- ggcont$stage <- NULL
#' plot(ggcont, panel=TRUE)
#'
#' # And the categorical for a panel of categorical plots.
#' nms <- colnames(sapply(ggcont, function(st){st}))
#' for(ind in nms){
#' ggpart[[ind]] <- NULL
#' }
#' plot(ggpart, panel=TRUE)
#' }
#'
#' @export
combine.gg_partial <- function(x, y, lbls, ...){
  # Convenience front end: all of the work happens in
  # combine.gg_partial_list(), which handles both single gg_partial
  # objects and gg_partial_list collections.
  combine.gg_partial_list(x, y, lbls, ...)
}
combine.gg_partial_list <- function(x, y, lbls, ...){
  # Merge two gg_partial(_list) objects item by item (one item per partial
  # plot variable), tagging each source with a group label from `lbls` so
  # the combined data can be plotted as grouped curves.
  #
  # Accept raw randomForestSRC plot.variable objects by converting first.
  if(inherits(x,"plot.variable"))
    x <- gg_partial(x)
  if(inherits(y,"plot.variable"))
    y <- gg_partial(y)
  # Bug fix: the original used the elementwise operator '&' inside this
  # scalar if() condition; '&&' is the correct short-circuiting scalar
  # operator (and errors on non-scalar input in R >= 4.3).
  if((!inherits(x,"gg_partial_list") && !inherits(x,"gg_partial")) &&
     (!inherits(y,"gg_partial_list") && !inherits(y,"gg_partial")) ){
    stop(paste("combine.gg_partial expects either a",
               "ggRandomForests::gg_partial or ",
               "randomForestSRC::plot.variable object"))
  }
  # Default group labels when none are supplied.
  if(missing(lbls)){
    lbls <- c("x1", "x2")
  }
  ### !!TODO!! check for lbls length
  cls <- class(x)
  # Only add a group column when x/y do not already carry one (i.e. when
  # they are not themselves the result of an earlier combine call).
  if(is.null(x[[1]]$group))
    x <- parallel::mclapply(x, function(st){
      st$group <- lbls[1]
      st
    })
  if(is.null(y[[1]]$group)){
    # y gets the *last* label, so a second combine call only needs one new
    # label appended at the end of lbls.
    ind.l <- length(lbls)
    y <- parallel::mclapply(y, function(st){
      st$group <- lbls[ind.l]
      st
    })
  }
  # Row-bind matching list items by name, then restore names and class.
  nm <- names(x)
  gg_dta <- parallel::mclapply(nm, function(ind){
    rbind(x[[ind]], y[[ind]])
  })
  names(gg_dta) <- names(x)
  class(gg_dta) <- cls
  return(gg_dta)
}
|
library(shiny)
library(shinydashboard)
library(purrr)
# Shiny server function: handles language switching (i18n), preserves tab
# and input state across language changes and bookmarks, renders the sidebar
# menu, and wires up one module per probability distribution.
# NOTE(review): `translator`, `dist_names`, `distributions`,
# `continuous_distributions`, `discrete_distributions`, the distribution
# objects (norm, erlang, ...) and helpers such as `callDistributionModule`,
# `selectLanguageInput`, `filterQueryParams` and `showBookmarkModal` are
# presumably defined elsewhere (e.g. global.R) -- confirm against the app's
# other files.
server <- function(input, output, session) {
  ###########################################################################
  # Settings
  ###########################################################################
  # Reactive translator: switches the active translation language whenever
  # the user selects a valid one, then returns the shared translator object.
  i18n <- reactive({
    selected <- input$selected_language
    if (length(selected) > 0 && selected %in% translator$languages) {
      translator$set_translation_language(selected)
    }
    translator
  })
  # Maintain Tab State: re-select the current tab after a language change.
  observeEvent(input$selected_language, {
    updateTabItems(session, "tabs", input$tabs)
  })
  # Reproduce parameters when language changes.
  # Returns a one-element named list (keyed by the open distribution tab)
  # holding that tab's current input values, or NULL while the app is still
  # initializing or when a non-distribution tab is open.
  initParams <- eventReactive(input$selected_language, {
    dist_opened <- input$tabs
    if (!(dist_opened %in% dist_names)) {
      return(NULL)
    }
    ns <- NS(dist_opened)
    # App is still preparing: module inputs not yet registered.
    if (is.null(input[[ns("p_or_c")]])) {
      return(NULL)
    }
    # Keep parameters: p_or_c, range, plus the distribution's own params.
    d <- distributions[[dist_opened]]
    param_names <- names(d$params)
    targets <- c(c("p_or_c", "range"), param_names)
    dist_params <- reactiveValuesToList(input)[ns(targets)]
    names(dist_params) <- targets
    params <- list(dist_params)
    names(params) <- dist_opened
    return(params)
  })
  # Bookmark: strip unwanted query parameters before exposing the URL.
  onBookmarked(function(url) {
    url_filtered <- filterQueryParams(url, reactiveValuesToList(input))
    updateQueryString(url_filtered)
  })
  onBookmarked(showBookmarkModal(input, i18n))
  onRestore(function(state) {
    updateTabItems(session, "tabs", state$input$tabs)
  })
  ###########################################################################
  # UI
  ###########################################################################
  # Language drop-down, re-rendered so its choices follow the translator.
  output$language_selector <- renderUI({
    selectLanguageInput(
      inputId = "selected_language",
      choices = i18n()$languages,
      selected = input$selected_language,
      width = "100px"
    )
  })
  # "About" page: a translated markdown file chosen via the translator.
  output$about <- renderUI({
    fn <- paste0("markdown/", i18n()$t("about.md"))
    fluidRow(
      column(
        12,
        includeMarkdown(fn)
      )
    )
  })
  # Sidebar: one sub-item per distribution, plus static links/share buttons.
  output$sidebar_menu <- renderMenu({
    sidebarMenu(
      id = "tabs",
      menuItem(
        i18n()$t("Continuous distributions"),
        icon = icon("line-chart"),
        purrr::map(continuous_distributions, ~ {
          menuSubItem(i18n()$t(.x$name), tabName = .x$dist)
        })
      ),
      menuItem(
        i18n()$t("Discrete distributions"),
        icon = icon("bar-chart-o"),
        purrr::map(discrete_distributions, ~ {
          menuSubItem(i18n()$t(.x$name), tabName = .x$dist)
        })
      ),
      menuItem(
        "About", icon = icon("info"),
        tabName = "about"
      ),
      menuItem(
        "Source code", icon = icon("github"),
        href = "http://github.com/ksmzn/ProbabilityDistributionsViewer"
      ),
      tags$li(
        a(
          href = paste0("http://twitter.com/intent/tweet?text=", i18n()$t("Probability Distributions Viewer"), "&url=https://statdist.ksmzn.com/&via=ksmzn&hashtags=rshiny"),
          target = "_blank",
          icon("twitter"),
          onClick = "window.open(encodeURI(decodeURI(this.href)),
                    'tweetwindow',
                    'width=550, height=450, personalbar=0, toolbar=0, scrollbars=1, resizable=1'
                    ); return false;",
          span("Tweet"),
          tags$small(
            class = "badge pull-right bg-light-blue",
            "Share"
          )
        )
      ),
      tags$li(
        a(
          href = paste0("http://www.facebook.com/sharer.php?u=https://statdist.ksmzn.com/&t=", i18n()$t("Probability Distributions Viewer")),
          target = "_blank",
          icon("facebook"),
          span("Facebook"),
          tags$small(
            class = "badge pull-right bg-light-blue",
            "Share"
          )
        )
      ),
      menuItem(
        "@ksmzn", icon = icon("twitter"),
        href = "https://twitter.com/ksmzn"
      ),
      menuItem(
        "Blog", icon = icon("pencil"),
        href = "http://ksmzn.hatenablog.com/"
      )
    )
  })
  ###########################################################################
  # Continuous probability distributions
  ###########################################################################
  # One module per distribution; each module restores state from initParams.
  callDistributionModule(norm, initParams, i18n)
  callDistributionModule(erlang, initParams, i18n)
  callDistributionModule(f, initParams, i18n)
  callDistributionModule(ncf, initParams, i18n)
  callDistributionModule(chisq, initParams, i18n)
  callDistributionModule(ncChisq, initParams, i18n)
  callDistributionModule(gamma, initParams, i18n)
  callDistributionModule(cauchy, initParams, i18n)
  callDistributionModule(exp_dist, initParams, i18n)
  callDistributionModule(lnormal, initParams, i18n)
  callDistributionModule(t_dist, initParams, i18n)
  callDistributionModule(nct, initParams, i18n)
  callDistributionModule(beta, initParams, i18n)
  callDistributionModule(ncbeta, initParams, i18n)
  callDistributionModule(unif, initParams, i18n)
  callDistributionModule(logis, initParams, i18n)
  callDistributionModule(weibull, initParams, i18n)
  ###########################################################################
  # Discrete probability distributions
  ###########################################################################
  callDistributionModule(geom, initParams, i18n)
  callDistributionModule(hyper, initParams, i18n)
  callDistributionModule(binom, initParams, i18n)
  callDistributionModule(nbinom, initParams, i18n)
  callDistributionModule(pois, initParams, i18n)
  callDistributionModule(dunif, initParams, i18n)
}
|
/server.R
|
permissive
|
pablo-vivas/ProbabilityDistributionsViewer
|
R
| false
| false
| 5,625
|
r
|
library(shiny)
library(shinydashboard)
library(purrr)
# Shiny server function: handles language switching (i18n), preserves tab
# and input state across language changes and bookmarks, renders the sidebar
# menu, and wires up one module per probability distribution.
# NOTE(review): `translator`, `dist_names`, `distributions`,
# `continuous_distributions`, `discrete_distributions`, the distribution
# objects (norm, erlang, ...) and helpers such as `callDistributionModule`,
# `selectLanguageInput`, `filterQueryParams` and `showBookmarkModal` are
# presumably defined elsewhere (e.g. global.R) -- confirm against the app's
# other files.
server <- function(input, output, session) {
  ###########################################################################
  # Settings
  ###########################################################################
  # Reactive translator: switches the active translation language whenever
  # the user selects a valid one, then returns the shared translator object.
  i18n <- reactive({
    selected <- input$selected_language
    if (length(selected) > 0 && selected %in% translator$languages) {
      translator$set_translation_language(selected)
    }
    translator
  })
  # Maintain Tab State: re-select the current tab after a language change.
  observeEvent(input$selected_language, {
    updateTabItems(session, "tabs", input$tabs)
  })
  # Reproduce parameters when language changes.
  # Returns a one-element named list (keyed by the open distribution tab)
  # holding that tab's current input values, or NULL while the app is still
  # initializing or when a non-distribution tab is open.
  initParams <- eventReactive(input$selected_language, {
    dist_opened <- input$tabs
    if (!(dist_opened %in% dist_names)) {
      return(NULL)
    }
    ns <- NS(dist_opened)
    # App is still preparing: module inputs not yet registered.
    if (is.null(input[[ns("p_or_c")]])) {
      return(NULL)
    }
    # Keep parameters: p_or_c, range, plus the distribution's own params.
    d <- distributions[[dist_opened]]
    param_names <- names(d$params)
    targets <- c(c("p_or_c", "range"), param_names)
    dist_params <- reactiveValuesToList(input)[ns(targets)]
    names(dist_params) <- targets
    params <- list(dist_params)
    names(params) <- dist_opened
    return(params)
  })
  # Bookmark: strip unwanted query parameters before exposing the URL.
  onBookmarked(function(url) {
    url_filtered <- filterQueryParams(url, reactiveValuesToList(input))
    updateQueryString(url_filtered)
  })
  onBookmarked(showBookmarkModal(input, i18n))
  onRestore(function(state) {
    updateTabItems(session, "tabs", state$input$tabs)
  })
  ###########################################################################
  # UI
  ###########################################################################
  # Language drop-down, re-rendered so its choices follow the translator.
  output$language_selector <- renderUI({
    selectLanguageInput(
      inputId = "selected_language",
      choices = i18n()$languages,
      selected = input$selected_language,
      width = "100px"
    )
  })
  # "About" page: a translated markdown file chosen via the translator.
  output$about <- renderUI({
    fn <- paste0("markdown/", i18n()$t("about.md"))
    fluidRow(
      column(
        12,
        includeMarkdown(fn)
      )
    )
  })
  # Sidebar: one sub-item per distribution, plus static links/share buttons.
  output$sidebar_menu <- renderMenu({
    sidebarMenu(
      id = "tabs",
      menuItem(
        i18n()$t("Continuous distributions"),
        icon = icon("line-chart"),
        purrr::map(continuous_distributions, ~ {
          menuSubItem(i18n()$t(.x$name), tabName = .x$dist)
        })
      ),
      menuItem(
        i18n()$t("Discrete distributions"),
        icon = icon("bar-chart-o"),
        purrr::map(discrete_distributions, ~ {
          menuSubItem(i18n()$t(.x$name), tabName = .x$dist)
        })
      ),
      menuItem(
        "About", icon = icon("info"),
        tabName = "about"
      ),
      menuItem(
        "Source code", icon = icon("github"),
        href = "http://github.com/ksmzn/ProbabilityDistributionsViewer"
      ),
      tags$li(
        a(
          href = paste0("http://twitter.com/intent/tweet?text=", i18n()$t("Probability Distributions Viewer"), "&url=https://statdist.ksmzn.com/&via=ksmzn&hashtags=rshiny"),
          target = "_blank",
          icon("twitter"),
          onClick = "window.open(encodeURI(decodeURI(this.href)),
                    'tweetwindow',
                    'width=550, height=450, personalbar=0, toolbar=0, scrollbars=1, resizable=1'
                    ); return false;",
          span("Tweet"),
          tags$small(
            class = "badge pull-right bg-light-blue",
            "Share"
          )
        )
      ),
      tags$li(
        a(
          href = paste0("http://www.facebook.com/sharer.php?u=https://statdist.ksmzn.com/&t=", i18n()$t("Probability Distributions Viewer")),
          target = "_blank",
          icon("facebook"),
          span("Facebook"),
          tags$small(
            class = "badge pull-right bg-light-blue",
            "Share"
          )
        )
      ),
      menuItem(
        "@ksmzn", icon = icon("twitter"),
        href = "https://twitter.com/ksmzn"
      ),
      menuItem(
        "Blog", icon = icon("pencil"),
        href = "http://ksmzn.hatenablog.com/"
      )
    )
  })
  ###########################################################################
  # Continuous probability distributions
  ###########################################################################
  # One module per distribution; each module restores state from initParams.
  callDistributionModule(norm, initParams, i18n)
  callDistributionModule(erlang, initParams, i18n)
  callDistributionModule(f, initParams, i18n)
  callDistributionModule(ncf, initParams, i18n)
  callDistributionModule(chisq, initParams, i18n)
  callDistributionModule(ncChisq, initParams, i18n)
  callDistributionModule(gamma, initParams, i18n)
  callDistributionModule(cauchy, initParams, i18n)
  callDistributionModule(exp_dist, initParams, i18n)
  callDistributionModule(lnormal, initParams, i18n)
  callDistributionModule(t_dist, initParams, i18n)
  callDistributionModule(nct, initParams, i18n)
  callDistributionModule(beta, initParams, i18n)
  callDistributionModule(ncbeta, initParams, i18n)
  callDistributionModule(unif, initParams, i18n)
  callDistributionModule(logis, initParams, i18n)
  callDistributionModule(weibull, initParams, i18n)
  ###########################################################################
  # Discrete probability distributions
  ###########################################################################
  callDistributionModule(geom, initParams, i18n)
  callDistributionModule(hyper, initParams, i18n)
  callDistributionModule(binom, initParams, i18n)
  callDistributionModule(nbinom, initParams, i18n)
  callDistributionModule(pois, initParams, i18n)
  callDistributionModule(dunif, initParams, i18n)
}
|
library(ggplot2movies)
library(car)
data(movies)
attach(movies)
View(movies) # look over all the data
ls(movies) # list of variables
# We reduce the amount of data being compared: with this many observations
# (58788) the null hypothesis would be almost 100% certain. To be able to
# draw conclusions we reduce the number of compared movies to 500 (which is
# still a large number).
length(movies$year)
filmy=movies[1:500,]
View(filmy)
# Two-sided confidence interval for a mean.
#
# X          - numeric sample.
# conf.level - confidence level (default 0.95).
# var        - known population variance; when NULL (default) the sample
#              standard deviation is used with the Student-t quantile,
#              otherwise the normal (z) quantile is used.
#
# Returns c(lower, upper).
przedzial<-function(X, conf.level=0.95, var=NULL)
{
  n<-length(X)
  alpha<-1-conf.level
  if(is.null(var)){
    # Unknown variance: t interval based on the sample sd.
    half<-sd(X)*qt(1-alpha/2, df=n-1)/sqrt(n)
  }
  else{
    # Known variance: z interval.
    # Bug fixes vs. the original: the upper bound was assigned to L twice
    # (so P was never set in this branch), and qnorm() was called with a
    # spurious df argument, which is an "unused argument" error.
    half<-sqrt(var)*qnorm(1-alpha/2)/sqrt(n)
  }
  L<-mean(X)-half
  P<-mean(X)+half
  wynik<-c(L,P)
  return(wynik)
}
# TEMPLATE of the functions used throughout this analysis.
# NOTE(review): the next six calls are argument-less placeholders and will
# error if this script is executed top to bottom.
hist() # histogram
przedzial() # confidence interval
mean() # mean
var() # variance
sd() # standard deviation
length() # number of compared observations
range(movies$year) # oldest and newest movie in the database
range(movies$length) # shortest and longest movie in the database
table(movies$year) # number of movies produced in each year
przedzial(movies$rating, conf.level = 0.95) # CI for the mean of all ratings
przedzial(movies$rating[movies$Comedy==1], conf.level = 0.95) # CI for the mean rating of comedies
przedzial(movies$rating[movies$year>2000], conf.level = 0.95) # CI for the mean rating of movies made after 2000
przedzial(movies$rating[movies$length<180], conf.level = 0.95) # CI for the mean rating of movies shorter than 3 hours
hist(movies$rating) # histogram of all movie ratings
hist(movies$rating[movies$Comedy==1]) # histogram of ratings of comedies
par(mfrow=c(3,1))
hist(movies$rating[movies$year>2000]) # histogram of ratings of movies produced after 2000
a=movies$year>1970
# NOTE(review): `a` is a logical vector, so `a<2000` is TRUE everywhere --
# this does NOT restrict the data to 1970-2000 as intended.
hist(movies$rating[a<2000]) # (intended) histogram of ratings of movies produced 1970-2000
hist(movies$rating[movies$year<1970]) # histogram of ratings of movies produced before 1970
par(mfrow=c(1,1))
# NOTE(review): `b` is assigned but never used below, so the 90-minute lower
# bound in the next comment is not actually applied.
b=movies$length>90
hist(movies$rating[movies$length<180]) # (intended) histogram of ratings of movies lasting 90-180 minutes
# Normality check via Q-Q plots:
# the straighter the line, the closer the data are to a normal distribution.
qqnorm(movies$year) # pointless -- an "average year"? ;p
qqnorm(movies$rating)
qqnorm(movies$rating[movies$Comedy==1])
# NOTE(review): indexing ratings by the raw year values selects by position,
# not by a logical condition -- probably a mistake.
qqnorm(movies$rating[movies$year])
# Shapiro-Wilk normality test
# NOTE(review): shapiro.test() only accepts 3-5000 observations, so these
# calls error on the full data set.
shapiro.test(movies$year)
shapiro.test(movies$rating)
shapiro.test(movies$rating[movies$Comedy==1])
shapiro.test(movies$rating[movies$year])
#
x=filmy$year[1:100]
hist(x, breaks=40, probability=T)
lines(x, dnorm(x,mean(x), sd(x)))
# Student's t test
t.test(movies$year,mu=length(movies$year)-1)
t.test(movies$rating,mu=length(movies$rating)-1)
t.test(movies$rating[movies$Comedy==1],mu=length(movies$rating[movies$Comedy==1])-1)
t.test(movies$rating[movies$year],mu=length(movies$rating[movies$year])-1)
# If the data are not approximately normally distributed we use the Wilcoxon
# test. It can also be used for normally distributed data, but it is a less
# powerful test, so there is a larger chance of accepting a wrong null
# hypothesis.
RY=movies$rating[movies$year]
wilcox.test(RY,mu=length(RY)-1)
# "z" test for sigma = 3 and alpha = 0.05
ci = qnorm(1-0.05/2)
s = 3/sqrt(length(RY))
mean(RY) + c(-s*ci,s*ci)
# Tests for two samples
d1=movies$rating[movies$Comedy==1] # comedy ratings
d2=movies$rating[movies$Drama==1] # drama ratings
var.test(d1,d2) # are the variances homogeneous?
# If the variances do not differ, run Student's t test
t.test(d1,d2)
# If the variances differ, run the Wilcoxon test
wilcox.test(d1,d2)
# Test for more than two samples (not recommended)
d3=movies$rating[movies$Documentary==1] # documentary ratings
d4=movies$rating[movies$Action==1] # action movie ratings
D1=d1[1:2000]
D2=d2[1:2000]
D3=d3[1:2000]
D4=d4[1:2000]
danex = data.frame(wyniki=c(D1, D2, D3, D4),metoda=rep(1:4,each=8))
View(danex)
bartlett.test(wyniki ~ metoda,danex) # is there heterogeneity of variance?
kruskal.test(wyniki~metoda,danex) # if yes: Kruskal-Wallis
anova(aov(wyniki~metoda,danex)) # if no: ANOVA
# Chi-squared test for proportions
video=movies[1:500,] # 500 movies
table(video$Drama==1) # among the 500 movies, 181 are dramas
# Can we say that 40% of all movies are dramas?
prop.test(181,500,p=0.40)
# Comparing the results of several groups
video2=movies[1100:1349,] # 250 movies
video3=movies[5000:5089,] # 90 movies
table(video2$Drama==1) # among the 250 movies, 105 are dramas
table(video3$Drama==1) # among the 90 movies, 34 are dramas
# Did the results come from the same survey?
prop.test(c(181,105,24),c(500,250,90))
# Test without Yates' correction
prop.test(c(181,105,24),c(500,250,90), correct=FALSE)
# prop 1 = 0.3620000
# prop 2 = 0.4200000
# prop 3 = 0.2666667
# 0.4200000 - 0.2666667 = 0.1533333 > 0.05, from which one could conclude
# that the results came from different surveys
# (even though in our case the results came from the same survey)
# Plots; not useful for my data
vid=video[1:30,]
x=vid$rating
y=vid$votes<1000
fit=lm(x~y)
summary(fit)
plot(x,y);abline(fit) # data with the fitted regression line added.
# abline() adds a straight line with the given parameters, here taken
# from the `fit` object.
plot(ellipse(fit),type="l") # "confidence ellipse" of the regression estimates
plot(x,fit$residuals);abline(h=0) # regression residuals with a horizontal line along the x axis
qqnorm(fit$residuals) # normal Q-Q plot of the regression residuals
par(mfrow=c(2,2));plot(fit);par(mfrow=c(1,1)) # plot() on an lm object
# generates the four standard regression diagnostic plots.
# How many movies were produced in each period?
lata <- c(movies$year)
range(lata)
lata2 <- cut(lata, c(1890, 1900, 1910, 1920, 1930, 1940, 1950, 1970, 1980, 1990, 2000, 2010))
tab <- table(lata2)
dim(tab) <- c(1,11)
tab2 <- c("(1890-1900]", "(1900-1910]", "(1910-1920]", "(1920-1930]", "(1930-1940]", "(1940-1950]", "(1950-1960]", "(1970-1980]", "(1980-1990]", "(1990-2000]", "(2000-2010]")
tablica <- array(c(tab2,tab),dim=c(11,2))
|
/src/R/RAPORT 2 2.R
|
permissive
|
NataliaEwa/AppliedMathematics
|
R
| false
| false
| 6,386
|
r
|
# Exploratory statistics on the `movies` dataset from ggplot2movies.
library(ggplot2movies)
library(car)
data(movies)
attach(movies)
View(movies) # browse all the data
ls(movies) # list the variable names
# We reduce the number of compared observations: with this many rows
# (58788) the null hypothesis would be rejected almost surely. To be able
# to draw conclusions we cut the number of movies down to 500
# (which is still a large number).
length(movies$year)
filmy=movies[1:500,]
View(filmy)
# Two-sided confidence interval for the mean of X.
#
# X          numeric vector of observations
# conf.level confidence level (default 0.95)
# var        known population variance; if NULL (default) the variance is
#            treated as unknown and a Student-t interval based on sd(X) is
#            used, otherwise a normal (z) interval with the supplied variance.
#
# Returns c(lower, upper).
przedzial<-function(X, conf.level=0.95, var=NULL)
{
  n <- length(X)
  alpha <- 1 - conf.level
  if (is.null(var)) {
    # unknown variance: t quantile with n-1 degrees of freedom
    half <- sd(X) * qt(1 - alpha/2, df = n - 1) / sqrt(n)
  } else {
    # Known variance: standard normal quantile. Bug fixes versus the
    # original: qnorm() takes no `df` argument (the call errored), and the
    # branch assigned L twice so the upper bound P was never set.
    half <- sqrt(var) * qnorm(1 - alpha/2) / sqrt(n)
  }
  L <- mean(X) - half
  P <- mean(X) + half
  wynik <- c(L, P)
  return(wynik)
}
# TEMPLATE -- generic analysis checklist for a variable
# NOTE(review): the next six calls have no arguments and error if executed;
# they are a mnemonic checklist, not runnable code.
hist() # histogram
przedzial() # confidence interval
mean() # mean
var() # variance
sd() # standard deviation
length() # number of observations compared
range(movies$year) # oldest and newest movie in the database
range(movies$length) # shortest and longest movie in the database
table(movies$year) # number of movies produced in each year
przedzial(movies$rating, conf.level = 0.95) # CI for the mean of all ratings
przedzial(movies$rating[movies$Comedy==1], conf.level = 0.95) # CI for mean comedy ratings
przedzial(movies$rating[movies$year>2000], conf.level = 0.95) # CI for mean ratings of post-2000 movies
przedzial(movies$rating[movies$length<180], conf.level = 0.95) # CI for mean ratings of movies under 3 hours
hist(movies$rating) # histogram of all movie ratings
hist(movies$rating[movies$Comedy==1]) # histogram of comedy ratings
par(mfrow=c(3,1))
hist(movies$rating[movies$year>2000]) # histogram of ratings of movies made after 2000
a=movies$year>1970
# NOTE(review): `a` is logical, so `a<2000` is always TRUE and selects every
# rating -- this does NOT restrict to the years 1970-2000 as intended.
hist(movies$rating[a<2000]) # (intended) histogram of ratings of movies made 1970-2000
hist(movies$rating[movies$year<1970]) # histogram of ratings of movies made before 1970
par(mfrow=c(1,1))
b=movies$length>90
# NOTE(review): `b` is never used; the subset below only applies length<180,
# so this is NOT restricted to 90-180 minutes as the comment claims.
hist(movies$rating[movies$length<180]) # (intended) histogram of ratings of 90-180 minute movies
# normality check
# the straighter the line, the closer the data are to a normal distribution
qqnorm(movies$year) # makes little sense -- an "average year"? ;p
qqnorm(movies$rating)
qqnorm(movies$rating[movies$Comedy==1])
# NOTE(review): movies$rating[movies$year] indexes by the year VALUES
# (positions 1893, 1894, ...), not by a logical filter -- TODO confirm intent
qqnorm(movies$rating[movies$year])
# Shapiro-Wilk normality test
# NOTE(review): shapiro.test() errors for sample sizes above 5000, so the
# full-dataset calls below fail; only subsets under 5000 rows will run.
shapiro.test(movies$year)
shapiro.test(movies$rating)
shapiro.test(movies$rating[movies$Comedy==1])
shapiro.test(movies$rating[movies$year])
#
# Histogram of the first 100 years with a fitted normal density curve
x=filmy$year[1:100]
hist(x, breaks=40, probability=T)
lines(x, dnorm(x,mean(x), sd(x)))
# Student's t-test
# NOTE(review): mu is set to length(...)-1, i.e. the sample size minus one,
# which is not a meaningful hypothesized mean -- TODO confirm intent.
t.test(movies$year,mu=length(movies$year)-1)
t.test(movies$rating,mu=length(movies$rating)-1)
t.test(movies$rating[movies$Comedy==1],mu=length(movies$rating[movies$Comedy==1])-1)
t.test(movies$rating[movies$year],mu=length(movies$rating[movies$year])-1)
# if the data are not close to normally distributed, use the Wilcoxon test
# it can also be used for normally distributed data, but it is a weaker test,
# so there is a higher chance of accepting a wrong null hypothesis
RY=movies$rating[movies$year]
wilcox.test(RY,mu=length(RY)-1)
# "z" test for sigma = 3 and alpha = 0.05
ci = qnorm(1-0.05/2) # two-sided critical value of the standard normal
s = 3/sqrt(length(RY)) # standard error for known sigma = 3
mean(RY) + c(-s*ci,s*ci) # the z confidence interval
# two-sample tests
d1=movies$rating[movies$Comedy==1] # comedy ratings
d2=movies$rating[movies$Drama==1] # drama ratings
var.test(d1,d2) # are the variances homogeneous?
# if the variances do not differ, use Student's t-test
t.test(d1,d2)
# if the variances differ, use the Wilcoxon test
wilcox.test(d1,d2)
# test for more than two samples (not recommended)
d3=movies$rating[movies$Documentary==1] # documentary ratings
d4=movies$rating[movies$Action==1] # action movie ratings
D1=d1[1:2000]
D2=d2[1:2000]
D3=d3[1:2000]
D4=d4[1:2000]
# NOTE(review): wyniki has 8000 values but metoda has length 32
# (rep(1:4, each=8)); it recycles, so the group labels do NOT line up with
# the four rating vectors -- `each=2000` was probably intended.
danex = data.frame(wyniki=c(D1, D2, D3, D4),metoda=rep(1:4,each=8))
View(danex)
bartlett.test(wyniki ~ metoda,danex) # is the variance heterogeneous?
kruskal.test(wyniki~metoda,danex) # if yes: Kruskal-Wallis
anova(aov(wyniki~metoda,danex)) # if no: ANOVA
# chi-squared test for proportions
video=movies[1:500,] # 500 movies
table(video$Drama==1) # among the 500 movies, 181 are dramas
# Can we say that 40% of all movies are dramas?
prop.test(181,500,p=0.40)
# comparing the results of several groups
video2=movies[1100:1349,] # 250 movies
video3=movies[5000:5089,] # 90 movies
table(video2$Drama==1) # among the 250 movies, 105 are dramas
table(video3$Drama==1) # among the 90 movies, 34 are dramas
# did the results come from the same survey?
# NOTE(review): the 24 below disagrees with the 34 counted above -- TODO confirm
prop.test(c(181,105,24),c(500,250,90))
# the same test without Yates' continuity correction
prop.test(c(181,105,24),c(500,250,90), correct=FALSE)
# prop 1 = 0.3620000
# prop 2 = 0.4200000
# prop 3 = 0.2666667
# 0.4200000 - 0.2666667 = 0.1533333 > 0.05, from which one may conclude
# that the results came from different surveys
# (even though in our case the results came from the same survey)
# plots, not useful for my data
vid=video[1:30,]
x=vid$rating
y=vid$votes<1000 # logical predictor: fewer than 1000 votes
fit=lm(x~y)
summary(fit)
plot(x,y);abline(fit) # scatter plot of the data with the regression line.
# abline() adds a straight line with the given parameters, here taken from fit.
# NOTE(review): ellipse() presumably comes from the car package loaded at the
# top of the script -- confirm.
plot(ellipse(fit),type="l") # plot of the "confidence ellipse" of the regression estimators.
plot(x,fit$residuals);abline(h=0) # regression residuals with a horizontal line along the x axis.
qqnorm(fit$residuals) # normal quantile plot of the regression residuals
par(mfrow=c(2,2));plot(fit);par(mfrow=c(1,1)) # plain plot() on an lm object
# generates 4 diagnostic plots for this regression.
# How many movies were produced in each decade?
# Bug fix: the original break vector skipped 1960 (..., 1950, 1970, ...), so
# the eleven hand-written labels (which include "(1950-1960]") did not match
# the actual intervals. Adding the 1960 boundary gives twelve matching bins.
lata <- c(movies$year)
range(lata)
lata2 <- cut(lata, c(1890, 1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010))
tab <- table(lata2)
dim(tab) <- c(1, 12)
tab2 <- c("(1890-1900]", "(1900-1910]", "(1910-1920]", "(1920-1930]", "(1930-1940]", "(1940-1950]", "(1950-1960]", "(1960-1970]", "(1970-1980]", "(1980-1990]", "(1990-2000]", "(2000-2010]")
tablica <- array(c(tab2, tab), dim = c(12, 2)) # two columns: label, count
|
# Spam-vs-ham text classification (Analytics Edge, Week 5 assignment).
# Builds a document-term matrix from emails.csv, then trains and evaluates
# logistic regression, CART and random forest models.
Sys.setlocale("LC_ALL", "C")
setwd("C:\\R\\Analytics Edge")
getwd()
emails = read.csv("emails.csv", stringsAsFactors=FALSE)
str(emails)
summary(emails$spam==1)
# --- Corpus pre-processing ---
library(tm)
library(SnowballC)
corpus = Corpus(VectorSource(emails$text))
corpus
corpus[[1]]
# NOTE(review): newer tm versions require
# tm_map(corpus, content_transformer(tolower)); the bare tolower call below
# only works on older tm releases -- confirm the installed version.
corpus = tm_map(corpus, tolower)
corpus = tm_map(corpus, PlainTextDocument)
corpus = tm_map(corpus, removePunctuation)
corpus[[1]]
corpus[[2]]
min(nchar(emails$text))
nchar(emails$text)==13
corpus = tm_map(corpus, removeWords, stopwords("english"))
corpus[[1]]
corpus = tm_map(corpus, stemDocument)
corpus[[1]]
# --- Document-term matrix, keeping terms present in at least 5% of docs ---
dtm = DocumentTermMatrix(corpus)
dtm
spdtm = removeSparseTerms(dtm, 0.95)
spdtm
emailsSparse = as.data.frame(as.matrix(spdtm))
colnames(emailsSparse) = make.names(colnames(emailsSparse ))
str(emailsSparse)
colnames(emailsSparse[which.max(colSums(emailsSparse))]) # most frequent term
emailsSparse$spam = emails$spam
sort(colSums(subset(emailsSparse, emailsSparse$spam == FALSE))) # term counts in ham
sort(colSums(subset(emailsSparse, emailsSparse$spam == TRUE))) # term counts in spam
emailsSparse$spam = as.factor(emailsSparse$spam)
# --- 70/30 train/test split, stratified on the outcome ---
library(caTools)
set.seed(123)
split = sample.split(emailsSparse$spam, SplitRatio = 0.7)
train = subset(emailsSparse, split==TRUE)
test = subset(emailsSparse, split==FALSE)
#Model
library(rpart)
library(rpart.plot)
library(randomForest)
# Logistic regression on all terms
spamLog = glm(spam ~ . , data = train, family = binomial)
summary(spamLog)
predictions = predict(spamLog, type="response")
sum(predictions < 0.00001)
sum(predictions > 0.99999)
sum(predictions > 0.00001 & predictions < 0.99999 )
summary(spamLog)
t = table(train$spam, predictions > 0.5) # training confusion matrix
(t[1,1]+t[2,2])/nrow(train) # training accuracy
library(ROCR)
ROCRpred <- prediction(predictions, train$spam)
auclog <- as.numeric(performance(ROCRpred, "auc")@y.values) # training AUC
auclog
# CART
spamCART = rpart(spam ~., data = train, method ="class")
prp(spamCART)
predictCART = predict(spamCART, type="class")
t2 = table(train$spam, predictCART)
(t2[1,1]+t2[2,2])/nrow(train)
predictCART = predict(spamCART) # class probabilities; column 2 = P(spam)
ROCRpred <- prediction(predictCART[,2], train$spam)
auclog <- as.numeric(performance(ROCRpred, "auc")@y.values)
auclog
# Random forest
set.seed(123)
spamRF = randomForest(spam ~., data = train)
predictRF = predict(spamRF, type = 'class')
t3 = table(train$spam, predictRF)
(t3[1,1]+t3[2,2])/nrow(train)
predictRF = predict(spamRF, type = 'prob')
ROCRpred <- prediction(predictRF[,2], train$spam)
auclog <- as.numeric(performance(ROCRpred, "auc")@y.values)
auclog
#Test
# NOTE(review): predict() without type="response" returns log-odds, so the
# 0.5 cutoff below is on the link scale (roughly probability 0.62), not the
# probability scale used on the training set -- likely unintended.
t4 = table(predict(spamLog, newdata = test) > 0.5, test$spam)
(t4[1,1]+t4[2,2])/nrow(test)
predicted <- predict(spamLog, type="response", newdata=test)
ROCRpredTest = prediction(predicted, test$spam)
auc = as.numeric(performance(ROCRpredTest, "auc")@y.values) # test AUC, logistic
auc
predicted <- predict(spamCART, newdata=test, type = "class")
sum(predicted == test$spam) / length(test$spam) # test accuracy, CART
predicted <- predict(spamCART, newdata = test)
r <- prediction(predicted[,2], test$spam)
auc <- as.numeric(performance(r, "auc")@y.values) # test AUC, CART
auc
predicted <- predict(spamRF, newdata =test)
sum(predicted == test$spam)/length(test$spam) # test accuracy, random forest
predicted <- predict(spamRF, newdata=test, type = 'prob')
r <- prediction(predicted[,2], test$spam)
auc <- as.numeric(performance(r, "auc")@y.values) # test AUC, random forest
auc
|
/Week 5 - Text Mining/Asignment 5c/Spam from Ham.R
|
no_license
|
AlfonsoCampos/The-Analytics-Edge
|
R
| false
| false
| 3,172
|
r
|
# Spam-vs-ham text classification (Analytics Edge, Week 5 assignment).
# Builds a document-term matrix from emails.csv, then trains and evaluates
# logistic regression, CART and random forest models.
Sys.setlocale("LC_ALL", "C")
setwd("C:\\R\\Analytics Edge")
getwd()
emails = read.csv("emails.csv", stringsAsFactors=FALSE)
str(emails)
summary(emails$spam==1)
# --- Corpus pre-processing ---
library(tm)
library(SnowballC)
corpus = Corpus(VectorSource(emails$text))
corpus
corpus[[1]]
# NOTE(review): newer tm versions require
# tm_map(corpus, content_transformer(tolower)); the bare tolower call below
# only works on older tm releases -- confirm the installed version.
corpus = tm_map(corpus, tolower)
corpus = tm_map(corpus, PlainTextDocument)
corpus = tm_map(corpus, removePunctuation)
corpus[[1]]
corpus[[2]]
min(nchar(emails$text))
nchar(emails$text)==13
corpus = tm_map(corpus, removeWords, stopwords("english"))
corpus[[1]]
corpus = tm_map(corpus, stemDocument)
corpus[[1]]
# --- Document-term matrix, keeping terms present in at least 5% of docs ---
dtm = DocumentTermMatrix(corpus)
dtm
spdtm = removeSparseTerms(dtm, 0.95)
spdtm
emailsSparse = as.data.frame(as.matrix(spdtm))
colnames(emailsSparse) = make.names(colnames(emailsSparse ))
str(emailsSparse)
colnames(emailsSparse[which.max(colSums(emailsSparse))]) # most frequent term
emailsSparse$spam = emails$spam
sort(colSums(subset(emailsSparse, emailsSparse$spam == FALSE))) # term counts in ham
sort(colSums(subset(emailsSparse, emailsSparse$spam == TRUE))) # term counts in spam
emailsSparse$spam = as.factor(emailsSparse$spam)
# --- 70/30 train/test split, stratified on the outcome ---
library(caTools)
set.seed(123)
split = sample.split(emailsSparse$spam, SplitRatio = 0.7)
train = subset(emailsSparse, split==TRUE)
test = subset(emailsSparse, split==FALSE)
#Model
library(rpart)
library(rpart.plot)
library(randomForest)
# Logistic regression on all terms
spamLog = glm(spam ~ . , data = train, family = binomial)
summary(spamLog)
predictions = predict(spamLog, type="response")
sum(predictions < 0.00001)
sum(predictions > 0.99999)
sum(predictions > 0.00001 & predictions < 0.99999 )
summary(spamLog)
t = table(train$spam, predictions > 0.5) # training confusion matrix
(t[1,1]+t[2,2])/nrow(train) # training accuracy
library(ROCR)
ROCRpred <- prediction(predictions, train$spam)
auclog <- as.numeric(performance(ROCRpred, "auc")@y.values) # training AUC
auclog
# CART
spamCART = rpart(spam ~., data = train, method ="class")
prp(spamCART)
predictCART = predict(spamCART, type="class")
t2 = table(train$spam, predictCART)
(t2[1,1]+t2[2,2])/nrow(train)
predictCART = predict(spamCART) # class probabilities; column 2 = P(spam)
ROCRpred <- prediction(predictCART[,2], train$spam)
auclog <- as.numeric(performance(ROCRpred, "auc")@y.values)
auclog
# Random forest
set.seed(123)
spamRF = randomForest(spam ~., data = train)
predictRF = predict(spamRF, type = 'class')
t3 = table(train$spam, predictRF)
(t3[1,1]+t3[2,2])/nrow(train)
predictRF = predict(spamRF, type = 'prob')
ROCRpred <- prediction(predictRF[,2], train$spam)
auclog <- as.numeric(performance(ROCRpred, "auc")@y.values)
auclog
#Test
# NOTE(review): predict() without type="response" returns log-odds, so the
# 0.5 cutoff below is on the link scale (roughly probability 0.62), not the
# probability scale used on the training set -- likely unintended.
t4 = table(predict(spamLog, newdata = test) > 0.5, test$spam)
(t4[1,1]+t4[2,2])/nrow(test)
predicted <- predict(spamLog, type="response", newdata=test)
ROCRpredTest = prediction(predicted, test$spam)
auc = as.numeric(performance(ROCRpredTest, "auc")@y.values) # test AUC, logistic
auc
predicted <- predict(spamCART, newdata=test, type = "class")
sum(predicted == test$spam) / length(test$spam) # test accuracy, CART
predicted <- predict(spamCART, newdata = test)
r <- prediction(predicted[,2], test$spam)
auc <- as.numeric(performance(r, "auc")@y.values) # test AUC, CART
auc
predicted <- predict(spamRF, newdata =test)
sum(predicted == test$spam)/length(test$spam) # test accuracy, random forest
predicted <- predict(spamRF, newdata=test, type = 'prob')
r <- prediction(predicted[,2], test$spam)
auc <- as.numeric(performance(r, "auc")@y.values) # test AUC, random forest
auc
|
# Choropleth of the 2020 Iowa caucus winner by county (NYT data + TIGER shapes).
#Task 1
# Setting up working directory
setwd("C:/Users/victor/Desktop/Economics_R_Code")
library(tidyverse)
library(sf)
#Task 2
#Downloaded Counties and equavilent data
#Shape file are located in working directory file 'Dataset' of my directory
#Task 3
#Reading the nytiowa data set and tidy it up (reuse code form lab 6)
nytiowa <- read_csv("Dataset/nytiowa.csv")
View(nytiowa)
# Reshape to long form: one row per (county, candidate) with delegate counts.
# NOTE(review): "Canidates" is a typo for "Candidates", but the name is reused
# below as a column/plot key, so it is deliberately kept unchanged here.
long_nytiowa <- pivot_longer(nytiowa,
                             c("Buttigieg", "Sanders", "Warren", "Biden"),
                             names_to = "Canidates",
                             values_to = "Delegate")
view(long_nytiowa)
# Per-county winner: the max delegate count within each county group; keep the
# candidate row(s) that attain it (ties keep multiple rows per county).
long_nytiowa <- group_by(long_nytiowa, County)
long_nytiowa <- mutate(long_nytiowa, winner =max(Delegate))
Winner_nytiowa <- filter(long_nytiowa, Delegate == winner)
view(Winner_nytiowa)
#Task 4
#Loading our Shape file
counties <- read_sf(dsn = "Dataset", layer = "tl_2019_us_county")
iowa<-filter(counties,STATEFP=="19") # state FIPS 19 = Iowa
#Task 5
#Join our spatial data set with our tidy data table
iowa <- rename(iowa, County = NAME)
spatial_iowa <- left_join(iowa, Winner_nytiowa, by="County")
#Task 6
#plotting the choropleth graph of Iowa Caucus Winner by county
plot(spatial_iowa ["Canidates"],main="Iowa Caucus Result",key.pos = 1)
|
/Scripts/Tran7.R
|
no_license
|
VictorTran808/Economics_R_Code
|
R
| false
| false
| 1,247
|
r
|
# Choropleth of the 2020 Iowa caucus winner by county (NYT data + TIGER shapes).
#Task 1
# Setting up working directory
setwd("C:/Users/victor/Desktop/Economics_R_Code")
library(tidyverse)
library(sf)
#Task 2
#Downloaded Counties and equavilent data
#Shape file are located in working directory file 'Dataset' of my directory
#Task 3
#Reading the nytiowa data set and tidy it up (reuse code form lab 6)
nytiowa <- read_csv("Dataset/nytiowa.csv")
View(nytiowa)
# Reshape to long form: one row per (county, candidate) with delegate counts.
# NOTE(review): "Canidates" is a typo for "Candidates", but the name is reused
# below as a column/plot key, so it is deliberately kept unchanged here.
long_nytiowa <- pivot_longer(nytiowa,
                             c("Buttigieg", "Sanders", "Warren", "Biden"),
                             names_to = "Canidates",
                             values_to = "Delegate")
view(long_nytiowa)
# Per-county winner: the max delegate count within each county group; keep the
# candidate row(s) that attain it (ties keep multiple rows per county).
long_nytiowa <- group_by(long_nytiowa, County)
long_nytiowa <- mutate(long_nytiowa, winner =max(Delegate))
Winner_nytiowa <- filter(long_nytiowa, Delegate == winner)
view(Winner_nytiowa)
#Task 4
#Loading our Shape file
counties <- read_sf(dsn = "Dataset", layer = "tl_2019_us_county")
iowa<-filter(counties,STATEFP=="19") # state FIPS 19 = Iowa
#Task 5
#Join our spatial data set with our tidy data table
iowa <- rename(iowa, County = NAME)
spatial_iowa <- left_join(iowa, Winner_nytiowa, by="County")
#Task 6
#plotting the choropleth graph of Iowa Caucus Winner by county
plot(spatial_iowa ["Canidates"],main="Iowa Caucus Result",key.pos = 1)
|
#Goncala version riskvalRuns
# Power-simulation sweep: for a fixed total sample size, vary the
# case/control split, allele frequency and relative risk, recording the
# power estimates returned by powerReturn() (defined in powerTests.r).
source("powerTests.r")
#source("powerTests2.r")
#total sample size 10000
Samp <- 10000 #Casamp = n; Cosamp = Samp-n
rounds = 10000 # simulation rounds per parameter combination
disfr = 0.0001 # disease frequency passed through to powerReturn()
sims <- data.frame() # grown row-by-row below; acceptable for this small grid
#cat("Ncases Ncontrols AF Risk Power_Log Power_Score Power_Prop Power_Chi Disease_frequency\n")
for (Cosamp in c(9000, 8000, 7000, 6000, 5000)){
Casamp <- Samp - Cosamp
for (af in c(0.01, 0.05, 0.10, 0.20, 0.50)){
for (risk in c(0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0, 1.125, 1.25, 1.375, 1.5, 1.75, 2.0, 5.0)){
#cat(Casamp, Cosamp, af, risk, powerReturn(Cosamp,Casamp,af,risk,rounds, disfr), "\n")
# NOTE(review): powerReturn() is assumed to return the four power estimates
# in the column order named below -- confirm against powerTests.r
sims <- rbind(sims,c(Casamp, Cosamp, af, risk, powerReturn(Cosamp,Casamp,af,risk,rounds, disfr), disfr))
}
}
}
names(sims) <- c("Ncases","Ncontrols", "AF", "Risk", "Power_Log", "Power_Score", "Power_Prop", "Power_Chi", "Disease_frequency")
write.table(sims, file = "sim_fail.csv", sep = ",", col.names = T, row.names = F)
|
/riskValRuns.R
|
no_license
|
lankrist/ph_meta-analysis
|
R
| false
| false
| 956
|
r
|
#Goncala version riskvalRuns
# Power-simulation sweep: for a fixed total sample size, vary the
# case/control split, allele frequency and relative risk, recording the
# power estimates returned by powerReturn() (defined in powerTests.r).
source("powerTests.r")
#source("powerTests2.r")
#total sample size 10000
Samp <- 10000 #Casamp = n; Cosamp = Samp-n
rounds = 10000 # simulation rounds per parameter combination
disfr = 0.0001 # disease frequency passed through to powerReturn()
sims <- data.frame() # grown row-by-row below; acceptable for this small grid
#cat("Ncases Ncontrols AF Risk Power_Log Power_Score Power_Prop Power_Chi Disease_frequency\n")
for (Cosamp in c(9000, 8000, 7000, 6000, 5000)){
Casamp <- Samp - Cosamp
for (af in c(0.01, 0.05, 0.10, 0.20, 0.50)){
for (risk in c(0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0, 1.125, 1.25, 1.375, 1.5, 1.75, 2.0, 5.0)){
#cat(Casamp, Cosamp, af, risk, powerReturn(Cosamp,Casamp,af,risk,rounds, disfr), "\n")
# NOTE(review): powerReturn() is assumed to return the four power estimates
# in the column order named below -- confirm against powerTests.r
sims <- rbind(sims,c(Casamp, Cosamp, af, risk, powerReturn(Cosamp,Casamp,af,risk,rounds, disfr), disfr))
}
}
}
names(sims) <- c("Ncases","Ncontrols", "AF", "Risk", "Power_Log", "Power_Score", "Power_Prop", "Power_Chi", "Disease_frequency")
write.table(sims, file = "sim_fail.csv", sep = ",", col.names = T, row.names = F)
|
# Find "unknown" species codes (growth-habit prefix + number) remaining in the
# TerrADat exports, and write them to unknown_codes_in_data.csv.
#packages installed= data.table, dplyr, formattable, tidyr using function install.packages("") for example install.packages ("tidyr")
library(tidyr)
library(ggplot2)
library(data.table)
library(dplyr)
library(formattable)
library(qwraps2)
library(stringr)
#Actual.Eco.Site is a column I added to each TerrADat csv to assign the ecological site
#CSVs have to be saved as CSV with the names as follows: plots = "Allyears_plots" , soil horizon = "Allyears_soilhorizons"
#query results = "Allyears_query", plant specis = "PSPPALL" , species richness = "Allyears_species_rich"
#LPI detail = "LPI_all", leaving all dataframes in in case unknown codes will be subsetted
#Plotdata has the actual.eco.site assigned, the "match" code below is adding a column that will assign actual.eco.site to every row
##using the Plotdata csv
Plotdata<-read.csv("~/Allyears_plots.csv")
soildata<-read.csv("~/Allyears_soilhorizons.csv")
soildata$Actual.Eco.Site<-Plotdata$Actual.Eco.Site[match(soildata$PrimaryKey,Plotdata$PrimaryKey)]
AIMdata<-read.csv("~/Allyears_query.csv")
AIMdata$Actual.Eco.Site<-Plotdata$Actual.Eco.Site[match(AIMdata$Primary.Key,Plotdata$PrimaryKey)]
PSPSdata<-read.csv("~/PSPPALL.csv")
PSPSdata$Actual.Eco.Site<-Plotdata$Actual.Eco.Site[match(PSPSdata$PrimaryKey,Plotdata$PrimaryKey)]
SPdata<-read.csv("~/Allyears_species_rich.csv")
SPdata$Actual.Eco.Site<-Plotdata$Actual.Eco.Site[match(SPdata$PrimaryKey,Plotdata$PrimaryKey)]
ST<-read.csv("~/LPI_all.csv")
#when downloading LPI detail from TerrADat online you will have to label the PrimaryKey column PrimaryKey, you can use the View()
##command to ensure you're matching with the correct column
ST1<-ST
ST1$Actual.Eco.Site<-Plotdata$Actual.Eco.Site[match(ST1$PrimaryKey,Plotdata$PrimaryKey)]
##finding unknowns in species richness (in theory everything from LPI should be in here, but just being safe LPI unknowns were also determined)
# Candidate unknown codes: a two-letter growth-habit prefix plus a number
# 0-300 (e.g. "PF12") -- assumed naming convention, TODO confirm.
PF<-paste0("PF",0:300)
AF<-paste0("AF",0:300)
PG<-paste0("PG",0:300)
AG<-paste0("AG",0:300)
TR<-paste0("TR",0:300)
SH<-paste0("SH",0:300)
# NOTE(review): `list` shadows base::list() for the rest of the script
list<-c(AF,PF,AG,PG,TR,SH)
SP2<-SPdata %>%
  select(PrimaryKey,SpeciesList,DateLoadedInDb) %>%
  filter(SpeciesList %in% list)
##checking species
PSPSdata$DateLoadedInDb<-SPdata$DateLoadedInDb[match(PSPSdata$PrimaryKey,SPdata$PrimaryKey)]
PSPS2<-PSPSdata %>%
  select(PrimaryKey,Species,DateLoadedInDb) %>%
  filter(Species %in% list)
##determining codes in LPI
# One filtered frame per LPI hit layer (top canopy plus lower layers 1-7)
STT<-ST1 %>%
  select(PrimaryKey,TopCanopy,DateLoadedInDb) %>%
  filter(TopCanopy %in% list)
STL1<-ST1 %>%
  select(PrimaryKey,Lower1,DateLoadedInDb) %>%
  filter(Lower1 %in% list)
STL2<-ST1 %>%
  select(PrimaryKey,Lower2,DateLoadedInDb) %>%
  filter(Lower2 %in% list)
STL3<-ST1 %>%
  select(PrimaryKey,Lower3,DateLoadedInDb) %>%
  filter(Lower3 %in% list)
STL4<-ST1 %>%
  select(PrimaryKey,Lower4,DateLoadedInDb) %>%
  filter(Lower4 %in% list)
STL5<-ST1 %>%
  select(PrimaryKey,Lower5,DateLoadedInDb) %>%
  filter(Lower5 %in% list)
STL6<-ST1 %>%
  select(PrimaryKey,Lower6,DateLoadedInDb) %>%
  filter(Lower6 %in% list)
STL7<-ST1 %>%
  select(PrimaryKey,Lower7,DateLoadedInDb) %>%
  filter(Lower7 %in% list)
##because a number could be repeated in different years, the code must be united with the date
STT<-unite(STT,TopCanopy,TopCanopy,DateLoadedInDb,sep="_",remove=TRUE)
STL1<-unite(STL1,Lower1,Lower1,DateLoadedInDb,sep="_",remove=TRUE)
STL2<-unite(STL2,Lower2,Lower2,DateLoadedInDb,sep="_",remove=TRUE)
STL3<-unite(STL3,Lower3,Lower3,DateLoadedInDb,sep="_",remove=TRUE)
STL4<-unite(STL4,Lower4,Lower4,DateLoadedInDb,sep="_",remove=TRUE)
STL5<-unite(STL5,Lower5,Lower5,DateLoadedInDb,sep="_",remove=TRUE)
STL6<-unite(STL6,Lower6,Lower6,DateLoadedInDb,sep="_",remove=TRUE)
STL7<-unite(STL7,Lower7,Lower7,DateLoadedInDb,sep="_",remove=TRUE)
SP2<-unite(SP2,SpeciesList,SpeciesList,DateLoadedInDb,sep="_",remove=TRUE)
PSPS2<-unite(PSPS2,Species,Species,DateLoadedInDb,sep="_",remove=TRUE)
#making a list and removing duplicates
list2<-c(as.character(STT$TopCanopy),as.character(STL1$Lower1),as.character(STL2$Lower2),as.character(STL3$Lower3),as.character(STL4$Lower4),as.character(STL5$Lower5),as.character(STL6$Lower6),as.character(STL7$Lower7),as.character(SP2$SpeciesList),as.character(PSPS2$Species))
list2<-unique(list2)
# One row per unique code_date string, then split back into code and date.
# The awkward column name below is the auto-generated name from matrix().
df <- data.frame(matrix(list2, nrow=length(list2), byrow=T))
df<- df %>% separate(matrix.list2..nrow...length.list2...byrow...T.,c("Unknown_Code","Date"),sep="_")
View(df)
#exporting csv to saved file
write.csv(df,"unknown_codes_in_data.csv")
|
/Unknown Codes remaining in data.R
|
no_license
|
bewheeler/AIM_Annual_Report_Code
|
R
| false
| false
| 4,588
|
r
|
# Find "unknown" species codes (growth-habit prefix + number) remaining in the
# TerrADat exports, and write them to unknown_codes_in_data.csv.
#packages installed= data.table, dplyr, formattable, tidyr using function install.packages("") for example install.packages ("tidyr")
library(tidyr)
library(ggplot2)
library(data.table)
library(dplyr)
library(formattable)
library(qwraps2)
library(stringr)
#Actual.Eco.Site is a column I added to each TerrADat csv to assign the ecological site
#CSVs have to be saved as CSV with the names as follows: plots = "Allyears_plots" , soil horizon = "Allyears_soilhorizons"
#query results = "Allyears_query", plant specis = "PSPPALL" , species richness = "Allyears_species_rich"
#LPI detail = "LPI_all", leaving all dataframes in in case unknown codes will be subsetted
#Plotdata has the actual.eco.site assigned, the "match" code below is adding a column that will assign actual.eco.site to every row
##using the Plotdata csv
Plotdata<-read.csv("~/Allyears_plots.csv")
soildata<-read.csv("~/Allyears_soilhorizons.csv")
soildata$Actual.Eco.Site<-Plotdata$Actual.Eco.Site[match(soildata$PrimaryKey,Plotdata$PrimaryKey)]
AIMdata<-read.csv("~/Allyears_query.csv")
AIMdata$Actual.Eco.Site<-Plotdata$Actual.Eco.Site[match(AIMdata$Primary.Key,Plotdata$PrimaryKey)]
PSPSdata<-read.csv("~/PSPPALL.csv")
PSPSdata$Actual.Eco.Site<-Plotdata$Actual.Eco.Site[match(PSPSdata$PrimaryKey,Plotdata$PrimaryKey)]
SPdata<-read.csv("~/Allyears_species_rich.csv")
SPdata$Actual.Eco.Site<-Plotdata$Actual.Eco.Site[match(SPdata$PrimaryKey,Plotdata$PrimaryKey)]
ST<-read.csv("~/LPI_all.csv")
#when downloading LPI detail from TerrADat online you will have to label the PrimaryKey column PrimaryKey, you can use the View()
##command to ensure you're matching with the correct column
ST1<-ST
ST1$Actual.Eco.Site<-Plotdata$Actual.Eco.Site[match(ST1$PrimaryKey,Plotdata$PrimaryKey)]
##finding unknowns in species richness (in theory everything from LPI should be in here, but just being safe LPI unknowns were also determined)
# Candidate unknown codes: a two-letter growth-habit prefix plus a number
# 0-300 (e.g. "PF12") -- assumed naming convention, TODO confirm.
PF<-paste0("PF",0:300)
AF<-paste0("AF",0:300)
PG<-paste0("PG",0:300)
AG<-paste0("AG",0:300)
TR<-paste0("TR",0:300)
SH<-paste0("SH",0:300)
# NOTE(review): `list` shadows base::list() for the rest of the script
list<-c(AF,PF,AG,PG,TR,SH)
SP2<-SPdata %>%
  select(PrimaryKey,SpeciesList,DateLoadedInDb) %>%
  filter(SpeciesList %in% list)
##checking species
PSPSdata$DateLoadedInDb<-SPdata$DateLoadedInDb[match(PSPSdata$PrimaryKey,SPdata$PrimaryKey)]
PSPS2<-PSPSdata %>%
  select(PrimaryKey,Species,DateLoadedInDb) %>%
  filter(Species %in% list)
##determining codes in LPI
# One filtered frame per LPI hit layer (top canopy plus lower layers 1-7)
STT<-ST1 %>%
  select(PrimaryKey,TopCanopy,DateLoadedInDb) %>%
  filter(TopCanopy %in% list)
STL1<-ST1 %>%
  select(PrimaryKey,Lower1,DateLoadedInDb) %>%
  filter(Lower1 %in% list)
STL2<-ST1 %>%
  select(PrimaryKey,Lower2,DateLoadedInDb) %>%
  filter(Lower2 %in% list)
STL3<-ST1 %>%
  select(PrimaryKey,Lower3,DateLoadedInDb) %>%
  filter(Lower3 %in% list)
STL4<-ST1 %>%
  select(PrimaryKey,Lower4,DateLoadedInDb) %>%
  filter(Lower4 %in% list)
STL5<-ST1 %>%
  select(PrimaryKey,Lower5,DateLoadedInDb) %>%
  filter(Lower5 %in% list)
STL6<-ST1 %>%
  select(PrimaryKey,Lower6,DateLoadedInDb) %>%
  filter(Lower6 %in% list)
STL7<-ST1 %>%
  select(PrimaryKey,Lower7,DateLoadedInDb) %>%
  filter(Lower7 %in% list)
##because a number could be repeated in different years, the code must be united with the date
STT<-unite(STT,TopCanopy,TopCanopy,DateLoadedInDb,sep="_",remove=TRUE)
STL1<-unite(STL1,Lower1,Lower1,DateLoadedInDb,sep="_",remove=TRUE)
STL2<-unite(STL2,Lower2,Lower2,DateLoadedInDb,sep="_",remove=TRUE)
STL3<-unite(STL3,Lower3,Lower3,DateLoadedInDb,sep="_",remove=TRUE)
STL4<-unite(STL4,Lower4,Lower4,DateLoadedInDb,sep="_",remove=TRUE)
STL5<-unite(STL5,Lower5,Lower5,DateLoadedInDb,sep="_",remove=TRUE)
STL6<-unite(STL6,Lower6,Lower6,DateLoadedInDb,sep="_",remove=TRUE)
STL7<-unite(STL7,Lower7,Lower7,DateLoadedInDb,sep="_",remove=TRUE)
SP2<-unite(SP2,SpeciesList,SpeciesList,DateLoadedInDb,sep="_",remove=TRUE)
PSPS2<-unite(PSPS2,Species,Species,DateLoadedInDb,sep="_",remove=TRUE)
#making a list and removing duplicates
list2<-c(as.character(STT$TopCanopy),as.character(STL1$Lower1),as.character(STL2$Lower2),as.character(STL3$Lower3),as.character(STL4$Lower4),as.character(STL5$Lower5),as.character(STL6$Lower6),as.character(STL7$Lower7),as.character(SP2$SpeciesList),as.character(PSPS2$Species))
list2<-unique(list2)
# One row per unique code_date string, then split back into code and date.
# The awkward column name below is the auto-generated name from matrix().
df <- data.frame(matrix(list2, nrow=length(list2), byrow=T))
df<- df %>% separate(matrix.list2..nrow...length.list2...byrow...T.,c("Unknown_Code","Date"),sep="_")
View(df)
#exporting csv to saved file
write.csv(df,"unknown_codes_in_data.csv")
|
# plot3: recreate the "Energy sub metering" plot from the UCI household power
# consumption dataset (Exploratory Data Analysis course project). Reads
# household_power_consumption.txt from the working directory, keeps the
# observations for 2007-02-01 and 2007-02-02, and writes plot3.png.
# No arguments; called for its side effect (the PNG file).
plot3 <- function() {
  # Read the data. "?" marks missing values; reading them as NA (and keeping
  # strings as character) avoids the factor -> level-code corruption that
  # as.numeric() on factor columns would silently produce on R < 4.0.
  project_elec <- read.table("household_power_consumption.txt",
                             sep = ";", header = TRUE,
                             na.strings = "?", stringsAsFactors = FALSE)
  # convert the measurement columns to numeric in one pass
  num_cols <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3",
                "Global_active_power", "Global_intensity",
                "Global_reactive_power", "Voltage")
  project_elec[num_cols] <- lapply(project_elec[num_cols], as.numeric)
  project_elec$Date1 <- as.Date(project_elec$Date, "%d/%m/%Y")
  # Keep only the two target days. Bug fix: the original called filter()
  # (dplyr) without loading dplyr, which fails in a fresh session; base
  # subset() does the same job with no extra dependency.
  proj2 <- subset(project_elec,
                  Date1 >= as.Date("2007-02-01") &
                  Date1 <= as.Date("2007-02-02"))
  # combined date-time variable for the x axis
  proj2$dt1 <- as.POSIXct(paste(proj2$Date, proj2$Time),
                          format = "%d/%m/%Y %H:%M:%S")
  # Draw the three sub-metering series plus legend, then close the device.
  # Label typos fixed: "Energy ub metering" and "Sub metering_2".
  png("plot3.png")
  with(proj2, plot(dt1, Sub_metering_1, col = "black", type = "l",
                   xlab = " ", ylab = "Energy sub metering"))
  lines(proj2$dt1, proj2$Sub_metering_2, col = "red")
  lines(proj2$dt1, proj2$Sub_metering_3, col = "blue")
  legend("topright",
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         col = c("black", "red", "blue"), lty = 1)
  dev.off()
}
|
/plot3.R
|
no_license
|
unknowncutename/exploratorydata
|
R
| false
| false
| 1,436
|
r
|
# plot3: recreate the "Energy sub metering" plot from the UCI household power
# consumption dataset (Exploratory Data Analysis course project). Reads
# household_power_consumption.txt from the working directory, keeps the
# observations for 2007-02-01 and 2007-02-02, and writes plot3.png.
# No arguments; called for its side effect (the PNG file).
plot3 <- function() {
  # Read the data. "?" marks missing values; reading them as NA (and keeping
  # strings as character) avoids the factor -> level-code corruption that
  # as.numeric() on factor columns would silently produce on R < 4.0.
  project_elec <- read.table("household_power_consumption.txt",
                             sep = ";", header = TRUE,
                             na.strings = "?", stringsAsFactors = FALSE)
  # convert the measurement columns to numeric in one pass
  num_cols <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3",
                "Global_active_power", "Global_intensity",
                "Global_reactive_power", "Voltage")
  project_elec[num_cols] <- lapply(project_elec[num_cols], as.numeric)
  project_elec$Date1 <- as.Date(project_elec$Date, "%d/%m/%Y")
  # Keep only the two target days. Bug fix: the original called filter()
  # (dplyr) without loading dplyr, which fails in a fresh session; base
  # subset() does the same job with no extra dependency.
  proj2 <- subset(project_elec,
                  Date1 >= as.Date("2007-02-01") &
                  Date1 <= as.Date("2007-02-02"))
  # combined date-time variable for the x axis
  proj2$dt1 <- as.POSIXct(paste(proj2$Date, proj2$Time),
                          format = "%d/%m/%Y %H:%M:%S")
  # Draw the three sub-metering series plus legend, then close the device.
  # Label typos fixed: "Energy ub metering" and "Sub metering_2".
  png("plot3.png")
  with(proj2, plot(dt1, Sub_metering_1, col = "black", type = "l",
                   xlab = " ", ylab = "Energy sub metering"))
  lines(proj2$dt1, proj2$Sub_metering_2, col = "red")
  lines(proj2$dt1, proj2$Sub_metering_3, col = "blue")
  legend("topright",
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         col = c("black", "red", "blue"), lty = 1)
  dev.off()
}
|
#' @title Gather spectra from list of spectral data into a tibble object
#' @description Gather spectra and spectrometer metadata from a list into a
#' tibble. Spectra and wavenumbers are stored in list-columns. A tibble is an
#' extended data frame and each spectrum can contain complex data and metadata
#' that are in a rectangular data structure. List-columns are a tidy
#' data structure concept that can be combined with functional programming
#' frameworks provided by e.g. the purrr package.
#' @param data list with file name elements that contain spectra and metadata
#' after reading binary OPUS files with \code{simplerspec::read_opus_univ()}
#' @usage gather_spc(data)
#' @return Spectral data and metadata in an object of class tibble
#' @export
gather_spc <- function(data) {
  ## Extract data from list by purrr::map variants -----------------------------
  # Extract the original spectral matrix for all scans. If some list elements
  # are NULL (OPUS files that could not be read), drop them from both the
  # input list and the extracted spectra before gathering.
  map_spc <- purrr::map(data, "spc")
  which_NULL <- which(vapply(map_spc, is.null, logical(1)))
  # Bug fix: the original tested length(which_NULL > 0), i.e. the length of a
  # comparison vector -- it worked only by accident. Test the length itself.
  if (length(which_NULL) > 0) {
    # collapse = "; " fixes the original vectorised paste() that produced one
    # message fragment per failed file
    message(paste0("cannot extract spectra: <",
                   paste(names(which_NULL), collapse = "; "), ">\n"))
    data <- data[- which_NULL]
    map_spc <- map_spc[- which_NULL]
  }
  # Extract metadata list elements; once combined into a data frame (for the
  # identifier columns), once kept as a list-column
  map_metadata_df <- purrr::map_df(data, "metadata")
  map_metadata <- purrr::map(data, "metadata")
  # Extract wavenumbers (the original also built an unused rownames vector,
  # removed here)
  map_wavenumbers <- purrr::map(data, "wavenumbers")
  ## Create list-column tibble with the identifying metadata columns
  data_tibble <- tibble::as_tibble(
    map_metadata_df[c("unique_id", "file_id", "sample_id")]
  )
  ## Add spectra, wavenumbers and full metadata as list-columns
  tibble::add_column(.data = data_tibble,
    # raw spectra
    spc = map_spc,
    wavenumbers = map_wavenumbers,
    metadata = map_metadata
  )
}
|
/Soil-Predictions-Example/Functions/simplerspec/gather-spc.R
|
no_license
|
lusensn/Soil-Predictions-MIR
|
R
| false
| false
| 2,515
|
r
|
#' @title Gather spectra from list of spectral data into a tibble object
#' @description Collect spectra and spectrometer metadata from a list into a
#' tibble. Spectra and wavenumbers are kept as list-columns, so every row of
#' the resulting tibble bundles one spectrum with its metadata in a
#' rectangular, purrr-friendly structure.
#' @param data list with file name elements that contain spectra and metadata
#' after reading binary OPUS files with \code{simplerspec::read_opus_univ()}
#' @return Spectral data and metadata in an object of class tibble
#' @export
gather_spc <- function(data) {
  # Spectra first: entries whose OPUS file failed to read carry NULL here.
  spc_list <- purrr::map(data, "spc")
  null_pos <- which(sapply(spc_list, is.null))
  if (length(null_pos) > 0) {
    message(paste0("cannot extract spectra: <", paste(names(null_pos)), ">\n"))
    # Drop unreadable entries everywhere so list-columns stay aligned.
    data <- data[-null_pos]
    spc_list <- spc_list[-null_pos]
  }
  # Metadata both as one combined data frame (for the id columns) and as a
  # per-file list (kept as a list-column).
  meta_df <- purrr::map_df(data, "metadata")
  meta_list <- purrr::map(data, "metadata")
  # Row names of the spectra, flattened and stripped of names.
  spc_rownames <- unname(unlist(lapply(spc_list, rownames)))
  # Wavenumber axes, one vector per file.
  wn_list <- purrr::map(data, "wavenumbers")
  # The identifier columns form the tibble backbone ...
  id_tbl <- tibble::as_tibble(
    meta_df[c("unique_id", "file_id", "sample_id")]
  )
  # ... and spectra, wavenumbers and metadata ride along as list-columns.
  tibble::add_column(.data = id_tbl,
    spc = spc_list,
    wavenumbers = wn_list,
    metadata = meta_list
  )
}
|
# Plot the dendrogram stored in Data$Clust, colouring the labels of the
# selected objects (Sel1 with col1, optionally Sel2 with col2) via the
# LabelCols helper defined elsewhere in the package.
# NOTE(review): assumes Data$Clust is coercible by stats::as.dendrogram
# (e.g. an hclust object) — confirm against callers.
LabelPlot <- function(Data, Sel1, Sel2 = NULL, col1, col2 = NULL, ...) {
  # hang = 0.02 keeps the leaves slightly above the baseline.
  dend <- stats::as.dendrogram(Data$Clust, hang = 0.02)
  # Apply LabelCols to every node so selected labels get their colours.
  coloured <- stats::dendrapply(dend, LabelCols, Sel1, Sel2, col1, col2)
  graphics::plot(coloured, nodePar = list(pch = NA), edgePar = list(lwd = 2),
                 ylab = "Height", font.axis = 2, font.lab = 2, font = 2, ...)
  graphics::axis(side = 2, lwd = 2)
}
|
/IntClust/R/Labelplot.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 331
|
r
|
# Plot the dendrogram stored in Data$Clust, colouring the labels of the
# selected objects (Sel1 with col1, optionally Sel2 with col2) via the
# LabelCols helper, which is defined elsewhere in the package.
# NOTE(review): assumes Data$Clust is coercible by stats::as.dendrogram
# (e.g. an hclust object) — confirm against callers.
LabelPlot<-function(Data,Sel1,Sel2=NULL,col1,col2=NULL,...){
x=Data$Clust
# Apply LabelCols to every node; hang = 0.02 keeps the leaves slightly
# above the baseline.
d_temp <- stats::dendrapply(stats::as.dendrogram(x,hang=0.02),LabelCols,Sel1,Sel2,col1,col2)
# Suppress node symbols (pch = NA), thicken edges, bold all text.
graphics::plot(d_temp,nodePar=list(pch=NA),edgePar=list(lwd=2),ylab="Height",font.axis=2,font.lab=2,font=2,...)
graphics::axis(side = 2, lwd = 2)
}
|
# Fig. 1: ESV (exact sequence variant) richness, benthos vs. water samples.
# Reads matrix.csv, keeps phylum Arthropoda, rarefies each sample to the 15th
# percentile of read depth, converts to presence-absence, plots per-site
# richness, checks normality, and runs a paired Wilcoxon test of
# Benthos > Water richness.
# Teresita M. Porter, Sept. 9, 2019
library(vegan)
library(reshape2)
library(gridExtra)
library(grid)
library(ggplot2)
library(plyr)
library(data.table)
library("car")
library(stringr)
library("ggpubr")
# Read infile
# FIX: spell out header= (was head=TRUE, relying on partial argument matching).
A <- read.table(file = "matrix.csv", header = TRUE, sep = ",")
# Select phylum Arthropoda only
B <- A[A$Phylum == "Arthropoda", ]
# Split SampleName into its own columns using pkg "stringr"
B2 <- data.frame(B, do.call(rbind, str_split(B$SampleName, "_")))
# NOTE(review): assumes the split adds exactly 7 columns at positions 32:38,
# i.e. that matrix.csv has 31 columns — confirm against the input file.
names(B2)[32:38] <- c("month", "year", "version", "substrate", "site", "marker", "siterep")
# Combine substrate+siterep into their own column
B2$sample <- paste(B2$substrate, B2$siterep, sep = "_")
# Pivot to make matrix for vegan (ESVs in rows, one column per sample)
C <- dcast(B2, Marker_GlobalESV ~ sample, value.var = "ESVsize", fun.aggregate = sum)
# Move Marker_GlobalESV to row names.
# FIX: the original assigned C$sample, which is not a column of the dcast
# result (its columns are Marker_GlobalESV plus one column per sample value),
# so row names were silently reset to defaults instead of set.
rownames(C) <- C$Marker_GlobalESV
C <- C[, -1]
# Transpose to get samples in rows, ESVs in columns
Ct <- t(C)
# Remove columns with only zeros
notnull <- Ct[, colSums(Ct) != 0]
# Remove rows with only zeros
notnull2 <- notnull[rowSums(notnull) != 0, ]
# Calculate 15th percentile of sample read depth for the rrarefy function
percentile <- quantile(rowSums(notnull2), prob = 0.15)
# Set random seed so the rarefaction is reproducible
set.seed(12345)
# Rarefy the dataset down to the 15th percentile
df <- rrarefy(notnull2, sample = percentile)
# Convert to presence-absence
df[df > 0] <- 1
# Convert to df
df <- data.frame(df)
# Get total ESVs per sample (richness)
df$sums <- rowSums(df)
# Move rownames to first column (rn) via data.table
df2 <- data.frame(df)
setDT(df2, keep.rownames = TRUE)[]
# Get separate substrate and siterep cols
setDT(df2)[, paste0("S", 1:2) := tstrsplit(rn, "_")]
colnames(df2)[colnames(df2) == "S1"] <- "substrate"
colnames(df2)[colnames(df2) == "S2"] <- "siterep"
# Get separate site and rep cols
df2$site <- str_sub(df2$siterep, 1, 2)
df2$rep <- str_sub(df2$siterep, -1)
# Create factors with readable labels
df2$substrate <- factor(df2$substrate, levels = c("B", "W"), labels = c("Benthos", "Water"))
# ggplot of richness per site, one panel per substrate.
# FIX: reference columns directly inside aes() instead of df2$... — using
# df2$ bypasses ggplot2's data masking and can silently misalign rows when
# ggplot subsets the data (e.g. under faceting); bare column names are the
# supported form.
p1 <- ggplot(df2) +
  # geom_boxplot(aes(x = site, y = sums, fill = substrate)) +
  geom_point(aes(x = site, y = sums, color = substrate)) +
  labs(x = "Sites", y = "ESV Richness") +
  facet_wrap(~substrate) +
  scale_color_manual(values = c("#4DAF4A", "#377EB8")) +
  theme(legend.title = element_blank()) +
  theme_bw() +
  theme(panel.border = element_blank(),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.line = element_line(colour = "black"),
        legend.title = element_blank())
ggsave("F1_ESVrichness.pdf", p1)
# test for normality pkg "ggpubr"
ggdensity(df2$sums,
          main = "Density plot",
          xlab = "Sample ESV richness")
# not normal
ggqqplot(df2$sums)
# mostly normal
qqPlot(df2$sums)
# mostly normal
# Shapiro-Wilk test of normality
shapiro.test(df2$sums)
# data:  df2$sums
# W = 0.94474, p-value = 0.02477
# sig diff than normal
# paired samples Wilcoxon test (Wilcoxon signed-rank test), one-sided:
# is Benthos richness greater than Water richness?
wilcox.test(df2$sums[df2$substrate == "Benthos"], df2$sums[df2$substrate == "Water"],
            paired = TRUE, alternative = "greater")
# p-value = 3.217e-05, Benthos richness greater than Water richness
|
/scripts/Fig1_Richness.R
|
permissive
|
Hajibabaei-Lab/HajibabaeiEtAl2019b_benthos_vs_water
|
R
| false
| false
| 3,085
|
r
|
# Fig. 1: ESV (exact sequence variant) richness, benthos vs. water samples.
# Pipeline: read matrix.csv -> keep Arthropoda -> pivot to a sample x ESV
# matrix -> rarefy to the 15th percentile of read depth -> presence-absence
# -> plot richness per site -> normality checks -> paired Wilcoxon test.
# Teresita M. Porter, Sept. 9, 2019
library(vegan)
library(reshape2)
library(gridExtra)
library(grid)
library(ggplot2)
library(plyr)
library(data.table)
library("car")
library(stringr)
library("ggpubr")
# Read infile
# NOTE(review): head=TRUE relies on partial argument matching for header=.
A<-read.table(file="matrix.csv", head=TRUE, sep=",")
# Select phylum Arthropoda only
B<-A[A$Phylum=="Arthropoda",]
# Split SampleName into their own columns using pkg "stringr"
B2<-data.frame(B, do.call(rbind, str_split(B$SampleName,"_")))
# NOTE(review): assumes the split yields exactly 7 parts landing at columns
# 32:38, i.e. that matrix.csv has 31 columns — confirm against the input.
names(B2)[32:38]<-c("month","year","version","substrate","site","marker","siterep")
# Combine substrate+siterep into their own column
B2$sample<-paste(B2$substrate,B2$siterep,sep="_")
# Pivot to make matrix for vegan (rows = ESVs, one column per sample)
C<-dcast(B2, Marker_GlobalESV ~ sample, value.var="ESVsize", fun.aggregate = sum)
# Move marker_OTU to row names
# NOTE(review): C$sample is NOT a column of the dcast result (its columns are
# Marker_GlobalESV plus one per sample), so this assigns NULL and silently
# resets the row names — likely meant rownames(C)<-C$Marker_GlobalESV.
rownames(C)<-C$sample
C<-C[,-1]
# Transpose to get sites in rows, ESVs in columns
Ct<-t(C)
# Remove columns with only zeros
notnull<-Ct[,colSums(Ct) !=0]
# Remove rows with only zeros
notnull2<-notnull[rowSums(notnull) !=0,]
# Calculate 15th percentile (of per-sample read depth) for rrarefy function
percentile<-quantile(rowSums(notnull2), prob=0.15)
# Set random seed for rarefaction (makes rrarefy reproducible)
set.seed(12345)
# Rarefy the dataset down to the 15th percentile
df<-rrarefy(notnull2,sample=percentile)
# Convert to presence-absence
df[df>0] <- 1
# Convert to df
df<-data.frame(df)
# Get total ESVs per sample (= richness)
df$sums<-rowSums(df)
# Move rownames to first column (data.table adds them as column "rn")
df2<-data.frame(df)
setDT(df2, keep.rownames = TRUE)[]
# Get separate substrate and siterep cols
setDT(df2)[, paste0("S", 1:2) := tstrsplit(rn, "_")]
colnames(df2)[colnames(df2)=="S1"] <- "substrate"
colnames(df2)[colnames(df2)=="S2"] <- "siterep"
# Get separate site and rep cols
df2$site <- str_sub(df2$siterep, 1,2)
df2$rep <- str_sub(df2$siterep, -1)
# Create factors with readable labels
df2$substrate<-factor(df2$substrate, levels=c("B","W"), labels=c("Benthos","Water"))
# ggplot of per-site richness, one panel per substrate
# NOTE(review): df2$ inside aes() bypasses ggplot2's data masking; bare
# column names are the supported form.
p1<-ggplot(df2) +
# geom_boxplot(aes(x=df2$site, y=df2$sums, fill=substrate)) +
geom_point(aes(x=df2$site, y=df2$sums, color=substrate)) +
labs(x="Sites", y="ESV Richness") +
facet_wrap(~substrate) +
scale_color_manual(values=c("#4DAF4A", "#377EB8")) +
theme(legend.title=element_blank()) +
theme_bw() +
theme(panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = "black"),
legend.title = element_blank())
ggsave("F1_ESVrichness.pdf",p1)
# test for normality pkg "ggpubr"
ggdensity(df2$sums,
main = "Density plot",
xlab = "Sample ESV richness")
# not normal
ggqqplot(df2$sums)
# mostly normal
qqPlot(df2$sums)
# mostly normal
#Shapiro-Wilk test of normality
shapiro.test(df2$sums)
# data:  df2$sums
# W = 0.94474, p-value = 0.02477
# sig diff than normal
# paired samples Wilcoxon test (Wilcoxon signed-rank test), one-sided:
# tests whether Benthos richness exceeds Water richness
wilcox.test(df2$sums[df2$substrate=="Benthos"], df2$sums[df2$substrate=="Water"],
paired = TRUE, alternative = "greater")
# p-value = 3.217e-05, Benthos richness greater than Water richness
|
# S3 plot method for objects of class "scaleCoef".
# Draws point estimates of scaled regression coefficients with
# confidence-interval segments. Three layouts, chosen from the object:
#   * single fitted model ("estSE" in x$scaledResults) -> one plot, one
#     point per predictor;
#   * several models with differing predictors, or all sharing exactly one
#     predictor -> one plot, one point per model;
#   * several models sharing the same multi-predictor set -> one stacked
#     panel per model (cannot be combined with add=TRUE).
# Confidence intervals are omitted with a warning when the object carries
# no CI information.
plot.scaleCoef <-
function(x,
         add=FALSE,        # logical, if true add plot to current plot
         offsetby=0,       # amount on x-axis by which to offset the plotted
                           # estimates and confidence intervals, useful when add is TRUE
         xlim=NULL,
         ylim=NULL,
         xlab=NULL,
         ylab=NULL,
         xaxt=NULL,        # specify "n" to add later yourself
         yaxt=NULL,        # specify "n" to add later yourself
         horizontal=FALSE, # FALSE: predictors will be plotted
                           # on x-axis, scaled coefficient values on y-axis
         firstToLast=TRUE, # when FALSE, plots variables last to first
                           # instead of first to last
         ...
         ){
  # specify axis for predictor variables (1 = bottom, 2 = left)
  # NOTE(review): ifelse() used purely for its assignment side effects; both
  # branches assign, so this works, but `predaxis <- if (horizontal) 2 else 1`
  # would be the conventional form.
  ifelse(horizontal, predaxis <- 2, predaxis <- 1)
  # if null, change predictor axt to "n", since label according to variable names
  if(horizontal){
    if(is.null(yaxt)) {
      yaxt <- "n"
      predaxt <- TRUE
    }
  }
  else{
    if(is.null(xaxt)) {
      xaxt <- "n"
      predaxt <- TRUE
    }
  }
  # if no specified axt, change to default
  if(is.null(xaxt)) xaxt="s"
  if(is.null(yaxt)) yaxt="s"
  # NOTE(review): exists() also searches enclosing/global environments, so an
  # unrelated variable named "predaxt" would be found here;
  # exists("predaxt", inherits = FALSE) would be safer.
  if(!exists("predaxt")) predaxt <- FALSE
  # specify x and y axis labels, depending on horizontal
  if(horizontal){
    if(is.null(ylab)) ylab <- "Predictor variable(s)"
    if(is.null(xlab)) xlab <- "Scaled regression coefficient"
  }
  else{
    if(is.null(xlab)) xlab <- "Predictor variable(s)"
    if(is.null(ylab)) ylab <- "Scaled regression coefficient"
  }
  # find out if there are multiple fits in x
  oneFit <- "estSE"%in%names(x$scaledResults)
  # one fit: plot scaled coefficients from same model in a single plot
  if(oneFit){
    num <- length(x$scaledResults$scaleBy)
    if(firstToLast){
      predAt <- 1:num
    }
    else{
      predAt <- num:1
    }
    # detemine coefficient axis limits
    if(!is.null(x$scaledResults$CI)){
      ub <- max(x$scaledResults$CI)
      lb <- min(x$scaledResults$CI)
    }
    else{
      ub <- max(x$scaledResults$estSE)
      lb <- min(x$scaledResults$estSE)
      warning("Confidence interval(s) omitted from plot")
    }
    if(horizontal){
      if(is.null(xlim)) xlim <- c(lb,ub)
    }
    else{
      if(is.null(ylim)) ylim <- c(lb,ub)
    }
    # determine predictor axis limits
    if(horizontal){
      if(is.null(ylim)) ylim <- c(.5, num +.5)
    }
    else{
      if(is.null(xlim)) xlim <- c(.5, num +.5)
    }
    if(!add){
      plot(1,1,ylim=ylim,xlim=xlim, type="n", xaxt=xaxt, yaxt=yaxt,
           xlab=xlab, ylab=ylab, ...)
      if(predaxt) {axis(predaxis, at=predAt, names(x$scaledResults$scaleBy))}
    }
    if(horizontal){
      # NOTE(review): a single-coefficient fit appears to store estSE/CI as
      # named vectors rather than matrices, hence the class check — confirm
      # against the constructor.
      if(!"matrix"%in%class(x$scaledResults$estSE)){
        xplot1 <- x$scaledResults$estSE["Estimate"]
        xplot2 <- x$scaledResults$CI["2.5 %"]
        xplot3 <- x$scaledResults$CI["97.5 %"]
      }
      else{
        xplot1 <- x$scaledResults$estSE[,"Estimate"]
        xplot2 <- x$scaledResults$CI[,"2.5 %"]
        xplot3 <- x$scaledResults$CI[,"97.5 %"]
      }
      yplot1 <- predAt+offsetby
      yplot2 <- predAt+offsetby
      yplot3 <- predAt+offsetby
    }
    else{
      if(!"matrix"%in%class(x$scaledResults$estSE)){
        yplot1 <- x$scaledResults$estSE["Estimate"]
        yplot2 <- x$scaledResults$CI["2.5 %"]
        yplot3 <- x$scaledResults$CI["97.5 %"]
      }
      else{
        yplot1 <- x$scaledResults$estSE[,"Estimate"]
        yplot2 <- x$scaledResults$CI[,"2.5 %"]
        yplot3 <- x$scaledResults$CI[,"97.5 %"]
      }
      xplot1 <- predAt+offsetby
      xplot2 <- predAt+offsetby
      xplot3 <- predAt+offsetby
    }
    # point estimates, then CI segments (if any CI is available)
    points( xplot1,yplot1, ...)
    if(!is.null(x$scaledResults$CI)){
      segments( xplot2,yplot2,
                xplot3,yplot3, ...)
    }
  }
  # multiple fits, plotting depends on whether diffAcrossModels
  if(!oneFit){
    numFit <- length(x$scaledResults)
    if(is.null(names(x$scaledResults))){
      fitNames <- paste("fit",1:length(x$scaledResults),sep="")
    }
    else{
      fitNames <- names(x$scaledResults)
    }
    # matrix containing all CI (one row per fit; NA row when a fit has none)
    CIs <- NULL
    for(f in 1:numFit){
      fitCI <- x$scaledResults[[f]]$CI
      if(is.null(fitCI)) fitCI <- c(NA,NA)
      CIs <- rbind(CIs, fitCI)
    }
    if(any(is.na(CIs))) warning("Confidence interval(s) omitted from plot")
    # detemine coefficient axis limits
    ub <- max(CIs, na.rm=TRUE)
    lb <- min(CIs, na.rm=TRUE)
    # assign coefficient axis limits
    if(horizontal){
      if(is.null(xlim)){ xlim <- c(lb,ub)}
    }
    else{
      if(is.null(ylim)){ ylim <- c(lb,ub) }
    }
    # identify whether diffAcrossModels (fits do not share one predictor set)
    vars <- sapply(x$scaledResults, function(y) names(y$scaleBy))
    if(!is.matrix(vars) & length(unique(vars))!=1){
      diffAcrossModels <- TRUE
    }
    # NOTE(review): parentheses (not braces) after else — syntactically valid
    # because (expr) is itself an expression, but unusual style.
    else(
      diffAcrossModels <- FALSE
    )
    # when diffAcrossModels=TRUE OR only one coef in all models, make one plot
    if(diffAcrossModels | (!diffAcrossModels & length(unique(vars))==1)){
      if(length(unique(vars))==1){
        vars <- paste(fitNames,vars,sep=".")
      }
      # possibly change order of plotting
      if(firstToLast){
        predAt <- 1:numFit
      }
      else{
        predAt <- numFit:1
      }
      # determine predictor axis limits
      if(horizontal){
        if(is.null(ylim)) ylim <- c(.5, numFit +.5)
      }
      else{
        if(is.null(xlim)) xlim <- c(.5, numFit +.5)
      }
      if(!add){
        plot(1,1,ylim=ylim, xlim=xlim, type="n", xaxt=xaxt, yaxt=yaxt,
             xlab=xlab, ylab=ylab, ...)
        if(predaxt) {axis(predaxis, at=predAt, vars)}
      }
      # one point (+ CI segment) per fit
      for(f in 1:numFit){
        if(horizontal){
          yplot1 <- predAt[f]+offsetby
          yplot2 <- predAt[f]+offsetby
          yplot3 <- predAt[f]+offsetby
          xplot1 <- x$scaledResults[[f]]$estSE["Estimate"]
          xplot2 <- CIs[f,"2.5 %"]
          xplot3 <- CIs[f,"97.5 %"]
        }
        else{
          xplot1 <- predAt[f]+offsetby
          xplot2 <- predAt[f]+offsetby
          xplot3 <- predAt[f]+offsetby
          yplot1 <- x$scaledResults[[f]]$estSE["Estimate"]
          yplot2 <- CIs[f,"2.5 %"]
          yplot3 <- CIs[f,"97.5 %"]
        }
        points( xplot1,yplot1, ...)
        segments( xplot2,yplot2,
                  xplot3,yplot3, ...)
      }
    }
    # otherwise, make numFit plots, can't add to previous plots
    else{
      if(add) stop("Can't add to multiple previously constructed plots")
      vars <- names(x$scaledResults[[1]]$scaleBy)
      num <- length(vars)
      # possibly change order of plotting
      if(firstToLast){
        predAt <- 1:num
      }
      else{
        predAt <- num:1
      }
      # determine predictor axis limits
      if(horizontal){
        if(is.null(ylim)) ylim <- c(.5, num +.5)
      }
      else{
        if(is.null(xlim)) xlim <- c(.5, num +.5)
      }
      # stack one panel per model
      # NOTE(review): par() is changed without on.exit() restoration; it is
      # reset manually at the end of this branch only.
      par(mfrow=c(numFit,1))
      # possibly change order of plotting
      # NOTE(review): this recomputation of predAt duplicates the block above.
      if(firstToLast){
        predAt <- 1:num
      }
      else{
        predAt <- num:1
      }
      for(v in 1:numFit){
        plot(1,1,ylim=ylim,xlim=xlim, type="n", xaxt=xaxt, yaxt=yaxt,
             main=paste("Model:",fitNames[v]), xlab=xlab, ylab=ylab, ...)
        if(predaxt) {axis(predaxis, at=predAt, vars)}
        # one point (+ CI segment when this fit has CIs) per predictor
        for(f in 1:num){
          if(horizontal){
            yplot1 <- predAt[f]+offsetby
            yplot2 <- predAt[f]+offsetby
            yplot3 <- predAt[f]+offsetby
            xplot1 <- x$scaledResults[[v]]$estSE[f,"Estimate"]
            if(!is.null(x$scaledResults[[v]]$CI)){
              xplot2 <- x$scaledResults[[v]]$CI[f,"2.5 %"]
              xplot3 <- x$scaledResults[[v]]$CI[f,"97.5 %"]
            }
            else{
              xplot2 <- NA
              xplot3 <- NA
            }
          }
          else{
            xplot1 <- predAt[f]+offsetby
            xplot2 <- predAt[f]+offsetby
            xplot3 <- predAt[f]+offsetby
            yplot1 <- x$scaledResults[[v]]$estSE[f,"Estimate"]
            if(!is.null(x$scaledResults[[v]]$CI)){
              yplot2 <- x$scaledResults[[v]]$CI[f,"2.5 %"]
              yplot3 <- x$scaledResults[[v]]$CI[f,"97.5 %"]
            }
            else{
              yplot2 <- NA
              yplot3 <- NA
            }
          }
          points( xplot1,yplot1, ...)
          segments( xplot2,yplot2,
                    xplot3,yplot3, ...)
        }
      }
      # restore the plotting layout
      par(mfrow=c(1,1))
    }
  }
}
|
/R/plot.scaleCoef.R
|
no_license
|
cran/scaleCoef
|
R
| false
| false
| 10,626
|
r
|
# S3 plot method for objects of class "scaleCoef".
# Draws point estimates of scaled regression coefficients with
# confidence-interval segments. Three layouts, chosen from the object:
#   * single fitted model ("estSE" in x$scaledResults) -> one plot, one
#     point per predictor;
#   * several models with differing predictors, or all sharing exactly one
#     predictor -> one plot, one point per model;
#   * several models sharing the same multi-predictor set -> one stacked
#     panel per model (cannot be combined with add=TRUE).
# Confidence intervals are omitted with a warning when the object carries
# no CI information.
plot.scaleCoef <-
function(x,
         add=FALSE,        # logical, if true add plot to current plot
         offsetby=0,       # amount on x-axis by which to offset the plotted
                           # estimates and confidence intervals, useful when add is TRUE
         xlim=NULL,
         ylim=NULL,
         xlab=NULL,
         ylab=NULL,
         xaxt=NULL,        # specify "n" to add later yourself
         yaxt=NULL,        # specify "n" to add later yourself
         horizontal=FALSE, # FALSE: predictors will be plotted
                           # on x-axis, scaled coefficient values on y-axis
         firstToLast=TRUE, # when FALSE, plots variables last to first
                           # instead of first to last
         ...
         ){
  # specify axis for predictor variables (1 = bottom, 2 = left)
  # NOTE(review): ifelse() used purely for its assignment side effects; both
  # branches assign, so this works, but `predaxis <- if (horizontal) 2 else 1`
  # would be the conventional form.
  ifelse(horizontal, predaxis <- 2, predaxis <- 1)
  # if null, change predictor axt to "n", since label according to variable names
  if(horizontal){
    if(is.null(yaxt)) {
      yaxt <- "n"
      predaxt <- TRUE
    }
  }
  else{
    if(is.null(xaxt)) {
      xaxt <- "n"
      predaxt <- TRUE
    }
  }
  # if no specified axt, change to default
  if(is.null(xaxt)) xaxt="s"
  if(is.null(yaxt)) yaxt="s"
  # NOTE(review): exists() also searches enclosing/global environments, so an
  # unrelated variable named "predaxt" would be found here;
  # exists("predaxt", inherits = FALSE) would be safer.
  if(!exists("predaxt")) predaxt <- FALSE
  # specify x and y axis labels, depending on horizontal
  if(horizontal){
    if(is.null(ylab)) ylab <- "Predictor variable(s)"
    if(is.null(xlab)) xlab <- "Scaled regression coefficient"
  }
  else{
    if(is.null(xlab)) xlab <- "Predictor variable(s)"
    if(is.null(ylab)) ylab <- "Scaled regression coefficient"
  }
  # find out if there are multiple fits in x
  oneFit <- "estSE"%in%names(x$scaledResults)
  # one fit: plot scaled coefficients from same model in a single plot
  if(oneFit){
    num <- length(x$scaledResults$scaleBy)
    if(firstToLast){
      predAt <- 1:num
    }
    else{
      predAt <- num:1
    }
    # detemine coefficient axis limits
    if(!is.null(x$scaledResults$CI)){
      ub <- max(x$scaledResults$CI)
      lb <- min(x$scaledResults$CI)
    }
    else{
      ub <- max(x$scaledResults$estSE)
      lb <- min(x$scaledResults$estSE)
      warning("Confidence interval(s) omitted from plot")
    }
    if(horizontal){
      if(is.null(xlim)) xlim <- c(lb,ub)
    }
    else{
      if(is.null(ylim)) ylim <- c(lb,ub)
    }
    # determine predictor axis limits
    if(horizontal){
      if(is.null(ylim)) ylim <- c(.5, num +.5)
    }
    else{
      if(is.null(xlim)) xlim <- c(.5, num +.5)
    }
    if(!add){
      plot(1,1,ylim=ylim,xlim=xlim, type="n", xaxt=xaxt, yaxt=yaxt,
           xlab=xlab, ylab=ylab, ...)
      if(predaxt) {axis(predaxis, at=predAt, names(x$scaledResults$scaleBy))}
    }
    if(horizontal){
      # NOTE(review): a single-coefficient fit appears to store estSE/CI as
      # named vectors rather than matrices, hence the class check — confirm
      # against the constructor.
      if(!"matrix"%in%class(x$scaledResults$estSE)){
        xplot1 <- x$scaledResults$estSE["Estimate"]
        xplot2 <- x$scaledResults$CI["2.5 %"]
        xplot3 <- x$scaledResults$CI["97.5 %"]
      }
      else{
        xplot1 <- x$scaledResults$estSE[,"Estimate"]
        xplot2 <- x$scaledResults$CI[,"2.5 %"]
        xplot3 <- x$scaledResults$CI[,"97.5 %"]
      }
      yplot1 <- predAt+offsetby
      yplot2 <- predAt+offsetby
      yplot3 <- predAt+offsetby
    }
    else{
      if(!"matrix"%in%class(x$scaledResults$estSE)){
        yplot1 <- x$scaledResults$estSE["Estimate"]
        yplot2 <- x$scaledResults$CI["2.5 %"]
        yplot3 <- x$scaledResults$CI["97.5 %"]
      }
      else{
        yplot1 <- x$scaledResults$estSE[,"Estimate"]
        yplot2 <- x$scaledResults$CI[,"2.5 %"]
        yplot3 <- x$scaledResults$CI[,"97.5 %"]
      }
      xplot1 <- predAt+offsetby
      xplot2 <- predAt+offsetby
      xplot3 <- predAt+offsetby
    }
    # point estimates, then CI segments (if any CI is available)
    points( xplot1,yplot1, ...)
    if(!is.null(x$scaledResults$CI)){
      segments( xplot2,yplot2,
                xplot3,yplot3, ...)
    }
  }
  # multiple fits, plotting depends on whether diffAcrossModels
  if(!oneFit){
    numFit <- length(x$scaledResults)
    if(is.null(names(x$scaledResults))){
      fitNames <- paste("fit",1:length(x$scaledResults),sep="")
    }
    else{
      fitNames <- names(x$scaledResults)
    }
    # matrix containing all CI (one row per fit; NA row when a fit has none)
    CIs <- NULL
    for(f in 1:numFit){
      fitCI <- x$scaledResults[[f]]$CI
      if(is.null(fitCI)) fitCI <- c(NA,NA)
      CIs <- rbind(CIs, fitCI)
    }
    if(any(is.na(CIs))) warning("Confidence interval(s) omitted from plot")
    # detemine coefficient axis limits
    ub <- max(CIs, na.rm=TRUE)
    lb <- min(CIs, na.rm=TRUE)
    # assign coefficient axis limits
    if(horizontal){
      if(is.null(xlim)){ xlim <- c(lb,ub)}
    }
    else{
      if(is.null(ylim)){ ylim <- c(lb,ub) }
    }
    # identify whether diffAcrossModels (fits do not share one predictor set)
    vars <- sapply(x$scaledResults, function(y) names(y$scaleBy))
    if(!is.matrix(vars) & length(unique(vars))!=1){
      diffAcrossModels <- TRUE
    }
    # NOTE(review): parentheses (not braces) after else — syntactically valid
    # because (expr) is itself an expression, but unusual style.
    else(
      diffAcrossModels <- FALSE
    )
    # when diffAcrossModels=TRUE OR only one coef in all models, make one plot
    if(diffAcrossModels | (!diffAcrossModels & length(unique(vars))==1)){
      if(length(unique(vars))==1){
        vars <- paste(fitNames,vars,sep=".")
      }
      # possibly change order of plotting
      if(firstToLast){
        predAt <- 1:numFit
      }
      else{
        predAt <- numFit:1
      }
      # determine predictor axis limits
      if(horizontal){
        if(is.null(ylim)) ylim <- c(.5, numFit +.5)
      }
      else{
        if(is.null(xlim)) xlim <- c(.5, numFit +.5)
      }
      if(!add){
        plot(1,1,ylim=ylim, xlim=xlim, type="n", xaxt=xaxt, yaxt=yaxt,
             xlab=xlab, ylab=ylab, ...)
        if(predaxt) {axis(predaxis, at=predAt, vars)}
      }
      # one point (+ CI segment) per fit
      for(f in 1:numFit){
        if(horizontal){
          yplot1 <- predAt[f]+offsetby
          yplot2 <- predAt[f]+offsetby
          yplot3 <- predAt[f]+offsetby
          xplot1 <- x$scaledResults[[f]]$estSE["Estimate"]
          xplot2 <- CIs[f,"2.5 %"]
          xplot3 <- CIs[f,"97.5 %"]
        }
        else{
          xplot1 <- predAt[f]+offsetby
          xplot2 <- predAt[f]+offsetby
          xplot3 <- predAt[f]+offsetby
          yplot1 <- x$scaledResults[[f]]$estSE["Estimate"]
          yplot2 <- CIs[f,"2.5 %"]
          yplot3 <- CIs[f,"97.5 %"]
        }
        points( xplot1,yplot1, ...)
        segments( xplot2,yplot2,
                  xplot3,yplot3, ...)
      }
    }
    # otherwise, make numFit plots, can't add to previous plots
    else{
      if(add) stop("Can't add to multiple previously constructed plots")
      vars <- names(x$scaledResults[[1]]$scaleBy)
      num <- length(vars)
      # possibly change order of plotting
      if(firstToLast){
        predAt <- 1:num
      }
      else{
        predAt <- num:1
      }
      # determine predictor axis limits
      if(horizontal){
        if(is.null(ylim)) ylim <- c(.5, num +.5)
      }
      else{
        if(is.null(xlim)) xlim <- c(.5, num +.5)
      }
      # stack one panel per model
      # NOTE(review): par() is changed without on.exit() restoration; it is
      # reset manually at the end of this branch only.
      par(mfrow=c(numFit,1))
      # possibly change order of plotting
      # NOTE(review): this recomputation of predAt duplicates the block above.
      if(firstToLast){
        predAt <- 1:num
      }
      else{
        predAt <- num:1
      }
      for(v in 1:numFit){
        plot(1,1,ylim=ylim,xlim=xlim, type="n", xaxt=xaxt, yaxt=yaxt,
             main=paste("Model:",fitNames[v]), xlab=xlab, ylab=ylab, ...)
        if(predaxt) {axis(predaxis, at=predAt, vars)}
        # one point (+ CI segment when this fit has CIs) per predictor
        for(f in 1:num){
          if(horizontal){
            yplot1 <- predAt[f]+offsetby
            yplot2 <- predAt[f]+offsetby
            yplot3 <- predAt[f]+offsetby
            xplot1 <- x$scaledResults[[v]]$estSE[f,"Estimate"]
            if(!is.null(x$scaledResults[[v]]$CI)){
              xplot2 <- x$scaledResults[[v]]$CI[f,"2.5 %"]
              xplot3 <- x$scaledResults[[v]]$CI[f,"97.5 %"]
            }
            else{
              xplot2 <- NA
              xplot3 <- NA
            }
          }
          else{
            xplot1 <- predAt[f]+offsetby
            xplot2 <- predAt[f]+offsetby
            xplot3 <- predAt[f]+offsetby
            yplot1 <- x$scaledResults[[v]]$estSE[f,"Estimate"]
            if(!is.null(x$scaledResults[[v]]$CI)){
              yplot2 <- x$scaledResults[[v]]$CI[f,"2.5 %"]
              yplot3 <- x$scaledResults[[v]]$CI[f,"97.5 %"]
            }
            else{
              yplot2 <- NA
              yplot3 <- NA
            }
          }
          points( xplot1,yplot1, ...)
          segments( xplot2,yplot2,
                    xplot3,yplot3, ...)
        }
      }
      # restore the plotting layout
      par(mfrow=c(1,1))
    }
  }
}
|
library(igraph)
# Generate a random graph by stub matching (configuration model), optionally
# block-weighted: when P is supplied, the partner of each stub is sampled
# with probability proportional to P[block(stub), block(candidate)].
#
# Arguments:
#   degrees    integer vector, desired degree of each node (sum must be even).
#              NOTE(review): the default `rep(n, floor(n / 2))` is circular —
#              n is derived from length(degrees) below — so the default
#              errors if relied upon; kept unchanged for interface
#              compatibility, but callers must always pass `degrees`.
#   membership block label of each node (same length as degrees).
#   P          optional K x K weight matrix (K = max(membership)).
# Returns: an undirected igraph graph on the generated edge list.
configModel <- function (degrees = rep(n, floor(n / 2)),
                         membership = rep(1, n), P = NULL) {
  # Checking P
  if (!is.null(P)) {
    if (!is.matrix(P))
      stop('P must be a matrix\n')
    K <- max(membership)
    if (nrow(P) != K)
      stop('P must have K rows\n')
  }
  # Checking degrees / membership
  n <- length(degrees)
  dT <- sum(degrees)
  if (dT %% 2 != 0) stop('sum of degrees must be even\n')
  if (length(membership) != length(degrees))
    stop('Number of degrees must equal number of memberships\n')
  # Initializing objects: node u contributes degrees[u] shuffled stubs.
  edge_list <- matrix(0, dT / 2, 2)
  edge_set <- unlist(lapply(1:n, function (u) rep(u, degrees[u])))
  edge_set <- sample(edge_set)
  remaining <- rep(TRUE, length(edge_set))
  mship_set <- membership[edge_set]
  # Pre-expand P to stub resolution: Wmat[k, j] = P[k, block of stub j].
  if (is.matrix(P)) {
    Wmat <- t(apply(P, 1, function (r) r[mship_set]))
  } else {
    Wmat <- NULL
  }
  counter <- 1
  # Repeatedly take the first open stub and pair it with another open stub.
  while (sum(remaining) > 0) {
    cat(counter, '/', dT / 2, '\n')  # progress: edge counter / total edges
    indx <- min(which(remaining))
    remaining[indx] <- FALSE
    indx_mship <- mship_set[indx]
    now_rem <- which(remaining)
    if (length(now_rem) > 1) {
      # BUG FIX: when P is NULL, Wmat is NULL and the original
      # `Wmat[indx_mship, now_rem]` errored ("incorrect number of
      # dimensions"). Fall back to uniform sampling in that case.
      if (is.null(Wmat)) {
        indx2 <- sample(now_rem, 1)
      } else {
        indx2 <- sample(now_rem, 1, prob = Wmat[indx_mship, now_rem])
      }
    } else {
      # sample() on a length-1 vector would sample 1:x; take it directly.
      indx2 <- now_rem
    }
    remaining[indx2] <- FALSE
    edge_list[counter, ] <- edge_set[c(indx, indx2)]
    counter <- counter + 1
  }
  G <- graph.edgelist(edge_list, directed = FALSE)
  return(G)
}
|
/configModel.R
|
no_license
|
jpalowitch/configModel
|
R
| false
| false
| 1,457
|
r
|
library(igraph)
# Configuration-model random graph via stub matching. Each node contributes
# as many shuffled "stubs" as its degree; stubs are paired one edge at a
# time, optionally weighted by the block-affinity matrix P.
# Returns an undirected igraph graph built from the paired stubs.
configModel <- function (degrees = rep(n, floor(n / 2)),
                         membership = rep(1, n), P = NULL) {
  # --- validate P -------------------------------------------------------
  if (!is.null(P)) {
    if (!is.matrix(P))
      stop('P must be a matrix\n')
    K <- max(membership)
    if (nrow(P) != K)
      stop('P must have K rows\n')
  }
  # --- validate degrees / membership -----------------------------------
  n <- length(degrees)
  dT <- sum(degrees)
  if (dT %% 2 != 0) stop('sum of degrees must be even\n')
  if (length(membership) != length(degrees))
    stop('Number of degrees must equal number of memberships\n')
  # --- build the shuffled stub list ------------------------------------
  edge_list <- matrix(0, dT / 2, 2)
  stubs <- sample(unlist(lapply(1:n, function (u) rep(u, degrees[u]))))
  open <- rep(TRUE, length(stubs))
  stub_block <- membership[stubs]
  # Expand P to stub resolution: row k holds P[k, block of each stub].
  if (is.matrix(P)) {
    Wmat <- t(apply(P, 1, function (r) r[stub_block]))
  } else {
    Wmat <- NULL
  }
  # --- pair stubs until none remain open -------------------------------
  pair <- 1
  while (sum(open) > 0) {
    cat(pair, '/', dT / 2, '\n')
    first <- min(which(open))
    open[first] <- FALSE
    first_block <- stub_block[first]
    candidates <- which(open)
    if (length(candidates) > 1) {
      second <- sample(candidates, 1, prob = Wmat[first_block, candidates])
    } else {
      # Avoid sample()'s length-1 surprise: take the last stub directly.
      second <- candidates
    }
    open[second] <- FALSE
    edge_list[pair, ] <- stubs[c(first, second)]
    pair <- pair + 1
  }
  graph.edgelist(edge_list, directed = FALSE)
}
|
#' Summarize the Multiple Imputation of Multivariate Regression Discontinuity Estimation
#'
#' \code{summary.mrdi} is a \code{summary} method for class \code{"mrdi"}
#'
#' @method summary mrdi
#'
#' @param object An object of class \code{"mrdi"}, usually a result of a call to
#' \code{\link{mrd_impute}} with \code{"front"} method.
#' @param level Numerical value between 0 and 1. Confidence level for confidence intervals.
#' @param digits Number of digits to display.
#' @param ... Additional arguments.
#'
#' @return \code{summary.mrdi} returns a list which has the following components:
#' \item{coefficients}{A matrix containing estimates and confidence intervals (if applicable)
#' for the complete model.}
#' \item{ht_coefficients}{A matrix containing estimates and confidence intervals (if applicable)
#' for the heterogeneous treatment model.}
#' \item{t_coefficients}{A matrix containing estimates and confidence intervals (if applicable)
#' for the treatment only model.}
#'
#' @importFrom stats residuals
#'
#' @include mrd_impute.R
#'
#' @export
summary.mrdi <- function(object, level = 0.95, digits = max(3, getOption("digits") - 3), ...) {
call.copy <- object$call
if ("data" %in% names(call.copy) && length(call.copy$data) > 1) {
call.copy$data <- "(.)"
}
if ("subset" %in% names(call.copy) && length(call.copy$subset) > 1) {
call.copy$subset <- "(.)"
}
cat("\n")
cat("Call:\n")
# print(object$call)
print(call.copy)
cat("\n")
obj.front = object$front$tau_MRD
n = dim(obj.front$est)
if (!all(is.na(obj.front$z))){
alpha <- 1 - level
lower.CL <- obj.front$est - qt(1-alpha/2, df = obj.front$df) * obj.front$se
upper.CL <- obj.front$est + qt(1-alpha/2, df = obj.front$df) * obj.front$se
stars <- matrix(NA, n[1], n[2])
rownames(stars) <- rownames(obj.front$est)
colnames(stars) <- colnames(obj.front$est)
for (i in 1:n[1]) {
for (j in 1:n[2]){
stars[i,j] <- if (is.na(obj.front$p[i,j]))
" " else if (obj.front$p[i,j] < 0.001)
"***" else if (obj.front$p[i,j] < 0.01)
"**" else if (obj.front$p[i,j] < 0.05)
"*" else if (obj.front$p[i,j] < 0.1)
"." else " "
}
}
}
if (!all(is.na(obj.front$z))){
out.all.param <- cbind(obj.front$est['Param',], obj.front$se['Param',], lower.CL['Param',], upper.CL['Param',],
obj.front$z['Param',], obj.front$df['Param',], obj.front$p['Param',])
outmat.param <- cbind(apply(out.all.param, 2, function(x) format(x, digits = digits)),
" " = stars['Param',])
out.all.bw <- cbind(obj.front$est['bw',], obj.front$se['bw',], lower.CL['bw',], upper.CL['bw',],
obj.front$z['bw',], obj.front$df['bw',], obj.front$p['bw',])
outmat.bw <- cbind(apply(out.all.bw, 2, function(x) format(x, digits = digits)),
" " = stars['bw',])
out.all.half <- cbind(obj.front$est['Half-bw',], obj.front$se['Half-bw',], lower.CL['Half-bw',], upper.CL['Half-bw',],
obj.front$z['Half-bw',], obj.front$df['Half-bw',], obj.front$p['Half-bw',])
outmat.half <- cbind(apply(out.all.half, 2, function(x) format(x, digits = digits)),
" " = stars['Half-bw',])
out.all.double <- cbind(obj.front$est['Double-bw',], obj.front$se['Double-bw',], lower.CL['Double-bw',], upper.CL['Double-bw',],
obj.front$z['Double-bw',], obj.front$df['Double-bw',], obj.front$p['Double-bw',])
outmat.double <- cbind(apply(out.all.double, 2, function(x) format(x, digits = digits)),
" " = stars['Double-bw',])
colnames(out.all.param) = colnames(out.all.bw) = colnames(out.all.half) = colnames(out.all.double) <-
c("Estimate", "Std. Error", "lower.CL", "upper.CL", "t value", "df", "Pr(>|t|)")
colnames(outmat.param) = colnames(outmat.bw) = colnames(outmat.half) = colnames(outmat.double) <-
c("Estimate", "Std. Error", "lower.CL", "upper.CL", "t value", "df", "Pr(>|t|)", "")
} else {
out.all.param <- matrix(obj.front$est['Param',], ncol = 1)
out.all.bw <- matrix(obj.front$est['bw',], ncol = 1)
out.all.half <- matrix(obj.front$est['Half-bw',], ncol = 1)
out.all.double <- matrix(obj.front$est['Double-bw',], ncol = 1)
rownames(out.all.param) = rownames(out.all.bw) = rownames(out.all.half) =
rownames(out.all.double) <- colnames(obj.front$est)
colnames(out.all.param) = colnames(out.all.bw) = colnames(out.all.half) =
colnames(out.all.double) <- c("Estimate")
outmat.param <- apply(out.all.param, 2, function(x) format(x, digits = digits))
outmat.bw <- apply(out.all.bw, 2, function(x) format(x, digits = digits))
outmat.half <- apply(out.all.half, 2, function(x) format(x, digits = digits))
outmat.double <- apply(out.all.double, 2, function(x) format(x, digits = digits))
}
cat("Estimates for Complete Model:\n")
if (!all(is.na(obj.front$z))){
out <- list('Param' = out.all.param[1:3,], 'bw' = out.all.bw[1:3,],
'Half-bw' = out.all.half[1:3,], 'Double-bw' = out.all.double[1:3,])
cat("Parametric:\n")
print.default(outmat.param[1:3,], quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with bandwidth ", format(obj.front$front.bw[1], digits = digits), ":\n",
sep = ''))
print.default(outmat.bw[1:3,], quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with half bandwidth:\n",
sep = ''))
print.default(outmat.half[1:3,], quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with double bandwidth:\n",
sep = ''))
print.default(outmat.double[1:3,], quote = FALSE, print.gap = 2, right = FALSE)
cat("---\n")
cat("Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n")
}else{
out <- list('Param' = matrix(out.all.param[1:3,]), 'bw' = matrix(out.all.bw[1:3,]),
'Half-bw' = matrix(out.all.half[1:3,]), 'Double-bw' = matrix(out.all.double[1:3,]))
colnames(out$'Param') = colnames(out$'bw') = colnames(out$'Half-bw') = colnames(out$'Double-bw') <- colnames(out.all.param)
rownames(out$'Param') = rownames(out$'bw') = rownames(out$'Half-bw') = rownames(out$'Double-bw') <- rownames(out.all.param)[1:3]
cat("Parametric:\n")
print.default(matrix(outmat.param[1:3,], dimnames = list(rownames(out$'Param'), colnames(out$'Param'))),
quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with bandwidth ", format(obj.front$front.bw[1], digits = digits), ":\n",
sep = ''))
print.default(matrix(outmat.bw[1:3,], dimnames = list(rownames(out$'bw'), colnames(out$'bw'))),
quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with half bandwidth:\n",
sep = ''))
print.default(matrix(outmat.half[1:3,], dimnames = list(rownames(out$'Half-bw'), colnames(out$'Half-bw'))),
quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with double bandwidth:\n",
sep = ''))
print.default(matrix(outmat.double[1:3,], dimnames = list(rownames(out$'Double-bw'), colnames(out$'Double-bw'))),
quote = FALSE, print.gap = 2, right = FALSE)
cat("\n")
}
cat("Estimates for Heterogeneous Treatment Model:\n")
if (!all(is.na(obj.front$z))){
ht_out <- list('Param' = out.all.param[4:6,], 'bw' = out.all.bw[4:6,],
'Half-bw' = out.all.half[4:6,], 'Double-bw' = out.all.double[4:6,])
cat("Parametric:\n")
print.default(outmat.param[4:6,], quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with bandwidth ", format(obj.front$front.bw[2], digits = digits), ":\n",
sep = ''))
print.default(outmat.bw[4:6,], quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with half bandwidth:\n",
sep = ''))
print.default(outmat.half[4:6,], quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with double bandwidth:\n",
sep = ''))
print.default(outmat.double[4:6,], quote = FALSE, print.gap = 2, right = FALSE)
cat("---\n")
cat("Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n")
}else{
ht_out <- list('Param' = matrix(out.all.param[4:6,]), 'bw' = matrix(out.all.bw[4:6,]),
'Half-bw' = matrix(out.all.half[4:6,]), 'Double-bw' = matrix(out.all.double[4:6,]))
colnames(ht_out$'Param') = colnames(ht_out$'bw') = colnames(ht_out$'Half-bw') = colnames(ht_out$'Double-bw') <- colnames(out.all.param)
rownames(ht_out$'Param') = rownames(ht_out$'bw') = rownames(ht_out$'Half-bw') = rownames(ht_out$'Double-bw') <- rownames(out.all.param)[4:6]
cat("Parametric:\n")
print.default(matrix(outmat.param[4:6,], dimnames = list(rownames(out$'Param'), colnames(out$'Param'))),
quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with bandwidth ", format(obj.front$front.bw[2], digits = digits), ":\n",
sep = ''))
print.default(matrix(outmat.bw[4:6,], dimnames = list(rownames(out$'bw'), colnames(out$'bw'))),
quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with half bandwidth:\n",
sep = ''))
print.default(matrix(outmat.half[4:6,], dimnames = list(rownames(out$'Half-bw'), colnames(out$'Half-bw'))),
quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with double bandwidth:\n",
sep = ''))
print.default(matrix(outmat.double[4:6,], dimnames = list(rownames(out$'Double-bw'), colnames(out$'Double-bw'))),
quote = FALSE, print.gap = 2, right = FALSE)
cat("\n")
}
cat("Estimates for Treatment Only Model:\n")
if (!all(is.na(obj.front$z))){
t_out <- list('Param' = out.all.param[7:9,], 'bw' = out.all.bw[7:9,],
'Half-bw' = out.all.half[7:9,], 'Double-bw' = out.all.double[7:9,])
cat("Parametric:\n")
print.default(outmat.param[7:9,], quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with bandwidth ", format(obj.front$front.bw[3], digits = digits), ":\n",
sep = ''))
print.default(outmat.bw[7:9,], quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with half bandwidth:\n",
sep = ''))
print.default(outmat.half[7:9,], quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with double bandwidth:\n",
sep = ''))
print.default(outmat.double[7:9,], quote = FALSE, print.gap = 2, right = FALSE)
cat("---\n")
cat("Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n")
}else{
t_out <- list('Param' = matrix(out.all.param[7:9,]), 'bw' = matrix(out.all.bw[7:9,]),
'Half-bw' = matrix(out.all.half[7:9,]), 'Double-bw' = matrix(out.all.double[7:9,]))
colnames(t_out$'Param') = colnames(t_out$'bw') = colnames(t_out$'Half-bw') = colnames(t_out$'Double-bw') <- colnames(out.all.param)
rownames(t_out$'Param') = rownames(t_out$'bw') = rownames(t_out$'Half-bw') = rownames(t_out$'Double-bw') <- rownames(out.all.param)[7:9]
cat("Parametric:\n")
print.default(matrix(outmat.param[7:9,], dimnames = list(rownames(out$'Param'), colnames(out$'Param'))),
quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with bandwidth ", format(obj.front$front.bw[3], digits = digits), ":\n",
sep = ''))
print.default(matrix(outmat.bw[7:9,], dimnames = list(rownames(out$'bw'), colnames(out$'bw'))),
quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with half bandwidth:\n",
sep = ''))
print.default(matrix(outmat.half[7:9,], dimnames = list(rownames(out$'Half-bw'), colnames(out$'Half-bw'))),
quote = FALSE, print.gap = 2, right = FALSE)
cat(paste("Non-parametric with double bandwidth:\n",
sep = ''))
print.default(matrix(outmat.double[7:9,], dimnames = list(rownames(out$'Double-bw'), colnames(out$'Double-bw'))),
quote = FALSE, print.gap = 2, right = FALSE)
cat("\n")
}
all_out <- list(coefficients = out, ht_coefficients = ht_out, t_coefficients = t_out)
return(invisible(all_out))
}
|
/R/summary.mrdi.R
|
no_license
|
kimberlywebb/rddapp
|
R
| false
| false
| 12,587
|
r
|
#' Summarize the Multiple Imputation of Multivariate Regression Discontinuity Estimation
#'
#' \code{summary.mrdi} is a \code{summary} method for class \code{"mrdi"}
#'
#' @method summary mrdi
#'
#' @param object An object of class \code{"mrdi"}, usually a result of a call to
#'    \code{\link{mrd_impute}} with \code{"front"} method.
#' @param level Numerical value between 0 and 1. Confidence level for confidence intervals.
#' @param digits Number of digits to display.
#' @param ... Additional arguments.
#'
#' @return \code{summary.mrdi} returns a list which has the following components:
#' \item{coefficients}{A matrix containing estimates and confidence intervals (if applicable)
#'    for the complete model.}
#' \item{ht_coefficients}{A matrix containing estimates and confidence intervals (if applicable)
#'    for the heterogeneous treatment model.}
#' \item{t_coefficients}{A matrix containing estimates and confidence intervals (if applicable)
#'    for the treatment only model.}
#'
#' @importFrom stats qt residuals
#'
#' @include mrd_impute.R
#'
#' @export
summary.mrdi <- function(object, level = 0.95, digits = max(3, getOption("digits") - 3), ...) {
  call.copy <- object$call
  # Abbreviate long 'data'/'subset' arguments so the echoed call stays compact.
  if ("data" %in% names(call.copy) && length(call.copy$data) > 1) {
    call.copy$data <- "(.)"
  }
  if ("subset" %in% names(call.copy) && length(call.copy$subset) > 1) {
    call.copy$subset <- "(.)"
  }
  cat("\n")
  cat("Call:\n")
  # print(object$call)
  print(call.copy)
  cat("\n")
  obj.front <- object$front$tau_MRD
  n <- dim(obj.front$est)
  # If t statistics are available (z not all NA), build confidence limits and
  # significance stars; otherwise only point estimates are reported below.
  if (!all(is.na(obj.front$z))){
    alpha <- 1 - level
    lower.CL <- obj.front$est - qt(1-alpha/2, df = obj.front$df) * obj.front$se
    upper.CL <- obj.front$est + qt(1-alpha/2, df = obj.front$df) * obj.front$se
    stars <- matrix(NA, n[1], n[2])
    rownames(stars) <- rownames(obj.front$est)
    colnames(stars) <- colnames(obj.front$est)
    for (i in 1:n[1]) {
      for (j in 1:n[2]){
        stars[i,j] <- if (is.na(obj.front$p[i,j]))
          " " else if (obj.front$p[i,j] < 0.001)
          "***" else if (obj.front$p[i,j] < 0.01)
          "**" else if (obj.front$p[i,j] < 0.05)
          "*" else if (obj.front$p[i,j] < 0.1)
          "." else " "
      }
    }
  }
  # Assemble one (estimates x statistics) table per bandwidth choice; the
  # 9 estimate rows are sliced 1:3 / 4:6 / 7:9 for the three models below.
  if (!all(is.na(obj.front$z))){
    out.all.param <- cbind(obj.front$est['Param',], obj.front$se['Param',], lower.CL['Param',], upper.CL['Param',],
                           obj.front$z['Param',], obj.front$df['Param',], obj.front$p['Param',])
    outmat.param <- cbind(apply(out.all.param, 2, function(x) format(x, digits = digits)),
                          " " = stars['Param',])
    out.all.bw <- cbind(obj.front$est['bw',], obj.front$se['bw',], lower.CL['bw',], upper.CL['bw',],
                        obj.front$z['bw',], obj.front$df['bw',], obj.front$p['bw',])
    outmat.bw <- cbind(apply(out.all.bw, 2, function(x) format(x, digits = digits)),
                       " " = stars['bw',])
    out.all.half <- cbind(obj.front$est['Half-bw',], obj.front$se['Half-bw',], lower.CL['Half-bw',], upper.CL['Half-bw',],
                          obj.front$z['Half-bw',], obj.front$df['Half-bw',], obj.front$p['Half-bw',])
    outmat.half <- cbind(apply(out.all.half, 2, function(x) format(x, digits = digits)),
                         " " = stars['Half-bw',])
    out.all.double <- cbind(obj.front$est['Double-bw',], obj.front$se['Double-bw',], lower.CL['Double-bw',], upper.CL['Double-bw',],
                            obj.front$z['Double-bw',], obj.front$df['Double-bw',], obj.front$p['Double-bw',])
    outmat.double <- cbind(apply(out.all.double, 2, function(x) format(x, digits = digits)),
                           " " = stars['Double-bw',])
    colnames(out.all.param) = colnames(out.all.bw) = colnames(out.all.half) = colnames(out.all.double) <-
      c("Estimate", "Std. Error", "lower.CL", "upper.CL", "t value", "df", "Pr(>|t|)")
    colnames(outmat.param) = colnames(outmat.bw) = colnames(outmat.half) = colnames(outmat.double) <-
      c("Estimate", "Std. Error", "lower.CL", "upper.CL", "t value", "df", "Pr(>|t|)", "")
  } else {
    out.all.param <- matrix(obj.front$est['Param',], ncol = 1)
    out.all.bw <- matrix(obj.front$est['bw',], ncol = 1)
    out.all.half <- matrix(obj.front$est['Half-bw',], ncol = 1)
    out.all.double <- matrix(obj.front$est['Double-bw',], ncol = 1)
    rownames(out.all.param) = rownames(out.all.bw) = rownames(out.all.half) =
      rownames(out.all.double) <- colnames(obj.front$est)
    colnames(out.all.param) = colnames(out.all.bw) = colnames(out.all.half) =
      colnames(out.all.double) <- c("Estimate")
    outmat.param <- apply(out.all.param, 2, function(x) format(x, digits = digits))
    outmat.bw <- apply(out.all.bw, 2, function(x) format(x, digits = digits))
    outmat.half <- apply(out.all.half, 2, function(x) format(x, digits = digits))
    outmat.double <- apply(out.all.double, 2, function(x) format(x, digits = digits))
  }
  cat("Estimates for Complete Model:\n")
  if (!all(is.na(obj.front$z))){
    out <- list('Param' = out.all.param[1:3,], 'bw' = out.all.bw[1:3,],
                'Half-bw' = out.all.half[1:3,], 'Double-bw' = out.all.double[1:3,])
    cat("Parametric:\n")
    print.default(outmat.param[1:3,], quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with bandwidth ", format(obj.front$front.bw[1], digits = digits), ":\n",
              sep = ''))
    print.default(outmat.bw[1:3,], quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with half bandwidth:\n",
              sep = ''))
    print.default(outmat.half[1:3,], quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with double bandwidth:\n",
              sep = ''))
    print.default(outmat.double[1:3,], quote = FALSE, print.gap = 2, right = FALSE)
    cat("---\n")
    cat("Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n")
  }else{
    out <- list('Param' = matrix(out.all.param[1:3,]), 'bw' = matrix(out.all.bw[1:3,]),
                'Half-bw' = matrix(out.all.half[1:3,]), 'Double-bw' = matrix(out.all.double[1:3,]))
    colnames(out$'Param') = colnames(out$'bw') = colnames(out$'Half-bw') = colnames(out$'Double-bw') <- colnames(out.all.param)
    rownames(out$'Param') = rownames(out$'bw') = rownames(out$'Half-bw') = rownames(out$'Double-bw') <- rownames(out.all.param)[1:3]
    cat("Parametric:\n")
    print.default(matrix(outmat.param[1:3,], dimnames = list(rownames(out$'Param'), colnames(out$'Param'))),
                  quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with bandwidth ", format(obj.front$front.bw[1], digits = digits), ":\n",
              sep = ''))
    print.default(matrix(outmat.bw[1:3,], dimnames = list(rownames(out$'bw'), colnames(out$'bw'))),
                  quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with half bandwidth:\n",
              sep = ''))
    print.default(matrix(outmat.half[1:3,], dimnames = list(rownames(out$'Half-bw'), colnames(out$'Half-bw'))),
                  quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with double bandwidth:\n",
              sep = ''))
    print.default(matrix(outmat.double[1:3,], dimnames = list(rownames(out$'Double-bw'), colnames(out$'Double-bw'))),
                  quote = FALSE, print.gap = 2, right = FALSE)
    cat("\n")
  }
  cat("Estimates for Heterogeneous Treatment Model:\n")
  if (!all(is.na(obj.front$z))){
    ht_out <- list('Param' = out.all.param[4:6,], 'bw' = out.all.bw[4:6,],
                   'Half-bw' = out.all.half[4:6,], 'Double-bw' = out.all.double[4:6,])
    cat("Parametric:\n")
    print.default(outmat.param[4:6,], quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with bandwidth ", format(obj.front$front.bw[2], digits = digits), ":\n",
              sep = ''))
    print.default(outmat.bw[4:6,], quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with half bandwidth:\n",
              sep = ''))
    print.default(outmat.half[4:6,], quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with double bandwidth:\n",
              sep = ''))
    print.default(outmat.double[4:6,], quote = FALSE, print.gap = 2, right = FALSE)
    cat("---\n")
    cat("Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n")
  }else{
    ht_out <- list('Param' = matrix(out.all.param[4:6,]), 'bw' = matrix(out.all.bw[4:6,]),
                   'Half-bw' = matrix(out.all.half[4:6,]), 'Double-bw' = matrix(out.all.double[4:6,]))
    colnames(ht_out$'Param') = colnames(ht_out$'bw') = colnames(ht_out$'Half-bw') = colnames(ht_out$'Double-bw') <- colnames(out.all.param)
    rownames(ht_out$'Param') = rownames(ht_out$'bw') = rownames(ht_out$'Half-bw') = rownames(ht_out$'Double-bw') <- rownames(out.all.param)[4:6]
    cat("Parametric:\n")
    # BUGFIX: use ht_out (rows 4:6) for dimnames here, not out (rows 1:3),
    # so the heterogeneous-treatment rows are printed with their own labels.
    print.default(matrix(outmat.param[4:6,], dimnames = list(rownames(ht_out$'Param'), colnames(ht_out$'Param'))),
                  quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with bandwidth ", format(obj.front$front.bw[2], digits = digits), ":\n",
              sep = ''))
    print.default(matrix(outmat.bw[4:6,], dimnames = list(rownames(ht_out$'bw'), colnames(ht_out$'bw'))),
                  quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with half bandwidth:\n",
              sep = ''))
    print.default(matrix(outmat.half[4:6,], dimnames = list(rownames(ht_out$'Half-bw'), colnames(ht_out$'Half-bw'))),
                  quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with double bandwidth:\n",
              sep = ''))
    print.default(matrix(outmat.double[4:6,], dimnames = list(rownames(ht_out$'Double-bw'), colnames(ht_out$'Double-bw'))),
                  quote = FALSE, print.gap = 2, right = FALSE)
    cat("\n")
  }
  cat("Estimates for Treatment Only Model:\n")
  if (!all(is.na(obj.front$z))){
    t_out <- list('Param' = out.all.param[7:9,], 'bw' = out.all.bw[7:9,],
                  'Half-bw' = out.all.half[7:9,], 'Double-bw' = out.all.double[7:9,])
    cat("Parametric:\n")
    print.default(outmat.param[7:9,], quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with bandwidth ", format(obj.front$front.bw[3], digits = digits), ":\n",
              sep = ''))
    print.default(outmat.bw[7:9,], quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with half bandwidth:\n",
              sep = ''))
    print.default(outmat.half[7:9,], quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with double bandwidth:\n",
              sep = ''))
    print.default(outmat.double[7:9,], quote = FALSE, print.gap = 2, right = FALSE)
    cat("---\n")
    cat("Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n")
  }else{
    t_out <- list('Param' = matrix(out.all.param[7:9,]), 'bw' = matrix(out.all.bw[7:9,]),
                  'Half-bw' = matrix(out.all.half[7:9,]), 'Double-bw' = matrix(out.all.double[7:9,]))
    colnames(t_out$'Param') = colnames(t_out$'bw') = colnames(t_out$'Half-bw') = colnames(t_out$'Double-bw') <- colnames(out.all.param)
    rownames(t_out$'Param') = rownames(t_out$'bw') = rownames(t_out$'Half-bw') = rownames(t_out$'Double-bw') <- rownames(out.all.param)[7:9]
    cat("Parametric:\n")
    # BUGFIX: use t_out (rows 7:9) for dimnames here, not out (rows 1:3).
    print.default(matrix(outmat.param[7:9,], dimnames = list(rownames(t_out$'Param'), colnames(t_out$'Param'))),
                  quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with bandwidth ", format(obj.front$front.bw[3], digits = digits), ":\n",
              sep = ''))
    print.default(matrix(outmat.bw[7:9,], dimnames = list(rownames(t_out$'bw'), colnames(t_out$'bw'))),
                  quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with half bandwidth:\n",
              sep = ''))
    print.default(matrix(outmat.half[7:9,], dimnames = list(rownames(t_out$'Half-bw'), colnames(t_out$'Half-bw'))),
                  quote = FALSE, print.gap = 2, right = FALSE)
    cat(paste("Non-parametric with double bandwidth:\n",
              sep = ''))
    print.default(matrix(outmat.double[7:9,], dimnames = list(rownames(t_out$'Double-bw'), colnames(t_out$'Double-bw'))),
                  quote = FALSE, print.gap = 2, right = FALSE)
    cat("\n")
  }
  all_out <- list(coefficients = out, ht_coefficients = ht_out, t_coefficients = t_out)
  return(invisible(all_out))
}
|
# Build a sparse treated-to-control distance structure for optimal matching.
#
# z:            binary treatment indicator (1 = treated, 0 = control).
# X:            covariate matrix or data frame. Character columns become
#               factors, factors become dummies, and missing numeric values
#               are mean-imputed with missingness-indicator columns added.
# exact:        optional vector; matching is only allowed within equal values.
# calip.option: 'propensity' (caliper on an internally fitted propensity
#               score), 'user' (caliper on calip.cov), or 'none'.
# calip.cov:    caliper covariate, required when calip.option == 'user'.
# caliper:      caliper width, in SDs of the caliper covariate.
# verbose:      print progress/diagnostic messages?
#
# Returns a list with one element per treated unit: a named numeric vector of
# rank-based Mahalanobis distances to each eligible control (names are control
# indices); treated-control pairs violating the caliper are dropped.
#
# NOTE(review): depends on laply() (plyr) and ginv() (MASS) being available
# in the calling namespace.
build.dist.struct <-
function(z, X, exact = NULL, calip.option = 'propensity', calip.cov = NULL, caliper = 0.2, verbose = FALSE){
  cal.penalty <- 100  # NOTE(review): assigned but never used in this function
  if(is.null(exact)) exact = rep(1, length(z))  # default: everyone in one stratum
  if(!(calip.option %in% c('propensity','user','none'))){
    stop('Invalid calip.option specified.')
  }
  if (is.vector(X)) X <- matrix(X, length(X), 1)
  if(!(length(z) == (dim(X)[1]))){
    stop("Length of z does not match row count in X")
  }
  if(!(length(exact) == length(z))){
    stop("Length of exact does not match length of z")
  }
  if(!(all((z == 1) | (z == 0)))){
    stop("The z argument must contain only 1s and 0s")
  }
  if(is.data.frame(X) || is.character(X)){
    if(!is.data.frame(X)) X <- as.data.frame(X)
    # locate character columns so they can be treated as factors
    X.chars <- which(laply(X, function(y) 'character' %in% class(y)))
    if(length(X.chars) > 0){
      if (verbose) print('Character variables found in X, converting to factors.')
      for(i in X.chars){
        X[,i] <- factor(X[,i])
      }
    }
    #if some variables are factors convert to dummies
    X.factors <- which(laply(X, function(y) 'factor' %in% class(y)))
    #handle missing data
    for(i in which(laply(X, function(x) any(is.na(x))))){
      if (verbose) print(paste('Missing values found in column', i ,'of X; imputing and adding missingness indicators'))
      if(i %in% X.factors){
        #for factors, make NA a new factor level
        X[,i] <- addNA(X[,i])
      }else{
        #for numeric/logical, impute means and add a new indicator for missingness
        X[[paste(colnames(X)[i],'NA', sep = '')]] <- is.na(X[,i])
        X[which(is.na(X[,i])),i] <- mean(X[,i], na.rm = TRUE)
      }
    }
    # expand each factor into a full set of dummy columns (no intercept);
    # iterate in reverse so earlier column indices stay valid
    for(i in rev(X.factors)){
      dummyXi <- model.matrix(as.formula(
        paste('~',colnames(X)[i], '-1')),data=X)
      X <- cbind(X[,-i], dummyXi)
    }
  }else{
    #handle missing data
    for(i in c(1:ncol(X))){
      if(any(is.na(X[,i]))){
        X <- cbind(X,is.na(X[,i]))
        colnames(X)[ncol(X)] <- paste(colnames(X)[i],'NA', sep = '')
        X[which(is.na(X[,i])),i] <- mean(X[,i], na.rm = TRUE)
      }
    }
  }
  #get rid of columns that do not vary
  varying <- apply(X,2, function(x) length(unique(x)) > 1)
  if(!all(varying) && verbose) print('Constant-value columns found in X, they will not be used to calculate Mahalanobis distance.')
  X <- X[,which(varying),drop = FALSE]
  if (calip.option == 'propensity') {
    # fit a logistic propensity score by hand (intercept column + X) and
    # set the caliper as a fraction of its linear-predictor SD
    calip.cov <- glm.fit(cbind(rep(1, nrow(X)),X), z, family = binomial())$linear.predictors
    cal <- sd(calip.cov) * caliper
  }else if(calip.option == 'user'){
    stopifnot(!is.null(calip.cov))
    cal <- sd(calip.cov) * caliper
  }
  nobs <- length(z)
  # rank-transform each column, then rescale the covariance so each variable
  # has the variance of an untied ranking (rank-based Mahalanobis distance)
  rX <- as.matrix(X)
  for (j in 1:(dim(rX)[2])) rX[, j] <- rank(rX[, j])
  cv <- cov(rX)
  vuntied <- var(1:nobs)
  rat <- sqrt(vuntied/diag(cv))
  if(length(rat) == 1){
    cv <- as.matrix(rat) %*% cv %*% as.matrix(rat)
  }else{
    cv <- diag(rat) %*% cv %*% diag(rat)
  }
  #library(MASS)
  icov <- ginv(cv)  # generalized inverse tolerates a singular covariance
  nums <- 1:nobs
  ctrl.nums <- 1:(sum(z == 0))
  treated <- nums[z == 1]
  #find distance between each treated and each control it will be connected to and store in a distance structure
  dist.struct <- list()
  for (i in c(1:length(treated))) {
    # controls in the same exact-matching stratum as treated unit i
    controls <- nums[(z == 0) & (exact == exact[treated[i]])]
    control.names <- ctrl.nums[exact[z == 0] == exact[treated[i]]]
    costi <- mahalanobis(rX[controls, ,drop=FALSE], rX[treated[i], ], icov, inverted = T)
    if (calip.option != 'none') {
      # forbid (Inf) any pair whose caliper-covariate gap exceeds the caliper
      calip.update <- rep(0, length(costi))
      calip.update[abs(calip.cov[treated[i]] - calip.cov[controls]) - cal > 0] <- Inf
      costi <- costi + calip.update
    }
    names(costi) <- control.names
    dist.struct[[i]] <- costi[is.finite(costi)]  # drop forbidden pairs
  }
  if (sum(laply(dist.struct, length)) == 0) stop('All matches forbidden. Considering using a wider caliper?')
  return(dist.struct)
}
|
/R/build.dist.struct.R
|
no_license
|
cran/rcbalance
|
R
| false
| false
| 3,983
|
r
|
# Build a sparse treated-to-control distance structure for optimal matching.
#
# z:            binary treatment indicator (1 = treated, 0 = control).
# X:            covariate matrix or data frame. Character columns become
#               factors, factors become dummies, and missing numeric values
#               are mean-imputed with missingness-indicator columns added.
# exact:        optional vector; matching is only allowed within equal values.
# calip.option: 'propensity' (caliper on an internally fitted propensity
#               score), 'user' (caliper on calip.cov), or 'none'.
# calip.cov:    caliper covariate, required when calip.option == 'user'.
# caliper:      caliper width, in SDs of the caliper covariate.
# verbose:      print progress/diagnostic messages?
#
# Returns a list with one element per treated unit: a named numeric vector of
# rank-based Mahalanobis distances to each eligible control (names are control
# indices); treated-control pairs violating the caliper are dropped.
#
# NOTE(review): depends on laply() (plyr) and ginv() (MASS) being available
# in the calling namespace.
build.dist.struct <-
function(z, X, exact = NULL, calip.option = 'propensity', calip.cov = NULL, caliper = 0.2, verbose = FALSE){
  cal.penalty <- 100  # NOTE(review): assigned but never used in this function
  if(is.null(exact)) exact = rep(1, length(z))  # default: everyone in one stratum
  if(!(calip.option %in% c('propensity','user','none'))){
    stop('Invalid calip.option specified.')
  }
  if (is.vector(X)) X <- matrix(X, length(X), 1)
  if(!(length(z) == (dim(X)[1]))){
    stop("Length of z does not match row count in X")
  }
  if(!(length(exact) == length(z))){
    stop("Length of exact does not match length of z")
  }
  if(!(all((z == 1) | (z == 0)))){
    stop("The z argument must contain only 1s and 0s")
  }
  if(is.data.frame(X) || is.character(X)){
    if(!is.data.frame(X)) X <- as.data.frame(X)
    # locate character columns so they can be treated as factors
    X.chars <- which(laply(X, function(y) 'character' %in% class(y)))
    if(length(X.chars) > 0){
      if (verbose) print('Character variables found in X, converting to factors.')
      for(i in X.chars){
        X[,i] <- factor(X[,i])
      }
    }
    #if some variables are factors convert to dummies
    X.factors <- which(laply(X, function(y) 'factor' %in% class(y)))
    #handle missing data
    for(i in which(laply(X, function(x) any(is.na(x))))){
      if (verbose) print(paste('Missing values found in column', i ,'of X; imputing and adding missingness indicators'))
      if(i %in% X.factors){
        #for factors, make NA a new factor level
        X[,i] <- addNA(X[,i])
      }else{
        #for numeric/logical, impute means and add a new indicator for missingness
        X[[paste(colnames(X)[i],'NA', sep = '')]] <- is.na(X[,i])
        X[which(is.na(X[,i])),i] <- mean(X[,i], na.rm = TRUE)
      }
    }
    # expand each factor into a full set of dummy columns (no intercept);
    # iterate in reverse so earlier column indices stay valid
    for(i in rev(X.factors)){
      dummyXi <- model.matrix(as.formula(
        paste('~',colnames(X)[i], '-1')),data=X)
      X <- cbind(X[,-i], dummyXi)
    }
  }else{
    #handle missing data
    for(i in c(1:ncol(X))){
      if(any(is.na(X[,i]))){
        X <- cbind(X,is.na(X[,i]))
        colnames(X)[ncol(X)] <- paste(colnames(X)[i],'NA', sep = '')
        X[which(is.na(X[,i])),i] <- mean(X[,i], na.rm = TRUE)
      }
    }
  }
  #get rid of columns that do not vary
  varying <- apply(X,2, function(x) length(unique(x)) > 1)
  if(!all(varying) && verbose) print('Constant-value columns found in X, they will not be used to calculate Mahalanobis distance.')
  X <- X[,which(varying),drop = FALSE]
  if (calip.option == 'propensity') {
    # fit a logistic propensity score by hand (intercept column + X) and
    # set the caliper as a fraction of its linear-predictor SD
    calip.cov <- glm.fit(cbind(rep(1, nrow(X)),X), z, family = binomial())$linear.predictors
    cal <- sd(calip.cov) * caliper
  }else if(calip.option == 'user'){
    stopifnot(!is.null(calip.cov))
    cal <- sd(calip.cov) * caliper
  }
  nobs <- length(z)
  # rank-transform each column, then rescale the covariance so each variable
  # has the variance of an untied ranking (rank-based Mahalanobis distance)
  rX <- as.matrix(X)
  for (j in 1:(dim(rX)[2])) rX[, j] <- rank(rX[, j])
  cv <- cov(rX)
  vuntied <- var(1:nobs)
  rat <- sqrt(vuntied/diag(cv))
  if(length(rat) == 1){
    cv <- as.matrix(rat) %*% cv %*% as.matrix(rat)
  }else{
    cv <- diag(rat) %*% cv %*% diag(rat)
  }
  #library(MASS)
  icov <- ginv(cv)  # generalized inverse tolerates a singular covariance
  nums <- 1:nobs
  ctrl.nums <- 1:(sum(z == 0))
  treated <- nums[z == 1]
  #find distance between each treated and each control it will be connected to and store in a distance structure
  dist.struct <- list()
  for (i in c(1:length(treated))) {
    # controls in the same exact-matching stratum as treated unit i
    controls <- nums[(z == 0) & (exact == exact[treated[i]])]
    control.names <- ctrl.nums[exact[z == 0] == exact[treated[i]]]
    costi <- mahalanobis(rX[controls, ,drop=FALSE], rX[treated[i], ], icov, inverted = T)
    if (calip.option != 'none') {
      # forbid (Inf) any pair whose caliper-covariate gap exceeds the caliper
      calip.update <- rep(0, length(costi))
      calip.update[abs(calip.cov[treated[i]] - calip.cov[controls]) - cal > 0] <- Inf
      costi <- costi + calip.update
    }
    names(costi) <- control.names
    dist.struct[[i]] <- costi[is.finite(costi)]  # drop forbidden pairs
  }
  if (sum(laply(dist.struct, length)) == 0) stop('All matches forbidden. Considering using a wider caliper?')
  return(dist.struct)
}
|
###### categorical variable and factors
marital.status <- c("Married","Married","Single","Married","Divorced","Widowed","Divorced")
?str() #### display the structure of the object
str(marital.status)
typeof(marital.status)
##### factor and levels
marital.factor <- factor(marital.status)
marital.factor
str(marital.factor)
typeof(marital.factor)
### explicit level/order
new.factor <- factor(marital.status, levels = c("Single","Married","Divorced","Widowed"))
new.factor
str(new.factor)
#### rename levels
# BUGFIX: base R has no labels<- replacement function; use levels<- to rename
levels(new.factor) <- c("S","M","D","W")
str(new.factor)
new.factor
### in one line
new.factor <- factor(marital.status, levels = c("Single","Married","Divorced","Widowed"),
                     labels = c("S","M","D","W"))
str(new.factor)
#### order the factor- ordinal  (use TRUE, not T: T can be reassigned)
new.factor <- factor(marital.status, levels = c("Single","Married","Divorced","Widowed"),
                     labels = c("S","M","D","W"), ordered = TRUE)
str(new.factor)
|
/CodeBase/5_Categorical_Variable_And_factors.R
|
no_license
|
akashishu777/R-Practice
|
R
| false
| false
| 964
|
r
|
###### categorical variable and factors
marital.status <- c("Married","Married","Single","Married","Divorced","Widowed","Divorced")
?str() #### display the structure of the object
str(marital.status)
typeof(marital.status)
##### factor and levels
marital.factor <- factor(marital.status)
marital.factor
str(marital.factor)
typeof(marital.factor)
### explicit level/order
new.factor <- factor(marital.status, levels = c("Single","Married","Divorced","Widowed"))
new.factor
str(new.factor)
#### rename levels
# BUGFIX: base R has no labels<- replacement function; use levels<- to rename
levels(new.factor) <- c("S","M","D","W")
str(new.factor)
new.factor
### in one line
new.factor <- factor(marital.status, levels = c("Single","Married","Divorced","Widowed"),
                     labels = c("S","M","D","W"))
str(new.factor)
#### order the factor- ordinal  (use TRUE, not T: T can be reassigned)
new.factor <- factor(marital.status, levels = c("Single","Married","Divorced","Widowed"),
                     labels = c("S","M","D","W"), ordered = TRUE)
str(new.factor)
|
# Factory returning a prompt closure that remembers the process time observed
# at its previous invocation; the closure reports CPU time (total minus the
# wall-clock "elapsed" component) spent since then, when it exceeds a second.
prompt_runtime_factory <- function() {
  previous <- proc.time()
  previous[] <- NA_real_  # no reference point yet -> first call shows a plain prompt
  function(...) {
    delta <- proc.time() - previous
    cpu <- sum(delta) - delta["elapsed"]  # user + system time (incl. children)
    previous <<- proc.time()
    if (!is.na(cpu) && cpu > 1) {
      paste0(round(cpu), "s > ")
    } else {
      "> "
    }
  }
}
#' A prompt that shows the CPU time used by the last top level expression
#'
#' @param ... Arguments, ignored.
#'
#' @family example prompts
#' @export
prompt_runtime <- prompt_runtime_factory()
#' A prompt that shows the status (OK or error) of the last expression
#'
#' @param expr Evaluated expression.
#' @param value Its value.
#' @param ok Whether the evaluation succeeded.
#' @param visible Whether the result is visible.
#'
#' @importFrom clisymbols symbol
#' @family example prompts
#' @export
prompt_error <- function(expr, value, ok, visible) {
  # Tick on success, cross on failure; either way followed by a pointer glyph.
  mark <- if (ok) symbol$tick else symbol$cross
  paste0(mark, " ", symbol$pointer, " ")
}
# Error hook: mark the prompt as failed, then chain to any previously
# registered error handler stored in prompt_env.
prompt_error_hook <- function() {
  update_prompt(expr = NA, value = NA, ok = FALSE, visible = NA)
  handler <- prompt_env$error
  # is.function(NULL) is FALSE, so a single check suffices
  if (is.function(handler)) handler()
}
# Factory for a prompt closure reporting the R process's current memory use
# via the memuse package; size/unit are cached in the enclosing environment.
prompt_memuse_factory <- function() {
  size <- 0
  unit <- "MiB"
  function(...) {
    usage <- memuse::Sys.procmem()[[1]]
    size <<- memuse::size(usage)
    unit <<- memuse::unit(usage)
    paste0(round(size, 1), " ", unit, " ", symbol$pointer, " ")
  }
}
#' Example prompt that shows the current memory usage of the R process
#'
#' @param ... Ignored.
#'
#' @family example prompts
#' @export
prompt_memuse <- prompt_memuse_factory()
|
/R/prompts.R
|
no_license
|
jimhester/prompt
|
R
| false
| false
| 1,638
|
r
|
# Factory returning a prompt closure that remembers the process time observed
# at its previous invocation; the closure reports CPU time (total minus the
# wall-clock "elapsed" component) spent since then, when it exceeds a second.
prompt_runtime_factory <- function() {
  previous <- proc.time()
  previous[] <- NA_real_  # no reference point yet -> first call shows a plain prompt
  function(...) {
    delta <- proc.time() - previous
    cpu <- sum(delta) - delta["elapsed"]  # user + system time (incl. children)
    previous <<- proc.time()
    if (!is.na(cpu) && cpu > 1) {
      paste0(round(cpu), "s > ")
    } else {
      "> "
    }
  }
}
#' A prompt that shows the CPU time used by the last top level expression
#'
#' @param ... Arguments, ignored.
#'
#' @family example prompts
#' @export
prompt_runtime <- prompt_runtime_factory()
#' A prompt that shows the status (OK or error) of the last expression
#'
#' @param expr Evaluated expression.
#' @param value Its value.
#' @param ok Whether the evaluation succeeded.
#' @param visible Whether the result is visible.
#'
#' @importFrom clisymbols symbol
#' @family example prompts
#' @export
prompt_error <- function(expr, value, ok, visible) {
  # Tick on success, cross on failure; either way followed by a pointer glyph.
  mark <- if (ok) symbol$tick else symbol$cross
  paste0(mark, " ", symbol$pointer, " ")
}
# Error hook: mark the prompt as failed, then chain to any previously
# registered error handler stored in prompt_env.
prompt_error_hook <- function() {
  update_prompt(expr = NA, value = NA, ok = FALSE, visible = NA)
  handler <- prompt_env$error
  # is.function(NULL) is FALSE, so a single check suffices
  if (is.function(handler)) handler()
}
# Factory for a prompt closure reporting the R process's current memory use
# via the memuse package; size/unit are cached in the enclosing environment.
prompt_memuse_factory <- function() {
  size <- 0
  unit <- "MiB"
  function(...) {
    usage <- memuse::Sys.procmem()[[1]]
    size <<- memuse::size(usage)
    unit <<- memuse::unit(usage)
    paste0(round(size, 1), " ", unit, " ", symbol$pointer, " ")
  }
}
#' Example prompt that shows the current memory usage of the R process
#'
#' @param ... Ignored.
#'
#' @family example prompts
#' @export
prompt_memuse <- prompt_memuse_factory()
|
#' Plot Google Earth images obtained through dismo::gmap
#'
#' This function is very slightly modified from \code{.plotCT} function in \code{raster} package to avoid producing an empty plot before the actual Google image.
#'
#' @param x RasterLayer, as obtained through \code{\link[dismo]{gmap}}.
#' @param maxpixels integer > 0. Maximum number of cells to use for the plot. If \code{maxpixels < ncell(x)}, \code{sampleRegular} is used before plotting. If \code{gridded=TRUE} maxpixels may be ignored to get a larger sample.
#' @param ext An extent object to zoom in a region
#' @param interpolate Logical. Should the image be interpolated (smoothed)?
#' @param axes not used
#' @param main character. Main plot title
#' @param xlab Optional. x-axis label
#' @param ylab Optional. y-axis label
#' @param asp not used
#' @param add Logical. Add to current plot?
#' @param addfun Function to add additional items such as points or polygons to the plot (map). Typically containing statements like "points(xy); plot(polygons, add=TRUE)". This is particularly useful to add something to each map when plotting a multi-layer Raster* object.
#' @param zlim not used
#' @param zlimcol not used
#' @param ... Graphical parameters. Any argument that can be passed to \code{\link[graphics]{rasterImage}}.
#'
#' @return A plot.
#' @export
#'
#' @examples
#' \dontrun{
#' library(dismo)
#' g <- gmap("France")
#' plot_gmap(g)
#' }
plot_gmap <- function(x, maxpixels=500000, ext=NULL, interpolate=FALSE, axes, main, xlab='', ylab='', asp, add=FALSE, addfun=NULL, zlim=NULL, zlimcol=NULL, ...) {
  # plotting with a color table
  if (missing(main)) {
    main <- ''
  }
  sethook <- FALSE
  if (!add) {
    #graphics::plot.new() # just commented this line to avoid blank plot
    if (missing(axes)) {
      axes <- FALSE
    }
    if (!axes) {
      # if (main != "") { } else {
      old.par <- graphics::par(no.readonly = TRUE)
      graphics::par(plt=c(0,1,0,1))
      sethook <- TRUE
    }
    if (missing(asp)) {
      # approximate aspect ratio correction for unprojected lon/lat rasters
      if (raster::couldBeLonLat(x)) {
        ym <- mean(c(x@extent@ymax, x@extent@ymin))
        asp <- 1/cos((ym * pi)/180)
      } else {
        asp <- 1
      }
    }
  }
  # NOTE(review): colortable() and bbox() below are unqualified and rely on
  # the raster package being attached by the caller.
  coltab <- colortable(x)
  x <- raster::sampleRegular(x, maxpixels, ext=ext, asRaster=TRUE, useGDAL=TRUE)
  z <- raster::getValues(x)
  if (!is.null(zlim)) { # not that relevant here, but for consistency....
    if (is.null(zlimcol)) {
      z[ z<zlim[1] ] <- zlim[1]
      z[ z>zlim[2] ] <- zlim[2]
    } else { #if (is.na(zlimcol)) {
      z[z<zlim[1] | z>zlim[2]] <- NA
    }
  }
  if (NCOL(coltab) == 2) {
    # not implemented
    z <- as.numeric(cut(z, coltab[,1]))
    coltab <- as.vector(coltab[,2])
  }
  z <- z + 1  # shift values so index 1 can stand in for NA cells
  z[is.na(z)] <- 1
  if (! is.null(coltab) ) {
    z <- matrix(coltab[z], nrow=nrow(x), ncol=ncol(x), byrow=TRUE)
    z <- as.raster(z)
  } else {
    z <- matrix(z, nrow=nrow(x), ncol=ncol(x), byrow=TRUE)
    z <- as.raster(z, max=max(z)) #, na.rm=TRUE))
  }
  requireNamespace("grDevices")
  bb <- as.vector(t(bbox(x)))
  if (! add) {
    plot(c(bb[1], bb[2]), c(bb[3], bb[4]), type = "n", xlab=xlab, ylab=ylab, asp=asp, axes=axes, main=main, ...)
  }
  graphics::rasterImage(z, bb[1], bb[3], bb[2], bb[4], interpolate=interpolate, ...)
  if (!is.null(addfun)) {
    if (is.function(addfun)) {
      addfun()
    }
  }
  if (sethook) {
    # Restore graphical parameters (and warning level) on the NEXT plot.new
    setHook("plot.new", function(...) {
      w <- getOption('warn')
      on.exit(options('warn' = w))
      options('warn'=-1)
      # BUGFIX: add = TRUE, otherwise this call replaces the on.exit above
      # and the warn option is left at -1 permanently.
      on.exit(graphics::par(old.par), add = TRUE)
    }, action="replace")
    setHook("plot.new", function(...) setHook("plot.new", NULL, "replace"))
  }
}
|
/R/plot_gmap.R
|
no_license
|
raianu191/rSDM
|
R
| false
| false
| 3,634
|
r
|
#' Plot Google Earth images obtained through dismo::gmap
#'
#' This function is very slightly modified from \code{.plotCT} function in \code{raster} package to avoid producing an empty plot before the actual Google image.
#'
#' @param x RasterLayer, as obtained through \code{\link[dismo]{gmap}}.
#' @param maxpixels integer > 0. Maximum number of cells to use for the plot. If \code{maxpixels < ncell(x)}, \code{sampleRegular} is used before plotting. If \code{gridded=TRUE} maxpixels may be ignored to get a larger sample.
#' @param ext An extent object to zoom in a region
#' @param interpolate Logical. Should the image be interpolated (smoothed)?
#' @param axes not used
#' @param main character. Main plot title
#' @param xlab Optional. x-axis label
#' @param ylab Optional. y-axis label
#' @param asp not used
#' @param add Logical. Add to current plot?
#' @param addfun Function to add additional items such as points or polygons to the plot (map). Typically containing statements like "points(xy); plot(polygons, add=TRUE)". This is particularly useful to add something to each map when plotting a multi-layer Raster* object.
#' @param zlim not used
#' @param zlimcol not used
#' @param ... Graphical parameters. Any argument that can be passed to \code{\link[graphics]{rasterImage}}.
#'
#' @return A plot.
#' @export
#'
#' @examples
#' \dontrun{
#' library(dismo)
#' g <- gmap("France")
#' plot_gmap(g)
#' }
plot_gmap <- function(x, maxpixels=500000, ext=NULL, interpolate=FALSE, axes, main, xlab='', ylab='', asp, add=FALSE, addfun=NULL, zlim=NULL, zlimcol=NULL, ...) {
  # plotting with a color table
  if (missing(main)) {
    main <- ''
  }
  sethook <- FALSE
  if (!add) {
    #graphics::plot.new() # just commented this line to avoid blank plot
    if (missing(axes)) {
      axes <- FALSE
    }
    if (!axes) {
      # if (main != "") { } else {
      old.par <- graphics::par(no.readonly = TRUE)
      graphics::par(plt=c(0,1,0,1))
      sethook <- TRUE
    }
    if (missing(asp)) {
      # approximate aspect ratio correction for unprojected lon/lat rasters
      if (raster::couldBeLonLat(x)) {
        ym <- mean(c(x@extent@ymax, x@extent@ymin))
        asp <- 1/cos((ym * pi)/180)
      } else {
        asp <- 1
      }
    }
  }
  # NOTE(review): colortable() and bbox() below are unqualified and rely on
  # the raster package being attached by the caller.
  coltab <- colortable(x)
  x <- raster::sampleRegular(x, maxpixels, ext=ext, asRaster=TRUE, useGDAL=TRUE)
  z <- raster::getValues(x)
  if (!is.null(zlim)) { # not that relevant here, but for consistency....
    if (is.null(zlimcol)) {
      z[ z<zlim[1] ] <- zlim[1]
      z[ z>zlim[2] ] <- zlim[2]
    } else { #if (is.na(zlimcol)) {
      z[z<zlim[1] | z>zlim[2]] <- NA
    }
  }
  if (NCOL(coltab) == 2) {
    # not implemented
    z <- as.numeric(cut(z, coltab[,1]))
    coltab <- as.vector(coltab[,2])
  }
  z <- z + 1  # shift values so index 1 can stand in for NA cells
  z[is.na(z)] <- 1
  if (! is.null(coltab) ) {
    z <- matrix(coltab[z], nrow=nrow(x), ncol=ncol(x), byrow=TRUE)
    z <- as.raster(z)
  } else {
    z <- matrix(z, nrow=nrow(x), ncol=ncol(x), byrow=TRUE)
    z <- as.raster(z, max=max(z)) #, na.rm=TRUE))
  }
  requireNamespace("grDevices")
  bb <- as.vector(t(bbox(x)))
  if (! add) {
    plot(c(bb[1], bb[2]), c(bb[3], bb[4]), type = "n", xlab=xlab, ylab=ylab, asp=asp, axes=axes, main=main, ...)
  }
  graphics::rasterImage(z, bb[1], bb[3], bb[2], bb[4], interpolate=interpolate, ...)
  if (!is.null(addfun)) {
    if (is.function(addfun)) {
      addfun()
    }
  }
  if (sethook) {
    # Restore graphical parameters (and warning level) on the NEXT plot.new
    setHook("plot.new", function(...) {
      w <- getOption('warn')
      on.exit(options('warn' = w))
      options('warn'=-1)
      # BUGFIX: add = TRUE, otherwise this call replaces the on.exit above
      # and the warn option is left at -1 permanently.
      on.exit(graphics::par(old.par), add = TRUE)
    }, action="replace")
    setHook("plot.new", function(...) setHook("plot.new", NULL, "replace"))
  }
}
|
##############################################################################################################################################################################################
#Random Forest for Northern California to investigate the LU variables directly related to the MMI scores....
#16March2015
##############################################################################################################################################################################################
##########################################################
#Set up
##########################################################
####################
##loading package
####################
library (randomForest)
####################
##loading data
####################
##load file with bugs, indicators, and naturals
RFLU=read.csv("\\\\share1.bluezone.usu.edu\\miller\\buglab\\Research Projects\\BLM_WRSA_Stream_Surveys\\Results and Reports\\NorCal_2013\\Analysis\\RandomForest\\Run5_MMI_to_LUdata\\LandUse_MMI_16March2015.csv")
####################
##Transform some variables
####################
#Boxplots were used to decide on transformed variables.
#Use all variables, but some transformed
# Draw a boxplot of each candidate variable (columns 3:63) to judge which
# need a log transform; panels are laid out 12 per page.
boxplotdata=RFLU[,c(3:63)]
par(mfrow=c(2,6))
# seq_along() instead of 1:length(): safe if the selection is ever empty.
for (i in seq_along(boxplotdata)) {
boxplot(boxplotdata[,i], main=names(boxplotdata[i]))
}
# Transformations:
# If values have negatives need to assess how to Log (e.g., OR WQ)
# Calculated summary stats to know if zeros were present, etc. summary(RFdata$PerDensC)
# Add log10-transformed copies of skewed variables.
# log10(x + 1) is used for variables that can contain zeros;
# plain log10(x) only where the variable is strictly positive.
RFLU$Log_AREA_SQKM=log10(RFLU$AREA_SQKM)
# OE_TN / OE_TP can be negative: clamp negatives to 0 before the +1 shift.
RFLU$Log_OE_TN=log10(ifelse(RFLU$OE_TN<0,0,RFLU$OE_TN)+1)
RFLU$Log_OE_TP=log10(ifelse(RFLU$OE_TP<0,0,RFLU$OE_TP)+1)
RFLU$Log_alru_dom=log10(RFLU$alru_dom+1)
RFLU$Log_SprgNum_WS=log10(RFLU$SprgNum_WS+1)
RFLU$Log_PerDensC=log10(RFLU$PerDensC+1)
# No +1 shift here: presumably Slope_WS and HYDR_WS are never zero -- verify.
RFLU$Log_Slope_WS=log10(RFLU$Slope_WS)
RFLU$Log_HYDR_WS=log10(RFLU$HYDR_WS)
RFLU$Log_Prop_YrSp=log10(RFLU$Prop_YrSp+1)
RFLU$Log_Prop_4YrsPrSp=log10(RFLU$Prop_4YrsPrSp+1)
RFLU$Log_Prop_YrPr=log10(RFLU$Prop_YrPr+1)
RFLU$Log_Prop_3YrPrPr=log10(RFLU$Prop_3YrPrPr+1)
RFLU$Log_NumRdCross=log10(RFLU$NumRdCross+1)
RFLU$Log_Percent_Honly=log10(RFLU$Percent_Honly+1)
RFLU$Log_PctXclsr=log10(RFLU$PctXclsr+1)
#Mines and Dam numbers not transformed
RFLU$Log_DAMvol_WS=log10(RFLU$DAMvol_WS+1)
RFLU$Log_ArtPathDens=log10(RFLU$ArtPathDens+1)
RFLU$Log_AG_WS=log10(RFLU$AG_WS+1)
RFLU$Log_URBAN_WS=log10(RFLU$URBAN_WS+1)
RFLU$Log_PctFWS=log10(RFLU$PctFWS+1)
#All variables in data
####################
# 3-D Plot function
####################
#Once function is run you can use it to create 3-D plots, below two lines of code are an example of info needed for function
#nump = 15
#bpp.out = bivarpartialPlot.randomForest(RF model, data, first variable, second variable, ylab="rating", n1.pt=nump, n2.pt=nump, theta=40)
bivarpartialPlot.randomForest <-
  function (x, pred.data, x1.var, x2.var, which.class, w,
            n1.pt = min(length(unique(pred.data[, x1name])), 51),
            n2.pt = min(length(unique(pred.data[, x2name])), 51),
            x1lab=deparse(substitute(x1.var)),
            x2lab=deparse(substitute(x2.var)), ylab="",
            main=paste("Partial Dependence on", deparse(substitute(x1.var)),"and",deparse(substitute(x2.var))),
            ...)
{
  # Bivariate analogue of randomForest::partialPlot(): evaluate the model on
  # a grid over two predictors, averaging over the remaining columns of
  # pred.data, and draw the partial-dependence surface with persp().
  #
  # x            fitted randomForest object (must contain the forest)
  # pred.data    data frame to average over
  # x1.var, x2.var  the two predictors, as bare names or character strings
  # which.class  for classification forests, the class to focus on (default: first)
  # w            optional case weights (default: equal)
  # n1.pt, n2.pt grid resolution per predictor (capped at 51 points)
  # ...          passed to persp() (e.g. theta, phi)
  classRF <- x$type != "regression"
  # BUG FIX: the message previously ended in a literal "\\n" (backslash-n).
  if (is.null(x$forest)) stop("The randomForest object must contain the forest.")
  # Non-standard evaluation: resolve each variable to a column name whether
  # it was passed as a bare symbol, a string, or an expression.
  x1.var <- substitute(x1.var)
  x2.var <- substitute(x2.var)
  x1name <- if (is.character(x1.var)) x1.var else {
    if (is.name(x1.var)) deparse(x1.var) else {
      eval(x1.var)
    }
  }
  x2name <- if (is.character(x2.var)) x2.var else {
    if (is.name(x2.var)) deparse(x2.var) else {
      eval(x2.var)
    }
  }
  n <- nrow(pred.data)
  if (missing(w)) w <- rep(1, n)
  if (classRF) {
    # Locate the focus class in the vote matrix.
    if (missing(which.class)) {
      focus <- 1
    }
    else {
      focus <- charmatch(which.class, colnames(x$votes))
      if (is.na(focus))
        # BUG FIX: leading space so the message doesn't run into the class name.
        stop(which.class, " is not one of the class labels.")
    }
  }
  # Evaluation grid spanning the observed range of each predictor.
  xv1 <- pred.data[, x1name]
  x1.pt <- seq(min(xv1), max(xv1), length = n1.pt)
  xv2 <- pred.data[, x2name]
  x2.pt <- seq(min(xv2), max(xv2), length = n2.pt)
  # Partial-dependence surface, filled in below (n1.pt x n2.pt).
  y.pt <- matrix(0, nrow=n1.pt, ncol=n2.pt)
  for (i in seq_len(n1.pt)) {
    for (j in seq_len(n2.pt)) {
      # Fix both focal predictors at grid point (i, j) and average the
      # model's prediction over every row of pred.data.
      x.data <- pred.data
      x.data[, x1name] <- rep(x1.pt[i], n)
      x.data[, x2name] <- rep(x2.pt[j], n)
      if (classRF) {
        # Centered log-odds of the focus class, matching partialPlot();
        # zero probabilities are mapped to 1 so log() yields 0.
        pr <- predict(x, x.data, type = "prob")
        y.pt[i,j] <- weighted.mean(log(ifelse(pr[, focus] == 0, 1, pr[, focus]))
                                   - rowMeans(log(ifelse(pr == 0, 1, pr))), w, na.rm=TRUE)
      } else {
        y.pt[i,j] <- weighted.mean(predict(x, x.data), w, na.rm=TRUE)
      }
    }
  }
  # Surface is ready for persp().
  persp(y.pt, xlab=x1lab, ylab=x2lab, zlab="", main=main, ...)
}
###################################################################
# Use MMI response variable and LU/Natural: MMILU
###################################################################
#1
MMILU=randomForest(NV_MMI~P_AUM_YrSp+P_AUM_4YrPrSp+P_AUM_YrPr+P_AUM_3YrPrPr+P_Prop_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+
P_Prop_3YrPrPr+AUM_YrSp+AUM_4YrPrSp+AUM_YrPr+AUM_3YrPrPr+Prop_YrSp+Prop_4YrsPrSp+Prop_YrPr+Prop_3YrPrPr+
NumRdCross+RdDensC+Percent_HBonly+Percent_Honly+Percent_HMA+PctXclsr+Percent_Allotment+MINEnum_WS+
DAMnum_WS+DAMvol_WS+ArtPathDens+AG_WS+URBAN_WS+PctOtherOwn+PrivPct+BLMPct+PctFWS+PctFS+
StmOrd+Slope_WS+PCT_SEDIM+Volcanic_7+SprgNum_WS+SpNum300m+SpNum800m+StreamDens+PerDensC+IntDensC+HYDR_WS+
Slope_WS+AREA_SQKM+SITE_ELEV+ELEV_RANGE+KFCT_AVE+PRMH_AVE+alru_dom+TMAX_WS+TMIN_WS+UCS_Mean+SumAve_P+MEANP_WS,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#2
MMILU=randomForest(NV_MMI~P_AUM_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+
P_Prop_3YrPrPr+AUM_YrSp+AUM_YrPr+AUM_3YrPrPr+Prop_4YrsPrSp+Prop_YrPr+Prop_3YrPrPr+
NumRdCross+RdDensC+Percent_HBonly+Percent_HMA+MINEnum_WS+
DAMnum_WS+DAMvol_WS+AG_WS+URBAN_WS+PctOtherOwn+BLMPct+PctFWS+PctFS+
Volcanic_7+SpNum300m+SpNum800m+StreamDens+PerDensC+IntDensC+HYDR_WS+
Slope_WS+AREA_SQKM+SITE_ELEV+ELEV_RANGE+KFCT_AVE+PRMH_AVE+alru_dom+TMAX_WS+UCS_Mean+SumAve_P,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#3
MMILU=randomForest(NV_MMI~P_AUM_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+
P_Prop_3YrPrPr+AUM_YrSp+AUM_YrPr+AUM_3YrPrPr+Prop_4YrsPrSp+Prop_YrPr+Prop_3YrPrPr+
RdDensC+Percent_HBonly+MINEnum_WS+
DAMvol_WS+AG_WS+URBAN_WS+BLMPct+PctFS+
Volcanic_7+SpNum800m+StreamDens+IntDensC+HYDR_WS+
Slope_WS+AREA_SQKM+ELEV_RANGE+KFCT_AVE+PRMH_AVE+alru_dom+UCS_Mean+SumAve_P,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#3
MMILU=randomForest(NV_MMI~P_AUM_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+
P_Prop_3YrPrPr+Prop_4YrsPrSp+Prop_YrPr+Prop_3YrPrPr+
MINEnum_WS+
AG_WS+BLMPct+PctFS+
Volcanic_7+SpNum800m+IntDensC+
Slope_WS+AREA_SQKM+ELEV_RANGE+KFCT_AVE+PRMH_AVE+alru_dom+UCS_Mean+SumAve_P,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#4
MMILU=randomForest(NV_MMI~P_Prop_4YrsPrSp+P_Prop_YrPr+
Prop_4YrsPrSp+Prop_YrPr+Prop_3YrPrPr+
IntDensC+
Slope_WS+AREA_SQKM+ELEV_RANGE+KFCT_AVE+alru_dom,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#5
MMILU=randomForest(NV_MMI~Prop_4YrsPrSp+Prop_YrPr+Prop_3YrPrPr+
IntDensC+
AREA_SQKM,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#6
MMILU=randomForest(NV_MMI~Prop_4YrsPrSp+
IntDensC+
AREA_SQKM,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
par(mfrow=c(2,2))
partialPlot(MMILU, RFLU,Prop_4YrsPrSp, cex.main=1)
partialPlot(MMILU, RFLU,IntDensC, cex.main=1)
partialPlot(MMILU, RFLU,AREA_SQKM, cex.main=1)
#6
MMILU=randomForest(NV_MMI~
IntDensC+
AREA_SQKM,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#6
MMILU=randomForest(NV_MMI~AREA_SQKM,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
####################
##Subset data
####################
##Remove watersheds with small watershed area
RFLU1=RFLU[(RFLU$AREA_SQKM>14),]
#1
MMILU=randomForest(NV_MMI~P_AUM_YrSp+P_AUM_4YrPrSp+P_AUM_YrPr+P_AUM_3YrPrPr+P_Prop_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+
P_Prop_3YrPrPr+AUM_YrSp+AUM_4YrPrSp+AUM_YrPr+AUM_3YrPrPr+Prop_YrSp+Prop_4YrsPrSp+Prop_YrPr+Prop_3YrPrPr+
NumRdCross+RdDensC+Percent_HBonly+Percent_Honly+Percent_HMA+PctXclsr+Percent_Allotment+MINEnum_WS+
DAMnum_WS+DAMvol_WS+ArtPathDens+AG_WS+URBAN_WS+PctOtherOwn+PrivPct+BLMPct+PctFWS+PctFS+
StmOrd+Slope_WS+PCT_SEDIM+Volcanic_7+SprgNum_WS+SpNum300m+SpNum800m+StreamDens+PerDensC+IntDensC+HYDR_WS+
Slope_WS+AREA_SQKM+SITE_ELEV+ELEV_RANGE+KFCT_AVE+PRMH_AVE+alru_dom+TMAX_WS+TMIN_WS+UCS_Mean+SumAve_P+MEANP_WS,
data=RFLU1, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#2
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_YrPr+
NumRdCross+Percent_Honly+Percent_Allotment+
AG_WS+URBAN_WS+BLMPct+PctFS+
SpNum300m+SpNum800m+
Slope_WS+KFCT_AVE+PRMH_AVE+alru_dom+TMAX_WS+UCS_Mean+SumAve_P+MEANP_WS,
data=RFLU1, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#3
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_YrPr+
Percent_Honly+Percent_Allotment+
KFCT_AVE+alru_dom+MEANP_WS,
data=RFLU1, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#3
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_YrPr,
data=RFLU1, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#################################################################################
#1
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+NumRdCross+RdDensC+Percent_HBonly+Percent_Honly+Percent_HMA+PctXclsr+Percent_Allotment+MINEnum_WS+
DAMnum_WS+DAMvol_WS+ArtPathDens+AG_WS+URBAN_WS+
StmOrd+SprgNum_WS+SpNum300m+SpNum800m+StreamDens+PerDensC+IntDensC+
Slope_WS+AREA_SQKM+SITE_ELEV+ELEV_RANGE+KFCT_AVE+PRMH_AVE+UCS_Mean,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#1
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+RdDensC+
Percent_HBonly+Percent_Honly+Percent_HMA+Percent_Allotment+
ArtPathDens+AG_WS+URBAN_WS+
SprgNum_WS+SpNum300m+SpNum800m+StreamDens+PerDensC+IntDensC+
Slope_WS+AREA_SQKM+SITE_ELEV+ELEV_RANGE+KFCT_AVE+PRMH_AVE+UCS_Mean,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#1
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+RdDensC+
Percent_Honly+Percent_Allotment+
ArtPathDens+
StreamDens+PerDensC+IntDensC+
Slope_WS+AREA_SQKM+ELEV_RANGE+KFCT_AVE+PRMH_AVE+UCS_Mean,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#1
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+RdDensC+
StreamDens+IntDensC+
AREA_SQKM+ELEV_RANGE,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#1
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_YrPr+
IntDensC+
AREA_SQKM,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
par(mfrow=c(2,2))
partialPlot(MMILU, RFLU,P_Prop_YrSp, cex.main=1)
partialPlot(MMILU, RFLU,P_Prop_YrPr, cex.main=1)
partialPlot(MMILU, RFLU,IntDensC, cex.main=1)
partialPlot(MMILU, RFLU,AREA_SQKM, cex.main=1)
#1
MMILU=randomForest(NV_MMI~P_Prop_YrPr+
IntDensC+
AREA_SQKM,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
##############################################################
#1
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+NumRdCross+RdDensC+Percent_HBonly+Percent_Honly+Percent_HMA+PctXclsr+Percent_Allotment+MINEnum_WS+
DAMnum_WS+DAMvol_WS+ArtPathDens+AG_WS+URBAN_WS+
StmOrd+SprgNum_WS+SpNum300m+SpNum800m+StreamDens+PerDensC+IntDensC+
Slope_WS+SITE_ELEV+ELEV_RANGE+KFCT_AVE+PRMH_AVE+UCS_Mean,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
|
/misc R scripts/NC_RandomForest/NC_RF_MMItoLU.R
|
no_license
|
usubuglab/AIM_2016
|
R
| false
| false
| 13,730
|
r
|
##############################################################################################################################################################################################
#Random Forest for Northern California to investigate the LU variables directly related to the MMI scores....
#16March2015
##############################################################################################################################################################################################
##########################################################
#Set up
##########################################################
####################
##loading package
####################
library (randomForest)
####################
##loading data
####################
##load file with bugs, indicators, and naturals
RFLU=read.csv("\\\\share1.bluezone.usu.edu\\miller\\buglab\\Research Projects\\BLM_WRSA_Stream_Surveys\\Results and Reports\\NorCal_2013\\Analysis\\RandomForest\\Run5_MMI_to_LUdata\\LandUse_MMI_16March2015.csv")
####################
##Transform some variables
####################
#Boxplots were used to decide on transformed variables.
#Use all variables, but some transformed
# Draw a boxplot of each candidate variable (columns 3:63) to judge which
# need a log transform; panels are laid out 12 per page.
boxplotdata=RFLU[,c(3:63)]
par(mfrow=c(2,6))
# seq_along() instead of 1:length(): safe if the selection is ever empty.
for (i in seq_along(boxplotdata)) {
boxplot(boxplotdata[,i], main=names(boxplotdata[i]))
}
# Transformations:
# If values have negatives need to assess how to Log (e.g., OR WQ)
# Calculated summary stats to know if zeros were present, etc. summary(RFdata$PerDensC)
RFLU$Log_AREA_SQKM=log10(RFLU$AREA_SQKM)
RFLU$Log_OE_TN=log10(ifelse(RFLU$OE_TN<0,0,RFLU$OE_TN)+1)
RFLU$Log_OE_TP=log10(ifelse(RFLU$OE_TP<0,0,RFLU$OE_TP)+1)
RFLU$Log_alru_dom=log10(RFLU$alru_dom+1)
RFLU$Log_SprgNum_WS=log10(RFLU$SprgNum_WS+1)
RFLU$Log_PerDensC=log10(RFLU$PerDensC+1)
RFLU$Log_Slope_WS=log10(RFLU$Slope_WS)
RFLU$Log_HYDR_WS=log10(RFLU$HYDR_WS)
RFLU$Log_Prop_YrSp=log10(RFLU$Prop_YrSp+1)
RFLU$Log_Prop_4YrsPrSp=log10(RFLU$Prop_4YrsPrSp+1)
RFLU$Log_Prop_YrPr=log10(RFLU$Prop_YrPr+1)
RFLU$Log_Prop_3YrPrPr=log10(RFLU$Prop_3YrPrPr+1)
RFLU$Log_NumRdCross=log10(RFLU$NumRdCross+1)
RFLU$Log_Percent_Honly=log10(RFLU$Percent_Honly+1)
RFLU$Log_PctXclsr=log10(RFLU$PctXclsr+1)
#Mines and Dam numbers not transformed
RFLU$Log_DAMvol_WS=log10(RFLU$DAMvol_WS+1)
RFLU$Log_ArtPathDens=log10(RFLU$ArtPathDens+1)
RFLU$Log_AG_WS=log10(RFLU$AG_WS+1)
RFLU$Log_URBAN_WS=log10(RFLU$URBAN_WS+1)
RFLU$Log_PctFWS=log10(RFLU$PctFWS+1)
#All variables in data
####################
# 3-D Plot function
####################
#Once function is run you can use it to create 3-D plots, below two lines of code are an example of info needed for function
#nump = 15
#bpp.out = bivarpartialPlot.randomForest(RF model, data, first variable, second variable, ylab="rating", n1.pt=nump, n2.pt=nump, theta=40)
# Bivariate analogue of randomForest::partialPlot(): evaluate the model on a
# grid over two predictors, averaging over the remaining columns of
# pred.data, and draw the partial-dependence surface with persp().
# x            fitted randomForest object (must contain the forest)
# pred.data    data frame to average over
# x1.var, x2.var  the two predictors, as bare names or character strings
# which.class  for classification forests, the class to focus on (default: first)
# w            optional case weights (default: equal)
# n1.pt, n2.pt grid resolution per predictor (capped at 51 points)
# ...          passed to persp() (e.g. theta, phi)
bivarpartialPlot.randomForest <-
function (x, pred.data, x1.var, x2.var, which.class, w,
n1.pt = min(length(unique(pred.data[, x1name])), 51),
n2.pt = min(length(unique(pred.data[, x2name])), 51),
x1lab=deparse(substitute(x1.var)),
x2lab=deparse(substitute(x2.var)), ylab="",
main=paste("Partial Dependence on", deparse(substitute(x1.var)),"and",deparse(substitute(x2.var))),
...)
{
classRF <- x$type != "regression"
if (is.null(x$forest)) stop("The randomForest object must contain the forest.\\n")
# Non-standard evaluation: resolve each variable to a column name whether it
# was passed as a bare symbol, a string, or an expression.
x1.var <- substitute(x1.var)
x2.var <- substitute(x2.var)
x1name <- if (is.character(x1.var)) x1.var else {
if (is.name(x1.var)) deparse(x1.var) else {
eval(x1.var)
}
}
x2name <- if (is.character(x2.var)) x2.var else {
if (is.name(x2.var)) deparse(x2.var) else {
eval(x2.var)
}
}
n <- nrow(pred.data)
if (missing(w)) w <- rep(1, n)
if (classRF) {
# Locate the focus class in the vote matrix.
if (missing(which.class)) {
focus <- 1
}
else {
focus <- charmatch(which.class, colnames(x$votes))
if (is.na(focus))
stop(which.class, "is not one of the class labels.")
}
}
# the first predictor variable: grid over its observed range
xv1 <- pred.data[, x1name]
x1.pt <- seq(min(xv1), max(xv1), length = n1.pt)
# the second predictor variable: grid over its observed range
xv2 <- pred.data[, x2name]
x2.pt <- seq(min(xv2), max(xv2), length = n2.pt)
# y is big! (n1.pt x n2.pt partial-dependence surface)
y.pt <- matrix(0, nrow=n1.pt, ncol=n2.pt)
for (i in 1:n1.pt) {
for (j in 1:n2.pt) {
# Fix both focal predictors at grid point (i, j) and average the model's
# prediction over every row of pred.data.
x.data <- pred.data
x.data[, x1name] <- rep(x1.pt[i], n)
x.data[, x2name] <- rep(x2.pt[j], n)
if (classRF) {
# Centered log-odds of the focus class (as in partialPlot); zero
# probabilities are mapped to 1 so log() yields 0.
pr <- predict(x, x.data, type = "prob")
y.pt[i,j] <- weighted.mean(log(ifelse(pr[, focus] == 0, 1, pr[, focus]))
- rowMeans(log(ifelse(pr == 0, 1, pr))), w, na.rm=TRUE)
} else {
y.pt[i,j] <- weighted.mean(predict(x, x.data), w, na.rm=TRUE)
}
}
}
# output is ready for persp
persp(y.pt, xlab=x1lab, ylab=x2lab, zlab="",main=main,...)
}
###################################################################
# Use MMI response variable and LU/Natural: MMILU
###################################################################
#1
MMILU=randomForest(NV_MMI~P_AUM_YrSp+P_AUM_4YrPrSp+P_AUM_YrPr+P_AUM_3YrPrPr+P_Prop_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+
P_Prop_3YrPrPr+AUM_YrSp+AUM_4YrPrSp+AUM_YrPr+AUM_3YrPrPr+Prop_YrSp+Prop_4YrsPrSp+Prop_YrPr+Prop_3YrPrPr+
NumRdCross+RdDensC+Percent_HBonly+Percent_Honly+Percent_HMA+PctXclsr+Percent_Allotment+MINEnum_WS+
DAMnum_WS+DAMvol_WS+ArtPathDens+AG_WS+URBAN_WS+PctOtherOwn+PrivPct+BLMPct+PctFWS+PctFS+
StmOrd+Slope_WS+PCT_SEDIM+Volcanic_7+SprgNum_WS+SpNum300m+SpNum800m+StreamDens+PerDensC+IntDensC+HYDR_WS+
Slope_WS+AREA_SQKM+SITE_ELEV+ELEV_RANGE+KFCT_AVE+PRMH_AVE+alru_dom+TMAX_WS+TMIN_WS+UCS_Mean+SumAve_P+MEANP_WS,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#2
MMILU=randomForest(NV_MMI~P_AUM_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+
P_Prop_3YrPrPr+AUM_YrSp+AUM_YrPr+AUM_3YrPrPr+Prop_4YrsPrSp+Prop_YrPr+Prop_3YrPrPr+
NumRdCross+RdDensC+Percent_HBonly+Percent_HMA+MINEnum_WS+
DAMnum_WS+DAMvol_WS+AG_WS+URBAN_WS+PctOtherOwn+BLMPct+PctFWS+PctFS+
Volcanic_7+SpNum300m+SpNum800m+StreamDens+PerDensC+IntDensC+HYDR_WS+
Slope_WS+AREA_SQKM+SITE_ELEV+ELEV_RANGE+KFCT_AVE+PRMH_AVE+alru_dom+TMAX_WS+UCS_Mean+SumAve_P,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#3
MMILU=randomForest(NV_MMI~P_AUM_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+
P_Prop_3YrPrPr+AUM_YrSp+AUM_YrPr+AUM_3YrPrPr+Prop_4YrsPrSp+Prop_YrPr+Prop_3YrPrPr+
RdDensC+Percent_HBonly+MINEnum_WS+
DAMvol_WS+AG_WS+URBAN_WS+BLMPct+PctFS+
Volcanic_7+SpNum800m+StreamDens+IntDensC+HYDR_WS+
Slope_WS+AREA_SQKM+ELEV_RANGE+KFCT_AVE+PRMH_AVE+alru_dom+UCS_Mean+SumAve_P,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#3
MMILU=randomForest(NV_MMI~P_AUM_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+
P_Prop_3YrPrPr+Prop_4YrsPrSp+Prop_YrPr+Prop_3YrPrPr+
MINEnum_WS+
AG_WS+BLMPct+PctFS+
Volcanic_7+SpNum800m+IntDensC+
Slope_WS+AREA_SQKM+ELEV_RANGE+KFCT_AVE+PRMH_AVE+alru_dom+UCS_Mean+SumAve_P,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#4
MMILU=randomForest(NV_MMI~P_Prop_4YrsPrSp+P_Prop_YrPr+
Prop_4YrsPrSp+Prop_YrPr+Prop_3YrPrPr+
IntDensC+
Slope_WS+AREA_SQKM+ELEV_RANGE+KFCT_AVE+alru_dom,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#5
MMILU=randomForest(NV_MMI~Prop_4YrsPrSp+Prop_YrPr+Prop_3YrPrPr+
IntDensC+
AREA_SQKM,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#6
MMILU=randomForest(NV_MMI~Prop_4YrsPrSp+
IntDensC+
AREA_SQKM,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
par(mfrow=c(2,2))
partialPlot(MMILU, RFLU,Prop_4YrsPrSp, cex.main=1)
partialPlot(MMILU, RFLU,IntDensC, cex.main=1)
partialPlot(MMILU, RFLU,AREA_SQKM, cex.main=1)
#6
MMILU=randomForest(NV_MMI~
IntDensC+
AREA_SQKM,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#6
MMILU=randomForest(NV_MMI~AREA_SQKM,
data=RFLU, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
####################
##Subset data
####################
##Remove watersheds with small watershed area
RFLU1=RFLU[(RFLU$AREA_SQKM>14),]
#1
MMILU=randomForest(NV_MMI~P_AUM_YrSp+P_AUM_4YrPrSp+P_AUM_YrPr+P_AUM_3YrPrPr+P_Prop_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+
P_Prop_3YrPrPr+AUM_YrSp+AUM_4YrPrSp+AUM_YrPr+AUM_3YrPrPr+Prop_YrSp+Prop_4YrsPrSp+Prop_YrPr+Prop_3YrPrPr+
NumRdCross+RdDensC+Percent_HBonly+Percent_Honly+Percent_HMA+PctXclsr+Percent_Allotment+MINEnum_WS+
DAMnum_WS+DAMvol_WS+ArtPathDens+AG_WS+URBAN_WS+PctOtherOwn+PrivPct+BLMPct+PctFWS+PctFS+
StmOrd+Slope_WS+PCT_SEDIM+Volcanic_7+SprgNum_WS+SpNum300m+SpNum800m+StreamDens+PerDensC+IntDensC+HYDR_WS+
Slope_WS+AREA_SQKM+SITE_ELEV+ELEV_RANGE+KFCT_AVE+PRMH_AVE+alru_dom+TMAX_WS+TMIN_WS+UCS_Mean+SumAve_P+MEANP_WS,
data=RFLU1, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#2
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_YrPr+
NumRdCross+Percent_Honly+Percent_Allotment+
AG_WS+URBAN_WS+BLMPct+PctFS+
SpNum300m+SpNum800m+
Slope_WS+KFCT_AVE+PRMH_AVE+alru_dom+TMAX_WS+UCS_Mean+SumAve_P+MEANP_WS,
data=RFLU1, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#3
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_YrPr+
Percent_Honly+Percent_Allotment+
KFCT_AVE+alru_dom+MEANP_WS,
data=RFLU1, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
MMILU
varImpPlot(MMILU)
#3
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_YrPr,
data=RFLU1, importance=TRUE, proximity=TRUE, bias.corr=TRUE)
# Iterative random-forest variable selection for the NV_MMI response on the
# RFLU data set.  Each successive randomForest() fit drops predictors judged
# unimportant in the previous model's varImpPlot(), ending with a minimal
# model; partial-dependence plots are drawn for the 4-predictor model, and a
# final full-set fit excluding AREA_SQKM closes the sequence.
#
# FIX: randomForest()'s regression bias-correction argument is spelled
# `corr.bias`, not `bias.corr`.  The misspelled name was silently absorbed by
# `...` in randomForest(), so no bias correction was actually applied; it is
# corrected to `corr.bias = TRUE` in every call below.
MMILU
varImpPlot(MMILU)
#################################################################################
#1 -- full predictor set
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+NumRdCross+RdDensC+Percent_HBonly+Percent_Honly+Percent_HMA+PctXclsr+Percent_Allotment+MINEnum_WS+
                     DAMnum_WS+DAMvol_WS+ArtPathDens+AG_WS+URBAN_WS+
                     StmOrd+SprgNum_WS+SpNum300m+SpNum800m+StreamDens+PerDensC+IntDensC+
                     Slope_WS+AREA_SQKM+SITE_ELEV+ELEV_RANGE+KFCT_AVE+PRMH_AVE+UCS_Mean,
                   data=RFLU, importance=TRUE, proximity=TRUE, corr.bias=TRUE)
MMILU
varImpPlot(MMILU)
#1 -- low-importance predictors from the previous fit removed
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+RdDensC+
                     Percent_HBonly+Percent_Honly+Percent_HMA+Percent_Allotment+
                     ArtPathDens+AG_WS+URBAN_WS+
                     SprgNum_WS+SpNum300m+SpNum800m+StreamDens+PerDensC+IntDensC+
                     Slope_WS+AREA_SQKM+SITE_ELEV+ELEV_RANGE+KFCT_AVE+PRMH_AVE+UCS_Mean,
                   data=RFLU, importance=TRUE, proximity=TRUE, corr.bias=TRUE)
MMILU
varImpPlot(MMILU)
#1 -- further reduction
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+RdDensC+
                     Percent_Honly+Percent_Allotment+
                     ArtPathDens+
                     StreamDens+PerDensC+IntDensC+
                     Slope_WS+AREA_SQKM+ELEV_RANGE+KFCT_AVE+PRMH_AVE+UCS_Mean,
                   data=RFLU, importance=TRUE, proximity=TRUE, corr.bias=TRUE)
MMILU
varImpPlot(MMILU)
#1 -- 8-predictor model
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+RdDensC+
                     StreamDens+IntDensC+
                     AREA_SQKM+ELEV_RANGE,
                   data=RFLU, importance=TRUE, proximity=TRUE, corr.bias=TRUE)
MMILU
varImpPlot(MMILU)
#1 -- 4-predictor model; partial-dependence plots drawn below
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_YrPr+
                     IntDensC+
                     AREA_SQKM,
                   data=RFLU, importance=TRUE, proximity=TRUE, corr.bias=TRUE)
MMILU
varImpPlot(MMILU)
par(mfrow=c(2,2))  # 2x2 grid for the four partial-dependence plots
partialPlot(MMILU, RFLU,P_Prop_YrSp, cex.main=1)
partialPlot(MMILU, RFLU,P_Prop_YrPr, cex.main=1)
partialPlot(MMILU, RFLU,IntDensC, cex.main=1)
partialPlot(MMILU, RFLU,AREA_SQKM, cex.main=1)
#1 -- 3-predictor model
MMILU=randomForest(NV_MMI~P_Prop_YrPr+
                     IntDensC+
                     AREA_SQKM,
                   data=RFLU, importance=TRUE, proximity=TRUE, corr.bias=TRUE)
MMILU
varImpPlot(MMILU)
##############################################################
#1 -- full predictor set again, this time excluding AREA_SQKM
MMILU=randomForest(NV_MMI~P_Prop_YrSp+P_Prop_4YrsPrSp+P_Prop_YrPr+NumRdCross+RdDensC+Percent_HBonly+Percent_Honly+Percent_HMA+PctXclsr+Percent_Allotment+MINEnum_WS+
                     DAMnum_WS+DAMvol_WS+ArtPathDens+AG_WS+URBAN_WS+
                     StmOrd+SprgNum_WS+SpNum300m+SpNum800m+StreamDens+PerDensC+IntDensC+
                     Slope_WS+SITE_ELEV+ELEV_RANGE+KFCT_AVE+PRMH_AVE+UCS_Mean,
                   data=RFLU, importance=TRUE, proximity=TRUE, corr.bias=TRUE)
MMILU
varImpPlot(MMILU)
|
# V-fold Cross-validation wrapper for SuperLearner
#
# Fits SuperLearner() once per cross-validation fold (training on the other
# V-1 folds, predicting on the held-out fold) and assembles the out-of-fold
# predictions of the full ensemble, of the discrete (single best-cvRisk)
# learner, and of every candidate algorithm in the library.
#
# Arguments (as used in the body below):
#   Y, X           outcome and predictors; N is taken as nrow(X)
#   V              number of folds; if supplied it overrides cvControl$V
#                  (kept only for backward compatibility)
#   family         model family forwarded to SuperLearner()
#   SL.library     candidate learner library, expanded via .createLibrary()
#   method         combination method: a character name (resolved to a list
#                  or function on the search path), a function, or a list
#   id             optional cluster id, forwarded to CVFolds()/SuperLearner()
#   control        SuperLearner control list (library fits not saved by default)
#   cvControl      controls the outer CV split (see SuperLearner.CV.control)
#   innerCvControl list of cvControl lists for the inner SuperLearner calls;
#                  a single element is replicated across all V splits
#   obsWeights     observation weights; defaults to 1 for every row
#   saveAll        keep each fold's fitted SuperLearner object in the output
#   parallel       "seq", "multicore", or a 'cluster' object from parallel
#   env            environment forwarded to the inner SuperLearner() calls
# Returns: a list of class 'CV.SuperLearner' with the CV predictions,
# per-fold coefficients, fold assignments, and bookkeeping metadata.
CV.SuperLearner <- function(Y, X, V = NULL, family = gaussian(), SL.library, method = 'method.NNLS', id = NULL, verbose = FALSE, control = list(saveFitLibrary = FALSE), cvControl = list(), innerCvControl = list(), obsWeights = NULL, saveAll = TRUE, parallel = "seq", env = parent.frame()) {
  call <- match.call()
  # number of observations; folds are built over rows of X
  N <- dim(X)[1L]
  # create CV folds:
  if(any(names(cvControl) == "V") & !is.null(V)) {
    stop(paste0("You specified a value for V and a value in the cvControl, please only use one, preferably the cvControl"))
  }
  cvControl <- do.call('SuperLearner.CV.control', cvControl)
  if(!is.null(V)) {
    # if the user specified V in the function call, override the default in cvControl
    # backward compatibility to not remove the V
    cvControl$V <- V
  }
  folds <- CVFolds(N = N, id = id, Y = Y, cvControl = cvControl)
  V <- cvControl$V # save this because it appears in the output value
  # normalize innerCvControl into a length-V list (one per split)
  if(length(innerCvControl) > 0) {
    if(length(innerCvControl) == 1) {
      warning("Only a single innerCvControl is given, will be replicated across all cross-validation split calls to SuperLearner")
      newInnerCvControl <- vector("list", cvControl$V)
      for(ii in seq(cvControl$V)) {
        newInnerCvControl[[ii]] <- unlist(innerCvControl, recursive = FALSE)
      }
      innerCvControl <- newInnerCvControl # write over previous with replicated list
    }
    if(length(innerCvControl) != cvControl$V) stop("innerCvControl must be a list with V cvControl lists")
  } else {
    innerCvControl <- vector("list", cvControl$V) # if no innerCvControl is given, generate an empty list
    for(ii in seq(cvControl$V)) {
      innerCvControl[[ii]] <- list()
    }
  }
  # put together folds and cvControl (inner loop one) into a list to loop over
  foldsList <- Map(list, folds = folds, cvControl = innerCvControl)
  # check input:
  if(is.null(obsWeights)) {
    obsWeights <- rep(1, N)
  }
  if(!identical(length(obsWeights), N)) {
    stop("obsWeights vector must have the same dimension as Y")
  }
  # check method:
  # method may arrive as a character name, a function, or already a list;
  # after this section it must be a list (method.template format).
  if(is.character(method)) {
    if(exists(method, mode = 'list')) {
      method <- get(method, mode = 'list')
    } else if(exists(method, mode = 'function')) {
      method <- get(method, mode = 'function')()
    }
  } else if(is.function(method)) {
    method <- method()
  }
  if(!is.list(method)) {
    stop("method is not in the appropriate format. Check out help('method.template')")
  }
  # create placeholders:
  library <- .createLibrary(SL.library)
  libraryNames <- paste(library$library$predAlgorithm, library$screenAlgorithm[library$library$rowScreen], sep="_")
  k <- nrow(library$library)
  AllSL <- vector('list', V)
  names(AllSL) <- paste("training", 1:V, sep=" ")
  SL.predict <- rep(NA, N)
  discreteSL.predict <- rep.int(NA, N)
  whichDiscreteSL <- rep.int(NA, V)
  library.predict <- matrix(NA, nrow = N, ncol = k)
  colnames(library.predict) <- libraryNames
  # run SuperLearner:
  # .crossValFun fits SuperLearner on the training rows of one split and
  # predicts on the held-out rows. `valid` is one element of foldsList:
  # valid[[1]] = held-out row indices, valid[[2]] = inner cvControl.
  .crossValFun <- function(valid, Y, dataX, family, id, obsWeights, SL.library, method, verbose, control, saveAll) {
    cvLearn <- dataX[-valid[[1]], , drop = FALSE]
    cvOutcome <- Y[-valid[[1]]]
    cvValid <- dataX[valid[[1]], , drop = FALSE]
    cvId <- id[-valid[[1]]]
    cvObsWeights <- obsWeights[-valid[[1]]]
    fit.SL <- SuperLearner(Y = cvOutcome, X = cvLearn, newX = cvValid, family = family, SL.library = SL.library, method = method, id = cvId, verbose = verbose, control = control, cvControl = valid[[2]], obsWeights = cvObsWeights, env = env)
    # discrete SL = the single library algorithm with minimal CV risk
    out <- list(cvAllSL = if(saveAll) fit.SL, cvSL.predict = fit.SL$SL.predict, cvdiscreteSL.predict = fit.SL$library.predict[, which.min(fit.SL$cvRisk)], cvwhichDiscreteSL = names(which.min(fit.SL$cvRisk)), cvlibrary.predict = fit.SL$library.predict, cvcoef = fit.SL$coef)
    return(out)
  }
  ## Why is CV.SuperLearner not saving the output from SuperLearner, only the call name?
  ## if we add something like force() will this eval multiple times?
  # dispatch over the V splits: parallel cluster, multicore, or sequential
  if (inherits(parallel, 'cluster')) {
    .SL.require('parallel')
    cvList <- parallel::parLapply(parallel, X = foldsList, fun = .crossValFun, Y = Y, dataX = X, family = family, SL.library = SL.library, method = method, id = id, obsWeights = obsWeights, verbose = verbose, control = control, saveAll = saveAll)
  } else if (parallel == 'multicore') {
    .SL.require('parallel')
    cvList <- parallel::mclapply(foldsList, FUN = .crossValFun, Y = Y, dataX = X, family = family, SL.library = SL.library, method = method, id = id, obsWeights = obsWeights, verbose = verbose, control = control, saveAll = saveAll, mc.set.seed = FALSE)
  } else if (parallel == "seq") {
    cvList <- lapply(foldsList, FUN = .crossValFun, Y = Y, dataX = X, family = family, SL.library = SL.library, method = method, id = id, obsWeights = obsWeights, verbose = verbose, control = control, saveAll = saveAll)
  } else {
    stop('parallel option was not recognized, use parallel = "seq" for sequential computation.')
  }
  # check out Biobase::subListExtract to replace the lapply
  # scatter each fold's held-out predictions back into original row order
  AllSL <- lapply(cvList, '[[', 'cvAllSL')
  SL.predict[unlist(folds, use.names = FALSE)] <- unlist(lapply(cvList, '[[', 'cvSL.predict'), use.names = FALSE)
  discreteSL.predict[unlist(folds, use.names = FALSE)] <- unlist(lapply(cvList, '[[', 'cvdiscreteSL.predict'), use.names = FALSE)
  whichDiscreteSL <- lapply(cvList, '[[', 'cvwhichDiscreteSL')
  library.predict[unlist(folds, use.names = FALSE), ] <- do.call('rbind', lapply(cvList, '[[', 'cvlibrary.predict'))
  coef <- do.call('rbind', lapply(cvList, '[[', 'cvcoef'))
  colnames(coef) <- libraryNames
  # put together output
  out <- list(call = call, AllSL = AllSL, SL.predict = SL.predict, discreteSL.predict = discreteSL.predict, whichDiscreteSL = whichDiscreteSL, library.predict = library.predict, coef = coef, folds = folds, V = V, libraryNames = libraryNames, SL.library = library, method = method, Y = Y)
  class(out) <- 'CV.SuperLearner'
  return(out)
}
|
/R/CV.SuperLearner.R
|
no_license
|
frbl/SuperLearner
|
R
| false
| false
| 5,986
|
r
|
# V-fold Cross-validation wrapper for SuperLearner
#
# Fits SuperLearner() once per cross-validation fold (training on the other
# V-1 folds, predicting on the held-out fold) and assembles the out-of-fold
# predictions of the full ensemble, of the discrete (single best-cvRisk)
# learner, and of every candidate algorithm in the library.
#
# Arguments (as used in the body below):
#   Y, X           outcome and predictors; N is taken as nrow(X)
#   V              number of folds; if supplied it overrides cvControl$V
#                  (kept only for backward compatibility)
#   family         model family forwarded to SuperLearner()
#   SL.library     candidate learner library, expanded via .createLibrary()
#   method         combination method: a character name (resolved to a list
#                  or function on the search path), a function, or a list
#   id             optional cluster id, forwarded to CVFolds()/SuperLearner()
#   control        SuperLearner control list (library fits not saved by default)
#   cvControl      controls the outer CV split (see SuperLearner.CV.control)
#   innerCvControl list of cvControl lists for the inner SuperLearner calls;
#                  a single element is replicated across all V splits
#   obsWeights     observation weights; defaults to 1 for every row
#   saveAll        keep each fold's fitted SuperLearner object in the output
#   parallel       "seq", "multicore", or a 'cluster' object from parallel
#   env            environment forwarded to the inner SuperLearner() calls
# Returns: a list of class 'CV.SuperLearner' with the CV predictions,
# per-fold coefficients, fold assignments, and bookkeeping metadata.
CV.SuperLearner <- function(Y, X, V = NULL, family = gaussian(), SL.library, method = 'method.NNLS', id = NULL, verbose = FALSE, control = list(saveFitLibrary = FALSE), cvControl = list(), innerCvControl = list(), obsWeights = NULL, saveAll = TRUE, parallel = "seq", env = parent.frame()) {
  call <- match.call()
  # number of observations; folds are built over rows of X
  N <- dim(X)[1L]
  # create CV folds:
  if(any(names(cvControl) == "V") & !is.null(V)) {
    stop(paste0("You specified a value for V and a value in the cvControl, please only use one, preferably the cvControl"))
  }
  cvControl <- do.call('SuperLearner.CV.control', cvControl)
  if(!is.null(V)) {
    # if the user specified V in the function call, override the default in cvControl
    # backward compatibility to not remove the V
    cvControl$V <- V
  }
  folds <- CVFolds(N = N, id = id, Y = Y, cvControl = cvControl)
  V <- cvControl$V # save this because it appears in the output value
  # normalize innerCvControl into a length-V list (one per split)
  if(length(innerCvControl) > 0) {
    if(length(innerCvControl) == 1) {
      warning("Only a single innerCvControl is given, will be replicated across all cross-validation split calls to SuperLearner")
      newInnerCvControl <- vector("list", cvControl$V)
      for(ii in seq(cvControl$V)) {
        newInnerCvControl[[ii]] <- unlist(innerCvControl, recursive = FALSE)
      }
      innerCvControl <- newInnerCvControl # write over previous with replicated list
    }
    if(length(innerCvControl) != cvControl$V) stop("innerCvControl must be a list with V cvControl lists")
  } else {
    innerCvControl <- vector("list", cvControl$V) # if no innerCvControl is given, generate an empty list
    for(ii in seq(cvControl$V)) {
      innerCvControl[[ii]] <- list()
    }
  }
  # put together folds and cvControl (inner loop one) into a list to loop over
  foldsList <- Map(list, folds = folds, cvControl = innerCvControl)
  # check input:
  if(is.null(obsWeights)) {
    obsWeights <- rep(1, N)
  }
  if(!identical(length(obsWeights), N)) {
    stop("obsWeights vector must have the same dimension as Y")
  }
  # check method:
  # method may arrive as a character name, a function, or already a list;
  # after this section it must be a list (method.template format).
  if(is.character(method)) {
    if(exists(method, mode = 'list')) {
      method <- get(method, mode = 'list')
    } else if(exists(method, mode = 'function')) {
      method <- get(method, mode = 'function')()
    }
  } else if(is.function(method)) {
    method <- method()
  }
  if(!is.list(method)) {
    stop("method is not in the appropriate format. Check out help('method.template')")
  }
  # create placeholders:
  library <- .createLibrary(SL.library)
  libraryNames <- paste(library$library$predAlgorithm, library$screenAlgorithm[library$library$rowScreen], sep="_")
  k <- nrow(library$library)
  AllSL <- vector('list', V)
  names(AllSL) <- paste("training", 1:V, sep=" ")
  SL.predict <- rep(NA, N)
  discreteSL.predict <- rep.int(NA, N)
  whichDiscreteSL <- rep.int(NA, V)
  library.predict <- matrix(NA, nrow = N, ncol = k)
  colnames(library.predict) <- libraryNames
  # run SuperLearner:
  # .crossValFun fits SuperLearner on the training rows of one split and
  # predicts on the held-out rows. `valid` is one element of foldsList:
  # valid[[1]] = held-out row indices, valid[[2]] = inner cvControl.
  .crossValFun <- function(valid, Y, dataX, family, id, obsWeights, SL.library, method, verbose, control, saveAll) {
    cvLearn <- dataX[-valid[[1]], , drop = FALSE]
    cvOutcome <- Y[-valid[[1]]]
    cvValid <- dataX[valid[[1]], , drop = FALSE]
    cvId <- id[-valid[[1]]]
    cvObsWeights <- obsWeights[-valid[[1]]]
    fit.SL <- SuperLearner(Y = cvOutcome, X = cvLearn, newX = cvValid, family = family, SL.library = SL.library, method = method, id = cvId, verbose = verbose, control = control, cvControl = valid[[2]], obsWeights = cvObsWeights, env = env)
    # discrete SL = the single library algorithm with minimal CV risk
    out <- list(cvAllSL = if(saveAll) fit.SL, cvSL.predict = fit.SL$SL.predict, cvdiscreteSL.predict = fit.SL$library.predict[, which.min(fit.SL$cvRisk)], cvwhichDiscreteSL = names(which.min(fit.SL$cvRisk)), cvlibrary.predict = fit.SL$library.predict, cvcoef = fit.SL$coef)
    return(out)
  }
  ## Why is CV.SuperLearner not saving the output from SuperLearner, only the call name?
  ## if we add something like force() will this eval multiple times?
  # dispatch over the V splits: parallel cluster, multicore, or sequential
  if (inherits(parallel, 'cluster')) {
    .SL.require('parallel')
    cvList <- parallel::parLapply(parallel, X = foldsList, fun = .crossValFun, Y = Y, dataX = X, family = family, SL.library = SL.library, method = method, id = id, obsWeights = obsWeights, verbose = verbose, control = control, saveAll = saveAll)
  } else if (parallel == 'multicore') {
    .SL.require('parallel')
    cvList <- parallel::mclapply(foldsList, FUN = .crossValFun, Y = Y, dataX = X, family = family, SL.library = SL.library, method = method, id = id, obsWeights = obsWeights, verbose = verbose, control = control, saveAll = saveAll, mc.set.seed = FALSE)
  } else if (parallel == "seq") {
    cvList <- lapply(foldsList, FUN = .crossValFun, Y = Y, dataX = X, family = family, SL.library = SL.library, method = method, id = id, obsWeights = obsWeights, verbose = verbose, control = control, saveAll = saveAll)
  } else {
    stop('parallel option was not recognized, use parallel = "seq" for sequential computation.')
  }
  # check out Biobase::subListExtract to replace the lapply
  # scatter each fold's held-out predictions back into original row order
  AllSL <- lapply(cvList, '[[', 'cvAllSL')
  SL.predict[unlist(folds, use.names = FALSE)] <- unlist(lapply(cvList, '[[', 'cvSL.predict'), use.names = FALSE)
  discreteSL.predict[unlist(folds, use.names = FALSE)] <- unlist(lapply(cvList, '[[', 'cvdiscreteSL.predict'), use.names = FALSE)
  whichDiscreteSL <- lapply(cvList, '[[', 'cvwhichDiscreteSL')
  library.predict[unlist(folds, use.names = FALSE), ] <- do.call('rbind', lapply(cvList, '[[', 'cvlibrary.predict'))
  coef <- do.call('rbind', lapply(cvList, '[[', 'cvcoef'))
  colnames(coef) <- libraryNames
  # put together output
  out <- list(call = call, AllSL = AllSL, SL.predict = SL.predict, discreteSL.predict = discreteSL.predict, whichDiscreteSL = whichDiscreteSL, library.predict = library.predict, coef = coef, folds = folds, V = V, libraryNames = libraryNames, SL.library = library, method = method, Y = Y)
  class(out) <- 'CV.SuperLearner'
  return(out)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defaults.R
\name{default_fvars}
\alias{default_fvars}
\title{default fvars}
\usage{
default_fvars(object)
}
\arguments{
\item{object}{SummarizedExperiment}
}
\value{
string vector
}
\description{
default fvars
}
\examples{
if (require(autonomics.data)){
require(magrittr)
object <- autonomics.data::stemcomp.proteinratios
object \%>\% default_fvars()
}
}
|
/autonomics.plot/man/default_fvars.Rd
|
no_license
|
bhagwataditya/autonomics0
|
R
| false
| true
| 442
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defaults.R
\name{default_fvars}
\alias{default_fvars}
\title{default fvars}
\usage{
default_fvars(object)
}
\arguments{
\item{object}{SummarizedExperiment}
}
\value{
string vector
}
\description{
default fvars
}
\examples{
if (require(autonomics.data)){
require(magrittr)
object <- autonomics.data::stemcomp.proteinratios
object \%>\% default_fvars()
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics-getters.R
\name{getDefaultPackageData}
\alias{getDefaultPackageData}
\title{Get a list shared across modules within a same package}
\usage{
getDefaultPackageData()
}
\value{
A \code{\link[dipsaus]{fastmap2}} instance
}
\description{
Package data is a \code{\link[dipsaus]{fastmap2}} instance that
stores key-value pairs. A package data is shared across modules but
independent across 'RAVE' packages. It's useful to store shared data
for modules
}
|
/man/getDefaultPackageData.Rd
|
no_license
|
dipterix/ravecore
|
R
| false
| true
| 535
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics-getters.R
\name{getDefaultPackageData}
\alias{getDefaultPackageData}
\title{Get a list shared across modules within a same package}
\usage{
getDefaultPackageData()
}
\value{
A \code{\link[dipsaus]{fastmap2}} instance
}
\description{
Package data is a \code{\link[dipsaus]{fastmap2}} instance that
stores key-value pairs. A package data is shared across modules but
independent across 'RAVE' packages. It's useful to store shared data
for modules
}
|
# Extracted examples for RSurveillance::n.tp -- sample size for estimating
# true prevalence (per the Rd title below).  Argument semantics are presumed
# to be (prevalence, sensitivity, specificity, precision) -- confirm via ?n.tp.
library(RSurveillance)
### Name: n.tp
### Title: Sample size for true prevalence
### Aliases: n.tp
### Keywords: methods
### ** Examples
# examples for n.tp
n.tp(0.1, 0.9, 0.99, 0.05)
# same call with a non-default confidence level
n.tp(0.1, 0.9, 0.99, 0.05, conf = 0.99)
# vectorised over the first argument
n.tp(c(0.05, 0.1, 0.2, 0.3, 0.4, 0.5), 0.9, 0.99, 0.05)
# vectorised over the fourth argument
n.tp(0.5, 0.9, 0.99, c(0.01, 0.02, 0.05, 0.1, 0.2))
|
/data/genthat_extracted_code/RSurveillance/examples/n.tp.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 339
|
r
|
# Extracted examples for RSurveillance::n.tp -- sample size for estimating
# true prevalence (per the Rd title below).  Argument semantics are presumed
# to be (prevalence, sensitivity, specificity, precision) -- confirm via ?n.tp.
library(RSurveillance)
### Name: n.tp
### Title: Sample size for true prevalence
### Aliases: n.tp
### Keywords: methods
### ** Examples
# examples for n.tp
n.tp(0.1, 0.9, 0.99, 0.05)
# same call with a non-default confidence level
n.tp(0.1, 0.9, 0.99, 0.05, conf = 0.99)
# vectorised over the first argument
n.tp(c(0.05, 0.1, 0.2, 0.3, 0.4, 0.5), 0.9, 0.99, 0.05)
# vectorised over the fourth argument
n.tp(0.5, 0.9, 0.99, c(0.01, 0.02, 0.05, 0.1, 0.2))
|
# Experiment: draw GP samples under "true" kernel hyperparameters and score
# how similar each sample's Gram matrix is to candidate ("test") kernel
# matrices, via a Frobenius-norm affinity a_ijm.  Grids of increasing size
# (m*10*i points) are taken from a length-N grid on [0,1].
library(mvtnorm)
library(fBasics)
set.seed(1)
N=1000
m=N/100
tt=seq(0,1, length.out = N) # USE M AS SAME NOTATION IN OUR LATEX FILE - M AND NOT H
t=vector(mode = "list", length = m) # TAKE SUBSET OF THE GRAM MATRIX
for (i in 1:m) {
  t[[i]]=tt[1:(m*10*i)]
}
kernel_family=0
#KSE
# hyperparameter grids: Omega_true = generating values, Omega_test = candidates
Omega_true = c(0.25,0.5, 3,4,5, 10, 50)
Omega_test = c(seq(0.01,10,0.01), 15,20,30,40,50,60)
Omega_test = unique(c( Omega_true, Omega_test))
J=length(Omega_true) #M_true
I=length(Omega_test) #M_test
if(kernel_family == 0){
  # squared-exponential kernel with lengthscale l
  k =function(x1,x2,l ) {exp(- (x1-x2)^2 / (2 * l^2) ) }
} else if(kernel_family == 1){
  # periodic kernel branch.
  # NOTE(review): the two assignments below use `i` (leftover from the grid
  # loop above) and `j` (never defined at this point); this branch would
  # error or misbehave if kernel_family == 1 -- confirm intent.
  l_list_true= c(0.25,0.5, 3,4,5, 10, 50)
  p_list_true= c(0.25, 0.5, 1,2)
  Omega_true = expand.grid(l_list_true,p_list_true)
  l_list_test= c(seq(0.01,10,0.01), 15,20,30,40,50,60)
  p_list_test= c(0.25, 0.5, 1,2)
  l_list_test = unique(c(l_list_true, l_list_test))
  p_list_test = unique(c(p_list_true, p_list_test))
  Omega_test= expand.grid(l_list_test, p_list_test)
  Omega_test = unique(rbind(Omega_true, Omega_test), MARGIN = 1)
  psi_test = Omega_test[i,]
  psi_true = Omega_true[j,]
  k= function(x1,x2,l ,p ){exp(-2 * sin(pi*(x1 - x2) / p)^2 / (l^2) ) }
}
# preallocate Gram matrices: K_true[[j]][[h]] is (m*10*h) x (m*10*h)
K_true=lapply(1:J, function(j)lapply(1:m,function(i)matrix(0,(m*10*i) ,(m*10*i))) )
K_test=lapply(1:I, function(j)lapply(1:m,function(i)matrix(0,(m*10*i) ,(m*10*i))) )
# K_true=lapply(1:J, function(j)lapply(1:m,function(i)matrix(0,(100) ,(100))) )
# K_test=lapply(1:I, function(j)lapply(1:m,function(i)matrix(0,(100) ,(100))) )
# fill upper triangles of the true-kernel Gram matrices
for (j in 1:J) {
  for(h in 1:m){
    for(n1 in (1:(dim(K_true[[j]][[h]])[1]))){
      for(n2 in (n1:(dim(K_true[[j]][[h]])[1]))){
        psi_true = Omega_true[j]
        K_true[[j]][[h]][n1,n2]=k(t[[h]][n1],t[[h]][n2], l = psi_true )
      }
    }
  }
}
# fill upper triangles of the test-kernel Gram matrices
for (j in 1:I) {
  for(h in 1:m){
    for(n1 in (1:(dim(K_test[[j]][[h]])[1]))){
      for(n2 in (n1:(dim(K_test[[j]][[h]])[1]))){
        psi_test = Omega_test[j]
        K_test[[j]][[h]][n1,n2]=k(t[[h]][n1],t[[h]][n2], l = psi_test )
      }
    }
  }
}
#DO EVERYTHING IN ONE LOOP ABOVE
# symmetrize: K + K^T double-counts the diagonal, so halve it afterwards
for (j in 1:J) {
  for(h in 1:m){
    K_true[[j]][[h]]=K_true[[j]][[h]] + t(K_true[[j]][[h]])
    diag(K_true[[j]][[h]]) =diag(K_true[[j]][[h]])/2
  }
}
for (j in 1:I) {
  for(h in 1:m){
    K_test[[j]][[h]]=K_test[[j]][[h]] + t(K_test[[j]][[h]])
    diag(K_test[[j]][[h]]) =diag(K_test[[j]][[h]])/2
  }
}
# draw one GP sample per true kernel and grid size, rescaled to unit variance
y_m=vector(mode = "list", length = J)
for(j in (1:J)){
  for(h in (1:m)){
    y_m[[j]][[h]]=rmvnorm(1,rep(0,(dim(K_true[[j]][[h]])[1])), K_true[[j]][[h]])
    y_m[[j]][[h]]=y_m[[j]][[h]] /as.numeric(sqrt(var(t(y_m[[j]][[h]]))))
  }
}
# empirical (rank-1) covariance of each sample
S_m=lapply(1:J, function(j)lapply(1:m,function(i)matrix(0,(m*10*i) ,(m*10*i))) )
#S_m=lapply(1:J, function(j)lapply(1:m,function(i)matrix(0,(100) ,(100))) )
for(j in (1:J)){
  for(h in (1:m))
    S_m[[j]][[h]]=t(y_m[[j]][[h]])%*%y_m[[j]][[h]]
}
# NOTE(review): the next two loops index with `[[m]]` (the largest grid)
# instead of `[[h]]`, so every h entry stores the SAME normalized matrix.
# This looks like a bug (likely should be [[h]]) -- confirm before reuse.
F_norm_S_m=vector(mode = "list", length = J)
for(j in (1:J)){
  for(h in (1:m))
    F_norm_S_m[[j]][[h]]=(S_m[[j]][[m]]/norm(S_m[[j]][[m]], type = "F"))
}
F_norm_K_test=vector(mode= "list", length = I)
for(i in (1:I)){
  for(h in (1:m))
    F_norm_K_test[[i]][[h]]=(K_test[[i]][[m]]/norm(K_test[[i]][[m]], type = "F"))
}
# affinity: 2 minus the Frobenius distance of the normalized matrices
a_ijm=lapply(1:J, function(j){
  a_ijm1=lapply(1:I, function(i){
    a_ijm2=sapply(1:m, function(h){
      2 - norm( F_norm_S_m[[j]][[h]] - F_norm_K_test[[i]][[h]] , type = "F")
    })
  })
})
# bundle scores with their indices and hyperparameter values
return_list=lapply(1:J, function(j){
  return_list1=lapply(1:I, function(i){
    return_list2=sapply(1:m, function(h){
      c(a_ijm = a_ijm[[j]][[i]][h], i = i, j = j, m = h ,
        psi_true = Omega_true[j], psi_test = Omega_test[i]) #N = N,
    })
  })
})
#return_list = list(a_ijm = a_ijm[[j]][[i]][[h]], i = i, j = j,
#                   m = m , N = N, psi_true = psi_true, psi_test = psi_test)
#DOROTA - BOX-PLOT EXERCISE
|
/R_code_Experiments/Ex_1_alg2_wrong.R
|
no_license
|
andreas-koukorinis/EMD_GP_Gareth_Marta_Dorota
|
R
| false
| false
| 4,241
|
r
|
# Experiment: draw GP samples under "true" kernel hyperparameters and score
# how similar each sample's Gram matrix is to candidate ("test") kernel
# matrices, via a Frobenius-norm affinity a_ijm.  Grids of increasing size
# (m*10*i points) are taken from a length-N grid on [0,1].
library(mvtnorm)
library(fBasics)
set.seed(1)
N=1000
m=N/100
tt=seq(0,1, length.out = N) # USE M AS SAME NOTATION IN OUR LATEX FILE - M AND NOT H
t=vector(mode = "list", length = m) # TAKE SUBSET OF THE GRAM MATRIX
for (i in 1:m) {
  t[[i]]=tt[1:(m*10*i)]
}
kernel_family=0
#KSE
# hyperparameter grids: Omega_true = generating values, Omega_test = candidates
Omega_true = c(0.25,0.5, 3,4,5, 10, 50)
Omega_test = c(seq(0.01,10,0.01), 15,20,30,40,50,60)
Omega_test = unique(c( Omega_true, Omega_test))
J=length(Omega_true) #M_true
I=length(Omega_test) #M_test
if(kernel_family == 0){
  # squared-exponential kernel with lengthscale l
  k =function(x1,x2,l ) {exp(- (x1-x2)^2 / (2 * l^2) ) }
} else if(kernel_family == 1){
  # periodic kernel branch.
  # NOTE(review): the two assignments below use `i` (leftover from the grid
  # loop above) and `j` (never defined at this point); this branch would
  # error or misbehave if kernel_family == 1 -- confirm intent.
  l_list_true= c(0.25,0.5, 3,4,5, 10, 50)
  p_list_true= c(0.25, 0.5, 1,2)
  Omega_true = expand.grid(l_list_true,p_list_true)
  l_list_test= c(seq(0.01,10,0.01), 15,20,30,40,50,60)
  p_list_test= c(0.25, 0.5, 1,2)
  l_list_test = unique(c(l_list_true, l_list_test))
  p_list_test = unique(c(p_list_true, p_list_test))
  Omega_test= expand.grid(l_list_test, p_list_test)
  Omega_test = unique(rbind(Omega_true, Omega_test), MARGIN = 1)
  psi_test = Omega_test[i,]
  psi_true = Omega_true[j,]
  k= function(x1,x2,l ,p ){exp(-2 * sin(pi*(x1 - x2) / p)^2 / (l^2) ) }
}
# preallocate Gram matrices: K_true[[j]][[h]] is (m*10*h) x (m*10*h)
K_true=lapply(1:J, function(j)lapply(1:m,function(i)matrix(0,(m*10*i) ,(m*10*i))) )
K_test=lapply(1:I, function(j)lapply(1:m,function(i)matrix(0,(m*10*i) ,(m*10*i))) )
# K_true=lapply(1:J, function(j)lapply(1:m,function(i)matrix(0,(100) ,(100))) )
# K_test=lapply(1:I, function(j)lapply(1:m,function(i)matrix(0,(100) ,(100))) )
# fill upper triangles of the true-kernel Gram matrices
for (j in 1:J) {
  for(h in 1:m){
    for(n1 in (1:(dim(K_true[[j]][[h]])[1]))){
      for(n2 in (n1:(dim(K_true[[j]][[h]])[1]))){
        psi_true = Omega_true[j]
        K_true[[j]][[h]][n1,n2]=k(t[[h]][n1],t[[h]][n2], l = psi_true )
      }
    }
  }
}
# fill upper triangles of the test-kernel Gram matrices
for (j in 1:I) {
  for(h in 1:m){
    for(n1 in (1:(dim(K_test[[j]][[h]])[1]))){
      for(n2 in (n1:(dim(K_test[[j]][[h]])[1]))){
        psi_test = Omega_test[j]
        K_test[[j]][[h]][n1,n2]=k(t[[h]][n1],t[[h]][n2], l = psi_test )
      }
    }
  }
}
#DO EVERYTHING IN ONE LOOP ABOVE
# symmetrize: K + K^T double-counts the diagonal, so halve it afterwards
for (j in 1:J) {
  for(h in 1:m){
    K_true[[j]][[h]]=K_true[[j]][[h]] + t(K_true[[j]][[h]])
    diag(K_true[[j]][[h]]) =diag(K_true[[j]][[h]])/2
  }
}
for (j in 1:I) {
  for(h in 1:m){
    K_test[[j]][[h]]=K_test[[j]][[h]] + t(K_test[[j]][[h]])
    diag(K_test[[j]][[h]]) =diag(K_test[[j]][[h]])/2
  }
}
# draw one GP sample per true kernel and grid size, rescaled to unit variance
y_m=vector(mode = "list", length = J)
for(j in (1:J)){
  for(h in (1:m)){
    y_m[[j]][[h]]=rmvnorm(1,rep(0,(dim(K_true[[j]][[h]])[1])), K_true[[j]][[h]])
    y_m[[j]][[h]]=y_m[[j]][[h]] /as.numeric(sqrt(var(t(y_m[[j]][[h]]))))
  }
}
# empirical (rank-1) covariance of each sample
S_m=lapply(1:J, function(j)lapply(1:m,function(i)matrix(0,(m*10*i) ,(m*10*i))) )
#S_m=lapply(1:J, function(j)lapply(1:m,function(i)matrix(0,(100) ,(100))) )
for(j in (1:J)){
  for(h in (1:m))
    S_m[[j]][[h]]=t(y_m[[j]][[h]])%*%y_m[[j]][[h]]
}
# NOTE(review): the next two loops index with `[[m]]` (the largest grid)
# instead of `[[h]]`, so every h entry stores the SAME normalized matrix.
# This looks like a bug (likely should be [[h]]) -- confirm before reuse.
F_norm_S_m=vector(mode = "list", length = J)
for(j in (1:J)){
  for(h in (1:m))
    F_norm_S_m[[j]][[h]]=(S_m[[j]][[m]]/norm(S_m[[j]][[m]], type = "F"))
}
F_norm_K_test=vector(mode= "list", length = I)
for(i in (1:I)){
  for(h in (1:m))
    F_norm_K_test[[i]][[h]]=(K_test[[i]][[m]]/norm(K_test[[i]][[m]], type = "F"))
}
# affinity: 2 minus the Frobenius distance of the normalized matrices
a_ijm=lapply(1:J, function(j){
  a_ijm1=lapply(1:I, function(i){
    a_ijm2=sapply(1:m, function(h){
      2 - norm( F_norm_S_m[[j]][[h]] - F_norm_K_test[[i]][[h]] , type = "F")
    })
  })
})
# bundle scores with their indices and hyperparameter values
return_list=lapply(1:J, function(j){
  return_list1=lapply(1:I, function(i){
    return_list2=sapply(1:m, function(h){
      c(a_ijm = a_ijm[[j]][[i]][h], i = i, j = j, m = h ,
        psi_true = Omega_true[j], psi_test = Omega_test[i]) #N = N,
    })
  })
})
#return_list = list(a_ijm = a_ijm[[j]][[i]][[h]], i = i, j = j,
#                   m = m , N = N, psi_true = psi_true, psi_test = psi_test)
#DOROTA - BOX-PLOT EXERCISE
|
# Plot per-generation migration ("proportion replaced") estimates from
# smcsmc runs for three Han-vs-African population pairs (Yoruba, Mbuti,
# Khomani San), averaged across seeds, as a step plot with +/- 1 sd ribbons.
library(smcsmcTools)
library(rjson)
library(GGally)
library(dplyr)
library(stringi)
library(scales)
library(ggalt)
config = fromJSON(file = "~/repos/eurasian-backmigration/analyses/sgdp_replication.json")
source = names_from_config(config$source) # from tools
source_strings = vapply(source, ids_to_names, character(1)) %>% as.vector
sink = names_from_config(config$sink) # from tools
sink_strings = vapply(sink, ids_to_names, character(1)) %>% as.vector
smc2_path = "~/repos/eurasian-backmigration/v2/data/sgdp_subset/"
msmc_path = "~/repos/eurasian-backmigration/v2/data/sgdp_subset/"
plots = list()
i = 1
#This is stupidly complicated but I don't think its worth a multi-plot function
# For each population pair: load every seed's .out file, extract the seed
# from the filename, and stack the per-seed rate data frames into `dfs`.
# `dfs = 0` is a sentinel; the first iteration replaces it with a data frame.
files = list.files(smc2_path, pattern = "S_Han-1.S_Yoruba-1.out", full.names = T)
list_of_smcsmc <- lapply(files, smcsmc)
dfs = 0
for(smc in list_of_smcsmc){
  seed = stri_extract_all_regex(smc@file, "[0-9]{3,}") %>% unlist
  if(!is.data.frame(dfs)){
    dfs = plot(smc, return_df = T) %>% mutate(seed = seed) %>% mutate(pop = "Yoruba")
  } else {
    dfs = rbind(dfs, plot(smc, return_df = T) %>% mutate(seed = seed) %>% mutate(pop = "Yoruba"))
  }
}
files = list.files(smc2_path, pattern = "S_Han-1.S_Mbuti-1.out", full.names = T)
list_of_smcsmc <- lapply(files, smcsmc)
for(smc in list_of_smcsmc){
  seed = stri_extract_all_regex(smc@file, "[0-9]{3,}") %>% unlist
  if(!is.data.frame(dfs)){
    dfs = plot(smc, return_df = T) %>% mutate(seed = seed) %>% mutate(pop = "Mbuti")
  } else {
    dfs = rbind(dfs, plot(smc, return_df = T) %>% mutate(seed = seed) %>% mutate(pop = "Mbuti"))
  }
}
files = list.files(smc2_path, pattern = "S_Han-1.S_Khomani_San-1.out", full.names = T)
list_of_smcsmc <- lapply(files, smcsmc)
for(smc in list_of_smcsmc){
  seed = stri_extract_all_regex(smc@file, "[0-9]{3,}") %>% unlist
  if(!is.data.frame(dfs)){
    dfs = plot(smc, return_df = T) %>% mutate(seed = seed) %>% mutate(pop = "Khomani San")
  } else {
    dfs = rbind(dfs, plot(smc, return_df = T) %>% mutate(seed = seed) %>% mutate(pop = "Khomani San"))
  }
}
# mean and sd of Rate across seeds, per time segment / direction / population
summary <- dfs %>%
  group_by(Start, From, pop) %>%
  summarise(mean = mean(Rate),
            sd = sd(Rate))
# NOTE(review): `g` below (scales Start, presumably generations -> years) is
# not defined anywhere in this script -- it must come from the session or a
# sourced file; confirm before running standalone.  The output filename
# "sgdp_subet_three_pop.pdf" also looks like a typo for "subset".
ggplot(data = summary, aes(x = Start*g, fill = pop, linetype = From, y = mean, ymin=mean-sd, ymax=mean+sd)) +
  geom_rect(aes(xmin = 1e5, xmax=3e5, ymin=0, ymax=4e-4), fill = "lightgrey", alpha = 0.05, inherit.aes = F) +
  geom_vline(xintercept = seq(1e4,1e5,1e4), col = "grey", alpha = 0.3) +
  geom_vline(xintercept = seq(1e5,1e6,1e5), col = "grey", alpha = 0.3) +
  geom_step(aes(col = pop)) +
  geom_ribbon(stat="stepribbon",
              alpha = 0.3) +
  scale_y_continuous(limits = c(0,4e-4),
                     labels = label_comma()) +
  scale_x_log10(limits = c(1e4,3e5),
                labels = label_comma(scale = 0.001)) +
  scale_color_manual(values = c('blue', 'green', "red"), labels = c("Khomani San", "Mbuti", "Yoruba")) +
  ylab("Proportion Replaced per Generation") +
  xlab("Thousands of Years before Present") +
  scale_linetype_manual(values = c(2,1), labels = c("Eur to Afr", "Afr to Eur")) +
  scale_fill_manual(values = c('blue', 'green', "red"), labels = c("Khomani San", "Mbuti", "Yoruba")) +
  #scale_fill_manual(values = c('blue', 'red', 'black'), labels = c("Afr to Eur", "Eur to Afr")) +
  theme_bw() +
  theme(legend.position = "none",
        legend.title = element_blank(),
        panel.grid.minor = element_blank())
ggsave("~/repos/dirmig/plot/mig/sgdp_subet_three_pop.pdf", height = 4, width = 4, unit = "in")
|
/r/sgdp_subset_individual_mig.R
|
no_license
|
Chris1221/ancient_african_admixture
|
R
| false
| false
| 3,526
|
r
|
# Plot per-generation migration ("proportion replaced") estimates from
# smcsmc runs for three Han-vs-African population pairs (Yoruba, Mbuti,
# Khomani San), averaged across seeds, as a step plot with +/- 1 sd ribbons.
library(smcsmcTools)
library(rjson)
library(GGally)
library(dplyr)
library(stringi)
library(scales)
library(ggalt)
config = fromJSON(file = "~/repos/eurasian-backmigration/analyses/sgdp_replication.json")
source = names_from_config(config$source) # from tools
source_strings = vapply(source, ids_to_names, character(1)) %>% as.vector
sink = names_from_config(config$sink) # from tools
sink_strings = vapply(sink, ids_to_names, character(1)) %>% as.vector
smc2_path = "~/repos/eurasian-backmigration/v2/data/sgdp_subset/"
msmc_path = "~/repos/eurasian-backmigration/v2/data/sgdp_subset/"
plots = list()
i = 1
#This is stupidly complicated but I don't think its worth a multi-plot function
# For each population pair: load every seed's .out file, extract the seed
# from the filename, and stack the per-seed rate data frames into `dfs`.
# `dfs = 0` is a sentinel; the first iteration replaces it with a data frame.
files = list.files(smc2_path, pattern = "S_Han-1.S_Yoruba-1.out", full.names = T)
list_of_smcsmc <- lapply(files, smcsmc)
dfs = 0
for(smc in list_of_smcsmc){
  seed = stri_extract_all_regex(smc@file, "[0-9]{3,}") %>% unlist
  if(!is.data.frame(dfs)){
    dfs = plot(smc, return_df = T) %>% mutate(seed = seed) %>% mutate(pop = "Yoruba")
  } else {
    dfs = rbind(dfs, plot(smc, return_df = T) %>% mutate(seed = seed) %>% mutate(pop = "Yoruba"))
  }
}
files = list.files(smc2_path, pattern = "S_Han-1.S_Mbuti-1.out", full.names = T)
list_of_smcsmc <- lapply(files, smcsmc)
for(smc in list_of_smcsmc){
  seed = stri_extract_all_regex(smc@file, "[0-9]{3,}") %>% unlist
  if(!is.data.frame(dfs)){
    dfs = plot(smc, return_df = T) %>% mutate(seed = seed) %>% mutate(pop = "Mbuti")
  } else {
    dfs = rbind(dfs, plot(smc, return_df = T) %>% mutate(seed = seed) %>% mutate(pop = "Mbuti"))
  }
}
files = list.files(smc2_path, pattern = "S_Han-1.S_Khomani_San-1.out", full.names = T)
list_of_smcsmc <- lapply(files, smcsmc)
for(smc in list_of_smcsmc){
  seed = stri_extract_all_regex(smc@file, "[0-9]{3,}") %>% unlist
  if(!is.data.frame(dfs)){
    dfs = plot(smc, return_df = T) %>% mutate(seed = seed) %>% mutate(pop = "Khomani San")
  } else {
    dfs = rbind(dfs, plot(smc, return_df = T) %>% mutate(seed = seed) %>% mutate(pop = "Khomani San"))
  }
}
# mean and sd of Rate across seeds, per time segment / direction / population
summary <- dfs %>%
  group_by(Start, From, pop) %>%
  summarise(mean = mean(Rate),
            sd = sd(Rate))
# NOTE(review): `g` below (scales Start, presumably generations -> years) is
# not defined anywhere in this script -- it must come from the session or a
# sourced file; confirm before running standalone.  The output filename
# "sgdp_subet_three_pop.pdf" also looks like a typo for "subset".
ggplot(data = summary, aes(x = Start*g, fill = pop, linetype = From, y = mean, ymin=mean-sd, ymax=mean+sd)) +
  geom_rect(aes(xmin = 1e5, xmax=3e5, ymin=0, ymax=4e-4), fill = "lightgrey", alpha = 0.05, inherit.aes = F) +
  geom_vline(xintercept = seq(1e4,1e5,1e4), col = "grey", alpha = 0.3) +
  geom_vline(xintercept = seq(1e5,1e6,1e5), col = "grey", alpha = 0.3) +
  geom_step(aes(col = pop)) +
  geom_ribbon(stat="stepribbon",
              alpha = 0.3) +
  scale_y_continuous(limits = c(0,4e-4),
                     labels = label_comma()) +
  scale_x_log10(limits = c(1e4,3e5),
                labels = label_comma(scale = 0.001)) +
  scale_color_manual(values = c('blue', 'green', "red"), labels = c("Khomani San", "Mbuti", "Yoruba")) +
  ylab("Proportion Replaced per Generation") +
  xlab("Thousands of Years before Present") +
  scale_linetype_manual(values = c(2,1), labels = c("Eur to Afr", "Afr to Eur")) +
  scale_fill_manual(values = c('blue', 'green', "red"), labels = c("Khomani San", "Mbuti", "Yoruba")) +
  #scale_fill_manual(values = c('blue', 'red', 'black'), labels = c("Afr to Eur", "Eur to Afr")) +
  theme_bw() +
  theme(legend.position = "none",
        legend.title = element_blank(),
        panel.grid.minor = element_blank())
ggsave("~/repos/dirmig/plot/mig/sgdp_subet_three_pop.pdf", height = 4, width = 4, unit = "in")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{.silenceF}
\alias{.silenceF}
\title{Silencing Functions}
\usage{
.silenceF(f, level = 7L)
}
\arguments{
\item{f}{function to silence}
\item{level}{a single numeric (integer) that indicates the silencing level, which encodes the set of
output to be silenced.
It is interpreted like unix permission bit system, where each bit of the binary expression of the silencing
level corresponds to a given type of output:
\itemize{
\item 0: nothing silenced;
\item 1: \emph{stdout};
\item 2: \emph{stderr} messages;
\item 4: \emph{stderr} warnings.
}
For example, level \code{3 = 2 + 1} means silencing \emph{stdout} and \emph{stderr} messages, while
\code{6 = 4 + 2} means silencing \emph{stderr} messages and warnings, but not outputs to \emph{stdout}.
The default value is \code{7 = 4 + 2 + 1}, which silences all output.
Negative values are supported and mean \emph{"silence everything except the corresponding type"},
e.g., \code{level = -1} silences all except \emph{stdout} (computed as the binary complement of 7, i.e. \code{7 - 1 = 6 = 4 + 2}).
See examples.}
}
\value{
a function
}
\description{
Generates a wrapper function that silences the output, messages, and/or warnings of a given function.
}
\examples{
\dontshow{
options(R_CHECK_RUNNING_EXAMPLES_=TRUE) ## roxygen generated flag
}
f <- function(){
cat("stdout message\\n")
message("stderr message")
warning("stderr warning", immediate. = TRUE)
}
# example of generated wrapper
g <- .silenceF(f)
g
# use of silencing level
for(l in 7:-7){ message("\\nLevel: ", l); .silenceF(f, l)() }
# inline functions
ifun <- .silenceF(function(){ f(); invisible(1) })
ifun()
ifun <- .silenceF(function(){ f(); 1 })
ifun()
ifun <- .silenceF(function(){ f(); 1 }, 2L)
ifun()
}
|
/man/dot-silenceF.Rd
|
no_license
|
Bhanditz/pkgmaker
|
R
| false
| true
| 1,818
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{.silenceF}
\alias{.silenceF}
\title{Silencing Functions}
\usage{
.silenceF(f, level = 7L)
}
\arguments{
\item{f}{function to silence}
\item{level}{a single numeric (integer) that indicates the silencing level, which encodes the set of
output to be silenced.
It is interpreted like unix permission bit system, where each bit of the binary expression of the silencing
level corresponds to a given type of output:
\itemize{
\item 0: nothing silenced;
\item 1: \emph{stdout};
\item 2: \emph{stderr} messages;
\item 4: \emph{stderr} warnings.
}
For example, level \code{3 = 2 + 1} means silencing \emph{stdout} and \emph{stderr} messages, while
\code{6 = 4 + 2} means silencing \emph{stderr} messages and warnings, but not outputs to \emph{stdout}.
The default value is \code{7 = 4 + 2 + 1}, which silences all output.
Negative values are supported and mean \emph{"silence everything except the corresponding type"},
e.g., \code{level = -1} silences all except \emph{stdout} (computed as the binary complement of 7, i.e. \code{7 - 1 = 6 = 4 + 2}).
See examples.}
}
\value{
a function
}
\description{
Generates a wrapper function that silences the output, messages, and/or warnings of a given function.
}
\examples{
\dontshow{
options(R_CHECK_RUNNING_EXAMPLES_=TRUE) ## roxygen generated flag
}
f <- function(){
cat("stdout message\\n")
message("stderr message")
warning("stderr warning", immediate. = TRUE)
}
# example of generated wrapper
g <- .silenceF(f)
g
# use of silencing level
for(l in 7:-7){ message("\\nLevel: ", l); .silenceF(f, l)() }
# inline functions
ifun <- .silenceF(function(){ f(); invisible(1) })
ifun()
ifun <- .silenceF(function(){ f(); 1 })
ifun()
ifun <- .silenceF(function(){ f(); 1 }, 2L)
ifun()
}
|
# Packages required for the academic-writing image.
packages_to_install <- c(
  "devtools",
  "dplyr",
  "ggplot2",
  "knitr",
  "magrittr",
  "rmarkdown",
  "tidyr"
)

# Install with full dependencies from the RStudio CRAN mirror.
# FIX: spell out `dependencies` (was `dep =`, which relies on partial
# argument matching) and use HTTPS for the repository URL.
install.packages(
  packages_to_install,
  dependencies = TRUE,
  repos = "https://cran.rstudio.com"
)
|
/r-packages.R
|
permissive
|
briandk/docker-academic-writing
|
R
| false
| false
| 210
|
r
|
# Packages required for the academic-writing image.
packages_to_install <- c(
  "devtools",
  "dplyr",
  "ggplot2",
  "knitr",
  "magrittr",
  "rmarkdown",
  "tidyr"
)

# Install with full dependencies from the RStudio CRAN mirror.
# FIX: spell out `dependencies` (was `dep =`, which relies on partial
# argument matching) and use HTTPS for the repository URL.
install.packages(
  packages_to_install,
  dependencies = TRUE,
  repos = "https://cran.rstudio.com"
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\docType{package}
\name{princurve-package}
\alias{princurve-package}
\alias{princurve}
\title{Fit a Principal Curve in Arbitrary Dimension}
\description{
Fit a principal curve which describes a smooth curve that passes through the \code{middle}
of the data \code{x} in an orthogonal sense. This curve is a non-parametric generalization
of a linear principal component. If a closed curve is fit (using \code{smoother = "periodic_lowess"})
then the starting curve defaults to a circle, and each fit is followed by a bias correction
suggested by Jeff Banfield.
}
\references{
Hastie, T. and Stuetzle, W.,
\href{https://www.jstor.org/stable/2289936}{Principal Curves},
JASA, Vol. 84, No. 406 (Jun., 1989), pp. 502-516,
\doi{10.2307/2289936}
(\href{https://web.stanford.edu/~hastie/Papers/principalcurves.pdf}{PDF}).
See also Banfield and Raftery (JASA, 1992).
}
\seealso{
\code{\link{principal_curve}}, \code{\link{project_to_curve}}
}
\keyword{nonparametric}
\keyword{regression}
\keyword{smooth}
|
/man/princurve-package.Rd
|
no_license
|
rcannood/princurve
|
R
| false
| true
| 1,088
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\docType{package}
\name{princurve-package}
\alias{princurve-package}
\alias{princurve}
\title{Fit a Principal Curve in Arbitrary Dimension}
\description{
Fit a principal curve which describes a smooth curve that passes through the \code{middle}
of the data \code{x} in an orthogonal sense. This curve is a non-parametric generalization
of a linear principal component. If a closed curve is fit (using \code{smoother = "periodic_lowess"})
then the starting curve defaults to a circle, and each fit is followed by a bias correction
suggested by Jeff Banfield.
}
\references{
Hastie, T. and Stuetzle, W.,
\href{https://www.jstor.org/stable/2289936}{Principal Curves},
JASA, Vol. 84, No. 406 (Jun., 1989), pp. 502-516,
\doi{10.2307/2289936}
(\href{https://web.stanford.edu/~hastie/Papers/principalcurves.pdf}{PDF}).
See also Banfield and Raftery (JASA, 1992).
}
\seealso{
\code{\link{principal_curve}}, \code{\link{project_to_curve}}
}
\keyword{nonparametric}
\keyword{regression}
\keyword{smooth}
|
# Generate an input file for the self-balancing-tree benchmark.
# Usage: Rscript input_generator.R <adds> <removes> <lookups> <repetitions>
options(scipen = 999)  # keep numbers in plain (non-scientific) notation in the output

args <- commandArgs(trailingOnly = TRUE)
# FIX: commandArgs() returns character strings; convert explicitly so that
# sample() receives numeric sizes instead of relying on implicit coercion
# (a character `size` argument is an error in sample()).
adds <- as.integer(args[1])
removes <- as.integer(args[2])
lookups <- as.integer(args[3])
repetitions <- as.integer(args[4])

# Values to insert: a random permutation of 1..adds.
a <- sample(seq_len(adds), adds)
# Values to remove: drawn without replacement from the inserted values.
b <- sample(a, removes)
# Values to look up; with repetitions enabled, draw with replacement.
if (repetitions == 0) {
  c <- sample(a, lookups)
} else {
  c <- sample(a, lookups, replace = TRUE)
}

# One number per line: header (0, counts) followed by the three sequences.
write(c(0, adds, removes, lookups, a, b, c), file = paste0(adds, "I", removes, "R", lookups, "L.txt"), ncolumns = 1, sep = " ")
|
/Inputs/input_generator.R
|
no_license
|
AndreCascais/SelfBalancingTrees
|
R
| false
| false
| 427
|
r
|
# Generate an input file for the self-balancing-tree benchmark.
# Usage: Rscript input_generator.R <adds> <removes> <lookups> <repetitions>
options(scipen = 999)  # keep numbers in plain (non-scientific) notation in the output

args <- commandArgs(trailingOnly = TRUE)
# FIX: commandArgs() returns character strings; convert explicitly so that
# sample() receives numeric sizes instead of relying on implicit coercion
# (a character `size` argument is an error in sample()).
adds <- as.integer(args[1])
removes <- as.integer(args[2])
lookups <- as.integer(args[3])
repetitions <- as.integer(args[4])

# Values to insert: a random permutation of 1..adds.
a <- sample(seq_len(adds), adds)
# Values to remove: drawn without replacement from the inserted values.
b <- sample(a, removes)
# Values to look up; with repetitions enabled, draw with replacement.
if (repetitions == 0) {
  c <- sample(a, lookups)
} else {
  c <- sample(a, lookups, replace = TRUE)
}

# One number per line: header (0, counts) followed by the three sequences.
write(c(0, adds, removes, lookups, a, b, c), file = paste0(adds, "I", removes, "R", lookups, "L.txt"), ncolumns = 1, sep = " ")
|
#!/usr/bin/env Rscript
#######
# LOG #
#######
# Redirect both messages and regular output to the log file supplied by the
# Snakemake rule (this script is run via Snakemake's `script:` directive,
# which injects the `snakemake` S4 object).
log <- file(snakemake@log[[1]], open = "wt")
sink(log, type = "message")
sink(log, append = TRUE, type = "output")
#############
# LIBRARIES #
#############
library(data.table)
###########
# GLOBALS #
###########
# Input file paths provided by the Snakemake rule.
interpro_res <- snakemake@input[["interpro_res"]]
blastp_gff_file <- snakemake@input[["blastp_gff_file"]]
########
# MAIN #
########
# NOTE(review): the two tables are read but never combined or written out;
# the script appears unfinished -- confirm the intended output.
interpro <- fread(interpro_res)
blastp_gff <- fread(blastp_gff_file)
#write log
sessionInfo()
|
/src/full_viral_table.R
|
no_license
|
sarahinwood/microctonus-viral
|
R
| false
| false
| 498
|
r
|
#!/usr/bin/env Rscript
#######
# LOG #
#######
log <- file(snakemake@log[[1]], open = "wt")
sink(log, type = "message")
sink(log, append = TRUE, type = "output")
#############
# LIBRARIES #
#############
library(data.table)
###########
# GLOBALS #
###########
interpro_res <- snakemake@input[["interpro_res"]]
blastp_gff_file <- snakemake@input[["blastp_gff_file"]]
########
# MAIN #
########
interpro <- fread(interpro_res)
blastp_gff <- fread(blastp_gff_file)
#write log
sessionInfo()
|
## The function takes two arguments:an outcome name (outcome) and the ranking of
## a hospital (num), and returns a 2-column data frame with the name of the hospital
## in each state that has the ranking specified in num
## NOTE(review): this version is incomplete -- it validates `outcome` and
## drops "Not Available" rows, but never uses `num`, never converts the rate
## column to numeric, never ranks, and implicitly returns the filtered
## `temp` table rather than the documented per-state 2-column data frame.
rankall <- function(outcome,num="best"){
data<-read.csv("outcome-of-care-measures.csv",stringsAsFactors = FALSE)
# The three supported outcome names.
disease<-c("heart attack","heart failure","pneumonia")
if (!outcome %in% disease)
stop("invalid outcome")
data<-data[order(data$State),]
statenames<-unique(data$State) # NOTE(review): computed but currently unused
# Columns 2/7/11/17/23: hospital name, state, and the three 30-day mortality
# rates -- TODO confirm these indices against the CSV layout.
temp<-(data[c(2,7,11,17,23)])
colnames(temp)<-c("hospital","State",disease)
# Keep only rows where the selected outcome has a recorded rate.
temp<-temp[!temp[,outcome]=="Not Available",]
}
|
/C2Week4/rankall3.R
|
no_license
|
Shawvin/Data-Specilisation
|
R
| false
| false
| 673
|
r
|
## The function takes two arguments:an outcome name (outcome) and the ranking of
## a hospital (num), and returns a 2-column data frame with the name of the hospital
## in each state that has the ranking specified in num
rankall <- function(outcome,num="best"){
data<-read.csv("outcome-of-care-measures.csv",stringsAsFactors = FALSE)
disease<-c("heart attack","heart failure","pneumonia")
if (!outcome %in% disease)
stop("invalid outcome")
data<-data[order(data$State),]
statenames<-unique(data$State)
temp<-(data[c(2,7,11,17,23)])
colnames(temp)<-c("hospital","State",disease)
temp<-temp[!temp[,outcome]=="Not Available",]
}
|
# Load required packages
library(readxl)
library(tidyverse)

################ IMPORTING DATA ################
# Reading in CSV files
read_csv("data/mydata.csv")
# Read in and save data
mydata <- read_csv("data/mydata.csv")
# reading in an Excel file
excel_sheets("data/mydata.xlsx")
read_excel("data/mydata.xlsx", sheet = "PICK_ME_FIRST!")

#############
# YOUR TURN #
#############

# dimension of data
data(mtcars)
nrow(mtcars)
ncol(mtcars)
dim(mtcars)

# what are the variables
names(mtcars)
glimpse(mtcars)
View(mtcars)

# missing values
data(airquality)
is.na(airquality)
sum(is.na(airquality))
colSums(is.na(airquality))
clean_data <- na.omit(airquality)

################ DATA STRUCTURES ################
# Creating vectors
c("Learning", "to", "create", "character", "vectors")
c(3, 2, 10, 55)
c(TRUE, FALSE, FALSE, FALSE, TRUE)
6:15
15.5:6.75

# Indexing vectors
v1 <- 1:10
v1[4]
v1[4:7]
v1[c(4, 3, 4)]
v1[v1 > 6]

# different summaries of vectors
length(v1)
summary(v1)
mean(v1)
median(v1)
v1 > 5

#############
# YOUR TURN #
#############

# Creating matrices
set.seed(123)
v1 <- sample(1:10, 25, replace = TRUE)
m1 <- matrix(v1, nrow = 5)
m1

# Indexing matrices
m1[1, 3]
m1[ , 1:3]
m1[1:3, ]

# Summaries of matrices
summary(m1)
mean(m1)
mean(m1[1, ])   # FIX: was `mean(m[1,])` -- `m` is never defined; matrix is `m1`
rowMeans(m1)
colMeans(m1)
rowSums(m1)
colSums(m1)
m1 > .5         # FIX: was `m > .5`
sum(m1 > .5)    # FIX: was `sum(m > .5)`
which(m1 > .5)  # FIX: was `which(m > .5)`
m1[m1 > .5]     # FIX: was `m[m > .5]`

#############
# YOUR TURN #
#############

# Data frames
raw_data <- read_csv("data/CustomerData.csv")

# indexing data frames
raw_data[, 4]
raw_data[, "Gender"]
raw_data[, 1:3]
raw_data[, c("CustomerID", "Region", "TownSize")]
raw_data[1, ]

# check out the first 6 rows with:
head(raw_data)

# Lists
model <- lm(mpg ~ wt, data = mtcars)
summary(model)
names(model)
str(model)

# Indexing lists
model["residuals"]    # FIX: replaced curly "smart quotes" (a syntax error in R)
model[["residuals"]]  # FIX: replaced curly "smart quotes" (a syntax error in R)
model$residuals
/Internet/2018 - Intro to R Bootcamp/Module 02/script2.R
|
permissive
|
tarsoqueiroz/Rlang
|
R
| false
| false
| 1,837
|
r
|
# Load required packages
library(readxl)
library(tidyverse)

################ IMPORTING DATA ################
# Reading in CSV files
read_csv("data/mydata.csv")
# Read in and save data
mydata <- read_csv("data/mydata.csv")
# reading in an Excel file
excel_sheets("data/mydata.xlsx")
read_excel("data/mydata.xlsx", sheet = "PICK_ME_FIRST!")

#############
# YOUR TURN #
#############

# dimension of data
data(mtcars)
nrow(mtcars)
ncol(mtcars)
dim(mtcars)

# what are the variables
names(mtcars)
glimpse(mtcars)
View(mtcars)

# missing values
data(airquality)
is.na(airquality)
sum(is.na(airquality))
colSums(is.na(airquality))
clean_data <- na.omit(airquality)

################ DATA STRUCTURES ################
# Creating vectors
c("Learning", "to", "create", "character", "vectors")
c(3, 2, 10, 55)
c(TRUE, FALSE, FALSE, FALSE, TRUE)
6:15
15.5:6.75

# Indexing vectors
v1 <- 1:10
v1[4]
v1[4:7]
v1[c(4, 3, 4)]
v1[v1 > 6]

# different summaries of vectors
length(v1)
summary(v1)
mean(v1)
median(v1)
v1 > 5

#############
# YOUR TURN #
#############

# Creating matrices
set.seed(123)
v1 <- sample(1:10, 25, replace = TRUE)
m1 <- matrix(v1, nrow = 5)
m1

# Indexing matrices
m1[1, 3]
m1[ , 1:3]
m1[1:3, ]

# Summaries of matrices
summary(m1)
mean(m1)
mean(m1[1, ])   # FIX: was `mean(m[1,])` -- `m` is never defined; matrix is `m1`
rowMeans(m1)
colMeans(m1)
rowSums(m1)
colSums(m1)
m1 > .5         # FIX: was `m > .5`
sum(m1 > .5)    # FIX: was `sum(m > .5)`
which(m1 > .5)  # FIX: was `which(m > .5)`
m1[m1 > .5]     # FIX: was `m[m > .5]`

#############
# YOUR TURN #
#############

# Data frames
raw_data <- read_csv("data/CustomerData.csv")

# indexing data frames
raw_data[, 4]
raw_data[, "Gender"]
raw_data[, 1:3]
raw_data[, c("CustomerID", "Region", "TownSize")]
raw_data[1, ]

# check out the first 6 rows with:
head(raw_data)

# Lists
model <- lm(mpg ~ wt, data = mtcars)
summary(model)
names(model)
str(model)

# Indexing lists
model["residuals"]    # FIX: replaced curly "smart quotes" (a syntax error in R)
model[["residuals"]]  # FIX: replaced curly "smart quotes" (a syntax error in R)
model$residuals
#
# samplesize_twofactorindindpower <- function(k1=3,k2=3,effectsize=0.8,sig_level = 0.05,power=0.8,forplot=FALSE,samplerange=NULL,effectsizerange=NULL){
# NOTE(review): the function header above (and the closing brace at the
# bottom) are commented out, so this file runs as a flat script and expects
# k1, k2, effectsize, sig_level, power, forplot, samplerange, and
# effectsizerange to already exist in the calling environment -- confirm
# this is intentional before sourcing.
k=k1*k2
library(pwr);library(jsonlite)
if(!forplot){
# Single sample size: one-way ANOVA power calculation over the k = k1*k2 cells.
result = ceiling(pwr.anova.test(k = k, f = effectsize, sig.level = sig_level, power = power)$n)
}else{
# Otherwise build power curves over a grid of effect sizes and sample sizes.
if(is.null(effectsizerange)){
effectsizes = c(0.2,0.5,0.8)
}else{
# effectsizerange is a comma-separated string, e.g. "0.2,0.5,0.8".
effectsizes = as.numeric(strsplit(effectsizerange, ",")[[1]])
}
if(is.null(samplerange)){
n = seq(5,100,by=5)
}else{
# samplerange format: "from,to,by" with items separated by "-" within parts
# -- TODO confirm the exact expected format against the caller.
forn = as.numeric(unlist(strsplit(strsplit(samplerange, ",")[[1]],"-")))
n=seq(forn[1],forn[2],by=forn[3])
}
ns = list()
powers = list()
for(i in 1:length(effectsizes)){
# Power of the F test via the non-central F distribution (ncp = f^2 * k * n).
df1 = k-1
df2 = k*n-k
ncp = effectsizes[i]^2 * k*n
# NOTE(review): this overwrites the `power` input value on first iteration.
power = pf(qf(sig_level,df1,df2,lower.tail = F),df1,df2,ncp,lower.tail = F)
ns[[as.character(effectsizes[i])]] = n
powers[[as.character(effectsizes[i])]] = power
}
# ceiling(power.t.test(delta = effectsize,power = power, sig.level = sig_level)$n)
result = list(ns = ns, powers = powers)
}
# }
|
/MetDADevelopmentR/rscript/samplesize_twofactorindindpower.R
|
no_license
|
slfan2013/MetDA-development
|
R
| false
| false
| 1,137
|
r
|
#
# samplesize_twofactorindindpower <- function(k1=3,k2=3,effectsize=0.8,sig_level = 0.05,power=0.8,forplot=FALSE,samplerange=NULL,effectsizerange=NULL){
k=k1*k2
library(pwr);library(jsonlite)
if(!forplot){
result = ceiling(pwr.anova.test(k = k, f = effectsize, sig.level = sig_level, power = power)$n)
}else{
if(is.null(effectsizerange)){
effectsizes = c(0.2,0.5,0.8)
}else{
effectsizes = as.numeric(strsplit(effectsizerange, ",")[[1]])
}
if(is.null(samplerange)){
n = seq(5,100,by=5)
}else{
forn = as.numeric(unlist(strsplit(strsplit(samplerange, ",")[[1]],"-")))
n=seq(forn[1],forn[2],by=forn[3])
}
ns = list()
powers = list()
for(i in 1:length(effectsizes)){
df1 = k-1
df2 = k*n-k
ncp = effectsizes[i]^2 * k*n
power = pf(qf(sig_level,df1,df2,lower.tail = F),df1,df2,ncp,lower.tail = F)
ns[[as.character(effectsizes[i])]] = n
powers[[as.character(effectsizes[i])]] = power
}
# ceiling(power.t.test(delta = effectsize,power = power, sig.level = sig_level)$n)
result = list(ns = ns, powers = powers)
}
# }
|
#' @title Generates a new set of knots for the following iteration
#' @description A sub-function of \code{\link{StagedChoiceSplineMix}}. This function generates a new set of knots for the following iteration. Each knot is randomly moved left, kept, or moved right (with probabilities 0.2/0.6/0.2); proposals that violate the ordering or range constraints are rejected and the old knot pair is kept. Please refer to Bruch et al. (in press) for the precise rule used.
#' @param num.knot See \code{\link{StagedChoiceSplineMix}} for details.
#' @param sp.knots See \code{\link{StagedChoiceSplineMix}} for details.
#' @param k See \code{\link{StagedChoiceSplineMix}} for details.
#' @return A list with one element, \code{newknot}: a k x 4 matrix holding the new (k1, k2) pairs for the "b" and "w" splines.
#' @references
#' Bruch, E., F. Feinberg, K. Lee (in press), "Detecting Cupid's Vector: Extracting Decision Rules from Online Dating Activity Data," \emph{Proceedings of the National Academy of Sciences}.
#' @seealso
#' \code{\link{StagedChoiceSplineMix}}
#' @export
move.knot<-function(num.knot,sp.knots,k)
{
  min.knot <- 1
  max.knot <- num.knot
  km.prob <- c(0.2, 0.6, 0.2)  # P(move left), P(stay), P(move right)

  # Propose a +/-1 perturbation of a (k1, k2) knot pair; if the proposal
  # leaves the valid region (min.knot <= k1 < max.knot, k1 < k2 <= max.knot)
  # keep the old pair. Extracted to remove the duplicated b/w code; the two
  # sample() draws preserve the RNG stream of the original implementation.
  # FIX: `replace = TRUE` spelled out (was `T`, which is reassignable).
  perturb <- function(old) {
    k1.step <- sample(1:3, 1, replace = TRUE, prob = km.prob)
    k1.new <- old[1] + (k1.step - 2)
    k2.step <- sample(1:3, 1, replace = TRUE, prob = km.prob)
    k2.new <- old[2] + (k2.step - 2)
    invalid <- (k1.new == (min.knot - 1) | k1.new == max.knot |
                k2.new <= k1.new | k2.new > max.knot)
    if (invalid) old else c(k1.new, k2.new)
  }

  newknot.b <- matrix(double(k * 2), ncol = 2)
  newknot.w <- matrix(double(k * 2), ncol = 2)
  for (i in seq_len(k)) {  # FIX: seq_len() is safe when k == 0 (1:k is not)
    newknot.b[i, ] <- perturb(sp.knots[i, 1:2])
    newknot.w[i, ] <- perturb(sp.knots[i, 3:4])
  }
  newknot <- cbind(newknot.b, newknot.w)
  list(newknot = newknot)
}
|
/R/move.knot.r
|
no_license
|
cran/StagedChoiceSplineMix
|
R
| false
| false
| 2,157
|
r
|
#' @title Generates a new set of knots for the following iteration
#' @description A sub-function of \code{\link{StagedChoiceSplineMix}}. This function generates a new set of knots for the following iteration. Each knot is randomly moved left, kept, or moved right (with probabilities 0.2/0.6/0.2); proposals that violate the ordering or range constraints are rejected and the old knot pair is kept. Please refer to Bruch et al. (in press) for the precise rule used.
#' @param num.knot See \code{\link{StagedChoiceSplineMix}} for details.
#' @param sp.knots See \code{\link{StagedChoiceSplineMix}} for details.
#' @param k See \code{\link{StagedChoiceSplineMix}} for details.
#' @return A list with one element, \code{newknot}: a k x 4 matrix holding the new (k1, k2) pairs for the "b" and "w" splines.
#' @references
#' Bruch, E., F. Feinberg, K. Lee (in press), "Detecting Cupid's Vector: Extracting Decision Rules from Online Dating Activity Data," \emph{Proceedings of the National Academy of Sciences}.
#' @seealso
#' \code{\link{StagedChoiceSplineMix}}
#' @export
move.knot<-function(num.knot,sp.knots,k)
{
  min.knot <- 1
  max.knot <- num.knot
  km.prob <- c(0.2, 0.6, 0.2)  # P(move left), P(stay), P(move right)

  # Propose a +/-1 perturbation of a (k1, k2) knot pair; if the proposal
  # leaves the valid region (min.knot <= k1 < max.knot, k1 < k2 <= max.knot)
  # keep the old pair. Extracted to remove the duplicated b/w code; the two
  # sample() draws preserve the RNG stream of the original implementation.
  # FIX: `replace = TRUE` spelled out (was `T`, which is reassignable).
  perturb <- function(old) {
    k1.step <- sample(1:3, 1, replace = TRUE, prob = km.prob)
    k1.new <- old[1] + (k1.step - 2)
    k2.step <- sample(1:3, 1, replace = TRUE, prob = km.prob)
    k2.new <- old[2] + (k2.step - 2)
    invalid <- (k1.new == (min.knot - 1) | k1.new == max.knot |
                k2.new <= k1.new | k2.new > max.knot)
    if (invalid) old else c(k1.new, k2.new)
  }

  newknot.b <- matrix(double(k * 2), ncol = 2)
  newknot.w <- matrix(double(k * 2), ncol = 2)
  for (i in seq_len(k)) {  # FIX: seq_len() is safe when k == 0 (1:k is not)
    newknot.b[i, ] <- perturb(sp.knots[i, 1:2])
    newknot.w[i, ] <- perturb(sp.knots[i, 3:4])
  }
  newknot <- cbind(newknot.b, newknot.w)
  list(newknot = newknot)
}
|
##split test/train dataset for cross-validation, generating the input tables and lists of predictors for the models
set.seed(1234)
##function to generate the list of variables to include in an analysis
##vars = initial list of variables to include (some may be removed if there are few unique values)
##tab = table of data
##name = suffix used in output files
fixVars <- function(vars, tab, name) {
# NOTE(review): relies on the globals `flag` and `co` (set by the calling
# loop) for the output file names, and the locals `any`/`pct`/`cnt` below
# shadow base functions -- works, but worth renaming eventually.
print(name)
##remove variables with only one value (not informative and can mess up some algorithms)
for(v in vars) {
if(!(grepl("pct", v) | grepl("cnt", v)) & !v %in% c("timediff", "casecnt", "nbhflag", "qtrsbtwflags", "nbhbase", "nbhcount", "prebasecases")) { #keep continuous variables if have more than 5 unique values
counts = table(tab[,v])
# Categorical-style variables: drop if near-constant or any level is rare (<5).
if(length(unique(tab[,v])) < 2 | any(counts < 5)) {
print(paste("removed", v))
vars = vars[vars!=v]
}
} else {
# Continuous/count-style variables only need to be non-constant.
if(length(unique(tab[,v])) < 2) {
print(paste("removed", v))
vars = vars[vars!=v]
}
}
}
# Partition the surviving variables by suffix for per-representation lists.
any = vars[grepl("_any", vars)]
pct = vars[grepl("_pct", vars)]
cnt = vars[grepl("_cnt", vars)]
vars = vars[!vars %in% cnt] #remove count
hlf = vars[grepl("_half", vars)]
oth = c("timediff", "nbhcount", "nbhbase", "incident", "prebasecases")
print(vars[!vars %in% c(any,pct,cnt,hlf,oth)]) #should only be empty
# Write one predictor list per representation (any/pct/half) plus the full list.
write.table(data.frame(k = c(any, oth)), paste0("variablesList_", name, "_", flag, "_", co, "_any.txt"), row.names = F, col.names = F, quote = F)
write.table(data.frame(k = c(pct, oth)), paste0("variablesList_", name, "_", flag, "_", co, "_pct.txt"), row.names = F, col.names = F, quote = F)
write.table(data.frame(k = c(hlf, oth)), paste0("variablesList_", name, "_", flag, "_", co, "_half.txt"), row.names = F, col.names = F, quote = F)
write.table(data.frame(k = vars), paste0("variablesList_", name, "_", flag, "_", co, "_all.txt"), row.names = F, col.names = F, quote = F)
return(vars)
}
###split each cohort into cross-validation datasets
flag = "first" #include first flag only
cohorts = c("1Q", "2Q", "4Q", "9Q")
yr = paste0(1, "yrfu") #follow up period number of years for outcome (years)
for(co in cohorts) {
tab = read.csv(paste0("../data/Fulldata", tolower(co), "_072920.csv"), header = T, stringsAsFactors = F)
names(tab) = gsub("prev_", "prevtb_", names(tab)) #these are the same variables but have different starts
allflags = tab
##set up outcome based on number of years of follow-up
tab$maxaccumxs = tab$accum1yr
##classification
tab$outcome = ifelse(tab$maxaccumxs <= 0, "cluster", "outbreak")
##set up incident/prevalent
tab$incident = ifelse(tab$clustyp=="Incident", 2, 1)
tab$prebasecases[is.na(tab$prebasecases)] = 0
name = paste(flag, yr, co, sep="_")
print(name)
###split data for cross validation
tab = tab[tab$initflag=="Y",]
n = nrow(tab)/5
sp5 = 1:nrow(tab) #5th split will be the remainder of the others
sp1 = sample(x = sp5, size=n+1, replace = F) #remainder of 3 when divide by 5, distribute
sp5 = sp5[!sp5 %in% sp1]
sp2 = sample(x = sp5, size=n+1, replace = F)
sp5 = sp5[!sp5 %in% sp2]
sp3 = sample(x = sp5, size=n, replace = F)
sp5 = sp5[!sp5 %in% sp3]
sp4 = sample(x = sp5, size=n, replace = F)
sp5 = sp5[!sp5 %in% sp4]
write.csv(tab[c(sp2, sp3, sp4, sp5),], paste0("trainingSet1C_", name, ".csv"), row.names = F)
write.csv(tab[sp1,], paste0("testSet1C_", name, ".csv"), row.names = F)
write.csv(tab[c(sp1, sp3, sp4, sp5),], paste0("trainingSet2C_", name, ".csv"), row.names = F)
write.csv(tab[sp2,], paste0("testSet2C_", name, ".csv"), row.names = F)
write.csv(tab[c(sp1, sp2, sp4, sp5),], paste0("trainingSet3C_", name, ".csv"), row.names = F)
write.csv(tab[sp3,], paste0("testSet3C_", name, ".csv"), row.names = F)
write.csv(tab[c(sp1, sp2, sp3, sp5),], paste0("trainingSet4C_", name, ".csv"), row.names = F)
write.csv(tab[sp4,], paste0("testSet4C_", name, ".csv"), row.names = F)
write.csv(tab[c(sp1, sp2, sp3, sp4),], paste0("trainingSet5C_", name, ".csv"), row.names = F)
write.csv(tab[sp5,], paste0("testSet5C_", name, ".csv"), row.names = F)
####check splits
for(r in 1:5) {
train = read.csv(paste0("trainingSet", r, "C_", name, ".csv"), header = T, stringsAsFactors = F)
test = read.csv(paste0("testSet", r, "C_", name, ".csv"), header = T, stringsAsFactors = F)
if(nrow(train) + nrow(test) != nrow(tab)) {
stop("Incorrect number of rows")
}
clust = c(train$nbh_no, test$nbh_no)
if(any(duplicated(clust))) {
stop("Duplicated IDs")
}
if(!all(tab$nbh_no %in% clust)) {
stop("Missing IDs")
}
}
####set up list of variables
vars = c("nbhbase", "nbhcount", "timediff", "incident", "prebasecases",
names(tab)[which(names(tab)=="hl_pct"):which(names(tab)=="margin_half")])
##remove drug, substance, and margin
vars = vars[!grepl("subs", vars) & !grepl("margin", vars) & !grepl("drug", vars)]
vc = fixVars(vars, tab, "comb")
}
|
/machine_learning/1_format_model_inputs.R
|
permissive
|
CDCgov/Predicting_TB_cluster_growth
|
R
| false
| false
| 5,011
|
r
|
##split test/train dataset for cross-validation, generating the input tables and lists of predictors for the models
set.seed(1234)
##function to generate the list of variables to include in an analysis
##vars = initial list of variables to include (some may be removed if there are few unique values)
##tab = table of data
##name = suffix used in output files
fixVars <- function(vars, tab, name) {
print(name)
##remove variables with only one value (not informative and can mess up some algorithms)
for(v in vars) {
if(!(grepl("pct", v) | grepl("cnt", v)) & !v %in% c("timediff", "casecnt", "nbhflag", "qtrsbtwflags", "nbhbase", "nbhcount", "prebasecases")) { #keep continuous variables if have more than 5 unique values
counts = table(tab[,v])
if(length(unique(tab[,v])) < 2 | any(counts < 5)) {
print(paste("removed", v))
vars = vars[vars!=v]
}
} else {
if(length(unique(tab[,v])) < 2) {
print(paste("removed", v))
vars = vars[vars!=v]
}
}
}
any = vars[grepl("_any", vars)]
pct = vars[grepl("_pct", vars)]
cnt = vars[grepl("_cnt", vars)]
vars = vars[!vars %in% cnt] #remove count
hlf = vars[grepl("_half", vars)]
oth = c("timediff", "nbhcount", "nbhbase", "incident", "prebasecases")
print(vars[!vars %in% c(any,pct,cnt,hlf,oth)]) #should only be empty
write.table(data.frame(k = c(any, oth)), paste0("variablesList_", name, "_", flag, "_", co, "_any.txt"), row.names = F, col.names = F, quote = F)
write.table(data.frame(k = c(pct, oth)), paste0("variablesList_", name, "_", flag, "_", co, "_pct.txt"), row.names = F, col.names = F, quote = F)
write.table(data.frame(k = c(hlf, oth)), paste0("variablesList_", name, "_", flag, "_", co, "_half.txt"), row.names = F, col.names = F, quote = F)
write.table(data.frame(k = vars), paste0("variablesList_", name, "_", flag, "_", co, "_all.txt"), row.names = F, col.names = F, quote = F)
return(vars)
}
###split each cohort into cross-validation datasets
flag = "first" #include first flag only
cohorts = c("1Q", "2Q", "4Q", "9Q")
yr = paste0(1, "yrfu") #follow up period number of years for outcome (years)
for(co in cohorts) {
tab = read.csv(paste0("../data/Fulldata", tolower(co), "_072920.csv"), header = T, stringsAsFactors = F)
names(tab) = gsub("prev_", "prevtb_", names(tab)) #these are the same variables but have different starts
allflags = tab
##set up outcome based on number of years of follow-up
tab$maxaccumxs = tab$accum1yr
##classification
tab$outcome = ifelse(tab$maxaccumxs <= 0, "cluster", "outbreak")
##set up incident/prevalent
tab$incident = ifelse(tab$clustyp=="Incident", 2, 1)
tab$prebasecases[is.na(tab$prebasecases)] = 0
name = paste(flag, yr, co, sep="_")
print(name)
###split data for cross validation
tab = tab[tab$initflag=="Y",]
n = nrow(tab)/5
sp5 = 1:nrow(tab) #5th split will be the remainder of the others
sp1 = sample(x = sp5, size=n+1, replace = F) #remainder of 3 when divide by 5, distribute
sp5 = sp5[!sp5 %in% sp1]
sp2 = sample(x = sp5, size=n+1, replace = F)
sp5 = sp5[!sp5 %in% sp2]
sp3 = sample(x = sp5, size=n, replace = F)
sp5 = sp5[!sp5 %in% sp3]
sp4 = sample(x = sp5, size=n, replace = F)
sp5 = sp5[!sp5 %in% sp4]
write.csv(tab[c(sp2, sp3, sp4, sp5),], paste0("trainingSet1C_", name, ".csv"), row.names = F)
write.csv(tab[sp1,], paste0("testSet1C_", name, ".csv"), row.names = F)
write.csv(tab[c(sp1, sp3, sp4, sp5),], paste0("trainingSet2C_", name, ".csv"), row.names = F)
write.csv(tab[sp2,], paste0("testSet2C_", name, ".csv"), row.names = F)
write.csv(tab[c(sp1, sp2, sp4, sp5),], paste0("trainingSet3C_", name, ".csv"), row.names = F)
write.csv(tab[sp3,], paste0("testSet3C_", name, ".csv"), row.names = F)
write.csv(tab[c(sp1, sp2, sp3, sp5),], paste0("trainingSet4C_", name, ".csv"), row.names = F)
write.csv(tab[sp4,], paste0("testSet4C_", name, ".csv"), row.names = F)
write.csv(tab[c(sp1, sp2, sp3, sp4),], paste0("trainingSet5C_", name, ".csv"), row.names = F)
write.csv(tab[sp5,], paste0("testSet5C_", name, ".csv"), row.names = F)
####check splits
for(r in 1:5) {
train = read.csv(paste0("trainingSet", r, "C_", name, ".csv"), header = T, stringsAsFactors = F)
test = read.csv(paste0("testSet", r, "C_", name, ".csv"), header = T, stringsAsFactors = F)
if(nrow(train) + nrow(test) != nrow(tab)) {
stop("Incorrect number of rows")
}
clust = c(train$nbh_no, test$nbh_no)
if(any(duplicated(clust))) {
stop("Duplicated IDs")
}
if(!all(tab$nbh_no %in% clust)) {
stop("Missing IDs")
}
}
####set up list of variables
vars = c("nbhbase", "nbhcount", "timediff", "incident", "prebasecases",
names(tab)[which(names(tab)=="hl_pct"):which(names(tab)=="margin_half")])
##remove drug, substance, and margin
vars = vars[!grepl("subs", vars) & !grepl("margin", vars) & !grepl("drug", vars)]
vc = fixVars(vars, tab, "comb")
}
|
#############################################################################
#
# XLConnect
# Copyright (C) 2010-2021 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# Appending data to a worksheet
#
# Author: Martin Studer, Mirai Solutions GmbH
#
#############################################################################
# Generic: append one or more data.frames to an existing worksheet of a
# workbook, identified either by index (numeric) or by name (character).
setGeneric("appendWorksheet",
function(object, data, sheet, header = FALSE, rownames = NULL) standardGeneric("appendWorksheet"))
# Method for numeric sheet indices (1-based on the R side).
setMethod("appendWorksheet",
signature(object = "workbook", data = "ANY", sheet = "numeric"),
function(object, data, sheet, header = FALSE, rownames = NULL) {
# If a rownames column name is given, materialize the row names as a column.
if(is.character(rownames))
data <- includeRownames(data, rownames)
# pass data.frame's to Java - construct RDataFrameWrapper Java object references
data <- lapply(wrapList(data), dataframeToJava)
# Java uses 0-based sheet indices, hence `sheet - 1`.
xlcCall(object, "appendWorksheet", data, as.integer(sheet - 1), header)
invisible()
}
)
# Method for sheet names; the name is passed to Java unchanged.
setMethod("appendWorksheet",
signature(object = "workbook", data = "ANY", sheet = "character"),
function(object, data, sheet, header = FALSE, rownames = NULL) {
if(is.character(rownames))
data <- includeRownames(data, rownames)
# pass data.frame's to Java - construct RDataFrameWrapper Java object references
data <- lapply(wrapList(data), dataframeToJava)
xlcCall(object, "appendWorksheet", data, sheet, header)
invisible()
}
)
|
/R/workbook.appendWorksheet.R
|
no_license
|
harisxue/xlconnect
|
R
| false
| false
| 2,194
|
r
|
#############################################################################
#
# XLConnect
# Copyright (C) 2010-2021 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# Appending data to a worksheet
#
# Author: Martin Studer, Mirai Solutions GmbH
#
#############################################################################
setGeneric("appendWorksheet",
function(object, data, sheet, header = FALSE, rownames = NULL) standardGeneric("appendWorksheet"))
setMethod("appendWorksheet",
signature(object = "workbook", data = "ANY", sheet = "numeric"),
function(object, data, sheet, header = FALSE, rownames = NULL) {
if(is.character(rownames))
data <- includeRownames(data, rownames)
# pass data.frame's to Java - construct RDataFrameWrapper Java object references
data <- lapply(wrapList(data), dataframeToJava)
xlcCall(object, "appendWorksheet", data, as.integer(sheet - 1), header)
invisible()
}
)
setMethod("appendWorksheet",
signature(object = "workbook", data = "ANY", sheet = "character"),
function(object, data, sheet, header = FALSE, rownames = NULL) {
if(is.character(rownames))
data <- includeRownames(data, rownames)
# pass data.frame's to Java - construct RDataFrameWrapper Java object references
data <- lapply(wrapList(data), dataframeToJava)
xlcCall(object, "appendWorksheet", data, sheet, header)
invisible()
}
)
|
#' trigram feature vector
#' @description This feature vector is an 8000-dimensional feature vector which is computed from the tri-gram probability matrix
#'T obtained from the PSSM matrix. To achieve this, elements in three successive rows and arbitrary columns are multiplied
#'together, then these products are summed by changing variable i from 1 to L-2, where i is the row counter and L
#'indicates protein length. Since there are 20 columns the final feature vector is of length 20^3 = 8000.
#' @param pssm_name name of PSSM Matrix file
#' @import utils
#' @return feature vector of length 8000
#' @references
#' Paliwal, K.K., et al. (2014) A tri-gram based feature extraction technique using linear probabilities of position
#' specific scoring matrix for protein fold recognition, IEEE transactions on nanobioscience, 13, 44-50
#' @export
#' @examples
#' as<-trigrame_pssm(paste0(system.file("extdata",package="PSSMCOOL"),"/C7GSI6.txt.pssm"))
trigrame_pssm<-function(pssm_name){
# Parse the PSSM file: skip the 2-line header, then drop the label row and
# the non-score columns, keeping the 20 substitution-score columns.
x<-read.delim(pssm_name,skip = 2,sep = "",header = FALSE)
x<-x[-1,-c(1,23:44)]
# Drop the footer (everything from the "Lambda" line onward), if present.
d<-which(x=="Lambda")
if(length(d)!=0){
x<-x[-c(d:dim(x)[1]),]
}
x<-x[,-1]
colnames(x)<-NULL
rownames(x)<-NULL
x<-as.matrix(x)
mode(x)<-"integer"
p<-x
# Logistic transform maps the raw PSSM scores into (0, 1).
p<-1/(1+exp(-p))
L<-dim(p)[1]
t<-array(0,dim = c(20,20,20))
k<-1
vec<-vector(mode = "numeric",length = 8000)
# t[m,n,r] = sum over sliding windows i = 1..L-2 of p[i,m]*p[i+1,n]*p[i+2,r],
# flattened into `vec` in (m, n, r) nesting order.
# NOTE(review): O(20^3 * L) nested loops -- correct but slow; the innermost
# sum could be vectorized over i if performance ever matters.
for(m in 1:20){
for(n in 1:20){
for(r in 1:20){
for(i in 1:(L-2)){
t[m,n,r]<-t[m,n,r]+p[i,m]*p[i+1,n]*p[i+2,r]
}
vec[k]<-t[m,n,r]
k<-k+1
}
}
}
return(round(vec,digits = 4))
}
#system.time({trigrame_pssm(paste0(system.file("extdata",package="PSSMFeatures"),"/C7GSI6.txt.pssm"))})
|
/R/AAtrigrame_pssm.R
|
no_license
|
minghao2016/PSSMCOOL-1
|
R
| false
| false
| 1,713
|
r
|
#' trigrame feature vector
#' @description Builds the 8000-dimensional tri-gram feature vector from the tri-gram probability tensor derived
#' from a PSSM matrix: entries taken from three successive rows (and arbitrary columns) of the sigmoid-transformed
#' PSSM are multiplied together and accumulated along the sequence, yielding 20 x 20 x 20 = 8000 values.
#' @param pssm_name name of PSSM Matrix file
#' @import utils
#' @return feature vector of length 8000
#' @references
#' Paliwal, K.K., et al. (2014) A tri-gram based feature extraction technique using linear probabilities of position
#' specific scoring matrix for protein fold recognition, IEEE transactions on nanobioscience, 13, 44-50
#' @export
#' @examples
#' as<-trigrame_pssm(paste0(system.file("extdata",package="PSSMCOOL"),"/C7GSI6.txt.pssm"))
trigrame_pssm <- function(pssm_name) {
  # Read the raw PSSM table, skipping the two leading description lines.
  raw <- read.delim(pssm_name, skip = 2, sep = "", header = FALSE)
  # Drop the header row, the position column and the trailing statistics block.
  raw <- raw[-1, -c(1, 23:44)]
  footer <- which(raw == "Lambda")
  if (length(footer) != 0) {
    raw <- raw[-c(footer:dim(raw)[1]), ]
  }
  raw <- raw[, -1]  # remove the residue-letter column
  colnames(raw) <- NULL
  rownames(raw) <- NULL
  raw <- as.matrix(raw)
  mode(raw) <- "integer"
  # Logistic transform of the integer scores.
  prob <- 1 / (1 + exp(-raw))
  L <- dim(prob)[1]
  vec <- vector(mode = "numeric", length = 8000)
  k <- 1
  for (m in 1:20) {
    for (n in 1:20) {
      for (r in 1:20) {
        # Accumulate the tri-gram sum directly; no 3-D array is needed since
        # each (m, n, r) cell is written out immediately.
        acc <- 0
        for (i in 1:(L - 2)) {
          acc <- acc + prob[i, m] * prob[i + 1, n] * prob[i + 2, r]
        }
        vec[k] <- acc
        k <- k + 1
      }
    }
  }
  round(vec, digits = 4)
}
|
# NOTE(review): transcribed lecture notes on reading/writing data in R.
# The referenced files ("foo.txt", "datatable.txt", "words.gz") are
# placeholders and must exist in the working directory for this to run.
data <- read.table("foo.txt")
## Reading in Larger Datasets with read.table
# Read a small sample first, infer each column's class, then reread the whole
# file with colClasses fixed (faster for large tables).
initial <- read.table("datatable.txt", nrows = 100)
classes <- sapply(initial, class)
tabAll <- read.table("datatable.txt", colClasses = classes)
## dput-ting R Objects
y <- data.frame(a = 1, b = "a")
dput(y)
# The structure(...) expression below is the text dput() prints; evaluating it
# reconstructs an equivalent data.frame.
structure(list(a = 1,
               b = structure(1L, .Label = "a",
                             class = "factor")),
          .Names = c("a", "b"), row.names = c(NA, -1L),
          class = "data.frame")
dput(y, file = "y.R")   # serialize to a file ...
new.y <- dget("y.R")    # ... and read it back
new.y
## Dumping R Objects
# dump() writes named objects as R source; source() restores them by name.
x <- "foo"
y <- data.frame(a = 1, b = "a")
dump(c("x", "y"), file = "data.R")
rm(x, y)
source("data.R")
y
x
## Interfaces to the Outside World
str(file)
# NOTE(review): the next two lines are the *output* of str(file) pasted into
# the script; as code they parse as an anonymous function expression whose
# body becomes the following statement, so this section is not runnable as-is.
function (description = "", open = "", blocking = TRUE,
          encoding = getOption("encoding"))
## Connections
con <- file("foo.txt", "r")
data <- read.csv(con)
close(con)
data <- read.csv("foo.txt")  # shortcut: read.csv opens/closes the file itself
## Reading Lines of a Text File
con <- gzfile("words.gz")
x <- readLines(con, 10)      # first 10 lines of a gzip-compressed file
x
## Reading Lines of a Text File
# readLines() also works on URL connections.
con <- url("http://www.jhsph.edu", "r")
x <- readLines(con)
head(x)
|
/2. R Programming/ReadingData.R
|
no_license
|
shubhamjanhere/Coursera-Data-Science-Specialization
|
R
| false
| false
| 1,117
|
r
|
# NOTE(review): transcribed lecture notes on reading/writing data in R.
# The referenced files ("foo.txt", "datatable.txt", "words.gz") are
# placeholders and must exist in the working directory for this to run.
data <- read.table("foo.txt")
## Reading in Larger Datasets with read.table
# Read a small sample first, infer each column's class, then reread the whole
# file with colClasses fixed (faster for large tables).
initial <- read.table("datatable.txt", nrows = 100)
classes <- sapply(initial, class)
tabAll <- read.table("datatable.txt", colClasses = classes)
## dput-ting R Objects
y <- data.frame(a = 1, b = "a")
dput(y)
# The structure(...) expression below is the text dput() prints; evaluating it
# reconstructs an equivalent data.frame.
structure(list(a = 1,
               b = structure(1L, .Label = "a",
                             class = "factor")),
          .Names = c("a", "b"), row.names = c(NA, -1L),
          class = "data.frame")
dput(y, file = "y.R")   # serialize to a file ...
new.y <- dget("y.R")    # ... and read it back
new.y
## Dumping R Objects
# dump() writes named objects as R source; source() restores them by name.
x <- "foo"
y <- data.frame(a = 1, b = "a")
dump(c("x", "y"), file = "data.R")
rm(x, y)
source("data.R")
y
x
## Interfaces to the Outside World
str(file)
# NOTE(review): the next two lines are the *output* of str(file) pasted into
# the script; as code they parse as an anonymous function expression whose
# body becomes the following statement, so this section is not runnable as-is.
function (description = "", open = "", blocking = TRUE,
          encoding = getOption("encoding"))
## Connections
con <- file("foo.txt", "r")
data <- read.csv(con)
close(con)
data <- read.csv("foo.txt")  # shortcut: read.csv opens/closes the file itself
## Reading Lines of a Text File
con <- gzfile("words.gz")
x <- readLines(con, 10)      # first 10 lines of a gzip-compressed file
x
## Reading Lines of a Text File
# readLines() also works on URL connections.
con <- url("http://www.jhsph.edu", "r")
x <- readLines(con)
head(x)
|
#' Select TextGrid Tier by its name
#'
#' This function takes a TextGrid and a name (character string), and returns the specified tier from that TextGrid
#' @param tg TextGrid data, returned from readTextGrid() function
#' @param tier_name character string. The name of the tier to retrieve from TextGrid \code{tg}
#' @return The rows of \code{tg$item} whose \code{name} column equals \code{tier_name}.
#' @export
#' @examples
#' # Get word tier from data in mytg.TextGrid
#' tg_data <- readTextGrid("mytg.TextGrid")
#' getTierByName(tg_data, "word")
#'
#' # You may also chain with %>% from dplyr
#' readTextGrid("mytg.TextGrid") %>%
#'     getTierByName("word")
#'
getTierByName <- function(tg, tier_name) {
  if (!tier_name %in% tg$item$name) {
    # stop() already labels the condition "Error"; do not duplicate the prefix
    # in the message, and suppress the call for a cleaner user-facing message.
    stop(paste0("tier \"", tier_name, "\" not found in TextGrid."), call. = FALSE)
  }
  tg$item[which(tg$item$name == tier_name), ]
}
|
/R/getTierByName.R
|
no_license
|
fauxneticien/phonpack
|
R
| false
| false
| 801
|
r
|
#' Select TextGrid Tier by its name
#'
#' Given a TextGrid and a tier name, returns the matching tier from that TextGrid.
#' @param tg TextGrid data, returned from readTextGrid() function
#' @param tier_name character string. The name of the tier to retrieve from TextGrid \code{tg}
#' @export
#' @examples
#' # Get word tier from data in mytg.TextGrid
#' tg_data <- readTextGrid("mytg.TextGrid")
#' getTierByName(tg_data, "word")
#'
#' # You may also chain with %>% from dplyr
#' readTextGrid("mytg.TextGrid") %>%
#'     getTierByName("word")
#'
getTierByName <- function(tg, tier_name) {
  # Guard-clause form: return the matching rows if the tier exists,
  # otherwise raise the (unchanged) error message.
  available <- tg$item$name
  if (tier_name %in% available) {
    return(tg$item[which(available == tier_name), ])
  }
  stop(paste0("Error: tier \"", tier_name, "\" not found in TextGrid."))
}
|
########################################################################################################################
## RnBSet-class.R
## created: 2012-04-06
## creator: Pavlo Lutsik
## ---------------------------------------------------------------------------------------------------------------------
## RnBSet class definition.
########################################################################################################################
## GLOBALS
# Names of the slots holding the large per-site data matrices of an RnBSet.
RNBSET.SLOTNAMES<-c("meth.sites", "covg.sites")
##
## ---------------------------------------------------------------------------------------------------------------------
## CLASS DEFINITIONS
## ---------------------------------------------------------------------------------------------------------------------
#' @include bigFf.R
# Register the S3 class from the ff package so it can appear in S4 signatures.
setOldClass(c("ff_matrix"))
# Class unions allowing matrix-valued slots to hold an in-memory matrix, a
# disk-backed ff matrix, a BigFfMat, or NULL where the data is optional.
setClassUnion("matrixOrff", c("matrix", "ff_matrix"))
setClassUnion("matrixOrffOrBigFfMat", c("matrix", "ff_matrix", "BigFfMat"))
setClassUnion("matrixOrffOrNULL", c("matrix", "ff_matrix", "NULL"))
setClassUnion("matrixOrffOrBigFfMatOrNULL", c("matrix", "ff_matrix", "BigFfMat", "NULL"))
setClassUnion("listOrNULL", c("list", "NULL"))
setClassUnion("characterOrNULL", c("character", "NULL"))
#' RnBSet Class
#'
#' Basic class for storing DNA methylation and experimental quality information
#'
#' @details
#' It is a virtual class and objects of type \code{RnBSet} should not be instantiated. Instead, the child classes are
#' used: \code{\linkS4class{RnBeadRawSet}} and \code{\linkS4class{RnBeadSet}} for Infinium HumanMethylation and
#' \code{\linkS4class{RnBiseqSet}} for bisulfite sequencing data
#'
#' @section Slots:
#' \describe{
#' \item{\code{pheno}}{Sample annotations (phenotypic and processing data) in the form of a \code{data.frame}.}
#' \item{\code{sites}}{A \code{matrix} object storing the identifiers of the methylation sites for which the
#' methylation information is present}
#' \item{\code{meth.sites}}{\code{matrix} of methylation values. Every row corresponds to a methylation site,
#' and every column - to a sample.}
#' \item{\code{covg.sites}}{\code{matrix} of coverage values. Every row corresponds to a methylation site,
#' and every column - to a sample.}
#' \item{\code{regions}}{\code{list} of all identifiers of methylation sites for which methylation information
#' is available.}
#' \item{\code{meth.regions}}{\code{list} of methylation \code{matrix} objects, one per available region type. Every row in a
#' matrix corresponds to a methylation site, and every column - to a sample.}
#' \item{\code{covg.regions}}{\code{list} of coverage \code{matrix} objects, one per available region type.
#' Every row corresponds to a region, and every column - to a sample.}
#' \item{\code{status}}{\code{list} with meta-information about the object.}
#' \item{\code{assembly}}{\code{character} vector of length one, specifying the genome assembly which the object is linked to, e.g. "hg19".}
#' \item{\code{target}}{\code{character} vector of length one, specifying the feature class:
#' \code{"CpG"} for sequencing data, \code{"probes450"} and \code{"probes27"} for
#' HumanMethylation450 and HumanMethylation27 microarrays respectively.}
#' \item{\code{inferred.covariates}}{\code{list} with covariate information.
#' Can contain elements \code{"sva"} and \code{"cell.types"}.}
#' \item{\code{version}}{Package version in which the dataset was created.}
#' \item{\code{imputed}}{Flag indicating if methylation matrix has been imputed.}
#' }
#'
#' @section Methods and Functions:
#' \describe{
#' \item{\code{\link[=pheno,RnBSet-method]{pheno}}}{Gets the phenotypic and processing data of the dataset.}
#' \item{\code{\link[=samples,RnBSet-method]{samples}}}{Gets the identifiers of all samples in the dataset.}
#' \item{\code{\link[=summarized.regions,RnBSet-method]{summarized.regions}}}{Gets the genomic annotations for
#' which methylation data is present.}
#' \item{\code{\link[=meth,RnBSet-method]{meth}}}{Gets a \code{matrix} of methylation values in the dataset.}
#' \item{\code{\link[=mval,RnBSet-method]{mval}}}{Gets a \code{matrix} of M values in the dataset.}
#' \item{\code{\link[=covg,RnBSet-method]{covg}}}{Gets the \code{matrix} of coverage values of the dataset.}
#' \item{\code{\link[=remove.sites,RnBSet-method]{remove.sites}}}{Removes sites from the dataset.}
#' \item{\code{\link[=remove.samples,RnBSet-method]{remove.samples}}}{Removes samples from the dataset.}
#' \item{\code{\link[=addPheno,RnBSet-method]{addPheno,RnBSet-method}}}{Add sample annotation to the dataset.}
#' \item{\code{\link[BiocGenerics]{combine}}}{Combines two datasets.}
#' \item{\code{\link{regionMapping,RnBSet-method}}}{Retrieve the sites mapping to a given region type}
#' \item{\code{\link[=rnb.sample.summary.table,RnBSet-method]{rnb.sample.summary.table}}}{Creates a sample summary table from an RnBSet object.}
#' \item{\code{\link{isImputed,RnBSet-method}}}{Getter for the imputation slot.}
#' }
#'
#' @name RnBSet-class
#' @rdname RnBSet-class
#' @author Pavlo Lutsik
#' @exportClass RnBSet
# Virtual base class for methylation datasets; see the roxygen block above for
# slot semantics. Matrix-valued slots use the matrixOrff* class unions so the
# large data can be either in-memory or disk-backed.
setClass("RnBSet",
		representation(pheno="data.frame",
				sites="matrix",
				meth.sites="matrixOrffOrBigFfMat",
				covg.sites="matrixOrffOrBigFfMatOrNULL",
				regions="list",
				meth.regions="list",
				covg.regions="listOrNULL",
				status="listOrNULL",
				assembly="character",
				target="characterOrNULL",
				inferred.covariates="list",
				version="characterOrNULL",
				imputed="logical"),
		prototype(pheno=data.frame(),
				sites=matrix(nrow=0, ncol=0),
				meth.sites=matrix(nrow=0, ncol=0),
				covg.sites=NULL,
				regions=list(),
				meth.regions=list(),
				covg.regions=NULL,
				status=NULL,
				assembly="hg19",
				target=NULL,
				inferred.covariates=list(),
				# Record the package version at object-creation time.
				version=as.character(packageVersion("RnBeads")),
				imputed=FALSE),
		contains = "VIRTUAL",
		package = "RnBeads")
## ---------------------------------------------------------------------------------------------------------------------
## DUMMY CONSTRUCTOR
## ---------------------------------------------------------------------------------------------------------------------
#
#setMethod("initialize", "RnBSet",
# function(pheno=data.frame(),
# sites=matrix(),
# meth.sites=matrix(),
# covg.sites=NULL,
# regions=list(),
# meth.regions=list(),
# covg.regions=NULL,
# status=NULL,
# assembly="hg19",
# target=NULL,
# inferred.covariates=list()
# ){
# .Object@pheno<-pheno
# .Object@sites<-sites
# .Object@meth.sites<-betas
# .Object@covg.sites<-covg.sites
#
# .Object@status<-status
#
# .Object@target<-target
#
#
# })
## ---------------------------------------------------------------------------------------------------------------------
## ACCESSORS
## ---------------------------------------------------------------------------------------------------------------------
if (!isGeneric("pheno")) {
	setGeneric("pheno", function(object) standardGeneric("pheno"))
}

#' pheno-methods
#'
#' Accessor for the sample phenotype and/or processing information of a dataset.
#'
#' @param object Dataset of interest.
#' @return Sample annotation information available for the dataset in the form of a \code{data.frame}.
#'
#' @rdname pheno-methods
#' @docType methods
#' @aliases pheno
#' @aliases pheno,RnBSet-method
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' pheno(rnb.set.example)
#' }
setMethod("pheno", signature(object = "RnBSet"), function(object) {
	slot(object, "pheno")
})
########################################################################################################################
if (!isGeneric("samples")) {
	setGeneric("samples", function(object) standardGeneric("samples"))
}

#' samples-methods
#'
#' Extracts sample identifiers
#'
#' @param object Dataset of interest.
#'
#' @details The column of the sample annotation table which contains identifiers is globally controlled via the
#'          \code{"identifiers.column"} option. In case the latter is \code{NULL} column names of the matrix returned
#'          by the \code{meth} method are treated as sample identifiers. In case the latter are also missing, a \code{character}
#'          vector with sample numbers is returned.
#'
#' @return \code{character} vector of sample identifiers.
#'
#' @rdname samples-methods
#' @docType methods
#' @aliases samples
#' @aliases samples,RnBSet-method
#' @aliases samples,RnBeadClustering-method
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' samples(rnb.set.example)
#' }
setMethod("samples", signature(object="RnBSet"),
	function(object) {
		pheno.table <- pheno(object)
		id.column <- rnb.getOption("identifiers.column")
		ids <- NULL
		if (!(is.null(pheno.table) || is.null(id.column))) {
			# The option may name the column or give its index.
			if (is.character(id.column)) {
				if (id.column %in% colnames(pheno.table)) {
					ids <- pheno.table[, id.column]
				}
			} else if (1L <= id.column && id.column <= ncol(pheno.table)) {
				ids <- pheno.table[, id.column]
			}
			# Fall back to sequential identifiers when the configured column is
			# missing, contains NAs or duplicates.
			if (is.null(ids) || any(is.na(ids)) || anyDuplicated(ids) != 0) {
				rnb.warning("The supplied identifiers column is not found or is not suitable")
				# seq_len() is safe for a zero-row table, unlike 1:nrow(...)
				ids <- as.character(seq_len(nrow(object@pheno)))
			}
			ids <- as.character(ids)
		} else if (!is.null(colnames(object@meth.sites))) {
			# No usable annotation: use the methylation matrix column names.
			ids <- colnames(object@meth.sites)
		} else {
			ids <- as.character(seq_len(nrow(object@pheno)))
		}
		ids
	}
)
########################################################################################################################
if (!isGeneric("sites")) {
	setGeneric("sites", function(object) standardGeneric("sites"))
}

#' sites-methods
#'
#' Accessor for the methylation sites, information for which is present in the \code{RnBSet} object.
#'
#' @param object Dataset of interest.
#'
#' @return A matrix of type \code{integer} describing the sites, information for which is
#'         present in the \code{object}
#'
#' @rdname sites-methods
#' @docType methods
#' @aliases sites
#' @aliases sites,RnBSet-method
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' sites(rnb.set.example)
#' }
setMethod("sites", signature(object = "RnBSet"), function(object) {
	slot(object, "sites")
})
if (!isGeneric("regions")) {
	setGeneric("regions", function(object, ...) standardGeneric("regions"))
}
########################################################################################################################
#' regions-methods
#'
#' Methylation regions, information for which is present in the \code{RnBSet} object.
#'
#' @param object Dataset of interest.
#' @param type Region type(s) of interest as a \code{character} vector. If this is set to \code{NULL}, all region
#'             types summarized in the object are returned.
#' @return Methylation site and region assignment. If \code{type} is singleton, a \code{matrix} is returned. The first
#'         column corresponds to the methylation context index. The second column is the index of the chromosome in
#'         the genome, and the third is the index of the region in the \code{GRanges} object of the region type
#'         annotation. When \code{length(type)>1}, a list of such matrices is returned for each element of \code{type}.
#'         If \code{type} is \code{NULL}, matrices for all summarized region types are returned.
#'
#' @note
#' Methylation context index is an integer number denoting the sequence context of the cytosine of interest. Index
#' \code{1} corresponds to \code{CpG}, the only supported index in bisulfite sequencing datasets.
#'
#' @rdname regions-methods
#' @docType methods
#' @aliases regions
#' @aliases regions,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' head(regions(rnb.set.example))
#' }
#' @seealso \code{\link[=summarized.regions,RnBSet-method]{summarized.regions}} for all summarized region types in a dataset;
#'          \code{\link{rnb.get.chromosomes}} listing all supported chromosomes for a given genome assembly
#' @author Pavlo Lutsik
#' @export
setMethod("regions", signature(object="RnBSet"),
	function(object, type=NULL) {
		# BUGFIX: the original rejected the documented default type=NULL
		# (is.character(NULL) is FALSE), making the "return all" branch
		# unreachable.
		if (!(is.null(type) || is.character(type))) {
			stop("Invalid argument type")
		}
		if (is.null(object@regions)) {
			warning("No region information present, returning NULL")
			return(NULL)
		}
		if (is.null(type)) {
			return(object@regions)
		}
		if (!all(type %in% names(object@regions))) {
			stop(sprintf("No information for type %s", paste(type, collapse = ", ")))
		}
		# BUGFIX: was `length(type==1)`, which is always positive for a
		# non-empty vector, so multi-type requests crashed inside `[[`.
		if (length(type) == 1) {
			object@regions[[type]]
		} else {
			object@regions[type]
		}
	})
########################################################################################################################
if (!isGeneric("summarized.regions")) {
	setGeneric("summarized.regions", function(object) standardGeneric("summarized.regions"))
}

#' summarized.regions-methods
#'
#' Lists the genomic annotations for which summarized methylation data is present in the \code{RnBSet} object.
#'
#' @param object Methylation dataset of interest.
#'
#' @return \code{character} vector listing all genomic annotations summarized in the given dataset. If the dataset
#'         contains methylation in sites only, an empty vector is returned.
#'
#' @seealso \code{\link[=summarize.regions,RnBSet-method]{summarize.regions}} for calculating region-wise methylation in a dataset;
#'          \code{\link{rnb.set.annotation}} for adding or replacing a region annotation table
#'
#' @rdname summarized.regions-methods
#' @docType methods
#' @aliases summarized.regions
#' @aliases summarized.regions,RnBSet-method
#' @author Yassen Assenov
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' summarized.regions(rnb.set.example)
#' }
setMethod("summarized.regions", signature(object = "RnBSet"),
	function(object) {
		# An unnamed (empty) regions list yields an empty character vector.
		region.names <- names(slot(object, "regions"))
		if (is.null(region.names)) character() else region.names
	}
)
########################################################################################################################
## get.dataset.matrix
##
## Extracts a specific data matrix from the given methylation dataset and sets row names if necessary.
##
## @param object Methylation dataset as an object of class inheriting \code{RnBSet}.
## @param type Site type (e.g. \code{"sites"} or \code{"probes450"}) for site/probe matrix, or region name for
##             the corresponding region-based matrix.
## @param row.names Flag indicating if row names must be generated.
## @param mm.sites Data matrix for the site level.
## @param mm.regions List of data matrices, one per supported region type.
## @param i indices of sites/regions to be retrieved (index or logical). retrieves all if \code{NULL} (default).
## @param j indices of samples to be retrieved (index, logical or sample names). retrieves all if \code{NULL} (default).
## @return Requested data matrix. Note that this might be \code{NULL}.
## @author Pavlo Lutsik
get.dataset.matrix <- function(object, type, row.names, mm.sites, mm.regions, i=NULL, j=NULL) {
	if (!(is.character(type) && length(type) == 1 && (!is.na(type)))) {
		stop("invalid value for type")
	}
	if (!parameter.is.flag(row.names)) {
		stop("invalid value for row.names; expected TRUE or FALSE")
	}
	# is.numeric() covers both integer and double indices; this replaces the
	# fragile comparison against class(i) strings.
	if (!(is.null(i) || is.numeric(i) || is.logical(i))) {
		stop("invalid value for i; expected NULL, index or logical")
	}
	if (!(is.null(j) || is.numeric(j) || is.logical(j) || is.character(j))) {
		stop("invalid value for j; expected NULL, index, character or logical")
	}
	if (is.character(j)) {
		# Translate sample names to column indices.
		j <- match(j, samples(object))
		if (any(is.na(j))) {
			stop("invalid sample names")
		}
	}
	# Subset a matrix by the (possibly NULL) i/j indices without dropping
	# dimensions; explicit branches avoid passing NULL as an index.
	extract <- function(mm) {
		if (is.null(i) && is.null(j)) {
			mm[, , drop = FALSE]
		} else if (is.null(i)) {
			mm[, j, drop = FALSE]
		} else if (is.null(j)) {
			mm[i, , drop = FALSE]
		} else {
			mm[i, j, drop = FALSE]
		}
	}
	if (type %in% c("sites", object@target)) {
		if (is.null(mm.sites)) {
			return(NULL)
		}
		if (inherits(mm.sites, "ff")) {
			# Disk-backed matrix must be opened before indexing.
			# NOTE(review): region matrices are not opened here - confirm the
			# caller guarantees they are already open.
			open(mm.sites)
		}
		result <- extract(mm.sites)
	} else if (!(type %in% names(object@regions))) {
		stop("unsupported region type")
	} else if (is.null(mm.regions[[type]])) {
		return(NULL)
	} else {
		result <- extract(mm.regions[[type]])
	}
	# Attach sample identifiers as column names, restricted to j if given.
	if (is.null(j)) {
		colnames(result) <- samples(object)
	} else {
		colnames(result) <- samples(object)[j]
	}
	if (row.names) {
		rn <- get.row.names(object, type)
		rownames(result) <- if (is.null(i)) rn else rn[i]
	} else {
		rownames(result) <- NULL
	}
	return(result)
}
########################################################################################################################
if (!isGeneric("mval")) setGeneric("mval", function(object, ...) standardGeneric("mval"))

#' mval-methods
#'
#' Extracts DNA methylation information (M values) for a specified set of genomic features.
#'
#' @param object dataset of interest.
#' @param type \code{character} singleton. If this is set to \code{"sites"} (default), DNA methylation information
#'             for each available site is returned. Otherwise, this should be one of region types for for which
#'             summarized DNA methylation information is computed in the given dataset.
#' @param row.names Flag indicating of row names are to be generated in the result.
#' @param epsilon Threshold of beta values to use when adjusting for potential M values close to +infinity or
#'                -infinity. See \code{\link{rnb.beta2mval}} for more details.
#' @param i indices of sites/regions to be retrieved. By default (\code{NULL}), all will be retrieved.
#' @param j indices of samples to be retrieved. By default (\code{NULL}), all will be retrieved.
#'
#' @return \code{matrix} with methylation M values.
#'
#' @seealso \code{\link[=meth,RnBSet-method]{meth}} for extracting methylation beta values
#' @rdname mval-methods
#' @docType methods
#' @aliases mval
#' @aliases mval,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' ## per-site M-value matrix
#' mm<-mval(rnb.set.example, row.names=TRUE)
#' head(mm)
#' ## M-values for each covered gene
#' gmm<-mval(rnb.set.example, type="gene", row.names=TRUE)
#' head(gmm)
#' }
#' @export
setMethod("mval", signature(object = "RnBSet"),
	function(object, type = "sites", row.names = FALSE, epsilon = 0, i = NULL, j = NULL) {
		# i/j pass-through added (defaulted, backward-compatible) for
		# consistency with the meth() and covg() accessors.
		beta.values <- get.dataset.matrix(object, type, row.names, object@meth.sites, object@meth.regions, i = i, j = j)
		rnb.beta2mval(beta.values, epsilon)
	}
)
if (!isGeneric("meth")) setGeneric("meth", function(object, ...) standardGeneric("meth"))

#' meth-methods
#'
#' Extracts DNA methylation information (beta values) for a specified set of genomic features.
#'
#' @param object dataset of interest.
#' @param type \code{character} singleton. If this is set to \code{"sites"} (default), DNA methylation information
#'             for each available site is returned. Otherwise, this should be one of region types for for which
#'             summarized DNA methylation information is computed in the given dataset.
#' @param row.names flag indicating if row names are to be generated in the result.
#' @param i indices of sites/regions to be retrieved. By default (\code{NULL}), all will be retrieved.
#' @param j indices of samples to be retrieved. By default (\code{NULL}), all will be retrieved.
#'
#' @return \code{matrix} with methylation beta values.
#'
#' @seealso \code{\link[=mval,RnBSet-method]{mval}} for calculating M values
#' @rdname meth-methods
#' @docType methods
#' @aliases meth
#' @aliases meth,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' ## per-site beta-value matrix
#' mm<-meth(rnb.set.example, row.names=TRUE)
#' head(mm)
#' ## beta-values for each covered gene
#' gmm<-meth(rnb.set.example, type="gene", row.names=TRUE)
#' head(gmm)
#' }
#' @export
setMethod("meth", signature(object = "RnBSet"),
	function(object, type="sites", row.names=FALSE, i=NULL, j=NULL) {
		# Thin wrapper: delegate entirely to the shared matrix extractor.
		betas <- get.dataset.matrix(object, type, row.names,
				slot(object, "meth.sites"), slot(object, "meth.regions"),
				i = i, j = j)
		betas
	}
)
if (!isGeneric("hasCovg")) setGeneric("hasCovg", function(object, ...) standardGeneric("hasCovg"))

#' hasCovg-methods
#'
#' Returns \code{TRUE} if the \code{RnBSet} object contains coverage information for sites or the specified region type.
#'
#' @param object \code{RnBSet} of interest.
#' @param type \code{character} singleton. If \code{sites} or a region type summarized in the object
#'
#' @return \code{TRUE} if the \code{RnBSet} object contains coverage information for sites or the specified region type. \code{FALSE} otherwise
#'
#' @rdname hasCovg-methods
#' @docType methods
#' @export
#' @aliases hasCovg
#' @aliases hasCovg,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' ## per-site beta-value matrix
#' hasCovg(rnb.set.example)
#' }
setMethod("hasCovg", signature(object="RnBSet"),
	function (object, type="sites") {
		if (!(is.character(type) && length(type) == 1 && (!is.na(type)))) {
			stop("invalid value for type")
		}
		# Early returns instead of a result variable; same branches as before.
		if (type %in% c("sites", object@target)) {
			return(!is.null(object@covg.sites))
		}
		if (!(type %in% names(object@regions))) {
			stop("unsupported region type")
		}
		!is.null(object@covg.regions[[type]])
	}
)
if (!isGeneric("covg")) setGeneric("covg", function(object, ...) standardGeneric("covg"))

#' covg-methods
#'
#' Extract coverage information from an object of \code{RnBSet} class.
#'
#' @param object Dataset of interest.
#' @param type \code{character} singleton. If \code{sites} DNA methylation information per each available
#'             site is returned. Otherwise should be one of region types for for which the summarized
#'             coverage information is available
#' @param row.names Flag indicating of row names are to be generated in the result.
#' @param i indices of sites/regions to be retrieved. By default (\code{NULL}), all will be retrieved.
#' @param j indices of samples to be retrieved. By default (\code{NULL}), all will be retrieved.
#'
#' @return coverage information available for the dataset in the form of a \code{matrix}.
#'
#' @rdname covg-methods
#' @docType methods
#' @export
#' @aliases covg
#' @aliases covg,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' ## per-site beta-value matrix
#' cvg<-covg(rnb.set.example, row.names=TRUE)
#' head(cvg)
#' }
setMethod("covg", signature(object="RnBSet"),
	function (object, type="sites", row.names=FALSE, i=NULL, j=NULL) {
		# Return the extractor's result directly (no intermediate variable).
		get.dataset.matrix(object, type, row.names,
				slot(object, "covg.sites"), slot(object, "covg.regions"),
				i = i, j = j)
	}
)
## Register the nsites() generic only if no other package has defined it already
if(!isGeneric("nsites")) setGeneric("nsites", function(object, ...) standardGeneric("nsites"))
#' nsites-methods
#'
#' Returns the number of sites/regions for a given \code{RnBSet} object
#'
#' @param object \code{RnBSet} of interest.
#' @param type \code{character} singleton. If this is set to \code{"sites"} (default), the number of sites is returned.
#' Otherwise, this should be one of the region types for which the number of regions is returned.
#'
#' @return \code{integer} stating the number of sites/regions. \code{NA} if the regions have not been summarized yet.
#'
#' @seealso \code{\link[=meth,RnBSet-method]{meth}} Retrieving the matrix of methylation values
#' @rdname nsites-methods
#' @docType methods
#' @aliases nsites
#' @aliases nsites,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' nsites(rnb.set.example)
#' }
#' @export
setMethod("nsites", signature(object = "RnBSet"),
	function(object, type="sites") {
		## 'type' must be a single, non-missing character value
		if (!(is.character(type) && length(type) == 1 && !is.na(type))) {
			stop("invalid value for type")
		}
		## Site-level count requested either explicitly or via the target name
		if (type %in% c("sites", object@target)) {
			return(nrow(object@meth.sites))
		}
		if (!(type %in% names(object@regions))) {
			stop("unsupported region type")
		}
		## NA when the region type is known but has not been summarized yet
		region.meth <- object@meth.regions[[type]]
		if (is.null(region.meth)) NA else nrow(region.meth)
	}
)
########################################################################################################################
## Register the assembly() generic only if no other package has defined it already
if (!isGeneric("assembly")) {
	setGeneric("assembly", function(object) standardGeneric("assembly"))
}
#' assembly-methods
#'
#' Extracts information about assembly
#'
#' @param object Dataset of interest.
#' @return Genome assembly of the dataset as a \code{character} singleton, e.g. \code{"hg19"}.
#'
#' @rdname assembly-methods
#' @docType methods
#' @aliases assembly
#' @aliases assembly,RnBSet-method
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' assembly(rnb.set.example) # "hg19"
#' }
setMethod("assembly", signature(object="RnBSet"),
	function(object) {
		## Plain slot accessor for the genome assembly identifier (e.g. "hg19")
		object@assembly
	})
## ---------------------------------------------------------------------------------------------------------------------
## MODIFIERS
## ---------------------------------------------------------------------------------------------------------------------
## Register the updateRegionSummaries() generic only if not already defined
if (!isGeneric("updateRegionSummaries")) {
	setGeneric("updateRegionSummaries", function(object) standardGeneric("updateRegionSummaries"))
}
#' updateRegionSummaries
#'
#' Updates the region information present in an RnBSet by invoking summarize.regions on all region types
#' present in the object
#'
#' @param object Dataset of interest.
#' @return The modified \code{RnBSet} object with re-computed region-level methylation summaries.
#'
#' @rdname updateRegionSummaries
#' @docType methods
#' @aliases updateRegionSummaries
#' @aliases updateRegionSummaries,RnBSet-method
#' @export
setMethod("updateRegionSummaries", signature(object="RnBSet"),
	function(object){
		## Re-run summarize.regions() for every region type currently stored
		## in the object. Each region matrix carries its aggregation method as
		## the "aggregation" attribute, which is preserved across the update.
		if (length(object@meth.regions) != 0) {
			region.types <- names(object@meth.regions)
			aggregations <- sapply(object@meth.regions, attr, "aggregation")
			## seq_along instead of 1:length (safe if the vector is ever empty)
			for (i in seq_along(region.types)) {
				object <- summarize.regions(object, region.types[i], aggregations[i])
			}
		}
		object
	}
)
########################################################################################################################
## Register the remove.sites() generic only if not already defined
if (!isGeneric("remove.sites")) {
	setGeneric("remove.sites", function(object, probelist, verbose = TRUE) standardGeneric("remove.sites"))
}
#' remove.sites-methods
#'
#' Removes the specified probes from the dataset.
#'
#' @param object Dataset of interest.
#' @param probelist List of probes to be removed in the form of a \code{logical}, \code{integer} or \code{character}
#' vector. If this parameter is \code{logical}, it is not recycled; its length must be equal to the
#' number of probes in \code{object}. If it is \code{integer} or \code{character}, it must list only
#' probes that exist in the dataset. Specifying probe indices larger than the number of probes, or
#' non-existent probe identifiers results in an error.
#' @param verbose if \code{TRUE} additional diagnostic output is generated
#'
#' @return The modified dataset.
#'
#' @seealso \code{\link[=remove.samples,RnBSet-method]{remove.samples}} for removing samples from a methylation dataset
#'
#' @rdname remove.sites-methods
#' @aliases remove.sites
#' @aliases remove.sites,RnBSet-method
#' @docType methods
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' print(rnb.set.example)
#' ## remove 100 random sites
#' s2r<-sample.int(nrow(sites(rnb.set.example)), 100)
#' rnb.set.f<-remove.sites(rnb.set.example, s2r)
#' print(rnb.set.f)
#' }
setMethod("remove.sites", signature(object = "RnBSet"),
	function(object, probelist, verbose=FALSE) {
		## Translate probelist (logical/integer/character) into integer indices
		## relative to the rows of the site annotation table
		inds <- get.i.vector(probelist, rownames(object@sites))
		if(verbose) {
			rnb.logger.start("Removing sites")
		}
		## Delete methylation sites
		if(length(inds) != 0) {
			object@sites <- object@sites[-inds, ]
			if(!is.null(object@status) && object@status$disk.dump){
				## Disk-backed (ff / BigFf) storage: ff matrices cannot simply be
				## row-subset in place, so build a fresh, smaller matrix and copy
				## the retained rows over, one sample column at a time.
				doBigFf <- !is.null(object@status$disk.dump.bigff)
				bff.finalizer <- NULL
				if (doBigFf) doBigFf <- object@status$disk.dump.bigff
				if (doBigFf) bff.finalizer <- rnb.getOption("disk.dump.bigff.finalizer")
				nSites.new <- nrow(object@meth.sites) - length(inds)
				nSamples <- length(samples(object))
				# methylation
				newMat <- NULL
				if (doBigFf){
					newMat <- BigFfMat(row.n=nSites.new, col.n=nSamples, row.names=NULL, col.names=samples(object), finalizer=bff.finalizer)
				} else {
					newMat <- ff(NA, dim=c(nSites.new, nSamples), dimnames=list(NULL, samples(object)), vmode="double")
				}
				for (j in 1:nSamples){
					newMat[,j] <- object@meth.sites[-inds,j]
				}
				## Optionally free the old on-disk matrix before replacing it
				if(isTRUE(object@status$discard.ff.matrices)){
					delete(object@meth.sites)
				}
				object@meth.sites <- newMat
				# coverage (integer-valued matrix, hence the integer NA prototype)
				if(!is.null(object@covg.sites)) {
					newMat <- NULL
					if (doBigFf){
						newMat <- BigFfMat(row.n=nSites.new, col.n=nSamples, row.names=NULL, col.names=samples(object), na.prototype=as.integer(NA), finalizer=bff.finalizer)
					} else {
						newMat <- ff(NA_integer_, dim=c(nSites.new, nSamples), dimnames=list(NULL, samples(object)))
					}
					for (j in 1:nSamples){
						newMat[,j] <- object@covg.sites[-inds,j]
					}
					if(isTRUE(object@status$discard.ff.matrices)){
						delete(object@covg.sites)
					}
					object@covg.sites <- newMat
				}
			} else {
				## In-memory storage: plain row subsetting
				object@meth.sites <- object@meth.sites[-inds, ,drop=FALSE]
				if(!is.null(object@covg.sites)) {
					object@covg.sites <- object@covg.sites[-inds, ,drop=FALSE]
				}
			}
		}
		## Update region methylation: re-summarize every region type with its
		## previously used aggregation (stored as the "aggregation" attribute)
		if(length(object@meth.regions) != 0){
			region.types <- names(object@meth.regions)
			aggregations <- sapply(object@meth.regions, attr, "aggregation")
			for(i in 1:length(region.types)){
				if(verbose){
					rnb.status(c("summarizing regions:",region.types[i]))
				}
				object <- summarize.regions(object, region.types[i], aggregations[i])
			}
		}
		## Remove information on inferred covariates (they are likely to change when sites are removed);
		## "sex" is kept because it does not depend on the retained site set
		if (.hasSlot(object, "inferred.covariates")) {
			i.covariates <- setdiff(names(object@inferred.covariates), "sex")
			if (length(i.covariates) != 0) {
				object@inferred.covariates[i.covariates] <- NULL
				if(verbose){
					rnb.info("removed information on inferred covariates")
				}
			}
		}
		if(verbose){
			rnb.logger.completed()
		}
		object
	}
)
########################################################################################################################
## Register the updateMethylationSites() generic only if not already defined
if (!isGeneric("updateMethylationSites")) {
	setGeneric("updateMethylationSites", function(object, meth.data, verbose = TRUE) standardGeneric("updateMethylationSites"))
}
#' updateMethylationSites-methods
#'
#' Replaces the methylation info with the specified data frame.
#'
#' @param object Dataset of interest.
#' @param meth.data This object has to be a \code{data.frame} of equal dimension than the one already contained in
#' \code{object}, containing the methylation info that should be associated with the object.
#' @param verbose if \code{TRUE} additional diagnostic output is generated
#'
#' @return The modified dataset.
#'
#' @rdname updateMethylationSites-methods
#' @aliases updateMethylationSites
#' @aliases updateMethylationSites,RnBSet-method
#' @docType methods
#' @export
setMethod("updateMethylationSites", signature(object = "RnBSet"),
	function(object, meth.data, verbose=FALSE) {
		## Replace the site-level methylation matrix with meth.data (which must
		## have exactly the same dimensions as the current matrix) and then
		## recompute all region-level summaries.
		if(verbose) {
			rnb.logger.start("Updating sites")
		}
		## Validate dimensions once, up front (the original code performed this
		## check twice per branch; the first check was fully subsumed by the second)
		nSites <- nrow(object@meth.sites)
		nSamples <- length(samples(object))
		if(nSites!=nrow(meth.data)||nSamples!=ncol(meth.data)){
			stop("Dimensions of provided and existing methylation info do not match.")
		}
		if(!is.null(object@status) && object@status$disk.dump){
			## Disk-backed (ff / BigFf) storage: create a fresh on-disk matrix
			## and copy the new values over, one sample column at a time
			doBigFf <- !is.null(object@status$disk.dump.bigff)
			bff.finalizer <- NULL
			if (doBigFf) doBigFf <- object@status$disk.dump.bigff
			if (doBigFf) bff.finalizer <- rnb.getOption("disk.dump.bigff.finalizer")
			newMat <- NULL
			if (doBigFf){
				newMat <- BigFfMat(row.n=nSites, col.n=nSamples, row.names=NULL, col.names=samples(object), finalizer=bff.finalizer)
			} else {
				newMat <- ff(NA, dim=c(nSites, nSamples), dimnames=list(NULL, samples(object)), vmode="double")
			}
			for (j in seq_len(nSamples)){
				newMat[,j] <- meth.data[,j]
			}
			## Optionally free the old on-disk matrix before replacing it
			if(isTRUE(object@status$discard.ff.matrices)){
				delete(object@meth.sites)
			}
			object@meth.sites <- newMat
		} else {
			## In-memory storage: direct replacement
			object@meth.sites <- meth.data
		}
		if(verbose){
			## Use the rnb.logger.* wrappers consistently (the original mixed
			## rnb.logger.start with bare logger.completed/logger.start)
			rnb.logger.completed()
		}
		if(verbose){
			rnb.logger.start("Update regional methylation")
		}
		object <- updateRegionSummaries(object)
		if(verbose){
			rnb.logger.completed()
		}
		object
	}
)
########################################################################################################################
## Register the mask.sites.meth() generic only if not already defined
if (!isGeneric("mask.sites.meth")) {
	setGeneric("mask.sites.meth", function(object, mask, verbose=FALSE) standardGeneric("mask.sites.meth"))
}
#' mask.sites.meth-methods
#'
#' Given a logical matrix, sets corresponding entries in the methylation table to NA (masking).
#' Low memory footprint
#'
#' @param object Dataset of interest.
#' @param mask logical matrix indicating which sites should be masked
#' @param verbose if \code{TRUE} additional diagnostic output is generated
#'
#' @return The modified dataset.
#'
#' @rdname mask.sites.meth-methods
#' @aliases mask.sites.meth
#' @aliases mask.sites.meth,RnBSet-method
#' @docType methods
setMethod("mask.sites.meth", signature(object = "RnBSet"),
	function(object, mask, verbose=FALSE) {
		## 'mask' is a logical matrix (sites x samples); TRUE entries in the
		## methylation table are replaced by NA.
		## NOTE(review): the 'verbose' argument is currently unused in this method.
		if(!is.null(object@status) && object@status$disk.dump){
			## Disk-backed (ff) storage: assign one sample column at a time to
			## keep the memory footprint low
			nSamples <- length(samples(object))
			for (j in 1:nSamples){
				object@meth.sites[mask[,j],j] <- NA
			}
		} else {
			## In-memory storage: mask all entries at once
			object@meth.sites[,][mask] <- NA
			## For raw-intensity datasets also mask the underlying methylated (M)
			## and unmethylated (U) signal matrices, plus the out-of-band M0/U0
			## matrices when present, to keep them consistent with the beta values
			if(inherits(object, "RnBeadRawSet")){
				object@M[,][mask] <- NA
				object@U[,][mask] <- NA
				if(!is.null(object@M0)){
					object@M0[,][mask] <- NA
				}
				if(!is.null(object@U0)){
					object@U0[,][mask] <- NA
				}
			}
		}
		object
	}
)
########################################################################################################################
## Register the remove.samples() generic only if not already defined
if (!isGeneric("remove.samples")) {
	setGeneric("remove.samples", function(object, samplelist) standardGeneric("remove.samples"))
}
#' remove.samples-methods
#'
#' Removes the specified samples from the dataset.
#'
#' @param object Dataset of interest.
#' @param samplelist List of samples to be removed in the form of a \code{logical}, \code{integer} or \code{character}
#' vector. If this parameter is \code{logical}, it is not recycled; its length must be equal to the
#' number of samples in \code{object}. If it is \code{integer} or \code{character}, it must list only
#' samples that exist in the dataset. Specifying sample indices larger than the number of samples, or
#' non-existent sample identifiers results in an error.
#' @return The modified dataset.
#'
#' @seealso \code{\link[=remove.sites,RnBSet-method]{remove.sites}} for removing sites or probes from a methylation dataset
#'
#' @rdname remove.samples-methods
#' @aliases remove.samples
#' @aliases remove.samples,RnBSet-method
#' @docType methods
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' samples(rnb.set.example)
#' ## remove 3 random samples
#' s2r<-sample.int(length(samples(rnb.set.example)), 3)
#' rnb.set.f<-remove.samples(rnb.set.example, s2r)
#' samples(rnb.set.f)
#' }
setMethod("remove.samples", signature(object = "RnBSet"),
	function(object, samplelist) {
		## Keep a reference to the unmodified object: the per-region
		## "aggregation" attribute is restored from it at the end of the loop
		object.old <- object
		## Translate samplelist (logical/integer/character) into integer indices
		inds <- get.i.vector(samplelist, samples(object))
		bff.finalizer <- rnb.getOption("disk.dump.bigff.finalizer")
		if (length(inds) != 0) {
			## --- site-level methylation matrix ---
			if(object@status$disk.dump){
				## Disk-backed storage: materialize, subset columns, re-dump
				doBigFf <- !is.null(object@status$disk.dump.bigff)
				if (doBigFf) doBigFf <- object@status$disk.dump.bigff
				mat <- object@meth.sites[,]
				new.matrix <- mat[,-inds, drop=FALSE]
				# delete(object@meth.sites)
				if(isTRUE(object@status$discard.ff.matrices)){
					delete(object@meth.sites)
				}
				if (doBigFf){
					object@meth.sites <- BigFfMat(new.matrix, finalizer=bff.finalizer)
				} else {
					object@meth.sites <- convert.to.ff.matrix.tmp(new.matrix)
				}
			}else{
				object@meth.sites <- object@meth.sites[,-inds, drop=FALSE]
			}
			## --- sample annotation table (one row per sample) ---
			if (!is.null(object@pheno)) {
				object@pheno <- object@pheno[-inds, ,drop=FALSE]
			}
			## --- site-level coverage matrix (same scheme as methylation) ---
			if (!is.null(object@covg.sites)) {
				if(object@status$disk.dump){
					doBigFf <- !is.null(object@status$disk.dump.bigff)
					if (doBigFf) doBigFf <- object@status$disk.dump.bigff
					mat <- object@covg.sites[,]
					new.matrix <- mat[,-inds, drop=FALSE]
					# delete(object@covg.sites)
					if(isTRUE(object@status$discard.ff.matrices)){
						delete(object@covg.sites)
					}
					if (doBigFf){
						object@covg.sites <- BigFfMat(new.matrix, finalizer=bff.finalizer)
					} else {
						object@covg.sites <- convert.to.ff.matrix.tmp(new.matrix)
					}
				}else{
					object@covg.sites <- object@covg.sites[,-inds, drop=FALSE]
				}
			}
			## --- region-level methylation and coverage matrices ---
			for (region in names(object@regions)) {
				if(object@status$disk.dump){
					doBigFf <- !is.null(object@status$disk.dump.bigff)
					if (doBigFf) doBigFf <- object@status$disk.dump.bigff
					mat <- object@meth.regions[[region]][,]
					meth.matrix <- mat[, -inds, drop=FALSE]
					if(isTRUE(object@status$discard.ff.matrices)){
						delete(object@meth.regions[[region]])
					}
					if (doBigFf){
						object@meth.regions[[region]] <- BigFfMat(meth.matrix, finalizer=bff.finalizer)
					} else {
						object@meth.regions[[region]] <- convert.to.ff.matrix.tmp(meth.matrix)
					}
					if(!is.null(object@covg.regions)){
						mat <- object@covg.regions[[region]][,]
						covg.matrix <- mat[, -inds, drop=FALSE]
						if(isTRUE(object@status$discard.ff.matrices)){
							delete(object@covg.regions[[region]])
						}
						if (doBigFf){
							object@covg.regions[[region]] <- BigFfMat(covg.matrix, finalizer=bff.finalizer)
						} else {
							object@covg.regions[[region]] <- convert.to.ff.matrix.tmp(covg.matrix)
						}
					}
					# delete(object@meth.regions[[region]])
					# delete(object@covg.regions[[region]])
				}else{
					object@meth.regions[[region]] <- object@meth.regions[[region]][, -inds, drop=FALSE]
					if(!is.null(object@covg.regions)){
						object@covg.regions[[region]] <- object@covg.regions[[region]][, -inds, drop=FALSE]
					}
				}
				## Re-dumping/subsetting drops attributes; restore the aggregation tag
				attr(object@meth.regions[[region]], "aggregation")<-attr(object.old@meth.regions[[region]], "aggregation")
			}
			## Remove information on inferred covariates (they are likely to change when samples are removed)
			if (.hasSlot(object, "inferred.covariates")) {
				i.covariates <- setdiff(names(object@inferred.covariates), "sex")
				if (length(i.covariates) != 0) {
					## FIXME: Wouldn't it make more sense to simply take the samples out?
					object@inferred.covariates[i.covariates] <- NULL
				}
			}
		}
		object
	}
)
########################################################################################################################
## Register the mergeSamples() generic only if not already defined
if (!isGeneric("mergeSamples")) {
	setGeneric("mergeSamples", function(object, ...) standardGeneric("mergeSamples"))
}
#' mergeSamples
#'
#' Take an RnBSet object and merge methylation and phenotype information given a grouping column in the pheno table.
#' Coverage is combined by taking the sum of coverages.
#' Phenotype information is combined by concatenating the entries from all merged samples.
#' @param object input RnBSet object
#' @param grp.col a column name (string) of \code{pheno(rnb.set)} that contains unique identifiers for sample groups/replicates
#' to be combined
#' @return the modified RnBSet object
#' @details combines phenotype information, coverage information and methylation information
#' methylation is combined by taking the average. Detection p-values are combined using Fisher's method.
#' For methylation arrays, bead counts are currently not taken into account.
#' objects of class \code{RnBeadRawSet} are automatically converted to \code{RnBeadSet}.
#' @note Requires the packages \pkg{foreach} and \pkg{doParallel}.
#'
#' @rdname mergeSamples-methods
#' @aliases mergeSamples
#' @aliases mergeSamples,RnBSet-method
#' @docType methods
#'
#' @author Fabian Mueller
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' rnb.set.example
#' rnb.set.merged <- mergeSamples(rnb.set.example,"Cell_Line")
#' rnb.set.merged
#' pheno(rnb.set.merged)
#' }
# TODOs:
# - incorporate weighted methylation average (coverage)
setMethod("mergeSamples", signature(object = "RnBSet"),
	function(object, grp.col){
		ph <- pheno(object)
		if (!is.element(grp.col,colnames(ph))){
			stop("Could not merge samples: phenotype column does not exist")
		}
		res <- object
		## replicate.list: one element per merged group, holding the column
		## indices of the samples belonging to that group
		replicate.list <- getMergeList(object, grp.col)
		num.replicates <- sapply(replicate.list,length)
		phm <- sapply(ph, format, trim=TRUE, justify="none") # format to matrix, avoiding padded whitespaces
		ph.t <- t(phm)
		## Merge function for phenotype entries: keep a value if it is identical
		## across all replicates, NA if all are missing, otherwise concatenate
		## the distinct values with ";"
		mf.pheno <- function(X.sub){
			sapply(1:nrow(X.sub),FUN=function(i){
				if (length(unique(X.sub[i,]))==1 && sum(is.na(X.sub[i,]))==0) {
					return(X.sub[i,1])
				} else if (all(is.na(X.sub[i,]))) {
					return(NA)
				} else {
					return(paste(X.sub[i,],collapse=";"))
				}
			})
		}
		pheno.new <- t(mergeColumns(ph.t,replicate.list,mergeFun=mf.pheno))
		pheno.new <- cbind(pheno.new,num.replicates)
		colnames(pheno.new) <- c(colnames(ph),"rnb_number_merged_samples")
		## NOTE(review): exact class comparison (not inherits) — this branch is
		## intentionally restricted to RnBiseqSet itself
		if (class(object) == "RnBiseqSet"){
			## Methylation is merged by the default mergeColumns aggregation;
			## coverage is merged by summing across replicates
			meth.site.new <- mergeColumns(meth(object,type="sites",row.names=FALSE),replicate.list)
			covg.site.new <- NULL
			if (!is.null(object@covg.sites)){
				covg.site.new <- mergeColumns(covg(object,type="sites"),replicate.list,mergeFun=function(X.sub){rowSums(X.sub,na.rm=TRUE)})
			}
			# res <- new("RnBiseqSet",
			# 		pheno=data.frame(pheno.new),
			# 		sites=object@sites,
			# 		meth.sites=meth.site.new,
			# 		covg.sites=covg.site.new,
			# 		region.types=summarized.regions(object),
			# 		assembly=object@assembly)
			## Rebuild the site table expected by the RnBiseqSet constructor
			aa <- annotation(object,"sites")
			sites.obj <- data.frame(chrom=as.character(aa$Chromosome),start=aa$Start,strand=as.character(aa$Strand),stringsAsFactors=FALSE)
			doBigFf <- !is.null(object@status$disk.dump.bigff)
			if (doBigFf) doBigFf <- object@status$disk.dump.bigff
			res <- RnBiseqSet(
				pheno=data.frame(pheno.new),
				sites=sites.obj,
				meth=meth.site.new,
				covg=covg.site.new,
				region.types=summarized.regions(object),
				assembly=object@assembly,
				useff=object@status$disk.dump,
				usebigff=doBigFf
			)
		} else if (is.element(class(object),c("RnBeadSet","RnBeadRawSet"))) {
			meth.site.new <- mergeColumns(meth(object,type="sites",row.names=TRUE),replicate.list)
			## Detection p-values are combined across replicates (Fisher's method
			## via combineTestPvalsMeth, per the roxygen details above)
			p.vals <- NULL
			if (!is.null(object@pval.sites)){
				p.vals <- mergeColumns(dpval(object,row.names=TRUE),replicate.list,
					mergeFun=function(X.sub){
						apply(X.sub,1,function(x){combineTestPvalsMeth(na.omit(x),correlated=FALSE)})
					}
				)
			}
			## Bead counts are not merged (passed as NULL to the constructor)
			b.counts <- NULL
			## NOTE(review): 'platform' stays undefined if object@target matches
			## none of these values, which would error in the constructor call below
			if(object@target=="probesEPIC"){
				platform<-"EPIC"
			}else if (object@target=="probes450"){
				platform<-"450k"
			}else if(object@target=="probes27"){
				platform<-"27k"
			}
			# res <- new("RnBeadSet",
			# 		data.frame(pheno.new),
			# 		meth.site.new,
			# 		p.values=p.vals,
			# 		bead.counts=b.counts,
			# 		platform=platform,
			# 		region.types=summarized.regions(object)
			# )
			res <- RnBeadSet(
				pheno=data.frame(pheno.new),
				betas=meth.site.new,
				p.values=p.vals,
				bead.counts=b.counts,
				platform=platform,
				region.types=summarized.regions(object),
				useff=object@status$disk.dump
			)
		} else {
			stop("Could not merge samples: Invalid class of object")
		}
		return(res)
	}
)
########################################################################################################################
## Register the combine.rnb.sets() generic only if not already defined —
## consistent with every other setGeneric call in this file, which guards
## against clobbering a generic registered by another package.
if (!isGeneric("combine.rnb.sets")) {
	setGeneric("combine.rnb.sets", function(x,y, ...) standardGeneric("combine.rnb.sets"))
}
#' combine.rnb.sets-methods
#'
#' Combine two objects inheriting from \code{\linkS4class{RnBSet}} class
#'
#' @param x,y \code{\linkS4class{RnBeadSet}}, \code{\linkS4class{RnBeadRawSet}}
#' or \code{\linkS4class{RnBiseqSet}} object
#' @param type \code{character} singleton defining the set operation applied to the two site sets,
#' one of "all", "all.x", "all.y" or "common"
#'
#' @details Combine method supports a merge of any two RnBSet objects that contain data of the same species.
#' In case a non-synonymous merge is performed, the class conversion will follow the following hierarchy:
#' \code{\linkS4class{RnBeadSet}} < \code{\linkS4class{RnBeadRawSet}} < \code{\linkS4class{RnBiseqSet}}.
#' In case \code{x} and \code{y} are both array data containers (\code{RnBeadSet} or \code{RnBeadRawSet}),
#' the resulting object will have an annotation that corresponds to the newer array version
#' (\code{27k} < \code{450k} < \code{EPIC}).
#' The sample sets of \code{x} and \code{y} should be unique. Sample annotation information is merged only for columns
#' which have identical names in both objects. CpG sites of the new object are a union of those present in both objects.
#'
#' @return combined \code{\linkS4class{RnBeadSet}}, \code{\linkS4class{RnBeadRawSet}} or
#' \code{\linkS4class{RnBiseqSet}} object
#'
#' @rdname combine.rnb.sets-methods
#' @docType methods
#' @export
#' @aliases combine.rnb.sets
#' @aliases combine.rnb.sets,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' r1 <- rnb.set.example
#' r1 <- remove.samples(r1,samples(rnb.set.example)[1:5])
#' i <- which(r1@@sites[,2] == 15 | r1@@sites[,2] == 21)
#' sites.rem.r1 <- union(sample(1:nrow(meth(rnb.set.example)),500),i)
#' r1 <- remove.sites(r1,sites.rem.r1)
#' r2 <- rnb.set.example
#' r2 <- remove.samples(r2,samples(rnb.set.example)[6:12])
#' sites.rem.r2 <- sample(1:nrow(meth(rnb.set.example)),800)
#' r2 <- remove.sites(r2,sites.rem.r2)
#' rc <- combine.rnb.sets(r1,r2)
#' #assertion: check the number of sites
#' sites.rem.c <- intersect(sites.rem.r1,sites.rem.r2)
#' (nrow(meth(rnb.set.example))-length(sites.rem.c)) == nrow(meth(rc))
#' }
setMethod("combine.rnb.sets", signature(x="RnBSet", y="RnBSet"),
	function(x, y, type="all"){
		## Same concrete class on both sides: dispatch to the matching combiner
		if(class(x)==class(y)){
			if(inherits(x, "RnBeadSet")){
				rnb.combine.arrays(x, y, type=type)
			}else if(inherits(x, "RnBiseqSet")){
				rnb.combine.seq(x, y, type=type)
			}else{
				rnb.error("This combine operation is currently not supported")
			}
		}else{
			## Mixed classes: coerce the non-sequencing object to RnBiseqSet
			## (the top of the conversion hierarchy) and combine as sequencing data
			if(inherits(x, "RnBiseqSet")){
				y.seq<-as(y, "RnBiseqSet")
				rnb.combine.seq(x, y.seq, type=type)
			}else if(inherits(y, "RnBiseqSet")){
				x.seq<-as(x, "RnBiseqSet")
				rnb.combine.seq(x.seq, y, type=type)
			}else{
				## Previously this case (e.g. RnBeadSet combined with
				## RnBeadRawSet) fell through and returned NULL invisibly;
				## fail explicitly instead, matching the branch above.
				rnb.error("This combine operation is currently not supported")
			}
		}
	}
)
########################################################################################################################
## Register the addPheno() generic only if not already defined
if (!isGeneric("addPheno")) {
	setGeneric("addPheno", function(object, ...) standardGeneric("addPheno"))
}
#' addPheno
#'
#' Adds phenotypic or processing information to the sample annotation table of the given \code{RnBSet} object.
#'
#' @param object \code{\linkS4class{RnBSet}} of interest.
#' @param trait Trait as a non-empty \code{vector} or \code{factor}. The length of this vector must be equal to the
#' number of samples in \code{object}, the i-th element storing the value for the i-th sample. Note that
#' names, if present, are ignored.
#' @param header Trait name given as a one-element \code{character}. This is the heading to be used for the sample
#' annotation table. This method fails if such a trait already exists; in other words, if
#' \code{header \%in\% names(pheno(object))}.
#' @return The modified dataset as an object of type \code{\linkS4class{RnBSet}}.
#'
#' @author Fabian Mueller
#' @export
#' @docType methods
#' @rdname addPheno-RnBSet-methods
#' @aliases addPheno
#' @aliases addPheno,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' logger.start(fname=NA)
#' is.hiPSC <- pheno(rnb.set.example)[, "Sample_Group"]=="hiPSC"
#' rnb.set.mod <- addPheno(rnb.set.example, is.hiPSC, "is_hiPSC")
#' pheno(rnb.set.mod)
#' }
setMethod("addPheno", signature(object="RnBSet"),
	function(object, trait, header) {
		## Append one new column to the sample annotation table.
		n.samples <- nrow(pheno(object))
		## The trait must be a vector or factor with one entry per sample
		trait.ok <- (is.vector(trait) || is.factor(trait)) && length(trait) == n.samples
		if (!trait.ok) {
			stop(paste("invalid value for trait; expected vector of length", n.samples))
		}
		## The column name must be a single, non-missing character value
		if (!(is.character(header) && length(header) == 1 && !is.na(header))) {
			stop("invalid value for header; expected one-element character")
		}
		## Refuse to overwrite an existing annotation column
		if (header %in% names(pheno(object))) {
			stop(paste("trait", header, "already exists in the sample annotation table"))
		}
		object@pheno[[header]] <- trait
		object
	}
)
########################################################################################################################
## Register the summarize.regions() generic only if not already defined
if (!isGeneric("summarize.regions")) {
	setGeneric("summarize.regions", function(object, ...) standardGeneric("summarize.regions"))
}
#' summarize.regions-methods
#'
#' Summarize DNA methylation information for which is present in the \code{RnBSet} object.
#'
#' @param object Dataset of interest.
#' @param region.type Type of the region annotation for which the summarization will be performed or \code{"strands"} for summarizing the methylation values from both strands
#' @param aggregation Operation to summarize the methylation values. Currently supported values are \code{"mean"}, \code{"median"}, \code{"min"}, \code{"max"} and \code{"coverage.weighted"}
#' @param overwrite If \code{TRUE} the existing region-level information for \code{region.type} is discarded
#'
#' @return object of the same class as the supplied one containing the summarized methylation information for the specified region types
#'
#' @rdname summarize.regions-methods
#' @docType methods
#' @aliases summarize.regions
#' @aliases summarize.regions,RnBSet-method
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' rnb.set.summarized<-summarize.regions(rnb.set.example, "genes", overwrite=TRUE)
#' head(meth(rnb.set.summarized, type="genes", row.names=TRUE))
#' }
setMethod("summarize.regions", signature(object="RnBSet"),
function(object, region.type, aggregation = rnb.getOption("region.aggregation"), overwrite = TRUE) {
if (!(is.character(region.type) && length(region.type) == 1 && (!is.na(region.type)))) {
stop("invalid value for region.type")
}
if (!(is.character(aggregation) && length(aggregation) == 1 && (!is.na(aggregation)))) {
stop("invalid value for aggregation; expected single character")
}
## FIXME: Some of these aren't implemented; and I need them (min and max in particular)
## Is there a measurable improvement over the simple get(...) implementation that was dropped?
aggregation <- aggregation[1]
if (!(aggregation %in% c("min", "max", "mean", "median", "sum", "coverage.weighted"))) {
stop("invalid value for aggregation; expected one of \"min\", \"max\", \"mean\", \"median\", \"sum\" or \"coverage.weighted\"")
}
if (overwrite == FALSE && region.type %in% names(object@meth.regions)) {
stop("invalid region type; methylation data already present")
}
# aggregate.f <- get(aggregation)
# aggregate.function <- function(x) {
# tryCatch(aggregate.f(x, na.rm = TRUE), warning = function(w) { as.double(NA) })
# }
## Extract the full annotation tables for the regions and the sites
if (!(region.type %in% c(rnb.region.types(object@assembly),"strands"))){
stop("unsupported region type")
}
if (region.type =="strands" && !inherits(object, "RnBiseqSet")){
stop("cannot summarize the strand-specific information for objects other than RnBiseqSet")
}
if (aggregation == "coverage.weighted" && !inherits(object, "RnBiseqSet")){
stop("coverage.weighted aggregation is allowed only for objects of type RnBiseqSet")
}
if (aggregation == "coverage.weighted" && is.null(object@covg.sites)){
stop("cannot apply coverage.weighted aggregation method to an RnBiseqSet object with
missing coverage information")
}
bff.finalizer <- rnb.getOption("disk.dump.bigff.finalizer")
if(region.type=="strands"){
annot.sizes <- rnb.annotation.size(assembly=object@assembly)
mapping <- sapply(names(rnb.get.chromosomes(assembly=object@assembly)), function(chr){
num.sites <- annot.sizes[[chr]]
#TODO:this is not really robust
IRanges(start=(1:(num.sites/2))*2-1, width=2, names=(1:(num.sites/2))*2-1)
})
}else{
mapping <- rnb.get.mapping(region.type, object@target, object@assembly)
}
chromInds <- unique(object@sites[,2])
#construct the overlap data structure for retrieving other information
regMap.ov.str <- lapply(chromInds, function(chr.id){
chr.map <- object@sites[,2]==chr.id
names(chr.map) <- NULL
site.ranges <- IRanges(start=object@sites[chr.map,3], width=1)
chr.name <- names(rnb.get.chromosomes(assembly=object@assembly))[chr.id]
mapping.contains.chrom <- chr.name %in% names(mapping)
if(!mapping.contains.chrom){
return(NULL)
}
chr.mapping.ind <- match(chr.name,names(mapping))
olap <- IRanges::as.matrix(findOverlaps(mapping[[chr.mapping.ind]], site.ranges))
if(nrow(olap)<1) return(NULL)
return(list(
chr.id=chr.id,
chr.name=chr.name,
chr.mapping.ind=chr.mapping.ind,
chr.match.inds=which(chr.map),
olap=olap
))
})
# logger.info(c("DEBUG:","Generated mapping structure for all chromosomes"))
region.indices <- do.call("rbind", lapply(regMap.ov.str, function(x){
if (is.null(x)) return(NULL)
indOnChrom <- unique(x$olap[,1])
regInd <- as.integer(names(mapping[[x$chr.mapping.ind]][indOnChrom]))
cbind(rep(1, length(regInd)), rep(x$chr.id, length(regInd)), regInd)
}))
# logger.info(c("DEBUG:","Generated region index data frame"))
regions2sites <- unlist(lapply(regMap.ov.str, function(x){
if (is.null(x)) return(list())
tapply(x$chr.match.inds[x$olap[,2]], factor(x$olap[,1], levels=unique(x$olap[,1])), list)
}), recursive=FALSE)
names(regions2sites) <- NULL
# regions2sites.tab <- do.call("rbind",lapply(1:length(regions2sites), FUN=function(i){
# cbind(rep(i, length(regions2sites[[i]])), regions2sites[[i]])
# }))
# regions2sites.tab.fac <- factor(regions2sites.tab[,1], levels=unique(regions2sites.tab[,1]))
# logger.info(c("DEBUG:","Generated mapping of regions to sites"))
nSamples <- length(samples(object))
aggr.f <- NULL
if (aggregation=="mean"){
aggr.f <- function(siteInds, siteVec, covgVec=NULL){
mean(siteVec[siteInds], na.rm=TRUE)
# 0.666
}
} else if (is.element(aggregation, c("min", "max", "mean", "median", "sum"))){
aggr.f <- function(siteInds, siteVec, covgVec=NULL){
do.call(aggregation, list(siteVec[siteInds], na.rm=TRUE))
}
} else if (aggregation=="coverage.weighted"){
aggr.f <- function(siteInds, siteVec, covgVec){
cTotal <- sum(covgVec[siteInds], na.rm=TRUE)
sum(siteVec[siteInds]*covgVec[siteInds], na.rm=TRUE)/cTotal
}
}
site.meth <- object@meth.sites
site.covg <- object@covg.sites
aggr.meth.sample <- function(j){
siteVec <- site.meth[,j]
covgVec <- NULL
if (aggregation=="coverage.weighted") covgVec <- site.covg[,j]
vapply(regions2sites, aggr.f, numeric(1), siteVec=siteVec, covgVec=covgVec)
}
aggr.covg.sample <- function(j){
siteVec <- site.covg[,j]
vapply(regions2sites, function(siteInds){
sum(siteVec[siteInds], na.rm=TRUE)
}, numeric(1))
}
## Assign the resulting matrices to the object
if (region.type=="strands"){
if(!is.null(object@status) && object@status$disk.dump){
doBigFf <- !is.null(object@status$disk.dump.bigff)
if (doBigFf) doBigFf <- object@status$disk.dump.bigff
# delete(object@meth.sites)
if (doBigFf) {
object@meth.sites <- BigFfMat(row.n=nrow(region.indices), col.n=nSamples, col.names=samples(object), finalizer=bff.finalizer)
# logger.info(c("DEBUG:","Created BigFfMat for meth"))
} else {
object@meth.sites <- convert.to.ff.matrix.tmp(matrix(numeric(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object))))
}
} else{
object@meth.sites <- matrix(numeric(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object)))
}
for (j in 1:nSamples){
# logger.info(c("DEBUG:","Aggregating methylation for sample",j))
object@meth.sites[,j] <- aggr.meth.sample(j)
}
if (!is.null(object@covg.sites)) {
if(!is.null(object@status) && object@status$disk.dump){
doBigFf <- !is.null(object@status$disk.dump.bigff)
if (doBigFf) doBigFf <- object@status$disk.dump.bigff
# delete(object@covg.sites)
if (doBigFf) {
object@covg.sites <- BigFfMat(row.n=nrow(region.indices), col.n=nSamples, col.names=samples(object), finalizer=bff.finalizer)
} else {
object@covg.sites <- convert.to.ff.matrix.tmp(matrix(integer(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object))))
}
} else {
object@covg.sites <- matrix(integer(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object)))
}
for (j in 1:nSamples){
# logger.info(c("DEBUG:","Aggregating coverage for sample",j))
object@covg.sites[,j] <- aggr.covg.sample(j)
}
} else {
object@covg.sites <- NULL
}
object@sites <- region.indices
} else if(!is.null(region.indices)){
if(!is.null(object@status) && object@status$disk.dump){
doBigFf <- !is.null(object@status$disk.dump.bigff)
if (doBigFf) doBigFf <- object@status$disk.dump.bigff
# if(!is.null(object@meth.regions[[region.type]])){
# delete(object@meth.regions[[region.type]])
# }
if(rnb.getOption("enforce.destroy.disk.dumps")){
delete(object@meth.regions[[region.type]])
}
if (doBigFf){
object@meth.regions[[region.type]] <- BigFfMat(row.n=nrow(region.indices), col.n=nSamples, col.names=samples(object), finalizer=bff.finalizer)
# logger.info(c("DEBUG:","Created BigFfMat for meth"))
} else {
object@meth.regions[[region.type]] <- convert.to.ff.matrix.tmp(matrix(numeric(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object))))
}
} else {
object@meth.regions[[region.type]] <- matrix(numeric(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object)))
}
for (j in 1:nSamples){
# logger.info(c("DEBUG:","Aggregating methylation for sample",j))
object@meth.regions[[region.type]][,j] <- aggr.meth.sample(j)
}
if(!is.null(object@covg.sites)) {
if(!is.null(object@status) && object@status$disk.dump){
doBigFf <- !is.null(object@status$disk.dump.bigff)
if (doBigFf) doBigFf <- object@status$disk.dump.bigff
# if(!is.null(object@covg.regions[[region.type]])) {
# delete(object@covg.regions[[region.type]])
# }
if(rnb.getOption("enforce.destroy.disk.dumps")){
delete(object@covg.regions[[region.type]])
}
if (doBigFf){
if (is.null(object@covg.regions)) object@covg.regions <- list()
object@covg.regions[[region.type]] <- BigFfMat(row.n=nrow(region.indices), col.n=nSamples, col.names=samples(object), finalizer=bff.finalizer)
} else {
object@covg.regions[[region.type]] <- convert.to.ff.matrix.tmp(matrix(integer(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object))))
}
}else{
object@covg.regions[[region.type]] <- matrix(integer(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object)))
}
for (j in 1:nSamples){
# logger.info(c("DEBUG:","Aggregating coverage for sample",j))
object@covg.regions[[region.type]][,j] <- aggr.covg.sample(j)
}
}else{
object@covg.regions <- NULL
}
attr(object@meth.regions[[region.type]], "aggregation") <- aggregation
object@regions[[region.type]] <- region.indices
}else{ #no valid regions found
object@meth.regions[[region.type]] <- matrix(0L, nrow=0, ncol=ncol(object@meth.sites))
if(!is.null(object@covg.sites)) object@covg.regions[[region.type]] <- matrix(0L, nrow=0, ncol=ncol(object@meth.sites))
attr(object@meth.regions[[region.type]], "aggregation") <- aggregation
object@regions[[region.type]] <- matrix(0L, nrow=0, ncol=3)
}
rm(site.meth) #for ff and BigFfMat, the finalizer should be "delete" and thus the objects should be deleted from disk when this function terminates
rm(site.covg)
object
}
)
########################################################################################################################
## Define the generic only if it has not already been created (e.g. by another package)
if (!isGeneric("remove.regions")) {
	setGeneric("remove.regions", function(object, ...) standardGeneric("remove.regions"))
}
#' remove.regions-methods
#'
#' Remove the summarized methylation information for a given region type from an \code{RnBSet} object.
#'
#' @param object Dataset of interest.
#' @param region.type Type of the region annotation for which the summarization should be removed
#'
#' @return object of the same class as the supplied one without the summarized methylation information for the specified region type
#'
#' @rdname remove.regions-methods
#' @docType methods
#' @aliases remove.regions
#' @aliases remove.regions,RnBSet-method
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' summarized.regions(rnb.set.example)
#' rnb.set.reduced<-remove.regions(rnb.set.example, "genes")
#' summarized.regions(rnb.set.reduced)
#' }
setMethod("remove.regions", signature(object="RnBSet"),
	function(object, region.type) {
		## Drop the region index table and the summarized methylation matrix
		## for the requested region type
		object@regions[[region.type]] <- NULL
		object@meth.regions[[region.type]] <- NULL
		## Coverage summaries exist only when site-level coverage is present
		if (!is.null(object@covg.sites)) {
			object@covg.regions[[region.type]] <- NULL
		}
		object
	}
)
########################################################################################################################
## Define the generic only if it has not already been created (e.g. by another package)
if (!isGeneric("regionMapping")) {
	setGeneric("regionMapping", function(object, ...) standardGeneric("regionMapping"))
}
#' regionMapping-methods
#'
#' get the mapping of regions in the RnBSet object to methylation site indices in the RnBSet object
#'
#' @param object Dataset as an object of type inheriting \code{\linkS4class{RnBSet}}.
#' @param region.type region type. see \code{\link{rnb.region.types}} for possible values
#' @return A list containing for each region the indices (as integers) of sites that belong to that region
#'
#' @rdname regionMapping-methods
#' @docType methods
#' @aliases regionMapping
#' @aliases regionMapping,RnBSet-method
#' @author Fabian Mueller
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' logger.start(fname=NA)
#' promoter.probe.list <- regionMapping(rnb.set.example,"promoters")
#' #get the number of CpGs per promoter in the dataset:
#' sapply(promoter.probe.list,length)
#' }
setMethod("regionMapping", signature(object = "RnBSet"),
	function(object, region.type) {
		## Maps each region (of the requested type) present in the object to the
		## global indices of the methylation sites it contains.
		## Validate parameters
		if (!inherits(object, "RnBSet")) {
			stop("invalid value for object; expected RnBSet")
		}
		if (!(is.character(region.type) && length(region.type) == 1 && (!is.na(region.type)))) {
			stop("invalid value for type")
		}
		if (!(region.type %in% rnb.region.types(object@assembly))) {
			stop(paste0("unsupported annotation type (annotation): ",region.type))
		}
		if (!(region.type %in% names(object@regions))) {
			stop(paste0("unsupported annotation type (RnBSet): ",region.type))
		}
		## Region ranges of the full annotation, one element per chromosome
		chrom.maps <- rnb.get.mapping(region.type, object@target, object@assembly)
		## Lookup table from chromosome index (column 2 of @sites/@regions) to name
		chrom.integer2name <- names(rnb.get.chromosomes(assembly=object@assembly))
		obj.sites <- data.frame(object@sites)
		region.map <- object@regions[[region.type]]
		chr.inds.reg <- unique(region.map[,2])
		## Factor levels follow the order of appearance so that tapply() below
		## preserves the chromosome order of the sites matrix
		obj.sites[,2] <- factor(chrom.integer2name[obj.sites[,2]],levels=chrom.integer2name[unique(obj.sites[,2])])
		# obj.sites[,2] <- factor(chrom.integer2name[obj.sites[,2]],levels=chrom.integer2name)
		# obj.sites[,2] <- as.factor(chrom.integer2name[obj.sites[,2]])
		## Single-base ranges of the site positions, per chromosome
		chrom.site.inds <- tapply(obj.sites[,3],obj.sites[,2],FUN=function(x){
			IRanges(start=x,width=1)
		})
		## Offsets converting per-chromosome site indices into global site indices
		chrom.offsets <- sapply(chrom.site.inds,length)
		chrom.offsets <-cumsum(c(0,chrom.offsets[-length(chrom.offsets)]))
		names(chrom.offsets) <- names(chrom.site.inds)
		result <- lapply(chr.inds.reg,FUN=function(chr){
			curChromName <- chrom.integer2name[chr]
			## Annotation indices of the regions present in this object on this chromosome
			rnbs.regs <- region.map[region.map[,2]==chr,3]
			rnbs.regs.char <- format(rnbs.regs,trim=TRUE,scientific=FALSE)
			rrRanges <- chrom.maps[[curChromName]]
			#only take the regions that are also in the RnBSet object
			if (!all(rnbs.regs.char %in% names(rrRanges))) {stop(paste("Not all regions in RnBSet are present in the annotation (",curChromName,")"))}
			rrRanges <- rrRanges[rnbs.regs.char,]
			olap<-as.matrix(findOverlaps(chrom.site.inds[[curChromName]], rrRanges))
			## Shift chromosome-local site indices to global indices
			olap[,1]<-olap[,1]+chrom.offsets[curChromName]
			## For each region, collect the list of overlapping site indices
			res<-tapply(olap[,1], olap[,2], list)
			return(res)
		})
		result<-unlist(result, recursive=FALSE)
		names(result)<-NULL
		## Sanity check: every region in the object must have received a site list
		if (dim(region.map)[1] != length(result)){
			stop("regionMapping failed")
		}
		return(result)
	}
)
########################################################################################################################
#' annotation-methods
#'
#' Genomic annotation of the methylation sites or regions covered in the supplied dataset.
#'
#' @param object dataset as an object of type inheriting \code{RnBSet}.
#' @param type loci or regions for which the annotation should be obtained. If the value of this parameter is
#' \code{"sites"} (default), individual methylation sites are annotated. Otherwise, this must be one of
#' the available region types, as returned by \code{\link{rnb.region.types}}.
#' @param add.names flag specifying whether the unique site identifiers should be used as row names of the
#' resulting data frame
#' @param include.regions if \code{TRUE}, one additional column is added to the returned annotation data frame
#'        for each of the available region types, giving for each site the index of the region it belongs to
#'        (or 0 if it does not belong to any region of that type)
#'
#' @return Annotation table in the form of a \code{data.frame}.
#'
#' @rdname annotation-methods
#' @docType methods
#' @aliases annotation
#' @aliases annotation,RnBSet-method
#' @author Pavlo Lutsik
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' ## show present sites
#' head(annotation(rnb.set.example, add.names=TRUE))
#' ## show promoters
#' ann.prom<-annotation(rnb.set.example, type="promoters", add.names=TRUE)
#' head(ann.prom)
#' }
setMethod("annotation", signature(object = "RnBSet"),
	function(object, type="sites", add.names=FALSE, include.regions=FALSE) {
		## Validate parameters
		if (!inherits(object, "RnBSet")) {
			stop("invalid value for object; expected RnBSet")
		}
		if (!(is.character(type) && length(type) == 1 && (!is.na(type)))) {
			stop("invalid value for type")
		}
		if (type == "sites") {
			## Site-level annotation: use the dataset's methylation target (e.g. "CpG")
			type <- object@target
			subsets <- object@sites
		} else {
			if (!(type %in% rnb.region.types(object@assembly))) {
				stop(paste0("unsupported annotation type (annotation): ",type))
			}
			if (!(type %in% names(object@regions))) {
				## This region type is not initialized with summarize.regions
				## FIXME: Report an error or initialize.
				stop(paste0("unsupported annotation type (RnBSet): ",type))
			}
			subsets <- object@regions[[type]]
		}
		annot <- rnb.get.annotation(type, object@assembly)
		## Convert per-chromosome indices (columns 2 and 3 of the index matrix)
		## into row indices of the concatenated annotation table
		ind.shift <- rnb.annotation.size(type, object@assembly)
		ind.shift <- cumsum(c(0, ind.shift[-length(ind.shift)]))
		subsets.full <- subsets[, 3] + ind.shift[subsets[, 2]]
		result <- rnb.annotation2data.frame(annot, add.names = add.names)[subsets.full, ]
		if (include.regions) {
			## Append one column per summarized region type, giving for each row the
			## index of the region it belongs to (0 when it is in no region).
			## A plain loop replaces the previous sapply + <<- construct, which was
			## used purely for its side effects and discarded its result.
			for (rt in names(object@regions)) {
				result[, rt] <- rep(0L, nrow(result))
				map <- regionMapping(object, rt)
				index.map <- lapply(seq_along(map), function(ix) rep(ix, length(map[[ix]])))
				result[unlist(map), rt] <- unlist(index.map)
			}
		}
		return(result)
	}
)
########################################################################################################################
## NOTE(review): the isGeneric() guard is commented out, so this setGeneric() call
## is unconditional and will redefine any previously registered "save.matrices"
## generic — confirm this is intended.
#if (!isGeneric("save.matrices")) {
setGeneric("save.matrices", function(object, path, ...) standardGeneric("save.matrices"))
#}
setMethod("save.matrices", signature(object="RnBSet", path="character"),
	function(object, path){
		## Persist the disk-backed (ff / BigFfMat) matrices of a disk-dumped object
		## into the directory 'path'. In-memory objects are left untouched (their
		## matrices are serialized together with the object itself).
		if(!is.null(object@status) && object@status$disk.dump){
			## Site-level methylation matrix
			if("ff" %in% class(object@meth.sites)){
				## ffsave() stores objects by name; "ffmatrix" is the name that
				## load.matrices() retrieves when restoring
				ffmatrix <- object@meth.sites
				ffsave(ffmatrix,file=file.path(path, "rnb.meth"),rootpath=getOption('fftempdir'))
				rm(ffmatrix)
			} else if("BigFfMat" %in% class(object@meth.sites)){
				save.bigFfMat(object@meth.sites, file=file.path(path, "rnb.meth"), rootpath=getOption('fftempdir'))
			}
			## Site-level coverage matrix (may be absent; then neither branch fires)
			if("ff" %in% class(object@covg.sites)){
				ffmatrix <- object@covg.sites
				ffsave(ffmatrix, file=file.path(path, "rnb.covg"),rootpath=getOption('fftempdir'))
				rm(ffmatrix)
			} else if("BigFfMat" %in% class(object@covg.sites)){
				save.bigFfMat(object@covg.sites, file=file.path(path, "rnb.covg"), rootpath=getOption('fftempdir'))
			}
			## Region-level matrices: one subdirectory per region type, named by
			## POSITION (1, 2, ...), which load.matrices() mirrors for objects that
			## carry a "version" slot.
			if(length(object@regions) != 0){
				for(rgn in 1:length(object@regions)){
					rgnpath<-file.path(path,rgn)
					if(!file.exists(rgnpath)){
						dir.create(rgnpath)
					}
					if("ff" %in% class(object@meth.regions[[rgn]])){
						ffmatrix<-object@meth.regions[[rgn]]
						ffsave(ffmatrix, file=file.path(path, rgn, "rnb.meth"),rootpath=getOption('fftempdir'))
						rm(ffmatrix)
					} else if("BigFfMat" %in% class(object@meth.regions[[rgn]])){
						save.bigFfMat(object@meth.regions[[rgn]], file=file.path(path, rgn, "rnb.meth"), rootpath=getOption('fftempdir'))
					}
					## NOTE(review): if covg.regions were NULL/shorter while regions
					## exist, the subscript below would error — presumably covg.regions
					## always mirrors regions for disk-dumped objects; confirm.
					if("ff" %in% class(object@covg.regions[[rgn]])){
						ffmatrix<-object@covg.regions[[rgn]]
						ffsave(ffmatrix, file=file.path(path, rgn, "rnb.covg"),rootpath=getOption('fftempdir'))
						rm(ffmatrix)
					} else if("BigFfMat" %in% class(object@covg.regions[[rgn]])){
						save.bigFfMat(object@covg.regions[[rgn]], file=file.path(path, rgn, "rnb.covg"), rootpath=getOption('fftempdir'))
					}
				}
			}
		}
	})
########################################################################################################################
## Generic for restoring disk-backed matrices saved by save.matrices()
setGeneric(
	"load.matrices",
	function(object, path, ...) standardGeneric("load.matrices")
)
setMethod("load.matrices", signature(object="RnBSet", path="character"),
	function(object, path, temp.dir=tempdir()){
		## Restore the disk-backed matrices written by save.matrices() from 'path'
		## into the corresponding slots of 'object'.
		## NOTE(review): 'temp.dir' is accepted but never used in this method.
		## Determine whether the object was dumped using the BigFf backend
		doBigFf <- !is.null(object@status)
		if (doBigFf) doBigFf <- !is.null(object@status$disk.dump.bigff)
		if (doBigFf) doBigFf <- object@status$disk.dump.bigff
		if (doBigFf){
			object@meth.sites <- load.bigFfMat(file.path(path, "rnb.meth"), rootpath=getOption("fftempdir"))
			if(!is.null(object@covg.sites)){
				object@covg.sites <- load.bigFfMat(file.path(path, "rnb.covg"), rootpath=getOption("fftempdir"))
			}
		} else {
			## ffsave() writes two files per archive (.ffData and .RData); a count
			## of exactly 2 matching files indicates a saved ff matrix
			if(sum(grepl("rnb.meth", list.files(path)))==2){
				## Load into a private environment to fetch the matrix stored
				## under the name "ffmatrix" (as written by save.matrices())
				load_env<-new.env()
				suppressMessages(ffload(file=file.path(path, "rnb.meth"), envir=load_env,rootpath=getOption("fftempdir")))
				object@meth.sites<-get("ffmatrix", envir=load_env)
				rm(load_env)
			}
			if(sum(grepl("rnb.covg", list.files(path)))==2){
				load_env<-new.env()
				suppressMessages(ffload(file=file.path(path, "rnb.covg"), envir=load_env,rootpath=getOption("fftempdir")))
				object@covg.sites<-get("ffmatrix", envir=load_env)
				rm(load_env)
			}
		}
		rgns <- names(object@regions)
		if(!is.null(rgns)){
			## Objects carrying a "version" slot store region matrices in numbered
			## subdirectories (matching the positional naming in save.matrices())
			if (.hasSlot(object, 'version')) {
				rgns <- 1:length(rgns)
			}
			for(rgn in rgns){
				if (doBigFf){
					object@meth.regions[[rgn]] <- load.bigFfMat(file.path(path, rgn, "rnb.meth"), rootpath=getOption("fftempdir"))
					if(!is.null(object@covg.regions[[rgn]])){
						object@covg.regions[[rgn]] <- load.bigFfMat(file.path(path, rgn, "rnb.covg"), rootpath=getOption("fftempdir"))
					}
				} else {
					if(sum(grepl("rnb.meth",list.files(file.path(path, rgn))))==2){
						load_env<-new.env()
						suppressMessages(ffload(file=file.path(path, rgn, "rnb.meth"), envir=load_env, rootpath=getOption("fftempdir")))
						object@meth.regions[[rgn]]<-get("ffmatrix", envir=load_env)
						rm(load_env)
					}
					if(sum(grepl("rnb.covg",list.files(file.path(path, rgn))))==2){
						load_env<-new.env()
						suppressMessages(ffload(file=file.path(path, rgn, "rnb.covg"), envir=load_env, rootpath=getOption("fftempdir")))
						object@covg.regions[[rgn]]<-get("ffmatrix", envir=load_env)
						rm(load_env)
					}
				}
			}
		}
		return(object)
	})
########################################################################################################################
#' save.rnb.set
#'
#' Consistent saving of an \code{RnBSet} objects with large matrices of type \link{ff}.
#'
#' @param object \code{RnBSet}-inheriting object.
#' @param path the name of the output file (or directory if \code{archive} is \code{FALSE})
#' without an extension. If only the file name is given the object will be saved
#' in the current working directory.
#' @param archive if \code{TRUE} (default value) the output is a ZIP-file.
#'
#' @details The saved object can be reloaded with the \link{load.rnb.set} function.
#'
#' @return invisibly, the full path to the ZIP file (if \code{archive} is \code{TRUE}),
#' or to the output directory (otherwise)
#'
#' @author Pavlo Lutsik
#' @export
save.rnb.set<-function(object, path, archive=TRUE){
	## Validate parameters
	if (!inherits(object, "RnBSet")) {
		stop("invalid value for object")
	}
	if (!(is.character(path) && length(path) == 1 && isTRUE(!grepl("^[/\\.]*$", path)))) {
		stop("invalid value for path")
	}
	if (!parameter.is.flag(archive)) {
		stop("invalid value for archive")
	}
	## BUGFIX: .Platform has no "OS" element (the correct one is "OS.type"), so the
	## original comparison '.Platform$OS == "windows"' was against NULL and could
	## never detect Windows (and yielded logical(0) inside the && chain).
	if(object@status$disk.dump && .Platform$OS.type == "windows" && Sys.getenv("R_ZIPCMD")==""){
		rnb.warning(c("Zip not found on this Windows system, this RnBSet object will not be saved.",
						"See the instructions for installing ZIP on Windows in the FAQ section of the RnBeads website."))
		return(invisible(path))
	}
	## Get the full path of the file or directory to be created
	fullpath <- normalizePath(gsub("/$", "", gsub("\\", "/", path, fixed = TRUE)), winslash = "/", mustWork = FALSE)
	## Create or overwrite a directory to store the files
	if (unlink(fullpath, recursive = TRUE) == 1) {
		stop("Specified path already exists and cannot be overwritten")
	}
	if (archive) {
		if (unlink(paste0(fullpath, ".zip"), recursive = TRUE) == 1) {
			stop("Specified path already exists and cannot be overwritten")
		}
	}
	if (!dir.create(fullpath, showWarnings = FALSE, recursive = TRUE)) {
		stop("Could not create output directory")
	}
	## Save all data structures: the disk-backed matrices first, then the object
	save.matrices(object, fullpath)
	save(object, file=file.path(fullpath, "rnb.set.RData"))
	## Create a ZIP archive of the whole directory
	if(archive){
		currdir <- setwd(fullpath)
		## Restore the working directory even if zip() fails
		on.exit(setwd(currdir), add = TRUE)
		## "-m" moves the files into the archive, i.e. deletes them afterwards
		zip(paste0(fullpath, ".zip"), dir(), flags = "-rm9X")
		## Busy-wait until zip has removed all files.
		## NOTE(review): this inspects 'path' (possibly relative) after the working
		## directory was changed, and spins forever if zip leaves files behind —
		## confirm intended.
		while(length(list.files(path))>0){
			TRUE;
		}
		setwd(currdir)
		if (unlink(fullpath, recursive = TRUE) == 1) {
			rnb.warning("Could not clean output directory after zipping")
		}
		fullpath <- paste0(fullpath, ".zip")
	}
	return(invisible(fullpath))
}
########################################################################################################################
#' load.rnb.set
#'
#' Loading of the \code{RnBSet} objects with large matrices of type \pkg{ff}.
#'
#' @param path full path of the saved dataset: the ZIP file created with \code{archive=TRUE},
#' or the directory created with \code{archive=FALSE}.
#' @param temp.dir \code{character} singleton which specifies temporary directory, used while loading
#'
#' @return Loaded object
#'
#' @author Pavlo Lutsik
#' @export
load.rnb.set<-function(path, temp.dir=tempdir()){
	## Validate parameters
	if (!(is.character(path) && length(path) == 1 && isTRUE(!grepl("^[/\\.]*$", path)))) {
		stop("invalid value for path")
	}
	if (!(is.character(temp.dir) && length(temp.dir) == 1 && isTRUE(!grepl("^[/\\.]*$", temp.dir)))) {
		stop("invalid value for temp.dir")
	}
	if (!file.exists(path)) {
		stop("invalid value for path; the path does not exist")
	}
	if (!isTRUE(file.info(temp.dir)[1, "isdir"])) {
		stop("invalid value for temp.dir; the path does not exist or is not a directory")
	}
	## BUGFIX: .Platform has no "OS" element (the correct one is "OS.type").
	## The original comparison '.Platform$OS == "windows"' was against NULL,
	## producing logical(0) in the condition and never selecting the internal unzip.
	if(.Platform$OS.type == "windows" && Sys.getenv("R_ZIPCMD")==""){
		## No external zip utility available; fall back to R's internal unzip
		method <- "internal"
	}else{
		method <- "unzip"
	}
	## Tolerate users passing the path of the serialized object file itself
	if(grepl("rnb.set.RData",path)){
		logger.info("The path to the data set directory should be provided, not to the invidual file. Changing to parent directory.")
		path <- dirname(path)
	}
	## A non-directory path is assumed to be the ZIP archive; extract it first
	if(!file.info(path)[["isdir"]]){
		td<-tempfile("extraction", temp.dir)
		unzip(path, exdir=td, unzip=method)
	}else{
		td<-path
	}
	## Load the serialized object, then reattach its disk-backed matrices
	load_env<-new.env(parent=emptyenv())
	load(file.path(td, "rnb.set.RData"),envir=load_env)
	load.matrices(get("object", load_env), td, temp.dir=temp.dir)
}
########################################################################################################################
if (!isGeneric("destroy")) setGeneric("destroy", function(object) standardGeneric("destroy"))
#' destroy-methods
#'
#' Remove tables stored to disk from the file system. Useful for cleaning up disk dumped objects.
#'
#' @param object object inheriting from \code{\linkS4class{RnBSet}}
#' @return Nothing of particular interest
#'
#' @rdname destroy-methods
#' @docType methods
#' @aliases destroy
#' @aliases destroy,RnBSet-method
#' @export
setMethod("destroy", signature(object="RnBSet"),
	function(object){
		## Only disk-dumped objects hold on-disk tables that need removal
		if(object@status$disk.dump){
			delete(object@meth.sites)
			if(!is.null(object@covg.sites)){
				delete(object@covg.sites)
			}
			## Also remove the disk tables of every summarized region type
			## (iterating over names(NULL) yields zero iterations, so no extra
			## NULL check on @regions is needed)
			for(rgn in names(object@regions)){
				delete(object@meth.regions[[rgn]])
				if(!is.null(object@covg.regions)){
					delete(object@covg.regions[[rgn]])
				}
			}
		}
		invisible(TRUE)
	}
)
########################################################################################################################
## meth.matrices
##
## Creates a list of methylation value (beta) matrices for the given dataset.
##
## @param object Methylation dataset object of type that inherits \code{RnBSet}.
## @param include.sites Flag indicating if the methylation matrix of sites or probes is to be included in the result.
## @return Non-empty \code{list} of matrices of beta values. If \code{include.sites} is \code{TRUE}, the first matrix in
## the list is the one based on sites or probes. Other matrices store region-based methylation for (some of) the
## regions addressed in the option \code{"region.types"}.
## @author Yassen Assenov
## meth.matrices
##
## Creates a list of methylation value (beta) matrices for the given dataset.
##
## @param object Methylation dataset object of type that inherits \code{RnBSet}.
## @param include.sites Flag indicating if the methylation matrix of sites or probes is to be included.
## @return Non-empty \code{list} of matrices of beta values; the "sites" matrix comes
##         first (when requested), followed by one matrix per analyzed region type
##         whose methylation values could be retrieved.
## @author Yassen Assenov
meth.matrices <- function(object, include.sites = rnb.getOption("analyze.sites")) {
	result <- list()
	if (include.sites) {
		result[["sites"]] <- meth(object)
	}
	for (rtype in rnb.region.types.for.analysis(object)) {
		## Region types whose methylation cannot be retrieved are silently skipped
		mm <- tryCatch(meth(object, rtype), error = function(err) NULL)
		if (!is.null(mm)) {
			result[[rtype]] <- mm
		}
	}
	result
}
########################################################################################################################
## get.row.names
##
## Generates row names based on the genomic location.
##
## @param object \code{RnBSet} object.
## @return \code{character} vector of row names.
## @author Pavlo Lutsik
## get.row.names
##
## Generates row names based on the genomic location.
##
## @param object \code{RnBSet} object.
## @param type   \code{"sites"} (default) or the name of a summarized region type.
## @return \code{character} vector of row names: unique identifiers when available,
##         otherwise annotation row names, otherwise "Chromosome.Start.Strand".
## @author Pavlo Lutsik
get.row.names<-function(object, type="sites"){
	## Validate the requested type (the previously assigned target/subsets
	## variables were dead code — only the validation mattered)
	if (type != "sites" && !(type %in% names(object@regions))) {
		stop("unsupported region type")
	}
	loc.info<-annotation(object, type=type, add.names=TRUE)
	if ("ID" %in% colnames(loc.info) && anyDuplicated(loc.info[, "ID"]) == 0) {
		## Unique identifiers (e.g. probe IDs) take precedence
		result <- loc.info[,"ID"]
	} else if (!is.null(rownames(loc.info))) {
		result <- rownames(loc.info)
	} else {
		## Fall back to a location-based name: chromosome.start.strand
		result <- paste(loc.info[,"Chromosome"], loc.info[,"Start"], as.character(loc.info[,"Strand"]), sep=".")
	}
	result
}
########################################################################################################################
## rnb.get.row.token
##
## Gets the methylation target, that is, the basic methylation feature of a dataset based on its platform.
##
## @param object Methylation dataset of interest, an object of type inheriting \code{MethyLumiSet} or \code{RnBSet}.
## @param plural Flag, indicating if the plural form of the word.
## @return Word or phrase denoting the term for a single target of the platform.
## @author Pavlo Lutsik
## Returns the term for a single methylation feature ("site" or "probe"),
## based on a class name (character), a MethyLumiSet, or an RnBSet's target.
rnb.get.row.token<-function(object, plural = FALSE){
	if (is.character(object)) {
		## Sequencing-based classes measure sites; array-based classes measure probes
		result <- ifelse(object %in% c("RnBiseqSet", "RnBSet"), "site", "probe")
	} else if (inherits(object, "MethyLumiSet")){
		result <- "probe"
	} else if (object@target == "CpG") {
		result <- "site"
	} else { # object@target == "probes450"
		result <- "probe"
	}
	## if/else instead of ifelse(): ifelse() is meant for vector conditions and
	## silently truncates its result to the length of the (scalar) condition
	if (plural) paste0(result, "s") else result
}
########################################################################################################################
## rnb.get.covg.token
##
## Gets the measure of coverage of a dataset based on its platform.
##
## @param object Methylation dataset of interest, an object of type inheriting \code{MethyLumiSet} or \code{RnBSet}.
## @param capital Flag, indicating if the first letter of the returned phrase should be capitalized.
## @return Word or phrase denoting the term for depth of coverage.
## @author Pavlo Lutsik
## Returns the term for the coverage measure ("coverage" or "bead counts"),
## based on a class name (character), a MethyLumiSet, or an RnBSet's target.
rnb.get.covg.token<-function(object, capital=FALSE){
	if (is.character(object)) {
		result <- ifelse(object %in% c("RnBiseqSet", "RnBSet"), "coverage", "bead counts")
	} else if (inherits(object, "MethyLumiSet")) {
		result <- "bead counts"
	} else if (object@target == "CpG") {
		result <- "coverage"
	} else { # object@target == "probes450"
		result <- "bead counts"
	}
	## if/else instead of scalar ifelse(); capitalize() is invoked only when needed
	if (capital) capitalize(result) else result
}
########################################################################################################################
if(!isGeneric("sampleMethApply")) setGeneric("sampleMethApply", function(object, ...) standardGeneric("sampleMethApply"))
#' sampleMethApply-methods
#'
#' Applies a function over the methylation values for all samples in an \code{RnBSet} using a low memory footprint.
#'
#' @param object object inheriting from \code{\linkS4class{RnBSet}}
#' @param fn function to be applied
#' @param type \code{character} singleton. Specify "sites" (default) or a region type over which the function is applied
#' @param ... arguments passed on to the function
#' @return Result analogous to \code{apply(meth(rnbSet, type), 2, FUN=FUN)}
#'
#' @seealso \code{\link[=meth,RnBSet-method]{meth}} Retrieving the matrix of methylation values
#' @rdname sampleMethApply-methods
#' @docType methods
#' @aliases sampleMethApply
#' @aliases sampleMethApply,RnBSet-method
setMethod("sampleMethApply", signature(object = "RnBSet"),
	function(object, fn, type="sites", ...) {
		## Validate parameters
		if (!(is.character(type) && length(type) == 1 && (!is.na(type)))) {
			stop("invalid value for type")
		}
		## Accept "sites", the dataset's site type, or a summarized region type.
		## (The previous version assigned nrow(object@meth.sites) to an unused
		## variable here — dead code, now removed.)
		if (!(type %in% c("sites", object@target, names(object@regions)))) {
			stop("unsupported region type")
		}
		## Fetch one sample's methylation column at a time to keep the memory
		## footprint low, and apply fn to it
		res <- sapply(seq_along(samples(object)), FUN=function(j){
			fn(meth(object, type=type, j=j), ...)
		})
		return(res)
	}
)
if(!isGeneric("sampleCovgApply")) setGeneric("sampleCovgApply", function(object, ...) standardGeneric("sampleCovgApply"))
#' sampleCovgApply-methods
#'
#' Applies a function over the coverage values for all samples in an \code{RnBSet} using a low memory footprint.
#' @param object object inheriting from \code{\linkS4class{RnBSet}}
#' @param fn function to be applied
#' @param type \code{character} singleton. Specify "sites" (default) or a region type over which the function is applied
#' @param ... arguments passed on to the function
#' @return Result analogous to \code{apply(covg(rnbSet, type), 2, FUN=FUN)}
#'
#' @seealso \code{\link[=meth,RnBSet-method]{covg}} Retrieving the matrix of coverage values
#' @rdname sampleCovgApply-methods
#' @docType methods
#' @aliases sampleCovgApply
#' @aliases sampleCovgApply,RnBSet-method
setMethod("sampleCovgApply", signature(object = "RnBSet"),
	function(object, fn, type="sites", ...) {
		## Validate parameters
		if (!(is.character(type) && length(type) == 1 && (!is.na(type)))) {
			stop("invalid value for type")
		}
		## Accept "sites", the dataset's site type, or a summarized region type.
		## (The previous version assigned nrow(object@covg.sites) to an unused
		## variable here — dead code, now removed.)
		if (!(type %in% c("sites", object@target, names(object@regions)))) {
			stop("unsupported region type")
		}
		## Fetch one sample's coverage column at a time to keep the memory
		## footprint low, and apply fn to it
		res <- sapply(seq_along(samples(object)), FUN=function(j){
			fn(covg(object, type=type, j=j), ...)
		})
		return(res)
	}
)
########################################################################################################################
if(!isGeneric("getNumNaMeth")) setGeneric("getNumNaMeth", function(object, ...) standardGeneric("getNumNaMeth"))
#' getNumNaMeth-methods
#'
#' For each site/region, getNumNaMeth retrieves the number of NA values across all samples.
#' Does this efficiently by breaking down the methylation matrix into submatrices
#' @param object object inheriting from \code{\linkS4class{RnBSet}}
#' @param type "sites" or region type
#' @param chunkSize size of each submatrix (performance tuning parameter)
#' @param mask logical matrix. its entries will also be considered NAs in counting
#' @return vector containing the number of NAs per site/region
#'
#' @rdname getNumNaMeth-methods
#' @docType methods
#' @aliases getNumNaMeth
#' @aliases getNumNaMeth,RnBSet-method
setMethod("getNumNaMeth", signature(object = "RnBSet"),
	function(object, type="sites", chunkSize=1e5, mask=NULL) {
		## Validate parameters
		if (!(is.character(type) && length(type) == 1 && (!is.na(type)))) {
			stop("invalid value for type")
		}
		if (!(type %in% c("sites", object@target, names(object@regions)))) {
			stop("unsupported region type")
		}
		## Determine start and end row indices for each submatrix chunk
		n <- nsites(object, type)
		indStarts <- seq(1,n,by=chunkSize)
		indEnds <- c(indStarts[-1]-1, n)
		## Count NA (and masked) entries per row, one chunk at a time, so that at
		## most chunkSize rows of the methylation matrix are materialized at once
		res <- unlist(lapply(seq_along(indStarts), FUN=function(i){
			indsCur <- indStarts[i]:indEnds[i]
			mm <- meth(object, type=type, i=indsCur)
			isNaMat <- is.na(mm)
			## drop=FALSE keeps the matrix shape when a chunk has a single row
			if (!is.null(mask)) isNaMat <- isNaMat | mask[indsCur, , drop=FALSE]
			return(as.integer(rowSums(isNaMat)))
		}))
		return(res)
	}
)
if(!isGeneric("isImputed")) setGeneric("isImputed", function(object, ...) standardGeneric("isImputed"))
#' isImputed
#'
#' Getter for the imputation field. Return TRUE, if the object has been imputed and FALSE otherwise.
#' @param object Object for which the information should be returned
#' @return TRUE, if the object has been imputed and FALSE otherwise.
#' @author Michael Scherer
#' @aliases isImputed
#' @aliases isImputed,RnBSet-method
#' @export
setMethod("isImputed", signature(object="RnBSet"),
	function(object){
		## Objects serialized with older package versions may lack the slot
		## entirely; treat those as not imputed.
		if(!.hasSlot(object, "imputed")){
			return(FALSE)
		}
		object@imputed
	}
)
########################################################################################################################
#' rnb.sample.summary.table
#'
#' Creates a sample summary table from an RnBSet object
#'
#' @param rnbSet \code{\linkS4class{RnBSet}} of interest.
#' @return a summary table (as data.frame) with the following variables for each sample (rows):
#' \item{sampleName}{Name of the sample}
#' \item{*_num (* can be 'sites' or a region type)}{Number of sites or regions with coverage in the sample}
#' \item{*_covgMean (\code{RnBiseqSet} only)}{Mean coverage of sites or regions in the sample}
#' \item{*_covgMedian (\code{RnBiseqSet} only)}{Median coverage of sites or regions in the sample}
#' \item{*_covgPerc25 (\code{RnBiseqSet} only)}{25 percentile of coverage of sites or regions in the sample}
#' \item{*_covgPerc75 (\code{RnBiseqSet} only)}{75 percentile of coverage of sites or regions in the sample}
#' \item{*_numCovg5,10,30,60 (\code{RnBiseqSet} only)}{Number of sites or regions with coverage greater or equal to 5,10,30,60}
#' \item{sites_numDPval5em2,1em2,1em3 (\code{RnBeadSet} only)}{Number of sites with a detection p-value smaller than 0.05,0.01,0.001}
#' \item{**_numSitesMean (** is any region type)}{Mean number of sites in a region}
#' \item{**_numSitesMedian}{Median number of sites in a region}
#' \item{**_numSites2,5,10,20}{Number of regions with at least 2,5,10,20 sites with valid methylation measurements}
#' @author Fabian Mueller
#' @aliases rnb.sample.summary.table,RnBSet-method
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' logger.start(fname=NA)
#' rnb.sample.summary.table(rnb.set.example)
#' }
rnb.sample.summary.table <- function(rnbSet) {
	is.biseq <- "RnBiseqSet" %in% class(rnbSet)
	is.beads <- "RnBeadSet" %in% class(rnbSet)
	## hoisted: sample identifiers are needed repeatedly below
	sample.ids <- samples(rnbSet)
	nsamples <- length(sample.ids)
	df.empty <- data.frame(matrix(nrow=nsamples, ncol=0))
	rownames(df.empty) <- sample.ids
	tt <- data.frame(df.empty, sampleName=sample.ids, stringsAsFactors=FALSE)
	reg.types.regions <- summarized.regions(rnbSet)
	reg.types <- c("sites", reg.types.regions)
	for (rr in reg.types){
		tt.cur <- df.empty
		## number of sites/regions with a valid (non-NA) methylation value, per sample
		tt.cur$num <- sampleMethApply(rnbSet, function(x){sum(!is.na(x))}, type=rr)
		if (is.biseq){
			## per-sample coverage summary; entries with zero coverage or without
			## a methylation call are treated as missing
			covgStats <- do.call("rbind", lapply(seq_len(nsamples), FUN=function(j){
				mm <- as.vector(meth(rnbSet, rr, j=j))
				cc <- as.vector(covg(rnbSet, rr, j=j))
				cc[cc==0] <- NA
				cc[is.na(mm)] <- NA
				qq <- quantile(cc, probs = c(0.25,0.75), na.rm=TRUE)
				c(
					mean(cc, na.rm=TRUE),
					median(cc, na.rm=TRUE),
					qq[1],
					qq[2],
					sum(cc>=5, na.rm=TRUE),
					sum(cc>=10, na.rm=TRUE),
					sum(cc>=30, na.rm=TRUE),
					sum(cc>=60, na.rm=TRUE)
				)
			}))
			colnames(covgStats) <- c("covgMean", "covgMedian", "covgPerc25", "covgPerc75", "numCovg5", "numCovg10", "numCovg30", "numCovg60")
			tt.cur <- cbind(tt.cur, covgStats)
		}
		if (is.beads && rr == "sites"){
			## detection p-value counts (Infinium arrays only); dpval() may
			## return NULL when no p-values are stored
			pp <- dpval(rnbSet, type=rr)
			if (!is.null(pp)) {
				tt.cur$numDPval5em2 <- colSums(pp < 5e-2, na.rm=TRUE)
				tt.cur$numDPval1em2 <- colSums(pp < 1e-2, na.rm=TRUE)
				tt.cur$numDPval1em3 <- colSums(pp < 1e-3, na.rm=TRUE)
			}
		}
		if (rr %in% reg.types.regions){
			regions2sites <- regionMapping(rnbSet, region.type=rr)
			## hoisted: previously nsites() was re-evaluated for every sample
			n.regions <- nsites(rnbSet, rr)
			## per region and sample: number of sites with valid methylation measurements
			num.sites <- sapply(seq_len(nsamples), function(i){
				mm.s.nna <- !is.na(as.vector(meth(rnbSet, j=i)))
				sapply(seq_len(n.regions), function(j){
					sum(mm.s.nna[regions2sites[[j]]])
				})
			})
			tt.cur$numSitesMean <- colMeans(num.sites, na.rm=TRUE)
			tt.cur$numSitesMedian <- colMedians(num.sites, na.rm=TRUE)
			tt.cur$numSites2 <- colSums(num.sites>=2, na.rm=TRUE)
			tt.cur$numSites5 <- colSums(num.sites>=5, na.rm=TRUE)
			tt.cur$numSites10 <- colSums(num.sites>=10,na.rm=TRUE)
			tt.cur$numSites20 <- colSums(num.sites>=20,na.rm=TRUE)
		}
		## prefix all statistic columns with the region type, e.g. "sites_num"
		colnames(tt.cur) <- paste(rr, colnames(tt.cur), sep="_")
		tt <- data.frame(tt, tt.cur)
	}
	return(tt)
}
########################################################################################################################
|
/R/RnBSet-class.R
|
no_license
|
epigen/RnBeads
|
R
| false
| false
| 94,066
|
r
|
########################################################################################################################
## RnBSet-class.R
## created: 2012-04-06
## creator: Pavlo Lutsik
## ---------------------------------------------------------------------------------------------------------------------
## RnBSet class definition.
########################################################################################################################
## GLOBALS
## Names of the RnBSet slots holding the per-site data matrices
RNBSET.SLOTNAMES<-c("meth.sites", "covg.sites")
##
## ---------------------------------------------------------------------------------------------------------------------
## CLASS DEFINITIONS
## ---------------------------------------------------------------------------------------------------------------------
#' @include bigFf.R
## Register the ff package's S3 matrix class so it can participate in S4
## class unions and method signatures below
setOldClass(c("ff_matrix"))
## Class unions describing the admissible storage backends for the data matrix
## slots: plain in-memory matrix, ff disk-backed matrix, BigFfMat wrapper,
## and/or NULL for optional slots
setClassUnion("matrixOrff", c("matrix", "ff_matrix"))
setClassUnion("matrixOrffOrBigFfMat", c("matrix", "ff_matrix", "BigFfMat"))
setClassUnion("matrixOrffOrNULL", c("matrix", "ff_matrix", "NULL"))
setClassUnion("matrixOrffOrBigFfMatOrNULL", c("matrix", "ff_matrix", "BigFfMat", "NULL"))
## Unions allowing optional (NULL-able) list and character slots
setClassUnion("listOrNULL", c("list", "NULL"))
setClassUnion("characterOrNULL", c("character", "NULL"))
setClassUnion("characterOrNULL", c("character", "NULL"))
#' RnBSet Class
#'
#' Basic class for storing DNA methylation and experimental quality information
#'
#' @details
#' It is a virtual class and objects of type \code{RnBSet} should not be instantiated. Instead, the child classes are
#' used: \code{\linkS4class{RnBeadRawSet}} and \code{\linkS4class{RnBeadSet}} for Infinium HumanMethylation and
#' \code{\linkS4class{RnBiseqSet}} for bisulfite sequencing data
#'
#' @section Slots:
#' \describe{
#' \item{\code{pheno}}{Sample annotations (phenotypic and processing data) in the form of a \code{data.frame}.}
#' \item{\code{sites}}{A \code{matrix} object storing the identifiers of the methylation sites for which the
#' methylation information is present}
#' \item{\code{meth.sites}}{\code{matrix} of methylation values. Every row corresponds to a methylation site,
#' and every column - to a sample.}
#' \item{\code{covg.sites}}{\code{matrix} of coverage values. Every row corresponds to a methylation site,
#' and every column - to a sample.}
#' \item{\code{regions}}{\code{list} of all identifiers of methylation sites for which methylation information
#' is available.}
#' \item{\code{meth.regions}}{\code{list} of methylation \code{matrix} objects, one per available region type. Every row in a
#' matrix corresponds to a methylation site, and every column - to a sample.}
#' \item{\code{covg.regions}}{\code{list} of coverage \code{matrix} objects, one per available region type.
#' Every row corresponds to a region, and every column - to a sample.}
#' \item{\code{status}}{\code{list} with meta-information about the object.}
#' \item{\code{assembly}}{\code{character} vector of length one, specifying the genome assembly which the object is linked to, e.g. "hg19".}
#' \item{\code{target}}{\code{character} vector of length one, specifying the feature class:
#' \code{"CpG"} for sequencing data, \code{"probes450"} and \code{"probes27"} for
#' HumanMethylation450 and HumanMethylation27 microarrays respectively.}
#' \item{\code{inferred.covariates}}{\code{list} with covariate information.
#' Can contain elements \code{"sva"} and \code{"cell.types"}.}
#' \item{\code{version}}{Package version in which the dataset was created.}
#' \item{\code{imputed}}{Flag indicating if methylation matrix has been imputed.}
#' }
#'
#' @section Methods and Functions:
#' \describe{
#' \item{\code{\link[=pheno,RnBSet-method]{pheno}}}{Gets the phenotypic and processing data of the dataset.}
#' \item{\code{\link[=samples,RnBSet-method]{samples}}}{Gets the identifiers of all samples in the dataset.}
#' \item{\code{\link[=summarized.regions,RnBSet-method]{summarized.regions}}}{Gets the genomic annotations for
#' which methylation data is present.}
#' \item{\code{\link[=meth,RnBSet-method]{meth}}}{Gets a \code{matrix} of methylation values in the dataset.}
#' \item{\code{\link[=mval,RnBSet-method]{mval}}}{Gets a \code{matrix} of M values in the dataset.}
#' \item{\code{\link[=covg,RnBSet-method]{covg}}}{Gets the \code{matrix} of coverage values of the dataset.}
#' \item{\code{\link[=remove.sites,RnBSet-method]{remove.sites}}}{Removes sites from the dataset.}
#' \item{\code{\link[=remove.samples,RnBSet-method]{remove.samples}}}{Removes samples from the dataset.}
#' \item{\code{\link[=addPheno,RnBSet-method]{addPheno,RnBSet-method}}}{Add sample annotation to the dataset.}
#' \item{\code{\link[BiocGenerics]{combine}}}{Combines two datasets.}
#' \item{\code{\link{regionMapping,RnBSet-method}}}{Retrieve the sites mapping to a given region type}
#' \item{\code{\link[=rnb.sample.summary.table,RnBSet-method]{rnb.sample.summary.table}}}{Creates a sample summary table from an RnBSet object.}
#' \item{\code{\link{isImputed,RnBSet-method}}}{Getter for the imputation slot.}
#' }
#'
#' @name RnBSet-class
#' @rdname RnBSet-class
#' @author Pavlo Lutsik
#' @exportClass RnBSet
## Virtual base class for all RnBeads methylation datasets; slot semantics are
## described in the roxygen block above. Only the child classes (RnBeadSet,
## RnBeadRawSet, RnBiseqSet) are instantiated.
setClass("RnBSet",
		representation(pheno="data.frame",
				sites="matrix",
				meth.sites="matrixOrffOrBigFfMat",
				covg.sites="matrixOrffOrBigFfMatOrNULL",
				regions="list",
				meth.regions="list",
				covg.regions="listOrNULL",
				status="listOrNULL",
				assembly="character",
				target="characterOrNULL",
				inferred.covariates="list",
				version="characterOrNULL",
				imputed="logical"),
		prototype(pheno=data.frame(),
				sites=matrix(nrow=0, ncol=0),
				meth.sites=matrix(nrow=0, ncol=0),
				covg.sites=NULL,
				regions=list(),
				meth.regions=list(),
				covg.regions=NULL,
				status=NULL,
				assembly="hg19",
				target=NULL,
				## record the package version the dataset was created with
				version=as.character(packageVersion("RnBeads")),
				imputed=FALSE),
		contains = "VIRTUAL",
		package = "RnBeads")
## ---------------------------------------------------------------------------------------------------------------------
## DUMMY CONSTRUCTOR
## ---------------------------------------------------------------------------------------------------------------------
#
#setMethod("initialize", "RnBSet",
# function(pheno=data.frame(),
# sites=matrix(),
# meth.sites=matrix(),
# covg.sites=NULL,
# regions=list(),
# meth.regions=list(),
# covg.regions=NULL,
# status=NULL,
# assembly="hg19",
# target=NULL,
# inferred.covariates=list()
# ){
# .Object@pheno<-pheno
# .Object@sites<-sites
# .Object@meth.sites<-betas
# .Object@covg.sites<-covg.sites
#
# .Object@status<-status
#
# .Object@target<-target
#
#
# })
## ---------------------------------------------------------------------------------------------------------------------
## ACCESSORS
## ---------------------------------------------------------------------------------------------------------------------
if (!isGeneric("pheno")) setGeneric("pheno", function(object) standardGeneric("pheno"))
#' pheno-methods
#'
#' Extracts sample phenotype and/or processing information.
#'
#' @param object Dataset of interest.
#' @return Sample annotation information available for the dataset in the form of a \code{data.frame}.
#'
#' @rdname pheno-methods
#' @docType methods
#' @aliases pheno
#' @aliases pheno,RnBSet-method
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' pheno(rnb.set.example)
#' }
setMethod("pheno", signature(object="RnBSet"),
	function(object) {
		## plain accessor for the sample annotation slot
		object@pheno
	}
)
########################################################################################################################
if (!isGeneric("samples")) {
	setGeneric("samples", function(object) standardGeneric("samples"))
}
#' samples-methods
#'
#' Extracts sample identifiers
#'
#' @param object Dataset of interest.
#'
#' @details The column of the sample annotation table which contains identifiers is globally controlled via the
#' \code{"identifiers.column"} option. In case the latter is \code{NULL} column names of the matrix returned
#' by the \code{meth} method are treated as sample identifiers. In case the latter are also missing, a \code{character}
#' vector with sample numbers is returned.
#'
#' @return \code{character} vector of sample identifiers.
#'
#' @rdname samples-methods
#' @docType methods
#' @aliases samples
#' @aliases samples,RnBSet-method
#' @aliases samples,RnBeadClustering-method
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' samples(rnb.set.example)
#' }
setMethod("samples", signature(object="RnBSet"),
	function(object) {
		annotation.table <- pheno(object)
		id.column <- rnb.getOption("identifiers.column")
		## last-resort identifiers: plain sample numbers
		fallback.ids <- function() as.character(1:nrow(object@pheno))
		if (!is.null(annotation.table) && !is.null(id.column)) {
			## try to pull identifiers from the configured annotation column;
			## the column may be addressed by name or by index
			ids <- NULL
			if (is.character(id.column)) {
				if (id.column %in% colnames(annotation.table)) {
					ids <- annotation.table[, id.column]
				}
			} else if (1L <= id.column && id.column <= ncol(annotation.table)) {
				ids <- annotation.table[, id.column]
			}
			## reject missing columns as well as columns with NAs or duplicates
			if (is.null(ids) || any(is.na(ids)) || anyDuplicated(ids) != 0) {
				rnb.warning("The supplied identifiers column is not found or is not suitable")
				ids <- fallback.ids()
			}
			return(as.character(ids))
		}
		## no annotation table or no identifier column configured:
		## fall back to methylation matrix column names, then sample numbers
		matrix.names <- colnames(object@meth.sites)
		if (!is.null(matrix.names)) {
			return(matrix.names)
		}
		fallback.ids()
	}
)
########################################################################################################################
if(!isGeneric("sites")) setGeneric("sites",
		function(object) standardGeneric("sites"))
#' sites-methods
#'
#' Methylation sites object information for which is present in the \code{RnBSet} object.
#'
#' @param object Dataset of interest.
#'
#' @return A matrix of type \code{integer} describing the sites, information for which is
#' present in the \code{object}
#'
#' @rdname sites-methods
#' @docType methods
#' @aliases sites
#' @aliases sites,RnBSet-method
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' sites(rnb.set.example)
#' }
setMethod("sites", signature(object="RnBSet"),
	function(object) {
		## plain accessor for the site definition matrix
		object@sites
	}
)
if(!isGeneric("regions")) setGeneric("regions",
		function(object, ...) standardGeneric("regions"))
########################################################################################################################
#' regions-methods
#'
#' Methylation regions, information for which is present in the \code{RnBSet} object.
#'
#' @param object Dataset of interest.
#' @param type Region type(s) of interest as a \code{character} vector. If this is set to \code{NULL}, all region
#' types summarized in the object are returned.
#' @return Methylation site and region assignment. If \code{type} is singleton, a \code{matrix} is returned. The first
#' column corresponds to the methylation context index. The second column is the index of the chromosome in
#' the genome, and the third is the index of the region in the \code{GRanges} object of the region type
#' annotation. When \code{length(type)>1}, a list of such matrices is returned for each element of \code{type}.
#' If \code{type} is \code{NULL}, matrices for all summarized region types are returned.
#'
#' @note
#' Methylation context index is an integer number denoting the sequence context of the cytosine of interest. Index
#' \code{1} corresponds to \code{CpG}, the only supported index in bisulfite sequencing datasets.
#'
#' @rdname regions-methods
#' @docType methods
#' @aliases regions
#' @aliases regions,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' head(regions(rnb.set.example))
#' }
#' @seealso \code{\link[=summarized.regions,RnBSet-method]{summarized.regions}} for all summarized region types in a dataset;
#' \code{\link{rnb.get.chromosomes}} listing all supported chromosomes for a given genome assembly
#' @author Pavlo Lutsik
#' @export
setMethod("regions", signature(object="RnBSet"),
	function(object, type=NULL){
		## NULL is documented to mean "all summarized region types"; the previous
		## check rejected it, making the default argument value unusable
		if (!(is.null(type) || is.character(type))) {
			stop("Invalid argument type")
		}
		if (is.null(object@regions)) {
			warning("No region information present, returning NULL")
			return(NULL)
		}
		if (is.null(type)) {
			return(object@regions)
		}
		if (!all(type %in% names(object@regions))) {
			stop(sprintf("No information for type %s", type))
		}
		## fixed condition: was length(type == 1), which is always true for
		## non-empty input and sent multi-type requests through `[[`
		if (length(type) == 1) {
			return(object@regions[[type]])
		}
		return(object@regions[type])
	})
########################################################################################################################
if (!isGeneric("summarized.regions")) {
	setGeneric("summarized.regions", function(object) standardGeneric("summarized.regions"))
}
#' summarized.regions-methods
#'
#' Gets the genomic annotations for which methylation data is present in the \code{RnBSet} object.
#'
#' @param object Methylation dataset of interest.
#'
#' @return \code{character} vector listing all genomic annotations summarized in the given dataset. If the dataset
#' contains methylation in sites only, an empty vector is returned.
#'
#' @seealso \code{\link[=summarize.regions,RnBSet-method]{summarize.regions}} for calculating region-wise methylation in a dataset;
#' \code{\link{rnb.set.annotation}} for adding or replacing a region annotation table
#'
#' @rdname summarized.regions-methods
#' @docType methods
#' @aliases summarized.regions
#' @aliases summarized.regions,RnBSet-method
#' @author Yassen Assenov
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' summarized.regions(rnb.set.example)
#' }
setMethod("summarized.regions", signature(object = "RnBSet"),
	function(object) {
		## names(object@regions) is NULL when no regions were summarized;
		## normalize that to an empty character vector
		reg.names <- names(object@regions)
		if (is.null(reg.names)) {
			return(character())
		}
		reg.names
	}
)
########################################################################################################################
## get.dataset.matrix
##
## Extracts a specific data matrix from the given methylation dataset and sets row names if necessary.
##
## @param object Methylation dataset as an object of class inheriting \code{RnBSet}.
## @param type Site type (e.g. \code{"sites"} or \code{"probes450"}) for site/probe matrix, or region name for
## the corresponding region-based matrix.
## @param row.names Flag indicating if row names must be generated.
## @param mm.sites Data matrix for the site level.
## @param mm.regions List of data matrices, one per supported region type.
## @param i indices of sites/regions to be retrieved (index or logical). retrieves all if \code{NULL} (default).
## @param j indices of samples to be retrieved (index or logical). retrieves all if \code{NULL} (default).
## @return Requested data matrix. Note that this might be \code{NULL}.
## @author Pavlo Lutsik
get.dataset.matrix <- function(object, type, row.names, mm.sites, mm.regions, i=NULL, j=NULL) {
	## -- argument validation ------------------------------------------------
	## type must be a single, non-missing string
	if (!(is.character(type) && length(type) == 1 && (!is.na(type)))) {
		stop("invalid value for type")
	}
	if (!parameter.is.flag(row.names)) {
		stop("invalid value for row.names; expected TRUE or FALSE")
	}
	if (!is.element(class(i), c("NULL", "integer", "numeric", "logical"))) {
		stop("invalid value for i; expected NULL, index or logical")
	}
	if (!is.element(class(j), c("NULL", "integer", "numeric", "logical", "character"))) {
		stop("invalid value for j; expected NULL, index, character or logical")
	}
	## translate sample names into column indices
	if (is.character(j)){
		j <- match(j, samples(object))
		if (any(is.na(j))){
			stop("invalid sample names")
		}
	}
	if (type %in% c("sites", object@target)) {
		## site-level matrix requested; may legitimately be absent
		if (is.null(mm.sites)) {
			return(NULL)
		}
		## disk-backed (ff) matrices must be opened before they can be subset
		if("ff" %in% class(mm.sites)){
			open(mm.sites)
		}
		## subset only along the restricted dimensions; drop=FALSE keeps the
		## result a matrix even when a single row/column is selected
		if (is.null(i) && is.null(j)){
			result <- mm.sites[, , drop = FALSE]
		} else if(is.null(i)){
			result <- mm.sites[, j, drop = FALSE]
		} else if(is.null(j)){
			result <- mm.sites[i, , drop = FALSE]
		} else {
			result <- mm.sites[i, j, drop = FALSE]
		}
	} else if (!(type %in% names(object@regions))) {
		stop("unsupported region type")
	} else if (is.null(mm.regions[[type]])) {
		## region type is known but no matrix was computed for it
		return(NULL)
	} else {
		## region-level matrix requested; same subsetting scheme as above
		if (is.null(i) && is.null(j)){
			result <- mm.regions[[type]][, , drop = FALSE]
		} else if(is.null(i)){
			result <- mm.regions[[type]][, j, drop = FALSE]
		} else if(is.null(j)){
			result <- mm.regions[[type]][i, , drop = FALSE]
		} else {
			result <- mm.regions[[type]][i, j, drop = FALSE]
		}
	}
	## label columns with sample identifiers (subset when j was given)
	if (is.null(j)){
		colnames(result) <- samples(object)
	} else {
		colnames(result) <- samples(object)[j]
	}
	if (row.names) {
		## row names are generated on demand (site/region identifiers)
		if (is.null(i)){
			rownames(result) <- get.row.names(object, type)
		} else {
			rownames(result) <- get.row.names(object, type)[i]
		}
	} else {
		rownames(result) <- NULL
	}
	return(result)
}
########################################################################################################################
if(!isGeneric("mval")) setGeneric("mval", function(object, ...) standardGeneric("mval"))
#' mval-methods
#'
#' Extracts DNA methylation information (M values) for a specified set of genomic features.
#'
#' @param object dataset of interest.
#' @param type \code{character} singleton. If this is set to \code{"sites"} (default), DNA methylation information
#' for each available site is returned. Otherwise, this should be one of region types for which
#' summarized DNA methylation information is computed in the given dataset.
#' @param row.names Flag indicating if row names are to be generated in the result.
#' @param epsilon Threshold of beta values to use when adjusting for potential M values close to +infinity or
#' -infinity. See \code{\link{rnb.beta2mval}} for more details.
#' @param i indices of sites/regions to be retrieved. By default (\code{NULL}), all will be retrieved.
#' @param j indices of samples to be retrieved. By default (\code{NULL}), all will be retrieved.
#'
#' @return \code{matrix} with methylation M values.
#'
#' @seealso \code{\link[=meth,RnBSet-method]{meth}} for extracting methylation beta values
#' @rdname mval-methods
#' @docType methods
#' @aliases mval
#' @aliases mval,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' ## per-site M-value matrix
#' mm<-mval(rnb.set.example, row.names=TRUE)
#' head(mm)
#' ## M-values for each covered gene
#' gmm<-mval(rnb.set.example, type="gene", row.names=TRUE)
#' head(gmm)
#' }
#' @export
setMethod("mval", signature(object = "RnBSet"),
	function(object, type = "sites", row.names = FALSE, epsilon = 0, i = NULL, j = NULL) {
		## retrieve (optionally subset) beta values and transform them to M values;
		## i/j subsetting added for consistency with meth() and covg()
		beta.values <- get.dataset.matrix(object, type, row.names, object@meth.sites, object@meth.regions, i=i, j=j)
		rnb.beta2mval(beta.values, epsilon)
	}
)
if(!isGeneric("meth")) setGeneric("meth", function(object, ...) standardGeneric("meth"))
#' meth-methods
#'
#' Extracts DNA methylation information (beta values) for a specified set of genomic features.
#'
#' @param object dataset of interest.
#' @param type \code{character} singleton. If this is set to \code{"sites"} (default), DNA methylation information
#' for each available site is returned. Otherwise, this should be one of region types for which
#' summarized DNA methylation information is computed in the given dataset.
#' @param row.names flag indicating if row names are to be generated in the result.
#' @param i indices of sites/regions to be retrieved. By default (\code{NULL}), all will be retrieved.
#' @param j indices of samples to be retrieved. By default (\code{NULL}), all will be retrieved.
#'
#' @return \code{matrix} with methylation beta values.
#'
#' @seealso \code{\link[=mval,RnBSet-method]{mval}} for calculating M values
#' @rdname meth-methods
#' @docType methods
#' @aliases meth
#' @aliases meth,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' ## per-site beta-value matrix
#' mm<-meth(rnb.set.example, row.names=TRUE)
#' head(mm)
#' ## beta-values for each covered gene
#' gmm<-meth(rnb.set.example, type="gene", row.names=TRUE)
#' head(gmm)
#' }
#' @export
setMethod("meth", signature(object = "RnBSet"),
	function(object, type="sites", row.names=FALSE, i=NULL, j=NULL) {
		## delegate to the shared matrix extraction helper on the beta-value slots
		get.dataset.matrix(
			object, type, row.names,
			object@meth.sites, object@meth.regions,
			i=i, j=j
		)
	}
)
if(!isGeneric("hasCovg")) setGeneric("hasCovg", function(object,...) standardGeneric("hasCovg"))
#' hasCovg-methods
#'
#' Returns \code{TRUE} if the \code{RnBSet} object contains coverage information for sites or the specified region type.
#'
#' @param object \code{RnBSet} of interest.
#' @param type \code{character} singleton. If \code{sites} or a region type summarized in the object
#'
#' @return \code{TRUE} if the \code{RnBSet} object contains coverage information for sites or the specified region type. \code{FALSE} otherwise
#'
#' @rdname hasCovg-methods
#' @docType methods
#' @export
#' @aliases hasCovg
#' @aliases hasCovg,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' ## per-site beta-value matrix
#' hasCovg(rnb.set.example)
#' }
setMethod("hasCovg", signature(object="RnBSet"),
	function (object, type="sites") {
		## type must be a single, non-missing string
		if (!(is.character(type) && length(type) == 1 && (!is.na(type)))) {
			stop("invalid value for type")
		}
		## coverage is present when the corresponding slot/list entry is non-NULL
		if (type %in% c("sites", object@target)) {
			return(!is.null(object@covg.sites))
		}
		if (!(type %in% names(object@regions))) {
			stop("unsupported region type")
		}
		!is.null(object@covg.regions[[type]])
	}
)
if(!isGeneric("covg")) setGeneric("covg", function(object,...) standardGeneric("covg"))
#' covg-methods
#'
#' Extract coverage information from an object of \code{RnBSet} class.
#'
#' @param object Dataset of interest.
#' @param type \code{character} singleton. If \code{sites} DNA methylation information per each available
#' site is returned. Otherwise should be one of region types for which the summarized
#' coverage information is available
#' @param row.names Flag indicating if row names are to be generated in the result.
#' @param i indices of sites/regions to be retrieved. By default (\code{NULL}), all will be retrieved.
#' @param j indices of samples to be retrieved. By default (\code{NULL}), all will be retrieved.
#'
#' @return coverage information available for the dataset in the form of a \code{matrix}.
#'
#' @rdname covg-methods
#' @docType methods
#' @export
#' @aliases covg
#' @aliases covg,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' ## per-site beta-value matrix
#' cvg<-covg(rnb.set.example, row.names=TRUE)
#' head(cvg)
#' }
setMethod("covg", signature(object="RnBSet"),
	function (object, type="sites", row.names=FALSE, i=NULL, j=NULL) {
		## delegate to the shared matrix extraction helper on the coverage slots
		get.dataset.matrix(object, type, row.names, object@covg.sites, object@covg.regions, i=i, j=j)
	}
)
if(!isGeneric("nsites")) setGeneric("nsites", function(object, ...) standardGeneric("nsites"))
#' nsites-methods
#'
#' Returns the number of sites/regions for a given \code{RnBSet} object
#'
#' @param object \code{RnBSet} of interest.
#' @param type \code{character} singleton. If this is set to \code{"sites"} (default), the number of sites is returned.
#'             Otherwise, this should be one of the region types, for which the number of regions is returned.
#'
#' @return \code{integer} stating the number of sites/regions. \code{NA} if the regions have not been summarized yet.
#'
#' @seealso \code{\link[=meth,RnBSet-method]{meth}} Retrieving the matrix of methylation values
#' @rdname nsites-methods
#' @docType methods
#' @aliases nsites
#' @aliases nsites,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' nsites(rnb.set.example)
#' }
#' @export
setMethod("nsites", signature(object = "RnBSet"),
	function(object, type="sites") {
		## Validate that type is a single, non-missing character value
		if (!(is.character(type) && length(type) == 1 && (!is.na(type)))) {
			stop("invalid value for type")
		}
		## Site-level count comes directly from the per-site methylation matrix
		if (type %in% c("sites", object@target)) {
			return(nrow(object@meth.sites))
		}
		if (!(type %in% names(object@regions))) {
			stop("unsupported region type")
		}
		region.meth <- object@meth.regions[[type]]
		## NA signals that this region type has not been summarized yet
		if (is.null(region.meth)) {
			return(NA)
		}
		return(nrow(region.meth))
	}
)
########################################################################################################################
if (!isGeneric("assembly")) {
	setGeneric("assembly", function(object) standardGeneric("assembly"))
}
#' assembly-methods
#'
#' Extracts information about the genome assembly of a dataset
#'
#' @param object Dataset of interest.
#' @return Genome assembly of the dataset as a \code{character} singleton, e.g. \code{"hg19"}.
#'
#' @rdname assembly-methods
#' @docType methods
#' @aliases assembly
#' @aliases assembly,RnBSet-method
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' assembly(rnb.set.example) # "hg19"
#' }
setMethod("assembly", signature(object="RnBSet"),
	function(object){
		## The assembly identifier is stored directly in the corresponding slot
		object@assembly
	})
## ---------------------------------------------------------------------------------------------------------------------
## MODIFIERS
## ---------------------------------------------------------------------------------------------------------------------
if (!isGeneric("updateRegionSummaries")) {
	setGeneric("updateRegionSummaries", function(object) standardGeneric("updateRegionSummaries"))
}
#' updateRegionSummaries
#'
#' Updates the region information present in an RnBSet by invoking summarize.regions on all region types
#' present in the object
#'
#' @param object Dataset of interest.
#' @return The modified dataset with re-summarized region-level information.
#'
#' @rdname updateRegionSummaries
#' @docType methods
#' @aliases updateRegionSummaries
#' @aliases updateRegionSummaries,RnBSet-method
#' @export
setMethod("updateRegionSummaries", signature(object="RnBSet"),
	function(object){
		region.types <- names(object@meth.regions)
		if (length(region.types) != 0) {
			## Each summarized matrix carries the aggregation function it was created with
			aggregations <- sapply(object@meth.regions, attr, "aggregation")
			for (rt in seq_along(region.types)) {
				object <- summarize.regions(object, region.types[rt], aggregations[rt])
			}
		}
		object
	}
)
########################################################################################################################
if (!isGeneric("remove.sites")) {
	setGeneric("remove.sites", function(object, probelist, verbose = TRUE) standardGeneric("remove.sites"))
}
#' remove.sites-methods
#'
#' Removes the specified probes from the dataset.
#'
#' @param object Dataset of interest.
#' @param probelist List of probes to be removed in the form of a \code{logical}, \code{integer} or \code{character}
#'                  vector. If this parameter is \code{logical}, it is not recycled; its length must be equal to the
#'                  number of probes in \code{object}. If it is \code{integer} or \code{character}, it must list only
#'                  probes that exist in the dataset. Specifying probe indices larger than the number of probes, or
#'                  non-existent probe identifiers results in an error.
#' @param verbose if \code{TRUE} additional diagnostic output is generated
#'
#' @return The modified dataset.
#'
#' @seealso \code{\link[=remove.samples,RnBSet-method]{remove.samples}} for removing samples from a methylation dataset
#'
#' @rdname remove.sites-methods
#' @aliases remove.sites
#' @aliases remove.sites,RnBSet-method
#' @docType methods
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' print(rnb.set.example)
#' ## remove 100 random sites
#' s2r<-sample.int(nrow(sites(rnb.set.example)), 100)
#' rnb.set.f<-remove.sites(rnb.set.example, s2r)
#' print(rnb.set.f)
#' }
setMethod("remove.sites", signature(object = "RnBSet"),
	## NOTE(review): default verbose=FALSE here differs from the generic's declared
	## default (verbose = TRUE) -- confirm which one is intended.
	function(object, probelist, verbose=FALSE) {
		## Translate the probe list (logical/integer/character) into integer row indices
		inds <- get.i.vector(probelist, rownames(object@sites))
		if(verbose) {
			rnb.logger.start("Removing sites")
		}
		## Delete methylation sites
		if(length(inds) != 0) {
			## NOTE(review): no drop=FALSE here; if exactly one site remained, the
			## matrix would collapse to a vector -- confirm this cannot occur.
			object@sites <- object@sites[-inds, ]
			if(!is.null(object@status) && object@status$disk.dump){
				## Disk-dumped data: rebuild the site matrices without the removed rows,
				## copying column by column to keep the memory footprint low
				doBigFf <- !is.null(object@status$disk.dump.bigff)
				bff.finalizer <- NULL
				if (doBigFf) doBigFf <- object@status$disk.dump.bigff
				if (doBigFf) bff.finalizer <- rnb.getOption("disk.dump.bigff.finalizer")
				nSites.new <- nrow(object@meth.sites) - length(inds)
				nSamples <- length(samples(object))
				# methylation
				newMat <- NULL
				if (doBigFf){
					newMat <- BigFfMat(row.n=nSites.new, col.n=nSamples, row.names=NULL, col.names=samples(object), finalizer=bff.finalizer)
				} else {
					newMat <- ff(NA, dim=c(nSites.new, nSamples), dimnames=list(NULL, samples(object)), vmode="double")
				}
				for (j in 1:nSamples){
					newMat[,j] <- object@meth.sites[-inds,j]
				}
				## Optionally free the previous ff matrix from disk
				if(isTRUE(object@status$discard.ff.matrices)){
					delete(object@meth.sites)
				}
				object@meth.sites <- newMat
				# coverage
				if(!is.null(object@covg.sites)) {
					newMat <- NULL
					if (doBigFf){
						newMat <- BigFfMat(row.n=nSites.new, col.n=nSamples, row.names=NULL, col.names=samples(object), na.prototype=as.integer(NA), finalizer=bff.finalizer)
					} else {
						newMat <- ff(NA_integer_, dim=c(nSites.new, nSamples), dimnames=list(NULL, samples(object)))
					}
					for (j in 1:nSamples){
						newMat[,j] <- object@covg.sites[-inds,j]
					}
					if(isTRUE(object@status$discard.ff.matrices)){
						delete(object@covg.sites)
					}
					object@covg.sites <- newMat
				}
			} else {
				## In-memory data: plain row subsetting
				object@meth.sites <- object@meth.sites[-inds, ,drop=FALSE]
				if(!is.null(object@covg.sites)) {
					object@covg.sites <- object@covg.sites[-inds, ,drop=FALSE]
				}
			}
		}
		## Update region methylation: re-summarize every region type with the
		## aggregation function it was originally created with
		if(length(object@meth.regions) != 0){
			region.types <- names(object@meth.regions)
			aggregations <- sapply(object@meth.regions, attr, "aggregation")
			for(i in 1:length(region.types)){
				if(verbose){
					rnb.status(c("summarizing regions:",region.types[i]))
				}
				object <- summarize.regions(object, region.types[i], aggregations[i])
			}
		}
		## Remove information on inferred covariates (they are likely to change when sites are removed);
		## "sex" is kept, presumably because it does not depend on the site set -- TODO confirm
		if (.hasSlot(object, "inferred.covariates")) {
			i.covariates <- setdiff(names(object@inferred.covariates), "sex")
			if (length(i.covariates) != 0) {
				object@inferred.covariates[i.covariates] <- NULL
				if(verbose){
					rnb.info("removed information on inferred covariates")
				}
			}
		}
		if(verbose){
			rnb.logger.completed()
		}
		object
	}
)
########################################################################################################################
if (!isGeneric("updateMethylationSites")) {
	setGeneric("updateMethylationSites", function(object, meth.data, verbose = TRUE) standardGeneric("updateMethylationSites"))
}
#' updateMethylationSites-methods
#'
#' Replaces the methylation info with the specified data frame.
#'
#' @param object Dataset of interest.
#' @param meth.data This object has to be a \code{data.frame} with the same dimensions as the one already contained
#' 				in \code{object}, containing the methylation info that should be associated with the object.
#' @param verbose if \code{TRUE} additional diagnostic output is generated
#'
#' @return The modified dataset.
#'
#' @rdname updateMethylationSites-methods
#' @aliases updateMethylationSites
#' @aliases updateMethylationSites,RnBSet-method
#' @docType methods
#' @export
setMethod("updateMethylationSites", signature(object = "RnBSet"),
	function(object, meth.data, verbose=FALSE) {
		if(verbose) {
			rnb.logger.start("Updating sites")
		}
		## Validate the dimensions once, before branching on the storage backend
		## (previously the rows-only check was duplicated in both branches and
		## immediately superseded by the combined rows/columns check)
		nSites <- nrow(object@meth.sites)
		nSamples <- length(samples(object))
		if(nSites!=nrow(meth.data)||nSamples!=ncol(meth.data)){
			stop("Dimensions of provided and existing methylation info do not match.")
		}
		if(!is.null(object@status) && object@status$disk.dump){
			## Disk-dumped data: copy the new values column by column into a fresh
			## ff/BigFf matrix to keep the memory footprint low
			doBigFf <- !is.null(object@status$disk.dump.bigff)
			bff.finalizer <- NULL
			if (doBigFf) doBigFf <- object@status$disk.dump.bigff
			if (doBigFf) bff.finalizer <- rnb.getOption("disk.dump.bigff.finalizer")
			# methylation
			newMat <- NULL
			if (doBigFf){
				newMat <- BigFfMat(row.n=nSites, col.n=nSamples, row.names=NULL, col.names=samples(object), finalizer=bff.finalizer)
			} else {
				newMat <- ff(NA, dim=c(nSites, nSamples), dimnames=list(NULL, samples(object)), vmode="double")
			}
			for (j in 1:nSamples){
				newMat[,j] <- meth.data[,j]
			}
			## Optionally free the previous ff matrix from disk
			if(isTRUE(object@status$discard.ff.matrices)){
				delete(object@meth.sites)
			}
			object@meth.sites <- newMat
		} else {
			## In-memory data: simply replace the slot
			object@meth.sites <- meth.data
		}
		if(verbose){
			## NOTE(review): the section was opened with rnb.logger.start() but is
			## closed with logger.completed() -- confirm both address the same logger.
			logger.completed()
		}
		if(verbose){
			logger.start("Update regional methylation")
		}
		## Re-summarize all region types so they reflect the new site-level values
		object <- updateRegionSummaries(object)
		if(verbose){
			logger.completed()
		}
		object
	}
)
########################################################################################################################
if (!isGeneric("mask.sites.meth")) {
	setGeneric("mask.sites.meth", function(object, mask, verbose=FALSE) standardGeneric("mask.sites.meth"))
}
#' mask.sites.meth-methods
#'
#' Given a logical matrix, sets corresponding entries in the methylation table to NA (masking).
#' Low memory footprint
#'
#' @param object Dataset of interest.
#' @param mask logical matrix indicating which sites should be masked
#' @param verbose if \code{TRUE} additional diagnostic output is generated
#'
#' @return The modified dataset.
#'
#' @rdname mask.sites.meth-methods
#' @aliases mask.sites.meth
#' @aliases mask.sites.meth,RnBSet-method
#' @docType methods
setMethod("mask.sites.meth", signature(object = "RnBSet"),
	## Note: `verbose` is currently unused in this method; kept for interface compatibility.
	function(object, mask, verbose=FALSE) {
		if(!is.null(object@status) && object@status$disk.dump){
			## Disk-dumped (ff) data: assign NA column by column to avoid loading
			## the full matrix into memory.
			## NOTE(review): unlike the in-memory branch below, the raw intensity
			## slots (M, U, M0, U0) are not masked here -- confirm this is intended
			## for disk-dumped RnBeadRawSet objects.
			nSamples <- length(samples(object))
			for (j in 1:nSamples){
				object@meth.sites[mask[,j],j] <- NA
			}
		} else {
			## In-memory data: mask the methylation values and, for raw array data,
			## the corresponding intensity matrices as well
			object@meth.sites[,][mask] <- NA
			if(inherits(object, "RnBeadRawSet")){
				object@M[,][mask] <- NA
				object@U[,][mask] <- NA
				if(!is.null(object@M0)){
					object@M0[,][mask] <- NA
				}
				if(!is.null(object@U0)){
					object@U0[,][mask] <- NA
				}
			}
		}
		object
	}
)
########################################################################################################################
if (!isGeneric("remove.samples")) {
	setGeneric("remove.samples", function(object, samplelist) standardGeneric("remove.samples"))
}
#' remove.samples-methods
#'
#' Removes the specified samples from the dataset.
#'
#' @param object Dataset of interest.
#' @param samplelist List of samples to be removed in the form of a \code{logical}, \code{integer} or \code{character}
#'                   vector. If this parameter is \code{logical}, it is not recycled; its length must be equal to the
#'                   number of samples in \code{object}. If it is \code{integer} or \code{character}, it must list only
#'                   samples that exist in the dataset. Specifying sample indices larger than the number of samples, or
#'                   non-existent sample identifiers results in an error.
#' @return The modified dataset.
#'
#' @seealso \code{\link[=remove.sites,RnBSet-method]{remove.sites}} for removing sites or probes from a methylation dataset
#'
#' @rdname remove.samples-methods
#' @aliases remove.samples
#' @aliases remove.samples,RnBSet-method
#' @docType methods
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' samples(rnb.set.example)
#' ## remove 3 random samples
#' s2r<-sample.int(length(samples(rnb.set.example)), 3)
#' rnb.set.f<-remove.samples(rnb.set.example, s2r)
#' samples(rnb.set.f)
#' }
setMethod("remove.samples", signature(object = "RnBSet"),
	function(object, samplelist) {
		## Keep a reference to the original object in order to restore the
		## "aggregation" attributes on the region matrices after subsetting
		object.old <- object
		## Translate the sample list (logical/integer/character) into integer indices
		inds <- get.i.vector(samplelist, samples(object))
		bff.finalizer <- rnb.getOption("disk.dump.bigff.finalizer")
		if (length(inds) != 0) {
			## NOTE(review): object@status is accessed without a NULL check here,
			## unlike in sibling methods of this class -- confirm status is always set.
			if(object@status$disk.dump){
				## Disk-dumped data: materialize, subset by columns, then re-dump
				doBigFf <- !is.null(object@status$disk.dump.bigff)
				if (doBigFf) doBigFf <- object@status$disk.dump.bigff
				mat <- object@meth.sites[,]
				new.matrix <- mat[,-inds, drop=FALSE]
				# delete(object@meth.sites)
				if(isTRUE(object@status$discard.ff.matrices)){
					delete(object@meth.sites)
				}
				if (doBigFf){
					object@meth.sites <- BigFfMat(new.matrix, finalizer=bff.finalizer)
				} else {
					object@meth.sites <- convert.to.ff.matrix.tmp(new.matrix)
				}
			}else{
				object@meth.sites <- object@meth.sites[,-inds, drop=FALSE]
			}
			## Subset the sample annotation table accordingly
			if (!is.null(object@pheno)) {
				object@pheno <- object@pheno[-inds, ,drop=FALSE]
			}
			## Subset the site-level coverage matrix, if present
			if (!is.null(object@covg.sites)) {
				if(object@status$disk.dump){
					doBigFf <- !is.null(object@status$disk.dump.bigff)
					if (doBigFf) doBigFf <- object@status$disk.dump.bigff
					mat <- object@covg.sites[,]
					new.matrix <- mat[,-inds, drop=FALSE]
					# delete(object@covg.sites)
					if(isTRUE(object@status$discard.ff.matrices)){
						delete(object@covg.sites)
					}
					if (doBigFf){
						object@covg.sites <- BigFfMat(new.matrix, finalizer=bff.finalizer)
					} else {
						object@covg.sites <- convert.to.ff.matrix.tmp(new.matrix)
					}
				}else{
					object@covg.sites <- object@covg.sites[,-inds, drop=FALSE]
				}
			}
			## Subset the region-level methylation and coverage matrices
			for (region in names(object@regions)) {
				if(object@status$disk.dump){
					doBigFf <- !is.null(object@status$disk.dump.bigff)
					if (doBigFf) doBigFf <- object@status$disk.dump.bigff
					mat <- object@meth.regions[[region]][,]
					meth.matrix <- mat[, -inds, drop=FALSE]
					if(isTRUE(object@status$discard.ff.matrices)){
						delete(object@meth.regions[[region]])
					}
					if (doBigFf){
						object@meth.regions[[region]] <- BigFfMat(meth.matrix, finalizer=bff.finalizer)
					} else {
						object@meth.regions[[region]] <- convert.to.ff.matrix.tmp(meth.matrix)
					}
					if(!is.null(object@covg.regions)){
						mat <- object@covg.regions[[region]][,]
						covg.matrix <- mat[, -inds, drop=FALSE]
						if(isTRUE(object@status$discard.ff.matrices)){
							delete(object@covg.regions[[region]])
						}
						if (doBigFf){
							object@covg.regions[[region]] <- BigFfMat(covg.matrix, finalizer=bff.finalizer)
						} else {
							object@covg.regions[[region]] <- convert.to.ff.matrix.tmp(covg.matrix)
						}
					}
					# delete(object@meth.regions[[region]])
					# delete(object@covg.regions[[region]])
				}else{
					object@meth.regions[[region]] <- object@meth.regions[[region]][, -inds, drop=FALSE]
					if(!is.null(object@covg.regions)){
						object@covg.regions[[region]] <- object@covg.regions[[region]][, -inds, drop=FALSE]
					}
				}
				## Restore the aggregation attribute lost during subsetting
				attr(object@meth.regions[[region]], "aggregation")<-attr(object.old@meth.regions[[region]], "aggregation")
			}
			## Remove information on inferred covariates (they are likely to change when samples are removed)
			if (.hasSlot(object, "inferred.covariates")) {
				i.covariates <- setdiff(names(object@inferred.covariates), "sex")
				if (length(i.covariates) != 0) {
					## FIXME: Wouldn't it make more sense to simply take the samples out?
					object@inferred.covariates[i.covariates] <- NULL
				}
			}
		}
		object
	}
)
########################################################################################################################
if (!isGeneric("mergeSamples")) {
	setGeneric("mergeSamples", function(object, ...) standardGeneric("mergeSamples"))
}
#' mergeSamples
#'
#' Take an RnBSet object and merge methylation and phenotype information given a grouping column in the pheno table
#' coverage is combined by taking the sum of coverages
#' pheno is combined by concatenating entries from all samples
#' @param object input RnBSet object
#' @param grp.col a column name (string) of \code{pheno(rnb.set)} that contains unique identifiers for sample groups/replicates
#'                to be combined
#' @return the modified RnBSet object
#' @details combines phenotype information, coverage information and methylation information
#' methylation is combined by taking the average. Detection p-values are combined using Fisher's method.
#' For methylation arrays, bead counts are currently not taken into account.
#' objects of class \code{RnBeadRawSet} are automatically converted to \code{RnBeadSet}.
#' @note Requires the packages \pkg{foreach} and \pkg{doParallel}.
#'
#' @rdname mergeSamples-methods
#' @aliases mergeSamples
#' @aliases mergeSamples,RnBSet-method
#' @docType methods
#'
#' @author Fabian Mueller
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' rnb.set.example
#' rnb.set.merged <- mergeSamples(rnb.set.example,"Cell_Line")
#' rnb.set.merged
#' pheno(rnb.set.merged)
#' }
# TODOs:
# - incorporate weighted methylation average (coverage)
setMethod("mergeSamples", signature(object = "RnBSet"),
	function(object, grp.col){
		ph <- pheno(object)
		if (!is.element(grp.col,colnames(ph))){
			stop("Could not merge samples: phenotype column does not exist")
		}
		res <- object
		## Mapping: group identifier --> indices of the samples belonging to that group
		replicate.list <- getMergeList(object, grp.col)
		num.replicates <- sapply(replicate.list,length)
		phm <- sapply(ph, format, trim=TRUE, justify="none") #format to matrix, avoiding padded whitespaces
		ph.t <- t(phm)
		## Merge annotation values per group: a unique non-NA value is kept as is;
		## an all-NA group becomes NA; otherwise values are concatenated with ";"
		mf.pheno <- function(X.sub){
			sapply(1:nrow(X.sub),FUN=function(i){
				if (length(unique(X.sub[i,]))==1 && sum(is.na(X.sub[i,]))==0) {
					return(X.sub[i,1])
				} else if (all(is.na(X.sub[i,]))) {
					return(NA)
				} else {
					return(paste(X.sub[i,],collapse=";"))
				}
			})
		}
		pheno.new <- t(mergeColumns(ph.t,replicate.list,mergeFun=mf.pheno))
		## Record how many samples were merged into each group
		pheno.new <- cbind(pheno.new,num.replicates)
		colnames(pheno.new) <- c(colnames(ph),"rnb_number_merged_samples")
		## NOTE(review): exact class comparison -- subclasses of RnBiseqSet would not
		## match this branch; confirm this is intended.
		if (class(object) == "RnBiseqSet"){
			## Sequencing data: methylation is averaged (default mergeFun of
			## mergeColumns), coverage is summed per group
			meth.site.new <- mergeColumns(meth(object,type="sites",row.names=FALSE),replicate.list)
			covg.site.new <- NULL
			if (!is.null(object@covg.sites)){
				covg.site.new <- mergeColumns(covg(object,type="sites"),replicate.list,mergeFun=function(X.sub){rowSums(X.sub,na.rm=TRUE)})
			}
			# res <- new("RnBiseqSet",
			# 		pheno=data.frame(pheno.new),
			# 		sites=object@sites,
			# 		meth.sites=meth.site.new,
			# 		covg.sites=covg.site.new,
			# 		region.types=summarized.regions(object),
			# 		assembly=object@assembly)
			## Rebuild the site table expected by the RnBiseqSet constructor
			aa <- annotation(object,"sites")
			sites.obj <- data.frame(chrom=as.character(aa$Chromosome),start=aa$Start,strand=as.character(aa$Strand),stringsAsFactors=FALSE)
			doBigFf <- !is.null(object@status$disk.dump.bigff)
			if (doBigFf) doBigFf <- object@status$disk.dump.bigff
			res <- RnBiseqSet(
				pheno=data.frame(pheno.new),
				sites=sites.obj,
				meth=meth.site.new,
				covg=covg.site.new,
				region.types=summarized.regions(object),
				assembly=object@assembly,
				useff=object@status$disk.dump,
				usebigff=doBigFf
			)
		} else if (is.element(class(object),c("RnBeadSet","RnBeadRawSet"))) {
			meth.site.new <- mergeColumns(meth(object,type="sites",row.names=TRUE),replicate.list)
			p.vals <- NULL
			if (!is.null(object@pval.sites)){
				## Detection p-values are combined across replicates using Fisher's method
				p.vals <- mergeColumns(dpval(object,row.names=TRUE),replicate.list,
					mergeFun=function(X.sub){
						apply(X.sub,1,function(x){combineTestPvalsMeth(na.omit(x),correlated=FALSE)})
					}
				)
			}
			## Bead counts are currently not merged (see @details)
			b.counts <- NULL
			## NOTE(review): if object@target matches none of the platforms below,
			## `platform` remains undefined and the constructor call will fail -- confirm.
			if(object@target=="probesEPIC"){
				platform<-"EPIC"
			}else if (object@target=="probes450"){
				platform<-"450k"
			}else if(object@target=="probes27"){
				platform<-"27k"
			}
			# res <- new("RnBeadSet",
			# 		data.frame(pheno.new),
			# 		meth.site.new,
			# 		p.values=p.vals,
			# 		bead.counts=b.counts,
			# 		platform=platform,
			# 		region.types=summarized.regions(object)
			# )
			## Note: an RnBeadRawSet input is returned as an RnBeadSet (see @details)
			res <- RnBeadSet(
				pheno=data.frame(pheno.new),
				betas=meth.site.new,
				p.values=p.vals,
				bead.counts=b.counts,
				platform=platform,
				region.types=summarized.regions(object),
				useff=object@status$disk.dump
			)
		} else {
			stop("Could not merge samples: Invalid class of object")
		}
		return(res)
	}
)
########################################################################################################################
setGeneric("combine.rnb.sets", function(x,y, ...) standardGeneric("combine.rnb.sets"))
#' combine.rnb.sets-methods
#'
#' Combine two objects inheriting from \code{\linkS4class{RnBSet}} class
#'
#' @param x,y \code{\linkS4class{RnBeadSet}}, \code{\linkS4class{RnBeadRawSet}}
#' 			or \code{\linkS4class{RnBiseqSet}} object
#' @param type \code{character} singleton defining the set operation applied to the two site sets,
#' 			one of "all", "all.x", "all.y" or "common"
#'
#' @details Combine method supports a merge of any two RnBSet objects that contain data of the same species.
#' In case a non-synonymous merge is performed, the class conversion will follow the following hierarchy:
#' \code{\linkS4class{RnBeadSet}} < \code{\linkS4class{RnBeadRawSet}} < \code{\linkS4class{RnBiseqSet}}.
#' In case \code{x} and \code{y} are both array data containers (\code{RnBeadSet} or \code{RnBeadRawSet}),
#' the resulting object will have an annotation that corresponds to the newer array version
#' (\code{27k} < \code{450k} < \code{EPIC}).
#' The sample sets of \code{x} and \code{y} should be unique. Sample annotation information is merged only for columns
#' which have identical names in both objects. CpG sites of the new object are a union of those present in both objects.
#'
#' @return combined \code{\linkS4class{RnBeadSet}}, \code{\linkS4class{RnBeadRawSet}} or
#' \code{\linkS4class{RnBiseqSet}} object
#'
#' @rdname combine.rnb.sets-methods
#' @docType methods
#' @export
#' @aliases combine.rnb.sets
#' @aliases combine.rnb.sets,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' r1 <- rnb.set.example
#' r1 <- remove.samples(r1,samples(rnb.set.example)[1:5])
#' i <- which(r1@@sites[,2] == 15 | r1@@sites[,2] == 21)
#' sites.rem.r1 <- union(sample(1:nrow(meth(rnb.set.example)),500),i)
#' r1 <- remove.sites(r1,sites.rem.r1)
#' r2 <- rnb.set.example
#' r2 <- remove.samples(r2,samples(rnb.set.example)[6:12])
#' sites.rem.r2 <- sample(1:nrow(meth(rnb.set.example)),800)
#' r2 <- remove.sites(r2,sites.rem.r2)
#' rc <- combine.rnb.sets(r1,r2)
#' #assertion: check the number of sites
#' sites.rem.c <- intersect(sites.rem.r1,sites.rem.r2)
#' (nrow(meth(rnb.set.example))-length(sites.rem.c)) == nrow(meth(rc))
#' }
setMethod("combine.rnb.sets", signature(x="RnBSet", y="RnBSet"),
	function(x, y, type="all"){
		if(class(x)==class(y)){
			## Same class: array data is combined as arrays, sequencing data as sequencing
			if(inherits(x, "RnBeadSet")){
				rnb.combine.arrays(x, y, type=type)
			}else if(inherits(x, "RnBiseqSet")){
				rnb.combine.seq(x, y, type=type)
			}else{
				rnb.error("This combine operation is currently not supported")
			}
		}else{
			## Different classes: convert towards the "higher" class in the hierarchy
			## RnBeadSet < RnBeadRawSet < RnBiseqSet (see @details)
			if(inherits(x, "RnBiseqSet")){
				y.seq<-as(y, "RnBiseqSet")
				rnb.combine.seq(x, y.seq, type=type)
			}else if(inherits(y, "RnBiseqSet")){
				x.seq<-as(x, "RnBiseqSet")
				rnb.combine.seq(x.seq, y, type=type)
			}else if(inherits(x, "RnBeadSet") && inherits(y, "RnBeadSet")){
				## Mixed array containers (e.g. RnBeadSet + RnBeadRawSet) are combined
				## as arrays, following the documented class hierarchy
				rnb.combine.arrays(x, y, type=type)
			}else{
				## BUGFIX: previously this case silently fell through and returned NULL
				rnb.error("This combine operation is currently not supported")
			}
		}
	}
)
########################################################################################################################
if (!isGeneric("addPheno")) {
	setGeneric("addPheno", function(object, ...) standardGeneric("addPheno"))
}
#' addPheno
#'
#' Adds phenotypic or processing information to the sample annotation table of the given \code{RnBSet} object.
#'
#' @param object \code{\linkS4class{RnBSet}} of interest.
#' @param trait Trait as a non-empty \code{vector} or \code{factor}. The length of this vector must be equal to the
#'              number of samples in \code{object}, the i-th element storing the value for the i-th sample. Note that
#'              names, if present, are ignored.
#' @param header Trait name given as a one-element \code{character}. This is the heading to be used for the sample
#'               annotation table. This method fails if such a trait already exists; in other words, if
#'               \code{header \%in\% names(pheno(object))}.
#' @return The modified dataset as an object of type \code{\linkS4class{RnBSet}}.
#'
#' @author Fabian Mueller
#' @export
#' @docType methods
#' @rdname addPheno-RnBSet-methods
#' @aliases addPheno
#' @aliases addPheno,RnBSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' logger.start(fname=NA)
#' is.hiPSC <- pheno(rnb.set.example)[, "Sample_Group"]=="hiPSC"
#' rnb.set.mod <- addPheno(rnb.set.example, is.hiPSC, "is_hiPSC")
#' pheno(rnb.set.mod)
#' }
setMethod("addPheno", signature(object="RnBSet"),
	function(object, trait, header) {
		n.samples <- nrow(pheno(object))
		## The trait must supply exactly one value per sample
		if (!((is.vector(trait) || is.factor(trait)) && length(trait) == n.samples)) {
			stop(paste("invalid value for trait; expected vector of length", n.samples))
		}
		## The trait name must be a single, non-missing character value
		if (!(is.character(header) && length(header) == 1 && (!is.na(header)))) {
			stop("invalid value for header; expected one-element character")
		}
		## Refuse to overwrite an existing annotation column
		if (is.element(header, names(pheno(object)))) {
			stop(paste("trait", header, "already exists in the sample annotation table"))
		}
		object@pheno[[header]] <- trait
		return(object)
	}
)
########################################################################################################################
if (!isGeneric("summarize.regions")) {
setGeneric("summarize.regions", function(object, ...) standardGeneric("summarize.regions"))
}
#' summarize.regions-methods
#'
#' Summarize DNA methylation information for which is present in the \code{RnBSet} object.
#'
#' @param object Dataset of interest.
#' @param region.type Type of the region annotation for which the summarization will be performed or \code{"strands"} for summarizing the methylation values from both strands
#' @param aggregation Operation to summarize the methylation values. Currently supported values are \code{"mean"}, \code{"median"}, \code{"min"}, \code{"max"} and \code{"coverage.weighted"}
#' @param overwrite If \code{TRUE} the existing region-level information for \code{region.type} is discarded
#'
#' @return object of the same class as the supplied one containing the summarized methylation information for the specified region types
#'
#' @rdname summarize.regions-methods
#' @docType methods
#' @aliases summarize.regions
#' @aliases summarize.regions,RnBSet-method
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' rnb.set.summarized<-summarize.regions(rnb.set.example, "genes", overwrite=TRUE)
#' head(meth(rnb.set.summarized, type="genes", row.names=TRUE))
#' }
setMethod("summarize.regions", signature(object="RnBSet"),
function(object, region.type, aggregation = rnb.getOption("region.aggregation"), overwrite = TRUE) {
if (!(is.character(region.type) && length(region.type) == 1 && (!is.na(region.type)))) {
stop("invalid value for region.type")
}
if (!(is.character(aggregation) && length(aggregation) == 1 && (!is.na(aggregation)))) {
stop("invalid value for aggregation; expected single character")
}
## FIXME: Some of these aren't implemented; and I need them (min and max in particular)
## Is there a measurable improvement over the simple get(...) implementation that was dropped?
aggregation <- aggregation[1]
if (!(aggregation %in% c("min", "max", "mean", "median", "sum", "coverage.weighted"))) {
stop("invalid value for aggregation; expected one of \"min\", \"max\", \"mean\", \"median\", \"sum\" or \"coverage.weighted\"")
}
if (overwrite == FALSE && region.type %in% names(object@meth.regions)) {
stop("invalid region type; methylation data already present")
}
# aggregate.f <- get(aggregation)
# aggregate.function <- function(x) {
# tryCatch(aggregate.f(x, na.rm = TRUE), warning = function(w) { as.double(NA) })
# }
## Extract the full annotation tables for the regions and the sites
if (!(region.type %in% c(rnb.region.types(object@assembly),"strands"))){
stop("unsupported region type")
}
if (region.type =="strands" && !inherits(object, "RnBiseqSet")){
stop("cannot summarize the strand-specific information for objects other than RnBiseqSet")
}
if (aggregation == "coverage.weighted" && !inherits(object, "RnBiseqSet")){
stop("coverage.weighted aggregation is allowed only for objects of type RnBiseqSet")
}
if (aggregation == "coverage.weighted" && is.null(object@covg.sites)){
stop("cannot apply coverage.weighted aggregation method to an RnBiseqSet object with
missing coverage information")
}
bff.finalizer <- rnb.getOption("disk.dump.bigff.finalizer")
if(region.type=="strands"){
annot.sizes <- rnb.annotation.size(assembly=object@assembly)
mapping <- sapply(names(rnb.get.chromosomes(assembly=object@assembly)), function(chr){
num.sites <- annot.sizes[[chr]]
#TODO:this is not really robust
IRanges(start=(1:(num.sites/2))*2-1, width=2, names=(1:(num.sites/2))*2-1)
})
}else{
mapping <- rnb.get.mapping(region.type, object@target, object@assembly)
}
chromInds <- unique(object@sites[,2])
#construct the overlap data structure for retrieving other information
regMap.ov.str <- lapply(chromInds, function(chr.id){
chr.map <- object@sites[,2]==chr.id
names(chr.map) <- NULL
site.ranges <- IRanges(start=object@sites[chr.map,3], width=1)
chr.name <- names(rnb.get.chromosomes(assembly=object@assembly))[chr.id]
mapping.contains.chrom <- chr.name %in% names(mapping)
if(!mapping.contains.chrom){
return(NULL)
}
chr.mapping.ind <- match(chr.name,names(mapping))
olap <- IRanges::as.matrix(findOverlaps(mapping[[chr.mapping.ind]], site.ranges))
if(nrow(olap)<1) return(NULL)
return(list(
chr.id=chr.id,
chr.name=chr.name,
chr.mapping.ind=chr.mapping.ind,
chr.match.inds=which(chr.map),
olap=olap
))
})
# logger.info(c("DEBUG:","Generated mapping structure for all chromosomes"))
region.indices <- do.call("rbind", lapply(regMap.ov.str, function(x){
if (is.null(x)) return(NULL)
indOnChrom <- unique(x$olap[,1])
regInd <- as.integer(names(mapping[[x$chr.mapping.ind]][indOnChrom]))
cbind(rep(1, length(regInd)), rep(x$chr.id, length(regInd)), regInd)
}))
# logger.info(c("DEBUG:","Generated region index data frame"))
regions2sites <- unlist(lapply(regMap.ov.str, function(x){
if (is.null(x)) return(list())
tapply(x$chr.match.inds[x$olap[,2]], factor(x$olap[,1], levels=unique(x$olap[,1])), list)
}), recursive=FALSE)
names(regions2sites) <- NULL
# regions2sites.tab <- do.call("rbind",lapply(1:length(regions2sites), FUN=function(i){
# cbind(rep(i, length(regions2sites[[i]])), regions2sites[[i]])
# }))
# regions2sites.tab.fac <- factor(regions2sites.tab[,1], levels=unique(regions2sites.tab[,1]))
# logger.info(c("DEBUG:","Generated mapping of regions to sites"))
nSamples <- length(samples(object))
aggr.f <- NULL
if (aggregation=="mean"){
aggr.f <- function(siteInds, siteVec, covgVec=NULL){
mean(siteVec[siteInds], na.rm=TRUE)
# 0.666
}
} else if (is.element(aggregation, c("min", "max", "mean", "median", "sum"))){
aggr.f <- function(siteInds, siteVec, covgVec=NULL){
do.call(aggregation, list(siteVec[siteInds], na.rm=TRUE))
}
} else if (aggregation=="coverage.weighted"){
aggr.f <- function(siteInds, siteVec, covgVec){
cTotal <- sum(covgVec[siteInds], na.rm=TRUE)
sum(siteVec[siteInds]*covgVec[siteInds], na.rm=TRUE)/cTotal
}
}
site.meth <- object@meth.sites
site.covg <- object@covg.sites
aggr.meth.sample <- function(j){
siteVec <- site.meth[,j]
covgVec <- NULL
if (aggregation=="coverage.weighted") covgVec <- site.covg[,j]
vapply(regions2sites, aggr.f, numeric(1), siteVec=siteVec, covgVec=covgVec)
}
aggr.covg.sample <- function(j){
siteVec <- site.covg[,j]
vapply(regions2sites, function(siteInds){
sum(siteVec[siteInds], na.rm=TRUE)
}, numeric(1))
}
## Assign the resulting matrices to the object
if (region.type=="strands"){
if(!is.null(object@status) && object@status$disk.dump){
doBigFf <- !is.null(object@status$disk.dump.bigff)
if (doBigFf) doBigFf <- object@status$disk.dump.bigff
# delete(object@meth.sites)
if (doBigFf) {
object@meth.sites <- BigFfMat(row.n=nrow(region.indices), col.n=nSamples, col.names=samples(object), finalizer=bff.finalizer)
# logger.info(c("DEBUG:","Created BigFfMat for meth"))
} else {
object@meth.sites <- convert.to.ff.matrix.tmp(matrix(numeric(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object))))
}
} else{
object@meth.sites <- matrix(numeric(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object)))
}
for (j in 1:nSamples){
# logger.info(c("DEBUG:","Aggregating methylation for sample",j))
object@meth.sites[,j] <- aggr.meth.sample(j)
}
if (!is.null(object@covg.sites)) {
if(!is.null(object@status) && object@status$disk.dump){
doBigFf <- !is.null(object@status$disk.dump.bigff)
if (doBigFf) doBigFf <- object@status$disk.dump.bigff
# delete(object@covg.sites)
if (doBigFf) {
object@covg.sites <- BigFfMat(row.n=nrow(region.indices), col.n=nSamples, col.names=samples(object), finalizer=bff.finalizer)
} else {
object@covg.sites <- convert.to.ff.matrix.tmp(matrix(integer(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object))))
}
} else {
object@covg.sites <- matrix(integer(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object)))
}
for (j in 1:nSamples){
# logger.info(c("DEBUG:","Aggregating coverage for sample",j))
object@covg.sites[,j] <- aggr.covg.sample(j)
}
} else {
object@covg.sites <- NULL
}
object@sites <- region.indices
} else if(!is.null(region.indices)){
if(!is.null(object@status) && object@status$disk.dump){
doBigFf <- !is.null(object@status$disk.dump.bigff)
if (doBigFf) doBigFf <- object@status$disk.dump.bigff
# if(!is.null(object@meth.regions[[region.type]])){
# delete(object@meth.regions[[region.type]])
# }
if(rnb.getOption("enforce.destroy.disk.dumps")){
delete(object@meth.regions[[region.type]])
}
if (doBigFf){
object@meth.regions[[region.type]] <- BigFfMat(row.n=nrow(region.indices), col.n=nSamples, col.names=samples(object), finalizer=bff.finalizer)
# logger.info(c("DEBUG:","Created BigFfMat for meth"))
} else {
object@meth.regions[[region.type]] <- convert.to.ff.matrix.tmp(matrix(numeric(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object))))
}
} else {
object@meth.regions[[region.type]] <- matrix(numeric(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object)))
}
for (j in 1:nSamples){
# logger.info(c("DEBUG:","Aggregating methylation for sample",j))
object@meth.regions[[region.type]][,j] <- aggr.meth.sample(j)
}
if(!is.null(object@covg.sites)) {
if(!is.null(object@status) && object@status$disk.dump){
doBigFf <- !is.null(object@status$disk.dump.bigff)
if (doBigFf) doBigFf <- object@status$disk.dump.bigff
# if(!is.null(object@covg.regions[[region.type]])) {
# delete(object@covg.regions[[region.type]])
# }
if(rnb.getOption("enforce.destroy.disk.dumps")){
delete(object@covg.regions[[region.type]])
}
if (doBigFf){
if (is.null(object@covg.regions)) object@covg.regions <- list()
object@covg.regions[[region.type]] <- BigFfMat(row.n=nrow(region.indices), col.n=nSamples, col.names=samples(object), finalizer=bff.finalizer)
} else {
object@covg.regions[[region.type]] <- convert.to.ff.matrix.tmp(matrix(integer(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object))))
}
}else{
object@covg.regions[[region.type]] <- matrix(integer(0), nrow=nrow(region.indices), ncol=nSamples, dimnames=list(NULL,samples(object)))
}
for (j in 1:nSamples){
# logger.info(c("DEBUG:","Aggregating coverage for sample",j))
object@covg.regions[[region.type]][,j] <- aggr.covg.sample(j)
}
}else{
object@covg.regions <- NULL
}
attr(object@meth.regions[[region.type]], "aggregation") <- aggregation
object@regions[[region.type]] <- region.indices
}else{ #no valid regions found
object@meth.regions[[region.type]] <- matrix(0L, nrow=0, ncol=ncol(object@meth.sites))
if(!is.null(object@covg.sites)) object@covg.regions[[region.type]] <- matrix(0L, nrow=0, ncol=ncol(object@meth.sites))
attr(object@meth.regions[[region.type]], "aggregation") <- aggregation
object@regions[[region.type]] <- matrix(0L, nrow=0, ncol=3)
}
rm(site.meth) #for ff and BigFfMat, the finalizer should be "delete" and thus the objects should be deleted from disk when this function terminates
rm(site.covg)
object
}
)
########################################################################################################################
## Define the S4 generic for 'remove.regions' unless it already exists.
if (!isGeneric("remove.regions")) {
	setGeneric("remove.regions", function(object, ...) standardGeneric("remove.regions"))
}
#' remove.regions-methods
#'
#' Remove the summarized methylation information for a given region type from an \code{RnBSet} object.
#'
#' @param object Dataset of interest.
#' @param region.type Type of the region annotation for which the summarization should be removed
#'
#' @return object of the same class as the supplied one without the summarized methylation information for the specified region type
#'
#' @rdname remove.regions-methods
#' @docType methods
#' @aliases remove.regions
#' @aliases remove.regions,RnBSet-method
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' summarized.regions(rnb.set.example)
#' rnb.set.reduced<-remove.regions(rnb.set.example, "genes")
#' summarized.regions(rnb.set.reduced)
#' }
setMethod("remove.regions", signature(object="RnBSet"),
	function(object, region.type) {
		## Drop the region index table and the summarized methylation matrix
		## for the requested region type
		object@regions[[region.type]] <- NULL
		object@meth.regions[[region.type]] <- NULL
		## Region coverage summaries exist only when site coverage is present
		if (!is.null(object@covg.sites)) {
			object@covg.regions[[region.type]] <- NULL
		}
		object
	}
)
########################################################################################################################
## Define the S4 generic for 'regionMapping' unless it already exists.
if (!isGeneric("regionMapping")) {
	setGeneric("regionMapping", function(object, ...) standardGeneric("regionMapping"))
}
#' regionMapping-methods
#'
#' get the mapping of regions in the RnBSet object to methylation site indices in the RnBSet object
#'
#' @param object Dataset as an object of type inheriting \code{\linkS4class{RnBSet}}.
#' @param region.type region type. see \code{\link{rnb.region.types}} for possible values
#' @return A list containing for each region the indices (as integers) of sites that belong to that region
#'
#' @rdname regionMapping-methods
#' @docType methods
#' @aliases regionMapping
#' @aliases regionMapping,RnBSet-method
#' @author Fabian Mueller
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' logger.start(fname=NA)
#' promoter.probe.list <- regionMapping(rnb.set.example,"promoters")
#' #get the number of CpGs per promoter in the dataset:
#' sapply(promoter.probe.list,length)
#' }
setMethod("regionMapping", signature(object = "RnBSet"),
	function(object, region.type) {
		if (!inherits(object, "RnBSet")) {
			stop("invalid value for object; expected RnBSet")
		}
		if (!(is.character(region.type) && length(region.type) == 1 && (!is.na(region.type)))) {
			stop("invalid value for type")
		}
		if (!(region.type %in% rnb.region.types(object@assembly))) {
			stop(paste0("unsupported annotation type (annotation): ",region.type))
		}
		if (!(region.type %in% names(object@regions))) {
			stop(paste0("unsupported annotation type (RnBSet): ",region.type))
		}
		## Per-chromosome annotation ranges for this region type, and a lookup
		## table translating chromosome indices (column 2 of the site/region
		## tables) into chromosome names
		chrom.maps <- rnb.get.mapping(region.type, object@target, object@assembly)
		chrom.integer2name <- names(rnb.get.chromosomes(assembly=object@assembly))
		obj.sites <- data.frame(object@sites)
		region.map <- object@regions[[region.type]]
		chr.inds.reg <- unique(region.map[,2])
		## Replace the integer chromosome column by a factor of chromosome names;
		## the levels preserve the order in which chromosomes occur in the table
		obj.sites[,2] <- factor(chrom.integer2name[obj.sites[,2]],levels=chrom.integer2name[unique(obj.sites[,2])])
		# obj.sites[,2] <- factor(chrom.integer2name[obj.sites[,2]],levels=chrom.integer2name)
		# obj.sites[,2] <- as.factor(chrom.integer2name[obj.sites[,2]])
		## Width-1 ranges for the site positions, grouped per chromosome
		chrom.site.inds <- tapply(obj.sites[,3],obj.sites[,2],FUN=function(x){
			IRanges(start=x,width=1)
		})
		## Offsets converting chromosome-local site indices to global indices
		chrom.offsets <- sapply(chrom.site.inds,length)
		chrom.offsets <-cumsum(c(0,chrom.offsets[-length(chrom.offsets)]))
		names(chrom.offsets) <- names(chrom.site.inds)
		result <- lapply(chr.inds.reg,FUN=function(chr){
			curChromName <- chrom.integer2name[chr]
			rnbs.regs <- region.map[region.map[,2]==chr,3]
			## Region indices are matched against the names of the annotation
			## ranges, hence the formatting to plain (non-scientific) strings
			rnbs.regs.char <- format(rnbs.regs,trim=TRUE,scientific=FALSE)
			rrRanges <- chrom.maps[[curChromName]]
			#only take the regions that are also in the RnBSet object
			if (!all(rnbs.regs.char %in% names(rrRanges))) {stop(paste("Not all regions in RnBSet are present in the annotation (",curChromName,")"))}
			rrRanges <- rrRanges[rnbs.regs.char,]
			## Overlap site positions with the region ranges, then lift the site
			## indices from chromosome-local to global coordinates
			olap<-as.matrix(findOverlaps(chrom.site.inds[[curChromName]], rrRanges))
			olap[,1]<-olap[,1]+chrom.offsets[curChromName]
			res<-tapply(olap[,1], olap[,2], list)
			return(res)
		})
		result<-unlist(result, recursive=FALSE)
		names(result)<-NULL
		## Sanity check: exactly one list entry per region in the RnBSet
		if (dim(region.map)[1] != length(result)){
			stop("regionMapping failed")
		}
		return(result)
	}
)
########################################################################################################################
#' annotation-methods
#'
#' Genomic annotation of the methylation sites or regions covered in the supplied dataset.
#'
#' @param object dataset as an object of type inheriting \code{RnBSet}.
#' @param type loci or regions for which the annotation should be obtained. If the value of this parameter is
#' \code{"sites"} (default), individual methylation sites are annotated. Otherwise, this must be one of
#' the available region types, as returned by \code{\link{rnb.region.types}}.
#' @param add.names flag specifying whether the unique site identifiers should be used as row names of the
#' resulting data frame
#' @param include.regions if \code{TRUE} one additional column is added to the returned annotation data frame
#'                        for each of the available region types, giving for every row the index of the
#'                        region of that type which contains it (or 0 if it is not covered by such a region)
#'
#' @return Annotation table in the form of a \code{data.frame}.
#'
#' @rdname annotation-methods
#' @docType methods
#' @aliases annotation
#' @aliases annotation,RnBSet-method
#' @author Pavlo Lutsik
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' ## show present sites
#' head(annotation(rnb.set.example, add.names=TRUE))
#' ## show promoters
#' ann.prom<-annotation(rnb.set.example, type="promoters", add.names=TRUE)
#' head(ann.prom)
#' }
setMethod("annotation", signature(object = "RnBSet"),
	function(object, type="sites", add.names=FALSE, include.regions=FALSE) {
		if (!inherits(object, "RnBSet")) {
			stop("invalid value for object; expected RnBSet")
		}
		if (!(is.character(type) && length(type) == 1 && (!is.na(type)))) {
			stop("invalid value for type")
		}
		if (type == "sites") {
			## Site annotation: use the dataset's methylation target and the
			## table of covered sites
			type <- object@target
			subsets <- object@sites
		} else {
			if (!(type %in% rnb.region.types(object@assembly))) {
				stop(paste0("unsupported annotation type (annotation): ",type))
			}
			if (!(type %in% names(object@regions))) {
				## This region type is not initialized with summarize.regions
				## FIXME: Report an error or initialize.
				stop(paste0("unsupported annotation type (RnBSet): ",type))
			}
			subsets <- object@regions[[type]]
		}
		annot <- rnb.get.annotation(type, object@assembly)
		## Cumulative per-chromosome annotation sizes translate the
		## (chromosome, within-chromosome index) pairs in 'subsets' into row
		## positions of the full annotation table
		ind.shift<-rnb.annotation.size(type, object@assembly)
		ind.shift<-cumsum(c(0,ind.shift[-length(ind.shift)]))
		subsets.full<-subsets[,3]+ind.shift[subsets[,2]]
		result<-rnb.annotation2data.frame(annot, add.names=add.names)[subsets.full,]
		if(include.regions){
			## Add one integer column per summarized region type; each row gets
			## the index of the region containing it, or 0 if uncovered.
			## 'result' is modified in the enclosing scope via <<-; the sapply
			## return value is intentionally discarded.
			dump<-sapply(names(object@regions), function(rt){
				result[,rt]<<-rep(0L,nrow(result))
				map<-regionMapping(object, rt)
				index_map<-lapply(1:length(map), function(ix) rep(ix, length(map[[ix]])))
				result[unlist(map),rt]<<-unlist(index_map)
			})
		}
		return(result)
	}
)
########################################################################################################################
## NOTE(review): unlike the other generics in this file, this one is defined
## unconditionally; the isGeneric() guard was deliberately commented out —
## presumably to override any previously registered definition. Confirm before
## restoring the guard.
#if (!isGeneric("save.matrices")) {
setGeneric("save.matrices", function(object, path, ...) standardGeneric("save.matrices"))
#}
setMethod("save.matrices", signature(object="RnBSet", path="character"),
		function(object, path){
			## Only disk-dumped datasets keep their matrices as ff/BigFfMat
			## objects that need dedicated serialization; in-memory matrices
			## are covered by the regular save() of the object itself.
			if(!is.null(object@status) && object@status$disk.dump){
				## Site-level methylation matrix
				if("ff" %in% class(object@meth.sites)){
					ffmatrix <- object@meth.sites
					ffsave(ffmatrix,file=file.path(path, "rnb.meth"),rootpath=getOption('fftempdir'))
					rm(ffmatrix)
				} else if("BigFfMat" %in% class(object@meth.sites)){
					save.bigFfMat(object@meth.sites, file=file.path(path, "rnb.meth"), rootpath=getOption('fftempdir'))
				}
				## Site-level coverage matrix (skipped when not ff/BigFfMat)
				if("ff" %in% class(object@covg.sites)){
					ffmatrix <- object@covg.sites
					ffsave(ffmatrix, file=file.path(path, "rnb.covg"),rootpath=getOption('fftempdir'))
					rm(ffmatrix)
				} else if("BigFfMat" %in% class(object@covg.sites)){
					save.bigFfMat(object@covg.sites, file=file.path(path, "rnb.covg"), rootpath=getOption('fftempdir'))
				}
				## One subdirectory per summarized region type, named by its
				## numeric position in the regions list (matches load.matrices)
				if(length(object@regions) != 0){
					for(rgn in 1:length(object@regions)){
						rgnpath<-file.path(path,rgn)
						if(!file.exists(rgnpath)){
							dir.create(rgnpath)
						}
						if("ff" %in% class(object@meth.regions[[rgn]])){
							ffmatrix<-object@meth.regions[[rgn]]
							ffsave(ffmatrix, file=file.path(path, rgn, "rnb.meth"),rootpath=getOption('fftempdir'))
							rm(ffmatrix)
						} else if("BigFfMat" %in% class(object@meth.regions[[rgn]])){
							save.bigFfMat(object@meth.regions[[rgn]], file=file.path(path, rgn, "rnb.meth"), rootpath=getOption('fftempdir'))
						}
						if("ff" %in% class(object@covg.regions[[rgn]])){
							ffmatrix<-object@covg.regions[[rgn]]
							ffsave(ffmatrix, file=file.path(path, rgn, "rnb.covg"),rootpath=getOption('fftempdir'))
							rm(ffmatrix)
						} else if("BigFfMat" %in% class(object@covg.regions[[rgn]])){
							save.bigFfMat(object@covg.regions[[rgn]], file=file.path(path, rgn, "rnb.covg"), rootpath=getOption('fftempdir'))
						}
					}
				}
			}
		})
########################################################################################################################
## S4 generic for restoring disk-dumped matrices of an RnBSet from a directory.
setGeneric("load.matrices", function(object, path, ...) standardGeneric("load.matrices"))
setMethod("load.matrices", signature(object="RnBSet", path="character"),
		function(object, path, temp.dir=tempdir()){
			## Stepwise evaluation avoids indexing a NULL status slot; doBigFf
			## ends up TRUE only when status$disk.dump.bigff exists and is TRUE
			doBigFf <- !is.null(object@status)
			if (doBigFf) doBigFf <- !is.null(object@status$disk.dump.bigff)
			if (doBigFf) doBigFf <- object@status$disk.dump.bigff
			if (doBigFf){
				object@meth.sites <- load.bigFfMat(file.path(path, "rnb.meth"), rootpath=getOption("fftempdir"))
				if(!is.null(object@covg.sites)){
					object@covg.sites <- load.bigFfMat(file.path(path, "rnb.covg"), rootpath=getOption("fftempdir"))
				}
			} else {
				## The ==2 check requires both files of an ff archive to be
				## present (presumably the .RData/.ffData pair written by
				## ffsave — TODO confirm) before attempting to load it
				if(sum(grepl("rnb.meth", list.files(path)))==2){
					load_env<-new.env()
					suppressMessages(ffload(file=file.path(path, "rnb.meth"), envir=load_env,rootpath=getOption("fftempdir")))
					object@meth.sites<-get("ffmatrix", envir=load_env)
					rm(load_env)
				}
				if(sum(grepl("rnb.covg", list.files(path)))==2){
					load_env<-new.env()
					suppressMessages(ffload(file=file.path(path, "rnb.covg"), envir=load_env,rootpath=getOption("fftempdir")))
					object@covg.sites<-get("ffmatrix", envir=load_env)
					rm(load_env)
				}
			}
			rgns <- names(object@regions)
			if(!is.null(rgns)){
				## Objects carrying a 'version' slot store their region matrices
				## in directories named by numeric index (as written by
				## save.matrices) rather than by region type name
				if (.hasSlot(object, 'version')) {
					rgns <- 1:length(rgns)
				}
				for(rgn in rgns){
					if (doBigFf){
						object@meth.regions[[rgn]] <- load.bigFfMat(file.path(path, rgn, "rnb.meth"), rootpath=getOption("fftempdir"))
						if(!is.null(object@covg.regions[[rgn]])){
							object@covg.regions[[rgn]] <- load.bigFfMat(file.path(path, rgn, "rnb.covg"), rootpath=getOption("fftempdir"))
						}
					} else {
						if(sum(grepl("rnb.meth",list.files(file.path(path, rgn))))==2){
							load_env<-new.env()
							suppressMessages(ffload(file=file.path(path, rgn, "rnb.meth"), envir=load_env, rootpath=getOption("fftempdir")))
							object@meth.regions[[rgn]]<-get("ffmatrix", envir=load_env)
							rm(load_env)
						}
						if(sum(grepl("rnb.covg",list.files(file.path(path, rgn))))==2){
							load_env<-new.env()
							suppressMessages(ffload(file=file.path(path, rgn, "rnb.covg"), envir=load_env, rootpath=getOption("fftempdir")))
							object@covg.regions[[rgn]]<-get("ffmatrix", envir=load_env)
							rm(load_env)
						}
					}
				}
			}
			return(object)
		})
########################################################################################################################
#' save.rnb.set
#'
#' Consistent saving of an \code{RnBSet} object with large matrices of type \link{ff}.
#'
#' @param object \code{RnBSet}-inheriting object.
#' @param path the name of the output file (or directory if \code{archive} is \code{FALSE})
#' without an extension. If only the file name is given the object will be saved
#' in the current working directory.
#' @param archive if \code{TRUE} (default value) the output is a ZIP-file.
#'
#' @details The saved object can be reloaded with the \link{load.rnb.set} function.
#'
#' @return invisibly, the full path to the ZIP file (if \code{archive} is \code{TRUE}),
#' or to the output directory (otherwise)
#'
#' @author Pavlo Lutsik
#' @export
save.rnb.set<-function(object, path, archive=TRUE){
	## Validate parameters
	if (!inherits(object, "RnBSet")) {
		stop("invalid value for object")
	}
	if (!(is.character(path) && length(path) == 1 && isTRUE(!grepl("^[/\\.]*$", path)))) {
		stop("invalid value for path")
	}
	if (!parameter.is.flag(archive)) {
		stop("invalid value for archive")
	}
	## Disk-dumped (ff) objects require an external zip tool on Windows.
	## FIX: the original tested .Platform$OS, which does not exist; the
	## documented field is .Platform$OS.type ("unix" or "windows"). Also guard
	## against a NULL 'status' slot, which the original did not.
	uses.disk.dump <- !is.null(object@status) && isTRUE(object@status$disk.dump)
	if(uses.disk.dump && .Platform$OS.type == "windows" && Sys.getenv("R_ZIPCMD")==""){
		rnb.warning(c("Zip not found on this Windows system, this RnBSet object will not be saved.",
						"See the instructions for installing ZIP on Windows in the FAQ section of the RnBeads website."))
		return(invisible(path))
	}
	## Get the full path of the file or directory to be created
	fullpath <- normalizePath(gsub("/$", "", gsub("\\", "/", path, fixed = TRUE)), winslash = "/", mustWork = FALSE)
	## Create or overwrite a directory to store the files
	if (unlink(fullpath, recursive = TRUE) == 1) {
		stop("Specified path already exists and cannot be overwritten")
	}
	if (archive) {
		if (unlink(paste0(fullpath, ".zip"), recursive = TRUE) == 1) {
			stop("Specified path already exists and cannot be overwritten")
		}
	}
	if (!dir.create(fullpath, showWarnings = FALSE, recursive = TRUE)) {
		stop("Could not create output directory")
	}
	## Save all data structures
	save.matrices(object, fullpath)
	save(object, file=file.path(fullpath, "rnb.set.RData"))
	## Create a ZIP archive of the whole directory
	if(archive){
		currdir <- setwd(fullpath)
		on.exit(setwd(currdir), add = TRUE)  # restore the working directory even on error
		## zip -m moves the files into the archive, i.e. deletes the originals
		zip(paste0(fullpath, ".zip"), dir(), flags = "-rm9X")
		## Wait until the directory has been emptied before removing it.
		## FIX: the original polled the possibly relative 'path' after changing
		## the working directory, which could spin forever; poll the absolute
		## path and yield instead of hot-spinning.
		while(length(list.files(fullpath)) > 0){
			Sys.sleep(0.01)
		}
		setwd(currdir)
		if (unlink(fullpath, recursive = TRUE) == 1) {
			rnb.warning("Could not clean output directory after zipping")
		}
		fullpath <- paste0(fullpath, ".zip")
	}
	return(invisible(fullpath))
}
########################################################################################################################
#' load.rnb.set
#'
#' Loading of the \code{RnBSet} objects with large matrices of type \pkg{ff}.
#'
#' @param path full path of the saved object: either the ZIP file produced by
#' \code{\link{save.rnb.set}} with \code{archive=TRUE}, or the directory created otherwise
#' @param temp.dir \code{character} singleton which specifies temporary directory, used while loading
#'
#' @return Loaded object
#'
#' @author Pavlo Lutsik
#' @export
load.rnb.set<-function(path, temp.dir=tempdir()){
	## Validate parameters
	if (!(is.character(path) && length(path) == 1 && isTRUE(!grepl("^[/\\.]*$", path)))) {
		stop("invalid value for path")
	}
	if (!(is.character(temp.dir) && length(temp.dir) == 1 && isTRUE(!grepl("^[/\\.]*$", temp.dir)))) {
		stop("invalid value for temp.dir")
	}
	if (!file.exists(path)) {
		stop("invalid value for path; the path does not exist")
	}
	if (!isTRUE(file.info(temp.dir)[1, "isdir"])) {
		stop("invalid value for temp.dir; the path does not exist or is not a directory")
	}
	## Select the unzip method; fall back to R's internal implementation on
	## Windows systems without an external zip tool.
	## FIX: the original tested .Platform$OS, which does not exist; the
	## documented field is .Platform$OS.type.
	if(.Platform$OS.type == "windows" && Sys.getenv("R_ZIPCMD")==""){
		method <- "internal"
	}else{
		method <- "unzip"
	}
	## Tolerate a path pointing at the serialized object inside the directory
	if(grepl("rnb.set.RData",path)){
		logger.info("The path to the data set directory should be provided, not to the individual file. Changing to parent directory.")
		path <- dirname(path)
	}
	## A regular file is assumed to be the ZIP archive; extract it first
	if(!file.info(path)[["isdir"]]){
		td<-tempfile("extraction", temp.dir)
		unzip(path, exdir=td, unzip=method)
	}else{
		td<-path
	}
	## Load the RnBSet skeleton into a private environment, then reattach the
	## disk-dumped matrices
	load_env<-new.env(parent=emptyenv())
	load(file.path(td, "rnb.set.RData"),envir=load_env)
	load.matrices(get("object", load_env), td, temp.dir=temp.dir)
}
########################################################################################################################
## Define the S4 generic for 'destroy' unless another package already provides it.
if (!isGeneric("destroy")) {
	setGeneric("destroy", function(object) standardGeneric("destroy"))
}
#' destroy-methods
#'
#' Remove tables stored to disk from the file system. Useful for cleaning up disk dumped objects.
#'
#' @param object object inheriting from \code{\linkS4class{RnBSet}}
#' @return Nothing of particular interest
#'
#' @rdname destroy-methods
#' @docType methods
#' @aliases destroy
#' @aliases destroy,RnBSet-method
#' @export
setMethod("destroy", signature(object="RnBSet"),
	function(object){
		## Nothing to clean up unless the object dumps its tables to disk.
		## FIX: guard against a NULL 'status' slot — the unguarded
		## object@status$disk.dump raised an error for such objects, while the
		## other disk-dump checks in this file use !is.null(object@status).
		if(!is.null(object@status) && isTRUE(object@status$disk.dump)){
			delete(object@meth.sites)
			if(!is.null(object@covg.sites)){
				delete(object@covg.sites)
			}
			## Remove the per-region-type summary tables as well
			if(!is.null(object@regions)){
				for(rgn in names(object@regions)){
					delete(object@meth.regions[[rgn]])
					if(!is.null(object@covg.regions))
					{
						delete(object@covg.regions[[rgn]])
					}
				}
			}
		}
		return(invisible(TRUE))
	}
)
########################################################################################################################
## meth.matrices
##
## Creates a list of methylation value (beta) matrices for the given dataset.
##
## @param object Methylation dataset object of type that inherits \code{RnBSet}.
## @param include.sites Flag indicating if the methylation matrix of sites or probes is to be included in the result.
## @return Non-empty \code{list} of matrices of beta values. If \code{include.sites} is \code{TRUE}, the first matrix in
## the list is the one based on sites or probes. Other matrices store region-based methylation for (some of) the
## regions addressed in the option \code{"region.types"}.
## @author Yassen Assenov
meth.matrices <- function(object, include.sites = rnb.getOption("analyze.sites")) {
	result <- list()
	if (include.sites) {
		result[["sites"]] <- meth(object)
	}
	## One beta matrix per analyzed region type; types whose retrieval fails
	## are silently skipped
	for (region.type in rnb.region.types.for.analysis(object)) {
		beta.matrix <- tryCatch(meth(object, region.type), error = function(err) NULL)
		if (!is.null(beta.matrix)) {
			result[[region.type]] <- beta.matrix
		}
	}
	result
}
########################################################################################################################
## get.row.names
##
## Generates row names based on the genomic location.
##
## @param object \code{RnBSet} object.
## @return \code{character} vector of row names.
## @author Pavlo Lutsik
## get.row.names
##
## Generates row names for the sites or regions of a dataset, preferring
## stable identifiers and falling back to coordinate-based names.
## FIX: the original assigned per-branch locals ('target', 'subsets') that
## were never used; only the type validation is kept.
get.row.names<-function(object, type="sites"){
	## Validate the requested annotation type
	if (!(type == "sites" || type %in% names(object@regions))) {
		stop("unsupported region type")
	}
	loc.info<-annotation(object, type=type, add.names=TRUE)
	if ("ID" %in% colnames(loc.info) && anyDuplicated(loc.info[, "ID"]) == 0) {
		## Unique identifiers are available; use them directly
		result <- loc.info[,"ID"]
	} else if (!is.null(rownames(loc.info))) {
		result <- rownames(loc.info)
	} else {
		## Fall back to "chromosome.start.strand" names
		result <- paste(loc.info[,"Chromosome"], loc.info[,"Start"], as.character(loc.info[,"Strand"]), sep=".")
	}
	result
}
########################################################################################################################
## rnb.get.row.token
##
## Gets the methylation target, that is, the basic methylation feature of a dataset based on its platform.
##
## @param object Methylation dataset of interest, an object of type inheriting \code{MethyLumiSet} or \code{RnBSet}.
## @param plural Flag, indicating if the plural form of the word.
## @return Word or phrase denoting the term for a single target of the platform.
## @author Pavlo Lutsik
## rnb.get.row.token
##
## Returns the term for a single methylation feature of a dataset: "site" for
## sequencing-based data and "probe" for array-based data.
##
## @param object Dataset (or class name as a character) to derive the term from.
## @param plural Flag indicating whether the plural form should be returned.
## @return The word "site"/"probe", optionally pluralized.
rnb.get.row.token<-function(object, plural = FALSE){
	if (is.character(object)) {
		## A class name was supplied instead of an object
		token <- ifelse(object %in% c("RnBiseqSet", "RnBSet"), "site", "probe")
	} else if (inherits(object, "MethyLumiSet")) {
		token <- "probe"
	} else if (object@target == "CpG") {
		token <- "site"
	} else {
		# object@target == "probes450"
		token <- "probe"
	}
	ifelse(plural, paste0(token, "s"), token)
}
########################################################################################################################
## rnb.get.covg.token
##
## Gets the measure of coverage of a dataset based on its platform.
##
## @param object Methylation dataset of interest, an object of type inheriting \code{MethyLumiSet} or \code{RnBSet}.
## @param capital Flag, indicating if the first letter of the returned phrase should be capitalized.
## @return Word or phrase denoting the term for depth of coverage.
## @author Pavlo Lutsik
## rnb.get.covg.token
##
## Returns the term for the coverage measure of a dataset: "coverage" for
## sequencing-based data and "bead counts" for array-based data.
##
## @param object Dataset (or class name as a character) to derive the term from.
## @param capital Flag indicating whether the first letter should be capitalized.
## @return The phrase "coverage"/"bead counts", optionally capitalized.
rnb.get.covg.token<-function(object, capital=FALSE){
	if (is.character(object)) {
		## A class name was supplied instead of an object
		token <- ifelse(object %in% c("RnBiseqSet", "RnBSet"), "coverage", "bead counts")
	} else if (inherits(object, "MethyLumiSet")) {
		token <- "bead counts"
	} else if (object@target == "CpG") {
		token <- "coverage"
	} else {
		# object@target == "probes450"
		token <- "bead counts"
	}
	## ifelse() evaluates capitalize(token) only when 'capital' is TRUE
	ifelse(capital, capitalize(token), token)
}
########################################################################################################################
## Define the S4 generic for 'sampleMethApply' unless it already exists.
if (!isGeneric("sampleMethApply")) {
	setGeneric("sampleMethApply", function(object, ...) standardGeneric("sampleMethApply"))
}
#' sampleMethApply-methods
#'
#' Applies a function over the methylation values for all samples in an \code{RnBSet} using a low memory footprint.
#'
#' @param object object inheriting from \code{\linkS4class{RnBSet}}
#' @param fn function to be applied
#' @param type \code{character} singleton. Specify "sites" (default) or a region type over which the function is applied
#' @param ... arguments passed on to the function
#' @return Result analogous to \code{apply(meth(rnbSet, type), 2, FUN=FUN)}
#'
#' @seealso \code{\link[=meth,RnBSet-method]{meth}} Retrieving the matrix of methylation values
#' @rdname sampleMethApply-methods
#' @docType methods
#' @aliases sampleMethApply
#' @aliases sampleMethApply,RnBSet-method
setMethod("sampleMethApply", signature(object = "RnBSet"),
	function(object, fn, type="sites", ...) {
		if (!(is.character(type) && length(type) == 1 && (!is.na(type)))) {
			stop("invalid value for type")
		}
		## Valid targets: "sites", the object's site target, or a summarized
		## region type. FIX: the original assigned an unused nrow() result in
		## the first branch; only the validation is kept.
		if (!(type %in% c("sites", object@target, names(object@regions)))) {
			stop("unsupported region type")
		}
		## Retrieve one sample's methylation values at a time to keep the
		## memory footprint low; sapply combines the per-sample results
		res <- sapply(seq_along(samples(object)), FUN=function(j){
			fn(meth(object, type=type, j=j), ...)
		})
		return(res)
	}
)
## Define the S4 generic for 'sampleCovgApply' unless it already exists.
if (!isGeneric("sampleCovgApply")) {
	setGeneric("sampleCovgApply", function(object, ...) standardGeneric("sampleCovgApply"))
}
#' sampleCovgApply-methods
#'
#' Applies a function over the coverage values for all samples in an \code{RnBSet} using a low memory footprint.
#' @param object object inheriting from \code{\linkS4class{RnBSet}}
#' @param fn function to be applied
#' @param type \code{character} singleton. Specify "sites" (default) or a region type over which the function is applied
#' @param ... arguments passed on to the function
#' @return Result analogous to \code{apply(covg(rnbSet, type), 2, FUN=FUN)}
#'
#' @seealso \code{\link[=meth,RnBSet-method]{covg}} Retrieving the matrix of coverage values
#' @rdname sampleCovgApply-methods
#' @docType methods
#' @aliases sampleCovgApply
#' @aliases sampleCovgApply,RnBSet-method
setMethod("sampleCovgApply", signature(object = "RnBSet"),
	function(object, fn, type="sites", ...) {
		if (!(is.character(type) && length(type) == 1 && (!is.na(type)))) {
			stop("invalid value for type")
		}
		## Valid targets: "sites", the object's site target, or a summarized
		## region type. FIX: the original assigned an unused nrow() result in
		## the first branch; only the validation is kept.
		if (!(type %in% c("sites", object@target, names(object@regions)))) {
			stop("unsupported region type")
		}
		## Retrieve one sample's coverage values at a time to keep the memory
		## footprint low; sapply combines the per-sample results
		res <- sapply(seq_along(samples(object)), FUN=function(j){
			fn(covg(object, type=type, j=j), ...)
		})
		return(res)
	}
)
########################################################################################################################
## Define the S4 generic for 'getNumNaMeth' unless it already exists.
if (!isGeneric("getNumNaMeth")) {
	setGeneric("getNumNaMeth", function(object, ...) standardGeneric("getNumNaMeth"))
}
#' getNumNaMeth-methods
#'
#' For each site/region, getNumNaMeth retrieves the number of NA values across all samples.
#' Does this efficiently by breaking down the methylation matrix into submatrices
#' @param object object inheriting from \code{\linkS4class{RnBSet}}
#' @param type "sites" or region type
#' @param chunkSize size of each submatrix (performance tuning parameter)
#' @param mask logical matrix. its entries will also be considered NAs in counting
#' @return vector containing the number of NAs per site/region
#'
#' @rdname getNumNaMeth-methods
#' @docType methods
#' @aliases getNumNaMeth
#' @aliases getNumNaMeth,RnBSet-method
setMethod("getNumNaMeth", signature(object = "RnBSet"),
	function(object, type="sites", chunkSize=1e5, mask=NULL) {
		if (!(is.character(type) && length(type) == 1 && (!is.na(type)))) {
			stop("invalid value for type")
		}
		if (!(type %in% c("sites", object@target, names(object@regions)))) {
			stop("unsupported region type")
		}
		n <- nsites(object, type)
		## FIX: seq(1, 0, by=chunkSize) errors; handle empty datasets explicitly
		if (n == 0) {
			return(integer(0))
		}
		## Chunk boundaries [indStarts[i], indEnds[i]] covering 1..n
		indStarts <- seq(1,n,by=chunkSize)
		indEnds <- c(indStarts[-1]-1, n)
		## Count NAs chunk by chunk so that at most chunkSize rows of the
		## methylation matrix are materialized at a time
		res <- unlist(lapply(seq_along(indStarts), FUN=function(i){
			indsCur <- indStarts[i]:indEnds[i]
			mm <- meth(object, type=type, i=indsCur)
			isNaMat <- is.na(mm)
			## entries flagged in 'mask' count as missing as well
			if (!is.null(mask)) isNaMat <- isNaMat | mask[indsCur,]
			return(as.integer(rowSums(isNaMat)))
		}))
		return(res)
	}
)
if(!isGeneric("isImputed")) setGeneric("isImputed", function(object, ...) standardGeneric("isImputed"))
#' isImputed
#'
#' Getter for the imputation field. Return TRUE, if the object has been imputed and FALSE otherwise.
#' @param object Object for which the information should be returned
#' @return TRUE, if the object has been imputed and FALSE otherwise.
#' @author Michael Scherer
#' @aliases isImputed
#' @aliases isImputed,RnBSet-method
#' @export
setMethod("isImputed",signature(object="RnBSet"),
	function(object){
		## Objects serialized before the "imputed" slot was introduced do
		## not carry it; treat those as not imputed.
		has.slot <- .hasSlot(object, "imputed")
		if (!has.slot) {
			return(FALSE)
		}
		object@imputed
	}
)
########################################################################################################################
#' rnb.sample.summary.table
#'
#' Creates a sample summary table from an RnBSet object
#'
#' @param rnbSet \code{\linkS4class{RnBSet}} of interest.
#' @return a summary table (as data.frame) with the following variables for each sample (rows):
#' \item{sampleName}{Name of the sample}
#' \item{*_num (* can be 'sites' or a region type)}{Number of sites or regions with coverage in the sample}
#' \item{*_covgMean (\code{RnBiseqSet} only)}{Mean coverage of sites or regions in the sample}
#' \item{*_covgMedian (\code{RnBiseqSet} only)}{Median coverage of sites or regions in the sample}
#' \item{*_covgPerc25 (\code{RnBiseqSet} only)}{25 percentile of coverage of sites or regions in the sample}
#' \item{*_covgPerc75 (\code{RnBiseqSet} only)}{75 percentile of coverage of sites or regions in the sample}
#' \item{*_numCovg5,10,30,60 (\code{RnBiseqSet} only)}{Number of sites or regions with coverage greater or equal to 5,10,30,60}
#' \item{sites_numDPval5em2,1em2,1em3 (\code{RnBeadSet} only)}{Number of sites with a detection p-value smaller than 0.05,0.01,0.001}
#' \item{**_numSitesMean (** is any region type)}{Mean number of sites in a region}
#' \item{**_numSitesMedian}{Median number of sites in a region}
#' \item{**_numSites2,5,10,20}{Number of regions with at least 2,5,10,20 sites with valid methylation measurements}
#' @author Fabian Mueller
#' @aliases rnb.sample.summary.table,RnBSet-method
#' @export
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' logger.start(fname=NA)
#' rnb.sample.summary.table(rnb.set.example)
#' }
rnb.sample.summary.table <- function(rnbSet) {
	## Dispatch on the concrete class: coverage statistics only exist for
	## sequencing data (RnBiseqSet), detection p-values only for array
	## data (RnBeadSet).
	is.biseq <- "RnBiseqSet" %in% class(rnbSet)
	is.beads <- "RnBeadSet" %in% class(rnbSet)
	## Zero-column data frame with one row per sample; serves as a template
	## onto which the per-region-type statistic columns are attached.
	df.empty <- data.frame(matrix(nrow=length(samples(rnbSet)),ncol=0))
	rownames(df.empty) <- samples(rnbSet)
	tt <- data.frame(df.empty,sampleName=samples(rnbSet),stringsAsFactors=FALSE)
	## Statistics are computed for "sites" and for each summarized region type.
	reg.types.regions <- summarized.regions(rnbSet)
	reg.types <- c("sites",reg.types.regions)
	for (rr in reg.types){
		# logger.status(c("Region type:",rr))
		# rnb.cleanMem()
		tt.cur <- df.empty
		## Number of sites/regions with a non-NA methylation value per sample.
		tt.cur$num <- sampleMethApply(rnbSet, function(x){sum(!is.na(x))}, type=rr)
		if (is.biseq){
			## Per-sample coverage summary. Coverage is only counted where a
			## methylation value exists; zero coverage is treated as missing.
			covgStats <- do.call("rbind", lapply(1:length(samples(rnbSet)), FUN=function(j){
				# logger.status(c(" Sample:",j))
				mm <- as.vector(meth(rnbSet, rr, j=j))
				cc <- as.vector(covg(rnbSet, rr, j=j))
				cc[cc==0] <- NA
				cc[is.na(mm)] <- NA
				qq <- quantile(cc, probs = c(0.25,0.75), na.rm=TRUE)
				res <- c(
					mean(cc, na.rm=TRUE),
					median(cc, na.rm=TRUE),
					qq[1],
					qq[2],
					sum(cc>=5, na.rm=TRUE),
					sum(cc>=10, na.rm=TRUE),
					sum(cc>=30, na.rm=TRUE),
					sum(cc>=60, na.rm=TRUE)
				)
				return(res)
			}))
			colnames(covgStats) <- c("covgMean", "covgMedian", "covgPerc25", "covgPerc75", "numCovg5", "numCovg10", "numCovg30", "numCovg60")
			tt.cur <- cbind(tt.cur, covgStats)
		}
		if (is.beads){
			## Detection p-values are defined per site only.
			if (rr == "sites"){
				pp <- dpval(rnbSet,type=rr)
				if (!is.null(pp)) {
					tt.cur$numDPval5em2 <- colSums(pp < 5e-2, na.rm=TRUE)
					tt.cur$numDPval1em2 <- colSums(pp < 1e-2, na.rm=TRUE)
					tt.cur$numDPval1em3 <- colSums(pp < 1e-3, na.rm=TRUE)
				}
			}
		}
		if (rr %in% reg.types.regions){
			regions2sites <- regionMapping(rnbSet,region.type=rr)
			#compute the number of sites per region and sample
			nsamples <- length(samples(rnbSet))
			## num.sites: regions x samples matrix counting the sites with a
			## valid (non-NA) methylation measurement within each region.
			num.sites <- sapply(1:nsamples,function(i){
				# logger.status(c(" Sample:",i))
				mm.s.nna <- !is.na(as.vector(meth(rnbSet, j=i)))
				sapply(1:nsites(rnbSet, rr),function(j){
					sum(mm.s.nna[regions2sites[[j]]])
				})
			})
			# num.sites2 <- t(sapply(1:nsites(rnbSet, rr),function(i){
			# 	# logger.status(c(" Site/Region:",i))
			# 	colSums(!is.na(meth(rnbSet, i=regions2sites[[i]])))
			# })) # a bit slower, but more memory effective, if to include later, check the code again
			tt.cur$numSitesMean <- colMeans(num.sites, na.rm=TRUE)
			tt.cur$numSitesMedian <- colMedians(num.sites, na.rm=TRUE)
			tt.cur$numSites2 <- colSums(num.sites>=2, na.rm=TRUE)
			tt.cur$numSites5 <- colSums(num.sites>=5, na.rm=TRUE)
			tt.cur$numSites10 <- colSums(num.sites>=10,na.rm=TRUE)
			tt.cur$numSites20 <- colSums(num.sites>=20,na.rm=TRUE)
		}
		## Prefix every statistic column with the region type, e.g. "sites_num".
		colnames(tt.cur) <- paste(rr,colnames(tt.cur),sep="_")
		tt <- data.frame(tt,tt.cur)
	}
	return(tt)
}
########################################################################################################################
|
# Command-line arguments: the significant-MR table followed by one result
# file per DMR-finding tool.
args <- commandArgs(TRUE)
path_to_sig_file <- as.character(args[1])
path_to_file_bsmooth <- as.character(args[2])
path_to_file_dss <- as.character(args[3])
path_to_file_methylkit <- as.character(args[4])
path_to_file_metilene <- as.character(args[5])
path_to_file_rnbeads <- as.character(args[6])
# Read all tables (read.table defaults: whitespace-separated, no header).
sig <- read.table(path_to_sig_file)
bsmooth <- read.table(path_to_file_bsmooth)
dss <- read.table(path_to_file_dss)
methylkit <- read.table(path_to_file_methylkit)
metilene <- read.table(path_to_file_metilene)
rnbeads <- read.table(path_to_file_rnbeads)
# Drop the unused 10th column of the MR table.
sig$V10 <- NULL
# MR_* columns: a value > 0 indicates the corresponding tool reported the
# region (used as inclusion flags in the matching loop below).
colnames(sig) <- c("MR_chr", "MR_start", "MR_end", "MR_BS", "MR_DS", "MR_MH", "MR_MT", "MR_RB", "MR_IDR")
colnames(bsmooth) <- c("bsmooth_chr", "bsmooth_start", "bsmooth_end", "bsmooth_cpgs", "bsmooth_methA", "bsmooth_mathB", "bsmooth_methDiff", "bsmooth_QM")
colnames(dss) <- c("dss_chr", "dss_start", "dss_end", "dss_cpgs", "dss_methA", "dss_mathB", "dss_methDiff", "dss_QM")
colnames(methylkit) <- c("methylkit_chr", "methylkit_start", "methylkit_end", "methylkit_cpgs", "methylkit_methA", "methylkit_mathB", "methylkit_methDiff", "methylkit_QM")
colnames(metilene) <- c("metilene_chr", "metilene_start", "metilene_end", "metilene_cpgs", "metilene_methA", "metilene_mathB", "metilene_methDiff", "metilene_QM")
colnames(rnbeads) <- c("rnbeads_chr", "rnbeads_start", "rnbeads_end", "rnbeads_cpgs", "rnbeads_methA", "rnbeads_mathB", "rnbeads_methDiff", "rnbeads_QM")
# One result column per tool; stays NA until a matching region is found.
sig$bsmooth <- NA
sig$dss <- NA
sig$methylkit <- NA
sig$metilene <- NA
sig$rnbeads <- NA
# Maps each output column of `sig` to the tool's result table, the column
# prefix used in that table, and the MR_* flag column that indicates the
# tool reported the region. Replaces five copy-pasted stanzas.
tools <- list(
  bsmooth   = list(df = bsmooth,   prefix = "bsmooth",   flag = "MR_BS"),
  dss       = list(df = dss,       prefix = "dss",       flag = "MR_DS"),
  methylkit = list(df = methylkit, prefix = "methylkit", flag = "MR_MH"),
  metilene  = list(df = metilene,  prefix = "metilene",  flag = "MR_MT"),
  rnbeads   = list(df = rnbeads,   prefix = "rnbeads",   flag = "MR_RB")
)
# Mean methylation difference of all tool regions fully contained in
# [start, end] on chromosome `chr`; NA when there is no such region.
mean_methdiff <- function(tool_df, prefix, chr, start, end) {
  hits <- tool_df[tool_df[[paste0(prefix, "_chr")]] == chr &
                    tool_df[[paste0(prefix, "_start")]] >= start &
                    tool_df[[paste0(prefix, "_end")]] <= end, , drop = FALSE]
  if (nrow(hits) == 0) {
    return(NA)
  }
  # mean() of a single value is that value, so one branch covers both the
  # single-match and the multiple-match (aggregate) cases.
  mean(hits[[paste0(prefix, "_methDiff")]])
}
for (i in seq_len(nrow(sig))) {
  if (i %% 1000 == 0) {
    print(i)  # progress indicator for long runs
  }
  chr <- sig[i, "MR_chr"]
  start <- sig[i, "MR_start"]
  end <- sig[i, "MR_end"]
  for (tool in names(tools)) {
    cfg <- tools[[tool]]
    # Only look up regions for tools that actually reported this MR.
    if (sig[i, cfg$flag] > 0) {
      sig[i, tool] <- mean_methdiff(cfg$df, cfg$prefix, chr, start, end)
    }
  }
}
# methylKit reports differences in percent; rescale to match the other tools.
sig$methylkit <- sig$methylkit/100
# Calculate mean from all available MethDiffs
sig$MeanDifference <- rowMeans(subset(sig, select = c(bsmooth, dss, methylkit, metilene, rnbeads)), na.rm = TRUE)
# Write to file
write.table(sig, file = "sig.tsv", sep = "\t", col.names = NA, quote = FALSE)
# Split data set: regions found by all five tools vs. all remaining regions.
sig_11111 <- sig[sig$MR_BS > 0 & sig$MR_DS > 0 & sig$MR_MH > 0 & sig$MR_MT > 0 & sig$MR_RB > 0, ]
sig_other <- sig[sig$MR_BS == 0 | sig$MR_DS == 0 | sig$MR_MH == 0 | sig$MR_MT == 0 | sig$MR_RB == 0, ]
# Perform (Welch) T test between the two groups
t.test(sig_11111$MeanDifference, sig_other$MeanDifference)
# Plot
library(ggplot2)
sig_11111$Category <- "Found by All Tools"
sig_other$Category <- "Other"
all <- subset(sig_11111, select=c("MeanDifference", "Category"))
other <- subset(sig_other, select=c("MeanDifference", "Category"))
comb <- rbind(all, other)
# Bug fixes: the x aesthetic referenced a non-existent column `Cat` (the
# column is named `Category`), and the manual palette must target the
# mapped `fill` aesthetic (scale_color_manual had no visible effect).
p <- ggplot(comb, aes(x=Category, y=MeanDifference, fill=Category)) + geom_boxplot()
p <- p + scale_fill_manual(values=c("#4C76A3", "#E15759"))
ggsave('boxplot.pdf', plot=p, device="pdf")
|
/01_scripts/additional_scripts/Evaluation/MethDiff.R
|
no_license
|
HeleneLuessem/Meta-Analysis-of-DMR-Finders
|
R
| false
| false
| 4,857
|
r
|
# Command-line arguments: the significant-MR table followed by one result
# file per DMR-finding tool.
args <- commandArgs(TRUE)
path_to_sig_file <- as.character(args[1])
path_to_file_bsmooth <- as.character(args[2])
path_to_file_dss <- as.character(args[3])
path_to_file_methylkit <- as.character(args[4])
path_to_file_metilene <- as.character(args[5])
path_to_file_rnbeads <- as.character(args[6])
# Read all tables (read.table defaults: whitespace-separated, no header).
sig <- read.table(path_to_sig_file)
bsmooth <- read.table(path_to_file_bsmooth)
dss <- read.table(path_to_file_dss)
methylkit <- read.table(path_to_file_methylkit)
metilene <- read.table(path_to_file_metilene)
rnbeads <- read.table(path_to_file_rnbeads)
# Drop the unused 10th column of the MR table.
sig$V10 <- NULL
# MR_* columns: a value > 0 indicates the corresponding tool reported the
# region (used as inclusion flags in the matching loop below).
colnames(sig) <- c("MR_chr", "MR_start", "MR_end", "MR_BS", "MR_DS", "MR_MH", "MR_MT", "MR_RB", "MR_IDR")
colnames(bsmooth) <- c("bsmooth_chr", "bsmooth_start", "bsmooth_end", "bsmooth_cpgs", "bsmooth_methA", "bsmooth_mathB", "bsmooth_methDiff", "bsmooth_QM")
colnames(dss) <- c("dss_chr", "dss_start", "dss_end", "dss_cpgs", "dss_methA", "dss_mathB", "dss_methDiff", "dss_QM")
colnames(methylkit) <- c("methylkit_chr", "methylkit_start", "methylkit_end", "methylkit_cpgs", "methylkit_methA", "methylkit_mathB", "methylkit_methDiff", "methylkit_QM")
colnames(metilene) <- c("metilene_chr", "metilene_start", "metilene_end", "metilene_cpgs", "metilene_methA", "metilene_mathB", "metilene_methDiff", "metilene_QM")
colnames(rnbeads) <- c("rnbeads_chr", "rnbeads_start", "rnbeads_end", "rnbeads_cpgs", "rnbeads_methA", "rnbeads_mathB", "rnbeads_methDiff", "rnbeads_QM")
# One result column per tool; stays NA until a matching region is found.
sig$bsmooth <- NA
sig$dss <- NA
sig$methylkit <- NA
sig$metilene <- NA
sig$rnbeads <- NA
# Maps each output column of `sig` to the tool's result table, the column
# prefix used in that table, and the MR_* flag column that indicates the
# tool reported the region. Replaces five copy-pasted stanzas.
tools <- list(
  bsmooth   = list(df = bsmooth,   prefix = "bsmooth",   flag = "MR_BS"),
  dss       = list(df = dss,       prefix = "dss",       flag = "MR_DS"),
  methylkit = list(df = methylkit, prefix = "methylkit", flag = "MR_MH"),
  metilene  = list(df = metilene,  prefix = "metilene",  flag = "MR_MT"),
  rnbeads   = list(df = rnbeads,   prefix = "rnbeads",   flag = "MR_RB")
)
# Mean methylation difference of all tool regions fully contained in
# [start, end] on chromosome `chr`; NA when there is no such region.
mean_methdiff <- function(tool_df, prefix, chr, start, end) {
  hits <- tool_df[tool_df[[paste0(prefix, "_chr")]] == chr &
                    tool_df[[paste0(prefix, "_start")]] >= start &
                    tool_df[[paste0(prefix, "_end")]] <= end, , drop = FALSE]
  if (nrow(hits) == 0) {
    return(NA)
  }
  # mean() of a single value is that value, so one branch covers both the
  # single-match and the multiple-match (aggregate) cases.
  mean(hits[[paste0(prefix, "_methDiff")]])
}
for (i in seq_len(nrow(sig))) {
  if (i %% 1000 == 0) {
    print(i)  # progress indicator for long runs
  }
  chr <- sig[i, "MR_chr"]
  start <- sig[i, "MR_start"]
  end <- sig[i, "MR_end"]
  for (tool in names(tools)) {
    cfg <- tools[[tool]]
    # Only look up regions for tools that actually reported this MR.
    if (sig[i, cfg$flag] > 0) {
      sig[i, tool] <- mean_methdiff(cfg$df, cfg$prefix, chr, start, end)
    }
  }
}
# methylKit reports differences in percent; rescale to match the other tools.
sig$methylkit <- sig$methylkit/100
# Calculate mean from all available MethDiffs
sig$MeanDifference <- rowMeans(subset(sig, select = c(bsmooth, dss, methylkit, metilene, rnbeads)), na.rm = TRUE)
# Write to file
write.table(sig, file = "sig.tsv", sep = "\t", col.names = NA, quote = FALSE)
# Split data set: regions found by all five tools vs. all remaining regions.
sig_11111 <- sig[sig$MR_BS > 0 & sig$MR_DS > 0 & sig$MR_MH > 0 & sig$MR_MT > 0 & sig$MR_RB > 0, ]
sig_other <- sig[sig$MR_BS == 0 | sig$MR_DS == 0 | sig$MR_MH == 0 | sig$MR_MT == 0 | sig$MR_RB == 0, ]
# Perform (Welch) T test between the two groups
t.test(sig_11111$MeanDifference, sig_other$MeanDifference)
# Plot
library(ggplot2)
sig_11111$Category <- "Found by All Tools"
sig_other$Category <- "Other"
all <- subset(sig_11111, select=c("MeanDifference", "Category"))
other <- subset(sig_other, select=c("MeanDifference", "Category"))
comb <- rbind(all, other)
# Bug fixes: the x aesthetic referenced a non-existent column `Cat` (the
# column is named `Category`), and the manual palette must target the
# mapped `fill` aesthetic (scale_color_manual had no visible effect).
p <- ggplot(comb, aes(x=Category, y=MeanDifference, fill=Category)) + geom_boxplot()
p <- p + scale_fill_manual(values=c("#4C76A3", "#E15759"))
ggsave('boxplot.pdf', plot=p, device="pdf")
|
##########################################################
# A modification of ITCsegment package by Michele Dalponte
# Changes made by Tom Swinfield for the RSPB under funding from the Cambridge Conservation Initiative
# Collaborative Fund
##########################################################
# Changes made:
# 160607: - Enabled the search window size to be scalable.
# - Tidied up some of the code.
# - Made the maxima search tool work from tallest to smallest.
# 160711: - Changed the distance of pixels to the local maximum to a radial distance rather than a straight-line distance.
# - Scale the maximum distance according to the allometry.
# The programme should work through the trees from tallest to shortest and identify the crown area on the fly;
# This would over come problems associated with climbing up the canopy on to adjacent crowns.
# !!! take the window scaling as linear for the moment. Later it would be worth converting to an appropriate allometry.
# !!! Make it possible to specify in the function an alternative height maximum to scale the search window;
# - if not given this will revert to the maximum tree height in the image - Although that probably isn't much good
# - either way a warning should be given if the tallest point in the image exceeds the max value provided by the user.
#rm(list=ls())
# Distance is for the radius (not diameter) in pixels (not meters):
#imagery = chm.plot.buffer; searchWinSize = 3; TRESHSeed = 0.45; TRESHCrown = 0.55; DIST = 10; specT = 0
#TRESHSeed=0.45 # Proportional height relative to tree maximum.
#TRESHCrown=0.55 # Proportional height relative to tree mean.
#DIST=8 # Distance in pixels from tree maximum.
#specT=2 # Minimum height in m for tree or crown.
#win.min = 3
#win.max = 19
#z.max = 45
#z.min = specT
# Fits a basic linear model to set up the relationship between tree height and window size:
# Fits a straight line mapping tree height (z) to search-window size (y).
# The model's predictor must be named "z" so that win.sizer() can supply
# newdata = data.frame(z = ...).
win.lmfun <- function(win.min, win.max, z.min, z.max) {
  calib <- data.frame(y = c(win.min, win.max), z = c(z.min, z.max))
  lm(y ~ z, data = calib)
}
# Calculates the window size when given the relationship between tree height and window size as a fitted model (fm.win)
# and z the tree heights:
# Predicts a search-window size for each tree height z from the calibration
# model fm.win, rounded to the nearest odd integer (an odd window has a
# centre pixel). Returns an unnamed numeric vector.
win.sizer <- function(z, fm.win) {
  predicted <- predict(fm.win, newdata = data.frame(z = z))
  # 2*round((x + 1)/2) - 1 snaps x to the closest odd number.
  unname(2 * round((predicted + 1) / 2) - 1)
}
# Extracts just the locations of the tree maxima:
itIMG_ts<-function (imagery = NULL, win.min, win.max, z.min, z.max, specT = 0)
{
  # Detects tree-top (local maximum) pixels in a canopy height model raster
  # and returns them as a SpatialPointsDataFrame with a "tree" id giving
  # detection order (tallest first). The search window scales linearly with
  # pixel height between win.min (at height z.min) and win.max (at z.max);
  # specT is the minimum height for a pixel to be considered at all.
  # Because maxima are claimed tallest-first, a taller tree suppresses
  # nearby shorter candidates inside its window.
  # !!! You need to make some appropriate checks of the arguments here !!!
  # Blurs the chm:
  imagery <- raster::focal(imagery, w = matrix(1, 3, 3),
                           fun = function(x) {
                             mean(x, na.rm = T)
                           })
  # Extracts the image data as a matrix:
  # NOTE(review): nrow/ncol are supplied positionally (data= is named), so
  # the matrix is ncol(imagery) x nrow(imagery) -- transposed raster data.
  Max <- matrix(dim(imagery)[2], dim(imagery)[1], data = imagery[,
  ], byrow = FALSE)
  # Flips the image the right way again:
  Max <- Max[1:dim(imagery)[2], dim(imagery)[1]:1]
  Gnew <- Max # Copies the max matrix.
  Max[, ] <- 0 # Sets max to 0s. This will be used later for storing ...
  Index <- Max # Copies max again.
  Index[, ] <- 0 # Sets index to 0s.
  Gnew[is.na(Gnew)] <- 0 # Sets any nas to 0s.
  Gnew[Gnew < specT] <- 0 # Any values beneath the minimum height are set to 0s.
  index = 1 # Initiates the index
  II <- which(Gnew != 0, arr.ind = T) # Extracts the locations of pixels which are bigger than the min tree height.
  dim(II)
  fm.win<-win.lmfun(win.min, win.max, z.min, z.max) # generates the relationship between height and search window size.
  # Extracts only the pixels that are sufficiently far from the image edge for the search window. In each direction.
  # The search window is selected according to the height of the tree:
  z<-Gnew[II[,1]+nrow(Gnew)*(II[,2]-1)] # extracts the tree heights from the matrix
  WinSize<-win.sizer(z, fm.win) # Finds the window size for each pixel
  half.WinSize<-ceiling(WinSize/2) # Halfs the window size for subsetting by pixels far enough from the image edge
  # Extracts only the pixels far enough from the image edge.
  II.ind<-II[, 1] >= half.WinSize &
    II[, 1] <= (nrow(Gnew) - half.WinSize) &
    II[,2] >= half.WinSize &
    II[,2] <= (ncol(Gnew) - half.WinSize)
  II<-II[II.ind,]
  WinSize<-WinSize[II.ind]
  z<-z[II.ind]
  dim(II)
  # reorder from greatest to least according to z value (i.e. big trees first):
  z.order<-order(z, decreasing = TRUE)
  II<-II[z.order,]
  WinSize<-WinSize[z.order]
  # Works through each pixel one by one:
  indexII<-1
  #indexII<-(1:nrow(II)[1])[1]
  for (indexII in 1:dim(II)[1])
  {
    r = as.numeric(II[indexII, 1]) # Extracts the row pos.
    k = as.numeric(II[indexII, 2]) # Extracts the column pos.
    searchWinSize<-WinSize[indexII]
    hc.sWS<-ceiling(searchWinSize/2) # half the search window size rounded to floor
    hf.sWS<-floor(searchWinSize/2) # half the search window size rounded to ceiling
    FIL <- matrix(searchWinSize, searchWinSize, data = NA)
    # Extracts the search window for the pixel:
    FIL <- Gnew[(r - hf.sWS):(r + hf.sWS),
                (k - hf.sWS):(k + hf.sWS)]
    # Extracts the window from Max indicating whether a tree has already being designated or not:
    Max.chk<-Max[(r - hf.sWS):(r + hf.sWS),
                 (k - hf.sWS):(k + hf.sWS)]
    # If the focal pixel has the greatest value in the window & there is no tree already assigned in the output matrix within the window & the max value is no 0...
    # because the order is from tallest to shortest, large trees will always suppress the designation of small trees.
    if (FIL[hc.sWS, hc.sWS] == max(FIL, na.rm = T) &
        max(Max.chk, na.rm = T) == 0 &
        max(FIL, na.rm = T) != 0)
    {
      Max[r, k] <- 1 # A logical assignment of tallest tree
      Index[r, k] <- index # Assigns the sequence in which the trees were found.
      index <- index + 1 # increments
    }
  }
  Ntrees <- index # Number of trees encountered.
  # converts back to a spatial grid:
  Cb <- imagery
  Mb <- imagery
  Cb[] <- as.numeric(Index[1:dim(Index)[1], dim(Index)[2]:1],
                     byrow = TRUE) # given the correct orientation again.
  Cb[Cb==0]<-NA # Excludes non maxima pixels.
  m2 <- methods::as(Cb, "SpatialGridDataFrame")
  m3 <- raster::raster(m2, layer = 1)
  m3.shp <- raster::rasterToPoints(m3, fun = , dissolve = TRUE)
  IT <- sp::SpatialPointsDataFrame(m3.shp, data = data.frame(tree=m3.shp[,'layer']),
                                   match.ID = F)
  return(IT)
}
itcIMG_ts<-function (imagery = NULL, win.min, win.max, z.min, z.max, TRESHSeed = 0.45,
                     TRESHCrown = 0.55, DIST = 10, specT = 0)
{
  # Segments individual tree crowns from a canopy height model (CHM).
  # Stage 1 finds tree-top pixels (local maxima, tallest first) using a
  # search window that scales linearly with height between win.min (at
  # z.min) and win.max (at z.max). Stage 2 grows each crown outwards from
  # its seed: a 4-neighbour pixel is admitted when it is tall enough
  # relative to the seed (TRESHSeed) and to the current crown mean
  # (TRESHCrown), no more than 5% taller than the seed, and within DIST
  # pixels (radial distance) of the seed. Returns a
  # SpatialPolygonsDataFrame of convex-hull crowns with area (CA_m2) and
  # height statistics (CH_mean, CH_max); crowns of area <= 1 are dropped.
  # !!! You need to make some appropriate checks of the arguments here !!!
  # Blurs the chm:
  imagery <- raster::focal(imagery, w = matrix(1, 3, 3),
                           fun = function(x) {
                             mean(x, na.rm = T)
                           })
  # Extracts the image data as a matrix:
  # NOTE(review): nrow/ncol are supplied positionally (data= is named), so
  # the matrix is ncol(imagery) x nrow(imagery) -- transposed raster data.
  Max <- matrix(dim(imagery)[2], dim(imagery)[1], data = imagery[,
  ], byrow = FALSE)
  # Flips the image the right way again:
  Max <- Max[1:dim(imagery)[2], dim(imagery)[1]:1]
  Gnew <- Max # Copies the max matrix.
  Max[, ] <- 0 # Sets max to 0s. This will be used later for storing ...
  Index <- Max # Copies max again.
  Index[, ] <- 0 # Sets index to 0s.
  Gnew[is.na(Gnew)] <- 0 # Sets any nas to 0s.
  Gnew[Gnew < specT] <- 0 # Any values beneath the minimum height are set to 0s.
  index = 1 # Initiates the index
  II <- which(Gnew != 0, arr.ind = T) # Extracts the locations of pixels which are bigger than the min tree height.
  dim(II)
  fm.win<-win.lmfun(win.min, win.max, z.min, z.max) # generates the relationship between height and search window size.
  # Extracts only the pixels that are sufficiently far from the image edge for the search window. In each direction.
  # The search window is selected according to the height of the tree:
  z<-Gnew[II[,1]+nrow(Gnew)*(II[,2]-1)] # extracts the tree heights from the matrix
  WinSize<-win.sizer(z, fm.win) # Finds the window size for each pixel
  half.WinSize<-ceiling(WinSize/2) # Halfs the window size for subsetting by pixels far enough from the image edge
  # Extracts only the pixels far enough from the image edge.
  II.ind<-II[, 1] >= half.WinSize &
    II[, 1] <= (nrow(Gnew) - half.WinSize) &
    II[,2] >= half.WinSize &
    II[,2] <= (ncol(Gnew) - half.WinSize)
  II<-II[II.ind,]
  WinSize<-WinSize[II.ind]
  z<-z[II.ind]
  dim(II)
  # reorder from greatest to least according to z value (i.e. big trees first):
  z.order<-order(z, decreasing = TRUE)
  II<-II[z.order,]
  WinSize<-WinSize[z.order]
  # Works through each pixel one by one:
  indexII<-1
  #indexII<-(1:nrow(II)[1])[1]
  for (indexII in 1:dim(II)[1])
  {
    r = as.numeric(II[indexII, 1]) # Extracts the row pos.
    k = as.numeric(II[indexII, 2]) # Extracts the column pos.
    searchWinSize<-WinSize[indexII]
    hc.sWS<-ceiling(searchWinSize/2) # half the search window size rounded to floor
    hf.sWS<-floor(searchWinSize/2) # half the search window size rounded to ceiling
    FIL <- matrix(searchWinSize, searchWinSize, data = NA)
    # Extracts the search window for the pixel:
    FIL <- Gnew[(r - hf.sWS):(r + hf.sWS),
                (k - hf.sWS):(k + hf.sWS)]
    # Extracts the window from Max indicating whether a tree has already being designated or not:
    Max.chk<-Max[(r - hf.sWS):(r + hf.sWS),
                 (k - hf.sWS):(k + hf.sWS)]
    # If the focal pixel has the greatest value in the window & there is no tree already assigned in the output matrix within the window & the max value is no 0...
    # because the order is from tallest to shortest, large trees will always suppress the designation of small trees.
    if (FIL[hc.sWS, hc.sWS] == max(FIL, na.rm = T) &
        max(Max.chk, na.rm = T) == 0 &
        max(FIL, na.rm = T) != 0)
    {
      Max[r, k] <- 1 # A logical assignment of tallest tree
      Index[r, k] <- index # Assigns the sequence in which the trees were found.
      index <- index + 1 # increments
    }
  }
  Ntrees <- index # Number of trees encountered.
  # NOTE(review): index starts at 1, so Ntrees > 0 is always TRUE here.
  if (Ntrees > 0)
  {
    # Extracts the chm values:
    Cb <- imagery
    Mb <- imagery
    Cb[] <- as.numeric(Gnew[1:dim(Gnew)[1], dim(Gnew)[2]:1],
                       byrow = TRUE)
    Mb[] <- as.numeric(Max[1:dim(Max)[1], dim(Max)[2]:1],
                       byrow = TRUE)
    Crowns <- Index # Assigns the crown sequence within the raster to Crowns
    OldCrowns <- Crowns # Copies crowns
    Check <- OldCrowns # Copies again.
    Check[, ] <- 0 # Sets check to 0.
    #filsize <- 3 # ????
    #Niter <- 100 # ????
    # Region growing: keep sweeping over crown pixels until a whole pass
    # adds no new pixel (it remains 0).
    it = 1
    while (it == 1)
    {
      it = 0
      II <- which(Crowns != 0 & Check == 0, arr.ind = T) # Extracts the crown pixels that have not been done yet; seems a bit inefficient.
      if (length(II) > 0) # should be nrow and not length.
      {
        #indexII<-(1:nrow(II))[1]
        for (indexII in 1:dim(II)[1]) # Works through all the crown pixels simultaneously
        {
          r = as.numeric(II[indexII, 1])
          k = as.numeric(II[indexII, 2]) # Extracts the tree location.
          if (r != 1 & r != dim(Gnew)[1] & k != 1 & k != dim(Gnew)[2])
          { # So longs as the pixel is not right on the boundary; !!! this might need to be changed depending on window size.
            ind <- Crowns[r, k]
            coordSeed <- which(Index == ind, arr.ind = TRUE) # This finds the tree seed location.
            coordCrown <- which(Crowns == ind, arr.ind = TRUE) # ... this finds the current areas occupied by the crown.
            rvSeed <- Gnew[coordSeed] # Extracts the tree height.
            rvCrown <- mean(Gnew[coordCrown], na.rm = T) # Extracts the mean tree height.
            # Makes a matrix of the coordinates and chm values for the adjacent cells (surrounding the focal pixel).
            filData <- matrix(4, 3, data = 0)
            filData[1, 1] <- r - 1
            filData[1, 2] <- k
            filData[1, 3] <- Gnew[r - 1, k]
            filData[2, 1] <- r
            filData[2, 2] <- k - 1
            filData[2, 3] <- Gnew[r, k - 1]
            filData[3, 1] <- r
            filData[3, 2] <- k + 1
            filData[3, 3] <- Gnew[r, k + 1]
            filData[4, 1] <- r + 1
            filData[4, 2] <- k
            filData[4, 3] <- Gnew[r + 1, k]
            # Calculates distance of pixels from the focal pixel
            fil.dists<-as.matrix(dist(rbind(coordSeed, filData[,1:2])))[1,-1]
            # Checks which (if any) of the values are greater than the max or mean tree heights adjusted by the thresholds.
            # and less than 5% taller than the max tree height.
            # and the euclidean distance from the maximum crown radius is less than the specified DIST.
            GFIL <- (filData[, 3] > (rvSeed * TRESHSeed) &
                     (filData[, 3] > (rvCrown * TRESHCrown)) &
                     (filData[, 3] <= (rvSeed + (rvSeed * 0.05))) &
                     (fil.dists<DIST))
            #(abs(coordSeed[1] - filData[,1]) < DIST) &
            #(abs(coordSeed[2] - filData[, 2]) < DIST))
            filData <- filData[GFIL, ] # Subsets filData by the decision tree output.
            if (length(filData) > 3)
            {
              # pp<-nrow(filData)[1]
              # Assigns each remaining pixel to the crown:
              for (pp in 1:dim(filData)[1])
              {
                rr <- filData[pp, 1]
                kk <- filData[pp, 2]
                if (Crowns[rr, kk] == 0 & Gnew[rr, kk] != 0)
                {
                  Crowns[rr, kk] <- Crowns[r, k]
                  it <- 1
                }
              }
            }
          }
        }
      }
      Check <- OldCrowns # Marks the pixels that have been completed:
      OldCrowns <- Crowns # Recreates the oldcrowns matrix.
    }
    Cb <- imagery
    Mb <- imagery
    Cb[] <- as.numeric(Crowns[1:dim(Crowns)[1], dim(Crowns)[2]:1],
                       byrow = TRUE)
    Mb[] <- as.numeric(Max[1:dim(Max)[1], dim(Max)[2]:1],
                       byrow = TRUE)
    m2 <- methods::as(Cb, "SpatialGridDataFrame")
    m3 <- raster::raster(m2, layer = 1)
    m3.shp <- raster::rasterToPolygons(m3, fun = , dissolve = TRUE)
    # Extracts the mean and max tree heights:
    names(m3.shp@data) <- "tree"
    HyperCrowns <- m3.shp[m3.shp@data[, 1] != 0, ]
    # Shrink each crown polygon by half a pixel before taking convex hulls.
    HCbuf <- rgeos::gBuffer(HyperCrowns, width = -res(imagery)[1]/2, byid = T)
    HyperCrowns<-HyperCrowns[HyperCrowns$tree %in% HCbuf$tree,] # excludes any trees that have been buffered out.
    ITCcv <- rgeos::gConvexHull(HCbuf, byid = T)
    ITCcvSD <- sp::SpatialPolygonsDataFrame(ITCcv, data = HyperCrowns@data, match.ID = F)
    #ITCcvSD <- sp::SpatialPolygonsDataFrame(HyperCrowns, data = HyperCrowns@data, match.ID = F)
    ITCcvSD$CA_m2 <- unlist(lapply(ITCcvSD@polygons, function(x) methods::slot(x, "area")))
    # Adds the tree height info:
    u.trees<-ITCcvSD$tree
    CH<-lapply(u.trees, function(indexIII)
    {
      CH<-Gnew[Crowns==indexIII]
      data.frame(tree=indexIII, CH_mean=mean(CH, na.rm=TRUE), CH_max=max(CH, na.rm=TRUE))
    }
    )
    CH<-do.call(rbind, CH)
    ITCcvSD$CH_mean <- CH$CH_mean
    ITCcvSD$CH_max <- CH$CH_max
    ITCcvSD <- ITCcvSD[ITCcvSD$CA_m2 > 1, ] # excludes the trees smaller than 1m^2
    # NOTE(review): this assigns to a local variable literally named
    # "return" -- the function still yields ITCcvSD because the assignment
    # is the last evaluated expression, but return(ITCcvSD) would be clearer.
    return<-ITCcvSD
    # if (exists("ITCcvSD")) {
    # return <- ITCcvSD[, -1] # excludes the tree label.
    # }
  }
}
|
/ITC segment update.R
|
no_license
|
swinersha/Tree-crown-segmentation
|
R
| false
| false
| 15,833
|
r
|
##########################################################
# A modification of ITCsegment package by Michele Dalponte
# Changes made by Tom Swinfield for the RSPB under funding from the Cambridge Conservation Initiative
# Collaborative Fund
##########################################################
# Changes made:
# 160607: - Enabled the search window size to be scalable.
# - Tidied up some of the code.
# - Made the maxima search tool work from tallest to smallest.
# 160711: - Changed the distance of pixels to the local maximum to a radial distance rather than a straight-line distance.
# - Scale the maximum distance according to the allometry.
# The programme should work through the trees from tallest to shortest and identify the crown area on the fly;
# This would over come problems associated with climbing up the canopy on to adjacent crowns.
# !!! take the window scaling as linear for the moment. Later it would be worth converting to an appropriate allometry.
# !!! Make it possible to specify in the function an alternative height maximum to scale the search window;
# - if not given this will revert to the maximum tree height in the image - Although that probably isn't much good
# - either way a warning should be given if the tallest point in the image exceeds the max value provided by the user.
#rm(list=ls())
# Distance is for the radius (not diameter) in pixels (not meters):
#imagery = chm.plot.buffer; searchWinSize = 3; TRESHSeed = 0.45; TRESHCrown = 0.55; DIST = 10; specT = 0
#TRESHSeed=0.45 # Proportional height relative to tree maximum.
#TRESHCrown=0.55 # Proportional height relative to tree mean.
#DIST=8 # Distance in pixels from tree maximum.
#specT=2 # Minimum height in m for tree or crown.
#win.min = 3
#win.max = 19
#z.max = 45
#z.min = specT
# Fits a basic linear model to set up the relationship between tree height and window size:
# Fits a straight line mapping tree height (z) to search-window size (y).
# The model's predictor must be named "z" so that win.sizer() can supply
# newdata = data.frame(z = ...).
win.lmfun <- function(win.min, win.max, z.min, z.max) {
  calib <- data.frame(y = c(win.min, win.max), z = c(z.min, z.max))
  lm(y ~ z, data = calib)
}
# Calculates the window size when given the relationship between tree height and window size as a fitted model (fm.win)
# and z the tree heights:
# Predicts a search-window size for each tree height z from the calibration
# model fm.win, rounded to the nearest odd integer (an odd window has a
# centre pixel). Returns an unnamed numeric vector.
win.sizer <- function(z, fm.win) {
  predicted <- predict(fm.win, newdata = data.frame(z = z))
  # 2*round((x + 1)/2) - 1 snaps x to the closest odd number.
  unname(2 * round((predicted + 1) / 2) - 1)
}
# Extracts just the locations of the tree maxima:
itIMG_ts<-function (imagery = NULL, win.min, win.max, z.min, z.max, specT = 0)
{
  # Detects tree-top (local maximum) pixels in a canopy height model raster
  # and returns them as a SpatialPointsDataFrame with a "tree" id giving
  # detection order (tallest first). The search window scales linearly with
  # pixel height between win.min (at height z.min) and win.max (at z.max);
  # specT is the minimum height for a pixel to be considered at all.
  # Because maxima are claimed tallest-first, a taller tree suppresses
  # nearby shorter candidates inside its window.
  # !!! You need to make some appropriate checks of the arguments here !!!
  # Blurs the chm:
  imagery <- raster::focal(imagery, w = matrix(1, 3, 3),
                           fun = function(x) {
                             mean(x, na.rm = T)
                           })
  # Extracts the image data as a matrix:
  # NOTE(review): nrow/ncol are supplied positionally (data= is named), so
  # the matrix is ncol(imagery) x nrow(imagery) -- transposed raster data.
  Max <- matrix(dim(imagery)[2], dim(imagery)[1], data = imagery[,
  ], byrow = FALSE)
  # Flips the image the right way again:
  Max <- Max[1:dim(imagery)[2], dim(imagery)[1]:1]
  Gnew <- Max # Copies the max matrix.
  Max[, ] <- 0 # Sets max to 0s. This will be used later for storing ...
  Index <- Max # Copies max again.
  Index[, ] <- 0 # Sets index to 0s.
  Gnew[is.na(Gnew)] <- 0 # Sets any nas to 0s.
  Gnew[Gnew < specT] <- 0 # Any values beneath the minimum height are set to 0s.
  index = 1 # Initiates the index
  II <- which(Gnew != 0, arr.ind = T) # Extracts the locations of pixels which are bigger than the min tree height.
  dim(II)
  fm.win<-win.lmfun(win.min, win.max, z.min, z.max) # generates the relationship between height and search window size.
  # Extracts only the pixels that are sufficiently far from the image edge for the search window. In each direction.
  # The search window is selected according to the height of the tree:
  z<-Gnew[II[,1]+nrow(Gnew)*(II[,2]-1)] # extracts the tree heights from the matrix
  WinSize<-win.sizer(z, fm.win) # Finds the window size for each pixel
  half.WinSize<-ceiling(WinSize/2) # Halfs the window size for subsetting by pixels far enough from the image edge
  # Extracts only the pixels far enough from the image edge.
  II.ind<-II[, 1] >= half.WinSize &
    II[, 1] <= (nrow(Gnew) - half.WinSize) &
    II[,2] >= half.WinSize &
    II[,2] <= (ncol(Gnew) - half.WinSize)
  II<-II[II.ind,]
  WinSize<-WinSize[II.ind]
  z<-z[II.ind]
  dim(II)
  # reorder from greatest to least according to z value (i.e. big trees first):
  z.order<-order(z, decreasing = TRUE)
  II<-II[z.order,]
  WinSize<-WinSize[z.order]
  # Works through each pixel one by one:
  indexII<-1
  #indexII<-(1:nrow(II)[1])[1]
  for (indexII in 1:dim(II)[1])
  {
    r = as.numeric(II[indexII, 1]) # Extracts the row pos.
    k = as.numeric(II[indexII, 2]) # Extracts the column pos.
    searchWinSize<-WinSize[indexII]
    hc.sWS<-ceiling(searchWinSize/2) # half the search window size rounded to floor
    hf.sWS<-floor(searchWinSize/2) # half the search window size rounded to ceiling
    FIL <- matrix(searchWinSize, searchWinSize, data = NA)
    # Extracts the search window for the pixel:
    FIL <- Gnew[(r - hf.sWS):(r + hf.sWS),
                (k - hf.sWS):(k + hf.sWS)]
    # Extracts the window from Max indicating whether a tree has already being designated or not:
    Max.chk<-Max[(r - hf.sWS):(r + hf.sWS),
                 (k - hf.sWS):(k + hf.sWS)]
    # If the focal pixel has the greatest value in the window & there is no tree already assigned in the output matrix within the window & the max value is no 0...
    # because the order is from tallest to shortest, large trees will always suppress the designation of small trees.
    if (FIL[hc.sWS, hc.sWS] == max(FIL, na.rm = T) &
        max(Max.chk, na.rm = T) == 0 &
        max(FIL, na.rm = T) != 0)
    {
      Max[r, k] <- 1 # A logical assignment of tallest tree
      Index[r, k] <- index # Assigns the sequence in which the trees were found.
      index <- index + 1 # increments
    }
  }
  Ntrees <- index # Number of trees encountered.
  # converts back to a spatial grid:
  Cb <- imagery
  Mb <- imagery
  Cb[] <- as.numeric(Index[1:dim(Index)[1], dim(Index)[2]:1],
                     byrow = TRUE) # given the correct orientation again.
  Cb[Cb==0]<-NA # Excludes non maxima pixels.
  m2 <- methods::as(Cb, "SpatialGridDataFrame")
  m3 <- raster::raster(m2, layer = 1)
  m3.shp <- raster::rasterToPoints(m3, fun = , dissolve = TRUE)
  IT <- sp::SpatialPointsDataFrame(m3.shp, data = data.frame(tree=m3.shp[,'layer']),
                                   match.ID = F)
  return(IT)
}
# Delineates individual tree crowns from a canopy-height-model (CHM) raster.
# Stage 1 finds tree-top seed pixels exactly as in itIMG_ts() (local maxima
# within a height-dependent search window, processed tallest-first).
# Stage 2 grows a crown out from each seed by iterative region growing: a
# 4-neighbour pixel joins a crown when it exceeds TRESHSeed * seed height
# AND TRESHCrown * current mean crown height, is no more than 5% taller
# than the seed, and lies within DIST of the seed.
# Returns a SpatialPolygonsDataFrame of convex-hull crown polygons with
# crown area (CA_m2) and mean/max crown height (CH_mean, CH_max) columns.
#
# Arguments:
#   imagery    - CHM raster (raster::RasterLayer).
#   win.min, win.max - smallest/largest local-maximum search window (pixels).
#   z.min, z.max     - heights mapped onto win.min/win.max by win.lmfun().
#   TRESHSeed  - crown pixel must exceed TRESHSeed * seed height.
#   TRESHCrown - crown pixel must exceed TRESHCrown * mean crown height.
#   DIST       - maximum crown radius (distance from the seed, in pixels).
#   specT      - minimum height for a pixel to be considered at all.
itcIMG_ts<-function (imagery = NULL, win.min, win.max, z.min, z.max, TRESHSeed = 0.45,
    TRESHCrown = 0.55, DIST = 10, specT = 0)
{
# !!! You need to make some appropriate checks of the arguments here !!!
# Blurs the chm:
imagery <- raster::focal(imagery, w = matrix(1, 3, 3),
    fun = function(x) {
        mean(x, na.rm = T)
    })
# Extracts the image data as a matrix:
Max <- matrix(dim(imagery)[2], dim(imagery)[1], data = imagery[,
    ], byrow = FALSE)
# Flips the image the right way again:
Max <- Max[1:dim(imagery)[2], dim(imagery)[1]:1]
Gnew <- Max # Copies the max matrix.
Max[, ] <- 0 # Sets max to 0s. This will be used later for storing ...
Index <- Max # Copies max again.
Index[, ] <- 0 # Sets index to 0s.
Gnew[is.na(Gnew)] <- 0 # Sets any nas to 0s.
Gnew[Gnew < specT] <- 0 # Any values beneath the minimum height are set to 0s.
index = 1 # Initiates the index
II <- which(Gnew != 0, arr.ind = T) # Extracts the locations of pixels which are bigger than the min tree height.
dim(II)
fm.win<-win.lmfun(win.min, win.max, z.min, z.max) # generates the relationship between height and search window size.
# Extracts only the pixels that are sufficiently far from the image edge for the search window. In each direction.
# The search window is selected according to the height of the tree:
z<-Gnew[II[,1]+nrow(Gnew)*(II[,2]-1)] # extracts the tree heights from the matrix
WinSize<-win.sizer(z, fm.win) # Finds the window size for each pixel
half.WinSize<-ceiling(WinSize/2) # Halfs the window size for subsetting by pixels far enough from the image edge
# Extracts only the pixels far enough from the image edge.
II.ind<-II[, 1] >= half.WinSize &
    II[, 1] <= (nrow(Gnew) - half.WinSize) &
    II[,2] >= half.WinSize &
    II[,2] <= (ncol(Gnew) - half.WinSize)
# NOTE(review): II[II.ind,] collapses to a vector if exactly one candidate
# survives (no drop = FALSE) -- confirm this cannot happen in practice.
II<-II[II.ind,]
WinSize<-WinSize[II.ind]
z<-z[II.ind]
dim(II)
# reorder from greatest to least according to z value (i.e. big trees first):
z.order<-order(z, decreasing = TRUE)
II<-II[z.order,]
WinSize<-WinSize[z.order]
# Works through each pixel one by one:
indexII<-1
#indexII<-(1:nrow(II)[1])[1]
for (indexII in 1:dim(II)[1])
{
    r = as.numeric(II[indexII, 1]) # Extracts the row pos.
    k = as.numeric(II[indexII, 2]) # Extracts the column pos.
    searchWinSize<-WinSize[indexII]
    hc.sWS<-ceiling(searchWinSize/2) # half the search window size rounded to ceiling (centre index)
    hf.sWS<-floor(searchWinSize/2) # half the search window size rounded to floor
    FIL <- matrix(searchWinSize, searchWinSize, data = NA)
    # Extracts the search window for the pixel:
    FIL <- Gnew[(r - hf.sWS):(r + hf.sWS),
                (k - hf.sWS):(k + hf.sWS)]
    # Extracts the window from Max indicating whether a tree has already being designated or not:
    Max.chk<-Max[(r - hf.sWS):(r + hf.sWS),
                 (k - hf.sWS):(k + hf.sWS)]
    # If the focal pixel has the greatest value in the window & there is no tree already assigned in the output matrix within the window & the max value is no 0...
    # because the order is from tallest to shortest, large trees will always suppress the designation of small trees.
    if (FIL[hc.sWS, hc.sWS] == max(FIL, na.rm = T) &
        max(Max.chk, na.rm = T) == 0 &
        max(FIL, na.rm = T) != 0)
    {
        Max[r, k] <- 1 # A logical assignment of tallest tree
        Index[r, k] <- index # Assigns the sequence in which the trees were found.
        index <- index + 1 # increments
    }
}
# NOTE(review): index starts at 1 and is incremented AFTER each tree, so
# Ntrees is one more than the number of trees found and the `Ntrees > 0`
# guard below is always true -- likely should be `index - 1`.
Ntrees <- index # Number of trees encountered.
if (Ntrees > 0)
{
    # Extracts the chm values:
    Cb <- imagery
    Mb <- imagery
    Cb[] <- as.numeric(Gnew[1:dim(Gnew)[1], dim(Gnew)[2]:1],
        byrow = TRUE)
    Mb[] <- as.numeric(Max[1:dim(Max)[1], dim(Max)[2]:1],
        byrow = TRUE)
    Crowns <- Index # Assigns the crown sequence within the raster to Crowns
    OldCrowns <- Crowns # Copies crowns
    Check <- OldCrowns # Copies again.
    Check[, ] <- 0 # Sets check to 0.
    #filsize <- 3 # ????
    #Niter <- 100 # ????
    # Region growing: each pass tries to add the 4-neighbours of every
    # current crown pixel; `it` stays 1 while any pixel was added, so the
    # loop runs until no crown grows any further.
    it = 1
    while (it == 1)
    {
        it = 0
        II <- which(Crowns != 0 & Check == 0, arr.ind = T) # Extracts the crown pixels that have not been done yet; seems a bit inefficient.
        if (length(II) > 0) # should be nrow and not length.
        {
            #indexII<-(1:nrow(II))[1]
            for (indexII in 1:dim(II)[1]) # Works through all the crown pixels simultaneously
            {
                r = as.numeric(II[indexII, 1])
                k = as.numeric(II[indexII, 2]) # Extracts the tree location.
                if (r != 1 & r != dim(Gnew)[1] & k != 1 & k != dim(Gnew)[2])
                { # So longs as the pixel is not right on the boundary; !!! this might need to be changed depending on window size.
                    ind <- Crowns[r, k]
                    coordSeed <- which(Index == ind, arr.ind = TRUE) # This finds the tree seed location.
                    coordCrown <- which(Crowns == ind, arr.ind = TRUE) # ... this finds the current areas occupied by the crown.
                    rvSeed <- Gnew[coordSeed] # Extracts the tree height.
                    rvCrown <- mean(Gnew[coordCrown], na.rm = T) # Extracts the mean tree height.
                    # Makes a matrix of the coordinates and chm values for the adjacent cells (surrounding the focal pixel).
                    filData <- matrix(4, 3, data = 0)
                    filData[1, 1] <- r - 1
                    filData[1, 2] <- k
                    filData[1, 3] <- Gnew[r - 1, k]
                    filData[2, 1] <- r
                    filData[2, 2] <- k - 1
                    filData[2, 3] <- Gnew[r, k - 1]
                    filData[3, 1] <- r
                    filData[3, 2] <- k + 1
                    filData[3, 3] <- Gnew[r, k + 1]
                    filData[4, 1] <- r + 1
                    filData[4, 2] <- k
                    filData[4, 3] <- Gnew[r + 1, k]
                    # Calculates distance of pixels from the seed pixel
                    fil.dists<-as.matrix(dist(rbind(coordSeed, filData[,1:2])))[1,-1]
                    # Checks which (if any) of the values are greater than the max or mean tree heights adjusted by the thresholds.
                    # and less than 5% taller than the max tree height.
                    # and the euclidean distance from the maximum crown radius is less than the specified DIST.
                    GFIL <- (filData[, 3] > (rvSeed * TRESHSeed) &
                        (filData[, 3] > (rvCrown * TRESHCrown)) &
                        (filData[, 3] <= (rvSeed + (rvSeed * 0.05))) &
                        (fil.dists<DIST))
                    #(abs(coordSeed[1] - filData[,1]) < DIST) &
                    #(abs(coordSeed[2] - filData[, 2]) < DIST))
                    filData <- filData[GFIL, ] # Subsets filData by the decision tree output.
                    if (length(filData) > 3)
                    {
                        # pp<-nrow(filData)[1]
                        # Assigns each remaining pixel to the crown:
                        for (pp in 1:dim(filData)[1])
                        {
                            rr <- filData[pp, 1]
                            kk <- filData[pp, 2]
                            if (Crowns[rr, kk] == 0 & Gnew[rr, kk] != 0)
                            {
                                Crowns[rr, kk] <- Crowns[r, k]
                                it <- 1
                            }
                        }
                    }
                }
            }
        }
        Check <- OldCrowns # Marks the pixels that have been completed:
        OldCrowns <- Crowns # Recreates the oldcrowns matrix.
    }
    # Converts the crown matrix back to a raster (un-flipped):
    Cb <- imagery
    Mb <- imagery
    Cb[] <- as.numeric(Crowns[1:dim(Crowns)[1], dim(Crowns)[2]:1],
        byrow = TRUE)
    Mb[] <- as.numeric(Max[1:dim(Max)[1], dim(Max)[2]:1],
        byrow = TRUE)
    m2 <- methods::as(Cb, "SpatialGridDataFrame")
    m3 <- raster::raster(m2, layer = 1)
    m3.shp <- raster::rasterToPolygons(m3, fun = , dissolve = TRUE)
    # Extracts the mean and max tree heights:
    names(m3.shp@data) <- "tree"
    HyperCrowns <- m3.shp[m3.shp@data[, 1] != 0, ]
    # Shrinks each crown polygon by half a pixel before taking convex hulls:
    HCbuf <- rgeos::gBuffer(HyperCrowns, width = -res(imagery)[1]/2, byid = T)
    HyperCrowns<-HyperCrowns[HyperCrowns$tree %in% HCbuf$tree,] # excludes any trees that have been buffered out.
    ITCcv <- rgeos::gConvexHull(HCbuf, byid = T)
    ITCcvSD <- sp::SpatialPolygonsDataFrame(ITCcv, data = HyperCrowns@data, match.ID = F)
    #ITCcvSD <- sp::SpatialPolygonsDataFrame(HyperCrowns, data = HyperCrowns@data, match.ID = F)
    ITCcvSD$CA_m2 <- unlist(lapply(ITCcvSD@polygons, function(x) methods::slot(x, "area")))
    # Adds the tree height info (mean and max CHM value per crown):
    u.trees<-ITCcvSD$tree
    CH<-lapply(u.trees, function(indexIII)
    {
        CH<-Gnew[Crowns==indexIII]
        data.frame(tree=indexIII, CH_mean=mean(CH, na.rm=TRUE), CH_max=max(CH, na.rm=TRUE))
    }
    )
    CH<-do.call(rbind, CH)
    ITCcvSD$CH_mean <- CH$CH_mean
    ITCcvSD$CH_max <- CH$CH_max
    ITCcvSD <- ITCcvSD[ITCcvSD$CA_m2 > 1, ] # excludes the trees smaller than 1m^2
    # NOTE(review): this assigns to a local variable literally named
    # `return`; it does NOT call return().  The function's value is the
    # (invisible) value of this last evaluated expression, i.e. ITCcvSD.
    return<-ITCcvSD
    # if (exists("ITCcvSD")) {
    # return <- ITCcvSD[, -1] # excludes the tree label.
    # }
}
}
|
# plot5.R -- total PM2.5 emissions from motor-vehicle sources in Baltimore City.
# Reads the NEI summary data, subsets to Baltimore City (fips == "24510")
# ON-ROAD sources, totals Emissions per year, and saves plot5.png.
PM25data <- readRDS("summarySCC_PM25.rds")
##use type ON-ROAD to define motor vehicle source
PM25data_BC_vehicle <- subset(PM25data, fips == "24510" & type == "ON-ROAD")
PM25data_BC_vehicle_totalbyyear <- tapply(PM25data_BC_vehicle$Emissions, PM25data_BC_vehicle$year, sum, na.rm=TRUE)
# The years come back as the *names* of the tapply result; convert them
# explicitly so the x axis is numeric instead of relying on coercion.
years <- as.numeric(names(PM25data_BC_vehicle_totalbyyear))
# Write straight to the png device: dev.copy() after an on-screen plot fails
# under non-interactive Rscript (no screen device) and ignores plot sizing.
png(filename = "plot5.png", width = 480, height = 480)
plot(years, PM25data_BC_vehicle_totalbyyear, xlab = "Year", ylab = "Total Emissions from Motor Vehicle Sources in Baltimore City", pch=19)
lines(years, PM25data_BC_vehicle_totalbyyear)
dev.off()
|
/plot5.R
|
no_license
|
wenlytang/ExData_Plotting2
|
R
| false
| false
| 595
|
r
|
# plot5: yearly totals of PM2.5 emissions from motor-vehicle (ON-ROAD)
# sources in Baltimore City (fips "24510"), plotted and saved as plot5.png.
nei <- readRDS("summarySCC_PM25.rds")
##use type ON-ROAD to define motor vehicle source
baltimore_vehicle <- subset(nei, fips == "24510" & type == "ON-ROAD")
# One summed emission value per measurement year.
yearly_total <- tapply(baltimore_vehicle$Emissions, baltimore_vehicle$year, sum, na.rm = TRUE)
plot(names(yearly_total), yearly_total,
     xlab = "Year",
     ylab = "Total Emissions from Motor Vehicle Sources in Baltimore City",
     pch = 19)
lines(names(yearly_total), yearly_total)
# Copy the screen plot to a png file, then close the file device.
dev.copy(png, file = "plot5.png")
dev.off()
|
# One row per patient (first record kept) for all survival analyses below.
germline_patient_surv <- germline_patient_data %>% distinct(avatar_id, .keep_all = TRUE)
################################################################################### I ### PFS/OS hct date by HCT----

# Kaplan-Meier plot wrapper: every survival figure in this script uses the
# same survminer theme, so the ~28 lines of boilerplate are factored out.
#   fit         - a survfit object.
#   title       - plot title.
#   legend_font - legend font size (plots with long strata labels use 10).
#   ...         - forwarded to ggsurvplot() (e.g. color =, linetype =).
# Returns the ggsurvplot object; at top level it auto-prints, exactly as the
# original bare ggsurvplot() calls did.
plot_km <- function(fit, title, legend_font = 20, ...) {
  ggsurvplot(fit, data = germline_patient_surv,
             title = title,
             font.main = c(24, "bold", "black"),
             font.x = c(20, "bold", "black"),
             font.y = c(20, "bold", "black"),
             font.legend = c(legend_font, "bold", "black"),
             font.tickslab = c(18, "bold", "black"),
             size = 1.5,
             xlab = "Time in months",
             legend = "top",
             legend.title = "",
             pval = TRUE,
             conf.int = FALSE,
             # Risk table
             tables.height = 0.3,
             risk.table.title = "Risk table (number(%))",
             risk.table = "abs_pct",
             risk.table.y.text = FALSE,
             risk.table.fontsize = 6,
             tables.theme = theme_survminer(base_size = 5,
                                            font.main = c(16, "bold", "black"),
                                            font.x = c(16, "bold", "black"),
                                            font.y = c(16, "bold", "transparent"),
                                            font.tickslab = c(19, "bold", "black")),
             censor = TRUE,  # ggsurvplot default; some originals omitted it
             ...)
}

# --- PFS, time measured from diagnosis --------------------------------------
mysurv <- Surv(time = germline_patient_surv$month_at_progression_Dx, event = germline_patient_surv$Progression_event)
plot_km(survfit(mysurv ~ pfs_hct, data = germline_patient_surv),
        "PFS HCT from Dx (pfs_hct is when HCT <or= germline date)")
plot_km(survfit(mysurv ~ pfs_hct + CH_status, data = germline_patient_surv),
        "PFS HCT from Dx (pfs_hct is when HCT <or= germline date)")
plot_km(survfit(mysurv ~ HCT_ever, data = germline_patient_surv),
        "PFS HCT from Dx (general HCT yes/no)")

# --- PFS, time measured from the HCT date -----------------------------------
mysurv <- Surv(time = germline_patient_surv$month_at_progression_hct, event = germline_patient_surv$hct_progression_event)
plot_km(survfit(mysurv ~ pfs_hct, data = germline_patient_surv),
        "PFS HCT from HCT date (pfs_hct is when HCT <or= germline date)")
plot_km(survfit(mysurv ~ pfs_hct + CH_status, data = germline_patient_surv),
        "PFS HCT from HCT date", legend_font = 10)
plot_km(survfit(mysurv ~ HCT_ever, data = germline_patient_surv),
        "PFS HCT from HCT date (general HCT yes/no)", legend_font = 10)

# --- OS ---------------------------------------------------------------------
mysurv <- Surv(time = germline_patient_surv$month_at_os, event = germline_patient_surv$os_event)
plot_km(survfit(mysurv ~ pfs_hct, data = germline_patient_surv),
        "OS HCT (pfs_hct is when HCT <or= germline date)")
plot_km(survfit(mysurv ~ pfs_hct + CH_status, data = germline_patient_surv),
        "OS HCT (pfs_hct is when HCT <or= germline date)")
plot_km(survfit(mysurv ~ HCT_ever, data = germline_patient_surv),
        "OS HCT (general HCT yes/no)")

# --- PFS from the HCT date, replotted ---------------------------------------
# NOTE(review): the original script repeated this figure (identical to the
# one above); kept to preserve the output sequence and the final value of
# `mysurv`.
mysurv <- Surv(time = germline_patient_surv$month_at_progression_hct, event = germline_patient_surv$hct_progression_event)
plot_km(survfit(mysurv ~ pfs_hct, data = germline_patient_surv),
        "PFS HCT from HCT date (pfs_hct is when HCT <or= germline date)")
# Cox prop hazard----
# Univariable Cox regression of OS on pfs_hct alone.
germline_patient_surv %>% select(pfs_hct) %>%
  tbl_uvregression(method = survival::coxph,
                   y = (Surv(time = germline_patient_surv$month_at_os,
                             event = germline_patient_surv$os_event)),
                   exponentiate = TRUE) %>% bold_p(t = .05) %>% add_nevent() %>%
  bold_labels() %>% italicize_levels()
# Univariate (pfs_hct, ISS separately) vs multivariable (pfs_hct + ISS),
# merged side by side.
tbl1 <- germline_patient_surv %>% select(pfs_hct, ISS) %>%
  tbl_uvregression(method = survival::coxph,
                   y = (Surv(time = germline_patient_surv$month_at_os,
                             event = germline_patient_surv$os_event)),
                   exponentiate = TRUE) %>% bold_p(t = .05) %>% add_nevent() %>%
  bold_labels() %>% italicize_levels()
tbl2 <- coxph(Surv(time = germline_patient_surv$month_at_os,
                   event = germline_patient_surv$os_event) ~ pfs_hct + ISS, data = germline_patient_surv) %>%
  tbl_regression(exponentiate = TRUE)
tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Univariate**", "**Multivariate**"))
# Same comparison with the grouped ISS variable.
tbl1 <- germline_patient_surv %>% select(pfs_hct, ISS_grp) %>%
  tbl_uvregression(method = survival::coxph,
                   y = (Surv(time = germline_patient_surv$month_at_os,
                             event = germline_patient_surv$os_event)),
                   exponentiate = TRUE) %>% bold_p(t = .05) %>% add_nevent() %>%
  bold_labels() %>% italicize_levels()
tbl2 <- coxph(Surv(time = germline_patient_surv$month_at_os,
                   event = germline_patient_surv$os_event) ~ pfs_hct + ISS_grp, data = germline_patient_surv) %>%
  tbl_regression(exponentiate = TRUE)
tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Univariate**", "**Multivariate**"))
# Multivariable model adding CH status.
# NOTE(review): this model appeared twice verbatim in the original script;
# the duplicate was removed.
coxph(Surv(time = germline_patient_surv$month_at_os,
           event = germline_patient_surv$os_event) ~ pfs_hct + CH_status + ISS_grp, data = germline_patient_surv) %>%
  tbl_regression(exponentiate = TRUE)
# CH status: univariate vs multivariable with ISS.
tbl1 <- germline_patient_surv %>% select(CH_status, ISS) %>%
  tbl_uvregression(method = survival::coxph,
                   y = (Surv(time = germline_patient_surv$month_at_os,
                             event = germline_patient_surv$os_event)),
                   exponentiate = TRUE) %>% bold_p(t = .05) %>% add_nevent() %>%
  bold_labels() %>% italicize_levels()
tbl2 <- coxph(Surv(time = germline_patient_surv$month_at_os,
                   event = germline_patient_surv$os_event) ~ CH_status + ISS, data = germline_patient_surv) %>%
  tbl_regression(exponentiate = TRUE)
tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Univariate**", "**Multivariate**"))
# HCT-ever (any time), with increasing adjustment.
coxph(Surv(time = germline_patient_surv$month_at_os,
           event = germline_patient_surv$os_event) ~ HCT_ever, data = germline_patient_surv) %>%
  tbl_regression(exponentiate = TRUE)
coxph(Surv(time = germline_patient_surv$month_at_os,
           event = germline_patient_surv$os_event) ~ HCT_ever + ISS, data = germline_patient_surv) %>%
  tbl_regression(exponentiate = TRUE)
coxph(Surv(time = germline_patient_surv$month_at_os,
           event = germline_patient_surv$os_event) ~ HCT_ever + ISS_grp, data = germline_patient_surv) %>%
  tbl_regression(exponentiate = TRUE)
coxph(Surv(time = germline_patient_surv$month_at_os,
           event = germline_patient_surv$os_event) ~ HCT_ever + CH_status + ISS_grp, data = germline_patient_surv) %>%
  tbl_regression(exponentiate = TRUE)
# Regression: logistic models on the OS event itself (estimate and OR scale).
model <- glm(os_event ~
               pfs_hct + ISS,
             data = germline_patient_surv, family = binomial)
tbl1 <- tbl_regression(model)
tbl2 <- tbl_regression(model, exponentiate = TRUE)
tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Estimate**", "**Exp**"))
model <- glm(os_event ~
               pfs_hct + CH_status,
             data = germline_patient_surv, family = binomial)
tbl1 <- tbl_regression(model)
tbl2 <- tbl_regression(model, exponentiate = TRUE)
tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Estimate**", "**Exp**"))
### Sequenced patients
# patient <- readxl::read_xlsx(paste0(path, "/Nancy's working files/MM Avatar_Sequenced subset.xlsx"),
# sheet = "Sequenced") %>%
# select(avatar_id) %>% distinct()
# id <- paste(patient$avatar_id, collapse = "|")
# seq_germline_patient_surv <- germline_patient_surv[ grepl(id, germline_patient_surv$avatar_id) , ]
#
# tbl1 <- seq_germline_patient_surv %>% select(pfs_hct, ISS) %>%
# tbl_uvregression(method = survival::coxph,
# y = (Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event)),
# exponentiate = TRUE) %>% bold_p(t = .05) %>% add_nevent() %>%
# bold_labels() %>% italicize_levels()
#
# tbl2 <- coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ pfs_hct + ISS, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
# tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Univariate**", "**Multivariate**"))
#
#
# tbl1 <- seq_germline_patient_surv %>% select(pfs_hct, ISS_grp) %>%
# tbl_uvregression(method = survival::coxph,
# y = (Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event)),
# exponentiate = TRUE) %>% bold_p(t = .05) %>% add_nevent() %>%
# bold_labels() %>% italicize_levels()
# tbl2 <- coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ pfs_hct + ISS_grp, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
# tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Univariate**", "**Multivariate**"))
#
#
# coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ pfs_hct + CH_status + ISS_grp, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
# coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ pfs_hct + CH_status + ISS_grp, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
#
#
# tbl1 <- seq_germline_patient_surv %>% select(CH_status, ISS) %>%
# tbl_uvregression(method = survival::coxph,
# y = (Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event)),
# exponentiate = TRUE) %>% bold_p(t = .05) %>% add_nevent() %>%
# bold_labels() %>% italicize_levels()
# tbl2 <- coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ CH_status + ISS, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
# tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Univariate**", "**Multivariate**"))
#
#
#
# coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ HCT_ever, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
# coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ HCT_ever + ISS, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
# coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ HCT_ever + ISS_grp, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
#
# coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ HCT_ever + CH_status + ISS_grp, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
#
# # Regression
# model <- glm(os_event ~
# pfs_hct + ISS,
# data = seq_germline_patient_surv, family = binomial)
# tbl1 <- tbl_regression(model)
# tbl2 <- tbl_regression(model, exponentiate = TRUE)
# tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Estimate**", "**Exp**"))
#
# model <- glm(os_event ~
# pfs_hct + CH_status,
# data = seq_germline_patient_surv, family = binomial)
# tbl1 <- tbl_regression(model)
# tbl2 <- tbl_regression(model, exponentiate = TRUE)
# tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Estimate**", "**Exp**"))
################################################################################### II ### PFS/OS treatment date by general treatment----

# Kaplan-Meier plot wrapper with the shared survminer theme (see section I;
# redefined here so this section is self-contained).
#   fit         - a survfit object.
#   title       - plot title.
#   legend_font - legend font size.
#   ...         - forwarded to ggsurvplot() (e.g. color =, linetype =).
plot_km <- function(fit, title, legend_font = 20, ...) {
  ggsurvplot(fit, data = germline_patient_surv,
             title = title,
             font.main = c(24, "bold", "black"),
             font.x = c(20, "bold", "black"),
             font.y = c(20, "bold", "black"),
             font.legend = c(legend_font, "bold", "black"),
             font.tickslab = c(18, "bold", "black"),
             size = 1.5,
             xlab = "Time in months",
             legend = "top",
             legend.title = "",
             pval = TRUE,
             conf.int = FALSE,
             # Risk table
             tables.height = 0.3,
             risk.table.title = "Risk table (number(%))",
             risk.table = "abs_pct",
             risk.table.y.text = FALSE,
             risk.table.fontsize = 6,
             tables.theme = theme_survminer(base_size = 5,
                                            font.main = c(16, "bold", "black"),
                                            font.x = c(16, "bold", "black"),
                                            font.y = c(16, "bold", "transparent"),
                                            font.tickslab = c(19, "bold", "black")),
             censor = TRUE,
             ...)
}

# --- PFS from diagnosis by any-treatment status -----------------------------
mysurv <- Surv(time = germline_patient_surv$month_at_progression_Dx, event = germline_patient_surv$Progression_event)
plot_km(survfit(mysurv ~ pfs_treatment, data = germline_patient_surv),
        "PFS Treatment from Dx")
# Stratified by treatment (colour) and CH status (line type).
plot_km(survfit(mysurv ~ pfs_treatment + CH_status, data = germline_patient_surv),
        "PFS Treatment from Dx",
        color = "pfs_treatment", linetype = "CH_status")
plot_km(survfit(mysurv ~ HCT_ever, data = germline_patient_surv),
        "PFS HCT from Dx (general HCT yes/no but no include drugs)")
myplot <- survfit(mysurv~HCT_ever+CH_status, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/CHIP/PFS by HCT2.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS HCT from Dx (general HCT yes/no but no include drugs)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
color = "HCT_ever",
linetype = "CH_status",
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
myplot <- survfit(mysurv~HCT_ever+ISS, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by HCT2 ISS.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS HCT from Dx (general HCT yes/no but no include drugs)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
color = "ISS",
linetype = "HCT_ever",
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
# --- PFS measured from the treatment start date ---
# Kaplan-Meier figure helper: renders a fitted survfit object with the
# shared styling used throughout this script (large bold fonts, top legend,
# log-rank p-value, "abs_pct" risk table). Extra ggsurvplot() arguments,
# such as color/linetype strata mappings, are forwarded through `...`.
draw_km <- function(fit, plot_title, surv_data = germline_patient_surv, ...) {
  ggsurvplot(fit, data = surv_data,
             title = plot_title,
             font.main = c(24, "bold", "black"),
             font.x = c(20, "bold", "black"),
             font.y = c(20, "bold", "black"),
             font.legend = c(20, "bold", "black"),
             font.tickslab = c(18, "bold", "black"),
             size = 1.5,
             xlab = "Time in months",
             legend = "top",
             legend.title = "",
             pval = TRUE,
             conf.int = FALSE,
             # risk table shown under the curves
             tables.height = 0.3,
             risk.table.title = "Risk table (number(%))",
             risk.table = "abs_pct",
             risk.table.y.text = FALSE,
             risk.table.fontsize = 6,
             tables.theme = theme_survminer(base_size = 5,
                                            font.main = c(16, "bold", "black"),
                                            font.x = c(16, "bold", "black"),
                                            font.y = c(16, "bold", "transparent"),
                                            font.tickslab = c(19, "bold", "black")),
             censor = TRUE,
             ...)
}

mysurv <- Surv(
  time = germline_patient_surv$month_at_progression_treat,
  event = germline_patient_surv$drug_progression_event
)

myplot <- survfit(mysurv ~ pfs_treatment, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by Treatment from treatment date.jpeg"), width = 1200, height = 900)
draw_km(myplot, "PFS Treatment from treatment date")
# dev.off()

myplot <- survfit(mysurv ~ pfs_treatment + CH_status, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/CHIP/PFS by Treatment CH from treatment date.jpeg"), width = 1200, height = 900)
draw_km(myplot, "PFS Treatment from treatment date",
        color = "pfs_treatment", linetype = "CH_status")
# dev.off()

myplot <- survfit(mysurv ~ HCT_ever, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by HCT2 from treatment date.jpeg"), width = 1200, height = 900)
draw_km(myplot, "PFS Treatment from treatment date (general HCT yes/no but no include drugs)")
# dev.off()

myplot <- survfit(mysurv ~ HCT_ever + ISS, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by HCT2 ISS from treatment date.jpeg"), width = 1200, height = 900)
draw_km(myplot, "PFS HCT from treatment (general HCT yes/no but no include drugs)",
        color = "ISS", linetype = "HCT_ever")
# dev.off()

myplot <- survfit(mysurv ~ HCT_ever + CH_status, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/CHIP/PFS by HCT2 CH from treatment date.jpeg"), width = 1200, height = 900)
draw_km(myplot, "PFS HCT from treatment (general HCT yes/no but no include drugs)",
        color = "HCT_ever", linetype = "CH_status")
# dev.off()
# OS----
# --- Overall survival from diagnosis ---
# Kaplan-Meier figure helper: renders a fitted survfit object with the
# shared styling used throughout this script (large bold fonts, top legend,
# log-rank p-value, "abs_pct" risk table). Extra ggsurvplot() arguments,
# such as color/linetype strata mappings, are forwarded through `...`.
draw_km <- function(fit, plot_title, surv_data = germline_patient_surv, ...) {
  ggsurvplot(fit, data = surv_data,
             title = plot_title,
             font.main = c(24, "bold", "black"),
             font.x = c(20, "bold", "black"),
             font.y = c(20, "bold", "black"),
             font.legend = c(20, "bold", "black"),
             font.tickslab = c(18, "bold", "black"),
             size = 1.5,
             xlab = "Time in months",
             legend = "top",
             legend.title = "",
             pval = TRUE,
             conf.int = FALSE,
             # risk table shown under the curves
             tables.height = 0.3,
             risk.table.title = "Risk table (number(%))",
             risk.table = "abs_pct",
             risk.table.y.text = FALSE,
             risk.table.fontsize = 6,
             tables.theme = theme_survminer(base_size = 5,
                                            font.main = c(16, "bold", "black"),
                                            font.x = c(16, "bold", "black"),
                                            font.y = c(16, "bold", "transparent"),
                                            font.tickslab = c(19, "bold", "black")),
             censor = TRUE,
             ...)
}

mysurv <- Surv(
  time = germline_patient_surv$month_at_os,
  event = germline_patient_surv$os_event
)

myplot <- survfit(mysurv ~ pfs_treatment, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/OS by Treatment.jpeg"), width = 1200, height = 900)
draw_km(myplot, "OS Treatment")
# dev.off()

myplot <- survfit(mysurv ~ pfs_treatment + CH_status, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/CHIP/OS Treatment CH.jpeg"), width = 1200, height = 900)
draw_km(myplot, "OS Treatment",
        color = "pfs_treatment", linetype = "CH_status")
# dev.off()

myplot <- survfit(mysurv ~ HCT_ever, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/Treatment/OS HCT2.jpeg"), width = 1200, height = 900)
draw_km(myplot, "OS HCT2 (general HCT yes/no but no include drugs)")
# dev.off()

myplot <- survfit(mysurv ~ HCT_ever + ISS, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/Treatment/OS HCT2 ISS.jpeg"), width = 1200, height = 900)
draw_km(myplot, "OS HCT2 (general HCT yes/no but no include drugs)",
        color = "ISS", linetype = "HCT_ever")
# dev.off()

myplot <- survfit(mysurv ~ HCT_ever + CH_status, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/CHIP/OS HCT2 CH.jpeg"), width = 1200, height = 900)
draw_km(myplot, "OS HCT2 (general HCT yes/no but no include drugs)",
        color = "HCT_ever", linetype = "CH_status")
# dev.off()
################################################################################### III ### PFS/OS rad date by Radiation----
# From Dx
# Kaplan-Meier figure helper: renders a fitted survfit object with the
# shared styling used throughout this script (large bold fonts, top legend,
# log-rank p-value, "abs_pct" risk table). Extra ggsurvplot() arguments,
# such as color/linetype strata mappings, are forwarded through `...`.
draw_km <- function(fit, plot_title, surv_data = germline_patient_surv, ...) {
  ggsurvplot(fit, data = surv_data,
             title = plot_title,
             font.main = c(24, "bold", "black"),
             font.x = c(20, "bold", "black"),
             font.y = c(20, "bold", "black"),
             font.legend = c(20, "bold", "black"),
             font.tickslab = c(18, "bold", "black"),
             size = 1.5,
             xlab = "Time in months",
             legend = "top",
             legend.title = "",
             pval = TRUE,
             conf.int = FALSE,
             # risk table shown under the curves
             tables.height = 0.3,
             risk.table.title = "Risk table (number(%))",
             risk.table = "abs_pct",
             risk.table.y.text = FALSE,
             risk.table.fontsize = 6,
             tables.theme = theme_survminer(base_size = 5,
                                            font.main = c(16, "bold", "black"),
                                            font.x = c(16, "bold", "black"),
                                            font.y = c(16, "bold", "transparent"),
                                            font.tickslab = c(19, "bold", "black")),
             censor = TRUE,
             ...)
}

mysurv <- Surv(
  time = germline_patient_surv$month_at_progression_Dx,
  event = germline_patient_surv$Progression_event
)

# Radiation before/at the germline sample date.
myplot <- survfit(mysurv ~ pfs_radiation, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by Radiation from Dx.jpeg"), width = 1200, height = 900)
draw_km(myplot, "PFS Radiation from Dx (pfs_radiation is when rad <or= germline date)")
# dev.off()

# Radiation ever (yes/no).
myplot <- survfit(mysurv ~ Radiation, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by Radiation.jpeg"), width = 1200, height = 900)
draw_km(myplot, "PFS Radiation from Dx (general HCT yes/no)")
# dev.off()
# From rad date
# --- PFS measured from the radiation date, by pre-germline radiation status ---
mysurv <- Surv(
  time = germline_patient_surv$month_at_progression_rad,
  event = germline_patient_surv$rad_progression_event
)
myplot <- survfit(mysurv ~ pfs_radiation, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by Radiation from rad date.jpeg"), width = 1200, height = 900)
ggsurvplot(
  myplot, data = germline_patient_surv,
  title = "PFS Radiation from rad date (pfs_radiation is when rad <or= germline date)",
  xlab = "Time in months",
  size = 1.5,
  # Fonts
  font.main = c(24, "bold", "black"),
  font.x = c(20, "bold", "black"),
  font.y = c(20, "bold", "black"),
  font.legend = c(20, "bold", "black"),
  font.tickslab = c(18, "bold", "black"),
  # Legend
  legend = "top",
  legend.title = "",
  # Statistics
  pval = TRUE,
  conf.int = FALSE,
  # Risk table
  risk.table = "abs_pct",
  risk.table.title = "Risk table (number(%))",
  risk.table.y.text = FALSE,
  risk.table.fontsize = 6,
  tables.height = 0.3,
  tables.theme = theme_survminer(
    base_size = 5,
    font.main = c(16, "bold", "black"),
    font.x = c(16, "bold", "black"),
    font.y = c(16, "bold", "transparent"),
    font.tickslab = c(19, "bold", "black")
  ),
  censor = TRUE
)
# dev.off()
# PFS from radiation date, stratified by pre-germline radiation AND CH status.
myplot <- survfit(mysurv~pfs_radiation+CH_status, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/CHIP/PFS by Radiation CH from rad date.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
           title = "PFS Radiation from rad date (pfs_radiation is when rad <or= germline date)",
           font.main = c(24, "bold", "black"),
           font.x = c(20, "bold", "black"),
           font.y = c(20, "bold", "black"),
           font.legend = c(20, "bold", "black"),
           font.tickslab = c(18, "bold", "black"),
           size = 1.5,
           xlab = "Time in months",
           legend = "top",
           legend.title = "",
           # # legend.labs = c("No Radiation", "Radiation"),
           # palette = c("darkred", "darkgreen", "grey"),
           # CONSISTENCY FIX: every other two-factor (+CH_status) plot in this
           # script maps the treatment factor to color and CH status to
           # linetype (see the pfs_treatment+CH_status and HCT_ever+CH_status
           # plots); this one omitted the mapping, leaving the four strata
           # visually indistinguishable.
           color = "pfs_radiation",
           linetype = "CH_status",
           pval = TRUE,
           conf.int = FALSE,
           # Add risk table
           tables.height = 0.3,
           risk.table.title = "Risk table (number(%))",
           risk.table = "abs_pct",
           risk.table.y.text = FALSE,
           risk.table.fontsize = 6,
           tables.theme = theme_survminer(base_size = 5,
                                          font.main = c(16, "bold", "black"),
                                          font.x = c(16, "bold", "black"),
                                          font.y = c(16, "bold", "transparent"),
                                          font.tickslab = c(19, "bold", "black")
           ),
           # Censor
           censor = TRUE
)
# dev.off()
# OS----
# --- Overall survival, stratified by pre-germline radiation status ---
mysurv <- Surv(
  time = germline_patient_surv$month_at_os,
  event = germline_patient_surv$os_event
)
myplot <- survfit(mysurv ~ pfs_radiation, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/OS by Radiation.jpeg"), width = 1200, height = 900)
ggsurvplot(
  myplot, data = germline_patient_surv,
  title = "OS Radiation (pfs_radiation is when rad <or= germline date)",
  xlab = "Time in months",
  size = 1.5,
  # Fonts
  font.main = c(24, "bold", "black"),
  font.x = c(20, "bold", "black"),
  font.y = c(20, "bold", "black"),
  font.legend = c(20, "bold", "black"),
  font.tickslab = c(18, "bold", "black"),
  # Legend
  legend = "top",
  legend.title = "",
  # Statistics
  pval = TRUE,
  conf.int = FALSE,
  # Risk table
  risk.table = "abs_pct",
  risk.table.title = "Risk table (number(%))",
  risk.table.y.text = FALSE,
  risk.table.fontsize = 6,
  tables.height = 0.3,
  tables.theme = theme_survminer(
    base_size = 5,
    font.main = c(16, "bold", "black"),
    font.x = c(16, "bold", "black"),
    font.y = c(16, "bold", "transparent"),
    font.tickslab = c(19, "bold", "black")
  ),
  censor = TRUE
)
# dev.off()
# OS stratified by pre-germline radiation AND CH status.
myplot <- survfit(mysurv~pfs_radiation+CH_status, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/CHIP/OS Rad by CH.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
           title = "OS Radiation (pfs_radiation is when rad <or= germline date)",
           font.main = c(24, "bold", "black"),
           font.x = c(20, "bold", "black"),
           font.y = c(20, "bold", "black"),
           font.legend = c(20, "bold", "black"),
           font.tickslab = c(18, "bold", "black"),
           size = 1.5,
           xlab = "Time in months",
           legend = "top",
           legend.title = "",
           # legend.labs = c("No Radiation", "Radiation"),
           # palette = c("darkred", "darkgreen", "grey"),
           # CONSISTENCY FIX: sibling +CH_status plots (e.g. OS Treatment CH)
           # map the treatment factor to color and CH status to linetype;
           # without this mapping the four strata cannot be told apart.
           color = "pfs_radiation",
           linetype = "CH_status",
           pval = TRUE,
           conf.int = FALSE,
           # Add risk table
           tables.height = 0.3,
           risk.table.title = "Risk table (number(%))",
           risk.table = "abs_pct",
           risk.table.y.text = FALSE,
           risk.table.fontsize = 6,
           tables.theme = theme_survminer(base_size = 5,
                                          font.main = c(16, "bold", "black"),
                                          font.x = c(16, "bold", "black"),
                                          font.y = c(16, "bold", "transparent"),
                                          font.tickslab = c(19, "bold", "black")),
           # Censor
           censor = TRUE
)
# dev.off()
# Cross-tabulates radiation exposure by CH status (one row per patient) and
# renders the result as a gt table with group-comparison p-values.
# NOTE(review): the `tbl <-` assignment is commented out, so this pipeline
# only prints the table; the commented gt::gtsave() call below would fail
# with "object 'tbl' not found" unless the assignment is restored first.
# tbl <-
germline_patient_surv %>%
  distinct(avatar_id, .keep_all = TRUE) %>%
  select(CH_status, Radiation, pfs_radiation) %>%
  tbl_summary(by = CH_status,
              sort = list(everything() ~ "frequency")) %>% add_p() %>%
  as_gt()
# gt::gtsave(tbl, zoom = 1, paste0(path, "/Figures/CHIP/Radiation by CH.pdf"))
# Boxplot of the radiation-to-germline-sample interval (days), written to PDF.
pdf(paste0(path, "/Figures/CHIP/Days repartition interval_radiation_vs_germ.pdf"), height = 6, width = 9)
p <- germline_patient_data %>% # filter(!is.na(Race)) %>%
  # mutate(Race = factor(Race, levels=c("White", "Black", "Am Indian", "Asian", "More than one race", "Others", "Unknown"))) %>%
  ggplot(aes(y=interval_radiation_vs_germ)) +
  geom_boxplot(alpha = 0.5) + # color= c("#60136EFF", "#A92E5EFF", "#E65D2FFF")
  theme_minimal() +
  # scale_fill_brewer(palette="BuPu") +
  labs(y="Days", title="interval_radiation_vs_germ")
# BUG FIX: in a sourced script, a ggplot assigned to `p` is NOT auto-printed,
# so nothing was drawn between pdf() and dev.off() and the PDF came out empty.
# Explicitly print the plot onto the open device.
print(p)
# p + geom_jitter(shape=16, position=position_jitter(0.2)) #+
# stat_compare_means()
dev.off()
######################################################################################################### By CH
# Dx
# mysurv <- Surv(time = germline_patient_surv$month_at_progression_Dx, event = germline_patient_surv$Progression_event)
# myplot <- survfit(mysurv~Radiation+CH_status, data = germline_patient_surv)
# # jpeg(paste0(path, "/Figures/Survivals/CHIP/PFS by CH Radiation from Dx.jpeg"), width = 1500, height = 900)
# ggsurvplot(myplot, data = germline_patient_surv,
# title = "PFS Radiation from Dx",
# font.main = c(24, "bold", "black"),
# font.x = c(20, "bold", "black"),
# font.y = c(20, "bold", "black"),
# font.legend = c(15, "bold", "black"),
# font.tickslab = c(18, "bold", "black"),
# size = 1.5,
#
# xlab = "Time in months",
# legend = "top",
# legend.title = "",
# # # legend.labs = c("No Radiation", "Radiation"),
# # palette = c("darkred", "darkgreen", "grey"),
# pval = TRUE,
# conf.int = FALSE,
# # Add risk table
# tables.height = 0.3,
# risk.table.title = "Risk table (number(%))",
# risk.table = "abs_pct",
# risk.table.y.text = FALSE,
# risk.table.fontsize = 6,
# tables.theme = theme_survminer(base_size = 5,
# font.main = c(16, "bold", "black"),
# font.x = c(16, "bold", "black"),
# font.y = c(16, "bold", "transparent"),
# font.tickslab = c(19, "bold", "black")
# ),
# # Censor
# censor = TRUE
# )
# # dev.off()
#
# myplot <- survfit(mysurv~Radiation_event+CH_status, data = germline_patient_surv)
# # jpeg(paste0(path, "/Figures/Survivals/CHIP/PFS by CH Radiation_event from Dx.jpeg"), width = 1200, height = 900)
# ggsurvplot(myplot, data = germline_patient_surv,
# title = "PFS Radiation from Dx",
# font.main = c(24, "bold", "black"),
# font.x = c(20, "bold", "black"),
# font.y = c(20, "bold", "black"),
# font.legend = c(10, "bold", "black"),
# font.tickslab = c(18, "bold", "black"),
# size = 1.5,
#
# xlab = "Time in months",
# legend = "top",
# legend.title = "",
# # # legend.labs = c("No Radiation", "Radiation"),
# # palette = c("darkred", "darkgreen", "grey"),
# pval = TRUE,
# conf.int = FALSE,
# # Add risk table
# tables.height = 0.3,
# risk.table.title = "Risk table (number(%))",
# risk.table = "abs_pct",
# risk.table.y.text = FALSE,
# risk.table.fontsize = 6,
# tables.theme = theme_survminer(base_size = 5,
# font.main = c(16, "bold", "black"),
# font.x = c(16, "bold", "black"),
# font.y = c(16, "bold", "transparent"),
# font.tickslab = c(19, "bold", "black")
# ),
# # Censor
# censor = TRUE
# )
# # dev.off()
################################################################################### IV ### PFS/OS drug date by Drug----
# drug yes/no----
# Kaplan-Meier figure helper: renders a fitted survfit object with the
# shared styling used throughout this script (large bold fonts, top legend,
# log-rank p-value, "abs_pct" risk table). Extra ggsurvplot() arguments,
# such as color/linetype strata mappings, are forwarded through `...`.
draw_km <- function(fit, plot_title, surv_data = germline_patient_surv, ...) {
  ggsurvplot(fit, data = surv_data,
             title = plot_title,
             font.main = c(24, "bold", "black"),
             font.x = c(20, "bold", "black"),
             font.y = c(20, "bold", "black"),
             font.legend = c(20, "bold", "black"),
             font.tickslab = c(18, "bold", "black"),
             size = 1.5,
             xlab = "Time in months",
             legend = "top",
             legend.title = "",
             pval = TRUE,
             conf.int = FALSE,
             # risk table shown under the curves
             tables.height = 0.3,
             risk.table.title = "Risk table (number(%))",
             risk.table = "abs_pct",
             risk.table.y.text = FALSE,
             risk.table.fontsize = 6,
             tables.theme = theme_survminer(base_size = 5,
                                            font.main = c(16, "bold", "black"),
                                            font.x = c(16, "bold", "black"),
                                            font.y = c(16, "bold", "transparent"),
                                            font.tickslab = c(19, "bold", "black")),
             censor = TRUE,
             ...)
}

# PFS from the first drug date, by drug-before-germline status.
mysurv <- Surv(
  time = germline_patient_surv$month_at_progression_drug,
  event = germline_patient_surv$drug_progression_event
)
myplot <- survfit(mysurv ~ pfs_drugs, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS Drugs from drug date.jpeg"), width = 1200, height = 900)
draw_km(myplot, "PFS Drugs from drug date (pfs_drugs is when drugs <or= germline date)")
# dev.off()

# PFS from diagnosis, by drug treatment ever (yes/no).
mysurv <- Surv(
  time = germline_patient_surv$month_at_progression_Dx,
  event = germline_patient_surv$Progression_event
)
myplot <- survfit(mysurv ~ Drugs, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS Drugs from Dx.jpeg"), width = 1200, height = 900)
draw_km(myplot, "PFS Drugs from drug date (Drugs is general yes/no, need to do on MM only)")
# dev.off()

# OS
mysurv <- Surv(
  time = germline_patient_surv$month_at_os,
  event = germline_patient_surv$os_event
)
myplot <- survfit(mysurv ~ pfs_drugs, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/OS Drugs.jpeg"), width = 1200, height = 900)
draw_km(myplot, "OS Drugs (pfs_drugs is when drugs <or= germline date)")
# dev.off()
# by regimen----
# PFS/OS stratified by first treatment regimen, restricted to the listed
# MM regimens.
# NOTE(review): the cohort is filtered on `first_regimen_name_MM` but the
# curves below are stratified by `first_regimen_name` — confirm these two
# columns are meant to differ; otherwise the strata may include values the
# filter did not intend to keep.
germline_patient_drug_surv <- germline_patient_surv %>%
  filter(str_detect(first_regimen_name_MM, "No Drugs|VRd|Bor-Dex|^Rd|CyBorD or VCd|Dexamethasone|Lenalidomide|^Td|^KRd|Bortezomib|Melphalan|VAd|ABCD|D-RVd or dara-RVd|IRD"))
# PFS measured from the first drug date.
mysurv <- Surv(time = germline_patient_drug_surv$month_at_progression_drug, event = germline_patient_drug_surv$drug_progression_event)
myplot <- survfit(mysurv~first_regimen_name, data = germline_patient_drug_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS Regimen from drugs date.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_drug_surv,
           title = "PFS regimen from drug date",
           font.main = c(24, "bold", "black"),
           font.x = c(20, "bold", "black"),
           font.y = c(20, "bold", "black"),
           # Smaller legend font than the other figures: many regimen strata.
           font.legend = c(8, "bold", "black"), # 20
           font.tickslab = c(18, "bold", "black"),
           size = 1.5,
           xlab = "Time in months",
           legend = "top",
           legend.title = "regimen_name",
           # # legend.labs = c("Hipanic", "Non-Hispanic", "Unknown"),
           # palette = c("darkred", "darkgreen", "grey"),
           pval = TRUE,
           conf.int = FALSE,
           # Add risk table
           tables.height = 0.3,
           risk.table.title = "Risk table (number(%))",
           risk.table = "abs_pct",
           risk.table.y.text = FALSE,
           risk.table.fontsize = 6,
           tables.theme = theme_survminer(base_size = 5,
                                          font.main = c(16, "bold", "black"),
                                          font.x = c(16, "bold", "black"),
                                          font.y = c(16, "bold", "transparent"),
                                          font.tickslab = c(19, "bold", "black")
           ),
           # Censor
           censor = TRUE
)
# dev.off()
# OS
# Overall survival by first regimen, same restricted cohort.
mysurv <- Surv(time = germline_patient_drug_surv$month_at_os, event = germline_patient_drug_surv$os_event)
myplot <- survfit(mysurv~first_regimen_name, data = germline_patient_drug_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/OS Regimen.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_drug_surv,
           title = "OS regimen",
           font.main = c(24, "bold", "black"),
           font.x = c(20, "bold", "black"),
           font.y = c(20, "bold", "black"),
           font.legend = c(8, "bold", "black"), # 20
           font.tickslab = c(18, "bold", "black"),
           size = 1.5,
           xlab = "Time in months",
           legend = "top",
           legend.title = "regimen_name",
           # # legend.labs = c("Hipanic", "Non-Hispanic", "Unknown"),
           # palette = c("darkred", "darkgreen", "grey"),
           pval = TRUE,
           conf.int = FALSE,
           # Add risk table
           tables.height = 0.3,
           risk.table.title = "Risk table (number(%))",
           risk.table = "abs_pct",
           risk.table.y.text = FALSE,
           risk.table.fontsize = 6,
           tables.theme = theme_survminer(base_size = 5,
                                          font.main = c(16, "bold", "black"),
                                          font.x = c(16, "bold", "black"),
                                          font.y = c(16, "bold", "transparent"),
                                          font.tickslab = c(19, "bold", "black")),
           # Censor
           censor = TRUE
)
# dev.off()
# --- Non-code residue from a dataset concatenation, kept as comments so the
# --- file remains parseable ---
# path: /R/7.2.Treatment survivals.R
# license: no_license
# repo: GillisLabAtMoffitt/CHIP-Avatar
# language: R | is_vendor: false | is_generated: false | length_bytes: 70,488
# extension: r
# One row per patient for all survival analyses in this script.
germline_patient_surv <- germline_patient_data %>%
  distinct(avatar_id, .keep_all = TRUE)
################################################################################### I ### PFS/OS hct date by HCT----
# From Dx
# --- PFS from diagnosis, by HCT-before-germline-sample status ---
mysurv <- Surv(
  time = germline_patient_surv$month_at_progression_Dx,
  event = germline_patient_surv$Progression_event
)
myplot <- survfit(mysurv ~ pfs_hct, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS HCT from Dx.jpeg"), width = 1200, height = 900)
ggsurvplot(
  myplot, data = germline_patient_surv,
  title = "PFS HCT from Dx (pfs_hct is when HCT <or= germline date)",
  xlab = "Time in months",
  size = 1.5,
  # Fonts
  font.main = c(24, "bold", "black"),
  font.x = c(20, "bold", "black"),
  font.y = c(20, "bold", "black"),
  font.legend = c(20, "bold", "black"),
  font.tickslab = c(18, "bold", "black"),
  # Legend
  legend = "top",
  legend.title = "",
  # Statistics
  pval = TRUE,
  conf.int = FALSE,
  # Risk table
  risk.table = "abs_pct",
  risk.table.title = "Risk table (number(%))",
  risk.table.y.text = FALSE,
  risk.table.fontsize = 6,
  tables.height = 0.3,
  tables.theme = theme_survminer(
    base_size = 5,
    font.main = c(16, "bold", "black"),
    font.x = c(16, "bold", "black"),
    font.y = c(16, "bold", "transparent"),
    font.tickslab = c(19, "bold", "black")
  ),
  censor = TRUE
)
# dev.off()
# Same PFS-from-Dx endpoint, now stratified by pfs_hct crossed with CH_status.
myplot <- survfit(mysurv~pfs_hct+CH_status, data = germline_patient_surv)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS HCT from Dx (pfs_hct is when HCT <or= germline date)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("Hipanic", "Non-Hispanic", "Unknown"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# Same PFS-from-Dx endpoint, stratified by HCT_ever (any HCT yes/no).
myplot <- survfit(mysurv~HCT_ever, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS HCT.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS HCT from Dx (general HCT yes/no)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("Hipanic", "Non-Hispanic", "Unknown"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
# From hct: PFS measured from the HCT date, stratified by pfs_hct.
# FIX: `censor = TRUE` was commented out but the comma before it was left in,
# leaving a trailing empty argument in the ggsurvplot() call (`..., )`), which
# R treats as a missing positional argument and errors on. Comma removed.
mysurv <- Surv(time = germline_patient_surv$month_at_progression_hct, event = germline_patient_surv$hct_progression_event)
myplot <- survfit(mysurv~pfs_hct, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS HCT from hct date.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS HCT from HCT date (pfs_hct is when HCT <or= germline date)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("Hipanic", "Non-Hispanic", "Unknown"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
)
# Censor
# censor = TRUE
)
# dev.off()
# PFS from HCT date, stratified by pfs_hct crossed with CH_status.
# FIX: removed the trailing comma left behind when `censor = TRUE` was
# commented out — it produced a trailing empty argument in ggsurvplot().
myplot <- survfit(mysurv~pfs_hct+CH_status, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/CHIP/PFS by CH HCT from hct date.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS HCT from HCT date",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(10, "bold", "black"), # 20
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("Hipanic", "Non-Hispanic", "Unknown"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
)
# Censor
# censor = TRUE
)
# dev.off()
# PFS from HCT date, stratified by HCT_ever (any HCT yes/no).
# FIX: removed the trailing comma left behind when `censor = TRUE` was
# commented out — it produced a trailing empty argument in ggsurvplot().
myplot <- survfit(mysurv~HCT_ever, data = germline_patient_surv)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS HCT from HCT date (general HCT yes/no)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(10, "bold", "black"), # 20
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("Hipanic", "Non-Hispanic", "Unknown"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
)
# Censor
# censor = TRUE
)
# OS
# Overall survival (month_at_os / os_event), stratified by pfs_hct.
mysurv <- Surv(time = germline_patient_surv$month_at_os, event = germline_patient_surv$os_event)
myplot <- survfit(mysurv~pfs_hct, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/OS HCT.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "OS HCT (pfs_hct is when HCT <or= germline date)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("Hipanic", "Non-Hispanic", "Unknown"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")),
# Censor
censor = TRUE
)
# dev.off()
# OS stratified by pfs_hct crossed with CH_status.
myplot <- survfit(mysurv~pfs_hct+CH_status, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/CHIP/OS HCT by CH.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "OS HCT (pfs_hct is when HCT <or= germline date)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("Hipanic", "Non-Hispanic", "Unknown"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")),
# Censor
censor = TRUE
)
# dev.off()
# OS stratified by HCT_ever (any HCT yes/no).
# FIX: removed a redundant duplicate statement — `mysurv` already holds
# Surv(month_at_os, os_event), built verbatim at the start of the OS section,
# and nothing reassigns it in between.
myplot <- survfit(mysurv~HCT_ever, data = germline_patient_surv)
ggsurvplot(myplot, data = germline_patient_surv,
title = "OS HCT (general HCT yes/no)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("Hipanic", "Non-Hispanic", "Unknown"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")),
# Censor
censor = TRUE
)
# From hct
# NOTE(review): this section appears to be an accidental verbatim repeat of the
# "From hct" PFS plot earlier in the file (same Surv, same strata, same title);
# kept as-is pending confirmation, since removing it changes the plot output.
# FIX: removed the trailing comma left behind when `censor = TRUE` was
# commented out — it produced a trailing empty argument in ggsurvplot().
mysurv <- Surv(time = germline_patient_surv$month_at_progression_hct, event = germline_patient_surv$hct_progression_event)
myplot <- survfit(mysurv~pfs_hct, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS HCT from hct date.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS HCT from HCT date (pfs_hct is when HCT <or= germline date)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("Hipanic", "Non-Hispanic", "Unknown"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
)
# Censor
# censor = TRUE
)
# dev.off()
# Cox prop hazard----
# Univariable Cox PH of OS on pfs_hct (HR scale via exponentiate = TRUE),
# formatted with gtsummary; result is printed, not stored.
germline_patient_surv %>% select(pfs_hct) %>%
tbl_uvregression(method = survival::coxph,
y = (Surv(time = germline_patient_surv$month_at_os,
event = germline_patient_surv$os_event)),
exponentiate = TRUE) %>% bold_p(t = .05) %>% add_nevent() %>%
bold_labels() %>% italicize_levels()
# OS ~ pfs_hct + ISS: univariable table (tbl1) vs multivariable Cox (tbl2),
# merged side by side.
tbl1 <- germline_patient_surv %>% select(pfs_hct, ISS) %>%
tbl_uvregression(method = survival::coxph,
y = (Surv(time = germline_patient_surv$month_at_os,
event = germline_patient_surv$os_event)),
exponentiate = TRUE) %>% bold_p(t = .05) %>% add_nevent() %>%
bold_labels() %>% italicize_levels()
# model <- coxph(Surv(time = germline_patient_surv$month_at_os,
# event = germline_patient_surv$os_event) ~ pfs_hct + ISS, data = germline_patient_surv)
# summary(model)
# coxph(Surv(time = germline_patient_surv$month_at_os,
# event = germline_patient_surv$os_event) ~ pfs_hct, data = germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
tbl2 <- coxph(Surv(time = germline_patient_surv$month_at_os,
event = germline_patient_surv$os_event) ~ pfs_hct + ISS, data = germline_patient_surv) %>%
tbl_regression(exponentiate = TRUE)
tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Univariate**", "**Multivariate**"))
# Same uni/multivariable comparison using the grouped ISS variable (ISS_grp).
tbl1 <- germline_patient_surv %>% select(pfs_hct, ISS_grp) %>%
tbl_uvregression(method = survival::coxph,
y = (Surv(time = germline_patient_surv$month_at_os,
event = germline_patient_surv$os_event)),
exponentiate = TRUE) %>% bold_p(t = .05) %>% add_nevent() %>%
bold_labels() %>% italicize_levels()
tbl2 <- coxph(Surv(time = germline_patient_surv$month_at_os,
event = germline_patient_surv$os_event) ~ pfs_hct + ISS_grp, data = germline_patient_surv) %>%
tbl_regression(exponentiate = TRUE)
tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Univariate**", "**Multivariate**"))
# Multivariable Cox PH: OS ~ pfs_hct + CH_status + ISS_grp (HR scale).
# FIX: the identical pipeline was run twice back-to-back; the exact duplicate
# has been removed.
coxph(Surv(time = germline_patient_surv$month_at_os,
event = germline_patient_surv$os_event) ~ pfs_hct + CH_status + ISS_grp, data = germline_patient_surv) %>%
tbl_regression(exponentiate = TRUE)
# OS ~ CH_status + ISS: univariable vs multivariable tables, then a series of
# multivariable Cox models for HCT_ever with increasing adjustment.
tbl1 <- germline_patient_surv %>% select(CH_status, ISS) %>%
tbl_uvregression(method = survival::coxph,
y = (Surv(time = germline_patient_surv$month_at_os,
event = germline_patient_surv$os_event)),
exponentiate = TRUE) %>% bold_p(t = .05) %>% add_nevent() %>%
bold_labels() %>% italicize_levels()
tbl2 <- coxph(Surv(time = germline_patient_surv$month_at_os,
event = germline_patient_surv$os_event) ~ CH_status + ISS, data = germline_patient_surv) %>%
tbl_regression(exponentiate = TRUE)
tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Univariate**", "**Multivariate**"))
# HCT_ever alone, then adjusted for ISS, ISS_grp, and CH_status + ISS_grp.
coxph(Surv(time = germline_patient_surv$month_at_os,
event = germline_patient_surv$os_event) ~ HCT_ever, data = germline_patient_surv) %>%
tbl_regression(exponentiate = TRUE)
coxph(Surv(time = germline_patient_surv$month_at_os,
event = germline_patient_surv$os_event) ~ HCT_ever + ISS, data = germline_patient_surv) %>%
tbl_regression(exponentiate = TRUE)
coxph(Surv(time = germline_patient_surv$month_at_os,
event = germline_patient_surv$os_event) ~ HCT_ever + ISS_grp, data = germline_patient_surv) %>%
tbl_regression(exponentiate = TRUE)
coxph(Surv(time = germline_patient_surv$month_at_os,
event = germline_patient_surv$os_event) ~ HCT_ever + CH_status + ISS_grp, data = germline_patient_surv) %>%
tbl_regression(exponentiate = TRUE)
# Regression
# Logistic regression of the binary os_event (ignores time-to-event);
# side-by-side tables on the log-odds and odds-ratio scales.
model <- glm(os_event ~
pfs_hct + ISS,
data = germline_patient_surv, family = binomial)
tbl1 <- tbl_regression(model)
tbl2 <- tbl_regression(model, exponentiate = TRUE)
tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Estimate**", "**Exp**"))
model <- glm(os_event ~
pfs_hct + CH_status,
data = germline_patient_surv, family = binomial)
tbl1 <- tbl_regression(model)
tbl2 <- tbl_regression(model, exponentiate = TRUE)
tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Estimate**", "**Exp**"))
### Sequenced patients
# patient <- readxl::read_xlsx(paste0(path, "/Nancy's working files/MM Avatar_Sequenced subset.xlsx"),
# sheet = "Sequenced") %>%
# select(avatar_id) %>% distinct()
# id <- paste(patient$avatar_id, collapse = "|")
# seq_germline_patient_surv <- germline_patient_surv[ grepl(id, germline_patient_surv$avatar_id) , ]
#
# tbl1 <- seq_germline_patient_surv %>% select(pfs_hct, ISS) %>%
# tbl_uvregression(method = survival::coxph,
# y = (Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event)),
# exponentiate = TRUE) %>% bold_p(t = .05) %>% add_nevent() %>%
# bold_labels() %>% italicize_levels()
#
# tbl2 <- coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ pfs_hct + ISS, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
# tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Univariate**", "**Multivariate**"))
#
#
# tbl1 <- seq_germline_patient_surv %>% select(pfs_hct, ISS_grp) %>%
# tbl_uvregression(method = survival::coxph,
# y = (Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event)),
# exponentiate = TRUE) %>% bold_p(t = .05) %>% add_nevent() %>%
# bold_labels() %>% italicize_levels()
# tbl2 <- coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ pfs_hct + ISS_grp, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
# tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Univariate**", "**Multivariate**"))
#
#
# coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ pfs_hct + CH_status + ISS_grp, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
# coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ pfs_hct + CH_status + ISS_grp, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
#
#
# tbl1 <- seq_germline_patient_surv %>% select(CH_status, ISS) %>%
# tbl_uvregression(method = survival::coxph,
# y = (Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event)),
# exponentiate = TRUE) %>% bold_p(t = .05) %>% add_nevent() %>%
# bold_labels() %>% italicize_levels()
# tbl2 <- coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ CH_status + ISS, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
# tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Univariate**", "**Multivariate**"))
#
#
#
# coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ HCT_ever, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
# coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ HCT_ever + ISS, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
# coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ HCT_ever + ISS_grp, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
#
# coxph(Surv(time = seq_germline_patient_surv$month_at_os,
# event = seq_germline_patient_surv$os_event) ~ HCT_ever + CH_status + ISS_grp, data = seq_germline_patient_surv) %>%
# tbl_regression(exponentiate = TRUE)
#
# # Regression
# model <- glm(os_event ~
# pfs_hct + ISS,
# data = seq_germline_patient_surv, family = binomial)
# tbl1 <- tbl_regression(model)
# tbl2 <- tbl_regression(model, exponentiate = TRUE)
# tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Estimate**", "**Exp**"))
#
# model <- glm(os_event ~
# pfs_hct + CH_status,
# data = seq_germline_patient_surv, family = binomial)
# tbl1 <- tbl_regression(model)
# tbl2 <- tbl_regression(model, exponentiate = TRUE)
# tbl_merge(list(tbl1, tbl2), tab_spanner = c("**Estimate**", "**Exp**"))
################################################################################### II ### PFS/OS treatment date by general treatment----
# From Dx
# PFS from diagnosis, stratified by pfs_treatment.
mysurv <- Surv(time = germline_patient_surv$month_at_progression_Dx, event = germline_patient_surv$Progression_event)
myplot <- survfit(mysurv~pfs_treatment, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by Treatment from Dx.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS Treatment from Dx",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
# PFS from Dx by pfs_treatment x CH_status; color encodes treatment and
# linetype encodes CH status.
myplot <- survfit(mysurv~pfs_treatment+CH_status, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/CHIP/PFS by CH Treatment from Dx.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS Treatment from Dx",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
color = "pfs_treatment",
linetype = "CH_status",
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
# PFS from Dx stratified by HCT_ever ("HCT2": transplant status only, drugs
# not considered).
myplot <- survfit(mysurv~HCT_ever, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by HCT2.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS HCT from Dx (general HCT yes/no but no include drugs)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
# PFS from Dx by HCT_ever x CH_status; color = HCT, linetype = CH status.
myplot <- survfit(mysurv~HCT_ever+CH_status, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/CHIP/PFS by HCT2.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS HCT from Dx (general HCT yes/no but no include drugs)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
color = "HCT_ever",
linetype = "CH_status",
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
# PFS from Dx by HCT_ever x ISS stage; color = ISS, linetype = HCT.
myplot <- survfit(mysurv~HCT_ever+ISS, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by HCT2 ISS.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS HCT from Dx (general HCT yes/no but no include drugs)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
color = "ISS",
linetype = "HCT_ever",
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
# from treatment date
# PFS measured from treatment start (month_at_progression_treat /
# drug_progression_event), stratified by pfs_treatment.
mysurv <- Surv(time = germline_patient_surv$month_at_progression_treat, event = germline_patient_surv$drug_progression_event)
myplot <- survfit(mysurv~pfs_treatment, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by Treatment from treatment date.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS Treatment from treatment date",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
# PFS from treatment date by pfs_treatment x CH_status; color = treatment,
# linetype = CH status.
myplot <- survfit(mysurv~pfs_treatment+CH_status, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/CHIP/PFS by Treatment CH from treatment date.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS Treatment from treatment date",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
color = "pfs_treatment",
linetype = "CH_status",
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
# PFS from treatment date stratified by HCT_ever (transplant only, no drugs).
myplot <- survfit(mysurv~HCT_ever, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by HCT2 from treatment date.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS Treatment from treatment date (general HCT yes/no but no include drugs)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
# PFS from treatment date by HCT_ever x ISS; color = ISS, linetype = HCT.
myplot <- survfit(mysurv~HCT_ever+ISS, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by HCT2 ISS from treatment date.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS HCT from treatment (general HCT yes/no but no include drugs)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
color = "ISS",
linetype = "HCT_ever",
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
# PFS from treatment date by HCT_ever x CH_status; color = HCT, linetype = CH.
myplot <- survfit(mysurv~HCT_ever+CH_status, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/CHIP/PFS by HCT2 CH from treatment date.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS HCT from treatment (general HCT yes/no but no include drugs)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
color = "HCT_ever",
linetype = "CH_status",
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
# OS----
# Overall survival stratified by pfs_treatment.
mysurv <- Surv(time = germline_patient_surv$month_at_os, event = germline_patient_surv$os_event)
myplot <- survfit(mysurv~pfs_treatment, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/OS by Treatment.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "OS Treatment",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")),
# Censor
censor = TRUE
)
# dev.off()
# OS by pfs_treatment x CH_status; color = treatment, linetype = CH status.
myplot <- survfit(mysurv~pfs_treatment+CH_status, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/CHIP/OS Treatment CH.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "OS Treatment",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
color = "pfs_treatment",
linetype = "CH_status",
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")),
# Censor
censor = TRUE
)
# dev.off()
# OS stratified by HCT_ever ("HCT2": transplant status only, no drugs).
myplot <- survfit(mysurv~HCT_ever, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/Treatment/OS HCT2.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "OS HCT2 (general HCT yes/no but no include drugs)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
myplot <- survfit(mysurv~HCT_ever+ISS, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/Treatment/OS HCT2 ISS.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "OS HCT2 (general HCT yes/no but no include drugs)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
color = "ISS",
linetype = "HCT_ever",
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
myplot <- survfit(mysurv~HCT_ever+CH_status, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/CHIP/OS HCT2 CH.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "OS HCT2 (general HCT yes/no but no include drugs)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Treatment", "Treatment"),
# palette = c("darkred", "darkgreen", "grey"),
color = "HCT_ever",
linetype = "CH_status",
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
################################################################################### III ### PFS/OS rad date by Radiation----
# From Dx
# Kaplan-Meier PFS stratified by radiation status, time measured from diagnosis.
# NOTE(review): mysurv is built from columns pulled out of the data frame, so
# survfit() matches it to germline_patient_surv rows by position — the two must
# stay row-aligned (no intervening filtering).
mysurv <- Surv(time = germline_patient_surv$month_at_progression_Dx, event = germline_patient_surv$Progression_event)
myplot <- survfit(mysurv~pfs_radiation, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by Radiation from Dx.jpeg"), width = 1200, height = 900)
# Plot with log-rank p-value and an absolute+percent risk table below.
ggsurvplot(myplot, data = germline_patient_surv,
           title = "PFS Radiation from Dx (pfs_radiation is when rad <or= germline date)",
           font.main = c(24, "bold", "black"),
           font.x = c(20, "bold", "black"),
           font.y = c(20, "bold", "black"),
           font.legend = c(20, "bold", "black"),
           font.tickslab = c(18, "bold", "black"),
           size = 1.5,
           xlab = "Time in months",
           legend = "top",
           legend.title = "",
           # # legend.labs = c("No Radiation", "Radiation"),
           # palette = c("darkred", "darkgreen", "grey"),
           pval = TRUE,
           conf.int = FALSE,
           # Add risk table
           tables.height = 0.3,
           risk.table.title = "Risk table (number(%))",
           risk.table = "abs_pct",
           risk.table.y.text = FALSE,
           risk.table.fontsize = 6,
           tables.theme = theme_survminer(base_size = 5,
                                          font.main = c(16, "bold", "black"),
                                          font.x = c(16, "bold", "black"),
                                          font.y = c(16, "bold", "transparent"),
                                          font.tickslab = c(19, "bold", "black")
           ),
           # Censor
           censor = TRUE
)
# dev.off()
myplot <- survfit(mysurv~Radiation, data = germline_patient_surv)
## jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by Radiation.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS Radiation from Dx (general HCT yes/no)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Radiation", "Radiation"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
# From rad date
mysurv <- Surv(time = germline_patient_surv$month_at_progression_rad, event = germline_patient_surv$rad_progression_event)
myplot <- survfit(mysurv~pfs_radiation, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS by Radiation from rad date.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS Radiation from rad date (pfs_radiation is when rad <or= germline date)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Radiation", "Radiation"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
myplot <- survfit(mysurv~pfs_radiation+CH_status, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/CHIP/PFS by Radiation CH from rad date.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS Radiation from rad date (pfs_radiation is when rad <or= germline date)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# # legend.labs = c("No Radiation", "Radiation"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
# OS----
mysurv <- Surv(time = germline_patient_surv$month_at_os, event = germline_patient_surv$os_event)
myplot <- survfit(mysurv~pfs_radiation, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/OS by Radiation.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "OS Radiation (pfs_radiation is when rad <or= germline date)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# legend.labs = c("No Radiation", "Radiation"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")),
# Censor
censor = TRUE
)
# dev.off()
myplot <- survfit(mysurv~pfs_radiation+CH_status, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/CHIP/OS Rad by CH.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "OS Radiation (pfs_radiation is when rad <or= germline date)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# legend.labs = c("No Radiation", "Radiation"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")),
# Censor
censor = TRUE
)
# dev.off()
# Summary table of radiation variables by CH status (one row per patient),
# with p-values. The assignment is commented out, so the gt table is printed
# to the console/report rather than saved via gtsave() below.
# tbl <-
germline_patient_surv %>%
  distinct(avatar_id, .keep_all = TRUE) %>%
  select(CH_status, Radiation, pfs_radiation) %>%
  tbl_summary(by = CH_status,
              sort = list(everything() ~ "frequency")) %>% add_p() %>%
  as_gt()
# gt::gtsave(tbl, zoom = 1, paste0(path, "/Figures/CHIP/Radiation by CH.pdf"))
# Boxplot of the radiation-to-germline-sample interval (days), written to PDF.
pdf(paste0(path, "/Figures/CHIP/Days repartition interval_radiation_vs_germ.pdf"), height = 6, width = 9)
p <- germline_patient_data %>% # filter(!is.na(Race)) %>%
  # mutate(Race = factor(Race, levels=c("White", "Black", "Am Indian", "Asian", "More than one race", "Others", "Unknown"))) %>%
  ggplot(aes(y=interval_radiation_vs_germ)) +
  geom_boxplot(alpha = 0.5) + # color= c("#60136EFF", "#A92E5EFF", "#E65D2FFF")
  theme_minimal() +
  # scale_fill_brewer(palette="BuPu") +
  labs(y="Days", title="interval_radiation_vs_germ")
# BUG FIX: a ggplot assigned to a variable is never drawn unless printed, so
# the device opened above was being closed with nothing in it (empty PDF).
print(p)
# p + geom_jitter(shape=16, position=position_jitter(0.2)) #+
#   stat_compare_means()
dev.off()
######################################################################################################### By CH
# Dx
# mysurv <- Surv(time = germline_patient_surv$month_at_progression_Dx, event = germline_patient_surv$Progression_event)
# myplot <- survfit(mysurv~Radiation+CH_status, data = germline_patient_surv)
# # jpeg(paste0(path, "/Figures/Survivals/CHIP/PFS by CH Radiation from Dx.jpeg"), width = 1500, height = 900)
# ggsurvplot(myplot, data = germline_patient_surv,
# title = "PFS Radiation from Dx",
# font.main = c(24, "bold", "black"),
# font.x = c(20, "bold", "black"),
# font.y = c(20, "bold", "black"),
# font.legend = c(15, "bold", "black"),
# font.tickslab = c(18, "bold", "black"),
# size = 1.5,
#
# xlab = "Time in months",
# legend = "top",
# legend.title = "",
# # # legend.labs = c("No Radiation", "Radiation"),
# # palette = c("darkred", "darkgreen", "grey"),
# pval = TRUE,
# conf.int = FALSE,
# # Add risk table
# tables.height = 0.3,
# risk.table.title = "Risk table (number(%))",
# risk.table = "abs_pct",
# risk.table.y.text = FALSE,
# risk.table.fontsize = 6,
# tables.theme = theme_survminer(base_size = 5,
# font.main = c(16, "bold", "black"),
# font.x = c(16, "bold", "black"),
# font.y = c(16, "bold", "transparent"),
# font.tickslab = c(19, "bold", "black")
# ),
# # Censor
# censor = TRUE
# )
# # dev.off()
#
# myplot <- survfit(mysurv~Radiation_event+CH_status, data = germline_patient_surv)
# # jpeg(paste0(path, "/Figures/Survivals/CHIP/PFS by CH Radiation_event from Dx.jpeg"), width = 1200, height = 900)
# ggsurvplot(myplot, data = germline_patient_surv,
# title = "PFS Radiation from Dx",
# font.main = c(24, "bold", "black"),
# font.x = c(20, "bold", "black"),
# font.y = c(20, "bold", "black"),
# font.legend = c(10, "bold", "black"),
# font.tickslab = c(18, "bold", "black"),
# size = 1.5,
#
# xlab = "Time in months",
# legend = "top",
# legend.title = "",
# # # legend.labs = c("No Radiation", "Radiation"),
# # palette = c("darkred", "darkgreen", "grey"),
# pval = TRUE,
# conf.int = FALSE,
# # Add risk table
# tables.height = 0.3,
# risk.table.title = "Risk table (number(%))",
# risk.table = "abs_pct",
# risk.table.y.text = FALSE,
# risk.table.fontsize = 6,
# tables.theme = theme_survminer(base_size = 5,
# font.main = c(16, "bold", "black"),
# font.x = c(16, "bold", "black"),
# font.y = c(16, "bold", "transparent"),
# font.tickslab = c(19, "bold", "black")
# ),
# # Censor
# censor = TRUE
# )
# # dev.off()
################################################################################### IV ### PFS/OS drug date by Drug----
# drug yes/no----
# PFS
mysurv <- Surv(time = germline_patient_surv$month_at_progression_drug, event = germline_patient_surv$drug_progression_event)
myplot <- survfit(mysurv~pfs_drugs, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS Drugs from drug date.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS Drugs from drug date (pfs_drugs is when drugs <or= germline date)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# legend.labs = c("No Drugs", "Drugs"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")),
# Censor
censor = TRUE
)
# dev.off()
mysurv <- Surv(time = germline_patient_surv$month_at_progression_Dx, event = germline_patient_surv$Progression_event)
myplot <- survfit(mysurv~Drugs, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS Drugs from Dx.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "PFS Drugs from drug date (Drugs is general yes/no, need to do on MM only)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# legend.labs = c("No Drugs", "Drugs"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")),
# Censor
censor = TRUE
)
# dev.off()
# OS
mysurv <- Surv(time = germline_patient_surv$month_at_os, event = germline_patient_surv$os_event)
myplot <- survfit(mysurv~pfs_drugs, data = germline_patient_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/OS Drugs.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_surv,
title = "OS Drugs (pfs_drugs is when drugs <or= germline date)",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(20, "bold", "black"),
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "",
# legend.labs = c("No Drugs", "Drugs"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")),
# Censor
censor = TRUE
)
# dev.off()
# by regimen----
# Restrict to patients whose first MM regimen matches a curated list of
# regimen names (regex alternation; ^ anchors avoid partial matches like "Rd"
# inside other names).
# NOTE(review): the filter uses first_regimen_name_MM but survfit() below
# stratifies by first_regimen_name — confirm the two columns agree / that this
# mix is intentional.
germline_patient_drug_surv <- germline_patient_surv %>%
  filter(str_detect(first_regimen_name_MM, "No Drugs|VRd|Bor-Dex|^Rd|CyBorD or VCd|Dexamethasone|Lenalidomide|^Td|^KRd|Bortezomib|Melphalan|VAd|ABCD|D-RVd or dara-RVd|IRD"))
mysurv <- Surv(time = germline_patient_drug_surv$month_at_progression_drug, event = germline_patient_drug_surv$drug_progression_event)
myplot <- survfit(mysurv~first_regimen_name, data = germline_patient_drug_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/PFS Regimen from drugs date.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_drug_surv,
title = "PFS regimen from drug date",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(8, "bold", "black"), # 20
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "regimen_name",
# # legend.labs = c("Hipanic", "Non-Hispanic", "Unknown"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")
),
# Censor
censor = TRUE
)
# dev.off()
# OS
mysurv <- Surv(time = germline_patient_drug_surv$month_at_os, event = germline_patient_drug_surv$os_event)
myplot <- survfit(mysurv~first_regimen_name, data = germline_patient_drug_surv)
# jpeg(paste0(path, "/Figures/Survivals/Treatment/OS Regimen.jpeg"), width = 1200, height = 900)
ggsurvplot(myplot, data = germline_patient_drug_surv,
title = "OS regimen",
font.main = c(24, "bold", "black"),
font.x = c(20, "bold", "black"),
font.y = c(20, "bold", "black"),
font.legend = c(8, "bold", "black"), # 20
font.tickslab = c(18, "bold", "black"),
size = 1.5,
xlab = "Time in months",
legend = "top",
legend.title = "regimen_name",
# # legend.labs = c("Hipanic", "Non-Hispanic", "Unknown"),
# palette = c("darkred", "darkgreen", "grey"),
pval = TRUE,
conf.int = FALSE,
# Add risk table
tables.height = 0.3,
risk.table.title = "Risk table (number(%))",
risk.table = "abs_pct",
risk.table.y.text = FALSE,
risk.table.fontsize = 6,
tables.theme = theme_survminer(base_size = 5,
font.main = c(16, "bold", "black"),
font.x = c(16, "bold", "black"),
font.y = c(16, "bold", "transparent"),
font.tickslab = c(19, "bold", "black")),
# Censor
censor = TRUE
)
# dev.off()
|
context("nnm")
test_that("nnm.fit classification", {
x = iris[, 1:4]
y = iris[, 5]
mod = nnm.fit(x, y, list(Dense(4, 6), Dense(6, 3, Activation.Identity), Softmax))
expect_equal(length(mod$fitted), length(y))
})
test_that("nnm.fit regression", {
x = iris[, 1:3]
y = iris[, 4]
mod = nnm.fit(x, y, list(Dense(3, 6), Dense(6, 1, Activation.Identity)))
expect_equal(length(mod$fitted), length(y))
})
test_that("nnm.fit regression with parallel layers", {
x = iris[, 1:3]
y = iris[, 4]
layer1 = Parallel(Identity(1), Identity(2))
layer2 = Sequential(Dense(3, 6), Dense(6, 1, Activation.Identity))
mod = nnm.fit(x, y, list(layer1, layer2))
expect_equal(length(mod$fitted), length(y))
})
test_that("nnm with all numeric data.frame", {
x = iris[, 1:4]
y = iris[, 5]
mod = nnm(x, y, list(Dense(4, 6), Dense(6, 3, Activation.Identity), Softmax))
expect_equal(length(mod$fitted), length(y))
})
test_that("nnm with factor + numeric data.frame", {
x = iris[, 2:5]
y = iris[, 1]
layerSpec = list(Dense(4+2, 6), Dense(6, 1, Activation.Identity))
mod = nnm(x, y, layerSpec)
expect_equal(length(mod$fitted), length(y))
})
test_that("nnm with embedding factor + numeric data.frame", {
x = iris[, 2:5]
y = iris[, 1]
layerSpec = list(Dense(3+3, 6), Dense(6, 1, Activation.Identity))
mod = nnm(x, y, layerSpec, embeddingCols="Species", numEmbeddingDims=3)
expect_equal(length(mod$fitted), length(y))
})
test_that("nnm with embedding character + numeric data.frame", {
n <- 1000
x <- data.frame(x1 = rnorm(n),
x2 = sample(letters[1:10], size=n, replace=TRUE))
y <- x$x1 + rnorm(n)
layerSpec = list(Dense(1+4, 6), Dense(6, 1, Activation.Identity))
mod = nnm(x, y, layerSpec, embeddingCols="x2", numEmbeddingDims=4)
expect_equal(length(mod$fitted), length(y))
})
|
/tests/testthat/test_nnm.R
|
permissive
|
chuanwen/nnm
|
R
| false
| false
| 1,901
|
r
|
context("nnm")
test_that("nnm.fit classification", {
x = iris[, 1:4]
y = iris[, 5]
mod = nnm.fit(x, y, list(Dense(4, 6), Dense(6, 3, Activation.Identity), Softmax))
expect_equal(length(mod$fitted), length(y))
})
test_that("nnm.fit regression", {
x = iris[, 1:3]
y = iris[, 4]
mod = nnm.fit(x, y, list(Dense(3, 6), Dense(6, 1, Activation.Identity)))
expect_equal(length(mod$fitted), length(y))
})
test_that("nnm.fit regression with parallel layers", {
x = iris[, 1:3]
y = iris[, 4]
layer1 = Parallel(Identity(1), Identity(2))
layer2 = Sequential(Dense(3, 6), Dense(6, 1, Activation.Identity))
mod = nnm.fit(x, y, list(layer1, layer2))
expect_equal(length(mod$fitted), length(y))
})
test_that("nnm with all numeric data.frame", {
x = iris[, 1:4]
y = iris[, 5]
mod = nnm(x, y, list(Dense(4, 6), Dense(6, 3, Activation.Identity), Softmax))
expect_equal(length(mod$fitted), length(y))
})
test_that("nnm with factor + numeric data.frame", {
x = iris[, 2:5]
y = iris[, 1]
layerSpec = list(Dense(4+2, 6), Dense(6, 1, Activation.Identity))
mod = nnm(x, y, layerSpec)
expect_equal(length(mod$fitted), length(y))
})
test_that("nnm with embedding factor + numeric data.frame", {
x = iris[, 2:5]
y = iris[, 1]
layerSpec = list(Dense(3+3, 6), Dense(6, 1, Activation.Identity))
mod = nnm(x, y, layerSpec, embeddingCols="Species", numEmbeddingDims=3)
expect_equal(length(mod$fitted), length(y))
})
test_that("nnm with embedding character + numeric data.frame", {
n <- 1000
x <- data.frame(x1 = rnorm(n),
x2 = sample(letters[1:10], size=n, replace=TRUE))
y <- x$x1 + rnorm(n)
layerSpec = list(Dense(1+4, 6), Dense(6, 1, Activation.Identity))
mod = nnm(x, y, layerSpec, embeddingCols="x2", numEmbeddingDims=4)
expect_equal(length(mod$fitted), length(y))
})
|
# testthat tests for recipes' matrix composition in bake()/juice().
# NOTE(review): the 'okc' dataset was removed from recent recipes releases,
# and step_modeimpute()/step_meanimpute() were renamed (step_impute_mode/
# step_impute_mean) — confirm the pinned recipes version before running.
library(testthat)
library(recipes)
context("Matrix data types")
###################################################################
data(okc)
okc$diet <- as.factor(okc$diet)
okc$date <- as.Date(okc$date)
okc$location <- as.factor(okc$location)
okc_tr <- okc[1:400, ]
okc_te <- okc[(401:800), ]
###################################################################
# Shared fixture: impute nominal/numeric columns, dummy-encode two factors,
# and prep with retain = TRUE so juice() works.
rec <- recipe( ~ ., data = okc_tr) %>%
  step_modeimpute(all_nominal()) %>%
  step_meanimpute(all_numeric()) %>%
  step_dummy(location, diet) %>%
  prep(training = okc_tr, retain = TRUE)
###################################################################
test_that('correct types', {
  bake_default <- bake(rec, new_data = okc_te, all_numeric())
  bake_sparse <-
    bake(rec,
         new_data = okc_te,
         all_numeric(),
         composition = "matrix")
  bake_sparse_1d <-
    bake(rec,
         new_data = okc_te,
         age,
         composition = "matrix")
  juice_default <- juice(rec, all_numeric())
  juice_sparse <-
    juice(rec, all_numeric(), composition = "matrix")
  juice_sparse_1d <-
    juice(rec, age, composition = "matrix")
  expect_equal(class(bake_default), class(tibble()))
  expect_equal(class(juice_default), class(tibble()))
  # NOTE(review): under R >= 4.0, class(matrix) is c("matrix", "array"), so
  # comparing as.vector(class(...)) against the single string "matrix" fails;
  # these expectations assume R < 4.0 semantics — confirm.
  expect_equal(as.vector(class(bake_sparse)), "matrix")
  expect_equal(as.vector(class(juice_sparse)), "matrix")
  expect_equal(as.vector(class(bake_sparse_1d)), "matrix")
  expect_equal(as.vector(class(juice_sparse_1d)), "matrix")
  # ::: reaches an unexported helper; ties the test to recipes internals.
  expect_equal(recipes:::convert_matrix(bake_default, sparse = FALSE),
               bake_sparse)
  expect_equal(recipes:::convert_matrix(juice_default, sparse = FALSE),
               juice_sparse)
})
# Selecting no columns (or non-numeric ones) cannot form a matrix -> error.
test_that('bad args', {
  expect_error(bake(rec, new_data = okc_te, composition = "matrix"))
  expect_error(juice(rec, composition = "matrix"))
})
|
/tests/testthat/test_matrix.R
|
no_license
|
EmilHvitfeldt/recipes
|
R
| false
| false
| 1,827
|
r
|
library(testthat)
library(recipes)
context("Matrix data types")
###################################################################
data(okc)
okc$diet <- as.factor(okc$diet)
okc$date <- as.Date(okc$date)
okc$location <- as.factor(okc$location)
okc_tr <- okc[1:400, ]
okc_te <- okc[(401:800), ]
###################################################################
rec <- recipe( ~ ., data = okc_tr) %>%
step_modeimpute(all_nominal()) %>%
step_meanimpute(all_numeric()) %>%
step_dummy(location, diet) %>%
prep(training = okc_tr, retain = TRUE)
###################################################################
test_that('correct types', {
bake_default <- bake(rec, new_data = okc_te, all_numeric())
bake_sparse <-
bake(rec,
new_data = okc_te,
all_numeric(),
composition = "matrix")
bake_sparse_1d <-
bake(rec,
new_data = okc_te,
age,
composition = "matrix")
juice_default <- juice(rec, all_numeric())
juice_sparse <-
juice(rec, all_numeric(), composition = "matrix")
juice_sparse_1d <-
juice(rec, age, composition = "matrix")
expect_equal(class(bake_default), class(tibble()))
expect_equal(class(juice_default), class(tibble()))
expect_equal(as.vector(class(bake_sparse)), "matrix")
expect_equal(as.vector(class(juice_sparse)), "matrix")
expect_equal(as.vector(class(bake_sparse_1d)), "matrix")
expect_equal(as.vector(class(juice_sparse_1d)), "matrix")
expect_equal(recipes:::convert_matrix(bake_default, sparse = FALSE),
bake_sparse)
expect_equal(recipes:::convert_matrix(juice_default, sparse = FALSE),
juice_sparse)
})
test_that('bad args', {
expect_error(bake(rec, new_data = okc_te, composition = "matrix"))
expect_error(juice(rec, composition = "matrix"))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/route53resolver_operations.R
\name{route53resolver_get_resolver_rule_policy}
\alias{route53resolver_get_resolver_rule_policy}
\title{Gets information about a resolver rule policy}
\usage{
route53resolver_get_resolver_rule_policy(Arn)
}
\arguments{
\item{Arn}{[required] The ID of the resolver rule policy that you want to get information
about.}
}
\description{
Gets information about a resolver rule policy. A resolver rule policy
specifies the Resolver operations and resources that you want to allow
another AWS account to be able to use.
}
\section{Request syntax}{
\preformatted{svc$get_resolver_rule_policy(
Arn = "string"
)
}
}
\keyword{internal}
|
/paws/man/route53resolver_get_resolver_rule_policy.Rd
|
permissive
|
johnnytommy/paws
|
R
| false
| true
| 735
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/route53resolver_operations.R
\name{route53resolver_get_resolver_rule_policy}
\alias{route53resolver_get_resolver_rule_policy}
\title{Gets information about a resolver rule policy}
\usage{
route53resolver_get_resolver_rule_policy(Arn)
}
\arguments{
\item{Arn}{[required] The ID of the resolver rule policy that you want to get information
about.}
}
\description{
Gets information about a resolver rule policy. A resolver rule policy
specifies the Resolver operations and resources that you want to allow
another AWS account to be able to use.
}
\section{Request syntax}{
\preformatted{svc$get_resolver_rule_policy(
Arn = "string"
)
}
}
\keyword{internal}
|
# Package-level namespace directives and pipe re-exports.
#' @import stats
#' @import ggplot2
precision = getFromNamespace("precision", "scales")
# NOTE(review): getFromNamespace() pulls the unexported scales:::precision();
# this can break silently when 'scales' changes its internals — confirm on
# dependency upgrades.
#' @importFrom magrittr %>%
#' @export
magrittr::`%>%`
#' @importFrom magrittr %<>%
#' @export
magrittr::`%<>%`
#' Parent Function: Power of a number.
#'
#' Internal function factory: builds a closure that raises its argument to a
#' fixed exponent. Not exported; used to generate the exported transformation
#' helpers (\code{rsqr}, \code{rec}, \code{rsqrt}, \code{sqr}, \code{cube}).
#'
#' @param exponent The exponent.
#' @return A function of one argument \code{x} returning \code{x ^ exponent}.
#' @examples
#'
#' \dontrun{
#' square <- power(2)
#' square_root <- power(.5)
#'
#' square(2) #4
#' square_root(4) #2
#' }
power <- function(exponent) {
  # Force the promise so the closure captures the *value* of `exponent` at
  # creation time; otherwise a later change to the caller's variable before
  # the first call would silently change the generated function.
  force(exponent)
  function(x) {
    x^exponent
  }
}
#' Reciprocal of the square of a number
#'
#' Computes \code{1 / x^2}.
#'
#' @param x a numeric vector or array
#' @export
#' @examples
#'
#' rsqr(2)
#' rsqr(1:10)
rsqr <- function(x) {
  x^(-2)
}
#' Reciprocal (1/x) of a number
#'
#' @param x a numeric vector or array
#' @export
#' @examples
#'
#' rec(2)
#' rec(1:10)
rec <- function(x) {
  x^(-1)
}
#' Reciprocal of the square root of a number
#'
#' Computes \code{1 / sqrt(x)}.
#'
#' @param x a numeric vector or array
#' @export
#' @examples
#'
#' rsqrt(4)
#' rsqrt(1:10)
rsqrt <- function(x) {
  x^(-0.5)
}
#' Square of a number
#'
#' @param x a numeric vector or array
#' @export
#' @examples
#'
#' sqr(2)
#' sqr(1:10)
sqr <- function(x) {
  x^2
}
#' Cube of a number
#'
#' @param x a numeric vector or array
#' @export
#' @examples
#'
#' cube(2)
cube <- function(x) {
  x^3
}
#' Returns the inverse of the function f (character)
#'
#' @param func a function of the box-cox family (rsqr(), rec(), rsqrt(), log(),
#' cubroot(), sqrt(), I() and sqr())
#' @return the inverse function, in character format (\code{NULL} when
#'   \code{func} is not one of the known names).
#' @export
#' @examples
#' invFunc("log")
invFunc <- function(func){
  # Lookup table pairing each box-cox transform with its inverse; "rec" and
  # "identity" are self-inverse. `[[` on a list returns NULL for an unknown
  # name, matching the behaviour of an unmatched switch().
  inverses <- list(
    rsqr     = "rsqrt",
    rec      = "rec",
    rsqrt    = "rsqr",
    log      = "exp",
    cubroot  = "cube",
    sqrt     = "sqr",
    identity = "identity",
    sqr      = "sqrt"
  )
  inverses[[func]]
}
#' Returns a vector generated with the inverse of the function f
#'
#' @param x A vector or object of type
#' @param func a function of the box-cox family (rsqr(), rec(), rsqrt(), log(),
#' cubroot(), sqrt(), I() and sqr())
#' @return \code{x} back-transformed by the inverse of \code{func}
#' @export
#' @examples
#' inverse(rsqr(10), "rsqr")
#' inverse(rec(10), "rec")
#' inverse(rsqrt(10), "rsqrt")
#' inverse(log(1), "log")
#' inverse(sqrt(4), "sqrt")
#' inverse(sqr(4), "sqr")
#' dados <- st_drop_geometry(centro_2015)
#' fit <- lm(log(valor) ~ ., data = dados)
#' aval <- new_data(fit)
#' Y <- predict(fit, newdata = aval, interval = "confidence")
#' inverse(Y, "log")
#'
inverse <- function(x, func) {
  # Resolve the inverse *function* for this transform name, then apply it.
  f <- switch(func,
              rsqr = appraiseR::rsqrt,
              rec = appraiseR::rec,
              rsqrt = appraiseR::rsqr,
              log = exp,
              cubroot = appraiseR::cube,
              sqrt = appraiseR::sqr,
              identity = identity,
              sqr = sqrt
  )
  # An unmatched switch() yields NULL; keep that contract for unknown names.
  if (is.null(f)) NULL else f(x)
}
# S3 generic; see centre.numeric() and centre.factor() below for the methods.
#' Return the median value or the modal value of a vector, depending on whether
#' the vector is numeric or a factor.
#'
#' @inheritParams stats::median
#' @return the median value for objects of the class integer or double. The
#' modal value for objects of class factor.
#' @name centre
#' @export
centre <- function(x, ...) UseMethod("centre")
#' @rdname centre
#' @examples
#' vec <- c(-3, -2, 0, 1, 1, 3)
#' centre(vec)
#' @export
centre.numeric <- function(x, na.rm = TRUE, ...) {
  # The centre of a numeric vector is its median; `...` is forwarded on.
  stats::median(x, na.rm = na.rm, ...)
}
#' @rdname centre
#' @examples
#' vec <- c(-3, -2, 0, 1, 1, 3)
#' vec <- as.factor(vec)
#' centre(vec)
#' dados <- st_drop_geometry(centro_2015)
#' centre(dados$padrao)
#' @export
centre.factor <- function(x, na.rm = TRUE, ...){
  # The centre of a factor is its modal (most frequent) level.
  # NOTE(review): `...` is intentionally not forwarded to raster::modal().
  raster::modal(x, na.rm = na.rm)
}
# S3 generic; methods are provided for "lm" and "bestfit" objects below.
#' Extract object parameters
#'
#' Returns the parameters used to build a model.
#'
#' @param object A model object.
#' @param \dots not used.
#' @return the parameters, predictors and response names besides the
#' original data used to build the model.
#' @name parameters
#' @export
parameters <- function(object, ...) {
  UseMethod("parameters")
}
#' @rdname parameters
#' @examples
#' dados <- centro_2015@data
#' fit <- lm(log(valor) ~ ., dados)
#' p <- parameters(fit)
#' p$parameters
#' p$predictors
#' p$response
#' p$data
#' @export
#'
parameters.lm <- function(object, ...) {
  z <- object
  cl <- stats::getCall(z)
  myformula <- stats::formula(z)
  # Evaluate the call's data argument in the formula's environment — the same
  # strategy as parameters.bestfit() — so lookup succeeds even when the data
  # object is not visible from this function's own scope.
  data <- eval(cl$data, environment(myformula))
  vars <- all.vars(myformula)
  # Response = variables on the LHS of the formula; predictors = the rest.
  resp <- all.vars(update(myformula, . ~ 1))
  preds <- setdiff(vars, resp)
  param <-
    list(parameters = c(resp, preds),
         predictors = preds,
         response = resp,
         lhs = myformula[[2]],
         rhs = myformula[[3]],
         data = data,
         call = cl)
  return(param)
}
#' @rdname parameters
#' @examples
#' dados <- st_drop_geometry(centro_2015)
#' best_fit <- bestfit(valor ~ ., dados)
#' parameters(best_fit)
#' @export
#'
parameters.bestfit <- function(object, ...) {
  # A bestfit object already stores its response/predictor names, so the
  # formula does not have to be re-parsed here.
  cl <- object$call
  resp <- object$response
  preds <- object$predictors
  list(
    parameters = c(resp, preds),
    predictors = preds,
    response = resp,
    data = eval(cl$data, environment(stats::formula(object))),
    call = cl
  )
}
#' Builds \code{newdata} argument to be used in \link{predict.lm}
#'
#' Builds a new \code{data.frame} containing only elements
#' to be appraised from the current \code{data.frame}
#'
#' @param object object of class \code{lm}
#' @return a data frame with the rows whose response value is \code{NA}
#'   (i.e. the elements still to be appraised), restricted to the model's
#'   variables.
#' @examples
#' dados <- st_drop_geometry(centro_2015)
#' fit <- lm(log(valor) ~ ., data = dados)
#' new_data(fit)
#' @export
new_data <- function(object) {
  params <- parameters(object)
  response <- as.name(params$response)
  data <- params$data
  # Rows without a response value are the ones to be appraised.
  # all_of() avoids tidyselect's deprecated (and ambiguous) use of a bare
  # external character vector inside select().
  data %>%
    dplyr::filter(is.na(!!response)) %>%
    dplyr::select(dplyr::all_of(params$parameters))
}
#' Wrapper around format
#'
#' This function is only a wrapper around \link{format} function that uses standard
#' brazillian formats by default
#'
#' @param x a number to be formatted by \code{format}
#' @inheritParams base::format
#' @export
brformat <- function(x, decimal.mark = ",", big.mark = ".", digits = 2,
                     nsmall = 2, scientific = FALSE, ...) {
  # Delegate straight to base::format(), merely swapping in Brazilian
  # decimal/thousands separators as the defaults.
  format(
    x,
    decimal.mark = decimal.mark,
    big.mark = big.mark,
    digits = digits,
    nsmall = nsmall,
    scientific = scientific,
    ...
  )
}
#' @rdname brformat
#' @export
brf <- brformat
#' Wrapper around brformat
#'
#' This is a wrapper around \link{brformat}.
#'
#' @param x a real number
#' @param prefix currency units. Defaults for brazilian reais.
#' @param \ldots further arguments to be passed to \link{brformat}.
#' @return text in Reais.
#' @examples
#' Reais(100)
#' @export
#'
Reais <- function(x, prefix = "R$ ", ...) {
  # Prepend the currency symbol to the Brazilian-formatted number.
  paste0(prefix, brformat(x, ...))
}
#' @rdname Reais
#' @examples
#' library(ggplot2)
#' p <- ggplot(centro_2015@data, aes(x = area_total, y = valor)) +
#'   geom_point(na.rm = TRUE)
#' p + scale_y_continuous(labels = reais(nsmall = 0))
#' @export
# Factory: returns a one-argument labeller function (e.g. for ggplot2 scales)
# that formats its input with Reais(), forwarding the arguments captured here.
reais <- function(...) {
  function(x) Reais(x, ...)
}
#' Write in percentage form
#'
#' This function transforms any number to percentage format for reporting purposes.
#' @param x a real number
#' @param \dots further arguments passed on to \link{brformat} (via \code{brf}).
#' @return a character vector: \code{x} multiplied by 100, Brazilian-formatted,
#'   with a "%" suffix. An empty input yields \code{character()}.
#' @examples
#' porcento(0.25)
#' pct(0.25)
#' @export
#'
porcento <- function (x, ...) {
  if (length(x) == 0)
    return(character())
  # Scale to percent and format with Brazilian separators.
  x <- brf(100*x, ...)
  paste0(x, "%")
}
#' @rdname porcento
#' @export
pct <- porcento
#' Regression equation
#'
#' Given a \link{lm} object, returns its regression equation
#' @param object object of class \code{lm}
#' @param type the equation type required: regression (reg) or estimation (est).
#' @param inline the equation mode. TRUE for inline equations or FALSE for
#' displayed mode.
#' @param func transformation applied to dependent variable. Required when
#' \code{type = "est"}.
#' @param accuracy number to round to; for POSIXct objects, a number of seconds
#' @param f rounding function: floor, ceiling or round
#' @param errorTerm should the error term be appended to the regression
#' equation? Defaults to TRUE. Only used when \code{type = "reg"}.
#' @return invisibly \code{NULL}; the LaTeX equation is written with
#' \code{cat()} as a side effect.
#' @examples
#' dados <- st_drop_geometry(centro_2015)
#' fit <- lm(log(valor) ~ ., dados)
#' equacoes(fit)
#' equacoes(fit, accuracy = 1)
#' @export
equacoes <- function(object, type = c("reg", "est"), inline = TRUE, func,
                     accuracy = 100, f = round, errorTerm = TRUE){
  z <- object
  parametros <- parameters(z)
  type <- match.arg(type)
  # LHS: the (possibly transformed) response term for "reg", the plain
  # response name for "est".
  if (type == "reg") {
    lhs <- format(parametros$lhs)
  } else {
    lhs <- parametros$response
  }
  # RHS: rounded coefficients joined in LaTeX notation.
  coefs <- coef(z)
  coefs <- plyr::round_any(coefs, accuracy, f)
  rhs <- paste(coefs[1], "+", paste(coefs[-1], "\\cdot",
                                    names(coefs[-1]),
                                    collapse = " + ")
  )
  rhs <- gsub("\\_", "\\\\_", rhs)  # escape underscores for LaTeX
  rhs <- gsub("\\+ -", "- ", rhs)   # "+ -3" -> "- 3"
  rhs <- gsub("[.]", ",", rhs)      # Brazilian decimal mark
  if (type == "reg") {
    Formula <- paste(lhs, "=", rhs)
  } else if (!missing(func)) {
    Formula <- paste(lhs, " = ", appraiseR::invFunc(func), "(", rhs, ")",
                     sep = "")
  } else {
    # Previously only a message() was emitted and the cat() below then failed
    # with "object 'Formula' not found"; fail with a clear error instead.
    stop("Estimation equation requested but no transformation passed.",
         call. = FALSE)
  }
  if (type == "reg" && errorTerm == TRUE) {
    Formula <- paste(Formula, "+ \\varepsilon")
  }
  if (inline == TRUE) {
    cat('$', Formula, '$', sep = "")
  } else {
    cat('$$', Formula, '$$', sep = "")
  }
}
|
/R/helpers.R
|
permissive
|
lfpdroubi/appraiseR
|
R
| false
| false
| 9,515
|
r
|
#' @import stats
#' @import ggplot2
# Use `<-` for top-level assignment (tidyverse style). getFromNamespace()
# pulls a non-exported helper out of the scales package.
precision <- getFromNamespace("precision", "scales")
#' @importFrom magrittr %>%
#' @export
magrittr::`%>%`
#' @importFrom magrittr %<>%
#' @export
magrittr::`%<>%`
#' Parent Function: Power of a number.
#'
#' This is an internal function used to generate the exported power helpers.
#' It is not exported in NAMESPACE and therefore not available to the
#' end-user.
#'
#' @param exponent The exponent.
#' @return A closure that raises its single argument to \code{exponent}.
#' @examples
#'
#' \dontrun{
#' square <- power(2)
#' square_root <- power(.5)
#'
#' square(2) # 4
#' square_root(4) # 2
#' }
power <- function(exponent) {
  # Function factory: `exponent` is captured in the returned closure.
  function(x) x^exponent
}
#' Reciprocal of the square of a number
#'
#' @param x a numeric vector or array
#' @return \code{1 / x^2}, elementwise.
#' @export
#' @examples
#'
#' rsqr(2)
#' rsqr(1:10)
rsqr <- function(x) x^(-2)
#' Reciprocal (1/x) of a number
#'
#' @param x a numeric vector or array
#' @return \code{1 / x}, elementwise.
#' @export
#' @examples
#'
#' rec(2)
#' rec(1:10)
rec <- function(x) x^(-1)
#' Reciprocal of the square root of a number
#'
#' @param x a numeric vector or array
#' @return \code{1 / sqrt(x)}, elementwise.
#' @export
#' @examples
#'
#' rsqrt(4)
#' rsqrt(1:10)
rsqrt <- function(x) x^(-0.5)
#' Square of a number
#'
#' @param x a numeric vector or array
#' @return \code{x^2}, elementwise.
#' @export
#' @examples
#'
#' sqr(2)
#' sqr(1:10)
sqr <- function(x) x^2
#' Cube of a number
#'
#' @param x a numeric vector or array
#' @return \code{x^3}, elementwise.
#' @export
#' @examples
#'
#' cube(2)
cube <- function(x) x^3
#' Returns the inverse of the function f (character)
#'
#' @param func name (character) of a function of the box-cox family ("rsqr",
#'   "rec", "rsqrt", "log", "cubroot", "sqrt", "identity" or "sqr").
#' @return the name of the inverse function, in character format.
#' @export
#' @examples
#' invFunc("log")
invFunc <- function(func){
  switch(func,
         rsqr = "rsqrt",
         rec = "rec",
         rsqrt = "rsqr",
         log = "exp",
         cubroot = "cube",
         sqrt = "sqr",
         identity = "identity",
         sqr = "sqrt",
         # default branch: fail loudly instead of returning NULL invisibly
         stop("Unknown transformation: ", func, call. = FALSE)
  )
}
#' Apply the inverse of a box-cox family transformation to a vector
#'
#' @param x A vector (or matrix, e.g. the result of \code{predict}) of
#'   transformed values.
#' @param func name (character) of the transformation originally applied:
#'   one of "rsqr", "rec", "rsqrt", "log", "cubroot", "sqrt", "identity"
#'   or "sqr".
#' @return \code{x} back-transformed to the original scale.
#' @export
#' @examples
#' inverse(rsqr(10), "rsqr")
#' inverse(rec(10), "rec")
#' inverse(rsqrt(10), "rsqrt")
#' inverse(log(1), "log")
#' inverse(sqrt(4), "sqrt")
#' inverse(sqr(4), "sqr")
#' dados <- st_drop_geometry(centro_2015)
#' fit <- lm(log(valor) ~ ., data = dados)
#' aval <- new_data(fit)
#' Y <- predict(fit, newdata = aval, interval = "confidence")
#' inverse(Y, "log")
#'
inverse <- function(x, func) {
  switch(func,
         rsqr = appraiseR::rsqrt(x),
         rec = appraiseR::rec(x),
         rsqrt = appraiseR::rsqr(x),
         log = exp(x),
         cubroot = appraiseR::cube(x),
         sqrt = appraiseR::sqr(x),
         identity = identity(x),
         sqr = sqrt(x),
         # default branch: fail loudly instead of returning NULL invisibly
         stop("Unknown transformation: ", func, call. = FALSE)
  )
}
#' Return the median value or the modal value of a vector, depending on whether
#' the vector is numeric or a factor.
#'
#' @inheritParams stats::median
#' @return the median value for objects of the class integer or double. The
#' modal value for objects of class factor.
#' @name centre
#' @export
# S3 generic: dispatches to centre.numeric() or centre.factor().
centre <- function(x, ...) UseMethod("centre")
#' @rdname centre
#' @examples
#' vec <- c(-3, -2, 0, 1, 1, 3)
#' centre(vec)
#' @export
centre.numeric <- function(x, na.rm = TRUE, ...) {
  # Numeric vectors are summarised by their median.
  stats::median(x, na.rm = na.rm, ...)
}
#' @rdname centre
#' @examples
#' vec <- c(-3, -2, 0, 1, 1, 3)
#' vec <- as.factor(vec)
#' centre(vec)
#' dados <- st_drop_geometry(centro_2015)
#' centre(dados$padrao)
#' @export
centre.factor <- function(x, na.rm = TRUE, ...){
  # Factors are summarised by their modal (most frequent) level.
  raster::modal(x, na.rm = na.rm)
}
#' Extract object parameters
#'
#' Returns the parameters used to build a model.
#'
#' @param object A model object.
#' @param \dots not used.
#' @return the parameters, predictors and response names besides the
#'   original data used to build the model.
#' @name parameters
#' @export
# S3 generic: dispatches on the model class (lm, bestfit, ...).
parameters <- function(object, ...) {
  UseMethod("parameters")
}
#' @rdname parameters
#' @examples
#' dados <- centro_2015@data
#' fit <- lm(log(valor) ~ ., dados)
#' p <- parameters(fit)
#' p$parameters
#' p$predictors
#' p$response
#' p$data
#' @export
#'
parameters.lm <- function(object, ...) {
  z <- object
  cl <- stats::getCall(z)
  myformula <- stats::formula(z)
  # Evaluate the call's data argument in the formula's environment (where the
  # model was fitted) rather than in the current frame, consistent with
  # parameters.bestfit().
  data <- eval(cl$data, environment(myformula))
  vars <- all.vars(myformula)
  # Response = variables on the LHS; predictors = everything else.
  resp <- all.vars(update(myformula, . ~ 1))
  preds <- setdiff(vars, resp)
  param <-
    list(parameters = c(resp, preds),
         predictors = preds,
         response = resp,
         lhs = myformula[[2]],
         rhs = myformula[[3]],
         data = data,
         call = cl)
  return(param)
}
#' @rdname parameters
#' @examples
#' dados <- st_drop_geometry(centro_2015)
#' best_fit <- bestfit(valor ~ ., dados)
#' parameters(best_fit)
#' @export
#'
parameters.bestfit <- function(object, ...) {
  # A bestfit object already stores its response/predictor names, so the
  # formula does not have to be re-parsed here.
  cl <- object$call
  resp <- object$response
  preds <- object$predictors
  list(
    parameters = c(resp, preds),
    predictors = preds,
    response = resp,
    data = eval(cl$data, environment(stats::formula(object))),
    call = cl
  )
}
#' Builds \code{newdata} argument to be used in \link{predict.lm}
#'
#' Builds a new \code{data.frame} containing only elements
#' to be appraised from the current \code{data.frame}
#'
#' @param object object of class \code{lm}
#' @return a data frame with the rows whose response value is \code{NA}
#'   (i.e. the elements still to be appraised), restricted to the model's
#'   variables.
#' @examples
#' dados <- st_drop_geometry(centro_2015)
#' fit <- lm(log(valor) ~ ., data = dados)
#' new_data(fit)
#' @export
new_data <- function(object) {
  params <- parameters(object)
  response <- as.name(params$response)
  data <- params$data
  # Rows without a response value are the ones to be appraised.
  # all_of() avoids tidyselect's deprecated (and ambiguous) use of a bare
  # external character vector inside select().
  data %>%
    dplyr::filter(is.na(!!response)) %>%
    dplyr::select(dplyr::all_of(params$parameters))
}
#' Wrapper around format
#'
#' This function is only a wrapper around \link{format} function that uses standard
#' brazillian formats by default
#'
#' @param x a number to be formatted by \code{format}
#' @inheritParams base::format
#' @export
brformat <- function(x, decimal.mark = ",", big.mark = ".", digits = 2,
                     nsmall = 2, scientific = FALSE, ...) {
  # Delegate straight to base::format(), merely swapping in Brazilian
  # decimal/thousands separators as the defaults.
  format(
    x,
    decimal.mark = decimal.mark,
    big.mark = big.mark,
    digits = digits,
    nsmall = nsmall,
    scientific = scientific,
    ...
  )
}
#' @rdname brformat
#' @export
brf <- brformat
#' Wrapper around brformat
#'
#' This is a wrapper around \link{brformat}.
#'
#' @param x a real number
#' @param prefix currency units. Defaults for brazilian reais.
#' @param \ldots further arguments to be passed to \link{brformat}.
#' @return text in Reais.
#' @examples
#' Reais(100)
#' @export
#'
Reais <- function(x, prefix = "R$ ", ...) {
  # Prepend the currency symbol to the Brazilian-formatted number.
  paste0(prefix, brformat(x, ...))
}
#' @rdname Reais
#' @examples
#' library(ggplot2)
#' p <- ggplot(centro_2015@data, aes(x = area_total, y = valor)) +
#'   geom_point(na.rm = TRUE)
#' p + scale_y_continuous(labels = reais(nsmall = 0))
#' @export
# Factory: returns a one-argument labeller function (e.g. for ggplot2 scales)
# that formats its input with Reais(), forwarding the arguments captured here.
reais <- function(...) {
  function(x) Reais(x, ...)
}
#' Write in percentage form
#'
#' This function transforms any number to percentage format for reporting purposes.
#' @param x a real number
#' @param \dots further arguments passed on to \link{brformat} (via \code{brf}).
#' @return a character vector: \code{x} multiplied by 100, Brazilian-formatted,
#'   with a "%" suffix. An empty input yields \code{character()}.
#' @examples
#' porcento(0.25)
#' pct(0.25)
#' @export
#'
porcento <- function (x, ...) {
  if (length(x) == 0)
    return(character())
  # Scale to percent and format with Brazilian separators.
  x <- brf(100*x, ...)
  paste0(x, "%")
}
#' @rdname porcento
#' @export
pct <- porcento
#' Regression equation
#'
#' Given a \link{lm} object, returns its regression equation
#' @param object object of class \code{lm}
#' @param type the equation type required: regression (reg) or estimation (est).
#' @param inline the equation mode. TRUE for inline equations or FALSE for
#' displayed mode.
#' @param func transformation applied to dependent variable. Required when
#' \code{type = "est"}.
#' @param accuracy number to round to; for POSIXct objects, a number of seconds
#' @param f rounding function: floor, ceiling or round
#' @param errorTerm should the error term be appended to the regression
#' equation? Defaults to TRUE. Only used when \code{type = "reg"}.
#' @return invisibly \code{NULL}; the LaTeX equation is written with
#' \code{cat()} as a side effect.
#' @examples
#' dados <- st_drop_geometry(centro_2015)
#' fit <- lm(log(valor) ~ ., dados)
#' equacoes(fit)
#' equacoes(fit, accuracy = 1)
#' @export
equacoes <- function(object, type = c("reg", "est"), inline = TRUE, func,
                     accuracy = 100, f = round, errorTerm = TRUE){
  z <- object
  parametros <- parameters(z)
  type <- match.arg(type)
  # LHS: the (possibly transformed) response term for "reg", the plain
  # response name for "est".
  if (type == "reg") {
    lhs <- format(parametros$lhs)
  } else {
    lhs <- parametros$response
  }
  # RHS: rounded coefficients joined in LaTeX notation.
  coefs <- coef(z)
  coefs <- plyr::round_any(coefs, accuracy, f)
  rhs <- paste(coefs[1], "+", paste(coefs[-1], "\\cdot",
                                    names(coefs[-1]),
                                    collapse = " + ")
  )
  rhs <- gsub("\\_", "\\\\_", rhs)  # escape underscores for LaTeX
  rhs <- gsub("\\+ -", "- ", rhs)   # "+ -3" -> "- 3"
  rhs <- gsub("[.]", ",", rhs)      # Brazilian decimal mark
  if (type == "reg") {
    Formula <- paste(lhs, "=", rhs)
  } else if (!missing(func)) {
    Formula <- paste(lhs, " = ", appraiseR::invFunc(func), "(", rhs, ")",
                     sep = "")
  } else {
    # Previously only a message() was emitted and the cat() below then failed
    # with "object 'Formula' not found"; fail with a clear error instead.
    stop("Estimation equation requested but no transformation passed.",
         call. = FALSE)
  }
  if (type == "reg" && errorTerm == TRUE) {
    Formula <- paste(Formula, "+ \\varepsilon")
  }
  if (inline == TRUE) {
    cat('$', Formula, '$', sep = "")
  } else {
    cat('$$', Formula, '$$', sep = "")
  }
}
|
/models/KNN_model.R
|
no_license
|
Wynnlin329/R_EDA_minning
|
R
| false
| false
| 725
|
r
| ||
#************** INTRODUCTION TO STATISTICS **********************************
# Types of data: quantitative, qualitative, logical, missing, other types.
# Quantitative: Discrete   -- takes values in a finite, countable set.
#               Continuous -- takes values in an interval of numbers
#                             (decimal values).
# R ships with example data in the datasets package; for a complete list use
#   library(help = datasets)
# Let's inspect the structure of some of them:
str(precip)       # continuous values
str(rivers)       # discrete values
str(discoveries)  # time-series data (discrete)
# Central tendency: mean(), median()
# Spread: var(), sd(), IQR(), min(), max(), range()
# Categorical summaries: table(mpg$drv)
|
/ch4_sumarize_data.R
|
no_license
|
hsimspace/statistics_with_r
|
R
| false
| false
| 702
|
r
|
#************** INTRODUCTION TO STATISTICS **********************************
# Types of data: quantitative, qualitative, logical, missing, other types.
# Quantitative: Discrete   -- takes values in a finite, countable set.
#               Continuous -- takes values in an interval of numbers
#                             (decimal values).
# R ships with example data in the datasets package; for a complete list use
#   library(help = datasets)
# Let's inspect the structure of some of them:
str(precip)       # continuous values
str(rivers)       # discrete values
str(discoveries)  # time-series data (discrete)
# Central tendency: mean(), median()
# Spread: var(), sd(), IQR(), min(), max(), range()
# Categorical summaries: table(mpg$drv)
|
<html>
<head>
<meta name="TextLength" content="SENT_NUM:4, WORD_NUM:92">
</head>
<body bgcolor="white">
<a href="#0" id="0">Tass said 748 of the injured had to be hospitalized and said a search was continuing for a police officer who vanished June 7.</a>
<a href="#1" id="1">It was the latest in a series of bloody civil conflicts to trouble President Mikhail S. Gorbachev's government.</a>
<a href="#2" id="2">He said in Moscow on Tuesday that ministry troops had to be used in 13 regions of the Soviet Union because of such outbreaks, Tass said.</a>
<a href="#3" id="3">Besides unrest this month in Kirghizia and Uzbekistan, ethnic violence also has been reported in recent years in Georgia, Armenia, Azerbaijan, Kazakhstan and Tadzhikistan.</a>
</body>
</html>
|
/DUC-Dataset/Summary_p100_R/D105.AP900613-0195.html.R
|
no_license
|
Angela7126/SLNSumEval
|
R
| false
| false
| 763
|
r
|
<html>
<head>
<meta name="TextLength" content="SENT_NUM:4, WORD_NUM:92">
</head>
<body bgcolor="white">
<a href="#0" id="0">Tass said 748 of the injured had to be hospitalized and said a search was continuing for a police officer who vanished June 7.</a>
<a href="#1" id="1">It was the latest in a series of bloody civil conflicts to trouble President Mikhail S. Gorbachev's government.</a>
<a href="#2" id="2">He said in Moscow on Tuesday that ministry troops had to be used in 13 regions of the Soviet Union because of such outbreaks, Tass said.</a>
<a href="#3" id="3">Besides unrest this month in Kirghizia and Uzbekistan, ethnic violence also has been reported in recent years in Georgia, Armenia, Azerbaijan, Kazakhstan and Tadzhikistan.</a>
</body>
</html>
|
# Tests for gradethis' `allow_partial_matching` option: by default and with
# allow_partial_matching = TRUE, partially matched argument names in student
# code are accepted; with FALSE they are flagged through
# message_pmatches_argument_name(). Each scenario is exercised for both
# grade_code() and grade_this_code().

# Two partially matched names at once (quie -> quiet, rat -> rate).
test_that("grade_code() - allow_partial_matching works 2 errors", {
  expect_grade_code(
    user_code = "purrr::insistently(mean,quie = TRUE,rat = rate_backoff())",
    solution_code = "purrr::insistently(mean,quiet = TRUE,rate = rate_backoff())",
    is_correct = TRUE
  )
  expect_grade_code(
    user_code = "purrr::insistently(mean,quie = TRUE,rat = rate_backoff())",
    solution_code = "purrr::insistently(mean,quiet = TRUE,rate = rate_backoff())",
    allow_partial_matching = TRUE,
    is_correct = TRUE
  )
  expect_grade_code(
    user_code = "purrr::insistently(mean,quie = TRUE,rat = rate_backoff())",
    solution_code = "purrr::insistently(mean,quiet = TRUE,rate = rate_backoff())",
    allow_partial_matching = FALSE,
    is_correct = FALSE,
    msg = message_pmatches_argument_name(
      submitted_call = "purrr::insistently",
      submitted = list(quote(TRUE), quote(rate_backoff())),
      submitted_name = c("quie", "rat"),
      solution_name = c("quiet", "rate")
    )
  )
})
# Same two-error scenario, via grade_this_code().
test_that("grade_this_code() - allow_partial_matching works 2 errors", {
  expect_this_code(
    user_code = "purrr::insistently(mean,quie = TRUE,rat = rate_backoff())",
    solution_code = "purrr::insistently(mean,quiet = TRUE,rate = rate_backoff())",
    is_correct = TRUE
  )
  expect_this_code(
    user_code = "purrr::insistently(mean,quie = TRUE,rat = rate_backoff())",
    solution_code = "purrr::insistently(mean,quiet = TRUE,rate = rate_backoff())",
    allow_partial_matching = TRUE,
    is_correct = TRUE
  )
  expect_this_code(
    user_code = "purrr::insistently(mean,quie = TRUE,rat = rate_backoff())",
    solution_code = "purrr::insistently(mean,quiet = TRUE,rate = rate_backoff())",
    allow_partial_matching = FALSE,
    is_correct = FALSE,
    msg = message_pmatches_argument_name(
      submitted_call = "purrr::insistently",
      submitted = list(quote(TRUE), quote(rate_backoff())),
      submitted_name = c("quie", "rat"),
      solution_name = c("quiet", "rate")
    )
  )
})
# Single partially matched name bound to a logical value.
test_that("grade_code() - allow_partial_matching works 1 error bool", {
  expect_grade_code(
    user_code = "purrr::insistently(mean,quie = TRUE)",
    solution_code = "purrr::insistently(mean,quiet = TRUE)",
    is_correct = TRUE
  )
  expect_grade_code(
    user_code = "purrr::insistently(mean,quie = TRUE)",
    solution_code = "purrr::insistently(mean,quiet = TRUE)",
    allow_partial_matching = TRUE,
    is_correct = TRUE
  )
  expect_grade_code(
    user_code = "purrr::insistently(mean,quie = TRUE)",
    solution_code = "purrr::insistently(mean,quiet = TRUE)",
    allow_partial_matching = FALSE,
    is_correct = FALSE,
    msg = message_pmatches_argument_name(
      submitted_call = "purrr::insistently",
      submitted = quote(TRUE),
      submitted_name = c("quie"),
      solution_name = c("quiet")
    )
  )
})
# Same single-bool scenario, via grade_this_code().
test_that("grade_this_code() - allow_partial_matching works 1 error bool", {
  expect_this_code(
    user_code = "purrr::insistently(mean,quie = TRUE)",
    solution_code = "purrr::insistently(mean,quiet = TRUE)",
    is_correct = TRUE
  )
  expect_this_code(
    user_code = "purrr::insistently(mean,quie = TRUE)",
    solution_code = "purrr::insistently(mean,quiet = TRUE)",
    allow_partial_matching = TRUE,
    is_correct = TRUE
  )
  expect_this_code(
    user_code = "purrr::insistently(mean,quie = TRUE)",
    solution_code = "purrr::insistently(mean,quiet = TRUE)",
    allow_partial_matching = FALSE,
    is_correct = FALSE,
    msg = message_pmatches_argument_name(
      submitted_call = "purrr::insistently",
      submitted = quote(TRUE),
      submitted_name = c("quie"),
      solution_name = c("quiet")
    )
  )
})
# Single partially matched name bound to a call (rate_backoff()).
test_that("grade_code() - allow_partial_matching works 1 error fun", {
  expect_grade_code(
    user_code = "purrr::insistently(mean,rat = rate_backoff())",
    solution_code = "purrr::insistently(mean,rate = rate_backoff())",
    is_correct = TRUE
  )
  expect_grade_code(
    user_code = "purrr::insistently(mean,rat = rate_backoff())",
    solution_code = "purrr::insistently(mean,rate = rate_backoff())",
    allow_partial_matching = TRUE,
    is_correct = TRUE
  )
  expect_grade_code(
    user_code = "purrr::insistently(mean,rat = rate_backoff())",
    solution_code = "purrr::insistently(mean,rate = rate_backoff())",
    allow_partial_matching = FALSE,
    is_correct = FALSE,
    msg = message_pmatches_argument_name(
      submitted_call = "purrr::insistently",
      submitted = list(quote(rate_backoff())),
      submitted_name = "rat",
      solution_name = "rate"
    )
  )
})
# Same single-call scenario, via grade_this_code().
test_that("grade_this_code() - allow_partial_matching works 1 error fun", {
  expect_this_code(
    user_code = "purrr::insistently(mean,rat = rate_backoff())",
    solution_code = "purrr::insistently(mean,rate = rate_backoff())",
    is_correct = TRUE
  )
  expect_this_code(
    user_code = "purrr::insistently(mean,rat = rate_backoff())",
    solution_code = "purrr::insistently(mean,rate = rate_backoff())",
    allow_partial_matching = TRUE,
    is_correct = TRUE
  )
  expect_this_code(
    user_code = "purrr::insistently(mean,rat = rate_backoff())",
    solution_code = "purrr::insistently(mean,rate = rate_backoff())",
    allow_partial_matching = FALSE,
    is_correct = FALSE,
    msg = message_pmatches_argument_name(
      submitted_call = "purrr::insistently",
      submitted = list(quote(rate_backoff())),
      submitted_name = "rat",
      solution_name = "rate"
    )
  )
})
# Single partially matched name bound to a character value.
test_that("grade_code() - allow_partial_matching works 1 error chr", {
  ff <- function(p1 = "yes") print(p1)
  expect_grade_code(
    user_code = "ff(p=\"no\")",
    solution_code = "ff(p1=\"no\")",
    allow_partial_matching = FALSE,
    is_correct = FALSE,
    msg = message_pmatches_argument_name(
      submitted_call = "ff",
      submitted = quote("no"),
      submitted_name = "p",
      solution_name = "p1"
    )
  )
})
# Same single-character scenario, via grade_this_code().
test_that("grade_this_code() - allow_partial_matching works 1 error chr", {
  ff <- function(p1 = "yes") print(p1)
  expect_this_code(
    user_code = "ff(p=\"no\")",
    solution_code = "ff(p1=\"no\")",
    allow_partial_matching = FALSE,
    is_correct = FALSE,
    msg = message_pmatches_argument_name(
      submitted_call = "ff",
      submitted = quote("no"),
      submitted_name = "p",
      solution_name = "p1"
    )
  )
})
# Several partially matched names of mixed value types; additionally checks
# that the feedback message quotes each submitted/solution pair.
test_that("grade_code() - allow_partial_matching works errors multi arg type", {
  ff <- function(chr = "yes", fun = ls, call = ls(), bool = TRUE) {
    print("youpi")
  }
  pmatch_FALSE <- expect_grade_code( # nolint: object_name
    user_code = "ff(ch = \"yes\", fu = ls, cal =ls(), boo = TRUE)",
    solution_code = "ff(chr = \"yes\", fun = ls, call =ls(), bool = TRUE)",
    allow_partial_matching = FALSE,
    is_correct = FALSE,
    msg = message_pmatches_argument_name(
      submitted_call = "ff",
      submitted = list(quote("yes"), quote(ls), quote(ls()), quote(TRUE)),
      submitted_name = c("ch", "fu", "cal", "boo"),
      solution_name = c("chr", "fun", "call", "bool")
    )
  )
  expect_match(object = pmatch_FALSE$message, regexp = "ch = \"yes\"")
  expect_match(object = pmatch_FALSE$message, regexp = "chr = \"yes\"")
  expect_match(object = pmatch_FALSE$message, regexp = "boo = TRUE")
  expect_match(object = pmatch_FALSE$message, regexp = "bool = TRUE")
  expect_match(object = pmatch_FALSE$message, regexp = "cal = ls()")
  expect_match(object = pmatch_FALSE$message, regexp = "call = ls()")
  expect_match(object = pmatch_FALSE$message, regexp = "fun = ls")
  expect_match(object = pmatch_FALSE$message, regexp = "fu = ls")
})
# Same mixed-type scenario, via grade_this_code().
test_that("grade_this_code() - allow_partial_matching works errors multi arg type", {
  ff <- function(chr = "yes", fun = ls, call = ls(), bool = TRUE) {
    print("youpi")
  }
  pmatch_FALSE <- expect_this_code( # nolint: object_name
    user_code = "ff(ch = \"yes\", fu = ls, cal =ls(), boo = TRUE)",
    solution_code = "ff(chr = \"yes\", fun = ls, call =ls(), bool = TRUE)",
    allow_partial_matching = FALSE,
    is_correct = FALSE,
    msg = message_pmatches_argument_name(
      submitted_call = "ff",
      submitted = list(quote("yes"), quote(ls), quote(ls()), quote(TRUE)),
      submitted_name = c("ch", "fu", "cal", "boo"),
      solution_name = c("chr", "fun", "call", "bool")
    )
  )
  expect_match(object = pmatch_FALSE$message, regexp = "ch = \"yes\"")
  expect_match(object = pmatch_FALSE$message, regexp = "chr = \"yes\"")
  expect_match(object = pmatch_FALSE$message, regexp = "boo = TRUE")
  expect_match(object = pmatch_FALSE$message, regexp = "bool = TRUE")
  expect_match(object = pmatch_FALSE$message, regexp = "cal = ls()")
  expect_match(object = pmatch_FALSE$message, regexp = "call = ls()")
  expect_match(object = pmatch_FALSE$message, regexp = "fun = ls")
  expect_match(object = pmatch_FALSE$message, regexp = "fu = ls")
})
# When the prefix matches several formals (ab/abc/abcd), pmatching FALSE and
# TRUE must produce the same (incorrect) grade.
test_that("grade_code() - allow_partial_matching works with multiple matches", {
  ff <- function(ab, abc, abcd) return(1)
  expect_grade_code(
    user_code = "ff(ab = 1)",
    solution_code = "ff(ab = 1)",
    allow_partial_matching = FALSE,
    is_correct = TRUE
  )
  expect_grade_code(
    user_code = "ff(a = 1)",
    solution_code = "ff(a = 1)",
    allow_partial_matching = FALSE,
    is_correct = TRUE
  )
  expect_equal(
    expect_grade_code(
      user_code = "ff(abc = 1)",
      solution_code = "ff(ab = 1, abc = 1)",
      glue_correct = "{ .message } { .correct }",
      glue_incorrect = "{ .message } { .incorrect }",
      allow_partial_matching = FALSE,
      is_correct = FALSE
    ),
    expect_grade_code(
      user_code = "ff(abc = 1)",
      solution_code = "ff(ab = 1, abc = 1)",
      glue_correct = "{ .message } { .correct }",
      glue_incorrect = "{ .message } { .incorrect }",
      allow_partial_matching = TRUE,
      is_correct = FALSE
    )
  )
  expect_grade_code(
    user_code = "ff(ab = 1, abc = 1)",
    solution_code = "ff(ab = 1, abc = 1)",
    is_correct = TRUE
  )
  expect_grade_code(
    user_code = "ff(abc = 1, ab = 1)",
    solution_code = "ff(ab = 1, abc = 1)",
    is_correct = TRUE
  )
})
# Same multiple-match scenario, via grade_this_code().
test_that("grade_this_code() - allow_partial_matching works with multiple matches", {
  ff <- function(ab, abc, abcd) return(1)
  expect_this_code(
    user_code = "ff(ab = 1)",
    solution_code = "ff(ab = 1)",
    allow_partial_matching = FALSE,
    is_correct = TRUE
  )
  expect_this_code(
    user_code = "ff(a = 1)",
    solution_code = "ff(a = 1)",
    allow_partial_matching = FALSE,
    is_correct = TRUE
  )
  expect_equal(
    expect_this_code(
      user_code = "ff(abc = 1)",
      solution_code = "ff(ab = 1, abc = 1)",
      correct = "correct",
      incorrect = "{.message}",
      allow_partial_matching = FALSE,
      is_correct = FALSE
    ),
    expect_this_code(
      user_code = "ff(abc = 1)",
      solution_code = "ff(ab = 1, abc = 1)",
      correct = "correct",
      incorrect = "{.message}",
      allow_partial_matching = TRUE,
      is_correct = FALSE
    )
  )
  expect_this_code(
    user_code = "ff(ab = 1, abc = 1)",
    solution_code = "ff(ab = 1, abc = 1)",
    is_correct = TRUE
  )
  expect_this_code(
    user_code = "ff(abc = 1, ab = 1)",
    solution_code = "ff(ab = 1, abc = 1)",
    is_correct = TRUE
  )
})
|
/tests/testthat/test-allow_partial_matching.R
|
permissive
|
rstudio/gradethis
|
R
| false
| false
| 11,263
|
r
|
test_that("grade_code() - allow_partial_matching works 2 errors", {
expect_grade_code(
user_code = "purrr::insistently(mean,quie = TRUE,rat = rate_backoff())",
solution_code = "purrr::insistently(mean,quiet = TRUE,rate = rate_backoff())",
is_correct = TRUE
)
expect_grade_code(
user_code = "purrr::insistently(mean,quie = TRUE,rat = rate_backoff())",
solution_code = "purrr::insistently(mean,quiet = TRUE,rate = rate_backoff())",
allow_partial_matching = TRUE,
is_correct = TRUE
)
expect_grade_code(
user_code = "purrr::insistently(mean,quie = TRUE,rat = rate_backoff())",
solution_code = "purrr::insistently(mean,quiet = TRUE,rate = rate_backoff())",
allow_partial_matching = FALSE,
is_correct = FALSE,
msg = message_pmatches_argument_name(
submitted_call = "purrr::insistently",
submitted = list(quote(TRUE), quote(rate_backoff())),
submitted_name = c("quie", "rat"),
solution_name = c("quiet", "rate")
)
)
})
test_that("grade_this_code() - allow_partial_matching works 2 errors", {
expect_this_code(
user_code = "purrr::insistently(mean,quie = TRUE,rat = rate_backoff())",
solution_code = "purrr::insistently(mean,quiet = TRUE,rate = rate_backoff())",
is_correct = TRUE
)
expect_this_code(
user_code = "purrr::insistently(mean,quie = TRUE,rat = rate_backoff())",
solution_code = "purrr::insistently(mean,quiet = TRUE,rate = rate_backoff())",
allow_partial_matching = TRUE,
is_correct = TRUE
)
expect_this_code(
user_code = "purrr::insistently(mean,quie = TRUE,rat = rate_backoff())",
solution_code = "purrr::insistently(mean,quiet = TRUE,rate = rate_backoff())",
allow_partial_matching = FALSE,
is_correct = FALSE,
msg = message_pmatches_argument_name(
submitted_call = "purrr::insistently",
submitted = list(quote(TRUE), quote(rate_backoff())),
submitted_name = c("quie", "rat"),
solution_name = c("quiet", "rate")
)
)
})
test_that("grade_code() - allow_partial_matching works 1 error bool", {
expect_grade_code(
user_code = "purrr::insistently(mean,quie = TRUE)",
solution_code = "purrr::insistently(mean,quiet = TRUE)",
is_correct = TRUE
)
expect_grade_code(
user_code = "purrr::insistently(mean,quie = TRUE)",
solution_code = "purrr::insistently(mean,quiet = TRUE)",
allow_partial_matching = TRUE,
is_correct = TRUE
)
expect_grade_code(
user_code = "purrr::insistently(mean,quie = TRUE)",
solution_code = "purrr::insistently(mean,quiet = TRUE)",
allow_partial_matching = FALSE,
is_correct = FALSE,
msg = message_pmatches_argument_name(
submitted_call = "purrr::insistently",
submitted = quote(TRUE),
submitted_name = c("quie"),
solution_name = c("quiet")
)
)
})
test_that("grade_this_code() - allow_partial_matching works 1 error bool", {
expect_this_code(
user_code = "purrr::insistently(mean,quie = TRUE)",
solution_code = "purrr::insistently(mean,quiet = TRUE)",
is_correct = TRUE
)
expect_this_code(
user_code = "purrr::insistently(mean,quie = TRUE)",
solution_code = "purrr::insistently(mean,quiet = TRUE)",
allow_partial_matching = TRUE,
is_correct = TRUE
)
expect_this_code(
user_code = "purrr::insistently(mean,quie = TRUE)",
solution_code = "purrr::insistently(mean,quiet = TRUE)",
allow_partial_matching = FALSE,
is_correct = FALSE,
msg = message_pmatches_argument_name(
submitted_call = "purrr::insistently",
submitted = quote(TRUE),
submitted_name = c("quie"),
solution_name = c("quiet")
)
)
})
test_that("grade_code() - allow_partial_matching works 1 error fun", {
expect_grade_code(
user_code = "purrr::insistently(mean,rat = rate_backoff())",
solution_code = "purrr::insistently(mean,rate = rate_backoff())",
is_correct = TRUE
)
expect_grade_code(
user_code = "purrr::insistently(mean,rat = rate_backoff())",
solution_code = "purrr::insistently(mean,rate = rate_backoff())",
allow_partial_matching = TRUE,
is_correct = TRUE
)
expect_grade_code(
user_code = "purrr::insistently(mean,rat = rate_backoff())",
solution_code = "purrr::insistently(mean,rate = rate_backoff())",
allow_partial_matching = FALSE,
is_correct = FALSE,
msg = message_pmatches_argument_name(
submitted_call = "purrr::insistently",
submitted = list(quote(rate_backoff())),
submitted_name = "rat",
solution_name = "rate"
)
)
})
test_that("grade_this_code() - allow_partial_matching works 1 error fun", {
  # Same scenario as the grade_code() variant: `rate` abbreviated as `rat`.
  user <- "purrr::insistently(mean,rat = rate_backoff())"
  solution <- "purrr::insistently(mean,rate = rate_backoff())"

  # Partial matching is tolerated by default ...
  expect_this_code(
    user_code = user,
    solution_code = solution,
    is_correct = TRUE
  )
  # ... and when explicitly enabled.
  expect_this_code(
    user_code = user,
    solution_code = solution,
    allow_partial_matching = TRUE,
    is_correct = TRUE
  )
  # When disabled, grading fails with the pmatch feedback message.
  expect_this_code(
    user_code = user,
    solution_code = solution,
    allow_partial_matching = FALSE,
    is_correct = FALSE,
    msg = message_pmatches_argument_name(
      submitted_call = "purrr::insistently",
      submitted = list(quote(rate_backoff())),
      submitted_name = "rat",
      solution_name = "rate"
    )
  )
})
test_that("grade_code() - allow_partial_matching works 1 error chr", {
  # A character-valued argument whose name is partially matched (`p` vs `p1`).
  ff <- function(p1 = "yes") print(p1)

  # With partial matching disabled, the abbreviation is reported as incorrect.
  expect_grade_code(
    user_code = 'ff(p="no")',
    solution_code = 'ff(p1="no")',
    allow_partial_matching = FALSE,
    is_correct = FALSE,
    msg = message_pmatches_argument_name(
      submitted_call = "ff",
      submitted = quote("no"),
      submitted_name = "p",
      solution_name = "p1"
    )
  )
})
test_that("grade_this_code() - allow_partial_matching works 1 error chr", {
  # Same character-argument scenario for the grade_this_code() API.
  ff <- function(p1 = "yes") print(p1)

  # Abbreviating `p1` as `p` must fail when partial matching is disabled.
  expect_this_code(
    user_code = 'ff(p="no")',
    solution_code = 'ff(p1="no")',
    allow_partial_matching = FALSE,
    is_correct = FALSE,
    msg = message_pmatches_argument_name(
      submitted_call = "ff",
      submitted = quote("no"),
      submitted_name = "p",
      solution_name = "p1"
    )
  )
})
test_that("grade_code() - allow_partial_matching works errors multi arg type", {
  # Arguments of several types (chr, function, call, logical), all abbreviated.
  ff <- function(chr = "yes", fun = ls, call = ls(), bool = TRUE) {
    print("youpi")
  }
  pmatch_FALSE <- expect_grade_code( # nolint: object_name
    user_code = "ff(ch = \"yes\", fu = ls, cal =ls(), boo = TRUE)",
    solution_code = "ff(chr = \"yes\", fun = ls, call =ls(), bool = TRUE)",
    allow_partial_matching = FALSE,
    is_correct = FALSE,
    msg = message_pmatches_argument_name(
      submitted_call = "ff",
      submitted = list(quote("yes"), quote(ls), quote(ls()), quote(TRUE)),
      submitted_name = c("ch", "fu", "cal", "boo"),
      solution_name = c("chr", "fun", "call", "bool")
    )
  )
  # The feedback message must mention every abbreviated/full argument pair.
  # fixed = TRUE makes the patterns literal: without it "ls()" is a regex in
  # which "()" is an empty capture group, so the expectation would match the
  # bare text "cal = ls" and never check for the literal parentheses.
  expect_match(object = pmatch_FALSE$message, regexp = "ch = \"yes\"", fixed = TRUE)
  expect_match(object = pmatch_FALSE$message, regexp = "chr = \"yes\"", fixed = TRUE)
  expect_match(object = pmatch_FALSE$message, regexp = "boo = TRUE", fixed = TRUE)
  expect_match(object = pmatch_FALSE$message, regexp = "bool = TRUE", fixed = TRUE)
  expect_match(object = pmatch_FALSE$message, regexp = "cal = ls()", fixed = TRUE)
  expect_match(object = pmatch_FALSE$message, regexp = "call = ls()", fixed = TRUE)
  expect_match(object = pmatch_FALSE$message, regexp = "fun = ls", fixed = TRUE)
  expect_match(object = pmatch_FALSE$message, regexp = "fu = ls", fixed = TRUE)
})
test_that("grade_this_code() - allow_partial_matching works errors multi arg type", {
  # Arguments of several types (chr, function, call, logical), all abbreviated.
  ff <- function(chr = "yes", fun = ls, call = ls(), bool = TRUE) {
    print("youpi")
  }
  pmatch_FALSE <- expect_this_code( # nolint: object_name
    user_code = "ff(ch = \"yes\", fu = ls, cal =ls(), boo = TRUE)",
    solution_code = "ff(chr = \"yes\", fun = ls, call =ls(), bool = TRUE)",
    allow_partial_matching = FALSE,
    is_correct = FALSE,
    msg = message_pmatches_argument_name(
      submitted_call = "ff",
      submitted = list(quote("yes"), quote(ls), quote(ls()), quote(TRUE)),
      submitted_name = c("ch", "fu", "cal", "boo"),
      solution_name = c("chr", "fun", "call", "bool")
    )
  )
  # The feedback message must mention every abbreviated/full argument pair.
  # fixed = TRUE makes the patterns literal: without it "ls()" is a regex in
  # which "()" is an empty capture group, so the expectation would match the
  # bare text "cal = ls" and never check for the literal parentheses.
  expect_match(object = pmatch_FALSE$message, regexp = "ch = \"yes\"", fixed = TRUE)
  expect_match(object = pmatch_FALSE$message, regexp = "chr = \"yes\"", fixed = TRUE)
  expect_match(object = pmatch_FALSE$message, regexp = "boo = TRUE", fixed = TRUE)
  expect_match(object = pmatch_FALSE$message, regexp = "bool = TRUE", fixed = TRUE)
  expect_match(object = pmatch_FALSE$message, regexp = "cal = ls()", fixed = TRUE)
  expect_match(object = pmatch_FALSE$message, regexp = "call = ls()", fixed = TRUE)
  expect_match(object = pmatch_FALSE$message, regexp = "fun = ls", fixed = TRUE)
  expect_match(object = pmatch_FALSE$message, regexp = "fu = ls", fixed = TRUE)
})
test_that("grade_code() - allow_partial_matching works with multiple matches", {
  # Three parameters sharing a prefix, so "a"/"abc" are ambiguous abbreviations.
  ff <- function(ab, abc, abcd) return(1)

  # Exact and prefix-identical single-argument submissions are accepted even
  # with partial matching disabled, because user and solution are identical.
  expect_grade_code(
    user_code = "ff(ab = 1)",
    solution_code = "ff(ab = 1)",
    allow_partial_matching = FALSE,
    is_correct = TRUE
  )
  expect_grade_code(
    user_code = "ff(a = 1)",
    solution_code = "ff(a = 1)",
    allow_partial_matching = FALSE,
    is_correct = TRUE
  )

  # A missing argument yields the same incorrect grade whether or not
  # partial matching is allowed.
  sol_two_args <- "ff(ab = 1, abc = 1)"
  glue_ok <- "{ .message } { .correct }"
  glue_bad <- "{ .message } { .incorrect }"
  expect_equal(
    expect_grade_code(
      user_code = "ff(abc = 1)",
      solution_code = sol_two_args,
      glue_correct = glue_ok,
      glue_incorrect = glue_bad,
      allow_partial_matching = FALSE,
      is_correct = FALSE
    ),
    expect_grade_code(
      user_code = "ff(abc = 1)",
      solution_code = sol_two_args,
      glue_correct = glue_ok,
      glue_incorrect = glue_bad,
      allow_partial_matching = TRUE,
      is_correct = FALSE
    )
  )

  # Fully named arguments match regardless of the order they are written in.
  expect_grade_code(
    user_code = sol_two_args,
    solution_code = sol_two_args,
    is_correct = TRUE
  )
  expect_grade_code(
    user_code = "ff(abc = 1, ab = 1)",
    solution_code = sol_two_args,
    is_correct = TRUE
  )
})
test_that("grade_this_code() - allow_partial_matching works with multiple matches", {
  # Three parameters sharing a prefix, so "a"/"abc" are ambiguous abbreviations.
  ff <- function(ab, abc, abcd) return(1)

  # Exact and prefix-identical single-argument submissions are accepted even
  # with partial matching disabled, because user and solution are identical.
  expect_this_code(
    user_code = "ff(ab = 1)",
    solution_code = "ff(ab = 1)",
    allow_partial_matching = FALSE,
    is_correct = TRUE
  )
  expect_this_code(
    user_code = "ff(a = 1)",
    solution_code = "ff(a = 1)",
    allow_partial_matching = FALSE,
    is_correct = TRUE
  )

  # A missing argument yields the same incorrect grade whether or not
  # partial matching is allowed.
  sol_two_args <- "ff(ab = 1, abc = 1)"
  expect_equal(
    expect_this_code(
      user_code = "ff(abc = 1)",
      solution_code = sol_two_args,
      correct = "correct",
      incorrect = "{.message}",
      allow_partial_matching = FALSE,
      is_correct = FALSE
    ),
    expect_this_code(
      user_code = "ff(abc = 1)",
      solution_code = sol_two_args,
      correct = "correct",
      incorrect = "{.message}",
      allow_partial_matching = TRUE,
      is_correct = FALSE
    )
  )

  # Fully named arguments match regardless of the order they are written in.
  expect_this_code(
    user_code = sol_two_args,
    solution_code = sol_two_args,
    is_correct = TRUE
  )
  expect_this_code(
    user_code = "ff(abc = 1, ab = 1)",
    solution_code = sol_two_args,
    is_correct = TRUE
  )
})
|
context("get_followers")
test_that("get_followers returns data frame with user_id", {
  skip_on_cran()
  skip_if_offline()
  # Requires a pre-saved OAuth token fixture next to the test file.
  token <- readRDS("twitter_tokens")
  rl <- rate_limit(token, "get_followers")
  if (rl$remaining > 1) {
    f <- get_followers("HillaryClinton", n = 10000, token = token)
    expect_true(is.data.frame(f))
    # next_cursor() should return a single pagination token.
    expect_true(identical(length(next_cursor(f)), 1L))
    expect_named(f, "user_id")
    expect_gt(NROW(f), 9999)
  } else {
    # Rate-limited: sanity-check the rate-limit object instead.
    # (The original asserted `rl$limit == 15` three times; once suffices.)
    expect_true(rl$limit == 15)
    expect_true(rl$remaining == 0)
  }
})
|
/tests/testthat/test_get_followers.R
|
no_license
|
zzqqzzqqzz/rtweet
|
R
| false
| false
| 601
|
r
|
context("get_followers")
test_that("get_followers returns data frame with user_id", {
  skip_on_cran()
  skip_if_offline()
  # Requires a pre-saved OAuth token fixture next to the test file.
  token <- readRDS("twitter_tokens")
  rl <- rate_limit(token, "get_followers")
  if (rl$remaining > 1) {
    f <- get_followers("HillaryClinton", n = 10000, token = token)
    expect_true(is.data.frame(f))
    # next_cursor() should return a single pagination token.
    expect_true(identical(length(next_cursor(f)), 1L))
    expect_named(f, "user_id")
    expect_gt(NROW(f), 9999)
  } else {
    # Rate-limited: sanity-check the rate-limit object instead.
    # (The original asserted `rl$limit == 15` three times; once suffices.)
    expect_true(rl$limit == 15)
    expect_true(rl$remaining == 0)
  }
})
|
# Block-averaged PMF for PO4-Ca (GT): read the per-block umbrella-sampling
# PMFs, shift each onto a common bulk reference, interpolate onto a shared
# grid, and save the mean profile with its standard error.
# NOTE(review): the original script began with rm(list = ls()); that line is
# removed here because wiping the caller's global environment is an unwanted
# side effect of sourcing a script.
library(tidyverse)
library(foreach)
library(matrixStats)
library(here)

folder_path <- here("data/PO4Ca_GT/umbrella/10blocks/")
file_name <- "PO4Ca_GT"
n_block <- 10

# Read each block's PMF, dropping xvg comment lines ("#"/"@"), and shift the
# profile so the bulk region (1.4 < r < 1.45) averages to zero.
all_pmfs <-
  foreach(i = 0:(n_block - 1), .combine = "c") %do% {
    pmf_file <- paste0(folder_path, "pmf", i, ".xvg")
    all_content <- readLines(pmf_file)
    a_reduced <- all_content[!grepl("#|@", all_content)]
    pmf <- read.csv(textConnection(a_reduced), stringsAsFactors = FALSE, header = FALSE, sep = "")
    pmf_av <-
      pmf %>%
      filter((V1 > 1.4) & (V1 < 1.45)) %>%
      summarise(bulk = mean(V2)) %>%
      pull()
    pmf$V2 <- pmf$V2 - pmf_av
    list(pmf)
  }

# Shared abscissa: mean grid spacing (kept from the original, not used below)
# and the overlap range common to all blocks.
x1 <- all_pmfs[[1]]$V1
dx <- mean(diff(x1))
ndata <- nrow(all_pmfs[[1]])
xleft <- max(vapply(all_pmfs, function(pmf) pmf$V1[1], numeric(1)))
xright <- min(vapply(all_pmfs, function(pmf) pmf$V1[ndata], numeric(1)))
r <- seq(xleft, xright, length.out = ndata)

# Interpolate every block onto the shared grid (one column per block).
all_pmfs_interp <-
  foreach(i = seq_len(n_block), .combine = "cbind") %do% {
    approx(x = all_pmfs[[i]]$V1, y = all_pmfs[[i]]$V2, xout = r)$y
  }

# Block average and standard error of the mean across blocks.
pmf_mean <- rowMeans(all_pmfs_interp)
pmf_std <- rowSds(all_pmfs_interp) / sqrt(n_block)
df_pmf_final <- data.frame(r = r, pmf = pmf_mean, error = pmf_std)

# Persist both an RDS object and a plain-text table.
out_stem <- paste0(folder_path, "df_", file_name, "_meanstd_", n_block, "blocks")
saveRDS(df_pmf_final, file = out_stem)
write.table(x = df_pmf_final, file = paste0(out_stem, ".txt"), row.names = FALSE, col.names = FALSE, quote = FALSE)

# Quick-look plot of the averaged PMF with error bars.
p <- ggplot(data = df_pmf_final, mapping = aes(x = r, y = pmf)) +
  geom_line() +
  geom_errorbar(aes(ymin = pmf - error, ymax = pmf + error)) +
  xlim(0.2, 1.5)
p
|
/utilities/get_PMF/block_average_PO4Ca_GT.R
|
no_license
|
andy90/DNA_simulation
|
R
| false
| false
| 1,745
|
r
|
# Block-averaged PMF for PO4-Ca (GT): read the per-block umbrella-sampling
# PMFs, shift each onto a common bulk reference, interpolate onto a shared
# grid, and save the mean profile with its standard error.
# NOTE(review): the original script began with rm(list = ls()); that line is
# removed here because wiping the caller's global environment is an unwanted
# side effect of sourcing a script.
library(tidyverse)
library(foreach)
library(matrixStats)
library(here)

folder_path <- here("data/PO4Ca_GT/umbrella/10blocks/")
file_name <- "PO4Ca_GT"
n_block <- 10

# Read each block's PMF, dropping xvg comment lines ("#"/"@"), and shift the
# profile so the bulk region (1.4 < r < 1.45) averages to zero.
all_pmfs <-
  foreach(i = 0:(n_block - 1), .combine = "c") %do% {
    pmf_file <- paste0(folder_path, "pmf", i, ".xvg")
    all_content <- readLines(pmf_file)
    a_reduced <- all_content[!grepl("#|@", all_content)]
    pmf <- read.csv(textConnection(a_reduced), stringsAsFactors = FALSE, header = FALSE, sep = "")
    pmf_av <-
      pmf %>%
      filter((V1 > 1.4) & (V1 < 1.45)) %>%
      summarise(bulk = mean(V2)) %>%
      pull()
    pmf$V2 <- pmf$V2 - pmf_av
    list(pmf)
  }

# Shared abscissa: mean grid spacing (kept from the original, not used below)
# and the overlap range common to all blocks.
x1 <- all_pmfs[[1]]$V1
dx <- mean(diff(x1))
ndata <- nrow(all_pmfs[[1]])
xleft <- max(vapply(all_pmfs, function(pmf) pmf$V1[1], numeric(1)))
xright <- min(vapply(all_pmfs, function(pmf) pmf$V1[ndata], numeric(1)))
r <- seq(xleft, xright, length.out = ndata)

# Interpolate every block onto the shared grid (one column per block).
all_pmfs_interp <-
  foreach(i = seq_len(n_block), .combine = "cbind") %do% {
    approx(x = all_pmfs[[i]]$V1, y = all_pmfs[[i]]$V2, xout = r)$y
  }

# Block average and standard error of the mean across blocks.
pmf_mean <- rowMeans(all_pmfs_interp)
pmf_std <- rowSds(all_pmfs_interp) / sqrt(n_block)
df_pmf_final <- data.frame(r = r, pmf = pmf_mean, error = pmf_std)

# Persist both an RDS object and a plain-text table.
out_stem <- paste0(folder_path, "df_", file_name, "_meanstd_", n_block, "blocks")
saveRDS(df_pmf_final, file = out_stem)
write.table(x = df_pmf_final, file = paste0(out_stem, ".txt"), row.names = FALSE, col.names = FALSE, quote = FALSE)

# Quick-look plot of the averaged PMF with error bars.
p <- ggplot(data = df_pmf_final, mapping = aes(x = r, y = pmf)) +
  geom_line() +
  geom_errorbar(aes(ymin = pmf - error, ymax = pmf + error)) +
  xlim(0.2, 1.5)
p
|
#' @title Create table 1, part 1 from the credit union publication
#'
#' @description Creates Table 1, part 1 from the \href{https://www.bankofengland.co.uk/news?NewsTypes=571948d14c6943f7b5b7748ad80bef29&Direction=Upcoming}{Credit Union Quarterly Release}.
#'
#' @details \code{table_1} takes as input a standardised long format data frame of class \code{series_period_data},
#' uses associated metadata to create table elements and uses \code{dsdtabs} to format a table.
#'
#' @keywords internal
#'
#' @param x Object of class \code{series_period_data()}.
#'
#' @return Table 1, part 1
#'
#' @examples
#'
#' library(CreditUnionRAP)
#' library(dplyr)
#' table_1_part_1(CQ)
#'
#'
#' @export
table_1_part_1 <- function(x) {
  # Count CQ_A2 submissions per country and quarter.
  x <- x %>%
    dplyr::arrange(desc(Quarter)) %>%
    dplyr::filter(Data.Element == "CQ_A2") %>%
    dplyr::group_by(Country, Quarter) %>%
    dplyr::add_tally() %>%
    dplyr::distinct(Country, Quarter, .keep_all = TRUE) %>%
    # After distinct() each (Country, Quarter) group holds one row, so the
    # grouped slice(1:5) below is effectively a pass-through; retained for
    # compatibility — TODO confirm it was meant to keep five quarters.
    dplyr::select(Data.Element, Country, Quarter, n) %>%
    dplyr::slice(1:5) %>%
    dplyr::group_by(Data.Element, Quarter)

  # UK total row per quarter = sum of the country counts.
  # (The original piped this into bind_rows(.), which with a "." argument is
  # bind_rows(UK) — a no-op — so it has been dropped.)
  UK <- x %>%
    dplyr::summarise(Country = as.character("UK"),
                     n = sum(n))
  x <- rbind(x, UK)

  # Create wide representation of the data: one column per quarter.
  x <- tidyr::spread(x, key = Quarter, value = n)

  # Fix the row order: UK first, then the constituent countries.
  row_order <- c("UK", "England", "Scotland", "Wales", "Northern Ireland")
  x <- x[match(row_order, x$Country), ]
  # Blank out the first two column headers (Data.Element, Country labels).
  colnames(x)[1] <- ""
  colnames(x)[2] <- ""

  # Define the class here ----
  structure(
    list(
      data = x,
      quarters = colnames(x[!colnames(x) == ""]),
      country = x[, 2],
      units = "Number of submissions",
      title = "Quarterly Returns submitted",
      transformation = "Not seasonally adjusted",
      Box_code = x[, 1]
    ),
    class = "table_1")
}
|
/table_1_part_1.R
|
no_license
|
JoshuaAllenBoE/CreditUnionRAP
|
R
| false
| false
| 1,881
|
r
|
#' @title Create table 1, part 1 from the credit union publication
#'
#' @description Creates Table 1, part 1 from the \href{https://www.bankofengland.co.uk/news?NewsTypes=571948d14c6943f7b5b7748ad80bef29&Direction=Upcoming}{Credit Union Quarterly Release}.
#'
#' @details \code{table_1} takes as input a standardised long format data frame of class \code{series_period_data},
#' uses associated metadata to create table elements and uses \code{dsdtabs} to format a table.
#'
#' @keywords internal
#'
#' @param x Object of class \code{series_period_data()}.
#'
#' @return Table 1, part 1
#'
#' @examples
#'
#' library(CreditUnionRAP)
#' library(dplyr)
#' table_1_part_1(CQ)
#'
#'
#' @export
table_1_part_1 <- function(x) {
  # Count CQ_A2 submissions per country and quarter.
  x <- x %>%
    dplyr::arrange(desc(Quarter)) %>%
    dplyr::filter(Data.Element == "CQ_A2") %>%
    dplyr::group_by(Country, Quarter) %>%
    dplyr::add_tally() %>%
    dplyr::distinct(Country, Quarter, .keep_all = TRUE) %>%
    # After distinct() each (Country, Quarter) group holds one row, so the
    # grouped slice(1:5) below is effectively a pass-through; retained for
    # compatibility — TODO confirm it was meant to keep five quarters.
    dplyr::select(Data.Element, Country, Quarter, n) %>%
    dplyr::slice(1:5) %>%
    dplyr::group_by(Data.Element, Quarter)

  # UK total row per quarter = sum of the country counts.
  # (The original piped this into bind_rows(.), which with a "." argument is
  # bind_rows(UK) — a no-op — so it has been dropped.)
  UK <- x %>%
    dplyr::summarise(Country = as.character("UK"),
                     n = sum(n))
  x <- rbind(x, UK)

  # Create wide representation of the data: one column per quarter.
  x <- tidyr::spread(x, key = Quarter, value = n)

  # Fix the row order: UK first, then the constituent countries.
  row_order <- c("UK", "England", "Scotland", "Wales", "Northern Ireland")
  x <- x[match(row_order, x$Country), ]
  # Blank out the first two column headers (Data.Element, Country labels).
  colnames(x)[1] <- ""
  colnames(x)[2] <- ""

  # Define the class here ----
  structure(
    list(
      data = x,
      quarters = colnames(x[!colnames(x) == ""]),
      country = x[, 2],
      units = "Number of submissions",
      title = "Quarterly Returns submitted",
      transformation = "Not seasonally adjusted",
      Box_code = x[, 1]
    ),
    class = "table_1")
}
|
### NOTE TO USERS ###
#' The following code uses parellel computation with up to 30 cores being used at once.
#' Parallelization was completed using the mclapply function. To run this script locally,
#' replace mclapply with lapply and remove the mc.cores parameter. This quantile
#' estimator should be able to run locally without too much computational pain.
# load libraries
library(parallel)
library(dplyr)
# load in unimodal distributions
source("simulation_setup/unimodal_distribution_setup.R")
### ONSET ###
# Estimator: the sample minimum (0th percentile) of each simulated dataset.
onsetestimator_noci <- function(x) {
  quantile(x = x, probs = 0)
}

# Helper: wrap per-simulation estimates in a data frame and attach the
# simulation metadata (true value, number of observations, sd, estimator
# label). Replaces the repeated rename()/mutate() pipelines of the original.
# Redefined in each percentile section so every section is self-contained.
annotate_naive_df <- function(estimates, true_value, obs, sd) {
  df <- data.frame(estimate = unname(unlist(estimates)))
  df$true_value <- true_value
  df$distance <- df$estimate - true_value
  df$obs <- obs
  df$sd <- sd
  df$estimator <- "naive"
  df
}

# Apply the estimator to every simulated dataset (one list per obs/sd design).
onset10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = onsetestimator_noci, mc.cores = 20))
onset10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = onsetestimator_noci, mc.cores = 20))
onset10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = onsetestimator_noci, mc.cores = 20))
onset20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = onsetestimator_noci, mc.cores = 20))
onset20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = onsetestimator_noci, mc.cores = 20))
onset20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = onsetestimator_noci, mc.cores = 20))
onset50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = onsetestimator_noci, mc.cores = 30))
onset50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = onsetestimator_noci, mc.cores = 30))
onset50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = onsetestimator_noci, mc.cores = 30))

# Annotate each design with its known true onset value.
onset10obs_10sd_df <- annotate_naive_df(onset10obs_10sd, true_value = 162.68, obs = 10, sd = 10)
onset20obs_10sd_df <- annotate_naive_df(onset20obs_10sd, true_value = 162.68, obs = 20, sd = 10)
onset50obs_10sd_df <- annotate_naive_df(onset50obs_10sd, true_value = 162.68, obs = 50, sd = 10)
onset10obs_20sd_df <- annotate_naive_df(onset10obs_20sd, true_value = 119.92, obs = 10, sd = 20)
onset20obs_20sd_df <- annotate_naive_df(onset20obs_20sd, true_value = 119.92, obs = 20, sd = 20)
onset50obs_20sd_df <- annotate_naive_df(onset50obs_20sd, true_value = 119.92, obs = 50, sd = 20)
onset10obs_40sd_df <- annotate_naive_df(onset10obs_40sd, true_value = 35.88, obs = 10, sd = 40)
onset20obs_40sd_df <- annotate_naive_df(onset20obs_40sd, true_value = 35.88, obs = 20, sd = 40)
onset50obs_40sd_df <- annotate_naive_df(onset50obs_40sd, true_value = 35.88, obs = 50, sd = 40)

# Combine all designs into one long data frame for this percentile.
naiveonset_df <- plyr::rbind.fill(onset10obs_10sd_df, onset10obs_20sd_df, onset10obs_40sd_df,
                                  onset20obs_10sd_df, onset20obs_20sd_df, onset20obs_40sd_df,
                                  onset50obs_10sd_df, onset50obs_20sd_df, onset50obs_40sd_df) %>%
  mutate(perc = "onset", Q = 0)
### FIRST PERCENTILE ###
# Estimator: the sample 1st percentile of each simulated dataset.
firstestimator_noci <- function(x) {
  quantile(x = x, probs = 0.01)
}

# Helper: wrap per-simulation estimates in a data frame and attach the
# simulation metadata (true value, number of observations, sd, estimator
# label). Replaces the repeated rename()/mutate() pipelines of the original.
# Redefined in each percentile section so every section is self-contained.
annotate_naive_df <- function(estimates, true_value, obs, sd) {
  df <- data.frame(estimate = unname(unlist(estimates)))
  df$true_value <- true_value
  df$distance <- df$estimate - true_value
  df$obs <- obs
  df$sd <- sd
  df$estimator <- "naive"
  df
}

# Apply the estimator to every simulated dataset (one list per obs/sd design).
first10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = firstestimator_noci, mc.cores = 20))
first10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = firstestimator_noci, mc.cores = 20))
first10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = firstestimator_noci, mc.cores = 20))
first20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = firstestimator_noci, mc.cores = 20))
first20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = firstestimator_noci, mc.cores = 20))
first20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = firstestimator_noci, mc.cores = 20))
first50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = firstestimator_noci, mc.cores = 20))
first50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = firstestimator_noci, mc.cores = 20))
first50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = firstestimator_noci, mc.cores = 20))

# Annotate each design with its known true 1st-percentile value.
first10obs_10sd_df <- annotate_naive_df(first10obs_10sd, true_value = 176.73, obs = 10, sd = 10)
first20obs_10sd_df <- annotate_naive_df(first20obs_10sd, true_value = 176.73, obs = 20, sd = 10)
first50obs_10sd_df <- annotate_naive_df(first50obs_10sd, true_value = 176.73, obs = 50, sd = 10)
first10obs_20sd_df <- annotate_naive_df(first10obs_20sd, true_value = 153.74, obs = 10, sd = 20)
first20obs_20sd_df <- annotate_naive_df(first20obs_20sd, true_value = 153.74, obs = 20, sd = 20)
first50obs_20sd_df <- annotate_naive_df(first50obs_20sd, true_value = 153.74, obs = 50, sd = 20)
first10obs_40sd_df <- annotate_naive_df(first10obs_40sd, true_value = 107.64, obs = 10, sd = 40)
first20obs_40sd_df <- annotate_naive_df(first20obs_40sd, true_value = 107.64, obs = 20, sd = 40)
first50obs_40sd_df <- annotate_naive_df(first50obs_40sd, true_value = 107.64, obs = 50, sd = 40)

# Combine all designs into one long data frame for this percentile.
naivefirst_df <- plyr::rbind.fill(first10obs_10sd_df, first10obs_20sd_df, first10obs_40sd_df,
                                  first20obs_10sd_df, first20obs_20sd_df, first20obs_40sd_df,
                                  first50obs_10sd_df, first50obs_20sd_df, first50obs_40sd_df) %>%
  mutate(perc = "first", Q = 1)
### FIFTH PERCENTILE ###
# Estimator: the sample 5th percentile of each simulated dataset.
fifthestimator_noci <- function(x) {
  quantile(x = x, probs = 0.05)
}

# Helper: wrap per-simulation estimates in a data frame and attach the
# simulation metadata (true value, number of observations, sd, estimator
# label). Replaces the repeated rename()/mutate() pipelines of the original.
# Redefined in each percentile section so every section is self-contained.
annotate_naive_df <- function(estimates, true_value, obs, sd) {
  df <- data.frame(estimate = unname(unlist(estimates)))
  df$true_value <- true_value
  df$distance <- df$estimate - true_value
  df$obs <- obs
  df$sd <- sd
  df$estimator <- "naive"
  df
}

# Apply the estimator to every simulated dataset (one list per obs/sd design).
fifth10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = fifthestimator_noci, mc.cores = 20))

# Annotate each design with its known true 5th-percentile value.
fifth10obs_10sd_df <- annotate_naive_df(fifth10obs_10sd, true_value = 183.21, obs = 10, sd = 10)
fifth20obs_10sd_df <- annotate_naive_df(fifth20obs_10sd, true_value = 183.21, obs = 20, sd = 10)
fifth50obs_10sd_df <- annotate_naive_df(fifth50obs_10sd, true_value = 183.21, obs = 50, sd = 10)
fifth10obs_20sd_df <- annotate_naive_df(fifth10obs_20sd, true_value = 167.65, obs = 10, sd = 20)
fifth20obs_20sd_df <- annotate_naive_df(fifth20obs_20sd, true_value = 167.65, obs = 20, sd = 20)
fifth50obs_20sd_df <- annotate_naive_df(fifth50obs_20sd, true_value = 167.65, obs = 50, sd = 20)
fifth10obs_40sd_df <- annotate_naive_df(fifth10obs_40sd, true_value = 134.35, obs = 10, sd = 40)
fifth20obs_40sd_df <- annotate_naive_df(fifth20obs_40sd, true_value = 134.35, obs = 20, sd = 40)
fifth50obs_40sd_df <- annotate_naive_df(fifth50obs_40sd, true_value = 134.35, obs = 50, sd = 40)

# Combine all designs into one long data frame for this percentile.
naivefifth_df <- plyr::rbind.fill(fifth10obs_10sd_df, fifth10obs_20sd_df, fifth10obs_40sd_df,
                                  fifth20obs_10sd_df, fifth20obs_20sd_df, fifth20obs_40sd_df,
                                  fifth50obs_10sd_df, fifth50obs_20sd_df, fifth50obs_40sd_df) %>%
  mutate(perc = "fifth", Q = 5)
### TENTH PERCENTILE ###
# Estimator: the sample 10th percentile of each simulated dataset.
tenthestimator_noci <- function(x) {
  quantile(x = x, probs = 0.1)
}

# Helper: wrap per-simulation estimates in a data frame and attach the
# simulation metadata (true value, number of observations, sd, estimator
# label). Replaces the repeated rename()/mutate() pipelines of the original.
# Redefined in each percentile section so every section is self-contained.
annotate_naive_df <- function(estimates, true_value, obs, sd) {
  df <- data.frame(estimate = unname(unlist(estimates)))
  df$true_value <- true_value
  df$distance <- df$estimate - true_value
  df$obs <- obs
  df$sd <- sd
  df$estimator <- "naive"
  df
}

# Apply the estimator to every simulated dataset (one list per obs/sd design).
tenth10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = tenthestimator_noci, mc.cores = 20))

# Annotate each design with its known true 10th-percentile value.
tenth10obs_10sd_df <- annotate_naive_df(tenth10obs_10sd, true_value = 186.99, obs = 10, sd = 10)
tenth20obs_10sd_df <- annotate_naive_df(tenth20obs_10sd, true_value = 186.99, obs = 20, sd = 10)
tenth50obs_10sd_df <- annotate_naive_df(tenth50obs_10sd, true_value = 186.99, obs = 50, sd = 10)
tenth10obs_20sd_df <- annotate_naive_df(tenth10obs_20sd, true_value = 174.82, obs = 10, sd = 20)
tenth20obs_20sd_df <- annotate_naive_df(tenth20obs_20sd, true_value = 174.82, obs = 20, sd = 20)
tenth50obs_20sd_df <- annotate_naive_df(tenth50obs_20sd, true_value = 174.82, obs = 50, sd = 20)
tenth10obs_40sd_df <- annotate_naive_df(tenth10obs_40sd, true_value = 137.17, obs = 10, sd = 40)
tenth20obs_40sd_df <- annotate_naive_df(tenth20obs_40sd, true_value = 137.17, obs = 20, sd = 40)
tenth50obs_40sd_df <- annotate_naive_df(tenth50obs_40sd, true_value = 137.17, obs = 50, sd = 40)

# Combine all designs into one long data frame for this percentile.
naivetenth_df <- plyr::rbind.fill(tenth10obs_10sd_df, tenth10obs_20sd_df, tenth10obs_40sd_df,
                                  tenth20obs_10sd_df, tenth20obs_20sd_df, tenth20obs_40sd_df,
                                  tenth50obs_10sd_df, tenth50obs_20sd_df, tenth50obs_40sd_df) %>%
  mutate(perc = "tenth", Q = 10)
### FIFTIETH PERCENTILE ###
# Estimator: the sample median (50th percentile) of each simulated dataset.
fiftyestimator_noci <- function(x) {
  quantile(x = x, probs = 0.5)
}

# Helper: wrap per-simulation estimates in a data frame and attach the
# simulation metadata (true value, number of observations, sd, estimator
# label). Replaces the repeated rename()/mutate() pipelines of the original.
# Redefined in each percentile section so every section is self-contained.
annotate_naive_df <- function(estimates, true_value, obs, sd) {
  df <- data.frame(estimate = unname(unlist(estimates)))
  df$true_value <- true_value
  df$distance <- df$estimate - true_value
  df$obs <- obs
  df$sd <- sd
  df$estimator <- "naive"
  df
}

# Apply the estimator to every simulated dataset (one list per obs/sd design).
fifty10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = fiftyestimator_noci, mc.cores = 20))

# Annotate each design with its known true median value.
fifty10obs_10sd_df <- annotate_naive_df(fifty10obs_10sd, true_value = 199.89, obs = 10, sd = 10)
fifty20obs_10sd_df <- annotate_naive_df(fifty20obs_10sd, true_value = 199.89, obs = 20, sd = 10)
fifty50obs_10sd_df <- annotate_naive_df(fifty50obs_10sd, true_value = 199.89, obs = 50, sd = 10)
fifty10obs_20sd_df <- annotate_naive_df(fifty10obs_20sd, true_value = 200.18, obs = 10, sd = 20)
fifty20obs_20sd_df <- annotate_naive_df(fifty20obs_20sd, true_value = 200.18, obs = 20, sd = 20)
fifty50obs_20sd_df <- annotate_naive_df(fifty50obs_20sd, true_value = 200.18, obs = 50, sd = 20)
fifty10obs_40sd_df <- annotate_naive_df(fifty10obs_40sd, true_value = 199.80, obs = 10, sd = 40)
fifty20obs_40sd_df <- annotate_naive_df(fifty20obs_40sd, true_value = 199.80, obs = 20, sd = 40)
fifty50obs_40sd_df <- annotate_naive_df(fifty50obs_40sd, true_value = 199.80, obs = 50, sd = 40)

# Combine all designs into one long data frame for this percentile.
naivefifty_df <- plyr::rbind.fill(fifty10obs_10sd_df, fifty10obs_20sd_df, fifty10obs_40sd_df,
                                  fifty20obs_10sd_df, fifty20obs_20sd_df, fifty20obs_40sd_df,
                                  fifty50obs_10sd_df, fifty50obs_20sd_df, fifty50obs_40sd_df) %>%
  mutate(perc = "fiftieth", Q = 50)
### NINETIETH PERCENTILE ###
# lapply functions
# Naive 90th-percentile estimator (no confidence interval): the empirical
# 0.9 quantile of the sample, returned as a named numeric as quantile() gives.
nintyestimator_noci <- function(x) {
  quantile(x, probs = 0.9)
}
## Naive 90th-percentile estimates for every simulated dataset; same pipeline
## as the other percentile sections (mclapply -> one-column data frame).
ninty10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty10obs_10sd_df <- as.data.frame(split(ninty10obs_10sd, 1:1))
ninty10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty10obs_20sd_df <- as.data.frame(split(ninty10obs_20sd, 1:1))
ninty10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty10obs_40sd_df <- as.data.frame(split(ninty10obs_40sd, 1:1))
# 20 observations per simulated sample
ninty20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty20obs_10sd_df <- as.data.frame(split(ninty20obs_10sd, 1:1))
ninty20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty20obs_20sd_df <- as.data.frame(split(ninty20obs_20sd, 1:1))
ninty20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty20obs_40sd_df <- as.data.frame(split(ninty20obs_40sd, 1:1))
# 50 observations per simulated sample
ninty50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty50obs_10sd_df <- as.data.frame(split(ninty50obs_10sd, 1:1))
ninty50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty50obs_20sd_df <- as.data.frame(split(ninty50obs_20sd, 1:1))
ninty50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty50obs_40sd_df <- as.data.frame(split(ninty50obs_40sd, 1:1))
## Attach the true 90th percentile per sd level and compute the error.
ninty10obs_10sd_df <- ninty10obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 212.76,
         distance = estimate - true_value,
         obs = 10, sd = 10,
         estimator = "naive")
ninty20obs_10sd_df <- ninty20obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 212.76,
         distance = estimate - true_value,
         obs = 20, sd = 10,
         estimator = "naive")
ninty50obs_10sd_df <- ninty50obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 212.76,
         distance = estimate - true_value,
         obs = 50, sd = 10,
         estimator = "naive")
ninty10obs_20sd_df <- ninty10obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 225.60,
         distance = estimate - true_value,
         obs = 10, sd = 20,
         estimator = "naive")
ninty20obs_20sd_df <- ninty20obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 225.60,
         distance = estimate - true_value,
         obs = 20, sd = 20,
         estimator = "naive")
ninty50obs_20sd_df <- ninty50obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 225.60,
         distance = estimate - true_value,
         obs = 50, sd = 20,
         estimator = "naive")
ninty10obs_40sd_df <- ninty10obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 251.79,
         distance = estimate - true_value,
         obs = 10, sd = 40,
         estimator = "naive")
ninty20obs_40sd_df <- ninty20obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 251.79,
         distance = estimate - true_value,
         obs = 20, sd = 40,
         estimator = "naive")
ninty50obs_40sd_df <- ninty50obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 251.79,
         distance = estimate - true_value,
         obs = 50, sd = 40,
         estimator = "naive")
## Stack all nine obs/sd combinations into one ninetieth-percentile table.
naiveninty_df <- plyr::rbind.fill(
  ninty10obs_10sd_df, ninty10obs_20sd_df, ninty10obs_40sd_df,
  ninty20obs_10sd_df, ninty20obs_20sd_df, ninty20obs_40sd_df,
  ninty50obs_10sd_df, ninty50obs_20sd_df, ninty50obs_40sd_df
) %>%
  mutate(perc = "ninetieth", Q = 90)
### NINETY-FIFTH PERCENTILE ###
# lapply functions
# Naive 95th-percentile estimator (no confidence interval): the empirical
# 0.95 quantile of the sample.
nintyfiveestimator_noci <- function(x) {
  quantile(x, probs = 0.95)
}
## Naive 95th-percentile estimates for every simulated dataset; same pipeline
## as the other percentile sections (mclapply -> one-column data frame).
nintyfive10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive10obs_10sd_df <- as.data.frame(split(nintyfive10obs_10sd, 1:1))
nintyfive10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive10obs_20sd_df <- as.data.frame(split(nintyfive10obs_20sd, 1:1))
nintyfive10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive10obs_40sd_df <- as.data.frame(split(nintyfive10obs_40sd, 1:1))
# 20 observations per simulated sample
nintyfive20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive20obs_10sd_df <- as.data.frame(split(nintyfive20obs_10sd, 1:1))
nintyfive20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive20obs_20sd_df <- as.data.frame(split(nintyfive20obs_20sd, 1:1))
nintyfive20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive20obs_40sd_df <- as.data.frame(split(nintyfive20obs_40sd, 1:1))
# 50 observations per simulated sample
nintyfive50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive50obs_10sd_df <- as.data.frame(split(nintyfive50obs_10sd, 1:1))
nintyfive50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive50obs_20sd_df <- as.data.frame(split(nintyfive50obs_20sd, 1:1))
nintyfive50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive50obs_40sd_df <- as.data.frame(split(nintyfive50obs_40sd, 1:1))
## Attach the true 95th percentile per sd level and compute the error.
nintyfive10obs_10sd_df <- nintyfive10obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 216.19,
         distance = estimate - true_value,
         obs = 10, sd = 10,
         estimator = "naive")
nintyfive20obs_10sd_df <- nintyfive20obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 216.19,
         distance = estimate - true_value,
         obs = 20, sd = 10,
         estimator = "naive")
nintyfive50obs_10sd_df <- nintyfive50obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 216.19,
         distance = estimate - true_value,
         obs = 50, sd = 10,
         estimator = "naive")
nintyfive10obs_20sd_df <- nintyfive10obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 232.76,
         distance = estimate - true_value,
         obs = 10, sd = 20,
         estimator = "naive")
nintyfive20obs_20sd_df <- nintyfive20obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 232.76,
         distance = estimate - true_value,
         obs = 20, sd = 20,
         estimator = "naive")
nintyfive50obs_20sd_df <- nintyfive50obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 232.76,
         distance = estimate - true_value,
         obs = 50, sd = 20,
         estimator = "naive")
nintyfive10obs_40sd_df <- nintyfive10obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 266.36,
         distance = estimate - true_value,
         obs = 10, sd = 40,
         estimator = "naive")
nintyfive20obs_40sd_df <- nintyfive20obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 266.36,
         distance = estimate - true_value,
         obs = 20, sd = 40,
         estimator = "naive")
nintyfive50obs_40sd_df <- nintyfive50obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 266.36,
         distance = estimate - true_value,
         obs = 50, sd = 40,
         estimator = "naive")
## Stack all nine obs/sd combinations into one ninety-fifth-percentile table.
naivenintyfive_df <- plyr::rbind.fill(
  nintyfive10obs_10sd_df, nintyfive10obs_20sd_df, nintyfive10obs_40sd_df,
  nintyfive20obs_10sd_df, nintyfive20obs_20sd_df, nintyfive20obs_40sd_df,
  nintyfive50obs_10sd_df, nintyfive50obs_20sd_df, nintyfive50obs_40sd_df
) %>%
  mutate(perc = "ninetyfifth", Q = 95)
### NINETY-NINTH PERCENTILE ###
# lapply functions
# Naive 99th-percentile estimator (no confidence interval): the empirical
# 0.99 quantile of the sample.
nintynineestimator_noci <- function(x) {
  quantile(x, probs = 0.99)
}
## Naive 99th-percentile estimates for every simulated dataset; same pipeline
## as the other percentile sections (mclapply -> one-column data frame).
nintynine10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine10obs_10sd_df <- as.data.frame(split(nintynine10obs_10sd, 1:1))
nintynine10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine10obs_20sd_df <- as.data.frame(split(nintynine10obs_20sd, 1:1))
nintynine10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine10obs_40sd_df <- as.data.frame(split(nintynine10obs_40sd, 1:1))
# 20 observations per simulated sample
nintynine20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine20obs_10sd_df <- as.data.frame(split(nintynine20obs_10sd, 1:1))
nintynine20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine20obs_20sd_df <- as.data.frame(split(nintynine20obs_20sd, 1:1))
nintynine20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine20obs_40sd_df <- as.data.frame(split(nintynine20obs_40sd, 1:1))
# 50 observations per simulated sample
nintynine50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine50obs_10sd_df <- as.data.frame(split(nintynine50obs_10sd, 1:1))
nintynine50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine50obs_20sd_df <- as.data.frame(split(nintynine50obs_20sd, 1:1))
nintynine50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine50obs_40sd_df <- as.data.frame(split(nintynine50obs_40sd, 1:1))
## Attach the true 99th percentile per sd level and compute the error.
nintynine10obs_10sd_df <- nintynine10obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 223.52,
         distance = estimate - true_value,
         obs = 10, sd = 10,
         estimator = "naive")
nintynine20obs_10sd_df <- nintynine20obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 223.52,
         distance = estimate - true_value,
         obs = 20, sd = 10,
         estimator = "naive")
nintynine50obs_10sd_df <- nintynine50obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 223.52,
         distance = estimate - true_value,
         obs = 50, sd = 10,
         estimator = "naive")
nintynine10obs_20sd_df <- nintynine10obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 246.44,
         distance = estimate - true_value,
         obs = 10, sd = 20,
         estimator = "naive")
nintynine20obs_20sd_df <- nintynine20obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 246.44,
         distance = estimate - true_value,
         obs = 20, sd = 20,
         estimator = "naive")
nintynine50obs_20sd_df <- nintynine50obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 246.44,
         distance = estimate - true_value,
         obs = 50, sd = 20,
         estimator = "naive")
nintynine10obs_40sd_df <- nintynine10obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 293.43,
         distance = estimate - true_value,
         obs = 10, sd = 40,
         estimator = "naive")
nintynine20obs_40sd_df <- nintynine20obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 293.43,
         distance = estimate - true_value,
         obs = 20, sd = 40,
         estimator = "naive")
nintynine50obs_40sd_df <- nintynine50obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 293.43,
         distance = estimate - true_value,
         obs = 50, sd = 40,
         estimator = "naive")
## Stack all nine obs/sd combinations into one ninety-ninth-percentile table.
naivenintynine_df <- plyr::rbind.fill(
  nintynine10obs_10sd_df, nintynine10obs_20sd_df, nintynine10obs_40sd_df,
  nintynine20obs_10sd_df, nintynine20obs_20sd_df, nintynine20obs_40sd_df,
  nintynine50obs_10sd_df, nintynine50obs_20sd_df, nintynine50obs_40sd_df
) %>%
  mutate(perc = "ninetynineth", Q = 99)
### OFFSET ###
# lapply functions
# Naive offset estimator (no confidence interval): the 1.0 quantile, i.e. the
# latest observation in the sample (quantile keeps the "100%" name attribute).
offsetestimator_noci <- function(x) {
  quantile(x, probs = 1)
}
## Naive offset (maximum) estimates for every simulated dataset; same
## pipeline as the other percentile sections (mclapply -> one-column df).
offset10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = offsetestimator_noci, mc.cores = 20))
offset10obs_10sd_df <- as.data.frame(split(offset10obs_10sd, 1:1))
offset10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = offsetestimator_noci, mc.cores = 20))
offset10obs_20sd_df <- as.data.frame(split(offset10obs_20sd, 1:1))
offset10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = offsetestimator_noci, mc.cores = 20))
offset10obs_40sd_df <- as.data.frame(split(offset10obs_40sd, 1:1))
# 20 observations per simulated sample
offset20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = offsetestimator_noci, mc.cores = 20))
offset20obs_10sd_df <- as.data.frame(split(offset20obs_10sd, 1:1))
offset20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = offsetestimator_noci, mc.cores = 20))
offset20obs_20sd_df <- as.data.frame(split(offset20obs_20sd, 1:1))
offset20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = offsetestimator_noci, mc.cores = 20))
offset20obs_40sd_df <- as.data.frame(split(offset20obs_40sd, 1:1))
# 50 observations per simulated sample
offset50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = offsetestimator_noci, mc.cores = 20))
offset50obs_10sd_df <- as.data.frame(split(offset50obs_10sd, 1:1))
offset50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = offsetestimator_noci, mc.cores = 20))
offset50obs_20sd_df <- as.data.frame(split(offset50obs_20sd, 1:1))
offset50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = offsetestimator_noci, mc.cores = 20))
offset50obs_40sd_df <- as.data.frame(split(offset50obs_40sd, 1:1))
## Attach the true offset per sd level and compute the error.
offset10obs_10sd_df <- offset10obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 238.1,
         distance = estimate - true_value,
         obs = 10, sd = 10,
         estimator = "naive")
offset20obs_10sd_df <- offset20obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 238.1,
         distance = estimate - true_value,
         obs = 20, sd = 10,
         estimator = "naive")
offset50obs_10sd_df <- offset50obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 238.1,
         distance = estimate - true_value,
         obs = 50, sd = 10,
         estimator = "naive")
offset10obs_20sd_df <- offset10obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 279.34,
         distance = estimate - true_value,
         obs = 10, sd = 20,
         estimator = "naive")
offset20obs_20sd_df <- offset20obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 279.34,
         distance = estimate - true_value,
         obs = 20, sd = 20,
         estimator = "naive")
offset50obs_20sd_df <- offset50obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 279.34,
         distance = estimate - true_value,
         obs = 50, sd = 20,
         estimator = "naive")
offset10obs_40sd_df <- offset10obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 361.38,
         distance = estimate - true_value,
         obs = 10, sd = 40,
         estimator = "naive")
offset20obs_40sd_df <- offset20obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 361.38,
         distance = estimate - true_value,
         obs = 20, sd = 40,
         estimator = "naive")
offset50obs_40sd_df <- offset50obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 361.38,
         distance = estimate - true_value,
         obs = 50, sd = 40,
         estimator = "naive")
## Stack the nine offset tables, then combine every percentile's results and
## write the full unimodal simulation table to disk.
naiveoffset_df <- plyr::rbind.fill(
  offset10obs_10sd_df, offset10obs_20sd_df, offset10obs_40sd_df,
  offset20obs_10sd_df, offset20obs_20sd_df, offset20obs_40sd_df,
  offset50obs_10sd_df, offset50obs_20sd_df, offset50obs_40sd_df
) %>%
  mutate(perc = "offset", Q = 100)
unimodal_sims_all <- plyr::rbind.fill(
  naiveonset_df, naivefirst_df, naivefifth_df, naivetenth_df,
  naivefifty_df, naiveninty_df, naivenintyfive_df, naivenintynine_df,
  naiveoffset_df
)
# Persist all estimates for downstream analysis/plotting.
write.csv(unimodal_sims_all, file = "results/unimodal_quantile.csv", row.names = FALSE)
|
/analyses_scripts/unimodal_quantile_estimates.R
|
no_license
|
mbelitz/belitz_etal_phenometrics
|
R
| false
| false
| 39,146
|
r
|
### NOTE TO USERS ###
#' The following code uses parallel computation with up to 30 cores being used at once.
#' Parallelization was completed using the mclapply function. To run this script locally,
#' replace mclapply with lapply and remove the mc.cores parameter. This quantile
#' estimator should be able to run locally without too much computational pain.
# load libraries
library(parallel)
library(dplyr)
# load in unimodal distributions
source("simulation_setup/unimodal_distribution_setup.R")
### ONSET ###
# lapply functions
# Naive onset estimator (no confidence interval): the 0.0 quantile, i.e. the
# earliest observation in the sample (quantile keeps the "0%" name attribute).
onsetestimator_noci <- function(x) {
  quantile(x, probs = 0)
}
## Naive onset (minimum) estimates for every simulated dataset. mclapply
## returns one estimate per simulated sample; split(x, 1:1) packs them into a
## one-column data frame (column X1).
onset10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = onsetestimator_noci, mc.cores = 20))
onset10obs_10sd_df <- as.data.frame(split(onset10obs_10sd, 1:1))
onset10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = onsetestimator_noci, mc.cores = 20))
onset10obs_20sd_df <- as.data.frame(split(onset10obs_20sd, 1:1))
onset10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = onsetestimator_noci, mc.cores = 20))
onset10obs_40sd_df <- as.data.frame(split(onset10obs_40sd, 1:1))
# 20 observations per simulated sample
onset20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = onsetestimator_noci, mc.cores = 20))
onset20obs_10sd_df <- as.data.frame(split(onset20obs_10sd, 1:1))
onset20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = onsetestimator_noci, mc.cores = 20))
onset20obs_20sd_df <- as.data.frame(split(onset20obs_20sd, 1:1))
onset20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = onsetestimator_noci, mc.cores = 20))
onset20obs_40sd_df <- as.data.frame(split(onset20obs_40sd, 1:1))
# 50 observations per simulated sample — note these use 30 cores, unlike the
# 20-core calls elsewhere in this script.
onset50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = onsetestimator_noci, mc.cores = 30))
onset50obs_10sd_df <- as.data.frame(split(onset50obs_10sd, 1:1))
onset50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = onsetestimator_noci, mc.cores = 30))
onset50obs_20sd_df <- as.data.frame(split(onset50obs_20sd, 1:1))
onset50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = onsetestimator_noci, mc.cores = 30))
onset50obs_40sd_df <- as.data.frame(split(onset50obs_40sd, 1:1))
## Attach the true onset per sd level (presumably derived from the generating
## distributions in unimodal_distribution_setup.R) and compute the error.
onset10obs_10sd_df <- onset10obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 162.68,
         distance = estimate - true_value,
         obs = 10, sd = 10,
         estimator = "naive")
onset20obs_10sd_df <- onset20obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 162.68,
         distance = estimate - true_value,
         obs = 20, sd = 10,
         estimator = "naive")
onset50obs_10sd_df <- onset50obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 162.68,
         distance = estimate - true_value,
         obs = 50, sd = 10,
         estimator = "naive")
onset10obs_20sd_df <- onset10obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 119.92,
         distance = estimate - true_value,
         obs = 10, sd = 20,
         estimator = "naive")
onset20obs_20sd_df <- onset20obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 119.92,
         distance = estimate - true_value,
         obs = 20, sd = 20,
         estimator = "naive")
onset50obs_20sd_df <- onset50obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 119.92,
         distance = estimate - true_value,
         obs = 50, sd = 20,
         estimator = "naive")
onset10obs_40sd_df <- onset10obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 35.88,
         distance = estimate - true_value,
         obs = 10, sd = 40,
         estimator = "naive")
onset20obs_40sd_df <- onset20obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 35.88,
         distance = estimate - true_value,
         obs = 20, sd = 40,
         estimator = "naive")
onset50obs_40sd_df <- onset50obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 35.88,
         distance = estimate - true_value,
         obs = 50, sd = 40,
         estimator = "naive")
## Stack all nine obs/sd combinations into one onset table.
naiveonset_df <- plyr::rbind.fill(
  onset10obs_10sd_df, onset10obs_20sd_df, onset10obs_40sd_df,
  onset20obs_10sd_df, onset20obs_20sd_df, onset20obs_40sd_df,
  onset50obs_10sd_df, onset50obs_20sd_df, onset50obs_40sd_df
) %>%
  mutate(perc = "onset", Q = 0)
### FIRST PERCENTILE ###
# lapply functions
# Naive 1st-percentile estimator (no confidence interval): the empirical
# 0.01 quantile of the sample.
firstestimator_noci <- function(x) {
  quantile(x, probs = 0.01)
}
## Naive 1st-percentile estimates for every simulated dataset; same pipeline
## as the other percentile sections (mclapply -> one-column data frame).
first10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = firstestimator_noci, mc.cores = 20))
first10obs_10sd_df <- as.data.frame(split(first10obs_10sd, 1:1))
first10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = firstestimator_noci, mc.cores = 20))
first10obs_20sd_df <- as.data.frame(split(first10obs_20sd, 1:1))
first10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = firstestimator_noci, mc.cores = 20))
first10obs_40sd_df <- as.data.frame(split(first10obs_40sd, 1:1))
# 20 observations per simulated sample
first20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = firstestimator_noci, mc.cores = 20))
first20obs_10sd_df <- as.data.frame(split(first20obs_10sd, 1:1))
first20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = firstestimator_noci, mc.cores = 20))
first20obs_20sd_df <- as.data.frame(split(first20obs_20sd, 1:1))
first20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = firstestimator_noci, mc.cores = 20))
first20obs_40sd_df <- as.data.frame(split(first20obs_40sd, 1:1))
# 50 observations per simulated sample
first50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = firstestimator_noci, mc.cores = 20))
first50obs_10sd_df <- as.data.frame(split(first50obs_10sd, 1:1))
first50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = firstestimator_noci, mc.cores = 20))
first50obs_20sd_df <- as.data.frame(split(first50obs_20sd, 1:1))
first50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = firstestimator_noci, mc.cores = 20))
first50obs_40sd_df <- as.data.frame(split(first50obs_40sd, 1:1))
## Attach the true 1st percentile per sd level and compute the error.
first10obs_10sd_df <- first10obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 176.73,
         distance = estimate - true_value,
         obs = 10, sd = 10,
         estimator = "naive")
first20obs_10sd_df <- first20obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 176.73,
         distance = estimate - true_value,
         obs = 20, sd = 10,
         estimator = "naive")
first50obs_10sd_df <- first50obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 176.73,
         distance = estimate - true_value,
         obs = 50, sd = 10,
         estimator = "naive")
first10obs_20sd_df <- first10obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 153.74,
         distance = estimate - true_value,
         obs = 10, sd = 20,
         estimator = "naive")
first20obs_20sd_df <- first20obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 153.74,
         distance = estimate - true_value,
         obs = 20, sd = 20,
         estimator = "naive")
first50obs_20sd_df <- first50obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 153.74,
         distance = estimate - true_value,
         obs = 50, sd = 20,
         estimator = "naive")
first10obs_40sd_df <- first10obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 107.64,
         distance = estimate - true_value,
         obs = 10, sd = 40,
         estimator = "naive")
first20obs_40sd_df <- first20obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 107.64,
         distance = estimate - true_value,
         obs = 20, sd = 40,
         estimator = "naive")
first50obs_40sd_df <- first50obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 107.64,
         distance = estimate - true_value,
         obs = 50, sd = 40,
         estimator = "naive")
## Stack all nine obs/sd combinations into one first-percentile table.
naivefirst_df <- plyr::rbind.fill(
  first10obs_10sd_df, first10obs_20sd_df, first10obs_40sd_df,
  first20obs_10sd_df, first20obs_20sd_df, first20obs_40sd_df,
  first50obs_10sd_df, first50obs_20sd_df, first50obs_40sd_df
) %>%
  mutate(perc = "first", Q = 1)
### FIFTH PERCENTILE ###
# lapply functions
# Naive 5th-percentile estimator (no confidence interval): the empirical
# 0.05 quantile of the sample.
fifthestimator_noci <- function(x) {
  quantile(x, probs = 0.05)
}
## Naive 5th-percentile estimates for every simulated dataset; same pipeline
## as the other percentile sections (mclapply -> one-column data frame).
fifth10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth10obs_10sd_df <- as.data.frame(split(fifth10obs_10sd, 1:1))
fifth10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth10obs_20sd_df <- as.data.frame(split(fifth10obs_20sd, 1:1))
fifth10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth10obs_40sd_df <- as.data.frame(split(fifth10obs_40sd, 1:1))
# 20 observations per simulated sample
fifth20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth20obs_10sd_df <- as.data.frame(split(fifth20obs_10sd, 1:1))
fifth20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth20obs_20sd_df <- as.data.frame(split(fifth20obs_20sd, 1:1))
fifth20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth20obs_40sd_df <- as.data.frame(split(fifth20obs_40sd, 1:1))
# 50 observations per simulated sample
fifth50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth50obs_10sd_df <- as.data.frame(split(fifth50obs_10sd, 1:1))
fifth50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth50obs_20sd_df <- as.data.frame(split(fifth50obs_20sd, 1:1))
fifth50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = fifthestimator_noci, mc.cores = 20))
fifth50obs_40sd_df <- as.data.frame(split(fifth50obs_40sd, 1:1))
## Attach the true 5th percentile per sd level and compute the error.
fifth10obs_10sd_df <- fifth10obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 183.21,
         distance = estimate - true_value,
         obs = 10, sd = 10,
         estimator = "naive")
fifth20obs_10sd_df <- fifth20obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 183.21,
         distance = estimate - true_value,
         obs = 20, sd = 10,
         estimator = "naive")
fifth50obs_10sd_df <- fifth50obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 183.21,
         distance = estimate - true_value,
         obs = 50, sd = 10,
         estimator = "naive")
fifth10obs_20sd_df <- fifth10obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 167.65,
         distance = estimate - true_value,
         obs = 10, sd = 20,
         estimator = "naive")
fifth20obs_20sd_df <- fifth20obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 167.65,
         distance = estimate - true_value,
         obs = 20, sd = 20,
         estimator = "naive")
fifth50obs_20sd_df <- fifth50obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 167.65,
         distance = estimate - true_value,
         obs = 50, sd = 20,
         estimator = "naive")
fifth10obs_40sd_df <- fifth10obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 134.35,
         distance = estimate - true_value,
         obs = 10, sd = 40,
         estimator = "naive")
fifth20obs_40sd_df <- fifth20obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 134.35,
         distance = estimate - true_value,
         obs = 20, sd = 40,
         estimator = "naive")
fifth50obs_40sd_df <- fifth50obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 134.35,
         distance = estimate - true_value,
         obs = 50, sd = 40,
         estimator = "naive")
## Stack all nine obs/sd combinations into one fifth-percentile table.
naivefifth_df <- plyr::rbind.fill(
  fifth10obs_10sd_df, fifth10obs_20sd_df, fifth10obs_40sd_df,
  fifth20obs_10sd_df, fifth20obs_20sd_df, fifth20obs_40sd_df,
  fifth50obs_10sd_df, fifth50obs_20sd_df, fifth50obs_40sd_df
) %>%
  mutate(perc = "fifth", Q = 5)
### TENTH PERCENTILE ###
# lapply functions
# Naive 10th-percentile estimator (no confidence interval): the empirical
# 0.1 quantile of the sample.
tenthestimator_noci <- function(x) {
  quantile(x, probs = 0.1)
}
# Apply the naive 10th-percentile estimator to every simulated dataset in each
# design cell (obs x sd). mclapply() returns one number per dataset; unlist()
# collapses them to a numeric vector, and split(x, 1:1) wraps that vector in a
# single-group list, so as.data.frame() yields a one-column data frame whose
# column is auto-named "X1" (renamed to "estimate" below).
# mc.cores = 20 assumes a 20-core machine -- TODO confirm for other hosts.
# 10 obs 10 sd
tenth10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = tenthestimator_noci,mc.cores = 20)) # already run
tenth10obs_10sd_df <- as.data.frame(split(tenth10obs_10sd, 1:1))
# 10 obs 20 sd
tenth10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth10obs_20sd_df <- as.data.frame(split(tenth10obs_20sd, 1:1))
# 10 obs 40 sd
tenth10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth10obs_40sd_df <- as.data.frame(split(tenth10obs_40sd, 1:1))
###### 20 Obs ####
# 20 obs 10 sd
tenth20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth20obs_10sd_df <- as.data.frame(split(tenth20obs_10sd, 1:1))
# 20 obs 20 sd
tenth20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth20obs_20sd_df <- as.data.frame(split(tenth20obs_20sd, 1:1))
# 20 obs 40 sd
tenth20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth20obs_40sd_df <- as.data.frame(split(tenth20obs_40sd, 1:1))
########## 50 obs now y'all ########
# 50 obs 10 sd
tenth50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth50obs_10sd_df <- as.data.frame(split(tenth50obs_10sd, 1:1))
# 50 obs 20 sd
tenth50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth50obs_20sd_df <- as.data.frame(split(tenth50obs_20sd, 1:1))
# 50 obs 40 sd
tenth50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = tenthestimator_noci, mc.cores = 20))
tenth50obs_40sd_df <- as.data.frame(split(tenth50obs_40sd, 1:1))
## Attach metadata to each tenth-percentile result set: analytic true value
## for the noise level, estimation error, design cell (obs, sd), estimator tag.
# 10 sd
tenth10obs_10sd_df <- tenth10obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 186.99, distance = estimate - true_value,
         obs = 10, sd = 10, estimator = "naive")
tenth20obs_10sd_df <- tenth20obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 186.99, distance = estimate - true_value,
         obs = 20, sd = 10, estimator = "naive")
tenth50obs_10sd_df <- tenth50obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 186.99, distance = estimate - true_value,
         obs = 50, sd = 10, estimator = "naive")
# 20 sd
tenth10obs_20sd_df <- tenth10obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 174.82, distance = estimate - true_value,
         obs = 10, sd = 20, estimator = "naive")
tenth20obs_20sd_df <- tenth20obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 174.82, distance = estimate - true_value,
         obs = 20, sd = 20, estimator = "naive")
tenth50obs_20sd_df <- tenth50obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 174.82, distance = estimate - true_value,
         obs = 50, sd = 20, estimator = "naive")
# 40 sd
tenth10obs_40sd_df <- tenth10obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 137.17, distance = estimate - true_value,
         obs = 10, sd = 40, estimator = "naive")
tenth20obs_40sd_df <- tenth20obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 137.17, distance = estimate - true_value,
         obs = 20, sd = 40, estimator = "naive")
tenth50obs_40sd_df <- tenth50obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 137.17, distance = estimate - true_value,
         obs = 50, sd = 40, estimator = "naive")
# Stack all nine design cells and tag with the percentile label and level.
naivetenth_df <- plyr::rbind.fill(tenth10obs_10sd_df, tenth10obs_20sd_df, tenth10obs_40sd_df,
                                  tenth20obs_10sd_df, tenth20obs_20sd_df, tenth20obs_40sd_df,
                                  tenth50obs_10sd_df, tenth50obs_20sd_df, tenth50obs_40sd_df) %>%
  mutate(perc = "tenth", Q = 10)
### FIFTIETH PERCENTILE ###
# Naive point estimator (no confidence interval): the empirical median
# (50% quantile) of one simulated sample, as a named scalar ("50%").
fiftyestimator_noci <- function(x) {
  quantile(x, probs = 0.5)
}
# Parallel application of the naive 50th-percentile estimator to each design
# cell; split(x, 1:1) wraps the result vector in a one-column data frame
# (column auto-named "X1", renamed to "estimate" downstream).
# 10 obs 10 sd
fifty10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = fiftyestimator_noci,mc.cores = 20)) # already run
fifty10obs_10sd_df <- as.data.frame(split(fifty10obs_10sd, 1:1))
# 10 obs 20 sd
fifty10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty10obs_20sd_df <- as.data.frame(split(fifty10obs_20sd, 1:1))
# 10 obs 40 sd
fifty10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty10obs_40sd_df <- as.data.frame(split(fifty10obs_40sd, 1:1))
###### 20 Obs ####
# 20 obs 10 sd
fifty20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty20obs_10sd_df <- as.data.frame(split(fifty20obs_10sd, 1:1))
# 20 obs 20 sd
fifty20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty20obs_20sd_df <- as.data.frame(split(fifty20obs_20sd, 1:1))
# 20 obs 40 sd
fifty20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty20obs_40sd_df <- as.data.frame(split(fifty20obs_40sd, 1:1))
########## 50 obs now y'all ########
# 50 obs 10 sd
fifty50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty50obs_10sd_df <- as.data.frame(split(fifty50obs_10sd, 1:1))
# 50 obs 20 sd
fifty50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty50obs_20sd_df <- as.data.frame(split(fifty50obs_20sd, 1:1))
# 50 obs 40 sd
fifty50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = fiftyestimator_noci, mc.cores = 20))
fifty50obs_40sd_df <- as.data.frame(split(fifty50obs_40sd, 1:1))
## Attach metadata to each median (50th-percentile) result set: analytic true
## value for the noise level, estimation error, design cell, estimator tag.
# 10 sd
fifty10obs_10sd_df <- fifty10obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 199.89, distance = estimate - true_value,
         obs = 10, sd = 10, estimator = "naive")
fifty20obs_10sd_df <- fifty20obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 199.89, distance = estimate - true_value,
         obs = 20, sd = 10, estimator = "naive")
fifty50obs_10sd_df <- fifty50obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 199.89, distance = estimate - true_value,
         obs = 50, sd = 10, estimator = "naive")
# 20 sd
fifty10obs_20sd_df <- fifty10obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 200.18, distance = estimate - true_value,
         obs = 10, sd = 20, estimator = "naive")
fifty20obs_20sd_df <- fifty20obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 200.18, distance = estimate - true_value,
         obs = 20, sd = 20, estimator = "naive")
fifty50obs_20sd_df <- fifty50obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 200.18, distance = estimate - true_value,
         obs = 50, sd = 20, estimator = "naive")
# 40 sd
fifty10obs_40sd_df <- fifty10obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 199.80, distance = estimate - true_value,
         obs = 10, sd = 40, estimator = "naive")
fifty20obs_40sd_df <- fifty20obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 199.80, distance = estimate - true_value,
         obs = 20, sd = 40, estimator = "naive")
fifty50obs_40sd_df <- fifty50obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 199.80, distance = estimate - true_value,
         obs = 50, sd = 40, estimator = "naive")
# Stack all nine design cells and tag with the percentile label and level.
naivefifty_df <- plyr::rbind.fill(fifty10obs_10sd_df, fifty10obs_20sd_df, fifty10obs_40sd_df,
                                  fifty20obs_10sd_df, fifty20obs_20sd_df, fifty20obs_40sd_df,
                                  fifty50obs_10sd_df, fifty50obs_20sd_df, fifty50obs_40sd_df) %>%
  mutate(perc = "fiftieth", Q = 50)
### NINETIETH PERCENTILE ###
# Naive point estimator (no confidence interval): the empirical 90% quantile
# of one simulated sample, as a named scalar ("90%").
nintyestimator_noci <- function(x) {
  quantile(x, probs = 0.9)
}
# Parallel application of the naive 90th-percentile estimator to each design
# cell; split(x, 1:1) wraps the result vector in a one-column data frame
# (column auto-named "X1", renamed to "estimate" downstream).
# 10 obs 10 sd
ninty10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = nintyestimator_noci,mc.cores = 20)) # already run
ninty10obs_10sd_df <- as.data.frame(split(ninty10obs_10sd, 1:1))
# 10 obs 20 sd
ninty10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty10obs_20sd_df <- as.data.frame(split(ninty10obs_20sd, 1:1))
# 10 obs 40 sd
ninty10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty10obs_40sd_df <- as.data.frame(split(ninty10obs_40sd, 1:1))
###### 20 Obs ####
# 20 obs 10 sd
ninty20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty20obs_10sd_df <- as.data.frame(split(ninty20obs_10sd, 1:1))
# 20 obs 20 sd
ninty20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty20obs_20sd_df <- as.data.frame(split(ninty20obs_20sd, 1:1))
# 20 obs 40 sd
ninty20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty20obs_40sd_df <- as.data.frame(split(ninty20obs_40sd, 1:1))
########## 50 obs now y'all ########
# 50 obs 10 sd
ninty50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty50obs_10sd_df <- as.data.frame(split(ninty50obs_10sd, 1:1))
# 50 obs 20 sd
ninty50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty50obs_20sd_df <- as.data.frame(split(ninty50obs_20sd, 1:1))
# 50 obs 40 sd
ninty50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = nintyestimator_noci, mc.cores = 20))
ninty50obs_40sd_df <- as.data.frame(split(ninty50obs_40sd, 1:1))
## Attach metadata to each 90th-percentile result set: analytic true value
## for the noise level, estimation error, design cell, estimator tag.
# 10 sd
ninty10obs_10sd_df <- ninty10obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 212.76, distance = estimate - true_value,
         obs = 10, sd = 10, estimator = "naive")
ninty20obs_10sd_df <- ninty20obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 212.76, distance = estimate - true_value,
         obs = 20, sd = 10, estimator = "naive")
ninty50obs_10sd_df <- ninty50obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 212.76, distance = estimate - true_value,
         obs = 50, sd = 10, estimator = "naive")
# 20 sd
ninty10obs_20sd_df <- ninty10obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 225.60, distance = estimate - true_value,
         obs = 10, sd = 20, estimator = "naive")
ninty20obs_20sd_df <- ninty20obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 225.60, distance = estimate - true_value,
         obs = 20, sd = 20, estimator = "naive")
ninty50obs_20sd_df <- ninty50obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 225.60, distance = estimate - true_value,
         obs = 50, sd = 20, estimator = "naive")
# 40 sd
ninty10obs_40sd_df <- ninty10obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 251.79, distance = estimate - true_value,
         obs = 10, sd = 40, estimator = "naive")
ninty20obs_40sd_df <- ninty20obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 251.79, distance = estimate - true_value,
         obs = 20, sd = 40, estimator = "naive")
ninty50obs_40sd_df <- ninty50obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 251.79, distance = estimate - true_value,
         obs = 50, sd = 40, estimator = "naive")
# Stack all nine design cells and tag with the percentile label and level.
naiveninty_df <- plyr::rbind.fill(ninty10obs_10sd_df, ninty10obs_20sd_df, ninty10obs_40sd_df,
                                  ninty20obs_10sd_df, ninty20obs_20sd_df, ninty20obs_40sd_df,
                                  ninty50obs_10sd_df, ninty50obs_20sd_df, ninty50obs_40sd_df) %>%
  mutate(perc = "ninetieth", Q = 90)
### NINTYFIFTH PERCENTILE ###
# Naive point estimator (no confidence interval): the empirical 95% quantile
# of one simulated sample, as a named scalar ("95%").
nintyfiveestimator_noci <- function(x) {
  quantile(x, probs = 0.95)
}
# Parallel application of the naive 95th-percentile estimator to each design
# cell; split(x, 1:1) wraps the result vector in a one-column data frame
# (column auto-named "X1", renamed to "estimate" downstream).
# 10 obs 10 sd
nintyfive10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = nintyfiveestimator_noci,mc.cores = 20)) # already run
nintyfive10obs_10sd_df <- as.data.frame(split(nintyfive10obs_10sd, 1:1))
# 10 obs 20 sd
nintyfive10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive10obs_20sd_df <- as.data.frame(split(nintyfive10obs_20sd, 1:1))
# 10 obs 40 sd
nintyfive10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive10obs_40sd_df <- as.data.frame(split(nintyfive10obs_40sd, 1:1))
###### 20 Obs ####
# 20 obs 10 sd
nintyfive20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive20obs_10sd_df <- as.data.frame(split(nintyfive20obs_10sd, 1:1))
# 20 obs 20 sd
nintyfive20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive20obs_20sd_df <- as.data.frame(split(nintyfive20obs_20sd, 1:1))
# 20 obs 40 sd
nintyfive20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive20obs_40sd_df <- as.data.frame(split(nintyfive20obs_40sd, 1:1))
########## 50 obs now y'all ########
# 50 obs 10 sd
nintyfive50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive50obs_10sd_df <- as.data.frame(split(nintyfive50obs_10sd, 1:1))
# 50 obs 20 sd
nintyfive50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive50obs_20sd_df <- as.data.frame(split(nintyfive50obs_20sd, 1:1))
# 50 obs 40 sd
nintyfive50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = nintyfiveestimator_noci, mc.cores = 20))
nintyfive50obs_40sd_df <- as.data.frame(split(nintyfive50obs_40sd, 1:1))
## Attach metadata to each 95th-percentile result set: analytic true value
## for the noise level, estimation error, design cell, estimator tag.
# 10 sd
nintyfive10obs_10sd_df <- nintyfive10obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 216.19, distance = estimate - true_value,
         obs = 10, sd = 10, estimator = "naive")
nintyfive20obs_10sd_df <- nintyfive20obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 216.19, distance = estimate - true_value,
         obs = 20, sd = 10, estimator = "naive")
nintyfive50obs_10sd_df <- nintyfive50obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 216.19, distance = estimate - true_value,
         obs = 50, sd = 10, estimator = "naive")
# 20 sd
nintyfive10obs_20sd_df <- nintyfive10obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 232.76, distance = estimate - true_value,
         obs = 10, sd = 20, estimator = "naive")
nintyfive20obs_20sd_df <- nintyfive20obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 232.76, distance = estimate - true_value,
         obs = 20, sd = 20, estimator = "naive")
nintyfive50obs_20sd_df <- nintyfive50obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 232.76, distance = estimate - true_value,
         obs = 50, sd = 20, estimator = "naive")
# 40 sd
nintyfive10obs_40sd_df <- nintyfive10obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 266.36, distance = estimate - true_value,
         obs = 10, sd = 40, estimator = "naive")
nintyfive20obs_40sd_df <- nintyfive20obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 266.36, distance = estimate - true_value,
         obs = 20, sd = 40, estimator = "naive")
nintyfive50obs_40sd_df <- nintyfive50obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 266.36, distance = estimate - true_value,
         obs = 50, sd = 40, estimator = "naive")
# Stack all nine design cells and tag with the percentile label and level.
naivenintyfive_df <- plyr::rbind.fill(nintyfive10obs_10sd_df, nintyfive10obs_20sd_df, nintyfive10obs_40sd_df,
                                      nintyfive20obs_10sd_df, nintyfive20obs_20sd_df, nintyfive20obs_40sd_df,
                                      nintyfive50obs_10sd_df, nintyfive50obs_20sd_df, nintyfive50obs_40sd_df) %>%
  mutate(perc = "ninetyfifth", Q = 95)
### NINTY NINETH PERCENTILE ###
# Naive point estimator (no confidence interval): the empirical 99% quantile
# of one simulated sample, as a named scalar ("99%").
nintynineestimator_noci <- function(x) {
  quantile(x, probs = 0.99)
}
# Parallel application of the naive 99th-percentile estimator to each design
# cell; split(x, 1:1) wraps the result vector in a one-column data frame
# (column auto-named "X1", renamed to "estimate" downstream).
# 10 obs 10 sd
nintynine10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = nintynineestimator_noci,mc.cores = 20)) # already run
nintynine10obs_10sd_df <- as.data.frame(split(nintynine10obs_10sd, 1:1))
# 10 obs 20 sd
nintynine10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine10obs_20sd_df <- as.data.frame(split(nintynine10obs_20sd, 1:1))
# 10 obs 40 sd
nintynine10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine10obs_40sd_df <- as.data.frame(split(nintynine10obs_40sd, 1:1))
###### 20 Obs ####
# 20 obs 10 sd
nintynine20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine20obs_10sd_df <- as.data.frame(split(nintynine20obs_10sd, 1:1))
# 20 obs 20 sd
nintynine20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine20obs_20sd_df <- as.data.frame(split(nintynine20obs_20sd, 1:1))
# 20 obs 40 sd
nintynine20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine20obs_40sd_df <- as.data.frame(split(nintynine20obs_40sd, 1:1))
########## 50 obs now y'all ########
# 50 obs 10 sd
nintynine50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine50obs_10sd_df <- as.data.frame(split(nintynine50obs_10sd, 1:1))
# 50 obs 20 sd
nintynine50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine50obs_20sd_df <- as.data.frame(split(nintynine50obs_20sd, 1:1))
# 50 obs 40 sd
nintynine50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = nintynineestimator_noci, mc.cores = 20))
nintynine50obs_40sd_df <- as.data.frame(split(nintynine50obs_40sd, 1:1))
## Attach metadata to each 99th-percentile result set: analytic true value
## for the noise level, estimation error, design cell, estimator tag.
# 10 sd
nintynine10obs_10sd_df <- nintynine10obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 223.52, distance = estimate - true_value,
         obs = 10, sd = 10, estimator = "naive")
nintynine20obs_10sd_df <- nintynine20obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 223.52, distance = estimate - true_value,
         obs = 20, sd = 10, estimator = "naive")
nintynine50obs_10sd_df <- nintynine50obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 223.52, distance = estimate - true_value,
         obs = 50, sd = 10, estimator = "naive")
# 20 sd
nintynine10obs_20sd_df <- nintynine10obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 246.44, distance = estimate - true_value,
         obs = 10, sd = 20, estimator = "naive")
nintynine20obs_20sd_df <- nintynine20obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 246.44, distance = estimate - true_value,
         obs = 20, sd = 20, estimator = "naive")
nintynine50obs_20sd_df <- nintynine50obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 246.44, distance = estimate - true_value,
         obs = 50, sd = 20, estimator = "naive")
# 40 sd
nintynine10obs_40sd_df <- nintynine10obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 293.43, distance = estimate - true_value,
         obs = 10, sd = 40, estimator = "naive")
nintynine20obs_40sd_df <- nintynine20obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 293.43, distance = estimate - true_value,
         obs = 20, sd = 40, estimator = "naive")
nintynine50obs_40sd_df <- nintynine50obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 293.43, distance = estimate - true_value,
         obs = 50, sd = 40, estimator = "naive")
# Stack all nine design cells and tag with the percentile label and level.
# (Label "ninetynineth" kept as-is: downstream code may match on it.)
naivenintynine_df <- plyr::rbind.fill(nintynine10obs_10sd_df, nintynine10obs_20sd_df, nintynine10obs_40sd_df,
                                      nintynine20obs_10sd_df, nintynine20obs_20sd_df, nintynine20obs_40sd_df,
                                      nintynine50obs_10sd_df, nintynine50obs_20sd_df, nintynine50obs_40sd_df) %>%
  mutate(perc = "ninetynineth", Q = 99)
### OFFSET ###
# Naive offset estimator (no confidence interval): the empirical 100% quantile
# of one sample, i.e. its maximum, returned as a named scalar ("100%").
offsetestimator_noci <- function(x) {
  quantile(x, probs = 1)
}
# Parallel application of the naive offset (100% quantile / maximum) estimator
# to each design cell; split(x, 1:1) wraps the result vector in a one-column
# data frame (column auto-named "X1", renamed to "estimate" downstream).
# 10 obs 10 sd
offset10obs_10sd <- unlist(mclapply(list_10obs_10sd, FUN = offsetestimator_noci,mc.cores = 20)) # already run
offset10obs_10sd_df <- as.data.frame(split(offset10obs_10sd, 1:1))
# 10 obs 20 sd
offset10obs_20sd <- unlist(mclapply(list_10obs_20sd, FUN = offsetestimator_noci, mc.cores = 20))
offset10obs_20sd_df <- as.data.frame(split(offset10obs_20sd, 1:1))
# 10 obs 40 sd
offset10obs_40sd <- unlist(mclapply(list_10obs_40sd, FUN = offsetestimator_noci, mc.cores = 20))
offset10obs_40sd_df <- as.data.frame(split(offset10obs_40sd, 1:1))
###### 20 Obs ####
# 20 obs 10 sd
offset20obs_10sd <- unlist(mclapply(list_20obs_10sd, FUN = offsetestimator_noci, mc.cores = 20))
offset20obs_10sd_df <- as.data.frame(split(offset20obs_10sd, 1:1))
# 20 obs 20 sd
offset20obs_20sd <- unlist(mclapply(list_20obs_20sd, FUN = offsetestimator_noci, mc.cores = 20))
offset20obs_20sd_df <- as.data.frame(split(offset20obs_20sd, 1:1))
# 20 obs 40 sd
offset20obs_40sd <- unlist(mclapply(list_20obs_40sd, FUN = offsetestimator_noci, mc.cores = 20))
offset20obs_40sd_df <- as.data.frame(split(offset20obs_40sd, 1:1))
########## 50 obs now y'all ########
# 50 obs 10 sd
offset50obs_10sd <- unlist(mclapply(list_50obs_10sd, FUN = offsetestimator_noci, mc.cores = 20))
offset50obs_10sd_df <- as.data.frame(split(offset50obs_10sd, 1:1))
# 50 obs 20 sd
offset50obs_20sd <- unlist(mclapply(list_50obs_20sd, FUN = offsetestimator_noci, mc.cores = 20))
offset50obs_20sd_df <- as.data.frame(split(offset50obs_20sd, 1:1))
# 50 obs 40 sd
offset50obs_40sd <- unlist(mclapply(list_50obs_40sd, FUN = offsetestimator_noci, mc.cores = 20))
offset50obs_40sd_df <- as.data.frame(split(offset50obs_40sd, 1:1))
## Attach metadata to each offset (sample maximum) result set: analytic true
## value for the noise level, estimation error, design cell, estimator tag.
# 10 sd
offset10obs_10sd_df <- offset10obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 238.1, distance = estimate - true_value,
         obs = 10, sd = 10, estimator = "naive")
offset20obs_10sd_df <- offset20obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 238.1, distance = estimate - true_value,
         obs = 20, sd = 10, estimator = "naive")
offset50obs_10sd_df <- offset50obs_10sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 238.1, distance = estimate - true_value,
         obs = 50, sd = 10, estimator = "naive")
# 20 sd
offset10obs_20sd_df <- offset10obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 279.34, distance = estimate - true_value,
         obs = 10, sd = 20, estimator = "naive")
offset20obs_20sd_df <- offset20obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 279.34, distance = estimate - true_value,
         obs = 20, sd = 20, estimator = "naive")
offset50obs_20sd_df <- offset50obs_20sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 279.34, distance = estimate - true_value,
         obs = 50, sd = 20, estimator = "naive")
# 40 sd
offset10obs_40sd_df <- offset10obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 361.38, distance = estimate - true_value,
         obs = 10, sd = 40, estimator = "naive")
offset20obs_40sd_df <- offset20obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 361.38, distance = estimate - true_value,
         obs = 20, sd = 40, estimator = "naive")
offset50obs_40sd_df <- offset50obs_40sd_df %>%
  rename(estimate = X1) %>%
  mutate(true_value = 361.38, distance = estimate - true_value,
         obs = 50, sd = 40, estimator = "naive")
# Stack all nine design cells and tag with the percentile label and level.
naiveoffset_df <- plyr::rbind.fill(offset10obs_10sd_df, offset10obs_20sd_df, offset10obs_40sd_df,
                                   offset20obs_10sd_df, offset20obs_20sd_df, offset20obs_40sd_df,
                                   offset50obs_10sd_df, offset50obs_20sd_df, offset50obs_40sd_df) %>%
  mutate(perc = "offset", Q = 100)
# Combine every percentile's naive-estimator results (naiveonset_df and
# naivefirst_df are built earlier in the script) into one long data frame.
unimodal_sims_all <- plyr::rbind.fill(naiveonset_df, naivefirst_df, naivefifth_df, naivetenth_df,
                                      naivefifty_df, naiveninty_df, naivenintyfive_df, naivenintynine_df,
                                      naiveoffset_df)
# write results -- assumes a "results/" directory exists -- TODO confirm
write.csv(unimodal_sims_all, file = "results/unimodal_quantile.csv", row.names = FALSE)
|
# plot4.R: total PM2.5 emissions from coal combustion-related sources,
# 1999-2008, drawn as a line plot into plot4.png.
#
# Load each dataset only if it is not already in the workspace.
# BUG FIX: the original tested for a variable named "data" in both branches,
# so the caching never applied to pmData/classData; test the actual objects.
if (!exists("pmData")) {
  pmData <- readRDS("./data/summarySCC_PM25.rds")
}
if (!exists("classData")) {
  classData <- readRDS("./data/Source_Classification_Code.rds")
}
# Plot the data. Open the PNG device first so par() applies to it
# (the original set margins before png(), affecting the wrong device).
png(filename = "plot4.png",
    width = 480, height = 480,
    units = "px")
par(mar = c(5.1, 4.5, 4.1, 2.1))
# Select coal-related source classification codes by name (case-insensitive),
# then keep only the emission records whose SCC matches one of them.
coal <- grep("coal", classData$Short.Name, ignore.case = TRUE)
coal <- classData[coal, ]
coal <- pmData[pmData$SCC %in% coal$SCC, ]
# Total emissions per year across all coal-related sources.
coalEmissions <- aggregate(coal$Emissions, list(coal$year), FUN = "sum")
plot(coalEmissions, type = "l", xlab = "Year",
     main = "Total Emissions From Coal Combustion-related\n Sources from 1999 to 2008",
     ylab = expression('Total PM'[2.5]*" Emission"))
dev.off()
|
/plot4.R
|
no_license
|
ez3804/ExData_Prj2
|
R
| false
| false
| 762
|
r
|
# plot4.R: total PM2.5 emissions from coal combustion-related sources,
# 1999-2008, drawn as a line plot into plot4.png.
#
# Load each dataset only if it is not already in the workspace.
# BUG FIX: the original tested for a variable named "data" in both branches,
# so the caching never applied to pmData/classData; test the actual objects.
if (!exists("pmData")) {
  pmData <- readRDS("./data/summarySCC_PM25.rds")
}
if (!exists("classData")) {
  classData <- readRDS("./data/Source_Classification_Code.rds")
}
# Plot the data. Open the PNG device first so par() applies to it
# (the original set margins before png(), affecting the wrong device).
png(filename = "plot4.png",
    width = 480, height = 480,
    units = "px")
par(mar = c(5.1, 4.5, 4.1, 2.1))
# Select coal-related source classification codes by name (case-insensitive),
# then keep only the emission records whose SCC matches one of them.
coal <- grep("coal", classData$Short.Name, ignore.case = TRUE)
coal <- classData[coal, ]
coal <- pmData[pmData$SCC %in% coal$SCC, ]
# Total emissions per year across all coal-related sources.
coalEmissions <- aggregate(coal$Emissions, list(coal$year), FUN = "sum")
plot(coalEmissions, type = "l", xlab = "Year",
     main = "Total Emissions From Coal Combustion-related\n Sources from 1999 to 2008",
     ylab = expression('Total PM'[2.5]*" Emission"))
dev.off()
|
# Run this on Domino ####
# Load the breeding-simulation functions from the mounted project volume.
source('/mnt/simulator/sim_phenos.R')
source('/mnt/simulator/create_cross_design.R')
source('/mnt/simulator/make_crosses.R')
source('/mnt/simulator/calc_TGV.R')
source('/mnt/simulator/extract_selections.R')
source('/mnt/simulator/create_map.R')
source('/mnt/simulator/create_parents.R')
source('/mnt/simulator/OP_testing.R')
## Volume ####
# Genetic-model settings: dominance coefficient D = 0 and minor-allele effect
# of -100, passed to calc_TGV() as dom.coeff and `a` respectively.
D=0
minor=-100
# Alternative seeds tried in earlier runs, kept for reference.
#set.seed(435634)
#set.seed(3345345)
set.seed(457456)
# Simulated genome: 12 chromosomes, map length 1800, 120 markers, 2640 QTL of
# which 1960 are SNP-QTL (snp.qtl.maf bounds 0.01-0.02).
map <- create_genetic_map(num.chromos = 12,map.length = 1800,num.markers = 120,total.qtl = 2640,num.snpqtl = 1960,
                          distribute.loci ="even",marker.distribution = "equally-spaced",snp.qtl.maf = c(0.01,0.02))
# Founder population of 50 parents; phenotypes simulated at heritability 0.3.
parents <- create_parents(map.info = map,num.parents = 50,max.delt.allele = 14,heterozygous.markers = F)
parent.tgv <- calc_TGV(geno.info = parents,map.info = map,founder = T,A = 1,a = minor,dom.coeff = D)
set.seed(435634)
parent.phenos <- sim_phenos(TGV.object = parent.tgv,h2 = .3)
# First generation: crosses taken from an external cross file; progeny TGV and
# phenotypes simulated with the same genetic model as the parents.
first.gen.cross <- create_cross_design(parent.info = parents,mating.design = "cross.file.input",cross.file = "/mnt/graham_study/g.study.1stgen.txt",generation = 1)
first.prog <- make_crosses(parent.info = parents,map.info = map,cross.design = first.gen.cross,num.cores = parallel::detectCores())
first.prog.tgv <- calc_TGV(geno.info = first.prog,map.info = map,cross.design = first.gen.cross,A = 1,a = minor,dom.coeff = D)
first.prog.phenos <- sim_phenos(TGV.object = first.prog.tgv,h2 = .3)
# Phenotypic selection: 1 individual within each family, 40 among families.
prog.ex1 <- extract_selections(among.family.selection = "Phenotype",relationship.matrix.type = "pedigree",map.info = map,
                               cross.design = first.gen.cross,past.tgv = parent.tgv,past.phenos = parent.phenos,parent.info = parents,
                               progeny.info = first.prog,progeny.TGV = first.prog.tgv,progeny.phenos = first.prog.phenos,
                               num.selections.within.family = 1,
                               num.selections.among.family = 40)
selections <- sort(as.numeric(names(prog.ex1$selection.phenos)))
# Remap the second-generation pedigree (stored as selection indices) onto the
# actual selection IDs, write the remapped cross file, then build generation 2.
second.gen.cross <- read.table("/mnt/graham_study/g.2nd.gen.ped2.txt")
second.gen.cross <- cbind(selections[second.gen.cross[,1]],selections[second.gen.cross[,2]],second.gen.cross[,3])
write.table(second.gen.cross,"./g.2nd.gen.pedv2.txt",row.names = F,col.names = F)
second.gen.cross <-create_cross_design(parent.info = prog.ex1,mating.design = "cross.file.input",cross.file = "./g.2nd.gen.pedv2.txt",generation = 2)
second.prog <- make_crosses(parent.info = prog.ex1,map.info = map,cross.design = second.gen.cross,num.cores = parallel::detectCores())
second.prog.tgv <- calc_TGV(geno.info = second.prog,map.info = map,cross.design = second.gen.cross,A = 1,a = minor,dom.coeff = D)
# Second-generation phenotypes reuse the founders' environmental variance
# (phenotypic minus genetic variance) instead of a fixed heritability.
evar <- var(parent.phenos$phenos) - var(parent.phenos$genetic.values)
second.prog.phenos <- sim_phenos(TGV.object = second.prog.tgv,E.var = evar)
### Make PLot ####
{
# NOTE(review): `first.prog` below overwrites the first-generation progeny
# object created earlier; here it holds the starting row index of each
# 50-progeny family block.
first.prog <- seq(1,2000,50)
length(first.prog)
# Family indices by cross type: the blocks cycle OP, self, full-sib, half-sib,
# so every 4th family (with the given offset) belongs to one type.
OP <- c(seq(1,length(first.prog),by = 4))
Self <- c(seq(2,length(first.prog),by = 4))
FS <- c(seq(3,length(first.prog),by = 4))
HS <- c(seq(4,length(first.prog),by = 4))
# Mean phenotype of each 50-progeny family.
mean.phenos <- vector()
for(each in 1:length(first.prog)) { mean.phenos <- c(mean.phenos,mean(second.prog.phenos$phenos[first.prog[each]:(first.prog[each]+49)]))}
OP.phenos <- (mean.phenos[OP])
Self.phenos <- mean.phenos[Self]
FS.phenos <- mean.phenos[FS]
HS.phenos <- mean.phenos[HS]
# Inbreeding coefficients per cross type (10 families each): 0 for outcross,
# 0.125 half-sib, 0.25 full-sib, 0.5 self; rows ordered OP, HS, FS, self.
outcross <- rep(0,10)
half<- rep(.125,10)
full<- rep(.25,10)
self<- rep(.5,10)
coastal.ht <- data.frame(par=rep(1:10,times=4),x=c(outcross,half,full,self),y=c(OP.phenos,HS.phenos,FS.phenos,Self.phenos))
# Create Line Chart
# range01 linearly rescales the family means onto [a, b] = [20, 140].
a<-20
b<-140
range01 <- function(x){(((b-a)*(x-min(x)))/(max(x)-min(x)))+a}
cy <- range01(coastal.ht$y)
coastal.ht$y <- cy
coastal.ht$par <- as.factor(coastal.ht$par)
coastal.ht$Parents <- coastal.ht$par
# Volume-vs-inbreeding line chart, one line per parent. Requires ggplot2 and
# ggpubr to be attached (presumably via the sourced scripts) -- TODO confirm.
ggplot(coastal.ht, aes(x=x, y=y,shape=Parents,group=Parents)) +
  geom_line() +
  geom_point() +
  scale_shape_manual(values = c(0,9, 16, 3,11,8,4,3,2,6)) +
  scale_y_continuous(breaks=seq(20, 140, 20),limits=c(20,140)) +
  scale_x_continuous(breaks=c(0,.125,.25,.5),limits=c(0,.5)) +
  xlab("Inbreeding Coefficient") +
  ylab("Volume (cubic dm)") +
  labs(title = "Volume") +
  theme_pubr()
}
### Generate statistics #####
# Mean rescaled volume per cross type; coastal.ht rows are ordered
# OP (1-10), half-sib (11-20), full-sib (21-30), self (31-40).
op_mean <- mean(coastal.ht$y[1:10])
hs_mean <- mean(coastal.ht$y[11:20])
fs_mean <- mean(coastal.ht$y[21:30])
s_mean <- mean(coastal.ht$y[31:40])
i_levels <- c(0,.125,.25,.5)
i_means <- c(op_mean,hs_mean,fs_mean,s_mean)
# Regress volume on inbreeding coefficient (per family, through the origin;
# then on the four type means), and compute proportional declines vs OP.
summary(lm(coastal.ht$y ~ 0 +coastal.ht$x))
summary(lm(i_means ~ i_levels))
1-(hs_mean/op_mean)
1-(fs_mean/op_mean)
1-(s_mean/op_mean)
# Paired comparison of half-sib vs self families.
t.test(coastal.ht$y[11:20],coastal.ht$y[31:40],paired = T)
# Family-by-family differences between successive inbreeding levels.
op_hs <- coastal.ht$y[1:10] - coastal.ht$y[11:20]
hs_fs <- coastal.ht$y[11:20] - coastal.ht$y[21:30]
fs_s <- coastal.ht$y[21:30] - coastal.ht$y[31:40]
cbind(op_hs,hs_fs,fs_s)
# Marker-based heritability of the second-generation progeny.
library(heritability)
h2 <- data.frame(second.prog.tgv$markers.matrix,stringsAsFactors = F)
h2 <- apply(h2,MARGIN = 2, function(x) as.numeric(x))
library(rrBLUP)
# Additive relationship matrix from the marker matrix, labelled by progeny ID.
out <- A.mat((h2)); colnames(out) <- names(second.prog.phenos$phenos) ; rownames(out) <- names(second.prog.phenos$phenos)
sample.num <- sample(x = 1:ncol(out),size = 100,replace = F)
# NOTE(review): the random 100-individual sample above is immediately
# overwritten -- all 2000 progeny are used.
sample.num <- 1:2000
out.test <- out[sample.num,sample.num]; out.phenos <- second.prog.phenos$phenos[sample.num]
the.out.data <- marker_h2(data.vector = out.phenos,geno.vector = rownames(out.test),K = out.test,fix.h2 = T,h2 = .22)
the.out.data
|
/replicate_GF_study/volume.R
|
permissive
|
arfesta/SimBreeder_Project
|
R
| false
| false
| 5,495
|
r
|
# Run this on Domino ####
## Two-generation breeding simulation for the volume trait. The sourced
## files provide create_genetic_map(), create_parents(), calc_TGV(),
## sim_phenos(), make_crosses(), create_cross_design() and
## extract_selections().
source('/mnt/simulator/sim_phenos.R')
source('/mnt/simulator/create_cross_design.R')
source('/mnt/simulator/make_crosses.R')
source('/mnt/simulator/calc_TGV.R')
source('/mnt/simulator/extract_selections.R')
source('/mnt/simulator/create_map.R')
source('/mnt/simulator/create_parents.R')
source('/mnt/simulator/OP_testing.R')
## Volume ####
## Genetic-effect settings: no dominance, strongly negative minor allele.
D=0
minor=-100
#set.seed(435634)
#set.seed(3345345)
## NOTE: every step below consumes RNG draws, so the seeds and statement
## order must stay fixed for results to be reproducible.
set.seed(457456)
map <- create_genetic_map(num.chromos = 12,map.length = 1800,num.markers = 120,total.qtl = 2640,num.snpqtl = 1960,
                          distribute.loci ="even",marker.distribution = "equally-spaced",snp.qtl.maf = c(0.01,0.02))
parents <- create_parents(map.info = map,num.parents = 50,max.delt.allele = 14,heterozygous.markers = FALSE)
parent.tgv <- calc_TGV(geno.info = parents,map.info = map,founder = TRUE,A = 1,a = minor,dom.coeff = D)
set.seed(435634)
parent.phenos <- sim_phenos(TGV.object = parent.tgv,h2 = .3)
## Generation 1: crosses specified by an external cross file.
first.gen.cross <- create_cross_design(parent.info = parents,mating.design = "cross.file.input",cross.file = "/mnt/graham_study/g.study.1stgen.txt",generation = 1)
first.prog <- make_crosses(parent.info = parents,map.info = map,cross.design = first.gen.cross,num.cores = parallel::detectCores())
first.prog.tgv <- calc_TGV(geno.info = first.prog,map.info = map,cross.design = first.gen.cross,A = 1,a = minor,dom.coeff = D)
first.prog.phenos <- sim_phenos(TGV.object = first.prog.tgv,h2 = .3)
## Select the single best tree per family (40 families) on phenotype.
prog.ex1 <- extract_selections(among.family.selection = "Phenotype",relationship.matrix.type = "pedigree",map.info = map,
                               cross.design = first.gen.cross,past.tgv = parent.tgv,past.phenos = parent.phenos,parent.info = parents,
                               progeny.info = first.prog,progeny.TGV = first.prog.tgv,progeny.phenos = first.prog.phenos,
                               num.selections.within.family = 1,
                               num.selections.among.family = 40)
selections <- sort(as.numeric(names(prog.ex1$selection.phenos)))
## Remap the generation-2 pedigree's parent indices onto the selected IDs,
## write it out, and build the generation-2 cross design from that file.
second.gen.cross <- read.table("/mnt/graham_study/g.2nd.gen.ped2.txt")
second.gen.cross <- cbind(selections[second.gen.cross[,1]],selections[second.gen.cross[,2]],second.gen.cross[,3])
write.table(second.gen.cross,"./g.2nd.gen.pedv2.txt",row.names = FALSE,col.names = FALSE)
second.gen.cross <- create_cross_design(parent.info = prog.ex1,mating.design = "cross.file.input",cross.file = "./g.2nd.gen.pedv2.txt",generation = 2)
second.prog <- make_crosses(parent.info = prog.ex1,map.info = map,cross.design = second.gen.cross,num.cores = parallel::detectCores())
second.prog.tgv <- calc_TGV(geno.info = second.prog,map.info = map,cross.design = second.gen.cross,A = 1,a = minor,dom.coeff = D)
## Environmental variance taken from the founders at h2 = 0.3 and reused
## so generation-2 phenotypes share the founders' error variance.
evar <- var(parent.phenos$phenos) - var(parent.phenos$genetic.values)
second.prog.phenos <- sim_phenos(TGV.object = second.prog.tgv,E.var = evar)
### Make Plot ####
## Summarise the 2000 generation-2 progeny (40 crosses x 50 trees each)
## into per-cross means and plot phenotype against inbreeding coefficient.
{
## Start index of each 50-tree family block in the phenotype vector.
## NOTE(review): this overwrites the `first.prog` genotype object created
## by make_crosses() above -- rename one of them if both are still needed.
first.prog <- seq(1,2000,50)
length(first.prog)
## Families cycle through cross types in blocks of four; the index sets
## pick out outcrossed (OP), selfed, full-sib and half-sib families.
OP <- c(seq(1,length(first.prog),by = 4))
Self <- c(seq(2,length(first.prog),by = 4))
FS <- c(seq(3,length(first.prog),by = 4))
HS <- c(seq(4,length(first.prog),by = 4))
## Mean phenotype of each 50-tree family.
mean.phenos <- vector()
for(each in 1:length(first.prog)) { mean.phenos <- c(mean.phenos,mean(second.prog.phenos$phenos[first.prog[each]:(first.prog[each]+49)]))}
OP.phenos <- (mean.phenos[OP])
Self.phenos <- mean.phenos[Self]
FS.phenos <- mean.phenos[FS]
HS.phenos <- mean.phenos[HS]
## Inbreeding coefficient for each cross type, 10 parents per type.
outcross <- rep(0,10)
half<- rep(.125,10)
full<- rep(.25,10)
self<- rep(.5,10)
coastal.ht <- data.frame(par=rep(1:10,times=4),x=c(outcross,half,full,self),y=c(OP.phenos,HS.phenos,FS.phenos,Self.phenos))
# Create Line Chart
## Linearly rescale y onto [a, b] = [20, 140] (cubic dm) for plotting.
a<-20
b<-140
range01 <- function(x){(((b-a)*(x-min(x)))/(max(x)-min(x)))+a}
cy <- range01(coastal.ht$y)
coastal.ht$y <- cy
coastal.ht$par <- as.factor(coastal.ht$par)
coastal.ht$Parents <- coastal.ht$par
## One line per parent across the four inbreeding levels.
## NOTE(review): theme_pubr() comes from ggpubr, which is never attached
## in this script -- confirm it is loaded elsewhere.
ggplot(coastal.ht, aes(x=x, y=y,shape=Parents,group=Parents)) +
geom_line() +
geom_point() +
scale_shape_manual(values = c(0,9, 16, 3,11,8,4,3,2,6)) +
scale_y_continuous(breaks=seq(20, 140, 20),limits=c(20,140)) +
scale_x_continuous(breaks=c(0,.125,.25,.5),limits=c(0,.5)) +
xlab("Inbreeding Coefficient") +
ylab("Volume (cubic dm)") +
labs(title = "Volume") +
theme_pubr()
}
### Generate statistics #####
## Group means of the rescaled phenotypes. Rows follow the order used to
## build coastal.ht: 1-10 outcrossed (OP), 11-20 half-sib (HS),
## 21-30 full-sib (FS), 31-40 selfed.
op_mean <- mean(coastal.ht$y[1:10])
hs_mean <- mean(coastal.ht$y[11:20])
fs_mean <- mean(coastal.ht$y[21:30])
s_mean <- mean(coastal.ht$y[31:40])
i_levels <- c(0,.125,.25,.5)  # inbreeding coefficient of each cross type
i_means <- c(op_mean,hs_mean,fs_mean,s_mean)
## Regressions of phenotype on inbreeding: per-tree through the origin,
## and on the four group means.
summary(lm(coastal.ht$y ~ 0 +coastal.ht$x))
summary(lm(i_means ~ i_levels))
## Inbreeding depression relative to the outcrossed mean.
1-(hs_mean/op_mean)
1-(fs_mean/op_mean)
1-(s_mean/op_mean)
t.test(coastal.ht$y[11:20],coastal.ht$y[31:40],paired = TRUE)  # HS vs selfed
## Per-parent differences between successive inbreeding levels.
op_hs <- coastal.ht$y[1:10] - coastal.ht$y[11:20]
hs_fs <- coastal.ht$y[11:20] - coastal.ht$y[21:30]
fs_s <- coastal.ht$y[21:30] - coastal.ht$y[31:40]
cbind(op_hs,hs_fs,fs_s)
## Marker-based heritability from the generation-2 genotype matrix.
library(heritability)
h2 <- data.frame(second.prog.tgv$markers.matrix,stringsAsFactors = FALSE)
h2 <- apply(h2,MARGIN = 2, as.numeric)  # coerce every marker column to numeric
library(rrBLUP)
out <- A.mat((h2)); colnames(out) <- names(second.prog.phenos$phenos) ; rownames(out) <- names(second.prog.phenos$phenos)
## The 100-individual subsample is immediately overwritten so all 2000
## progeny are used; the sample() call is kept because it consumes RNG
## draws and removing it would change downstream random results.
sample.num <- sample(x = 1:ncol(out),size = 100,replace = FALSE)
sample.num <- 1:2000
out.test <- out[sample.num,sample.num]; out.phenos <- second.prog.phenos$phenos[sample.num]
the.out.data <- marker_h2(data.vector = out.phenos,geno.vector = rownames(out.test),K = out.test,fix.h2 = TRUE,h2 = .22)
the.out.data
|
best <- function(state, outcome) {
  ## Return the hospital(s) in `state` with the lowest 30-day mortality
  ## rate for `outcome` ("heart attack", "heart failure" or "pneumonia").
  ## Ties are returned together, sorted alphabetically. Stops with
  ## 'Invalid State' / 'Invalid Outcome' on bad input.
  ##
  ## Read outcome data (all columns as character so "Not Available" stays
  ## as text rather than breaking numeric parsing).
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  fulldata <- as.data.frame(cbind(data[, 2],   ## hospital name
                                  data[, 7],   ## state abbreviation
                                  data[, 11],  ## 30-day rate: heart attack
                                  data[, 17],  ## 30-day rate: heart failure
                                  data[, 23]), ## 30-day rate: pneumonia
                            stringsAsFactors = FALSE)
  colnames(fulldata) <- c("hospital", "state", "heart attack", "heart failure"
                          , "pneumonia")
  ## Check that state and outcome are valid before doing any work.
  if (!state %in% fulldata[, "state"]) {
    stop('Invalid State')
  }
  if (!outcome %in% c("heart attack", "heart failure", "pneumonia")) {
    stop('Invalid Outcome')
  }
  ## Subset to the requested state.
  matchstate <- which(fulldata[, "state"] == state)
  tablestate <- fulldata[matchstate, ]
  ## The original's eval(outcome) was a no-op on a character scalar; index
  ## the column by name directly. as.numeric() turns "Not Available" into
  ## NA (with a warning), which min(..., na.rm = TRUE) ignores.
  outputdata <- as.numeric(tablestate[, outcome])
  min_val <- min(outputdata, na.rm = TRUE)
  ## All hospitals tied at the minimum, in alphabetical order.
  sort(tablestate[, "hospital"][which(outputdata == min_val)])
}
|
/best.R
|
no_license
|
abheekbiswas/ProgrammingAssignment3
|
R
| false
| false
| 1,258
|
r
|
best <- function(state, outcome) {
  ## Find the hospital(s) in `state` with the lowest 30-day mortality
  ## rate for `outcome`; ties come back sorted alphabetically.
  ## Load the file with every column as character, then keep only the
  ## name, state, and the three mortality-rate columns.
  raw <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  hospitals <- as.data.frame(cbind(raw[, 2],
                                   raw[, 7],
                                   raw[, 11],
                                   raw[, 17],
                                   raw[, 23]),
                             stringsAsFactors = FALSE)
  colnames(hospitals) <- c("hospital", "state", "heart attack", "heart failure"
                           , "pneumonia")
  ## Validate both arguments, then pick the winner(s).
  if (!state %in% hospitals[, "state"]) {
    stop('Invalid State')
  } else if (!outcome %in% c("heart attack", "heart failure", "pneumonia")) {
    stop('Invalid Outcome')
  } else {
    in_state <- hospitals[which(hospitals[, "state"] == state), ]
    ## Non-numeric entries become NA and are skipped by min().
    rates <- as.numeric(in_state[[outcome]])
    lowest <- min(rates, na.rm = TRUE)
    winners <- in_state[["hospital"]][which(rates == lowest)]
    answer <- winners[order(winners)]
  }
  return(answer)
}
|
library(ggplot2)
library(dplyr)
library(gridExtra)
library(timetk)
### Plot time series of flood-insurance variables.
### Expects a policy-level data frame FL in data/FL.Rda -- TODO confirm
### whether FL holds Florida-only or national records (the titles say US).
load('data/FL.Rda')
## Yearly aggregates: mean policy cost, policy count, and mean insured
## amount; records from Aug 2019 onward are dropped (incomplete period).
fl = FL %>%
  filter(policydate < as.Date("2019-08-01")) %>%
  summarise_by_time(.date_var = policydate, .by= "year",
                    cost = mean(policycost),
                    num = n(),
                    amt = mean(building_ins + content_ins))
p1 = ggplot(fl, aes(x = policydate, y = cost)) +
  geom_line(color = "red") + geom_point(color = "red") +
  labs(title = 'Average US flood insurance cost', x = "Year", y = "Policy cost [$]") +
  scale_x_date(date_labels = "%m/%Y", date_breaks = "years", date_minor_breaks = "months")
p1
p2 = ggplot(fl, aes(x = policydate, y = num/1e6)) +
  geom_line(color = "blue") + geom_point(color = "blue") +
  labs(title = 'Total number of policies', subtitle = 'in the US', x = "Year", y = "Number [mil]") +
  scale_x_date(date_labels = "%m/%Y", date_breaks = "years", date_minor_breaks = "months")
p2
p3 = ggplot(fl, aes(x = policydate, y = amt/1e6)) +
  geom_line(color = "green") + geom_point(color = "green") +
  labs(title = 'Amount of property insured', x = "Year", y = "Amount [mil $] ") +  # fixed typo "Ampunt"
  scale_x_date(date_labels = "%m/%Y", date_breaks = "years", date_minor_breaks = "months")
p3
grid.arrange(p1,p2,p3, nrow = 3)
###### Make monthly plots
fl = FL %>%
  summarise_by_time(.date_var = policydate, .by= "month",
                    cost = mean(policycost),
                    num = n(),
                    amt = mean(building_ins + content_ins)) %>%
  filter(policydate < as.Date("2019-08-01"))
## Mid-January year markers and Atlantic hurricane-season windows
## (June-December) for 2013-2019.
v.lines = c("2013-01-15", "2014-01-15", "2015-01-15", "2016-01-15",
            "2017-01-15", "2018-01-15", "2019-01-15")
hur_start = c("2013-06-01", "2014-06-01", "2015-06-01", "2016-06-01", "2017-06-01", "2018-06-01", "2019-06-01")
hur_start = as.Date(hur_start, format = "%Y-%m-%d")
hur_end = c("2013-12-01", "2014-12-01", "2015-12-01", "2016-12-01", "2017-12-01", "2018-12-01", "2019-12-01")
hur_end = as.Date(hur_end, format = "%Y-%m-%d")
## One translucent rectangle per hurricane season. Adding a list of
## layers to a ggplot adds each element in turn, replacing the seven
## copy-pasted geom_rect() calls per plot in the original.
season_shading <- function(fill) {
  lapply(seq_along(hur_start), function(i) {
    geom_rect(inherit.aes = FALSE,
              aes(xmin = hur_start[i], xmax = hur_end[i],
                  ymin = -Inf, ymax = Inf),
              fill = fill, alpha = 0.03)
  })
}
p1 = ggplot(fl, aes(x = policydate, y = cost)) +
  season_shading("pink") +
  geom_line(color = "red") + geom_point(color = "red") +
  labs(title = 'Average US flood insurance cost', x = "Year", y = "Policy cost [$]") +
  geom_vline(xintercept = as.Date(v.lines)) +
  scale_x_date(date_labels = "%m/%Y", date_breaks = "years", date_minor_breaks = "months")
p1
p2 = ggplot(fl, aes(x = policydate, y = num/1e6)) +
  season_shading("lightblue") +
  geom_line(color = "blue") + geom_point(color = "blue") +
  labs(title = 'Total number of policies', subtitle = 'in the US', x = "Year", y = "Number [mil]") +
  geom_vline(xintercept = as.Date(v.lines)) +
  scale_x_date(date_labels = "%m/%Y", date_breaks = "years", date_minor_breaks = "months")
p2
p3 = ggplot(fl, aes(x = policydate, y = amt/1e6)) +
  season_shading("lightgreen") +
  geom_line(color = "darkgreen") + geom_point(color = "darkgreen") +
  labs(title = 'Amount of property insured', x = "Year", y = "Amount [mil $] ") +
  geom_vline(xintercept = as.Date(v.lines)) +
  scale_x_date(date_labels = "%m/%Y", date_breaks = "years", date_minor_breaks = "months")
p3
grid.arrange(p1,p2,p3, nrow = 3)
|
/not used/timeser_Onestates.R
|
no_license
|
LenaChretien/National_flood_insurance
|
R
| false
| false
| 6,463
|
r
|
library(ggplot2)
library(dplyr)
library(gridExtra)
library(timetk)
### Plot time series of flood-insurance variables.
### Expects a policy-level data frame FL in data/FL.Rda -- TODO confirm
### whether FL holds Florida-only or national records (the titles say US).
load('data/FL.Rda')
## Yearly aggregates: mean policy cost, policy count, and mean insured
## amount; records from Aug 2019 onward are dropped (incomplete period).
fl = FL %>%
  filter(policydate < as.Date("2019-08-01")) %>%
  summarise_by_time(.date_var = policydate, .by= "year",
                    cost = mean(policycost),
                    num = n(),
                    amt = mean(building_ins + content_ins))
p1 = ggplot(fl, aes(x = policydate, y = cost)) +
  geom_line(color = "red") + geom_point(color = "red") +
  labs(title = 'Average US flood insurance cost', x = "Year", y = "Policy cost [$]") +
  scale_x_date(date_labels = "%m/%Y", date_breaks = "years", date_minor_breaks = "months")
p1
p2 = ggplot(fl, aes(x = policydate, y = num/1e6)) +
  geom_line(color = "blue") + geom_point(color = "blue") +
  labs(title = 'Total number of policies', subtitle = 'in the US', x = "Year", y = "Number [mil]") +
  scale_x_date(date_labels = "%m/%Y", date_breaks = "years", date_minor_breaks = "months")
p2
p3 = ggplot(fl, aes(x = policydate, y = amt/1e6)) +
  geom_line(color = "green") + geom_point(color = "green") +
  labs(title = 'Amount of property insured', x = "Year", y = "Amount [mil $] ") +  # fixed typo "Ampunt"
  scale_x_date(date_labels = "%m/%Y", date_breaks = "years", date_minor_breaks = "months")
p3
grid.arrange(p1,p2,p3, nrow = 3)
###### Make monthly plots
fl = FL %>%
  summarise_by_time(.date_var = policydate, .by= "month",
                    cost = mean(policycost),
                    num = n(),
                    amt = mean(building_ins + content_ins)) %>%
  filter(policydate < as.Date("2019-08-01"))
## Mid-January year markers and Atlantic hurricane-season windows
## (June-December) for 2013-2019.
v.lines = c("2013-01-15", "2014-01-15", "2015-01-15", "2016-01-15",
            "2017-01-15", "2018-01-15", "2019-01-15")
hur_start = c("2013-06-01", "2014-06-01", "2015-06-01", "2016-06-01", "2017-06-01", "2018-06-01", "2019-06-01")
hur_start = as.Date(hur_start, format = "%Y-%m-%d")
hur_end = c("2013-12-01", "2014-12-01", "2015-12-01", "2016-12-01", "2017-12-01", "2018-12-01", "2019-12-01")
hur_end = as.Date(hur_end, format = "%Y-%m-%d")
## One translucent rectangle per hurricane season. Adding a list of
## layers to a ggplot adds each element in turn, replacing the seven
## copy-pasted geom_rect() calls per plot in the original.
season_shading <- function(fill) {
  lapply(seq_along(hur_start), function(i) {
    geom_rect(inherit.aes = FALSE,
              aes(xmin = hur_start[i], xmax = hur_end[i],
                  ymin = -Inf, ymax = Inf),
              fill = fill, alpha = 0.03)
  })
}
p1 = ggplot(fl, aes(x = policydate, y = cost)) +
  season_shading("pink") +
  geom_line(color = "red") + geom_point(color = "red") +
  labs(title = 'Average US flood insurance cost', x = "Year", y = "Policy cost [$]") +
  geom_vline(xintercept = as.Date(v.lines)) +
  scale_x_date(date_labels = "%m/%Y", date_breaks = "years", date_minor_breaks = "months")
p1
p2 = ggplot(fl, aes(x = policydate, y = num/1e6)) +
  season_shading("lightblue") +
  geom_line(color = "blue") + geom_point(color = "blue") +
  labs(title = 'Total number of policies', subtitle = 'in the US', x = "Year", y = "Number [mil]") +
  geom_vline(xintercept = as.Date(v.lines)) +
  scale_x_date(date_labels = "%m/%Y", date_breaks = "years", date_minor_breaks = "months")
p2
p3 = ggplot(fl, aes(x = policydate, y = amt/1e6)) +
  season_shading("lightgreen") +
  geom_line(color = "darkgreen") + geom_point(color = "darkgreen") +
  labs(title = 'Amount of property insured', x = "Year", y = "Amount [mil $] ") +
  geom_vline(xintercept = as.Date(v.lines)) +
  scale_x_date(date_labels = "%m/%Y", date_breaks = "years", date_minor_breaks = "months")
p3
grid.arrange(p1,p2,p3, nrow = 3)
|
/semaforos.R
|
permissive
|
fagnersutel/pontoscriticos
|
R
| false
| false
| 2,276
|
r
| ||
#Week 2 Quiz q5
## Read a fixed-width file straight from the course URL: skip = 4 drops
## the header lines, widths carves the line into 9 columns (V1..V9).
## The quiz answer is the sum of the fourth parsed column.
file_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for"
df <- read.fwf(file_url, skip = 4, widths = c(12, 7, 4, 9, 4, 9, 4, 9, 4))
print(sum(df$V4))
|
/Getting_and_Cleaning_Data/W2_Quiz_q5.R
|
no_license
|
Mjvolk3/datasciencecoursera
|
R
| false
| false
| 185
|
r
|
#Week 2 Quiz q5
## Read a fixed-width file straight from the course URL: skip = 4 drops
## the header lines, widths carves the line into 9 columns (V1..V9).
## The quiz answer is the sum of the fourth parsed column.
file_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for"
df <- read.fwf(file_url, skip = 4, widths = c(12, 7, 4, 9, 4, 9, 4, 9, 4))
print(sum(df$V4))
|
## Exercise bhpm.pm() with default parameters on the packaged demo data
## and write convergence diagnostics, summary statistics, and posterior
## theta probabilities to files in the working directory.
# Reproduce the pre-R-3.6.0 sample() behaviour so the fixed seeds below
# match the historical results.
RNGkind(sample.kind="Rounding")
library(bhpm)
set.seed(14258)
######################### All events, Severity 1, Model BB_dependent #####################
print("######################### All events, Severity 1, Model BB level 1 #####################")
data(demo.cluster.data)
set.seed(9019)
## Fit the model: single chain, level 0.
raw = bhpm.pm(demo.cluster.data, level = 0, nchains = 1)
## Convergence diagnostics -> conv.dat (sink() redirects printed output).
conv = bhpm.convergence.diag(raw)
sink("conv.dat")
bhpm.print.convergence.summary(conv)
sink()
rm(conv)
gc()   # free the large diagnostic object before the next step
## Posterior summary statistics -> summary.dat.
summ = bhpm.summary.stats(raw)
sink("summary.dat")
bhpm.print.summary.stats(summ)
sink()
rm(summ)
gc()
## Posterior theta probabilities -- see bhpm.ptheta() for the exact event.
ptheta = bhpm.ptheta(raw)
print("Removing objects...")
rm(raw)
gc()
write.table(ptheta, "ptheta.dat")
## Keep only strong signals (posterior probability > 0.90).
ptheta90 = ptheta[ptheta$ptheta > 0.90,]
write.table(ptheta90, "ptheta90.dat")
print("Removing objects...")
#rm(conv)
rm(ptheta)
rm(ptheta90)
gc()
print("Finished.")
warnings()
|
/test/bhpm.pm/single_chain/default_parameters/test/run.r
|
no_license
|
rcarragh/bhpm
|
R
| false
| false
| 844
|
r
|
## Exercise bhpm.pm() with default parameters on the packaged demo data
## and write convergence diagnostics, summary statistics, and posterior
## theta probabilities to files in the working directory.
# Reproduce the pre-R-3.6.0 sample() behaviour so the fixed seeds below
# match the historical results.
RNGkind(sample.kind="Rounding")
library(bhpm)
set.seed(14258)
######################### All events, Severity 1, Model BB_dependent #####################
print("######################### All events, Severity 1, Model BB level 1 #####################")
data(demo.cluster.data)
set.seed(9019)
## Fit the model: single chain, level 0.
raw = bhpm.pm(demo.cluster.data, level = 0, nchains = 1)
## Convergence diagnostics -> conv.dat (sink() redirects printed output).
conv = bhpm.convergence.diag(raw)
sink("conv.dat")
bhpm.print.convergence.summary(conv)
sink()
rm(conv)
gc()   # free the large diagnostic object before the next step
## Posterior summary statistics -> summary.dat.
summ = bhpm.summary.stats(raw)
sink("summary.dat")
bhpm.print.summary.stats(summ)
sink()
rm(summ)
gc()
## Posterior theta probabilities -- see bhpm.ptheta() for the exact event.
ptheta = bhpm.ptheta(raw)
print("Removing objects...")
rm(raw)
gc()
write.table(ptheta, "ptheta.dat")
## Keep only strong signals (posterior probability > 0.90).
ptheta90 = ptheta[ptheta$ptheta > 0.90,]
write.table(ptheta90, "ptheta90.dat")
print("Removing objects...")
#rm(conv)
rm(ptheta)
rm(ptheta90)
gc()
print("Finished.")
warnings()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tar_watch_ui.R
\name{tar_watch_ui}
\alias{tar_watch_ui}
\title{Shiny module UI for tar_watch()}
\usage{
tar_watch_ui(
id,
label = "tar_watch_label",
seconds = 10,
seconds_min = 1,
seconds_max = 60,
seconds_step = 1,
targets_only = FALSE,
outdated = FALSE,
label_tar_visnetwork = NULL,
level_separation = 150,
degree_from = 1L,
degree_to = 1L,
height = "650px",
display = "summary",
displays = c("summary", "branches", "progress", "graph", "about")
)
}
\arguments{
\item{id}{Character of length 1, ID corresponding to the UI function
of the module.}
\item{label}{Label for the module.}
\item{seconds}{Numeric of length 1,
default number of seconds between refreshes of the graph.
Can be changed in the app controls.}
\item{seconds_min}{Numeric of length 1, lower bound of \code{seconds}
in the app controls.}
\item{seconds_max}{Numeric of length 1, upper bound of \code{seconds}
in the app controls.}
\item{seconds_step}{Numeric of length 1, step size of \code{seconds}
in the app controls.}
\item{targets_only}{Logical, whether to restrict the output to just targets
(\code{FALSE}) or to also include global functions and objects.}
\item{outdated}{Logical, whether to show colors to distinguish outdated
targets from up-to-date targets. (Global functions and objects
still show these colors.) Looking for outdated targets
takes a lot of time for large pipelines with lots of branches,
and setting \code{outdated} to \code{FALSE} is a nice way to speed up the graph
if you only want to see dependency relationships and build progress.}
\item{label_tar_visnetwork}{Character vector, \code{label} argument to
\code{\link[=tar_visnetwork]{tar_visnetwork()}}.}
\item{level_separation}{Numeric of length 1,
\code{levelSeparation} argument of \code{visNetwork::visHierarchicalLayout()}.
Controls the distance between hierarchical levels.
Consider changing the value if the aspect ratio of the graph
is far from 1. If \code{level_separation} is \code{NULL},
the \code{levelSeparation} argument of \code{visHierarchicalLayout()}
defaults to \code{150}.}
\item{degree_from}{Integer of length 1. When you click on a node,
the graph highlights a neighborhood of that node. \code{degree_from}
controls the number of edges the neighborhood extends upstream.}
\item{degree_to}{Integer of length 1. When you click on a node,
the graph highlights a neighborhood of that node. \code{degree_to}
controls the number of edges the neighborhood extends downstream.}
\item{height}{Character of length 1,
height of the \code{visNetwork} widget and branches table.}
\item{display}{Character of length 1, which display to show first.}
\item{displays}{Character vector of choices for the display.
Elements can be any of
\code{"graph"}, \code{"summary"}, \code{"branches"}, or \code{"about"}.}
}
\value{
A Shiny module UI.
}
\description{
Use \code{tar_watch_ui()} and \code{\link[=tar_watch_server]{tar_watch_server()}}
to include \code{\link[=tar_watch]{tar_watch()}} as a Shiny module in an app.
}
\seealso{
Other progress:
\code{\link{tar_built}()},
\code{\link{tar_canceled}()},
\code{\link{tar_errored}()},
\code{\link{tar_poll}()},
\code{\link{tar_progress_branches}()},
\code{\link{tar_progress_summary}()},
\code{\link{tar_progress}()},
\code{\link{tar_skipped}()},
\code{\link{tar_started}()},
\code{\link{tar_watch_server}()},
\code{\link{tar_watch}()}
}
\concept{progress}
|
/man/tar_watch_ui.Rd
|
permissive
|
ropensci/targets
|
R
| false
| true
| 3,479
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tar_watch_ui.R
\name{tar_watch_ui}
\alias{tar_watch_ui}
\title{Shiny module UI for tar_watch()}
\usage{
tar_watch_ui(
id,
label = "tar_watch_label",
seconds = 10,
seconds_min = 1,
seconds_max = 60,
seconds_step = 1,
targets_only = FALSE,
outdated = FALSE,
label_tar_visnetwork = NULL,
level_separation = 150,
degree_from = 1L,
degree_to = 1L,
height = "650px",
display = "summary",
displays = c("summary", "branches", "progress", "graph", "about")
)
}
\arguments{
\item{id}{Character of length 1, ID corresponding to the UI function
of the module.}
\item{label}{Label for the module.}
\item{seconds}{Numeric of length 1,
default number of seconds between refreshes of the graph.
Can be changed in the app controls.}
\item{seconds_min}{Numeric of length 1, lower bound of \code{seconds}
in the app controls.}
\item{seconds_max}{Numeric of length 1, upper bound of \code{seconds}
in the app controls.}
\item{seconds_step}{Numeric of length 1, step size of \code{seconds}
in the app controls.}
\item{targets_only}{Logical, whether to restrict the output to just targets
(\code{FALSE}) or to also include global functions and objects.}
\item{outdated}{Logical, whether to show colors to distinguish outdated
targets from up-to-date targets. (Global functions and objects
still show these colors.) Looking for outdated targets
takes a lot of time for large pipelines with lots of branches,
and setting \code{outdated} to \code{FALSE} is a nice way to speed up the graph
if you only want to see dependency relationships and build progress.}
\item{label_tar_visnetwork}{Character vector, \code{label} argument to
\code{\link[=tar_visnetwork]{tar_visnetwork()}}.}
\item{level_separation}{Numeric of length 1,
\code{levelSeparation} argument of \code{visNetwork::visHierarchicalLayout()}.
Controls the distance between hierarchical levels.
Consider changing the value if the aspect ratio of the graph
is far from 1. If \code{level_separation} is \code{NULL},
the \code{levelSeparation} argument of \code{visHierarchicalLayout()}
defaults to \code{150}.}
\item{degree_from}{Integer of length 1. When you click on a node,
the graph highlights a neighborhood of that node. \code{degree_from}
controls the number of edges the neighborhood extends upstream.}
\item{degree_to}{Integer of length 1. When you click on a node,
the graph highlights a neighborhood of that node. \code{degree_to}
controls the number of edges the neighborhood extends downstream.}
\item{height}{Character of length 1,
height of the \code{visNetwork} widget and branches table.}
\item{display}{Character of length 1, which display to show first.}
\item{displays}{Character vector of choices for the display.
Elements can be any of
\code{"graph"}, \code{"summary"}, \code{"branches"}, or \code{"about"}.}
}
\value{
A Shiny module UI.
}
\description{
Use \code{tar_watch_ui()} and \code{\link[=tar_watch_server]{tar_watch_server()}}
to include \code{\link[=tar_watch]{tar_watch()}} as a Shiny module in an app.
}
\seealso{
Other progress:
\code{\link{tar_built}()},
\code{\link{tar_canceled}()},
\code{\link{tar_errored}()},
\code{\link{tar_poll}()},
\code{\link{tar_progress_branches}()},
\code{\link{tar_progress_summary}()},
\code{\link{tar_progress}()},
\code{\link{tar_skipped}()},
\code{\link{tar_started}()},
\code{\link{tar_watch_server}()},
\code{\link{tar_watch}()}
}
\concept{progress}
|
#Run the script with the associated metabolite file.
#Rscript compare_metab.R MetabTable_ForComparison.csv Results_features.txt
#
# For every metabolite feature (columns whose name contains "X") compute,
# across the four treatment groups: the per-group medians, a
# Kruskal-Wallis p-value, and all pairwise Wilcoxon p-values
# (Bonferroni-adjusted); write one row per feature to the output file.
args<-commandArgs(TRUE)
data<-read.table(args[1],sep=",",row.names=1,header=TRUE)
# Since R 4.0 read.table no longer creates factors by default; coerce so
# levels() below works (no-op if the column is already a factor).
data$ATTRIBUTE_treatment<-as.factor(data$ATTRIBUTE_treatment)
metab_pos<-grep("X",names(data))
# One row per feature: 4 group medians, KW p-value, name, 6 pairwise p's.
results_data<-as.data.frame(matrix(NA,length(metab_pos),12))
for(i in seq_along(metab_pos)){
  # boxplot(..., plot = FALSE)$stats[3, ] gives the per-group medians.
  results_data[i,1:4]<-boxplot(data[,metab_pos[i]]~data$ATTRIBUTE_treatment,plot=FALSE)$stats[3,]
  results_data[i,5]<-kruskal.test(data[,metab_pos[i]]~data$ATTRIBUTE_treatment)$p.value
  results_data[i,6]<-names(data)[metab_pos[i]]
  # Argument and method names spelled out; 'p.adjust="bonf"' relied on
  # partial matching.
  temp<-pairwise.wilcox.test(data[,metab_pos[i]],data$ATTRIBUTE_treatment,p.adjust.method="bonferroni")
  # Flatten the lower-triangular p-value matrix into columns 7:12.
  results_data[i,7:9]<-temp$p.value[1:3,1]
  results_data[i,10:11]<-temp$p.value[2:3,2]
  results_data[i,12]<-temp$p.value[3,3]
}
names(results_data)<-c(levels(data$ATTRIBUTE_treatment),"pval","Feature","Benznidazole-Carnitine","Benznidazole-Untreated","Benznidazole-Vehicle","Carnitine-Untreated","Carnitine-Vehicle","Untreated-Vehicle")
# FDR-correct the Kruskal-Wallis p-values across all features.
results_data$fdr<-p.adjust(results_data$pval,method="fdr")
write.table(results_data,file=args[2],sep="\t",quote=FALSE,row.names=FALSE)
|
/compare_metab.R
|
permissive
|
mccall-lab-OU/GI-tract-paper
|
R
| false
| false
| 1,127
|
r
|
#Run the script with the associated metabolite file.
#Rscript compare_metab.R MetabTable_ForComparison.csv Results_features.txt
#
# For every metabolite feature (columns whose name contains "X") compute,
# across the four treatment groups: the per-group medians, a
# Kruskal-Wallis p-value, and all pairwise Wilcoxon p-values
# (Bonferroni-adjusted); write one row per feature to the output file.
args<-commandArgs(TRUE)
data<-read.table(args[1],sep=",",row.names=1,header=TRUE)
# Since R 4.0 read.table no longer creates factors by default; coerce so
# levels() below works (no-op if the column is already a factor).
data$ATTRIBUTE_treatment<-as.factor(data$ATTRIBUTE_treatment)
metab_pos<-grep("X",names(data))
# One row per feature: 4 group medians, KW p-value, name, 6 pairwise p's.
results_data<-as.data.frame(matrix(NA,length(metab_pos),12))
for(i in seq_along(metab_pos)){
  # boxplot(..., plot = FALSE)$stats[3, ] gives the per-group medians.
  results_data[i,1:4]<-boxplot(data[,metab_pos[i]]~data$ATTRIBUTE_treatment,plot=FALSE)$stats[3,]
  results_data[i,5]<-kruskal.test(data[,metab_pos[i]]~data$ATTRIBUTE_treatment)$p.value
  results_data[i,6]<-names(data)[metab_pos[i]]
  # Argument and method names spelled out; 'p.adjust="bonf"' relied on
  # partial matching.
  temp<-pairwise.wilcox.test(data[,metab_pos[i]],data$ATTRIBUTE_treatment,p.adjust.method="bonferroni")
  # Flatten the lower-triangular p-value matrix into columns 7:12.
  results_data[i,7:9]<-temp$p.value[1:3,1]
  results_data[i,10:11]<-temp$p.value[2:3,2]
  results_data[i,12]<-temp$p.value[3,3]
}
names(results_data)<-c(levels(data$ATTRIBUTE_treatment),"pval","Feature","Benznidazole-Carnitine","Benznidazole-Untreated","Benznidazole-Vehicle","Carnitine-Untreated","Carnitine-Vehicle","Untreated-Vehicle")
# FDR-correct the Kruskal-Wallis p-values across all features.
results_data$fdr<-p.adjust(results_data$pval,method="fdr")
write.table(results_data,file=args[2],sep="\t",quote=FALSE,row.names=FALSE)
|
# 9 April 2020—Lab 13 "Occupancy ####
library(unmarked)
library(MuMIn)
library(ggplot2)
# Single season occupancy ####
## Detection histories are in columns 4:25; the selected site covariates
## are in columns 26, 27, 32, 41, 46 of the badger CSV.
badger <- read.csv("badger_occupancy_scotland.csv", header = TRUE)
head(badger)
badger.o <- unmarkedFrameOccu(y = badger[,4:25],
siteCovs = badger[,c(26,27,32,41,46)])
plot(badger.o)
## Null model: constant detection (p) and constant occupancy (psi).
pdot.psidot = occu(~1 ~1, data = badger.o)
pdot.psidot
backTransform(pdot.psidot, type = 'det') # estimate of p
backTransform(pdot.psidot, type = 'state') # estimate of psi
## occu(~detection ~occupancy): temperature on both, then distance to
## a main road on occupancy.
pTemp.psiTemp <- occu(~WTemp ~WTemp, data = badger.o)
summary(pTemp.psiTemp)
pTemp.psiDTMR <- occu(~WTemp ~DistMR, data = badger.o)
summary(pTemp.psiDTMR)
hist(badger.o@siteCovs$DistMR)
par(mfrow = c(1,2))
hist(badger.o@siteCovs$DistMR, main="Not scaled")
hist(scale(badger.o@siteCovs$DistMR), main= "Scaled and centered")
mean(scale(badger.o@siteCovs$DistMR))
sd(scale(badger.o@siteCovs$DistMR))
## Refit with DistMR scaled and centered (mean 0, sd 1).
pTemp.psiDTMR <- occu(~WTemp ~scale(DistMR), data = badger.o)
summary(pTemp.psiDTMR)
## AIC model comparison.
models <- fitList( 'pdot.psidot'=pdot.psidot,
'pTemp.psiTemp'=pTemp.psiTemp,
'pTemp.psiDTMR'=pTemp.psiDTMR )
modSel(models)
out.put <- model.sel(pTemp.psiTemp, pTemp.psiDTMR)
MA.ests <- model.avg(out.put, subset = delta < 2)
# Assignment 1 ####
hist(badger.o@siteCovs$Elev)
hist(badger.o@siteCovs$Agric)
hist(badger.o@siteCovs$Humans)
# These don't range as extreme as other covariates
pTemp.psiELEV <- occu(~WTemp ~Elev, data = badger.o)
pTemp.psiAGRI <- occu(~WTemp ~Agric, data = badger.o)
pTemp.psiHUM <- occu(~WTemp ~Humans, data = badger.o)
models <- fitList('pdot.psidot'=pdot.psidot,
'pTemp.psiTemp'=pTemp.psiTemp,
'pTemp.psiDTMR'=pTemp.psiDTMR,
'pTemp.psiELEV'=pTemp.psiELEV,
'pTemp.psiAGRI'=pTemp.psiAGRI,
'pTemp.psiHUM'=pTemp.psiHUM)
modSel(models)
# most parsimonious is pTemp.psiAGRI because it has lowest AIC
# and is the only with delta <2
## Odds ratio for the Agric slope (coef 2 is the occupancy slope here).
exp(coef(pTemp.psiAGRI)[2])
# every percent increase in agricultural land use increases occupancy by 1.20
# make the figure
## Predict occupancy (with CI) over 0-26% agricultural land and plot.
agricultural <- data.frame(Agric = seq(0,26,1))
agricultural$occ <- predict(pTemp.psiAGRI, newdata = agricultural,
type = "state", appendData = TRUE)
ggplot(d = agricultural, aes(x = Agric, y = agricultural$occ$Predicted))+
geom_line(color = "red")+
geom_ribbon(data=agricultural,
aes(ymin=agricultural$occ$lower,ymax=agricultural$occ$upper),alpha=0.2,
color = "blue", fill = "blue")+
xlab("% Agricultural land")+
ylab("Badger Occupancy")+
theme_classic()+
theme(text = element_text(size = 20))
# Multi-season occupancy ####
y <- read.csv("lepa_y.csv", header = TRUE) # presence/absence data
head(y)
site.covs <- read.csv("lepa_site_covs.csv", header = TRUE) # covariates
head(site.covs) # A has been scaled
plot(site.covs$x.coord., site.covs$y.coord.,
cex = log(site.covs$A+1), xlab = "X coordinate", ylab = "Y coordinate")
lepa_umf <- unmarkedMultFrame(y=y[,2:7], siteCovs = site.covs, numPrimary = 3)
mod_1<-colext(psiformula=~Phorophyte, gammaformula=~1, epsilonformula=~1,
pformula=~A+Phorophyte,data=lepa_umf)
plogis(coef(mod_1)[3])
plogis(coef(mod_1)[4])
plogis(coef(mod_1)[5])
plogis(coef(mod_1)[5]+coef(mod_1)[7])
d <- dist(cbind(site.covs$x.coord.,site.covs$y.coord.))
alpha <- 1/4.8 #1/average dispersal distance
edis <- as.matrix(exp(-alpha*d))
diag(edis) <- 0
edis <- sweep(edis,2,site.covs$A,"*") #Finally the sumation S<-rowSums(edis)
S <- rowSums(edis)
site.covs$S <- S #adds column to site.covs
lepa_umf <- unmarkedMultFrame(y=y[,2:7],siteCovs=site.covs,numPrimary=3)
mod_2 <- colext(psiformula=~Phorophyte,gammaformula=~S, epsilonformula=~A,
pformula=~A+Phorophyte,data=lepa_umf) #interpret odds ratio
#connectivity and colonization
exp(coef(mod_2)[4])
#extinction and patch area
exp(coef(mod_2)[6])
# Assignment 2 ####
mod_3 <- colext(psiformula=~Phorophyte,gammaformula=~(S+A), epsilonformula=~A,
pformula=~A+Phorophyte,data=lepa_umf)
mod_4 <- colext(psiformula=~Phorophyte,gammaformula=~S, epsilonformula=~(A+S),
pformula=~A+Phorophyte,data=lepa_umf)
mod_5 <- colext(psiformula=~Phorophyte,gammaformula=~(S+A), epsilonformula=~(A+S),
pformula=~A+Phorophyte,data=lepa_umf)
# list the models
models <- fitList("mod_1" = mod_1,
"mod_2" = mod_2,
"mod_3" = mod_3,
"mod_4" = mod_4,
"mod_5" = mod_5)
# which is the most parsimonious
modSel(models)
out.put <- model.sel(mod_2, mod_3, mod_4)
MA.ests <- model.avg(out.put, subset = delta < 2)
# S = patch connectivity
# A = patch area
# Colonization
exp(coef(MA.ests)[3]) # rock?
exp(coef(MA.ests)[3])+plogis(coef(MA.ests)[4]) # + connectivity
exp(coef(MA.ests)[3])+exp(coef(MA.ests)[4])+exp(coef(MA.ests)[4]) # + patch area
# Extinction
exp(coef(MA.ests)[5]) # rock?
exp(coef(MA.ests)[5])-exp(coef(MA.ests)[6]) # + area
exp(coef(MA.ests)[5])-exp(coef(MA.ests)[6])-exp(coef(MA.ests)[11])# connectivity
|
/9_April_Occupancy.R
|
no_license
|
masond-UF/iQAAP
|
R
| false
| false
| 4,924
|
r
|
# 9 April 2020—Lab 13 "Occupancy ####
library(unmarked)
library(MuMIn)
library(ggplot2)
# Single season occupancy ####
badger <- read.csv("badger_occupancy_scotland.csv", header = TRUE)
head(badger)
badger.o <- unmarkedFrameOccu(y = badger[,4:25],
siteCovs = badger[,c(26,27,32,41,46)])
plot(badger.o)
pdot.psidot = occu(~1 ~1, data = badger.o)
pdot.psidot
backTransform(pdot.psidot, type = 'det') # estimate of p
backTransform(pdot.psidot, type = 'state') # estimate of psi
pTemp.psiTemp <- occu(~WTemp ~WTemp, data = badger.o)
summary(pTemp.psiTemp)
pTemp.psiDTMR <- occu(~WTemp ~DistMR, data = badger.o)
summary(pTemp.psiDTMR)
hist(badger.o@siteCovs$DistMR)
par(mfrow = c(1,2))
hist(badger.o@siteCovs$DistMR, main="Not scaled")
hist(scale(badger.o@siteCovs$DistMR), main= "Scaled and centered")
mean(scale(badger.o@siteCovs$DistMR))
sd(scale(badger.o@siteCovs$DistMR))
pTemp.psiDTMR <- occu(~WTemp ~scale(DistMR), data = badger.o)
summary(pTemp.psiDTMR)
models <- fitList( 'pdot.psidot'=pdot.psidot,
'pTemp.psiTemp'=pTemp.psiTemp,
'pTemp.psiDTMR'=pTemp.psiDTMR )
modSel(models)
out.put <- model.sel(pTemp.psiTemp, pTemp.psiDTMR)
MA.ests <- model.avg(out.put, subset = delta < 2)
# Assignment 1 ####
hist(badger.o@siteCovs$Elev)
hist(badger.o@siteCovs$Agric)
hist(badger.o@siteCovs$Humans)
# These don't range as extreme as other covariates
pTemp.psiELEV <- occu(~WTemp ~Elev, data = badger.o)
pTemp.psiAGRI <- occu(~WTemp ~Agric, data = badger.o)
pTemp.psiHUM <- occu(~WTemp ~Humans, data = badger.o)
models <- fitList('pdot.psidot'=pdot.psidot,
'pTemp.psiTemp'=pTemp.psiTemp,
'pTemp.psiDTMR'=pTemp.psiDTMR,
'pTemp.psiELEV'=pTemp.psiELEV,
'pTemp.psiAGRI'=pTemp.psiAGRI,
'pTemp.psiHUM'=pTemp.psiHUM)
modSel(models)
# most parsimonious is pTemp.psiAGRI because it has lowest AIC
# and is the only with delta <2
exp(coef(pTemp.psiAGRI)[2])
# every percent increase in agricultural land use increases occupancy by 1.20
# make the figure
agricultural <- data.frame(Agric = seq(0,26,1))
agricultural$occ <- predict(pTemp.psiAGRI, newdata = agricultural,
type = "state", appendData = TRUE)
ggplot(d = agricultural, aes(x = Agric, y = agricultural$occ$Predicted))+
geom_line(color = "red")+
geom_ribbon(data=agricultural,
aes(ymin=agricultural$occ$lower,ymax=agricultural$occ$upper),alpha=0.2,
color = "blue", fill = "blue")+
xlab("% Agricultural land")+
ylab("Badger Occupancy")+
theme_classic()+
theme(text = element_text(size = 20))
# Multi-season occupancy ####
y <- read.csv("lepa_y.csv", header = TRUE) # presence/absence data
head(y)
site.covs <- read.csv("lepa_site_covs.csv", header = TRUE) # covariates
head(site.covs) # A has been scaled
plot(site.covs$x.coord., site.covs$y.coord.,
cex = log(site.covs$A+1), xlab = "X coordinate", ylab = "Y coordinate")
lepa_umf <- unmarkedMultFrame(y=y[,2:7], siteCovs = site.covs, numPrimary = 3)
mod_1<-colext(psiformula=~Phorophyte, gammaformula=~1, epsilonformula=~1,
pformula=~A+Phorophyte,data=lepa_umf)
plogis(coef(mod_1)[3])
plogis(coef(mod_1)[4])
plogis(coef(mod_1)[5])
plogis(coef(mod_1)[5]+coef(mod_1)[7])
d <- dist(cbind(site.covs$x.coord.,site.covs$y.coord.))
alpha <- 1/4.8 #1/average dispersal distance
edis <- as.matrix(exp(-alpha*d))
diag(edis) <- 0
edis <- sweep(edis,2,site.covs$A,"*") #Finally the sumation S<-rowSums(edis)
S <- rowSums(edis)
site.covs$S <- S #adds column to site.covs
lepa_umf <- unmarkedMultFrame(y=y[,2:7],siteCovs=site.covs,numPrimary=3)
mod_2 <- colext(psiformula=~Phorophyte,gammaformula=~S, epsilonformula=~A,
pformula=~A+Phorophyte,data=lepa_umf) #interpret odds ratio
#connectivity and colonization
exp(coef(mod_2)[4])
#extinction and patch area
exp(coef(mod_2)[6])
# Assignment 2 ####
mod_3 <- colext(psiformula=~Phorophyte,gammaformula=~(S+A), epsilonformula=~A,
pformula=~A+Phorophyte,data=lepa_umf)
mod_4 <- colext(psiformula=~Phorophyte,gammaformula=~S, epsilonformula=~(A+S),
pformula=~A+Phorophyte,data=lepa_umf)
mod_5 <- colext(psiformula=~Phorophyte,gammaformula=~(S+A), epsilonformula=~(A+S),
pformula=~A+Phorophyte,data=lepa_umf)
# list the models
models <- fitList("mod_1" = mod_1,
"mod_2" = mod_2,
"mod_3" = mod_3,
"mod_4" = mod_4,
"mod_5" = mod_5)
# which is the most parsimonious
modSel(models)
out.put <- model.sel(mod_2, mod_3, mod_4)
MA.ests <- model.avg(out.put, subset = delta < 2)
# S = patch connectivity
# A = patch area
# Colonization
exp(coef(MA.ests)[3]) # rock?
exp(coef(MA.ests)[3])+plogis(coef(MA.ests)[4]) # + connectivity
exp(coef(MA.ests)[3])+exp(coef(MA.ests)[4])+exp(coef(MA.ests)[4]) # + patch area
# Extinction
exp(coef(MA.ests)[5]) # rock?
exp(coef(MA.ests)[5])-exp(coef(MA.ests)[6]) # + area
exp(coef(MA.ests)[5])-exp(coef(MA.ests)[6])-exp(coef(MA.ests)[11])# connectivity
|
# Exercise 1: Creating and Indexing Vectors
# Create a vector `first.ten` that has the values 10 through 20 in it (using the : operator)
first.ten <- 10:20
# Create a vector `next.ten` that has the values 21 through 30 in it (using the seq operator)
next.ten <- seq(21:30)
# Create a vector `all.numbers` by combining the vectors `first.ten` and `next.ten`
all.numbers <- c(first.ten, next.ten)
# Create a variable `eleventh` that is equal to the 11th element in your vector `all.numbers`
eleventh <- all.numbers[11]
# Create a vector `some.numbers` that is equal to the second through the 5th elements of `all.numbers`
some.numbers <- all.numbers[2:5]
# Create a variable `len` that is equal to the length of your vector `all.numbers`
len <- length(all.numbers)
### Bonus ###
# Create a vector `odd` that holds the odd numbers from 1 to 100
odd <- seq(1,100,2)
# Using the `all` and `%%` operators, confirm that all of the numbers in your `odd` vector are odd
all(odd%%2==1)
|
/exercise-1/exercise.R
|
permissive
|
kevinwork98/ch7-vectors
|
R
| false
| false
| 985
|
r
|
# Exercise 1: Creating and Indexing Vectors
# Create a vector `first.ten` that has the values 10 through 20 in it (using the : operator)
first.ten <- 10:20
# Create a vector `next.ten` that has the values 21 through 30 in it (using the seq operator)
next.ten <- seq(21:30)
# Create a vector `all.numbers` by combining the vectors `first.ten` and `next.ten`
all.numbers <- c(first.ten, next.ten)
# Create a variable `eleventh` that is equal to the 11th element in your vector `all.numbers`
eleventh <- all.numbers[11]
# Create a vector `some.numbers` that is equal to the second through the 5th elements of `all.numbers`
some.numbers <- all.numbers[2:5]
# Create a variable `len` that is equal to the length of your vector `all.numbers`
len <- length(all.numbers)
### Bonus ###
# Create a vector `odd` that holds the odd numbers from 1 to 100
odd <- seq(1,100,2)
# Using the `all` and `%%` operators, confirm that all of the numbers in your `odd` vector are odd
all(odd%%2==1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Met_MatH.R
\name{WH.var.covar}
\alias{WH.var.covar}
\alias{WH.var.covar,MatH-method}
\title{Method WH.var.covar}
\usage{
WH.var.covar(object, ...)
\S4method{WH.var.covar}{MatH}(object, w = numeric(0))
}
\arguments{
\item{object}{a \code{MatH} object}
\item{...}{some optional parameters}
\item{w}{it is possible to add a vector of weights (positive numbers)
having the same size of the rows of the \code{MatH object},
default = equal weight for each row}
}
\value{
a squared \code{matrix} with the (weighted) variance-covariance values
}
\description{
Compute the variance-covariance matrix of a \code{MatH} object, i.e.
a matrix of values consistent with
a set of distributions equipped with a L2 wasserstein metric.
}
\examples{
WH.var.covar(BLOOD)
# generate a set of random weights
RN <- runif(get.MatH.nrows(BLOOD))
WH.var.covar(BLOOD, w = RN)
}
\references{
Irpino, A., Verde, R. (2015) \emph{Basic
statistics for distributional symbolic variables: a new metric-based
approach} Advances in Data Analysis and Classification, DOI
10.1007/s11634-014-0176-4
}
|
/man/WH.var.covar-methods.Rd
|
no_license
|
cran/HistDAWass
|
R
| false
| true
| 1,183
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Met_MatH.R
\name{WH.var.covar}
\alias{WH.var.covar}
\alias{WH.var.covar,MatH-method}
\title{Method WH.var.covar}
\usage{
WH.var.covar(object, ...)
\S4method{WH.var.covar}{MatH}(object, w = numeric(0))
}
\arguments{
\item{object}{a \code{MatH} object}
\item{...}{some optional parameters}
\item{w}{it is possible to add a vector of weights (positive numbers)
having the same size of the rows of the \code{MatH object},
default = equal weight for each row}
}
\value{
a squared \code{matrix} with the (weighted) variance-covariance values
}
\description{
Compute the variance-covariance matrix of a \code{MatH} object, i.e.
a matrix of values consistent with
a set of distributions equipped with a L2 wasserstein metric.
}
\examples{
WH.var.covar(BLOOD)
# generate a set of random weights
RN <- runif(get.MatH.nrows(BLOOD))
WH.var.covar(BLOOD, w = RN)
}
\references{
Irpino, A., Verde, R. (2015) \emph{Basic
statistics for distributional symbolic variables: a new metric-based
approach} Advances in Data Analysis and Classification, DOI
10.1007/s11634-014-0176-4
}
|
### get parameters
args = commandArgs(trailingOnly=TRUE)
index_set_inputfile = args[1]
regin_index_inputfile = args[2]
regin_signal_inputfile = args[3]
index_set_all_heatmap = args[4]
index_set_thresh_heatmap = args[5]
index_all_heatmap = args[6]
### read index set signal matrix
read_enriched_index_set_matrix = function(inputfile){
data_index_set = as.matrix(read.table(inputfile, header=T))
data_index_set_binary_patter = data_index_set[,1]
rownames(data_index_set) = data_index_set_binary_patter
data_index_set = data_index_set[,-1]
class(data_index_set) = "numeric"
return(c(data_index_set, data_index_set_binary_patter))
}
### read index matrix
read_index_matrix = function(inputfile){
### read matrix as string matrix
data_index_matrix = read.table(inputfile, header=T, sep='\t', colClasses = "character")
rownames(data_index_matrix) = data_index_matrix[,1]
data_index_matrix = data_index_matrix[,-1]
### collapse string vector
data_index_matrix = apply(data_index_matrix, MARGIN=1, FUN=function(x) paste(x, collapse='_') )
return(data_index_matrix)
}
### read index signal matrix
read_signal_matrix = function(inputfile){
### read matrix as string matrix
data_signal_matrix = as.matrix(read.table(inputfile, header=T, sep='\t'))
rownames(data_signal_matrix) = data_signal_matrix[,1]
data_signal_matrix = data_signal_matrix[,-1]
return(data_signal_matrix)
}
### read index signal matrix
read_wg_signal_matrix = function(inputfile){
### read matrix as string matrix
data_signal_matrix = (read.table(inputfile, header=F, sep='\t'))
#rownames(data_signal_matrix) = data_signal_matrix[,1]
data_signal_matrix = as.matrix(data_signal_matrix[,c(-1,-2,-3)])
return(data_signal_matrix)
}
### read DNA region matrix
read_DNAregion_matrix = function(inputfile){
### read matrix as string matrix
data_signal_matrix = as.matrix(read.table(inputfile, header=F, sep='\t'))
rownames(data_signal_matrix) = data_signal_matrix[,1]
data_signal_matrix = data_signal_matrix[,-1]
return(data_signal_matrix)
}
### get DNA region intervals
index_region_matrix = read_DNAregion_matrix('DNA_regin_210k_indexsort_onlyinterval.txt')#[c(1:100000),]
### index region length
index_region_length = as.numeric(index_region_matrix[,3]) - as.numeric(index_region_matrix[,2])#[c(1:100000)]
total_len = sum(index_region_length)
### get index binary label matrix
index_label_matrix = read_index_matrix('celltype.index.sorted.txt')#[c(1:100000)]
### get index signal matrix
index_sig_matrix = read_signal_matrix('celltype.index.signal.sorted.txt')#[c(1:100000),]
NOcREs_sig_matrix = read_wg_signal_matrix('celltype.tpm.NOcRE.txt')#[c(1:100000),]
wg_sig_matrix_log2 = log2(read_wg_signal_matrix('celltype.tpm.sorted.txt')+0.01)#[c(1:100000),]
### get index_set binary labels
index_set_filtered_matrix = read_signal_matrix('celltype.index_set_filtered.sorted.txt')#[c(1,2,3),]
### to numeric matrix
class(index_set_filtered_matrix) = "numeric"
index_set_filtered_label = rownames(index_set_filtered_matrix)
### get multiple variable normal distribution from previous peak calling result
### get new index calling probability
### Previous matrix (plus 0.01 then log2 transform)
index_sig_matrix_log2 = log2(index_sig_matrix+0.01)
### initialize index call p matrix
index_sig_matrix_p_call = c()
enriched_index_set_position = rep(0, dim(index_sig_matrix_log2)[1])
### enriched index set:
for (i in c(1: length(index_set_filtered_label))){
###
print(paste('index set', toString(i), index_set_filtered_label[i], sep=':'))
### get index set index position
index_sig_i_position = index_label_matrix==index_set_filtered_label[i]
### get each index's signal (plus 0.01 then log2 transform)
index_sig_i_log2 = log2(index_sig_matrix[index_sig_i_position, ] + 0.01)
### get index set peak proportion
index_region_length_i = -log(sum(index_region_length[index_sig_i_position]/total_len))
### save enriched_index_set DNA region id positions
enriched_index_set_position = enriched_index_set_position + index_sig_i_position
### boxplot index_set cell type signals
png(paste('index_set_boxplot/index_set_boxplot.', toString(i), '.', index_set_filtered_label[i], '.png', sep=''))
boxplot(index_sig_i_log2, ylim=c(min(index_sig_matrix_log2), quantile(index_sig_matrix_log2, probs=0.99)))
dev.off()
### get index_set mean vector & variance-covariance matrix
x_mean = colMeans(index_sig_i_log2)
x_cov = cov(index_sig_i_log2)
### read mvnorm function for matrix
dmvnorm_all_slow = function(vector){
library(mvtnorm)
d = dmvnorm(vector, x_mean, x_cov)
return(d)
}
### dnorm function (fast version)
dnorm_fast = function(matrix, x_mean, x_cov){
#data_r = t(apply(matrix, 1, FUN=function(x) x-x_mean))#t(matrix) - x_mean
data_r = t(t(matrix) - x_mean)
p = length(x_mean)
###
x2 = apply(data_r %*% solve(x_cov) * (data_r), 1, sum)
#print(dim(x2))
d = det(x_cov) * 2 * pi
lp = (log(d) * p + x2)/2
return(lp)
}
### calculate the likelihood density
print('start dmvnorm')
index_sig_matrix_i_p = dnorm_fast(wg_sig_matrix_log2, x_mean, x_cov)
#print(length(index_sig_matrix_i_p))
### consider overall index set region length
index_sig_matrix_i_p_p = index_sig_matrix_i_p + index_region_length_i
### append index call matrix (-log(p))
index_sig_matrix_p_call = cbind(index_sig_matrix_p_call, index_sig_matrix_i_p_p)
}
### NOT enriched index set:
for (i in c(1)){
###
print(paste('index set', 'NOT enriched', sep=':'))
### get index set index position
index_sig_i_position = enriched_index_set_position==0
### get each index's signal (plus 0.01 then log2 transform)
index_sig_i_log2 = log2(index_sig_matrix[index_sig_i_position, ] + 0.01)
### get index set peak proportion
index_region_length_i = -log(sum(index_region_length[index_sig_i_position]/total_len))
### save enriched_index_set DNA region id positions
enriched_index_set_position = enriched_index_set_position + index_sig_i_position
### boxplot index_set cell type signals
png(paste('index_set_boxplot/index_set_boxplot.', toString(i), '.', index_set_filtered_label[i], '.png', sep=''))
boxplot(index_sig_i_log2, ylim=c(min(index_sig_matrix_log2), quantile(index_sig_matrix_log2, probs=0.99)))
dev.off()
### get index_set mean vector & variance-covariance matrix
x_mean = colMeans(index_sig_i_log2)
x_cov = cov(index_sig_i_log2)
### read mvnorm function for matrix
dmvnorm_all_slow = function(vector){
library(mvtnorm)
d = dmvnorm(vector, x_mean, x_cov)
return(d)
}
### dnorm function (fast version)
dnorm_fast = function(matrix, x_mean, x_cov){
#data_r = t(apply(matrix, 1, FUN=function(x) x-x_mean))#t(matrix) - x_mean
data_r = t(t(matrix) - x_mean)
p = length(x_mean)
###
x2 = apply(data_r %*% solve(x_cov) * (data_r), 1, sum)
#print(dim(x2))
d = det(x_cov) * 2 * pi
lp = (log(d) * p + x2)/2
return(lp)
}
### calculate the likelihood density
print('start dmvnorm')
index_sig_matrix_i_p = dnorm_fast(wg_sig_matrix_log2, x_mean, x_cov)
#print(length(index_sig_matrix_i_p))
### consider overall index set region length
index_sig_matrix_i_p_p = index_sig_matrix_i_p + index_region_length_i
### append index call matrix (-log(p))
index_sig_matrix_p_call = cbind(index_sig_matrix_p_call, index_sig_matrix_i_p_p)
}
### NOT enriched index set:
for (i in c(1)){
###
print(paste('index set', 'NOT enriched whole genome', sep=':'))
### get each index's signal (plus 0.01 then log2 transform)
index_sig_i_log2 = log2(NOcREs_sig_matrix + 0.01)
### get index set peak proportion
index_region_length_i = -log(sum(index_region_length[index_sig_i_position]/total_len))
### save enriched_index_set DNA region id positions
enriched_index_set_position = enriched_index_set_position + index_sig_i_position
### boxplot index_set cell type signals
png(paste('index_set_boxplot/index_set_boxplot.', toString(i), '.', index_set_filtered_label[i], '.png', sep=''))
boxplot(index_sig_i_log2, ylim=c(min(index_sig_matrix_log2), quantile(index_sig_matrix_log2, probs=0.99)))
dev.off()
### get index_set mean vector & variance-covariance matrix
x_mean = colMeans(index_sig_i_log2)
x_cov = cov(index_sig_i_log2)
### read mvnorm function for matrix
dmvnorm_all_slow = function(vector){
library(mvtnorm)
d = dmvnorm(vector, x_mean, x_cov)
return(d)
}
### dnorm function (fast version)
dnorm_fast = function(matrix, x_mean, x_cov){
#data_r = t(apply(matrix, 1, FUN=function(x) x-x_mean))#t(matrix) - x_mean
data_r = t(t(matrix) - x_mean)
p = length(x_mean)
###
x2 = apply(data_r %*% solve(x_cov) * (data_r), 1, sum)
#print(dim(x2))
d = det(x_cov) * 2 * pi
lp = (log(d) * p + x2)/2
return(lp)
}
### calculate the likelihood density
print('start dmvnorm')
index_sig_matrix_i_p = dnorm_fast(wg_sig_matrix_log2, x_mean, x_cov)
#print(length(index_sig_matrix_i_p))
### consider overall index set region length
index_sig_matrix_i_p_p = index_sig_matrix_i_p + index_region_length_i
### append index call matrix (-log(p))
index_sig_matrix_p_call = cbind(index_sig_matrix_p_call, index_sig_matrix_i_p_p)
}
#print(dim(index_sig_matrix_p_call))
#print(head(index_sig_matrix_p_call))
colnames(index_sig_matrix_p_call) = c(1: dim(index_sig_matrix_p_call)[2])
#rownames(index_sig_matrix_p_call) = rownames(index_sig_matrix)
### the sum of probability is standardized to 1
#print('get index call probability matrix')
#index_sig_matrix_p_call = t( apply(index_sig_matrix_p_call, MARGIN=1, FUN=function(x) x/sum(x)) )
### write all index set call & probability
write.table(index_sig_matrix_p_call, 'index_sig_matrix_index_set_p.txt', quote=F, sep='\t', row.names = TRUE, col.names = NA)
### get belonging index set id
print('get index call index_set_id & probability')
index_sig_matrix_index_set_call = t( apply((index_sig_matrix_p_call), MARGIN=1, FUN=function(x) c(which.max(x), max(x))) )
colnames(index_sig_matrix_index_set_call) = c('index_set_id', 'probability')
### write all index set probility matrix
write.table(index_sig_matrix_index_set_call, 'index_sig_matrix_index_set_call.txt', quote=F, sep='\t', row.names = TRUE, col.names = )
############################################
print('sort index based on new index call')
### sort input signal matrix based on the new index set call
index_sig_matrix_sort = index_sig_matrix[order(index_sig_matrix_index_set_call[,1]), ]
### write all index set call & probability
write.table(index_sig_matrix_sort, 'index_sig_matrix_index_set_call_sort.txt', quote=F, sep='\t', row.names = TRUE, col.names = )
############################################
print('get new index set numbers:')
### recalculate index set DNA region number
index_set_num = table(index_sig_matrix_index_set_call[,1])
### change previous index set number to new called number
index_set_filtered_matrix_recalnum = c()
for (i in c(1: dim(index_set_filtered_matrix)[1])){
index_set_filtered_matrix_vec = index_set_filtered_matrix[i, ]
### recalculate the number of DNA region in index set
if (i %in% names(index_set_num)){
index_set_filtered_matrix_vec_new = index_set_filtered_matrix_vec / max(index_set_filtered_matrix_vec) * index_set_num[names(index_set_num) == i]
index_set_filtered_matrix_recalnum = cbind(index_set_filtered_matrix_recalnum, index_set_filtered_matrix_vec_new)
} else{
index_set_filtered_matrix_vec_new = index_set_filtered_matrix_vec - index_set_filtered_matrix_vec
index_set_filtered_matrix_recalnum = cbind(index_set_filtered_matrix_recalnum, index_set_filtered_matrix_vec_new)
}
}
### transpose
index_set_filtered_matrix_recalnum = t( index_set_filtered_matrix_recalnum )
### add colnames & rownames
colnames(index_set_filtered_matrix_recalnum) = colnames(index_set_filtered_matrix)
rownames(index_set_filtered_matrix_recalnum) = rownames(index_set_filtered_matrix)
### write index_set_filtered_matrix_recalnum
write.table(index_set_filtered_matrix_recalnum, 'celltype.index_set_filtered.sorted.recalnum.txt', quote=F, sep='\t', row.names = TRUE, col.names = NA)
png('probability_hist.png')
hist(index_sig_matrix_index_set_call[,2], breaks = 50)#, xlim=c(0, 150))
dev.off()
############################################
print('get new index set numbers (threshold 0.95):')
### recalculate index set DNA region number
index_set_num = table(index_sig_matrix_index_set_call[index_sig_matrix_index_set_call[,2]>=0,1])
### change previous index set number to new called number
index_set_filtered_matrix_recalnum = c()
for (i in c(1: dim(index_set_filtered_matrix)[1])){
index_set_filtered_matrix_vec = index_set_filtered_matrix[i, ]
### recalculate the number of DNA region in index set
if (i %in% names(index_set_num)){
index_set_filtered_matrix_vec_new = index_set_filtered_matrix_vec / max(index_set_filtered_matrix_vec) * index_set_num[names(index_set_num) == i]
index_set_filtered_matrix_recalnum = cbind(index_set_filtered_matrix_recalnum, index_set_filtered_matrix_vec_new)
} else{
index_set_filtered_matrix_vec_new = index_set_filtered_matrix_vec - index_set_filtered_matrix_vec
index_set_filtered_matrix_recalnum = cbind(index_set_filtered_matrix_recalnum, index_set_filtered_matrix_vec_new)
}
}
### transpose
index_set_filtered_matrix_recalnum = t( index_set_filtered_matrix_recalnum )
### add colnames & rownames
colnames(index_set_filtered_matrix_recalnum) = colnames(index_set_filtered_matrix)
rownames(index_set_filtered_matrix_recalnum) = rownames(index_set_filtered_matrix)
### write index_set_filtered_matrix_recalnum
write.table(index_set_filtered_matrix_recalnum, 'celltype.index_set_filtered.sorted.recalnum.thresh.txt', quote=F, sep='\t', row.names = TRUE, col.names = NA)
|
/bin/index_calling.R
|
no_license
|
guanjue/index_caller
|
R
| false
| false
| 13,686
|
r
|
### get parameters
args = commandArgs(trailingOnly=TRUE)
index_set_inputfile = args[1]
regin_index_inputfile = args[2]
regin_signal_inputfile = args[3]
index_set_all_heatmap = args[4]
index_set_thresh_heatmap = args[5]
index_all_heatmap = args[6]
### read index set signal matrix
read_enriched_index_set_matrix = function(inputfile){
data_index_set = as.matrix(read.table(inputfile, header=T))
data_index_set_binary_patter = data_index_set[,1]
rownames(data_index_set) = data_index_set_binary_patter
data_index_set = data_index_set[,-1]
class(data_index_set) = "numeric"
return(c(data_index_set, data_index_set_binary_patter))
}
### read index matrix
read_index_matrix = function(inputfile){
### read matrix as string matrix
data_index_matrix = read.table(inputfile, header=T, sep='\t', colClasses = "character")
rownames(data_index_matrix) = data_index_matrix[,1]
data_index_matrix = data_index_matrix[,-1]
### collapse string vector
data_index_matrix = apply(data_index_matrix, MARGIN=1, FUN=function(x) paste(x, collapse='_') )
return(data_index_matrix)
}
### read index signal matrix
read_signal_matrix = function(inputfile){
### read matrix as string matrix
data_signal_matrix = as.matrix(read.table(inputfile, header=T, sep='\t'))
rownames(data_signal_matrix) = data_signal_matrix[,1]
data_signal_matrix = data_signal_matrix[,-1]
return(data_signal_matrix)
}
### read index signal matrix
read_wg_signal_matrix = function(inputfile){
### read matrix as string matrix
data_signal_matrix = (read.table(inputfile, header=F, sep='\t'))
#rownames(data_signal_matrix) = data_signal_matrix[,1]
data_signal_matrix = as.matrix(data_signal_matrix[,c(-1,-2,-3)])
return(data_signal_matrix)
}
### read DNA region matrix
read_DNAregion_matrix = function(inputfile){
### read matrix as string matrix
data_signal_matrix = as.matrix(read.table(inputfile, header=F, sep='\t'))
rownames(data_signal_matrix) = data_signal_matrix[,1]
data_signal_matrix = data_signal_matrix[,-1]
return(data_signal_matrix)
}
### get DNA region intervals
index_region_matrix = read_DNAregion_matrix('DNA_regin_210k_indexsort_onlyinterval.txt')#[c(1:100000),]
### index region length
index_region_length = as.numeric(index_region_matrix[,3]) - as.numeric(index_region_matrix[,2])#[c(1:100000)]
total_len = sum(index_region_length)
### get index binary label matrix
index_label_matrix = read_index_matrix('celltype.index.sorted.txt')#[c(1:100000)]
### get index signal matrix
index_sig_matrix = read_signal_matrix('celltype.index.signal.sorted.txt')#[c(1:100000),]
NOcREs_sig_matrix = read_wg_signal_matrix('celltype.tpm.NOcRE.txt')#[c(1:100000),]
wg_sig_matrix_log2 = log2(read_wg_signal_matrix('celltype.tpm.sorted.txt')+0.01)#[c(1:100000),]
### get index_set binary labels
index_set_filtered_matrix = read_signal_matrix('celltype.index_set_filtered.sorted.txt')#[c(1,2,3),]
### to numeric matrix
class(index_set_filtered_matrix) = "numeric"
index_set_filtered_label = rownames(index_set_filtered_matrix)
### get multiple variable normal distribution from previous peak calling result
### get new index calling probability
### Previous matrix (plus 0.01 then log2 transform)
index_sig_matrix_log2 = log2(index_sig_matrix+0.01)
### initialize index call p matrix
index_sig_matrix_p_call = c()
enriched_index_set_position = rep(0, dim(index_sig_matrix_log2)[1])
### enriched index set:
for (i in c(1: length(index_set_filtered_label))){
###
print(paste('index set', toString(i), index_set_filtered_label[i], sep=':'))
### get index set index position
index_sig_i_position = index_label_matrix==index_set_filtered_label[i]
### get each index's signal (plus 0.01 then log2 transform)
index_sig_i_log2 = log2(index_sig_matrix[index_sig_i_position, ] + 0.01)
### get index set peak proportion
index_region_length_i = -log(sum(index_region_length[index_sig_i_position]/total_len))
### save enriched_index_set DNA region id positions
enriched_index_set_position = enriched_index_set_position + index_sig_i_position
### boxplot index_set cell type signals
png(paste('index_set_boxplot/index_set_boxplot.', toString(i), '.', index_set_filtered_label[i], '.png', sep=''))
boxplot(index_sig_i_log2, ylim=c(min(index_sig_matrix_log2), quantile(index_sig_matrix_log2, probs=0.99)))
dev.off()
### get index_set mean vector & variance-covariance matrix
x_mean = colMeans(index_sig_i_log2)
x_cov = cov(index_sig_i_log2)
### read mvnorm function for matrix
dmvnorm_all_slow = function(vector){
library(mvtnorm)
d = dmvnorm(vector, x_mean, x_cov)
return(d)
}
### dnorm function (fast version)
dnorm_fast = function(matrix, x_mean, x_cov){
#data_r = t(apply(matrix, 1, FUN=function(x) x-x_mean))#t(matrix) - x_mean
data_r = t(t(matrix) - x_mean)
p = length(x_mean)
###
x2 = apply(data_r %*% solve(x_cov) * (data_r), 1, sum)
#print(dim(x2))
d = det(x_cov) * 2 * pi
lp = (log(d) * p + x2)/2
return(lp)
}
### calculate the likelihood density
print('start dmvnorm')
index_sig_matrix_i_p = dnorm_fast(wg_sig_matrix_log2, x_mean, x_cov)
#print(length(index_sig_matrix_i_p))
### consider overall index set region length
index_sig_matrix_i_p_p = index_sig_matrix_i_p + index_region_length_i
### append index call matrix (-log(p))
index_sig_matrix_p_call = cbind(index_sig_matrix_p_call, index_sig_matrix_i_p_p)
}
### "NOT enriched" background index set: score regions that were never
### assigned to any enriched index set above. The loop over c(1) runs exactly
### once; it mirrors the per-index-set loop shape so the body stays parallel.
for (i in c(1)){
###
print(paste('index set', 'NOT enriched', sep=':'))
### Rows whose region was never flagged by an enriched index set
### (enriched_index_set_position is a 0/1 accumulator built by earlier loops).
index_sig_i_position = enriched_index_set_position==0
### Per-cell-type signals of those rows; +0.01 avoids log2(0) = -Inf.
index_sig_i_log2 = log2(index_sig_matrix[index_sig_i_position, ] + 0.01)
### -log of the fraction of the total length covered by this set; acts as a
### length-based prior term added onto the -log density score below.
index_region_length_i = -log(sum(index_region_length[index_sig_i_position]/total_len))
### Mark these rows as assigned in the shared accumulator.
enriched_index_set_position = enriched_index_set_position + index_sig_i_position
### Boxplot of this set's per-cell-type log2 signals; y-axis clipped at the
### matrix-wide 99th percentile for readability.
### NOTE(review): with i == 1 this filename can collide with the boxplot of
### enriched index set 1 produced earlier -- confirm the naming is intended.
png(paste('index_set_boxplot/index_set_boxplot.', toString(i), '.', index_set_filtered_label[i], '.png', sep=''))
boxplot(index_sig_i_log2, ylim=c(min(index_sig_matrix_log2), quantile(index_sig_matrix_log2, probs=0.99)))
dev.off()
### Multivariate-normal parameters of this set's log2 signal profile.
x_mean = colMeans(index_sig_i_log2)
x_cov = cov(index_sig_i_log2)
### Reference implementation via mvtnorm; defined but not called here.
dmvnorm_all_slow = function(vector){
library(mvtnorm)
d = dmvnorm(vector, x_mean, x_cov)
return(d)
}
### Fast row-wise -log multivariate-normal density over a signal matrix.
### NOTE(review): the normalizing term log(det(Sigma) * 2 * pi) * p differs
### from the standard p*log(2*pi) + log(det(Sigma)); since the same formula
### is applied for every index set, relative comparison across sets is
### presumably still meaningful -- confirm the formula is intentional.
dnorm_fast = function(matrix, x_mean, x_cov){
#data_r = t(apply(matrix, 1, FUN=function(x) x-x_mean))#t(matrix) - x_mean
data_r = t(t(matrix) - x_mean)
p = length(x_mean)
### Row-wise Mahalanobis distance: diag((X-mu) %*% solve(Sigma) %*% t(X-mu)).
x2 = apply(data_r %*% solve(x_cov) * (data_r), 1, sum)
#print(dim(x2))
d = det(x_cov) * 2 * pi
lp = (log(d) * p + x2)/2
return(lp)
}
### Score every whole-genome region under this set's model (-log scale).
print('start dmvnorm')
index_sig_matrix_i_p = dnorm_fast(wg_sig_matrix_log2, x_mean, x_cov)
#print(length(index_sig_matrix_i_p))
### Add the length prior so scores are comparable across index sets.
index_sig_matrix_i_p_p = index_sig_matrix_i_p + index_region_length_i
### Append this set's score column to the genome-wide call matrix.
index_sig_matrix_p_call = cbind(index_sig_matrix_p_call, index_sig_matrix_i_p_p)
}
### "NOT enriched whole genome" index set: same scoring pass as above, but the
### model is fit on NOcREs_sig_matrix (regions outside candidate elements).
for (i in c(1)){
###
print(paste('index set', 'NOT enriched whole genome', sep=':'))
### Signals of the non-cRE background regions; +0.01 avoids log2(0) = -Inf.
index_sig_i_log2 = log2(NOcREs_sig_matrix + 0.01)
### NOTE(review): index_sig_i_position is NOT recomputed in this loop -- the
### value left over from the previous "NOT enriched" loop is reused here for
### the length prior and the accumulator update below. This looks like a
### copy-paste carry-over; confirm whether this loop should derive its own
### position mask from NOcREs instead.
index_region_length_i = -log(sum(index_region_length[index_sig_i_position]/total_len))
### NOTE(review): this re-adds the same positions a second time, so those
### rows end up counted twice in enriched_index_set_position -- confirm.
enriched_index_set_position = enriched_index_set_position + index_sig_i_position
### Boxplot of this set's per-cell-type log2 signals.
### NOTE(review): with i == 1 this writes the same file path as the previous
### loop's boxplot and overwrites it -- confirm the naming is intended.
png(paste('index_set_boxplot/index_set_boxplot.', toString(i), '.', index_set_filtered_label[i], '.png', sep=''))
boxplot(index_sig_i_log2, ylim=c(min(index_sig_matrix_log2), quantile(index_sig_matrix_log2, probs=0.99)))
dev.off()
### Multivariate-normal parameters of the whole-genome background profile.
x_mean = colMeans(index_sig_i_log2)
x_cov = cov(index_sig_i_log2)
### Reference implementation via mvtnorm; defined but not called here.
dmvnorm_all_slow = function(vector){
library(mvtnorm)
d = dmvnorm(vector, x_mean, x_cov)
return(d)
}
### Fast row-wise -log multivariate-normal density over a signal matrix.
### NOTE(review): same non-standard normalizing term as in the other loops
### (log(det * 2*pi) * p); kept consistent across all sets -- confirm intent.
dnorm_fast = function(matrix, x_mean, x_cov){
#data_r = t(apply(matrix, 1, FUN=function(x) x-x_mean))#t(matrix) - x_mean
data_r = t(t(matrix) - x_mean)
p = length(x_mean)
### Row-wise Mahalanobis distance: diag((X-mu) %*% solve(Sigma) %*% t(X-mu)).
x2 = apply(data_r %*% solve(x_cov) * (data_r), 1, sum)
#print(dim(x2))
d = det(x_cov) * 2 * pi
lp = (log(d) * p + x2)/2
return(lp)
}
### Score every whole-genome region under the background model (-log scale).
print('start dmvnorm')
index_sig_matrix_i_p = dnorm_fast(wg_sig_matrix_log2, x_mean, x_cov)
#print(length(index_sig_matrix_i_p))
### Add the length prior so scores are comparable across index sets.
index_sig_matrix_i_p_p = index_sig_matrix_i_p + index_region_length_i
### Append the background score column to the genome-wide call matrix.
index_sig_matrix_p_call = cbind(index_sig_matrix_p_call, index_sig_matrix_i_p_p)
}
#print(dim(index_sig_matrix_p_call))
#print(head(index_sig_matrix_p_call))
### Label the score columns 1..K so which.max() below yields a set id.
colnames(index_sig_matrix_p_call) = c(1: dim(index_sig_matrix_p_call)[2])
#rownames(index_sig_matrix_p_call) = rownames(index_sig_matrix)
### the sum of probability is standardized to 1
#print('get index call probability matrix')
#index_sig_matrix_p_call = t( apply(index_sig_matrix_p_call, MARGIN=1, FUN=function(x) x/sum(x)) )
### Write the full per-region x per-set score matrix.
write.table(index_sig_matrix_p_call, 'index_sig_matrix_index_set_p.txt', quote=F, sep='\t', row.names = TRUE, col.names = NA)
### For every region, record the best-scoring set id and its score.
### NOTE(review): earlier comments describe the columns as -log(p); taking
### which.max of a -log score selects the LEAST probable set. Either the
### scores are on a different scale than the comments claim, or this should
### be which.min -- confirm against the intended model before changing.
print('get index call index_set_id & probability')
index_sig_matrix_index_set_call = t( apply((index_sig_matrix_p_call), MARGIN=1, FUN=function(x) c(which.max(x), max(x))) )
colnames(index_sig_matrix_index_set_call) = c('index_set_id', 'probability')
### Write the two-column call table.
### NOTE(review): "col.names = )" is an empty argument, so write.table falls
### back to its default -- presumably "col.names = NA" (as used elsewhere in
### this file) was intended; confirm.
write.table(index_sig_matrix_index_set_call, 'index_sig_matrix_index_set_call.txt', quote=F, sep='\t', row.names = TRUE, col.names = )
############################################
print('sort index based on new index call')
### Reorder the input signal matrix by the newly assigned index set id.
index_sig_matrix_sort = index_sig_matrix[order(index_sig_matrix_index_set_call[,1]), ]
### Write the sorted signal matrix (empty col.names argument -> default).
write.table(index_sig_matrix_sort, 'index_sig_matrix_index_set_call_sort.txt', quote=F, sep='\t', row.names = TRUE, col.names = )
############################################
print('get new index set numbers:')
### Number of regions newly assigned to each index set id (names = set ids).
index_set_num = table(index_sig_matrix_index_set_call[,1])
### Rescale each row of the per-set count matrix so the row maximum equals
### the newly called region count for that set; rows for sets that received
### no calls become all zeros. Writes into a preallocated copy instead of
### growing a matrix with cbind() inside the loop (the original was O(n^2)
### and needed a transpose plus dimname reassignment afterwards).
###
### count_matrix: numeric matrix, one row per index set (dimnames kept).
### new_counts:   table of newly called region counts, named by set id.
### Returns a matrix of the same shape and dimnames as count_matrix.
recalc_set_counts = function(count_matrix, new_counts){
out = count_matrix
for (i in seq_len(nrow(count_matrix))){
row_i = count_matrix[i, ]
if (as.character(i) %in% names(new_counts)){
### Scale so max(row) maps onto the recalled total for this set.
out[i, ] = row_i / max(row_i) * new_counts[[as.character(i)]]
} else {
### Set not called at all: zero the row ("vec - vec" preserves NA
### exactly like the original idiom).
out[i, ] = row_i - row_i
}
}
out
}
index_set_filtered_matrix_recalnum = recalc_set_counts(index_set_filtered_matrix, index_set_num)
### Write the recalculated per-set count matrix.
write.table(index_set_filtered_matrix_recalnum, 'celltype.index_set_filtered.sorted.recalnum.txt', quote=F, sep='\t', row.names = TRUE, col.names = NA)
### Histogram of the per-region best scores.
png('probability_hist.png')
hist(index_sig_matrix_index_set_call[,2], breaks = 50)#, xlim=c(0, 150))
dev.off()
############################################
print('get new index set numbers (threshold 0.95):')
### Number of regions passing the score threshold, per index set id.
### NOTE(review): the message above says 0.95 but the filter keeps rows with
### score >= 0; the score column is a -log value, not a probability in
### [0,1] -- confirm which threshold is actually intended.
index_set_num = table(index_sig_matrix_index_set_call[index_sig_matrix_index_set_call[,2]>=0,1])
### Rescale each row of the per-set count matrix so the row maximum equals
### the thresholded region count for that set; rows for sets that received
### no calls become all zeros. Writes into a preallocated copy instead of
### growing a matrix with cbind() inside the loop (the original was O(n^2)
### and needed a transpose plus dimname reassignment afterwards).
###
### count_matrix: numeric matrix, one row per index set (dimnames kept).
### new_counts:   table of thresholded region counts, named by set id.
### Returns a matrix of the same shape and dimnames as count_matrix.
recalc_set_counts = function(count_matrix, new_counts){
out = count_matrix
for (i in seq_len(nrow(count_matrix))){
row_i = count_matrix[i, ]
if (as.character(i) %in% names(new_counts)){
### Scale so max(row) maps onto the thresholded total for this set.
out[i, ] = row_i / max(row_i) * new_counts[[as.character(i)]]
} else {
### Set not called at all: zero the row ("vec - vec" preserves NA
### exactly like the original idiom).
out[i, ] = row_i - row_i
}
}
out
}
index_set_filtered_matrix_recalnum = recalc_set_counts(index_set_filtered_matrix, index_set_num)
### Write the thresholded recalculated per-set count matrix.
write.table(index_set_filtered_matrix_recalnum, 'celltype.index_set_filtered.sorted.recalnum.thresh.txt', quote=F, sep='\t', row.names = TRUE, col.names = NA)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fn.lambda.R
\name{fn.lambda}
\alias{fn.lambda}
\title{Calculate recruitment bias based on environmental filtering}
\usage{
fn.lambda(trait.optimum, niche.breadth, Ef, Ef.specificity)
}
\arguments{
\item{trait.optimum}{Optimum value for a given species along an environmental
gradient. Gradients in \link{fn.metaSIM} are restricted to the range [0,1], so
this value must be in the range [0,1].}
\item{niche.breadth}{Niche breadth around a species' trait optimum. The value of sigma
in Fig 1 in Gravel et al. (2006).}
\item{Ef}{Value of the environmental filter at the site for which lambda is being
calculated.}
\item{Ef.specificity}{The selection specificity of the environmental filter.}
}
\description{
Used by \link{fn.metaSIM}. Uses niche breadth and an
environmental gradient to determine how to bias species selection
weights based on environmental filtering dynamics. See Gravel et al. 2006.
}
\details{
Used by \link{fn.metaSIM}.
}
\references{
Gravel, D., C. D. Canham, M. Beaudet, and C. Messier. 2006. Reconciling niche and
neutrality: the continuum hypothesis. Ecology Letters 9:399--409.
}
|
/man/fn.lambda.Rd
|
no_license
|
sokole/MCSim
|
R
| false
| true
| 1,195
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fn.lambda.R
\name{fn.lambda}
\alias{fn.lambda}
\title{Calculate recruitment bias based on environmental filtering}
\usage{
fn.lambda(trait.optimum, niche.breadth, Ef, Ef.specificity)
}
\arguments{
\item{trait.optimum}{Optimum value for a given species along an environmental
gradient. Gradients in \link{fn.metaSIM} are restricted to the range [0,1], so
this value must be in the range [0,1].}
\item{niche.breadth}{Niche breadth around a species' trait optimum. The value of sigma
in Fig 1 in Gravel et al. (2006).}
\item{Ef}{Value of the environmental filter at the site for which lambda is being
calculated.}
\item{Ef.specificity}{The selection specificity of the environmental filter.}
}
\description{
Used by \link{fn.metaSIM}. Uses niche breadth and an
environmental gradient to determine how to bias species selection
weights based on environmental filtering dynamics. See Gravel et al. 2006.
}
\details{
Used by \link{fn.metaSIM}.
}
\references{
Gravel, D., C. D. Canham, M. Beaudet, and C. Messier. 2006. Reconciling niche and
neutrality: the continuum hypothesis. Ecology Letters 9:399--409.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.