content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
## 1 Import and explore data
cod <- read.csv("cod_recruitment.csv")
cod
par(mfrow=c(2,2))
plot(cod, xlim=c(0,130), ylim=c(0,400))
## 2 Function to plot Ricker line
plot_ricker <- function(data, a, b, ...)
{
S <- data[[1]]
R <- data[[2]]
plot(data, xlim=c(0,1.1*max(S)), ylim=c(0,1.1*max(R)), ...)
Sfit <- seq(0, 1.1*max(S), length=200)
Rfit <- a * Sfit * exp(-b*Sfit)
lines(Sfit, Rfit)
}
plot_ricker(cod, a=1, b=1)
## a is the initial slope
## 1/b is the SSB that produces maximum recruitment
plot_ricker(cod, a=5, b=0.01)
## 3 Function to fit model (estimate a and b)
ricker <- function(par, data)
{
a <- exp(par[["loga"]])
b <- exp(par[["logb"]])
Rfit <- a * data$S * exp(-b*data$S)
res <- log(data$R) - log(Rfit)
sum(res^2)
}
## 4 Fit model
## Starting values
init <- c(loga=log(5), logb=log(0.01))
## Calculate RSS at starting values, before fitting model
ricker(par=init, data=cod)
## Fit model
optim(ricker, par=init, data=cod)
fit <- optim(ricker, par=init, data=cod)$par
ricker(par=fit, cod) # RSS
fit # estimated parameters
## Exponentiate
c(a=exp(fit[["loga"]]), b=exp(fit[["logb"]]))
best <- data.frame(a=exp(fit[["loga"]]), b=exp(fit[["logb"]]))
## 5 Plot fitted model
plot_ricker(cod, a=best$a, b=best$b, main="Best fit")
| /02_Model_fitting/ricker.R | no_license | ices-eg/tc_tcsai2019 | R | false | false | 1,335 | r | ## 1 Import and explore data
cod <- read.csv("cod_recruitment.csv")
cod
par(mfrow=c(2,2))
plot(cod, xlim=c(0,130), ylim=c(0,400))
## 2 Function to plot Ricker line
plot_ricker <- function(data, a, b, ...)
{
S <- data[[1]]
R <- data[[2]]
plot(data, xlim=c(0,1.1*max(S)), ylim=c(0,1.1*max(R)), ...)
Sfit <- seq(0, 1.1*max(S), length=200)
Rfit <- a * Sfit * exp(-b*Sfit)
lines(Sfit, Rfit)
}
plot_ricker(cod, a=1, b=1)
## a is the initial slope
## 1/b is the SSB that produces maximum recruitment
plot_ricker(cod, a=5, b=0.01)
## 3 Function to fit model (estimate a and b)
ricker <- function(par, data)
{
a <- exp(par[["loga"]])
b <- exp(par[["logb"]])
Rfit <- a * data$S * exp(-b*data$S)
res <- log(data$R) - log(Rfit)
sum(res^2)
}
## 4 Fit model
## Starting values
init <- c(loga=log(5), logb=log(0.01))
## Calculate RSS at starting values, before fitting model
ricker(par=init, data=cod)
## Fit model
optim(ricker, par=init, data=cod)
fit <- optim(ricker, par=init, data=cod)$par
ricker(par=fit, cod) # RSS
fit # estimated parameters
## Exponentiate
c(a=exp(fit[["loga"]]), b=exp(fit[["logb"]]))
best <- data.frame(a=exp(fit[["loga"]]), b=exp(fit[["logb"]]))
## 5 Plot fitted model
plot_ricker(cod, a=best$a, b=best$b, main="Best fit")
|
#----------------------------------------------------------------------------------------------------
#' Run the Elastic Net Solvers
#' @description Given a TReNA object with either LASSO or Ridge Regression as the solver, use the \code{\link{glmnet}} function to estimate coefficients for each transcription factor as a predictor of the target gene's expression level.
#'
#' @param obj An object of class Solver
#' @param target.gene A designated target gene that should be part of the mtx.assay data
#' @param tfs The designated set of transcription factors that could be associated with the target gene.
#' @param tf.weights A set of weights on the transcription factors (default = rep(1, length(tfs)))
#' @param alpha The LASSO/Ridge tuning parameter
#' @param lambda The penalty tuning parameter for elastic net
#' @param keep.metrics A binary variable indicating whether or not to keep metrics
#'
#' @return A data frame containing the coefficients relating the target gene to each transcription factor, plus other fit parameters
#'
#' @seealso \code{\link{glmnet}}
#'
#' @examples
#'
.elasticNetSolver <- function (obj, target.gene, tfs, tf.weights, alpha, lambda, keep.metrics){
if(length(tfs) == 0)
return(data.frame())
# we don't try to handle tf self-regulation
deleters <- grep(target.gene, tfs)
if(length(deleters) > 0){
tfs <- tfs[-deleters]
tf.weights <- tf.weights[-deleters]
if(!obj@quiet)
message(sprintf("Removing target.gene from candidate regulators: %s", target.gene))
}
if( length(tfs) == 0 ) return( data.frame() )
mtx <- getAssayData(obj)
stopifnot(target.gene %in% rownames(mtx))
stopifnot(all(tfs %in% rownames(mtx)))
stopifnot(class(lambda) %in% c("NULL","numeric"))
features <- t(mtx[tfs,,drop=FALSE ])
target <- as.numeric(mtx[target.gene,])
if( length(tfs) == 1 ) {
fit = stats::lm( target ~ features )
mtx.beta = stats::coef(fit)
cor.target.feature = stats::cor( target , features )[1,1]
mtx.beta = data.frame( beta = mtx.beta[2] , intercept = mtx.beta[1] , gene.cor = cor.target.feature )
rownames(mtx.beta) = tfs
if( keep.metrics == FALSE ) return( mtx.beta )
if( keep.metrics == TRUE ) return( list( mtx.beta = mtx.beta , lambda = NA , r2 = cor.target.feature^2 ) )
}
if( length(lambda) == 0 ) {
# Run Permutation testing to find lambda
if( alpha != 0 )
alpha.perm = alpha
else(alpha.perm = 0.1)
target.mixed <- sample(target)
threshold <- 1E-15
lambda.change <- 10^(-4)
lambda <- 1
lambda.list <- numeric(length=50)
for(i in 1:length(lambda.list)){
# Do a binary search
step.size <- lambda/2 # Start at 0.5
while(step.size > lambda.change){
# Get the fit
fit <- glmnet(features, target.mixed, penalty.factor = tf.weights, alpha=alpha.perm, lambda=lambda)
# Case 1: nonsense, need to lower lambda
if(max(fit$beta) < threshold){
lambda <- lambda - step.size
}
# Case 2: sense, need to raise lambda
else{
lambda <- lambda + step.size
}
# Halve the step size and re-scramble the target
step.size <- step.size/2
target.mixed <- sample(target)
}
lambda.list[[i]] <- lambda
}
# Give lambda as 1 + 1se
lambda <- mean(lambda.list) + (stats::sd(lambda.list)/sqrt(length(lambda.list)))
fit <- glmnet(features, target, penalty.factor=tf.weights, alpha=alpha, lambda=lambda)
}
# For non-LASSO
# else{
# fit <- cv.glmnet(features, target, penalty.factor=tf.weights, grouped=FALSE , alpha = alpha )
# lambda.min <- fit$lambda.min
# lambda <-fit$lambda.1se
# }
else if(is.numeric(lambda)){
fit = glmnet(features, target, penalty.factor=tf.weights, alpha=alpha, lambda=lambda)
}
# extract the exponents of the fit
mtx.beta <- as.matrix( stats::predict( fit , newx = features , type = "coef" , s = lambda ) )
colnames(mtx.beta) <- "beta"
deleters <- as.integer(which(mtx.beta[,1] == 0))
if( all( mtx.beta[,1] == 0 ) ) return( data.frame() )
if(length(deleters) > 0)
mtx.beta <- mtx.beta[-deleters, , drop=FALSE]
# put the intercept, admittedly with much redundancy, into its own column
intercept <- mtx.beta[1,1]
mtx.beta <- mtx.beta[-1, , drop=FALSE]
mtx.beta <- cbind(mtx.beta, intercept=rep(intercept, nrow(mtx.beta)))
correlations.of.betas.to.targetGene <- unlist(lapply(rownames(mtx.beta), function(x) stats::cor(mtx[x,], mtx[target.gene,])))
mtx.beta <- as.matrix(cbind( mtx.beta, gene.cor=correlations.of.betas.to.targetGene))
#if(!obj@quiet)
# graphics::plot(fit.nolambda, xvar='lambda', label=TRUE)
if( nrow(mtx.beta) > 1 ) {
ordered.indices <- order(abs(mtx.beta[, "beta"]), decreasing=TRUE)
mtx.beta <- mtx.beta[ordered.indices,]
}
mtx.beta = as.data.frame(mtx.beta)
if( keep.metrics == TRUE ) {
pred.values = stats::predict( fit , newx = features , s = lambda , type = "link" )
r2 = (stats::cor( target , pred.values )[1,1])^2
return( list( mtx.beta = mtx.beta , lambda = lambda , r2 = r2 ) )
}
if( keep.metrics == FALSE )
return(as.data.frame(mtx.beta))
}
# ElasticNetSolver
#----------------------------------------------------------------------------------------------------
| /R/sharedFunctions.R | no_license | noahmclean1/TReNA | R | false | false | 5,926 | r | #----------------------------------------------------------------------------------------------------
#' Run the Elastic Net Solvers
#' @description Given a TReNA object with either LASSO or Ridge Regression as the solver, use the \code{\link{glmnet}} function to estimate coefficients for each transcription factor as a predictor of the target gene's expression level.
#'
#' @param obj An object of class Solver
#' @param target.gene A designated target gene that should be part of the mtx.assay data
#' @param tfs The designated set of transcription factors that could be associated with the target gene.
#' @param tf.weights A set of weights on the transcription factors (default = rep(1, length(tfs)))
#' @param alpha The LASSO/Ridge tuning parameter
#' @param lambda The penalty tuning parameter for elastic net
#' @param keep.metrics A binary variable indicating whether or not to keep metrics
#'
#' @return A data frame containing the coefficients relating the target gene to each transcription factor, plus other fit parameters
#'
#' @seealso \code{\link{glmnet}}
#'
#' @examples
#'
.elasticNetSolver <- function (obj, target.gene, tfs, tf.weights, alpha, lambda, keep.metrics){
if(length(tfs) == 0)
return(data.frame())
# we don't try to handle tf self-regulation
deleters <- grep(target.gene, tfs)
if(length(deleters) > 0){
tfs <- tfs[-deleters]
tf.weights <- tf.weights[-deleters]
if(!obj@quiet)
message(sprintf("Removing target.gene from candidate regulators: %s", target.gene))
}
if( length(tfs) == 0 ) return( data.frame() )
mtx <- getAssayData(obj)
stopifnot(target.gene %in% rownames(mtx))
stopifnot(all(tfs %in% rownames(mtx)))
stopifnot(class(lambda) %in% c("NULL","numeric"))
features <- t(mtx[tfs,,drop=FALSE ])
target <- as.numeric(mtx[target.gene,])
if( length(tfs) == 1 ) {
fit = stats::lm( target ~ features )
mtx.beta = stats::coef(fit)
cor.target.feature = stats::cor( target , features )[1,1]
mtx.beta = data.frame( beta = mtx.beta[2] , intercept = mtx.beta[1] , gene.cor = cor.target.feature )
rownames(mtx.beta) = tfs
if( keep.metrics == FALSE ) return( mtx.beta )
if( keep.metrics == TRUE ) return( list( mtx.beta = mtx.beta , lambda = NA , r2 = cor.target.feature^2 ) )
}
if( length(lambda) == 0 ) {
# Run Permutation testing to find lambda
if( alpha != 0 )
alpha.perm = alpha
else(alpha.perm = 0.1)
target.mixed <- sample(target)
threshold <- 1E-15
lambda.change <- 10^(-4)
lambda <- 1
lambda.list <- numeric(length=50)
for(i in 1:length(lambda.list)){
# Do a binary search
step.size <- lambda/2 # Start at 0.5
while(step.size > lambda.change){
# Get the fit
fit <- glmnet(features, target.mixed, penalty.factor = tf.weights, alpha=alpha.perm, lambda=lambda)
# Case 1: nonsense, need to lower lambda
if(max(fit$beta) < threshold){
lambda <- lambda - step.size
}
# Case 2: sense, need to raise lambda
else{
lambda <- lambda + step.size
}
# Halve the step size and re-scramble the target
step.size <- step.size/2
target.mixed <- sample(target)
}
lambda.list[[i]] <- lambda
}
# Give lambda as 1 + 1se
lambda <- mean(lambda.list) + (stats::sd(lambda.list)/sqrt(length(lambda.list)))
fit <- glmnet(features, target, penalty.factor=tf.weights, alpha=alpha, lambda=lambda)
}
# For non-LASSO
# else{
# fit <- cv.glmnet(features, target, penalty.factor=tf.weights, grouped=FALSE , alpha = alpha )
# lambda.min <- fit$lambda.min
# lambda <-fit$lambda.1se
# }
else if(is.numeric(lambda)){
fit = glmnet(features, target, penalty.factor=tf.weights, alpha=alpha, lambda=lambda)
}
# extract the exponents of the fit
mtx.beta <- as.matrix( stats::predict( fit , newx = features , type = "coef" , s = lambda ) )
colnames(mtx.beta) <- "beta"
deleters <- as.integer(which(mtx.beta[,1] == 0))
if( all( mtx.beta[,1] == 0 ) ) return( data.frame() )
if(length(deleters) > 0)
mtx.beta <- mtx.beta[-deleters, , drop=FALSE]
# put the intercept, admittedly with much redundancy, into its own column
intercept <- mtx.beta[1,1]
mtx.beta <- mtx.beta[-1, , drop=FALSE]
mtx.beta <- cbind(mtx.beta, intercept=rep(intercept, nrow(mtx.beta)))
correlations.of.betas.to.targetGene <- unlist(lapply(rownames(mtx.beta), function(x) stats::cor(mtx[x,], mtx[target.gene,])))
mtx.beta <- as.matrix(cbind( mtx.beta, gene.cor=correlations.of.betas.to.targetGene))
#if(!obj@quiet)
# graphics::plot(fit.nolambda, xvar='lambda', label=TRUE)
if( nrow(mtx.beta) > 1 ) {
ordered.indices <- order(abs(mtx.beta[, "beta"]), decreasing=TRUE)
mtx.beta <- mtx.beta[ordered.indices,]
}
mtx.beta = as.data.frame(mtx.beta)
if( keep.metrics == TRUE ) {
pred.values = stats::predict( fit , newx = features , s = lambda , type = "link" )
r2 = (stats::cor( target , pred.values )[1,1])^2
return( list( mtx.beta = mtx.beta , lambda = lambda , r2 = r2 ) )
}
if( keep.metrics == FALSE )
return(as.data.frame(mtx.beta))
}
# ElasticNetSolver
#----------------------------------------------------------------------------------------------------
|
rankhospital <- function(state, outcome, num = "best") {
hospitalNameColumnNumber <- 2
outcomeData <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
states <- unique(outcomeData[, 7])
numberOfRows <- nrow(outcomeData);
if (! state %in% states) stop(paste("Error in rankhospital(", state, ", ", outcome, ") : invalid state", sep = "\""), call. = F)
index <- if (num == "best") {
1
} else if (num == "worst") {
numberOfRows
} else if (!is.numeric(num)) {
stop(paste("Error in rankhospital(", state, ", ", outcome, ", ", num, ") : invalid num", sep = "\""), call. = F)
} else if (num > numberOfRows) {
NA
} else {
num
}
measuredColumnNumber <-
if (outcome == "heart attack"){
outcomeData [, 11] <- as.numeric(outcomeData[, 11])
11
} else
if (outcome == "heart failure"){
outcomeData [, 17] <- as.numeric(outcomeData[, 17])
17
} else
if (outcome == "pneumonia"){
outcomeData [, 23] <- as.numeric(outcomeData[, 23])
23
} else stop(paste("Error in rankhospital(", state, ", ", outcome, ") : invalid outcome", sep = "\""), call. = F)
concreteStateSubset <- subset(outcomeData, State == state)
orderedStateSubset <- order(concreteStateSubset[measuredColumnNumber], concreteStateSubset[hospitalNameColumnNumber], na.last = NA)
index <- length(orderedStateSubset)
rearrangedStateSubset <- concreteStateSubset[orderedStateSubset, ]
rearrangedStateSubset[index, ]$Hospital.Name
} | /rankhospital.R | no_license | RonZamkadny/ProgrammingAssignment2 | R | false | false | 1,527 | r | rankhospital <- function(state, outcome, num = "best") {
hospitalNameColumnNumber <- 2
outcomeData <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
states <- unique(outcomeData[, 7])
numberOfRows <- nrow(outcomeData);
if (! state %in% states) stop(paste("Error in rankhospital(", state, ", ", outcome, ") : invalid state", sep = "\""), call. = F)
index <- if (num == "best") {
1
} else if (num == "worst") {
numberOfRows
} else if (!is.numeric(num)) {
stop(paste("Error in rankhospital(", state, ", ", outcome, ", ", num, ") : invalid num", sep = "\""), call. = F)
} else if (num > numberOfRows) {
NA
} else {
num
}
measuredColumnNumber <-
if (outcome == "heart attack"){
outcomeData [, 11] <- as.numeric(outcomeData[, 11])
11
} else
if (outcome == "heart failure"){
outcomeData [, 17] <- as.numeric(outcomeData[, 17])
17
} else
if (outcome == "pneumonia"){
outcomeData [, 23] <- as.numeric(outcomeData[, 23])
23
} else stop(paste("Error in rankhospital(", state, ", ", outcome, ") : invalid outcome", sep = "\""), call. = F)
concreteStateSubset <- subset(outcomeData, State == state)
orderedStateSubset <- order(concreteStateSubset[measuredColumnNumber], concreteStateSubset[hospitalNameColumnNumber], na.last = NA)
index <- length(orderedStateSubset)
rearrangedStateSubset <- concreteStateSubset[orderedStateSubset, ]
rearrangedStateSubset[index, ]$Hospital.Name
} |
context('descriptives')
test_that('descriptive statistics work for continuous variables without split by sunny', {
CI_WIDTH <- 0.95
QUANTS <- c(0.25, 0.50, 0.75)
set.seed(1337)
x <- rnorm(100, 0, 1)
df <- data.frame(x=x)
desc <- jmv::descriptives(
data=df, vars=x, mode=TRUE, sum=TRUE, variance=TRUE, range=TRUE,
se=TRUE, ci=TRUE, skew=TRUE, kurt=TRUE, sw=TRUE, pcEqGr=TRUE, pc=TRUE
)
r <- desc$descriptives$asDF
# Calculate statistics
missing <- sum(is.na(x))
n <- length(x) - missing
mean <- mean(x)
se <- sd(x) / sqrt(n)
zQuant <- 1 - ((1 - CI_WIDTH) / 2)
ciLower <- mean - qnorm(zQuant) * se
ciUpper <- mean + qnorm(zQuant) * se
mode <- as.numeric(names(table(x)[table(x)==max(table(x))]))[1]
shapiro <- shapiro.test(x)
quantiles <- quantile(x, QUANTS)
# Test descriptives table
expect_equal(n, r[["x[n]"]], tolerance = 1e-5)
expect_equal(missing, r[["x[missing]"]], tolerance = 1e-5)
expect_equal(mean, r[["x[mean]"]], tolerance = 1e-5)
expect_equal(se, r[["x[se]"]], tolerance = 1e-5)
expect_equal(ciLower, r[["x[ciLower]"]], tolerance = 1e-5)
expect_equal(ciUpper, r[["x[ciUpper]"]], tolerance = 1e-5)
expect_equal(median(x), r[["x[median]"]], tolerance = 1e-5)
expect_equal(mode, r[["x[mode]"]], tolerance = 1e-5)
expect_equal(sum(x), r[["x[sum]"]], tolerance = 1e-5)
expect_equal(sd(x), r[["x[sd]"]], tolerance = 1e-5)
expect_equal(var(x), r[["x[variance]"]], tolerance = 1e-5)
expect_equal(range(x)[2] - range(x)[1], r[["x[range]"]], tolerance = 1e-5)
expect_equal(min(x), r[["x[min]"]], tolerance = 1e-5)
expect_equal(max(x), r[["x[max]"]], tolerance = 1e-5)
expect_equal(0.11014, r[["x[skew]"]], tolerance = 1e-5)
expect_equal(0.24138, r[["x[seSkew]"]], tolerance = 1e-5)
expect_equal(-0.11958, r[["x[kurt]"]], tolerance = 1e-5)
expect_equal(0.47833, r[["x[seKurt]"]], tolerance = 1e-5)
expect_equal(as.numeric(shapiro$statistic), r[["x[sww]"]], tolerance = 1e-5)
expect_equal(as.numeric(shapiro$p.value), r[["x[sw]"]], tolerance = 1e-5)
expect_equal(as.numeric(quantiles[1]), r[["x[quant1]"]], tolerance = 1e-5)
expect_equal(as.numeric(quantiles[2]), r[["x[quant2]"]], tolerance = 1e-5)
expect_equal(as.numeric(quantiles[3]), r[["x[quant3]"]], tolerance = 1e-5)
})
test_that('descriptives transposed table works with splitBy', {
suppressWarnings(RNGversion("3.5.0"))
set.seed(1337)
df <- data.frame(
Q1=rnorm(100),
Q2=rnorm(100),
Q3=rnorm(100),
Q4=rnorm(100),
group=sample(letters[1:3], 100, replace = TRUE)
)
desc <- jmv::descriptives(
data=df, vars=vars(Q1, Q2, Q3, Q4), splitBy=group, desc="rows"
)
r <- desc$descriptivesT$asDF
expect_equal(c(36, 28, 36, 36, 28, 36, 36, 28, 36, 36, 28, 36), r$n)
expect_equal(
c(0.1454, 0.2344, 0.3307, 0.09781, -0.02078, 0.03245, -0.2239, -0.1114,
-0.1302, -0.005095, -0.1445, 0.01393),
r$mean, tolerance=1e-4
)
expect_equal(
c(1.138, 0.8853, 1.138, 0.9002, 1.178, 0.9884, 1.044, 1.225, 0.9436,
0.8925, 1.070, 0.843),
r$sd, tolerance=1e-3
)
expect_equal(
c(-2.344, -1.774, -1.679, -1.154, -2.474, -2.32, -2.38, -2.689, -1.979,
-1.697, -1.867, -1.493),
r$min, tolerance=1e-3
)
expect_equal(
c(2.199, 1.785, 3.446, 3.104, 2.929, 2.209, 2.258, 3.406, 1.933, 1.851,
1.898, 2.163),
r$max, tolerance=1e-4
)
})
test_that('descriptives works old scenario', {
w <- as.factor(rep(c("1", "2","3"), each=4))
x <- as.factor(rep(c("a", "b","c"), 4))
y <- c(4,4,3,4,8,0,9,8,8,6,0,3)
z <- c(NA,NaN,3,-1,-2,1,1,-2,2,-2,-3,3)
data <- data.frame(w=w, x=x, y=y, z=z)
desc <- jmv::descriptives(data, vars=c("w", "y", "z"), splitBy = "x",
freq=TRUE, median=TRUE, mode=TRUE, skew=TRUE,
kurt=TRUE, pc=TRUE)
freq <- desc$frequencies[[1]]$asDF
descr <- desc$descriptives$asDF
# Test frequency table numerical values
expect_equal(1, freq[1,3], tolerance = 1e-3)
expect_equal(2, freq[3,4], tolerance = 1e-3)
# Test descriptives table numerical values
expect_equal(2.619, descr$`y[seKurtb]`, tolerance = 1e-3)
expect_equal(-1.289, descr$`z[kurtc]`, tolerance = 1e-3)
expect_equal(1, descr$`z[missinga]`, tolerance = 1e-3)
expect_equal(5.750, descr$`y[meana]`, tolerance = 1e-3)
expect_equal(-2, descr$`z[modeb]`, tolerance = 1e-3)
expect_equal(4, descr$`y[mina]`, tolerance = 1e-3)
expect_equal(2.25, descr$`y[perc1c]`, tolerance = 1e-3)
})
test_that('histogram is created for nominal numeric variable', {
set.seed(1337)
data <- data.frame(
a1 = rnorm(100, 0, 10),
a2 = factor(sample(1:10, 100, replace = TRUE))
)
attr(data$a2, 'values') <- 1:10
desc <- jmv::descriptives(data, c('a1', 'a2'), hist=TRUE)
expect_true(desc$plots[[2]]$.render())
})
| /tests/testthat/testdescriptives.R | no_license | cran/jmv | R | false | false | 5,070 | r | context('descriptives')
test_that('descriptive statistics work for continuous variables without split by sunny', {
CI_WIDTH <- 0.95
QUANTS <- c(0.25, 0.50, 0.75)
set.seed(1337)
x <- rnorm(100, 0, 1)
df <- data.frame(x=x)
desc <- jmv::descriptives(
data=df, vars=x, mode=TRUE, sum=TRUE, variance=TRUE, range=TRUE,
se=TRUE, ci=TRUE, skew=TRUE, kurt=TRUE, sw=TRUE, pcEqGr=TRUE, pc=TRUE
)
r <- desc$descriptives$asDF
# Calculate statistics
missing <- sum(is.na(x))
n <- length(x) - missing
mean <- mean(x)
se <- sd(x) / sqrt(n)
zQuant <- 1 - ((1 - CI_WIDTH) / 2)
ciLower <- mean - qnorm(zQuant) * se
ciUpper <- mean + qnorm(zQuant) * se
mode <- as.numeric(names(table(x)[table(x)==max(table(x))]))[1]
shapiro <- shapiro.test(x)
quantiles <- quantile(x, QUANTS)
# Test descriptives table
expect_equal(n, r[["x[n]"]], tolerance = 1e-5)
expect_equal(missing, r[["x[missing]"]], tolerance = 1e-5)
expect_equal(mean, r[["x[mean]"]], tolerance = 1e-5)
expect_equal(se, r[["x[se]"]], tolerance = 1e-5)
expect_equal(ciLower, r[["x[ciLower]"]], tolerance = 1e-5)
expect_equal(ciUpper, r[["x[ciUpper]"]], tolerance = 1e-5)
expect_equal(median(x), r[["x[median]"]], tolerance = 1e-5)
expect_equal(mode, r[["x[mode]"]], tolerance = 1e-5)
expect_equal(sum(x), r[["x[sum]"]], tolerance = 1e-5)
expect_equal(sd(x), r[["x[sd]"]], tolerance = 1e-5)
expect_equal(var(x), r[["x[variance]"]], tolerance = 1e-5)
expect_equal(range(x)[2] - range(x)[1], r[["x[range]"]], tolerance = 1e-5)
expect_equal(min(x), r[["x[min]"]], tolerance = 1e-5)
expect_equal(max(x), r[["x[max]"]], tolerance = 1e-5)
expect_equal(0.11014, r[["x[skew]"]], tolerance = 1e-5)
expect_equal(0.24138, r[["x[seSkew]"]], tolerance = 1e-5)
expect_equal(-0.11958, r[["x[kurt]"]], tolerance = 1e-5)
expect_equal(0.47833, r[["x[seKurt]"]], tolerance = 1e-5)
expect_equal(as.numeric(shapiro$statistic), r[["x[sww]"]], tolerance = 1e-5)
expect_equal(as.numeric(shapiro$p.value), r[["x[sw]"]], tolerance = 1e-5)
expect_equal(as.numeric(quantiles[1]), r[["x[quant1]"]], tolerance = 1e-5)
expect_equal(as.numeric(quantiles[2]), r[["x[quant2]"]], tolerance = 1e-5)
expect_equal(as.numeric(quantiles[3]), r[["x[quant3]"]], tolerance = 1e-5)
})
test_that('descriptives transposed table works with splitBy', {
suppressWarnings(RNGversion("3.5.0"))
set.seed(1337)
df <- data.frame(
Q1=rnorm(100),
Q2=rnorm(100),
Q3=rnorm(100),
Q4=rnorm(100),
group=sample(letters[1:3], 100, replace = TRUE)
)
desc <- jmv::descriptives(
data=df, vars=vars(Q1, Q2, Q3, Q4), splitBy=group, desc="rows"
)
r <- desc$descriptivesT$asDF
expect_equal(c(36, 28, 36, 36, 28, 36, 36, 28, 36, 36, 28, 36), r$n)
expect_equal(
c(0.1454, 0.2344, 0.3307, 0.09781, -0.02078, 0.03245, -0.2239, -0.1114,
-0.1302, -0.005095, -0.1445, 0.01393),
r$mean, tolerance=1e-4
)
expect_equal(
c(1.138, 0.8853, 1.138, 0.9002, 1.178, 0.9884, 1.044, 1.225, 0.9436,
0.8925, 1.070, 0.843),
r$sd, tolerance=1e-3
)
expect_equal(
c(-2.344, -1.774, -1.679, -1.154, -2.474, -2.32, -2.38, -2.689, -1.979,
-1.697, -1.867, -1.493),
r$min, tolerance=1e-3
)
expect_equal(
c(2.199, 1.785, 3.446, 3.104, 2.929, 2.209, 2.258, 3.406, 1.933, 1.851,
1.898, 2.163),
r$max, tolerance=1e-4
)
})
test_that('descriptives works old scenario', {
w <- as.factor(rep(c("1", "2","3"), each=4))
x <- as.factor(rep(c("a", "b","c"), 4))
y <- c(4,4,3,4,8,0,9,8,8,6,0,3)
z <- c(NA,NaN,3,-1,-2,1,1,-2,2,-2,-3,3)
data <- data.frame(w=w, x=x, y=y, z=z)
desc <- jmv::descriptives(data, vars=c("w", "y", "z"), splitBy = "x",
freq=TRUE, median=TRUE, mode=TRUE, skew=TRUE,
kurt=TRUE, pc=TRUE)
freq <- desc$frequencies[[1]]$asDF
descr <- desc$descriptives$asDF
# Test frequency table numerical values
expect_equal(1, freq[1,3], tolerance = 1e-3)
expect_equal(2, freq[3,4], tolerance = 1e-3)
# Test descriptives table numerical values
expect_equal(2.619, descr$`y[seKurtb]`, tolerance = 1e-3)
expect_equal(-1.289, descr$`z[kurtc]`, tolerance = 1e-3)
expect_equal(1, descr$`z[missinga]`, tolerance = 1e-3)
expect_equal(5.750, descr$`y[meana]`, tolerance = 1e-3)
expect_equal(-2, descr$`z[modeb]`, tolerance = 1e-3)
expect_equal(4, descr$`y[mina]`, tolerance = 1e-3)
expect_equal(2.25, descr$`y[perc1c]`, tolerance = 1e-3)
})
test_that('histogram is created for nominal numeric variable', {
set.seed(1337)
data <- data.frame(
a1 = rnorm(100, 0, 10),
a2 = factor(sample(1:10, 100, replace = TRUE))
)
attr(data$a2, 'values') <- 1:10
desc <- jmv::descriptives(data, c('a1', 'a2'), hist=TRUE)
expect_true(desc$plots[[2]]$.render())
})
|
baixar_paginas_gazetadeiracemapolis <- function(diretorio = ".",caderno = "policia", paginas = 10){
url <- paste0("http://gazetadeiracemapolis.com.br/noticias/",caderno,"/page/")
purrr::walk(1:paginas,purrr::possibly(purrrogress::with_progress(~{
arquivo <- paste0(stringr::str_replace_all(Sys.time(),"\\D","_"),"_pagina_",.x,".html")
httr::GET(url=paste0(url,.x),httr::write_disk(paste0(diretorio,"/",arquivo),overwrite = TRUE))
}),NULL))
}
p <- baixar_paginas_gazetadeiracemapolis(diretorio = "pagina")
ler_paginas_gazetadeiracemapolis <- function(arquivos = NULL, diretorio = "."){
if (is.null(arquivos)){
arquivos <- list.files(diretorio,pattern= "html",full.names=TRUE)
}
purrr::map_dfr(arquivos,purrr::possibly(purrrogress::with_progress(~{
x <- xml2::read_html(.x)
url <- x %>%
xml2::xml_find_all('//article/a') %>%
xml2::xml_attr("href")
headline <- x %>%
xml2::xml_find_all('//div[@class="content-text-list"]/h4') %>%
xml2::xml_text()
descricao <- x %>%
xml2::xml_find_all('//div[@class="content-text-list"]//p') %>%
xml2::xml_text()
data_publicacao <- x %>%
xml2::xml_find_all('//p[@class="date-and-catname"]') %>%
xml2::xml_text() %>%
stringr::str_sub(start = 8, end = 17)
tibble::tibble(headline,descricao,data_publicacao,url)
}),NULL))
}
gzt <- ler_paginas_gazetadeiracemapolis(arquivos = arquivos)
baixar_noticias_gazetadeiracemapolis <- function(url = NULL, diretorio = "."){
if (is.null(url)){
stop("Você tem de informar a url")
}
purrr::walk(url,purrr::possibly(purrrogress::with_progress(~{
artigo <- stringr::str_extract(.x,"[\\w-]+(?=/$)") %>%
stringr::str_replace_all("-","_")
arquivo <- paste0(stringr::str_replace_all(Sys.time(),"\\D","_"),"_gazetadeiracemapolis_",artigo,".html")
httr::GET(.x,httr::write_disk(paste0(diretorio,"/",arquivo),overwrite = TRUE))
}),NULL))
}
baixar_noticias_gazetadeiracemapolis(gzt$url,"noticias")
ler_noticias_gazetadeiracemapolis <- function(arquivos = NULL, diretorio = "."){
if (is.null(arquivos)){
arquivos <- list.files(diretorio, pattern="html",full.names = TRUE)
}
purrr::map_dfr(arquivos, purrr::possibly(purrrogress::with_progress(~{
x <- xml2::read_html(.x)
headline <- x %>%
xml2::xml_find_all('//h1[@class="title"]') %>%
xml2::xml_text()
texto <- x %>%
xml2::xml_find_all('//section[@class="row"]//p') %>%
xml2::xml_text() %>%
stringr::str_c(collapse = "\n")
data_publicacao <- x %>%
xml2::xml_find_all('//small') %>%
xml2::xml_text() %>%
stringr::str_sub(start = 13, end = 35)
tibble::tibble(headline,texto,data_publicacao)
}),NULL))
}
noticias <- ler_noticias_gazetadeiracemapolis(arquivos = arquivos)
| /gazetadeiracemapolisDATA.R | no_license | Viny369/jornais | R | false | false | 2,971 | r | baixar_paginas_gazetadeiracemapolis <- function(diretorio = ".",caderno = "policia", paginas = 10){
url <- paste0("http://gazetadeiracemapolis.com.br/noticias/",caderno,"/page/")
purrr::walk(1:paginas,purrr::possibly(purrrogress::with_progress(~{
arquivo <- paste0(stringr::str_replace_all(Sys.time(),"\\D","_"),"_pagina_",.x,".html")
httr::GET(url=paste0(url,.x),httr::write_disk(paste0(diretorio,"/",arquivo),overwrite = TRUE))
}),NULL))
}
p <- baixar_paginas_gazetadeiracemapolis(diretorio = "pagina")
ler_paginas_gazetadeiracemapolis <- function(arquivos = NULL, diretorio = "."){
if (is.null(arquivos)){
arquivos <- list.files(diretorio,pattern= "html",full.names=TRUE)
}
purrr::map_dfr(arquivos,purrr::possibly(purrrogress::with_progress(~{
x <- xml2::read_html(.x)
url <- x %>%
xml2::xml_find_all('//article/a') %>%
xml2::xml_attr("href")
headline <- x %>%
xml2::xml_find_all('//div[@class="content-text-list"]/h4') %>%
xml2::xml_text()
descricao <- x %>%
xml2::xml_find_all('//div[@class="content-text-list"]//p') %>%
xml2::xml_text()
data_publicacao <- x %>%
xml2::xml_find_all('//p[@class="date-and-catname"]') %>%
xml2::xml_text() %>%
stringr::str_sub(start = 8, end = 17)
tibble::tibble(headline,descricao,data_publicacao,url)
}),NULL))
}
gzt <- ler_paginas_gazetadeiracemapolis(arquivos = arquivos)
baixar_noticias_gazetadeiracemapolis <- function(url = NULL, diretorio = "."){
if (is.null(url)){
stop("Você tem de informar a url")
}
purrr::walk(url,purrr::possibly(purrrogress::with_progress(~{
artigo <- stringr::str_extract(.x,"[\\w-]+(?=/$)") %>%
stringr::str_replace_all("-","_")
arquivo <- paste0(stringr::str_replace_all(Sys.time(),"\\D","_"),"_gazetadeiracemapolis_",artigo,".html")
httr::GET(.x,httr::write_disk(paste0(diretorio,"/",arquivo),overwrite = TRUE))
}),NULL))
}
baixar_noticias_gazetadeiracemapolis(gzt$url,"noticias")
ler_noticias_gazetadeiracemapolis <- function(arquivos = NULL, diretorio = "."){
if (is.null(arquivos)){
arquivos <- list.files(diretorio, pattern="html",full.names = TRUE)
}
purrr::map_dfr(arquivos, purrr::possibly(purrrogress::with_progress(~{
x <- xml2::read_html(.x)
headline <- x %>%
xml2::xml_find_all('//h1[@class="title"]') %>%
xml2::xml_text()
texto <- x %>%
xml2::xml_find_all('//section[@class="row"]//p') %>%
xml2::xml_text() %>%
stringr::str_c(collapse = "\n")
data_publicacao <- x %>%
xml2::xml_find_all('//small') %>%
xml2::xml_text() %>%
stringr::str_sub(start = 13, end = 35)
tibble::tibble(headline,texto,data_publicacao)
}),NULL))
}
noticias <- ler_noticias_gazetadeiracemapolis(arquivos = arquivos)
|
setwd("C:/Users/Shalanda/Documents/R Bootcamp")
snpsDataframe= read.table('hapmap_CEU_r23a_chr2_ld-2.txt', header=TRUE)
dim(snpsDataframe)
head(snpsDataframe)
names(snpsDataframe)
row.names(snpsDataframe)
snps=as.matrix(snpsDataframe)
testSNP=snps["rs218206_G",]
table(testSNP)
het=sum(testSNP==1)/length(testSNP)
testSNP=snps["rs6717613_A",]
table(testSNP)
testSNP==1
length(testSNP)
is.na(testSNP)
het=sum(testSNP==1)/length(testSNP)
het=sum(testSNP==1,na.rm=TRUE)/sum(!is.na(testSNP))
freq=sum(testSNP,na.rm=TRUE)/(2.0*sum(!is.na(testSNP)))
calc_freq=function(x){
return(sum(x,na.rm=TRUE)/(2.0*sum(!is.na(x))))
}
calc_het=function(x){
return(sum(x==1,na.rm=TRUE)/(sum(!is.na(x))))
}
freq=apply(snps,1,calc_freq)
het=apply(snps,1,calc_het)
plot(freq,het,xlab="Frequency",ylab="Heterozygosity")
p=seq(0,0.5,by=0.05)
points(p,2*p*(1-p),type="l",col=2)
compute_chisquare=function(x){
freq=sum(x,na.rm=TRUE)/(2.0*sum(!is.na(x)))
cnt0=sum(x==0,na.rm=TRUE)
cnt1=sum(x==1,na.rm=TRUE)
cnt2=sum(x==2,na.rm=TRUE)
obscnts=c(cnt0,cnt1,cnt2)
n=sum(obscnts)
expcnts=c((1-freq)^2,2*freq*(1-freq),freq^2)*n
chisq=sum((obscnts-expcnts)^2/expcnts)
return(chisq)
}
compute_chisquare_2=function(x){
freq=sum(x,na.rm=TRUE)/(2.0*sum(!is.na(x)))
cnt0=sum(x==0,na.rm=TRUE)
cnt1=sum(x==1,na.rm=TRUE)
cnt2=sum(x==2,na.rm=TRUE)
obscnts=c(cnt0,cnt1,cnt2)
n=sum(obscnts)
exp_probs=c((1-freq)^2,2*freq*(1-freq),freq^2)
chisq<-chisq.test(obscnts,p=exp_probs, correct = FALSE)$statistic
return(chisq)
}
chisqs=apply(snps,1,compute_chisquare)
chisqs2=apply(snps,1,compute_chisquare_2)
cor.test(chisqs,chisqs2)
plot(chisqs,chisqs2)
pvals <- pchisq(chisqs,1,lower.tail=FALSE)
pvals
signifthres<-0.05
sum(pvals<signifthres)
mean(pvals<signifthres)
#181 less than 0.05
signifthres<-0.01
sum(pvals<signifthres)
mean(pvals<signifthres)
#41 less than 0.01
signifthres<-0.001
sum(pvals<signifthres)
mean(pvals<signifthres)
#5 less than 0.001
# QQ plot of observed vs expected -log10 p-values for the HWE tests.
num_pval <- length(pvals)
num_pval
# Expected p-values under the null are the uniform quantiles i/n.  Use
# num_pval rather than the hard-coded 4014 so this works for any number
# of SNPs.
dpvals <- seq_len(num_pval)
exp_pvals <- dpvals / num_pval
exp_pvals
log_exp_pvals <- -log10(exp_pvals)
log_exp_pvals
sort_pvals <- sort(pvals)
sort_pvals
log_sort_pvals <- -log10(sort_pvals)
log_sort_pvals
# Points above the y = x line indicate an excess of small p-values.
qqplot(log_exp_pvals, log_sort_pvals, xlab = "-log10(expected P-value)", ylab = "-log10(observed P-value)")
abline(0, 1, col = 2)
| /Plotting Exercise 2 Part 1.R | no_license | shagrier/Bootcamp-HW-Assignments- | R | false | false | 2,491 | r | setwd("C:/Users/Shalanda/Documents/R Bootcamp")
snpsDataframe= read.table('hapmap_CEU_r23a_chr2_ld-2.txt', header=TRUE)
dim(snpsDataframe)
head(snpsDataframe)
names(snpsDataframe)
row.names(snpsDataframe)
snps=as.matrix(snpsDataframe)
testSNP=snps["rs218206_G",]
table(testSNP)
het=sum(testSNP==1)/length(testSNP)
testSNP=snps["rs6717613_A",]
table(testSNP)
testSNP==1
length(testSNP)
is.na(testSNP)
het=sum(testSNP==1)/length(testSNP)
het=sum(testSNP==1,na.rm=TRUE)/sum(!is.na(testSNP))
freq=sum(testSNP,na.rm=TRUE)/(2.0*sum(!is.na(testSNP)))
calc_freq=function(x){
return(sum(x,na.rm=TRUE)/(2.0*sum(!is.na(x))))
}
calc_het=function(x){
return(sum(x==1,na.rm=TRUE)/(sum(!is.na(x))))
}
freq=apply(snps,1,calc_freq)
het=apply(snps,1,calc_het)
plot(freq,het,xlab="Frequency",ylab="Heterozygosity")
p=seq(0,0.5,by=0.05)
points(p,2*p*(1-p),type="l",col=2)
compute_chisquare=function(x){
freq=sum(x,na.rm=TRUE)/(2.0*sum(!is.na(x)))
cnt0=sum(x==0,na.rm=TRUE)
cnt1=sum(x==1,na.rm=TRUE)
cnt2=sum(x==2,na.rm=TRUE)
obscnts=c(cnt0,cnt1,cnt2)
n=sum(obscnts)
expcnts=c((1-freq)^2,2*freq*(1-freq),freq^2)*n
chisq=sum((obscnts-expcnts)^2/expcnts)
return(chisq)
}
compute_chisquare_2=function(x){
freq=sum(x,na.rm=TRUE)/(2.0*sum(!is.na(x)))
cnt0=sum(x==0,na.rm=TRUE)
cnt1=sum(x==1,na.rm=TRUE)
cnt2=sum(x==2,na.rm=TRUE)
obscnts=c(cnt0,cnt1,cnt2)
n=sum(obscnts)
exp_probs=c((1-freq)^2,2*freq*(1-freq),freq^2)
chisq<-chisq.test(obscnts,p=exp_probs, correct = FALSE)$statistic
return(chisq)
}
chisqs=apply(snps,1,compute_chisquare)
chisqs2=apply(snps,1,compute_chisquare_2)
cor.test(chisqs,chisqs2)
plot(chisqs,chisqs2)
pvals <- pchisq(chisqs,1,lower.tail=FALSE)
pvals
signifthres<-0.05
sum(pvals<signifthres)
mean(pvals<signifthres)
#181 less than 0.05
signifthres<-0.01
sum(pvals<signifthres)
mean(pvals<signifthres)
#41 less than 0.01
signifthres<-0.001
sum(pvals<signifthres)
mean(pvals<signifthres)
#5 less than 0.001
num_pval <- length (pvals)
num_pval
#4014 pvals
dpvals <- seq(1, 4014, by=1)
exp_pvals <- (dpvals/num_pval)
exp_pvals
log_exp_pvals <- -log10(exp_pvals)
log_exp_pvals
sort_pvals <- sort(pvals)
sort_pvals
log_sort_pvals <- -log10(sort_pvals)
log_sort_pvals
qqplot(log_exp_pvals, log_sort_pvals,xlab = "-log10(expected P- Value", ylab = "-log10(observed P-value)")
abline(0,1, col=2)
|
# September 08, 2019
# HW 2 Time Series
# Daily Mean PM2.5 Concentration - Particulate Matter smaller than 2.5 micrometers Forecast
library(forecast)
library(tsa)
library(haven)
library(fma)
library(expsmooth)
library(lmtest)
library(zoo)
library(seasonal)
library(imputeTS)
library(xts)
library(ggplot2)
library(dplyr)
library(lubridate)
library(tseries)
# Set the working directory
setwd("/Users/CathyTran/Documents/Fall I 2019/Time Series")
# Read CSV into R
data <- read.csv(file="HW2_PM_2_5_Raleigh2.csv", header=TRUE, sep=",")
# set strings as factors to false
options(stringsAsFactors = FALSE)
# Convert character string to date format
data$Date <- as.Date(data$Date, format="%m/%d/%Y")
# Get the range of dates covered
DateRange <- seq(min(data$Date), max(data$Date), by = 1)
# Calculate missing values - 353
length(DateRange[!DateRange %in% data$Date])
# Create a z object to check for missing value after aggregated month
z <- zoo(data$Daily.Mean.PM2.5.Concentration, data$Date)
monthavg <- aggregate(z, as.yearmon, mean)
# Create Training (1300 obs 18 vars) & Validation (173 obs 18 vars) Data Set
data.train <- data[data$Date >= "2014-01-01" & data$Date <= "2018-06-30",]
data.valid <- data[data$Date >= "2018-07-01" & data$Date <= "2018-12-31",]
# Aggregate by month using Daily Mean PM2.5 Concentration
months.train <- data.train %>%
group_by(year=year(Date), month=month(Date)) %>%
summarise(mean=mean(Daily.Mean.PM2.5.Concentration))
months.valid <- data.valid %>%
group_by(year=year(Date), month=month(Date)) %>%
summarise(mean=mean(Daily.Mean.PM2.5.Concentration))
# Time Series Object#
ts.months.train <- ts(months.train$mean, start = 2014, frequency =12)
ts.months.valid <- ts(months.valid$mean, start = 2018, frequency = 12)
# Check the stationarity ADF Test 3 lags
adf.test(ts.months.train, alternative = "stationary", k = 0) # p-value = 0.01 We have stationarity!
adf.test(ts.months.train, alternative = "stationary", k = 1) # p-value = 0.01
adf.test(ts.months.train, alternative = "stationary", k = 2) # p-value = 0.0171
# Pulling out the p-values
ADF.Pvalues <- rep(NA, 3)
for(i in 0:2){
ADF.Pvalues[i+1] <- adf.test(ts.months.train, alternative = "stationary", k = i)$p.value
}
# Check for trending data using STL
# What is the x term
# • Order=c(0,0,0)
# ○ 1st 0 Autoregressive
# ○ 2nd 0 Integrative differences
# 3rd 0 Moving averages
# Define x with length 54 as far out the data goes
length(ts.months.train)
x=seq(1,54)
# Fitting Linear Regression
arima.trend=Arima(ts.months.train, xreg=x,order=c(0,0,0))
# Plot the residuals plot
plot(arima.trend$residuals[1:54], xlab='Number of Observations',ylab='Residuals',main='Residuals Plot',type='l')
# Stationary about the Trend because we know there is NO random walk
# Ljung-Box Test No MA or AR term#
Acf(arima.trend$residuals, lag=10,main = " Autocorrelation Plot")$acf
Pacf(arima.trend$residuals, lag=10, main = "Partial Correlation Plot")$acf
White.LB <- rep(NA, 10)
for(i in 1:10){
White.LB[i] <- Box.test(arima.trend$residuals, lag = i, type = "Ljung", fitdf = 0)$p.value
}
White.LB <- pmin(White.LB, 0.2)
barplot(White.LB, main = "Ljung-Box Test P-values", ylab = "Probabilities", xlab = "Lags", ylim = c(0, 0.2))
abline(h = 0.01, lty = "dashed", col = "black")
abline(h = 0.05, lty = "dashed", col = "black")
# H0: White Noise, No Autocorrelation
# HA: One or more autocorrelation up to lag m are not 0
# no white noise - there is correlation that needs to be modeled, ARIMA is a good way to go
################ AR(1) ###############
# AR(1) Model - PACF
# 3 characteristics
# a) ACF decreases exponentially
# b) PACF has a significant spike at the 1st lag, followed by nothing after
# c) IACF has a significant spike at the 1st lag, followed by nothing after
arima.trend1=Arima(ts.months.train, xreg=x, order=c(1,0,0))
# Forecast n.ahead = 6 forecast 6 periods
xnew=seq(55,60)
arima.forecast <- predict(arima.trend1, n.ahead=6, newxreg=xnew)
# Compare these the predicted values to the validation data set
months.valid$mean - arima.forecast$pred
# For later Calculate MAE and MAPE
# Check residuals for white noise AR(1)
# Ljung-Box Test We have white noise now#
Acf(arima.trend1$residuals, lag=10,main = "AR(1)")$acf
Pacf(arima.trend1$residuals, lag=10, main = "AR(1)")$acf
White.LB <- rep(NA, 10)
for(i in 1:10){
White.LB[i] <- Box.test(arima.trend1$residuals, lag = i, type = "Ljung", fitdf = 1)$p.value
}
White.LB <- pmin(White.LB, 0.2)
barplot(White.LB, main = "Ljung-Box Test P-values", ylab = "Probabilities", xlab = "Lags", ylim = c(0, 0.2))
abline(h = 0.01, lty = "dashed", col = "black")
abline(h = 0.05, lty = "dashed", col = "black")
################ MA(1) ###############
# MA(1) Model focus on ACF
# a)	Look for spikes in ACF
# b)	PACF decreases exponentially as the number of lags increases
# c)	IACF decreases exponentially as the number of lags increases
ma.arima1=Arima(ts.months.train, xreg=x,order=c(0,0,1))
# Check residuals for white noise MA(1)
# Ljung-Box Test	We have white noise now#
Acf(ma.arima1$residuals, lag=10,main = "MA(1)")$acf
Pacf(ma.arima1$residuals, lag=10, main = "MA(1)")$acf
White.LB <- rep(NA, 10)
for(i in 1:10){
  # BUG FIX: test the MA(1) model's residuals (ma.arima1), not the AR(1)
  # model's (arima.trend1) -- the original line was a copy-paste slip, so
  # this plot silently re-reported the AR(1) diagnostics.
  White.LB[i] <- Box.test(ma.arima1$residuals, lag = i, type = "Ljung", fitdf = 1)$p.value
}
White.LB <- pmin(White.LB, 0.2)
barplot(White.LB, main = "Ljung-Box Test P-values", ylab = "Probabilities", xlab = "Lags", ylim = c(0, 0.2))
abline(h = 0.01, lty = "dashed", col = "black")
abline(h = 0.05, lty = "dashed", col = "black")
| /Time Series HW 2/Cathy_TimeSeries_HW3.R | no_license | gclark422/Orange2-HW | R | false | false | 5,540 | r | # September 08, 2019
# HW 2 Time Series
# Daily Mean PM2.5 Concentration - Particulate Matter smaller than 2.5 micrometers Forecast
library(forecast)
library(tsa)
library(haven)
library(fma)
library(expsmooth)
library(lmtest)
library(zoo)
library(seasonal)
library(imputeTS)
library(xts)
library(ggplot2)
library(dplyr)
library(lubridate)
library(tseries)
# Set the working directory
setwd("/Users/CathyTran/Documents/Fall I 2019/Time Series")
# Read CSV into R
data <- read.csv(file="HW2_PM_2_5_Raleigh2.csv", header=TRUE, sep=",")
# set strings as factors to false
options(stringsAsFactors = FALSE)
# Convert character string to date format
data$Date <- as.Date(data$Date, format="%m/%d/%Y")
# Get the range of dates covered
DateRange <- seq(min(data$Date), max(data$Date), by = 1)
# Calculate missing values - 353
length(DateRange[!DateRange %in% data$Date])
# Create a z object to check for missing value after aggregated month
z <- zoo(data$Daily.Mean.PM2.5.Concentration, data$Date)
monthavg <- aggregate(z, as.yearmon, mean)
# Create Training (1300 obs 18 vars) & Validation (173 obs 18 vars) Data Set
data.train <- data[data$Date >= "2014-01-01" & data$Date <= "2018-06-30",]
data.valid <- data[data$Date >= "2018-07-01" & data$Date <= "2018-12-31",]
# Aggregate by month using Daily Mean PM2.5 Concentration
months.train <- data.train %>%
group_by(year=year(Date), month=month(Date)) %>%
summarise(mean=mean(Daily.Mean.PM2.5.Concentration))
months.valid <- data.valid %>%
group_by(year=year(Date), month=month(Date)) %>%
summarise(mean=mean(Daily.Mean.PM2.5.Concentration))
# Time Series Object#
ts.months.train <- ts(months.train$mean, start = 2014, frequency =12)
ts.months.valid <- ts(months.valid$mean, start = 2018, frequency = 12)
# Check the stationarity ADF Test 3 lags
adf.test(ts.months.train, alternative = "stationary", k = 0) # p-value = 0.01 We have stationarity!
adf.test(ts.months.train, alternative = "stationary", k = 1) # p-value = 0.01
adf.test(ts.months.train, alternative = "stationary", k = 2) # p-value = 0.0171
# Pulling out the p-values
ADF.Pvalues <- rep(NA, 3)
for(i in 0:2){
ADF.Pvalues[i+1] <- adf.test(ts.months.train, alternative = "stationary", k = i)$p.value
}
# Check for trending data using STL
# What is the x term
# • Order=c(0,0,0)
# ○ 1st 0 Autoregressive
# ○ 2nd 0 Integrative differences
# 3rd 0 Moving averages
# Define x with length 54 as far out the data goes
length(ts.months.train)
x=seq(1,54)
# Fitting Linear Regression
arima.trend=Arima(ts.months.train, xreg=x,order=c(0,0,0))
# Plot the residuals plot
plot(arima.trend$residuals[1:54], xlab='Number of Observations',ylab='Residuals',main='Residuals Plot',type='l')
# Stationary about the Trend because we know there is NO random walk
# Ljung-Box Test No MA or AR term#
Acf(arima.trend$residuals, lag=10,main = " Autocorrelation Plot")$acf
Pacf(arima.trend$residuals, lag=10, main = "Partial Correlation Plot")$acf
White.LB <- rep(NA, 10)
for(i in 1:10){
White.LB[i] <- Box.test(arima.trend$residuals, lag = i, type = "Ljung", fitdf = 0)$p.value
}
White.LB <- pmin(White.LB, 0.2)
barplot(White.LB, main = "Ljung-Box Test P-values", ylab = "Probabilities", xlab = "Lags", ylim = c(0, 0.2))
abline(h = 0.01, lty = "dashed", col = "black")
abline(h = 0.05, lty = "dashed", col = "black")
# H0: White Noise, No Autocorrelation
# HA: One or more autocorrelation up to lag m are not 0
# no white noise - there is correlation that needs to be modeled, ARIMA is a good way to go
################ AR(1) ###############
# AR(1) Model - PACF
# 3 characteristics
# a) ACF decreases exponentially
# b) PACF has a significant spike at the 1st lag, followed by nothing after
# c) IACF has a significant spike at the 1st lag, followed by nothing after
arima.trend1=Arima(ts.months.train, xreg=x, order=c(1,0,0))
# Forecast n.ahead = 6 forecast 6 periods
xnew=seq(55,60)
arima.forecast <- predict(arima.trend1, n.ahead=6, newxreg=xnew)
# Compare these the predicted values to the validation data set
months.valid$mean - arima.forecast$pred
# For later Calculate MAE and MAPE
# Check residuals for white noise AR(1)
# Ljung-Box Test We have white noise now#
Acf(arima.trend1$residuals, lag=10,main = "AR(1)")$acf
Pacf(arima.trend1$residuals, lag=10, main = "AR(1)")$acf
White.LB <- rep(NA, 10)
for(i in 1:10){
White.LB[i] <- Box.test(arima.trend1$residuals, lag = i, type = "Ljung", fitdf = 1)$p.value
}
White.LB <- pmin(White.LB, 0.2)
barplot(White.LB, main = "Ljung-Box Test P-values", ylab = "Probabilities", xlab = "Lags", ylim = c(0, 0.2))
abline(h = 0.01, lty = "dashed", col = "black")
abline(h = 0.05, lty = "dashed", col = "black")
################ MA(1) ###############
# MA(1) Model focus on ACF
# a) Look for spikes in ACF
# b) PACF decreases exponentially as the number of lags increases
# c) IACF decreases exponentially as the number of lags increases
ma.arima1=Arima(ts.months.train, xreg=x,order=c(0,0,1))
# Check residuals for white noise MA(1)
# Ljung-Box Test We have white noise now#
Acf(ma.arima1$residuals, lag=10,main = "MA(1)")$acf
Pacf(ma.arima1$residuals, lag=10, main = "MA(1)")$acf
White.LB <- rep(NA, 10)
for(i in 1:10){
White.LB[i] <- Box.test(arima.trend1$residuals, lag = i, type = "Ljung", fitdf = 1)$p.value
}
White.LB <- pmin(White.LB, 0.2)
barplot(White.LB, main = "Ljung-Box Test P-values", ylab = "Probabilities", xlab = "Lags", ylim = c(0, 0.2))
abline(h = 0.01, lty = "dashed", col = "black")
abline(h = 0.05, lty = "dashed", col = "black")
|
library(shiny)
# UI for the Data Science Capstone word-prediction app: a banner title, a
# centred free-text input the server watches, three server-rendered
# prediction outputs (uio1-uio3), and a line reporting prediction time.
shinyUI(fluidPage(
    title = "NLP Word Prediction",
    # Inline CSS: web-font import plus styling for the banner, the text
    # box (#text1) and the timing line (#time); the media query widens the
    # banner underline on narrow screens.
    tags$style(HTML("
    @import url('https://fonts.googleapis.com/css2?family=Yusei+Magic&display=swap');
    .container-fluid{
        margin-top: 150px;
    }
    h1{
        width: 35%;
        color: #555;
        border-bottom: 2px solid #777;
    }
    h3{
        color: #999;
        margin-top: -5px;
    }
    #text1{
        border-color: rgba(17, 119, 187, 0.4);
        text-align: center;
    }
    #time{
        margin-top: 20px;
    }
    @media only screen and (max-width: 1000px){
        h1{
            width: 70%;
        }
    }
    ")),
    # Centred page header with title and subtitle.
    fluidRow(
        column(12, align="center",
               # Application title
               titlePanel(
                   h1("DATA SCIENCE CAPSTONE"),
               ),
               h3("Word Prediction with NLP")
        )
    ),
    br(),br(),
    # Free-text entry box; the server reads input$text1 for the phrase.
    fluidRow(
        column(12, align="center",
               textInput("text1", label=NULL, placeholder = "Enter any text", width="600px")
        )
    ),
    # Placeholders filled server-side with the predicted words.
    fluidRow(
        column(12, align="center",
               uiOutput("uio1"),
               uiOutput("uio2"),
               uiOutput("uio3")
        )
    ),
    # Elapsed-time readout for the last prediction.
    fluidRow(
        column(12, align="center",
               textOutput("time")
        )
    )
))
| /prediction/ui.R | no_license | KennethFajardo/WordPredictionNLP | R | false | false | 1,462 | r | library(shiny)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
title = "NLP Word Prediction",
tags$style(HTML("
@import url('https://fonts.googleapis.com/css2?family=Yusei+Magic&display=swap');
.container-fluid{
margin-top: 150px;
}
h1{
width: 35%;
color: #555;
border-bottom: 2px solid #777;
}
h3{
color: #999;
margin-top: -5px;
}
#text1{
border-color: rgba(17, 119, 187, 0.4);
text-align: center;
}
#time{
margin-top: 20px;
}
@media only screen and (max-width: 1000px){
h1{
width: 70%;
}
}
")),
fluidRow(
column(12, align="center",
# Application title
titlePanel(
h1("DATA SCIENCE CAPSTONE"),
),
h3("Word Prediction with NLP")
)
),
br(),br(),
fluidRow(
column(12, align="center",
textInput("text1", label=NULL, placeholder = "Enter any text", width="600px")
)
),
fluidRow(
column(12, align="center",
uiOutput("uio1"),
uiOutput("uio2"),
uiOutput("uio3")
)
),
fluidRow(
column(12, align="center",
textOutput("time")
)
)
))
|
#!/usr/bin/Rscript
# Constructor for a tiny "yogiplot" plotting toolkit.  Returns a classed
# list of plotting closures (currently only `heatmap`), so callers write:
#   p <- new.yogi.plotter(); p$heatmap(m)
new.yogi.plotter <- function() {
	###
	# Draw a heatmap of the numeric matrix `data` with a colour-bar legend.
	#   cex          - axis-label size
	#   color.stops  - colours interpolated into a 30-step palette
	#   row.names / col.names - axis labels (default: dimnames of `data`)
	#   log          - if TRUE, plot log10(data) and label the legend 10^x
	#   col.separator / row.separator - optional indices at which dashed
	#                  grey separator lines are drawn
	# NOTE(review): layout() splits the device 9:1 (plot vs legend) and is
	# not reset on exit, so the layout persists for the caller -- confirm
	# that is intended.
	#
	heatmap <- function(data,cex=.5,color.stops=c("white","royalblue3"),
		row.names=rownames(data),col.names=colnames(data),log=FALSE,
		col.separator=NULL, row.separator=NULL) {
		#define the number of colors to use
		ncolors <- 30
		#create color palette
		colors <- colorRampPalette(color.stops)(ncolors)
		#SET UP PLOT LAYOUT
		#set up layout with main plot taking 90% of the space and legend 10%
		layout(matrix(c(1,2),nrow=1),widths=c(9,1))
		#MAIN PLOT
		if (log) {
			data <- log10(data)
		}
		#set margins for the main plot, and set axis labels to perpendicular mode
		op <- par(mar=c(2,6,6,0),las=2)
		#draw heatmap with correct orientation: image() plots column-major
		#from the bottom, so transpose and reverse rows to keep row 1 on top
		image(t(data)[,nrow(data):1], col=colors, axes=FALSE)
		#draw separators (image() maps cells onto [0,1], hence the b scaling)
		if (!is.null(row.separator)) {
			b <- 1/(nrow(data)-1)
			abline(h=(nrow(data)-row.separator)*b-b/2,lty="dashed",col="gray")
		}
		if (!is.null(col.separator)) {
			b <- 1/(ncol(data)-1)
			abline(v=col.separator*b-b/2,lty="dashed",col="gray")
		}
		#add axis labels (top = columns, left = rows)
		axis(3,at=seq(0,1,length.out=ncol(data)),labels=col.names,cex.axis=cex)
		axis(2,at=seq(0,1,length.out=nrow(data)),labels=rev(row.names),cex.axis=cex)
		#restore graphics parameters
		par(op)
		#LEGEND
		#set margins and label orientation for legend
		op <- par(las=1,mar=c(2,1,4,4))
		#create a new plot that will become the legend
		plot(0,type="n",xlim=c(0,1),ylim=c(0,ncolors),axes=FALSE,ylab="",xlab="")
		#draw colored rectangles along the y axis of the new plot
		rect(0,0:(ncolors-1),1,1:ncolors,col=colors,border=NA)
		#add an axis label that associates the rectangles with the values, round them to 2 decimal digits
		labels <- signif(seq(min(data),max(data),length.out=ncolors),2)
		if (log) {
			labels <- parse(text=sprintf("10^%s",labels))
		}
		axis(4,at=(1:ncolors)-.5,labels=labels,cex.axis=cex)
		#restore graphics parameters
		par(op)
	}
	structure(list(
		heatmap=heatmap
	),class="yogiplot")
}
###
# This function computes the best 2D layout for a given number
# of images to be drawn together.
# n = number of images
# returns: a vector of two integers representing rows and columns
# of the best layout.
#
# Find the most economical rows x cols grid able to hold n plots.
# Candidate side lengths are taken within +/- l of ceiling(sqrt(n)),
# restricted to cols >= rows, and scored by wasted cells; layouts too
# small for n are ruled out with an infinite score.  Returns a named
# vector c(rows=..., cols=...).
best.grid <- function(n,l=1) {
	side <- ceiling(sqrt(n))
	candidates <- do.call(rbind, lapply((side - l):(side + l), function(nr) {
		nc <- nr:(side + l)
		waste <- nr * nc - n
		cbind(rows = nr, cols = nc, fit = ifelse(waste < 0, Inf, waste))
	}))
	candidates[which.min(candidates[, "fit"]), c("rows", "cols")]
}
| /lib/libyogiplot.R | no_license | Harleymount/Ensminger_BFG | R | false | false | 2,548 | r | #!/usr/bin/Rscript
new.yogi.plotter <- function() {
###
# this function draws a heatmap.
#
heatmap <- function(data,cex=.5,color.stops=c("white","royalblue3"),
row.names=rownames(data),col.names=colnames(data),log=FALSE,
col.separator=NULL, row.separator=NULL) {
#define the number of colors to use
ncolors <- 30
#create color palette
colors <- colorRampPalette(color.stops)(ncolors)
#SET UP PLOT LAYOUT
#set up layout with main plot taking 90% of the space and legend 10%
layout(matrix(c(1,2),nrow=1),widths=c(9,1))
#MAIN PLOT
if (log) {
data <- log10(data)
}
#set margins for the main plot, and set axis labels to perpendicular mode
op <- par(mar=c(2,6,6,0),las=2)
#draw heatmap with correct orientation
image(t(data)[,nrow(data):1], col=colors, axes=FALSE)
#draw separators
if (!is.null(row.separator)) {
b <- 1/(nrow(data)-1)
abline(h=(nrow(data)-row.separator)*b-b/2,lty="dashed",col="gray")
}
if (!is.null(col.separator)) {
b <- 1/(ncol(data)-1)
abline(v=col.separator*b-b/2,lty="dashed",col="gray")
}
#add axis labels
axis(3,at=seq(0,1,length.out=ncol(data)),labels=col.names,cex.axis=cex)
axis(2,at=seq(0,1,length.out=nrow(data)),labels=rev(row.names),cex.axis=cex)
#restore graphics parameters
par(op)
#LEGEND
#set margins and label orientation for legend
op <- par(las=1,mar=c(2,1,4,4))
#create a new plot that will become the legend
plot(0,type="n",xlim=c(0,1),ylim=c(0,ncolors),axes=FALSE,ylab="",xlab="")
#draw colored rectangles along the y axis of the new plot
rect(0,0:(ncolors-1),1,1:ncolors,col=colors,border=NA)
#add an axis label that associates the rectangles with the values, round them to 2 decimal digits
labels <- signif(seq(min(data),max(data),length.out=ncolors),2)
if (log) {
labels <- parse(text=sprintf("10^%s",labels))
}
axis(4,at=(1:ncolors)-.5,labels=labels,cex.axis=cex)
#restore graphics parameters
par(op)
}
structure(list(
heatmap=heatmap
),class="yogiplot")
}
###
# This function computes the best 2D layout for a given number
# of images to be drawn together.
# n = number of images
# returns: a vector of two integers representing rows and columns
# of the best layout.
#
best.grid <- function(n,l=1) {
r <- ceiling(sqrt(n))
factors <- do.call(rbind,lapply((r-l):(r+l),function(rows) {
do.call(rbind,lapply(rows:(r+l), function(cols) {
val <- rows*cols - n
c(rows=rows, cols=cols, fit=ifelse(val<0, Inf, val) )
}))
}))
factors[which.min(factors[,"fit"]),c("rows","cols")]
}
|
# edfReader package
#
# Purpose : Reading .edf(+)/.bdf(+) files
#
# Copyright : (C) 2015-2016, Vis Consultancy, the Netherlands
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with edfReader package for R. If not, see <http://www.gnu.org/licenses/>.
#
# History :
# Feb16 - Created, version 1.0.0
# Mar16 - Version 1.1.0
# Apr16 - Version 1.1.1, no changes
#
#' edfReader: A package for reading EDF(+) and BDF(+) files
#'
#' The edfReader package reads EDF(+) and BDF(+) files in two steps: first the header is read
#' and then the signals (using the header object as an parameter).
#'
#' @section edfReader functions:
#' \tabular{lll}{
#' \code{\link{readEdfHeader}} \tab \verb{ } \tab to read the file header with basic info about the signals \cr
#' \code{\link{readEdfSignals}} \tab \tab to read one or more recorded signals
#' }
#' The objects returned by these functions are described in the package vignette.
#'
#' @section Details:
#' \tabular{lll}{
#' Package \tab \verb{ } \tab edfReader \cr
#' Version \tab \tab 1.1.1 \cr
#' Date \tab \tab April 17, 2016 \cr
#' Licence \tab \tab GPL version 3 or newer \cr
#' GitHub \tab \tab https://github.com/Pisca46/edfReader \cr
#' Author \tab \tab Jan Vis, Vis Consultancy \cr
#' E-mail \tab \tab jan@visconsultancy.eu \cr
#' Web \tab \tab visconsultancy.eu \cr
#' }
#' @section Acknowledgement:
#' This package has used code from:
#' \itemize{
#' \item edf.R version 0.3 (27-11-2013) from Fabien Feschet, http://data-auvergne.fr/cloud/index.php/s/WYmFEDZylFWJzNs
#' \item the work of Henelius Andreas as of July 2015, https://github.com/bwrc/edf
#' }
#' @seealso
#' For the vignette use the console command:\cr
#' \code{vignette('edfReaderVignette', package = "edfReader")}\cr
#' or click on \code{Index} below.
#
#' @aliases bdfReader
#' @docType package
#' @name edfReader
NULL
| /R/edfReader.R | no_license | vagnerfonseca/edf | R | false | false | 2,597 | r | # edfReader package
#
# Purpose : Reading .edf(+)/.bdf(+) files
#
# Copyright : (C) 2015-2016, Vis Consultancy, the Netherlands
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with edfReader package for R. If not, see <http://www.gnu.org/licenses/>.
#
# History :
# Feb16 - Created, version 1.0.0
# Mar16 - Version 1.1.0
# Apr16 - Version 1.1.1, no changes
#
#' edfReader: A package for reading EDF(+) and BDF(+) files
#'
#' The edfReader package reads EDF(+) and BDF(+) files in two steps: first the header is read
#' and then the signals (using the header object as an parameter).
#'
#' @section edfReader functions:
#' \tabular{lll}{
#' \code{\link{readEdfHeader}} \tab \verb{ } \tab to read the file header with basic info about the signals \cr
#' \code{\link{readEdfSignals}} \tab \tab to read one or more recorded signals
#' }
#' The objects returned by these functions are described in the package vignette.
#'
#' @section Details:
#' \tabular{lll}{
#' Package \tab \verb{ } \tab edfReader \cr
#' Version \tab \tab 1.1.1 \cr
#' Date \tab \tab April 17, 2016 \cr
#' Licence \tab \tab GPL version 3 or newer \cr
#' GitHub \tab \tab https://github.com/Pisca46/edfReader \cr
#' Author \tab \tab Jan Vis, Vis Consultancy \cr
#' E-mail \tab \tab jan@visconsultancy.eu \cr
#' Web \tab \tab visconsultancy.eu \cr
#' }
#' @section Acknowledgement:
#' This package has used code from:
#' \itemize{
#' \item edf.R version 0.3 (27-11-2013) from Fabien Feschet, http://data-auvergne.fr/cloud/index.php/s/WYmFEDZylFWJzNs
#' \item the work of Henelius Andreas as of July 2015, https://github.com/bwrc/edf
#' }
#' @seealso
#' For the vignette use the console command:\cr
#' \code{vignette('edfReaderVignette', package = "edfReader")}\cr
#' or click on \code{Index} below.
#
#' @aliases bdfReader
#' @docType package
#' @name edfReader
NULL
|
# S4 initializer for 'windrose' (an oce-style object): stamps the
# processing log with the creation time and a creation message, then
# returns the object as S4 initialize methods must.
setMethod(f="initialize",
          signature="windrose",
          definition=function(.Object) {
              .Object@processingLog$time <- as.POSIXct(Sys.time())
              .Object@processingLog$value <- "create 'windrose' object"
              return(.Object)
          })
# summary() for 'windrose': prints the number of angle bins and their
# spacing (from the first two theta values), echoes the processing log,
# and returns NULL invisibly.  The commented-out section once printed
# per-field three-number summaries; kept for reference.
setMethod(f="summary",
          signature="windrose",
          definition=function(object, ...) {
              cat("Windrose data\n-------------\n\n")
              n <- length(object@data$theta)
              dtheta <- abs(diff(object@data$theta[1:2]))
              cat("* Have n=", n, "angles, separated by dtheta=", dtheta,"\n\n")
              ##cat("* Statistics by angle::\n\n", ...)
              ##threes <- matrix(nrow=2, ncol=3)
              ##threes[1,] <- threenum(object@data$theta)
              ##threes[2,] <- threenum(object@data$count)
              ##colnames(threes) <- c("Min.", "Mean", "Max.")
              ##rownames(threes) <- c("theta", "count")
              ##print(threes)
              ##cat('\n')
              processingLogShow(object)
              invisible(NULL)
          })
# Accessor for 'windrose' parts by name: "theta" (bin centres, degrees),
# "count" (observations per bin) or "fives" (per-bin Tukey five-number
# summary matrix).  Any other name is rejected by match.arg(), so the
# final stop() is unreachable and only defensive.
setMethod(f="[[",
          signature="windrose",
          definition=function(x, i, j, drop) {
              ## 'j' can be for times, as in OCE
              ##if (!missing(j)) cat("j=", j, "*****\n")
              i <- match.arg(i, c("theta", "count", "fives"))
              if (i == "theta") return(x@data$theta)
              else if (i == "count") return(x@data$count)
              else if (i == "fives") return(x@data$fives)
              else stop("cannot access \"", i, "\"") # cannot get here
          })
# Build a 'windrose' object from wind components.
#
#   x, y   - eastward / northward components, or a 'met' object (in which
#            case its "u" and "v" fields are used and y is ignored)
#   dtheta - angular bin width in degrees
#   debug  - oce-style debugging verbosity
#
# Pairs with a missing component are dropped.  Each remaining observation
# is assigned to one of round(360/dtheta) angle bins; per bin we store the
# count, the mean speed, and the Tukey five-number summary of speed.
#
# Fixes over the original: the unused `L <- max(R, ...)` is removed, and
# the per-bin mean vector is named `means` instead of `mean`, which
# shadowed base::mean (it only worked because R skips non-function
# bindings when resolving a call).
as.windrose <- function(x, y, dtheta = 15, debug=getOption("oceDebug"))
{
    oceDebug(debug, "as.windrose(x, y, dtheta=", dtheta, ", debug=", debug, ") {\n", sep="", unindent=1)
    if (inherits(x, "met")) {
        tmp <- x
        x <- tmp[["u"]]
        y <- tmp[["v"]]
    }
    ok <- !is.na(x) & !is.na(y)
    x <- x[ok]
    y <- y[ok]
    xlen <- length(x)
    pi <- atan2(1, 1) * 4
    dt <- dtheta * pi / 180
    dt2 <- dt / 2
    R <- sqrt(x^2 + y^2)
    angle <- atan2(y, x)
    nt <- round(2 * pi / dt)
    count <- means <- vector("numeric", nt)
    fives <- matrix(0, nt, 5)
    theta <- seq(-pi+dt2, pi-dt2, length.out=nt)
    ## The bin-detection code was faulty until 2012-02-07. This
    ## was pointed out by Alex Deckmyn, who also suggested the
    ## present solution. His issue reports, available on
    ## github.com/dankelley/oce/issues, are a model of
    ## patience and insight.
    ai <- 1 + floor((angle+pi)/dt)
    ai <- (ai-1)%%nt + 1 # clean up problems (thanks, adeckmyn at github!!)
    if (min(ai) < 1)
        stop("problem setting up bins (ai<1)")
    if (max(ai) > nt)
        stop("problem setting up bins (ai>xlen)")
    for (i in seq_len(nt)) {
        inside <- ai==i
        oceDebug(debug, sum(inside), "counts for angle category", i,
                 "(", round(180/pi*(theta[i]-dt2), 4), "to", round(180/pi*(theta[i]+dt2), 4), "deg)\n")
        count[i] <- sum(inside)
        means[i] <- mean(R[inside], na.rm=TRUE)
        fives[i,] <- fivenum(R[inside])
    }
    if (sum(count) != xlen)
        stop("miscount in angles")
    res <- new('windrose')
    res@data <- list(n=length(x), x.mean=mean(x, na.rm=TRUE), y.mean=mean(y, na.rm=TRUE), theta=theta*180/pi,
                     count=count, mean=means, fives=fives)
    res@metadata <- list(dtheta=dtheta)
    res@processingLog <- processingLog(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
    oceDebug(debug, "} # as.windrose()\n", sep="", unindent=1)
    res
}
# Plot a 'windrose' object as a compass rose.
#
#   type       - what petal length encodes: per-bin "count", "mean" speed,
#                "median" speed, or a "fivenum" (Tukey summary) display
#   convention - "meteorological" uses the stored angles as-is;
#                "oceanographic" rotates them by 180 degrees
#   col        - four colours: petal fill, secondary fill (fivenum bands),
#                petal border, and frame/radii
#
# Petals are scaled so the largest statistic reaches the unit circle; that
# maximum is reported in the title.
setMethod(f="plot",
          signature=signature("windrose"),
          definition=function(x,
                              type=c("count","mean", "median", "fivenum"),
                              convention=c("meteorological", "oceanographic"),
                              mgp=getOption("oceMgp"),
                              mar=c(mgp[1], mgp[1], 1+mgp[1], mgp[1]),
                              col,
                              ...)
          {
              if (!inherits(x, "windrose"))
                  stop("method is only for objects of class '", "windrose", "'")
              type <- match.arg(type)
              convention <- match.arg(convention)
              nt <- length(x@data$theta)
              pi <- 4 * atan2(1, 1)
              if (convention == "meteorological")
                  t <- x@data$theta * pi / 180 # in radians
              else
                  t <- pi + x@data$theta * pi / 180 # in radians
              dt <- t[2] - t[1]
              dt2 <- dt / 2
              # Plot setup: square window centred on the rose, padded to
              # the device aspect ratio.
              par(mgp=mgp, mar=mar)
              plot.new()
              pin <- par("pin")
              xlim <- ylim <- c(-1, 1)
              if (pin[1] > pin[2])
                  xlim <- (pin[1]/pin[2]) * xlim
              else ylim <- (pin[2]/pin[1]) * ylim
              plot.window(xlim, ylim, "", asp = 1)
              if (missing(col))
                  col <- c("red", "pink", "blue", "darkgray")
              else {
                  if (length(col) != 4)
                      stop("'col' should be a list of 4 colours")
              }
              # Draw circle, bin-boundary radii, and compass labels
              tt <- seq(0, 2*pi, length.out=100)
              px <- cos(tt)
              py <- sin(tt)
              lines(px, py, col=col[4])
              for (i in 1:nt) {
                  lines(c(0, cos(t[i] - dt2)), c(0, sin(t[i] - dt2)), lwd=0.5, col=col[4])
              }
              text( 0, -1, "S", pos=1)
              text(-1,  0, "W", pos=2)
              text( 0,  1, "N", pos=3)
              text( 1,  0, "E", pos=4)
              ## Draw rose in a given type
              if (type == "count") {
                  max <- max(x@data$count, na.rm=TRUE)
                  for (i in 1:nt) {
                      r <- x@data$count[i] / max
                      xlist <- c(0, r * cos(t[i] - dt2), r * cos(t[i] + dt2), 0)
                      ylist <- c(0, r * sin(t[i] - dt2), r * sin(t[i] + dt2), 0)
                      polygon(xlist, ylist,col=col[1], border=col[3])
                  }
                  title(paste("Counts (max ", max, ")", sep=""))
              } else if (type == "mean") {
                  max <- max(x@data$mean, na.rm=TRUE)
                  for (i in 1:nt) {
                      r <- x@data$mean[i] / max
                      xlist <- c(0, r * cos(t[i] - dt2), r * cos(t[i] + dt2), 0)
                      ylist <- c(0, r * sin(t[i] - dt2), r * sin(t[i] + dt2), 0)
                      polygon(xlist, ylist,col=col[1], border=col[3])
                  }
                  title(paste("Means (max ", sprintf(max, fmt="%.3g"), ")", sep=""))
              } else if (type == "median") {
                  max <- max(x@data$fives[,5], na.rm=TRUE)
                  for (i in 1:nt) {
                      r <- x@data$fives[i,3] / max
                      xlist <- c(0, r * cos(t[i] - dt2), r * cos(t[i] + dt2), 0)
                      ylist <- c(0, r * sin(t[i] - dt2), r * sin(t[i] + dt2), 0)
                      polygon(xlist, ylist,col=col[1], border=col[3])
                  }
                  title(paste("Medians (max ", sprintf(max,fmt="%.3g"), ")", sep=""))
              } else if (type == "fivenum") {
                  max <- max(x@data$fives[,3], na.rm=TRUE)
                  for (i in 1:nt) {
                      for (j in 2:3) {
                          tm <- t[i] - dt2
                          tp <- t[i] + dt2
                          r0 <- x@data$fives[i, j-1] / max
                          r <- x@data$fives[i, j ] / max
                          xlist <- c(r0 * cos(tm), r * cos(tm), r * cos(tp), r0 * cos(tp))
                          ylist <- c(r0 * sin(tm), r * sin(tm), r * sin(tp), r0 * sin(tp))
                          thiscol <- col[c(2,1,1,2)][j-1]
                          polygon(xlist, ylist, col=thiscol, border=col[4])
                      }
                      # BUG FIX: the data slot is 'fives' (see as.windrose);
                      # the original 'x@data$fivenum' is NULL and errored here.
                      r <- x@data$fives[i, 3] / max
                      lines(c(r * cos(tm), r * cos(tp)), c(r * sin(tm), r * sin(tp)), col="blue", lwd=2)
                  }
                  # NOTE(review): only columns 1:3 (min, lower hinge, median)
                  # are drawn; confirm whether the upper bands were intended.
                  title(paste("Fivenum (max ", sprintf(max,fmt="%.3g"), ")", sep=""))
              }
              invisible()
          })
| /R/windrose.R | no_license | landsat/oce | R | false | false | 8,546 | r | setMethod(f="initialize",
signature="windrose",
definition=function(.Object) {
.Object@processingLog$time <- as.POSIXct(Sys.time())
.Object@processingLog$value <- "create 'windrose' object"
return(.Object)
})
setMethod(f="summary",
signature="windrose",
definition=function(object, ...) {
cat("Windrose data\n-------------\n\n")
n <- length(object@data$theta)
dtheta <- abs(diff(object@data$theta[1:2]))
cat("* Have n=", n, "angles, separated by dtheta=", dtheta,"\n\n")
##cat("* Statistics by angle::\n\n", ...)
##threes <- matrix(nrow=2, ncol=3)
##threes[1,] <- threenum(object@data$theta)
##threes[2,] <- threenum(object@data$count)
##colnames(threes) <- c("Min.", "Mean", "Max.")
##rownames(threes) <- c("theta", "count")
##print(threes)
##cat('\n')
processingLogShow(object)
invisible(NULL)
})
setMethod(f="[[",
signature="windrose",
definition=function(x, i, j, drop) {
## 'j' can be for times, as in OCE
##if (!missing(j)) cat("j=", j, "*****\n")
i <- match.arg(i, c("theta", "count", "fives"))
if (i == "theta") return(x@data$theta)
else if (i == "count") return(x@data$count)
else if (i == "fives") return(x@data$fives)
else stop("cannot access \"", i, "\"") # cannot get here
})
as.windrose <- function(x, y, dtheta = 15, debug=getOption("oceDebug"))
{
oceDebug(debug, "as.windrose(x, y, dtheta=", dtheta, ", debug=", debug, ") {\n", sep="", unindent=1)
if (inherits(x, "met")) {
tmp <- x
x <- tmp[["u"]]
y <- tmp[["v"]]
}
ok <- !is.na(x) & !is.na(y)
x <- x[ok]
y <- y[ok]
xlen <- length(x)
pi <- atan2(1, 1) * 4
dt <- dtheta * pi / 180
dt2 <- dt / 2
R <- sqrt(x^2 + y^2)
angle <- atan2(y, x)
L <- max(R, na.rm=TRUE)
nt <- round(2 * pi / dt)
count <- mean <- vector("numeric", nt)
fives <- matrix(0, nt, 5)
theta <- seq(-pi+dt2, pi-dt2, length.out=nt)
## The bin-detection code was faulty until 2012-02-07. This
## was pointed out by Alex Deckmyn, who also suggested the
## present solution. His issue reports, available on
## github.com/dankelley/oce/issues, are a model of
## patience and insight.
ai <- 1 + floor((angle+pi)/dt)
ai <- (ai-1)%%nt + 1 # clean up problems (thanks, adeckmyn at github!!)
if (min(ai) < 1)
stop("problem setting up bins (ai<1)")
if (max(ai) > nt)
stop("problem setting up bins (ai>xlen)")
for (i in 1:nt) {
inside <- ai==i
oceDebug(debug, sum(inside), "counts for angle category", i,
"(", round(180/pi*(theta[i]-dt2), 4), "to", round(180/pi*(theta[i]+dt2), 4), "deg)\n")
count[i] <- sum(inside)
mean[i] <- mean(R[inside], na.rm=TRUE)
fives[i,] <- fivenum(R[inside])
}
if (sum(count) != xlen)
stop("miscount in angles")
res <- new('windrose')
res@data <- list(n=length(x), x.mean=mean(x, na.rm=TRUE), y.mean=mean(y, na.rm=TRUE), theta=theta*180/pi,
count=count, mean=mean, fives=fives)
res@metadata <- list(dtheta=dtheta)
res@processingLog <- processingLog(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
oceDebug(debug, "} # as.windrose()\n", sep="", unindent=1)
res
}
setMethod(f="plot",
          signature=signature("windrose"),
          definition=function(x,
                              type=c("count","mean", "median", "fivenum"),
                              convention=c("meteorological", "oceanographic"),
                              mgp=getOption("oceMgp"),
                              mar=c(mgp[1], mgp[1], 1+mgp[1], mgp[1]),
                              col,
                              ...)
          {
              ## Draw a windrose object as a polar rose diagram.
              ##
              ## type: what each sector's radius shows -- the number of
              ##   observations, the mean speed, the median speed, or bands
              ##   built from the five-number summaries in x@data$fives.
              ## convention: "oceanographic" rotates every sector 180 degrees
              ##   relative to "meteorological".
              ## col: four colours -- main fill, secondary fill (fivenum
              ##   bands), border, and circle/radii frame.
              if (!inherits(x, "windrose"))
                  stop("method is only for objects of class '", "windrose", "'")
              type <- match.arg(type)
              convention <- match.arg(convention)
              nt <- length(x@data$theta)
              pi <- 4 * atan2(1, 1)
              if (convention == "meteorological")
                  t <- x@data$theta * pi / 180 # in radians
              else
                  t <- pi + x@data$theta * pi / 180 # in radians (rotated 180 deg)
              dt <- t[2] - t[1]  # angular width of one sector
              dt2 <- dt / 2
              # Plot setup: pad the shorter device dimension so the unit
              # circle is drawn with a square aspect ratio
              par(mgp=mgp, mar=mar)
              plot.new()
              pin <- par("pin")
              xlim <- ylim <- c(-1, 1)
              if (pin[1] > pin[2])
                  xlim <- (pin[1]/pin[2]) * xlim
              else ylim <- (pin[2]/pin[1]) * ylim
              plot.window(xlim, ylim, "", asp = 1)
              if (missing(col))
                  col <- c("red", "pink", "blue", "darkgray")
              else {
                  if (length(col) != 4)
                      stop("'col' should be a list of 4 colours")
              }
              # Draw circle, sector radii, and compass labels
              tt <- seq(0, 2*pi, length.out=100)
              px <- cos(tt)
              py <- sin(tt)
              lines(px, py, col=col[4])
              for (i in 1:nt) {
                  lines(c(0, cos(t[i] - dt2)), c(0, sin(t[i] - dt2)), lwd=0.5, col=col[4])
              }
              text( 0, -1, "S", pos=1)
              text(-1, 0, "W", pos=2)
              text( 0, 1, "N", pos=3)
              text( 1, 0, "E", pos=4)
              ## Draw rose in a given type; radii are normalized by the
              ## largest value so the rose fits inside the unit circle
              if (type == "count") {
                  max <- max(x@data$count, na.rm=TRUE)
                  for (i in 1:nt) {
                      r <- x@data$count[i] / max
                      ##cat("t=", t[i], " r=", r, "\n")
                      xlist <- c(0, r * cos(t[i] - dt2), r * cos(t[i] + dt2), 0)
                      ylist <- c(0, r * sin(t[i] - dt2), r * sin(t[i] + dt2), 0)
                      polygon(xlist, ylist,col=col[1], border=col[3])
                  }
                  title(paste("Counts (max ", max, ")", sep=""))
              } else if (type == "mean") {
                  max <- max(x@data$mean, na.rm=TRUE)
                  for (i in 1:nt) {
                      r <- x@data$mean[i] / max
                      ##cat("t=", t[i], " r=", r, "\n")
                      xlist <- c(0, r * cos(t[i] - dt2), r * cos(t[i] + dt2), 0)
                      ylist <- c(0, r * sin(t[i] - dt2), r * sin(t[i] + dt2), 0)
                      polygon(xlist, ylist,col=col[1], border=col[3])
                  }
                  title(paste("Means (max ", sprintf(max, fmt="%.3g"), ")", sep=""))
              } else if (type == "median") {
                  # normalize by the largest observed maximum (column 5)
                  max <- max(x@data$fives[,5], na.rm=TRUE)
                  for (i in 1:nt) {
                      r <- x@data$fives[i,3] / max  # column 3 = median
                      ##cat("t=", t[i], " r=", r, "\n")
                      xlist <- c(0, r * cos(t[i] - dt2), r * cos(t[i] + dt2), 0)
                      ylist <- c(0, r * sin(t[i] - dt2), r * sin(t[i] + dt2), 0)
                      polygon(xlist, ylist,col=col[1], border=col[3])
                  }
                  title(paste("Medians (max ", sprintf(max,fmt="%.3g"), ")", sep=""))
              } else if (type == "fivenum") {
                  max <- max(x@data$fives[,3], na.rm=TRUE)
                  for (i in 1:nt) {
                      # bands between consecutive five-number values:
                      # min->lower hinge (col[2]) and lower hinge->median (col[1])
                      for (j in 2:3) {
                          tm <- t[i] - dt2
                          tp <- t[i] + dt2
                          r0 <- x@data$fives[i, j-1] / max
                          r <- x@data$fives[i, j ] / max
                          xlist <- c(r0 * cos(tm), r * cos(tm), r * cos(tp), r0 * cos(tp))
                          ylist <- c(r0 * sin(tm), r * sin(tm), r * sin(tp), r0 * sin(tp))
                          thiscol <- col[c(2,1,1,2)][j-1]
                          polygon(xlist, ylist, col=thiscol, border=col[4])
                      }
                      ## Median line across the sector.  BUG FIX: the data slot
                      ## stores "fives" (see every other reference above), not
                      ## "fivenum", so x@data$fivenum was NULL and this failed.
                      ## tm/tp do not depend on j, so the values left over from
                      ## the inner loop are still the sector edges.
                      r <- x@data$fives[i, 3] / max
                      lines(c(r * cos(tm), r * cos(tp)), c(r * sin(tm), r * sin(tp)), col="blue", lwd=2)
                  }
                  title(paste("Fivenum (max ", sprintf(max,fmt="%.3g"), ")", sep=""))
              }
              invisible()
          })
|
\name{get.MirType}
\alias{get.MirType}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{get.MirType}
\description{
Return the mir type string from the object
}
\usage{
get.MirType(theObject)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{theObject}{object returned from one of the getMirs_Metadata functions}
}
\details{
Return the mir type string from the object
}
\value{
Return the mir type string from the object
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/get.MirType.Rd | no_license | yanch86/TCGAGeneReport | R | false | false | 911 | rd | \name{get.MirType}
\alias{get.MirType}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{get.MirType}
\description{
Return the mir type string from the object
}
\usage{
get.MirType(theObject)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{theObject}{object returned from one of the getMirs_Metatdata functions}
}
\details{
Return the mir type string from the object
}
\value{
Return the mir type string from the object
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
library(dplyr)
library(lubridate)
library(ggplot2)
library(ir)
library(LTRCtrees)
library(survival)
library(rpart.plot)
library(partykit)
# Load, format =====
# TK <- read.csv("~/Desktop/ODG/IngersolRand/NA_DTF_wPREDICTORS_3.csv")
# names(TK)
# devtools::use_data(TK_AG, pkg = "ir")
# TK <- TK %>%
#   mutate(mfg_date = ymd_hms(mfg_date),
#          serial = as.character(serial)) %>%
#   arrange(serial, months_in_field) %>%
#   filter(months_in_field > 0)
# Format to Anderson-Gill style ======
# Add 'first' and 'last' indicator columns to TK
# i.e. indicator first/last obs. of that serial #
# Vectorized replacement for the old element-by-element loop, which
# (a) seeded the vector with the *string* "TRUE", silently coercing the
#     whole indicator column to character,
# (b) grew the vector one element at a time, and
# (c) iterated 2:nrow(TK), which runs backwards (2, 1) on a 1-row table.
n_obs <- nrow(TK)
first_serial <- c(TRUE, TK$serial[-1] != TK$serial[-n_obs])
TK$first_serial <- first_serial
# last obs. of each serial is the row just before the next serial's first
TK$last <- c(first_serial[-1], TRUE)
rm(n_obs, first_serial)
# TK <- TK %>%
#   mutate(
#     time1 = ifelse(first_serial, 0, lag(months_in_field)),
#     time2 = months_in_field
#     )
# ir::TK_AG -- properly Anderson-Gill formatted data
# fit LTRC model ======
data(TK_AG)
fit.cox <- coxph(
  Surv(time1, time2, event) ~ max_return_z1 + min_return_z1,
  data = TK_AG)
LTRCART.fit <- LTRCART(
  Surv(time1, time2, event) ~ max_return_z1 + min_return_z1 + max_ambient + min_ambient,
  data = TK_AG) # operative function--> rpart::rpart(...)
LTRCIT.fit <- LTRCIT(
  Surv(time1, time2, event) ~ max_return_z1 + min_return_z1 + max_ambient + min_ambient,
  data = TK_AG) # # operative function--> partykit::ctree(...)
fit.cox
rpart.plot.version1(LTRCART.fit, type = 0)
plot(LTRCIT.fit)
# type = "prob": list of predicted KM curves
# KM_curves <- predict(LTRCIT.fit, newdata = , type = "prob")
# type = "response": predicted median survival time
# predict(LTRCIT.fit, newdata = , type = "response")
| /OpenDataGroup/IR/ThermoKing_LTRCtrees.R | no_license | cwcomiskey/Misc | R | false | false | 1,808 | r | library(dplyr)
library(lubridate)
library(ggplot2)
library(ir)
library(LTRCtrees)
library(survival)
library(rpart.plot)
library(partykit)
# Load, format =====
# TK <- read.csv("~/Desktop/ODG/IngersolRand/NA_DTF_wPREDICTORS_3.csv")
# names(TK)
# devtools::use_data(TK_AG, pkg = "ir")
# TK <- TK %>%
# mutate(mfg_date = ymd_hms(mfg_date),
# serial = as.character(serial)) %>%
# arrange(serial, months_in_field) %>%
# filter(months_in_field > 0)
# Format to Anderson-Gill style ======
# Add 'first' and 'last' indicator columns to TK
# i.e. indicator first/last obs. of that serial #
for(i in 2:dim(TK)[1]){
if(i == 2) first_serial <- "TRUE"
first_serial[i] <- !(TK$serial[i] == TK$serial[i-1])
if(i == dim(TK)[1]){
TK$first_serial <- first_serial
TK$last <- c(first_serial[-1], TRUE)
rm(i, first_serial)
}
}
# TK <- TK %>%
# mutate(
# time1 = ifelse(first_serial, 0, lag(months_in_field)),
# time2 = months_in_field
# )
# ir::TK_AG -- properly Anderson-Gill formatted data
# fit LTRC model ======
data(TK_AG)
fit.cox <- coxph(
Surv(time1, time2, event) ~ max_return_z1 + min_return_z1,
data = TK_AG)
LTRCART.fit <- LTRCART(
Surv(time1, time2, event) ~ max_return_z1 + min_return_z1 + max_ambient + min_ambient,
data = TK_AG) # operative function--> rpart::rpart(...)
LTRCIT.fit <- LTRCIT(
Surv(time1, time2, event) ~ max_return_z1 + min_return_z1 + max_ambient + min_ambient,
data = TK_AG) # # operative function--> partykit::ctree(...)
fit.cox
rpart.plot.version1(LTRCART.fit, type = 0)
plot(LTRCIT.fit)
# type = “prob”: list of predicted KM curves
# KM_curves <- predict(LTRCIT.fit, newdata = , type = "prob")
# type = "response": predicted median survival time
# predict(LTRCIT.fit, newdata = , type = "response")
|
#' Find coefficients for blm
#'
#' This function returns the fitted parameters of the model: the
#' covariance matrix \code{Sigma} and the mean \code{mean} stored in
#' the fitted object.
#'
#' @param object A blm model.
#' @param ... Unused; present for compatibility with the \code{coef} generic.
#' @return A list with components \code{Sigma} and \code{mean}.
#'
#' @export
coef.blm = function(object, ...) {
  # NOTE: the roxygen block above previously contained a curly apostrophe
  # ("#’"), which is a plain comment and broke the contiguous "#'" block.
  list(Sigma = object$Sigma, mean = object$mean)
}
| /R/coef.R | no_license | steinunngroa/blm | R | false | false | 294 | r | #' Find coefficients for blm
#'
#' This function returns the fitted parameters of the model.
#'
#' @param object A blm model
#' @param ... Some data
#' @return A list of coefficients
#’
#' @export
coef.blm = function(object, ...) {
return(list(Sigma = object$Sigma, mean = object$mean))
}
|
# Dependencies
library(quantmod)
library(tidyverse)
library(lubridate)

# Total amount to invest and the target allocation ratio per ticker
amount <- 2400
invstrinp <- data.frame(
  Symbol = c("VOO", "VTI", "VXUS", "BND", "QQQ"),
  Ratio = c(0.22, 0.22, 0.16, 0.2, 0.2))
# Sanity check value: allocation ratios should sum to 1
ratio_sum_check <- sum(invstrinp$Ratio)
# Dollar amount allocated to each ticker
invstr <- invstrinp %>% mutate(Amount = amount * Ratio)

# Fetch the current price for each ticker
for (i in seq_len(nrow(invstr))) {
  q <- getQuote(invstr$Symbol[i])
  invstr[i, "Last"] <- q$Last
}
rm(i, q)

# Whole shares affordable per ticker, and the resulting spend
invstr <- invstr %>%
  mutate(Stock = floor(Amount / Last),
         Spend = Stock * Last)
total_spend <- sum(invstr$Spend)
spend_diff <- amount - total_spend
# Realized spend ratio per ticker
invstr <- invstr %>% mutate(SR = round(Spend / total_spend, 2))
| /src/r/finance/investment-strategy-1.R | no_license | orri93/analysis | R | false | false | 709 | r | # Dependencies
library(quantmod)
library(tidyverse)
library(lubridate)
amount <- 2400
invstrinp <- data.frame(
Symbol = c("VOO", "VTI", "VXUS", "BND", "QQQ"),
Ratio = c(0.22, 0.22, 0.16, 0.2, 0.2))
ratio_sum_check = sum(invstrinp$Ratio)
invstr <- invstrinp %>% mutate(Amount = amount * Ratio)
# Fetch Current Prices
for (row in 1:nrow(invstr)) {
quote <- getQuote(invstr$Symbol[row])
invstr[row, 'Last'] = quote$Last
}
rm(row, quote)
invstr <- invstr %>% mutate(Stock = floor(Amount / Last))
invstr <- invstr %>% mutate(Spend = Stock * Last)
total_spend <- sum(invstr$Spend)
spend_diff <- amount - total_spend
invstr <- invstr %>% mutate(SR = round(Spend / total_spend, 2))
|
sbh_calc <-
function(psa,sys="usda",plt_par=TRUE,dat_par=FALSE){
  ## Fit a segmented lognormal particle-size distribution and return the
  ## geometric mean diameter (d_g) and geometric standard deviation
  ## (sigma_g) of the whole distribution.
  ##
  ## psa:     particle-size fractions (%), ordered fine to coarse, one
  ##          value per size class (e.g. clay, silt, sand)
  ## sys:     size-class system; "au2"/"iuss" use the 0.02 mm silt/sand
  ##          split, anything else (default "usda") uses 0.05 mm
  ## plt_par: plot the fitted segment distributions?
  ## dat_par: print d_g and sigma_g of each segment?
  if(sys%in%c("au2","iuss")){
    s0 = 0.00002
    lim = c(0.002,0.02,2)
  }else{
    s0 = 0.00005
    lim = c(0.002,0.05,2)
  }
  sizes=c(s0,lim)  # size-class boundaries (mm), one more than length(psa)
  ###probabilities
  p_3 = 0.9999 ##probability of upper size limit
  p_0 = 0.0001 ##probability of lower size limit
  ##cumulative clay and silt
  probs=c(p_0,cumsum(psa[-length(psa)])/100,p_3)
  probs[which(probs<p_0)]=p_0 ##min probability= 0.0001
  probs[which(probs>p_3)]=p_3 ##max probability= 0.9999
  ###functions (a and b are rebound for each segment in the loop below)
  d_lognorm = function(x) {exp(-(log10(x)-a)^2/(2*b^2))/(b*sqrt(2*pi))}
  integrand0 = function(x) {exp(-(x-a)^2/(2*b^2))/(b*sqrt(2*pi))}
  integrand1 = function(x) {exp(-(x-a)^2/(2*b^2))/(b*sqrt(2*pi))*x}
  integrand2 = function(x) {exp(-(x-a)^2/(2*b^2))/(b*sqrt(2*pi))*x^2}
  ##integrated moments (one entry per valid segment)
  m0=c()
  m1=c()
  m2=c()
  ##run it
  if(plt_par){
    plot(NA,xlim=c(log10(sizes[1]),log10(10)),ylim=c(0,12),yaxs="i",xaxs="i", ylab="Theoretical lognormal prob.", xlab="Grain size (mm)", xaxt="n")
    axis(1,at= -5:1, labels=c("0.00001","0.0001","0.001","0.01","0.1","1.0","10.0"))
    abline(v=log10(sizes[2:(length(sizes))]),lty=2, col="grey")
  }
  if(dat_par){cat("\nSegment d_g\tSeg. sigma_g")} ##d_g and sigma of segment
  for(i in 1:length(psa)){ #i=1
    ##Normal deviates at the segment's size limits
    x1 = log10(sizes[i])
    Y1 = qnorm(probs[i])
    x2 = log10(sizes[i+1])
    Y2 = qnorm(probs[i+1])
    ##calculate values (slope/intercept of the probit line)
    b=(x2-x1)/(Y2-Y1)
    a=-Y1*b+x1
    if(!is.nan(a)&!is.infinite(a)){
      if(dat_par){cat("\n",10^a,"\t",10^b,"\n")} ##d_g and sigma of segment
      ##plot fitted distributions for each segment
      if(plt_par){
        vals=seq(log10(sizes[i]),log10(sizes[i+1]),length.out =1000)
        nums=seq(log10(sizes[1]),log10(100),length.out =1000)
        lines(nums,d_lognorm(10^nums)*10, lty=2) ##dodgy *10 constant, I cannot produce theier results without it
        lines(vals,d_lognorm(10^vals)*10, lwd=2) ##dodgy *10 constant, I cannot produce theier results without it
      }
      m0=c(m0,integrate(integrand0, lower = log10(sizes[i]), upper = log10(sizes[i+1]))$value)
      m1=c(m1,integrate(integrand1, lower = log10(sizes[i]), upper = log10(sizes[i+1]))$value)
      m2=c(m2,integrate(integrand2, lower = log10(sizes[i]), upper = log10(sizes[i+1]))$value)
    }else{
      ##degenerate segment (zero probability mass): skip it.
      ##BUG FIX: this branch referenced undefined "seg_dat" (the
      ##parameter is dat_par), so any degenerate segment crashed.
      if(dat_par){cat("\n",0,"\t",0)} ##d_g and sigma of segment
    }
  }
  ##combine the segment moments into overall d_g and sigma_g
  area=sum(m0)
  a=sum(m1)/area
  b=sqrt(sum(m2)/area-a^2)
  d_g=10^a
  sigma_g=10^b
  return(data.frame("d_g"=d_g,"sigma_g"=sigma_g))
}
}
| /R/sbh_calc.R | no_license | edwardjjones/ShiraziBoersma | R | false | false | 2,622 | r | sbh_calc <-
function(psa,sys="usda",plt_par=T,dat_par=F){
if(sys%in%c("au2","iuss")){
s0 = 0.00002
lim = c(0.002,0.02,2)
}else{
s0 = 0.00005
lim = c(0.002,0.05,2)
}
sizes=c(s0,lim)
###probabilties
p_3 = 0.9999 ##probability of upper size limit
p_0 = 0.0001 ##probability of lower size limit
##cummulative clay and silt
probs=c(p_0,cumsum(psa[-length(psa)])/100,p_3)
probs[which(probs<p_0)]=p_0 ##min probability= 0.0001
probs[which(probs>p_3)]=p_3 ##max probability= 0.9999
###functions
d_lognorm = function(x) {exp(-(log10(x)-a)^2/(2*b^2))/(b*sqrt(2*pi))}
integrand0 = function(x) {exp(-(x-a)^2/(2*b^2))/(b*sqrt(2*pi))}
integrand1 = function(x) {exp(-(x-a)^2/(2*b^2))/(b*sqrt(2*pi))*x}
integrand2 = function(x) {exp(-(x-a)^2/(2*b^2))/(b*sqrt(2*pi))*x^2}
##integrated moments
m0=c()
m1=c()
m2=c()
##run it
if(plt_par){
plot(NA,xlim=c(log10(sizes[1]),log10(10)),ylim=c(0,12),yaxs="i",xaxs="i", ylab="Theoretical lognormal prob.", xlab="Grain size (mm)", xaxt="n")
axis(1,at= -5:1, labels=c("0.00001","0.0001","0.001","0.01","0.1","1.0","10.0"))
abline(v=log10(sizes[2:(length(sizes))]),lty=2, col="grey")
}
if(dat_par){cat("\nSegment d_g\tSeg. sigma_g")} ##d_g and sigma of segment
for(i in 1:length(psa)){ #i=1
##Normal deviates
x1 = log10(sizes[i])
Y1 = qnorm(probs[i])
x2 = log10(sizes[i+1])
Y2 = qnorm(probs[i+1])
##calculate values
b=(x2-x1)/(Y2-Y1)
a=-Y1*b+x1
if(!is.nan(a)&!is.infinite(a)){
if(dat_par){cat("\n",10^a,"\t",10^b,"\n")} ##d_g and sigma of segment
##plot fitted distributions for each segment
if(plt_par){
vals=seq(log10(sizes[i]),log10(sizes[i+1]),length.out =1000)
nums=seq(log10(sizes[1]),log10(100),length.out =1000)
lines(nums,d_lognorm(10^nums)*10, lty=2) ##dodgy *10 constant, I cannot produce theier results without it
lines(vals,d_lognorm(10^vals)*10, lwd=2) ##dodgy *10 constant, I cannot produce theier results without it
}
m0=c(m0,integrate(integrand0, lower = log10(sizes[i]), upper = log10(sizes[i+1]))$value)
m1=c(m1,integrate(integrand1, lower = log10(sizes[i]), upper = log10(sizes[i+1]))$value)
m2=c(m2,integrate(integrand2, lower = log10(sizes[i]), upper = log10(sizes[i+1]))$value)
}else{
if(seg_dat){cat("\n",0,"\t",0)} ##d_g and sigma of segment
}
}
area=sum(m0)
a=sum(m1)/area
b=sqrt(sum(m2)/area-a^2)
d_g=10^a
sigma_g=10^b
return(data.frame("d_g"=d_g,"sigma_g"=sigma_g))
}
|
library(RoughSets)
# example decision tables shipped with the package
data(RoughSetData)
decision.table <- RoughSetData$hiring.dt
## evaluate single reduct
# QuickReduct attribute selection under rough set theory
res.1 <- FS.quickreduct.RST(decision.table)
print(res.1)
## generate new decision table according to the reduct
# project the decision table onto the attributes selected above
new.decTable <- SF.applyDecTable(decision.table, res.1)
print(new.decTable) | /fuzzedpackages/RoughSets/demo/FS.QuickReduct.RST.R | no_license | akhikolla/testpackages | R | false | false | 305 | r | library(RoughSets)
data(RoughSetData)
decision.table <- RoughSetData$hiring.dt
## evaluate single reduct
res.1 <- FS.quickreduct.RST(decision.table)
print(res.1)
## generate new decision table according to the reduct
new.decTable <- SF.applyDecTable(decision.table, res.1)
print(new.decTable) |
#vectools: Advanced Vector Toolkit
#Copyright (C), Abby Spurdle, 2020 to 2021
#This program is distributed without any warranty.
#This program is free software.
#You can modify it and/or redistribute it, under the terms of:
#The GNU General Public License, version 2, or (at your option) any later version.
#You should have received a copy of this license, with R.
#Also, this license should be available at:
#https://cran.r-project.org/web/licenses/GPL-2
#helper: package 2d coordinates as a point list.
#with about.axis missing, the points stay 2d (N = 2, z = NULL);
#with about.axis = "x", "y" or "z", the points become 3d (N = 3) with
#zeros along the named axis and the 2d coordinates in the other two.
.points.vardim = function (x, y, about.axis)
{	zeros = rep (0, length (x) )
	if (missing (about.axis) )
		return (list (x=x, y=y, z=NULL, N=2L) )
	if (about.axis == "x")
		return (list (x=zeros, y=x, z=y, N=3L) )
	if (about.axis == "y")
		return (list (x=x, y=zeros, z=y, N=3L) )
	if (about.axis == "z")
		return (list (x=x, y=y, z=zeros, N=3L) )
	stop ("about.axis needs to be x, y or z")
}
#regular n-sided polygon with vertices at distance d from the origin.
#stagger rotates the polygon by half a vertex step (the default for even n,
#so a square sits flat rather than on a corner).
regPolygon = function (n=4, ..., about.axis, d=1, stagger = (n %% 2 == 0) )
{	.arg.error (...)
	theta0 = if (stagger) pi / 2 + pi / n else pi / 2
	#first vertex, then replicate it about the origin
	vs = (c (d, 0) %|*% brot2 (theta0) ) %|*% eq.brot2 (n)
	vs = .points.vardim (vs [,1], vs [,2], about.axis)
	Polygon (vs$x, vs$y, vs$z, ...)
}
#axis-aligned rectangle with four corner points.
#center: logical, recycled to length 2 -- center that dimension on zero
#side.length: side lengths, recycled to length 2
Rect = function (..., about.axis, center=FALSE, side.length=1, glist = list () )
{	.arg.error (...)
	center = rep_len (center, 2)
	side.length = rep_len (side.length, 2)
	xr = c (0, side.length [1])
	yr = c (0, side.length [2])
	if (center [1])
		xr = xr - side.length [1] / 2
	if (center [2])
		yr = yr - side.length [2] / 2
	#corners in drawing order
	cx = xr [c (1, 1, 2, 2)]
	cy = yr [c (1, 2, 2, 1)]
	ps = .points.vardim (cx, cy, about.axis)
	new ("Rect", D=ps$N, np=4L, glist=glist, data = cbind (ps$x, ps$y, ps$z) )
}
#axis-aligned cuboid assembled from six Rect faces
#center: logical, recycled to length 3 -- center that dimension on zero
#side.length: side lengths, recycled to length 3
Cuboid = function (..., center=FALSE, side.length=1, glist = list () )
{	.arg.error (...)
	center = rep_len (center, 3)
	side.length = rep_len (side.length, 3)
	#container for the six faces
	v = ObjectArray (6)
	gv = new ("Cuboid", CLASS="Rect", D=3L, N=1L, n=6L, names = list (), data=v@data)
	#one base face orthogonal to each axis, sized by the other two dimensions
	rx = Rect (about.axis="x", center = center [2:3], side.length = side.length [2:3], glist=glist)
	ry = Rect (about.axis="y", center = center [-2], side.length = side.length [-2], glist=glist)
	rz = Rect (about.axis="z", center = center [1:2], side.length = side.length [1:2], glist=glist)
	#shift the base faces back by half a side where that dimension is centered
	if (center [1])
		rx = rx %]*% btrl3 (- side.length [1] / 2, 0, 0)
	if (center [2])
		ry = ry %]*% btrl3 (0, - side.length [2] / 2, 0)
	if (center [3])
		rz = rz %]*% btrl3 (0, 0, - side.length [3] / 2)
	#each opposite face is the base face translated by the full side length
	gv [[1]] = rx
	gv [[2]] = rx %]*% btrl3 (side.length [1], 0, 0)
	gv [[3]] = ry
	gv [[4]] = ry %]*% btrl3 (0, side.length [2], 0)
	gv [[5]] = rz
	gv [[6]] = rz %]*% btrl3 (0, 0, side.length [3])
	gv
}
#rectilinear grid from marginal x and y coordinates.
#gv: optional grid values -- a scalar (recycled over the grid) or a
#length (x) by length (y) matrix
rectGrid = function (x, y, gv=NULL, ..., glist = list (), vlist=NULL)
{	.arg.error (...)
	nr = length (x)
	nc = length (y)
	#x replicated down each column, y replicated along each row
	umat = matrix (x, nr, nc)
	vmat = matrix (y, nr, nc, byrow=TRUE)
	if (! is.null (gv) )
	{	if (length (gv) == 1)
			gv = matrix (gv, nr, nc)
		else if (nrow (gv) != nr || ncol (gv) != nc)
			stop ("gv doesn't match x and y")
	}
	Grid (umat, vmat, gv, glist=glist, vlist=vlist)
}
#convenience wrapper: build a rectilinear grid, then render it as a VImage
rectVImage = function (x, y, gv=NULL, ..., tf=FALSE, colm, glist = list () )
{	.arg.error (...)
	g = rectGrid (x, y, gv)
	VImage (g@x, g@y, g@gv, tf=tf, colm=colm, glist=glist)
}
| /R/12_GeomObject_2.r | no_license | cran/vectools | R | false | false | 3,321 | r | #vectools: Advanced Vector Toolkit
#Copyright (C), Abby Spurdle, 2020 to 2021
#This program is distributed without any warranty.
#This program is free software.
#You can modify it and/or redistribute it, under the terms of:
#The GNU General Public License, version 2, or (at your option) any later version.
#You should have received a copy of this license, with R.
#Also, this license should be available at:
#https://cran.r-project.org/web/licenses/GPL-2
.points.vardim = function (x, y, about.axis)
{ d3 = rep (0, length (x) )
if (missing (about.axis) )
list (x=x, y=y, z=NULL, N=2L)
else if (about.axis == "x")
list (x=d3, y=x, z=y, N=3L)
else if (about.axis == "y")
list (x=x, y=d3, z=y, N=3L)
else if (about.axis == "z")
list (x=x, y=y, z=d3, N=3L)
else
stop ("about.axis needs to be x, y or z")
}
regPolygon = function (n=4, ..., about.axis, d=1, stagger = (n %% 2 == 0) )
{ .arg.error (...)
start = pi / 2
if (stagger)
start = start + pi / n
ps = c (d, 0) %|*% brot2 (start)
ps = ps %|*% eq.brot2 (n)
ps = .points.vardim (ps [,1], ps [,2], about.axis)
Polygon (ps$x, ps$y, ps$z, ...)
}
Rect = function (..., about.axis, center=FALSE, side.length=1, glist = list () )
{ .arg.error (...)
center = rep_len (center, 2)
side.length = rep_len (side.length, 2)
x = c (0, side.length [1])
y = c (0, side.length [2])
if (center [1])
x = x - mean (x)
if (center [2])
y = y - mean (y)
x = x [c (1, 1, 2, 2)]
y = y [c (1, 2, 2, 1)]
ps = .points.vardim (x, y, about.axis)
new ("Rect", D=ps$N, np=4L, glist=glist, data = cbind (ps$x, ps$y, ps$z) )
}
Cuboid = function (..., center=FALSE, side.length=1, glist = list () )
{ .arg.error (...)
center = rep_len (center, 3)
side.length = rep_len (side.length, 3)
v = ObjectArray (6)
gv = new ("Cuboid", CLASS="Rect", D=3L, N=1L, n=6L, names = list (), data=v@data)
rx = Rect (about.axis="x", center = center [2:3], side.length = side.length [2:3], glist=glist)
ry = Rect (about.axis="y", center = center [-2], side.length = side.length [-2], glist=glist)
rz = Rect (about.axis="z", center = center [1:2], side.length = side.length [1:2], glist=glist)
if (center [1])
rx = rx %]*% btrl3 (- side.length [1] / 2, 0, 0)
if (center [2])
ry = ry %]*% btrl3 (0, - side.length [2] / 2, 0)
if (center [3])
rz = rz %]*% btrl3 (0, 0, - side.length [3] / 2)
gv [[1]] = rx
gv [[2]] = rx %]*% btrl3 (side.length [1], 0, 0)
gv [[3]] = ry
gv [[4]] = ry %]*% btrl3 (0, side.length [2], 0)
gv [[5]] = rz
gv [[6]] = rz %]*% btrl3 (0, 0, side.length [3])
gv
}
rectGrid = function (x, y, gv=NULL, ..., glist = list (), vlist=NULL)
{ .arg.error (...)
nr = length (x)
nc = length (y)
umat = vmat = matrix (0, nr, nc)
for (j in 1:nc)
umat [,j] = x
for (i in 1:nr)
vmat [i,] = y
if (! is.null (gv) )
{ if (length (gv) == 1)
gv = matrix (gv, nr, nc)
else if (nr == nrow (gv) && nc == ncol (gv) )
NULL
else
stop ("gv doesn't match x and y")
}
Grid (umat, vmat, gv, glist=glist, vlist=vlist)
}
rectVImage = function (x, y, gv=NULL, ..., tf=FALSE, colm, glist = list () )
{ .arg.error (...)
grid = rectGrid (x, y, gv)
VImage (grid@x, grid@y, grid@gv, tf=tf, colm=colm, glist=glist)
}
|
library(readr)
## Reflectance-factor calibration files at 20/48/75/95/99 %
ref20 <- read.csv("~/Team_1/data/Ref_Factor_20.csv")
ref48 <- read.csv("~/Team_1/data/Ref_Factor_48.csv")
ref75 <- read.csv("~/Team_1/data/Ref_Factor_75.csv")
ref95 <- read.csv("~/Team_1/data/Ref_Factor_95.csv")
ref99 <- read.csv("~/Team_1/data/Ref_Factor_99.csv")
#View(ref20)
#colnames(ref20)
# Flag all those rows/observations for which Reflective percentage is above 100
#ref20$Reflect...>100
Ref20_100<-which(ref20$Reflect... >=100)
Ref48_100<-which(ref48$Reflect... >=100)
Ref75_100<-which(ref75$Reflect... >=100)
Ref95_100<-which(ref95$Reflect... >=100)
Ref99_100<-which(ref99$Reflect... >=100)
## Replace all those Radiation for which %Reflection is above or equal 100 with NA
ref20$Rad...Ref..[Ref20_100]<- NA
ref48$Rad...Ref..[Ref48_100]<- NA
ref75$Rad...Ref..[Ref75_100]<- NA
ref95$Rad...Ref..[Ref95_100]<- NA
ref99$Rad...Ref..[Ref99_100]<- NA
## Next we want to keep only wavelength and Rad(ref) in our data set for 20%, 48%, 75%, 95%, and 99%
new_ref20<-ref20[c(1,3)]
new_ref48<-ref48[c(1,3)] # keep first column (wavelength) of all bcs dim is not same for all files after cleaning data
new_ref75<-ref75[c(1,3)]
new_ref95<-ref95[c(1,3)]
new_ref99<-ref99[c(1,3)]
## check
names(new_ref20)
## Now to clean the data a little more, We will replace all the negative values in the Radiation (ref) column with NA
new_ref20[new_ref20 <= 0]<-NA
new_ref48[new_ref48 <= 0]<-NA
new_ref75[new_ref75 <= 0]<-NA
new_ref95[new_ref95 <= 0]<-NA
new_ref99[new_ref99 <= 0]<-NA
## Check dimension of each now
dim(new_ref20)
dim(new_ref48)
dim(new_ref75)
dim(new_ref95)
dim(new_ref99)
## Perfect, all files have the same dimension now
## To make typing easy, lets rename the variables
x20<- new_ref20$Wvl
y20<-new_ref20$Rad...Ref..
x48<- new_ref48$Wvl
y48<-new_ref48$Rad...Ref..
x75<- new_ref75$Wvl
y75<-new_ref75$Rad...Ref..
x95<- new_ref95$Wvl
y95<-new_ref95$Rad...Ref..
## BUG FIX: these two previously read from new_ref95 (copy-paste), so the
## "99%" series silently duplicated the 95% data everywhere below
x99<- new_ref99$Wvl
y99<-new_ref99$Rad...Ref..
## First plot all the files in the same window
library(ggplot2)
library(grid)
library(gridExtra)
library(plotly)
pdf(file = '~/Team_1/results/dataplots.pdf')
## BUG FIX: 'style' is not a plot()/lines() argument; 'type' is.  With
## type = 'p' the series are drawn as points matching the legend's pch.
plot(x20, y20, type ='p', pch = 19, cex = .6, col='red', xlab='wavelength', ylab='Radiation(W/m^2) ', main = "Ref_Factor vs Wavelength")
#par(new= TRUE)
lines(x48, y48, type ='p', pch = 20, cex = .7, col = 'green')
#par(new= TRUE)
lines(x75, y75, type ='p', pch = 18, cex = .8, col = 'cyan')
#par(new= TRUE)
lines(x95, y95, type ='p', pch = 17, cex = .6, col='blue')
#par(new= TRUE)
lines(x99, y99, type ='p', pch = 15, cex = .6, col = 'black')
legend("topright", legend=c("Ra20%", "Rad48%", "Rad75", "Rad95", "Rad99"),
       col=c("red", 'green', 'cyan', 'blue', "black"), pch = c(19, 20, 18, 17, 15), cex=0.8, box.lty=2, box.lwd=2, box.col="green")
dev.off()
#dev.off()
## Fitting the data
### First fit on the 20% data
f20 <- splinefun(x20, y20, method = "fmm", ties = mean)
#par(mfrow=c(4,1))
pdf(file = '~/Team_1/results/fitt20_48.pdf')
plot(x48, f20(x48), type = 'l',col = 'red', xlab ='wavelength', ylab='Radiation(W/m^2) ', main = "Spline fitted vs Ref_Factor48%")
#par(new= TRUE)
lines(x48, y48, type = 'p', pch=19,cex = 0.5, col='blue')
legend("topright", legend=c("fitted20%", "ref48%"),
       col=c("red", "blue"), lty=1:2, cex=0.8, box.lty=2, box.lwd=2, box.col="green")
dev.off()
#par(new=TRUE)
pdf(file = '~/Team_1/results/fitt20_75.pdf')
plot(x75, f20(x75), type = 'l',col = 'red', xlab ='wavelength', ylab='Radiation(W/m^2) ', main = "Spline fitted vs Ref_Factor75%")
par(new= TRUE)
lines(x75, y75, type = 'p', pch=18,cex = 0.5, col='blue')
## BUG FIX: legend previously said "ref48%" in this 75% section
legend("topright", legend=c("fitted20%", "ref75%"),
       col=c("red", "blue"), lty=1:2, cex=0.8, box.lty=2, box.lwd=2, box.col="green")
dev.off()
pdf(file = '~/Team_1/results/fitt20_95.pdf')
plot(x95, f20(x95), type = 'l',col = 'red', xlab ='wavelength', ylab='Radiation(W/m^2) ', main = "Spline fitted vs Ref_Factor95%")
par(new= TRUE)
lines(x95, y95, type = 'p', pch=17,cex = 0.5, col='blue')
legend("topright", legend=c("fitted20%", "ref95%"),
       col=c("red", "blue"), lty=1:2, cex=0.8, box.lty=2, box.lwd=2, box.col="green")
dev.off()
pdf(file = '~/Team_1/results/fitt20_99.pdf')
plot(x99, f20(x99), type = 'l',col = 'red', xlab ='wavelength', ylab='Radiation(W/m^2) ', main = "Spline fitted vs Ref_Factor99%")
par(new= TRUE)
## BUG FIX: this section previously overlaid the 95% data (x95, y95)
lines(x99, y99, type = 'p', pch=15,cex = 0.5, col='blue')
legend("topright", legend=c("fitted20%", "ref99%"),
       col=c("red", "blue"), lty=1:2, cex=0.8, box.lty=2, box.lwd=2, box.col="green")
dev.off()
### Now, fit the data one the base of three files 20%, 75%, and 99%
Rad_mean<-rowMeans(cbind(new_ref20$Rad...Ref.., new_ref75$Rad...Ref.., new_ref99$Rad...Ref..), na.rm=TRUE)
#Wvl_mean<-rowMeans(cbind(new_ref20$, new_ref75$Rad...Ref.., new_ref99$Rad...Ref..), na.rm=TRUE)
fmean <- splinefun(x20, Rad_mean, method = "fmm", ties = mean)
#par(mfrow=c(1,2))
pdf(file = '~/Team_1/results/fittmean_48.pdf')
plot(x48, fmean(x48), type = 'l',col = 'red', xlab ='wavelength', ylab='Radiation(W/m^2) ', main = "Spline fitted on 20%, 75%, and 99%")
par(new= TRUE)
lines(x48, y48, type = 'p', pch = 20, cex = 0.5, col='blue')
legend("topright", legend=c("fittedmean%", "ref48%"),
       col=c("red", "blue"), lty=1:2, cex=0.8, box.lty=2, box.lwd=2, box.col="green")
dev.off()
#par(new=TRUE)
## NOTE(review): this section validates against the 95% data but the file
## is named fittmean_99.pdf -- confirm which was intended
pdf(file = '~/Team_1/results/fittmean_99.pdf')
plot(x95, fmean(x95), type = 'l', col = 'red', xlab='wavelength', ylab='Radiation(W/m^2) ', main = "Spline fitted on 20%, 75%, and 99%")
par(new= TRUE)
lines(x95, y95, type = 'p', pch = 17, cex = 0.5, col='black')
legend("topright", legend=c("fittedmean%", "ref95%"),
       col=c("red", "black"), lty=1:2, cex=0.8, box.lty=2, box.lwd=2, box.col="green")
dev.off()
| /src/R/team1_pi4.r | no_license | dlebauer/team1-predict-swir | R | false | false | 5,729 | r | library(readr)
ref20 <- read.csv("~/Team_1/data/Ref_Factor_20.csv")
ref48 <- read.csv("~/Team_1/data/Ref_Factor_48.csv")
ref75 <- read.csv("~/Team_1/data/Ref_Factor_75.csv")
ref95 <- read.csv("~/Team_1/data/Ref_Factor_95.csv")
ref99 <- read.csv("~/Team_1/data/Ref_Factor_99.csv")
#View(ref20)
#colnames(ref20)
# Delelting all those rows/observations for which Reflective percentage is above 100
#ref20$Reflect...>100
Ref20_100<-which(ref20$Reflect... >=100)
Ref48_100<-which(ref48$Reflect... >=100)
Ref75_100<-which(ref75$Reflect... >=100)
Ref95_100<-which(ref95$Reflect... >=100)
Ref99_100<-which(ref99$Reflect... >=100)
## Replace all those Rediation for which %Reflection is above or equal 100 with NA
ref20$Rad...Ref..[Ref20_100]<- NA
ref48$Rad...Ref..[Ref48_100]<- NA
ref75$Rad...Ref..[Ref75_100]<- NA
ref95$Rad...Ref..[Ref95_100]<- NA
ref99$Rad...Ref..[Ref99_100]<- NA
## Next we want to keep only wavelength and Rad(ref) in our data set for 20%, 48%, 75%, 95%, and 99%
new_ref20<-ref20[c(1,3)]
new_ref48<-ref48[c(1,3)] # keep first column (wavelenth) of all bcs dim is not same for all files after cleaning data
new_ref75<-ref75[c(1,3)]
new_ref95<-ref95[c(1,3)]
new_ref99<-ref99[c(1,3)]
## check
names(new_ref20)
## Now to clean the data a little more, We will replace all the negative values in the Radiation (ref) column with NA
new_ref20[new_ref20 <= 0]<-NA
new_ref48[new_ref48 <= 0]<-NA
new_ref75[new_ref75 <= 0]<-NA
new_ref95[new_ref95 <= 0]<-NA
new_ref99[new_ref99 <= 0]<-NA
## Check dimension of each now
dim(new_ref20)
dim(new_ref48)
dim(new_ref75)
dim(new_ref95)
dim(new_ref99)
## Perfect, all files have the same dimension now
## T0 make typing easy, lets rename the variables
x20<- new_ref20$Wvl
y20<-new_ref20$Rad...Ref..
x48<- new_ref48$Wvl
y48<-new_ref48$Rad...Ref..
x75<- new_ref75$Wvl
y75<-new_ref75$Rad...Ref..
x95<- new_ref95$Wvl
y95<-new_ref95$Rad...Ref..
x99<- new_ref95$Wvl
y99<-new_ref95$Rad...Ref..
## Firs plot all the files in the same window
library(ggplot2)
library(grid)
library(gridExtra)
library(plotly)
pdf(file = '~/Team_1/results/dataplots.pdf')
plot(x20, y20,style ='p', pch = 19, cex = .6, col='red', xlab='wavelength', ylab='Radiation(W/m^2) ', main = "Ref_Factor vs Wavelength")
#par(new= TRUE)
lines(x48, y48,style ='p', pch = 20, cex = .7, col = 'green')
#par(new= TRUE)
lines(x75, y75, style ='p',pch = 18, cex = .8, col = 'cyan')
#par(new= TRUE)
lines(x95, y95, style ='p',pch = 17, cex = .6, col='blue')
#par(new= TRUE)
lines(x99, y99, style ='p',pch = 15, cex = .6, col = 'black')
legend("topright", legend=c("Ra20%", "Rad48%", "Rad75", "Rad95", "Rad99"),
col=c("red", 'green', 'cyan', 'blue', "black"), pch = c(19, 20, 18, 17, 15), cex=0.8, box.lty=2, box.lwd=2, box.col="green")
dev.off()
#dev.off()
## Fitting the data
### First fit on the 20% data
## Interpolate the 20% reference spectrum with a cubic spline; f20() can
## then be evaluated on the wavelength grid of any other reference factor.
f20 <- splinefun(x20, y20, method = "fmm", ties = mean)
#par(mfrow=c(4,1))
## Compare the 20% spline against each other reference spectrum, one PDF each.
## NOTE: par(new = TRUE) before lines() was redundant (lines() draws on the
## current plot) and has been dropped throughout.
pdf(file = '~/Team_1/results/fitt20_48.pdf')
plot(x48, f20(x48), type = 'l',col = 'red', xlab ='wavelength', ylab='Radiation(W/m^2) ', main = "Spline fitted vs Ref_Factor48%")
lines(x48, y48, type = 'p', pch=19,cex = 0.5, col='blue')
legend("topright", legend=c("fitted20%", "ref48%"),
col=c("red", "blue"), lty=1:2, cex=0.8, box.lty=2, box.lwd=2, box.col="green")
dev.off()
pdf(file = '~/Team_1/results/fitt20_75.pdf')
plot(x75, f20(x75), type = 'l',col = 'red', xlab ='wavelength', ylab='Radiation(W/m^2) ', main = "Spline fitted vs Ref_Factor75%")
lines(x75, y75, type = 'p', pch=18,cex = 0.5, col='blue')
## fix: this legend previously mislabeled the 75% reference as "ref48%"
legend("topright", legend=c("fitted20%", "ref75%"),
col=c("red", "blue"), lty=1:2, cex=0.8, box.lty=2, box.lwd=2, box.col="green")
dev.off()
pdf(file = '~/Team_1/results/fitt20_95.pdf')
plot(x95, f20(x95), type = 'l',col = 'red', xlab ='wavelength', ylab='Radiation(W/m^2) ', main = "Spline fitted vs Ref_Factor95%")
lines(x95, y95, type = 'p', pch=17,cex = 0.5, col='blue')
legend("topright", legend=c("fitted20%", "ref95%"),
col=c("red", "blue"), lty=1:2, cex=0.8, box.lty=2, box.lwd=2, box.col="green")
dev.off()
pdf(file = '~/Team_1/results/fitt20_99.pdf')
plot(x99, f20(x99), type = 'l',col = 'red', xlab ='wavelength', ylab='Radiation(W/m^2) ', main = "Spline fitted vs Ref_Factor99%")
## fix: the 99% plot previously overlaid the 95% data (x95, y95)
lines(x99, y99, type = 'p', pch=15,cex = 0.5, col='blue')
legend("topright", legend=c("fitted20%", "ref99%"),
col=c("red", "blue"), lty=1:2, cex=0.8, box.lty=2, box.lwd=2, box.col="green")
dev.off()
### Now, fit the data one the base of three files 20%, 75%, and 99%
## Average the 20%, 75% and 99% radiation columns wavelength-wise, then
## spline the averaged spectrum on the shared wavelength grid (x20).
Rad_mean<-rowMeans(cbind(new_ref20$Rad...Ref.., new_ref75$Rad...Ref.., new_ref99$Rad...Ref..), na.rm=TRUE)
#Wvl_mean<-rowMeans(cbind(new_ref20$, new_ref75$Rad...Ref.., new_ref99$Rad...Ref..), na.rm=TRUE)
fmean <- splinefun(x20, Rad_mean, method = "fmm", ties = mean)
#par(mfrow=c(1,2))
## Validate the averaged spline against the two held-out references (48%, 95%).
pdf(file = '~/Team_1/results/fittmean_48.pdf')
plot(x48, fmean(x48), type = 'l',col = 'red', xlab ='wavelength', ylab='Radiation(W/m^2) ', main = "Spline fitted on 20%, 75%, and 99%")
lines(x48, y48, type = 'p', pch = 20, cex = 0.5, col='blue')
legend("topright", legend=c("fittedmean%", "ref48%"),
col=c("red", "blue"), lty=1:2, cex=0.8, box.lty=2, box.lwd=2, box.col="green")
dev.off()
## fix: this comparison uses the 95% reference (see plot data and legend),
## so name the output accordingly (it was previously fittmean_99.pdf)
pdf(file = '~/Team_1/results/fittmean_95.pdf')
plot(x95, fmean(x95), type = 'l', col = 'red', xlab='wavelength', ylab='Radiation(W/m^2) ', main = "Spline fitted on 20%, 75%, and 99%")
lines(x95, y95, type = 'p', pch = 17, cex = 0.5, col='black')
legend("topright", legend=c("fittedmean%", "ref95%"),
col=c("red", "black"), lty=1:2, cex=0.8, box.lty=2, box.lwd=2, box.col="green")
dev.off()
|
## Lab 7: exploratory analysis of the forest-fire data set (Forest.txt).
## NOTE(review): setwd() and the absolute path tie this script to one machine.
setwd("C:/Users/IT18194654/Downloads")
data<-read.table(file ="C:/Users/IT18194654/Downloads/Forest.txt",header = TRUE,sep = ",")
data
## attach() exposes the columns (month, day, temp, wind, ...) as variables.
attach(data)
#names("X","Y","month","day","FFMC","DMC","DC","ISI","temp","RH","wind","rain","area")
#q2: summary statistics of every column
summary(data)
#q3: open the data in a spreadsheet-style editor
fix(data)
#q4: range (max/min) of the wind-speed column
max(wind)
min(wind)
#q5: summary statistics of temperature
summary(temp)
#q6: horizontal boxplot of wind speed, outliers drawn as filled circles
boxplot(wind,horizontal = TRUE,outline = TRUE,pch = 16)
#q7: shape of the wind distribution read off the boxplot
#left-skewd
#q8: mean temperature
mean(temp)
#q9: mean and standard deviation of wind speed
mean(wind)
sd(wind)
#q10: interquartile range of wind speed
IQR(wind)
#q11: two-way frequency table of day by month
freq<-table(day,month)
freq
#q12: mean temperature restricted to September records
mean(temp[month=="sep"])
#q13: day-by-month frequency table (repeat of q11)
freq<-table(day,month)
freq
| /lab7.R | no_license | PazinduLakpriya/ps-lab-7 | R | false | false | 581 | r | setwd("C:/Users/IT18194654/Downloads")
## Lab 7: exploratory analysis of the forest-fire data set (Forest.txt).
## NOTE(review): the absolute path ties this script to one machine.
data<-read.table(file ="C:/Users/IT18194654/Downloads/Forest.txt",header = TRUE,sep = ",")
data
## attach() exposes the columns (month, day, temp, wind, ...) as variables.
attach(data)
#names("X","Y","month","day","FFMC","DMC","DC","ISI","temp","RH","wind","rain","area")
#q2: summary statistics of every column
summary(data)
#q3: open the data in a spreadsheet-style editor
fix(data)
#q4: range (max/min) of the wind-speed column
max(wind)
min(wind)
#q5: summary statistics of temperature
summary(temp)
#q6: horizontal boxplot of wind speed, outliers drawn as filled circles
boxplot(wind,horizontal = TRUE,outline = TRUE,pch = 16)
#q7: shape of the wind distribution read off the boxplot
#left-skewd
#q8: mean temperature
mean(temp)
#q9: mean and standard deviation of wind speed
mean(wind)
sd(wind)
#q10: interquartile range of wind speed
IQR(wind)
#q11: two-way frequency table of day by month
freq<-table(day,month)
freq
#q12: mean temperature restricted to September records
mean(temp[month=="sep"])
#q13: day-by-month frequency table (repeat of q11)
freq<-table(day,month)
freq
|
/bin/mac/sla.beta.r | permissive | coronalabs/corona | R | false | false | 166,996 | r | ||
library(shiny)
## Static UI fragments for the app's analysis pages.  Each object below is a
## shiny fluidPage() containing only headings and prose; they are assembled
## into the full UI elsewhere in the app.

## Heading for the earnings-vs-prestige page.
earnings_intro <- fluidPage(
  h2("Is Potential Earnings of Prestige Colleges Worth the Cost?"))

## Written analysis of the earnings-by-admission-rate graph.
earnings_analysis <- fluidPage(
  h3("The Importance of this Graph"),
  p(em("Note: Per College Scorecard, Earnings are defined as the
       sum of wages and deferred compensation from all non-duplicate W-2
       forms received for each individual (from both full- and part-time
       employment), plus positive self-employment earnings from Schedule
       SE.")),
  p("This graph has a lot of information to digest. We can take a look at
    each year individually and the trend over each year. Six years after
    enrollment, we can see that colleges that have over 75% admission rate
    (not-prestigous) are extremely valuable for the cost. The mean earnings are
    around $18,000, which is great intial value. We can compare this to the
    other end where acceptance rate is around 0%, and we can see that the mean
    earnings are $5,000 which is significantly less. When we take a look at
    the trend of yearly earnings over six, eight, and ten years, we can see
    that earnings increase across the board. The line shifts up nearly $10,000
    for each sdmisson rate. However, one section increases much more than the
    others. Ten years after enrollment, we can see that the earnings for
    prestige, low admission college increased drastically. The earnings
    increased to around $37,000 dollars! This is much higher compared than
    the low prestige, high admission rate which did increase to around
    $25,000 dollars."),
  strong("From these insights, we can infer than in the long term the high
         cost prestigous colleges will pay off. However, it will take around
         10 years after enrollment to overtake it.")
)

## Written analysis of the three choropleth maps (debt, tuition, default rate).
map_analysis <- fluidPage(
  h3("Importance"),
  p("A question we want to answer is, Do certain states correlate with higher
    student debt/tuition cost/default rates? We can take a look at these
    correlations and think about what relates these factors and the state.
    e.g poverty, institution cost, public / private colleges."),
  h4("Average Student Debt Map"),
  p("From this map, we can see that there is a large grouping of student debt
    in the southeastern states. Other notables states are South Dakota and
    Vermont. The large grouping of southeastern states can most likely be due
    to high college tuition costs, and a higher poverty level. Most colleges
    around the U.S. hold the same tuition, but the amount of money the students
    in a state have changes drastically. Other reasoning could be that more
    students are persuing higher education, which has a much higher cost than
    community colleges and trade schools. This could explain why states like
    New Mexico have lower student debt."),
  h4("Average Tuition Cost Map"),
  p("This map shows that the average tuition cost is slightly correlated with
    the previous map. The State with the lowest average tuition is Wyoming,
    which also had the lowest average graduation debt. Similarly, in the
    New England area, there seems to be a much higher average tuition cost
    than the surrounding states. This is most likely due to the large
    number of high prestige schools with much lower admission rates.
    These schools are able to charge more and without the risk of
    a reduction in applicants."),
  h4("Average Default Rate"),
  p("This map shows that the default rate is not correlated as much as the
    other two. High default rates are especially prevelent in the southern
    states, despite these states having relatively lower student debt and
    tuition costs. This is most likely due to relatively higher poverty rates
    or that graduates are unable to obtain jobs and pay off their
    student loans.")
)

## Heading for the income-level / institution-type page.
type_intro <- fluidPage(
  h2(
    "Does Income Level and Institution
    Type Affect the Ability to Pay Off Debt?")
)

## Written analysis of debt vs. earnings by income level and institution type.
type_analysis <- fluidPage(
  h3("The Importance of this Graph"),
  p(em("Note: The income level is determined by FAFSA data: low = $0-$30,000,
     medium = $30,001-$75,000, high = $75,001+). Loan debt in this variable
     is calculated by the median cumulative debt each student has borrowed
     during their time enrolled in the institution.")),
  p(em("Legend: max represents the highest loan debt from the type of
     institution and avg represents the average loan debt from the type of
     institution. Low, medium, and high represent the levels of income in the
     \"Note\" section above.")),
  p("Taking an initial scan, the average amount of debt depends on
    the type of instiution, where \"public\" colleges have the minimum debt of
    approx(~). $16,000, and \"private\" colleges have the max of ~$24,000.
    There is a pattern of the income level (low, medium, and high) resulting in
    the lowest to highest (respectively) average earnings 10 years
    post-graduation for all types of institutions. All the income level
    earnings for \"private for-profit\" colleges are the minimum values when
    comparing the corresponding levels across all the types of schools, versus
    \"private\" institutions and all their earnings being the maximum for each
    level."),
  p("Average debt accumilated at graduation is significantly less than the
    earnings for all income levels, especially for students graduating from
    \"public\" and \"private\" schools. The difference between low-income
    earnings and the average debt for \"private for-profit\" is only ~$13,000,
    whereas \"private\" and \"public\" institutions have a difference of
    ~$23,000 and ~$24,000 respectively. With \"public\" and \"private\"
    institutions' low-level income earnings still surpassing their own max
    cumulative loan debt, private for-profit's max is only surpassed by the
    high-level income earnings."),
  strong("From these insights, we can infer that income level does affect a
         student's earnings post-graduation and that amount is dependent on the
         type of institution they graduate from. Regardless of the amount of
         average debt, all income level earnings (when used effectively) from
         \"private\" and \"public\" schools are high enough to pay off the debt
         faster/easier than \"private for-profit\" colleges.")
)
| /final/ui_components/analysis_ui.R | permissive | getachew67/INFO201-7 | R | false | false | 6,436 | r | library(shiny)
## Static UI fragments for the app's analysis pages.  Each object below is a
## shiny fluidPage() containing only headings and prose; they are assembled
## into the full UI elsewhere in the app.

## Heading for the earnings-vs-prestige page.
earnings_intro <- fluidPage(
  h2("Is Potential Earnings of Prestige Colleges Worth the Cost?"))

## Written analysis of the earnings-by-admission-rate graph.
earnings_analysis <- fluidPage(
  h3("The Importance of this Graph"),
  p(em("Note: Per College Scorecard, Earnings are defined as the
       sum of wages and deferred compensation from all non-duplicate W-2
       forms received for each individual (from both full- and part-time
       employment), plus positive self-employment earnings from Schedule
       SE.")),
  p("This graph has a lot of information to digest. We can take a look at
    each year individually and the trend over each year. Six years after
    enrollment, we can see that colleges that have over 75% admission rate
    (not-prestigous) are extremely valuable for the cost. The mean earnings are
    around $18,000, which is great intial value. We can compare this to the
    other end where acceptance rate is around 0%, and we can see that the mean
    earnings are $5,000 which is significantly less. When we take a look at
    the trend of yearly earnings over six, eight, and ten years, we can see
    that earnings increase across the board. The line shifts up nearly $10,000
    for each sdmisson rate. However, one section increases much more than the
    others. Ten years after enrollment, we can see that the earnings for
    prestige, low admission college increased drastically. The earnings
    increased to around $37,000 dollars! This is much higher compared than
    the low prestige, high admission rate which did increase to around
    $25,000 dollars."),
  strong("From these insights, we can infer than in the long term the high
         cost prestigous colleges will pay off. However, it will take around
         10 years after enrollment to overtake it.")
)

## Written analysis of the three choropleth maps (debt, tuition, default rate).
map_analysis <- fluidPage(
  h3("Importance"),
  p("A question we want to answer is, Do certain states correlate with higher
    student debt/tuition cost/default rates? We can take a look at these
    correlations and think about what relates these factors and the state.
    e.g poverty, institution cost, public / private colleges."),
  h4("Average Student Debt Map"),
  p("From this map, we can see that there is a large grouping of student debt
    in the southeastern states. Other notables states are South Dakota and
    Vermont. The large grouping of southeastern states can most likely be due
    to high college tuition costs, and a higher poverty level. Most colleges
    around the U.S. hold the same tuition, but the amount of money the students
    in a state have changes drastically. Other reasoning could be that more
    students are persuing higher education, which has a much higher cost than
    community colleges and trade schools. This could explain why states like
    New Mexico have lower student debt."),
  h4("Average Tuition Cost Map"),
  p("This map shows that the average tuition cost is slightly correlated with
    the previous map. The State with the lowest average tuition is Wyoming,
    which also had the lowest average graduation debt. Similarly, in the
    New England area, there seems to be a much higher average tuition cost
    than the surrounding states. This is most likely due to the large
    number of high prestige schools with much lower admission rates.
    These schools are able to charge more and without the risk of
    a reduction in applicants."),
  h4("Average Default Rate"),
  p("This map shows that the default rate is not correlated as much as the
    other two. High default rates are especially prevelent in the southern
    states, despite these states having relatively lower student debt and
    tuition costs. This is most likely due to relatively higher poverty rates
    or that graduates are unable to obtain jobs and pay off their
    student loans.")
)

## Heading for the income-level / institution-type page.
type_intro <- fluidPage(
  h2(
    "Does Income Level and Institution
    Type Affect the Ability to Pay Off Debt?")
)

## Written analysis of debt vs. earnings by income level and institution type.
type_analysis <- fluidPage(
  h3("The Importance of this Graph"),
  p(em("Note: The income level is determined by FAFSA data: low = $0-$30,000,
     medium = $30,001-$75,000, high = $75,001+). Loan debt in this variable
     is calculated by the median cumulative debt each student has borrowed
     during their time enrolled in the institution.")),
  p(em("Legend: max represents the highest loan debt from the type of
     institution and avg represents the average loan debt from the type of
     institution. Low, medium, and high represent the levels of income in the
     \"Note\" section above.")),
  p("Taking an initial scan, the average amount of debt depends on
    the type of instiution, where \"public\" colleges have the minimum debt of
    approx(~). $16,000, and \"private\" colleges have the max of ~$24,000.
    There is a pattern of the income level (low, medium, and high) resulting in
    the lowest to highest (respectively) average earnings 10 years
    post-graduation for all types of institutions. All the income level
    earnings for \"private for-profit\" colleges are the minimum values when
    comparing the corresponding levels across all the types of schools, versus
    \"private\" institutions and all their earnings being the maximum for each
    level."),
  p("Average debt accumilated at graduation is significantly less than the
    earnings for all income levels, especially for students graduating from
    \"public\" and \"private\" schools. The difference between low-income
    earnings and the average debt for \"private for-profit\" is only ~$13,000,
    whereas \"private\" and \"public\" institutions have a difference of
    ~$23,000 and ~$24,000 respectively. With \"public\" and \"private\"
    institutions' low-level income earnings still surpassing their own max
    cumulative loan debt, private for-profit's max is only surpassed by the
    high-level income earnings."),
  strong("From these insights, we can infer that income level does affect a
         student's earnings post-graduation and that amount is dependent on the
         type of institution they graduate from. Regardless of the amount of
         average debt, all income level earnings (when used effectively) from
         \"private\" and \"public\" schools are high enough to pay off the debt
         faster/easier than \"private for-profit\" colleges.")
)
|
## cachematrix.R
## Purpose:
## To cache -store- the value of the inverse of a given matrix.
## How it works:
## we will use two functions, the first one to cache the value of the inverse; the second one to actual calculate the inverse values, if already calculated for a given matrix, it will simply return the stored value.
## I will use the model -example- provided as it will need just a very few changes.
## The following function will set and get the values for a given matrix (the object of the calculation)
## Create a special "matrix" wrapper able to cache its inverse.
## Returns a list of accessor functions (set/get/setinv/getinv) that close
## over the matrix `x` and the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL  # cached inverse; NULL until first computed
  ## Replace the stored matrix and invalidate the cached inverse.
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL
  }
  ## Return the stored matrix.
  get <- function() {
    x
  }
  ## Store a freshly computed inverse in the cache.
  setinv <- function(inverse) {
    cached_inv <<- inverse
  }
  ## Return the cached inverse (NULL if not yet computed).
  getinv <- function() {
    cached_inv
  }
  list(
    set = set,
    get = get,
    setinv = setinv,
    getinv = getinv
  )
}
## Calculate and retrieve the inverse of the matrix
## Return the inverse of the special "matrix" `x` created by makeCacheMatrix.
## The inverse is computed with solve() only on the first call; later calls
## reuse the cached value (and print "getting cached data").
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    ## Cache miss: compute the inverse, store it, and fall through to return.
    mat <- x$get()
    cached <- solve(mat, ...)
    x$setinv(cached)
  } else {
    ## Cache hit: reuse the stored inverse.
    message("getting cached data")
  }
  cached
}
## test case
##> rm(x)
##> X=matrix(3:6,2,2)
##> x<-makeCacheMatrix(X)
##> x$get()
## [,1] [,2]
##[1,] 3 5
##[2,] 4 6
##> cacheSolve(x)
## [,1] [,2]
##[1,] -3 2.5
##[2,] 2 -1.5
##> cacheSolve(x)
##getting cached data
## [,1] [,2]
##[1,] -3 2.5
##[2,] 2 -1.5
##
##>
##Testing with Octave:
## octave:1> A=[
## > 3 5
## > 4 6
## > ]
## A =
## 3 5
## 4 6
## octave:2> pinv(A)
## ans =
## -3.0000 2.5000
## 2.0000 -1.5000
## octave:3> A*pinv(A)
## ans =
## 1.0000e+00 1.7764e-15
## -3.5527e-15 1.0000e+00
| /cachematrix.R | no_license | cibermania5/ProgrammingAssignment2 | R | false | false | 2,404 | r | ## cachematrix.R
## Purpose:
## To cache -store- the value of the inverse of a given matrix.
## How it works:
## we will use two functions, the first one to cache the value of the inverse; the second one to actual calculate the inverse values, if already calculated for a given matrix, it will simply return the stored value.
## I will use the model -example- provided as it will need just a very few changes.
## The following function will set and get the values for a given matrix (the object of the calculation)
## Create a special "matrix" wrapper able to cache its inverse.
## Returns a list of accessor functions (set/get/setinv/getinv) that close
## over the matrix `x` and the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL  # cached inverse; NULL until first computed
  ## Replace the stored matrix and invalidate the cached inverse.
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL
  }
  ## Return the stored matrix.
  get <- function() {
    x
  }
  ## Store a freshly computed inverse in the cache.
  setinv <- function(inverse) {
    cached_inv <<- inverse
  }
  ## Return the cached inverse (NULL if not yet computed).
  getinv <- function() {
    cached_inv
  }
  list(
    set = set,
    get = get,
    setinv = setinv,
    getinv = getinv
  )
}
## Calculate and retrieve the inverse of the matrix
## Return the inverse of the special "matrix" `x` created by makeCacheMatrix.
## The inverse is computed with solve() only on the first call; later calls
## reuse the cached value (and print "getting cached data").
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    ## Cache miss: compute the inverse, store it, and fall through to return.
    mat <- x$get()
    cached <- solve(mat, ...)
    x$setinv(cached)
  } else {
    ## Cache hit: reuse the stored inverse.
    message("getting cached data")
  }
  cached
}
## test case
##> rm(x)
##> X=matrix(3:6,2,2)
##> x<-makeCacheMatrix(X)
##> x$get()
## [,1] [,2]
##[1,] 3 5
##[2,] 4 6
##> cacheSolve(x)
## [,1] [,2]
##[1,] -3 2.5
##[2,] 2 -1.5
##> cacheSolve(x)
##getting cached data
## [,1] [,2]
##[1,] -3 2.5
##[2,] 2 -1.5
##
##>
##Testing with Octave:
## octave:1> A=[
## > 3 5
## > 4 6
## > ]
## A =
## 3 5
## 4 6
## octave:2> pinv(A)
## ans =
## -3.0000 2.5000
## 2.0000 -1.5000
## octave:3> A*pinv(A)
## ans =
## 1.0000e+00 1.7764e-15
## -3.5527e-15 1.0000e+00
|
3554730693379d9ac618f88a6f443cb2 dungeon_i25-m12-u5-v0.pddl_planlen=77.qdimacs 58223 525166 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i25-m12-u5-v0.pddl_planlen=77/dungeon_i25-m12-u5-v0.pddl_planlen=77.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 91 | r | 3554730693379d9ac618f88a6f443cb2 dungeon_i25-m12-u5-v0.pddl_planlen=77.qdimacs 58223 525166 |
# 051411 add coat profile
# I will partion Bsu, Bce-clade omegas, wilcox.test in coat proteins and between coat and nonCE protein
##########################################
rm(list=ls());
## some test to demonstrate the syntax for alternative hypothesis
#x = rnorm(100)+5
#y = x + 2
#summary(x);
#summary(y)
#wilcox.test( x, y, alt='gr') #accept null, large p
#wilcox.test( x, y, alt='less') #reject null, accept alternative, small p
## coat gene profile
## Read the coat-gene ortholog profile: one row per gene (rownames = gene id),
## columns 10:20 hold the per-species calls for the 11 Bacillus species.
ctb = read.table( "_coat.profile.122908.csv",sep="\t",header=T);
ctb$id = as.character( ctb$id );
row.names( ctb ) = as.character( ctb$id );
bacillus.specs = names(ctb)[10:20];
ctb2 = ctb[,c(1,10:20)]
ctb3 = ctb2[, 2:12] #profile matrix
## Binarize the profile: 1 = ortholog present, 0 = absent (was NA).
ctb3 = ifelse( is.na(ctb3), 0, 1);
ctb3 = data.frame(ctb3);
#head(ctb3); #passed
## Per-gene ortholog counts within the cereus clade, the subtilis clade,
## the two outgroup species, and overall.
ctb3$cereushits = ctb3$Ban + ctb3$Bth + ctb3$Bce + ctb3$Bwe
ctb3$subtilishits = ctb3$Bam + ctb3$Bli + ctb3$Bpu
ctb3$outhits = ctb3$Bha + ctb3$Bcl
ctb3$hits = ctb3$cereushits + ctb3$subtilishits + ctb3$outhits;
#hist(ctb3$cereushits)
### coat free omega, and match to coat orthologous hits in the profile table
## PAML free-ratio results for coat genes.  Based on later use: V1 = gene id,
## V2 = branch label, V10 = omega (dN/dS).  Other column meanings -- TODO
## confirm against the PAML parsing script.
coat.tb = read.delim("_free.omega.coat.paml.Jan20,2011.txt", header=F);
coat.tb = coat.tb[ ! is.na(coat.tb$V4), ]
coat.tb$V2 = as.character(coat.tb$V2)
## Attach each coat gene's ortholog-hit counts from the profile table.
positions = match( coat.tb$V1, rownames(ctb3))
#ctb3[positions[1:5], ] #check, passed
coat.tb = cbind( coat.tb, ctb3[positions, c('cereushits','subtilishits', 'outhits', 'hits')])
#head(coat.tb)
### nonCE free omega
## Non-coat (nonCE) control genes; per later use: V3 = branch label,
## V11 = omega.  Keep only the 'H1C' rows.
nonCE.tb = read.delim("_nonCE.omega.7March2011.tab", header=F);
nonCE.tb = nonCE.tb[nonCE.tb$V2=='H1C',] # H1C is where free omega are calculated
nonCE.tb$V3 = as.character(nonCE.tb$V3)
#remove 999s Many labile coat genes have omega=999??
#Wcutoff = 998
#coat.tb$V10[coat.tb$V10> Wcutoff ] = NA
#nonCE.tb$V11[nonCE.tb$V11> Wcutoff ] = NA
## Branch labels present in either data set (for reference when choosing the
## clade definitions below).
clades = unique(c( nonCE.tb$V3, coat.tb$V2))
# "(Bha,Bcl)" "Bha" "Bcl"
#"(Bcl,Bli,Bam,Bsu)"
## Alternative BsuClade branch sets tried during the analysis are kept below
## as comments, each annotated with the resulting p-value; exactly one
## definition is active at a time.
#BsuClade = c("(Bpu,Bli,Bam,Bsu)","(Bli,Bam,Bsu)","(Bpu,Bli,Bsu)", "Bpu", "Bli", "(Bam,Bsu)", "Bam", "Bsu", "(Bpu,Bam,Bsu)", "(Bpu,Bli,Bam,Bmo,Bsu)", "(Bli,Bam,Bmo,Bsu)", "(Bam,Bmo,Bsu)","(Bmo,Bsu)","Bmo","(Bli,Bam,Bsu,Bmo)", "(Bam,Bsu,Bmo)", "(Bsu,Bmo)", "(Bpu,Bam,Bmo,Bsu)", "(Bli,Bmo,Bsu)", "(Bli,Bsu)" ); #p=0.0305
#leaf branches for publication
#BsuClade = c("Bpu", "Bli", "Bam", "Bsu"); #p-value = 0.00823,
# internal branches for publication
BsuClade = c("(Bpu,Bli,Bam,Bsu)","(Bli,Bam,Bsu)","(Bpu,Bli,Bsu)", "Bpu", "Bli", "(Bam,Bsu)", "Bam", "(Bpu,Bam,Bsu)", "(Bpu,Bli,Bam,Bmo,Bsu)", "(Bli,Bam,Bmo,Bsu)", "(Bam,Bmo,Bsu)","(Bmo,Bsu)","Bmo","(Bli,Bam,Bsu,Bmo)", "(Bam,Bsu,Bmo)", "(Bsu,Bmo)", "(Bpu,Bam,Bmo,Bsu)", "(Bli,Bmo,Bsu)", "(Bli,Bsu)" ); #no Bsu leaf, p=0.13
#BsuClade = c("(Bli,Bam,Bsu)","Bli","(Bam,Bsu)","Bam","Bsu","(Bpu,Bam,Bsu)", "(Bli,Bam,Bmo,Bsu)", "(Bam,Bmo,Bsu)", "(Bmo,Bsu)","Bmo","(Bli,Bam,Bsu,Bmo)", "(Bam,Bsu,Bmo)","(Bsu,Bmo)","(Bli,Bmo,Bsu)","(Bli,Bsu)" ); # p-value = 0.01123, no Bpu
#BsuClade = c("(Bpu,Bli,Bam,Bsu)","(Bli,Bam,Bsu)","(Bpu,Bli,Bsu)", "(Bam,Bsu)", "(Bpu,Bam,Bsu)", "(Bpu,Bli,Bam,Bmo,Bsu)", "(Bli,Bam,Bmo,Bsu)", "(Bam,Bmo,Bsu)","(Bmo,Bsu)","(Bli,Bam,Bsu,Bmo)", "(Bam,Bsu,Bmo)", "(Bsu,Bmo)", "(Bpu,Bam,Bmo,Bsu)", "(Bli,Bmo,Bsu)", "(Bli,Bsu)" ); #no leaf nodes, high omega all over the places, large p-value
#so, these suggest coat gene only recently evolves faster than the nonCE genes, suggest adpation to niches
#BsuClade = c("Bli", "Bam", "Bsu"); #0.01082
#BsuClade = c("Bpu", "Bam", "Bsu"); # p-value = 0.002779
#** BsuClade = c("Bsu") # p-value = 0.002939
#**BsuClade = c("Bam") # p=0.0858
#BsuClade = c("Bsu", "Bmo", "(Bsu,Bmo)","(Bmo,Bsu)") # p-value = 3.483e-08
#BsuClade = c("Bsu", "Bmo") # p-value = 4.654e-06
#BsuClade = c("Bsu", "(Bsu,Bmo)","(Bmo,Bsu)") # p-value = 1.899e-05
#BsuClade = c("Bli") # p=0.5 ??
#BsuClade = c("Bpu") # p=0.19
#BsuClade = c("(Bam,Bmo,Bsu)", "(Bsu,Bmo)", "(Bam,Bsu)") #large p
#BsuClade = c( "(Bam,Bsu)") #p=0.1568
## Active BceClade definition (alternatives kept below as comments).
BceClade = c("(Bwe,Bce,Ban,Bth)","(Bce,Ban,Bth)","(Ban,Bth)","(Bwe,Bce,Bth)","(Bce,Bth)", "(Bwe,Ban,Bth)","Bwe","Ban","Bth","Bce"); # p=0.1694 cCW>nCW
#BceClade = c("(Bwe,Bce,Ban,Bth)","(Bwe,Bce,Bth)", "(Bwe,Ban,Bth)"); # ?? ambiguous
#BceClade = c("(Bwe,Bce,Ban,Bth)","(Bce,Ban,Bth)","(Ban,Bth)","(Bwe,Bce,Bth)","(Bce,Bth)", "(Bwe,Ban,Bth)"); #p=0.214 cCW > nCW
#BceClade = c("Ban","Bth","Bce"); #p=0.25 cCW < nCW,
#BceClade = c("(Bce,Ban,Bth)","(Ban,Bth)","(Bce,Bth)"); #p=0.1886
#BceClade = c("(Bce,Ban,Bth)","(Ban,Bth)","(Bce,Bth)", "Ban","Bth","Bce"); # p = 0.157
## Tag every branch row with its clade membership: 'bsu', 'bce', or NA.
nonCE.tb$flag = NA;
coat.tb$flag = NA;
for( j in 1:length(BsuClade)){
  nonCE.tb$flag[ nonCE.tb$V3 == BsuClade[j] ] = 'bsu';
  coat.tb$flag[ coat.tb$V2==BsuClade[j] ] = 'bsu';
}
for( j in 1:length(BceClade)){
  nonCE.tb$flag[ nonCE.tb$V3 == BceClade[j] ] = 'bce';
  coat.tb$flag[ coat.tb$V2==BceClade[j] ] = 'bce';
}
table( coat.tb$V2, coat.tb$flag) #check flags, passed. 051411
## Classify each branch-level omega as 'high' (>1, positive selection) or
## 'low' (<1); omega == 1 maps to 'low' here.
nonCE.tb$omegaFlag = ifelse(nonCE.tb$V11>1, 'high','low' )
coat.tb$omegaFlag = ifelse(coat.tb$V10>1, 'high','low' )
table( nonCE.tb$flag, nonCE.tb$omegaFlag )
table( coat.tb$flag, coat.tb$omegaFlag )
table( coat.tb$V2, coat.tb$omegaFlag)
table( coat.tb$flag, coat.tb$V2, coat.tb$omegaFlag)
## Per-gene counts of high/low omega branches; first row dropped, then the
## fraction of high-omega branches (highPer) is merged back into ctb3.
tmptb = table( coat.tb$V1, coat.tb$omegaFlag)
tmptb = tmptb[-1, ]
coat.summary.tb = as.data.frame( tmptb )
coat.summary.tb$highPer = coat.summary.tb$high / ( coat.summary.tb$low + coat.summary.tb$high)
positions = match( rownames(ctb3), rownames(coat.summary.tb) )
ctb3 = cbind( ctb3, coat.summary.tb[positions, ])
plot(ctb3$highPer ~ jitter(ctb3$cereushits) )
summary(lm(ctb3$highPer ~ ctb3$cereushits)) #significant, conserved coat genes evolve have more high omegas? how about bsu- bce- clades?
#summary(lm( nonCE.tb$V11 ~ nonCE.tb$flag ))
#summary(lm( coat.tb$V10 ~ coat.tb$flag + coat.tb$hits ))
#summary(lm( coat.tb$V10 ~ coat.tb$hits )) #not good
#summary(lm( coat.tb$V10 ~ coat.tb$subtilishits )) # terrible
#summary(lm( log(coat.tb$V10) ~ coat.tb$cereushits )) #p=0.3488
summary(lm( coat.tb$V10 ~ coat.tb$cereushits )) #p=0.0037
plot( coat.tb$V10 ~ jitter(coat.tb$cereushits) )
## Partition omegas: n = nonCE, c = coat; BW = Bsu clade, CW = Bce clade.
## "conserved" coat genes have >=4 cereus-clade orthologs, "labile" <=3.
nBW = nonCE.tb$V11[nonCE.tb$flag=='bsu'];
nCW = nonCE.tb$V11[nonCE.tb$flag=='bce'];
cBW = coat.tb$V10[coat.tb$flag=='bsu'];
cCW = coat.tb$V10[coat.tb$flag=='bce'];
cBW.conserved = coat.tb$V10[coat.tb$flag=='bsu' & coat.tb$cereushits>=4];
cCW.conserved = coat.tb$V10[coat.tb$flag=='bce' & coat.tb$cereushits>=4];
cBW.labile = coat.tb$V10[coat.tb$flag=='bsu' & coat.tb$cereushits <=3 ];
cCW.labile = coat.tb$V10[coat.tb$flag=='bce' & coat.tb$cereushits <=3 ];
## Rows with high (>1) and low (<1) omega, for branch-label tabulation.
n.high = nonCE.tb[nonCE.tb$V11>1, ]
n.low = nonCE.tb[nonCE.tb$V11<1, ]
c.high = coat.tb[coat.tb$V10>1, ]
c.low = coat.tb[coat.tb$V10<1, ]
table(n.high$V3)
table(n.low$V3)
table(c.high$V2)
table(c.low$V2)
####################### summary
## Summaries of each omega partition, then one-sided tests comparing coat
## genes to the nonCE control within each clade.
## NOTE(review): al='gr' / al='less' rely on partial argument matching for
## alternative='greater'/'less'.
#summary(log(nBW))
#summary(log(cBW))
#summary(log(nCW))
#summary(log(cCW))
summary(nBW)
summary(cBW)
summary(nCW)
summary(cCW)
summary(cBW.conserved)
summary(cBW.labile)
summary(cCW.conserved)
summary(cCW.labile)
### analysis of Bsu clade
BsuClade
summary(nBW)
summary(cBW)
summary(cBW.conserved)
summary(cBW.labile)
BsuClade
## Is coat omega greater than nonCE omega in the Bsu clade?
wilcox.test( cBW, nBW, al='gr') #
t.test( log(cBW), log(nBW), al='gr')
ks.test( cBW, nBW, al='gr')
wilcox.test( cBW.conserved, nBW, al='less') #
wilcox.test( cBW.labile, nBW, al='gr') #
### analysis of Bce clade
summary(nCW)
summary(cCW)
summary(cCW.conserved)
summary(cCW.labile)
#t.test( cCW, nCW, al='gr' )
## Is coat omega less than nonCE omega in the Bce clade?
t.test( log(cCW), log(nCW), al='less' )
wilcox.test( cCW, nCW, al='less')
ks.test( cCW, nCW, al="less" )
#wilcox.test( cCW.conserved, nCW, al='less')
#wilcox.test( cCW.labile, nCW, al='gr')
#wilcox.test( log(cCW), log(nCW), al='gr' ) #for log transformation make no difference for wilcox test
#q("no")
| /coatpaml/test.nearlyAll.2013Jan16.Gblocks/R2013Jan20/old/compare.omega.051411.R | no_license | hongqin/BacillusSporeCoat | R | false | false | 7,867 | r |
# 051411 add coat profile
# I will partion Bsu, Bce-clade omegas, wilcox.test in coat proteins and between coat and nonCE protein
##########################################
rm(list=ls());
## some test to demonstrate the syntax for alternative hypothesis
#x = rnorm(100)+5
#y = x + 2
#summary(x);
#summary(y)
#wilcox.test( x, y, alt='gr') #accept null, large p
#wilcox.test( x, y, alt='less') #reject null, accept alternative, small p
## coat gene profile
ctb = read.table( "_coat.profile.122908.csv",sep="\t",header=T);
ctb$id = as.character( ctb$id );
row.names( ctb ) = as.character( ctb$id );
bacillus.specs = names(ctb)[10:20];
ctb2 = ctb[,c(1,10:20)]
ctb3 = ctb2[, 2:12] #profile matrix
ctb3 = ifelse( is.na(ctb3), 0, 1);
ctb3 = data.frame(ctb3);
#head(ctb3); #passed
ctb3$cereushits = ctb3$Ban + ctb3$Bth + ctb3$Bce + ctb3$Bwe
ctb3$subtilishits = ctb3$Bam + ctb3$Bli + ctb3$Bpu
ctb3$outhits = ctb3$Bha + ctb3$Bcl
ctb3$hits = ctb3$cereushits + ctb3$subtilishits + ctb3$outhits;
#hist(ctb3$cereushits)
### coat free omega, and match to coat orthologous hits in the profile table
coat.tb = read.delim("_free.omega.coat.paml.Jan20,2011.txt", header=F);
coat.tb = coat.tb[ ! is.na(coat.tb$V4), ]
coat.tb$V2 = as.character(coat.tb$V2)
positions = match( coat.tb$V1, rownames(ctb3))
#ctb3[positions[1:5], ] #check, passed
coat.tb = cbind( coat.tb, ctb3[positions, c('cereushits','subtilishits', 'outhits', 'hits')])
#head(coat.tb)
### nonCE free omega
nonCE.tb = read.delim("_nonCE.omega.7March2011.tab", header=F);
nonCE.tb = nonCE.tb[nonCE.tb$V2=='H1C',] # H1C is where free omega are calculated
nonCE.tb$V3 = as.character(nonCE.tb$V3)
#remove 999s Many labile coat genes have omega=999??
#Wcutoff = 998
#coat.tb$V10[coat.tb$V10> Wcutoff ] = NA
#nonCE.tb$V11[nonCE.tb$V11> Wcutoff ] = NA
#### parse the omega results
##need to add more
clades = unique(c( nonCE.tb$V3, coat.tb$V2))
# "(Bha,Bcl)" "Bha" "Bcl"
#"(Bcl,Bli,Bam,Bsu)"
#BsuClade = c("(Bpu,Bli,Bam,Bsu)","(Bli,Bam,Bsu)","(Bpu,Bli,Bsu)", "Bpu", "Bli", "(Bam,Bsu)", "Bam", "Bsu", "(Bpu,Bam,Bsu)", "(Bpu,Bli,Bam,Bmo,Bsu)", "(Bli,Bam,Bmo,Bsu)", "(Bam,Bmo,Bsu)","(Bmo,Bsu)","Bmo","(Bli,Bam,Bsu,Bmo)", "(Bam,Bsu,Bmo)", "(Bsu,Bmo)", "(Bpu,Bam,Bmo,Bsu)", "(Bli,Bmo,Bsu)", "(Bli,Bsu)" ); #p=0.0305
#leaf branches for publication
#BsuClade = c("Bpu", "Bli", "Bam", "Bsu"); #p-value = 0.00823,
# internal branches for publication
BsuClade = c("(Bpu,Bli,Bam,Bsu)","(Bli,Bam,Bsu)","(Bpu,Bli,Bsu)", "Bpu", "Bli", "(Bam,Bsu)", "Bam", "(Bpu,Bam,Bsu)", "(Bpu,Bli,Bam,Bmo,Bsu)", "(Bli,Bam,Bmo,Bsu)", "(Bam,Bmo,Bsu)","(Bmo,Bsu)","Bmo","(Bli,Bam,Bsu,Bmo)", "(Bam,Bsu,Bmo)", "(Bsu,Bmo)", "(Bpu,Bam,Bmo,Bsu)", "(Bli,Bmo,Bsu)", "(Bli,Bsu)" ); #no Bsu leaf, p=0.13
#BsuClade = c("(Bli,Bam,Bsu)","Bli","(Bam,Bsu)","Bam","Bsu","(Bpu,Bam,Bsu)", "(Bli,Bam,Bmo,Bsu)", "(Bam,Bmo,Bsu)", "(Bmo,Bsu)","Bmo","(Bli,Bam,Bsu,Bmo)", "(Bam,Bsu,Bmo)","(Bsu,Bmo)","(Bli,Bmo,Bsu)","(Bli,Bsu)" ); # p-value = 0.01123, no Bpu
#BsuClade = c("(Bpu,Bli,Bam,Bsu)","(Bli,Bam,Bsu)","(Bpu,Bli,Bsu)", "(Bam,Bsu)", "(Bpu,Bam,Bsu)", "(Bpu,Bli,Bam,Bmo,Bsu)", "(Bli,Bam,Bmo,Bsu)", "(Bam,Bmo,Bsu)","(Bmo,Bsu)","(Bli,Bam,Bsu,Bmo)", "(Bam,Bsu,Bmo)", "(Bsu,Bmo)", "(Bpu,Bam,Bmo,Bsu)", "(Bli,Bmo,Bsu)", "(Bli,Bsu)" ); #no leaf nodes, high omega all over the places, large p-value
#so, these suggest coat gene only recently evolves faster than the nonCE genes, suggest adpation to niches
#BsuClade = c("Bli", "Bam", "Bsu"); #0.01082
#BsuClade = c("Bpu", "Bam", "Bsu"); # p-value = 0.002779
#** BsuClade = c("Bsu") # p-value = 0.002939
#**BsuClade = c("Bam") # p=0.0858
#BsuClade = c("Bsu", "Bmo", "(Bsu,Bmo)","(Bmo,Bsu)") # p-value = 3.483e-08
## Tail of an exploratory dN/dS ("omega") analysis: compare omega values
## (column V10 of coat.tb, column V11 of nonCE.tb) between a B. subtilis-like
## clade (BsuClade) and a B. cereus-like clade (BceClade).
## NOTE(review): nonCE.tb, coat.tb, ctb3 and BsuClade are created earlier in
## this script; every BsuClade assignment below is commented out, so BsuClade
## must already be defined for the flagging loops to work -- confirm.
#BsuClade = c("Bsu", "Bmo") # p-value = 4.654e-06
#BsuClade = c("Bsu", "(Bsu,Bmo)","(Bmo,Bsu)") # p-value = 1.899e-05
#BsuClade = c("Bli") # p=0.5 ??
#BsuClade = c("Bpu") # p=0.19
#BsuClade = c("(Bam,Bmo,Bsu)", "(Bsu,Bmo)", "(Bam,Bsu)") #large p
#BsuClade = c( "(Bam,Bsu)") #p=0.1568
BceClade = c("(Bwe,Bce,Ban,Bth)","(Bce,Ban,Bth)","(Ban,Bth)","(Bwe,Bce,Bth)","(Bce,Bth)", "(Bwe,Ban,Bth)","Bwe","Ban","Bth","Bce"); # p=0.1694 cCW>nCW
#BceClade = c("(Bwe,Bce,Ban,Bth)","(Bwe,Bce,Bth)", "(Bwe,Ban,Bth)"); # ?? ambiguous
#BceClade = c("(Bwe,Bce,Ban,Bth)","(Bce,Ban,Bth)","(Ban,Bth)","(Bwe,Bce,Bth)","(Bce,Bth)", "(Bwe,Ban,Bth)"); #p=0.214 cCW > nCW
#BceClade = c("Ban","Bth","Bce"); #p=0.25 cCW < nCW,
#BceClade = c("(Bce,Ban,Bth)","(Ban,Bth)","(Bce,Bth)"); #p=0.1886
#BceClade = c("(Bce,Ban,Bth)","(Ban,Bth)","(Bce,Bth)", "Ban","Bth","Bce"); # p = 0.157
# Tag each row with its clade ('bsu' or 'bce') by matching the clade labels
# against the taxon column (V3 in nonCE.tb, V2 in coat.tb).
nonCE.tb$flag = NA;
coat.tb$flag = NA;
for( j in 1:length(BsuClade)){
  nonCE.tb$flag[ nonCE.tb$V3 == BsuClade[j] ] = 'bsu';
  coat.tb$flag[ coat.tb$V2==BsuClade[j] ] = 'bsu';
}
for( j in 1:length(BceClade)){
  nonCE.tb$flag[ nonCE.tb$V3 == BceClade[j] ] = 'bce';
  coat.tb$flag[ coat.tb$V2==BceClade[j] ] = 'bce';
}
table( coat.tb$V2, coat.tb$flag) #check flags, passed. 051411
# Dichotomise omega at 1 (omega > 1 flagged 'high', otherwise 'low').
nonCE.tb$omegaFlag = ifelse(nonCE.tb$V11>1, 'high','low' )
coat.tb$omegaFlag = ifelse(coat.tb$V10>1, 'high','low' )
table( nonCE.tb$flag, nonCE.tb$omegaFlag )
table( coat.tb$flag, coat.tb$omegaFlag )
table( coat.tb$V2, coat.tb$omegaFlag)
table( coat.tb$flag, coat.tb$V2, coat.tb$omegaFlag)
# Fraction of 'high' omega calls per gene (coat.tb$V1), first row dropped.
# NOTE(review): tmptb is a 'table'; as.data.frame() on a table returns the
# long (Var1/Var2/Freq) form, so coat.summary.tb$high / $low may not exist
# as columns -- verify this step actually produces highPer as intended.
tmptb = table( coat.tb$V1, coat.tb$omegaFlag)
tmptb = tmptb[-1, ]
coat.summary.tb = as.data.frame( tmptb )
coat.summary.tb$highPer = coat.summary.tb$high / ( coat.summary.tb$low + coat.summary.tb$high)
positions = match( rownames(ctb3), rownames(coat.summary.tb) )
ctb3 = cbind( ctb3, coat.summary.tb[positions, ])
plot(ctb3$highPer ~ jitter(ctb3$cereushits) )
summary(lm(ctb3$highPer ~ ctb3$cereushits)) #significant, conserved coat genes evolve have more high omegas? how about bsu- bce- clades?
#summary(lm( nonCE.tb$V11 ~ nonCE.tb$flag ))
#summary(lm( coat.tb$V10 ~ coat.tb$flag + coat.tb$hits ))
#summary(lm( coat.tb$V10 ~ coat.tb$hits )) #not good
#summary(lm( coat.tb$V10 ~ coat.tb$subtilishits )) # terrible
#summary(lm( log(coat.tb$V10) ~ coat.tb$cereushits )) #p=0.3488
summary(lm( coat.tb$V10 ~ coat.tb$cereushits )) #p=0.0037
plot( coat.tb$V10 ~ jitter(coat.tb$cereushits) )
# Per-clade omega vectors: n* = non-coat genes, c* = coat genes;
# coat genes further split by conservation level (cereushits).
nBW = nonCE.tb$V11[nonCE.tb$flag=='bsu'];
nCW = nonCE.tb$V11[nonCE.tb$flag=='bce'];
cBW = coat.tb$V10[coat.tb$flag=='bsu'];
cCW = coat.tb$V10[coat.tb$flag=='bce'];
cBW.conserved = coat.tb$V10[coat.tb$flag=='bsu' & coat.tb$cereushits>=4];
cCW.conserved = coat.tb$V10[coat.tb$flag=='bce' & coat.tb$cereushits>=4];
cBW.labile = coat.tb$V10[coat.tb$flag=='bsu' & coat.tb$cereushits <=3 ];
cCW.labile = coat.tb$V10[coat.tb$flag=='bce' & coat.tb$cereushits <=3 ];
# Rows with omega above / below 1, and their taxon composition.
n.high = nonCE.tb[nonCE.tb$V11>1, ]
n.low = nonCE.tb[nonCE.tb$V11<1, ]
c.high = coat.tb[coat.tb$V10>1, ]
c.low = coat.tb[coat.tb$V10<1, ]
table(n.high$V3)
table(n.low$V3)
table(c.high$V2)
table(c.low$V2)
####################### summary
#summary(log(nBW))
#summary(log(cBW))
#summary(log(nCW))
#summary(log(cCW))
summary(nBW)
summary(cBW)
summary(nCW)
summary(cCW)
summary(cBW.conserved)
summary(cBW.labile)
summary(cCW.conserved)
summary(cCW.labile)
### analysis of Bsu clade: one-sided tests of coat vs non-coat omega
BsuClade
summary(nBW)
summary(cBW)
summary(cBW.conserved)
summary(cBW.labile)
BsuClade
wilcox.test( cBW, nBW, al='gr') #
t.test( log(cBW), log(nBW), al='gr')
ks.test( cBW, nBW, al='gr')
wilcox.test( cBW.conserved, nBW, al='less') #
wilcox.test( cBW.labile, nBW, al='gr') #
### analysis of Bce clade
summary(nCW)
summary(cCW)
summary(cCW.conserved)
summary(cCW.labile)
#t.test( cCW, nCW, al='gr' )
t.test( log(cCW), log(nCW), al='less' )
wilcox.test( cCW, nCW, al='less')
ks.test( cCW, nCW, al="less" )
#wilcox.test( cCW.conserved, nCW, al='less')
#wilcox.test( cCW.labile, nCW, al='gr')
#wilcox.test( log(cCW), log(nCW), al='gr' ) #for log transformation make no difference for wilcox test
#q("no")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/upseRt.R
\name{append_string}
\alias{append_string}
\title{Append string to each element of character vector}
\usage{
append_string(char_vector, string_to_append)
}
\arguments{
\item{char_vector}{A character vector}
\item{string_to_append}{String to append}
}
\value{
Character vector with string appended to each element
}
\description{
Append string to each element of character vector
}
| /man/append_string.Rd | no_license | ganesh-krishnan/upseRt | R | false | true | 472 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/upseRt.R
\name{append_string}
\alias{append_string}
\title{Append string to each element of character vector}
\usage{
append_string(char_vector, string_to_append)
}
\arguments{
\item{char_vector}{A character vector}
\item{string_to_prepend}{String to prepend}
}
\value{
Character vector with string appended to each element
}
\description{
Append string to each element of character vector
}
|
#' Naive method for Cross-validation in Multiply Imputed datasets
#'
#' \code{MI_cv_naive} Cross-validation by applying multiply imputed pooled models in train
#' and test folds. Called by function \code{psfmi_perform}.
#'
#' @param pobj An object of class \code{pmods} (pooled models), produced by a previous
#' call to \code{psfmi_lr}.
#' @param folds The number of folds, default is 3.
#' @param p.crit A numerical scalar. P-value selection criterion used for backward
#'  selection during cross-validation. When set at 1, pooling and internal validation
#'  are done without backward selection.
#' @param BW If TRUE backward selection is conducted within cross-validation. Default is FALSE.
#' @param cv_naive_appt Default is TRUE for showing the cross-validation apparent (train) and
#' test results. Set to FALSE to only give test results.
#'
#' @seealso \code{\link{psfmi_perform}}
#' @author Martijn Heymans, 2020
#' @keywords internal
#'
#' @export
MI_cv_naive <- function(pobj, folds = 3, p.crit = 1, BW=FALSE, cv_naive_appt=TRUE)
{
  # Build the train/test fold row ids once, stratified on the outcome, from
  # the FIRST imputed dataset, and reuse the same folds in every imputed
  # dataset so folds are comparable across imputations.
  # Each element of idfold holds the row ids of the TEST fold.
  idfold <- map(vfold_cv(pobj$data[pobj$data[pobj$impvar] == 1, ],
                         v=folds, strata = pobj$Outcome)$splits,
                function(x) {
                  id_train <- x[[2]]
                  id_test <- as.integer(row.names(x[[1]]))[-id_train]
                })
  # Per-imputation accumulators for train/test performance measures.
  # NOTE(review): test_cv and auc_se.i_train are initialised here but never
  # filled below, and cal_coef.i appears twice in this chain -- leftovers.
  test_cv <- auc.i_train <- auc_se.i_train <-
    rsq.i_train <- sc_brier.i_train <- cal_coef.i <-
    auc.i_test <- auc_se.i_test <- rsq.i_test <-
    sc_brier.i_test <- cal_coef.i <- list()
  # Fit and evaluate the model in every fold of every imputed dataset.
  for(i in 1:pobj$nimp)
  {
    message("\n", "Imputation ", i)
    dat_imp <-
      pobj$data[pobj$data[pobj$impvar] == i, ]
    cv_perform <- lapply(idfold,
                         function(x) {
                           # x holds the TEST row ids; the remainder is the train fold.
                           train_data <- dat_imp[-x, ]
                           Y <- c(paste(pobj$Outcome, paste("~")))
                           fm_train <- as.formula(paste(Y, paste(pobj$predictors_final, collapse = "+")))
                           # Optionally repeat backward selection inside each train fold.
                           if(BW==TRUE){
                             pobj_bw <- glm_bw(formula = fm_train, data = train_data,
                                               p.crit = p.crit, keep.predictors = pobj$keep.predictors,
                                               model_type="binomial")
                             # Intercept-only model when nothing survives selection.
                             if(is_empty(pobj_bw$predictors_final))
                               pobj_bw$predictors_final <- 1
                             fm_train <-
                               as.formula(paste(Y, paste(pobj_bw$predictors_final, collapse = "+")))
                           }
                           fit_train <- glm(fm_train, data=train_data, family=binomial)
                           # Apparent (train fold) performance: AUC, scaled Brier, R2.
                           pr_train <-
                             predict(fit_train, type="response")
                           auc_train <- pROC::roc(fit_train$y, pr_train, quiet = TRUE)$auc
                           se_auc_train <- sqrt(pROC::var(auc_train))
                           sc_brier_train <-
                             scaled_brier(fit_train$y, pr_train)
                           # Nagelkerke R-squared
                           rsq_train <- rsq_nagel(fit_train)
                           # Test fold: predicted probabilities plus the linear
                           # predictor for the calibration (intercept/slope) model.
                           test_data <- dat_imp[x, ]
                           pr_test <- predict(fit_train, newdata = test_data, type="response")
                           lp_test <- predict(fit_train, newdata = test_data)
                           coef_test <- coef(glm(unlist(test_data[pobj$Outcome]) ~ lp_test, family=binomial))
                           fit_test <- glm(unlist(test_data[pobj$Outcome]) ~ lp_test, family=binomial)
                           # Fall back to slope 1 when the calibration slope is inestimable.
                           if(any(is.na(coef_test)))
                             coef_test[2] <- replace_na(coef_test[2], 1)
                           # brier score
                           sc_brier_test <- scaled_brier(fit_test$y, pr_test)
                           # Nagelkerke R-squared
                           rsq_test <- rsq_nagel(fit_test)
                           # NOTE(review): the last element below is unnamed; it is the
                           # train-fold AUC standard error and is not used downstream.
                           list(folds=x, pred_train=pr_train, obs_train=fit_train$y,
                                pred_test=pr_test, obs_test=fit_test$y,
                                rsq_train=rsq_train, rsq_test=rsq_test,
                                sc_brier_train=sc_brier_train,
                                sc_brier_test=sc_brier_test,
                                coef_test=coef_test, auc_train = auc_train, se_auc_train)
                         })
    id_folds_test <-
      sapply(cv_perform, function(x) x[1])
    pred_outcome_test <-
      unlist(sapply(cv_perform, function(x) x[4]))
    obs_outcome_test <-
      unlist(sapply(cv_perform, function(x) x[5]))
    # Pool the train AUCs across folds on the logit scale.
    auc.i_train[[i]] <-
      mean_auc_log(sapply(cv_perform, function(x) x$auc_train))
    # Cross-validated test AUC (and SE) over all folds of this imputation.
    cvAUC_test <-
      ci.cvAUC(predictions=pred_outcome_test, labels=obs_outcome_test,
               folds=id_folds_test, confidence=0.95)
    auc.i_test[[i]] <-
      cvAUC_test$cvAUC
    auc_se.i_test[[i]] <-
      cvAUC_test$se
    rsq.i_train[[i]] <-
      mean(unlist(sapply(cv_perform,
                         function(x) x$rsq_train)), na.rm = TRUE)
    sc_brier.i_train[[i]] <-
      mean(unlist(sapply(cv_perform,
                         function(x) x$sc_brier_train)), na.rm = TRUE)
    rsq.i_test[[i]] <-
      mean(unlist(sapply(cv_perform,
                         function(x) x$rsq_test)), na.rm = TRUE)
    sc_brier.i_test[[i]] <-
      mean(unlist(sapply(cv_perform,
                         function(x) x$sc_brier_test)), na.rm = TRUE)
    # Mean calibration intercept/slope over the folds of this imputation.
    cal_coef.i[[i]] <-
      colMeans(t(sapply(cv_perform,
                        function(x) x$coef_test)), na.rm = TRUE)
  }
  # Pool the train R-squared across imputations via the Fisher z
  # transformation: atanh, average, then back-transform with tanh.
  z.rsq_train <-
    atanh(unlist(rsq.i_train))
  z.rsq.p_train <-
    mean(z.rsq_train)
  # inv Fisher z = pooled rsq
  pool_R2_train <-
    tanh(z.rsq.p_train)
  pool_sc_brier_train <-
    mean(unlist(sc_brier.i_train))
  # Same Fisher z pooling for the test R-squared.
  z.rsq_test <-
    atanh(unlist(rsq.i_test))
  z.rsq.p_test <-
    mean(z.rsq_test)
  # inv Fisher z = pooled rsq
  pool_R2_test <-
    tanh(z.rsq.p_test)
  pool_sc_brier_test <-
    mean(unlist(sc_brier.i_test))
  # Pooled calibration intercept and slope across imputations.
  pool_coef <-
    colMeans(do.call("rbind", cal_coef.i) )
  names(pool_coef) <-
    c("Intercept", "Slope")
  auc_pooled_train <-
    mean_auc_log(unlist(auc.i_train))
  auc_pooled_test <-
    pool_auc(est_auc = auc.i_test, est_se = auc_se.i_test, nimp = pobj$nimp, log_auc = TRUE)
  auc <-
    c(auc_pooled_train, auc_pooled_test[2])
  sc_brier <-
    c(pool_sc_brier_train, pool_sc_brier_test)
  rsq <-
    c(pool_R2_train, pool_R2_test)
  # Assemble the 3 x 2 (statistic x Train/Test) summary table.
  cv_stats <-
    data.frame(matrix(c(auc, sc_brier, rsq), 3, 2, byrow = TRUE))
  row.names(cv_stats) <-
    c("AUC", "Brier scaled", "R-squared")
  names(cv_stats) <-
    c("Train", "Test")
  rescv <-
    list(cv_stats = cv_stats, auc_test=auc_pooled_test, test_coef=pool_coef)
  # Optionally add the apparent performance of the pooled model fitted on
  # the full (un-cross-validated) imputed datasets.
  if(cv_naive_appt){
    Y <- c(paste(pobj$Outcome, paste("~")))
    if(is_empty(pobj$predictors_final)) {
      pobj$predictors_final <- 1
      fm <- as.formula(paste(Y, paste(pobj$predictors_final, collapse = "+")))
    } else {
      fm <- as.formula(paste(Y, paste(pobj$predictors_final, collapse = "+")))
    }
    perform_mi_orig <-
      pool_performance_internal(data=pobj$data, nimp = pobj$nimp,
                                impvar=pobj$impvar,
                                formula = fm)
    rescv <- list(Test=rescv, Apparent=perform_mi_orig)
  }
  return(rescv)
} | /R/MI_cv_naive.R | no_license | mwheymans/psfmi | R | false | false | 7,937 | r | #' Naive method for Cross-validation in Multiply Imputed datasets
#'
#' \code{MI_cv_naive} Cross-validation by applying multiply imputed pooled models in train
#' and test folds. Called by function \code{psfmi_perform}.
#'
#' @param pobj An object of class \code{pmods} (pooled models), produced by a previous
#' call to \code{psfmi_lr}.
#' @param folds The number of folds, default is 3.
#' @param p.crit A numerical scalar. P-value selection criterion used for backward
#'  selection during cross-validation. When set at 1, pooling and internal validation
#'  are done without backward selection.
#' @param BW If TRUE backward selection is conducted within cross-validation. Default is FALSE.
#' @param cv_naive_appt Default is TRUE for showing the cross-validation apparent (train) and
#' test results. Set to FALSE to only give test results.
#'
#' @seealso \code{\link{psfmi_perform}}
#' @author Martijn Heymans, 2020
#' @keywords internal
#'
#' @export
MI_cv_naive <- function(pobj, folds = 3, p.crit = 1, BW=FALSE, cv_naive_appt=TRUE)
{
  # Build the train/test fold row ids once, stratified on the outcome, from
  # the FIRST imputed dataset, and reuse the same folds in every imputed
  # dataset so folds are comparable across imputations.
  # Each element of idfold holds the row ids of the TEST fold.
  idfold <- map(vfold_cv(pobj$data[pobj$data[pobj$impvar] == 1, ],
                         v=folds, strata = pobj$Outcome)$splits,
                function(x) {
                  id_train <- x[[2]]
                  id_test <- as.integer(row.names(x[[1]]))[-id_train]
                })
  # Per-imputation accumulators for train/test performance measures.
  # NOTE(review): test_cv and auc_se.i_train are initialised here but never
  # filled below, and cal_coef.i appears twice in this chain -- leftovers.
  test_cv <- auc.i_train <- auc_se.i_train <-
    rsq.i_train <- sc_brier.i_train <- cal_coef.i <-
    auc.i_test <- auc_se.i_test <- rsq.i_test <-
    sc_brier.i_test <- cal_coef.i <- list()
  # Fit and evaluate the model in every fold of every imputed dataset.
  for(i in 1:pobj$nimp)
  {
    message("\n", "Imputation ", i)
    dat_imp <-
      pobj$data[pobj$data[pobj$impvar] == i, ]
    cv_perform <- lapply(idfold,
                         function(x) {
                           # x holds the TEST row ids; the remainder is the train fold.
                           train_data <- dat_imp[-x, ]
                           Y <- c(paste(pobj$Outcome, paste("~")))
                           fm_train <- as.formula(paste(Y, paste(pobj$predictors_final, collapse = "+")))
                           # Optionally repeat backward selection inside each train fold.
                           if(BW==TRUE){
                             pobj_bw <- glm_bw(formula = fm_train, data = train_data,
                                               p.crit = p.crit, keep.predictors = pobj$keep.predictors,
                                               model_type="binomial")
                             # Intercept-only model when nothing survives selection.
                             if(is_empty(pobj_bw$predictors_final))
                               pobj_bw$predictors_final <- 1
                             fm_train <-
                               as.formula(paste(Y, paste(pobj_bw$predictors_final, collapse = "+")))
                           }
                           fit_train <- glm(fm_train, data=train_data, family=binomial)
                           # Apparent (train fold) performance: AUC, scaled Brier, R2.
                           pr_train <-
                             predict(fit_train, type="response")
                           auc_train <- pROC::roc(fit_train$y, pr_train, quiet = TRUE)$auc
                           se_auc_train <- sqrt(pROC::var(auc_train))
                           sc_brier_train <-
                             scaled_brier(fit_train$y, pr_train)
                           # Nagelkerke R-squared
                           rsq_train <- rsq_nagel(fit_train)
                           # Test fold: predicted probabilities plus the linear
                           # predictor for the calibration (intercept/slope) model.
                           test_data <- dat_imp[x, ]
                           pr_test <- predict(fit_train, newdata = test_data, type="response")
                           lp_test <- predict(fit_train, newdata = test_data)
                           coef_test <- coef(glm(unlist(test_data[pobj$Outcome]) ~ lp_test, family=binomial))
                           fit_test <- glm(unlist(test_data[pobj$Outcome]) ~ lp_test, family=binomial)
                           # Fall back to slope 1 when the calibration slope is inestimable.
                           if(any(is.na(coef_test)))
                             coef_test[2] <- replace_na(coef_test[2], 1)
                           # brier score
                           sc_brier_test <- scaled_brier(fit_test$y, pr_test)
                           # Nagelkerke R-squared
                           rsq_test <- rsq_nagel(fit_test)
                           # NOTE(review): the last element below is unnamed; it is the
                           # train-fold AUC standard error and is not used downstream.
                           list(folds=x, pred_train=pr_train, obs_train=fit_train$y,
                                pred_test=pr_test, obs_test=fit_test$y,
                                rsq_train=rsq_train, rsq_test=rsq_test,
                                sc_brier_train=sc_brier_train,
                                sc_brier_test=sc_brier_test,
                                coef_test=coef_test, auc_train = auc_train, se_auc_train)
                         })
    id_folds_test <-
      sapply(cv_perform, function(x) x[1])
    pred_outcome_test <-
      unlist(sapply(cv_perform, function(x) x[4]))
    obs_outcome_test <-
      unlist(sapply(cv_perform, function(x) x[5]))
    # Pool the train AUCs across folds on the logit scale.
    auc.i_train[[i]] <-
      mean_auc_log(sapply(cv_perform, function(x) x$auc_train))
    # Cross-validated test AUC (and SE) over all folds of this imputation.
    cvAUC_test <-
      ci.cvAUC(predictions=pred_outcome_test, labels=obs_outcome_test,
               folds=id_folds_test, confidence=0.95)
    auc.i_test[[i]] <-
      cvAUC_test$cvAUC
    auc_se.i_test[[i]] <-
      cvAUC_test$se
    rsq.i_train[[i]] <-
      mean(unlist(sapply(cv_perform,
                         function(x) x$rsq_train)), na.rm = TRUE)
    sc_brier.i_train[[i]] <-
      mean(unlist(sapply(cv_perform,
                         function(x) x$sc_brier_train)), na.rm = TRUE)
    rsq.i_test[[i]] <-
      mean(unlist(sapply(cv_perform,
                         function(x) x$rsq_test)), na.rm = TRUE)
    sc_brier.i_test[[i]] <-
      mean(unlist(sapply(cv_perform,
                         function(x) x$sc_brier_test)), na.rm = TRUE)
    # Mean calibration intercept/slope over the folds of this imputation.
    cal_coef.i[[i]] <-
      colMeans(t(sapply(cv_perform,
                        function(x) x$coef_test)), na.rm = TRUE)
  }
  # Pool the train R-squared across imputations via the Fisher z
  # transformation: atanh, average, then back-transform with tanh.
  z.rsq_train <-
    atanh(unlist(rsq.i_train))
  z.rsq.p_train <-
    mean(z.rsq_train)
  # inv Fisher z = pooled rsq
  pool_R2_train <-
    tanh(z.rsq.p_train)
  pool_sc_brier_train <-
    mean(unlist(sc_brier.i_train))
  # Same Fisher z pooling for the test R-squared.
  z.rsq_test <-
    atanh(unlist(rsq.i_test))
  z.rsq.p_test <-
    mean(z.rsq_test)
  # inv Fisher z = pooled rsq
  pool_R2_test <-
    tanh(z.rsq.p_test)
  pool_sc_brier_test <-
    mean(unlist(sc_brier.i_test))
  # Pooled calibration intercept and slope across imputations.
  pool_coef <-
    colMeans(do.call("rbind", cal_coef.i) )
  names(pool_coef) <-
    c("Intercept", "Slope")
  auc_pooled_train <-
    mean_auc_log(unlist(auc.i_train))
  auc_pooled_test <-
    pool_auc(est_auc = auc.i_test, est_se = auc_se.i_test, nimp = pobj$nimp, log_auc = TRUE)
  auc <-
    c(auc_pooled_train, auc_pooled_test[2])
  sc_brier <-
    c(pool_sc_brier_train, pool_sc_brier_test)
  rsq <-
    c(pool_R2_train, pool_R2_test)
  # Assemble the 3 x 2 (statistic x Train/Test) summary table.
  cv_stats <-
    data.frame(matrix(c(auc, sc_brier, rsq), 3, 2, byrow = TRUE))
  row.names(cv_stats) <-
    c("AUC", "Brier scaled", "R-squared")
  names(cv_stats) <-
    c("Train", "Test")
  rescv <-
    list(cv_stats = cv_stats, auc_test=auc_pooled_test, test_coef=pool_coef)
  # Optionally add the apparent performance of the pooled model fitted on
  # the full (un-cross-validated) imputed datasets.
  if(cv_naive_appt){
    Y <- c(paste(pobj$Outcome, paste("~")))
    if(is_empty(pobj$predictors_final)) {
      pobj$predictors_final <- 1
      fm <- as.formula(paste(Y, paste(pobj$predictors_final, collapse = "+")))
    } else {
      fm <- as.formula(paste(Y, paste(pobj$predictors_final, collapse = "+")))
    }
    perform_mi_orig <-
      pool_performance_internal(data=pobj$data, nimp = pobj$nimp,
                                impvar=pobj$impvar,
                                formula = fm)
    rescv <- list(Test=rescv, Apparent=perform_mi_orig)
  }
  return(rescv)
} |
# Concatenate all arguments into a character vector with no separator
# (shorthand equivalent to paste0()); extra arguments in ... are
# forwarded to paste() unchanged.
cq <- function(...) {
  paste(..., sep = "")
}
| /R/cq.R | no_license | gideonite/cn_pipeline | R | false | false | 47 | r | cq <-
function(...) {
paste(...,sep='')
}
|
library(checkmate)
library(testthat)
library(sp)
context("gc_geom")
test_that("transform from sf to geom", {
  # POINT: two points -> point geom with an x/y/fid point table of 2 rows
  input <- gtSF$point
  output <- gc_geom(input = input)
  expect_class(output, classes = "geom")
  expect_true(output@type == "point")
  expect_data_frame(output@point, any.missing = FALSE, nrows = 2, ncols = 3)
  # MULTIPOINT: vertices are exploded into 8 single point features
  input <- gtSF$multipoint
  output <- gc_geom(input)
  expect_class(output, classes = "geom")
  expect_true(output@type == "point")
  expect_data_frame(output@point, any.missing = FALSE, nrows = 8, ncols = 3)
  expect_data_frame(output@feature, any.missing = FALSE, nrows = 8, ncols = 3)
  # ... with group = TRUE the two multipoint geometries become two groups
  output <- gc_geom(input, group = TRUE)
  expect_class(output, classes = "geom")
  expect_true(output@type == "point")
  expect_data_frame(output@point, any.missing = FALSE, nrows = 8, ncols = 3)
  expect_data_frame(output@group, any.missing = FALSE, nrows = 2, ncols = 2)
  # LINESTRING
  input <- gtSF$linestring
  output <- gc_geom(input)
  expect_class(output, classes = "geom")
  expect_true(output@type == "line")
  expect_data_frame(output@point, any.missing = FALSE, nrows = 8, ncols = 3)
  # MULTILINESTRING
  input <- gtSF$multilinestring
  output <- gc_geom(input)
  expect_class(output, classes = "geom")
  expect_true(output@type == "line")
  expect_data_frame(output@point, any.missing = FALSE, nrows = 12, ncols = 3)
  # POLYGON
  input <- gtSF$polygon
  output <- gc_geom(input)
  expect_class(output, classes = "geom")
  expect_true(output@type == "polygon")
  expect_data_frame(output@point, any.missing = FALSE, nrows = 15, ncols = 3)
  # MULTIPOLYGON
  input <- gtSF$multipolygon
  output <- gc_geom(input)
  expect_class(output, classes = "geom")
  expect_true(output@type == "polygon")
  expect_data_frame(output@point, any.missing = FALSE, nrows = 25, ncols = 3)
})
# Covers every sp class gc_geom() accepts; assignments normalised to `<-`
# for consistency with the rest of this file (the original mixed `=`/`<-`).
test_that("transform from sp to geom", {
  # test 'SpatialPoints'
  input <- gtSP$SpatialPoints
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "point")
  # test 'SpatialPointsDataFrame'
  input <- SpatialPointsDataFrame(input, data.frame(data = 1:4), match.ID = TRUE)
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "point")
  # test 'SpatialMultiPoints' (expects 8 distinct point features)
  input <- gtSP$SpatialMultiPoints
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "point")
  expect_true(length(unique(output@point$fid)) == 8)
  # test 'SpatialMultiPointsDataFrame'
  input <- SpatialMultiPointsDataFrame(input, data = data.frame(data = 1:2))
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "point")
  expect_data_frame(output@feature, any.missing = FALSE, nrows = 8, ncols = 3)
  # test 'SpatialLines'
  input <- gtSP$SpatialLines
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "line")
  expect_true(length(unique(output@point$fid)) == 2)
  # test 'SpatialLinesDataFrame'
  input <- SpatialLinesDataFrame(input, data = data.frame(data = 1:2), match.ID = FALSE)
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "line")
  expect_data_frame(output@feature, any.missing = FALSE, nrows = 2, ncols = 3)
  # test 'SpatialPolygons'
  input <- gtSP$SpatialPolygons
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "polygon")
  # test 'SpatialPolygonsDataFrame'
  input <- SpatialPolygonsDataFrame(input, data = data.frame(data = 1:2), match.ID = FALSE)
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "polygon")
  expect_data_frame(output@feature, any.missing = FALSE, nrows = 2, ncols = 3)
  # test 'SpatialGrid' (a 5 x 5 unit grid becomes polygon cells)
  x <- GridTopology(c(0, 0), c(1, 1), c(5, 5))
  input <- SpatialGrid(grid = x)
  output <- gc_geom(input = input)
  expect_class(output, "geom")
  expect_true(output@type == "polygon")
  # test 'SpatialGridDataFrame'
  input <- SpatialGridDataFrame(grid = input, data = data.frame(data = letters[1:25]))
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "polygon")
  # test 'SpatialPixels' (pixel centres become points)
  data(meuse.grid)
  pts <- meuse.grid[c("x", "y")]
  input <- SpatialPixels(SpatialPoints(pts))
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "point")
  # test 'SpatialPixelsDataFrame'
  input <- SpatialPixelsDataFrame(points = input, data = meuse.grid)
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "point")
})
test_that("transform from Raster to geom", {
  # RasterStack: one grid geom per layer, returned as a list of length 2
  input <- gtRasters
  output <- gc_geom(input)
  expect_list(x = output, len = 2)
  expect_class(output$categorical, "geom")
  expect_true(output$categorical@type == "grid")
  expect_class(output$continuous, "geom")
  expect_true(output$continuous@type == "grid")
  expect_data_frame(x = output$continuous@group, nrows = 0)
  # RasterStack with grouping: the continuous layer gains a 91-row group table
  output <- gc_geom(input, group = TRUE)
  expect_list(x = output, len = 2)
  expect_class(output$categorical, "geom")
  expect_true(output$categorical@type == "grid")
  expect_class(output$continuous, "geom")
  expect_true(output$continuous@type == "grid")
  expect_data_frame(x = output$continuous@group, nrows = 91)
  # RasterStack with stacking: layers combined into a single geom
  output <- gc_geom(input, stack = TRUE)
  expect_class(output, "geom")
  expect_true(output@type == "grid")
  expect_data_frame(x = output@group, nrows = 9)
  # RasterStack with stacking and grouping
  output <- gc_geom(input, stack = TRUE, group = TRUE)
  expect_class(output, "geom")
  expect_true(output@type == "grid")
  expect_data_frame(x = output@group, nrows = 93)
  # RasterLayer
  input <- gtRasters$continuous
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "grid")
  expect_data_frame(x = output@group, nrows = 0)
  # RasterLayer with grouping
  output <- gc_geom(input, group = TRUE)
  expect_class(output, "geom")
  expect_true(output@type == "grid")
  expect_data_frame(x = output@group, nrows = 91)
})
| /tests/testthat/test_gc_geom.R | no_license | cran/geometr | R | false | false | 6,132 | r | library(checkmate)
library(testthat)
library(sp)
context("gc_geom")
test_that("transform from sf to geom", {
  # POINT: two points -> point geom with an x/y/fid point table of 2 rows
  input <- gtSF$point
  output <- gc_geom(input = input)
  expect_class(output, classes = "geom")
  expect_true(output@type == "point")
  expect_data_frame(output@point, any.missing = FALSE, nrows = 2, ncols = 3)
  # MULTIPOINT: vertices are exploded into 8 single point features
  input <- gtSF$multipoint
  output <- gc_geom(input)
  expect_class(output, classes = "geom")
  expect_true(output@type == "point")
  expect_data_frame(output@point, any.missing = FALSE, nrows = 8, ncols = 3)
  expect_data_frame(output@feature, any.missing = FALSE, nrows = 8, ncols = 3)
  # ... with group = TRUE the two multipoint geometries become two groups
  output <- gc_geom(input, group = TRUE)
  expect_class(output, classes = "geom")
  expect_true(output@type == "point")
  expect_data_frame(output@point, any.missing = FALSE, nrows = 8, ncols = 3)
  expect_data_frame(output@group, any.missing = FALSE, nrows = 2, ncols = 2)
  # LINESTRING
  input <- gtSF$linestring
  output <- gc_geom(input)
  expect_class(output, classes = "geom")
  expect_true(output@type == "line")
  expect_data_frame(output@point, any.missing = FALSE, nrows = 8, ncols = 3)
  # MULTILINESTRING
  input <- gtSF$multilinestring
  output <- gc_geom(input)
  expect_class(output, classes = "geom")
  expect_true(output@type == "line")
  expect_data_frame(output@point, any.missing = FALSE, nrows = 12, ncols = 3)
  # POLYGON
  input <- gtSF$polygon
  output <- gc_geom(input)
  expect_class(output, classes = "geom")
  expect_true(output@type == "polygon")
  expect_data_frame(output@point, any.missing = FALSE, nrows = 15, ncols = 3)
  # MULTIPOLYGON
  input <- gtSF$multipolygon
  output <- gc_geom(input)
  expect_class(output, classes = "geom")
  expect_true(output@type == "polygon")
  expect_data_frame(output@point, any.missing = FALSE, nrows = 25, ncols = 3)
})
# Covers every sp class gc_geom() accepts; assignments normalised to `<-`
# for consistency with the rest of this file (the original mixed `=`/`<-`).
test_that("transform from sp to geom", {
  # test 'SpatialPoints'
  input <- gtSP$SpatialPoints
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "point")
  # test 'SpatialPointsDataFrame'
  input <- SpatialPointsDataFrame(input, data.frame(data = 1:4), match.ID = TRUE)
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "point")
  # test 'SpatialMultiPoints' (expects 8 distinct point features)
  input <- gtSP$SpatialMultiPoints
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "point")
  expect_true(length(unique(output@point$fid)) == 8)
  # test 'SpatialMultiPointsDataFrame'
  input <- SpatialMultiPointsDataFrame(input, data = data.frame(data = 1:2))
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "point")
  expect_data_frame(output@feature, any.missing = FALSE, nrows = 8, ncols = 3)
  # test 'SpatialLines'
  input <- gtSP$SpatialLines
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "line")
  expect_true(length(unique(output@point$fid)) == 2)
  # test 'SpatialLinesDataFrame'
  input <- SpatialLinesDataFrame(input, data = data.frame(data = 1:2), match.ID = FALSE)
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "line")
  expect_data_frame(output@feature, any.missing = FALSE, nrows = 2, ncols = 3)
  # test 'SpatialPolygons'
  input <- gtSP$SpatialPolygons
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "polygon")
  # test 'SpatialPolygonsDataFrame'
  input <- SpatialPolygonsDataFrame(input, data = data.frame(data = 1:2), match.ID = FALSE)
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "polygon")
  expect_data_frame(output@feature, any.missing = FALSE, nrows = 2, ncols = 3)
  # test 'SpatialGrid' (a 5 x 5 unit grid becomes polygon cells)
  x <- GridTopology(c(0, 0), c(1, 1), c(5, 5))
  input <- SpatialGrid(grid = x)
  output <- gc_geom(input = input)
  expect_class(output, "geom")
  expect_true(output@type == "polygon")
  # test 'SpatialGridDataFrame'
  input <- SpatialGridDataFrame(grid = input, data = data.frame(data = letters[1:25]))
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "polygon")
  # test 'SpatialPixels' (pixel centres become points)
  data(meuse.grid)
  pts <- meuse.grid[c("x", "y")]
  input <- SpatialPixels(SpatialPoints(pts))
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "point")
  # test 'SpatialPixelsDataFrame'
  input <- SpatialPixelsDataFrame(points = input, data = meuse.grid)
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "point")
})
test_that("transform from Raster to geom", {
  # RasterStack: one grid geom per layer, returned as a list of length 2
  input <- gtRasters
  output <- gc_geom(input)
  expect_list(x = output, len = 2)
  expect_class(output$categorical, "geom")
  expect_true(output$categorical@type == "grid")
  expect_class(output$continuous, "geom")
  expect_true(output$continuous@type == "grid")
  expect_data_frame(x = output$continuous@group, nrows = 0)
  # RasterStack with grouping: the continuous layer gains a 91-row group table
  output <- gc_geom(input, group = TRUE)
  expect_list(x = output, len = 2)
  expect_class(output$categorical, "geom")
  expect_true(output$categorical@type == "grid")
  expect_class(output$continuous, "geom")
  expect_true(output$continuous@type == "grid")
  expect_data_frame(x = output$continuous@group, nrows = 91)
  # RasterStack with stacking: layers combined into a single geom
  output <- gc_geom(input, stack = TRUE)
  expect_class(output, "geom")
  expect_true(output@type == "grid")
  expect_data_frame(x = output@group, nrows = 9)
  # RasterStack with stacking and grouping
  output <- gc_geom(input, stack = TRUE, group = TRUE)
  expect_class(output, "geom")
  expect_true(output@type == "grid")
  expect_data_frame(x = output@group, nrows = 93)
  # RasterLayer
  input <- gtRasters$continuous
  output <- gc_geom(input)
  expect_class(output, "geom")
  expect_true(output@type == "grid")
  expect_data_frame(x = output@group, nrows = 0)
  # RasterLayer with grouping
  output <- gc_geom(input, group = TRUE)
  expect_class(output, "geom")
  expect_true(output@type == "grid")
  expect_data_frame(x = output@group, nrows = 91)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mlst.R
\name{cgmlst_distance}
\alias{cgmlst_distance}
\title{cgmlst_distance}
\usage{
cgmlst_distance(df)
}
\arguments{
\item{df}{Data Frame from 'get_cgmlst' function}
}
\value{
Parsed JSON response.
}
\description{
Using output from 'get_cgmlst' determine the pairwise hamming distance between each sample.
}
\examples{
cgmlst_distance(cgmlst_data_frame)
}
| /staphopia/man/cgmlst_distance.Rd | no_license | staphopia/staphopia-r | R | false | true | 452 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mlst.R
\name{cgmlst_distance}
\alias{cgmlst_distance}
\title{cgmlst_distance}
\usage{
cgmlst_distance(df)
}
\arguments{
\item{df}{Data Frame from 'get_cgmlst' function}
}
\value{
Parsed JSON response.
}
\description{
Using output from 'get_cgmlst' determine the pairwise hamming distance between each sample.
}
\examples{
cgmlst_distance(cgmlst_data_frame)
}
|
library(RPMG)
### Name: butdoc
### Title: Button Documentation for RPMG codes
### Aliases: butdoc
### Keywords: misc

### ** Examples

# Button labels used by the RPMG trace-viewer GUI, and one matching
# one-line description per button (same order as ALLLABS).
ALLLABS <- c("DONE", "REFRESH", "EPS", "LINE", "DECIM", "MAP", "SURF", "TRACE",
             "TTC", "CITY", "TRcol", "STName", "Pick", "ZOOM", "UNZOOM",
             "IDARR", "FILT", "UnFILT", "P-GEN")
N <- length(ALLLABS)
# Use the full argument name (length.out) instead of partial matching.
DOC <- rep(NA, length.out = N)
DOC[1] <- "Quit and return to calling program"
DOC[2] <- "refresh screen"
DOC[3] <- "Postscript plot"
DOC[4] <- "draw a line (even number of clicks)"
DOC[5] <- "Decimate the traces"
DOC[6] <- "Make a map with great circles"
DOC[7] <- "Draw a set of surface wave arrivals"
DOC[8] <- "Toggle drawing of traces"
DOC[9] <- "Travel Time Curves"
DOC[10] <- "put random cities on X-axis"
DOC[11] <- "toggle plotting traces with colors"
DOC[12] <- "put station names on X-axis"
DOC[13] <- "Pick arrivals on one trace"
DOC[14] <- "Zoom display (need two clicks on screen)"
DOC[15] <- "unzoom to original display"
DOC[16] <- "Identify traces"
DOC[17] <- "Filter traces with a set of filters provided"
DOC[18] <- "Unfilter traces to original display"
DOC[19] <- "Run PICK.GEN on selected traces: select on the tags at X-axis"
# Display the button documentation without opening a new device.
butdoc(ALLLABS, DOC, NEW = FALSE)
| /data/genthat_extracted_code/RPMG/examples/butdoc.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,200 | r | library(RPMG)
### Name: butdoc
### Title: Button Documentation for RPMG codes
### Aliases: butdoc
### Keywords: misc

### ** Examples

# Button labels used by the RPMG trace-viewer GUI, and one matching
# one-line description per button (same order as ALLLABS).
ALLLABS <- c("DONE", "REFRESH", "EPS", "LINE", "DECIM", "MAP", "SURF", "TRACE",
             "TTC", "CITY", "TRcol", "STName", "Pick", "ZOOM", "UNZOOM",
             "IDARR", "FILT", "UnFILT", "P-GEN")
N <- length(ALLLABS)
# Use the full argument name (length.out) instead of partial matching.
DOC <- rep(NA, length.out = N)
DOC[1] <- "Quit and return to calling program"
DOC[2] <- "refresh screen"
DOC[3] <- "Postscript plot"
DOC[4] <- "draw a line (even number of clicks)"
DOC[5] <- "Decimate the traces"
DOC[6] <- "Make a map with great circles"
DOC[7] <- "Draw a set of surface wave arrivals"
DOC[8] <- "Toggle drawing of traces"
DOC[9] <- "Travel Time Curves"
DOC[10] <- "put random cities on X-axis"
DOC[11] <- "toggle plotting traces with colors"
DOC[12] <- "put station names on X-axis"
DOC[13] <- "Pick arrivals on one trace"
DOC[14] <- "Zoom display (need two clicks on screen)"
DOC[15] <- "unzoom to original display"
DOC[16] <- "Identify traces"
DOC[17] <- "Filter traces with a set of filters provided"
DOC[18] <- "Unfilter traces to original display"
DOC[19] <- "Run PICK.GEN on selected traces: select on the tags at X-axis"
# Display the button documentation without opening a new device.
butdoc(ALLLABS, DOC, NEW = FALSE)
|
86e0f4591dcc856e81f3f11fc89dfb83 query21_query58_1344.qdimacs 3229 9982 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query21_query58_1344/query21_query58_1344.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 71 | r | 86e0f4591dcc856e81f3f11fc89dfb83 query21_query58_1344.qdimacs 3229 9982 |
## Interactive tutorial script: explore the UCI "Facebook metrics" dataset
## with base R and dplyr. The ?topic lines open help pages and View() opens
## the data viewer, so this script is meant to be stepped through in RStudio.
## NOTE(review): setwd() to a machine-specific path is fragile -- confirm
## the working directory before sourcing.
setwd("~/great lakes/Facebook_metrics")
fb <- read.csv("dataset_Facebook.csv", header = TRUE)
View(fb)
?read.csv
# The file is semicolon-separated, so re-read with sep = ";".
fb <- read.csv("dataset_Facebook.csv", header = TRUE, sep = ";")
require(dplyr)
Select<- head(select(fb, Category:Paid))
View(Select)
View(fb)
str(fb)
fb$Post.Hour
# Column subsetting with [ : whole column, then first 10 rows of column 2.
v <- fb[,2]
View(v)
u <- fb[1:10,2]
View(u)
str(fb)
#dataframe$columnname = as.numeric/factor(dataframe$columnname)
fb$comment <- as.numeric(fb$comment)
# merge: small authors/books example from ?merge
authors <- data.frame(
  ## I(*) : use character columns of names to get sensible sort order
  surname = I(c("Tukey", "Venables", "Tierney", "Ripley", "McNeil")),
  nationality = c("US", "Australia", "US", "UK", "Australia"),
  deceased = c("yes", rep("no", 4)))
authorN <- within(authors, { name <- surname; rm(surname) })
books <- data.frame(
  name = I(c("Tukey", "Venables", "Tierney",
             "Ripley", "Ripley", "McNeil", "R Core")),
  title = c("Exploratory Data Analysis",
            "Modern Applied Statistics ...",
            "LISP-STAT",
            "Spatial Statistics", "Stochastic Simulation",
            "Interactive Data Analysis",
            "An Introduction to R"),
  other.author = c(NA, "Ripley", NA, NA, NA, NA,
                   "Venables & Smith"))
m1 <- merge(authors, books, by.x = "surname", by.y = "name")
m2 <- merge(books, authors, by.x = "name", by.y = "surname")
?merge
View(fb)
fb <- read.csv("dataset_Facebook.csv", header = TRUE, sep = ";")
View(fb)
# with() function: evaluate an expression inside the data frame
a = mean(fb$Lifetime.Post.Total.Reach + fb$Page.total.likes + fb$Lifetime.Post.Total.Impressions)
a
# using with(), we can clean this up:
b = with(fb, mean(Lifetime.Post.Total.Reach + Page.total.likes + Lifetime.Post.Total.Impressions))
b
# dplyr single-table verbs: select / filter / arrange / rename
library(dplyr)
?subset
reach <- subset(fb,select=c(Lifetime.Post.Total.Reach,Page.total.likes))
View(reach)
str(fb)
?select
reach1 <- select(fb,Lifetime.Post.reach.by.people.who.like.your.Page,comment )
View(reach1)
#ends_with() = Select columns that end with a character string
#contains() = Select columns that contain a character string
#matches() = Select columns that match a regular expression
#one_of() = Select columns names that are from a group of names
?filter
reach3 <- filter(fb, Category == 2)
View(reach3)
reach4 <- filter(fb, Category == 2, Post.Month == 12)
View(reach4)
?arrange
reach5 <- arrange(fb,Post.Month)
View(reach5)
?rename
fb <- rename(fb, month = Post.Month)
View(fb) | /code/day_2,3/3-data manipulation.R | no_license | mahoretushar/r_workshop | R | false | false | 2,520 | r | setwd("~/great lakes/Facebook_metrics")
fb <- read.csv("dataset_Facebook.csv", header = TRUE)
View(fb)
?read.csv
fb <- read.csv("dataset_Facebook.csv", header = TRUE, sep = ";")
require(dplyr)
Select<- head(select(fb, Category:Paid))
View(Select)
View(fb)
str(fb)
fb$Post.Hour
v <- fb[,2]
View(v)
u <- fb[1:10,2]
View(u)
str(fb)
#dataframe$columnname = as.numeric/factor(dataframe$columnname)
fb$comment <- as.numeric(fb$comment)
# merge
authors <- data.frame(
## I(*) : use character columns of names to get sensible sort order
surname = I(c("Tukey", "Venables", "Tierney", "Ripley", "McNeil")),
nationality = c("US", "Australia", "US", "UK", "Australia"),
deceased = c("yes", rep("no", 4)))
authorN <- within(authors, { name <- surname; rm(surname) })
books <- data.frame(
name = I(c("Tukey", "Venables", "Tierney",
"Ripley", "Ripley", "McNeil", "R Core")),
title = c("Exploratory Data Analysis",
"Modern Applied Statistics ...",
"LISP-STAT",
"Spatial Statistics", "Stochastic Simulation",
"Interactive Data Analysis",
"An Introduction to R"),
other.author = c(NA, "Ripley", NA, NA, NA, NA,
"Venables & Smith"))
m1 <- merge(authors, books, by.x = "surname", by.y = "name")
m2 <- merge(books, authors, by.x = "name", by.y = "surname")
?merge
View(fb)
fb <- read.csv("dataset_Facebook.csv", header = TRUE, sep = ";")
View(fb)
# with funciton
a = mean(fb$Lifetime.Post.Total.Reach + fb$Page.total.likes + fb$Lifetime.Post.Total.Impressions)
a
# using with(), we can clean this up:
b = with(fb, mean(Lifetime.Post.Total.Reach + Page.total.likes + Lifetime.Post.Total.Impressions))
b
library(dplyr)
?subset
reach <- subset(fb,select=c(Lifetime.Post.Total.Reach,Page.total.likes))
View(reach)
str(fb)
?select
reach1 <- select(fb,Lifetime.Post.reach.by.people.who.like.your.Page,comment )
View(reach1)
#ends_with() = Select columns that end with a character string
#contains() = Select columns that contain a character string
#matches() = Select columns that match a regular expression
#one_of() = Select columns names that are from a group of names
?filter
reach3 <- filter(fb, Category == 2)
View(reach3)
reach4 <- filter(fb, Category == 2, Post.Month == 12)
View(reach4)
?arrange
reach5 <- arrange(fb,Post.Month)
View(reach5)
?rename
fb <- rename(fb, month = Post.Month)
View(fb) |
# Extracted example from the crossdes package help page for allcombs().
library(crossdes)
### Name: allcombs
### Title: Balanced Row-Column Design with all Possible Treatment Orders
### Aliases: allcombs
### Keywords: design
### ** Examples
# Design for 4 treatments assigned in 3 periods.
# All possible treatment orders occur.
allcombs(4,3)
| /data/genthat_extracted_code/crossdes/examples/allcombs.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 279 | r | library(crossdes)
### Name: allcombs
### Title: Balanced Row-Column Design with all Possible Treatment Orders
### Aliases: allcombs
### Keywords: design
### ** Examples
# Design for 4 treatments assigned in 3 periods.
# All possible treatment orders occur.
allcombs(4,3)
|
# Read every Daymet NetCDF file for the Morton Arboretum, compute cumulative
# growing degree-days (base 5 C) within each file, and write one combined
# daily table to CSV.  Each file holds one year of daily data, with the year
# as the second dot-separated field of the file name.
path.met <- "/Volumes/GoogleDrive/My Drive/East Woods/IMSA_2017_Rollinson/Daymet/MortonArb"
files.met <- dir(path.met, ".nc")
dat.list <- vector("list", length(files.met))  # preallocate; rbind in a loop is O(n^2)
pb <- txtProgressBar(min=0, max=length(files.met), style=3)
for(f in seq_along(files.met)){
  # Year is the second dot-delimited field of the file name (base strsplit
  # behaves identically to stringr::str_split for this usage).
  yr.now <- as.numeric(strsplit(files.met[f], "[.]")[[1]][2])
  ncT <- ncdf4::nc_open(file.path(path.met, files.met[f]))
  dat.tmp <- data.frame(year = yr.now,
                        doy = ncdf4::ncvar_get(ncT, "time"),
                        tmin = ncdf4::ncvar_get(ncT, "minimum_air_temperature")-273.15,
                        tmax = ncdf4::ncvar_get(ncT, "maximum_air_temperature")-273.15,
                        prcp = ncdf4::ncvar_get(ncT, "precipitation_flux"),
                        swe = ncdf4::ncvar_get(ncT, "liquid_water_content_of_surface_snow"),
                        swdown = ncdf4::ncvar_get(ncT, "surface_downwelling_shortwave_flux_in_air"),
                        dayl = ncdf4::ncvar_get(ncT, "day_length"),
                        vp = ncdf4::ncvar_get(ncT, "water_vapor_partial_pressure_in_air"))
  ncdf4::nc_close(ncT)  # FIX: release the file handle (previously leaked)
  # Growing degree-days: daily mean temperature above 5 C, accumulated over
  # the year.  cumsum() replaces the original inner for-loop, which computed
  # the same running sum but clobbered the outer loop index `i`, breaking the
  # progress bar update below.
  gdd <- pmax((dat.tmp$tmax + dat.tmp$tmin)/2 - 5, 0)
  dat.tmp$GDD5 <- cumsum(gdd)
  dat.list[[f]] <- dat.tmp
  setTxtProgressBar(pb, f)
}
dat.all <- do.call(rbind, dat.list)
summary(dat.all)
write.csv(dat.all, "/Volumes/GoogleDrive/My Drive/East Woods/IMSA_2017_Rollinson/ClimateData_Daymet.csv", row.names=F)
# Reshape the daily table to long format (one row per variable x day) so all
# met variables can be summarised and plotted together.
# NOTE(review): `vars` is not defined anywhere in this script -- presumably a
# character vector of the met column names; confirm before running.
dat.stack <- stack(dat.all[,vars])
dat.stack$year <- dat.all$year
dat.stack$doy <- dat.all$doy
summary(dat.stack)
# Day-of-year climatology per variable: mean, sd, and a 95% envelope taken
# across years (2.5% and 97.5% quantiles), keyed by variable (ind) and doy.
met.agg <- aggregate(dat.stack[,"values"], by= dat.stack[,c("ind", "doy")], FUN=mean)
names(met.agg)[3] <- "mean"
met.agg$sd <- aggregate(dat.stack[,"values"], by= dat.stack[,c("ind", "doy")], FUN=sd)[,3]
met.agg$ci.lwr <- aggregate(dat.stack[,"values"], by= dat.stack[,c("ind", "doy")], FUN=quantile, 0.025)[,3]
met.agg$ci.upr <- aggregate(dat.stack[,"values"], by= dat.stack[,c("ind", "doy")], FUN=quantile, 0.975)[,3]
summary(met.agg)
library(ggplot2)
# One panel per variable: mean line with the 95% across-year envelope.
ggplot(data=met.agg[,]) +
facet_wrap(~ind, scales="free_y") +
# geom_ribbon(aes(x=doy, ymin=mean-sd, ymax=mean+sd), alpha=0.5) +
geom_ribbon(aes(x=doy, ymin=ci.lwr, ymax=ci.upr), alpha=0.5) +
geom_line(aes(x=doy, y=mean))
# First half of the year only, GDD5: climatology envelope plus five randomly
# chosen years (thin black), with 1995 (red) and 2017 (blue) highlighted.
set.seed(231); yrs.rand <- sample(1980:2017, 5)
ggplot(data=met.agg[met.agg$doy<185 & met.agg$ind=="GDD5", ]) +
geom_ribbon(aes(x=doy, ymin=ci.lwr, ymax=ci.upr), alpha=0.5) +
geom_line(aes(x=doy, y=mean), size=2) +
geom_line(data= dat.stack[dat.stack $doy<185 & dat.stack$ind=="GDD5" & dat.stack$year %in% yrs.rand, ], aes(x=doy, y=values, group=year), size=0.5) +
geom_line(data= dat.stack[dat.stack $doy<185 & dat.stack$ind=="GDD5" & dat.stack$year==1995, ], aes(x=doy, y=values, group=year), size=0.75, color="red") +
geom_line(data= dat.stack[dat.stack $doy<185 & dat.stack$ind=="GDD5" & dat.stack$year==2017, ], aes(x=doy, y=values, group=year), size=0.75, color="dodgerblue3")
| /scripts_miscellaneous/Climate_Data_Cleaning.R | no_license | MortonArb-ForestEcology/IMSA_2017-FalseRings | R | false | false | 3,028 | r | path.met <- "/Volumes/GoogleDrive/My Drive/East Woods/IMSA_2017_Rollinson/Daymet/MortonArb"
files.met <- dir(path.met, ".nc")
dat.all <- data.frame()
pb <- txtProgressBar(min=0, max=length(files.met), style=3)
for(i in 1:length(files.met)){
yr.now <- as.numeric(stringr::str_split(files.met[i], "[.]")[[1]][2])
ncT <- ncdf4::nc_open(file.path(path.met, files.met[i]))
dat.tmp <- data.frame(year = yr.now,
doy = ncdf4::ncvar_get(ncT, "time"),
tmin = ncdf4::ncvar_get(ncT, "minimum_air_temperature")-273.15,
tmax = ncdf4::ncvar_get(ncT, "maximum_air_temperature")-273.15,
prcp = ncdf4::ncvar_get(ncT, "precipitation_flux"),
swe = ncdf4::ncvar_get(ncT, "liquid_water_content_of_surface_snow"),
swdown = ncdf4::ncvar_get(ncT, "surface_downwelling_shortwave_flux_in_air"),
dayl = ncdf4::ncvar_get(ncT, "day_length"),
vp = ncdf4::ncvar_get(ncT, "water_vapor_partial_pressure_in_air"))
# Calculating growing degree-days
gdd <- apply(dat.tmp[,c("tmax", "tmin")], 1, mean) - 5
gdd[gdd<0] <- 0
for(i in 2:length(gdd)){
gdd[i] <- sum(gdd[(i-1):i])
}
dat.tmp$GDD5 <- gdd
dat.all <- rbind(dat.all, dat.tmp)
setTxtProgressBar(pb, i)
}
summary(dat.all)
write.csv(dat.all, "/Volumes/GoogleDrive/My Drive/East Woods/IMSA_2017_Rollinson/ClimateData_Daymet.csv", row.names=F)
dat.stack <- stack(dat.all[,vars])
dat.stack$year <- dat.all$year
dat.stack$doy <- dat.all$doy
summary(dat.stack)
met.agg <- aggregate(dat.stack[,"values"], by= dat.stack[,c("ind", "doy")], FUN=mean)
names(met.agg)[3] <- "mean"
met.agg$sd <- aggregate(dat.stack[,"values"], by= dat.stack[,c("ind", "doy")], FUN=sd)[,3]
met.agg$ci.lwr <- aggregate(dat.stack[,"values"], by= dat.stack[,c("ind", "doy")], FUN=quantile, 0.025)[,3]
met.agg$ci.upr <- aggregate(dat.stack[,"values"], by= dat.stack[,c("ind", "doy")], FUN=quantile, 0.975)[,3]
summary(met.agg)
library(ggplot2)
ggplot(data=met.agg[,]) +
facet_wrap(~ind, scales="free_y") +
# geom_ribbon(aes(x=doy, ymin=mean-sd, ymax=mean+sd), alpha=0.5) +
geom_ribbon(aes(x=doy, ymin=ci.lwr, ymax=ci.upr), alpha=0.5) +
geom_line(aes(x=doy, y=mean))
set.seed(231); yrs.rand <- sample(1980:2017, 5)
ggplot(data=met.agg[met.agg$doy<185 & met.agg$ind=="GDD5", ]) +
geom_ribbon(aes(x=doy, ymin=ci.lwr, ymax=ci.upr), alpha=0.5) +
geom_line(aes(x=doy, y=mean), size=2) +
geom_line(data= dat.stack[dat.stack $doy<185 & dat.stack$ind=="GDD5" & dat.stack$year %in% yrs.rand, ], aes(x=doy, y=values, group=year), size=0.5) +
geom_line(data= dat.stack[dat.stack $doy<185 & dat.stack$ind=="GDD5" & dat.stack$year==1995, ], aes(x=doy, y=values, group=year), size=0.75, color="red") +
geom_line(data= dat.stack[dat.stack $doy<185 & dat.stack$ind=="GDD5" & dat.stack$year==2017, ], aes(x=doy, y=values, group=year), size=0.75, color="dodgerblue3")
|
# Fit a random forest (via ranger) to classify cell populations from
# morphological features: small grid search over mtry/splitrule scored on a
# held-out validation bin, then a final fit evaluated on the test bin.
library(tidyverse)
library(randomForest)
library(ranger)
library(parallel)
library(cowplot)
source('./scripts/import_data.R')
source('../Helpers/rmv_low_variance.R')
source('./scripts/Functions.R')
# Fit a randomForest model to predict Mutant or WT cell population given the
# morphological features.
# Binary response Y: TRUE when Row > 3.
dat <- import_data() %>% rmv_correlated_cols() %>%
  rmv_low_var(keep_cols = c('Row', 'Column')) %>%
  mutate(Y = as.factor(Row > 3))
# sample a small training set to figure out if trainControl will return all the
# model information I need.
set.seed(7)
# 2% subsample; split_dat (sourced from Functions.R) presumably assigns each
# row to one of n = 10 bins -- bins < 9 train, bin 9 validates, bin 10 tests
# below.  Drop identifier/unused columns before modelling.
small_dat <- dat %>% sample_frac(size = 0.02) %>%
  split_dat(., bincol = bin, response = Y, n = 10) %>%
  select(-Row, -Column, -S, -M)
# Hyper-parameter grid: 3 mtry values x both split rules.
# NOTE(review): the column is named `splitrules` while ranger's argument is
# `splitrule`; it is always passed explicitly below, so this still works.
hp_grid <- expand.grid(mtry = caret::var_seq(ncol(small_dat) - 1, classification = T, len = 3),
                       min.node.size = 1,
                       splitrules = c('gini', 'extratrees'))
# Validation accuracy for each grid row: train on bins < 9, score on bin 9
# using a 0.5 threshold on the second class's predicted probability.
val_acc <- vapply(1:nrow(hp_grid),
                  function(iter, train, val, hps){
                    print(paste0('Iteration: ', iter))
                    mdl <- ranger(data = train, mtry = hps[iter,1], min.node.size = hps[iter,2],
                                  splitrule = hps[iter,3], dependent.variable.name = 'Y',
                                  probability = T, importance = 'impurity');
                    ranger_out <- predict(mdl, val)
                    sum(val$Y == (ranger_out$predictions[,colnames(ranger_out$predictions)[[2]]] > 0.5))/length(val$Y)
                  }, numeric(1),
                  small_dat %>% filter(bin < 9), small_dat %>% filter(bin == 9),hp_grid)
# NOTE(review): ties in val_acc would select multiple rows here, which would
# pass vectors to ranger below; consider which.max() instead.
best_params <- hp_grid[which(val_acc == max(val_acc)),]
test <- small_dat %>% filter(bin == 10)
# Refit with the winning hyper-parameters on everything except the test bin.
final_mdl <- ranger(data = small_dat %>% filter(bin != 10), mtry = best_params$mtry,
                    min.node.size = best_params$min.node.size, splitrule = best_params$splitrules,
                    dependent.variable.name = 'Y', probability = T)
final_mdl_out <- predict(final_mdl, test)
# Held-out accuracy at the same 0.5 probability threshold.
final_acc <- sum(test$Y == (final_mdl_out$predictions[,colnames(final_mdl_out$predictions)[[2]]] > 0.5))/length(test$Y)
save.image('./Random_forest_tester.Rdata')
| /scripts/Fit_Random_forest.R | no_license | goldenberg-lab/LFS_fibroblasts | R | false | false | 2,153 | r | library(tidyverse)
library(randomForest)
library(ranger)
library(parallel)
library(cowplot)
source('./scripts/import_data.R')
source('../Helpers/rmv_low_variance.R')
source('./scripts/Functions.R')
# Fit a randomForest model to predict Mutant or WT cell population given the
# morphological features.
dat <- import_data() %>% rmv_correlated_cols() %>%
rmv_low_var(keep_cols = c('Row', 'Column')) %>%
mutate(Y = as.factor(Row > 3))
# sample a small training set to figure out if trainControl will return all the
# model information I need.
set.seed(7)
small_dat <- dat %>% sample_frac(size = 0.02) %>%
split_dat(., bincol = bin, response = Y, n = 10) %>%
select(-Row, -Column, -S, -M)
hp_grid <- expand.grid(mtry = caret::var_seq(ncol(small_dat) - 1, classification = T, len = 3),
min.node.size = 1,
splitrules = c('gini', 'extratrees'))
val_acc <- vapply(1:nrow(hp_grid),
function(iter, train, val, hps){
print(paste0('Iteration: ', iter))
mdl <- ranger(data = train, mtry = hps[iter,1], min.node.size = hps[iter,2],
splitrule = hps[iter,3], dependent.variable.name = 'Y',
probability = T, importance = 'impurity');
ranger_out <- predict(mdl, val)
sum(val$Y == (ranger_out$predictions[,colnames(ranger_out$predictions)[[2]]] > 0.5))/length(val$Y)
}, numeric(1),
small_dat %>% filter(bin < 9), small_dat %>% filter(bin == 9),hp_grid)
best_params <- hp_grid[which(val_acc == max(val_acc)),]
test <- small_dat %>% filter(bin == 10)
final_mdl <- ranger(data = small_dat %>% filter(bin != 10), mtry = best_params$mtry,
min.node.size = best_params$min.node.size, splitrule = best_params$splitrules,
dependent.variable.name = 'Y', probability = T)
final_mdl_out <- predict(final_mdl, test)
final_acc <- sum(test$Y == (final_mdl_out$predictions[,colnames(final_mdl_out$predictions)[[2]]] > 0.5))/length(test$Y)
save.image('./Random_forest_tester.Rdata')
|
plotVegetationGrid <- function(data, time=1:data$parameter$nSteps){
  # Draw the vegetation raster of a simulation run for one or more timesteps.
  # `data` is a simulation result with $parameter (n, m, nSteps, title) and
  # $rasters$vegetation, a list of n x m matrices indexed by timestep.
  require(fields)
  p <- data$parameter
  # White for bare ground, then a 10-step yellow -> green -> dark-green ramp.
  vegPalette <- c("white", two.colors(n=10, start="yellow", end="green4", middle="green"))
  for (step in time) {
    image.plot(1:p$n, 1:p$m, data$rasters$vegetation[[step]],
               asp=p$n/p$m, col=vegPalette, nlevel=10,
               main=paste0('simulation run "', p$title, '", timestep ', step),
               zlim=c(0,10))
  }
}
#to plot all:
#plotVegetationGrid(Test)
#to plot only one:
#plotVegetationGrid(Test, 1)
#to plot multiple:
#plotVegetationGrid(Test, c(1,50,100,150,200)) | /evaluation/plotVegetationGrid.r | no_license | n-dim/ecohydraulical-feedback | R | false | false | 593 | r | plotVegetationGrid <- function(data, time=1:data$parameter$nSteps){
require(fields)
with(data$parameter, {
colsVeg <- c("white", two.colors(n=10, start="yellow", end="green4", middle="green"))
for (i in time){
image.plot(1:n, 1:m, data$rasters$vegetation[[i]], asp=n/m, col=colsVeg, nlevel=10,
main=paste0('simulation run "', title, '", timestep ', i), zlim=c(0,10))
}
})
}
#to plot all:
#plotVegetationGrid(Test)
#to plot only one:
#plotVegetationGrid(Test, 1)
#to plot multiple:
#plotVegetationGrid(Test, c(1,50,100,150,200)) |
#' Query Opencast current organization's properties
#'
#' Returns the current organization's properties.
#'
#' This function expects the hostname, username and password to be set as environment variables.
#'
#' @return A list containing the current organization's properties.
#' @importFrom httr modify_url
#' @export
#' @examples
#' Sys.setenv(OPENCAST_HOST = "https://legacy.opencast.org")
#' Sys.setenv(OPENCAST_USERNAME = "admin")
#' Sys.setenv(OPENCAST_PASSWORD = "opencast")
#'
#' oc_info_organization_properties()
oc_info_organization_properties <- function() {
  # Build the full request url for the organization-properties endpoint from
  # the configured host, then hand it to the shared query helper, which
  # performs the request and returns the parsed response.
  endpoint <- "/api/info/organization/properties"
  oc_package_query(modify_url(oc_hostname(), path = endpoint))
}
#' Print result of oc_info_organization_properties()
#'
#' Print a structured return of the oc_info_organization_properties() function.
#'
#' @param x The return of the function this print function relates to.
#' @param ... Possible further options to the print function.
#' @return A structured print of the return by the oc_info_organization_properties() function.
#' @seealso \code{\link{oc_info_organization_properties}}
#' @importFrom utils str
#' @export
#' @examples
#' Sys.setenv(OPENCAST_HOST = "https://legacy.opencast.org")
#' Sys.setenv(OPENCAST_USERNAME = "admin")
#' Sys.setenv(OPENCAST_PASSWORD = "opencast")
#'
#' resp <- oc_info_organization_properties()
#'
#' resp
print.oc_info_organization_properties <- function(x, ...) {
  # Header line identifying the queried endpoint, then a compact structural
  # dump of the response payload.
  header <- paste0("<Opencast ", x$path, ">")
  cat(header, "\n", sep = "")
  str(x$content)
  # Return the object invisibly, as print methods conventionally do.
  invisible(x)
}
| /R/oc_info_organization_properties.R | no_license | cran/opencastR | R | false | false | 1,616 | r | #' Query Opencast current organization's properties
#'
#' Returns the current organization's properties.
#'
#' This function expects the hostname, username and password to be set as environment variables.
#'
#' @return A list containing the current organization's properties.
#' @importFrom httr modify_url
#' @export
#' @examples
#' Sys.setenv(OPENCAST_HOST = "https://legacy.opencast.org")
#' Sys.setenv(OPENCAST_USERNAME = "admin")
#' Sys.setenv(OPENCAST_PASSWORD = "opencast")
#'
#' oc_info_organization_properties()
oc_info_organization_properties <- function() {
# Set the url path
path <- "/api/info/organization/properties"
# Construct the url for the api call
url <- modify_url(oc_hostname(), path = path)
# Query the api and return the response
oc_package_query(url)
}
#' Print result of oc_info_organization_properties()
#'
#' Print a structured return of the oc_info_organization_properties() function.
#'
#' @param x The return of the function this print function relates to.
#' @param ... Possible further options to the print function.
#' @return A structured print of the return by the oc_info_organization_properties() function.
#' @seealso \code{\link{oc_info_organization_properties}}
#' @importFrom utils str
#' @export
#' @examples
#' Sys.setenv(OPENCAST_HOST = "https://legacy.opencast.org")
#' Sys.setenv(OPENCAST_USERNAME = "admin")
#' Sys.setenv(OPENCAST_PASSWORD = "opencast")
#'
#' resp <- oc_info_organization_properties()
#'
#' resp
print.oc_info_organization_properties <- function(x, ...) {
cat("<Opencast ", x$path, ">\n", sep = "")
str(x$content)
invisible(x)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/posteriors.R
\name{ggplot.posterior}
\alias{ggplot.posterior}
\title{Visualize the Posterior Distributions of Model Statistics}
\usage{
\method{ggplot}{posterior}(data, mapping = NULL, ..., environment = NULL,
reorder = TRUE)
}
\arguments{
\item{data}{An object produced by \code{\link[=tidy.perf_mod]{tidy.perf_mod()}}.}
\item{mapping, ..., environment}{Not currently used.}
\item{reorder}{A logical; should the \code{model} column be reordered
by the average of the posterior distribution?}
}
\value{
A \code{\link[ggplot2:ggplot]{ggplot2::ggplot()}} object using
\code{\link[ggplot2:geom_violin]{ggplot2::geom_violin()}} for the posteriors.
}
\description{
A simple violin plot is created by the function.
}
\examples{
# Example objects from the "Getting Started" vignette at
# https://topepo.github.io/tidyposterior/articles/Getting_Started.html
file <- system.file("examples", "roc_model.RData", package = "tidyposterior")
load(file)
posterior_values <- tidy(roc_model)
ggplot(posterior_values) + theme_bw()
}
| /man/ggplot.posterior.Rd | no_license | gridl/tidyposterior | R | false | true | 1,101 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/posteriors.R
\name{ggplot.posterior}
\alias{ggplot.posterior}
\title{Visualize the Posterior Distributions of Model Statistics}
\usage{
\method{ggplot}{posterior}(data, mapping = NULL, ..., environment = NULL,
reorder = TRUE)
}
\arguments{
\item{data}{An object produced by \code{\link[=tidy.perf_mod]{tidy.perf_mod()}}.}
\item{mapping, ..., environment}{Not currently used.}
\item{reorder}{A logical; should the \code{model} column be reordered
by the average of the posterior distribution?}
}
\value{
A \code{\link[ggplot2:ggplot]{ggplot2::ggplot()}} object using
\code{\link[ggplot2:geom_violin]{ggplot2::geom_violin()}} for the posteriors.
}
\description{
A simple violin plot is created by the function.
}
\examples{
# Example objects from the "Getting Started" vignette at
# https://topepo.github.io/tidyposterior/articles/Getting_Started.html
file <- system.file("examples", "roc_model.RData", package = "tidyposterior")
load(file)
posterior_values <- tidy(roc_model)
ggplot(posterior_values) + theme_bw()
}
|
# Base-graphics tutorial for the LungCap data set (assumes LungCap, Gender,
# Age, Smoke and Height are already attached in the workspace).

# --- Boxplots ----------------------------------------------------------------
boxplot(LungCap)
quantile(LungCap, probs = c(0, 0.25, 0.5, 0.75, 1))  # the five numbers a boxplot shows
boxplot(LungCap ~ Gender)
# FIX: the original line had mismatched ]/") brackets and would not parse.
boxplot(LungCap[Gender == "female"], LungCap[Gender == "male"])
AgeGroups <- cut(Age, breaks = c(0, 13, 15, 17, 25), labels = c("<13", "15", "17", "18"))
boxplot(LungCap ~ Smoke * AgeGroups, las = 2)

# --- Histogram with density overlay ------------------------------------------
hist(LungCap)
hist(LungCap, prob = TRUE, ylim = c(0, 0.2), breaks = 10)
lines(density(LungCap))

# --- Stem-and-leaf -----------------------------------------------------------
stem(LungCap[Gender == "male"])

# --- Barplot / mosaic --------------------------------------------------------
counts <- table(Smoke, Gender)  # renamed from `t`, which shadowed base::t()
barplot(counts)
barplot(counts, beside = TRUE, legend.text = TRUE)
mosaicplot(counts)

# --- Scatterplot with linear and spline fits ---------------------------------
cor(Age, Height)
plot(Age, Height)
abline(lm(Height ~ Age))
lines(smooth.spline(Age, Height), lty = 2, lwd = 5)  # FIX: `lw` -> `lwd`

# --- Customised plot ---------------------------------------------------------
plot(Age, Height, main = "Title", cex = 0.5, font.main = 4, col = 5, col.axis = 3)  # col = point colour
text(x = 5, y = 11, labels = "text", adj = 0, cex = 0.5, col = 1, font = 4)
abline(h = mean(LungCap), col = 2, lwd = 2)

# --- Side-by-side panels -----------------------------------------------------
par(mfrow = c(1, 2))
plot(Age, Height, cex = 0.5)
plot(Age, Height, cex = 0.5)
| /Plot.R | no_license | lmontigny/R_programming | R | false | false | 975 | r | # BoxPlot
boxplot(LungCap)
quantile(LungCap, probs=c(0,0.25,0.5,0.75,1))
boxplot(LungCap ~ Gender)
boxplot(LungCap[Gender=="female"), LungCap[Gender=="male")]
AgeGroups = cut(Age, breaks = c(0,13,15,17,25), labels=c("<13", "15", "17", "18"))
boxplot(LungCap~Smoke*AgeGroups, las=2)
# Histogram
hist(LungCap)
hist(LungCap, prob= T, ylim=c(0,0.2), breaks=10)
lines(density(LungCap))
# Stem
stem(LungCap[Gender == "male"])
# Barplot
t = table(Smoke, Gender)
barplot(t)
barplot(t, beside=T, legend.text=T)
mosaicplot(t)
cor(Age, Height)
plot(Age, Height)
abline(lm(Height~Age))
lines(smooth.spline(Age, Height), lty=2, lw=5)
# Improve plot
plot(Age, Height, main="Title", cex=0.5, font.main=4, col=5, col.axis=3) #col=color
text(x=5, y=11, label="text", adj=0, cex=0.5, col=1, font=4)
abline(h=mean(LungCap), col=2, lwd=2)
# Subplot
par(mfrow=c(1,2))
plot(Age, Height, cex=0.5)
plot(Age, Height, cex=0.5)
|
\name{popMisfitMACS}
\alias{popMisfitMACS}
\title{
Find population misfit by sufficient statistics
}
\description{
Find the value quantifying the amount of population misfit: \eqn{F_0}, RMSEA, and SRMR. See the definition of each index at \code{\link{summaryMisspec}}.
}
\usage{
popMisfitMACS(paramM, paramCM, misspecM, misspecCM, dfParam=NULL, fit.measures="all")
}
\arguments{
\item{paramM}{
The model-implied mean from the real parameters
}
\item{paramCM}{
The model-implied covariance matrix from the real parameters
}
\item{misspecM}{
The model-implied mean from the real and misspecified parameters
}
\item{misspecCM}{
The model-implied covariance matrix from the real and misspecified parameters
}
\item{dfParam}{
The degree of freedom of the real model
}
\item{fit.measures}{
The names of indices used to calculate population misfit. There are three types of misfit: 1) discrepancy function (\code{"f0"}; see \code{\link{popDiscrepancy}}), 2) root mean squared error of approximation (\code{"rmsea"}; Equation 12 in Browne & Cudeck, 1992), and 3) standardized root mean squared residual (\code{"srmr"})
}
}
\value{
The vector of the misfit indices
}
\references{
Browne, M. W., & Cudeck, R. (1992). Alternative ways of assessing model fit. \emph{Sociological Methods & Research, 21}, 230-258.
}
\author{
Sunthud Pornprasertmanit (\email{psunthud@gmail.com})
}
\examples{
m1 <- rep(0, 3)
m2 <- c(0.1, -0.1, 0.05)
S1 <- matrix(c(1, 0.6, 0.5, 0.6, 1, 0.4, 0.5, 0.4, 1), 3, 3)
S2 <- matrix(c(1, 0.55, 0.55, 0.55, 1, 0.55, 0.55, 0.55, 1), 3, 3)
popMisfitMACS(m1, S1, m2, S2)
}
| /simsem/man/popMisfitMACS.Rd | no_license | simsem/simsem | R | false | false | 1,652 | rd | \name{popMisfitMACS}
\alias{popMisfitMACS}
\title{
Find population misfit by sufficient statistics
}
\description{
Find the value quantifying the amount of population misfit: \eqn{F_0}, RMSEA, and SRMR. See the definition of each index at \code{\link{summaryMisspec}}.
}
\usage{
popMisfitMACS(paramM, paramCM, misspecM, misspecCM, dfParam=NULL, fit.measures="all")
}
\arguments{
\item{paramM}{
The model-implied mean from the real parameters
}
\item{paramCM}{
The model-implied covariance matrix from the real parameters
}
\item{misspecM}{
The model-implied mean from the real and misspecified parameters
}
\item{misspecCM}{
The model-implied covariance matrix from the real and misspecified parameters
}
\item{dfParam}{
The degree of freedom of the real model
}
\item{fit.measures}{
The names of indices used to calculate population misfit. There are three types of misfit: 1) discrepancy function (\code{"f0"}; see \code{\link{popDiscrepancy}}), 2) root mean squared error of approximation (\code{"rmsea"}; Equation 12 in Browne & Cudeck, 1992), and 3) standardized root mean squared residual (\code{"srmr"})
}
}
\value{
The vector of the misfit indices
}
\references{
Browne, M. W., & Cudeck, R. (1992). Alternative ways of assessing model fit. \emph{Sociological Methods & Research, 21}, 230-258.
}
\author{
Sunthud Pornprasertmanit (\email{psunthud@gmail.com})
}
\examples{
m1 <- rep(0, 3)
m2 <- c(0.1, -0.1, 0.05)
S1 <- matrix(c(1, 0.6, 0.5, 0.6, 1, 0.4, 0.5, 0.4, 1), 3, 3)
S2 <- matrix(c(1, 0.55, 0.55, 0.55, 1, 0.55, 0.55, 0.55, 1), 3, 3)
popMisfitMACS(m1, S1, m2, S2)
}
|
\name{ebayes}
\alias{ebayes}
\alias{eBayes}
\alias{treat}
\title{Empirical Bayes Statistics for Differential Expression}
\description{Given a microarray linear model fit, compute moderated t-statistics, moderated F-statistic, and log-odds of differential expression by empirical Bayes moderation of the standard errors towards a common value.}
\usage{
ebayes(fit, proportion=0.01, stdev.coef.lim=c(0.1,4), trend=FALSE, robust=FALSE, winsor.tail.p=c(0.05,0.1))
eBayes(fit, proportion=0.01, stdev.coef.lim=c(0.1,4), trend=FALSE, robust=FALSE, winsor.tail.p=c(0.05,0.1))
treat(fit, lfc=0, trend=FALSE, robust=FALSE, winsor.tail.p=c(0.05,0.1))
}
\arguments{
\item{fit}{an \code{MArrayLM} fitted model object produced by \code{lmFit} or \code{contrasts.fit}.
For \code{ebayes} only, \code{fit} can alternatively be an unclassed list produced by \code{lm.series}, \code{gls.series} or \code{mrlm} containing components \code{coefficients}, \code{stdev.unscaled}, \code{sigma} and \code{df.residual}.}
\item{proportion}{numeric value between 0 and 1, assumed proportion of genes which are differentially expressed}
\item{stdev.coef.lim}{numeric vector of length 2, assumed lower and upper limits for the standard deviation of log2-fold-changes for differentially expressed genes}
\item{trend}{logical, should an intensity-trend be allowed for the prior variance? Default is that the prior variance is constant.}
\item{robust}{logical, should the estimation of \code{df.prior} and \code{var.prior} be robustified against outlier sample variances?}
\item{winsor.tail.p}{numeric vector of length 1 or 2, giving left and right tail proportions of \code{x} to Winsorize. Used only when \code{robust=TRUE}.}
\item{lfc}{the minimum log2-fold-change that is considered scientifically meaningful}
}
\value{
\code{eBayes} produces an object of class \code{MArrayLM} (see \code{\link{MArrayLM-class}}) containing everything found in \code{fit} plus the following added components:
\item{t}{numeric vector or matrix of moderated t-statistics}
\item{p.value}{numeric vector of p-values corresponding to the t-statistics}
\item{s2.prior}{estimated prior value for \code{sigma^2}. A vector if \code{covariate} is non-\code{NULL}, otherwise a scalar.}
\item{df.prior}{degrees of freedom associated with \code{s2.prior}}
\item{df.total}{numeric vector of total degrees of freedom associated with t-statistics and p-values. Equal to \code{df.prior+df.residual} or \code{sum(df.residual)}, whichever is smaller.}
\item{s2.post}{numeric vector giving the posterior values for \code{sigma^2}}
\item{lods}{numeric vector or matrix giving the log-odds of differential expression}
\item{var.prior}{estimated prior value for the variance of the log2-fold-change for differentially expressed gene}
\item{F}{numeric vector of moderated F-statistics for testing all contrasts defined by the columns of \code{fit} simultaneously equal to zero}
\item{F.p.value}{numeric vector giving p-values corresponding to \code{F}}
\code{treat} produces an \code{MArrayLM} object similar to \code{eBayes} but without \code{lods}, \code{var.prior}, \code{F} or \code{F.p.value}.
\code{ebayes} produces an ordinary list containing the above components except for \code{F} and \code{F.p.value}.
}
\details{
These functions are used to rank genes in order of evidence for differential expression.
They use an empirical Bayes method to shrink the probe-wise sample variances towards a common value and to augment the degrees of freedom for the individual variances (Smyth, 2004).
The functions accept as input argument \code{fit} a fitted model object from the functions \code{lmFit}, \code{lm.series}, \code{mrlm} or \code{gls.series}.
The fitted model object may have been processed by \code{contrasts.fit} before being passed to \code{eBayes} to convert the coefficients of the design matrix into an arbitrary number of contrasts which are to be tested equal to zero.
The columns of \code{fit} define a set of contrasts which are to be tested equal to zero.
The empirical Bayes moderated t-statistics test each individual contrast equal to zero.
For each probe (row), the moderated F-statistic tests whether all the contrasts are zero.
The F-statistic is an overall test computed from the set of t-statistics for that probe.
This is exactly analogous to the relationship between t-tests and F-statistics in conventional anova, except that the residual mean squares and residual degrees of freedom have been moderated between probes.
The estimates \code{s2.prior} and \code{df.prior} are computed by \code{fitFDist}.
\code{s2.post} is the weighted average of \code{s2.prior} and \code{sigma^2} with weights proportional to \code{df.prior} and \code{df.residual} respectively.
The \code{lods} is sometimes known as the B-statistic.
The F-statistics \code{F} are computed by \code{classifyTestsF} with \code{fstat.only=TRUE}.
\code{eBayes} doesn't compute ordinary (unmoderated) t-statistics by default, but these can be easily extracted from
the linear model output, see the example below.
\code{ebayes} is the earlier and leaner function, kept for backward compatibility, while
\code{eBayes} is the later more object-orientated version.
The difference is that \code{ebayes} outputs only the empirical Bayes statistics whereas \code{eBayes} adds them to the fitted model object \code{fit}.
\code{eBayes} is recommended for routine use as it produces objects containing all the necessary components for downstream analysis
\code{treat} computes empirical Bayes moderated-t p-values relative to a minimum required fold-change threshold.
Use \code{\link{topTreat}} to summarize output from \code{treat}.
Instead of testing for genes which have log-fold-changes different from zero, it tests whether the log2-fold-change is greater than \code{lfc} in absolute value (McCarthy and Smyth, 2009).
\code{treat} is concerned with p-values rather than posterior odds, so it does not compute the B-statistic \code{lods}.
The idea of thresholding doesn't apply to F-statistics in a straightforward way, so moderated F-statistics are also not computed.
If \code{trend=TRUE} then an intensity-dependent trend is fitted to the prior variances \code{s2.prior}.
Specifically, \code{squeezeVar} is called with the \code{covariate} equal to \code{Amean}, the average log2-intensity for each gene.
See \code{\link{squeezeVar}} for more details.
If \code{robust=TRUE} then the robust empirical Bayes procedure of Phipson et al (2013) is used.
See \code{\link{squeezeVar}} for more details.
}
\seealso{
\code{\link{squeezeVar}}, \code{\link{fitFDist}}, \code{\link{tmixture.matrix}}.
An overview of linear model functions in limma is given by \link{06.LinearModels}.
}
\author{Gordon Smyth and Davis McCarthy}
\references{
McCarthy, D. J., and Smyth, G. K. (2009). Testing significance relative to a fold-change threshold is a TREAT. \emph{Bioinformatics}.
\url{http://bioinformatics.oxfordjournals.org/cgi/content/abstract/btp053}
Loennstedt, I., and Speed, T. P. (2002). Replicated microarray data. \emph{Statistica Sinica} \bold{12}, 31-46.
Phipson, B, Lee, S, Majewski, IJ, Alexander, WS, and Smyth, GK (2013).
Empirical Bayes in the presence of exceptional cases, with application to microarray data.
Bioinformatics Division, Walter and Eliza Hall Institute of Medical Research, Melbourne, Australia.
\url{http://www.statsci.org/smyth/pubs/RobustEBayesPreprint.pdf}
Smyth, G. K. (2004). Linear models and empirical Bayes methods for assessing differential expression in microarray experiments.
\emph{Statistical Applications in Genetics and Molecular Biology}, Volume \bold{3}, Article 3.
\url{http://www.statsci.org/smyth/pubs/ebayes.pdf}
}
\examples{
# See also lmFit examples
# Simulate gene expression data,
# 6 microarrays and 100 genes with one gene differentially expressed
set.seed(2004); invisible(runif(100))
M <- matrix(rnorm(100*6,sd=0.3),100,6)
M[1,] <- M[1,] + 1
fit <- lmFit(M)
# Moderated t-statistic
fit <- eBayes(fit)
topTable(fit)
# Ordinary t-statistic
ordinary.t <- fit$coef / fit$stdev.unscaled / fit$sigma
# Q-Q plots of t statistics
# Points off the line may be differentially expressed
par(mfrow=c(1,2))
qqt(ordinary.t, df=fit$df.residual, main="Ordinary t")
abline(0,1)
qqt(fit$t, df=fit$df.total,main="Moderated t")
abline(0,1)
par(mfrow=c(1,1))
}
\keyword{htest}
| /man/ebayes.Rd | no_license | SynBioTek/limma2 | R | false | false | 8,382 | rd | \name{ebayes}
\alias{ebayes}
\alias{eBayes}
\alias{treat}
\title{Empirical Bayes Statistics for Differential Expression}
\description{Given a microarray linear model fit, compute moderated t-statistics, moderated F-statistic, and log-odds of differential expression by empirical Bayes moderation of the standard errors towards a common value.}
\usage{
ebayes(fit, proportion=0.01, stdev.coef.lim=c(0.1,4), trend=FALSE, robust=FALSE, winsor.tail.p=c(0.05,0.1))
eBayes(fit, proportion=0.01, stdev.coef.lim=c(0.1,4), trend=FALSE, robust=FALSE, winsor.tail.p=c(0.05,0.1))
treat(fit, lfc=0, trend=FALSE, robust=FALSE, winsor.tail.p=c(0.05,0.1))
}
\arguments{
\item{fit}{an \code{MArrayLM} fitted model object produced by \code{lmFit} or \code{contrasts.fit}.
For \code{ebayes} only, \code{fit} can alternatively be an unclassed list produced by \code{lm.series}, \code{gls.series} or \code{mrlm} containing components \code{coefficients}, \code{stdev.unscaled}, \code{sigma} and \code{df.residual}.}
\item{proportion}{numeric value between 0 and 1, assumed proportion of genes which are differentially expressed}
\item{stdev.coef.lim}{numeric vector of length 2, assumed lower and upper limits for the standard deviation of log2-fold-changes for differentially expressed genes}
\item{trend}{logical, should an intensity-trend be allowed for the prior variance? Default is that the prior variance is constant.}
\item{robust}{logical, should the estimation of \code{df.prior} and \code{var.prior} be robustified against outlier sample variances?}
\item{winsor.tail.p}{numeric vector of length 1 or 2, giving left and right tail proportions of \code{x} to Winsorize. Used only when \code{robust=TRUE}.}
\item{lfc}{the minimum log2-fold-change that is considered scientifically meaningful}
}
\value{
\code{eBayes} produces an object of class \code{MArrayLM} (see \code{\link{MArrayLM-class}}) containing everything found in \code{fit} plus the following added components:
\item{t}{numeric vector or matrix of moderated t-statistics}
\item{p.value}{numeric vector of p-values corresponding to the t-statistics}
\item{s2.prior}{estimated prior value for \code{sigma^2}. A vector if \code{covariate} is non-\code{NULL}, otherwise a scalar.}
\item{df.prior}{degrees of freedom associated with \code{s2.prior}}
\item{df.total}{numeric vector of total degrees of freedom associated with t-statistics and p-values. Equal to \code{df.prior+df.residual} or \code{sum(df.residual)}, whichever is smaller.}
\item{s2.post}{numeric vector giving the posterior values for \code{sigma^2}}
\item{lods}{numeric vector or matrix giving the log-odds of differential expression}
\item{var.prior}{estimated prior value for the variance of the log2-fold-change for differentially expressed gene}
\item{F}{numeric vector of moderated F-statistics for testing all contrasts defined by the columns of \code{fit} simultaneously equal to zero}
\item{F.p.value}{numeric vector giving p-values corresponding to \code{F}}
\code{treat} produces an \code{MArrayLM} object similar to \code{eBayes} but without \code{lods}, \code{var.prior}, \code{F} or \code{F.p.value}.
\code{ebayes} produces an ordinary list containing the above components except for \code{F} and \code{F.p.value}.
}
\details{
These functions are used to rank genes in order of evidence for differential expression.
They use an empirical Bayes method to shrink the probe-wise sample variances towards a common value and to augment the degrees of freedom for the individual variances (Smyth, 2004).
The functions accept as input argument \code{fit} a fitted model object from the functions \code{lmFit}, \code{lm.series}, \code{mrlm} or \code{gls.series}.
The fitted model object may have been processed by \code{contrasts.fit} before being passed to \code{eBayes} to convert the coefficients of the design matrix into an arbitrary number of contrasts which are to be tested equal to zero.
The columns of \code{fit} define a set of contrasts which are to be tested equal to zero.
The empirical Bayes moderated t-statistics test each individual contrast equal to zero.
For each probe (row), the moderated F-statistic tests whether all the contrasts are zero.
The F-statistic is an overall test computed from the set of t-statistics for that probe.
This is exactly analogous the relationship between t-tests and F-statistics in conventional anova, except that the residual mean squares and residual degrees of freedom have been moderated between probes.
The estimates \code{s2.prior} and \code{df.prior} are computed by \code{fitFDist}.
\code{s2.post} is the weighted average of \code{s2.prior} and \code{sigma^2} with weights proportional to \code{df.prior} and \code{df.residual} respectively.
The \code{lods} is sometimes known as the B-statistic.
The F-statistics \code{F} are computed by \code{classifyTestsF} with \code{fstat.only=TRUE}.
\code{eBayes} doesn't compute ordinary (unmoderated) t-statistics by default, but these can be easily extracted from
the linear model output, see the example below.
\code{ebayes} is the earlier and leaner function, kept for backward compatibility, while
\code{eBayes} is the later, more object-oriented version.
The difference is that \code{ebayes} outputs only the empirical Bayes statistics whereas \code{eBayes} adds them to the fitted model object \code{fit}.
\code{eBayes} is recommended for routine use as it produces objects containing all the necessary components for downstream analysis
\code{treat} computes empirical Bayes moderated-t p-values relative to a minimum required fold-change threshold.
Use \code{\link{topTreat}} to summarize output from \code{treat}.
Instead of testing for genes which have log-fold-changes different from zero, it tests whether the log2-fold-change is greater than \code{lfc} in absolute value (McCarthy and Smyth, 2009).
\code{treat} is concerned with p-values rather than posterior odds, so it does not compute the B-statistic \code{lods}.
The idea of thresholding doesn't apply to F-statistics in a straightforward way, so moderated F-statistics are also not computed.
If \code{trend=TRUE} then an intensity-dependent trend is fitted to the prior variances \code{s2.prior}.
Specifically, \code{squeezeVar} is called with the \code{covariate} equal to \code{Amean}, the average log2-intensity for each gene.
See \code{\link{squeezeVar}} for more details.
If \code{robust=TRUE} then the robust empirical Bayes procedure of Phipson et al (2013) is used.
See \code{\link{squeezeVar}} for more details.
}
\seealso{
\code{\link{squeezeVar}}, \code{\link{fitFDist}}, \code{\link{tmixture.matrix}}.
An overview of linear model functions in limma is given by \link{06.LinearModels}.
}
\author{Gordon Smyth and Davis McCarthy}
\references{
McCarthy, D. J., and Smyth, G. K. (2009). Testing significance relative to a fold-change threshold is a TREAT. \emph{Bioinformatics}.
\url{http://bioinformatics.oxfordjournals.org/cgi/content/abstract/btp053}
Loennstedt, I., and Speed, T. P. (2002). Replicated microarray data. \emph{Statistica Sinica} \bold{12}, 31-46.
Phipson, B, Lee, S, Majewski, IJ, Alexander, WS, and Smyth, GK (2013).
Empirical Bayes in the presence of exceptional cases, with application to microarray data.
Bioinformatics Division, Walter and Eliza Hall Institute of Medical Research, Melbourne, Australia.
\url{http://www.statsci.org/smyth/pubs/RobustEBayesPreprint.pdf}
Smyth, G. K. (2004). Linear models and empirical Bayes methods for assessing differential expression in microarray experiments.
\emph{Statistical Applications in Genetics and Molecular Biology}, Volume \bold{3}, Article 3.
\url{http://www.statsci.org/smyth/pubs/ebayes.pdf}
}
\examples{
# See also lmFit examples
# Simulate gene expression data,
# 6 microarrays and 100 genes with one gene differentially expressed
set.seed(2004); invisible(runif(100))
M <- matrix(rnorm(100*6,sd=0.3),100,6)
M[1,] <- M[1,] + 1
fit <- lmFit(M)
# Moderated t-statistic
fit <- eBayes(fit)
topTable(fit)
# Ordinary t-statistic
ordinary.t <- fit$coef / fit$stdev.unscaled / fit$sigma
# Q-Q plots of t statistics
# Points off the line may be differentially expressed
par(mfrow=c(1,2))
qqt(ordinary.t, df=fit$df.residual, main="Ordinary t")
abline(0,1)
qqt(fit$t, df=fit$df.total,main="Moderated t")
abline(0,1)
par(mfrow=c(1,1))
}
\keyword{htest}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{bad_date}
\alias{bad_date}
\title{Dates in character form}
\format{
A tibble with 10 rows and two columns.
}
\source{
Chris Delcher.
}
\usage{
bad_date
}
\description{
A table of dates and observations with the date column stored as a
character string.
}
\keyword{datasets}
| /man/bad_date.Rd | permissive | rserran/socviz | R | false | true | 380 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{bad_date}
\alias{bad_date}
\title{Dates in character form}
\format{
A tibble with 10 rows and two columns.
}
\source{
Chris Delcher.
}
\usage{
bad_date
}
\description{
A table of dates and observations with the date column stored as a
character string.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chart.CumReturns.R
\name{chart.CumReturns}
\alias{chart.CumReturns}
\title{Cumulates and graphs a set of periodic returns}
\usage{
chart.CumReturns(
R,
wealth.index = FALSE,
geometric = TRUE,
legend.loc = NULL,
colorset = (1:12),
begin = c("first", "axis"),
plot.engine = "default",
...
)
}
\arguments{
\item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of
asset returns}
\item{wealth.index}{if \code{wealth.index} is \code{TRUE}, shows the "value
of $1", starting the cumulation of returns at 1 rather than zero}
\item{geometric}{utilize geometric chaining (TRUE) or simple/arithmetic chaining (FALSE) to aggregate returns,
default TRUE}
\item{legend.loc}{places a legend into one of nine locations on the chart:
bottomright, bottom, bottomleft, left, topleft, top, topright, right, or
center.}
\item{colorset}{color palette to use, set by default to rational choices}
\item{begin}{Align shorter series to: \itemize{ \item first - prior value of
the first column given for the reference or longer series or, \item axis -
the initial value (1 or zero) of the axis. }}
\item{plot.engine}{choose the plot engine you wish to use:
ggplot2, plotly, dygraph, googlevis, or default}
\item{\dots}{any other passthru parameters}
}
\description{
Chart that cumulates the periodic returns given and draws a line graph of
the results as a "wealth index".
}
\details{
Cumulates the return series and displays either as a wealth index or as
cumulative returns.
}
\examples{
data(edhec)
chart.CumReturns(edhec[,"Funds of Funds"],main="Cumulative Returns")
chart.CumReturns(edhec[,"Funds of Funds"],wealth.index=TRUE, main="Growth of $1")
data(managers)
chart.CumReturns(managers,main="Cumulative Returns",begin="first")
chart.CumReturns(managers,main="Cumulative Returns",begin="axis")
}
\references{
Bacon, Carl. \emph{Practical Portfolio Performance Measurement
and Attribution}. Wiley. 2004. \cr
}
\seealso{
\code{\link{chart.TimeSeries}} \cr \code{\link{plot}}
}
\author{
Peter Carl
}
| /man/chart.CumReturns.Rd | no_license | braverock/PerformanceAnalytics | R | false | true | 2,095 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chart.CumReturns.R
\name{chart.CumReturns}
\alias{chart.CumReturns}
\title{Cumulates and graphs a set of periodic returns}
\usage{
chart.CumReturns(
R,
wealth.index = FALSE,
geometric = TRUE,
legend.loc = NULL,
colorset = (1:12),
begin = c("first", "axis"),
plot.engine = "default",
...
)
}
\arguments{
\item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of
asset returns}
\item{wealth.index}{if \code{wealth.index} is \code{TRUE}, shows the "value
of $1", starting the cumulation of returns at 1 rather than zero}
\item{geometric}{utilize geometric chaining (TRUE) or simple/arithmetic chaining (FALSE) to aggregate returns,
default TRUE}
\item{legend.loc}{places a legend into one of nine locations on the chart:
bottomright, bottom, bottomleft, left, topleft, top, topright, right, or
center.}
\item{colorset}{color palette to use, set by default to rational choices}
\item{begin}{Align shorter series to: \itemize{ \item first - prior value of
the first column given for the reference or longer series or, \item axis -
the initial value (1 or zero) of the axis. }}
\item{plot.engine}{choose the plot engine you wish to use:
ggplot2, plotly, dygraph, googlevis, or default}
\item{\dots}{any other passthru parameters}
}
\description{
Chart that cumulates the periodic returns given and draws a line graph of
the results as a "wealth index".
}
\details{
Cumulates the return series and displays either as a wealth index or as
cumulative returns.
}
\examples{
data(edhec)
chart.CumReturns(edhec[,"Funds of Funds"],main="Cumulative Returns")
chart.CumReturns(edhec[,"Funds of Funds"],wealth.index=TRUE, main="Growth of $1")
data(managers)
chart.CumReturns(managers,main="Cumulative Returns",begin="first")
chart.CumReturns(managers,main="Cumulative Returns",begin="axis")
}
\references{
Bacon, Carl. \emph{Practical Portfolio Performance Measurement
and Attribution}. Wiley. 2004. \cr
}
\seealso{
\code{\link{chart.TimeSeries}} \cr \code{\link{plot}}
}
\author{
Peter Carl
}
|
# Creating Cladograms in R
###############################################################################################
# Install/Load Packages
# Install each package only when it is missing, so the script does not
# re-download on every run.
# ape: functions for reading and manipulating phylogenetic trees
if (!requireNamespace("ape", quietly = TRUE)) install.packages("ape", dependencies = TRUE)
library(ape)
# phangorn: estimation methods for phylogenetic tree building
if (!requireNamespace("phangorn", quietly = TRUE)) install.packages("phangorn", dependencies = TRUE)
library(phangorn)
# seqinr: helpers for reading DNA and protein data files
if (!requireNamespace("seqinr", quietly = TRUE)) install.packages("seqinr", dependencies = TRUE)
library(seqinr)
#Natalie's section
################################################################################################
#Import sequence data (interleaved PHYLIP alignment)
smcc <- read.dna("convertedraw(2).phy", format = "interleaved")
head(smcc)
#convert the alignment to a "phyDat" object in order to use tree-building operations in phangorn.
smc_phyDat <- phyDat(smcc, type = "DNA", levels = NULL)
#Milo's section
################################################################################################
#Model Testing and Distance Matrices
#the distance matrix turns the sequence alignment into a matrix of pairwise distances;
#this is the first step for phangorn to calculate distance between species.
mt <- modelTest(smc_phyDat)
print(mt)
#dist.ml computes pairwise distances; the model argument can be "JC69" or "F81".
#"JC69" assumes equal base frequencies between reads, "F81" does not.
dna_dist <- dist.ml(smc_phyDat, model = "JC69")
#Natalie's section
###############################################################################################
#Neighbor joining, UPGMA, and Maximum Parsimony
#UPGMA (Unweighted Pair Group Method with Arithmetic Mean) is an agglomerative
#hierarchical clustering method, built into phangorn.
smc_UPGMA <- upgma(dna_dist)
plot(smc_UPGMA, main = "UPGMA")
#Neighbor joining builds a tree by computing the lengths of its branches,
#also built into phangorn.
smc_NJ <- NJ(dna_dist)
plot(smc_NJ, main = "Neighbor Joining")
#Both
###############################################################################################
#Testing which tree is the best
#parsimony() returns a parsimony score for a tree using either the sankoff or
#fitch algorithm.
#Fitch's algorithm determines the set of possible states for each node bottom-up
#(leaves to root), then picks the ancestral state for each node top-down.
parsimony(smc_UPGMA, smc_phyDat, method = "fitch")
parsimony(smc_NJ, smc_phyDat, method = "fitch")
#Sankoff's algorithm counts the smallest number of (weighted) state changes
#needed on a given tree.
parsimony(smc_UPGMA, smc_phyDat, method = "sankoff")
parsimony(smc_NJ, smc_phyDat, method = "sankoff")
#based on these results the UPGMA is the better tree
#Milo's section
###############################################################################################
#Maximum Likelihood / Bootstrapping
#pml returns an object containing the data, the tree, and model parameters.
#optim.pml optimises the likelihood of the tree given the alignment; model "JC"
#is used because we assume equal base frequencies.
#bootstrap.pml produces a list of bootstrapped data sets.
#plot for UPGMA
fitU <- pml(smc_UPGMA, smc_phyDat)
print(fitU)
fitJCU <- optim.pml(fitU, model = "JC", rearrangement = "stochastic")
logLik(fitJCU)
bsU <- bootstrap.pml(fitJCU, bs = 100, optNni = TRUE, multicore = TRUE, control = pml.control(trace = 0))
#bug fix: pass this tree's bootstrap set bsU (the original referenced an
#undefined `bs`), and add the title after plotting instead of passing a
#title() call as an argument.
plotBS(midpoint(fitJCU$tree), bsU, p = 50, type = "p")
title(main = "UPGMA")
#plot for Neighbor joining
fitN <- pml(smc_NJ, smc_phyDat)
print(fitN)
fitJCN <- optim.pml(fitN, model = "JC", rearrangement = "stochastic")
logLik(fitJCN)
bsN <- bootstrap.pml(fitJCN, bs = 100, optNni = TRUE, multicore = TRUE, control = pml.control(trace = 0))
#bug fix: use bsN, not the undefined `bs`
plotBS(midpoint(fitJCN$tree), bsN, p = 50, type = "p")
title(main = "Neighbor Joining")
#Both
###############################################################################################
| /Cladogram syntax.R | no_license | nfiutek/myrepo | R | false | false | 4,633 | r |
# Creating Cladograms in R
###############################################################################################
# Install/Load Packages
# Install each package only when it is missing, so the script does not
# re-download on every run.
# ape: functions for reading and manipulating phylogenetic trees
if (!requireNamespace("ape", quietly = TRUE)) install.packages("ape", dependencies = TRUE)
library(ape)
# phangorn: estimation methods for phylogenetic tree building
if (!requireNamespace("phangorn", quietly = TRUE)) install.packages("phangorn", dependencies = TRUE)
library(phangorn)
# seqinr: helpers for reading DNA and protein data files
if (!requireNamespace("seqinr", quietly = TRUE)) install.packages("seqinr", dependencies = TRUE)
library(seqinr)
#Natalie's section
################################################################################################
#Import sequence data (interleaved PHYLIP alignment)
smcc <- read.dna("convertedraw(2).phy", format = "interleaved")
head(smcc)
#convert the alignment to a "phyDat" object in order to use tree-building operations in phangorn.
smc_phyDat <- phyDat(smcc, type = "DNA", levels = NULL)
#Milo's section
################################################################################################
#Model Testing and Distance Matrices
#the distance matrix turns the sequence alignment into a matrix of pairwise distances;
#this is the first step for phangorn to calculate distance between species.
mt <- modelTest(smc_phyDat)
print(mt)
#dist.ml computes pairwise distances; the model argument can be "JC69" or "F81".
#"JC69" assumes equal base frequencies between reads, "F81" does not.
dna_dist <- dist.ml(smc_phyDat, model = "JC69")
#Natalie's section
###############################################################################################
#Neighbor joining, UPGMA, and Maximum Parsimony
#UPGMA (Unweighted Pair Group Method with Arithmetic Mean) is an agglomerative
#hierarchical clustering method, built into phangorn.
smc_UPGMA <- upgma(dna_dist)
plot(smc_UPGMA, main = "UPGMA")
#Neighbor joining builds a tree by computing the lengths of its branches,
#also built into phangorn.
smc_NJ <- NJ(dna_dist)
plot(smc_NJ, main = "Neighbor Joining")
#Both
###############################################################################################
#Testing which tree is the best
#parsimony() returns a parsimony score for a tree using either the sankoff or
#fitch algorithm.
#Fitch's algorithm determines the set of possible states for each node bottom-up
#(leaves to root), then picks the ancestral state for each node top-down.
parsimony(smc_UPGMA, smc_phyDat, method = "fitch")
parsimony(smc_NJ, smc_phyDat, method = "fitch")
#Sankoff's algorithm counts the smallest number of (weighted) state changes
#needed on a given tree.
parsimony(smc_UPGMA, smc_phyDat, method = "sankoff")
parsimony(smc_NJ, smc_phyDat, method = "sankoff")
#based on these results the UPGMA is the better tree
#Milo's section
###############################################################################################
#Maximum Likelihood / Bootstrapping
#pml returns an object containing the data, the tree, and model parameters.
#optim.pml optimises the likelihood of the tree given the alignment; model "JC"
#is used because we assume equal base frequencies.
#bootstrap.pml produces a list of bootstrapped data sets.
#plot for UPGMA
fitU <- pml(smc_UPGMA, smc_phyDat)
print(fitU)
fitJCU <- optim.pml(fitU, model = "JC", rearrangement = "stochastic")
logLik(fitJCU)
bsU <- bootstrap.pml(fitJCU, bs = 100, optNni = TRUE, multicore = TRUE, control = pml.control(trace = 0))
#bug fix: pass this tree's bootstrap set bsU (the original referenced an
#undefined `bs`), and add the title after plotting instead of passing a
#title() call as an argument.
plotBS(midpoint(fitJCU$tree), bsU, p = 50, type = "p")
title(main = "UPGMA")
#plot for Neighbor joining
fitN <- pml(smc_NJ, smc_phyDat)
print(fitN)
fitJCN <- optim.pml(fitN, model = "JC", rearrangement = "stochastic")
logLik(fitJCN)
bsN <- bootstrap.pml(fitJCN, bs = 100, optNni = TRUE, multicore = TRUE, control = pml.control(trace = 0))
#bug fix: use bsN, not the undefined `bs`
plotBS(midpoint(fitJCN$tree), bsN, p = 50, type = "p")
title(main = "Neighbor Joining")
#Both
###############################################################################################
# The existence of this file forces devtools::check() to run all the tests in
# the "tests/testthat/" directory.
library(testthat)
library(ggrecessions)
# Discover and run every test file under tests/testthat/ for this package.
test_check("ggrecessions")
| /tests/testthat.R | permissive | matthewstern/ggrecessions | R | false | false | 182 | r | # The existence of this file forces devtools::check() to run all the tests in
# the "tests/testthat/" directory.
library(testthat)
library(ggrecessions)
test_check("ggrecessions")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dvec.R
\name{print.dvec}
\alias{print.dvec}
\title{Print Decorated Vector}
\usage{
\method{print}{dvec}(x, ...)
}
\arguments{
\item{x}{dvec}
\item{...}{passed arguments}
}
\value{
character
}
\description{
Prints a decorated vector.
}
\seealso{
Other dvec:
\code{\link{[.dvec}()},
\code{\link{[<-.dvec}()},
\code{\link{[[.dvec}()},
\code{\link{[[<-.dvec}()},
\code{\link{as.data.frame.dvec}()},
\code{\link{as_dvec.character}()},
\code{\link{as_dvec.complex}()},
\code{\link{as_dvec.dvec}()},
\code{\link{as_dvec.integer}()},
\code{\link{as_dvec.logical}()},
\code{\link{as_dvec.numeric}()},
\code{\link{as_dvec}()},
\code{\link{c.dvec}()},
\code{\link{classified.dvec}()},
\code{\link{desolve.dvec}()},
\code{\link{explicit_guide.data.frame}()},
\code{\link{format.dvec}()},
\code{\link{implicit_guide.dvec}()},
\code{\link{length.dvec}()},
\code{\link{resolve.classified}()},
\code{\link{resolve.dvec}()},
\code{\link{unclassified.dvec}()}
}
\concept{dvec}
\keyword{internal}
| /man/print.dvec.Rd | no_license | bergsmat/yamlet | R | false | true | 1,058 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dvec.R
\name{print.dvec}
\alias{print.dvec}
\title{Print Decorated Vector}
\usage{
\method{print}{dvec}(x, ...)
}
\arguments{
\item{x}{dvec}
\item{...}{passed arguments}
}
\value{
character
}
\description{
Prints a decorated vector.
}
\seealso{
Other dvec:
\code{\link{[.dvec}()},
\code{\link{[<-.dvec}()},
\code{\link{[[.dvec}()},
\code{\link{[[<-.dvec}()},
\code{\link{as.data.frame.dvec}()},
\code{\link{as_dvec.character}()},
\code{\link{as_dvec.complex}()},
\code{\link{as_dvec.dvec}()},
\code{\link{as_dvec.integer}()},
\code{\link{as_dvec.logical}()},
\code{\link{as_dvec.numeric}()},
\code{\link{as_dvec}()},
\code{\link{c.dvec}()},
\code{\link{classified.dvec}()},
\code{\link{desolve.dvec}()},
\code{\link{explicit_guide.data.frame}()},
\code{\link{format.dvec}()},
\code{\link{implicit_guide.dvec}()},
\code{\link{length.dvec}()},
\code{\link{resolve.classified}()},
\code{\link{resolve.dvec}()},
\code{\link{unclassified.dvec}()}
}
\concept{dvec}
\keyword{internal}
|
library('reshape2')
library('plyr')
#####
#####
##### Get slopes of n seconds after each token ######
# Return the guise of the token immediately preceding tokenNum for the given
# stimulus, or the sentinel "first" when tokenNum is the first token (no prior
# token exists).
#
# stimulus: stimulus name matching token_guise_dataframe$stimulus
# tokenNum: 1-based token number within that stimulus
# token_guise_dataframe: data frame with columns stimulus, token_num, time, guise
getPriorGuise <- function(stimulus, tokenNum, token_guise_dataframe) {
if (tokenNum != 1) {
priorNum <- tokenNum - 1
# Bug fix: look up the PRIOR token (priorNum), not the current one
# (the original filtered on tokenNum, leaving priorNum unused and
# returning the current token's guise instead of the preceding one).
priorGuise <- token_guise_dataframe[token_guise_dataframe$stimulus==stimulus & token_guise_dataframe$token_num==priorNum,]$guise
return(priorGuise)
}
return('first')
}
# Time of the token after tokenNum in `times`, or `total_seconds` (the last
# usable second) when tokenNum is already the final token.
getNextTokenTime <- function(times, tokenNum, total_seconds) {
nextIdx <- tokenNum + 1
if (nextIdx <= length(times)) {
times[nextIdx]
} else {
total_seconds
}
}
# Compute the rating change ("slope") for one participant row at a given token.
#
# thisRow: one row of the wide-format ratings table, as delivered by apply();
#   rating columns are named "rating.<time>"
# n: window length in seconds, used only when calculateEachNSeconds is TRUE
# tokenNum: 1-based token number anchoring the slope
# total_seconds: last usable second of the stimulus; caps the window end
# calculateEachNSeconds: TRUE = slope over the n seconds after the token;
#   FALSE = slope from this token to the next token (or to total_seconds)
# token_guise_dataframe: data frame with columns stimulus, token_num, time, guise
# Returns: rating at window end minus rating at token time (numeric).
getSlope <- function(thisRow, n, tokenNum, total_seconds, calculateEachNSeconds, token_guise_dataframe) {
# apply() hands the row over as an atomic vector; rebuild a one-row data
# frame so columns can be looked up by name
thisRow <- data.frame(as.list(thisRow), stringsAsFactors = FALSE)
stimulus <- thisRow$stimulus
# Verify that stimulus is in token_guise_dataframe
if ((stimulus %in% unique(token_guise_dataframe$stimulus)==FALSE)) {
stop(paste("ERROR: stimulus '", stimulus, "' missing from token_guise_dataframe"))
}
# Get time of token according to stimulus name
time1 <- token_guise_dataframe[token_guise_dataframe$stimulus==stimulus & token_guise_dataframe$token_num==tokenNum,]$time
if (calculateEachNSeconds==TRUE) {
# for n-second windows after each token instead of token-to-token slopes
time2 <- time1 + n
# If token time + n greater than total num. seconds, use last second
if (time2 > total_seconds) {time2 <- total_seconds}
}
else {
ordered_times <- sort(token_guise_dataframe[token_guise_dataframe$stimulus==stimulus,]$time)
time2 <- getNextTokenTime(ordered_times, tokenNum, total_seconds)
}
# NOTE(review): assumes time1/time2 exactly match the "rating.<time>" column
# suffixes produced by the wide reshape (e.g. "rating.12.5") — confirm the
# rounding used upstream matches
time1_rating <- as.numeric(as.character(thisRow[,paste("rating", time1, sep=".")]))
time2_rating <- as.numeric(as.character(thisRow[,paste("rating", time2, sep=".")]))
slope <- time2_rating - time1_rating
return(slope)
}
# Look up the guise recorded for a given token of a given stimulus in the
# token/guise reference table.
getGuise <- function(stimulus, tokenNum, token_guise_dataframe) {
isMatch <- token_guise_dataframe$stimulus == stimulus &
token_guise_dataframe$token_num == tokenNum
token_guise_dataframe[isMatch, ]$guise
}
# Annotate a long-format slope table with guise metadata: the guise of each
# token, a "<tokenNum>*<stimulus>" identifier, and the guise of the preceding
# token, all converted to factors at the end.
addGuiseInfo <- function(dataframe, token_guise_dataframe) {
print('...adding guise info')
annotated <- dataframe
annotated$guise <- mapply(getGuise, stimulus=annotated$stimulus, tokenNum=annotated$tokenNum, MoreArgs=list(token_guise_dataframe=token_guise_dataframe))
annotated$tokenStimulus <- paste(annotated$tokenNum, annotated$stimulus, sep='*')
annotated$tokenStimulus <- mapply(gsub, 'slope', '', annotated$tokenStimulus)
print('...adding prior guise')
annotated$priorGuise <- mapply(getPriorGuise, annotated$stimulus, annotated$tokenNum, MoreArgs=list(token_guise_dataframe=token_guise_dataframe))
print('...converting strings to factors')
for (factorCol in c('guise', 'tokenStimulus', 'priorGuise')) {
annotated[[factorCol]] <- factor(annotated[[factorCol]])
}
return(annotated)
}
# Compute per-participant rating slopes for every token in every stimulus.
# Pipeline: validate the token/guise table, round its times to the results'
# granularity, widen the ratings (one column per timepoint), compute a slope
# per token via getSlope, then melt back to long format and attach guise
# metadata via addGuiseInfo.
calculateSlopes <- function(dataframe, token_guise_dataframe, n=5, calculateEachNSeconds=TRUE, roundingFactor=.5) {
### dataframe = the dataframe
#### n = how many seconds after token to calculate slopes for, if calculateEachNSeconds==TRUE
### calculate each n seconds: if TRUE, calculate slopes for N seconds after each token; if FALSE, calculate token-to-token slopes
# roundingFactor = 1 for rounded to nearest second, 2 for rounded to nearest .5 second, 4 for nearest .25 second, etc.
# NOTE(review): with the default roundingFactor = .5, round(time*.5)/.5 rounds
# to the nearest 2 seconds — confirm the default matches the results data
print('...Calculating some slopes...')
myData <- dataframe
### Verify that token_guise_dataframe contains the correct columns
for (column in c('token_num', 'stimulus', 'time', 'guise')) {
if ((column %in% colnames(token_guise_dataframe)) == FALSE) {
stop(paste('ERROR: token_guise_dataframe is missing required column:', column))
}
}
### Round token_guise_dataframe to match results data rounding
token_guise_dataframe$time <- round(token_guise_dataframe$time*roundingFactor)/roundingFactor
### Calculate how many tokens
howManyTokens <- length(unique(token_guise_dataframe$token_num))
### Reshape the data
# 1) Get rid of X.1, time, originalTime, timePlusParticipant, x b/c these are different per timepoint
myData$X.1 <- NULL
myData$time <- NULL
myData$originalTime <- NULL
myData$timePlusParticipant <- NULL
myData$x <- NULL
myData$X <- NULL
myData$timestamp <- NULL
# Remember how many columns remain; used below to locate the rating columns
# added by reshape() so they can be dropped again.
original_ncol <- ncol(myData)
## Some stimuli are slightly longer than others, check to get the shortest one
## and use that as the max time
maxes <- c()
for (stimulus in unique(myData$stimulus)) {
thisMax <- max(myData[myData$stimulus==stimulus,]$roundedTime)
maxes <- c(maxes, thisMax)
}
total_seconds <- min(maxes)
# Drop late times
myData <- myData[myData$roundedTime <= total_seconds,]
# 2) Then reshape so there's a column for each second
myData2 <- reshape(myData, v.names="rating", idvar = "participant", timevar = "roundedTime", direction = "wide")
### Now for each token, use getSlope function to get the slope
# Each new column "slope <k>" holds, per participant, the rating change
# anchored at token k (window semantics controlled by calculateEachNSeconds).
for (tokenCount in seq(howManyTokens)) {
myData2[,paste("slope", tokenCount)] <- apply(X=myData2, MARGIN=1, FUN=getSlope, n=n, tokenNum=tokenCount, total_seconds=total_seconds, calculateEachNSeconds=calculateEachNSeconds, token_guise_dataframe=token_guise_dataframe)
}
### Delete extra columns, reshape, and return
# Drop the per-timepoint rating columns, keeping id columns and slope columns
myData3 <- myData2[-c((original_ncol -1) :(original_ncol + total_seconds*roundingFactor - 1)) ]
# Melt so each row is one participant x one token; "variable" holds the
# "slope <k>" column name and "value" the slope itself
myData4 <- melt(myData3, id.vars=c(colnames(myData3[c(1:(original_ncol-2))])))
colnames(myData4)[colnames(myData4)=="variable"] <- "tokenNum"
colnames(myData4)[colnames(myData4)=="value"] <- "slope"
# Strip the "slope " prefix to recover the numeric token number
myData4$tokenNum <- as.numeric(as.character(mapply(gsub, 'slope ', '', myData4$tokenNum)))
myData4 <- addGuiseInfo(myData4, token_guise_dataframe)
return(myData4)
} | /slope_functions.r | no_license | supermartha/realtime_r | R | false | false | 5,886 | r | library('reshape2')
library('plyr')
#####
#####
##### Get slopes of n seconds after each token ######
getPriorGuise <- function(stimulus, tokenNum, token_guise_dataframe) {
  # Return the guise of the token immediately preceding `tokenNum` for this
  # stimulus, or the sentinel 'first' when there is no preceding token.
  #
  # stimulus: stimulus name matched against token_guise_dataframe$stimulus.
  # tokenNum: 1-based token index within the stimulus.
  # token_guise_dataframe: lookup table with columns stimulus, token_num,
  #   time, guise.
  if (tokenNum != 1) {
    priorNum <- tokenNum - 1
    # BUG FIX: the lookup previously filtered on token_num == tokenNum, which
    # returned the *current* token's guise; the prior token's guise is wanted
    # (priorNum was computed but never used).
    priorGuise <- token_guise_dataframe[token_guise_dataframe$stimulus == stimulus & token_guise_dataframe$token_num == priorNum, ]$guise
    return(priorGuise)
  }
  return('first')
}
getNextTokenTime <- function(times, tokenNum, total_seconds) {
  # Time of the token after `tokenNum` in the (sorted) `times` vector; when
  # the current token is the last one, fall back to the clip's final second.
  nextIdx <- tokenNum + 1
  if (nextIdx <= length(times)) {
    return(times[nextIdx])
  }
  total_seconds
}
### Compute the rating change following one token, for one participant row.
###
### thisRow: one row of the wide-format ratings data (arrives as a named
###   vector from apply(), hence the data.frame(as.list(...)) conversion).
### n: window length in seconds (used only when calculateEachNSeconds is TRUE).
### tokenNum: which token to anchor the slope at.
### total_seconds: last usable timepoint; windows are clamped to it.
### calculateEachNSeconds: TRUE = slope over [token, token + n];
###   FALSE = slope from this token to the next token.
### token_guise_dataframe: lookup table (stimulus, token_num, time, guise).
getSlope <- function(thisRow, n, tokenNum, total_seconds, calculateEachNSeconds, token_guise_dataframe) {
  # apply() hands each row over as a character vector; rebuild a one-row
  # data frame so columns can be accessed by name.
  thisRow <- data.frame(as.list(thisRow), stringsAsFactors = FALSE)
  stimulus <- thisRow$stimulus
  # Verify that stimulus is in token_guise_dataframe
  if ((stimulus %in% unique(token_guise_dataframe$stimulus)==FALSE)) {
    stop(paste("ERROR: stimulus '", stimulus, "' missing from token_guise_dataframe"))
  }
  # Start of the window: the (already rounded) time of this token.
  time1 <- token_guise_dataframe[token_guise_dataframe$stimulus==stimulus & token_guise_dataframe$token_num==tokenNum,]$time
  if (calculateEachNSeconds==TRUE) {
    # n-second window after the token, instead of a token-to-token slope
    time2 <- time1 + n
    # If token time + n runs past the end of the clip, clamp to the last second
    if (time2 > total_seconds) {time2 <- total_seconds}
  }
  else {
    # Token-to-token mode: the window ends at the next token's time.
    ordered_times <- sort(token_guise_dataframe[token_guise_dataframe$stimulus==stimulus,]$time)
    time2 <- getNextTokenTime(ordered_times, tokenNum, total_seconds)
  }
  # Ratings live in columns named "rating.<time>" after the wide reshape;
  # they arrive as strings, so coerce back to numeric.
  time1_rating <- as.numeric(as.character(thisRow[,paste("rating", time1, sep=".")]))
  time2_rating <- as.numeric(as.character(thisRow[,paste("rating", time2, sep=".")]))
  slope <- time2_rating - time1_rating
  return(slope)
}
getGuise <- function(stimulus, tokenNum, token_guise_dataframe) {
  # Look up the guise label recorded for a given (stimulus, token) pair.
  matchingRows <- token_guise_dataframe$stimulus == stimulus &
    token_guise_dataframe$token_num == tokenNum
  token_guise_dataframe[matchingRows, ]$guise
}
### Attach guise metadata to the long-format slopes table.
###
### dataframe: long-format output of calculateSlopes() with columns
###   stimulus, tokenNum, slope, plus participant metadata.
### token_guise_dataframe: lookup table (stimulus, token_num, time, guise).
### Returns the input with added guise, tokenStimulus, and priorGuise
### factor columns.  Progress is print()ed as it goes.
addGuiseInfo <- function(dataframe, token_guise_dataframe) {
  print('...adding guise info')
  myData <- dataframe
  # Guise of each (stimulus, token) pair.
  myData$guise <- mapply(getGuise, stimulus=myData$stimulus, tokenNum=myData$tokenNum, MoreArgs=list(token_guise_dataframe=token_guise_dataframe))
  # Unique token-within-stimulus label, e.g. "3*stimA"; the gsub below strips
  # any leftover "slope" text carried over from the reshaped column names.
  myData$tokenStimulus <- paste(myData$tokenNum, myData$stimulus, sep='*')
  myData$tokenStimulus <- mapply(gsub, 'slope', '', myData$tokenStimulus)
  print('...adding prior guise')
  # Guise of the preceding token (or 'first' for token 1).
  myData$priorGuise <- mapply(getPriorGuise, myData$stimulus, myData$tokenNum, MoreArgs=list(token_guise_dataframe=token_guise_dataframe))
  print('...converting strings to factors')
  myData$guise <- factor(myData$guise)
  myData$tokenStimulus <- factor(myData$tokenStimulus)
  myData$priorGuise <- factor(myData$priorGuise)
  return(myData)
}
### Main entry point: compute one slope per (participant, token).
###
### dataframe = wide ratings data; must contain participant, stimulus,
###   roundedTime, and rating columns (plus bookkeeping columns dropped below)
### n = how many seconds after a token to calculate slopes for, when
###   calculateEachNSeconds==TRUE
### calculateEachNSeconds: if TRUE, calculate slopes for the n seconds after
###   each token; if FALSE, calculate token-to-token slopes
### roundingFactor = 1 for times rounded to the nearest second, 2 for the
###   nearest .5 second, 4 for the nearest .25 second, etc.
###   NOTE(review): the default is .5, which by this convention would be a
###   2-second grid -- confirm against how roundedTime was produced upstream.
calculateSlopes <- function(dataframe, token_guise_dataframe, n=5, calculateEachNSeconds=TRUE, roundingFactor=.5) {
  print('...Calculating some slopes...')
  myData <- dataframe
  ### Verify that token_guise_dataframe contains the required columns
  for (column in c('token_num', 'stimulus', 'time', 'guise')) {
    if ((column %in% colnames(token_guise_dataframe)) == FALSE) {
      stop(paste('ERROR: token_guise_dataframe is missing required column:', column))
    }
  }
  ### Round the token times onto the same grid as the results data
  token_guise_dataframe$time <- round(token_guise_dataframe$time*roundingFactor)/roundingFactor
  ### How many tokens per stimulus
  howManyTokens <- length(unique(token_guise_dataframe$token_num))
  ### Reshape the data
  # 1) Get rid of X.1, time, originalTime, timePlusParticipant, x b/c these are different per timepoint
  myData$X.1 <- NULL
  myData$time <- NULL
  myData$originalTime <- NULL
  myData$timePlusParticipant <- NULL
  myData$x <- NULL
  myData$X <- NULL
  myData$timestamp <- NULL
  original_ncol <- ncol(myData)
  ## Some stimuli are slightly longer than others; find the shortest one
  ## and use that as the max time so every stimulus is fully covered
  maxes <- c()
  for (stimulus in unique(myData$stimulus)) {
    thisMax <- max(myData[myData$stimulus==stimulus,]$roundedTime)
    maxes <- c(maxes, thisMax)
  }
  total_seconds <- min(maxes)
  # Drop timepoints past the shortest stimulus
  myData <- myData[myData$roundedTime <= total_seconds,]
  # 2) Then reshape so there's a "rating.<time>" column for each timepoint
  myData2 <- reshape(myData, v.names="rating", idvar = "participant", timevar = "roundedTime", direction = "wide")
  ### Now for each token, apply getSlope row-wise to get that token's slope
  for (tokenCount in seq(howManyTokens)) {
    myData2[,paste("slope", tokenCount)] <- apply(X=myData2, MARGIN=1, FUN=getSlope, n=n, tokenNum=tokenCount, total_seconds=total_seconds, calculateEachNSeconds=calculateEachNSeconds, token_guise_dataframe=token_guise_dataframe)
  }
  ### Drop the per-timepoint rating columns, melt back to long format
  ### (one row per participant x token), rename, and attach guise info
  myData3 <- myData2[-c((original_ncol -1) :(original_ncol + total_seconds*roundingFactor - 1)) ]
  myData4 <- melt(myData3, id.vars=c(colnames(myData3[c(1:(original_ncol-2))])))
  colnames(myData4)[colnames(myData4)=="variable"] <- "tokenNum"
  colnames(myData4)[colnames(myData4)=="value"] <- "slope"
  # Melted column names look like "slope 3"; strip the prefix to recover
  # the numeric token index
  myData4$tokenNum <- as.numeric(as.character(mapply(gsub, 'slope ', '', myData4$tokenNum)))
  myData4 <- addGuiseInfo(myData4, token_guise_dataframe)
  return(myData4)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SQLiteConnection.R, R/query.R, R/table.R
\docType{class}
\name{SQLiteConnection-class}
\alias{SQLiteConnection-class}
\alias{format.SQLiteConnection}
\alias{show,SQLiteConnection-method}
\alias{dbIsValid,SQLiteConnection-method}
\alias{dbQuoteIdentifier,SQLiteConnection,character-method}
\alias{dbQuoteIdentifier,SQLiteConnection,SQL-method}
\alias{dbUnquoteIdentifier,SQLiteConnection,SQL-method}
\alias{dbGetException,SQLiteConnection-method}
\alias{dbSendQuery,SQLiteConnection,character-method}
\alias{sqlData,SQLiteConnection-method}
\alias{dbRemoveTable,SQLiteConnection,character-method}
\alias{dbExistsTable,SQLiteConnection,character-method}
\alias{dbListTables,SQLiteConnection-method}
\alias{dbListFields,SQLiteConnection,character-method}
\alias{dbDataType,SQLiteConnection-method}
\title{Class SQLiteConnection (and methods)}
\usage{
\method{format}{SQLiteConnection}(x, ...)
\S4method{show}{SQLiteConnection}(object)
\S4method{dbIsValid}{SQLiteConnection}(dbObj, ...)
\S4method{dbQuoteIdentifier}{SQLiteConnection,character}(conn, x, ...)
\S4method{dbQuoteIdentifier}{SQLiteConnection,SQL}(conn, x, ...)
\S4method{dbUnquoteIdentifier}{SQLiteConnection,SQL}(conn, x, ...)
\S4method{dbGetException}{SQLiteConnection}(conn, ...)
\S4method{dbSendQuery}{SQLiteConnection,character}(conn, statement,
params = NULL, ...)
\S4method{sqlData}{SQLiteConnection}(con, value,
row.names = pkgconfig::get_config("RSQLite::row.names.query", FALSE), ...)
\S4method{dbRemoveTable}{SQLiteConnection,character}(conn, name, ...,
temporary = FALSE, fail_if_missing = TRUE)
\S4method{dbExistsTable}{SQLiteConnection,character}(conn, name, ...)
\S4method{dbListTables}{SQLiteConnection}(conn, ...)
\S4method{dbListFields}{SQLiteConnection,character}(conn, name, ...)
\S4method{dbDataType}{SQLiteConnection}(dbObj, obj, ...)
}
\arguments{
\item{temporary}{If \code{TRUE}, only temporary tables are considered.}
\item{fail_if_missing}{If \code{FALSE}, \code{dbRemoveTable()} succeeds if the
table doesn't exist.}
}
\description{
SQLiteConnection objects are created by passing \code{\link[=SQLite]{SQLite()}} as first
argument to \code{\link[DBI:dbConnect]{DBI::dbConnect()}}.
They are a superclass of the \linkS4class{DBIConnection} class.
The "Usage" section lists the class methods overridden by \pkg{RSQLite}.
}
\seealso{
The corresponding generic functions
\code{\link[DBI:dbSendQuery]{DBI::dbSendQuery()}}, \code{\link[DBI:dbGetQuery]{DBI::dbGetQuery()}},
\code{\link[DBI:dbSendStatement]{DBI::dbSendStatement()}}, \code{\link[DBI:dbExecute]{DBI::dbExecute()}},
\code{\link[DBI:dbExistsTable]{DBI::dbExistsTable()}}, \code{\link[DBI:dbListTables]{DBI::dbListTables()}}, \code{\link[DBI:dbListFields]{DBI::dbListFields()}},
\code{\link[DBI:dbRemoveTable]{DBI::dbRemoveTable()}}, and \code{\link[DBI:sqlData]{DBI::sqlData()}}.
}
\keyword{internal}
| /man/SQLiteConnection-class.Rd | permissive | hannes/RSQLite | R | false | true | 2,939 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SQLiteConnection.R, R/query.R, R/table.R
\docType{class}
\name{SQLiteConnection-class}
\alias{SQLiteConnection-class}
\alias{format.SQLiteConnection}
\alias{show,SQLiteConnection-method}
\alias{dbIsValid,SQLiteConnection-method}
\alias{dbQuoteIdentifier,SQLiteConnection,character-method}
\alias{dbQuoteIdentifier,SQLiteConnection,SQL-method}
\alias{dbUnquoteIdentifier,SQLiteConnection,SQL-method}
\alias{dbGetException,SQLiteConnection-method}
\alias{dbSendQuery,SQLiteConnection,character-method}
\alias{sqlData,SQLiteConnection-method}
\alias{dbRemoveTable,SQLiteConnection,character-method}
\alias{dbExistsTable,SQLiteConnection,character-method}
\alias{dbListTables,SQLiteConnection-method}
\alias{dbListFields,SQLiteConnection,character-method}
\alias{dbDataType,SQLiteConnection-method}
\title{Class SQLiteConnection (and methods)}
\usage{
\method{format}{SQLiteConnection}(x, ...)
\S4method{show}{SQLiteConnection}(object)
\S4method{dbIsValid}{SQLiteConnection}(dbObj, ...)
\S4method{dbQuoteIdentifier}{SQLiteConnection,character}(conn, x, ...)
\S4method{dbQuoteIdentifier}{SQLiteConnection,SQL}(conn, x, ...)
\S4method{dbUnquoteIdentifier}{SQLiteConnection,SQL}(conn, x, ...)
\S4method{dbGetException}{SQLiteConnection}(conn, ...)
\S4method{dbSendQuery}{SQLiteConnection,character}(conn, statement,
params = NULL, ...)
\S4method{sqlData}{SQLiteConnection}(con, value,
row.names = pkgconfig::get_config("RSQLite::row.names.query", FALSE), ...)
\S4method{dbRemoveTable}{SQLiteConnection,character}(conn, name, ...,
temporary = FALSE, fail_if_missing = TRUE)
\S4method{dbExistsTable}{SQLiteConnection,character}(conn, name, ...)
\S4method{dbListTables}{SQLiteConnection}(conn, ...)
\S4method{dbListFields}{SQLiteConnection,character}(conn, name, ...)
\S4method{dbDataType}{SQLiteConnection}(dbObj, obj, ...)
}
\arguments{
\item{temporary}{If \code{TRUE}, only temporary tables are considered.}
\item{fail_if_missing}{If \code{FALSE}, \code{dbRemoveTable()} succeeds if the
table doesn't exist.}
}
\description{
SQLiteConnection objects are created by passing \code{\link[=SQLite]{SQLite()}} as first
argument to \code{\link[DBI:dbConnect]{DBI::dbConnect()}}.
They are a superclass of the \linkS4class{DBIConnection} class.
The "Usage" section lists the class methods overridden by \pkg{RSQLite}.
}
\seealso{
The corresponding generic functions
\code{\link[DBI:dbSendQuery]{DBI::dbSendQuery()}}, \code{\link[DBI:dbGetQuery]{DBI::dbGetQuery()}},
\code{\link[DBI:dbSendStatement]{DBI::dbSendStatement()}}, \code{\link[DBI:dbExecute]{DBI::dbExecute()}},
\code{\link[DBI:dbExistsTable]{DBI::dbExistsTable()}}, \code{\link[DBI:dbListTables]{DBI::dbListTables()}}, \code{\link[DBI:dbListFields]{DBI::dbListFields()}},
\code{\link[DBI:dbRemoveTable]{DBI::dbRemoveTable()}}, and \code{\link[DBI:sqlData]{DBI::sqlData()}}.
}
\keyword{internal}
|
library(algstat)
### Name: bump
### Title: Convert Dimensions of Approval Data
### Aliases: bump
### ** Examples
## Not run:
##D
##D V0 <- 100 # V0 = number of voters (not votes)
##D bump(V0, 6, 3, 0, 0) # no bump
##D bump(V0, 6, 3, 0, 1) # 1-up
##D bump(V0, 6, 3, 0, 2) # 2-up
##D bump(V0, 6, 3, 0, 3) # 3-up
##D
##D V1 <- c(30, 40, 50, 50, 60, 70)
##D bump(V1, 6, 3, 1, 0) # bump down
##D bump(V1, 6, 3, 1, 1) # no bump
##D bump(V1, 6, 3, 1, 2) # 1-up
##D bump(V1, 6, 3, 1, 3) # 2-up
##D
##D cbind(
##D bump(V1, 6, 3, 1, 2, "popular"),
##D bump(V1, 6, 3, 1, 2, "even")
##D )
##D
##D
##D
##D
##D
##D data(cookie)
##D (out <- spectral(cookie$freq, 6, 3, cookie$cookies))
##D
##D (V0 <- out$obs$V0)
##D bump(V0, 6, 3, 0, 0)
##D bump(V0, 6, 3, 0, 1)
##D bump(V0, 6, 3, 0, 2)
##D bump(V0, 6, 3, 0, 3)
##D out$fullExp$V0
##D out$decompose(out$effects[,1])
##D
##D (V1 <- out$obs$V1)
##D bump(V1, 6, 3, 1, 0) # cbind(bump(V1, 6, 3, 1, 0), out$fullExp$V1[[1]])
##D bump(V1, 6, 3, 1, 1) # cbind(bump(V1, 6, 3, 1, 1), out$fullExp$V1[[2]])
##D bump(V1, 6, 3, 1, 2) # cbind(bump(V1, 6, 3, 1, 2), out$fullExp$V1[[3]])
##D bump(V1, 6, 3, 1, 3) # cbind(bump(V1, 6, 3, 1, 3), out$fullExp$V1[[4]])
##D out$fullExp$V1 # the sampler doesn't distribute it's samples up evenly
##D
##D (V2 <- out$obs$V2)
##D bump(V2, 6, 3, 2, 0) # cbind(bump(V2, 6, 3, 2, 0), out$fullExp$V2[[1]])
##D bump(V2, 6, 3, 2, 1) # cbind(bump(V2, 6, 3, 2, 1), out$fullExp$V2[[2]])
##D bump(V2, 6, 3, 2, 2) # cbind(bump(V2, 6, 3, 2, 2), out$fullExp$V2[[3]])
##D bump(V2, 6, 3, 2, 3) # cbind(bump(V2, 6, 3, 2, 3), out$fullExp$V2[[4]])
##D
##D (V3 <- out$obs$V3)
##D bump(V3, 6, 3, 3, 0)
##D bump(V3, 6, 3, 3, 1)
##D bump(V3, 6, 3, 3, 2)
##D bump(V3, 6, 3, 3, 3)
##D
##D
##D
## End(Not run)
| /data/genthat_extracted_code/algstat/examples/bump.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,771 | r | library(algstat)
### Name: bump
### Title: Convert Dimensions of Approval Data
### Aliases: bump
### ** Examples
## Not run:
##D
##D V0 <- 100 # V0 = number of voters (not votes)
##D bump(V0, 6, 3, 0, 0) # no bump
##D bump(V0, 6, 3, 0, 1) # 1-up
##D bump(V0, 6, 3, 0, 2) # 2-up
##D bump(V0, 6, 3, 0, 3) # 3-up
##D
##D V1 <- c(30, 40, 50, 50, 60, 70)
##D bump(V1, 6, 3, 1, 0) # bump down
##D bump(V1, 6, 3, 1, 1) # no bump
##D bump(V1, 6, 3, 1, 2) # 1-up
##D bump(V1, 6, 3, 1, 3) # 2-up
##D
##D cbind(
##D bump(V1, 6, 3, 1, 2, "popular"),
##D bump(V1, 6, 3, 1, 2, "even")
##D )
##D
##D
##D
##D
##D
##D data(cookie)
##D (out <- spectral(cookie$freq, 6, 3, cookie$cookies))
##D
##D (V0 <- out$obs$V0)
##D bump(V0, 6, 3, 0, 0)
##D bump(V0, 6, 3, 0, 1)
##D bump(V0, 6, 3, 0, 2)
##D bump(V0, 6, 3, 0, 3)
##D out$fullExp$V0
##D out$decompose(out$effects[,1])
##D
##D (V1 <- out$obs$V1)
##D bump(V1, 6, 3, 1, 0) # cbind(bump(V1, 6, 3, 1, 0), out$fullExp$V1[[1]])
##D bump(V1, 6, 3, 1, 1) # cbind(bump(V1, 6, 3, 1, 1), out$fullExp$V1[[2]])
##D bump(V1, 6, 3, 1, 2) # cbind(bump(V1, 6, 3, 1, 2), out$fullExp$V1[[3]])
##D bump(V1, 6, 3, 1, 3) # cbind(bump(V1, 6, 3, 1, 3), out$fullExp$V1[[4]])
##D out$fullExp$V1 # the sampler doesn't distribute it's samples up evenly
##D
##D (V2 <- out$obs$V2)
##D bump(V2, 6, 3, 2, 0) # cbind(bump(V2, 6, 3, 2, 0), out$fullExp$V2[[1]])
##D bump(V2, 6, 3, 2, 1) # cbind(bump(V2, 6, 3, 2, 1), out$fullExp$V2[[2]])
##D bump(V2, 6, 3, 2, 2) # cbind(bump(V2, 6, 3, 2, 2), out$fullExp$V2[[3]])
##D bump(V2, 6, 3, 2, 3) # cbind(bump(V2, 6, 3, 2, 3), out$fullExp$V2[[4]])
##D
##D (V3 <- out$obs$V3)
##D bump(V3, 6, 3, 3, 0)
##D bump(V3, 6, 3, 3, 1)
##D bump(V3, 6, 3, 3, 2)
##D bump(V3, 6, 3, 3, 3)
##D
##D
##D
## End(Not run)
|
library(shiny)
# Define the UI part of the app: three tabs, one per grouped-data calculation
ui <- fluidPage(
  # Title shown at the top of the interface
  titlePanel("Hacks para datos agrupados"),
  # Build the set of tabs
  tabsetPanel(
    # First tab: quantile position ("magic number") without interpolation
    tabPanel('El magi número',
             # Layout holding the input panel and the output panel
             sidebarLayout(
               # Input side of tab 1
               sidebarPanel(
                 # Each input declares its type, its internal id, and its
                 # on-screen label (labels are user-facing Spanish text)
                 numericInput(inputId = 'na',
                              label = 'Tamaño de la muestra',
                              value = 0),
                 textInput(inputId = 'opcion',
                           label = 'Escoja un cálculo',
                           value = 'Cuartil'),
                 numericInput(inputId = 'numa',
                              label = 'Número auxiliar (num)',
                              value = 0)
               ),
               # Output side of tab 1
               mainPanel(textOutput('caption1'))
             )
    ),
    # Second tab: interpolated quantiles (median/quartile/decile/percentile)
    tabPanel('Fórmulas varias',
             sidebarLayout(
               # Input side of tab 2
               sidebarPanel(width = 2/3, # adjusts the panel width
                            fluidRow(
                              column(2,
                                     numericInput(inputId = 'Icb',
                                                  label = 'Intervalo de clase',
                                                  value = 0),
                                     numericInput(inputId = 'Lib',
                                                  label = 'Limite inferior',
                                                  value = 0)
                              ),
                              column(3,
                                     numericInput(inputId = 'fib',
                                                  label = 'freq absoluta',
                                                  value = 0),
                                     numericInput(inputId = 'Fantb',
                                                  label = 'Freq acumuluda anterior',
                                                  value = 0)
                              ),
                              column(4,
                                     numericInput(inputId = 'numb',
                                                  label = 'num (Q, Dec, Perc, Med)',
                                                  value = 0),
                                     numericInput(inputId = 'nb',
                                                  label = 'Tamaño de la muestra',
                                                  value = 0)
                              )
                            ),
                            textInput(inputId = 'opciones',
                                      label = 'Escoge un cálculo',
                                      value = 'Percentil')
               ),
               # Output side of tab 2
               mainPanel(textOutput("caption2"))
             )
    ),
    # Third tab: grouped-data mode formula
    tabPanel('Ecuación de la moda',
             sidebarLayout(
               # Input side of tab 3
               sidebarPanel(width = 0.45,
                            fluidPage(
                              column(3,
                                     numericInput(inputId = 'Lic',
                                                  label = 'Límite inferior',
                                                  value = 0),
                                     numericInput(inputId = 'Icc',
                                                  label = 'Intervalo de clase',
                                                  value = 0),
                                     numericInput(inputId = 'fic',
                                                  label = 'Freq absoluta',
                                                  value = 0)
                              ),
                              column(4,
                                     numericInput(inputId = 'fantc',
                                                  label = 'Freq absoluta acumulada anterior',
                                                  value = 0),
                                     numericInput(inputId = 'fdep',
                                                  label = 'Freq absoluta acumulada posterior',
                                                  value = 0)
                              )
                            )
               ),
               # Output side of tab 3
               mainPanel(textOutput('caption3'))
             )
    )
  )
)
# Define the server part that implements each tab's calculation
server <- function(input, output) {
  # Handler for tab 2 ("Fórmulas varias"): interpolated position measure
  # (median / quartile / decile / percentile) for grouped data.
  output$caption2 <- renderPrint({
    # per_dec: L_i + ((position - F_prev) / f_i) * class-width interpolation.
    #   n = sample size, fi = class frequency, Li = lower class limit,
    #   Ic = class width, Fant = cumulative frequency before the class,
    #   num = which quantile, calculo = which measure.
    per_dec <- function(n, fi, Li, Ic, Fant, num, calculo = 'Percentil'){
      # Validate inputs so the result is meaningful (error messages are
      # user-facing Spanish text and are left untouched)
      if(calculo != 'Mediana' & calculo != 'Cuartil' & calculo != 'Decil' & calculo != 'Percentil') stop("calculo debe ser 'Mediana', o 'Cuartil', o 'Decil', o 'Percentil'")
      if(calculo == 'Mediana'){
        if(num != 1) stop("El valor de 'num' admitido para cálculo = 'Mediana' es 1")
      }
      if(calculo == 'Decil'){
        if(num <= 0 | num >= 10) stop("los valores de 'num' admitidos para calculo = 'Decil' es entre 0 y 10")
      }
      if(calculo == 'Percentil'){
        if(num <= 0 | num >= 100) stop("los valores de 'num' admitidos para calculo = 'Percentil' es entre 0 y 100")
      }
      if(calculo == 'Cuartil'){
        if(num != 1 & num != 2 & num != 3) stop("Para calculo = 'Cuartil' los 'num' admitidos son 1 (Q1), 2 (Q2) y 3 (Q3)")
      }
      # Position of the requested quantile within the cumulative counts
      c <- n * num
      switch(calculo,
             'Decil' = co <- c/10,
             'Percentil'= co <- c/100,
             'Cuartil' = co <- c/4,
             'Mediana' = co <- c/2
      )
      tot <- Li + ((co-Fant)/(fi)) * Ic
      round(tot, 2)
    }
    # Evaluate with the current widget values from tab 2
    res <- per_dec(n = input$nb, fi = input$fib, Li = input$Lib, Ic = input$Icb,
                   Fant = input$Fantb, num = input$numb, calculo = input$opciones)
    if(input$opciones == 'Mediana'){
      paste('El valor de la', input$opciones, 'es de:', res)
    } else(paste('El valor del', input$opciones, input$numb, 'es de:', res))
  })
  # Handler for tab 1 ("El magi número"): the quantile position n*num/k,
  # without interpolation.
  output$caption1 <- renderPrint({
    magic_number <- function(n, calculo = 'Cuartil', num){
      # Same validation rules as per_dec above
      if(calculo != 'Mediana' & calculo != 'Cuartil' & calculo != 'Decil' & calculo != 'Percentil') stop("calculo debe ser 'Mediana', o 'Cuartil', o 'Decil', o 'Percentil'")
      if(calculo == 'Mediana'){
        if(num != 1) stop("El valor de 'num' admitido para calculo = 'Mediana' es 1")
      }
      if(calculo == 'Decil'){
        if(num <= 0 | num >= 10) stop("los valores de 'num' admitidos para calculo = 'Decil' es entre 0 y 10")
      }
      if(calculo == 'Percentil'){
        if(num <= 0 | num >= 100) stop("los valores de 'num' admitidos para calculo = 'Percentil' es entre 0 y 100")
      }
      if(calculo == 'Cuartil'){
        if(num != 1 & num != 2 & num != 3) stop("Para calculo = 'Cuartil' los 'num' admitidos son 1 (Q1), 2 (Q2) y 3 (Q3)")
      }
      c <- n * num
      switch(calculo,
             'Decil' = co <- c/10,
             'Percentil'= co <- c/100,
             'Cuartil' = co <- c/4,
             'Mediana' = co <- c/2)
      co
    }
    opci <- input$opcion
    resula <- magic_number(n = input$na, calculo = opci, num = input$numa)
    paste('Tu magi numero es:', resula)
  })
  # Handler for tab 3 ("Ecuación de la moda"): grouped-data mode.
  output$caption3 <- renderPrint({
    # NOTE(review): the textbook mode formula uses del2 <- fi - fdep (modal
    # frequency minus the next class's frequency); here del2 is computed as
    # fdep - fant -- confirm this is intentional before relying on it.
    Moda <- function(Li, Ic, fi, fant, fdep){
      del1 <- fi - fant
      del2 <- fdep - fant
      y <- Li+((del1)/(del1+del2))*Ic
      round(y,2)
    }
    mo <- Moda(Li = input$Lic, Ic = input$Icc, fi = input$fic, fant = input$fantc, fdep = input$fdep)
    paste('El resultado de la moda es', mo)
  })
}
# Se ejecuta la aplicacion
shinyApp(ui = ui, server = server)
| /datos_agrupados_app.R | no_license | Sleiven/Shiny | R | false | false | 10,003 | r | library(shiny)
# Definir la parte UI del archivo
ui <- fluidPage(
# Se selecciona el título de la interfaz
titlePanel("Hacks para datos agrupados"),
# Generacion de un conjunto de pestañas
tabsetPanel(
# Primera pestaña
tabPanel('El magi número',
# Se genera interfaz que contiene entrada y salida
sidebarLayout(
# Parte de la entrada de interfaz 1
sidebarPanel(
# Se especifica el tipo de entrada, su codigo interno y como aparece
numericInput(inputId = 'na',
label = 'Tamaño de la muestra',
value = 0),
textInput(inputId = 'opcion',
label = 'Escoja un cálculo',
value = 'Cuartil'),
numericInput(inputId = 'numa',
label = 'Número auxiliar (num)',
value = 0)
),
# Parte de la salida de la interfaz 1
mainPanel(textOutput('caption1'))
)
),
# Segunda pestaña
tabPanel('Fórmulas varias',
sidebarLayout(
# Parte de la entrada de la interfaz 2
sidebarPanel(width = 2/3, # Para modificar el ancho de la interfaz
fluidRow(
column(2,
numericInput(inputId = 'Icb',
label = 'Intervalo de clase',
value = 0),
numericInput(inputId = 'Lib',
label = 'Limite inferior',
value = 0)
),
column(3,
numericInput(inputId = 'fib',
label = 'freq absoluta',
value = 0),
numericInput(inputId = 'Fantb',
label = 'Freq acumuluda anterior',
value = 0)
),
column(4,
numericInput(inputId = 'numb',
label = 'num (Q, Dec, Perc, Med)',
value = 0),
numericInput(inputId = 'nb',
label = 'Tamaño de la muestra',
value = 0)
)
),
textInput(inputId = 'opciones',
label = 'Escoge un cálculo',
value = 'Percentil')
),
# Parte de salida de la interfaz 2
mainPanel(textOutput("caption2"))
)
),
# Tercera pestaña
tabPanel('Ecuación de la moda',
sidebarLayout(
# Parte de la entrada de la interfaz 3
sidebarPanel(width = 0.45,
fluidPage(
column(3,
numericInput(inputId = 'Lic',
label = 'Límite inferior',
value = 0),
numericInput(inputId = 'Icc',
label = 'Intervalo de clase',
value = 0),
numericInput(inputId = 'fic',
label = 'Freq absoluta',
value = 0)
),
column(4,
numericInput(inputId = 'fantc',
label = 'Freq absoluta acumulada anterior',
value = 0),
numericInput(inputId = 'fdep',
label = 'Freq absoluta acumulada posterior',
value = 0)
)
)
),
# Parte de salida de la interfaz 3
mainPanel(textOutput('caption3'))
)
)
)
)
# Se define la parter server que genera los procedimientos
server <- function(input, output) {
# Se específica el procedimiento de la pestaña 2
output$caption2 <- renderPrint({
# Codigo de funcion de pestaña 2
per_dec <- function(n, fi, Li, Ic, Fant, num, calculo = 'Percentil'){
# Condiciones inciales para entregar un buen resultado
if(calculo != 'Mediana' & calculo != 'Cuartil' & calculo != 'Decil' & calculo != 'Percentil') stop("calculo debe ser 'Mediana', o 'Cuartil', o 'Decil', o 'Percentil'")
if(calculo == 'Mediana'){
if(num != 1) stop("El valor de 'num' admitido para cálculo = 'Mediana' es 1")
}
if(calculo == 'Decil'){
if(num <= 0 | num >= 10) stop("los valores de 'num' admitidos para calculo = 'Decil' es entre 0 y 10")
}
if(calculo == 'Percentil'){
if(num <= 0 | num >= 100) stop("los valores de 'num' admitidos para calculo = 'Percentil' es entre 0 y 100")
}
if(calculo == 'Cuartil'){
if(num != 1 & num != 2 & num != 3) stop("Para calculo = 'Cuartil' los 'num' admitidos son 1 (Q1), 2 (Q2) y 3 (Q3)")
}
c <- n * num
switch(calculo,
'Decil' = co <- c/10,
'Percentil'= co <- c/100,
'Cuartil' = co <- c/4,
'Mediana' = co <- c/2
)
tot <- Li + ((co-Fant)/(fi)) * Ic
round(tot, 2)
}
# Ejecucion interna del comando
res <- per_dec(n = input$nb, fi = input$fib, Li = input$Lib, Ic = input$Icb,
Fant = input$Fantb, num = input$numb, calculo = input$opciones)
if(input$opciones == 'Mediana'){
paste('El valor de la', input$opciones, 'es de:', res)
} else(paste('El valor del', input$opciones, input$numb, 'es de:', res))
})
# Se específica el procedimiento de la pestaña 1
output$caption1 <- renderPrint({
magic_number <- function(n, calculo = 'Cuartil', num){
if(calculo != 'Mediana' & calculo != 'Cuartil' & calculo != 'Decil' & calculo != 'Percentil') stop("calculo debe ser 'Mediana', o 'Cuartil', o 'Decil', o 'Percentil'")
if(calculo == 'Mediana'){
if(num != 1) stop("El valor de 'num' admitido para calculo = 'Mediana' es 1")
}
if(calculo == 'Decil'){
if(num <= 0 | num >= 10) stop("los valores de 'num' admitidos para calculo = 'Decil' es entre 0 y 10")
}
if(calculo == 'Percentil'){
if(num <= 0 | num >= 100) stop("los valores de 'num' admitidos para calculo = 'Percentil' es entre 0 y 100")
}
if(calculo == 'Cuartil'){
if(num != 1 & num != 2 & num != 3) stop("Para calculo = 'Cuartil' los 'num' admitidos son 1 (Q1), 2 (Q2) y 3 (Q3)")
}
c <- n * num
switch(calculo,
'Decil' = co <- c/10,
'Percentil'= co <- c/100,
'Cuartil' = co <- c/4,
'Mediana' = co <- c/2)
co
}
opci <- input$opcion
resula <- magic_number(n = input$na, calculo = opci, num = input$numa)
paste('Tu magi numero es:', resula)
})
# Se específica el procedimiento de la pestaña 3
output$caption3 <- renderPrint({
Moda <- function(Li, Ic, fi, fant, fdep){
del1 <- fi - fant
del2 <- fdep - fant
y <- Li+((del1)/(del1+del2))*Ic
round(y,2)
}
mo <- Moda(Li = input$Lic, Ic = input$Icc, fi = input$fic, fant = input$fantc, fdep = input$fdep)
paste('El resultado de la moda es', mo)
})
}
# Se ejecuta la aplicacion
shinyApp(ui = ui, server = server)
|
# Species name for this assembly-registry file.
ORGANISM <- "Felis catus"
### List of assemblies first by WGS Project, then by date.
### Each entry records the assembly name, assembly level, release date,
### breed/sex metadata, its accession, and which sequences (if any) are
### to be treated as circular.
ASSEMBLIES <- list(
    ## --- WGS Project: AANG04 ---
    ## 4805 sequences.
    list(assembly="Felis_catus_9.0",
         assembly_level="Chromosome",
         date="2017/11/20",
         extra_info=c(breed="Abyssinian", sex="female"),
         assembly_accession="GCF_000181335.3", # felCat9
         circ_seqs="MT"),  # mitochondrial sequence flagged circular
    ## 4507 sequences.
    list(assembly="felCat9.1_X",
         assembly_level="Chromosome",
         date="2021/10/01",
         extra_info=c(breed="Abyssinian", sex="female"),
         assembly_accession="GCA_000181335.5",
         circ_seqs=character(0)),  # no circular sequences recorded
    ## --- WGS Project: JAFEKA01 ---
    ## 71 sequences.
    list(assembly="F.catus_Fca126_mat1.0",
         assembly_level="Chromosome",
         date="2021/05/13",
         extra_info=c(sex="female"),
         assembly_accession="GCF_018350175.1",
         circ_seqs="MT")
)
| /inst/registered/NCBI_assemblies/Felis_catus.R | no_license | Bioconductor/GenomeInfoDb | R | false | false | 951 | r | ORGANISM <- "Felis catus"
### List of assemblies first by WGS Project, then by date.
ASSEMBLIES <- list(
## --- WGS Project: AANG04 ---
## 4805 sequences.
list(assembly="Felis_catus_9.0",
assembly_level="Chromosome",
date="2017/11/20",
extra_info=c(breed="Abyssinian", sex="female"),
assembly_accession="GCF_000181335.3", # felCat9
circ_seqs="MT"),
## 4507 sequences.
list(assembly="felCat9.1_X",
assembly_level="Chromosome",
date="2021/10/01",
extra_info=c(breed="Abyssinian", sex="female"),
assembly_accession="GCA_000181335.5",
circ_seqs=character(0)),
## --- WGS Project: JAFEKA01 ---
## 71 sequences.
list(assembly="F.catus_Fca126_mat1.0",
assembly_level="Chromosome",
date="2021/05/13",
extra_info=c(sex="female"),
assembly_accession="GCF_018350175.1",
circ_seqs="MT")
)
|
library(jjb)
### Name: char_at
### Title: Character at Position _i_
### Aliases: char_at
### ** Examples
# Example string
s = "statistics"
# Single character
char_at(s, 1)
# Vectorized position
char_at(s, c(2, 3))
| /data/genthat_extracted_code/jjb/examples/char_at.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 223 | r | library(jjb)
### Name: char_at
### Title: Character at Position _i_
### Aliases: char_at
### ** Examples
# Example string
s = "statistics"
# Single character
char_at(s, 1)
# Vectorized position
char_at(s, c(2, 3))
|
pkgname <- "clc"
source(file.path(R.home("share"), "R", "examples-header.R"))
options(warn = 1)
base::assign(".ExTimings", "clc-Ex.timings", pos = 'CheckExEnv')
base::cat("name\tuser\tsystem\telapsed\n", file=base::get(".ExTimings", pos = 'CheckExEnv'))
base::assign(".format_ptime",
function(x) {
if(!is.na(x[4L])) x[1L] <- x[1L] + x[4L]
if(!is.na(x[5L])) x[2L] <- x[2L] + x[5L]
options(OutDec = '.')
format(x[1L:3L], digits = 7L)
},
pos = 'CheckExEnv')
### * </HEADER>
library('clc')
base::assign(".oldSearch", base::search(), pos = 'CheckExEnv')
base::assign(".old_wd", base::getwd(), pos = 'CheckExEnv')
cleanEx()
nameEx("hello")
### * hello
flush(stderr()); flush(stdout())
base::assign(".ptime", proc.time(), pos = "CheckExEnv")
### Name: hello
### Title: Hello, World!
### Aliases: hello
### ** Examples
hello()
base::assign(".dptime", (proc.time() - get(".ptime", pos = "CheckExEnv")), pos = "CheckExEnv")
base::cat("hello", base::get(".format_ptime", pos = 'CheckExEnv')(get(".dptime", pos = "CheckExEnv")), "\n", file=base::get(".ExTimings", pos = 'CheckExEnv'), append=TRUE, sep="\t")
### * <FOOTER>
###
cleanEx()
options(digits = 7L)
base::cat("Time elapsed: ", proc.time() - base::get("ptime", pos = 'CheckExEnv'),"\n")
grDevices::dev.off()
###
### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "\\(> \\)?### [*]+" ***
### End: ***
quit('no')
| /clc.Rcheck/clc-Ex.R | no_license | bambooforest/clc | R | false | false | 1,396 | r | pkgname <- "clc"
source(file.path(R.home("share"), "R", "examples-header.R"))
options(warn = 1)
base::assign(".ExTimings", "clc-Ex.timings", pos = 'CheckExEnv')
base::cat("name\tuser\tsystem\telapsed\n", file=base::get(".ExTimings", pos = 'CheckExEnv'))
base::assign(".format_ptime",
function(x) {
if(!is.na(x[4L])) x[1L] <- x[1L] + x[4L]
if(!is.na(x[5L])) x[2L] <- x[2L] + x[5L]
options(OutDec = '.')
format(x[1L:3L], digits = 7L)
},
pos = 'CheckExEnv')
### * </HEADER>
library('clc')
base::assign(".oldSearch", base::search(), pos = 'CheckExEnv')
base::assign(".old_wd", base::getwd(), pos = 'CheckExEnv')
cleanEx()
nameEx("hello")
### * hello
flush(stderr()); flush(stdout())
base::assign(".ptime", proc.time(), pos = "CheckExEnv")
### Name: hello
### Title: Hello, World!
### Aliases: hello
### ** Examples
hello()
base::assign(".dptime", (proc.time() - get(".ptime", pos = "CheckExEnv")), pos = "CheckExEnv")
base::cat("hello", base::get(".format_ptime", pos = 'CheckExEnv')(get(".dptime", pos = "CheckExEnv")), "\n", file=base::get(".ExTimings", pos = 'CheckExEnv'), append=TRUE, sep="\t")
### * <FOOTER>
###
cleanEx()
options(digits = 7L)
base::cat("Time elapsed: ", proc.time() - base::get("ptime", pos = 'CheckExEnv'),"\n")
grDevices::dev.off()
###
### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "\\(> \\)?### [*]+" ***
### End: ***
quit('no')
|
#' Calculate Upper/Lower Bound Confidence Intervals for Accountability
#'
#' Wilson score interval bounds for a proportion that is supplied (and
#' returned) on the 0-100 percentage scale.
#'
#' @param n A count (valid tests, enrolled, etc.).
#' @param pct A percentage (Percent On Track/Mastered, Chronic Absenteeism, etc.).
#'
#' @export
#'
#' @examples
#'
#' ci_upper_bound(n = 10, pct = 50)
#' ci_lower_bound(n = 10, pct = 50)
#'
ci_lower_bound <- function(n, pct) {
  p <- pct / 100
  z <- qnorm(0.975)
  # Wilson score interval, lower limit, expressed back on the 0-100 scale
  # and rounded to one decimal place.
  center <- p + z^2 / (2 * n)
  margin <- z * sqrt(p * (1 - p) / n + z^2 / (4 * n^2))
  round5(100 * n / (n + z^2) * (center - margin), 1)
}
#' @export
ci_upper_bound <- function(n, pct) {
  p <- pct / 100
  z <- qnorm(0.975)
  # Wilson score interval, upper limit, expressed back on the 0-100 scale
  # and rounded to one decimal place.
  center <- p + z^2 / (2 * n)
  margin <- z * sqrt(p * (1 - p) / n + z^2 / (4 * n^2))
  round5(100 * n / (n + z^2) * (center + margin), 1)
}
| /R/lower_upper_bound_ci.R | no_license | tnedu/acct | R | false | false | 760 | r | #' Calculate Upper/Lower Bound Confidence Intervals for Accountability
#'
#' @param n A count (valid tests, enrolled, etc.).
#' @param pct A percentage (Percent On Track/Mastered, Chronic Absenteeism, etc.).
#'
#' @export
#'
#' @examples
#'
#' ci_upper_bound(n = 10, pct = 50)
#' ci_lower_bound(n = 10, pct = 50)
#'
ci_lower_bound <- function(n, pct) {
pct <- pct/100
round5(100 * n/(n + qnorm(0.975)^2) * (pct + (qnorm(0.975)^2/(2 * n)) -
qnorm(0.975) * sqrt((pct * (1 - pct))/n + qnorm(0.975)^2/(4 * n^2))), 1)
}
#' @export
ci_upper_bound <- function(n, pct) {
pct <- pct/100
round5(100 * n/(n + qnorm(0.975)^2) * (pct + (qnorm(0.975)^2/(2 * n)) +
qnorm(0.975) * sqrt((pct * (1 - pct))/n + qnorm(0.975)^2/(4 * n^2))), 1)
}
|
library(shiny)
library(shinythemes)

# Left column: file upload plus dynamically rendered column-selection
# controls (populated server-side via uiOutput).
upload_column <- column(
  width = 3,
  wellPanel(
    uiOutput("manual_ct"),
    fileInput("ct_file", "Datei hochladen", accept = c(".xls", ".xlsx", ".csv", ".txt")),
    uiOutput("header"),
    uiOutput("sample_col"),
    uiOutput("ct_col"),
    uiOutput("selection"),
    uiOutput("info_col")
  )
)

# Middle column: assay settings and the standard-curve display.
settings_column <- column(
  width = 3,
  wellPanel(
    radioButtons("virus", "Virus", choices = c("Adeno", "CMV", "EBV"), selected = NULL),
    radioButtons("cycler", "Instrument", choices = c("QuantStudio", "Veriti", "PCR3", "PCR4"), selected = NULL),
    textOutput("formula"),
    h4(""),
    plotOutput("std_curve")
  )
)

# Right column: sign-off selector, report download and the result table.
result_column <- column(
  width = 6,
  wellPanel(
    selectInput("visum", "Visum", width = 100, choices = c("MHu", "AB", "CD")),
    downloadButton("report", "Report erstellen")
  ),
  h4("Resultat"),
  tableOutput(outputId = "result")
)

# Page layout: readable theme, title, three-column row.
shinyUI(fluidPage(
  theme = shinytheme("readable"),
  titlePanel("Quantitative PCR"),
  fluidRow(upload_column, settings_column, result_column)
))
| /DIA/qPCR/ui.R | permissive | medvir/shiny-server | R | false | false | 1,659 | r | library(shiny)
library(shinythemes)
shinyUI(fluidPage(
theme = shinytheme("readable"),
titlePanel("Quantitative PCR"),
fluidRow(
column(width = 3,
wellPanel(
uiOutput("manual_ct"),
fileInput("ct_file", "Datei hochladen", accept = c(".xls", ".xlsx", ".csv", ".txt")),
uiOutput("header"),
uiOutput("sample_col"),
uiOutput("ct_col"),
uiOutput("selection"),
uiOutput("info_col")
)
),
column(width =3,
wellPanel(
radioButtons("virus", "Virus", choices = c("Adeno", "CMV", "EBV"), selected = NULL),
radioButtons("cycler", "Instrument", choices = c("QuantStudio", "Veriti", "PCR3", "PCR4"), selected = NULL),
textOutput("formula"),
h4(""),
plotOutput("std_curve")
)
),
column(width = 6,
wellPanel(selectInput("visum", "Visum", width = 100, choices = c("MHu", "AB", "CD")),
downloadButton("report", "Report erstellen")
),
h4("Resultat"),
tableOutput(outputId = "result")
)
)
))
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{splitNetworkData}
\alias{splitNetworkData}
\title{splitNetworkData}
\usage{
splitNetworkData(network_data, group, network_only = FALSE)
}
\arguments{
\item{network_data}{network_data}
\item{group}{group}
\item{network_only}{network_only}
}
\value{
value
}
\description{
Description splitNetworkData
}
\author{
TszKin Julian Chan \email{ctszkin@gmail.com}
}
| /SESHK2011/man/splitNetworkData.Rd | no_license | ctszkin/SESHK2011 | R | false | false | 419 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{splitNetworkData}
\alias{splitNetworkData}
\title{splitNetworkData}
\usage{
splitNetworkData(network_data, group, network_only = FALSE)
}
\arguments{
\item{network_data}{network_data}
\item{group}{group}
\item{network_only}{network_only}
}
\value{
value
}
\description{
Description splitNetworkData
}
\author{
TszKin Julian Chan \email{ctszkin@gmail.com}
}
|
library(EDR)
### Name: predict.edr
### Title: Predict values of the link function for a fitted edr-object
### Aliases: predict.edr
### Keywords: smooth regression
### ** Examples
require(EDR)
## Not run: demo(edr_ex4)
| /data/genthat_extracted_code/EDR/examples/predict.edr.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 225 | r | library(EDR)
### Name: predict.edr
### Title: Predict values of the link function for a fitted edr-object
### Aliases: predict.edr
### Keywords: smooth regression
### ** Examples
require(EDR)
## Not run: demo(edr_ex4)
|
# Coursera R Programming -- Week 3 quiz notes (loop functions & debugging).

# Q1
# Mean Sepal.Length of the virginica species in the 'iris' data set.
library(datasets)
data(iris)
# A description of the dataset can be found by running ?iris
str(iris)
head(iris)
virginica <- subset(iris, Species == 'virginica')
sl <- data.frame(virginica$Sepal.Length)
# Equivalent extraction without the intermediate subset:
sl <- data.frame(iris$Sepal.Length[iris$Species == "virginica"])
sl
summary(sl)
# Mean :6.588

# Q2
# apply() margins: either the rows (1), the columns (2) or both (1:2).
apply(iris[, 1:4], 2, mean)  # by columns
# Sepal.Length Sepal.Width Petal.Length Petal.Width
#     5.843333    3.057333     3.758000    1.199333
apply(iris[, 1:4], 1, mean)  # by rows

# Q3
# Load the 'mtcars' dataset.
library(datasets)
data(mtcars)
# Information about the dataset can be found by running ?mtcars
str(mtcars)
head(mtcars)
# Average miles per gallon (mpg) by number of cylinders (cyl).
# tapply(X, INDEX, FUN): X is the value vector, INDEX the grouping
# factor -- note the argument order (the earlier notes had the two
# swapped, which would group cyl by the mpg values).
tapply(mtcars$mpg, mtcars$cyl, mean)
# Apply a function to each cell of a ragged array, that is to each
# (non-empty) group of values given by a unique combination of the
# levels of certain factors.
apply(mtcars, 2, mean)  # mean for every column
sapply(split(mtcars$mpg, mtcars$cyl), mean)
#        4        6        8
# 26.66364 19.74286 15.10000
# sapply is a user-friendly version of lapply, by default returning a
# vector or matrix if appropriate.
# lapply returns a list of the same length as X, each element of which
# is the result of applying FUN to the corresponding element of X.
# create a list with 2 elements
l <- list(a = 1:10, b = 11:20)
# the mean of the values in each element
lapply(l, mean)
# $a
# [1] 5.5
#
# $b
# [1] 15.5
# the sum of the values in each element
lapply(l, sum)
# $a
# [1] 55
#
# $b
# [1] 155
# answer is: sapply(split(mtcars$mpg, mtcars$cyl), mean)
# also: with(mtcars, tapply(mpg, cyl, mean))

# Q4
# Absolute difference between the average horsepower of 4-cylinder
# cars and the average horsepower of 8-cylinder cars.
abs(mean(split(mtcars, mtcars$cyl)$'4'$hp) - mean(split(mtcars, mtcars$cyl)$'8'$hp))
# 126.5779
# Equivalent without split() (stray console prompt "> " removed so the
# script parses):
abs(mean(mtcars$hp[mtcars$cyl == 4]) - mean(mtcars$hp[mtcars$cyl == 8]))

# Q5
# debug() suspends execution at the start of a function and drops you
# into the browser (interactive sessions only; ignored otherwise).
debug(ls)
ls()
undebug(ls)  # restore normal behaviour after the experiment
# You will be prompted to specify at which line of the function
# you would like to suspend execution and enter the browser.
# debugonce(fun) does the same for a single call (it requires a
# function argument; the bare debugonce() in the original errored).
# When you need to quit debug mode, type help at the Browse[]> prompt.
# Answer: Execution of 'ls' will suspend at the beginning of the function and you will be in the browser. | /Coursera-R-Programming/Week 3/Quiz 3.r | no_license | LyndonBubb/datasciencecoursera | R | false | false | 2,469 | r | # Q1
# look at the 'iris' dataset that comes with R.
library(datasets)
data(iris)
# A description of the dataset can be found by running
?iris
str(iris)
head(iris)
virginica<-subset(iris,Species == 'virginica')
sl<-data.frame(virginica$Sepal.Length)
sl<-data.frame(iris$Sepal.Length[iris$Species=="virginica"])
sl
summary(sl)
# Mean :6.588
# Q2
# Simple: either the rows (1), the columns (2) or both (1:2)
apply(iris[, 1:4], 2, mean)# by columns
# Sepal.Length Sepal.Width Petal.Length Petal.Width
# 5.843333 3.057333 3.758000 1.199333
apply(iris[, 1:4], 1, mean)# by rows
# Q3
# Load the 'mtcars' dataset
library(datasets)
data(mtcars)
# object names 'mtcars' in your workspace
# information about the dataset by running
?mtcars
str(mtcars)
head(mtcars)
# calculate the average miles per gallon (mpg)
# by number of cylinders in the car (cyl)
tapply(mtcars$cyl, mtcars$mpg, mean)
# Apply a function to each cell of a ragged array,
# that is to each (non-empty) group of values
# given by a unique combination of the levels of certain factors.
apply(mtcars, 2, mean)# mean for every column
sapply(split(mtcars$mpg, mtcars$cyl), mean)
# 4 6 8
# 26.66364 19.74286 15.10000
# sapply is a user-firendly version of lapply
# by default returning a vector or matrix if appropriate
# description of lapply:
# lapply returns a list of the same length as X,
# each element of which is the result of applying FUN
# to the corresponding element of X
# create a list with 2 elements
l <- list(a = 1:10, b = 11:20)
# the mean of the values in each element
lapply(l, mean)
# $a
# [1] 5.5
#
# $b
# [1] 15.5
# the sum of the values in each element
lapply(l, sum)
# $a
# [1] 55
#
# $b
# [1] 155
# answer is: sapply(split(mtcars$mpg, mtcars$cyl), mean)
# also: with(mtcars, tapply(mpg, cyl, mean))
# Q4
# the absolute difference between
# the average horsepower of 4-cylinder cars and
# the average horsepower of 8-cylinder cars
abs(mean(split(mtcars, mtcars$cyl)$'4'$hp) - mean(split(mtcars, mtcars$cyl)$'8'$hp))
# 126.5779
> abs(mean(mtcars$hp[mtcars$cyl==4])-mean(mtcars$hp[mtcars$cyl==8]))
# Q5
debug(ls)
ls()
# You will be prompted to specify at which line of the function
# you would like to suspend execution and enter the browser.
debugonce()
# when you need to quit the status debug
# type help at the Browse[]>
# Answer: Execution of 'ls' will suspend at the beginning of the function and you will be in the browser. |
#Dummy data
#set.seed(1)
#e1 <- rnorm(10, mean = 0, sd = 1)
#set.seed(2)
#e2 <- rnorm(10, mean = 0, sd = 1)
# FUNCTION 1 ; Do a dm test on bootstrapped sample
# Run a Diebold-Mariano test on a paired bootstrap resample of two
# forecast-error vectors.
#
# Arguments:
#   e1, e2 - time-indexed forecast-error vectors of equal length.
#   h      - forecast horizon, passed through to dm.test().
#   seed   - RNG seed; using the same seed for both draws keeps the
#            resampled rows time-aligned across the two series.
# Value: the p-value of the two-sided DM test on the bootstrap sample.
dm.bootstrap <- function(e1, e2, h, seed) {
  l1 <- length(e1)
  l2 <- length(e2)
  # A paired bootstrap only makes sense for equal-length series.  The
  # original version only print()ed a message here and then fell
  # through to dm.test() with undefined (or stale global) boot_e1 /
  # boot_e2 -- fail loudly instead.
  if (l1 != l2) {
    stop("Error vectors do not have the same length")
  }
  set.seed(seed)
  boot_e1 <- sample(e1, l1, replace = TRUE)
  set.seed(seed)  # same seed => same indices => pairing preserved
  boot_e2 <- sample(e2, l2, replace = TRUE)
  # Forecast horizon h must be supplied explicitly; default would be 1.
  test <- dm.test(boot_e1, boot_e2, h = h, alternative = "two.sided")
  test$p.value
}
# FUNCTION 2: Repeated the dm test on a bootstrap sample N times (uses function 1)
# Repeat the paired-bootstrap Diebold-Mariano test N times and collect
# the p-values.
#
# Arguments:
#   e1, e2 - forecast-error vectors of equal length.
#   N      - number of bootstrap replicates.
#   h      - forecast horizon, forwarded to dm.bootstrap().
# Value: numeric vector of N bootstrap p-values.
repeat.dm.bootstrap <- function(e1, e2, N, h) {
  library(forecast)  # provides dm.test(), used inside dm.bootstrap()
  # Draw all replicate seeds up front: dm.bootstrap() calls set.seed(),
  # so drawing a fresh "random" seed after each replicate (as the
  # original loop did) made every subsequent seed a deterministic
  # function of the previous one.  vapply() also avoids growing the
  # result vector inside a loop.
  seeds <- sample.int(1000, N, replace = TRUE)
  vapply(seeds, function(s) dm.bootstrap(e1, e2, h, s), numeric(1))
}
#Test
p_vals <- repeat.dm.bootstrap(e_1, e_2, 1e5, h = 1) #Check if seed for rand.int is changing as a control measure
hist(p_vals)
####Delete this later
#Data
#log. returns for ARIMA
index.0 <- read.table("top40index.txt")
index.1 <- as.matrix(index.0)
index.2 <-(t(index.1))[2:1828,1]
index <- as.numeric(index.2)
# Daily log returns: r_t = log(P_t / P_{t-1}), one fewer element than
# the price series.  Replaces the original loop over 1:length(index)-1,
# which by operator precedence is 0:(n-1), making the i = 0 pass a
# silent no-op; diff(log(...)) yields the same n-1 returns directly.
log.return <- diff(log(index))
#data-frame for prophet package
start_date <- as.Date("2012-07-01")
end_date <- as.Date("2014-01-01") # Should be "2017-06-30" if we use the full time period, i.e w/o in- sample and out of sample
dates <- bizseq(start_date,end_date,"actual")
prophet_data <- data.frame(ds = dates,y = log.return[1:550]) #Note to self : remove [1:1766] If forecasting using full period
#Model estimation
library(forecast)
arima.fit <- auto.arima(data$return, approximation = F, trace = F) #This also builds the best ARIMA model and it turns out that you got it right. Its the arima(2,0,2) :)
prophet.fit <- prophet(df = prophet_data,yearly.seasonality = TRUE)
#Model Forecasts and residuals
#ARIMA
start_date <- 1766 #???
end_date <- 1825 # ???
period <- 1
arima_residuals <- list()
j = 1
for(i in seq(start_date, end_date, period)){
arima_model <- arima(data$returns[1:i], order = c(2,0,2),
optim.control = list(maxit = 1000))
arima_pred <- predict(arima_model, n.ahead = 5, newdata = data$returns[(i+1):(i+5)])
arima_error <- arima_pred$pred - data$returns[(i+1):(i+5)]
arima_residuals[[j]] <- data.frame(ret = data$returns[(i+1):(i+5)],
pred = as.numeric(arima_pred$pred),
error = as.numeric(arima_error))
j <- j + 1
}
do.call(rbind, arima_residuals)
arima_five_res <- do.call(rbind, arima_residuals)
plot(arima_five_res$ret, type = "l")
lines(arima_five_res$pred, col = 2)
#Prophet
pred_period_dates <- make_future_dataframe(proph_fit,periods = 1826-550) #Dates for prediction period
library("tidyr")
proph_forecast <- predict(proph_fit,pred_period_dates)
proph_in_sample_forecast <- proph_forecast$yhat[1:550]
prophet_residuals <- proph_in_sample_forecast - log.return[1:550]
#Bootstrap
e1 <- arima_five_res$error #Qn: There's 300 of them, how to I control this? Why are there NA's?
e2 <- prophet_residuals[1:300] #Truncated this so the length of the matrices match
p_vals <-repeat.dm.bootstrap(e1, e1, 1000, h = 1) #Check if seed for rand.int is changing as a control measure
hist(p_vals)
# Hanjo
#Model Forecasts and residuals
#ARIMA
# Rolling-origin evaluation of an ARIMA(2,0,2) on daily log returns:
# for each origin date, fit on all observations before it, forecast the
# next window, and collect actuals/predictions/errors per origin.
#
# NOTE(review): several apparent defects, flagged but not changed here:
#   * the start_date/end_date arguments are immediately overwritten by
#     hard-coded dates, and ret_data is never used -- the body reads
#     the global prophet_data instead; confirm intent.
#   * period is turned into the string "+<n> day"; the later arithmetic
#     start_date + (for_period - 1) and forecast(h = for_period) expect
#     numbers, so this likely errors as written -- verify before use.
#   * the loop variable i is never used in the body: start_date does
#     not advance, so every iteration fits the same window.
arima_eval <- function(ret_data, start_date, end_date, period = 1){
  library(lubridate)
  start_date <- as.Date("2013-11-01") #??? overwrites the argument
  end_date <- as.Date("2014-01-01") # ??? overwrites the argument
  period <- paste0("+", period," day")
  for_period <- period
  arima_residuals <- list()  # one residual data.frame per origin
  j = 1
  for(i in seq(start_date, end_date, by = period)){
    #training
    # indices strictly before the forecast origin
    train <- which(prophet_data$ds < start_date)
    # indices of the forecast window starting at the origin
    test <- which(prophet_data$ds %in% seq(from = start_date,
                                           to = (start_date + (for_period-1)),
                                           by = "day"))
    arima_model <- arima(prophet_data$y[train], order = c(2,0,2),
                         optim.control = list(maxit = 1000))
    arima_pred <- forecast(arima_model, h = for_period)
    arima_error <- arima_pred$mean - prophet_data$y[test]
    arima_residuals[[j]] <- data.frame(ret = prophet_data$y[test],
                                       pred = as.numeric(arima_pred$mean),
                                       error = as.numeric(arima_error))
    j <- j + 1
  }
  # bind per-origin residual frames into one data.frame (the first
  # do.call result is discarded; only the assigned one is returned)
  do.call(rbind, arima_residuals)
  arima_res <- do.call(rbind, arima_residuals)
  return(arima_res)
}
arima_eval() | /DM Bootstrap Functions.R | no_license | Merveilleuse/Honours_project | R | false | false | 6,410 | r | #Dummy data
#set.seed(1)
#e1 <- rnorm(10, mean = 0, sd = 1)
#set.seed(2)
#e2 <- rnorm(10, mean = 0, sd = 1)
# FUNCTION 1 ; Do a dm test on bootstrapped sample
dm.bootstrap <- function(e1,e2, h, seed){
# WHAT THE FUNCTION DOES
# This function takes two error vectors which are time indexed, then constructs a bootstrap sample
# (indentical rows, with replacement) from both error vectors . The time index of error vectors is
# matched by setting the same seed for each round of sampling using set.seed(seed). [See code below]
#ARGUMENTS
# - e1 and e2 are error vectors from which we will be resampling from
# - h is a parameter that represents the forecasting time horizon for the error vectors
# - seed is a random integer that will be used as an arg. for set.seed so sampling from error vectors
# is matched.
#VALUE
# Function returns the p.val when Diebold-Mariano test is conducted for bootstrapped sample errors
#Step1: Select random integer to be the seed for sampling
#seed <- round(runif(1, min = 1, max = 10000)) #This isn't changing when I re-run the code. I'll set the see outside the function
#Step2: Sampling from error vectors(matching of time series indices are preserved using the random seed above)
#Only continue with test if error vectors have the same length
l1 <- length(e1)
l2 <- length(e2)
if(l1==l2){
set.seed(seed)
boot_e1 <- sample(e1, l1, replace = TRUE)
set.seed(seed)
boot_e2 <- sample(e2, l2, replace = TRUE)
} else {
print("Error vectors do not have the same length")
}
test <- dm.test(boot_e1, boot_e2, h = h, alternative = c("two.sided")) #Need to specify the forecast horizon(h), default is 1
return(c(test$p.value))
}
# FUNCTION 2: Repeated the dm test on a bootstrap sample N times (uses function 1)
#Question: Think of a more efficient way to do this using dplyr
repeat.dm.bootstrap <- function(e1,e2,N,h){
# Conducts N DM Test(forecast horizon h) on a bootstrapped sample from e1 and e2
# Returns a vector of N p.values from the bootstrap dm tests
library(forecast) #Needed by dm.bootstrap func. to do Diebold-Mariano Test
p.values <- NULL
for(i in 1:N){
rand.int <- sample.int(1000, 1)
p.values[i] <- dm.bootstrap(e1, e2, h, rand.int)
}
return(p.values)
}
#Test
p_vals <- repeat.dm.bootstrap(e_1, e_2, 1e5, h = 1) #Check if seed for rand.int is changing as a control measure
hist(p_vals)
####Delete this later
#Data
#log. returns for ARIMA
index.0 <- read.table("top40index.txt")
index.1 <- as.matrix(index.0)
index.2 <-(t(index.1))[2:1828,1]
index <- as.numeric(index.2)
log.return <- NULL
for(i in 1:length(index)-1){
x <- index[i]
y <- index[i+1]
log.return[i] <-log(y/x)
}
#data-frame for prophet package
start_date <- as.Date("2012-07-01")
end_date <- as.Date("2014-01-01") # Should be "2017-06-30" if we use the full time period, i.e w/o in- sample and out of sample
dates <- bizseq(start_date,end_date,"actual")
prophet_data <- data.frame(ds = dates,y = log.return[1:550]) #Note to self : remove [1:1766] If forecasting using full period
#Model estimation
library(forecast)
arima.fit <- auto.arima(data$return, approximation = F, trace = F) #This also builds the best ARIMA model and it turns out that you got it right. Its the arima(2,0,2) :)
prophet.fit <- prophet(df = prophet_data,yearly.seasonality = TRUE)
#Model Forecasts and residuals
#ARIMA
start_date <- 1766 #???
end_date <- 1825 # ???
period <- 1
arima_residuals <- list()
j = 1
for(i in seq(start_date, end_date, period)){
arima_model <- arima(data$returns[1:i], order = c(2,0,2),
optim.control = list(maxit = 1000))
arima_pred <- predict(arima_model, n.ahead = 5, newdata = data$returns[(i+1):(i+5)])
arima_error <- arima_pred$pred - data$returns[(i+1):(i+5)]
arima_residuals[[j]] <- data.frame(ret = data$returns[(i+1):(i+5)],
pred = as.numeric(arima_pred$pred),
error = as.numeric(arima_error))
j <- j + 1
}
do.call(rbind, arima_residuals)
arima_five_res <- do.call(rbind, arima_residuals)
plot(arima_five_res$ret, type = "l")
lines(arima_five_res$pred, col = 2)
#Prophet
pred_period_dates <- make_future_dataframe(proph_fit,periods = 1826-550) #Dates for prediction period
library("tidyr")
proph_forecast <- predict(proph_fit,pred_period_dates)
proph_in_sample_forecast <- proph_forecast$yhat[1:550]
prophet_residuals <- proph_in_sample_forecast - log.return[1:550]
#Bootstrap
e1 <- arima_five_res$error #Qn: There's 300 of them, how to I control this? Why are there NA's?
e2 <- prophet_residuals[1:300] #Truncated this so the length of the matrices match
p_vals <-repeat.dm.bootstrap(e1, e1, 1000, h = 1) #Check if seed for rand.int is changing as a control measure
hist(p_vals)
# Hanjo
#Model Forecasts and residuals
#ARIMA
arima_eval <- function(ret_data, start_date, end_date, period = 1){
library(lubridate)
start_date <- as.Date("2013-11-01") #???
end_date <- as.Date("2014-01-01") # ???
period <- paste0("+", period," day")
for_period <- period
arima_residuals <- list()
j = 1
for(i in seq(start_date, end_date, by = period)){
#training
train <- which(prophet_data$ds < start_date)
test <- which(prophet_data$ds %in% seq(from = start_date,
to = (start_date + (for_period-1)),
by = "day"))
arima_model <- arima(prophet_data$y[train], order = c(2,0,2),
optim.control = list(maxit = 1000))
arima_pred <- forecast(arima_model, h = for_period)
arima_error <- arima_pred$mean - prophet_data$y[test]
arima_residuals[[j]] <- data.frame(ret = prophet_data$y[test],
pred = as.numeric(arima_pred$mean),
error = as.numeric(arima_error))
j <- j + 1
}
do.call(rbind, arima_residuals)
arima_res <- do.call(rbind, arima_residuals)
return(arima_res)
}
arima_eval() |
context("nadir/ideal point")

test_that("approximate nadir or ideal point if missing", {
  # Two small point sets in a 2-objective space.
  pts_a <- matrix(c(1, 2, 1, 3), byrow = TRUE, ncol = 2L)
  pts_b <- matrix(c(3, 2, 4, 5), byrow = TRUE, ncol = 2L)

  # Matrices with mismatched dimensions must be rejected.
  expect_error(approximateIdealPoint(matrix(1:10, nrow = 5L), pts_a))

  # A single set: ideal = component-wise minima, nadir = maxima.
  ideal <- approximateIdealPoint(pts_a)
  nadir <- approximateNadirPoint(pts_a)
  expect_true(all(ideal == c(1, 1)))
  expect_true(all(nadir == c(2, 3)))

  # Two sets passed as separate arguments.
  ideal <- approximateIdealPoint(pts_a, pts_b)
  nadir <- approximateNadirPoint(pts_a, pts_b)
  expect_true(all(ideal == c(1, 1)))
  expect_true(all(nadir == c(3, 5)))

  # Two sets passed via the `sets` list argument.
  ideal <- approximateIdealPoint(sets = list(pts_a, pts_b))
  nadir <- approximateNadirPoint(sets = list(pts_a, pts_b))
  expect_true(all(ideal == c(1, 1)))
  expect_true(all(nadir == c(3, 5)))
})
| /tests/testthat/test_approximatePoints.R | no_license | jakobbossek/ecr | R | false | false | 843 | r | context("nadir/ideal point")
test_that("approximate nadir or ideal point if missing", {
# setup
A = matrix(c(1,2,1,3), byrow = TRUE, ncol = 2L)
B = matrix(c(3,2,4,5), byrow = TRUE, ncol = 2L)
# passing nonsense (matrizes with different dimensions)
expect_error(approximateIdealPoint(matrix(1:10, nrow = 5L), A))
# passing a single set
ip = approximateIdealPoint(A)
np = approximateNadirPoint(A)
expect_true(all(ip == c(1, 1)))
expect_true(all(np == c(2, 3)))
# passing two sets
ip = approximateIdealPoint(A, B)
np = approximateNadirPoint(A, B)
expect_true(all(ip == c(1, 1)))
expect_true(all(np == c(3, 5)))
# passing two sets via a list
ip = approximateIdealPoint(sets = list(A, B))
np = approximateNadirPoint(sets = list(A, B))
expect_true(all(ip == c(1, 1)))
expect_true(all(np == c(3, 5)))
})
|
mg_per_m2 = TRUE
moles = FALSE
## ASSIGN PROTEINS TO FUNCTIONAL CATEGORIES AND CALCULATE PROTEIN AMOUNTS IN EACH CATEGORY ##
# called by transformations.R
require(readr)
require(stringr)
require(dplyr)
require(tidyr)
source('scripts/functions.R')
mercator <- read_csv('data/mercator/D14_mercator_20170217.csv')
# 'mg_per_m2' and 'moles' switches are defined in transformations.R
if(mg_per_m2) {
protein_samples_D14 <- read_csv('data/D14_protein_GGLEP-DEDT.csv') # protein amounts calculated using D14 ion library, in avg(GGLEP/DEDT) equivalents
}
if(moles) {
protein_samples_D14 <- read_csv('data/D14_protein_moles_GGLEP-DEDT.csv') # protein amounts as above but in moles (not multiplied by MW)
}
# first add the mercator$NAME values for each protein in protein_samples_D14
protein_samples_D14 <- getProteinBins(protein_samples_D14, mercator)
# then import the names of categories we're interested in from mercator_names* and use grep to find all proteins associated with those categories
# put the results in instances of a list
# mercator_names.csv contains search terms for protein categories. These searches are run on mercator$NAMES.
# search terms must be unique to the functional category to avoid non-target returns.
# for example, a search for 'photosystem I' will also pick up proteins from 'photosystem II' - to avoid this we search for 'photosystem I\.'
# this works because all instances of proteins within 'photosystem I' are actually within subcategories. We'd miss some returns if there were proteins in the upper 'photosystem I' category.
# N.B. the '\' is an 'escape' and must be used because .'s are special in regular expressions and mean 'anything'. By using the escape we will actually search for the character '.'
# search terms for top level categories can be made unique by using ' in front
mercator_names <- read.csv('data/mercator/mercator_names.csv', header=T, stringsAsFactors = F)
mercator_names <- arrange(mercator_names, funccat)
func_assigned.list <- vector('list', length(mercator_names$funccat))
func_assigned <- data.frame()
for(i in 1:length(mercator_names$funccat)) {
name <- mercator_names$funccat[i]
proteins <- protein_samples_D14[grep(name, protein_samples_D14$NAME),]
proteins$funccat <- mercator_names$funccat[i]
proteins <- distinct(proteins, Protein, .keep_all = TRUE)
func_assigned.list[[i]] <- proteins
}
func_assigned <- rbind(func_assigned, do.call(rbind, func_assigned.list))
rm(func_assigned.list)
func_assigned$mean <- rowMeans(func_assigned[,c(2:(ncol(func_assigned)-4))])
# Build a sunburstR-compatible two-column data frame from the global
# `func_assigned` table: a hyphen-delimited category path plus the
# summed mean protein amount for that path.
#
# Arguments:
#   column - which comma-separated segment of the mercator NAME string
#            to use as the category path (1 = first, 2 = second, ...).
# Value: data frame with columns NAME_<column> and funccat_sum.
#
# NOTE(review): depends on the global func_assigned built above and on
# rbind.fill, which is from plyr but never loaded explicitly here --
# TODO confirm plyr is attached by the time this runs.
get_sunburstData <- function(column) {
  # Sum per-protein means within each functional-category NAME.
  to_sunburst <- dplyr::select(func_assigned, Protein, NAME, mean) %>% dplyr::group_by(NAME) %>% dplyr::summarise(funccat_sum = sum(mean))
  # --- Normalise NAME strings for sunburstR ---
  # strip single quotes
  to_sunburst$NAME <- sapply(to_sunburst$NAME,
                             function(x){gsub(pattern = "\\'",
                                              replacement = "", x)})
  # hyphens -> underscores (hyphen is reserved as the path separator)
  to_sunburst$NAME <- sapply(to_sunburst$NAME,
                             function(x){gsub(pattern = "-",
                                              replacement = "_", x)})
  # spaces -> underscores
  to_sunburst$NAME <- sapply(to_sunburst$NAME,
                             function(x){gsub(pattern = " ",
                                              replacement = "_", x)})
  # undo underscore after a comma (comma separates NAME segments)
  to_sunburst$NAME <- sapply(to_sunburst$NAME,
                             function(x){gsub(pattern = ",_",
                                              replacement = ",", x)})
  # protect numbers like "1,3" from being split at the comma
  to_sunburst$NAME <- sapply(to_sunburst$NAME,
                             function(x){gsub(pattern = "([0-9])(,)([0-9])", # create 3 capturing groups (first number)(comma)(second number)
                                              replacement = "\\1\\3", x)}) # replacement is first capture and third capture
  # mercator bin dots become path hyphens
  to_sunburst$NAME <- sapply(to_sunburst$NAME,
                             function(x){gsub(pattern = "\\.",
                                              replacement = "-", x)})
  # drop any remaining spaces
  to_sunburst$NAME <- sapply(to_sunburst$NAME,
                             function(x){gsub(pattern = " ",
                                              replacement = "", x)})
  # Split NAME at commas and keep only the requested segment.
  x <- stringr::str_split(to_sunburst$NAME, ',')
  y <- rbind.fill(lapply(x,function(y){as.data.frame(t(y),stringsAsFactors=FALSE)}))
  to_sunburst$NAME <- y[,column]
  to_sunburst <- select(to_sunburst, NAME, funccat_sum)
  #to_sunburst$funccat_sum <- as.integer(to_sunburst$funccat_sum)
  # Split the path at hyphens; shorter paths get NA in trailing levels.
  bla <- str_split(to_sunburst$NAME, '-')
  bla <- rbind.fill(lapply(bla,function(y){as.data.frame(t(y),stringsAsFactors=FALSE)}))
  # Mark the first missing level of each path as the branch terminus.
  for(i in 1:nrow(bla)) {
    if(any(is.na(bla[i,]))) {
      branch_end <- min(which(as.numeric(is.na(bla[i,]))==1))
      bla[i,branch_end] <- 'end'
    }
  }
  # Re-join levels into a single hyphen-delimited path string.
  to_sunburst$NAME <- unite(as.data.frame(bla), all, 1:ncol(bla), sep = '-')
  # Remove literal "NA" level text left by the unite step.
  to_sunburst$NAME <- sapply(to_sunburst$NAME,
                             function(x){gsub(pattern = "NA",
                                              replacement = "", x)})
  # Truncate everything after the first "end" marker.
  to_sunburst$NAME <- sapply(to_sunburst$NAME,
                             function(x){gsub(pattern = "end.+",
                                              replacement = "end", x)})
  # Suffix the NAME column with the chosen segment index.
  names(to_sunburst)[1] <- paste(names(to_sunburst)[1], column, sep = '_')
  return(to_sunburst)
}
bla <- get_sunburstData(1)
bla2 <- get_sunburstData(2)
require(sunburstR)
sunburst(bla2)
sunburst(to_sunburst)
| /sunburst@.R | no_license | jamesrlawson/leaf_proteomics | R | false | false | 5,473 | r | mg_per_m2 = TRUE
moles = FALSE
## ASSIGN PROTEINS TO FUNCTIONAL CATEGORIES AND CALCULATE PROTEIN AMOUNTS IN EACH CATEGORY ##
# called by transformations.R
require(readr)
require(stringr)
require(dplyr)
require(tidyr)
source('scripts/functions.R')
mercator <- read_csv('data/mercator/D14_mercator_20170217.csv')
# 'mg_per_m2' and 'moles' switches are defined in transformations.R
if(mg_per_m2) {
protein_samples_D14 <- read_csv('data/D14_protein_GGLEP-DEDT.csv') # protein amounts calculated using D14 ion library, in avg(GGLEP/DEDT) equivalents
}
if(moles) {
protein_samples_D14 <- read_csv('data/D14_protein_moles_GGLEP-DEDT.csv') # protein amounts as above but in moles (not multiplied by MW)
}
# first add the mercator$NAME values for each protein in protein_samples_D14
protein_samples_D14 <- getProteinBins(protein_samples_D14, mercator)
# then import the names of categories we're interested in from mercator_names* and use grep to find all proteins associated with those categories
# put the results in instances of a list
# mercator_names.csv contains search terms for protein categories. These searches are run on mercator$NAMES.
# search terms must be unique to the functional category to avoid non-target returns.
# for example, a search for 'photosystem I' will also pick up proteins from 'photosystem II' - to avoid this we search for 'photosystem I\.'
# this works because all instances of proteins within 'photosystem I' are actually within subcategories. We'd miss some returns if there were proteins in the upper 'photosystem I' category.
# N.B. the '\' is an 'escape' and must be used because .'s are special in regular expressions and mean 'anything'. By using the escape we will actually search for the character '.'
# search terms for top level categories can be made unique by using ' in front
mercator_names <- read.csv('data/mercator/mercator_names.csv', header=T, stringsAsFactors = F)
mercator_names <- arrange(mercator_names, funccat)
func_assigned.list <- vector('list', length(mercator_names$funccat))
func_assigned <- data.frame()
for(i in 1:length(mercator_names$funccat)) {
name <- mercator_names$funccat[i]
proteins <- protein_samples_D14[grep(name, protein_samples_D14$NAME),]
proteins$funccat <- mercator_names$funccat[i]
proteins <- distinct(proteins, Protein, .keep_all = TRUE)
func_assigned.list[[i]] <- proteins
}
func_assigned <- rbind(func_assigned, do.call(rbind, func_assigned.list))
rm(func_assigned.list)
func_assigned$mean <- rowMeans(func_assigned[,c(2:(ncol(func_assigned)-4))])
get_sunburstData <- function(column) {
to_sunburst <- dplyr::select(func_assigned, Protein, NAME, mean) %>% dplyr::group_by(NAME) %>% dplyr::summarise(funccat_sum = sum(mean))
to_sunburst$NAME <- sapply(to_sunburst$NAME,
function(x){gsub(pattern = "\\'",
replacement = "", x)})
to_sunburst$NAME <- sapply(to_sunburst$NAME,
function(x){gsub(pattern = "-",
replacement = "_", x)})
to_sunburst$NAME <- sapply(to_sunburst$NAME,
function(x){gsub(pattern = " ",
replacement = "_", x)})
to_sunburst$NAME <- sapply(to_sunburst$NAME,
function(x){gsub(pattern = ",_",
replacement = ",", x)})
to_sunburst$NAME <- sapply(to_sunburst$NAME,
function(x){gsub(pattern = "([0-9])(,)([0-9])", # create 3 capturing groups (first number)(comma)(second number)
replacement = "\\1\\3", x)}) # replacement is first capture and third capture
to_sunburst$NAME <- sapply(to_sunburst$NAME,
function(x){gsub(pattern = "\\.",
replacement = "-", x)})
to_sunburst$NAME <- sapply(to_sunburst$NAME,
function(x){gsub(pattern = " ",
replacement = "", x)})
x <- stringr::str_split(to_sunburst$NAME, ',')
y <- rbind.fill(lapply(x,function(y){as.data.frame(t(y),stringsAsFactors=FALSE)}))
to_sunburst$NAME <- y[,column]
to_sunburst <- select(to_sunburst, NAME, funccat_sum)
#to_sunburst$funccat_sum <- as.integer(to_sunburst$funccat_sum)
bla <- str_split(to_sunburst$NAME, '-')
bla <- rbind.fill(lapply(bla,function(y){as.data.frame(t(y),stringsAsFactors=FALSE)}))
for(i in 1:nrow(bla)) {
if(any(is.na(bla[i,]))) {
branch_end <- min(which(as.numeric(is.na(bla[i,]))==1))
bla[i,branch_end] <- 'end'
}
}
to_sunburst$NAME <- unite(as.data.frame(bla), all, 1:ncol(bla), sep = '-')
to_sunburst$NAME <- sapply(to_sunburst$NAME,
function(x){gsub(pattern = "NA",
replacement = "", x)})
to_sunburst$NAME <- sapply(to_sunburst$NAME,
function(x){gsub(pattern = "end.+",
replacement = "end", x)})
names(to_sunburst)[1] <- paste(names(to_sunburst)[1], column, sep = '_')
return(to_sunburst)
}
bla <- get_sunburstData(1)
bla2 <- get_sunburstData(2)
require(sunburstR)
sunburst(bla2)
sunburst(to_sunburst)
|
######################################################################
# Deal with Fonts
is.Font <- function(x) inherits(x, "Font")
######################################################################
# Create a Font.  It needs a workbook object!
# - color is an R color string.
#
# Arguments:
#   wb             - java workbook reference (HSSF or XSSF).
#   color          - R color string; mapped to an indexed color for
#                    HSSF workbooks and a full RGB color for XSSF.
#   heightInPoints - font height in points.
#   name           - font family name, e.g. "Arial".
#   isItalic, isStrikeout, isBold - logical style switches.
#   underline      - POI underline constant (passed as a java byte).
#   boldweight     - POI boldweight constant (passed as a java short).
# Value: an object of class "Font" wrapping the java font reference.
Font <- function(wb, color=NULL, heightInPoints=NULL, name=NULL,
  isItalic=FALSE, isStrikeout=FALSE, isBold=FALSE, underline=NULL,
  boldweight=NULL) # , setFamily=NULL
{
  font <- .jcall(wb, "Lorg/apache/poi/ss/usermodel/Font;",
    "createFont")

  if (!is.null(color))
    if (grepl("XSSF", wb$getClass()$getName())){
      .jcall(font, "V", "setColor", .xssfcolor(color))
    } else {
      # HSSF has no RGB colors; use the closest indexed color.
      .jcall(font, "V", "setColor",
        .jshort(INDEXED_COLORS_[toupper(color)]))
    }

  if (!is.null(heightInPoints))
    .jcall(font, "V", "setFontHeightInPoints", .jshort(heightInPoints))

  if (!is.null(name))
    .jcall(font, "V", "setFontName", name)

  if (isItalic)
    .jcall(font, "V", "setItalic", TRUE)

  if (isStrikeout)
    .jcall(font, "V", "setStrikeout", TRUE)

  # NOTE(review): setBold() is only invoked for XSSF workbooks here, so
  # isBold is silently ignored for HSSF -- confirm that is intended.
  if (isBold & grepl("XSSF", wb$getClass()$getName()))
    .jcall(font, "V", "setBold", TRUE)

  if (!is.null(underline))
    .jcall(font, "V", "setUnderline", .jbyte(underline))

  if (!is.null(boldweight))
    # Fixed typo: the POI method is setBoldweight; the original called
    # "setBoldweigth", which fails with method-not-found at runtime.
    .jcall(font, "V", "setBoldweight", .jshort(boldweight))

  structure(list(ref=font), class="Font")
}
| /R/Font.R | no_license | alexkowa/xlsx | R | false | false | 1,436 | r | ######################################################################
# Deal with Fonts
is.Font <- function(x) inherits(x, "Font")
######################################################################
# Create a Font. It needs a workbook object!
# - color is an R color string.
#
Font <- function(wb, color=NULL, heightInPoints=NULL, name=NULL,
isItalic=FALSE, isStrikeout=FALSE, isBold=FALSE, underline=NULL,
boldweight=NULL) # , setFamily=NULL
{
font <- .jcall(wb, "Lorg/apache/poi/ss/usermodel/Font;",
"createFont")
if (!is.null(color))
if (grepl("XSSF", wb$getClass()$getName())){
.jcall(font, "V", "setColor", .xssfcolor(color))
} else {
.jcall(font, "V", "setColor",
.jshort(INDEXED_COLORS_[toupper(color)]))
}
if (!is.null(heightInPoints))
.jcall(font, "V", "setFontHeightInPoints", .jshort(heightInPoints))
if (!is.null(name))
.jcall(font, "V", "setFontName", name)
if (isItalic)
.jcall(font, "V", "setItalic", TRUE)
if (isStrikeout)
.jcall(font, "V", "setStrikeout", TRUE)
if (isBold & grepl("XSSF", wb$getClass()$getName()))
.jcall(font, "V", "setBold", TRUE)
if (!is.null(underline))
.jcall(font, "V", "setUnderline", .jbyte(underline))
if (!is.null(boldweight))
.jcall(font, "V", "setBoldweigth", .jshort(boldweight))
structure(list(ref=font), class="Font")
}
|
## Scrape historical index data (BNP Paribas and FTSE) with a
## Selenium-driven Firefox session and save the spreadsheets locally.
require(R.utils)
library(openxlsx)
library(stringr)
#devtools::install_github("ropensci/RSelenium")
library(RSelenium)
#devtools::install_github("omegahat/Rcompression")

## setting up
WAIT <- 5   # seconds to let pages/downloads settle
# FIX: was "E:temp", a drive-relative path; the download dir must be an
# absolute path, and the renameFile() call below already builds "<SF>/..."
SF <- "E:/temp"
ff64 <- "c:/PROGRAMS/Firefox/FirefoxPortable/App/Firefox64/firefox.exe"
eCap1 <- list(`moz:firefoxOptions` = list(binary = ff64), pageLoadStrategy = 'none', timeouts = list(pageLoad = 10))
#timeouts = list(script = 5, pageLoad = 10))
# Firefox profile: download xlsx files silently into SF, no save dialogs
eCap2 <- makeFirefoxProfile(list("browser.download.panel.shown" = FALSE,
                                 "browser.download.manager.showWhenStarting" = FALSE,
                                 "browser.download.dir" = SF,
                                 "browser.download.folderList" = 2L,
                                 "browser.download.manager.closeWhenDone" = TRUE,
                                 "browser.download.manager.showAlertOnComplete" = FALSE,
                                 "browser.download.animateNotifications" = FALSE,
                                 "browser.helperApps.neverAsk.saveToDisk" = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
))
rS <- rsDriver(browser = "firefox", port = 4566L, extraCapabilities = c(eCap1, eCap2))
rDr <- rS[['client']]
#rDr$open()
#rDr$close()
#rS$server$stop()
#rDr$navigate("about:config")

# whole file
idf <- read.xlsx("Indices.xlsx")

### testing BNP:
bnp <- idf[which(str_detect(idf$Index, "^BNP")),]
# go disclamer page
rDr$navigate(url = "https://indx.bnpparibas.com/PreDisclaimer/Index")
Sys.sleep(WAIT)
# pass first disclaimer
cb1 <- rDr$findElement(using = "xpath", "//label[@for='checkbox_1']")
cb3 <- rDr$findElement(using = "xpath", "//label[@for='checkbox_3']")
cb1$clickElement()
cb3$clickElement()
vb <- rDr$findElement(using = "xpath", "//a[@ng-click='validatePreDisclaimer()']")
vb$clickElement()
Sys.sleep(WAIT)
# pass second disclaimer
cb1 <- rDr$findElement(using = "xpath", "//label[@for='checkbox_1']")
cbr <- rDr$findElement(using = "xpath", "//label[@for='chkRem']")
cb1$clickElement()
cbr$clickElement()
bv <- rDr$findElement(using = "xpath", "//button[@id='btnValidate']")
bv$clickElement()
Sys.sleep(WAIT)
# get files (seq_len() is safe when bnp has zero rows, unlike 1:nrow)
for (i in seq_len(nrow(bnp))) { #i=i+1
  #download.file(bnp$Historical.data[i], destfile = paste0("indices/",bnp$Symbol[i],".xls"))
  rDr$navigate(bnp$Historical.data[i])
  Sys.sleep(WAIT)
  exp <- rDr$findElement(using = "xpath", "//a[@id='export-history-excel']")
  hrf <- exp$getElementAttribute(attrName = 'href')
  # navigating stores files in browser default download place (eg. /user/Downloads)
  try(rDr$navigate(hrf[[1]]))
  Sys.sleep(WAIT)
  # renaming (FIX: spell out TRUE instead of the reassignable shorthand T)
  renameFile(paste0(SF,"/Index.xlsx"), paste0(SF,"/",bnp$Symbol[i],".xlsx"), overwrite = TRUE)
  #download.file(hrf[[1]], destfile = "E:/temp/x.xls", extra = )
  #tryCatch(expr = evalWithTimeout(rDr$navigate(hrf[[1]]), cpu = 2, timeout = 3),
  #  TimeoutException = function(ex) cat("Timeout. Skipping.\n"))
  # pass to wget
  #cookies = rDr$getAllCookies()
  #url = hrf[[1]]
  #filename = paste0("E:/temp/",bnp$Symbol)
}
gc()

### testing ftse:
ftse <- idf[which(str_detect(idf$Historical.data, "ftse.com/")),]
# load main page of historic indexes
ftse_hix <- "https://www.ftse.com/analytics/factsheets/Home/HistoricIndexValues"
rDr$navigate(ftse_hix)
Sys.sleep(WAIT*2)
# iterate thru all rows
tr <- rDr$findElements(using = "xpath", "//tr[@class='Highlight historicValuesRow']")
i <- 0
repeat {
  i <- i + 1
  if (i > length(tr)) break
  #download.file(ftse$Historical.data, ftse$Symbol, "wget", extra = "")
  idx <- tr[[i]]$findChildElement("xpath", "td[@data-title='Index']")
  idx <- idx$getElementText()[[1]]
  cur <- tr[[i]]$findChildElement("xpath", "td[@data-title='Currency']")
  cur <- cur$getElementText()[[1]]
  ttr <- tr[[i]]$findChildElement("xpath", "td[@data-title='Tax Treatment']")
  ttr <- ttr$getElementText()[[1]]
  nix <- try(tr[[i]]$findChildElement("xpath", "td[@data-title='Net Index']/a"))
  # FIX: check condition classes with inherits() instead of class() == ...
  if (!inherits(nix, "try-error")) {
    nixl <- nix$getElementAttribute('href')[[1]]
    #rDr$navigate(nixl)
    ox <- loadWorkbook(nixl)
    saveWorkbook(ox, file = paste0("FTSE/NetIdx_",idx,"_",cur,".xlsx"))
  }
  cri <- try(tr[[i]]$findChildElement("xpath", "td[@data-title='Total Ret & Cap Ret Index']/a"))
  if (!inherits(cri, "try-error")) {
    cril <- cri$getElementAttribute('href')[[1]]
    #rDr$navigate(cril)
    ox <- loadWorkbook(cril)
    saveWorkbook(ox, file = paste0("FTSE/TotalRet_",idx,"_",cur,".xlsx"))
  }
  print(paste("i:",i,"|",idx,cur))
  Sys.sleep(WAIT/5)
}
| /Indices.R | no_license | mgei/investable_indices | R | false | false | 4,562 | r | require(R.utils)
## Scrape historical index data (BNP Paribas and FTSE) with a
## Selenium-driven Firefox session and save the spreadsheets locally.
library(openxlsx)
library(stringr)
#devtools::install_github("ropensci/RSelenium")
library(RSelenium)
#devtools::install_github("omegahat/Rcompression")

## setting up
WAIT <- 5   # seconds to let pages/downloads settle
# FIX: was "E:temp", a drive-relative path; the download dir must be an
# absolute path, and the renameFile() call below already builds "<SF>/..."
SF <- "E:/temp"
ff64 <- "c:/PROGRAMS/Firefox/FirefoxPortable/App/Firefox64/firefox.exe"
eCap1 <- list(`moz:firefoxOptions` = list(binary = ff64), pageLoadStrategy = 'none', timeouts = list(pageLoad = 10))
#timeouts = list(script = 5, pageLoad = 10))
# Firefox profile: download xlsx files silently into SF, no save dialogs
eCap2 <- makeFirefoxProfile(list("browser.download.panel.shown" = FALSE,
                                 "browser.download.manager.showWhenStarting" = FALSE,
                                 "browser.download.dir" = SF,
                                 "browser.download.folderList" = 2L,
                                 "browser.download.manager.closeWhenDone" = TRUE,
                                 "browser.download.manager.showAlertOnComplete" = FALSE,
                                 "browser.download.animateNotifications" = FALSE,
                                 "browser.helperApps.neverAsk.saveToDisk" = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
))
rS <- rsDriver(browser = "firefox", port = 4566L, extraCapabilities = c(eCap1, eCap2))
rDr <- rS[['client']]
#rDr$open()
#rDr$close()
#rS$server$stop()
#rDr$navigate("about:config")

# whole file
idf <- read.xlsx("Indices.xlsx")

### testing BNP:
bnp <- idf[which(str_detect(idf$Index, "^BNP")),]
# go disclamer page
rDr$navigate(url = "https://indx.bnpparibas.com/PreDisclaimer/Index")
Sys.sleep(WAIT)
# pass first disclaimer
cb1 <- rDr$findElement(using = "xpath", "//label[@for='checkbox_1']")
cb3 <- rDr$findElement(using = "xpath", "//label[@for='checkbox_3']")
cb1$clickElement()
cb3$clickElement()
vb <- rDr$findElement(using = "xpath", "//a[@ng-click='validatePreDisclaimer()']")
vb$clickElement()
Sys.sleep(WAIT)
# pass second disclaimer
cb1 <- rDr$findElement(using = "xpath", "//label[@for='checkbox_1']")
cbr <- rDr$findElement(using = "xpath", "//label[@for='chkRem']")
cb1$clickElement()
cbr$clickElement()
bv <- rDr$findElement(using = "xpath", "//button[@id='btnValidate']")
bv$clickElement()
Sys.sleep(WAIT)
# get files (seq_len() is safe when bnp has zero rows, unlike 1:nrow)
for (i in seq_len(nrow(bnp))) { #i=i+1
  #download.file(bnp$Historical.data[i], destfile = paste0("indices/",bnp$Symbol[i],".xls"))
  rDr$navigate(bnp$Historical.data[i])
  Sys.sleep(WAIT)
  exp <- rDr$findElement(using = "xpath", "//a[@id='export-history-excel']")
  hrf <- exp$getElementAttribute(attrName = 'href')
  # navigating stores files in browser default download place (eg. /user/Downloads)
  try(rDr$navigate(hrf[[1]]))
  Sys.sleep(WAIT)
  # renaming (FIX: spell out TRUE instead of the reassignable shorthand T)
  renameFile(paste0(SF,"/Index.xlsx"), paste0(SF,"/",bnp$Symbol[i],".xlsx"), overwrite = TRUE)
  #download.file(hrf[[1]], destfile = "E:/temp/x.xls", extra = )
  #tryCatch(expr = evalWithTimeout(rDr$navigate(hrf[[1]]), cpu = 2, timeout = 3),
  #  TimeoutException = function(ex) cat("Timeout. Skipping.\n"))
  # pass to wget
  #cookies = rDr$getAllCookies()
  #url = hrf[[1]]
  #filename = paste0("E:/temp/",bnp$Symbol)
}
gc()

### testing ftse:
ftse <- idf[which(str_detect(idf$Historical.data, "ftse.com/")),]
# load main page of historic indexes
ftse_hix <- "https://www.ftse.com/analytics/factsheets/Home/HistoricIndexValues"
rDr$navigate(ftse_hix)
Sys.sleep(WAIT*2)
# iterate thru all rows
tr <- rDr$findElements(using = "xpath", "//tr[@class='Highlight historicValuesRow']")
i <- 0
repeat {
  i <- i + 1
  if (i > length(tr)) break
  #download.file(ftse$Historical.data, ftse$Symbol, "wget", extra = "")
  idx <- tr[[i]]$findChildElement("xpath", "td[@data-title='Index']")
  idx <- idx$getElementText()[[1]]
  cur <- tr[[i]]$findChildElement("xpath", "td[@data-title='Currency']")
  cur <- cur$getElementText()[[1]]
  ttr <- tr[[i]]$findChildElement("xpath", "td[@data-title='Tax Treatment']")
  ttr <- ttr$getElementText()[[1]]
  nix <- try(tr[[i]]$findChildElement("xpath", "td[@data-title='Net Index']/a"))
  # FIX: check condition classes with inherits() instead of class() == ...
  if (!inherits(nix, "try-error")) {
    nixl <- nix$getElementAttribute('href')[[1]]
    #rDr$navigate(nixl)
    ox <- loadWorkbook(nixl)
    saveWorkbook(ox, file = paste0("FTSE/NetIdx_",idx,"_",cur,".xlsx"))
  }
  cri <- try(tr[[i]]$findChildElement("xpath", "td[@data-title='Total Ret & Cap Ret Index']/a"))
  if (!inherits(cri, "try-error")) {
    cril <- cri$getElementAttribute('href')[[1]]
    #rDr$navigate(cril)
    ox <- loadWorkbook(cril)
    saveWorkbook(ox, file = paste0("FTSE/TotalRet_",idx,"_",cur,".xlsx"))
  }
  print(paste("i:",i,"|",idx,cur))
  Sys.sleep(WAIT/5)
}
|
\name{hist_scores}
\alias{hist_scores}
\title{
Plots the distribution of scores following an GOexpress analysis.
}
\description{
Plots a histogram representing the frequencies of scores in the output
variable of the \code{GO_analyse()} function.
This function can also be used on the output of \code{subset_scores()}
function as it returns a value formatted identically to the output of the
\code{GO_analyse()} function.
}
\usage{
hist_scores(result,
main=paste("Distribution of average scores in",
deparse(substitute(result))),
xlab="Average score", ...)
}
\arguments{
\item{result}{
The output of the \code{GO_analyse()} function.
}
\item{main, xlab}{
These arguments to title have useful defaults here.
}
\item{\dots}{
Additional arguments passed on to \code{hist()}.
}
}
\value{
Returns the output of the \code{hist()} function.
}
\author{
Kevin Rue-Albrecht
}
\seealso{
Method \code{\link[graphics:hist]{hist}},
and \code{\link[GOexpress:GO_analyse]{GO_analyse}}.
}
\examples{
# load the sample output data with p.values computed
data(AlvMac_results.pVal)
# Histogram of scores (labelled with counts)
hist_scores(result=AlvMac_results, breaks=20, labels=TRUE)
# filter for Biological Processes associated with 5+ genes and <=0.05 P-value
filtered_results <- subset_scores(
result=AlvMac_results.pVal, total_count=5, p.val=0.05,
namespace="BP")
# Histogram of scores (labelled with counts)
hist_scores(result=filtered_results, breaks=20, labels=TRUE)
}
\keyword{ GOexpress } | /man/hist_scores.Rd | no_license | kevinrue/GOexpress | R | false | false | 1,600 | rd | \name{hist_scores}
\alias{hist_scores}
\title{
Plots the distribution of scores following an GOexpress analysis.
}
\description{
Plots a histogram representing the frequencies of scores in the output
variable of the \code{GO_analyse()} function.
This function can also be used on the output of \code{subset_scores()}
function as it returns a value formatted identically to the output of the
\code{GO_analyse()} function.
}
\usage{
hist_scores(result,
main=paste("Distribution of average scores in",
deparse(substitute(result))),
xlab="Average score", ...)
}
\arguments{
\item{result}{
The output of the \code{GO_analyse()} function.
}
\item{main, xlab}{
These arguments to title have useful defaults here.
}
\item{\dots}{
Additional arguments passed on to \code{hist()}.
}
}
\value{
Returns the output of the \code{hist()} function.
}
\author{
Kevin Rue-Albrecht
}
\seealso{
Method \code{\link[graphics:hist]{hist}},
and \code{\link[GOexpress:GO_analyse]{GO_analyse}}.
}
\examples{
# load the sample output data with p.values computed
data(AlvMac_results.pVal)
# Histogram of scores (labelled with counts)
hist_scores(result=AlvMac_results, breaks=20, labels=TRUE)
# filter for Biological Processes associated with 5+ genes and <=0.05 P-value
filtered_results <- subset_scores(
result=AlvMac_results.pVal, total_count=5, p.val=0.05,
namespace="BP")
# Histogram of scores (labelled with counts)
hist_scores(result=filtered_results, breaks=20, labels=TRUE)
}
\keyword{ GOexpress } |
#Zadanie1
ma1 = matrix(0:11, nrow = 3, ncol = 4)
ma2 = matrix(2, nrow = 3, ncol = 4)
ma3 = matrix(sample(1:3), 1:3, nrow = 3, ncol = 4)
#Zadanie2
ma1 + ma2
ma1 - ma2
ma1 * ma2
ma1 / ma2
ma1 + ma3
ma1 - ma3
ma1 * ma3
ma1 / ma3
#Podstawowe operacje wykonywane są w kolejności
#np: pierwsza liczba z ma1 plus pierwsza liczba z ma2 itd. (x1 + y1, x2 + y2...)
#Zadanie3
ma1[1,ncol(ma1) ]
#Zadanie4
ma3[c(ma3 > 2)]
#Zadanie5
ma4 = cbind(ma1, ma3)
#Zadanie6
ra1 = data.frame(data = c("21-04-2019", "20-04-2019", "19-04-2019"),
miasto = c("Kutno"))
ra1
#Zadanie7
ra2 = data.frame( tmin = c(5.3, 4.6, 2.9),
tmax = c(11.1, 14.6, 9))
ra2
#Zadanie8
ra3 = cbind(ra1, ra2)
tmean = (ra3$tmin + ra3$tmax)/2
as.data.frame(tmean)
ra4 = cbind(ra3,tmean)
ra4
#Zadanie9
colnames(ra4)[5] = "tsr"
#Zadanie10
ra4$tmean > 8
#Zadanie11
li1 = list(c(10:1),
ma4,ra4)
li1
#Zadanie12
wektor_1 = li1[[1]]
#Zadanie13
mean(li1[[3]]$tsr)
#Zadanie14
ra1_jako_macierz = as.matrix(ra1)
ra1_jako_macierz
| /Mikołaj Kaczuba7.R.r | no_license | Mikkac2/zlozone-obiekty | R | false | false | 1,383 | r | #Zadanie1
# Task 1: create three 3x4 matrices (sequence, constant, sampled values)
ma1 = matrix(0:11, nrow = 3, ncol = 4)
ma2 = matrix(2, nrow = 3, ncol = 4)
# NOTE(review): the stray positional argument 1:3 binds to 'byrow' here,
# which looks unintended -- confirm the intended call
ma3 = matrix(sample(1:3), 1:3, nrow = 3, ncol = 4)
# Task 2: elementwise arithmetic on the matrices
ma1 + ma2
ma1 - ma2
ma1 * ma2
ma1 / ma2
ma1 + ma3
ma1 - ma3
ma1 * ma3
ma1 / ma3
# The basic operations are performed elementwise, in order:
# e.g. first number of ma1 plus first number of ma2, etc. (x1 + y1, x2 + y2...)
# Task 3: element in row 1, last column
ma1[1,ncol(ma1) ]
# Task 4: elements of ma3 greater than 2
ma3[c(ma3 > 2)]
# Task 5: bind the two matrices column-wise
ma4 = cbind(ma1, ma3)
# Task 6: data frame of dates and a (recycled) city name
ra1 = data.frame(data = c("21-04-2019", "20-04-2019", "19-04-2019"),
                 miasto = c("Kutno"))
ra1
# Task 7: data frame of daily min/max temperatures
ra2 = data.frame( tmin = c(5.3, 4.6, 2.9),
                  tmax = c(11.1, 14.6, 9))
ra2
# Task 8: combine the frames and add the daily mean temperature
ra3 = cbind(ra1, ra2)
tmean = (ra3$tmin + ra3$tmax)/2
as.data.frame(tmean)
ra4 = cbind(ra3,tmean)
ra4
# Task 9: rename the fifth column (the tmean column) to "tsr"
colnames(ra4)[5] = "tsr"
# Task 10: which days have a mean temperature above 8
# NOTE(review): after the rename above the column is called "tsr", so
# ra4$tmean is NULL here -- confirm whether 'ra4$tsr > 8' was intended
ra4$tmean > 8
# Task 11: a list holding a vector, a matrix and a data frame
li1 = list(c(10:1),
           ma4,ra4)
li1
# Task 12: extract the first list element
wektor_1 = li1[[1]]
# Task 13: mean of the 'tsr' column of the data frame stored in the list
mean(li1[[3]]$tsr)
# Task 14: convert the data frame to a (character) matrix
ra1_jako_macierz = as.matrix(ra1)
ra1_jako_macierz
ra1_jako_macierz
|
print('the firs try to connect R to github')
print('now this file lives on github')
a <- c(1,2,3,3,4,4,5,5,6,6)
aMean <- mean(a)
print(aMean) | /testing.r | no_license | sulaimanbehzad/testRstudio | R | false | false | 141 | r | print('the firs try to connect R to github')
print('now this file lives on github')
a <- c(1,2,3,3,4,4,5,5,6,6)
aMean <- mean(a)
print(aMean) |
thislist <- list("apple", "banana", "cherry", "blueberry")
thislist
| /Saneeth/lists.R | no_license | tactlabs/r-samples | R | false | false | 69 | r | thislist <- list("apple", "banana", "cherry", "blueberry")
thislist
|
# exit_cmd --- terminate execution of a command file
subroutine exit_cmd
include CI_COMMON
integer i, nlevels
integer ctoi, getarg
character arg (5)
if (getarg (1, arg, 5) == EOF)
nlevels = 1
else {
i = 1
nlevels = ctoi (arg, i)
}
for (i = Ci_file; i > 0 && nlevels > 0; {i -= 1; nlevels -= 1}) {
call wind (Ci_fd (i))
call lsfree (Ci_buf (i), ALL)
}
stop
end
| /swt/src/lib/sh/src/intcmd.u/exit_cmd.r | no_license | arnoldrobbins/gt-swt | R | false | false | 438 | r | # exit_cmd --- terminate execution of a command file
subroutine exit_cmd
include CI_COMMON
integer i, nlevels
integer ctoi, getarg
character arg (5)
if (getarg (1, arg, 5) == EOF)
nlevels = 1
else {
i = 1
nlevels = ctoi (arg, i)
}
for (i = Ci_file; i > 0 && nlevels > 0; {i -= 1; nlevels -= 1}) {
call wind (Ci_fd (i))
call lsfree (Ci_buf (i), ALL)
}
stop
end
|
context("Check Data Identical")
test_that("Data identical (text formats)", {
expect_equivalent(import(export(mtcars, "mtcars.txt")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.csv")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.tsv")), mtcars)
})
unlink("mtcars.txt")
unlink("mtcars.csv")
unlink("mtcars.tsv")
test_that("Data identical (R formats)", {
expect_equivalent(import(export(mtcars, "mtcars.rds")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.R")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.RData")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.R", format = "dump")), mtcars)
})
unlink("mtcars.rds")
unlink("mtcars.R")
unlink("mtcars.RData")
test_that("Data identical (R formats), feather", {
skip_if_not_installed(pkg="feather")
expect_equivalent(import(export(mtcars, "mtcars.feather")), mtcars)
unlink("mtcars.feather")
})
test_that("Data identical (haven formats)", {
expect_equivalent(import(export(mtcars, "mtcars.dta")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.sav")), mtcars)
})
unlink("mtcars.dta")
unlink("mtcars.sav")
test_that("Data identical (Excel formats)", {
expect_equivalent(import(export(mtcars, "mtcars.xlsx")), mtcars)
})
unlink("mtcars.xlsx")
test_that("Data identical (other formats)", {
expect_equivalent(import(export(mtcars, "mtcars.dbf")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.json")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.arff")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.xml")), mtcars)
})
unlink("mtcars.dbf")
unlink("mtcars.json")
unlink("mtcars.arff")
unlink("mtcars.xml")
test_that("Data identical (optional arguments)", {
#expect_equivalent(import(export(mtcars, "mtcars.csv", format = "csv2"), format = "csv2"), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.csv"), nrows = 4), mtcars[1:4,])
expect_equivalent(import(export(mtcars, "mtcars.csv", format = "tsv"), format = "tsv"), mtcars)
expect_true(all.equal(import(export(mtcars, "mtcars", format = "csv"), format = "csv"), mtcars, check.attributes = FALSE))
})
unlink("mtcars.csv")
unlink("mtcars")
| /tests/testthat/test_identical.R | no_license | cran/rio | R | false | false | 2,284 | r | context("Check Data Identical")
test_that("Data identical (text formats)", {
expect_equivalent(import(export(mtcars, "mtcars.txt")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.csv")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.tsv")), mtcars)
})
unlink("mtcars.txt")
unlink("mtcars.csv")
unlink("mtcars.tsv")
test_that("Data identical (R formats)", {
expect_equivalent(import(export(mtcars, "mtcars.rds")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.R")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.RData")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.R", format = "dump")), mtcars)
})
unlink("mtcars.rds")
unlink("mtcars.R")
unlink("mtcars.RData")
test_that("Data identical (R formats), feather", {
skip_if_not_installed(pkg="feather")
expect_equivalent(import(export(mtcars, "mtcars.feather")), mtcars)
unlink("mtcars.feather")
})
test_that("Data identical (haven formats)", {
expect_equivalent(import(export(mtcars, "mtcars.dta")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.sav")), mtcars)
})
unlink("mtcars.dta")
unlink("mtcars.sav")
test_that("Data identical (Excel formats)", {
expect_equivalent(import(export(mtcars, "mtcars.xlsx")), mtcars)
})
unlink("mtcars.xlsx")
test_that("Data identical (other formats)", {
expect_equivalent(import(export(mtcars, "mtcars.dbf")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.json")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.arff")), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.xml")), mtcars)
})
unlink("mtcars.dbf")
unlink("mtcars.json")
unlink("mtcars.arff")
unlink("mtcars.xml")
test_that("Data identical (optional arguments)", {
#expect_equivalent(import(export(mtcars, "mtcars.csv", format = "csv2"), format = "csv2"), mtcars)
expect_equivalent(import(export(mtcars, "mtcars.csv"), nrows = 4), mtcars[1:4,])
expect_equivalent(import(export(mtcars, "mtcars.csv", format = "tsv"), format = "tsv"), mtcars)
expect_true(all.equal(import(export(mtcars, "mtcars", format = "csv"), format = "csv"), mtcars, check.attributes = FALSE))
})
unlink("mtcars.csv")
unlink("mtcars")
|
\name{gs6}
\alias{gs6}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
CDO Pricing with the C_gs6 Model
}
\description{
'gs6' compute 5 tranches spreads under the C_gs6 Model
}
\usage{
gs6(theta1Input, theta2Input, theta3Input, M, dateInput)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{theta1Input}{
a numeric giving the Pearson's correlation for dependence specification
}
\item{theta2Input}{
a numeric giving the Pearson's correlation for dependence specification
}
\item{theta3Input}{
a numeric giving the Pearson's correlation for dependence specification
}
\item{MInput}{
a numeric giving the Monte Carlo simulation runs
}
\item{dateInput}{
a character giving the pricing date, e.g. c("2007-10-23")
}
}
\details{
Please make sure that the data sets "defIntensity.csv" and "payday.csv" have been correctly installed at the following paths: "C:/defIntensity.csv", "C:/payday.csv". Both data sets can be downloaded from "https://github.com/YafeiXu/xyfQuantlet".
}
\value{
A vector with 5 numerics will be returned, from left to right: equity, junior mezzanine, senior mezzanine, junior senior, senior.
}
\references{
The master thesis, CDO, HAME Copulas and an R Package 'CDO', can be downloaded from
https://sites.google.com/site/cdowithr/.
}
\author{
Yafei Xu <yafei.xu@hu-berlin.de>
}
\examples{
gs6(0.3,0.4,0.5, 100, c("2007-10-23"))
}
| /man/gs6.Rd | no_license | freephys/cdov | R | false | false | 1,407 | rd | \name{gs6}
\alias{gs6}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
CDO Pricing with the C_gs6 Model
}
\description{
'gs6' compute 5 tranches spreads under the C_gs6 Model
}
\usage{
gs6(theta1Input, theta2Input, theta3Input, M, dateInput)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{theta1Input}{
a numeric giving the Pearson's correlation for dependence specification
}
\item{theta2Input}{
a numeric giving the Pearson's correlation for dependence specification
}
\item{theta3Input}{
a numeric giving the Pearson's correlation for dependence specification
}
\item{MInput}{
a numeric giving the Monte Carlo simulation runs
}
\item{dateInput}{
a character giving the pricing date, e.g. c("2007-10-23")
}
}
\details{
Please make sure that the data sets of "defIntensity.csv" and "payday.csv" have been correctly installed in such paths: "C:/defIntensity.csv", "C:/payday.csv". The both data sets can be downloaded from "https://github.com/YafeiXu/xyfQuantlet".
}
\value{
A vector with 5 numerics will be returned, from left to right: equity, junior mezzanine, senior mezzanine, junior senior, senior.
}
\references{
The master thesis, CDO, HAME Copulas and an R Package 'CDO', can be downloaded from
https://sites.google.com/site/cdowithr/.
}
\author{
Yafei Xu <yafei.xu@hu-berlin.de>
}
\examples{
gs6(0.3,0.4,0.5, 100, c("2007-10-23"))
}
|
#' Compute Alpha Shape Persistence Diagram in 3D
#'
#' \code{diagAS} computes the persistent diagram of the Alpha Shape filtration
#' constructed on a point cloud in \eqn{\mathbf{R}^3}. This function is a
#' wrapper to \pkg{TDA}'s implementation. It is noted that AS filtration is \emph{only}
#' available in 3-dimensional point cloud.
#'
#' @param data an \eqn{(n\times 3)} data matrix.
#' @param maxdim maximum dimension of the computed homological features (default: 1).
#'
#' @return a data frame with following columns\describe{
#' \item{Dimension}{dimension corresponding to a feature.}
#' \item{Birth}{birth of a feature.}
#' \item{Death}{death of a feature.}
#' }
#'
#' @examples
#' # ---------------------------------------------------------------------------
#' # Compare VR and AS Persistence Diagrams
#' # ---------------------------------------------------------------------------
#' # Use 'iris' data for the first 3 columns
#' XX = as.matrix(iris[,1:3])
#'
#' # Compute VR and AS Diagram
#' run.vr = diagRips(XX, maxdim=1)
#' run.as = diagAS(XX, maxdim=1)
#'
#' col1 = as.factor(run.vr$Dimension)
#' col2 = as.factor(run.as$Dimension)
#'
#' # Visualize
#' opar <- par(no.readonly=TRUE)
#' par(mfrow=c(1,2), pty="s")
#' plot(run.vr$Birth, run.vr$Death, pch=19, col=col1, main="VR diagram")
#' plot(run.as$Birth, run.as$Death, pch=19, col=col2, main="AS diagram")
#' par(opar)
#'
#' @seealso \code{\link[TDA]{alphaShapeDiag}}
#'
#' @concept diagram
#' @export
diagAS <- function(data, maxdim=1){
  ## PREPARATION
  if (!is.matrix(data)){
    stop(paste0("* diagAS : input 'data' must be a matrix."))
  }
  if (ncol(data)!=3){
    stop("* diagAS : input 'data' must have 3 columns.")
  }
  # clamp the requested homology dimension into the valid range [0, 2]
  mydim = max(0, round(maxdim))
  mydim = min(2, mydim)

  ## COMPUTATION
  tdarun = TDA::alphaShapeDiag(data, maxdimension = mydim, library = c("GUDHI", "Dionysus"))$diagram
  # remove features that never die (infinite death value)
  delete = which(is.infinite(tdarun[,3]))
  if (length(delete) > 0){
    # FIX: guard against an empty 'delete' -- 'tdarun[-integer(0), ]'
    # would discard every row; drop = FALSE keeps the matrix structure
    # when exactly one row remains
    tdarun = tdarun[-delete, , drop = FALSE]
  }

  ## WRAP
  output = data.frame(Dimension = round(tdarun[,1]),
                      Birth = tdarun[,2],
                      Death = tdarun[,3])
  return(output)
}
| /R/diagram_diagAS.R | permissive | kyound/TDAkit | R | false | false | 2,221 | r | #' Compute Alpha Shape Persistence Diagram in 3D
#'
#' \code{diagAS} computes the persistent diagram of the Alpha Shape filtration
#' constructed on a point cloud in \eqn{\mathbf{R}^3}. This function is a
#' wrapper to \pkg{TDA}'s implementation. It is noted that AS filtration is \emph{only}
#' available in 3-dimensional point cloud.
#'
#' @param data an \eqn{(n\times 3)} data matrix.
#' @param maxdim maximum dimension of the computed homological features (default: 1).
#' @param threshold maximum value of the filtration (default: \code{Inf}).
#'
#' @return a data frame with following columns\describe{
#' \item{Dimension}{dimension corresponding to a feature.}
#' \item{Birth}{birth of a feature.}
#' \item{Death}{death of a feature.}
#' }
#'
#' @examples
#' # ---------------------------------------------------------------------------
#' # Compare VR and AS Persistence Diagrams
#' # ---------------------------------------------------------------------------
#' # Use 'iris' data for the first 3 columns
#' XX = as.matrix(iris[,1:3])
#'
#' # Compute VR and AS Diagram
#' run.vr = diagRips(XX, maxdim=1)
#' run.as = diagAS(XX, maxdim=1)
#'
#' col1 = as.factor(run.vr$Dimension)
#' col2 = as.factor(run.as$Dimension)
#'
#' # Visualize
#' opar <- par(no.readonly=TRUE)
#' par(mfrow=c(1,2), pty="s")
#' plot(run.vr$Birth, run.vr$Death, pch=19, col=col1, main="VR diagram")
#' plot(run.as$Birth, run.as$Death, pch=19, col=col2, main="AS diagram")
#' par(opar)
#'
#' @seealso \code{\link[TDA]{alphaShapeDiag}}
#'
#' @concept diagram
#' @export
diagAS <- function(data, maxdim=1){
  ## PREPARATION
  if (!is.matrix(data)){
    stop(paste0("* diagAS : input 'data' must be a matrix."))
  }
  if (ncol(data)!=3){
    stop("* diagAS : input 'data' must have 3 columns.")
  }
  # clamp the requested homology dimension into the valid range [0, 2]
  mydim = max(0, round(maxdim))
  mydim = min(2, mydim)

  ## COMPUTATION
  tdarun = TDA::alphaShapeDiag(data, maxdimension = mydim, library = c("GUDHI", "Dionysus"))$diagram
  # remove features that never die (infinite death value)
  delete = which(is.infinite(tdarun[,3]))
  if (length(delete) > 0){
    # FIX: guard against an empty 'delete' -- 'tdarun[-integer(0), ]'
    # would discard every row; drop = FALSE keeps the matrix structure
    # when exactly one row remains
    tdarun = tdarun[-delete, , drop = FALSE]
  }

  ## WRAP
  output = data.frame(Dimension = round(tdarun[,1]),
                      Birth = tdarun[,2],
                      Death = tdarun[,3])
  return(output)
}
|
# here is my script
# Print a fixed message (print() also returns it, invisibly).
test <- function() {
  print("1d af")
}
| /dunno.R | no_license | rebeccahz12/test | R | false | false | 54 | r | # here is my script
# Print a fixed message (print() also returns it, invisibly).
test <- function() {
  print("1d af")
}
|
# Build a Voronoi tessellation around cell-point locations, clipped to the
# bounding box and then cropped to land.
#
# cp   : sf point object; uses columns x, y, small, direction
# land : land polygons passed through to crop_to_land()
# bbox : numeric bounding box (4 values, reshaped to a 2x2 matrix below)
#
# NOTE(review): SIN/COS are project helpers (capitalised), presumably
# degree-based trigonometry -- confirm their definition.
create_voronoi <- function(cp, land, bbox) {
    # Offset non-small points 100 units along their direction; small points
    # and points with unknown direction stay at (x, y).
    cp$x2 <- cp$x + ifelse(cp$small | is.na(cp$direction), 0, (SIN(cp$direction) * 100))
    cp$y2 <- cp$y + ifelse(cp$small | is.na(cp$direction), 0, (COS(cp$direction) * 100))
    # Rebuild the sf object from the shifted coordinates, keeping the CRS.
    cp2 <- st_set_geometry(cp, NULL)
    cp2 <- st_as_sf(cp2, coords = c("x2", "y2"), crs = st_crs(cp))
    # Bounding box as an sf polygon (used both as Voronoi envelope and clip).
    box <- st_as_sf(tmaptools::bb_sp(matrix(bbox, ncol=2), projection = st_crs(cp)$proj4string))
    v <- st_sf(geometry=st_cast(st_voronoi(st_union(cp2), box$geometry)))
    # Reorder the Voronoi cells so cell i corresponds to point i.
    vint <- unlist(st_intersects(cp2, v))
    x <- v[vint, ]
    x <- st_intersection(x, box)
    crop_to_land(x, land)
}
| /R/create_voronoi.R | no_license | Flowminder/mobloc | R | false | false | 628 | r | create_voronoi <- function(cp, land, bbox) {
cp$x2 <- cp$x + ifelse(cp$small | is.na(cp$direction), 0, (SIN(cp$direction) * 100))
cp$y2 <- cp$y + ifelse(cp$small | is.na(cp$direction), 0, (COS(cp$direction) * 100))
cp2 <- st_set_geometry(cp, NULL)
cp2 <- st_as_sf(cp2, coords = c("x2", "y2"), crs = st_crs(cp))
box <- st_as_sf(tmaptools::bb_sp(matrix(bbox, ncol=2), projection = st_crs(cp)$proj4string))
v <- st_sf(geometry=st_cast(st_voronoi(st_union(cp2), box$geometry)))
vint <- unlist(st_intersects(cp2, v))
x <- v[vint, ]
x <- st_intersection(x, box)
crop_to_land(x, land)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/occ_functions.R
\name{print.occ_mod}
\alias{print.occ_mod}
\title{Print method for \code{occ_mod} class}
\usage{
\method{print}{occ_mod}(x, ...)
}
\arguments{
\item{x}{An object of class \link{occ_mod}}
\item{...}{Other arguments passed to or from other methods}
}
\description{
Print method for \code{occ_mod} class
}
| /man/print.occ_mod.Rd | permissive | StrattonCh/occupancy | R | false | true | 398 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/occ_functions.R
\name{print.occ_mod}
\alias{print.occ_mod}
\title{Print method for \code{occ_mod} class}
\usage{
\method{print}{occ_mod}(x, ...)
}
\arguments{
\item{x}{An object of class \link{occ_mod}}
\item{...}{Other arguments passed to or from other methods}
}
\description{
Print method for \code{occ_mod} class
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addAxesToRgl.R
\name{addAxesToRgl}
\alias{addAxesToRgl}
\title{Add axis markers at local coordinate space origin}
\usage{
addAxesToRgl(len = 1, sca = NULL, rot = NULL, trn = NULL,
title = NULL, charexp = 1)
}
\arguments{
\item{len}{The length of each axis}
\item{sca}{scale vector for local coord system (1 coord per axis)}
\item{rot}{rotation matrix for local coord system}
\item{trn}{translation vector for local coord system (1 coord per axis)}
\item{title}{An optional title}
\item{charexp}{character expansion factor for axis label}
}
\description{
Add axis markers at local coordinate space origin
}
\examples{
addAxesToRgl()
}
| /mw3dlib/man/addAxesToRgl.Rd | no_license | MikeWise2718/mw3dlib | R | false | true | 721 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addAxesToRgl.R
\name{addAxesToRgl}
\alias{addAxesToRgl}
\title{Add axis markers at local coordinate space origin}
\usage{
addAxesToRgl(len = 1, sca = NULL, rot = NULL, trn = NULL,
title = NULL, charexp = 1)
}
\arguments{
\item{len}{The length of each axis}
\item{sca}{scale vector for local coord system (1 coord per axis)}
\item{rot}{rotation matrix for local coord system}
\item{trn}{translation vector for local coord system (1 coord per axis)}
\item{title}{An optional title}
\item{charexp}{character expansion factor for axis label}
}
\description{
Add axis markers at local coordinate space origin
}
\examples{
addAxesToRgl()
}
|
# Plot 3: energy sub-metering on 1-2 Feb 2007 from the household power
# consumption data set, rendered to plot3.png.
library(data.table)
data <- fread(file = "household_power_consumption.txt", sep = ";", header = TRUE,
              na.strings = "?")
# keep only the two target days
days <- data[Date == "1/2/2007" | Date == "2/2/2007", ]
# combine the Date and Time columns into a single POSIXct timestamp
dt <- days[, paste(Date,Time)]
datetime <- as.POSIXct(dt, format = "%d/%m/%Y %H:%M:%S")
all <- cbind(datetime, days)
# FIX: png()'s argument is 'units'; the original 'unit' only worked via
# partial argument matching
png("plot3.png", width = 480, height = 480, units = "px")
plot(all$datetime, all$Sub_metering_1, type = "n", xlab = "datetime", ylab = "energy sub metering")
points(all$datetime, all$Sub_metering_1, type = "l", col = "black")
points(all$datetime, all$Sub_metering_2, type = "l", col = "red")
points(all$datetime, all$Sub_metering_3, type = "l", col = "blue")
legend("topright", col = c("black", "red", "blue"), lty = c(1,1,1), lwd = c(2,2,2), legend = c("Sub metering 1", "Sub metering 2", "Sub metering 3"))
dev.off() | /plot3.R | no_license | MonikaeR/ExData_Plotting1 | R | false | false | 814 | r | library(data.table)
# Plot 3: energy sub-metering on 1-2 Feb 2007 from the household power
# consumption data set, rendered to plot3.png.
data <- fread(file = "household_power_consumption.txt", sep = ";", header = TRUE,
              na.strings = "?")
# keep only the two target days
days <- data[Date == "1/2/2007" | Date == "2/2/2007", ]
# combine the Date and Time columns into a single POSIXct timestamp
dt <- days[, paste(Date,Time)]
datetime <- as.POSIXct(dt, format = "%d/%m/%Y %H:%M:%S")
all <- cbind(datetime, days)
# FIX: png()'s argument is 'units'; the original 'unit' only worked via
# partial argument matching
png("plot3.png", width = 480, height = 480, units = "px")
plot(all$datetime, all$Sub_metering_1, type = "n", xlab = "datetime", ylab = "energy sub metering")
points(all$datetime, all$Sub_metering_1, type = "l", col = "black")
points(all$datetime, all$Sub_metering_2, type = "l", col = "red")
points(all$datetime, all$Sub_metering_3, type = "l", col = "blue")
legend("topright", col = c("black", "red", "blue"), lty = c(1,1,1), lwd = c(2,2,2), legend = c("Sub metering 1", "Sub metering 2", "Sub metering 3"))
dev.off()
# Load NYPD collision data, drop incomplete rows, and score streets by
# collision frequency.
Data <- read.csv(file = "NYPD_Motor_Vehicle_Collisions.csv", header = TRUE)
View(Data)
names(Data)
# keep a subset of columns (1, 5, 6, 8) -- presumably date/location/street;
# confirm against the CSV header
Var_data <- Data[c(1,5,6,8)]
View(Var_data)
# treat empty strings as missing and drop incomplete rows
Var_data[Var_data==""] <- NA
Var_clean <- na.omit(Var_data)
View(Var_clean)
# collision counts per street
Streets <- data.frame(table(Var_clean$ON.STREET.NAME))
# FIX: rename AFTER creating Streets (the original set colnames on a
# not-yet-existing object), then compute z from the renamed 'Count' column
colnames(Streets) <- c("Street", "Count")
library(data.table)
setDT(Streets)[, z := sqrt(Count/sum(Count))]
View(Streets)
| /Function.R | no_license | sumitsameriya/Capstone_progress | R | false | false | 417 | r | Data <- read.csv(file = "NYPD_Motor_Vehicle_Collisions.csv", header = TRUE)
View(Data)
names(Data)
Var_data <- Data[c(1,5,6,8)]
View(Var_data)
Var_data[Var_data==""] <- NA
Var_clean <- na.omit(Var_data)
View(Var_clean)
# FIX: build Streets before renaming its columns (the original renamed a
# not-yet-existing object), then refer to the renamed "Count" column.
Streets <- data.frame(table(Var_clean$ON.STREET.NAME))
colnames(Streets) <- c("Street", "Count")
View(Streets)
library(data.table)
setDT(Streets)[, z := sqrt(Count/sum(Count))]
View(Streets)
|
# Simulate 1000 Bernoulli-style trials by thresholding uniform draws at p:
# a draw above p is coded as `success` (1), otherwise `fail` (0).
p <- 0.2
success <- 1
fail <- 0
x <- runif(1000)
# Start everything at `fail`, then overwrite the draws that exceed p.
y <- rep(fail, length(x))
y[x > p] <- success
hist(y)
| /lab1/lab1_ex3.r | no_license | pasreis/ddrs-labs | R | false | false | 103 | r | p <- 0.2;
success <- 1;
fail <- 0;
x <- runif(1000);
y <- ifelse(x > p, success, fail);
hist(y);
|
# S3 predict method for "gbt" (gradient boosted trees) objects.
#
# Rebuilds the design matrix for `newdata` from the fitted formula and
# delegates the tree traversal to the compiled "predict" routine.
#
# object  - fitted gbt model; fields used: formula, treematrix, nu,
#           interaction.depth, initF.
# newdata - data.frame of new observations to score.
# n.trees - number of boosting iterations to use.
# Returns a numeric vector with one prediction per row of `newdata`.
predict.gbt <-
function(object, newdata, n.trees,...) {
	# Output buffer, one slot per row of newdata, initialised to 0.
	predictions <- numeric(length(newdata[,1]))
	# Reconstruct the model matrix exactly as at fit time.
	mf <- model.frame(formula=object$formula, data=newdata)
	x <- model.matrix(attr(mf, "terms"), data=mf)
	# Flat storage size of one tree: 6 values per node, 2^depth nodes
	# -- presumably matches the C-side layout; TODO confirm against the
	# C source.
	treevector.size <- 6*(2^object$interaction.depth)
	# NOTE(review): the .Call return value is discarded; this appears to
	# rely on the C routine filling `predictions` in place (as.numeric()
	# on an existing double vector does not copy).  Verify against the C
	# source -- capturing the return value would be safer.
	.Call("predict", as.numeric(object$treematrix), as.numeric(object$nu), as.numeric(x), as.integer(length(x[,1])), as.integer(n.trees), as.integer(treevector.size), as.integer(object$interaction.depth), as.numeric(predictions), as.numeric(object$initF))
	return(predictions)
}
| /R/predict.gbt.R | no_license | harrysouthworth/gbt | R | false | false | 544 | r | predict.gbt <-
function(object, newdata, n.trees,...) {
predictions <- numeric(length(newdata[,1]))
mf <- model.frame(formula=object$formula, data=newdata)
x <- model.matrix(attr(mf, "terms"), data=mf)
treevector.size <- 6*(2^object$interaction.depth)
.Call("predict", as.numeric(object$treematrix), as.numeric(object$nu), as.numeric(x), as.integer(length(x[,1])), as.integer(n.trees), as.integer(treevector.size), as.integer(object$interaction.depth), as.numeric(predictions), as.numeric(object$initF))
return(predictions)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psychobject.R
\name{is.psychobject}
\alias{is.psychobject}
\title{Creates or tests for objects of mode "psychobject".}
\usage{
is.psychobject(x)
}
\arguments{
\item{x}{an arbitrary R object.}
}
\description{
Creates or tests for objects of mode "psychobject".
}
| /man/is.psychobject.Rd | permissive | HugoNjb/psycho.R | R | false | true | 340 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psychobject.R
\name{is.psychobject}
\alias{is.psychobject}
\title{Creates or tests for objects of mode "psychobject".}
\usage{
is.psychobject(x)
}
\arguments{
\item{x}{an arbitrary R object.}
}
\description{
Creates or tests for objects of mode "psychobject".
}
|
#' Download pollution data by station in IMECAs
#'
#' Retrieve hourly averages of pollution data, by station, measured in
#' \href{https://en.wikipedia.org/wiki/Índice_Metropolitano_de_la_Calidad_del_Aire}{IMECAs}
#'
#' Note that in 2015 it was determined that the stations with codes ACO, AJU,
#' INN, MON and MPA would no longer be taken into consideration when computing
#' the pollution index because they didn't meet the
#' \href{http://www.aire.cdmx.gob.mx/objetivos-monitoreo-calidad-aire.html}{objectives
#' of monitoring air quality}, and are no longer included in the index, even if
#' they are still part of the SIMAT (Sistema de Monitoreo Atmosférico de la
#' Ciudad de México). Thus, even if they are located inside a zone, they are not
#' included in the pollution values for that zone.
#'
#' @param pollutant The type of pollutant to download
#' \itemize{
#'  \item{"SO2"}{ - Sulfur Dioxide}
#'  \item{"CO"}{ - Carbon Monoxide}
#'  \item{"NO2"}{ - Nitrogen Dioxide}
#'  \item{"O3"}{ - Ozone}
#'  \item{"PM10"}{ - Particulate matter 10 micrometers or less}
#' }
#' @param date The date for which to download data in YYYY-MM-DD format
#'   (the earliest possible date is 2009-01-01).
#' @param show_messages show a message about issues with excluded stations
#'
#' @return A data.frame with pollution data measured in IMECAs, by station.
#'   The hours correspond to the \emph{Etc/GMT+6} timezone, with no daylight
#'   saving time
#' @export
#' @family IMECA functions
#' @seealso \href{http://www.aire.cdmx.gob.mx/default.php?opc='aqBjnmc='}{Índice de calidad del aire por estaciones}
#' @importFrom rvest html_nodes html_table
#' @importFrom xml2 read_html
#' @importFrom tidyr gather
#' @importFrom httr POST http_error status_code http_type add_headers content
#'
#' @examples
#' \dontrun{
#' ## There was an ozone pollution emergency on May 15, 2017
#' df_o3 <- get_station_imeca("O3", "2017-05-15", show_messages = FALSE)
#'
#' ## Convert to local Mexico City time
#' df_o3$mxc_time <- format(as.POSIXct(paste0(df_o3$date,
#'                                            " ",
#'                                            df_o3$hour,
#'                                            ":00"),
#'                                     tz = "Etc/GMT+6"),
#'                          tz = "America/Mexico_City")
#' head(df_o3[order(-df_o3$value), ])
#' }
get_station_imeca <- function(pollutant, date,
                              show_messages = TRUE) {
  ## ---- input validation ----
  if (missing(date))
    stop("You need to specify a start date (YYYY-MM-DD)", call. = FALSE)
  if (length(date) != 1)
    stop("date should be a date in YYYY-MM-DD format", call. = FALSE)
  if (!is.Date(date))
    stop("date should be a date in YYYY-MM-DD format", call. = FALSE)
  if (date < "2009-01-01")
    stop("date should be after 2009-01-01", call. = FALSE)
  ## Scalar comparisons, so use the short-circuit operator throughout
  ## (the original mixed `|` and `||`).
  if (!(identical("O3", pollutant) || identical("NO2", pollutant) ||
        identical("SO2", pollutant) || identical("CO", pollutant) ||
        identical("PM10", pollutant)))
    stop("Invalid pollutant value", call. = FALSE)
  if (date >= "2017-01-01" && show_messages)
    message(paste0("Sometime in 2015-2017 the stations with codes",
                   " ACO, AJU, INN, MON, and MPA were excluded from the",
                   " index"))
  url <- "http://www.aire.cdmx.gob.mx/default.php?opc=%27aqBjnmc=%27"
  ## Form payload expected by the website; RadioGroup1 encodes the pollutant.
  fd <- list(
    fecha = date,
    RadioGroup1 = switch(pollutant,
                         "O3" = 0,
                         "NO2" = 1,
                         "SO2" = 2,
                         "CO" = 3,
                         "PM10" = 4),
    aceptar = "Submit",
    consulta = 1
  )
  result <- POST(url,
                 add_headers("user-agent" =
                               "https://github.com/diegovalle/aire.zmvm"),
                 body = fd,
                 encode = "form")
  if (http_error(result))
    stop(sprintf("The request to <%s> failed [%s]",
                 url,
                 status_code(result)
    ), call. = FALSE)
  if (http_type(result) != "text/html")
    ## FIX: `call. = FALSE` used to be *inside* paste0(), so "FALSE" was
    ## pasted into the message and stop() kept its default call display.
    stop(paste0(url, " did not return text/html"), call. = FALSE)
  poll_table <- read_html(content(result, "text"))
  df <- html_table(html_nodes(poll_table, "table")[[1]],
                   header = TRUE,
                   fill = TRUE)
  if (nrow(df) <= 1)
    stop("The website returned invalid data. Please check the date format.",
         call. = FALSE)
  ## The pollutant name is embedded in the third header cell; the real
  ## column names sit on the second row of the scraped table.
  pollutant2 <- names(df)[3]
  names(df) <- df[2, ]
  df$date <- date
  names(df)[1] <- "hour"
  ## There's an empty row at the end of the data
  df <- df[3:(nrow(df)-1), ]
  ## Wide (one column per station) -> long format.
  df <- gather(df, station_code, value, -date, -hour)
  df[which(df$value == ""), "value"] <- NA
  df$value <- as.numeric(as.character(df$value))
  df$pollutant <- pollutant2
  df$unit <- "IMECA"
  df[, c("date", "hour", "station_code", "pollutant", "unit", "value" )]
}
| /R/get_station_imeca.R | no_license | cran/aire.zmvm | R | false | false | 4,865 | r |
#' Download pollution data by station in IMECAs
#'
#' Retrieve hourly averages of pollution data, by station, measured in
#' \href{https://en.wikipedia.org/wiki/Índice_Metropolitano_de_la_Calidad_del_Aire}{IMECAs}
#'
#' Note that in 2015 it was determined that the stations with codes ACO, AJU,
#' INN, MON and MPA would no longer be taken into consideration when computing
#' the pollution index because they didn't meet the
#' \href{http://www.aire.cdmx.gob.mx/objetivos-monitoreo-calidad-aire.html}{objectives
#' of monitoring air quality}, and are no longer included in the index, even if
#' they are still part of the SIMAT (Sistema de Monitoreo Atmosférico de la
#' Ciudad de México). Thus, even if they are located inside a zone, they are not
#' included in the pollution values for that zone.
#'
#' @param pollutant The type of pollutant to download
#' \itemize{
#'  \item{"SO2"}{ - Sulfur Dioxide}
#'  \item{"CO"}{ - Carbon Monoxide}
#'  \item{"NO2"}{ - Nitrogen Dioxide}
#'  \item{"O3"}{ - Ozone}
#'  \item{"PM10"}{ - Particulate matter 10 micrometers or less}
#' }
#' @param date The date for which to download data in YYYY-MM-DD format
#'   (the earliest possible date is 2009-01-01).
#' @param show_messages show a message about issues with excluded stations
#'
#' @return A data.frame with pollution data measured in IMECAs, by station.
#'   The hours correspond to the \emph{Etc/GMT+6} timezone, with no daylight
#'   saving time
#' @export
#' @family IMECA functions
#' @seealso \href{http://www.aire.cdmx.gob.mx/default.php?opc='aqBjnmc='}{Índice de calidad del aire por estaciones}
#' @importFrom rvest html_nodes html_table
#' @importFrom xml2 read_html
#' @importFrom tidyr gather
#' @importFrom httr POST http_error status_code http_type add_headers content
#'
#' @examples
#' \dontrun{
#' ## There was an ozone pollution emergency on May 15, 2017
#' df_o3 <- get_station_imeca("O3", "2017-05-15", show_messages = FALSE)
#'
#' ## Convert to local Mexico City time
#' df_o3$mxc_time <- format(as.POSIXct(paste0(df_o3$date,
#'                                            " ",
#'                                            df_o3$hour,
#'                                            ":00"),
#'                                     tz = "Etc/GMT+6"),
#'                          tz = "America/Mexico_City")
#' head(df_o3[order(-df_o3$value), ])
#' }
get_station_imeca <- function(pollutant, date,
                              show_messages = TRUE) {
  ## ---- input validation ----
  if (missing(date))
    stop("You need to specify a start date (YYYY-MM-DD)", call. = FALSE)
  if (length(date) != 1)
    stop("date should be a date in YYYY-MM-DD format", call. = FALSE)
  if (!is.Date(date))
    stop("date should be a date in YYYY-MM-DD format", call. = FALSE)
  if (date < "2009-01-01")
    stop("date should be after 2009-01-01", call. = FALSE)
  ## Scalar comparisons, so use the short-circuit operator throughout
  ## (the original mixed `|` and `||`).
  if (!(identical("O3", pollutant) || identical("NO2", pollutant) ||
        identical("SO2", pollutant) || identical("CO", pollutant) ||
        identical("PM10", pollutant)))
    stop("Invalid pollutant value", call. = FALSE)
  if (date >= "2017-01-01" && show_messages)
    message(paste0("Sometime in 2015-2017 the stations with codes",
                   " ACO, AJU, INN, MON, and MPA were excluded from the",
                   " index"))
  url <- "http://www.aire.cdmx.gob.mx/default.php?opc=%27aqBjnmc=%27"
  ## Form payload expected by the website; RadioGroup1 encodes the pollutant.
  fd <- list(
    fecha = date,
    RadioGroup1 = switch(pollutant,
                         "O3" = 0,
                         "NO2" = 1,
                         "SO2" = 2,
                         "CO" = 3,
                         "PM10" = 4),
    aceptar = "Submit",
    consulta = 1
  )
  result <- POST(url,
                 add_headers("user-agent" =
                               "https://github.com/diegovalle/aire.zmvm"),
                 body = fd,
                 encode = "form")
  if (http_error(result))
    stop(sprintf("The request to <%s> failed [%s]",
                 url,
                 status_code(result)
    ), call. = FALSE)
  if (http_type(result) != "text/html")
    ## FIX: `call. = FALSE` used to be *inside* paste0(), so "FALSE" was
    ## pasted into the message and stop() kept its default call display.
    stop(paste0(url, " did not return text/html"), call. = FALSE)
  poll_table <- read_html(content(result, "text"))
  df <- html_table(html_nodes(poll_table, "table")[[1]],
                   header = TRUE,
                   fill = TRUE)
  if (nrow(df) <= 1)
    stop("The website returned invalid data. Please check the date format.",
         call. = FALSE)
  ## The pollutant name is embedded in the third header cell; the real
  ## column names sit on the second row of the scraped table.
  pollutant2 <- names(df)[3]
  names(df) <- df[2, ]
  df$date <- date
  names(df)[1] <- "hour"
  ## There's an empty row at the end of the data
  df <- df[3:(nrow(df)-1), ]
  ## Wide (one column per station) -> long format.
  df <- gather(df, station_code, value, -date, -hour)
  df[which(df$value == ""), "value"] <- NA
  df$value <- as.numeric(as.character(df$value))
  df$pollutant <- pollutant2
  df$unit <- "IMECA"
  df[, c("date", "hour", "station_code", "pollutant", "unit", "value" )]
}
|
# Simulate one epidemic in a population of n individuals scattered in 2-D.
# Individual states: 1 = susceptible, 2 = infected, 3 = immune, 4 = dead.
# Returns c(duration in steps, % dead at the end, % immune at the end).
#
# NOTE(review): the state matrix E is capped at 500 time steps; the T+2
# indexing inside the loop would fail if an epidemic outlasted ~498 steps
# -- presumably epidemics die out well before that, but confirm for the
# intended n.
epidemie=function(n){
  # Two-component Gaussian-mixture densities used to place individuals;
  # the mixing weight coef is redrawn on every call, so each axis gets a
  # randomly weighted bimodal distribution.
  f=function (x){
    coef=runif(1)
    coef*(1/sqrt(2*pi*0.5))*exp(-(x+2)^2) + (1-coef)*(1/sqrt(2*pi*0.5))*exp(-(x-2)^2)}
  F=function (x){
    coef=runif(1)
    coef*(1/sqrt(2*pi*0.5))*exp(-(x+2)^2) + (1-coef)*(1/sqrt(2*pi*0.5))*exp(-(x-2)^2)}
  x=seq(-5,5,by=0.0001)
  X=c() # x-coordinate vector
  Y=c() # y-coordinate vector
  X=sample(x,n,rep=TRUE,prob=f(x))
  Y=sample(x,n,rep=TRUE,prob=F(x))
  Markov=c(0,0.75,0.2,0.05) # Markov transition weights for infected individuals (state 2)
  E=matrix(nrow=n,ncol=500) # State of each individual at each time step t.
  E[,1]=1 # Every individual starts in state 1 (susceptible)
  E[,2:500]=0 # Fill the remaining columns with 0 (filled in by the loop below)
  IMN=sample(n,floor(n/10)) # A tenth of the population is naturally immune
  E[IMN,1:500]=3 # Those individuals keep state 3 until the end.
  P0=sample(1:n,1) # Select patient zero
  E[P0,1]=2 # Patient zero is infected (state 2) at time 1
  POS=matrix(c(X,Y),nrow=n) # Coordinate matrix
  DISTALL=matrix(as.matrix(dist(POS)),nrow=n) # Pairwise distance matrix
  # Per-individual neighbour counts in three distance bands: <0.4, <2, >=2.
  Z1=apply(DISTALL,1,function(x) sum(x<0.4))
  Z2=apply(DISTALL,1,function(x) sum(x<2))
  Z3=apply(DISTALL,1,function(x) sum(x>=2))
  T=1
  while (2%in%E[,T]){ # run the chain while infected individuals remain.
    SA=c(which(E[,T]==1)) # Individuals in state 1 (susceptible) at time T.
    IN=c(which(E[,T]==2))
    DIST=as.matrix(DISTALL[SA,IN]) # Distances between infected and susceptible individuals
    ZONE=matrix(nrow=length(SA),ncol=6)
    ZONE[,1]=apply(DIST,1,function(x) sum(x<0.4)) # Nb of infected individuals in zone 1
    ZONE[,2]=Z1[SA] # Nb of individuals in zone 1
    ZONE[,3]=apply(DIST,1,function(x) sum(x<2)) # Nb of infected individuals in zone 2
    ZONE[,4]=Z2[SA] # Nb of individuals in zone 2
    ZONE[,5]=apply(DIST,1,function(x) sum(x>=2))
    ZONE[,6]=Z3[SA]
    # Infection probability: zone proportions weighted 0.8 / 0.15 / 0.05
    # (the +1 in each denominator avoids division by zero).
    p=0.8*ZONE[,1]/(ZONE[,2]+1)+0.15*ZONE[,3]/(ZONE[,4]+1)+0.05*ZONE[,5]/(ZONE[,6]+1)
    E[SA,T+1]=sapply(p,function(x) sample(1:2,1,prob=c(1-x,x)))
    E[IN,T+1]=sapply(E[IN,T+1],function(x) x+sample(1:4,1,prob=Markov)) # Markov step for infected individuals
    E[which(E[,T+1]==3),T+2]=3 # Individuals who just reached state 3 keep it until the end.
    E[which(E[,T+1]==4),T+2]=4 # Individuals who just reached state 4 keep it until the end.
    T=T+1
    # NOTE(review): `next` at the end of the loop body is a no-op.
    next
  }
  return(c(T,100*length(which(E[,T]==4))/n,100*length(which(E[,T]==3))/n))
}
# Monte-Carlo driver around epidemie(): runs n simulations of population
# size t, tracks the running mean and a 95% CI of (a) the death percentage
# X and (b) a variance-reduced variable Y, draws convergence diagnostics,
# and returns the final summary statistics.
#
# n - number of Monte-Carlo replications.
# t - population size passed to epidemie().
# Returns c(mean(X), mean(Y), CI bounds for X, CI bounds for Y,
#           var(X), var(Y), elapsed time).
MC=function(n,t){
  deb=Sys.time()
  # Preallocate every per-iteration trace (the original grew empty c()
  # vectors element by element inside the loop).
  X=numeric(n)    # raw death percentage per replication
  Y=numeric(n)    # variance-reduced variable per replication
  M=numeric(n)    # running mean of X
  m=numeric(n)    # running mean of Y
  S=numeric(n)    # running sd of X
  s=numeric(n)    # running sd of Y
  ICm1=numeric(n) # lower 95% CI bound for mean(X)
  ICm2=numeric(n) # upper 95% CI bound for mean(X)
  ICv1=numeric(n) # lower 95% CI bound for mean(Y)
  ICv2=numeric(n) # upper 95% CI bound for mean(Y)
  for (i in seq_len(n)){
    Ep=epidemie(t)
    X[i]=Ep[2]
    # Combination with the immune percentage (Ep[3], centred at 10) --
    # presumably a control variate chosen to reduce variance; confirm.
    Y[i]=(Ep[2]+0.25*(Ep[3]-10))/2
    M[i]=mean(X[1:i])
    m[i]=mean(Y[1:i])
    S[i]=sd(X[1:i])
    s[i]=sd(Y[1:i])
    ICm1[i]=M[i]-1.96*S[i]/sqrt(i)
    ICm2[i]=M[i]+1.96*S[i]/sqrt(i)
    ICv1[i]=m[i]-1.96*s[i]/sqrt(i)
    ICv2[i]=m[i]+1.96*s[i]/sqrt(i)
  }
  # Final CI widths (computed but not returned; kept for parity with the
  # original code).
  ICv=ICv2[n]-ICv1[n]
  ICm=ICm2[n]-ICm1[n]
  # 2x2 panel: convergence of both estimators plus their histograms.
  par(mfrow=c(2,2))
  matplot(1:n,matrix(c(M[1:n],ICm1[1:n],ICm2[1:n]),ncol=3),ylim=c(0,25),col=c("black","red","red"),type='l',main="Convergence du pourcentage de décès",xlab="Nombre de simulations",ylab="Pourcentage de décès moyen")
  matplot(1:n,matrix(c(m[1:n],ICv1[1:n],ICv2[1:n]),ncol=3),ylim=c(0,25),col=c("black","red","red"),type='l',main="Convergence de la nouvelle variable",xlab="Nombre de simulations",ylab="Moyenne de la nouvelle ")
  hist(X,main ="Histogramme du pourcentage de décès",xlab="Pourcentage de décès",ylab="Fréquence")
  hist(Y,main="Histogramme de la nouvelle variable",xlab="Nouvelle variable",ylab="Fréquence")
  fin=Sys.time()
  temps=fin-deb
  return(c(M[n],m[n],ICm1[n],ICm2[n],ICv1[n],ICv2[n],var(X),var(Y),temps))
}
| /Reduction-de-variance.R | no_license | satacroteam/Propagation-Epidemie | R | false | false | 3,791 | r | epidemie=function(n){
f=function (x){
coef=runif(1)
coef*(1/sqrt(2*pi*0.5))*exp(-(x+2)^2) + (1-coef)*(1/sqrt(2*pi*0.5))*exp(-(x-2)^2)}
F=function (x){
coef=runif(1)
coef*(1/sqrt(2*pi*0.5))*exp(-(x+2)^2) + (1-coef)*(1/sqrt(2*pi*0.5))*exp(-(x-2)^2)}
x=seq(-5,5,by=0.0001)
X=c() #vecteur abscisse
Y=c() #vecteur ordonnée
X=sample(x,n,rep=TRUE,prob=f(x))
Y=sample(x,n,rep=TRUE,prob=F(x))
Markov=c(0,0.75,0.2,0.05) #Chaine de Markov pour les individus infectés (état 2)
E=matrix(nrow=n,ncol=500) #Matrice des états de chaque individu, à chaque instant t.
E[,1]=1 #On initialise tous les individus à l'état 1
E[,2:500]=0 #On remplit les autres colones avec des 0 (cf ligne41)
IMN=sample(n,floor(n/10)) #Un dizième de la pop est Immunisée naturelement
E[IMN,1:500]=3 #On applique l'état 3 à cette part de la population jusqu'à la fin.
P0=sample(1:n,1) #On séléctionne le patient 0
E[P0,1]=2 #On ajoute P0 à l'état infecté au temps 1
POS=matrix(c(X,Y),nrow=n) #Matrice des coordonnées
DISTALL=matrix(as.matrix(dist(POS)),nrow=n) #Matrice des distances
Z1=apply(DISTALL,1,function(x) sum(x<0.4))
Z2=apply(DISTALL,1,function(x) sum(x<2))
Z3=apply(DISTALL,1,function(x) sum(x>=2))
T=1
while (2%in%E[,T]){ #on déroule la chaine tant qu'il y a encore des malades.
SA=c(which(E[,T]==1)) #On isole les individus à l'état 1 au temps T dans un vecteur.
IN=c(which(E[,T]==2))
DIST=as.matrix(DISTALL[SA,IN]) #Distances entre infectés et Sains
ZONE=matrix(nrow=length(SA),ncol=6)
ZONE[,1]=apply(DIST,1,function(x) sum(x<0.4)) #Nb d'individus infectés zone 1
ZONE[,2]=Z1[SA] #Nb d'individus zone 1
ZONE[,3]=apply(DIST,1,function(x) sum(x<2)) #Nb d'individus infectés zone 2
ZONE[,4]=Z2[SA] #Nb d'individus zone 2
ZONE[,5]=apply(DIST,1,function(x) sum(x>=2))
ZONE[,6]=Z3[SA]
p=0.8*ZONE[,1]/(ZONE[,2]+1)+0.15*ZONE[,3]/(ZONE[,4]+1)+0.05*ZONE[,5]/(ZONE[,6]+1)
E[SA,T+1]=sapply(p,function(x) sample(1:2,1,prob=c(1-x,x)))
E[IN,T+1]=sapply(E[IN,T+1],function(x) x+sample(1:4,1,prob=Markov)) #Chaine de markov pour les individus infectés
E[which(E[,T+1]==3),T+2]=3 #On applique l'état 3 jusqu'à la fin pour les individus qui viennennt d'entrer dans cet état.
E[which(E[,T+1]==4),T+2]=4 #On applique l'état 4 jusqu'à la fin pour les individus qui viennennt d'entrer dans cet état.
T=T+1
next
}
return(c(T,100*length(which(E[,T]==4))/n,100*length(which(E[,T]==3))/n))
}
MC=function(n,t){
deb=Sys.time()
Ep=c()
X=c()
M=c()
Y=c()
m=c()
S=c()
s=c()
ICm1=c()
ICm2=c()
ICv1=c()
ICv2=c()
for (i in 1:n){
Ep=epidemie(t)
X[i]=Ep[2]
Y[i]=(Ep[2]+0.25*(Ep[3]-10))/2
M[i]=mean(X[1:i])
m[i]=mean(Y[1:i])
S[i]=sd(X[1:i])
s[i]=sd(Y[1:i])
ICm1[i]=M[i]-1.96*S[i]/sqrt(i)
ICm2[i]=M[i]+1.96*S[i]/sqrt(i)
ICv1[i]=m[i]-1.96*s[i]/sqrt(i)
ICv2[i]=m[i]+1.96*s[i]/sqrt(i)
}
ICv=ICv2[n]-ICv1[n]
ICm=ICm2[n]-ICm1[n]
par(mfrow=c(2,2))
matplot(1:n,matrix(c(M[1:n],ICm1[1:n],ICm2[1:n]),ncol=3),ylim=c(0,25),col=c("black","red","red"),type='l',main="Convergence du pourcentage de décès",xlab="Nombre de simulations",ylab="Pourcentage de décès moyen")
matplot(1:n,matrix(c(m[1:n],ICv1[1:n],ICv2[1:n]),ncol=3),ylim=c(0,25),col=c("black","red","red"),type='l',main="Convergence de la nouvelle variable",xlab="Nombre de simulations",ylab="Moyenne de la nouvelle ")
hist(X,main ="Histogramme du pourcentage de décès",xlab="Pourcentage de décès",ylab="Fréquence")
hist(Y,main="Histogramme de la nouvelle variable",xlab="Nouvelle variable",ylab="Fréquence")
fin=Sys.time()
temps=fin-deb
return(c(M[n],m[n],ICm1[n],ICm2[n],ICv1[n],ICv2[n],var(X),var(Y),temps))
}
|
# Plot method for "cv.ncvreg" objects (cross-validated ncvreg fits).
#
# x             - a cv.ncvreg object.
# log.l         - if TRUE, the x axis is log(lambda) rather than lambda.
# type          - which cross-validation summary to plot; "all" draws each
#                 panel that applies to the model's family.
# selected      - annotate the top axis with the number of selected variables.
# vertical.line - draw a dashed line at the CV-minimizing lambda.
# col           - color for the point estimates.
plot.cv.ncvreg <- function(x, log.l=TRUE, type=c("cve", "rsq", "scale", "snr", "pred", "all"), selected=TRUE, vertical.line=TRUE, col="red", ...) {
  type <- match.arg(type)
  if (type=="all") {
    # Recurse once per panel; "pred" applies only to binomial fits and
    # "scale" only to gaussian fits.
    plot(x, log.l=log.l, type="cve", selected=selected, ...)
    plot(x, log.l=log.l, type="rsq", selected=selected, ...)
    plot(x, log.l=log.l, type="snr", selected=selected, ...)
    if (x$fit$family == "binomial") plot(x, log.l=log.l, type="pred", selected=selected, ...)
    if (x$fit$family == "gaussian") plot(x, log.l=log.l, type="scale", selected=selected, ...)
    return(invisible(NULL))
  }
  l <- x$lambda
  if (log.l) {
    l <- log(l)
    xlab <- expression(log(lambda))
  } else xlab <- expression(lambda)
  ## Calculate y
  # Pointwise one-standard-error band around the CV error; the other
  # panel types are derived from these bounds.
  L.cve <- x$cve - x$cvse
  U.cve <- x$cve + x$cvse
  if (type=="cve") {
    y <- x$cve
    L <- L.cve
    U <- U.cve
    ylab <- "Cross-validation error"
  } else if (type=="rsq") {
    # S = explained deviance; pmax guards against a negative estimate.
    S <- pmax(x$null.dev - x$cve, 0)
    y <- S/x$null.dev
    L <- S/(S+U.cve)
    U <- S/(S+L.cve)
    ylab <- ~R^2
  } else if (type=="snr") {
    S <- pmax(x$null.dev - x$cve, 0)
    y <- S/(x$cve)
    L <- S/U.cve
    U <- S/L.cve
    ylab <- "Signal-to-noise ratio"
  } else if (type=="scale") {
    if (x$fit$family == "binomial") stop("Scale parameter for binomial family fixed at 1")
    # Residual scale estimate: sqrt of the CV error.
    y <- sqrt(x$cve)
    L <- sqrt(L.cve)
    U <- sqrt(U.cve)
    ylab <- ~hat(sigma)
  } else if (type=="pred") {
    y <- x$pe
    n <- x$fit$n
    # 68% binomial CI on the misclassification rate (roughly +/- 1 SE).
    CI <- sapply(y, function(x) {binom.test(x*n, n, conf.level=0.68)$conf.int})
    L <- CI[1,]
    U <- CI[2,]
    ylab <- "Prediction error"
  }
  # Keep only lambdas with finite x positions.
  ind <- if (type=="pred") which(is.finite(l[1:length(x$pe)])) else which(is.finite(l[1:length(x$cve)]))
  ylim <- range(c(L[ind], U[ind]))
  # Draw error bars only where they are wide enough to be visible
  # (>0.1% of the y range).  NOTE(review): `ind` holds *indices*, so
  # `& ind` coerces every nonzero index to TRUE -- verify this is the
  # intended masking.
  aind <- ((U-L)/diff(ylim) > 1e-3) & ind
  # xlim is reversed so that lambda decreases from left to right.
  plot.args = list(x=l[ind], y=y[ind], ylim=ylim, xlab=xlab, ylab=ylab, type="n", xlim=rev(range(l[ind])), las=1)
  new.args = list(...)
  if (length(new.args)) plot.args[names(new.args)] = new.args
  do.call("plot", plot.args)
  if (vertical.line) abline(v=l[x$min],lty=2,lwd=.5)
  # Error bars as flat-headed arrows; warnings about zero-length arrows
  # are expected and suppressed.
  suppressWarnings(arrows(x0=l[aind], x1=l[aind], y0=L[aind], y1=U[aind], code=3, angle=90, col="gray80", length=.05))
  points(l[ind], y[ind], col=col, pch=19, cex=.5)
  if (selected) {
    # Top axis: number of nonzero coefficients at each lambda.
    n.s <- predict(x$fit, lambda=x$lambda, type="nvars")
    axis(3, at=l, labels=n.s, tick=FALSE, line=-0.5)
    mtext("Variables selected", cex=0.8, line=1.5)
  }
}
| /R/plot.cv.ncvreg.R | no_license | grantbrown/ncvreg | R | false | false | 2,428 | r | plot.cv.ncvreg <- function(x, log.l=TRUE, type=c("cve", "rsq", "scale", "snr", "pred", "all"), selected=TRUE, vertical.line=TRUE, col="red", ...) {
type <- match.arg(type)
if (type=="all") {
plot(x, log.l=log.l, type="cve", selected=selected, ...)
plot(x, log.l=log.l, type="rsq", selected=selected, ...)
plot(x, log.l=log.l, type="snr", selected=selected, ...)
if (x$fit$family == "binomial") plot(x, log.l=log.l, type="pred", selected=selected, ...)
if (x$fit$family == "gaussian") plot(x, log.l=log.l, type="scale", selected=selected, ...)
return(invisible(NULL))
}
l <- x$lambda
if (log.l) {
l <- log(l)
xlab <- expression(log(lambda))
} else xlab <- expression(lambda)
## Calculate y
L.cve <- x$cve - x$cvse
U.cve <- x$cve + x$cvse
if (type=="cve") {
y <- x$cve
L <- L.cve
U <- U.cve
ylab <- "Cross-validation error"
} else if (type=="rsq") {
S <- pmax(x$null.dev - x$cve, 0)
y <- S/x$null.dev
L <- S/(S+U.cve)
U <- S/(S+L.cve)
ylab <- ~R^2
} else if (type=="snr") {
S <- pmax(x$null.dev - x$cve, 0)
y <- S/(x$cve)
L <- S/U.cve
U <- S/L.cve
ylab <- "Signal-to-noise ratio"
} else if (type=="scale") {
if (x$fit$family == "binomial") stop("Scale parameter for binomial family fixed at 1")
y <- sqrt(x$cve)
L <- sqrt(L.cve)
U <- sqrt(U.cve)
ylab <- ~hat(sigma)
} else if (type=="pred") {
y <- x$pe
n <- x$fit$n
CI <- sapply(y, function(x) {binom.test(x*n, n, conf.level=0.68)$conf.int})
L <- CI[1,]
U <- CI[2,]
ylab <- "Prediction error"
}
ind <- if (type=="pred") which(is.finite(l[1:length(x$pe)])) else which(is.finite(l[1:length(x$cve)]))
ylim <- range(c(L[ind], U[ind]))
aind <- ((U-L)/diff(ylim) > 1e-3) & ind
plot.args = list(x=l[ind], y=y[ind], ylim=ylim, xlab=xlab, ylab=ylab, type="n", xlim=rev(range(l[ind])), las=1)
new.args = list(...)
if (length(new.args)) plot.args[names(new.args)] = new.args
do.call("plot", plot.args)
if (vertical.line) abline(v=l[x$min],lty=2,lwd=.5)
suppressWarnings(arrows(x0=l[aind], x1=l[aind], y0=L[aind], y1=U[aind], code=3, angle=90, col="gray80", length=.05))
points(l[ind], y[ind], col=col, pch=19, cex=.5)
if (selected) {
n.s <- predict(x$fit, lambda=x$lambda, type="nvars")
axis(3, at=l, labels=n.s, tick=FALSE, line=-0.5)
mtext("Variables selected", cex=0.8, line=1.5)
}
}
|
# run_seqgsea.R: SeqGSEA differential expression / differential splicing
# pipeline run step by step (the all-in-one runSeqGSEA() call is kept
# commented out at the bottom).
#
# NOTE(review): the two <REGEX> placeholders below are not valid R -- they
# must be replaced with quoted regular expressions before this script can
# run (and the second one, `<REGEX`, is missing its closing `>`).
data.dir <- ("./")
ctrl.pattern <- paste(<REGEX>) #INPUT REQUIRED
case.pattern <- paste(<REGEX) #INPUT REQUIRED
# Locate per-sample exon count files by filename pattern.
case.files <- dir(data.dir, pattern=case.pattern, full.names = TRUE)
print("Case Files")
print(case.files)
control.files <- dir(data.dir, pattern=ctrl.pattern, full.names = TRUE)
print("Control Files")
print(control.files)
# MSigDB gene sets (only used by the commented-out GSEA section below).
geneset.file <- ("msigdb.v5.0.symbols.gmt.txt") #CHANGE IF NECESSARY
geneID.type <- "gene.symbol"
output.prefix <- "SeqGSEA"
nCores <- 1
perm.times <- 1000
# DEonly/DEweight/integrationMethod are only consumed by runSeqGSEA(),
# which is commented out below.
DEonly <- FALSE
DEweight <- c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)
integrationMethod <- "linear"
# NOTE(review): nCores is set to 1 above but the cluster is created with
# 30 workers -- reconcile these.  stopCluster(cl) is never called either.
library(doParallel)
cl <- makeCluster(30)
registerDoParallel(cl)
# Load exon counts and keep only testable exons/genes.
RCS <- loadExonCountData(case.files, control.files)
RCS <- exonTestability(RCS, cutoff=5)
geneTestable <- geneTestability(RCS)
RCS <- subsetByGenes(RCS, unique(geneID(RCS))[ geneTestable ])
geneIDs <- unique(geneID(RCS))
# Exon- and gene-level negative-binomial statistics.
RCS <- estiExonNBstat(RCS)
RCS <- estiGeneNBstat(RCS)
print("Generated RCS")
# Permutation matrix for the differential-splicing (DS) null distribution.
permuteMat <- genpermuteMat(RCS, times=perm.times)
print("Permuted Matrix")
RCS <- DSpermute4GSEA(RCS, permuteMat)
print("DSpermute4GSEA Done!")
# Differential-expression (DE) statistics via DESeq, plus permutations.
geneCounts <- getGeneCount(RCS)
label <- label(RCS)
DEG <-runDESeq(geneCounts, label)
DEGres <- DENBStat4GSEA(DEG)
print("DENBStat4GSEA Done!")
DEpermNBstat <- DENBStatPermut4GSEA(DEG, permuteMat)
# Normalize DE scores against the permutation distribution.
DEscore.normFac <- normFactor(DEpermNBstat)
DEscore <- scoreNormalization(DEGres$NBstat, DEscore.normFac)
DEscore.perm <- scoreNormalization(DEpermNBstat, DEscore.normFac)
print("DE Score Perm Done!")
# Normalize DS scores the same way.
DSscore.normFac <- normFactor(RCS@permute_NBstat_gene)
DSscore <- scoreNormalization(RCS@featureData_gene$NBstat, DSscore.normFac)
DSscore.perm <- scoreNormalization(RCS@permute_NBstat_gene, DSscore.normFac)
print("DS Score Perm Done!")
# Combine DE and DS into one gene score (equal weighting, DEweight=0.5).
gene.score <- geneScore(DEscore, DSscore, DEweight=0.5)
gene.score.perm <- genePermuteScore(DEscore.perm, DSscore.perm, DEweight=0.5)
print("Gene Score Perm Done!")
# Gene-set enrichment step, currently disabled.
#gene.set <- loadGenesets(geneset.file, geneIDs, geneID.type="gene.symbol", genesetsize.min = 5, genesetsize.max = 1000)
#gene.set <- GSEnrichAnalyze(gene.set, gene.score, gene.score.perm, weighted.type=1)
#print("Gene Set Enriched!")
#GSEAres <- GSEAresultTable(gene.set, TRUE)
#write.table(GSEAres, paste(output.prefix,".GSEA.result.txt",sep=""), quote=FALSE, sep="\t", row.names=FALSE)
#runSeqGSEA(data.dir=data.dir, case.pattern=case.pattern, ctrl.pattern=ctrl.pattern,geneset.file=geneset.file, geneID.type=geneID.type, output.prefix=output.prefix,nCores=nCores, perm.times=perm.times, integrationMethod=integrationMethod,DEonly=DEonly, DEweight=DEweight)
# Persist the per-gene scores for downstream analysis.
write.table(gene.score, paste(output.prefix,".geneScores.txt",sep=""), quote=FALSE, sep="\t", row.names=TRUE)
write.table(DEscore, paste(output.prefix,".DEScores.txt",sep=""), quote=FALSE, sep="\t", row.names=TRUE)
write.table(DSscore, paste(output.prefix,".DSScores.txt",sep=""), quote=FALSE, sep="\t", row.names=TRUE)
write.table(geneIDs, paste(output.prefix, ".geneIDs.txt",sep=""), quote=FALSE, sep="\t", row.names=TRUE)
print("Tables written")
| /SeqGSEA/2-SeqGSEA/run_seqgsea.R | no_license | Shelby-Simpson/Phenotype-Prediction-Pipeline | R | false | false | 3,073 | r | data.dir <- ("./")
ctrl.pattern <- paste(<REGEX>) #INPUT REQUIRED
case.pattern <- paste(<REGEX) #INPUT REQUIRED
case.files <- dir(data.dir, pattern=case.pattern, full.names = TRUE)
print("Case Files")
print(case.files)
control.files <- dir(data.dir, pattern=ctrl.pattern, full.names = TRUE)
## SeqGSEA differential-expression (DE) / differential-splicing (DS) scoring
## script (fragment: case.files and control.files are defined earlier in the
## file, outside this excerpt).
print("Control Files")
print(control.files)
## Analysis parameters
geneset.file <- ("msigdb.v5.0.symbols.gmt.txt") #CHANGE IF NECESSARY
geneID.type <- "gene.symbol"
output.prefix <- "SeqGSEA"
nCores <- 1
perm.times <- 1000
DEonly <- FALSE
DEweight <- c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)
integrationMethod <- "linear"
## Set up a parallel backend for the permutation steps.
## NOTE(review): nCores is set to 1 above but makeCluster() requests 30
## workers -- confirm which value is intended.
library(doParallel)
cl <- makeCluster(30)
registerDoParallel(cl)
## Load exon-level counts and keep only testable exons/genes.
RCS <- loadExonCountData(case.files, control.files)
RCS <- exonTestability(RCS, cutoff=5)
geneTestable <- geneTestability(RCS)
RCS <- subsetByGenes(RCS, unique(geneID(RCS))[ geneTestable ])
geneIDs <- unique(geneID(RCS))
## Negative-binomial statistics at exon and gene level.
RCS <- estiExonNBstat(RCS)
RCS <- estiGeneNBstat(RCS)
print("Generated RCS")
## Permutation matrix shared by the DS and DE permutation runs below.
permuteMat <- genpermuteMat(RCS, times=perm.times)
print("Permuted Matrix")
RCS <- DSpermute4GSEA(RCS, permuteMat)
print("DSpermute4GSEA Done!")
## Differential-expression statistics via DESeq, plus their permutations.
geneCounts <- getGeneCount(RCS)
label <- label(RCS)
DEG <-runDESeq(geneCounts, label)
DEGres <- DENBStat4GSEA(DEG)
print("DENBStat4GSEA Done!")
DEpermNBstat <- DENBStatPermut4GSEA(DEG, permuteMat)
## Normalise observed and permuted DE scores by the same factor so they
## are comparable.
DEscore.normFac <- normFactor(DEpermNBstat)
DEscore <- scoreNormalization(DEGres$NBstat, DEscore.normFac)
DEscore.perm <- scoreNormalization(DEpermNBstat, DEscore.normFac)
print("DE Score Perm Done!")
## Same normalisation for the differential-splicing scores.
DSscore.normFac <- normFactor(RCS@permute_NBstat_gene)
DSscore <- scoreNormalization(RCS@featureData_gene$NBstat, DSscore.normFac)
DSscore.perm <- scoreNormalization(RCS@permute_NBstat_gene, DSscore.normFac)
print("DS Score Perm Done!")
## Combine DE and DS into a single gene score.
## NOTE(review): the DEweight vector defined above is not used here; a fixed
## DEweight=0.5 is passed instead -- confirm that is intended.
gene.score <- geneScore(DEscore, DSscore, DEweight=0.5)
gene.score.perm <- genePermuteScore(DEscore.perm, DSscore.perm, DEweight=0.5)
print("Gene Score Perm Done!")
## Gene-set enrichment steps are currently disabled; only the per-gene
## score tables are written out below.
#gene.set <- loadGenesets(geneset.file, geneIDs, geneID.type="gene.symbol", genesetsize.min = 5, genesetsize.max = 1000)
#gene.set <- GSEnrichAnalyze(gene.set, gene.score, gene.score.perm, weighted.type=1)
#print("Gene Set Enriched!")
#GSEAres <- GSEAresultTable(gene.set, TRUE)
#write.table(GSEAres, paste(output.prefix,".GSEA.result.txt",sep=""), quote=FALSE, sep="\t", row.names=FALSE)
#runSeqGSEA(data.dir=data.dir, case.pattern=case.pattern, ctrl.pattern=ctrl.pattern,geneset.file=geneset.file, geneID.type=geneID.type, output.prefix=output.prefix,nCores=nCores, perm.times=perm.times, integrationMethod=integrationMethod,DEonly=DEonly, DEweight=DEweight)
## Write tab-separated result tables, one per score type.
write.table(gene.score, paste(output.prefix,".geneScores.txt",sep=""), quote=FALSE, sep="\t", row.names=TRUE)
write.table(DEscore, paste(output.prefix,".DEScores.txt",sep=""), quote=FALSE, sep="\t", row.names=TRUE)
write.table(DSscore, paste(output.prefix,".DSScores.txt",sep=""), quote=FALSE, sep="\t", row.names=TRUE)
write.table(geneIDs, paste(output.prefix, ".geneIDs.txt",sep=""), quote=FALSE, sep="\t", row.names=TRUE)
print("Tables written")
|
# Generates a randomization distribution for my data, plots the
# distribution on a histogram, and performs a two-tailed test for the
# p-value
# NOTE(review): the hard-coded setwd() makes this script machine-specific;
# consider running it from the project directory with relative paths instead.
setwd("C:/Users/dritc/Documents/R/STA13TermProject")
northData <- read.csv("STA13ProjectData-North.csv", header = TRUE)
southData <- read.csv("STA13ProjectData-South.csv", header = TRUE)
nDiffs <- northData$Diff
sDiffs <- southData$Diff
n <- length(nDiffs)
# Pool both samples: under the null hypothesis the group labels are
# exchangeable. (c() replaces the original element-by-element copy loop.)
totalDiffs <- c(nDiffs, sDiffs)
# Build the randomization distribution of the difference in group means.
b <- 10000
rand.dist <- replicate(b, {
  rand.sample <- sample(totalDiffs)
  mean(rand.sample[seq_len(n)]) - mean(rand.sample[(n + 1):(2 * n)])
})
hist(rand.dist, main="Randomization Distribution", xlab="Difference in Means")
# Observed test statistic.
sampleStat <- mean(nDiffs) - mean(sDiffs)
# Count permuted statistics at least as extreme as the observed one, in the
# direction of the observed statistic. The direction test is invariant, so
# it is hoisted out of the (now vectorized) counting loop.
moreExtremeCount <- if (sampleStat > 0) {
  sum(rand.dist >= sampleStat)
} else {
  sum(rand.dist <= sampleStat)
}
# Two-tailed p-value: double the one-tailed proportion.
pval <- (moreExtremeCount / b) * 2
pval
# distribution on a histogram, and performs a two-tailed test for the
# p-value
setwd("C:/Users/dritc/Documents/R/STA13TermProject")
northData = read.csv("STA13ProjectData-North.csv", header=TRUE)
southData = read.csv("STA13ProjectData-South.csv", header=TRUE)
nDiffs = northData$Diff
sDiffs = southData$Diff
n = length(nDiffs)
totalDiffs = rep(NA, n*2)
for (i in 1:n) {
totalDiffs[i] = nDiffs[i]
totalDiffs[i+n] = sDiffs[i]
}
b = 10000
rand.dist = rep(NA, b)
for (i in 1:b) {
rand.sample = sample(totalDiffs)
rand.diffs1 = rand.sample[1:n]
rand.diffs2 = rand.sample[(n+1):(n*2)]
rand.dist[i] = mean(rand.diffs1) - mean(rand.diffs2)
}
hist(rand.dist, main="Randomization Distribution", xlab="Difference in Means")
sampleStat = mean(nDiffs) - mean(sDiffs)
moreExtremeCount = 0
for (i in 1:length(rand.dist)) {
if (sampleStat > 0) {
if (rand.dist[i] >= sampleStat) {
moreExtremeCount = moreExtremeCount + 1
}
} else {
if (rand.dist[i] <= sampleStat) {
moreExtremeCount = moreExtremeCount + 1
}
}
}
pval = (moreExtremeCount/b) * 2
pval |
# Internal helper: render one 2D slice of a single layer into the current
# plotting region. `loc` is a length-3 vector with exactly one non-NA entry,
# which selects both the axis being sliced and the slice index along it.
# `asp` is the plot aspect ratio; `add` overlays on an existing plot.
.plotLayer <- function (layer, loc, asp = NULL, add = FALSE)
{
    # Extract the data for the appropriate plane
    axis <- which(!is.na(loc))
    # alist() leaves i/j/k missing (= full extent) while dimensions 4-7 are
    # pinned to their first element
    indices <- alist(i=, j=, k=, t=1, u=1, v=1, w=1)
    # Insert the requested slice index on the selected axis
    indices[axis] <- loc[axis]
    # layer["image"] is a one-element list, so the image itself becomes the
    # first argument passed to `[`
    data <- do.call("[", c(layer["image"], indices[seq_len(ndim(layer$image))], list(drop=FALSE)))
    # Keep only the two in-plane dimensions
    dims <- dim(data)[-c(axis,4:7)]
    dim(data) <- dims
    # Default aspect ratio comes from the matrix dimensions; the caller
    # normally supplies a field-of-view based value instead
    if (is.null(asp))
        asp <- dims[2] / dims[1]
    if (inherits(layer$image, "rgbArray"))
    {
        # RGB display is achieved by converting the data to an array of indices into a custom palette
        data <- as.character(structure(data, class="rgbArray"))
        palette <- unique(data)
        indices <- match(data, palette)
        dim(indices) <- dims
        # Remove margins for edge-to-edge display; restored on exit
        oldPars <- par(mai=c(0,0,0,0))
        on.exit(par(oldPars))
        image(indices, col=palette, axes=FALSE, asp=asp, add=add, zlim=c(1,length(palette)))
    }
    else
    {
        # Other data is shown using standard image(), but zeroes are set to NA to make them transparent
        if (add)
            data <- replace(data, which(data==0), NA)
        # Background colour matches the palette's first entry so the base
        # layer blends into the margins
        oldPars <- par(mai=c(0,0,0,0), bg=layer$colours[1])
        on.exit(par(oldPars))
        image(data, col=layer$colours, axes=FALSE, asp=asp, add=add, zlim=layer$window)
    }
}
#' A basic 3D image viewer
#'
#' This function displays one or more 2D or 3D images, with optional
#' click-to-navigate interactivity.
#'
#' @param ... One or more images, or \code{"viewLayer"} objects, to display.
#' @param point A numeric vector giving the location to initially centre the
#' view on. If crosshairs are in use, they will be placed at this point. For
#' 3D images, this parameter also determines the planes shown in each
#' subview.
#' @param radiological Logical value. If \code{TRUE}, images will be displayed
#' in the radiological convention whereby the left of the image is shown on
#' the right; otherwise left is on the left.
#' @param interactive Logical value. If \code{TRUE}, the user can navigate
#' around the image by repeatedly clicking on a new centre point; otherwise
#' the view is fixed.
#' @param crosshairs Logical value, indicating whether crosshairs should be
#' shown or not.
#' @param labels Logical value, indicating whether orientation labels should be
#' shown or not. Ignored (defaulting to \code{FALSE}) if the image is 2D or
#' orientation information is not available.
#' @param infoPanel A function of three arguments, which must produce a plot
#' for the information panel of the view. \code{\link{defaultInfoPanel}} is
#' the default, which shows the labels and values of each image at the
#' current point.
#' @param image The image being shown in this layer.
#' @param scale A character vector of colour values for the scale, or a single
#' string naming a predefined scale: \code{"grey"} or \code{"gray"} for
#' greyscale, \code{"heat"} for a heatmap, \code{"rainbow"} for a rainbow
#' scale, or any of the scales defined in the \code{shades} package (see
#' \code{?shades::gradient}, if that package is installed). Ignored for RGB
#' images.
#' @param min,max The window minimum and maximum for the layer, i.e., the black
#' and white points. These are ignored for RGB images. Otherwise, if
#' \code{NULL}, the default, they are taken from the \code{cal_min} or
#' \code{cal_max} NIfTI header fields. If either is \code{NA}, the image has
#' no window stored in its header, or the two values are equal, then the 1st
#' and 99th percentiles of the data are used, with values close to zero
#' rounded to that extreme.
#' @return \code{lyr} returns a list of class \code{"viewLayer"}, to be used
#' in a view. \code{view} is called for its side-effect of showing a view.
#'
#' @note Because of the way R's main run-loop interacts with graphics, it will
#' not be possible to issue further commands to the terminal while
#' interactive mode is enabled. Instructions for leaving this mode are shown
#' by the default info panel; see also \code{\link{locator}}, which is the
#' underlying core function.
#'
#' @examples
#' im <- readNifti(system.file("extdata", "example.nii.gz", package="RNifti"))
#' view(im, interactive=FALSE)
#'
#' @author Jon Clayden <code@@clayden.org>
#' @seealso \code{\link{defaultInfoPanel}}, \code{\link{orientation}},
#' \code{\link{locator}}
#' @export
view <- function (..., point = NULL, radiological = getOption("radiologicalView",FALSE), interactive = base::interactive(), crosshairs = TRUE, labels = TRUE, infoPanel = defaultInfoPanel)
{
    # Get the layers to display, and the expressions used to generate them
    layers <- list(...)
    # Deparsed call arguments become the default layer labels
    layerExpressions <- sub("^[\\s\"]*(.+?)[\\s\"]*$", "\\1", sapply(substitute(list(...)), deparse, control=NULL, nlines=1)[-1], perl=TRUE)
    nLayers <- length(layers)
    if (nLayers == 0)
        stop("At least one image must be specified")
    # In general, data will need reordering for visual consistency
    originalXform <- NULL
    # Target axis codes for reorientation: "LAS" mirrors left-right for the
    # radiological convention, "RAS" otherwise
    orientation <- ifelse(radiological, "LAS", "RAS")
    for (i in seq_len(nLayers))
    {
        # Images need to be converted to layer objects
        if (!inherits(layers[[i]], "viewLayer"))
        {
            layers[[i]] <- lyr(layers[[i]])
            layers[[i]]$label <- layerExpressions[i]
        }
        # The xform of the first image is used for indexing
        if (i == 1)
            originalXform <- xform(layers[[i]]$image)
        # If the image has orientation information and isn't 2D, reorient it as necessary
        header <- niftiHeader(layers[[i]]$image)
        if (ndim(layers[[i]]$image) > 2 && (header$qform_code > 0 || header$sform_code > 0))
            orientation(layers[[i]]$image) <- orientation
    }
    # The first layer defines the geometry (dimensions, voxel sizes) used by
    # all subviews; pad dimensions to three for 2D images
    baseImage <- layers[[1]]$image
    reorientedXform <- xform(baseImage)
    ndim <- ndim(baseImage)
    dims <- c(dim(baseImage), rep(1,max(0,3-ndim)))[1:3]
    fov <- dims * c(pixdim(baseImage), rep(1,max(0,3-ndim)))[1:3]
    # Don't show labels if the base image is 2D or has no meaningful xform information
    if (ndim < 3L || attr(reorientedXform,"code") == 0L)
        labels <- FALSE
    # If no point is specified, use the origin if it's nontrivial, otherwise the centre of the image
    if (is.null(point) && any(origin(originalXform) > 1))
        point <- round(origin(originalXform))
    else if (is.null(point))
        point <- round(dims / 2)
    # Work out the point location in viewer space
    reorientedPoint <- round(worldToVoxel(voxelToWorld(point, originalXform), reorientedXform))
    # Orientation labels and their anatomical opposites, e.g. R <-> L
    positiveLabels <- unlist(strsplit(orientation, ""))
    negativeLabels <- c(R="L", A="P", S="I", L="R", P="A", I="S")[positiveLabels]
    # Set some graphics parameters, and make sure they get reset
    oldPars <- par(bg="black", col="white", fg="white", col.axis="white", col.lab="white", col.main="white")
    oldOptions <- options(locatorBell=FALSE, preferRaster=TRUE)
    on.exit({
        par(oldPars)
        options(oldOptions)
    })
    # Main display loop: redraw all subviews, then (if interactive) wait for
    # a click and recentre on it
    repeat
    {
        # Clamp the current point into the valid voxel range
        reorientedPoint[reorientedPoint < 1] <- 1
        reorientedPoint[reorientedPoint > dims] <- dims[reorientedPoint > dims]
        # Current point as a proportion (0 to 1) along each axis
        voxelCentre <- (reorientedPoint - 1) / (dims - 1)
        # Work out the point location in source space
        point <- round(worldToVoxel(voxelToWorld(reorientedPoint, reorientedXform), originalXform))
        # The boundaries of each subview
        starts <- ends <- numeric(0)
        # Plot the info panel first so that we have some handle on the coordinate system when we use locator()
        if (ndim == 2)
        {
            # Pre-fill the slots of the two skipped views so that elements
            # 5:6 of starts/ends still refer to the one drawn view below
            starts <- ends <- rep(0:1, 2)
            layout(matrix(c(2,1), nrow=1))
        }
        else
            # 2x2 layout; figure 1 (the info panel, plotted first) sits
            # bottom-right, the three views fill the remaining quadrants
            layout(matrix(c(2,3,4,1), nrow=2, byrow=TRUE))
        # For each layer, extract the data values corresponding to the current point, and pass them to the info panel function
        data <- lapply(layers, function(layer) {
            indices <- alist(i=, j=, k=, t=, u=, v=, w=)
            indices[seq_along(point)] <- reorientedPoint
            result <- do.call("[", c(list(layer$image), indices[seq_len(ndim(layer$image))]))
            if (inherits(layer$image, "rgbArray"))
                return (as.character(structure(result, dim=c(1,length(result)), class="rgbArray")))
            else
                return (result)
        })
        infoPanel(point, data, sapply(layers,"[[","label"))
        # Draw one subview per axis: i is the axis perpendicular to the plane
        for (i in 1:3)
        {
            # 2D images don't require three views
            if (ndim == 2 && i < 3)
                next
            inPlaneAxes <- setdiff(1:3, i)
            loc <- replace(rep(NA,3), i, reorientedPoint[i])
            # Plot each layer for the current plane
            for (j in seq_along(layers))
                .plotLayer(layers[[j]], loc, asp=fov[inPlaneAxes[2]]/fov[inPlaneAxes[1]], add=(j>1))
            # "Measure" the subview canvas
            region <- par("usr")
            starts <- c(starts, region[c(1,3)])
            ends <- c(ends, region[c(2,4)])
            width <- c(region[2]-region[1], region[4]-region[3])
            # Plot the crosshairs, if required
            if (crosshairs)
            {
                halfVoxelWidth <- 0.5 / (dims[inPlaneAxes] - 1)
                lines(rep(voxelCentre[inPlaneAxes[1]],2), c(-halfVoxelWidth[2],1+halfVoxelWidth[2]), col="red")
                lines(c(-halfVoxelWidth[1],1+halfVoxelWidth[1]), rep(voxelCentre[inPlaneAxes[2]],2), col="red")
            }
            # Plot the labels, if required
            if (labels)
            {
                # Order is left, right, bottom, top
                currentLabels <- c(negativeLabels[inPlaneAxes[1]], positiveLabels[inPlaneAxes[1]], negativeLabels[inPlaneAxes[2]], positiveLabels[inPlaneAxes[2]])
                text(c(0.1*width[1]+region[1],0.9*width[1]+region[1],0.5*width[2]+region[3],0.5*width[2]+region[3]), c(0.5*width[1]+region[1],0.5*width[1]+region[1],0.1*width[2]+region[3],0.9*width[2]+region[3]), labels=currentLabels)
            }
        }
        if (!interactive)
            break
        # Find the next point; locator() returns NULL when the user exits
        nextPoint <- locator(1)
        if (is.null(nextPoint))
            break
        # Coordinates are relative to the axial plot at this point
        # (elements 5:6 of starts/ends are the bounds of that third subview)
        nextPoint <- unlist(nextPoint)
        if (nextPoint[1] > ends[5] && nextPoint[2] <= ends[6])
            # Bottom-right quadrant is the info panel: ignore the click
            next
        else if (nextPoint[1] <= ends[5] && nextPoint[2] > ends[6])
        {
            # Top-left quadrant: first view (axes 2 and 3); remap the click
            # into that view's coordinates before updating the point
            adjustedPoint <- (nextPoint-c(starts[5],ends[6])) / (ends[5:6]-starts[5:6]) * (ends[1:2]-starts[1:2]) + starts[1:2]
            reorientedPoint[2:3] <- round(adjustedPoint * (dims[2:3] - 1)) + 1
        }
        else if (nextPoint[1] > ends[5] && nextPoint[2] > ends[6])
        {
            # Top-right quadrant: second view (axes 1 and 3)
            adjustedPoint <- (nextPoint-ends[5:6]) / (ends[5:6]-starts[5:6]) * (ends[3:4]-starts[3:4]) + starts[3:4]
            reorientedPoint[c(1,3)] <- round(adjustedPoint * (dims[c(1,3)] - 1)) + 1
        }
        else
            # Bottom-left quadrant: the axial view itself (axes 1 and 2)
            reorientedPoint[1:2] <- round(nextPoint * (dims[1:2] - 1)) + 1
    }
    invisible(NULL)
}
#' @rdname view
#' @export
lyr <- function (image, scale = "grey", min = NULL, max = NULL)
{
    # Capture the caller's expression as the default label, before `image`
    # is rebound below
    label <- deparse(substitute(image))
    image <- asNifti(image, internal=FALSE)
    if (inherits(image, "rgbArray"))
        # RGB images carry their own colours, so no palette or window applies
        colours <- window <- NULL
    else
    {
        # A single plain string names a predefined scale; anything else
        # (including an AsIs-wrapped string) is used as the palette directly
        if (is.character(scale) && length(scale) == 1 && !inherits(scale,"AsIs"))
            colours <- switch(scale, grey=gray(0:99/99), gray=gray(0:99/99), greyscale=gray(0:99/99), grayscale=gray(0:99/99), heat=heat.colors(100), rainbow=rainbow(100,start=0.7,end=0.1), unclass(shades::gradient(scale,100)))
        else
            colours <- unclass(scale)
        # Default the window to the calibration fields from the NIfTI header
        if (is.null(min))
            min <- image$cal_min
        if (is.null(max))
            max <- image$cal_max
        window <- c(min, max)
        # No usable window (NA or degenerate): fall back to the 1st/99th
        # percentiles of the finite data
        if (any(is.na(window)) || (min == max))
        {
            window <- quantile(image[is.finite(image)], c(0.01,0.99), na.rm=TRUE)
            # If the spread is large relative to the values, snap the bound
            # nearer zero to exactly zero
            if (diff(window) > abs(mean(window)))
                window[which.min(abs(window))] <- 0
            # Degenerate percentile window: use the full data range instead
            if (diff(window) == 0)
                window <- range(image, na.rm=TRUE)
            message("Setting window to (", signif(window[1],4), ", ", signif(window[2],4), ")")
        }
        # Clamp intensities into the window so the palette spans it exactly
        image[image < window[1]] <- window[1]
        image[image > window[2]] <- window[2]
    }
    return (structure(list(image=image, label=label, colours=colours, window=window), class="viewLayer"))
}
#' The built-in viewer's default info panel
#'
#' A default info panel for \code{\link{view}}, which shows the labels and
#' values of each image at the current point.
#'
#' @param point A numeric vector giving the current point location.
#' @param data A list of data values for each image at the current point.
#' Note that, for images of more than three dimensions, there will be more
#' than one value per image.
#' @param labels A character vector of image labels.
#'
#' @author Jon Clayden <code@@clayden.org>
#' @seealso \code{\link{view}}
#' @export
defaultInfoPanel <- function (point, data, labels)
{
    # Devices that use the Esc key (rather than a right click) to leave
    # locator() get different quitting instructions
    onEscDevice <- isTRUE(names(dev.cur()) %in% c("quartz","RStudioGD"))
    quitInstructions <- paste(if (onEscDevice) "Press Esc" else "Right click", "to leave interactive mode")
    # Empty plot acting as a text canvas; the title shows the current voxel
    plot(NA, xlim=c(0,1), ylim=c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", bty="n",
         main=paste0("Location: [", paste(point,collapse=","), "]"))
    # At most four images are shown; each takes a value line and a label line
    nImages <- min(4, length(labels))
    yLocs <- 0.95 - cumsum(c(0, rep(c(0.1,0.13), nImages)))
    yLocs[length(yLocs)] <- -0.05
    # Render a data value as a single display string
    formatValue <- function (value) {
        if (!is.numeric(value))
            value
        else if (length(value) == 1)
            as.character(signif(value, 6))
        else
            paste0(signif(value[1],6), ", ... (", length(value), " values)")
    }
    # Assemble the panel lines: instructions first, then value/label pairs
    panelLines <- quitInstructions
    for (i in seq_len(nImages))
        panelLines <- c(panelLines, formatValue(data[[i]]), labels[i])
    # Draw from top to bottom (hence rev), shrinking text that is too wide
    text(0.5, yLocs, rev(panelLines), col=c(rep(c("white","red"),nImages),"grey70"),
         cex=pmin(1, 1/strwidth(rev(panelLines))), xpd=TRUE)
}
| /R/viewer.R | no_license | saso008/RNifti | R | false | false | 14,350 | r | .plotLayer <- function (layer, loc, asp = NULL, add = FALSE)
{
# Extract the data for the appropriate plane
axis <- which(!is.na(loc))
indices <- alist(i=, j=, k=, t=1, u=1, v=1, w=1)
indices[axis] <- loc[axis]
data <- do.call("[", c(layer["image"], indices[seq_len(ndim(layer$image))], list(drop=FALSE)))
dims <- dim(data)[-c(axis,4:7)]
dim(data) <- dims
if (is.null(asp))
asp <- dims[2] / dims[1]
if (inherits(layer$image, "rgbArray"))
{
# RGB display is achieved by converting the data to an array of indices into a custom palette
data <- as.character(structure(data, class="rgbArray"))
palette <- unique(data)
indices <- match(data, palette)
dim(indices) <- dims
oldPars <- par(mai=c(0,0,0,0))
on.exit(par(oldPars))
image(indices, col=palette, axes=FALSE, asp=asp, add=add, zlim=c(1,length(palette)))
}
else
{
# Other data is shown using standard image(), but zeroes are set to NA to make them transparent
if (add)
data <- replace(data, which(data==0), NA)
oldPars <- par(mai=c(0,0,0,0), bg=layer$colours[1])
on.exit(par(oldPars))
image(data, col=layer$colours, axes=FALSE, asp=asp, add=add, zlim=layer$window)
}
}
#' A basic 3D image viewer
#'
#' This function displays one or more 2D or 3D images, with optional
#' click-to-navigate interactivity.
#'
#' @param ... One or more images, or \code{"viewLayer"} objects, to display.
#' @param point A numeric vector giving the location to initially centre the
#' view on. If crosshairs are in use, they will be placed at this point. For
#' 3D images, this parameter also determines the planes shown in each
#' subview.
#' @param radiological Logical value. If \code{TRUE}, images will be displayed
#' in the radiological convention whereby the left of the image is shown on
#' the right; otherwise left is on the left.
#' @param interactive Logical value. If \code{TRUE}, the user can navigate
#' around the image by repeatedly clicking on a new centre point; otherwise
#' the view is fixed.
#' @param crosshairs Logical value, indicating whether crosshairs should be
#' shown or not.
#' @param labels Logical value, indicating whether orientation labels should be
#' shown or not. Ignored (defaulting to \code{FALSE}) if the image is 2D or
#' orientation information is not available.
#' @param infoPanel A function of three arguments, which must produce a plot
#' for the information panel of the view. \code{\link{defaultInfoPanel}} is
#' the default, which shows the labels and values of each image at the
#' current point.
#' @param image The image being shown in this layer.
#' @param scale A character vector of colour values for the scale, or a single
#' string naming a predefined scale: \code{"grey"} or \code{"gray"} for
#' greyscale, \code{"heat"} for a heatmap, \code{"rainbow"} for a rainbow
#' scale, or any of the scales defined in the \code{shades} package (see
#' \code{?shades::gradient}, if that package is installed). Ignored for RGB
#' images.
#' @param min,max The window minimum and maximum for the layer, i.e., the black
#' and white points. These are ignored for RGB images. Otherwise, if
#' \code{NULL}, the default, they are taken from the \code{cal_min} or
#' \code{cal_max} NIfTI header fields. If either is \code{NA}, the image has
#' no window stored in its header, or the two values are equal, then the 1st
#' and 99th percentiles of the data are used, with values close to zero
#' rounded to that extreme.
#' @return \code{lyr} returns a list of class \code{"viewLayer"}, to be used
#' in a view. \code{view} is called for its side-effect of showing a view.
#'
#' @note Because of the way R's main run-loop interacts with graphics, it will
#' not be possible to issue further commands to the terminal while
#' interactive mode is enabled. Instructions for leaving this mode are shown
#' by the default info panel; see also \code{\link{locator}}, which is the
#' underlying core function.
#'
#' @examples
#' im <- readNifti(system.file("extdata", "example.nii.gz", package="RNifti"))
#' view(im, interactive=FALSE)
#'
#' @author Jon Clayden <code@@clayden.org>
#' @seealso \code{\link{defaultInfoPanel}}, \code{\link{orientation}},
#' \code{\link{locator}}
#' @export
view <- function (..., point = NULL, radiological = getOption("radiologicalView",FALSE), interactive = base::interactive(), crosshairs = TRUE, labels = TRUE, infoPanel = defaultInfoPanel)
{
# Get the layers to display, and the expressions used to generate them
layers <- list(...)
layerExpressions <- sub("^[\\s\"]*(.+?)[\\s\"]*$", "\\1", sapply(substitute(list(...)), deparse, control=NULL, nlines=1)[-1], perl=TRUE)
nLayers <- length(layers)
if (nLayers == 0)
stop("At least one image must be specified")
# In general, data will need reordering for visual consistency
originalXform <- NULL
orientation <- ifelse(radiological, "LAS", "RAS")
for (i in seq_len(nLayers))
{
# Images need to be converted to layer objects
if (!inherits(layers[[i]], "viewLayer"))
{
layers[[i]] <- lyr(layers[[i]])
layers[[i]]$label <- layerExpressions[i]
}
# The xform of the first image is used for indexing
if (i == 1)
originalXform <- xform(layers[[i]]$image)
# If the image has orientation information and isn't 2D, reorient it as necessary
header <- niftiHeader(layers[[i]]$image)
if (ndim(layers[[i]]$image) > 2 && (header$qform_code > 0 || header$sform_code > 0))
orientation(layers[[i]]$image) <- orientation
}
baseImage <- layers[[1]]$image
reorientedXform <- xform(baseImage)
ndim <- ndim(baseImage)
dims <- c(dim(baseImage), rep(1,max(0,3-ndim)))[1:3]
fov <- dims * c(pixdim(baseImage), rep(1,max(0,3-ndim)))[1:3]
# Don't show labels if the base image is 2D or has no meaningful xform information
if (ndim < 3L || attr(reorientedXform,"code") == 0L)
labels <- FALSE
# If no point is specified, use the origin if it's nontrivial, otherwise the centre of the image
if (is.null(point) && any(origin(originalXform) > 1))
point <- round(origin(originalXform))
else if (is.null(point))
point <- round(dims / 2)
# Work out the point location in viewer space
reorientedPoint <- round(worldToVoxel(voxelToWorld(point, originalXform), reorientedXform))
positiveLabels <- unlist(strsplit(orientation, ""))
negativeLabels <- c(R="L", A="P", S="I", L="R", P="A", I="S")[positiveLabels]
# Set some graphics parameters, and make sure they get reset
oldPars <- par(bg="black", col="white", fg="white", col.axis="white", col.lab="white", col.main="white")
oldOptions <- options(locatorBell=FALSE, preferRaster=TRUE)
on.exit({
par(oldPars)
options(oldOptions)
})
repeat
{
reorientedPoint[reorientedPoint < 1] <- 1
reorientedPoint[reorientedPoint > dims] <- dims[reorientedPoint > dims]
voxelCentre <- (reorientedPoint - 1) / (dims - 1)
# Work out the point location in source space
point <- round(worldToVoxel(voxelToWorld(reorientedPoint, reorientedXform), originalXform))
# The boundaries of each subview
starts <- ends <- numeric(0)
# Plot the info panel first so that we have some handle on the coordinate system when we use locator()
if (ndim == 2)
{
starts <- ends <- rep(0:1, 2)
layout(matrix(c(2,1), nrow=1))
}
else
layout(matrix(c(2,3,4,1), nrow=2, byrow=TRUE))
# For each layer, extract the data values corresponding to the current point, and pass them to the info panel function
data <- lapply(layers, function(layer) {
indices <- alist(i=, j=, k=, t=, u=, v=, w=)
indices[seq_along(point)] <- reorientedPoint
result <- do.call("[", c(list(layer$image), indices[seq_len(ndim(layer$image))]))
if (inherits(layer$image, "rgbArray"))
return (as.character(structure(result, dim=c(1,length(result)), class="rgbArray")))
else
return (result)
})
infoPanel(point, data, sapply(layers,"[[","label"))
for (i in 1:3)
{
# 2D images don't require three views
if (ndim == 2 && i < 3)
next
inPlaneAxes <- setdiff(1:3, i)
loc <- replace(rep(NA,3), i, reorientedPoint[i])
# Plot each layer for the current plane
for (j in seq_along(layers))
.plotLayer(layers[[j]], loc, asp=fov[inPlaneAxes[2]]/fov[inPlaneAxes[1]], add=(j>1))
# "Measure" the subview canvas
region <- par("usr")
starts <- c(starts, region[c(1,3)])
ends <- c(ends, region[c(2,4)])
width <- c(region[2]-region[1], region[4]-region[3])
# Plot the crosshairs, if required
if (crosshairs)
{
halfVoxelWidth <- 0.5 / (dims[inPlaneAxes] - 1)
lines(rep(voxelCentre[inPlaneAxes[1]],2), c(-halfVoxelWidth[2],1+halfVoxelWidth[2]), col="red")
lines(c(-halfVoxelWidth[1],1+halfVoxelWidth[1]), rep(voxelCentre[inPlaneAxes[2]],2), col="red")
}
# Plot the labels, if required
if (labels)
{
# Order is left, right, bottom, top
currentLabels <- c(negativeLabels[inPlaneAxes[1]], positiveLabels[inPlaneAxes[1]], negativeLabels[inPlaneAxes[2]], positiveLabels[inPlaneAxes[2]])
text(c(0.1*width[1]+region[1],0.9*width[1]+region[1],0.5*width[2]+region[3],0.5*width[2]+region[3]), c(0.5*width[1]+region[1],0.5*width[1]+region[1],0.1*width[2]+region[3],0.9*width[2]+region[3]), labels=currentLabels)
}
}
if (!interactive)
break
# Find the next point
nextPoint <- locator(1)
if (is.null(nextPoint))
break
# Coordinates are relative to the axial plot at this point
nextPoint <- unlist(nextPoint)
if (nextPoint[1] > ends[5] && nextPoint[2] <= ends[6])
next
else if (nextPoint[1] <= ends[5] && nextPoint[2] > ends[6])
{
adjustedPoint <- (nextPoint-c(starts[5],ends[6])) / (ends[5:6]-starts[5:6]) * (ends[1:2]-starts[1:2]) + starts[1:2]
reorientedPoint[2:3] <- round(adjustedPoint * (dims[2:3] - 1)) + 1
}
else if (nextPoint[1] > ends[5] && nextPoint[2] > ends[6])
{
adjustedPoint <- (nextPoint-ends[5:6]) / (ends[5:6]-starts[5:6]) * (ends[3:4]-starts[3:4]) + starts[3:4]
reorientedPoint[c(1,3)] <- round(adjustedPoint * (dims[c(1,3)] - 1)) + 1
}
else
reorientedPoint[1:2] <- round(nextPoint * (dims[1:2] - 1)) + 1
}
invisible(NULL)
}
#' @rdname view
#' @export
lyr <- function (image, scale = "grey", min = NULL, max = NULL)
{
label <- deparse(substitute(image))
image <- asNifti(image, internal=FALSE)
if (inherits(image, "rgbArray"))
colours <- window <- NULL
else
{
if (is.character(scale) && length(scale) == 1 && !inherits(scale,"AsIs"))
colours <- switch(scale, grey=gray(0:99/99), gray=gray(0:99/99), greyscale=gray(0:99/99), grayscale=gray(0:99/99), heat=heat.colors(100), rainbow=rainbow(100,start=0.7,end=0.1), unclass(shades::gradient(scale,100)))
else
colours <- unclass(scale)
if (is.null(min))
min <- image$cal_min
if (is.null(max))
max <- image$cal_max
window <- c(min, max)
if (any(is.na(window)) || (min == max))
{
window <- quantile(image[is.finite(image)], c(0.01,0.99), na.rm=TRUE)
if (diff(window) > abs(mean(window)))
window[which.min(abs(window))] <- 0
if (diff(window) == 0)
window <- range(image, na.rm=TRUE)
message("Setting window to (", signif(window[1],4), ", ", signif(window[2],4), ")")
}
image[image < window[1]] <- window[1]
image[image > window[2]] <- window[2]
}
return (structure(list(image=image, label=label, colours=colours, window=window), class="viewLayer"))
}
#' The built-in viewer's default info panel
#'
#' A default info panel for \code{\link{view}}, which shows the labels and
#' values of each image at the current point.
#'
#' @param point A numeric vector giving the current point location.
#' @param data A list of data values for each image at the current point.
#' Note that, for images of more than three dimensions, there will be more
#' than one value per image.
#' @param labels A character vector of image labels.
#'
#' @author Jon Clayden <code@@clayden.org>
#' @seealso \code{\link{view}}
#' @export
defaultInfoPanel <- function (point, data, labels)
{
escapeToQuit <- isTRUE(names(dev.cur()) %in% c("quartz","RStudioGD"))
quitInstructions <- paste(ifelse(escapeToQuit,"Press Esc","Right click"), "to leave interactive mode", sep=" ")
plot(NA, xlim=c(0,1), ylim=c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", bty="n", main=paste("Location: [", paste(point,collapse=","), "]", sep=""))
nImages <- min(4, length(labels))
yLocs <- 0.95 - cumsum(c(0,rep(c(0.1,0.13),nImages)))
yLocs[length(yLocs)] <- -0.05
text <- quitInstructions
for (i in seq_len(nImages))
{
text <- c(text, {
if (is.numeric(data[[i]]) && length(data[[i]]) == 1)
as.character(signif(data[[i]], 6))
else if (is.numeric(data[[i]]))
paste0(signif(data[[i]][1],6), ", ... (", length(data[[i]]), " values)")
else
data[[i]]
}, labels[i])
}
text(0.5, yLocs, rev(text), col=c(rep(c("white","red"),nImages),"grey70"), cex=pmin(1,1/strwidth(rev(text))), xpd=TRUE)
}
|
## The purpose of the following functions is to support the caching of
## the inverse of a matrix, which can be a costly calculation.
##
## makeCacheMatrix takes a matrix as its input (assumed to be an
## invertible N x N matrix) and returns a list of accessor closures:
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setinv(inverse) -- store a computed inverse in the cache
##   getinv()        -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
    inv <- NULL
    set <- function(y) {
        x <<- y
        inv <<- NULL  # new matrix, so any cached inverse is stale
    }
    get <- function() x
    # Parameter renamed from the misleading `mean` (a leftover from the
    # makeVector/cachemean template, and it shadowed the builtin mean()).
    setinv <- function(inverse) inv <<- inverse
    getinv <- function() inv
    list(set = set, get = get,
         setinv = setinv,
         getinv = getinv)
}
## cacheSolve returns the inverse of the special matrix object created by
## makeCacheMatrix. If the inverse has already been calculated, the cached
## value is returned (with a message) and the costly solve() call is
## skipped; otherwise the inverse is computed, cached on the object, and
## returned.
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    cached <- x$getinv()
    if (is.null(cached)) {
        ## Cache miss: compute the inverse and store it for next time
        result <- solve(x$get(), ...)
        x$setinv(result)
        result
    } else {
        ## Cache hit: reuse the previously computed inverse
        message("getting cached data")
        cached
    }
}
| /cachematrix.R | no_license | rtsuther/ProgrammingAssignment2 | R | false | false | 1,385 | r | ## The purpose of the following functions is to support the caching of
## of the inverse of a matrix, which can be a costly calculation.
##
## makeCacheMatrix takes a matrix as its input (assumed to be N x N [invertible])
## and creates a special list that makes available get and set functions for that matrix
## as well as get and set functions for the inverse of that matrix
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(mean) inv <<- mean
getinv <- function() inv
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## cacheSolve makes use of the special object created by makeCacheMatrix
## it checks to see if the inverse of the matrix has already been calculated
## in which case it avoids the costly computation and returns the cached
## value of the inverse. If there is no cached value, then it calculates
## the inverse, sets it in the object and returns the inverse matrix.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
## if inv is not null, then we just use the cached version
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
## otherwise, we compute the inverse
data <- x$get()
inv <- solve(data, ...)
x$setinv(inv)
inv
}
|
## ll.nb = l.nb;
| /R/alias.R | no_license | diystat/NBPSeq | R | false | false | 17 | r | ## ll.nb = l.nb;
|
\name{MONSTER-package}
\alias{MONSTER-package}
\alias{MONSTER}
\docType{package}
\title{
\packageTitle{MONSTER}
}
\description{
\packageDescription{MONSTER}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{MONSTER}
\packageIndices{MONSTER}
MONSTER takes in sequence motif data linking transcription factors (TFs) to genes and gene expression from two conditions. The goal is to generate bipartite networks from the gene expression data which quantify evidence of the regulatory roles of each of the TFs to each of the genes. Next, critical TFs are identified by computing a transition matrix, which maps the gene regulatory network in the first state to the gene regulatory network in the second state.
}
\author{
\packageAuthor{MONSTER}
Maintainer: \packageMaintainer{MONSTER}
}
\seealso{
\code{\link{monster}}
}
\keyword{ package }
| /man/MONSTER-package.Rd | no_license | QuackenbushLab/MONSTER | R | false | false | 842 | rd | \name{MONSTER-package}
\alias{MONSTER-package}
\alias{MONSTER}
\docType{package}
\title{
\packageTitle{MONSTER}
}
\description{
\packageDescription{MONSTER}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{MONSTER}
\packageIndices{MONSTER}
MONSTER takes in sequence motif data linking transcription factors (TFs) to genes and gene expression from two conditions. The goal is generate bipartite networks from the gene expression data which quantify evidence of the regulatory roles of each of the TFs to each of the genes. Next, critical TFs are identified by computing a transition matrix, which maps the gene regulatory network in the first state to the gene regulatory network in the second state.
}
\author{
\packageAuthor{MONSTER}
Maintainer: \packageMaintainer{MONSTER}
}
\seealso{
\code{\link{monster}}
}
\keyword{ package }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classroom_objects.R
\name{Attachment}
\alias{Attachment}
\title{Attachment Object}
\usage{
Attachment(driveFile = NULL, youTubeVideo = NULL, link = NULL,
form = NULL)
}
\arguments{
\item{driveFile}{Google Drive file attachment}
\item{youTubeVideo}{Youtube video attachment}
\item{link}{Link attachment}
\item{form}{Google Forms attachment}
}
\value{
Attachment object
}
\description{
Attachment Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Attachment added to student assignment work. When creating attachments, setting the `form` field is not supported.
}
| /googleclassroomv1.auto/man/Attachment.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 682 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classroom_objects.R
\name{Attachment}
\alias{Attachment}
\title{Attachment Object}
\usage{
Attachment(driveFile = NULL, youTubeVideo = NULL, link = NULL,
form = NULL)
}
\arguments{
\item{driveFile}{Google Drive file attachment}
\item{youTubeVideo}{Youtube video attachment}
\item{link}{Link attachment}
\item{form}{Google Forms attachment}
}
\value{
Attachment object
}
\description{
Attachment Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Attachment added to student assignment work. When creating attachments, setting the `form` field is not supported.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ch02.R
\name{m.svd}
\alias{m.svd}
\title{Replicate Matlab function svd including the diag(d) which is not
included in R svd()}
\usage{
m.svd(M)
}
\arguments{
\item{M}{a matrix}
}
\description{
Replicate the Matlab function svd, including the diagonal matrix diag(d),
which is not returned by R's svd()
}
| /man/m.svd.Rd | no_license | AlfonsoRReyes/RMatlabEDA | R | false | true | 367 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ch02.R
\name{m.svd}
\alias{m.svd}
\title{Replicate Matlab function svd including the diag(d) which is not
included in R svd()}
\usage{
m.svd(M)
}
\arguments{
\item{M}{a matrix}
}
\description{
Replicate the Matlab function svd, including the diagonal matrix diag(d),
which is not returned by R's svd()
}
|
# Airbnb NYC listings: linear regressions of listing price.
# NOTE(review): this chunk calls read_csv(), %>%, split() and map() but has no
# library() calls of its own -- it assumes readr, magrittr/dplyr and purrr are
# attached by an earlier chunk.  Confirm against the surrounding project.
data <- read_csv("./Data/Manipulated_Data/listings_prepared_for_regression.csv")
# regression for price
# Commented-out "+ predictor" lines below are candidate terms that were tried
# and excluded from the final city-wide model.
lm(formula = price ~
demand
# + number_of_reviews
+ reviews_per_month
# + distance_from_subway_station
+ distance_from_bus_stop
+ n_cult_orgs
+ shooting_number
# + calls_911_number
# + negative
# + neutral
# + positive
# + days_last_rev
+ entire_home_apt
# + distance_from_transportation
+ negative_prop
# + neutral_prop
# + positive_prop
, data = data) %>%
summary()
# Fit the full model (ALL candidate predictors) separately for each
# neighbourhood group (borough) and report each model's R-squared.
data %>%
split(.$neighbourhood_group) %>%
map(function(x) lm(formula = price ~
demand
+ number_of_reviews
+ reviews_per_month
+ distance_from_subway_station
+ distance_from_bus_stop
+ n_cult_orgs
+ shooting_number
+ calls_911_number
+ negative
+ neutral
+ positive
+ days_last_rev
+ entire_home_apt
+ distance_from_transportation
+ negative_prop
+ neutral_prop
+ positive_prop
, data = x)) %>%
map(summary) %>%
map(~.$r.squared)
# regression for price
lm(formula = price ~
demand
# + number_of_reviews
+ reviews_per_month
# + distance_from_subway_station
+ distance_from_bus_stop
+ n_cult_orgs
+ shooting_number
# + calls_911_number
# + negative
# + neutral
# + positive
# + days_last_rev
+ entire_home_apt
# + distance_from_transportation
+ negative_prop
# + neutral_prop
# + positive_prop
, data = data) %>%
summary()
data %>%
split(.$neighbourhood_group) %>%
map(function(x) lm(formula = price ~
demand
+ number_of_reviews
+ reviews_per_month
+ distance_from_subway_station
+ distance_from_bus_stop
+ n_cult_orgs
+ shooting_number
+ calls_911_number
+ negative
+ neutral
+ positive
+ days_last_rev
+ entire_home_apt
+ distance_from_transportation
+ negative_prop
+ neutral_prop
+ positive_prop
, data = x)) %>%
map(summary) %>%
map(~.$r.squared)
|
\name{aov_all_vars}
\alias{aov_all_vars}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Analysis of variance
}
\description{
Perform analysis of variance of all variables in the dataset.
}
\usage{
aov_all_vars(dataset, column.class, doTukey = T, write.file = F,
file.out = "anova-res.csv")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dataset}{
list representing the dataset from a metabolomics experiment.
}
\item{column.class}{
string or index indicating what metadata to use.
}
\item{doTukey}{
Boolean value indicating whether or not to perform the TukeyHSD post-hoc test.
}
\item{write.file}{
boolean value indicating if a file with the results is written or not.
}
\item{file.out}{
name of the file if write.file is TRUE.
}
}
\value{
Data frame with the results of ANOVA: p-value, logarithm of the p-value, false discovery rate (fdr), and the Tukey results if doTukey is TRUE. The result is ordered by p-value.
}
\examples{
## Example of ANOVA with TukeyHSD
data(cassavaPPD)
cassavaPPD = flat_pattern_filter(cassavaPPD, "iqr", by.percent = TRUE,
red.value = 75)
result = aov_all_vars(cassavaPPD, "varieties", doTukey = FALSE)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ anova }
\keyword{ tukey }% __ONLY ONE__ keyword per line
| /man/aov_all_vars.Rd | no_license | Neal050617/specmine | R | false | false | 1,349 | rd | \name{aov_all_vars}
\alias{aov_all_vars}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Analysis of variance
}
\description{
Perform analysis of variance of all variables in the dataset.
}
\usage{
aov_all_vars(dataset, column.class, doTukey = T, write.file = F,
file.out = "anova-res.csv")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dataset}{
list representing the dataset from a metabolomics experiment.
}
\item{column.class}{
string or index indicating what metadata to use.
}
\item{doTukey}{
Boolean value indicating whether or not to perform the TukeyHSD post-hoc test.
}
\item{write.file}{
boolean value indicating if a file with the results is written or not.
}
\item{file.out}{
name of the file if write.file is TRUE.
}
}
\value{
Data frame with the results of ANOVA: p-value, logarithm of the p-value, false discovery rate (fdr), and the Tukey results if doTukey is TRUE. The result is ordered by p-value.
}
\examples{
## Example of ANOVA with TukeyHSD
data(cassavaPPD)
cassavaPPD = flat_pattern_filter(cassavaPPD, "iqr", by.percent = TRUE,
red.value = 75)
result = aov_all_vars(cassavaPPD, "varieties", doTukey = FALSE)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ anova }
\keyword{ tukey }% __ONLY ONE__ keyword per line
|
# Logistic regression on the Pima Indians diabetes data (MASS::Pima.tr / Pima.te).
library(ISLR)
library(glmnet)
library(ggplot2)
library(glmnet)   # NOTE(review): duplicate of the call two lines above; harmless
library(faraway)
library(MASS)
library(GGally)
datos <- Pima.tr
# a) preliminary analysis: pairwise plots / correlations between variables
ggpairs(datos)
# b) fit the logistic regression: diabetes status ~ all other columns
reg <- glm(type ~ ., data = datos, family = binomial)
summary(reg)
# c) Wald 95% confidence intervals for the coefficient estimates
IC <- matrix(0, ncol = 3, nrow = 8)
for (i in 1:8)
{
  IC[i, ] <- c(summary(reg)$coef[i, 1] - qnorm(0.975) * summary(reg)$coef[i, 2],
               summary(reg)$coef[i, 1],
               summary(reg)$coef[i, 1] + qnorm(0.975) * summary(reg)$coef[i, 2])
}
colnames(IC) <- c("LI", "beta", "LS")
rownames(IC) <- c("beta 0", "beta 1", "beta 2", "beta 3",
                  "beta 4", "beta 5", "beta 6", "beta 7")
IC
# d) confusion matrix on the training data (0.5 probability threshold)
prediccion <- predict(reg, type = "response")
predichos <- rep("No", length(prediccion))
predichos[prediccion > 0.5] <- "Yes"
confusion <- table(predichos, datos$type)
confusion
aciertos <- (confusion[1, 1] + confusion[2, 2]) / length(predichos)
mean(predichos == datos$type)
# e) confusion matrix on the test set, computing the linear predictor by hand
test <- Pima.te
# prediccion <- predict(reg, newdata = test, type = "response")
exponente <- reg$coefficients[1] + reg$coefficients[2:8] %*% t(test[, 1:7])
prediction <- 1 / (1 + exp(-exponente))
treshold <- 0.5
predichos <- rep("No", length(prediction))
predichos[prediction > treshold] <- "Yes"
observados <- test$type
confusion <- table(predichos, observados)
confusion
aciertos <- 100 * (confusion[1, 1] + confusion[2, 2]) / length(predichos)
aciertos
mean(predichos == test$type)
# f) predict the probability for a new individual ("Poppers")
# BUGFIX: reg$coefficients has 8 entries (intercept + 7 predictors) but x held
# only the 7 predictor values, so reg$coefficients %*% x failed with
# "non-conformable arguments".  Prepend a 1 for the intercept term.
x <- c(1, 2, 100, 70, 20, 26, 0.24, 30)
expo <- reg$coefficients %*% x
y_pred <- 1 / (1 + exp(-expo))
# Same prediction via predict(): newdata must carry the 7 predictor columns.
# BUGFIX: the original data.frame had 8 values but only 7 column names were
# assigned, so colnames()<- raised an error; the stray "Yes" (a response
# value, not a predictor) is removed.
xx <- data.frame(2, 100, 70, 20, 26, 0.24, 30)
colnames(xx) <- c("npreg", "glu", "bp", "skin", "bmi", "ped", "age")
yy_pred <- predict(reg, newdata = xx, type = "response")
# g
| /G3/pima.R | no_license | jgonzet/Statistical-Learning-in-R | R | false | false | 1,860 | r | library(ISLR)
library(glmnet)
library(ggplot2)
library(glmnet)
library(faraway)
library(MASS)
library(GGally)
datos<-Pima.tr
#a) analisis preliminar: correlacion entre variables
ggpairs(datos)
#b) ajuste de regresion logistica:
reg<-glm(type~.,data=datos,family=binomial)
summary(reg)
#c intervalo de confianza para los estimadores de Betas:
IC<-matrix(0,ncol=3,nrow=8)
for(i in 1:8)
{
IC[i,]<-c(summary(reg)$coef[i,1]-qnorm(0.975)*summary(reg)$coef[i,2],summary(reg)$coef[i,1],
summary(reg)$coef[i,1]+qnorm(0.975)*summary(reg)$coef[i,2])
}
colnames(IC)<-c("LI","beta","LS")
rownames(IC)<-c("beta 0", "beta 1", "beta 2", "beta 3",
"beta 4", "beta 5", "beta 6", "beta 7")
IC
#d) Tabla de confucius:
prediccion<-predict(reg, type="response")
predichos = rep("No",length(prediccion))
predichos[prediccion>0.5]="Yes"
confusion<-table(predichos,datos$type)
confusion
aciertos<-(confusion[1,1]+confusion[2,2])/length(predichos)
mean(predichos==datos$type)
#e: tabla de confucius para muestras de test: lo voy a hacer manualmente.
test<-Pima.te
#prediccion<-predict(reg,newdata=test, type="response")
exponente<-reg$coefficients[1]+reg$coefficients[2:8]%*%t(test[,1:7])
prediction<-1/(1+exp(-exponente))
treshold<-0.5
predichos = rep("No",length(prediction))
predichos[prediction>treshold]="Yes"
observados<-test$type
confusion<-table(predichos,observados)
confusion
aciertos<-100*(confusion[1,1]+confusion[2,2])/length(predichos)
aciertos
mean(predichos==test$type)
# f) predict the probability for a new individual ("Poppers")
# BUGFIX: reg$coefficients has 8 entries (intercept + 7 predictors) but x held
# only the 7 predictor values, so reg$coefficients %*% x failed with
# "non-conformable arguments".  Prepend a 1 for the intercept term.
x <- c(1, 2, 100, 70, 20, 26, 0.24, 30)
expo <- reg$coefficients %*% x
y_pred <- 1 / (1 + exp(-expo))
# Same prediction via predict(): newdata must carry the 7 predictor columns.
# BUGFIX: the original data.frame had 8 values but only 7 column names were
# assigned, so colnames()<- raised an error; the stray "Yes" response value
# is removed.
xx <- data.frame(2, 100, 70, 20, 26, 0.24, 30)
colnames(xx) <- c("npreg", "glu", "bp", "skin", "bmi", "ped", "age")
yy_pred <- predict(reg, newdata = xx, type = "response")
# g
|
##################### Exploratory Data Analysis ##############################################
# Data exploration, outlier handling, missing-value imputation and a 70/30
# train/test split for the credit-scoring data set.
# NOTE(review): requires dplyr and caret; setwd() with a hard-coded Windows
# path makes the script non-portable.
setwd("C:\\Users\\Home\\Desktop\\Dataset\\data sheets")
cr <- read.csv("Credit.csv", na.strings = c("", NA))
library(dplyr)         # data manipulation
options(scipen = 999)  # switch off scientific notation when printing numbers
## Data exploration plan for the Credit data set:
##  - sanity check
##  - identify outliers and replace them
##  - impute missing values
##  - bin data using quantile() / ntile()
##  - partition the data into train and test
names(cr)
# Remove duplicated columns (they carry the same information):
#   column 1:  NPA STATUS       (duplicate of Good_Bad)
#   column 12: MonthlyIncome.1  (duplicate of MonthlyIncome)
cr <- cr[, -c(1, 12)]
names(cr)
# Sanity check:
#   quantitative (numeric)    -> five-number summary (min, max, mean, median, quartiles, NA's)
#   qualitative (categorical) -> frequency distribution
summary(cr)
# Missing-value treatment.  Good_Bad is the target (DV); all other columns are
# predictors (IDV).  Imputing missing values of the TARGET is a bad idea, so
# rows with a missing Good_Bad are dropped instead.
index <- which(is.na(cr$Good_Bad))
index
cr <- cr[-index, ]
cr
summary(cr)  # no NA's left in the Good_Bad column
# Looking at individual variables:
summary(cr$RevolvingUtilizationOfUnsecuredLines)
cr %>% filter(RevolvingUtilizationOfUnsecuredLines == 0) %>% nrow()     # 10878 rows with value 0
cr %>% filter(RevolvingUtilizationOfUnsecuredLines >= 0.99) %>% nrow()  # 14383 rows at/above 0.99
# Percentile breakup:
quantile(cr$RevolvingUtilizationOfUnsecuredLines, p = c(1:100) / 100)
# Agreed with the client: 2 is the acceptable upper limit for this ratio.
cr %>% filter(RevolvingUtilizationOfUnsecuredLines <= 2) %>% nrow()
cr %>% filter(RevolvingUtilizationOfUnsecuredLines <= 2) -> cr
summary(cr$age)
cr %>% filter(age == 0) %>% nrow()  # one card holder with age 0 -- a data-entry mistake
quantile(cr$age, p = (1:100) / 100)
cr %>% filter(age != 0) -> cr
summary(cr$Gender)
summary(cr)
############# Missing-value treatment for continuous and categorical variables ####################
# Impute missing values for the categorical variable:
unique(cr$NumberOfTime30.59DaysPastDueNotWorse)
table1 <- table(cr$NumberOfTime30.59DaysPastDueNotWorse, cr$Good_Bad)
bad_rate <- table1[, 1] / rowSums(table1)
ind2 <- which(is.na(cr$NumberOfTime30.59DaysPastDueNotWorse))
table(cr$Good_Bad[ind2]) / length(ind2)
# Impute category 6 (chosen from the bad-rate comparison above):
cr$NumberOfTime30.59DaysPastDueNotWorse[ind2] <- 6
summary(cr$NumberOfTime30.59DaysPastDueNotWorse)
# Impute missing values for the continuous variable (deciles via ntile()):
summary(cr$MonthlyIncome)
library(dplyr)  # for ntile()
cr %>% mutate(quantile = ntile(MonthlyIncome, 10)) %>% group_by(Good_Bad, quantile) %>% summarize(N = n()) %>% filter(Good_Bad == "Bad") -> dat
cr %>% mutate(quantile = ntile(MonthlyIncome, 10)) %>% group_by(quantile) %>% summarize(N = n()) -> dat1
dat$Percentage <- dat$N / dat1$N
# Replace missing income with the 8th decile (~9200):
quantile(cr$MonthlyIncome, p = (0:10) / 10, na.rm = TRUE)
cr$MonthlyIncome[is.na(cr$MonthlyIncome)] <- 9200
summary(cr$MonthlyIncome)
####################### Splitting data into train and test using random sampling ######################################
# Two alternative approaches:
# 1. Base R random sampling (seed fixed for reproducibility):
set.seed(100)
indexP <- sample(1:nrow(cr), 0.70 * nrow(cr), replace = FALSE)
train_cr <- cr[indexP, ]
test_cr <- cr[-indexP, ]
# 2. Stratified partition via caret (preserves the Good/Bad class ratio):
library(caret)
indexPC <- createDataPartition(y = cr$Good_Bad, times = 1, p = 0.70, list = FALSE)  # BUGFIX: was CreateDataPartition (R is case-sensitive)
train_cr <- cr[indexPC, ]
test_cr <- cr[-indexPC, ]
table(train_cr$Good_Bad) / nrow(train_cr)  # BUGFIX: was nrow(train_crC), an undefined object
table(test_cr$Good_Bad) / nrow(test_cr)    # BUGFIX: was nrow(test_crC)
| /Explorartory Data Analysis (EDA).R | no_license | Yogi5693/Exploratory-Data-Analysis-EDA- | R | false | false | 3,792 | r | ##################### Exploratory Data Analysis ##############################################
# Data Exploratory
# Data Preparation
# Spliting Data into Train nd Test data using Random sampling
setwd("C:\\Users\\Home\\Desktop\\Dataset\\data sheets")
cr<-read.csv("Credit.csv",na.strings = c("",NA))
library(dplyr) #for Manuplation
options(scipen=999) #switch off Scintific Notation in terms of number Notation
## Data Exploration using Credit Data Set
#Sanity check
#Identifying Outiliars ,Replace them
#Impute Missing Values
#Bin Data Using -Quantile function,ntile() foir binning
# Partioning Data into Train nd Test
names(cr)
#Removing Duplicate columns :Giving Same Information
#column 1: NPA STATUS (Good_Bad)
#COlumn 12 :MonthlyIncome.1 (MonthlyIncome)
cr<-cr[,-c(1,12)] # [,column oprn]
names(cr)
#Sanity check.
#Quantitative (Numeric):5points summary (Min,max,mean,median,quantile,NA's)
#Qualitative(catgorical) :Finding Freq Distribution
summary(cr)
#Missing Value Treatment.
#DV/Target Variable :Good/Bad
#IDV :except good/Bad column
#Its Bad idea to impute Missing values for DV :Better to Negalt it/Delete it.
index<-which(is.na(cr$Good_Bad)) #Which column as Missing Values
index
cr<-cr[-index,] # [Row oprn ,]
cr
summary(cr) # NO NA'S in Good_Bad column
#Looking Individual Variables :
summary(cr$RevolvingUtilizationOfUnsecuredLines)
cr%>%filter(RevolvingUtilizationOfUnsecuredLines==0)%>%nrow() #10878 having 0 values
cr%>%filter(RevolvingUtilizationOfUnsecuredLines>=0.99)%>%nrow()#14383 having equal/Greater den 0.99
#Percentile BREAKUP (quantile function used)
quantile(cr$RevolvingUtilizationOfUnsecuredLines,p=c(1:100)/100)
#Discus vit client ,2 is limit.
cr%>%filter(RevolvingUtilizationOfUnsecuredLines<=2)%>%nrow()
cr%>%filter(RevolvingUtilizationOfUnsecuredLines<=2)->cr
summary(cr$age)
cr%>%filter(age==0)%>%nrow() #only one person having zero age stil having credt card(Its Data entry Mistake)
quantile(cr$age,p=(1:100)/100)
cr%>%filter(age!=0)->cr
summary(cr$Gender)
summary(cr)
#############Missing Value Treatment for Contionoues nd Catgorical Variable:####################
#Imputing Missing values for Catgorical Variable
unique(cr$NumberOfTime30.59DaysPastDueNotWorse)
table1<-table(cr$NumberOfTime30.59DaysPastDueNotWorse,cr$Good_Bad)
bad_rate<-table1[,1]/rowSums(table1)
ind2<-which(is.na(cr$NumberOfTime30.59DaysPastDueNotWorse))
table(cr$Good_Bad[ind2])/length(ind2)
cr$NumberOfTime30.59DaysPastDueNotWorse[ind2]<-6
summary(cr$NumberOfTime30.59DaysPastDueNotWorse)
#Imputing Missing Values for Continuoues Variable (By creating decile using ntile function)
summary(cr$MonthlyIncome)
library(dplyr) # to use ntile function
cr%>%mutate(quantile=ntile(MonthlyIncome,10))%>%group_by(Good_Bad,quantile)%>%summarize(N=n())%>%filter(Good_Bad=="Bad")->dat
cr%>%mutate(quantile=ntile(MonthlyIncome,10))%>%group_by(quantile)%>%summarize(N=n())->dat1
dat$Percentage<-dat$N/dat1$N
#Replace with 8 quantile
quantile(cr$MonthlyIncome,p=(0:10)/10,na.rm=T)
cr$MonthlyIncome[is.na(cr$MonthlyIncome)]<-9200
summary(cr$MonthlyIncome)
####################### Splitting data into train and test using random sampling ######################################
# Two alternative ways to create a 70/30 train/test split:
# 1. base R random sampling; 2. stratified partition via caret.
# 1. Base R random sampling (seed fixed for reproducibility):
set.seed(100)
indexP <- sample(1:nrow(cr), 0.70 * nrow(cr), replace = FALSE)
train_cr <- cr[indexP, ]
test_cr <- cr[-indexP, ]
## 2. Stratified partition via caret (preserves the Good/Bad class ratio):
library(caret)
indexPC <- createDataPartition(y = cr$Good_Bad, times = 1, p = 0.70, list = FALSE)  # BUGFIX: was CreateDataPartition (R is case-sensitive)
train_cr <- cr[indexPC, ]
test_cr <- cr[-indexPC, ]
table(train_cr$Good_Bad) / nrow(train_cr)  # BUGFIX: was nrow(train_crC), an undefined object
table(test_cr$Good_Bad) / nrow(test_cr)    # BUGFIX: was nrow(test_crC)
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/AmStockChart.R
\docType{methods}
\name{setStockEventsSettings}
\alias{setStockEventsSettings}
\title{Setter for StockEventsSettings}
\usage{
\S4method{setStockEventsSettings}{AmStockChart}(.Object, ...)
}
\arguments{
\item{\code{.Object}:}{Object of class \code{\linkS4class{AmStockChart}}.}
\item{\code{...}:}{Properties of \code{StockEventsSettings}.}
}
\value{
The updated object of class \code{\linkS4class{AmStockChart}}.
}
\description{
Setter for StockEventsSettings
}
\details{
Use this method in the case of an AmStockChart.
}
\examples{
library(pipeR)
amStockChart() \%>>\% setStockEventsSettings(backgroundAlpha = 1)
}
\seealso{
\code{\linkS4class{AmStockChart}} S4 class
Other AmStockChart methods: \code{\link{addComparedDataSet}};
\code{\link{addDataSet}}; \code{\link{addPanel}};
\code{\link{setCategoryAxesSettings}};
\code{\link{setChartCursorSettings}};
\code{\link{setChartScrollbarSettings}};
\code{\link{setComparedDataSets}};
\code{\link{setDataSetSelector}};
\code{\link{setDataSets}};
\code{\link{setLegendSettings}};
\code{\link{setMainDataSet}};
\code{\link{setPanelsSettings}}; \code{\link{setPanels}};
\code{\link{setPeriodSelector}};
\code{\link{setValueAxesSettings}}
Other AmStockChart setters: \code{\link{addComparedDataSet}};
\code{\link{addDataSet}}; \code{\link{addPanel}};
\code{\link{setCategoryAxesSettings}};
\code{\link{setChartCursorSettings}};
\code{\link{setChartScrollbarSettings}};
\code{\link{setComparedDataSets}};
\code{\link{setDataSetSelector}};
\code{\link{setDataSets}};
\code{\link{setLegendSettings}};
\code{\link{setMainDataSet}};
\code{\link{setPanelsSettings}}; \code{\link{setPanels}};
\code{\link{setPeriodSelector}};
\code{\link{setValueAxesSettings}}
}
| /man/setStockEventsSettings.Rd | no_license | myndworkz/rAmCharts | R | false | false | 1,850 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/AmStockChart.R
\docType{methods}
\name{setStockEventsSettings}
\alias{setStockEventsSettings}
\title{Setter for StockEventsSettings}
\usage{
\S4method{setStockEventsSettings}{AmStockChart}(.Object, ...)
}
\arguments{
\item{\code{.Object}:}{Object of class \code{\linkS4class{AmStockChart}}.}
\item{\code{...}:}{Properties of \code{StockEventsSettings}.}
}
\value{
The updated object of class \code{\linkS4class{AmStockChart}}.
}
\description{
Setter for StockEventsSettings
}
\details{
Use this method in the case of an AmStockChart.
}
\examples{
library(pipeR)
amStockChart() \%>>\% setStockEventsSettings(backgroundAlpha = 1)
}
\seealso{
\code{\linkS4class{AmStockChart}} S4 class
Other AmStockChart methods: \code{\link{addComparedDataSet}};
\code{\link{addDataSet}}; \code{\link{addPanel}};
\code{\link{setCategoryAxesSettings}};
\code{\link{setChartCursorSettings}};
\code{\link{setChartScrollbarSettings}};
\code{\link{setComparedDataSets}};
\code{\link{setDataSetSelector}};
\code{\link{setDataSets}};
\code{\link{setLegendSettings}};
\code{\link{setMainDataSet}};
\code{\link{setPanelsSettings}}; \code{\link{setPanels}};
\code{\link{setPeriodSelector}};
\code{\link{setValueAxesSettings}}
Other AmStockChart setters: \code{\link{addComparedDataSet}};
\code{\link{addDataSet}}; \code{\link{addPanel}};
\code{\link{setCategoryAxesSettings}};
\code{\link{setChartCursorSettings}};
\code{\link{setChartScrollbarSettings}};
\code{\link{setComparedDataSets}};
\code{\link{setDataSetSelector}};
\code{\link{setDataSets}};
\code{\link{setLegendSettings}};
\code{\link{setMainDataSet}};
\code{\link{setPanelsSettings}}; \code{\link{setPanels}};
\code{\link{setPeriodSelector}};
\code{\link{setValueAxesSettings}}
}
|
####SHRAVYA
# Shiny app: choropleth of South Bend 2010 census districts.  The user picks a
# race layer and clicks a district to see that group's population in a popup.
# NOTE(review): requires the shiny, rgdal and leaflet packages and the
# '2010_CensusData' shapefile in the working directory.  rgdal is retired;
# consider migrating readOGR() to sf::st_read().
library(shiny)
library(rgdal)
library(leaflet)
# Census polygons; columns SE_T054_01..07 hold total and per-group populations.
s1 <- readOGR(dsn = '2010_CensusData', "2010_CensusData", stringsAsFactors = FALSE)
# Continuous fill scale driven by column SE_T002_01.
pal <- colorNumeric(palette = "viridis",
                    domain = s1$SE_T002_01)
# UI: a title plus one large map output.
ui <- fluidPage(
  titlePanel("Choose a race and click on a census district to see the race's population within the district"),
  column(8, leafletOutput("map", height = "800px")))
# Server: build the leaflet map with one polygon layer per population group.
server <- function(input, output) {
  # Leaflet map: population by district by race.
  output$map <- renderLeaflet({
    m <- leaflet() %>%
      addTiles() %>%
      # Plain boundary overlay (no popup).
      addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35,
                  color = ~pal(SE_T002_01), group = "South Bend Boundaries") %>%
      # One base layer per group; the popup shows that group's population.
      addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35,
                  color = ~pal(SE_T002_01), popup = ~SE_T054_01, group = "Total Population") %>%
      addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35,
                  color = ~pal(SE_T002_01), popup = ~SE_T054_02, group = "White/Caucasian") %>%
      addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35,
                  color = ~pal(SE_T002_01), popup = ~SE_T054_03, group = "Black/African American") %>%
      addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35,
                  color = ~pal(SE_T002_01), popup = ~SE_T054_04, group = "American Indian/Alaska Native") %>%
      addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35,
                  color = ~pal(SE_T002_01), popup = ~SE_T054_05, group = "Asian") %>%
      addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35,
                  color = ~pal(SE_T002_01), popup = ~SE_T054_06, group = "Native Hawaiian/Pacific Islander") %>%
      addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35,
                  color = ~pal(SE_T002_01), popup = ~SE_T054_07, group = "Other") %>%
      addLayersControl(
        overlayGroups = c("South Bend Boundaries"),
        baseGroups = c("Total Population", "White/Caucasian", "Black/African American",
                       "American Indian/Alaska Native", "Asian",
                       "Native Hawaiian/Pacific Islander", "Other"),
        options = layersControlOptions(collapsed = TRUE)  # was collapsed = T; TRUE/FALSE are safer than T/F
      )
  })
}
# Run the application
shinyApp(ui = ui, server = server)
| /Shravya/app .R | no_license | sbaylor02/Data_Vis_Project | R | false | false | 2,292 | r | ####SHRAVYA
library(shiny)
library(rgdal)
library(leaflet)
s1 <- readOGR(dsn = '2010_CensusData', "2010_CensusData", stringsAsFactors = FALSE)
pal <- colorNumeric(palette = "viridis",
domain = s1$SE_T002_01)
# Define UI for application that draws a histogram
ui <- fluidPage(
titlePanel("Choose a race and click on a census district to see the race's population within the district"),
column(8,leafletOutput("map", height="800px")))
# Define server logic required to draw a histogram
server <- function(input, output) {
# Leaflet map: population by district by race
output$map <- renderLeaflet({
m <- leaflet() %>%
addTiles() %>%addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35, color = ~pal(SE_T002_01), group= "South Bend Boundaries")%>%addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35, color = ~pal(SE_T002_01), popup = ~SE_T054_01, group= "Total Population")%>%addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35, color = ~pal(SE_T002_01), popup = ~SE_T054_02, group= "White/Caucasian") %>% addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35, color = ~pal(SE_T002_01), popup = ~SE_T054_03, group= "Black/African American")%>%addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35, color = ~pal(SE_T002_01), popup = ~SE_T054_04, group= "American Indian/Alaska Native") %>%addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35, color = ~pal(SE_T002_01), popup = ~SE_T054_05, group= "Asian")%>%addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35, color = ~pal(SE_T002_01), popup = ~SE_T054_06, group= "Native Hawaiian/Pacific Islander")%>%
addPolygons(data = s1, stroke = TRUE, smoothFactor = 0.2, fillOpacity = .35, color = ~pal(SE_T002_01), popup = ~SE_T054_07, group= "Other")%>%addLayersControl(
overlayGroups = c("South Bend Boundaries"),
baseGroups = c("Total Population", "White/Caucasian", "Black/African American", "American Indian/Alaska Native", "Asian", "Native Hawaiian/Pacific Islander", "Other"),
options = layersControlOptions(collapsed = T)
)
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
# the function:
# Build an est_dfe / DFE-alpha style input file: site frequency spectra (SFS)
# for selected and neutral sites, derived from SLiM-style sample output.
#
# Args:
#   poly.dat          - data frame of segregating mutations (needs columns
#                       mut.ID, mut.type, mut.prev); m1 = neutral coding site,
#                       m2/m3 = deleterious, m4 = beneficial.
#   genome.dat        - data frame of sampled genomes (column 2 = space-separated
#                       mutation IDs); only read when use.manual.sample = TRUE.
#   fixed.dat         - data frame of fixed mutations (needs mut.ID, mut.type,
#                       gen.fixed).
#   generation        - generation of the sample; later fixations are ignored.
#   num.inds.sampled  - number of DIPLOID individuals sampled.
#   genome.size       - total number of sites; assumed 25% neutral / 75% selected.
#   filename          - output file name ("Unfolded"/"Folded" is prepended).
#   fold              - fold the SFS about the minor-allele frequency?
#   use.manual.sample - recount allele frequencies from genome.dat instead of
#                       trusting the mut.prev column (for manual subsamples).
#
# Side effect: writes the SFS file to the working directory; returns nothing useful.
make.est_dfe.input <- function(poly.dat, genome.dat, fixed.dat, generation,
                               num.inds.sampled, genome.size, filename,
                               fold = FALSE, use.manual.sample = FALSE) {
  # Diploid individuals -> twice as many sampled chromosomes.
  sample.size <- 2 * num.inds.sampled
  # Fixations at or before the sampled generation count as frequency ==
  # sample.size entries in the spectra.
  fixed.mut.dat <- fixed.dat[fixed.dat$gen.fixed <= as.numeric(generation), ]
  # BUGFIX: the original used mut.ID[-which(mut.type == "m1")], which returns
  # an EMPTY vector (not "everything else") when which() is empty -- i.e. when
  # there were selected fixations but no neutral ones, ALL selected fixations
  # were silently dropped.  Logical indexing avoids that edge case.
  is.neut.fixed <- fixed.mut.dat$mut.type == "m1"
  fixed.neut.mut.IDs <- fixed.mut.dat$mut.ID[is.neut.fixed]
  fixed.seln.mut.IDs <- fixed.mut.dat$mut.ID[!is.neut.fixed]
  num.neut.muts.fixed <- length(fixed.neut.mut.IDs)
  num.seln.muts.fixed <- length(fixed.seln.mut.IDs)
  # Split the segregating mutations into neutral and selected classes.
  neut.muts <- poly.dat[poly.dat$mut.type == "m1", ]
  seln.muts <- poly.dat[poly.dat$mut.type != "m1", ]
  if (use.manual.sample == FALSE) {
    # Allele counts come directly from the mut.prev column.
    sfs.total <- table(poly.dat$mut.prev)
    sfs.neut <- table(neut.muts$mut.prev)
    sfs.seln <- table(seln.muts$mut.prev)
    # Add fixed mutations into the sample.size (fixed) bin of each spectrum,
    # creating the bin when no segregating mutation reached that frequency.
    if (is.na(sfs.total[as.character(sample.size)])) {
      sfs.total[as.character(sample.size)] <- num.neut.muts.fixed + num.seln.muts.fixed
    } else {
      sfs.total[as.character(sample.size)] <- sfs.total[as.character(sample.size)] + num.neut.muts.fixed + num.seln.muts.fixed
    }
    if (is.na(sfs.neut[as.character(sample.size)])) {
      sfs.neut[as.character(sample.size)] <- num.neut.muts.fixed
    } else {
      sfs.neut[as.character(sample.size)] <- sfs.neut[as.character(sample.size)] + num.neut.muts.fixed
    }
    if (is.na(sfs.seln[as.character(sample.size)])) {
      sfs.seln[as.character(sample.size)] <- num.seln.muts.fixed
    } else {
      sfs.seln[as.character(sample.size)] <- sfs.seln[as.character(sample.size)] + num.seln.muts.fixed
    }
    # Monomorphic (frequency 0) sites make up the rest of the genome.
    # NOTE(review): assumes a fixed 25% neutral / 75% selected split of sites.
    sfs.total["0"] <- genome.size - sum(sfs.total)
    genome.size.neut <- 0.25 * genome.size
    genome.size.seln <- 0.75 * genome.size
    sfs.neut["0"] <- genome.size.neut - sum(sfs.neut)
    sfs.seln["0"] <- genome.size.seln - sum(sfs.seln)
  }
  if (use.manual.sample == TRUE) {
    ## Used when manually subsampling a full sample output: mut.prev no longer
    ## reflects the subsample, so allele counts are recounted from the genomes.
    neutral.mut.IDs <- c(neut.muts$mut.ID)
    selected.mut.IDs <- c(seln.muts$mut.ID)
    # Column 2 of genome.dat holds a space-separated mutation-ID list per genome.
    all.mutations <- unlist(lapply(as.character(genome.dat[, 2]), FUN = strsplit, split = " "))
    just.neut.muts <- all.mutations[which(all.mutations %in% neutral.mut.IDs)]
    just.seln.muts <- all.mutations[which(all.mutations %in% selected.mut.IDs)]
    # Allele frequencies: number of sampled genomes carrying each mutation ...
    freqs.total <- table(all.mutations)
    freqs.neut <- table(just.neut.muts)
    freqs.seln <- table(just.seln.muts)
    # ... and the frequency spectra (number of mutations per frequency class).
    sfs.total <- table(freqs.total)
    sfs.neut <- table(freqs.neut)
    sfs.seln <- table(freqs.seln)
    # Add the fixations into the sample.size bin (same logic as above).
    if (is.na(sfs.total[as.character(sample.size)])) {
      sfs.total[as.character(sample.size)] <- num.neut.muts.fixed + num.seln.muts.fixed
    } else {
      sfs.total[as.character(sample.size)] <- sfs.total[as.character(sample.size)] + num.neut.muts.fixed + num.seln.muts.fixed
    }
    if (is.na(sfs.neut[as.character(sample.size)])) {
      sfs.neut[as.character(sample.size)] <- num.neut.muts.fixed
    } else {
      sfs.neut[as.character(sample.size)] <- sfs.neut[as.character(sample.size)] + num.neut.muts.fixed
    }
    if (is.na(sfs.seln[as.character(sample.size)])) {
      sfs.seln[as.character(sample.size)] <- num.seln.muts.fixed
    } else {
      sfs.seln[as.character(sample.size)] <- sfs.seln[as.character(sample.size)] + num.seln.muts.fixed
    }
    sfs.total["0"] <- genome.size - sum(sfs.total)
    genome.size.neut <- 0.25 * genome.size
    genome.size.seln <- 0.75 * genome.size
    sfs.neut["0"] <- genome.size.neut - sum(sfs.neut)
    sfs.seln["0"] <- genome.size.seln - sum(sfs.seln)
  }
  # est_dfe needs every frequency class 0..sample.size, so fill missing classes
  # with zero counts and order each spectrum by frequency.
  full.list <- as.character(0:sample.size)
  neut.missing <- setdiff(full.list, names(sfs.neut))
  sfs.neut[neut.missing] <- 0
  sfs.neut.contents <- as.numeric(paste(sfs.neut))
  sfs.neut.labels <- as.numeric(names(sfs.neut))
  temp.neut <- data.frame(cbind(sfs.neut.labels, sfs.neut.contents))
  ordered.neut <- temp.neut[order(temp.neut[, 1]), c(1, 2)]
  final.sfs.neut <- ordered.neut[, 2]
  seln.missing <- setdiff(full.list, names(sfs.seln))
  sfs.seln[seln.missing] <- 0
  sfs.seln.contents <- as.numeric(paste(sfs.seln))
  sfs.seln.labels <- as.numeric(names(sfs.seln))
  temp.seln <- data.frame(cbind(sfs.seln.labels, sfs.seln.contents))
  ordered.seln <- temp.seln[order(temp.seln[, 1]), c(1, 2)]
  final.sfs.seln <- ordered.seln[, 2]
  if (fold == TRUE) {
    # Fold the spectrum about the minor-allele frequency: bin frequency i with
    # sample.size - i, keep the central class, and zero-pad the upper half so
    # the vector length stays sample.size + 1.
    final.sfs.seln <- c(final.sfs.seln[1:num.inds.sampled] + final.sfs.seln[(sample.size + 1):(num.inds.sampled + 2)],
                        final.sfs.seln[(num.inds.sampled + 1)], rep(0, num.inds.sampled))
    final.sfs.neut <- c(final.sfs.neut[1:num.inds.sampled] + final.sfs.neut[(sample.size + 1):(num.inds.sampled + 2)],
                        final.sfs.neut[(num.inds.sampled + 1)], rep(0, num.inds.sampled))
  }
  # est_dfe input format: number of spectra, sample size, selected SFS, neutral SFS.
  dfe.input <- paste(c("1\n", sample.size, "\n",
                       paste(c(final.sfs.seln), collapse = " "), "\n",
                       paste(c(final.sfs.neut), collapse = " ")), collapse = "")
  if (fold == FALSE) {
    write(dfe.input, file = paste(c("Unfolded", filename), collapse = ""))
  }
  if (fold == TRUE) {
    write(dfe.input, file = paste(c("Folded", filename), collapse = ""))
  }
}
#____________________________________________________________________________________________________#
# Command-line driver: converts one SLiM sample output (plus the matching
# fixed-mutation output) into a folded SFS input file for DFE estimation.
#
# Expected arguments (commandArgs):
#   args[1] - run tag of the form "<generation>_<run identifier...>"
#   args[2] - "subsample" to recount alleles from the sampled genomes;
#             anything else uses the population counts in mut.prev
#   args[3] - working directory containing the SLiM output files
#   args[4] - genome size (number of sites)
inds.sampled <- 100   # diploid individuals per subsample
pop.size <- 10000     # full population size
args <- commandArgs(trailingOnly=TRUE)
# Generation is the leading "<generation>_" field of the run tag.
gen <- as.numeric(unlist(strsplit(as.character(args[1]), split="_"))[1])
gsize <- as.numeric(args[4])
setwd(as.character(args[3]))
#____________________________________________________________________________________________________#
## full data output
full.file <- paste(c("ModifiedSampleOutput_", as.character(args[1])), collapse="")
# Locate the "Mutations" and "Genomes" section headers (line numbers) via
# grep -n, and the total line count via wc -l, so read.table can slice out
# each section of the SLiM output file.
full.samp.muts.start <- as.numeric(unlist(strsplit(system(paste(c("grep -n Mutations ", full.file), collapse=""), intern=TRUE), split=":"))[1])
full.samp.genomes.start <- as.numeric(unlist(strsplit(system(paste(c("grep -n Genomes ", full.file), collapse=""), intern=TRUE), split=":"))[1])
full.samp.file.end <- as.numeric(head(tail(unlist(strsplit(system(paste(c("wc -l ", full.file), collapse=""), intern=TRUE), split=" ")), n=2), n=1))
# Segregating mutations in the sample (one row per mutation).
pdat <- read.table(full.file, skip=full.samp.muts.start, nrow=((full.samp.genomes.start-1) - full.samp.muts.start), sep=" ")
names(pdat) <- c("mut.ID", "unique.mut.ID", "mut.type", "base_position", "seln_coeff", "dom_coeff", "subpop_ID", "generation_arose", "mut.prev")
# Sampled genomes. NOTE(review): sep="A" appears to split each genome line at
# the "A" token so the mutation-ID list lands in column 2 -- confirm against
# the SLiM sample-output format before changing.
gdat <- read.table(full.file, skip=full.samp.genomes.start, nrow=(full.samp.file.end - full.samp.genomes.start), sep="A")
## fixed data output
fixed.mut.id.start <- 2  # lines to skip before the fixed-mutation table begins
fdat <- read.table(paste(c("FixedOutput_", paste(unlist(strsplit(as.character(args[1]), split="_"))[-1], collapse="_")), collapse=""), skip=fixed.mut.id.start)
names(fdat) <- c("mut.ID", "unique.mut.ID", "mut.type", "base_position", "seln_coeff", "dom_coeff", "subpop_ID", "gen_arose", "gen.fixed")
#____________________________________________________________________________________________________#
if(args[2] == "subsample"){
outfile <- paste(c("DFE_SFS_subsamp_gen", as.character(args[1])), collapse="")
# Subsample: allele counts are recomputed from the sampled genomes (gdat).
make.est_dfe.input(poly.dat=pdat, genome.dat=gdat, fixed.dat=fdat,
                   generation=gen, num.inds.sampled=inds.sampled, genome.size=gsize,
                   filename=outfile, fold=TRUE, use.manual.sample=TRUE)
}else{
outfile <- paste(c("DFE_SFS_full_gen", as.character(args[1])), collapse="")
# Full population: mut.prev already holds the allele counts.
make.est_dfe.input(poly.dat=pdat, genome.dat=gdat, fixed.dat=fdat,
                   generation=gen, num.inds.sampled=pop.size, genome.size=gsize,
                   filename=outfile, fold=TRUE, use.manual.sample=FALSE)
}
| /DFE_analysisCode/CommandLine_RunSlimToDFEconversion_transMateSys.R | no_license | kjgilbert/SlimSimCode | R | false | false | 8,484 | r | # the function:
# Build a DFE-alpha / est_dfe style input file (selected + neutral SFS) from
# SLiM sample output.
#
# Arguments:
#   poly.dat          - data frame of segregating mutations in the sample; must
#                       carry columns mut.ID, mut.type and mut.prev.
#   genome.dat        - data frame whose 2nd column holds the space-separated
#                       mutation IDs carried by each sampled genome; only read
#                       when use.manual.sample = TRUE.
#   fixed.dat         - data frame of fixed mutations (mut.ID, mut.type,
#                       gen.fixed columns are used).
#   generation        - sampled generation; fixations up to and including it
#                       are counted in the top frequency class.
#   num.inds.sampled  - number of diploid individuals sampled.
#   genome.size       - total number of sites; 25% are treated as neutral (m1)
#                       and 75% as selected, which sets the zero-count class.
#   filename          - output file name; prefixed with "Folded"/"Unfolded".
#   fold              - fold the SFS around the minor-allele frequency?
#   use.manual.sample - recompute allele counts from genome.dat instead of
#                       trusting poly.dat$mut.prev (needed for subsamples).
#
# Side effect: writes the input file to the working directory; no return value.
make.est_dfe.input <- function(poly.dat, genome.dat, fixed.dat, generation, num.inds.sampled, genome.size, filename, fold=FALSE, use.manual.sample=FALSE){
  # because diploid: number of sampled chromosomes
  sample.size <- 2 * num.inds.sampled
  # Mutation classes: m1 = neutral coding site; m2/m3 = deleterious; m4 = beneficial.
  # Keep only mutations fixed PRIOR to and INCLUDING the sampled generation.
  fixed.mut.dat <- fixed.dat[fixed.dat$gen.fixed <= as.numeric(generation), ]
  # BUGFIX: use logical indexing, not x[-which(...)]. With the old form,
  # when NO neutral mutation had fixed, which() returned integer(0) and
  # mut.ID[-integer(0)] was EMPTY, silently dropping all fixed selected
  # mutations from the SFS.
  is.neut.fixed <- fixed.mut.dat$mut.type == "m1"
  fixed.neut.mut.IDs <- fixed.mut.dat$mut.ID[is.neut.fixed]
  fixed.seln.mut.IDs <- fixed.mut.dat$mut.ID[!is.neut.fixed]
  num.neut.muts.fixed <- length(fixed.neut.mut.IDs)
  num.seln.muts.fixed <- length(fixed.seln.mut.IDs)
  # Split the segregating mutations into neutral vs selected classes.
  neut.muts <- poly.dat[poly.dat$mut.type == "m1", ]
  seln.muts <- poly.dat[poly.dat$mut.type != "m1", ]
  # Add fixed mutations to the top frequency class (freq == sample.size),
  # creating that class if no segregating mutation reached it.
  add.fixed.class <- function(sfs, n.fixed) {
    key <- as.character(sample.size)
    if (is.na(sfs[key])) {
      sfs[key] <- n.fixed
    } else {
      sfs[key] <- sfs[key] + n.fixed
    }
    sfs
  }
  if (use.manual.sample == FALSE) {
    # Allele counts come straight from the mut.prev column.
    sfs.total <- table(poly.dat$mut.prev)
    sfs.neut <- table(neut.muts$mut.prev)
    sfs.seln <- table(seln.muts$mut.prev)
  } else {
    # Recount alleles by scanning the sampled genomes themselves, because a
    # manual subsample has no per-sample frequencies in poly.dat.
    neutral.mut.IDs <- c(neut.muts$mut.ID)
    selected.mut.IDs <- c(seln.muts$mut.ID)
    all.mutations <- unlist(lapply(as.character(genome.dat[, 2]), FUN=strsplit, split=" "))
    just.neut.muts <- all.mutations[which(all.mutations %in% neutral.mut.IDs)]
    just.seln.muts <- all.mutations[which(all.mutations %in% selected.mut.IDs)]
    # per-mutation allele counts:
    freqs.total <- table(all.mutations)
    freqs.neut <- table(just.neut.muts)
    freqs.seln <- table(just.seln.muts)
    # frequency spectra (number of mutations at each allele count):
    sfs.total <- table(freqs.total)
    sfs.neut <- table(freqs.neut)
    sfs.seln <- table(freqs.seln)
  }
  sfs.total <- add.fixed.class(sfs.total, num.neut.muts.fixed + num.seln.muts.fixed)
  sfs.neut <- add.fixed.class(sfs.neut, num.neut.muts.fixed)
  sfs.seln <- add.fixed.class(sfs.seln, num.seln.muts.fixed)
  # Monomorphic sites fill the zero class; 25% of the genome is neutral and
  # 75% selected (matches the simulated coding structure).
  sfs.total["0"] <- genome.size - sum(sfs.total)
  genome.size.neut <- 0.25*genome.size
  genome.size.seln <- 0.75*genome.size
  sfs.neut["0"] <- genome.size.neut - sum(sfs.neut)
  sfs.seln["0"] <- genome.size.seln - sum(sfs.seln)
  # each SFS (for DFE) must have the counts for 0 and for fixation;
  # if 100 inds sampled, that makes length of sfs 201 (0 - 200)
  full.list <- as.character(0:sample.size)
  # Fill any missing frequency class with 0 and return the counts ordered by
  # frequency (0 .. sample.size) so neutral and selected SFS line up.
  order.sfs <- function(sfs) {
    missing <- setdiff(full.list, names(sfs))
    sfs[missing] <- 0
    counts <- as.numeric(paste(sfs))
    labels <- as.numeric(names(sfs))
    counts[order(labels)]
  }
  final.sfs.neut <- order.sfs(sfs.neut)
  final.sfs.seln <- order.sfs(sfs.seln)
  if (fold == TRUE) {
    # Fold the spectrum back on itself: bin frequency i with frequency
    # sample.size - i, keep the central class (freq = num.inds.sampled) on its
    # own, then pad with zeros so the vector keeps its original length.
    fold.sfs <- function(sfs) {
      c(sfs[1:num.inds.sampled] + sfs[(sample.size + 1):(num.inds.sampled + 2)],
        sfs[(num.inds.sampled + 1)],
        rep(0, num.inds.sampled))
    }
    final.sfs.seln <- fold.sfs(final.sfs.seln)
    final.sfs.neut <- fold.sfs(final.sfs.neut)
  }
  # est_dfe input layout, one item per line: number of SFS pairs, sample size,
  # selected SFS, neutral SFS. (Same bytes as the old embedded-newline string.)
  dfe.input <- paste(c(
    "1",
    sample.size,
    paste(c(final.sfs.seln), collapse=" "),
    paste(c(final.sfs.neut), collapse=" ")
  ), collapse="\n")
  if (fold == FALSE) {
    write(dfe.input, file=paste(c("Unfolded", filename), collapse=""))
  }
  if (fold == TRUE) {
    write(dfe.input, file=paste(c("Folded", filename), collapse=""))
  }
}
#____________________________________________________________________________________________________#
# Command-line driver: converts one SLiM sample output (plus the matching
# fixed-mutation output) into a folded SFS input file for DFE estimation.
#
# Expected arguments (commandArgs):
#   args[1] - run tag of the form "<generation>_<run identifier...>"
#   args[2] - "subsample" to recount alleles from the sampled genomes;
#             anything else uses the population counts in mut.prev
#   args[3] - working directory containing the SLiM output files
#   args[4] - genome size (number of sites)
inds.sampled <- 100   # diploid individuals per subsample
pop.size <- 10000     # full population size
args <- commandArgs(trailingOnly=TRUE)
# Generation is the leading "<generation>_" field of the run tag.
gen <- as.numeric(unlist(strsplit(as.character(args[1]), split="_"))[1])
gsize <- as.numeric(args[4])
setwd(as.character(args[3]))
#____________________________________________________________________________________________________#
## full data output
full.file <- paste(c("ModifiedSampleOutput_", as.character(args[1])), collapse="")
# Locate the "Mutations" and "Genomes" section headers (line numbers) via
# grep -n, and the total line count via wc -l, so read.table can slice out
# each section of the SLiM output file.
full.samp.muts.start <- as.numeric(unlist(strsplit(system(paste(c("grep -n Mutations ", full.file), collapse=""), intern=TRUE), split=":"))[1])
full.samp.genomes.start <- as.numeric(unlist(strsplit(system(paste(c("grep -n Genomes ", full.file), collapse=""), intern=TRUE), split=":"))[1])
full.samp.file.end <- as.numeric(head(tail(unlist(strsplit(system(paste(c("wc -l ", full.file), collapse=""), intern=TRUE), split=" ")), n=2), n=1))
# Segregating mutations in the sample (one row per mutation).
pdat <- read.table(full.file, skip=full.samp.muts.start, nrow=((full.samp.genomes.start-1) - full.samp.muts.start), sep=" ")
names(pdat) <- c("mut.ID", "unique.mut.ID", "mut.type", "base_position", "seln_coeff", "dom_coeff", "subpop_ID", "generation_arose", "mut.prev")
# Sampled genomes. NOTE(review): sep="A" appears to split each genome line at
# the "A" token so the mutation-ID list lands in column 2 -- confirm against
# the SLiM sample-output format before changing.
gdat <- read.table(full.file, skip=full.samp.genomes.start, nrow=(full.samp.file.end - full.samp.genomes.start), sep="A")
## fixed data output
fixed.mut.id.start <- 2  # lines to skip before the fixed-mutation table begins
fdat <- read.table(paste(c("FixedOutput_", paste(unlist(strsplit(as.character(args[1]), split="_"))[-1], collapse="_")), collapse=""), skip=fixed.mut.id.start)
names(fdat) <- c("mut.ID", "unique.mut.ID", "mut.type", "base_position", "seln_coeff", "dom_coeff", "subpop_ID", "gen_arose", "gen.fixed")
#____________________________________________________________________________________________________#
if(args[2] == "subsample"){
outfile <- paste(c("DFE_SFS_subsamp_gen", as.character(args[1])), collapse="")
# Subsample: allele counts are recomputed from the sampled genomes (gdat).
make.est_dfe.input(poly.dat=pdat, genome.dat=gdat, fixed.dat=fdat,
                   generation=gen, num.inds.sampled=inds.sampled, genome.size=gsize,
                   filename=outfile, fold=TRUE, use.manual.sample=TRUE)
}else{
outfile <- paste(c("DFE_SFS_full_gen", as.character(args[1])), collapse="")
# Full population: mut.prev already holds the allele counts.
make.est_dfe.input(poly.dat=pdat, genome.dat=gdat, fixed.dat=fdat,
                   generation=gen, num.inds.sampled=pop.size, genome.size=gsize,
                   filename=outfile, fold=TRUE, use.manual.sample=FALSE)
}
|
#---
#title: "Prevalence Plot"
#author: "Mercè Garí"
#date: '2021-02-10'
#---
# Project-root path helpers (wrap here::here so each artefact type has a
# fixed subdirectory); `...` is appended as further path components.
# Idiom fix: use `<-` for assignment instead of `=`.
here_r <- function(...) here::here("Statistics", ...)
here_weights <- function(...) here::here("SamplingWeights", ...)
here_output <- function(...) here::here("Output", ...)
# Setup
library(dplyr)
library(ggplot2)
library(tidyr)
library(ggthemes)
library(GGally)
library(colorspace)
library(readxl)
library(forcats)
library(scales)
library(gridExtra)
library(cowplot)
library(Cairo)
# Colour constants for the figures (idiom fix: `<-` instead of `=`).
col_grey <- "#999999"
col_trueneg <- "#56B4E9" #"#0072B2"
col_truepos <- "#D55E00"
black <- "#000000"
# Palette order: grey, true-negative blue, true-positive orange, black.
pal <- c(col_grey, col_trueneg, col_truepos, black)
# Function to format numbers to n significant figures (default n = 3)
# Round to n significant figures and format as a string; flag "#" keeps
# trailing zeros so all values print with equal apparent precision.
sigfig <- function(vec, n=3){
  rounded <- signif(vec, digits = n)
  formatC(rounded, digits = n, format = "fg", flag = "#")
}
# Import the calibrated round-1 test estimates (produced by the
# SamplingWeights step) and inspect their structure.
d <- read.csv(here_weights("Estimates_Cal_R1_Test.csv"))
head(d)
str(d)
# Forest-style plot: weighted prevalence (round 2) and incidence (negative in
# R1, positive in R2), adjusted vs unadjusted, each with its 95% confidence
# band and a "estimate (lower - upper)" text label.
plot <- d %>%
  filter(Calculation %in% "Weighted") %>%
  filter(Categories %in% c("Prevalence R2", "Negative R1 Positive R2")) %>%
  # Recode the category labels for display.
  mutate(Categories = case_when(
    Categories == "Prevalence R2" ~ "Prevalence",
    Categories == "Negative R1 Positive R2" ~ "Incidence")) %>%
#  mutate(Estimates = ifelse(Estimates <= 0, 0.9, Estimates),
#         Lower_95_CB = ifelse(Lower_95_CB <= 0, 0.9, Lower_95_CB)) %>%
  ggplot(aes(y=Estimates,
             x=Categories,
             color=Adjustment)) +
  # Dodge so adjusted/unadjusted intervals sit side by side within a category.
  geom_point(position=position_dodge(width=0.5)) +
  geom_linerange(aes(ymin=Lower_95_CB,
                     ymax=Upper_95_CB),
                 position=position_dodge(width=0.5)) +
  coord_flip() +
  # NOTE(review): no linetype aesthetic is mapped anywhere, so this scale
  # appears to have no visible effect -- confirm before removing.
  scale_linetype_manual(values=c(1,6)) +
  scale_color_manual(values=pal[c(3,2,1,4)]) +
  xlab("") + ylab("Percentage") +
  theme_classic() +
  theme(legend.title=element_blank(),
        legend.justification=c(1,0), legend.position=c(1,0.05)) +
  # ". %>% filter(...)" is a magrittr functional sequence; ggplot2 applies it
  # to the layer data, so each label layer only sees its own Adjustment group.
  geom_text(data=.%>% filter(Adjustment == "Adjusted"),
            aes(label=paste0(signif(Estimates, 2), " (",
                             signif(Lower_95_CB, 2), " - ",
                             signif(Upper_95_CB, 2), ")"),
                hjust=1.8, vjust=2),
            show.legend=FALSE, size=3) +
  geom_text(data=.%>% filter(Adjustment == "Unadjusted"),
            aes(label=paste0(signif(Estimates, 2), " (",
                             signif(Lower_95_CB, 2), " - ",
                             signif(Upper_95_CB, 2), ")"),
                hjust=1.8, vjust=-1.1),
            show.legend=FALSE, size=3) +
  ylim(c(0,4.5))
plot
# Save the most recent plot in both vector (Cairo PDF) and raster (PNG) form.
ggsave(here_output(
  file="Figure_4.pdf"),
  device=cairo_pdf,
  width=5, height=3)
ggsave(here_output(
  file="Figure_4.png"),
  width=5, height=3)
| /Statistics/Plot-Prevalence-Incidence.R | permissive | koco19/epi2 | R | false | false | 2,612 | r | #---
#title: "Prevalence Plot"
#author: "Mercè Garí"
#date: '2021-02-10'
#---
# Project-root path helpers (wrap here::here so each artefact type has a
# fixed subdirectory); `...` is appended as further path components.
# Idiom fix: use `<-` for assignment instead of `=`.
here_r <- function(...) here::here("Statistics", ...)
here_weights <- function(...) here::here("SamplingWeights", ...)
here_output <- function(...) here::here("Output", ...)
# Setup
library(dplyr)
library(ggplot2)
library(tidyr)
library(ggthemes)
library(GGally)
library(colorspace)
library(readxl)
library(forcats)
library(scales)
library(gridExtra)
library(cowplot)
library(Cairo)
# Colour constants for the figures (idiom fix: `<-` instead of `=`).
col_grey <- "#999999"
col_trueneg <- "#56B4E9" #"#0072B2"
col_truepos <- "#D55E00"
black <- "#000000"
# Palette order: grey, true-negative blue, true-positive orange, black.
pal <- c(col_grey, col_trueneg, col_truepos, black)
# Function to format numbers to n significant figures (default n = 3)
# Round to n significant figures and format as a string; flag "#" keeps
# trailing zeros so all values print with equal apparent precision.
sigfig <- function(vec, n=3){
  rounded <- signif(vec, digits = n)
  formatC(rounded, digits = n, format = "fg", flag = "#")
}
# Import the calibrated round-1 test estimates (produced by the
# SamplingWeights step) and inspect their structure.
d <- read.csv(here_weights("Estimates_Cal_R1_Test.csv"))
head(d)
str(d)
# Forest-style plot: weighted prevalence (round 2) and incidence (negative in
# R1, positive in R2), adjusted vs unadjusted, each with its 95% confidence
# band and a "estimate (lower - upper)" text label.
plot <- d %>%
  filter(Calculation %in% "Weighted") %>%
  filter(Categories %in% c("Prevalence R2", "Negative R1 Positive R2")) %>%
  # Recode the category labels for display.
  mutate(Categories = case_when(
    Categories == "Prevalence R2" ~ "Prevalence",
    Categories == "Negative R1 Positive R2" ~ "Incidence")) %>%
#  mutate(Estimates = ifelse(Estimates <= 0, 0.9, Estimates),
#         Lower_95_CB = ifelse(Lower_95_CB <= 0, 0.9, Lower_95_CB)) %>%
  ggplot(aes(y=Estimates,
             x=Categories,
             color=Adjustment)) +
  # Dodge so adjusted/unadjusted intervals sit side by side within a category.
  geom_point(position=position_dodge(width=0.5)) +
  geom_linerange(aes(ymin=Lower_95_CB,
                     ymax=Upper_95_CB),
                 position=position_dodge(width=0.5)) +
  coord_flip() +
  # NOTE(review): no linetype aesthetic is mapped anywhere, so this scale
  # appears to have no visible effect -- confirm before removing.
  scale_linetype_manual(values=c(1,6)) +
  scale_color_manual(values=pal[c(3,2,1,4)]) +
  xlab("") + ylab("Percentage") +
  theme_classic() +
  theme(legend.title=element_blank(),
        legend.justification=c(1,0), legend.position=c(1,0.05)) +
  # ". %>% filter(...)" is a magrittr functional sequence; ggplot2 applies it
  # to the layer data, so each label layer only sees its own Adjustment group.
  geom_text(data=.%>% filter(Adjustment == "Adjusted"),
            aes(label=paste0(signif(Estimates, 2), " (",
                             signif(Lower_95_CB, 2), " - ",
                             signif(Upper_95_CB, 2), ")"),
                hjust=1.8, vjust=2),
            show.legend=FALSE, size=3) +
  geom_text(data=.%>% filter(Adjustment == "Unadjusted"),
            aes(label=paste0(signif(Estimates, 2), " (",
                             signif(Lower_95_CB, 2), " - ",
                             signif(Upper_95_CB, 2), ")"),
                hjust=1.8, vjust=-1.1),
            show.legend=FALSE, size=3) +
  ylim(c(0,4.5))
plot
# Save the most recent plot in both vector (Cairo PDF) and raster (PNG) form.
ggsave(here_output(
  file="Figure_4.pdf"),
  device=cairo_pdf,
  width=5, height=3)
ggsave(here_output(
  file="Figure_4.png"),
  width=5, height=3)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.