content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
#### TITLE: EC urban (UTM) data ####
##
## Author: Yusri Yusup, PhD
## Date: 2016-05-16
## Version: 1.0
## R Script to import and process urban EC (eddy covariance) data from UTM:
## reads a raw CSV export, recodes -9999 missing-value sentinels to NA,
## builds a POSIX time stamp, converts QC/flag columns to factors and the
## remaining character columns to numeric.
##
#### * Preliminaries ####
source('R/tool_convert_magic.R')
source('R/tool_convert_magic_num.R')
source('R/tool_charactersNumeric.R')
source('R/tool_asNumeric.R')
#### * Data import and process ####
# Interactive file picker; the first line of the CSV is a banner, so skip = 1
df_urban <- read.csv(file.choose(),
                     header = TRUE, skip = 1)
# Remove the first row (units row)
df_urban <- df_urban[-1, ]
# Remove the first column
df_urban <- df_urban[, -1]
# Renumber the row numbers
rownames(df_urban) <- NULL
# Using convert_magic to convert all columns to 'character' first
df_urban <- convert_magic(df_urban, rep('character', times = ncol(df_urban)))
# Changing all the '-9999.0' or '-9999' (missing data) to NA
for (i in seq_along(df_urban)) {
  df_urban[i][df_urban[i] == '-9999' | df_urban[i] == '-9999.0'] <- NA
}
rm(i)
# Formatting time
time_stamp <- paste(df_urban$date, df_urban$time)
# Might need to change format of date 1/1/2014 or 2014-1-1
time_stamp <- strptime(time_stamp, "%Y-%m-%d %H:%M")
df_urban$time <- time_stamp
# Drop the now-redundant 'date' column and rename 'time' to 'time_stamp'
df_urban <- df_urban[, -1]
colnames(df_urban)[1] <- 'time_stamp'
# Remove 'DOY', don't know what it is for...
df_urban <- df_urban[, -2]
# Changing all relevant columns (QC flags, counts, model id) to factors
# in one pass instead of one as.factor() line per column
factor_cols <- c('daytime', 'file_records', 'used_records', 'qc_Tau', 'qc_H',
                 'spikes_hf', 'amplitude_resolution_hf', 'drop_out_hf',
                 'absolute_limits_hf', 'skewness_kurtosis_hf',
                 'skewness_kurtosis_sf', 'discontinuities_hf',
                 'discontinuities_sf', 'timelag_hf', 'timelag_sf',
                 'attack_angle_hf', 'non_steady_wind_hf', 'model')
df_urban[factor_cols] <- lapply(df_urban[factor_cols], as.factor)
# Change all non-factors (or characters) to numeric
df_urban <- charactersNumeric(df_urban)
# Change column name of (z-d)/L to Z.L
colnames(df_urban)[which(colnames(df_urban) == 'X.z.d..L')] <- 'Z.L'
# Remove 'ET' because the only column different with other data sets
df_urban$ET <- NULL
#### * Cleanup ####
rm(time_stamp, factor_cols)
|
/R/EC_urban_analysis.R
|
no_license
|
yusriy/EC_urban
|
R
| false
| false
| 2,778
|
r
|
#### TITLE: EC urban (UTM) data ####
##
## Author: Yusri Yusup, PhD
## Date: 2016-05-16
## Version: 1.0
## R Script to import and process urban EC (eddy covariance) data from UTM:
## reads a raw CSV export, recodes -9999 missing-value sentinels to NA,
## builds a POSIX time stamp, converts QC/flag columns to factors and the
## remaining character columns to numeric.
##
#### * Preliminaries ####
source('R/tool_convert_magic.R')
source('R/tool_convert_magic_num.R')
source('R/tool_charactersNumeric.R')
source('R/tool_asNumeric.R')
#### * Data import and process ####
# Interactive file picker; the first line of the CSV is a banner, so skip = 1
df_urban <- read.csv(file.choose(),
                     header = TRUE, skip = 1)
# Remove the first row (units row)
df_urban <- df_urban[-1, ]
# Remove the first column
df_urban <- df_urban[, -1]
# Renumber the row numbers
rownames(df_urban) <- NULL
# Using convert_magic to convert all columns to 'character' first
df_urban <- convert_magic(df_urban, rep('character', times = ncol(df_urban)))
# Changing all the '-9999.0' or '-9999' (missing data) to NA
for (i in seq_along(df_urban)) {
  df_urban[i][df_urban[i] == '-9999' | df_urban[i] == '-9999.0'] <- NA
}
rm(i)
# Formatting time
time_stamp <- paste(df_urban$date, df_urban$time)
# Might need to change format of date 1/1/2014 or 2014-1-1
time_stamp <- strptime(time_stamp, "%Y-%m-%d %H:%M")
df_urban$time <- time_stamp
# Drop the now-redundant 'date' column and rename 'time' to 'time_stamp'
df_urban <- df_urban[, -1]
colnames(df_urban)[1] <- 'time_stamp'
# Remove 'DOY', don't know what it is for...
df_urban <- df_urban[, -2]
# Changing all relevant columns (QC flags, counts, model id) to factors
# in one pass instead of one as.factor() line per column
factor_cols <- c('daytime', 'file_records', 'used_records', 'qc_Tau', 'qc_H',
                 'spikes_hf', 'amplitude_resolution_hf', 'drop_out_hf',
                 'absolute_limits_hf', 'skewness_kurtosis_hf',
                 'skewness_kurtosis_sf', 'discontinuities_hf',
                 'discontinuities_sf', 'timelag_hf', 'timelag_sf',
                 'attack_angle_hf', 'non_steady_wind_hf', 'model')
df_urban[factor_cols] <- lapply(df_urban[factor_cols], as.factor)
# Change all non-factors (or characters) to numeric
df_urban <- charactersNumeric(df_urban)
# Change column name of (z-d)/L to Z.L
colnames(df_urban)[which(colnames(df_urban) == 'X.z.d..L')] <- 'Z.L'
# Remove 'ET' because the only column different with other data sets
df_urban$ET <- NULL
#### * Cleanup ####
rm(time_stamp, factor_cols)
|
## Parallelised power / type-I-error simulation for an exact restricted
## likelihood ratio test (RLRT) of a random-slope variance in a linear mixed
## model with functional-covariate scores.  Each replicate simulates curves
## from a 3-term Fourier basis, recovers scores with FPCA
## (refund::fpca.face), rotates each subject's score block via an SVD, fits
## lme4 models and runs RLRsim::exactRLRT.  Results are saved to an .RData
## file whose name encodes the simulation settings.
library(parallel)
simRep <- 5000 # Replication times in one simulation
pvalue.true <- .05 # Testing type I error
b.var <- c(0.0144) # The set of varaince of random covariates b as random slope
smooth <- 1 # measurement error is added to M if smooth = 0; no measurement error is added if sooth = 1
cores <- 1
r.sim <- b.var
## One simulation replicate; 'iter' seeds the RNG, so replicate i is
## reproducible in isolation.  Returns the true variance, the RLRT p-value,
## and bookkeeping info (measurement-error variance, smooth flag, npc, test).
run_one_sample <- function(iter){
library(refund)
library(lme4)
library(nlme)
library(arm)
library(RLRsim)
library(MASS)
# NOTE(review): this overrides the per-worker RNG stream installed by
# clusterSetRNGStream() at the top level; reproducibility comes from iter.
set.seed(iter)
D <- 80 # grid number total
nSubj <- 50 # 200 # I the number of curves
nRep <- 20 # 20 # datasets for each covariance function
totalN <- nSubj * nRep
thetaK.true <- 2
timeGrid <- (1:D)/D
npc.true <- 3
percent <- 0.95
SNR <- 3 # 5, signal noise ratio'
sd.epsilon <- 1 # or 0.5
delta.true <- 0.5
a.mean <- 0
gamma.true <- 2
gammaVar.true <- 1
# hot
gammaI.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI.true <- gammaI.true[rep(1:nrow(gammaI.true), each = nRep), ]
# warm
gammaI2.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI2.true <- gammaI2.true[rep(1:nrow(gammaI2.true), each = nRep), ]
dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # dummyX
#generate functional covariates
# Eigenvalue lambda_k = 0.5^(k-1) for basis term k
lambda.sim <- function(degree) {
return(0.5^(degree - 1))
}
# Orthonormal Fourier basis functions psi_1..psi_3 on [0, 1];
# returns NA for any other degree
psi.fourier <- function(t, degree) {
result <- NA
if(degree == 1){
result <- sqrt(2) * sinpi(2*t)
}else if(degree == 2){
result <- sqrt(2) * cospi(4*t)
}else if(degree == 3){
result <- sqrt(2) * sinpi(4*t)
}
return(result)
}
lambdaVec.true <- mapply(lambda.sim, 1: npc.true)
psi.true <- matrix(data = mapply(psi.fourier, rep(timeGrid, npc.true), rep(1:npc.true, each=D)),
nrow = npc.true,
ncol = D,
byrow = TRUE)
ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
Mt.true <- ascore.true %*% psi.true
error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
# Per-subject random slopes on the scores; variance r.sim is the quantity
# whose nullity the RLRT tests
thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(rep(r.sim, npc.true)))
thetaIK.true <- thetaIK.true[rep(1:nrow(thetaIK.true), each = nRep), ]
betaM.true <- thetaIK.true * ascore.true
betaM.true <- rowSums(betaM.true)
Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
##########################################################################
ID <- rep(1:nSubj, each = nRep)
if(smooth == 0){
Merror.Var <- sum(lambdaVec.true) / SNR #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D)
}
if(smooth == 1){
Merror.Var <- 0 #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true
}
M <- Mt.hat
# M <- M - matrix(rep(colMeans(M), each = totalN), totalN, D) # center:column-means are 0
t <- (1:D)/D
knots <- 5 # previous setting 10
p <- 5 # previous setting p <- 7, the number of degree for B-splines we use
results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen!
npc <- results$npc
score <- results$scores
ascore <- score[, 1:npc]/sqrt(D)
# plot(results$efunctions[,2]*sqrt(D))
# lines(1:80, psi.fourier(timeGrid, 2)) #match very well
# to compare lambda: results$evalues/(D))
# to compare estimated M, Mt.hat, Mt.true
# a<-results$scores %*% t(results$efunctions)
# plot(M[300,]) #Mt.hat
# lines(a[300,]+results$mu,col="red") # estimated M
# lines(Mt.true[300,], col="blue") #true Mt
###########################################################################
dummyX <- cbind(dummyX, -dummyX + 1)
z.sim.uni = c()
ID.uni <- c(rbind(matrix(1:(nSubj*npc),
nrow = npc,
ncol = nSubj),
matrix(0, nrow = nRep - npc, ncol = nSubj)))
## Rotate each subject's nRep-row block of (Y, dummyX, ascore) by t(v) from
## the SVD of A_i A_i', collapsing the tested random-effect design to a
## single column z.sim.uni: sqrt of the leading npc singular values,
## zero-padded to nRep entries per subject.
for(k in 1:nSubj){
svd <- svd(ascore[((k-1)*nRep+1):(k*nRep), ] %*% t(ascore[((k-1)*nRep+1):(k*nRep), ])) #SVD on A_i
u.tra <- t(svd$v)
u <- svd$u
d <- (svd$d)[1:npc]
# u <- cbind(u, Null(u))
Y[((k-1)*nRep+1):(k*nRep)] <- u.tra %*% Y[((k-1)*nRep+1):(k*nRep)]
dummyX[((k-1)*nRep+1):(k*nRep), ] <- u.tra %*% dummyX[((k-1)*nRep+1):(k*nRep), ]
ascore[((k-1)*nRep+1):(k*nRep), ] <- rbind(u.tra[1:npc, ] %*% ascore[((k-1)*nRep+1):(k*nRep), ],
matrix(0,
nrow = nRep - npc,
ncol = npc))
z.sim.uni <- c(z.sim.uni, sqrt(d), rep(0, nRep - npc))
}
###########################################################################
designMatrix <- data.frame(rating = Y,
temp.1 = dummyX[, 1],
temp.2 = dummyX[, 2],
ID = as.factor(ID),
ID.uni = as.factor(ID.uni),
ascore = ascore,
z.sim.uni = z.sim.uni)
# 'lmer' model
designMatrix.lmm <- designMatrix
additive0.sim <- paste(1:npc, collapse = " + ascore.")
# NOTE(review): additive.sim is built but never used below — presumably left
# over from an earlier model variant; confirm before removing.
additive.sim <- paste(1:npc, collapse = " | ID) + (0 + ascore.")
# Build the lmer formula by pasting the npc score terms into the fixed part
model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
additive0.sim,
" + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)",
sep = ""))
fullReml <- lmer(model.sim, data = designMatrix.lmm)
f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
additive0.sim,
" + (0 + z.sim.uni | ID.uni)",
sep = ""))
m.slope <- lmer(f.slope, data = designMatrix.lmm)
f0 <- as.formula(" . ~ . - (0 + z.sim.uni | ID.uni)")
m0 <- update(fullReml, f0)
# Exact restricted LRT for the z.sim.uni variance component: m.slope holds
# only the tested effect, fullReml all effects, m0 the null without it
tests2 <- exactRLRT(m.slope, fullReml, m0)
pvalues.bonf <- tests2$p[1]
###################################################################################
return(list(realTau = r.sim,
pvalues.bonf = pvalues.bonf,
Merror.Var = Merror.Var,
smooth = smooth,
npc = npc,
tests2 = tests2))
}
# Setup parallel
#cores <- detectCores()
cluster <- makeCluster(cores)
clusterSetRNGStream(cluster, 20170822)
# for(nRandCovariate in 1 * 2){ # START out-outer loop
# clusterExport(cluster, c("nRandCovariate")) # casting the coefficient parameter on the random effects' covariance function
# fileName <- paste("power_", b.var, "_grp20-rep20-", nRandCovariate,".RData", sep = "") # Saving file's name
clusterExport(cluster, c("r.sim", "smooth")) # casting the coefficient parameter on the random effects' covariance function
fileName <- paste("f_power_", smooth, "_",b.var,"_seed1_grp50-rep20.RData", sep = "") # Saving file's name
# run the simulation
loopIndex <- 1
# resultDoubleList.sim <- list()
#power1.sim <- list()
power2.sim <- list()
# for(r.sim in b.var){ # START outer loop
## Run all simRep replicates on the cluster; each worker loads its own packages
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalue = x$pvalue)})
#result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalues.bonf = x$pvalues.bonf,
# smooth = x$smooth,
# npc = x$npc)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
# c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
# Rejection indicator per replicate; the mean over replicates is the
# empirical power (or type-I error when r.sim = 0)
table2.sim <- sapply(node_results, function(x) {
c(overall.sens = (sum(x$pvalues.bonf <= pvalue.true) > 0))})
Power2 <- mean(table2.sim)
#cat("Power2: ", Power2, fill = TRUE)
power2.sim[[loopIndex]] <- list(Power = Power2, realTau = r.sim, smooth = smooth)
# loopIndex <- loopIndex + 1
# } # End outer loop
save.image(file=fileName) # Auto Save
# par(mfrow=c(2,1))
# Histogram plots
# hist(sapply(result1.sim, function(x) x$pvalue),
# main = "Histogram of p-value for lme model",
# xlab = "p-value")
# hist(sapply(result2.sim, function(x) x$pvalues.bonf),
# main = "Histogram of p-value for lmer model",
# xlab = "p-value")
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests1)$statistic[1]),
# breaks = (0:110)/10,
# main = "Histogram of test-statistic for lme model",
# xlab = "Test Statistics")
#
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests2)[1,1]),
# breaks = (0:100)/10,
# main = "Histogram of test-statistic for lmer model",
# xlab = "Test Statistics")
#} # End out-outer loop
stopCluster(cluster)
|
/full simulation/03.13.2018/power test/variance0.0144/seed1/power_0.0144_pca_s_seed1_50_20.R
|
no_license
|
wma9/FMRI-project
|
R
| false
| false
| 9,158
|
r
|
## Parallelised power / type-I-error simulation for an exact restricted
## likelihood ratio test (RLRT) of a random-slope variance in a linear mixed
## model with functional-covariate scores.  Each replicate simulates curves
## from a 3-term Fourier basis, recovers scores with FPCA
## (refund::fpca.face), rotates each subject's score block via an SVD, fits
## lme4 models and runs RLRsim::exactRLRT.  Results are saved to an .RData
## file whose name encodes the simulation settings.
library(parallel)
simRep <- 5000 # Replication times in one simulation
pvalue.true <- .05 # Testing type I error
b.var <- c(0.0144) # The set of varaince of random covariates b as random slope
smooth <- 1 # measurement error is added to M if smooth = 0; no measurement error is added if sooth = 1
cores <- 1
r.sim <- b.var
## One simulation replicate; 'iter' seeds the RNG, so replicate i is
## reproducible in isolation.  Returns the true variance, the RLRT p-value,
## and bookkeeping info (measurement-error variance, smooth flag, npc, test).
run_one_sample <- function(iter){
library(refund)
library(lme4)
library(nlme)
library(arm)
library(RLRsim)
library(MASS)
# NOTE(review): this overrides the per-worker RNG stream installed by
# clusterSetRNGStream() at the top level; reproducibility comes from iter.
set.seed(iter)
D <- 80 # grid number total
nSubj <- 50 # 200 # I the number of curves
nRep <- 20 # 20 # datasets for each covariance function
totalN <- nSubj * nRep
thetaK.true <- 2
timeGrid <- (1:D)/D
npc.true <- 3
percent <- 0.95
SNR <- 3 # 5, signal noise ratio'
sd.epsilon <- 1 # or 0.5
delta.true <- 0.5
a.mean <- 0
gamma.true <- 2
gammaVar.true <- 1
# hot
gammaI.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI.true <- gammaI.true[rep(1:nrow(gammaI.true), each = nRep), ]
# warm
gammaI2.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI2.true <- gammaI2.true[rep(1:nrow(gammaI2.true), each = nRep), ]
dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # dummyX
#generate functional covariates
# Eigenvalue lambda_k = 0.5^(k-1) for basis term k
lambda.sim <- function(degree) {
return(0.5^(degree - 1))
}
# Orthonormal Fourier basis functions psi_1..psi_3 on [0, 1];
# returns NA for any other degree
psi.fourier <- function(t, degree) {
result <- NA
if(degree == 1){
result <- sqrt(2) * sinpi(2*t)
}else if(degree == 2){
result <- sqrt(2) * cospi(4*t)
}else if(degree == 3){
result <- sqrt(2) * sinpi(4*t)
}
return(result)
}
lambdaVec.true <- mapply(lambda.sim, 1: npc.true)
psi.true <- matrix(data = mapply(psi.fourier, rep(timeGrid, npc.true), rep(1:npc.true, each=D)),
nrow = npc.true,
ncol = D,
byrow = TRUE)
ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
Mt.true <- ascore.true %*% psi.true
error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
# Per-subject random slopes on the scores; variance r.sim is the quantity
# whose nullity the RLRT tests
thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(rep(r.sim, npc.true)))
thetaIK.true <- thetaIK.true[rep(1:nrow(thetaIK.true), each = nRep), ]
betaM.true <- thetaIK.true * ascore.true
betaM.true <- rowSums(betaM.true)
Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
##########################################################################
ID <- rep(1:nSubj, each = nRep)
if(smooth == 0){
Merror.Var <- sum(lambdaVec.true) / SNR #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D)
}
if(smooth == 1){
Merror.Var <- 0 #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true
}
M <- Mt.hat
# M <- M - matrix(rep(colMeans(M), each = totalN), totalN, D) # center:column-means are 0
t <- (1:D)/D
knots <- 5 # previous setting 10
p <- 5 # previous setting p <- 7, the number of degree for B-splines we use
results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen!
npc <- results$npc
score <- results$scores
ascore <- score[, 1:npc]/sqrt(D)
# plot(results$efunctions[,2]*sqrt(D))
# lines(1:80, psi.fourier(timeGrid, 2)) #match very well
# to compare lambda: results$evalues/(D))
# to compare estimated M, Mt.hat, Mt.true
# a<-results$scores %*% t(results$efunctions)
# plot(M[300,]) #Mt.hat
# lines(a[300,]+results$mu,col="red") # estimated M
# lines(Mt.true[300,], col="blue") #true Mt
###########################################################################
dummyX <- cbind(dummyX, -dummyX + 1)
z.sim.uni = c()
ID.uni <- c(rbind(matrix(1:(nSubj*npc),
nrow = npc,
ncol = nSubj),
matrix(0, nrow = nRep - npc, ncol = nSubj)))
## Rotate each subject's nRep-row block of (Y, dummyX, ascore) by t(v) from
## the SVD of A_i A_i', collapsing the tested random-effect design to a
## single column z.sim.uni: sqrt of the leading npc singular values,
## zero-padded to nRep entries per subject.
for(k in 1:nSubj){
svd <- svd(ascore[((k-1)*nRep+1):(k*nRep), ] %*% t(ascore[((k-1)*nRep+1):(k*nRep), ])) #SVD on A_i
u.tra <- t(svd$v)
u <- svd$u
d <- (svd$d)[1:npc]
# u <- cbind(u, Null(u))
Y[((k-1)*nRep+1):(k*nRep)] <- u.tra %*% Y[((k-1)*nRep+1):(k*nRep)]
dummyX[((k-1)*nRep+1):(k*nRep), ] <- u.tra %*% dummyX[((k-1)*nRep+1):(k*nRep), ]
ascore[((k-1)*nRep+1):(k*nRep), ] <- rbind(u.tra[1:npc, ] %*% ascore[((k-1)*nRep+1):(k*nRep), ],
matrix(0,
nrow = nRep - npc,
ncol = npc))
z.sim.uni <- c(z.sim.uni, sqrt(d), rep(0, nRep - npc))
}
###########################################################################
designMatrix <- data.frame(rating = Y,
temp.1 = dummyX[, 1],
temp.2 = dummyX[, 2],
ID = as.factor(ID),
ID.uni = as.factor(ID.uni),
ascore = ascore,
z.sim.uni = z.sim.uni)
# 'lmer' model
designMatrix.lmm <- designMatrix
additive0.sim <- paste(1:npc, collapse = " + ascore.")
# NOTE(review): additive.sim is built but never used below — presumably left
# over from an earlier model variant; confirm before removing.
additive.sim <- paste(1:npc, collapse = " | ID) + (0 + ascore.")
# Build the lmer formula by pasting the npc score terms into the fixed part
model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
additive0.sim,
" + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)",
sep = ""))
fullReml <- lmer(model.sim, data = designMatrix.lmm)
f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
additive0.sim,
" + (0 + z.sim.uni | ID.uni)",
sep = ""))
m.slope <- lmer(f.slope, data = designMatrix.lmm)
f0 <- as.formula(" . ~ . - (0 + z.sim.uni | ID.uni)")
m0 <- update(fullReml, f0)
# Exact restricted LRT for the z.sim.uni variance component: m.slope holds
# only the tested effect, fullReml all effects, m0 the null without it
tests2 <- exactRLRT(m.slope, fullReml, m0)
pvalues.bonf <- tests2$p[1]
###################################################################################
return(list(realTau = r.sim,
pvalues.bonf = pvalues.bonf,
Merror.Var = Merror.Var,
smooth = smooth,
npc = npc,
tests2 = tests2))
}
# Setup parallel
#cores <- detectCores()
cluster <- makeCluster(cores)
clusterSetRNGStream(cluster, 20170822)
# for(nRandCovariate in 1 * 2){ # START out-outer loop
# clusterExport(cluster, c("nRandCovariate")) # casting the coefficient parameter on the random effects' covariance function
# fileName <- paste("power_", b.var, "_grp20-rep20-", nRandCovariate,".RData", sep = "") # Saving file's name
clusterExport(cluster, c("r.sim", "smooth")) # casting the coefficient parameter on the random effects' covariance function
fileName <- paste("f_power_", smooth, "_",b.var,"_seed1_grp50-rep20.RData", sep = "") # Saving file's name
# run the simulation
loopIndex <- 1
# resultDoubleList.sim <- list()
#power1.sim <- list()
power2.sim <- list()
# for(r.sim in b.var){ # START outer loop
## Run all simRep replicates on the cluster; each worker loads its own packages
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalue = x$pvalue)})
#result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalues.bonf = x$pvalues.bonf,
# smooth = x$smooth,
# npc = x$npc)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
# c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
# Rejection indicator per replicate; the mean over replicates is the
# empirical power (or type-I error when r.sim = 0)
table2.sim <- sapply(node_results, function(x) {
c(overall.sens = (sum(x$pvalues.bonf <= pvalue.true) > 0))})
Power2 <- mean(table2.sim)
#cat("Power2: ", Power2, fill = TRUE)
power2.sim[[loopIndex]] <- list(Power = Power2, realTau = r.sim, smooth = smooth)
# loopIndex <- loopIndex + 1
# } # End outer loop
save.image(file=fileName) # Auto Save
# par(mfrow=c(2,1))
# Histogram plots
# hist(sapply(result1.sim, function(x) x$pvalue),
# main = "Histogram of p-value for lme model",
# xlab = "p-value")
# hist(sapply(result2.sim, function(x) x$pvalues.bonf),
# main = "Histogram of p-value for lmer model",
# xlab = "p-value")
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests1)$statistic[1]),
# breaks = (0:110)/10,
# main = "Histogram of test-statistic for lme model",
# xlab = "Test Statistics")
#
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests2)[1,1]),
# breaks = (0:100)/10,
# main = "Histogram of test-statistic for lmer model",
# xlab = "Test Statistics")
#} # End out-outer loop
stopCluster(cluster)
|
# Thin wrapper around message() so load-time notices go through one place.
.domessage = function(msg) message(msg)
# Package load hook: populates version objects in the package namespace.
# delayedAssign() defers evaluation of getBiocReleaseVr()/getBiocDevelVr()
# until the bindings are first accessed, and tryCatch() downgrades failures
# to a message plus a NULL value instead of breaking package load.
.onLoad = function(libname, pkgname) {
    ns = asNamespace(pkgname)
    # Fixed typo in the failure message (was "Bi ocRelease")
    delayedAssign("BiocRelease", tryCatch(BiocVers(getBiocReleaseVr()),
                  error = function(x) {
                      .domessage("Unable to populate the BiocRelease object")
                      NULL
                  }),
                  eval.env = ns,
                  assign.env = ns)
    delayedAssign("develVers", getBiocDevelVr(),
                  assign.env = ns)
    delayedAssign("BiocDevel", tryCatch(BiocVers(getBiocDevelVr()),
                  error = function(x) {
                      .domessage("Unable to populate the BiocDevel object")
                      NULL
                  }
                  ), eval.env = ns,
                  assign.env = ns)
    delayedAssign("getBiocYaml", doyamlsetup(),
                  assign.env = ns)
    delayedAssign("defaultBiocRepos", getBiocReposFromRVers(),
                  assign.env = ns)
    # Make the lazily-populated version objects visible to package users
    namespaceExport(ns, c("BiocRelease", "BiocDevel"))
}
|
/R/zzz.R
|
no_license
|
Spectra-Infra/switchr
|
R
| false
| false
| 1,243
|
r
|
# Thin wrapper around message() so load-time notices go through one place.
.domessage = function(msg) message(msg)
# Package load hook: populates version objects in the package namespace.
# delayedAssign() defers evaluation of getBiocReleaseVr()/getBiocDevelVr()
# until the bindings are first accessed, and tryCatch() downgrades failures
# to a message plus a NULL value instead of breaking package load.
.onLoad = function(libname, pkgname) {
    ns = asNamespace(pkgname)
    # Fixed typo in the failure message (was "Bi ocRelease")
    delayedAssign("BiocRelease", tryCatch(BiocVers(getBiocReleaseVr()),
                  error = function(x) {
                      .domessage("Unable to populate the BiocRelease object")
                      NULL
                  }),
                  eval.env = ns,
                  assign.env = ns)
    delayedAssign("develVers", getBiocDevelVr(),
                  assign.env = ns)
    delayedAssign("BiocDevel", tryCatch(BiocVers(getBiocDevelVr()),
                  error = function(x) {
                      .domessage("Unable to populate the BiocDevel object")
                      NULL
                  }
                  ), eval.env = ns,
                  assign.env = ns)
    delayedAssign("getBiocYaml", doyamlsetup(),
                  assign.env = ns)
    delayedAssign("defaultBiocRepos", getBiocReposFromRVers(),
                  assign.env = ns)
    # Make the lazily-populated version objects visible to package users
    namespaceExport(ns, c("BiocRelease", "BiocDevel"))
}
|
## plot3.R — total PM2.5 emissions per year by source type for Baltimore
## City (fips 24510), plotted with ggplot2 and saved to plot3.png.
f <- readRDS("summarySCC_PM25.rds")
# SCC lookup table; loaded by the assignment template, not used in this plot
f2 <- readRDS("Source_Classification_Code.rds")
## Keep only Baltimore City rows and split them by source type
baltimore <- f[f$fips == 24510, ]
a <- split(baltimore, baltimore$type)
## Yearly emission totals per type (one named numeric vector per type)
x <- lapply(a, function(d) tapply(d$Emissions, d$year, sum))
## Assemble a long data frame (type, year, ems).  Built as a list of
## per-type frames bound once with rbind, instead of growing a data frame
## one row at a time inside a nested loop (which copies the whole frame on
## every append and hard-coded the 4 types / 4 years).
df <- do.call(rbind, lapply(names(x), function(typ) {
  data.frame(type = typ,
             year = as.numeric(names(x[[typ]])),
             ems = as.numeric(x[[typ]]),
             stringsAsFactors = FALSE)
}))
rownames(df) <- NULL
library(ggplot2)
g <- ggplot(df, aes(year, ems, group = type)) +
  geom_point(aes(color = type)) +
  geom_line(aes(color = type)) +
  labs(title = "Emissions by type in Baltimore City (1999-2008)", x = "Year", y = "Emissions(ton)")
g
ggsave("plot3.png")
|
/plot3.R
|
no_license
|
fyras1/PM2.5-assignment
|
R
| false
| false
| 838
|
r
|
## plot3.R — total PM2.5 emissions per year by source type for Baltimore
## City (fips 24510), plotted with ggplot2 and saved to plot3.png.
f <- readRDS("summarySCC_PM25.rds")
# SCC lookup table; loaded by the assignment template, not used in this plot
f2 <- readRDS("Source_Classification_Code.rds")
## Keep only Baltimore City rows and split them by source type
baltimore <- f[f$fips == 24510, ]
a <- split(baltimore, baltimore$type)
## Yearly emission totals per type (one named numeric vector per type)
x <- lapply(a, function(d) tapply(d$Emissions, d$year, sum))
## Assemble a long data frame (type, year, ems).  Built as a list of
## per-type frames bound once with rbind, instead of growing a data frame
## one row at a time inside a nested loop (which copies the whole frame on
## every append and hard-coded the 4 types / 4 years).
df <- do.call(rbind, lapply(names(x), function(typ) {
  data.frame(type = typ,
             year = as.numeric(names(x[[typ]])),
             ems = as.numeric(x[[typ]]),
             stringsAsFactors = FALSE)
}))
rownames(df) <- NULL
library(ggplot2)
g <- ggplot(df, aes(year, ems, group = type)) +
  geom_point(aes(color = type)) +
  geom_line(aes(color = type)) +
  labs(title = "Emissions by type in Baltimore City (1999-2008)", x = "Year", y = "Emissions(ton)")
g
ggsave("plot3.png")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/locate.r
\name{invert_match}
\alias{invert_match}
\title{Switch location of matches to location of non-matches.}
\usage{
invert_match(loc)
}
\arguments{
\item{loc}{matrix of match locations, as from \code{\link{str_locate_all}}}
}
\value{
numeric matrix giving locations of non-matches
}
\description{
Invert a matrix of match locations to match the opposite of what was
previously matched.
}
\examples{
numbers <- "1 and 2 and 4 and 456"
num_loc <- str_locate_all(numbers, "[0-9]+")[[1]]
str_sub(numbers, num_loc[, "start"], num_loc[, "end"])
text_loc <- invert_match(num_loc)
str_sub(numbers, text_loc[, "start"], text_loc[, "end"])
}
|
/man/invert_match.Rd
|
no_license
|
analyticalmonk/stringr
|
R
| false
| true
| 716
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/locate.r
\name{invert_match}
\alias{invert_match}
\title{Switch location of matches to location of non-matches.}
\usage{
invert_match(loc)
}
\arguments{
\item{loc}{matrix of match locations, as from \code{\link{str_locate_all}}}
}
\value{
numeric matrix giving locations of non-matches
}
\description{
Invert a matrix of match locations to match the opposite of what was
previously matched.
}
\examples{
numbers <- "1 and 2 and 4 and 456"
num_loc <- str_locate_all(numbers, "[0-9]+")[[1]]
str_sub(numbers, num_loc[, "start"], num_loc[, "end"])
text_loc <- invert_match(num_loc)
str_sub(numbers, text_loc[, "start"], text_loc[, "end"])
}
|
## Constructor for a matrix wrapper that can memoise its inverse.
## Returns a list of accessors: set/get for the matrix itself and
## setinv/getinv for the cached inverse.  Installing a new matrix via
## set() clears any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new matrix invalidates the cache
  }
  get <- function() x
  setinv <- function(inverse) cached_inverse <<- inverse
  getinv <- function() cached_inverse
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Return the inverse of the matrix held in a makeCacheMatrix() object,
## computing it with solve() on first use and serving the cached copy
## (after a message) on subsequent calls.  Extra arguments go to solve().
cacheSolve <- function(x, ...) {
  inverse <- x$getinv()
  if (is.null(inverse)) {
    inverse <- solve(x$get(), ...)
    x$setinv(inverse)
  } else {
    message("getting cached data")
  }
  inverse
}
|
/cachematrix.R
|
no_license
|
dasmey/ProgrammingAssignment2
|
R
| false
| false
| 614
|
r
|
## Constructor for a matrix wrapper that can memoise its inverse.
## Returns a list of accessors: set/get for the matrix itself and
## setinv/getinv for the cached inverse.  Installing a new matrix via
## set() clears any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new matrix invalidates the cache
  }
  get <- function() x
  setinv <- function(inverse) cached_inverse <<- inverse
  getinv <- function() cached_inverse
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Return the inverse of the matrix held in a makeCacheMatrix() object,
## computing it with solve() on first use and serving the cached copy
## (after a message) on subsequent calls.  Extra arguments go to solve().
cacheSolve <- function(x, ...) {
  inverse <- x$getinv()
  if (is.null(inverse)) {
    inverse <- solve(x$get(), ...)
    x$setinv(inverse)
  } else {
    message("getting cached data")
  }
  inverse
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/position-stack.r
\name{position_stack}
\alias{position_stack}
\title{Stack overlapping objects on top of one another.}
\usage{
position_stack(width = NULL, height = NULL)
}
\arguments{
\item{width}{Manually specify width (does not affect all position
adjustments)}
\item{height}{Manually specify height (does not affect all position
adjustments)}
}
\description{
Stack overlapping objects on top of one another.
}
\examples{
# Stacking is the default behaviour for most area plots:
ggplot(mtcars, aes(factor(cyl), fill = factor(vs))) + geom_bar()
# To change stacking order, use factor() to change order of levels
mtcars$vs <- factor(mtcars$vs, levels = c(1,0))
ggplot(mtcars, aes(factor(cyl), fill = factor(vs))) + geom_bar()
ggplot(diamonds, aes(price)) + geom_histogram(binwidth=500)
ggplot(diamonds, aes(price, fill = cut)) + geom_histogram(binwidth=500)
# Stacking is also useful for time series
data.set <- data.frame(
Time = c(rep(1, 4),rep(2, 4), rep(3, 4), rep(4, 4)),
Type = rep(c('a', 'b', 'c', 'd'), 4),
Value = rpois(16, 10)
)
ggplot(data.set, aes(Time, Value)) + geom_area(aes(fill = Type))
# If you want to stack lines, you need to say so:
ggplot(data.set, aes(Time, Value)) + geom_line(aes(colour = Type))
ggplot(data.set, aes(Time, Value)) +
geom_line(position = "stack", aes(colour = Type))
# But realise that this makes it *much* harder to compare individual
# trends
}
\seealso{
Other position adjustments: \code{\link{position_dodge}};
\code{\link{position_fill}};
\code{\link{position_identity}};
\code{\link{position_jitterdodge}};
\code{\link{position_jitter}};
\code{\link{position_nudge}}
}
|
/man/position_stack.Rd
|
no_license
|
katto2/ggplot2
|
R
| false
| false
| 1,729
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/position-stack.r
\name{position_stack}
\alias{position_stack}
\title{Stack overlapping objects on top of one another.}
\usage{
position_stack(width = NULL, height = NULL)
}
\arguments{
\item{width}{Manually specify width (does not affect all position
adjustments)}
\item{height}{Manually specify height (does not affect all position
adjustments)}
}
\description{
Stack overlapping objects on top of one another.
}
\examples{
# Stacking is the default behaviour for most area plots:
ggplot(mtcars, aes(factor(cyl), fill = factor(vs))) + geom_bar()
# To change stacking order, use factor() to change order of levels
mtcars$vs <- factor(mtcars$vs, levels = c(1,0))
ggplot(mtcars, aes(factor(cyl), fill = factor(vs))) + geom_bar()
ggplot(diamonds, aes(price)) + geom_histogram(binwidth=500)
ggplot(diamonds, aes(price, fill = cut)) + geom_histogram(binwidth=500)
# Stacking is also useful for time series
data.set <- data.frame(
Time = c(rep(1, 4),rep(2, 4), rep(3, 4), rep(4, 4)),
Type = rep(c('a', 'b', 'c', 'd'), 4),
Value = rpois(16, 10)
)
ggplot(data.set, aes(Time, Value)) + geom_area(aes(fill = Type))
# If you want to stack lines, you need to say so:
ggplot(data.set, aes(Time, Value)) + geom_line(aes(colour = Type))
ggplot(data.set, aes(Time, Value)) +
geom_line(position = "stack", aes(colour = Type))
# But realise that this makes it *much* harder to compare individual
# trends
}
\seealso{
Other position adjustments: \code{\link{position_dodge}};
\code{\link{position_fill}};
\code{\link{position_identity}};
\code{\link{position_jitterdodge}};
\code{\link{position_jitter}};
\code{\link{position_nudge}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcenKM.R
\name{mcenKM}
\alias{mcenKM}
\title{Estimate Statistics}
\usage{
mcenKM(x, group)
}
\arguments{
\item{x}{an object of "mcens" to compute}
\item{group}{the group variable}
}
\value{
An object of class "survfit."
}
\description{
Support function for computing statistics for multiply-censored data.
}
\keyword{misc}
|
/man/mcenKM.Rd
|
permissive
|
Zhenglei-BCS/smwrQW
|
R
| false
| true
| 402
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcenKM.R
\name{mcenKM}
\alias{mcenKM}
\title{Estimate Statistics}
\usage{
mcenKM(x, group)
}
\arguments{
\item{x}{an object of "mcens" to compute}
\item{group}{the group variable}
}
\value{
An object of class "survfit."
}
\description{
Support function for computing statistics for multiply-censored data.
}
\keyword{misc}
|
# Naive Bayes sentiment-polarity classification of movie reviews stored in
# MongoDB; adapted from:
# https://rpubs.com/cen0te/naivebayes-sentimentpolarity
# install.packages("mongolite")
# install.packages("tm")
# install.packages("RTextTools")
# install.packages("e1071")
# install.packages("dplyr")
# install.packages("caret")
# Load required libraries
library(tm)
library(RTextTools)
library(e1071)
library(dplyr)
library(caret)
library(mongolite)
## Get all data from Mongo (every document in the movies.reviews collection)
con=mongo(collection="reviews",db="movies")
df <-con$find("{}")
df_count <- nrow(df)
## The seed makes the sample function predictable. The results will stay the same
set.seed(1)
## Row indices of the first 75% of the set, used for training purposes
train_set <- 1:ceiling(df_count / 4 * 3)
## First row index of the remaining 25% test portion (a scalar; the test
## rows are taken later as test_set:df_count)
test_set <- (ceiling(df_count / 4 * 3) + 1)
## Normalise a tm corpus for modelling: lower-case everything, then strip
## punctuation, digits, and English stopwords.
clean_corpus <- function(corpus) {
  corpus <- tm_map(corpus, content_transformer(tolower))
  corpus <- tm_map(corpus, removePunctuation)
  corpus <- tm_map(corpus, removeNumbers)
  tm_map(corpus, removeWords, stopwords(kind="en"))
}
## Collapse raw term counts to presence/absence for Naive Bayes: any
## positive count becomes factor level "1", zero stays "0". Declaring
## both levels keeps the factor shape stable even when a term never
## (or always) occurs in a document set.
convert_count <- function(x) {
  presence <- as.integer(x > 0)
  factor(presence, levels = c(0, 1))
}
## Score free-text reviews with a trained Naive Bayes classifier.
## `dictionary` restricts the document-term matrix to the same feature
## set the classifier was trained on, so columns line up at predict time.
predict_by_classifier <- function(reviews, classifier, dictionary) {
  review_df <- data.frame(reviews)
  colnames(review_df)[1] <- "text"
  cleaned <- clean_corpus(Corpus(VectorSource(review_df$text)))
  dtm <- DocumentTermMatrix(cleaned, control=list(dictionary = dictionary))
  binary_dtm <- apply(dtm, 2, convert_count)
  predict(classifier, newdata=binary_dtm)
}
## Shuffle rows so the train/test slices form a random split
## (reproducible thanks to set.seed(1) above)
df <- df[sample(nrow(df)), ]
## Convert sentiment to factor instead of decimal
df$sentiment <- as.factor(df$sentiment)
## Interpret each element of vector df$text as a document & create the corpus
corpus <- Corpus(VectorSource(df$text))
## Clean the corpus (lower-case; strip punctuation, numbers, stopwords)
corpus <- clean_corpus(corpus)
## Create a documenttermmatrix of the cleaned corpus
dtm <- DocumentTermMatrix(corpus)
## Parallel train/test views of the raw data, the DTM and the corpus
df_train <- df[train_set,]
df_test <- df[test_set:df_count,]
dtm_train <- dtm[train_set,]
dtm_test <- dtm[test_set:df_count,]
corpus_clean_train <- corpus[train_set]
corpus_clean_test <- corpus[test_set:df_count]
## Shared feature dictionary: terms appearing at least 420 times in the
## training DTM (findFreqTerms lowfreq threshold)
frequent_terms <- findFreqTerms(dtm_train, 420)
## Create the DocumentTermMatrices using the frequent_terms dictionary
dtm_train_nb <- DocumentTermMatrix(corpus_clean_train, control=list(dictionary = frequent_terms))
dtm_test_nb <- DocumentTermMatrix(corpus_clean_test, control=list(dictionary = frequent_terms))
## Check for word occurrence instead of frequency. This improves the accuracy of the model
trainNB <- apply(dtm_train_nb, 2, convert_count)
testNB <- apply(dtm_test_nb, 2, convert_count)
## Train the naiveBayes model & predict using the test set
classifier <- naiveBayes(trainNB, df_train$sentiment)
prediction <- predict(classifier, newdata=testNB)
conf_mat <- confusionMatrix(prediction, df_test$sentiment)
conf_mat   # prints accuracy, kappa and per-class statistics
## My own reviews
my_reviews <- c("As you expected, Deadpool 2 is all that we expected and above. The action does not stop almost the entire movie, the new characters are great especially Cable and Domino. The humor as you expected is something without which the movie will not be the same ... yet it is a distinctive feature for the character of Deadpool. The story is good and you can expect some surprising people to interfere in the the movie, but I will not tell you, see for yourself. The effects are very good, the music choice fits great with the action scenes and the individual moments in the movie.", ## pos
"Your going to be hard pressed to find a movie more over the top than Deadpool 2, David leitch takes it to a whole new level entirely. It works namely because the violence here is just to creative. Sure it defies all Logic most of the time, but i was laughing and having a rip-roaring time throughout. Go into this movie knowing that it is a campy, corny, over the top superhero/action/comedy that is just about crazy shooting sequences and one-liners, and you'll be fine. Do not go into this movie expecting deep plot, meaningful conversations among characters, or anything remotely resembling a serious action or drama movie.", ## pos
"Summer movies often hype themselves as spectacular events not to be missed and their ad campaigns use words like \"epic\", \"spectacle\", and \"smash\" as ways to build the hype to increase advanced box office sales. The summer 2018 film season kicks off in a big way with \"Avengers: Infinity War\" and it is the rare summer film that exceeds is lofty expectations and delivers a crowning achievement for the Marvel Cinematic Universe. When Thanos (Josh Brolin), embarks on a deadly campaign to find and possess the Infinity Stones, he leaves a path of death and destruction in his path. When his pursuit leading him to Earth, Bruce Banner (Mark Ruffalo), and Doctor Strange (Benedict Cumberbatch), who enlist a reluctant Tony Stark (Robert Downey Jr.), to the cause just as the minions of Thanos arrive. With The Avengers fractured and scattered following the events of \"Captain America: Civil War\" the teams find themselves dealing with the threat in various groups as fate steps in to divide many key members of the team. This allows for a great entry by the Guardians of the Galaxy and allows the film to take a very enjoyable path. Essentially the movie weaves separate storylines into one cohesive and very satisfying epic. You have a story with the Guardians as well as one with many of the key Avengers, as well as others with characters old and new. As such it is like a Guardians, Avengers, and Marvel film all rolled into one and each one supports the other very well yet has the charm and identity you would expect. While the tone is very dark as you would expect with literally half of the known universe facing destruction, there is also some solid humor in the film that never once undermines the story or pacing. Naturally the effects are stunning as you would expect as Marvel has put a significant amount of money into the film and it shows in every eye-popping action sequence. 
What really impressed me the most was that the Russo Brothers never let the FX of the film overshadow the characters and with a very large cast, it was nice to see everyone got their moment and you did not see members of the cast trying to one up each other. There are some real surprise moments along the way and the action is easily the best of any Marvel film to date. Many fans had expressed disappointment with \"Avengers: Age of Ultron\" for me this film is significantly better than the prior film and is everything that a Marvel fan would want in a film. I was also surprised by how well Thanos was portrayed as while you hopefully will not agree with his deductions and methods; you could actually understand his motivations and it help him transcend from the usual megalomaniacs which are so common in films of his type. I am really looking forward to seeing what comes next and make sure you stay through the credits for a bonus scene. Believe the hype, \"Avengers: Infinity War\" is an epic not to be missed and is an example of what a summer event film should be. 5 stars out of 5", ## pos
"Hello, For one thing I have to say that I have waited since last year to see this movie. I have been reading a couple of negative reviews, but I said well that was going to happen lol. A lot of people were already predispose with oceans 8 because it had been done by Clooney and Pitt which I love. The problem is this movie is great, in my opinion it's not imitating oceans 11, it's a movie on its own with its own heist. I love the movie I had certain reservations when it comes to Rihanna in the movie going up against Bullock or blanchett which I love or Hathaway which in my opinion those 3 stars are very good and let's not forget Helena Bonham Carter she was brilliant in this. Okay I am deviating Rihanna was okay meshed well with the others. I love Sandra I thought she had the same arrogant good kind of cockiness I am in control thing going, was hilarious with her schemes loved it. Cate was great there was a point when I thought before seeing the movie that. Cate should have been the leader, but I saw the movie and I am happy that Bullock pulled it off. My two favorite actresses Sandra Bullock and Cate Blanchett. Anyhow the movie was great all 8 actresses worked well, I rate it a 10 because I love the ingenuity, the plot it was fun. I saw this movie for what it was, a fun movie that it is not copying oceans 11 or anything, I saw it for what it is a entertaining movie that I enjoyed. I hope that we see more sequels in regards to this movie. I do think that if you give it a shot you might like it forget all those user that were already predispose to this movie, they are not giving it a fair shot.",## pos
"I have never seen such an amazing film since I saw The Shawshank Redemption. Shawshank encompasses friendships, hardships, hopes, and dreams. And what is so great about the movie is that it moves you, it gives you hope. Even though the circumstances between the characters and the viewers are quite different, you don't feel that far removed from what the characters are going through. It is a simple film, yet it has an everlasting message. Frank Darabont didn't need to put any kind of outlandish special effects to get us to love this film, the narration and the acting does that for him. Why this movie didn't win all seven Oscars is beyond me, but don't let that sway you to not see this film, let its ranking on the IMDb's top 250 list sway you, let your friends recommendation about the movie sway you. Set aside a little over two hours tonight and rent this movie. You will finally understand what everyone is talking about and you will understand why this is my all time favorite movie.", ## pos
"I didn't think an all women movie could be any worse than the awful new Ghostbusters. But, I was wrong. Sandra Bullock once again reminds us all why she is the most boring actress still alive. Mono-tone acting and painful script writing by all. Avoid.", ## neg
"Good heavens... I didn't make it far into this embarrassing bilge-water. This desperate attempt to be like the men falls flat on its' clean shaven face. I suppose a few frustrated women will get a kick out of it, but most movie goers are going to feel ripped off by this idiotic and preposterous mess. The big names are reeling in the cash and sacrificing their legacies and dignity to perform in this circus act. Non of them look like they want to be there. I've seen more on-screen chemistry in an episode of The Teletubbies! I'd be more than a little annoyed if I had paid to watch this!", ## neg
"The filmmakers had absolutely no idea what they wanted out of this movie. You can tell it took 44 producers and 8 years to make. Who was the protagonist? What style did they want to adopt? Is it a documentary? A character piece? A chronology of John Gotti's life? I couldn't tell. There was 4 time lines happening at once. Way to many names and places. It was simply boring and had no suspense. However, Travolta's performance wasn't bad, the script was. He definitly did the best he could with what he was given. Long story short, I have no more affection or knowledge of the Gotti family now than I did before the movie. Go watch the Sopranos instead.", ## neg
"Yes, my wife and I laughed once in the first 25 minutes of this dreadful, brainless, and frighteningly unfunny film (and that was when Melissa McCarthy's character gets blown across the lawn). I realize that this film appeals to the funny bones of some viewers, but objectively speaking it is a very sorry excuse for a comedy. We both like Melissa McCarthy. We really enjoyed her in \"The Heat,\" \"Bridesmaids,\" and \"St Vincent\" where she really got to act. She's been great on \"Saturday Night Live.\" But in this film, she's got lousy material to work with and she sinks to its level. With one laugh in 25 minutes, we availed ourselves of the local theater's 30-minute satisfaction guarantee and walked out of the theater and received a refund of the ticket price. It's the first time we've done that since \"Dennis the Menace\" in 1993.", ## neg
"This was the worst movie I have ever seen. And I'm not saying that as an exaggeration. This was the absolute worst movie I have ever seen. In my life. Do. Not. Waste. Any. Time. Seeing. This. You are better off taking the 8-10 bucks and ripping it up and throwing it away so you can't go see this movie") ## neg
## Predict sentiment for the hand-written reviews above using the trained
## classifier and the frequent-terms dictionary built earlier.
## BUG FIX: the original passed `fivefreq`, a variable never defined in
## this script (left over from the rpubs tutorial this was adapted from);
## the dictionary actually built here is `frequent_terms`.
my_prediction <- predict_by_classifier(my_reviews, classifier, frequent_terms)
table("Prediction"=my_prediction)
|
/Dl.R
|
no_license
|
marcveens/movie-review-r
|
R
| false
| false
| 12,879
|
r
|
# Naive Bayes sentiment-polarity classifier for movie reviews, adapted from:
# https://rpubs.com/cen0te/naivebayes-sentimentpolarity
# One-time installs (uncomment as needed):
# install.packages("mongolite")
# install.packages("tm")
# install.packages("RTextTools")
# install.packages("e1071")
# install.packages("dplyr")
# install.packages("caret")
# Load required libraries
library(tm)          # text mining: Corpus, DocumentTermMatrix, tm_map
library(RTextTools)  # NOTE(review): nothing below appears to use this -- confirm
library(e1071)       # naiveBayes()
library(dplyr)       # %>% pipe used in clean_corpus()
library(caret)       # confusionMatrix()
library(mongolite)   # MongoDB client
## Get all data from Mongo (collection "reviews" in database "movies")
con=mongo(collection="reviews",db="movies")
df <-con$find("{}")   # "{}" = match-all query
df_count <- nrow(df)
## The seed makes the sample() shuffle below reproducible
set.seed(1)
## Row indices of the first 75% of rows, used for training
train_set <- 1:ceiling(df_count / 4 * 3)
## First row index of the remaining 25% test rows; the end index is
## df_count (the ranges test_set:df_count below build the test slice)
test_set <- (ceiling(df_count / 4 * 3) + 1)
## Normalise a tm corpus for modelling: lower-case everything, then strip
## punctuation, digits, and English stopwords.
clean_corpus <- function(corpus) {
  corpus <- tm_map(corpus, content_transformer(tolower))
  corpus <- tm_map(corpus, removePunctuation)
  corpus <- tm_map(corpus, removeNumbers)
  tm_map(corpus, removeWords, stopwords(kind="en"))
}
## Collapse raw term counts to presence/absence for Naive Bayes: any
## positive count becomes factor level "1", zero stays "0". Declaring
## both levels keeps the factor shape stable even when a term never
## (or always) occurs in a document set.
convert_count <- function(x) {
  presence <- as.integer(x > 0)
  factor(presence, levels = c(0, 1))
}
## Score free-text reviews with a trained Naive Bayes classifier.
## `dictionary` restricts the document-term matrix to the same feature
## set the classifier was trained on, so columns line up at predict time.
predict_by_classifier <- function(reviews, classifier, dictionary) {
  review_df <- data.frame(reviews)
  colnames(review_df)[1] <- "text"
  cleaned <- clean_corpus(Corpus(VectorSource(review_df$text)))
  dtm <- DocumentTermMatrix(cleaned, control=list(dictionary = dictionary))
  binary_dtm <- apply(dtm, 2, convert_count)
  predict(classifier, newdata=binary_dtm)
}
## Shuffle rows so the train/test slices form a random split
## (reproducible thanks to set.seed(1) above)
df <- df[sample(nrow(df)), ]
## Convert sentiment to factor instead of decimal
df$sentiment <- as.factor(df$sentiment)
## Interpret each element of vector df$text as a document & create the corpus
corpus <- Corpus(VectorSource(df$text))
## Clean the corpus (lower-case; strip punctuation, numbers, stopwords)
corpus <- clean_corpus(corpus)
## Create a documenttermmatrix of the cleaned corpus
dtm <- DocumentTermMatrix(corpus)
## Parallel train/test views of the raw data, the DTM and the corpus
df_train <- df[train_set,]
df_test <- df[test_set:df_count,]
dtm_train <- dtm[train_set,]
dtm_test <- dtm[test_set:df_count,]
corpus_clean_train <- corpus[train_set]
corpus_clean_test <- corpus[test_set:df_count]
## Shared feature dictionary: terms appearing at least 420 times in the
## training DTM (findFreqTerms lowfreq threshold)
frequent_terms <- findFreqTerms(dtm_train, 420)
## Create the DocumentTermMatrices using the frequent_terms dictionary
dtm_train_nb <- DocumentTermMatrix(corpus_clean_train, control=list(dictionary = frequent_terms))
dtm_test_nb <- DocumentTermMatrix(corpus_clean_test, control=list(dictionary = frequent_terms))
## Check for word occurrence instead of frequency. This improves the accuracy of the model
trainNB <- apply(dtm_train_nb, 2, convert_count)
testNB <- apply(dtm_test_nb, 2, convert_count)
## Train the naiveBayes model & predict using the test set
classifier <- naiveBayes(trainNB, df_train$sentiment)
prediction <- predict(classifier, newdata=testNB)
conf_mat <- confusionMatrix(prediction, df_test$sentiment)
conf_mat   # prints accuracy, kappa and per-class statistics
## My own reviews
my_reviews <- c("As you expected, Deadpool 2 is all that we expected and above. The action does not stop almost the entire movie, the new characters are great especially Cable and Domino. The humor as you expected is something without which the movie will not be the same ... yet it is a distinctive feature for the character of Deadpool. The story is good and you can expect some surprising people to interfere in the the movie, but I will not tell you, see for yourself. The effects are very good, the music choice fits great with the action scenes and the individual moments in the movie.", ## pos
"Your going to be hard pressed to find a movie more over the top than Deadpool 2, David leitch takes it to a whole new level entirely. It works namely because the violence here is just to creative. Sure it defies all Logic most of the time, but i was laughing and having a rip-roaring time throughout. Go into this movie knowing that it is a campy, corny, over the top superhero/action/comedy that is just about crazy shooting sequences and one-liners, and you'll be fine. Do not go into this movie expecting deep plot, meaningful conversations among characters, or anything remotely resembling a serious action or drama movie.", ## pos
"Summer movies often hype themselves as spectacular events not to be missed and their ad campaigns use words like \"epic\", \"spectacle\", and \"smash\" as ways to build the hype to increase advanced box office sales. The summer 2018 film season kicks off in a big way with \"Avengers: Infinity War\" and it is the rare summer film that exceeds is lofty expectations and delivers a crowning achievement for the Marvel Cinematic Universe. When Thanos (Josh Brolin), embarks on a deadly campaign to find and possess the Infinity Stones, he leaves a path of death and destruction in his path. When his pursuit leading him to Earth, Bruce Banner (Mark Ruffalo), and Doctor Strange (Benedict Cumberbatch), who enlist a reluctant Tony Stark (Robert Downey Jr.), to the cause just as the minions of Thanos arrive. With The Avengers fractured and scattered following the events of \"Captain America: Civil War\" the teams find themselves dealing with the threat in various groups as fate steps in to divide many key members of the team. This allows for a great entry by the Guardians of the Galaxy and allows the film to take a very enjoyable path. Essentially the movie weaves separate storylines into one cohesive and very satisfying epic. You have a story with the Guardians as well as one with many of the key Avengers, as well as others with characters old and new. As such it is like a Guardians, Avengers, and Marvel film all rolled into one and each one supports the other very well yet has the charm and identity you would expect. While the tone is very dark as you would expect with literally half of the known universe facing destruction, there is also some solid humor in the film that never once undermines the story or pacing. Naturally the effects are stunning as you would expect as Marvel has put a significant amount of money into the film and it shows in every eye-popping action sequence. 
What really impressed me the most was that the Russo Brothers never let the FX of the film overshadow the characters and with a very large cast, it was nice to see everyone got their moment and you did not see members of the cast trying to one up each other. There are some real surprise moments along the way and the action is easily the best of any Marvel film to date. Many fans had expressed disappointment with \"Avengers: Age of Ultron\" for me this film is significantly better than the prior film and is everything that a Marvel fan would want in a film. I was also surprised by how well Thanos was portrayed as while you hopefully will not agree with his deductions and methods; you could actually understand his motivations and it help him transcend from the usual megalomaniacs which are so common in films of his type. I am really looking forward to seeing what comes next and make sure you stay through the credits for a bonus scene. Believe the hype, \"Avengers: Infinity War\" is an epic not to be missed and is an example of what a summer event film should be. 5 stars out of 5", ## pos
"Hello, For one thing I have to say that I have waited since last year to see this movie. I have been reading a couple of negative reviews, but I said well that was going to happen lol. A lot of people were already predispose with oceans 8 because it had been done by Clooney and Pitt which I love. The problem is this movie is great, in my opinion it's not imitating oceans 11, it's a movie on its own with its own heist. I love the movie I had certain reservations when it comes to Rihanna in the movie going up against Bullock or blanchett which I love or Hathaway which in my opinion those 3 stars are very good and let's not forget Helena Bonham Carter she was brilliant in this. Okay I am deviating Rihanna was okay meshed well with the others. I love Sandra I thought she had the same arrogant good kind of cockiness I am in control thing going, was hilarious with her schemes loved it. Cate was great there was a point when I thought before seeing the movie that. Cate should have been the leader, but I saw the movie and I am happy that Bullock pulled it off. My two favorite actresses Sandra Bullock and Cate Blanchett. Anyhow the movie was great all 8 actresses worked well, I rate it a 10 because I love the ingenuity, the plot it was fun. I saw this movie for what it was, a fun movie that it is not copying oceans 11 or anything, I saw it for what it is a entertaining movie that I enjoyed. I hope that we see more sequels in regards to this movie. I do think that if you give it a shot you might like it forget all those user that were already predispose to this movie, they are not giving it a fair shot.",## pos
"I have never seen such an amazing film since I saw The Shawshank Redemption. Shawshank encompasses friendships, hardships, hopes, and dreams. And what is so great about the movie is that it moves you, it gives you hope. Even though the circumstances between the characters and the viewers are quite different, you don't feel that far removed from what the characters are going through. It is a simple film, yet it has an everlasting message. Frank Darabont didn't need to put any kind of outlandish special effects to get us to love this film, the narration and the acting does that for him. Why this movie didn't win all seven Oscars is beyond me, but don't let that sway you to not see this film, let its ranking on the IMDb's top 250 list sway you, let your friends recommendation about the movie sway you. Set aside a little over two hours tonight and rent this movie. You will finally understand what everyone is talking about and you will understand why this is my all time favorite movie.", ## pos
"I didn't think an all women movie could be any worse than the awful new Ghostbusters. But, I was wrong. Sandra Bullock once again reminds us all why she is the most boring actress still alive. Mono-tone acting and painful script writing by all. Avoid.", ## neg
"Good heavens... I didn't make it far into this embarrassing bilge-water. This desperate attempt to be like the men falls flat on its' clean shaven face. I suppose a few frustrated women will get a kick out of it, but most movie goers are going to feel ripped off by this idiotic and preposterous mess. The big names are reeling in the cash and sacrificing their legacies and dignity to perform in this circus act. Non of them look like they want to be there. I've seen more on-screen chemistry in an episode of The Teletubbies! I'd be more than a little annoyed if I had paid to watch this!", ## neg
"The filmmakers had absolutely no idea what they wanted out of this movie. You can tell it took 44 producers and 8 years to make. Who was the protagonist? What style did they want to adopt? Is it a documentary? A character piece? A chronology of John Gotti's life? I couldn't tell. There was 4 time lines happening at once. Way to many names and places. It was simply boring and had no suspense. However, Travolta's performance wasn't bad, the script was. He definitly did the best he could with what he was given. Long story short, I have no more affection or knowledge of the Gotti family now than I did before the movie. Go watch the Sopranos instead.", ## neg
"Yes, my wife and I laughed once in the first 25 minutes of this dreadful, brainless, and frighteningly unfunny film (and that was when Melissa McCarthy's character gets blown across the lawn). I realize that this film appeals to the funny bones of some viewers, but objectively speaking it is a very sorry excuse for a comedy. We both like Melissa McCarthy. We really enjoyed her in \"The Heat,\" \"Bridesmaids,\" and \"St Vincent\" where she really got to act. She's been great on \"Saturday Night Live.\" But in this film, she's got lousy material to work with and she sinks to its level. With one laugh in 25 minutes, we availed ourselves of the local theater's 30-minute satisfaction guarantee and walked out of the theater and received a refund of the ticket price. It's the first time we've done that since \"Dennis the Menace\" in 1993.", ## neg
"This was the worst movie I have ever seen. And I'm not saying that as an exaggeration. This was the absolute worst movie I have ever seen. In my life. Do. Not. Waste. Any. Time. Seeing. This. You are better off taking the 8-10 bucks and ripping it up and throwing it away so you can't go see this movie") ## neg
## Predict sentiment for the hand-written reviews above using the trained
## classifier and the frequent-terms dictionary built earlier.
## BUG FIX: the original passed `fivefreq`, a variable never defined in
## this script (left over from the rpubs tutorial this was adapted from);
## the dictionary actually built here is `frequent_terms`.
my_prediction <- predict_by_classifier(my_reviews, classifier, frequent_terms)
table("Prediction"=my_prediction)
|
\name{bssn-package}
\alias{bssn-package}
\docType{package}
\title{
Birnbaum-Saunders model
}
\description{
It provides the density, distribution function, quantile function,
random number generator, reliability function, failure rate, likelihood function,
moments and EM algorithm for Maximum Likelihood estimators, also empirical quantile
and generated envelope for a given sample, all this for the three parameter
Birnbaum-Saunders model based on Skew-Normal Distribution.
Also, it provides the random number generator for the mixture of Birnbaum-Saunders model based on Skew-Normal distribution. Additionally, we incorporate the EM algorithm based on the assumption that the error term follows a finite mixture of Sinh-normal distributions.
}
\details{
\tabular{ll}{
Package: \tab bssn\cr
Type: \tab Package\cr
Version: \tab 1.5\cr
Date: \tab 2020-02-12\cr
License: \tab GPL (>=2)\cr
}
}
\author{Rocio Maehara \email{rmaeharaa@gmail.com} and Luis Benites \email{lbenitesanchez@gmail.com}
}
\references{
Vilca, Filidor; Santana, L. R.; Leiva, Victor; Balakrishnan, N. (2011). Estimation of extreme percentiles in Birnbaum Saunders distributions. Computational Statistics & Data Analysis (Print), 55, 1665-1678.
Santana, Lucia; Vilca, Filidor; Leiva, Victor (2011). Influence analysis in skew-Birnbaum Saunders regression models and applications. Journal of Applied Statistics, 38, 1633-1649.
}
\keyword{package}
\seealso{
\code{\link{bssn}}, \code{\link{EMbssn}}, \code{\link{momentsbssn}}, \code{\link{ozone}}, \code{\link{reliabilitybssn}}, \code{\link{FMshnReg}}
}
\examples{
#See examples for the bssnEM function linked above.
}
|
/man/bssn-package.Rd
|
no_license
|
cran/bssn
|
R
| false
| false
| 1,682
|
rd
|
\name{bssn-package}
\alias{bssn-package}
\docType{package}
\title{
Birnbaum-Saunders model
}
\description{
It provides the density, distribution function, quantile function,
random number generator, reliability function, failure rate, likelihood function,
moments and EM algorithm for Maximum Likelihood estimators, also empirical quantile
and generated envelope for a given sample, all this for the three parameter
Birnbaum-Saunders model based on Skew-Normal Distribution.
Also, it provides the random number generator for the mixture of Birnbaum-Saunders model based on Skew-Normal distribution. Additionally, we incorporate the EM algorithm based on the assumption that the error term follows a finite mixture of Sinh-normal distributions.
}
\details{
\tabular{ll}{
Package: \tab bssn\cr
Type: \tab Package\cr
Version: \tab 1.5\cr
Date: \tab 2020-02-12\cr
License: \tab GPL (>=2)\cr
}
}
\author{Rocio Maehara \email{rmaeharaa@gmail.com} and Luis Benites \email{lbenitesanchez@gmail.com}
}
\references{
Vilca, Filidor; Santana, L. R.; Leiva, Victor; Balakrishnan, N. (2011). Estimation of extreme percentiles in Birnbaum Saunders distributions. Computational Statistics & Data Analysis (Print), 55, 1665-1678.
Santana, Lucia; Vilca, Filidor; Leiva, Victor (2011). Influence analysis in skew-Birnbaum Saunders regression models and applications. Journal of Applied Statistics, 38, 1633-1649.
}
\keyword{package}
\seealso{
\code{\link{bssn}}, \code{\link{EMbssn}}, \code{\link{momentsbssn}}, \code{\link{ozone}}, \code{\link{reliabilitybssn}}, \code{\link{FMshnReg}}
}
\examples{
#See examples for the bssnEM function linked above.
}
|
# Pub Brawl: find the nearest A&E / urgent-care centre to a pub crawl.
#install.packages("googleway")
library(googleway)   # Google Places / Distance Matrix API wrappers
#install.packages("geosphere")
library(geosphere)   # midPoint()
library(dplyr)
#install.packages("getopt")
library(getopt)      # NOTE(review): not obviously used below -- confirm
#install.packages("jsonlite")
library(jsonlite)    # NOTE(review): not obviously used below -- confirm
library(data.table)  # fread(), data.table()
library(RCurl)       # getURL()
# Start/end coordinates of the crawl (x = latitude, y = longitude)
x.start <- 51.55313
y.start <- -0.111235
x.end <- 51.563376
y.end <- -0.045317
# SECURITY: hard-coded Google API key committed to source -- revoke it
# and load it from an environment variable or config file instead.
key <- "AIzaSyAdRpFqJ31VMifeFUTrLYcw9VNoaIfwyW4"
pubs <- fread("pubs.tsv")   # pub list; rows must carry lat/lng columns
## Download the NHS England monthly A&E attendance CSV at `URL`, compute
## per-provider percentage of attendances seen within 4 hours, and join
## it to the trust lookup (`ae_trust`) and the Google location/rating
## table (`loc_ratings`). Returns a data frame with columns:
##   trust, percent_under_4hrs, centre, lat, long, google_review
integrateWithWait <- function(URL,ae_trust,loc_ratings){
  x <- getURL(URL)
  # skip=6: the NHS CSV carries six banner/header rows before the data
  out <- read.csv(textConnection(x),skip=6,header=FALSE, stringsAsFactors = FALSE)
  # Column meanings inferred from the arithmetic below: V3 = provider,
  # V4/V6 = attendance counts, V7/V9 = over-4-hour counts.
  # TODO(review): confirm against the published CSV layout.
  df <- out %>% select(V3,V4,V6,V7,V9)
  df$total_attendance <- df$V4+df$V6
  df$total_over_4hrs <- df$V7+df$V9
  df$total_under_4hrs <- df$total_attendance - df$total_over_4hrs
  df$percent_under_4hrs <- (df$total_under_4hrs/df$total_attendance)*100
  final <- df %>% select(V3,percent_under_4hrs)
  # Attach trust names, then the Google coordinates/ratings
  wait_times <- inner_join(final,ae_trust,by=c("V3"="V2"))
  final_frame <- inner_join(wait_times,loc_ratings,by=c("V1"="hospital"))
  names(final_frame) <- c("trust","percent_under_4hrs","centre","lat","long","google_review")
  # NOTE(review): assigning a string coerces any column containing NA to
  # character; downstream code re-applies as.character() so it copes,
  # but confirm this coercion is intended.
  final_frame[is.na(final_frame)] <- "Not available"
  return(final_frame)
}
## Query the Google Places API around the midpoint of the supplied start
## and end coordinates and return one data frame of candidate treatment
## centres (A&E departments plus urgent-care centres) with columns:
## hospital, lat, long, google_review.
getHospRatings <- function(x.start,y.start,x.end,y.end,key){
  # Geographic midpoint of the crawl (geosphere::midPoint)
  x.mid <- midPoint(c(x.start, y.start), c(x.end, y.end))[1]
  y.mid <- midPoint(c(x.start, y.start), c(x.end, y.end))[2]
  # Search for A&E departments within a 50 km radius of the midpoint
  pubs.list <- google_places(search_string = 'accident and emergency',
    location = c(x.mid, y.mid),
    radius = 50000,
    #rankby = 'distance',
    #place_type = c('bar', 'restaurant'),
    key = key)
  # NOTE(review): unique() is applied to names only; if the API returns
  # duplicate names this misaligns rows with location/rating -- confirm.
  accidentandemergency <- data.frame(unique(pubs.list$results$name),pubs.list$results$geometry$location, pubs.list$results$rating)
  # Same search for urgent-care centres
  uc.list <- google_places(search_string = 'urgent care centre',
    location = c(x.mid, y.mid),
    radius = 50000,
    #rankby = 'distance',
    #place_type = c('bar', 'restaurant'),
    key = key)
  urgent_Care <- data.frame(unique(uc.list$results$name),uc.list$results$geometry$location, uc.list$results$rating)
  names(urgent_Care) <- c("hospital","lat","long","google_review")
  names(accidentandemergency) <- c("hospital","lat","long","google_review")
  # Combine both result sets into one candidate table
  loc_ratings <- rbind(urgent_Care,accidentandemergency)
  return(loc_ratings)
}
## Return the coordinates (1-row data frame: lat, long) of the candidate
## centre in `final_frame` that is closest by driving distance to the
## first pub in `pubs`, via the Google Distance Matrix API.
getMinHos <- function(pubs,final_frame,key){
  # Coordinates of every candidate centre
  hospital_co <- final_frame %>% dplyr::select(lat,long)
  # Tag each row with its index so the table can be split into a list of
  # single coordinate pairs, the shape google_distance() expects.
  # FIX: original used seq(1:nrow(...)) -- seq() of a vector returns
  # seq_along(), so an empty table yielded c(1, 2) instead of nothing.
  # seq_len() is the idiomatic, empty-safe equivalent.
  hospital.dt <- data.table(hospital_co)[, hosp := seq_len(nrow(hospital_co))]
  test2 <- split(hospital.dt, by = "hosp")
  hos2 <- lapply(test2, function(x) unlist(x[, hosp := NULL]))
  # Driving distance from the first pub to every candidate centre
  df <- google_distance(origins = c(pubs[1]$lat,pubs[1]$lng),
    destinations = hos2,
    mode="driving",
    key = key)
  # Pick the candidate with the smallest distance value
  ind <- which.min(df$rows$elements[[1]]$distance[,2])
  return(data.frame(t(data.frame(hos2[ind]))))
}
## Build the user-facing summary sentence for the centre whose
## coordinates match `hos_coords` (a lat/long pair from getMinHos).
getFinalString <- function(hos_coords,final_frame){
  # Compare coordinates as strings on both sides so numeric formatting
  # differences cannot break the match.
  target_lat <- as.character(hos_coords[[1]])
  target_long <- as.character(hos_coords[[2]])
  final_frame$lat <- as.character(final_frame$lat)
  final_frame$long <- as.character(final_frame$long)
  chosen <- final_frame %>% filter(lat == target_lat & long == target_long)
  summary_text <- paste0(
    "The nearest A&E to your Pub Brawl (TM) is ", chosen$centre,
    ".\n It has an average google review of ", chosen$google_review,
    " and ", round(chosen$percent_under_4hrs, 2),
    "% of people get seen within 4 hours,\nso you probably won't die.")
  return(summary_text)
}
##### MAIN ##########
## End-to-end driver: gather nearby centres and ratings, merge with NHS
## waiting-time statistics, pick the centre closest to the first pub by
## driving distance, and return a human-readable summary string.
getNearestAandE <- function(x.start,
  y.start,
  x.end,
  y.end,
  key,
  pubs){
  loc_ratings <- getHospRatings(x.start,y.start,x.end,y.end,key)
  # Local lookup file mapping A&E centre names to NHS trusts
  ae_trust <- fread("london_AandE_plusTrust.csv",header=FALSE)
  # January 2018 monthly A&E attendance statistics from NHS England
  URL_jan18 <- "https://www.england.nhs.uk/statistics/wp-content/uploads/sites/2/2018/02/January-csv-att-e652S.csv"
  final_frame <- integrateWithWait(URL_jan18,ae_trust,loc_ratings)
  hos_coords <- getMinHos(pubs,final_frame,key)
  return(getFinalString(hos_coords,final_frame))
}
# Run the pipeline for the configured crawl and print the result string.
getNearestAandE(x.start,
  y.start,
  x.end,
  y.end,
  key,
  pubs)
|
/archive/hospitalData.R
|
permissive
|
sebSteinhauser/PubBrawl
|
R
| false
| false
| 4,425
|
r
|
# Pub Brawl: find the nearest A&E / urgent-care centre to a pub crawl.
#install.packages("googleway")
library(googleway)   # Google Places / Distance Matrix API wrappers
#install.packages("geosphere")
library(geosphere)   # midPoint()
library(dplyr)
#install.packages("getopt")
library(getopt)      # NOTE(review): not obviously used below -- confirm
#install.packages("jsonlite")
library(jsonlite)    # NOTE(review): not obviously used below -- confirm
library(data.table)  # fread(), data.table()
library(RCurl)       # getURL()
# Start/end coordinates of the crawl (x = latitude, y = longitude)
x.start <- 51.55313
y.start <- -0.111235
x.end <- 51.563376
y.end <- -0.045317
# SECURITY: hard-coded Google API key committed to source -- revoke it
# and load it from an environment variable or config file instead.
key <- "AIzaSyAdRpFqJ31VMifeFUTrLYcw9VNoaIfwyW4"
pubs <- fread("pubs.tsv")   # pub list; rows must carry lat/lng columns
## Download NHS A&E attendance statistics and join them with the trust lookup
## and the Google ratings/locations frame.
##
## URL:         CSV of monthly NHS A&E statistics (6 preamble lines skipped).
## ae_trust:    data.table mapping hospital name (V1) to trust name (V2).
## loc_ratings: output of getHospRatings(): hospital/lat/long/google_review.
integrateWithWait <- function(URL,ae_trust,loc_ratings){
    x <- getURL(URL)
    out <- read.csv(textConnection(x),skip=6,header=FALSE, stringsAsFactors = FALSE)
    ## V3 = trust name; V4/V6 = attendance counts; V7/V9 = over-4h counts
    ## (column meanings assumed from the NHS CSV layout -- TODO confirm).
    df <- out %>% select(V3,V4,V6,V7,V9)
    df$total_attendance <- df$V4+df$V6
    df$total_over_4hrs <- df$V7+df$V9
    df$total_under_4hrs <- df$total_attendance - df$total_over_4hrs
    df$percent_under_4hrs <- (df$total_under_4hrs/df$total_attendance)*100
    final <- df %>% select(V3,percent_under_4hrs)
    wait_times <- inner_join(final,ae_trust,by=c("V3"="V2"))
    final_frame <- inner_join(wait_times,loc_ratings,by=c("V1"="hospital"))
    names(final_frame) <- c("trust","percent_under_4hrs","centre","lat","long","google_review")
    ## NOTE(review): this coerces every column containing an NA to character,
    ## so percent_under_4hrs may stop being numeric downstream -- confirm.
    final_frame[is.na(final_frame)] <- "Not available"
    return(final_frame)
}
## Query Google Places for A&E departments and urgent-care centres near the
## midpoint of the start/end coordinates. Returns one data.frame with columns
## hospital, lat, long, google_review.
getHospRatings <- function(x.start,y.start,x.end,y.end,key){
    ## Geographic midpoint (geosphere) of the two route endpoints.
    x.mid <- midPoint(c(x.start, y.start), c(x.end, y.end))[1]
    y.mid <- midPoint(c(x.start, y.start), c(x.end, y.end))[2]
    ## Query the Places API for all A&Es within 50km of the midpoint.
    pubs.list <- google_places(search_string = 'accident and emergency',
                               location = c(x.mid, y.mid),
                               radius = 50000,
                               #rankby = 'distance',
                               #place_type = c('bar', 'restaurant'),
                               key = key)
    ## NOTE(review): unique() on names can mismatch row counts against
    ## location/rating if the API returns duplicate names -- confirm.
    accidentandemergency <- data.frame(unique(pubs.list$results$name),pubs.list$results$geometry$location, pubs.list$results$rating)
    ## Same query for urgent-care centres.
    uc.list <- google_places(search_string = 'urgent care centre',
                             location = c(x.mid, y.mid),
                             radius = 50000,
                             #rankby = 'distance',
                             #place_type = c('bar', 'restaurant'),
                             key = key)
    urgent_Care <- data.frame(unique(uc.list$results$name),uc.list$results$geometry$location, uc.list$results$rating)
    names(urgent_Care) <- c("hospital","lat","long","google_review")
    names(accidentandemergency) <- c("hospital","lat","long","google_review")
    loc_ratings <- rbind(urgent_Care,accidentandemergency)
    return(loc_ratings)
}
## Return the coordinates of the hospital in final_frame that is closest
## (by Google driving distance) to the first pub in `pubs`.
##
## pubs:        data.table with lat/lng columns; only the first row is used.
## final_frame: data.frame with hospital lat/long columns.
## key:         Google API key.
getMinHos <- function(pubs,final_frame,key){
    hospital_co <- final_frame %>% dplyr::select(lat,long)
    ## Number each hospital so split() yields one list element per hospital.
    hospital.dt <- data.table(hospital_co)[, hosp := seq(1:nrow(hospital_co))]
    test2 <- split(hospital.dt, by = "hosp")
    ## Each element becomes a bare c(lat, long) vector for the API call.
    hos2 <- lapply(test2, function(x) unlist(x[, hosp := NULL]))
    ## One origin (the pub), many destinations (every hospital).
    df <- google_distance(origins = c(pubs[1]$lat,pubs[1]$lng),
                          destinations = hos2,
                          mode="driving",
                          key = key)
    ## NOTE(review): assumes distance[,2] is the numeric distance column in
    ## the googleway response -- confirm against the API/package version used.
    ind <- which.min(df$rows$elements[[1]]$distance[,2])
    ## Return the winning c(lat, long) pair as a 1-row data.frame.
    return(data.frame(t(data.frame(hos2[ind]))))
}
## Build the user-facing summary string for the chosen hospital.
## Coordinates are matched as strings because they round-trip through
## character vectors in the distance lookup.
getFinalString <- function(hos_coords,final_frame){
    target_lat <- as.character(hos_coords[[1]])
    target_long <- as.character(hos_coords[[2]])
    final_frame$lat <- as.character(final_frame$lat)
    final_frame$long <- as.character(final_frame$long)
    final <- final_frame %>% filter(lat == target_lat & long == target_long)
    msg_head <- paste0("The nearest A&E to your Pub Brawl (TM) is ",
                       final$centre, ".\n")
    msg_tail <- paste0(" It has an average google review of ",
                       final$google_review, " and ",
                       round(final$percent_under_4hrs,2),
                       "% of people get seen within 4 hours,\nso you probably won't die.")
    return(paste0(msg_head, msg_tail))
}
##### MAIN ##########
## End-to-end driver: find A&E/urgent-care centres around the route midpoint,
## join NHS waiting-time statistics, and report the centre nearest the pub.
##
## x.start/y.start, x.end/y.end: lat/lon of route endpoints.
## key:  Google API key.
## pubs: data.table of pub coordinates (first row is used downstream).
getNearestAandE <- function(x.start,
                            y.start,
                            x.end,
                            y.end,
                            key,
                            pubs){
    loc_ratings <- getHospRatings(x.start,y.start,x.end,y.end,key)
    ## Hospital -> NHS trust lookup; expects the CSV in the working directory.
    ae_trust <- fread("london_AandE_plusTrust.csv",header=FALSE)
    ## NHS England A&E attendance stats; NOTE: month is hard-coded (Jan 2018).
    URL_jan18 <- "https://www.england.nhs.uk/statistics/wp-content/uploads/sites/2/2018/02/January-csv-att-e652S.csv"
    final_frame <- integrateWithWait(URL_jan18,ae_trust,loc_ratings)
    hos_coords <- getMinHos(pubs,final_frame,key)
    return(getFinalString(hos_coords,final_frame))
}
## Script entry point: run the full pipeline with the globals defined above.
getNearestAandE(x.start,
                y.start,
                x.end,
                y.end,
                key,
                pubs)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map_constructors.R
\name{asmap_step}
\alias{asmap_step}
\title{A wrapper around ASMap::mstmap with some error handling.
Associates success or error and p.value with the output.}
\usage{
asmap_step(obj, p.value = 1e-06)
}
\arguments{
\item{obj}{A cross object suitable for mstmap().}
\item{p.value}{A value between 0 and 1.}
}
\description{
A wrapper around ASMap::mstmap with some error handling.
Associates success or error and p.value with the output.
}
|
/man/asmap_step.Rd
|
no_license
|
alexwhan/genomap
|
R
| false
| true
| 535
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map_constructors.R
\name{asmap_step}
\alias{asmap_step}
\title{A wrapper around ASMap::mstmap with some error handling.
Associates success or error and p.value with the output.}
\usage{
asmap_step(obj, p.value = 1e-06)
}
\arguments{
\item{obj}{A cross object suitable for mstmap().}
\item{p.value}{A value between 0 and 1.}
}
\description{
A wrapper around ASMap::mstmap with some error handling.
Associates success or error and p.value with the output.
}
|
## Read the miRNA metadata file for a TCGA project and normalise its column
## names; guarantees a gender column exists and fills it via gender.fill().
make.mi.map <- function(project){
    METADIR <- "/group/stranger-lab/askol/TCGA/TCGA_MetaData"
    meta.miRNA.file <- paste0(METADIR,"/",project,"_miRNA_meta.txt")
    mi.map <- read.table(file=meta.miRNA.file, header=T, as.is=T, sep="\t")
    ## Distinguish the miRNA file id from the mRNA one after later merges.
    names(mi.map) <- gsub("file_id", "file_id_mi", names(mi.map))
    names(mi.map) <- gsub("cases.0.samples.0.","", names(mi.map))
    ## Some projects lack the gender column entirely; add it as all-NA.
    if (!any(grep("gender", names(mi.map)))){
        mi.map$cases.0.demographic.gender = NA
    }
    mi.map <- gender.fill(mi.map)
    return(mi.map)
}
## Read the mRNA metadata file for a TCGA project and normalise its column
## names; strips file-name extensions and fills gender via gender.fill().
make.m.map <- function(project){
    METADIR <- "/group/stranger-lab/askol/TCGA/TCGA_MetaData"
    meta.mRNA.file <- paste0(METADIR,"/",project,"_mRNA_meta.txt")
    m.map <- read.table(file=meta.mRNA.file, header=T, as.is=T, sep="\t")
    ## Distinguish the mRNA file name from the miRNA file id after merges.
    names(m.map) <- gsub("file_name", "file_name_m", names(m.map))
    ## Drop everything from the first dot (file extension) onward.
    m.map$file_name_m = gsub("\\..*","",m.map$file_name_m)
    names(m.map) <- gsub("cases.0.samples.0.","", names(m.map))
    m.map <- gender.fill(m.map)
    return(m.map)
}
## Fill in missing gender assignments per case and flag inconsistent ones.
##
## map: metadata data.frame with columns cases.0.case_id and
##      cases.0.demographic.gender.
## Returns `map` with an added "gender" column merged on case id.
gender.fill <- function(map){
    sex.tbl <- table(map[,c("cases.0.case_id","cases.0.demographic.gender")])
    ## If sex is missing there is a column with an empty name counting the
    ## unlabeled rows; drop it (male/female columns stay 0 for those cases).
    ind <- which(colnames(sex.tbl) == "")
    if (length(ind) >0){
        sex.tbl = sex.tbl[, -ind]
    }
    sex.tbl <- 1*(sex.tbl > 0)
    ## Case ids observed with more than one gender value.
    ind <- rownames(sex.tbl)[(rowSums(sex.tbl) > 1)]
    if (length(ind) > 0){
        ## BUG FIX: print() takes a single object; the original passed the ids
        ## as extra arguments (a runtime error) and also indexed
        ## rownames(sex.tbl) by name, which yields NA on an unnamed vector.
        print(paste("Case ids", paste(ind, collapse = ", "),
                    "have more than one sex assignment"))
        print("Removing these samples . . .")
        ## NOTE(review): the message claims removal, but no rows are removed
        ## here (and never were) -- confirm intent.
    }
    ## If one sample of a case lacks gender, assign it the gender of the other
    ## samples of the same case.
    sex.update <- data.frame(case.id = rownames(sex.tbl),
                             gender = NA)
    ## NOTE(review): assumes sex.tbl has exactly two columns; the %*% c(1,2)
    ## trick maps a single-gender row to its column index.
    ind = sex.tbl%*%c(1,2)
    sex.update$gender[ind !=0] <- colnames(sex.tbl)[ind[ind!=0]]
    map <- merge(map, sex.update, by.x = "cases.0.case_id", by.y="case.id", all.x=T,
                 all.y=FALSE)
    return(map)
}
## Build both metadata maps for a project.
## BUG FIX: the original called non-existent make.mi.map.file() /
## make.m.map.file() and swapped the assignments; the mRNA map comes from
## make.m.map() and the miRNA map from make.mi.map().
make.map.files <- function(project){
    m.map <- make.m.map(project)
    mi.map <- make.mi.map(project)
    return(list(m.map = m.map, mi.map = mi.map))
}
## Dead debugging snippet (guarded by `if (0)`, never runs): prints all rows
## of m.map that share a duplicated sample id.
if (0){
    dupe.ids <- unique(m.map$cases.0.samples.0.sample_id[duplicated( m.map$cases.0.samples.0.sample_id)])
    for (id in dupe.ids){
        print(m.map[m.map$cases.0.samples.0.sample_id == id,])
    }
}
## Load and clean both expression data sets for a project.
## NOTE(review): get.m.data() is declared as (countFile, fpkmFile, map,
## gene.info) but is called here with three arguments -- the FPKM file appears
## to be missing and gene.info lands in the `map` slot; confirm intent.
get.data <- function(m.file, mi.file, m.map, mi.map, gene.info){
    ## miRNA expression
    mi <- get.mi.data(mi.file, mi.map)
    ## mRNA expression
    m <- get.m.data(m.file, m.map, gene.info)
    return(list(m = m, mi = mi))
}
## Read a miRNA count matrix (first row = sample file ids) and clean it.
get.mi.data <- function(file, mi.map){
    head <- read.table(file = file, as.is=T, nrow=1, header=F)
    mi <- read.table(file = file, as.is=T, header=F, skip=1)
    ## Prefix ids with "A" so they become syntactically valid column names.
    names(mi) <- c(paste0("A",head))
    ## Drop stray repeated header rows embedded in the file body.
    ind <- grep("miRNA_ID", mi[,1])
    if (length(ind)>0){
        mi <- mi[-ind,]
    }
    mi[,-1] <- sapply(mi[,-1], as.numeric)
    rownames(mi) <- mi[,1]
    ## Remove normal samples; average or drop duplicates.
    mi <- straighten.up(mi, "", mi.map, data.type="mi")
    return(mi)
}
## Read mRNA count data, use the FPKM file to pick expressed genes, and clean
## the result via straighten.up()/clean.data().
##
## countFile: count matrix; first row is the sample header.
## fpkmFile:  FPKM matrix used for the TPM-based expression filter.
## map:       sample metadata (see straighten.up).
## gene.info: currently unused (the gene-symbol merge was removed; ~3400
##            Ensembl ids had no HGNC symbol and were excluded -- see history).
get.m.data <- function(countFile, fpkmFile, map, gene.info){
    head <- read.table(file = countFile, as.is=T, nrow=1)
    m <- read.table(file = countFile, as.is=T, header=F, skip=1)
    names(m) <- c("gene.id",paste0("A",head))
    ## Strip the Ensembl version suffix (ENSGxxxx.N -> ENSGxxxx).
    rownames(m) <- gsub("\\.[0-9]+", "", m[,1])
    m$gene.id <- rownames(m)
    ## FPKM data: used to keep genes with TPM > 0.10 in > 20% of samples.
    head <- read.table(file = fpkmFile, as.is=T, nrow=2)
    skip = 1
    ## A couple of files have two header lines; adjust skip accordingly.
    ## BUG FIX: grep() returns integer(0) on no match and
    ## `if (integer(0) == T)` raises "argument is of length zero".
    ## NOTE(review): skip is bumped when row 2 DOES contain an ENSG id, which
    ## matches the original's successful path -- confirm the file layout.
    if (length(grep("ENSG", head[2,1])) > 0){
        skip=2
    }
    head <- head[1,]
    f <- read.table(file = fpkmFile, as.is=T, header=F, skip=skip)
    names(f) <- paste0("A",head)
    names(f)[1] <- "gene.id"
    f$gene.id <- gsub("\\.[0-9]+", "", f$gene.id)
    m <- straighten.up(m, f, map, data.type="m")
    return(m)
}
## Clean an expression matrix against its sample metadata:
##   1. remove Normal-tissue samples,
##   2. resolve duplicate sample ids (drop sparse replicates, average the rest),
##   3. keep one preferred sample type per case,
##   4. filter low-information genes (clean.data),
##   5. rename columns from file ids to case ids and drop gender-less cases.
##
## data:      expression data.frame; column 1 is the gene/miRNA id, remaining
##            columns are samples named "A<file id>".
## f:         FPKM data.frame (mRNA only), forwarded to clean.data().
## map:       sample metadata from make.mi.map()/make.m.map().
## data.type: "mi" or "m"; selects which metadata id column matches `data`.
straighten.up <- function(data, f="", map, data.type){
    ## Pick the metadata column that matches the expression column names.
    map$file_id_use = ""
    if (data.type == "mi"){
        names(data)[1] = "ID"
        map$file_id_use <- map$file_id_mi
    }else{
        names(data)[1] <- "ensmbl"
        map$file_id_use <- map$file_name_m
    }
    ## REMOVE NORMAL samples ##
    norm.ind <- grep("Normal", map$sample_type)
    norm.id <- map$file_id_use[norm.ind]
    if (length(norm.ind)>0){
        norm.ind <- which(gsub("^A","",names(data)) %in% norm.id)
        data <- data[, -norm.ind]
        print(paste0("Removing ",length(norm.ind), " normal samples."))
    }
    dupe.inds <- which(duplicated(map$sample_id))
    dupes <- unique(map$sample_id[dupe.inds])
    if (length(dupes)>0){
        print(paste0(length(dupe.inds)," duplicates from ",length(dupes), " samples"))
    }
    ## Resolve duplicate samples based on the amount of missingness.
    for (samp.id in dupes){
        file.ids <- map$file_id_use[map$sample_id %in% samp.id]
        ind <- which(gsub("^A","", names(data)) %in% file.ids)
        if (length(ind) == 0){
            print(paste0("No mir/mrna data for samp.id ", samp.id, " found"))
            next
        }
        ## NOTE(review): despite the name, prop.zeros is the proportion of
        ## NON-zero entries per replicate; the filter below therefore drops
        ## the replicates with MORE expressed genes than the sparsest one,
        ## contradicting the original "removes samples with the most zeros"
        ## comment -- confirm which behavior is intended.
        prop.zeros <- colSums(data[,ind]>0, na.rm=TRUE)/(nrow(data))
        min.zeros = min(prop.zeros)
        dif.zeros = prop.zeros - min.zeros
        rmv.dif.zeros <- which(dif.zeros > 0.1)
        ## Drop replicates whose non-zero proportion differs by > 10%.
        if (length(rmv.dif.zeros) > 0){
            data <- data[,-ind[rmv.dif.zeros]]
            ind <- ind[-rmv.dif.zeros]
        }
        ## Average expression when two or more replicates remain.
        if (length(ind) > 1){
            data[, ind[1]] = rowMeans(data[,ind], na.rm=T)
            data <- data[,-ind[-1],]
        }
    }
    ## UPDATE MAP: re-derive the id column and drop metadata rows for samples
    ## no longer present in the expression data.
    ids <- gsub("^A", "", names(data))
    if (data.type == "mi"){
        map$file_id_use <- map$file_id_mi
    }else{
        map$file_id_use <- map$file_name_m
    }
    map <- map[map$file_id_use %in% ids,]
    ## Preference order used to pick one sample type per case.
    pref.order <- c("Primary Tumor", "Additional - New Primary",
                    "Recurrent Tumor", "Metastatic", "Additional Metastatic",
                    "Primary Blood Derived Cancer - Peripheral Blood")
    ## FIND DUPLICATE CASE IDS ##
    dupe.ind <- which(duplicated(map$cases.0.case_id))
    dupe.ids <- unique(map$cases.0.case_id[dupe.ind])
    if (length(dupe.ids) > 0){
        print(paste0(length(dupe.ind), " duplicates by case, made up of ",
                     length(dupe.ids), " cases."))
        ## Keep the most-preferred sample type for each duplicated case.
        ## NOTE(review): grep(id, ...) does substring matching on case ids;
        ## exact matching (==) would be safer -- confirm ids cannot nest.
        for (id in dupe.ids){
            inds <- grep(id, map$cases.0.case_id)
            file.ids <- map$file_id_use[inds]
            ind.use <- match(map$sample_type[inds], pref.order)
            ind.use <- which(ind.use == min(ind.use))
            if (length(ind.use) > 1){
                ind.use = max(ind.use)
            }
            file.id.rm <- map$file_id_use[inds[-ind.use]]
            data <- data[,-which(gsub("^A","", names(data)) %in% file.id.rm)]
        }
    }
    ## Filter genes/miRNAs with low count information.
    if (data.type == "mi"){
        data <- clean.data(data, f="", data.type=data.type)
    }else{
        data <- clean.data(data, f=f, data.type=data.type)
    }
    ## Replace file ids (current header) with case ids.
    ## NOTE(review): gsub("A","") strips EVERY "A", unlike the "^A" pattern
    ## used above; ids containing "A" would be corrupted -- confirm.
    ord <- match(gsub("A","",names(data)), map$file_id_use)
    names(data)[is.na(ord)==F] <- paste0("A",map$cases.0.case_id[ord[is.na(ord)==F]])
    ## Consolidate map to a single entry per case id.
    map <- map[map$cases.0.case_id %in% gsub("A","",names(data)),]
    tbl <- table(map$cases.0.demographic.gender)
    print(paste("Number", paste0(names(tbl),"s : ", tbl), collapse="; "))
    ## Remove cases with no gender assignment.
    ind <- map$cases.0.case_id[is.na(map$cases.0.demographic.gender)]
    ind <- which(names(data) %in% ind)
    if (length(ind) > 0){
        data <- data[,-ind]
    }
    return(data)
}
## Filter low-information genes/miRNAs from an expression data.frame.
##
## data:      data.frame with one id column ("ID" for miRNA, "ensmbl" for
##            mRNA) plus numeric sample columns.
## f:         FPKM data.frame (mRNA only) used for the TPM-based filter.
## data.type: "mi" or "m". NOTE(review): any other value leaves rm.ind
##            undefined and errors below -- confirm callers only pass these.
clean.data <- function(data, f="", data.type = "m"){
    num.cols <- which(sapply(data, is.numeric))
    rna.id.col <- which(names(data) %in% c("ID","ensmbl"))
    ## miRNA does not use a TPM cutoff: miRNAs are shorter than the read
    ## length, so counts are not expected to scale with transcript length.
    if (data.type == "mi"){
        ## Drop miRNAs where >=80% of samples have <=5 reads AND the
        ## across-sample SD is < 4.
        rm.ind <- which( rowMeans(data[,num.cols] <=5, na.rm=T) >=.8)
        rm.ind.1 <- which( apply(data[, num.cols], 1, sd, na.rm=T) < 4)
        rm.id <- data[intersect(rm.ind, rm.ind.1), rna.id.col]
        rm.ind <- which(data[, rna.id.col] %in% rm.id)
    }
    ## GTEx-style criteria: exclude if 80% of samples have <=6 reads or 80%
    ## have TPM <= 0.10 (80% rather than 20% since the goal is differential
    ## expression).
    if (data.type == "m"){
        rm.ind <- which( rowMeans(data[,num.cols] <=6, na.rm=T) >=.8)
        rm.id.1 <- data[rm.ind, rna.id.col]
        ## TPM_i = FPKM_i / sum_j(FPKM_j) * 10^6 per individual.
        ## NOTE(review): num.cols is re-derived from f and later reused to
        ## index `data`; assumes the two frames have matching column layout.
        num.cols <- which(sapply(f, is.numeric))
        f.tpm <- apply(f[,num.cols], 2, function(x) x/sum(x, na.rm=T))*10^6
        rm.ind.f <- which(
            apply(f.tpm, 1, function(x) sum(x<=0.10, na.rm=T)>=(0.80*length(num.cols)))
        )
        rm.id <- union(f$gene.id[rm.ind.f], rm.id.1)
        rm.ind <- which(data[,rna.id.col] %in% rm.id)
    }
    print(paste0("Setting ",length(rm.ind), " of ",nrow(data), " to missing. ",
                 nrow(data)-length(rm.ind), " are not missing"))
    ## BUG FIX: data[-integer(0), ] selects ZERO rows, so an empty rm.ind used
    ## to wipe the whole data set; only subset when there is something to drop.
    if (length(rm.ind) > 0){
        data <- data[-rm.ind , ]
    }
    ## Exclude gene/miRNA if 10 or fewer samples have non-NA counts.
    rm.genes <- which(rowSums(!is.na(data[,num.cols]))<=10)
    if (length(rm.genes) >0){
        data <- data[-rm.genes,]
        print(paste0("Removing ",length(rm.genes), " genes due to too many NAs. ",
                     "Fewer than 10 subjects with counts. "))
        ## BUG FIX: the remaining count is the number of rows left, not
        ## length(num.cols) - length(rm.genes) (that mixed columns with rows).
        print(paste0(nrow(data), " genes/mirs remain"))
    }
    return(data)
}
## Collapse rows sharing an ensembl_gene_id into one row whose hgnc_symbol is
## the ";"-joined set of symbols; the first row of each group is kept.
concat.dupes <- function(gi){
    repeated <- unique(gi$ensembl_gene_id[duplicated(gi$ensembl_gene_id)])
    for (ens.id in repeated){
        rows <- which(gi$ensembl_gene_id == ens.id)
        gi$hgnc_symbol[rows[1]] <- paste(gi$hgnc_symbol[rows], collapse = ";")
        gi <- gi[-rows[-1], ]
    }
    return(gi)
}
## Normalise miRNA names (dashes -> dots, "miR" -> "mir", strip .5p/.3p arm
## suffixes) and add four numbered alias columns used for matching.
process.mi.gene <- function(data){
    nm <- data$miRNA
    nm <- gsub("-", ".", nm)
    nm <- gsub("miR", "mir", nm)
    nm <- gsub("\\.5p|\\.3p", "", nm)
    data$miRNA <- nm
    for (k in 1:4) {
        data[[paste0("miRNA.alt", k)]] <- paste0(nm, ".", k)
    }
    return(data)
}
## Return the deduplicated, name-normalised set of miRNAs on the X or Y
## chromosome, read from the miRBase hsa.gff3 annotation (hard-coded path).
get.xy.mirs <- function(){
    ## Skip the 13 header lines of the GFF3 file.
    mir.locs <- read.table(file = "/group/stranger-lab/askol/TCGA/hsa.gff3", skip = 13,
                           header=FALSE, sep="\t")
    ## Column 9 is the GFF attribute string; extract the Name= value.
    mir.locs[,9] = gsub(".*Name=","",mir.locs[,9])
    mir.locs[,9] = gsub(";.*","", mir.locs[,9])
    mir.locs <- mir.locs[,c(9,1)]
    names(mir.locs) <- c("mir","chr")
    ## NOTE(review): grep("x|y|X|Y", chr) matches any chromosome label that
    ## contains those letters, not just chrX/chrY -- confirm this is safe for
    ## the naming convention used in hsa.gff3.
    xy.mirs <- mir.locs$mir[grep("x|y|X|Y",mir.locs$chr)]
    xy.mirs <- unique(xy.mirs)
    ## Normalise names to match process.mi.gene(): dashes -> dots, miR -> mir.
    xy.mirs <- gsub("-","\\.",xy.mirs)
    xy.mirs <- gsub("miR","mir", xy.mirs)
    ## Remove the 3p/5p arm suffix from the end of the name.
    xy.mirs <- gsub("\\.3p$|\\.5p$","", xy.mirs)
    return(xy.mirs)
}
## Return a data.frame of primary-transcript miRNA locations
## (mir, chr, start, end) parsed from the miRBase hsa.gff3 annotation.
get.mir.info <- function(){
    ## Skip the 13 header lines of the GFF3 file (hard-coded path).
    mir.locs <- read.table(file = "/group/stranger-lab/askol/TCGA/hsa.gff3", skip = 13,
                           header=FALSE, sep="\t")
    ## Keep only miRNA_primary_transcript entries (column 3 feature type).
    keep.ind <- grep("primary",mir.locs[,3])
    mir.locs <- mir.locs[ keep.ind, c(9,1,4,5)]
    names(mir.locs) <- c("mir","chr","start","end")
    ## Extract the Name= value from the GFF attribute string.
    mir.locs$mir = gsub(".*Name=","",mir.locs$mir)
    mir.locs$mir = gsub(";.*","", mir.locs$mir)
    return(mir.locs)
}
## Collapse miRNA primary-transcript locations to trimmed names
## (e.g. "hsa-mir-123-1" -> "hsa-mir-123"), aggregating chr/start/end and the
## full names of all transcripts sharing a trimmed name into comma-separated
## lists. Returns one row per trimmed name.
get.mir.info.trim.name <- function(){
    mir.locs <- get.mir.info()
    ## Trim each name to its first three dash-separated tokens.
    mirs <- strsplit(split="-", mir.locs$mir)
    mirs <- sapply(mirs, function(x){paste(x[1:3], collapse="-")})
    mir.locs$mir.long <- mir.locs$mir
    mir.locs$mir <- mirs
    ## Aggregate each attribute across transcripts sharing a trimmed name.
    chrs <- with(mir.locs, tapply(chr, mir, paste, collapse=","))
    chrs <- data.frame(mir = names(chrs), chrs = chrs)
    starts <- with(mir.locs, tapply(start, mir, paste, collapse=","))
    starts <- data.frame(mir = names(starts), starts = starts)
    ends <- with(mir.locs, tapply(end, mir, paste, collapse=","))
    ends <- data.frame(mir = names(ends), ends = ends)
    mir.longs <- with(mir.locs, tapply(mir.long, mir, paste, collapse=","))
    ## BUG FIX: the original built this frame as data.frame(mir=..., ends=ends),
    ## duplicating the ends columns instead of carrying the long names.
    mir.longs <- data.frame(mir = names(mir.longs), mir.longs = mir.longs)
    mir.locs <- merge(mir.locs, chrs, by="mir", all.x=T, all.y=F)
    mir.locs <- merge(mir.locs, starts, by="mir", all.x=T, all.y=F)
    mir.locs <- merge(mir.locs, ends, by="mir", all.x=T, all.y=F)
    mir.locs <- merge(mir.locs, mir.longs, by="mir", all.x=T, all.y=F)
    ## Remove duplicate trimmed names, keeping the first occurrence.
    ## BUG FIX: x[-integer(0), ] selects ZERO rows, so an empty dupe.ind used
    ## to wipe the whole frame; guard the subset.
    dupe.ind <- which(duplicated(mir.locs$mir))
    if (length(dupe.ind) > 0){
        mir.locs <- mir.locs[-dupe.ind, ]
    }
    ## Drop the per-transcript columns; the aggregated ones remain.
    mir.locs <- mir.locs[, -which(names(mir.locs) %in% c("chr","start","end"))]
    return(mir.locs)
}
|
/Create_Production_Data_funcs.r
|
no_license
|
askol/TCGA_mRNA_miRNA
|
R
| false
| false
| 15,416
|
r
|
make.mi.map <- function(project){
METADIR <- "/group/stranger-lab/askol/TCGA/TCGA_MetaData"
meta.miRNA.file <- paste0(METADIR,"/",project,"_miRNA_meta.txt")
mi.map <- read.table(file=meta.miRNA.file, header=T, as.is=T, sep="\t")
names(mi.map) <- gsub("file_id", "file_id_mi", names(mi.map))
names(mi.map) <- gsub("cases.0.samples.0.","", names(mi.map))
if (!any(grep("gender", names(mi.map)))){
mi.map$cases.0.demographic.gender = NA
}
mi.map <- gender.fill(mi.map)
return(mi.map)
}
make.m.map <- function(project){
METADIR <- "/group/stranger-lab/askol/TCGA/TCGA_MetaData"
meta.mRNA.file <- paste0(METADIR,"/",project,"_mRNA_meta.txt")
m.map <- read.table(file=meta.mRNA.file, header=T, as.is=T, sep="\t")
names(m.map) <- gsub("file_name", "file_name_m", names(m.map))
m.map$file_name_m = gsub("\\..*","",m.map$file_name_m)
names(m.map) <- gsub("cases.0.samples.0.","", names(m.map))
m.map <- gender.fill(m.map)
return(m.map)
}
## Fill in missing gender assignments per case and flag inconsistent ones.
##
## map: metadata data.frame with columns cases.0.case_id and
##      cases.0.demographic.gender.
## Returns `map` with an added "gender" column merged on case id.
gender.fill <- function(map){
    sex.tbl <- table(map[,c("cases.0.case_id","cases.0.demographic.gender")])
    ## If sex is missing there is a column with an empty name counting the
    ## unlabeled rows; drop it (male/female columns stay 0 for those cases).
    ind <- which(colnames(sex.tbl) == "")
    if (length(ind) >0){
        sex.tbl = sex.tbl[, -ind]
    }
    sex.tbl <- 1*(sex.tbl > 0)
    ## Case ids observed with more than one gender value.
    ind <- rownames(sex.tbl)[(rowSums(sex.tbl) > 1)]
    if (length(ind) > 0){
        ## BUG FIX: print() takes a single object; the original passed the ids
        ## as extra arguments (a runtime error) and also indexed
        ## rownames(sex.tbl) by name, which yields NA on an unnamed vector.
        print(paste("Case ids", paste(ind, collapse = ", "),
                    "have more than one sex assignment"))
        print("Removing these samples . . .")
        ## NOTE(review): the message claims removal, but no rows are removed
        ## here (and never were) -- confirm intent.
    }
    ## If one sample of a case lacks gender, assign it the gender of the other
    ## samples of the same case.
    sex.update <- data.frame(case.id = rownames(sex.tbl),
                             gender = NA)
    ## NOTE(review): assumes sex.tbl has exactly two columns; the %*% c(1,2)
    ## trick maps a single-gender row to its column index.
    ind = sex.tbl%*%c(1,2)
    sex.update$gender[ind !=0] <- colnames(sex.tbl)[ind[ind!=0]]
    map <- merge(map, sex.update, by.x = "cases.0.case_id", by.y="case.id", all.x=T,
                 all.y=FALSE)
    return(map)
}
## Build both metadata maps for a project.
## BUG FIX: the original called non-existent make.mi.map.file() /
## make.m.map.file() and swapped the assignments; the mRNA map comes from
## make.m.map() and the miRNA map from make.mi.map().
make.map.files <- function(project){
    m.map <- make.m.map(project)
    mi.map <- make.mi.map(project)
    return(list(m.map = m.map, mi.map = mi.map))
}
if (0){
dupe.ids <- unique(m.map$cases.0.samples.0.sample_id[duplicated( m.map$cases.0.samples.0.sample_id)])
for (id in dupe.ids){
print(m.map[m.map$cases.0.samples.0.sample_id == id,])
}
}
get.data <- function(m.file, mi.file, m.map, mi.map, gene.info){
## GET MI_RNA DATA ##
mi <- get.mi.data(mi.file, mi.map)
## GET MRNA DATA ##
m <- get.m.data(m.file, m.map, gene.info)
return(list(m = m, mi = mi))
}
get.mi.data <- function(file, mi.map){
head <- read.table(file = file, as.is=T, nrow=1, header=F)
mi <- read.table(file = file, as.is=T, header=F, skip=1)
names(mi) <- c(paste0("A",head))
ind <- grep("miRNA_ID", mi[,1])
if (length(ind)>0){
mi <- mi[-ind,]
}
mi[,-1] <- sapply(mi[,-1], as.numeric)
rownames(mi) <- mi[,1]
## CLEAN UP DATA (REMOVE NORMAL SAMPLES; AVERAGE OR REMOVE DUPES) ##
mi <- straighten.up(mi, "", mi.map, data.type="mi")
return(mi)
}
## Read mRNA count data, use the FPKM file to pick expressed genes, and clean
## the result via straighten.up()/clean.data().
##
## countFile: count matrix; first row is the sample header.
## fpkmFile:  FPKM matrix used for the TPM-based expression filter.
## map:       sample metadata (see straighten.up).
## gene.info: currently unused (the gene-symbol merge was removed; ~3400
##            Ensembl ids had no HGNC symbol and were excluded -- see history).
get.m.data <- function(countFile, fpkmFile, map, gene.info){
    head <- read.table(file = countFile, as.is=T, nrow=1)
    m <- read.table(file = countFile, as.is=T, header=F, skip=1)
    names(m) <- c("gene.id",paste0("A",head))
    ## Strip the Ensembl version suffix (ENSGxxxx.N -> ENSGxxxx).
    rownames(m) <- gsub("\\.[0-9]+", "", m[,1])
    m$gene.id <- rownames(m)
    ## FPKM data: used to keep genes with TPM > 0.10 in > 20% of samples.
    head <- read.table(file = fpkmFile, as.is=T, nrow=2)
    skip = 1
    ## A couple of files have two header lines; adjust skip accordingly.
    ## BUG FIX: grep() returns integer(0) on no match and
    ## `if (integer(0) == T)` raises "argument is of length zero".
    ## NOTE(review): skip is bumped when row 2 DOES contain an ENSG id, which
    ## matches the original's successful path -- confirm the file layout.
    if (length(grep("ENSG", head[2,1])) > 0){
        skip=2
    }
    head <- head[1,]
    f <- read.table(file = fpkmFile, as.is=T, header=F, skip=skip)
    names(f) <- paste0("A",head)
    names(f)[1] <- "gene.id"
    f$gene.id <- gsub("\\.[0-9]+", "", f$gene.id)
    m <- straighten.up(m, f, map, data.type="m")
    return(m)
}
straighten.up <- function(data, f="", map, data.type){
## TRIM EXTRA COLUMNS IF DATATYPE IS MIRNA ##
map$file_id_use = ""
if (data.type == "mi"){
names(data)[1] = "ID"
map$file_id_use <- map$file_id_mi
}else{
names(data)[1] <- "ensmbl"
map$file_id_use <- map$file_name_m
}
## REMOVE NORMAL ##
norm.ind <- grep("Normal", map$sample_type)
norm.id <- map$file_id_use[norm.ind]
if (length(norm.ind)>0){
norm.ind <- which(gsub("^A","",names(data)) %in% norm.id)
data <- data[, -norm.ind]
print(paste0("Removing ",length(norm.ind), " normal samples."))
}
dupe.inds <- which(duplicated(map$sample_id))
dupes <- unique(map$sample_id[dupe.inds])
if (length(dupes)>0){
print(paste0(length(dupe.inds)," duplicates from ",length(dupes), " samples"))
}
## TAKE CARE OF DUPLICATE SAMPLES DEPENDING ON THE AMOUNT OF MISSINGNESS ##
for (samp.id in dupes){
file.ids <- map$file_id_use[map$sample_id %in% samp.id]
ind <- which(gsub("^A","", names(data)) %in% file.ids)
if (length(ind) == 0){
print(paste0("No mir/mrna data for samp.id ", samp.id, " found"))
next
}
prop.zeros <- colSums(data[,ind]>0, na.rm=TRUE)/(nrow(data))
min.zeros = min(prop.zeros)
dif.zeros = prop.zeros - min.zeros
rmv.dif.zeros <- which(dif.zeros > 0.1)
## REMOVE SAMPLES IF DIFFERENCE IN THE NUMBER OF ZEROS > 10% ##
## REMOVES SAMPLES WITH THE MOST ZEROS ##
if (length(rmv.dif.zeros) > 0){
data <- data[,-ind[rmv.dif.zeros]]
ind <- ind[-rmv.dif.zeros]
}
## AVERAGE EXPRESSION IF NUMBER OF REPS TWO OR MORE ##
if (length(ind) > 1){
data[, ind[1]] = rowMeans(data[,ind], na.rm=T)
data <- data[,-ind[-1],]
}
}
## UPDATE MAP FILE ##
ids <- gsub("^A", "", names(data))
if (data.type == "mi"){
map$file_id_use <- map$file_id_mi
}else{
map$file_id_use <- map$file_name_m
}
## REMOVE INFO FOR SAMPLES NOT IN EXPRESSION DATA ##
map <- map[map$file_id_use %in% ids,]
pref.order <- c("Primary Tumor", "Additional - New Primary",
"Recurrent Tumor", "Metastatic", "Additional Metastatic",
"Primary Blood Derived Cancer - Peripheral Blood")
## FIND DUPLICATE CASE IDS ##
dupe.ind <- which(duplicated(map$cases.0.case_id))
dupe.ids <- unique(map$cases.0.case_id[dupe.ind])
if (length(dupe.ids) > 0){
print(paste0(length(dupe.ind), " duplicates by case, made up of ",
length(dupe.ids), " cases."))
## PICK PREFFED SAMPLE FOR EACH DUPLICATE CASE ##
for (id in dupe.ids){
inds <- grep(id, map$cases.0.case_id)
file.ids <- map$file_id_use[inds]
ind.use <- match(map$sample_type[inds], pref.order)
ind.use <- which(ind.use == min(ind.use))
if (length(ind.use) > 1){
ind.use = max(ind.use)
}
file.id.rm <- map$file_id_use[inds[-ind.use]]
data <- data[,-which(gsub("^A","", names(data)) %in% file.id.rm)]
}
}
## CLEAN DATA TO EXCLUDE GENES WITH LOW COUNT INFORMATION ##
if (data.type == "mi"){
data <- clean.data(data, f="", data.type=data.type)
}else{
data <- clean.data(data, f=f, data.type=data.type)
}
## REPLACE FILE ID (CURRENT HEADER) WITH CASE ID ##
ord <- match(gsub("A","",names(data)), map$file_id_use)
names(data)[is.na(ord)==F] <- paste0("A",map$cases.0.case_id[ord[is.na(ord)==F]])
## CONSOLIDATE MAP TO HAVE ONLY A SINGLE ENTRY PER CASE.ID ##
map <- map[map$cases.0.case_id %in% gsub("A","",names(data)),]
tbl <- table(map$cases.0.demographic.gender)
print(paste("Number", paste0(names(tbl),"s : ", tbl), collapse="; "))
## REMOVE CASES WITH NO GENDER ##
ind <- map$cases.0.case_id[is.na(map$cases.0.demographic.gender)]
ind <- which(names(data) %in% ind)
if (length(ind) > 0){
data <- data[,-ind]
}
return(data)
}
## Filter low-information genes/miRNAs from an expression data.frame.
##
## data:      data.frame with one id column ("ID" for miRNA, "ensmbl" for
##            mRNA) plus numeric sample columns.
## f:         FPKM data.frame (mRNA only) used for the TPM-based filter.
## data.type: "mi" or "m". NOTE(review): any other value leaves rm.ind
##            undefined and errors below -- confirm callers only pass these.
clean.data <- function(data, f="", data.type = "m"){
    num.cols <- which(sapply(data, is.numeric))
    rna.id.col <- which(names(data) %in% c("ID","ensmbl"))
    ## miRNA does not use a TPM cutoff: miRNAs are shorter than the read
    ## length, so counts are not expected to scale with transcript length.
    if (data.type == "mi"){
        ## Drop miRNAs where >=80% of samples have <=5 reads AND the
        ## across-sample SD is < 4.
        rm.ind <- which( rowMeans(data[,num.cols] <=5, na.rm=T) >=.8)
        rm.ind.1 <- which( apply(data[, num.cols], 1, sd, na.rm=T) < 4)
        rm.id <- data[intersect(rm.ind, rm.ind.1), rna.id.col]
        rm.ind <- which(data[, rna.id.col] %in% rm.id)
    }
    ## GTEx-style criteria: exclude if 80% of samples have <=6 reads or 80%
    ## have TPM <= 0.10 (80% rather than 20% since the goal is differential
    ## expression).
    if (data.type == "m"){
        rm.ind <- which( rowMeans(data[,num.cols] <=6, na.rm=T) >=.8)
        rm.id.1 <- data[rm.ind, rna.id.col]
        ## TPM_i = FPKM_i / sum_j(FPKM_j) * 10^6 per individual.
        ## NOTE(review): num.cols is re-derived from f and later reused to
        ## index `data`; assumes the two frames have matching column layout.
        num.cols <- which(sapply(f, is.numeric))
        f.tpm <- apply(f[,num.cols], 2, function(x) x/sum(x, na.rm=T))*10^6
        rm.ind.f <- which(
            apply(f.tpm, 1, function(x) sum(x<=0.10, na.rm=T)>=(0.80*length(num.cols)))
        )
        rm.id <- union(f$gene.id[rm.ind.f], rm.id.1)
        rm.ind <- which(data[,rna.id.col] %in% rm.id)
    }
    print(paste0("Setting ",length(rm.ind), " of ",nrow(data), " to missing. ",
                 nrow(data)-length(rm.ind), " are not missing"))
    ## BUG FIX: data[-integer(0), ] selects ZERO rows, so an empty rm.ind used
    ## to wipe the whole data set; only subset when there is something to drop.
    if (length(rm.ind) > 0){
        data <- data[-rm.ind , ]
    }
    ## Exclude gene/miRNA if 10 or fewer samples have non-NA counts.
    rm.genes <- which(rowSums(!is.na(data[,num.cols]))<=10)
    if (length(rm.genes) >0){
        data <- data[-rm.genes,]
        print(paste0("Removing ",length(rm.genes), " genes due to too many NAs. ",
                     "Fewer than 10 subjects with counts. "))
        ## BUG FIX: the remaining count is the number of rows left, not
        ## length(num.cols) - length(rm.genes) (that mixed columns with rows).
        print(paste0(nrow(data), " genes/mirs remain"))
    }
    return(data)
}
## Collapse rows sharing an ensembl_gene_id into one row whose hgnc_symbol is
## the ";"-joined set of symbols; the first row of each group is kept.
concat.dupes <- function(gi){
    repeated <- unique(gi$ensembl_gene_id[duplicated(gi$ensembl_gene_id)])
    for (ens.id in repeated){
        rows <- which(gi$ensembl_gene_id == ens.id)
        gi$hgnc_symbol[rows[1]] <- paste(gi$hgnc_symbol[rows], collapse = ";")
        gi <- gi[-rows[-1], ]
    }
    return(gi)
}
## Normalise miRNA names (dashes -> dots, "miR" -> "mir", strip .5p/.3p arm
## suffixes) and add four numbered alias columns used for matching.
process.mi.gene <- function(data){
    nm <- data$miRNA
    nm <- gsub("-", ".", nm)
    nm <- gsub("miR", "mir", nm)
    nm <- gsub("\\.5p|\\.3p", "", nm)
    data$miRNA <- nm
    for (k in 1:4) {
        data[[paste0("miRNA.alt", k)]] <- paste0(nm, ".", k)
    }
    return(data)
}
get.xy.mirs <- function(){
## GET MIR LOCATIONS ##
mir.locs <- read.table(file = "/group/stranger-lab/askol/TCGA/hsa.gff3", skip = 13,
header=FALSE, sep="\t")
mir.locs[,9] = gsub(".*Name=","",mir.locs[,9])
mir.locs[,9] = gsub(";.*","", mir.locs[,9])
mir.locs <- mir.locs[,c(9,1)]
names(mir.locs) <- c("mir","chr")
xy.mirs <- mir.locs$mir[grep("x|y|X|Y",mir.locs$chr)]
xy.mirs <- unique(xy.mirs)
xy.mirs <- gsub("-","\\.",xy.mirs)
## REMOVE 3P/5P FROM END OF MIRNA NAME ##
xy.mirs <- gsub("miR","mir", xy.mirs)
xy.mirs <- gsub("\\.3p$|\\.5p$","", xy.mirs)
return(xy.mirs)
}
get.mir.info <- function(){
## GET MIR LOCATIONS FOR PRIMARY TRANSCRIPTS ##
mir.locs <- read.table(file = "/group/stranger-lab/askol/TCGA/hsa.gff3", skip = 13,
header=FALSE, sep="\t")
## KEEP ONLY PRIMARY TRANSCRIPT ENTRIES ##
keep.ind <- grep("primary",mir.locs[,3])
mir.locs <- mir.locs[ keep.ind, c(9,1,4,5)]
names(mir.locs) <- c("mir","chr","start","end")
mir.locs$mir = gsub(".*Name=","",mir.locs$mir)
mir.locs$mir = gsub(";.*","", mir.locs$mir)
return(mir.locs)
}
## Collapse miRNA primary-transcript locations to trimmed names
## (e.g. "hsa-mir-123-1" -> "hsa-mir-123"), aggregating chr/start/end and the
## full names of all transcripts sharing a trimmed name into comma-separated
## lists. Returns one row per trimmed name.
get.mir.info.trim.name <- function(){
    mir.locs <- get.mir.info()
    ## Trim each name to its first three dash-separated tokens.
    mirs <- strsplit(split="-", mir.locs$mir)
    mirs <- sapply(mirs, function(x){paste(x[1:3], collapse="-")})
    mir.locs$mir.long <- mir.locs$mir
    mir.locs$mir <- mirs
    ## Aggregate each attribute across transcripts sharing a trimmed name.
    chrs <- with(mir.locs, tapply(chr, mir, paste, collapse=","))
    chrs <- data.frame(mir = names(chrs), chrs = chrs)
    starts <- with(mir.locs, tapply(start, mir, paste, collapse=","))
    starts <- data.frame(mir = names(starts), starts = starts)
    ends <- with(mir.locs, tapply(end, mir, paste, collapse=","))
    ends <- data.frame(mir = names(ends), ends = ends)
    mir.longs <- with(mir.locs, tapply(mir.long, mir, paste, collapse=","))
    ## BUG FIX: the original built this frame as data.frame(mir=..., ends=ends),
    ## duplicating the ends columns instead of carrying the long names.
    mir.longs <- data.frame(mir = names(mir.longs), mir.longs = mir.longs)
    mir.locs <- merge(mir.locs, chrs, by="mir", all.x=T, all.y=F)
    mir.locs <- merge(mir.locs, starts, by="mir", all.x=T, all.y=F)
    mir.locs <- merge(mir.locs, ends, by="mir", all.x=T, all.y=F)
    mir.locs <- merge(mir.locs, mir.longs, by="mir", all.x=T, all.y=F)
    ## Remove duplicate trimmed names, keeping the first occurrence.
    ## BUG FIX: x[-integer(0), ] selects ZERO rows, so an empty dupe.ind used
    ## to wipe the whole frame; guard the subset.
    dupe.ind <- which(duplicated(mir.locs$mir))
    if (length(dupe.ind) > 0){
        mir.locs <- mir.locs[-dupe.ind, ]
    }
    ## Drop the per-transcript columns; the aggregated ones remain.
    mir.locs <- mir.locs[, -which(names(mir.locs) %in% c("chr","start","end"))]
    return(mir.locs)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-eta-pairs.R
\name{eta_pairs}
\alias{eta_pairs}
\title{This creates an eta correlation which defines the relationship between parameters}
\usage{
eta_pairs(
title,
dname = NULL,
type.eta = c("mode", "mean"),
text_color = "black",
is.shrink = TRUE,
is.smooth = TRUE,
smooth = NULL,
point = NULL,
shrink = NULL,
is.hline = FALSE,
hline = NULL,
...
)
}
\arguments{
\item{title}{character the plot title}
\item{dname}{name of dataset to be used}
\item{type.eta}{\code{character} type of eta; can be 'mode' or 'mean'. 'mode' by default}
\item{text_color}{color of the correlation text in the upper matrix}
\item{is.shrink}{\code{logical} if TRUE add shrinkage to the plot}
\item{is.smooth}{\code{logical} if TRUE add smoothing to lower matrix plots}
\item{smooth}{\code{list} geom_smooth graphical parameters}
\item{point}{\code{list} geom_point graphical parameter}
\item{shrink}{\code{list} shrinkage graphical parameter}
\item{is.hline}{\code{logical} if TRUE add horizontal line to lower matrix plots}
\item{hline}{\code{list} geom_hline graphical parameters}
\item{...}{others graphics arguments passed to \code{\link{pmx_gpar}} internal object.}
}
\value{
ecorrel object
}
\description{
This creates an eta correlation which defines the relationship between parameters
}
\seealso{
Other plot_pmx:
\code{\link{distrib}()},
\code{\link{eta_cov}()},
\code{\link{individual}()},
\code{\link{plot_pmx.distrib}()},
\code{\link{plot_pmx.eta_cov}()},
\code{\link{plot_pmx.eta_pairs}()},
\code{\link{plot_pmx.individual}()},
\code{\link{plot_pmx.pmx_dens}()},
\code{\link{plot_pmx.pmx_gpar}()},
\code{\link{plot_pmx.pmx_qq}()},
\code{\link{plot_pmx.residual}()},
\code{\link{plot_pmx}()}
}
\concept{plot_pmx}
|
/man/eta_pairs.Rd
|
no_license
|
csetraynor/ggPMX
|
R
| false
| true
| 1,817
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-eta-pairs.R
\name{eta_pairs}
\alias{eta_pairs}
\title{This creates an eta correlation which defines the relationship between parameters}
\usage{
eta_pairs(
title,
dname = NULL,
type.eta = c("mode", "mean"),
text_color = "black",
is.shrink = TRUE,
is.smooth = TRUE,
smooth = NULL,
point = NULL,
shrink = NULL,
is.hline = FALSE,
hline = NULL,
...
)
}
\arguments{
\item{title}{character the plot title}
\item{dname}{name of dataset to be used}
\item{type.eta}{\code{character} type of eta; can be 'mode' or 'mean'. 'mode' by default}
\item{text_color}{color of the correlation text in the upper matrix}
\item{is.shrink}{\code{logical} if TRUE add shrinkage to the plot}
\item{is.smooth}{\code{logical} if TRUE add smoothing to lower matrix plots}
\item{smooth}{\code{list} geom_smooth graphical parameters}
\item{point}{\code{list} geom_point graphical parameter}
\item{shrink}{\code{list} shrinkage graphical parameter}
\item{is.hline}{\code{logical} if TRUE add horizontal line to lower matrix plots}
\item{hline}{\code{list} geom_hline graphical parameters}
\item{...}{others graphics arguments passed to \code{\link{pmx_gpar}} internal object.}
}
\value{
ecorrel object
}
\description{
This creates an eta correlation which defines the relationship between parameters
}
\seealso{
Other plot_pmx:
\code{\link{distrib}()},
\code{\link{eta_cov}()},
\code{\link{individual}()},
\code{\link{plot_pmx.distrib}()},
\code{\link{plot_pmx.eta_cov}()},
\code{\link{plot_pmx.eta_pairs}()},
\code{\link{plot_pmx.individual}()},
\code{\link{plot_pmx.pmx_dens}()},
\code{\link{plot_pmx.pmx_gpar}()},
\code{\link{plot_pmx.pmx_qq}()},
\code{\link{plot_pmx.residual}()},
\code{\link{plot_pmx}()}
}
\concept{plot_pmx}
|
## Plot 1: histogram of Global Active Power for 2007-02-01 .. 2007-02-02.
power <- read.table("household_power_consumption.txt",
                    header = TRUE, sep = ";", na.strings = "?",
                    colClasses = c("character", "character", rep("numeric", 7)))
## Parse dates and keep only the two target days, dropping incomplete rows.
power$Date <- as.Date(power$Date, "%d/%m/%Y")
power <- subset(power, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
power <- power[complete.cases(power), ]
## Combine Date and Time into a single timestamp column named "dateTime".
dateTime <- paste(power$Date, power$Time)
dateTime <- setNames(dateTime, "DateTime")
power <- power[, !(names(power) %in% c("Date", "Time"))]
power <- cbind(dateTime, power)
power$dateTime <- as.POSIXct(dateTime)
## Histogram drawn on screen, then copied to a 480x480 PNG.
hist(power$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", col = "red")
dev.copy(png, "plot1.png", width = 480, height = 480)
dev.off()
|
/plot1.R
|
no_license
|
sergeypara/ExData_Plotting1
|
R
| false
| false
| 747
|
r
|
## Plot 1: histogram of Global Active Power for 2007-02-01 .. 2007-02-02.
power <- read.table("household_power_consumption.txt",
                    header = TRUE, sep = ";", na.strings = "?",
                    colClasses = c("character", "character", rep("numeric", 7)))
## Parse dates and keep only the two target days, dropping incomplete rows.
power$Date <- as.Date(power$Date, "%d/%m/%Y")
power <- subset(power, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
power <- power[complete.cases(power), ]
## Combine Date and Time into a single timestamp column named "dateTime".
dateTime <- paste(power$Date, power$Time)
dateTime <- setNames(dateTime, "DateTime")
power <- power[, !(names(power) %in% c("Date", "Time"))]
power <- cbind(dateTime, power)
power$dateTime <- as.POSIXct(dateTime)
## Histogram drawn on screen, then copied to a 480x480 PNG.
hist(power$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", col = "red")
dev.copy(png, "plot1.png", width = 480, height = 480)
dev.off()
|
\name{rowMAFs}
\alias{rowMAFs}
\title{
Rowwise Minor Allele Frequency
}
\description{
Computes for each SNP represented by a row of a matrix the frequency of the minor allele.
}
\usage{
rowMAFs(x, check = TRUE)
}
\arguments{
\item{x}{a matrix in which each row represents a SNP and each column a subject,
where the genotypes of each SNP are coded by 1 (for the homozygous reference genotype),
2 (heterozygous), and 3 (homozygous variant). NAs are also allowed.}
\item{check}{should it be checked if the matrix contains values differing from 1, 2, and 3?
It is highly recommended to leave \code{check = TRUE}. Setting \code{check = FALSE}
reduces the computation time only slightly.}
}
\value{a vector containing the minor allele frequency of the SNPs represented by \code{x}.
}
\author{
Holger Schwender, \email{holger.schwender@udo.edu}
}
\keyword{array}
\keyword{manip}
|
/man/rowMAFs.Rd
|
no_license
|
cran/scrime
|
R
| false
| false
| 918
|
rd
|
\name{rowMAFs}
\alias{rowMAFs}
\title{
Rowwise Minor Allele Frequency
}
\description{
Computes for each SNP represented by a row of a matrix the frequency of the minor allele.
}
\usage{
rowMAFs(x, check = TRUE)
}
\arguments{
\item{x}{a matrix in which each row represents a SNP and each column a subject,
where the genotypes of each SNP are coded by 1 (for the homozygous reference genotype),
2 (heterozygous), and 3 (homozygous variant). NAs are also allowed.}
\item{check}{should it be checked if the matrix contains values differing from 1, 2, and 3?
It is highly recommended to leave \code{check = TRUE}. Setting \code{check = FALSE}
reduces the computation time only slightly.}
}
\value{a vector containing the minor allele frequency of the SNPs represented by \code{x}.
}
\author{
Holger Schwender, \email{holger.schwender@udo.edu}
}
\keyword{array}
\keyword{manip}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{is_internship}
\alias{is_internship}
\title{Determine if posting is an internship}
\usage{
is_internship(text)
}
\arguments{
\item{text}{= text string}
}
\value{
a logical indicating whether \code{text} describes an internship posting
}
\description{
Determine if posting is an internship
}
|
/resmatch/man/is_internship.Rd
|
no_license
|
wjburton/resume-matching
|
R
| false
| true
| 421
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{is_internship}
\alias{is_internship}
\title{Determine if posting is an internship}
\usage{
is_internship(text)
}
\arguments{
\item{text}{= text string}
}
\value{
a logical indicating whether \code{text} describes an internship posting
}
\description{
Determine if posting is an internship
}
|
# Author: Matthew Phelps
# Desc: Compare expected number of outbreaks to take hold against the actual number
# NOTE(review): clearing the workspace and installing packages at run time are
# side effects best removed from analysis scripts; kept for reproducibility.
rm(list = ls())
devtools::install_github('matthew-phelps/CholeraDataDK', force = FALSE)
library(CholeraDataDK)
library(tidyverse)
source("functions.R")   # provides parishDataPrep(), probOutbreak(), plotOutbreakPr()
load("data/r0.Rdata")   # loads the `r0` data frame (R0 estimates with CIs)
par_cases <- CholeraDataDK::parish_cases
par_cases <- parishDataPrep(par_cases)
# Observed outbreak probability across a grid of attack-rate thresholds.
thresh <- seq(0.1, 5, length.out = 100)
obs_prob <- unlist(lapply(thresh, probOutbreak, par_cases)) %>%
  cbind(thresh, .) %>%
  data.frame() %>%
  `colnames<-`(c("AR", "prob"))
# Parishes with >10 cases but no attack rate -- kept aside for inspection.
problem_data <- par_cases %>%
  filter(is.na(AR) & Cases > 10)
# Expected outbreak probability from branching-process theory: 1 - 1/R0.
expProb <- function(r0) {
  1 - 1 / r0
}
r0 <- r0[r0[["method"]]!="TD", ]   # drop time-dependent (TD) estimates
r0_vec <- seq(min(r0$ci_l), max(r0$ci_u), length.out = 100)
r0_prob <- expProb(r0_vec)
plotOutbreakPr(obs_prob, r0_prob)
|
/outbreak-probability.R
|
no_license
|
matthew-phelps/Cholera-DK-paper1
|
R
| false
| false
| 802
|
r
|
# Author: Matthew Phelps
# Desc: Compare expected number of outbreaks to take hold against the actual number
# NOTE(review): clearing the workspace and installing packages at run time are
# side effects best removed from analysis scripts; kept for reproducibility.
rm(list = ls())
devtools::install_github('matthew-phelps/CholeraDataDK', force = FALSE)
library(CholeraDataDK)
library(tidyverse)
source("functions.R")   # provides parishDataPrep(), probOutbreak(), plotOutbreakPr()
load("data/r0.Rdata")   # loads the `r0` data frame (R0 estimates with CIs)
par_cases <- CholeraDataDK::parish_cases
par_cases <- parishDataPrep(par_cases)
# Observed outbreak probability across a grid of attack-rate thresholds.
thresh <- seq(0.1, 5, length.out = 100)
obs_prob <- unlist(lapply(thresh, probOutbreak, par_cases)) %>%
  cbind(thresh, .) %>%
  data.frame() %>%
  `colnames<-`(c("AR", "prob"))
# Parishes with >10 cases but no attack rate -- kept aside for inspection.
problem_data <- par_cases %>%
  filter(is.na(AR) & Cases > 10)
# Expected outbreak probability from branching-process theory: 1 - 1/R0.
expProb <- function(r0) {
  1 - 1 / r0
}
r0 <- r0[r0[["method"]]!="TD", ]   # drop time-dependent (TD) estimates
r0_vec <- seq(min(r0$ci_l), max(r0$ci_u), length.out = 100)
r0_prob <- expProb(r0_vec)
plotOutbreakPr(obs_prob, r0_prob)
|
# Decision-tree regression on Position_Salaries.csv.
# BUG FIX: the script called rpart() and ggplot() without loading their
# packages, so it failed in a fresh R session.
library(rpart)
library(ggplot2)
dataset <- read.csv('Position_Salaries.csv')
dataset <- dataset[2:3]   # keep only Level and Salary columns
# Fitting decision tree regression to dataset.
# NOTE(review): minsplit is a count of observations; 0.1 effectively disables
# the minimum-split-size constraint -- confirm this is intended.
regressor <- rpart(formula = Salary ~ ., data = dataset,
                   control = rpart.control(minsplit = 0.1))
# Predicting a new result at Level = 6.5.
y_pred <- predict(regressor, data.frame(Level = 6.5))
# Visualising the fitted step function on a fine grid.
x_grid <- seq(min(dataset$Level), max(dataset$Level), 0.01)
ggplot() +
  geom_point(aes(x = dataset$Level, y = dataset$Salary), colour = 'red') +
  geom_line(aes(x = x_grid,
                y = predict(regressor, newdata = data.frame(Level = x_grid))),
            colour = 'blue') +
  ggtitle('Truth or Bluff (Regression Model)') +
  xlab('Level') +
  ylab('Salary')
|
/decisontree.R
|
no_license
|
rupinthakur23/Sentiment-Analysis
|
R
| false
| false
| 655
|
r
|
# Decision-tree regression on Position_Salaries.csv.
# BUG FIX: the script called rpart() and ggplot() without loading their
# packages, so it failed in a fresh R session.
library(rpart)
library(ggplot2)
dataset <- read.csv('Position_Salaries.csv')
dataset <- dataset[2:3]   # keep only Level and Salary columns
# Fitting decision tree regression to dataset.
# NOTE(review): minsplit is a count of observations; 0.1 effectively disables
# the minimum-split-size constraint -- confirm this is intended.
regressor <- rpart(formula = Salary ~ ., data = dataset,
                   control = rpart.control(minsplit = 0.1))
# Predicting a new result at Level = 6.5.
y_pred <- predict(regressor, data.frame(Level = 6.5))
# Visualising the fitted step function on a fine grid.
x_grid <- seq(min(dataset$Level), max(dataset$Level), 0.01)
ggplot() +
  geom_point(aes(x = dataset$Level, y = dataset$Salary), colour = 'red') +
  geom_line(aes(x = x_grid,
                y = predict(regressor, newdata = data.frame(Level = x_grid))),
            colour = 'blue') +
  ggtitle('Truth or Bluff (Regression Model)') +
  xlab('Level') +
  ylab('Salary')
|
# Move into the TP working directory and exercise prodtrans() on two
# small test matrices.
setwd("~/Documents/Etudes_Informatique/Cours/UTC/GI04/SY09_Data_Mining/TPs")
getwd()
source("TP0/prodtrans.R")
A <- matrix(1:9, nrow = 3, byrow = TRUE)
B <- matrix(c(5, 3, 7, 4, 6, 3, 1, 6, 3, 2, 8, 5), nrow = 4, byrow = TRUE)
ptA <- prodtrans(A)
ptB <- prodtrans(B)
|
/TP0/src/1_3_Scripts_Fonctions.R
|
no_license
|
raphaelhamonnais/UTC_SY09_DataMining
|
R
| false
| false
| 231
|
r
|
# Move into the TP working directory and exercise prodtrans() on two
# small test matrices.
setwd("~/Documents/Etudes_Informatique/Cours/UTC/GI04/SY09_Data_Mining/TPs")
getwd()
source("TP0/prodtrans.R")
A <- matrix(1:9, nrow = 3, byrow = TRUE)
B <- matrix(c(5, 3, 7, 4, 6, 3, 1, 6, 3, 2, 8, 5), nrow = 4, byrow = TRUE)
ptA <- prodtrans(A)
ptB <- prodtrans(B)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{tidy.sclr}
\alias{tidy.sclr}
\title{Tidy a \code{sclr} object.}
\usage{
\method{tidy}{sclr}(x, ci_level = 0.95, ...)
}
\arguments{
\item{x}{An object returned by \code{\link{sclr}}.}
\item{ci_level}{Confidence level for the intervals.}
\item{...}{Not used. Needed to match generic signature.}
}
\value{
A \code{\link[tibble]{tibble}} with one row per model parameter.
Columns:
\item{term}{Name of model parameter.}
\item{estimate}{Point estimate.}
\item{std_error}{Standard error.}
\item{conf_low}{Lower bound of the confidence interval.}
\item{conf_high}{Upper bound of the confidence interval.}
}
\description{
Summarises the objects returned by \code{\link{sclr}}
into a \code{\link[tibble]{tibble}}.
}
|
/man/tidy.sclr.Rd
|
no_license
|
cran/sclr
|
R
| false
| true
| 804
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{tidy.sclr}
\alias{tidy.sclr}
\title{Tidy a \code{sclr} object.}
\usage{
\method{tidy}{sclr}(x, ci_level = 0.95, ...)
}
\arguments{
\item{x}{An object returned by \code{\link{sclr}}.}
\item{ci_level}{Confidence level for the intervals.}
\item{...}{Not used. Needed to match generic signature.}
}
\value{
A \code{\link[tibble]{tibble}} with one row per model parameter.
Columns:
\item{term}{Name of model parameter.}
\item{estimate}{Point estimate.}
\item{std_error}{Standard error.}
\item{conf_low}{Lower bound of the confidence interval.}
\item{conf_high}{Upper bound of the confidence interval.}
}
\description{
Summarises the objects returned by \code{\link{sclr}}
into a \code{\link[tibble]{tibble}}.
}
|
# Download and unzip "exdata_data_NEI_data.zip" if it is not already on disk.
## The download will likely take a few seconds. Be patient!
dataInput_file <- "exdata_data_NEI_data.zip"
if (!file.exists(dataInput_file)) {
data_URL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(data_URL, destfile = dataInput_file)
unzip (zipfile = dataInput_file)
}
# Read "summarySCC_PM25.rds" only if NEI is not already in the workspace.
# (Note: exists() checks the R session for the object, not the file on disk.)
if (!exists("NEI")) {
NEI <- readRDS("summarySCC_PM25.rds")
}
dim(NEI)
head(NEI)
# Read "Source_Classification_Code.rds" only if SCC is not already loaded.
if (!exists("SCC")) {
SCC <- readRDS("Source_Classification_Code.rds")
}
dim(SCC)
head(SCC)
## Have total emissions from PM2.5 decreased in the Baltimore City,
## Maryland (fips == "24510")
## from 1999 to 2008? Use the base plotting system to make a plot answering this question.
subset_NEI <- NEI[NEI$fips=="24510", ]
# Sum PM2.5 emissions per year for Baltimore City, then plot to PNG.
total_emissions_PM25 <- aggregate(Emissions ~ year, subset_NEI, sum)
png('plot2.png')
barplot(height=total_emissions_PM25$Emissions,
names.arg=total_emissions_PM25$year,
xlab="years",
ylab=expression('total PM'[2.5]*' emission'),
main=expression('Total PM'[2.5]*' in the Baltimore City, MD emissions at various years'))
dev.off()
|
/plot2.R
|
no_license
|
myrba2win/exploratory-data-analysis-course-project-2
|
R
| false
| false
| 1,355
|
r
|
# Download and unzip "exdata_data_NEI_data.zip" if it is not already on disk.
## The download will likely take a few seconds. Be patient!
dataInput_file <- "exdata_data_NEI_data.zip"
if (!file.exists(dataInput_file)) {
data_URL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(data_URL, destfile = dataInput_file)
unzip (zipfile = dataInput_file)
}
# Read "summarySCC_PM25.rds" only if NEI is not already in the workspace.
# (Note: exists() checks the R session for the object, not the file on disk.)
if (!exists("NEI")) {
NEI <- readRDS("summarySCC_PM25.rds")
}
dim(NEI)
head(NEI)
# Read "Source_Classification_Code.rds" only if SCC is not already loaded.
if (!exists("SCC")) {
SCC <- readRDS("Source_Classification_Code.rds")
}
dim(SCC)
head(SCC)
## Have total emissions from PM2.5 decreased in the Baltimore City,
## Maryland (fips == "24510")
## from 1999 to 2008? Use the base plotting system to make a plot answering this question.
subset_NEI <- NEI[NEI$fips=="24510", ]
# Sum PM2.5 emissions per year for Baltimore City, then plot to PNG.
total_emissions_PM25 <- aggregate(Emissions ~ year, subset_NEI, sum)
png('plot2.png')
barplot(height=total_emissions_PM25$Emissions,
names.arg=total_emissions_PM25$year,
xlab="years",
ylab=expression('total PM'[2.5]*' emission'),
main=expression('Total PM'[2.5]*' in the Baltimore City, MD emissions at various years'))
dev.off()
|
#' GH random variate
#' Eq. (5)
#'
#' Draws generalized hyperbolic (GH) random variates via the normal
#' mean-variance mixture representation y = mu + beta*x + sqrt(x)*z,
#' where x is generalized inverse Gaussian (GIG) and z is standard normal.
#'
#' @param n number of variates to draw; should be even when
#'   \code{antithetic = TRUE} since n/2 mixing draws are used
#' @param mu GH parameter in the paper
#' @param alpha GH parameter in the paper
#' @param beta GH parameter in the paper
#' @param delta GH parameter in the paper
#' @param p GH parameter in the paper
#' @param param (mu, delta, alpha, beta, p) instead of above
#' @param n.quad number of quadrature points
#' @param correct normalize weights if TRUE
#' @param GIGrvg If TRUE, use GIGrvg::rgig instead
#' @param antithetic if TRUE, use antithetic normal draws c(z, -z) for
#'   variance reduction
#' @return numeric vector of GH random variates
#' @import stats
#'
gh.rand <- function(n=99, mu=0, alpha=1, beta=0, delta=1, p=-0.5,
                    param=c(mu, delta, alpha, beta, p),
                    n.quad=50, correct=TRUE, GIGrvg=FALSE, antithetic=TRUE) {
  ## param takes precedence over the individual arguments; note its order
  ## is (mu, delta, alpha, beta, p), not the argument order.
  mu <- param[1]
  delta <- param[2]
  gamma <- sqrt(param[3]^2 - param[4]^2)  # gamma = sqrt(alpha^2 - beta^2)
  beta <- param[4]
  p <- param[5]
  ## NOTE(review): with the default n = 99 and antithetic = TRUE, n/2 is
  ## fractional and silently truncated by the samplers -- confirm n is even.
  n_nig <- ifelse(antithetic, n/2, n)
  if(GIGrvg){
    x <- GIGrvg::rgig(n_nig, lambda=p, psi=gamma^2, chi=delta^2)
  } else {
    x <- gig.rand(n_nig, gamma=gamma, delta=delta, p=p,
                  n.quad=n.quad, correct=correct, antithetic=FALSE)
  }
  if(antithetic){
    z <- rnorm(n/2)
    y <- mu + beta*x + sqrt(x)*c(z, -z)  # antithetic pairs reuse the mixing x
  } else {
    z <- rnorm(n)
    y <- mu + beta*x + sqrt(x)*z
  }
  return( y )
}
|
/pkg/R/gh_rand.R
|
permissive
|
PyFE/InvGaussianQuad-R
|
R
| false
| false
| 1,198
|
r
|
#' GH random variate
#' Eq. (5)
#'
#' Draws generalized hyperbolic (GH) random variates via the normal
#' mean-variance mixture representation y = mu + beta*x + sqrt(x)*z,
#' where x is generalized inverse Gaussian (GIG) and z is standard normal.
#'
#' @param n number of variates to draw; should be even when
#'   \code{antithetic = TRUE} since n/2 mixing draws are used
#' @param mu GH parameter in the paper
#' @param alpha GH parameter in the paper
#' @param beta GH parameter in the paper
#' @param delta GH parameter in the paper
#' @param p GH parameter in the paper
#' @param param (mu, delta, alpha, beta, p) instead of above
#' @param n.quad number of quadrature points
#' @param correct normalize weights if TRUE
#' @param GIGrvg If TRUE, use GIGrvg::rgig instead
#' @param antithetic if TRUE, use antithetic normal draws c(z, -z) for
#'   variance reduction
#' @return numeric vector of GH random variates
#' @import stats
#'
gh.rand <- function(n=99, mu=0, alpha=1, beta=0, delta=1, p=-0.5,
                    param=c(mu, delta, alpha, beta, p),
                    n.quad=50, correct=TRUE, GIGrvg=FALSE, antithetic=TRUE) {
  ## param takes precedence over the individual arguments; note its order
  ## is (mu, delta, alpha, beta, p), not the argument order.
  mu <- param[1]
  delta <- param[2]
  gamma <- sqrt(param[3]^2 - param[4]^2)  # gamma = sqrt(alpha^2 - beta^2)
  beta <- param[4]
  p <- param[5]
  ## NOTE(review): with the default n = 99 and antithetic = TRUE, n/2 is
  ## fractional and silently truncated by the samplers -- confirm n is even.
  n_nig <- ifelse(antithetic, n/2, n)
  if(GIGrvg){
    x <- GIGrvg::rgig(n_nig, lambda=p, psi=gamma^2, chi=delta^2)
  } else {
    x <- gig.rand(n_nig, gamma=gamma, delta=delta, p=p,
                  n.quad=n.quad, correct=correct, antithetic=FALSE)
  }
  if(antithetic){
    z <- rnorm(n/2)
    y <- mu + beta*x + sqrt(x)*c(z, -z)  # antithetic pairs reuse the mixing x
  } else {
    z <- rnorm(n)
    y <- mu + beta*x + sqrt(x)*z
  }
  return( y )
}
|
# Exploratory random-forest models of soundscape classes against climatic and
# satellite covariates.
# BUG FIXES (review):
#  - print()/varImpPlot()/importance() calls referenced objects that did not
#    exist at that point (rf_general_climatic, rf, rf_general before creation);
#  - tuneRF()'s result matrix was passed as `mtry =` instead of `best.m`;
#  - metaMDS() requires the vegan package, which was never loaded.
rm(list = ls())
library(tidyverse)
library(ggplot2)
library(randomForest)
library(rmarkdown)
library(vegan)  # metaMDS()
# Helper: build absolute paths into the project data directory.
getDataPath <- function (...) {
  return(file.path("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Chapter2_SoundscapeTemporalAssessment", ...))
}
df <- read.csv(getDataPath("15.02.2022_completedf.csv")) %>%
  dplyr::select(RFclass, general_category, index_value, Date, week_day, Recording_time, month, index, moon_illu, TempOut, HumOut, Rain, EBI_RANGE, NDVI_MEAN, MSAVI_RANGE, everything(), -X) %>%
  droplevels()
df$month <- as.factor(df$month)
# Climatic ----
df_monthly <- dplyr::select(df, RFclass, general_category, index_value, index, Date, week_day, Recording_time, month, moon_illu, TempOut, HumOut, Rain) %>%
  na.exclude(.) %>%
  droplevels(.)
## General category; climatic vars + moon ----
rf_general_monthly <- randomForest(general_category ~ moon_illu + TempOut + HumOut + Rain + week_day + Recording_time + month, data = df_monthly, importance = TRUE, proximity = TRUE)
print(rf_general_monthly)       # was print(rf_general_climatic): undefined
varImpPlot(rf_general_monthly)  # was varImpPlot(rf_general_climatic)
nmds <- metaMDS(df_monthly[,9:12], k = 2)
### Optimising
importance <- as.data.frame(importance(rf_general_monthly)) %>%  # was rf_general (not yet created)
  filter(., MeanDecreaseAccuracy >= 0) %>%
  row.names(.)
model_data <- dplyr::select(df_monthly, general_category, all_of(importance)) %>%
  droplevels(.)
mtry <- tuneRF(model_data[-1],model_data$general_category, ntreeTry=500,
               stepFactor=1.5,improve=0.01, trace=TRUE, plot=TRUE)
best.m <- mtry[mtry[, 2] == min(mtry[, 2]), 1]
print(mtry)
print(best.m)
nmds <- metaMDS(df_monthly[,9:12], k = 2)
#RFclass; climatic vars + moon ----
rf_class <- randomForest(RFclass ~ moon_illu + TempOut + HumOut + Rain + week_day + Recording_time + month, data = df_monthly)
print(rf_class)       # was print(rf): undefined at this point
varImpPlot(rf_class)  # was varImpPlot(rf)
#Biophony only; climatic vars + moon ----
df_biophony <- dplyr::select(df, RFclass, general_category, index_value, index, Date, week_day, Recording_time, month, moon_illu, TempOut, HumOut, Rain) %>%
  filter(general_category == "biophony") %>%
  na.exclude(.) %>%
  droplevels(.)
rf_general <- randomForest(RFclass ~ moon_illu + TempOut + HumOut + Rain + week_day + month + Recording_time, data = df_biophony, importance = TRUE, proximity = TRUE)
print(rf_general)
varImpPlot(rf_general)
#Optimising
importance <- as.data.frame(importance(rf_general)) %>%
  filter(., MeanDecreaseAccuracy >= 0) %>%
  row.names(.)
#
model_data <- dplyr::select(df_biophony, RFclass, all_of(importance)) %>%
  droplevels(.)
#floor(sqrt(ncol(model_data) - 1))
mtry <- tuneRF(model_data[-1],model_data$RFclass, ntreeTry=500,
               stepFactor=1.5,improve=0.01, trace=TRUE, plot=TRUE)
best.m <- mtry[mtry[, 2] == min(mtry[, 2]), 1]
print(mtry)
print(best.m)
# BUG FIX: was mtry = mtry, which passed tuneRF's whole result matrix.
rf_general <- randomForest(RFclass ~ moon_illu + TempOut + HumOut + Rain + week_day + month + Recording_time, data = df_biophony, importance = TRUE, proximity = TRUE, mtry = best.m)
print(rf_general)
varImpPlot(rf_general)
nmds <- metaMDS(df_biophony[,9:11], k = 2)
nmds <- metaMDS(df_monthly[,9:12], k = 2)
#General category; satellite ----
df_monthly <- dplyr::select(df, RFclass, general_category, Date, week_day, Recording_time, month, NDVI_MEAN, EBI_RANGE, MSAVI_RANGE) %>%
  na.exclude(.) %>%
  droplevels(.)
rf_general <- randomForest(general_category ~ month + NDVI_MEAN + EBI_RANGE + MSAVI_RANGE, data = df_monthly)
print(rf_general)
varImpPlot(rf_general)
#BEST RESULT: RFclass; climatic vars + moon ----
df_monthly <- dplyr::select(df, RFclass, general_category, Date, week_day, Recording_time, month, NDVI_MEAN, EBI_RANGE, MSAVI_RANGE, TempOut) %>%
  dplyr::filter(general_category == "biophony") %>%
  na.exclude(.) %>%
  droplevels(.)
rf <- randomForest(RFclass ~ month + NDVI_MEAN + EBI_RANGE + MSAVI_RANGE + Recording_time + TempOut, data = df_monthly, proximity = TRUE, importance = TRUE)
print(rf)
varImpPlot(rf)
#Optimising
importance <- as.data.frame(importance(rf)) %>%
  filter(., MeanDecreaseAccuracy >= 0) %>%
  row.names(.)
#
model_data <- dplyr::select(df_monthly, RFclass, all_of(importance)) %>%
  droplevels(.)
#floor(sqrt(ncol(model_data) - 1))
# NOTE(review): earlier sections tune on model_data; here df_monthly is used --
# confirm which was intended.
mtry <- tuneRF(df_monthly[-1],df_monthly$RFclass, ntreeTry=500,
               stepFactor=1.5,improve=0.01, trace=TRUE, plot=TRUE)
best.m <- mtry[mtry[, 2] == min(mtry[, 2]), 1]
print(mtry)
print(best.m)
# NOTE(review): mtry hard-coded to 4 here rather than best.m -- confirm intent.
rf <- randomForest(RFclass ~ month + NDVI_MEAN + EBI_RANGE + MSAVI_RANGE + Recording_time + TempOut, data = df_monthly, proximity = TRUE, importance = TRUE, mtry = 4)
print(rf)
varImpPlot(rf)
#BEST RESULT: general category; climatic vars + moon ----
df_monthly <- dplyr::select(df, RFclass, general_category, Date, week_day, Recording_time, month, NDVI_MEAN, EBI_RANGE, MSAVI_RANGE, TempOut, moon_illu, Rain, HumOut) %>%
  dplyr::filter(general_category == "biophony" | general_category == "anthrophony" | general_category == "geophony") %>%
  na.exclude(.) %>%
  droplevels(.)
rf <- randomForest(general_category ~ Date + week_day + month + NDVI_MEAN + EBI_RANGE + MSAVI_RANGE + Recording_time + TempOut + moon_illu + Rain + HumOut, data = df_monthly, proximity = TRUE, importance = TRUE)
print(rf)
varImpPlot(rf)
#Optimising
importance <- as.data.frame(importance(rf)) %>%
  filter(., MeanDecreaseAccuracy >= 0) %>%
  row.names(.)
#
model_data <- dplyr::select(df_monthly, general_category, all_of(importance)) %>%
  droplevels(.)
#floor(sqrt(ncol(model_data) - 1))
mtry <- tuneRF(df_monthly[-1],df_monthly$general_category, ntreeTry=500,
               stepFactor=1.5,improve=0.01, trace=TRUE, plot=TRUE)
best.m <- mtry[mtry[, 2] == min(mtry[, 2]), 1]
print(mtry)
print(best.m)
rf <- randomForest(general_category ~ ., data = model_data, proximity = TRUE, importance = TRUE, mtry = best.m)
print(rf)
varImpPlot(rf)
importance <- as.data.frame(importance(rf))
|
/Chapter2_LongTermAcousticMonitoring/17.02.2022_StatsExploratory.R
|
permissive
|
QutEcoacoustics/spatial-acoustics
|
R
| false
| false
| 5,841
|
r
|
# Exploratory random-forest models of soundscape classes against climatic and
# satellite covariates.
# BUG FIXES (review):
#  - print()/varImpPlot()/importance() calls referenced objects that did not
#    exist at that point (rf_general_climatic, rf, rf_general before creation);
#  - tuneRF()'s result matrix was passed as `mtry =` instead of `best.m`;
#  - metaMDS() requires the vegan package, which was never loaded.
rm(list = ls())
library(tidyverse)
library(ggplot2)
library(randomForest)
library(rmarkdown)
library(vegan)  # metaMDS()
# Helper: build absolute paths into the project data directory.
getDataPath <- function (...) {
  return(file.path("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Chapter2_SoundscapeTemporalAssessment", ...))
}
df <- read.csv(getDataPath("15.02.2022_completedf.csv")) %>%
  dplyr::select(RFclass, general_category, index_value, Date, week_day, Recording_time, month, index, moon_illu, TempOut, HumOut, Rain, EBI_RANGE, NDVI_MEAN, MSAVI_RANGE, everything(), -X) %>%
  droplevels()
df$month <- as.factor(df$month)
# Climatic ----
df_monthly <- dplyr::select(df, RFclass, general_category, index_value, index, Date, week_day, Recording_time, month, moon_illu, TempOut, HumOut, Rain) %>%
  na.exclude(.) %>%
  droplevels(.)
## General category; climatic vars + moon ----
rf_general_monthly <- randomForest(general_category ~ moon_illu + TempOut + HumOut + Rain + week_day + Recording_time + month, data = df_monthly, importance = TRUE, proximity = TRUE)
print(rf_general_monthly)       # was print(rf_general_climatic): undefined
varImpPlot(rf_general_monthly)  # was varImpPlot(rf_general_climatic)
nmds <- metaMDS(df_monthly[,9:12], k = 2)
### Optimising
importance <- as.data.frame(importance(rf_general_monthly)) %>%  # was rf_general (not yet created)
  filter(., MeanDecreaseAccuracy >= 0) %>%
  row.names(.)
model_data <- dplyr::select(df_monthly, general_category, all_of(importance)) %>%
  droplevels(.)
mtry <- tuneRF(model_data[-1],model_data$general_category, ntreeTry=500,
               stepFactor=1.5,improve=0.01, trace=TRUE, plot=TRUE)
best.m <- mtry[mtry[, 2] == min(mtry[, 2]), 1]
print(mtry)
print(best.m)
nmds <- metaMDS(df_monthly[,9:12], k = 2)
#RFclass; climatic vars + moon ----
rf_class <- randomForest(RFclass ~ moon_illu + TempOut + HumOut + Rain + week_day + Recording_time + month, data = df_monthly)
print(rf_class)       # was print(rf): undefined at this point
varImpPlot(rf_class)  # was varImpPlot(rf)
#Biophony only; climatic vars + moon ----
df_biophony <- dplyr::select(df, RFclass, general_category, index_value, index, Date, week_day, Recording_time, month, moon_illu, TempOut, HumOut, Rain) %>%
  filter(general_category == "biophony") %>%
  na.exclude(.) %>%
  droplevels(.)
rf_general <- randomForest(RFclass ~ moon_illu + TempOut + HumOut + Rain + week_day + month + Recording_time, data = df_biophony, importance = TRUE, proximity = TRUE)
print(rf_general)
varImpPlot(rf_general)
#Optimising
importance <- as.data.frame(importance(rf_general)) %>%
  filter(., MeanDecreaseAccuracy >= 0) %>%
  row.names(.)
#
model_data <- dplyr::select(df_biophony, RFclass, all_of(importance)) %>%
  droplevels(.)
#floor(sqrt(ncol(model_data) - 1))
mtry <- tuneRF(model_data[-1],model_data$RFclass, ntreeTry=500,
               stepFactor=1.5,improve=0.01, trace=TRUE, plot=TRUE)
best.m <- mtry[mtry[, 2] == min(mtry[, 2]), 1]
print(mtry)
print(best.m)
# BUG FIX: was mtry = mtry, which passed tuneRF's whole result matrix.
rf_general <- randomForest(RFclass ~ moon_illu + TempOut + HumOut + Rain + week_day + month + Recording_time, data = df_biophony, importance = TRUE, proximity = TRUE, mtry = best.m)
print(rf_general)
varImpPlot(rf_general)
nmds <- metaMDS(df_biophony[,9:11], k = 2)
nmds <- metaMDS(df_monthly[,9:12], k = 2)
#General category; satellite ----
df_monthly <- dplyr::select(df, RFclass, general_category, Date, week_day, Recording_time, month, NDVI_MEAN, EBI_RANGE, MSAVI_RANGE) %>%
  na.exclude(.) %>%
  droplevels(.)
rf_general <- randomForest(general_category ~ month + NDVI_MEAN + EBI_RANGE + MSAVI_RANGE, data = df_monthly)
print(rf_general)
varImpPlot(rf_general)
#BEST RESULT: RFclass; climatic vars + moon ----
df_monthly <- dplyr::select(df, RFclass, general_category, Date, week_day, Recording_time, month, NDVI_MEAN, EBI_RANGE, MSAVI_RANGE, TempOut) %>%
  dplyr::filter(general_category == "biophony") %>%
  na.exclude(.) %>%
  droplevels(.)
rf <- randomForest(RFclass ~ month + NDVI_MEAN + EBI_RANGE + MSAVI_RANGE + Recording_time + TempOut, data = df_monthly, proximity = TRUE, importance = TRUE)
print(rf)
varImpPlot(rf)
#Optimising
importance <- as.data.frame(importance(rf)) %>%
  filter(., MeanDecreaseAccuracy >= 0) %>%
  row.names(.)
#
model_data <- dplyr::select(df_monthly, RFclass, all_of(importance)) %>%
  droplevels(.)
#floor(sqrt(ncol(model_data) - 1))
# NOTE(review): earlier sections tune on model_data; here df_monthly is used --
# confirm which was intended.
mtry <- tuneRF(df_monthly[-1],df_monthly$RFclass, ntreeTry=500,
               stepFactor=1.5,improve=0.01, trace=TRUE, plot=TRUE)
best.m <- mtry[mtry[, 2] == min(mtry[, 2]), 1]
print(mtry)
print(best.m)
# NOTE(review): mtry hard-coded to 4 here rather than best.m -- confirm intent.
rf <- randomForest(RFclass ~ month + NDVI_MEAN + EBI_RANGE + MSAVI_RANGE + Recording_time + TempOut, data = df_monthly, proximity = TRUE, importance = TRUE, mtry = 4)
print(rf)
varImpPlot(rf)
#BEST RESULT: general category; climatic vars + moon ----
df_monthly <- dplyr::select(df, RFclass, general_category, Date, week_day, Recording_time, month, NDVI_MEAN, EBI_RANGE, MSAVI_RANGE, TempOut, moon_illu, Rain, HumOut) %>%
  dplyr::filter(general_category == "biophony" | general_category == "anthrophony" | general_category == "geophony") %>%
  na.exclude(.) %>%
  droplevels(.)
rf <- randomForest(general_category ~ Date + week_day + month + NDVI_MEAN + EBI_RANGE + MSAVI_RANGE + Recording_time + TempOut + moon_illu + Rain + HumOut, data = df_monthly, proximity = TRUE, importance = TRUE)
print(rf)
varImpPlot(rf)
#Optimising
importance <- as.data.frame(importance(rf)) %>%
  filter(., MeanDecreaseAccuracy >= 0) %>%
  row.names(.)
#
model_data <- dplyr::select(df_monthly, general_category, all_of(importance)) %>%
  droplevels(.)
#floor(sqrt(ncol(model_data) - 1))
mtry <- tuneRF(df_monthly[-1],df_monthly$general_category, ntreeTry=500,
               stepFactor=1.5,improve=0.01, trace=TRUE, plot=TRUE)
best.m <- mtry[mtry[, 2] == min(mtry[, 2]), 1]
print(mtry)
print(best.m)
rf <- randomForest(general_category ~ ., data = model_data, proximity = TRUE, importance = TRUE, mtry = best.m)
print(rf)
varImpPlot(rf)
importance <- as.data.frame(importance(rf))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fs_em.R
\name{fs_em}
\alias{fs_em}
\title{Calculate EM-estimates of m- and u-probabilities}
\usage{
fs_em(patterns, mprobs0 = list(0.95), uprobs0 = list(0.02), p0 = 0.05)
}
\arguments{
\item{patterns}{either a table of patterns (as output by
\code{\link{tabulate}}) or pairs with comparison columns (as output by
\code{\link{compare}}).}
\item{mprobs0, uprobs0}{initial values of the m- and u-probabilities. These
should be lists with numeric values. The names of the elements in the list
should correspond to the names in \code{by_x} in \code{\link{link}}.}
\item{p0}{the initial estimate of the probability that a pair is a match.}
}
\description{
Calculate EM-estimates of m- and u-probabilities
}
|
/man/fs_em.Rd
|
no_license
|
markvanderloo/reclin
|
R
| false
| true
| 782
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fs_em.R
\name{fs_em}
\alias{fs_em}
\title{Calculate EM-estimates of m- and u-probabilities}
\usage{
fs_em(patterns, mprobs0 = list(0.95), uprobs0 = list(0.02), p0 = 0.05)
}
\arguments{
\item{patterns}{either a table of patterns (as output by
\code{\link{tabulate}}) or pairs with comparison columns (as output by
\code{\link{compare}}).}
\item{mprobs0, uprobs0}{initial values of the m- and u-probabilities. These
should be lists with numeric values. The names of the elements in the list
should correspond to the names in \code{by_x} in \code{\link{link}}.}
\item{p0}{the initial estimate of the probability that a pair is a match.}
}
\description{
Calculate EM-estimates of m- and u-probabilities
}
|
#' Build a sunburst plot of cell counts over nested brain regions.
#'
#' For each requested ROI acronym, the set of ancestor regions (optional) and
#' all descendant regions is collected; hierarchical "paths" are built by
#' walking each region's parent chain; per-region cell counts are computed
#' from `dataset` (a region's count excludes counts already attributed to its
#' plotted subregions); and a sunburstR widget is created and optionally
#' saved to disk.
#'
#' @param dataset data.frame of detected cells with an `acronym` column (as
#'   produced by the wholebrain pipeline). Required.
#' @param rois character vector of ROI acronyms to plot.
#' @param parents if TRUE, paths extend all the way up to the "grey" root;
#'   if FALSE, paths stop at the parent of the first ROI supplied.
#' @param savepath directory in which to save `sunburst.RData`; NULL skips saving.
#' @return invisibly, the sunburstR htmlwidget.
rois_sunburst <- function(dataset = NULL, rois = c("CH", "BS", "CB"), parents = TRUE, savepath = NULL){
  # BUG FIX: the original silently read a global (`glass_dataset_exact_AP`)
  # instead of this argument; fail early if no dataset is supplied.
  if (is.null(dataset)) {
    stop("'dataset' must be supplied", call. = FALSE)
  }
  # Count cells falling in each ROI (including all of its substructures).
  get_roi <- function(dataset, roi = c('MO', 'TH')){
    out <- unlist(lapply(roi, function(x) sum(dataset$acronym %in% get.sub.structure(x))))
    data.frame(acronym = roi, cell.count = out, stringsAsFactors = FALSE)
  }
  # --- Assemble nested ROI list --------------------------------------------
  init_rois <- rois
  for (i in seq_along(init_rois)) {
    if (parents) {
      # Add all ancestors up to (but excluding) the "grey" root.
      new_parent <- wholebrain::get.acronym.parent(init_rois[i])
      while (new_parent != "grey") {
        rois <- c(rois, new_parent)
        new_parent <- wholebrain::get.acronym.parent(new_parent)
      }
    }
    # Add all descendants, breadth-first; these must always be added.
    frontier <- wholebrain::get.acronym.child(init_rois[i])
    frontier <- frontier[!is.na(frontier)]
    while (length(frontier) > 0) {
      rois <- c(rois, frontier)
      frontier <- unlist(lapply(frontier, wholebrain::get.acronym.child))
      frontier <- frontier[!is.na(frontier)]
    }
  }
  rois <- unique(rois)
  rois <- rois[!is.na(rois)]
  # --- Build hierarchical paths --------------------------------------------
  # sunburstR uses "-" as its path separator, so "-" inside acronyms is
  # replaced with "_" before paths are assembled.
  sanitize <- function(x) gsub("-", "_", x, fixed = TRUE)
  # Walk up the parent chain from `roi` until `root`, returning the
  # "ancestor-...-roi" path string.
  path_to_root <- function(roi, root){
    cur <- wholebrain::get.acronym.parent(roi)
    path <- sanitize(roi)
    while (cur != root && path != root) {
      nxt <- wholebrain::get.acronym.parent(cur)
      cur <- sanitize(cur)
      path <- paste0(cur, "-", path)
      cur <- nxt
    }
    path
  }
  # Root of the hierarchy: "grey" when climbing all the way up, otherwise the
  # parent of the first requested ROI (rois[1] is unchanged by the appends).
  root <- if (parents) "grey" else wholebrain::get.acronym.parent(init_rois[1])
  paths <- c()
  for (i in seq_along(rois)) {
    if (rois[i] != root) {
      paths <- c(paths, path_to_root(rois[i], root))
    }
  }
  # Single-segment paths get an explicit "end" terminator so sunburstR
  # still treats them as nested.
  no_sep <- !grepl("-", paths, fixed = TRUE)
  paths[no_sep] <- paste0(paths[no_sep], "-end")
  # --- Cell counts ---------------------------------------------------------
  # BUG FIX: counts now come from `dataset`, not a global variable, and the
  # children check uses all(is.na(...)) — the original passed a length > 1
  # logical to if(), an error in R >= 4.2.
  counts <- numeric(length(rois))
  initial_counts <- get_roi(dataset, roi = rois)
  for (i in seq_along(rois)) {
    children <- wholebrain::get.acronym.child(rois[i])
    if (all(is.na(children))) {
      counts[i] <- initial_counts$cell.count[i]
    } else {
      child_counts <- get_roi(dataset, roi = children)
      in_tree <- child_counts$acronym %in% rois
      # Subtract counts already attributed to subregions shown in the plot,
      # so each sunburst ring segment holds only its exclusive count.
      counts[i] <- initial_counts$cell.count[i] - sum(child_counts$cell.count[in_tree])
    }
  }
  # --- Colors --------------------------------------------------------------
  colorv <- vapply(rois, wholebrain::color.from.acronym, character(1), USE.NAMES = FALSE)
  rois2 <- gsub("-", "_", rois, fixed = TRUE)
  colorv <- c(colorv, "ffffff")  # color for the synthetic "end" leaf node
  rois2 <- c(rois2, "end")
  # --- Create and save plot ------------------------------------------------
  # BUG FIX: the original used structure(..., Names = ...) (capital N), which
  # sets a meaningless "Names" attribute instead of column names, and
  # hard-coded row.names to 25 rows regardless of length(paths).
  sunburst_wholebrain <- data.frame(paths = paths, counts = counts, stringsAsFactors = FALSE)
  sunburst_plot <- sunburstR::sunburst(sunburst_wholebrain, count = TRUE, percent = TRUE,
                                       colors = list(range = colorv, domain = rois2),
                                       legend = FALSE,
                                       breadcrumb = list(w = 65, h = 30, r = 100, s = 0))
  if (!is.null(savepath)) {
    save(sunburst_plot, file = file.path(savepath, "sunburst.RData"))
  }
  invisible(sunburst_plot)
}
#Example of Use
rois_sunburst(dataset = all_dataset, rois = c("CB"), parents = TRUE, savepath = "C:/Users/joeyd/Desktop/NIH/PZSA Testing/PoC - Sunburst ROIs Function")
|
/rois_sunburst.R
|
no_license
|
jdknguyen/ZephyrShinyApp
|
R
| false
| false
| 4,881
|
r
|
#' Build a sunburst plot of cell counts over nested brain regions.
#'
#' For each requested ROI acronym, the set of ancestor regions (optional) and
#' all descendant regions is collected; hierarchical "paths" are built by
#' walking each region's parent chain; per-region cell counts are computed
#' from `dataset` (a region's count excludes counts already attributed to its
#' plotted subregions); and a sunburstR widget is created and optionally
#' saved to disk.
#'
#' @param dataset data.frame of detected cells with an `acronym` column (as
#'   produced by the wholebrain pipeline). Required.
#' @param rois character vector of ROI acronyms to plot.
#' @param parents if TRUE, paths extend all the way up to the "grey" root;
#'   if FALSE, paths stop at the parent of the first ROI supplied.
#' @param savepath directory in which to save `sunburst.RData`; NULL skips saving.
#' @return invisibly, the sunburstR htmlwidget.
rois_sunburst <- function(dataset = NULL, rois = c("CH", "BS", "CB"), parents = TRUE, savepath = NULL){
  # BUG FIX: the original silently read a global (`glass_dataset_exact_AP`)
  # instead of this argument; fail early if no dataset is supplied.
  if (is.null(dataset)) {
    stop("'dataset' must be supplied", call. = FALSE)
  }
  # Count cells falling in each ROI (including all of its substructures).
  get_roi <- function(dataset, roi = c('MO', 'TH')){
    out <- unlist(lapply(roi, function(x) sum(dataset$acronym %in% get.sub.structure(x))))
    data.frame(acronym = roi, cell.count = out, stringsAsFactors = FALSE)
  }
  # --- Assemble nested ROI list --------------------------------------------
  init_rois <- rois
  for (i in seq_along(init_rois)) {
    if (parents) {
      # Add all ancestors up to (but excluding) the "grey" root.
      new_parent <- wholebrain::get.acronym.parent(init_rois[i])
      while (new_parent != "grey") {
        rois <- c(rois, new_parent)
        new_parent <- wholebrain::get.acronym.parent(new_parent)
      }
    }
    # Add all descendants, breadth-first; these must always be added.
    frontier <- wholebrain::get.acronym.child(init_rois[i])
    frontier <- frontier[!is.na(frontier)]
    while (length(frontier) > 0) {
      rois <- c(rois, frontier)
      frontier <- unlist(lapply(frontier, wholebrain::get.acronym.child))
      frontier <- frontier[!is.na(frontier)]
    }
  }
  rois <- unique(rois)
  rois <- rois[!is.na(rois)]
  # --- Build hierarchical paths --------------------------------------------
  # sunburstR uses "-" as its path separator, so "-" inside acronyms is
  # replaced with "_" before paths are assembled.
  sanitize <- function(x) gsub("-", "_", x, fixed = TRUE)
  # Walk up the parent chain from `roi` until `root`, returning the
  # "ancestor-...-roi" path string.
  path_to_root <- function(roi, root){
    cur <- wholebrain::get.acronym.parent(roi)
    path <- sanitize(roi)
    while (cur != root && path != root) {
      nxt <- wholebrain::get.acronym.parent(cur)
      cur <- sanitize(cur)
      path <- paste0(cur, "-", path)
      cur <- nxt
    }
    path
  }
  # Root of the hierarchy: "grey" when climbing all the way up, otherwise the
  # parent of the first requested ROI (rois[1] is unchanged by the appends).
  root <- if (parents) "grey" else wholebrain::get.acronym.parent(init_rois[1])
  paths <- c()
  for (i in seq_along(rois)) {
    if (rois[i] != root) {
      paths <- c(paths, path_to_root(rois[i], root))
    }
  }
  # Single-segment paths get an explicit "end" terminator so sunburstR
  # still treats them as nested.
  no_sep <- !grepl("-", paths, fixed = TRUE)
  paths[no_sep] <- paste0(paths[no_sep], "-end")
  # --- Cell counts ---------------------------------------------------------
  # BUG FIX: counts now come from `dataset`, not a global variable, and the
  # children check uses all(is.na(...)) — the original passed a length > 1
  # logical to if(), an error in R >= 4.2.
  counts <- numeric(length(rois))
  initial_counts <- get_roi(dataset, roi = rois)
  for (i in seq_along(rois)) {
    children <- wholebrain::get.acronym.child(rois[i])
    if (all(is.na(children))) {
      counts[i] <- initial_counts$cell.count[i]
    } else {
      child_counts <- get_roi(dataset, roi = children)
      in_tree <- child_counts$acronym %in% rois
      # Subtract counts already attributed to subregions shown in the plot,
      # so each sunburst ring segment holds only its exclusive count.
      counts[i] <- initial_counts$cell.count[i] - sum(child_counts$cell.count[in_tree])
    }
  }
  # --- Colors --------------------------------------------------------------
  colorv <- vapply(rois, wholebrain::color.from.acronym, character(1), USE.NAMES = FALSE)
  rois2 <- gsub("-", "_", rois, fixed = TRUE)
  colorv <- c(colorv, "ffffff")  # color for the synthetic "end" leaf node
  rois2 <- c(rois2, "end")
  # --- Create and save plot ------------------------------------------------
  # BUG FIX: the original used structure(..., Names = ...) (capital N), which
  # sets a meaningless "Names" attribute instead of column names, and
  # hard-coded row.names to 25 rows regardless of length(paths).
  sunburst_wholebrain <- data.frame(paths = paths, counts = counts, stringsAsFactors = FALSE)
  sunburst_plot <- sunburstR::sunburst(sunburst_wholebrain, count = TRUE, percent = TRUE,
                                       colors = list(range = colorv, domain = rois2),
                                       legend = FALSE,
                                       breadcrumb = list(w = 65, h = 30, r = 100, s = 0))
  if (!is.null(savepath)) {
    save(sunburst_plot, file = file.path(savepath, "sunburst.RData"))
  }
  invisible(sunburst_plot)
}
#Example of Use
rois_sunburst(dataset = all_dataset, rois = c("CB"), parents = TRUE, savepath = "C:/Users/joeyd/Desktop/NIH/PZSA Testing/PoC - Sunburst ROIs Function")
|
#' analyse.0.0
#'
#' Top-level analysis over a whole equation system. The equations are first
#' turned into an abstract dependency graph and split into connected
#' components; \code{\link{analyse.connected.component.critical.points}} is
#' then run on each component to extract PK variable definitions.
#' Other unknown (non-PK) variables are currently kept as is; more code for
#' analysing that remaining part may be added in the future.
#' @param eqns equations
#' @param ... extra arguments (e.g. \code{extra.rules}) forwarded to
#'   \code{analyse.connected.component.critical.points}
#' @return a list with \code{analysed.defns} (found definitions for PK
#'   variables, one entry per component that yielded any) and
#'   \code{other.rels} (non-PK equations kept as is)
analyse.0.0 = function(eqns, ...){
    gg = create.abstractGraph.from.equations(eqns)
    # divide up into connected components
    cps = divide.graph(gg$graph)
    analysed.defns = list()
    other.rels = list()
    for(i in 1:length(cps)){
        # indices of the equations referenced by this component's vertices
        component.eqns.ind = unique(gg$backref[ restriction.morphism.of.graph.at.v(gg$graph, cps[[i]]) ] )
        if (length(component.eqns.ind)==0) {
            stop('Detect a component without equation reference,
            might be caused by an isolated constant initialization.')
        }
        foo = analyse.connected.component.critical.points(
        # the graph construction removes initializations of a temp variable,
        # e.g. `TEMP[1][Q]` = 1
        # — there are no edges corresponding to such an equation in the graph,
        # so it disappears.
        # To avoid this, ensure the passed-in eqns already had those
        # initialization lines removed.
            eqns[ component.eqns.ind ] , ...)
        if(length(foo$critical.relations)>0) {
            analysed.defns[[ length(analysed.defns)+1 ]] = foo$critical.relations
        }
        if(length(foo$left.eqns)>0){
            other.rels = c(other.rels, foo$left.eqns)
        }
    }
    list(analysed.defns=analysed.defns, other.rels=other.rels)
}
#' analyse.connected.component.first.critical.points
#'
#' The work horse for \code{analyse.connected.component.critical.points}.
#' (NOTE(review): the roxygen name above was previously swapped with its
#' sibling function's; corrected here to match the definition below.)
#' Combines \code{topo.sort},
#' \code{get.definition.of.v},
#' and the \code{pattern.distrib.*} matchers
#' to analyse, to some extent, general equations,
#' assuming \code{eqns} form one connected component.
#' Tries to figure out the meaning of the first critical point (in
#' topological order) found in the connected component,
#' and returns the indices of the equations that define it.
#' If no critical-point lhs is found, returns just the left-over indices.
#' @param eqns equations
#' @param socp set of critical points; a critical point should be an lhs, and is necessary for its offspring critical points
#' @param extra.rules optional list of pattern/action pairs extending the functionality of this function
#' @param skip.patterns if TRUE, skip the built-in pattern matchers
#' @param ... other parameters that may be passed to the pattern functions
#' @return information on variability: a list whose first element is the
#'   equation indices used, second a description list
#'   (\code{critical.varname}, \code{type}, \code{defn}), plus the named
#'   element \code{depend.on}
analyse.connected.component.first.critical.points = function(eqns,socp,extra.rules=NULL,skip.patterns=FALSE, ...) {
    gg = create.abstractGraph.from.equations(eqns)
    ind0 = topo.sort(gg$graph)
    variables = gg$vertex.table
    ind.socp = match.symbolic(socp, variables)
    ind.socp = ind.socp[ complete.cases(ind.socp) ]
    # This gg is generated from a subset of eqns, not the whole graph;
    # the critical point must again be an lhs in this gg, not only in the original gg
    ind.socp = intersect(gg$lhs.ind, ind.socp)
    if (length(ind.socp)==0) {
        # no critical point visible here: hand every equation back untouched
        return( list(seq_along(eqns),
                list(critical.varname=as.symbol(NA),
                 type='not found',
                 defn=NULL),
                 depend.on=NULL
        ))
    }
    # find pk's, in a topological order: v0 is the first critical point
    v0 = gg$vertex.table[[ as.integer((ind0$L[ ind0$L %in% ind.socp ])[1]) ]]
    defn = get.definition.of.v(eqns, v0)
    # match patterns one by one
    # random variable from theta's, which costs a degree of freedom
    if (!skip.patterns) {
        pat = pattern.distrib.normal(defn$final.e, ...)
        # simulate switch-case via early returns
        if(!is.null(pat)){
            return(list(defn$ind,list(
                critical.varname=v0,
                type='directly.related.to.nonmem.parameters',
                defn=pat),
                depend.on = defn$depend.on
            ))
        }
        # random variable from theta's, which costs a degree of freedom
        pat = pattern.distrib.lognormal(defn$final.e, ...)
        if(!is.null(pat)){
            return(list(defn$ind,list(
                critical.varname=v0,
                type='directly.related.to.nonmem.parameters',
                defn=pat),
                depend.on = defn$depend.on
            ))
        }
    }
    # depends on previously defined variables, not directly on theta's, eta's
    if(!list.any.is.nonmem.par(defn$depend.on)){
        return(list(defn$ind,list(
            critical.varname=v0,
            type='related.on.previous.variables',
            defn=defn$final.e),
            depend.on = defn$depend.on
        ))
    }
    pat = pattern.distrib.fixed(defn$final.e)
    if(!is.null(pat)){
        return(list(defn$ind,list(
            critical.varname=v0,
            type='directly.related.to.nonmem.parameters',
            defn=pat),
            depend.on = defn$depend.on
        ))
    }
    # user-supplied pattern/action rules, tried in order after the built-ins
    if (!is.null(extra.rules) && is.list(extra.rules) && length(extra.rules)>0) {
        for(rule in extra.rules){
            if (!is.null(pat <- rule$pat(defn))) {
                re = list(defn$ind,
                      rule$action(defn,pat),
                      depend.on=defn$depend.on)
                re[[2]]$critical.varname=v0
                return(re)
            }
        }
    }
    # nothing matched: keep the definition but mark its type as unknown
    return(list(defn$ind,list(
        critical.varname=v0,
        type='unknown',
        defn=defn$final.e),
        depend.on = defn$depend.on
    ))
}
#' analyse.connected.component.critical.points
#'
#' (NOTE(review): the roxygen name above was previously swapped with its
#' sibling function's; corrected here to match the definition below.)
#' For a connected component, try to extract PK variables (as lhs) one by one.
#' If no PK variables are found, return eqns as is.
#' Actually, what we can get are not necessarily PK variables, but the
#' critical point variables.
#'
#' @param eqns equations (assumed to form one connected component)
#' @param socp.filter function returning the indices of the initial set of
#'   critical points, given \code{gg}, \code{variables} and \code{eqns}
#' @param ... extra arguments (e.g. \code{extra.rules}) forwarded to
#'   \code{analyse.connected.component.first.critical.points}
#' @return found pk relations and left equations(might be for other use)
analyse.connected.component.critical.points = function(eqns, socp.filter=socp.filter.default, ...){
    gg = create.abstractGraph.from.equations(eqns)
    variables = gg$vertex.table
    socp.ind = socp.filter(gg=gg, variables=variables, eqns=eqns)
    # then, iterate while growing the socp set
    if (length(socp.ind)==0) {
        # If component has no pk variables, return them as is
        return(list(
            critical.relations=list(),
            left.eqns=eqns))
    }
    # init iteration variables
    re = list()
    left.eqns = list()
    eqns.iter = eqns
    while(length(eqns.iter)>0){
        foo = analyse.connected.component.first.critical.points(eqns.iter, variables[socp.ind], ...)
        # if a new section depends on a variable that is:
        #   an lhs,
        #   not in socp,
        #   not a nonmem.parameter
        # then this variable should be appended to socp
        if (foo[[2]]$type=='not found'){
            left.eqns = c(left.eqns, eqns.iter[ foo[[1]] ])
            break()
        }
        dep.ind = match.symbolic(foo$depend.on, variables)
        dep.ind = intersect( dep.ind, gg$lhs.ind )
        dep.ind = setdiff(dep.ind, socp.ind)
        if (length(dep.ind) > 0 ) {
            socp.ind = union(socp.ind, dep.ind)
            # if the critical point set changed, restart the whole iteration
            re = list()
            left.eqns = list()
            eqns.iter = eqns
            next()
        }
        # accept this critical point and drop its equations from the pool
        re[[ length(re)+1 ]]=foo[[2]]
        eqns.iter = eqns.iter[ -foo[[1]] ]
    }
    list(critical.relations=re,left.eqns=left.eqns)
}
#' get.definition.of.v
#'
#' find necessary equations for a given variable
#' @param eqns equations
#' @param vn symbol of variable
#' @return a list with \code{final.e}, the evaluated value of \code{vn}
#'   using \code{eqns}; \code{depend.on}, the rhs symbols it depends on;
#'   \code{intermediate}, variables used internally; and \code{ind}, the
#'   indices of the equations fully contained in the subgraph of \code{vn}
get.definition.of.v = function(eqns,vn){
    env0 = new.env(parent=emptyenv())
    # Note:
    # This simple strategy can't handle IF/THEN/ELSE correctly
    # e.g.
    #   Q = 1
    #   IF (MIXNUM==2) Q=2
    # actually Q = f(MIXNUM)
    # However, if we use the eval.symbolic, Q is evaluated to 0
    # and the warning "equations has more relations than in subgraph" will happen,
    # because Q disappears from subgraph of vn.expression
    # two ways of fixing this
    # (1) add a translations layer to change the IF statements to a function
    # (2) or change the eval.symbolic to correctly handle the IF
    eval.symbolic(eqns, env0)
    gg = create.abstractGraph.from.equations(eqns)
    vn.expression = eval.symbolic(vn, env0)
    vn.rhs = extract.vertices(vn.expression)$rhs.symbol
    vn.rhs.ind = match.symbolic(vn.rhs, gg$vertex.table)
    vn.ind = match.symbolic(vn, gg$vertex.table)
    G = gg$graph
    # the subgraph relevant to vn: ancestors of its rhs intersected with
    # descendants of vn itself
    subg.of.vn = intersect(
        connected.component(G, vn.rhs.ind, direction=1),
        connected.component(G, vn.ind, direction=0))
    ind = unique(gg$backref[ restriction.morphism.of.graph.at.v(G,subg.of.vn) ])
    re = integer(0)
    # keep only equations whose vertices all lie inside the subgraph
    for(i in ind){
        if (! all(gg$eqns.vertex[[ i ]] %in% subg.of.vn) ){
            warning(sprintf('Equation: %s has more relations than in subgraph, will be dropped!', paste(deparse(eqns[[i]]),collapse=' ')))
        } else {
            re = c(re, i)
        }
    }
    list(final.e=vn.expression,
         depend.on=vn.rhs,
         intermediate=gg$vertex.table[setdiff(subg.of.vn, union(vn.ind, vn.rhs.ind))],
         ind = re)
}
|
/symbolicR/R/00symbolic.003eqns.R
|
no_license
|
isabella232/symbolicR
|
R
| false
| false
| 9,467
|
r
|
#' analyse.0.0
#'
#' Top-level analysis over a whole equation system. The equations are first
#' turned into an abstract dependency graph and split into connected
#' components; \code{\link{analyse.connected.component.critical.points}} is
#' then run on each component to extract PK variable definitions.
#' Other unknown (non-PK) variables are currently kept as is; more code for
#' analysing that remaining part may be added in the future.
#' @param eqns equations
#' @param ... extra arguments (e.g. \code{extra.rules}) forwarded to
#'   \code{analyse.connected.component.critical.points}
#' @return a list with \code{analysed.defns} (found definitions for PK
#'   variables, one entry per component that yielded any) and
#'   \code{other.rels} (non-PK equations kept as is)
analyse.0.0 = function(eqns, ...){
    gg = create.abstractGraph.from.equations(eqns)
    # divide up into connected components
    cps = divide.graph(gg$graph)
    analysed.defns = list()
    other.rels = list()
    for(i in 1:length(cps)){
        # indices of the equations referenced by this component's vertices
        component.eqns.ind = unique(gg$backref[ restriction.morphism.of.graph.at.v(gg$graph, cps[[i]]) ] )
        if (length(component.eqns.ind)==0) {
            stop('Detect a component without equation reference,
            might be caused by an isolated constant initialization.')
        }
        foo = analyse.connected.component.critical.points(
        # the graph construction removes initializations of a temp variable,
        # e.g. `TEMP[1][Q]` = 1
        # — there are no edges corresponding to such an equation in the graph,
        # so it disappears.
        # To avoid this, ensure the passed-in eqns already had those
        # initialization lines removed.
            eqns[ component.eqns.ind ] , ...)
        if(length(foo$critical.relations)>0) {
            analysed.defns[[ length(analysed.defns)+1 ]] = foo$critical.relations
        }
        if(length(foo$left.eqns)>0){
            other.rels = c(other.rels, foo$left.eqns)
        }
    }
    list(analysed.defns=analysed.defns, other.rels=other.rels)
}
#' analyse.connected.component.first.critical.points
#'
#' The work horse for \code{analyse.connected.component.critical.points}.
#' (NOTE(review): the roxygen name above was previously swapped with its
#' sibling function's; corrected here to match the definition below.)
#' Combines \code{topo.sort},
#' \code{get.definition.of.v},
#' and the \code{pattern.distrib.*} matchers
#' to analyse, to some extent, general equations,
#' assuming \code{eqns} form one connected component.
#' Tries to figure out the meaning of the first critical point (in
#' topological order) found in the connected component,
#' and returns the indices of the equations that define it.
#' If no critical-point lhs is found, returns just the left-over indices.
#' @param eqns equations
#' @param socp set of critical points; a critical point should be an lhs, and is necessary for its offspring critical points
#' @param extra.rules optional list of pattern/action pairs extending the functionality of this function
#' @param skip.patterns if TRUE, skip the built-in pattern matchers
#' @param ... other parameters that may be passed to the pattern functions
#' @return information on variability: a list whose first element is the
#'   equation indices used, second a description list
#'   (\code{critical.varname}, \code{type}, \code{defn}), plus the named
#'   element \code{depend.on}
analyse.connected.component.first.critical.points = function(eqns,socp,extra.rules=NULL,skip.patterns=FALSE, ...) {
    gg = create.abstractGraph.from.equations(eqns)
    ind0 = topo.sort(gg$graph)
    variables = gg$vertex.table
    ind.socp = match.symbolic(socp, variables)
    ind.socp = ind.socp[ complete.cases(ind.socp) ]
    # This gg is generated from a subset of eqns, not the whole graph;
    # the critical point must again be an lhs in this gg, not only in the original gg
    ind.socp = intersect(gg$lhs.ind, ind.socp)
    if (length(ind.socp)==0) {
        # no critical point visible here: hand every equation back untouched
        return( list(seq_along(eqns),
                list(critical.varname=as.symbol(NA),
                 type='not found',
                 defn=NULL),
                 depend.on=NULL
        ))
    }
    # find pk's, in a topological order: v0 is the first critical point
    v0 = gg$vertex.table[[ as.integer((ind0$L[ ind0$L %in% ind.socp ])[1]) ]]
    defn = get.definition.of.v(eqns, v0)
    # match patterns one by one
    # random variable from theta's, which costs a degree of freedom
    if (!skip.patterns) {
        pat = pattern.distrib.normal(defn$final.e, ...)
        # simulate switch-case via early returns
        if(!is.null(pat)){
            return(list(defn$ind,list(
                critical.varname=v0,
                type='directly.related.to.nonmem.parameters',
                defn=pat),
                depend.on = defn$depend.on
            ))
        }
        # random variable from theta's, which costs a degree of freedom
        pat = pattern.distrib.lognormal(defn$final.e, ...)
        if(!is.null(pat)){
            return(list(defn$ind,list(
                critical.varname=v0,
                type='directly.related.to.nonmem.parameters',
                defn=pat),
                depend.on = defn$depend.on
            ))
        }
    }
    # depends on previously defined variables, not directly on theta's, eta's
    if(!list.any.is.nonmem.par(defn$depend.on)){
        return(list(defn$ind,list(
            critical.varname=v0,
            type='related.on.previous.variables',
            defn=defn$final.e),
            depend.on = defn$depend.on
        ))
    }
    pat = pattern.distrib.fixed(defn$final.e)
    if(!is.null(pat)){
        return(list(defn$ind,list(
            critical.varname=v0,
            type='directly.related.to.nonmem.parameters',
            defn=pat),
            depend.on = defn$depend.on
        ))
    }
    # user-supplied pattern/action rules, tried in order after the built-ins
    if (!is.null(extra.rules) && is.list(extra.rules) && length(extra.rules)>0) {
        for(rule in extra.rules){
            if (!is.null(pat <- rule$pat(defn))) {
                re = list(defn$ind,
                      rule$action(defn,pat),
                      depend.on=defn$depend.on)
                re[[2]]$critical.varname=v0
                return(re)
            }
        }
    }
    # nothing matched: keep the definition but mark its type as unknown
    return(list(defn$ind,list(
        critical.varname=v0,
        type='unknown',
        defn=defn$final.e),
        depend.on = defn$depend.on
    ))
}
#' analyse.connected.component.critical.points
#'
#' (NOTE(review): the roxygen name above was previously swapped with its
#' sibling function's; corrected here to match the definition below.)
#' For a connected component, try to extract PK variables (as lhs) one by one.
#' If no PK variables are found, return eqns as is.
#' Actually, what we can get are not necessarily PK variables, but the
#' critical point variables.
#'
#' @param eqns equations (assumed to form one connected component)
#' @param socp.filter function returning the indices of the initial set of
#'   critical points, given \code{gg}, \code{variables} and \code{eqns}
#' @param ... extra arguments (e.g. \code{extra.rules}) forwarded to
#'   \code{analyse.connected.component.first.critical.points}
#' @return found pk relations and left equations(might be for other use)
analyse.connected.component.critical.points = function(eqns, socp.filter=socp.filter.default, ...){
    gg = create.abstractGraph.from.equations(eqns)
    variables = gg$vertex.table
    socp.ind = socp.filter(gg=gg, variables=variables, eqns=eqns)
    # then, iterate while growing the socp set
    if (length(socp.ind)==0) {
        # If component has no pk variables, return them as is
        return(list(
            critical.relations=list(),
            left.eqns=eqns))
    }
    # init iteration variables
    re = list()
    left.eqns = list()
    eqns.iter = eqns
    while(length(eqns.iter)>0){
        foo = analyse.connected.component.first.critical.points(eqns.iter, variables[socp.ind], ...)
        # if a new section depends on a variable that is:
        #   an lhs,
        #   not in socp,
        #   not a nonmem.parameter
        # then this variable should be appended to socp
        if (foo[[2]]$type=='not found'){
            left.eqns = c(left.eqns, eqns.iter[ foo[[1]] ])
            break()
        }
        dep.ind = match.symbolic(foo$depend.on, variables)
        dep.ind = intersect( dep.ind, gg$lhs.ind )
        dep.ind = setdiff(dep.ind, socp.ind)
        if (length(dep.ind) > 0 ) {
            socp.ind = union(socp.ind, dep.ind)
            # if the critical point set changed, restart the whole iteration
            re = list()
            left.eqns = list()
            eqns.iter = eqns
            next()
        }
        # accept this critical point and drop its equations from the pool
        re[[ length(re)+1 ]]=foo[[2]]
        eqns.iter = eqns.iter[ -foo[[1]] ]
    }
    list(critical.relations=re,left.eqns=left.eqns)
}
#' get.definition.of.v
#'
#' find necessary equations for a given variable
#' @param eqns equations
#' @param vn symbol of variable
#' @return a list with \code{final.e}, the evaluated value of \code{vn}
#'   using \code{eqns}; \code{depend.on}, the rhs symbols it depends on;
#'   \code{intermediate}, variables used internally; and \code{ind}, the
#'   indices of the equations fully contained in the subgraph of \code{vn}
get.definition.of.v = function(eqns,vn){
    env0 = new.env(parent=emptyenv())
    # Note:
    # This simple strategy can't handle IF/THEN/ELSE correctly
    # e.g.
    #   Q = 1
    #   IF (MIXNUM==2) Q=2
    # actually Q = f(MIXNUM)
    # However, if we use the eval.symbolic, Q is evaluated to 0
    # and the warning "equations has more relations than in subgraph" will happen,
    # because Q disappears from subgraph of vn.expression
    # two ways of fixing this
    # (1) add a translations layer to change the IF statements to a function
    # (2) or change the eval.symbolic to correctly handle the IF
    eval.symbolic(eqns, env0)
    gg = create.abstractGraph.from.equations(eqns)
    vn.expression = eval.symbolic(vn, env0)
    vn.rhs = extract.vertices(vn.expression)$rhs.symbol
    vn.rhs.ind = match.symbolic(vn.rhs, gg$vertex.table)
    vn.ind = match.symbolic(vn, gg$vertex.table)
    G = gg$graph
    # the subgraph relevant to vn: ancestors of its rhs intersected with
    # descendants of vn itself
    subg.of.vn = intersect(
        connected.component(G, vn.rhs.ind, direction=1),
        connected.component(G, vn.ind, direction=0))
    ind = unique(gg$backref[ restriction.morphism.of.graph.at.v(G,subg.of.vn) ])
    re = integer(0)
    # keep only equations whose vertices all lie inside the subgraph
    for(i in ind){
        if (! all(gg$eqns.vertex[[ i ]] %in% subg.of.vn) ){
            warning(sprintf('Equation: %s has more relations than in subgraph, will be dropped!', paste(deparse(eqns[[i]]),collapse=' ')))
        } else {
            re = c(re, i)
        }
    }
    list(final.e=vn.expression,
         depend.on=vn.rhs,
         intermediate=gg$vertex.table[setdiff(subg.of.vn, union(vn.ind, vn.rhs.ind))],
         ind = re)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opsworkscm_operations.R
\name{opsworkscm_export_server_engine_attribute}
\alias{opsworkscm_export_server_engine_attribute}
\title{Exports a specified server engine attribute as a base64-encoded string}
\usage{
opsworkscm_export_server_engine_attribute(ExportAttributeName,
ServerName, InputAttributes)
}
\arguments{
\item{ExportAttributeName}{[required] The name of the export attribute. Currently, the supported export
attribute is \code{Userdata}. This exports a user data script that includes
parameters and values provided in the \code{InputAttributes} list.}
\item{ServerName}{[required] The name of the server from which you are exporting the attribute.}
\item{InputAttributes}{The list of engine attributes. The list type is \code{EngineAttribute}. An
\code{EngineAttribute} list item is a pair that includes an attribute name
and its value. For the \code{Userdata} ExportAttributeName, the following are
supported engine attribute names.
\itemize{
\item \strong{RunList} In Chef, a list of roles or recipes that are run in the
specified order. In Puppet, this parameter is ignored.
\item \strong{OrganizationName} In Chef, an organization name. AWS OpsWorks for
Chef Automate always creates the organization \code{default}. In Puppet,
this parameter is ignored.
\item \strong{NodeEnvironment} In Chef, a node environment (for example,
development, staging, or one-box). In Puppet, this parameter is
ignored.
\item \strong{NodeClientVersion} In Chef, the version of the Chef engine (three
numbers separated by dots, such as 13.8.5). If this attribute is
empty, OpsWorks for Chef Automate uses the most current version. In
Puppet, this parameter is ignored.
}}
}
\description{
Exports a specified server engine attribute as a base64-encoded string.
For example, you can export user data that you can use in EC2 to
associate nodes with a server.
}
\details{
This operation is synchronous.
A \code{ValidationException} is raised when parameters of the request are not
valid. A \code{ResourceNotFoundException} is thrown when the server does not
exist. An \code{InvalidStateException} is thrown when the server is in any of
the following states: CREATING, TERMINATED, FAILED or DELETING.
}
\section{Request syntax}{
\preformatted{svc$export_server_engine_attribute(
ExportAttributeName = "string",
ServerName = "string",
InputAttributes = list(
list(
Name = "string",
Value = "string"
)
)
)
}
}
\keyword{internal}
|
/cran/paws.management/man/opsworkscm_export_server_engine_attribute.Rd
|
permissive
|
johnnytommy/paws
|
R
| false
| true
| 2,529
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opsworkscm_operations.R
\name{opsworkscm_export_server_engine_attribute}
\alias{opsworkscm_export_server_engine_attribute}
\title{Exports a specified server engine attribute as a base64-encoded string}
\usage{
opsworkscm_export_server_engine_attribute(ExportAttributeName,
ServerName, InputAttributes)
}
\arguments{
\item{ExportAttributeName}{[required] The name of the export attribute. Currently, the supported export
attribute is \code{Userdata}. This exports a user data script that includes
parameters and values provided in the \code{InputAttributes} list.}
\item{ServerName}{[required] The name of the server from which you are exporting the attribute.}
\item{InputAttributes}{The list of engine attributes. The list type is \code{EngineAttribute}. An
\code{EngineAttribute} list item is a pair that includes an attribute name
and its value. For the \code{Userdata} ExportAttributeName, the following are
supported engine attribute names.
\itemize{
\item \strong{RunList} In Chef, a list of roles or recipes that are run in the
specified order. In Puppet, this parameter is ignored.
\item \strong{OrganizationName} In Chef, an organization name. AWS OpsWorks for
Chef Automate always creates the organization \code{default}. In Puppet,
this parameter is ignored.
\item \strong{NodeEnvironment} In Chef, a node environment (for example,
development, staging, or one-box). In Puppet, this parameter is
ignored.
\item \strong{NodeClientVersion} In Chef, the version of the Chef engine (three
numbers separated by dots, such as 13.8.5). If this attribute is
empty, OpsWorks for Chef Automate uses the most current version. In
Puppet, this parameter is ignored.
}}
}
\description{
Exports a specified server engine attribute as a base64-encoded string.
For example, you can export user data that you can use in EC2 to
associate nodes with a server.
}
\details{
This operation is synchronous.
A \code{ValidationException} is raised when parameters of the request are not
valid. A \code{ResourceNotFoundException} is thrown when the server does not
exist. An \code{InvalidStateException} is thrown when the server is in any of
the following states: CREATING, TERMINATED, FAILED or DELETING.
}
\section{Request syntax}{
\preformatted{svc$export_server_engine_attribute(
ExportAttributeName = "string",
ServerName = "string",
InputAttributes = list(
list(
Name = "string",
Value = "string"
)
)
)
}
}
\keyword{internal}
|
## Bootstrapped TERGM (temporal ERGM) estimation for one focal firm's
## competition network: load a serialized list of networks, extract edge
## covariate matrices, define the model formula, estimate with btergm,
## then save the fitted object and an HTML regression table.
cat('\n\n');timestamp();cat('\n')
library(btergm)
library(parallel)
library(texreg)
data_dir <- '/home/sdowning/data/firm_nets_rnr'
firm_i <- 'qualtrics'  # focal firm whose network file is loaded
d <- 3                 # distance parameter encoded in the data file name
ncpus <- 4
# NOTE(review): this shadows the loaded `parallel` package name; it is the
# value passed to btergm's `parallel=` argument below.
parallel <- "multicore"
data_file <- file.path(data_dir,sprintf('%s_d%s.rds',firm_i,d))
nets <- readRDS(data_file)
nPeriods <- 11 ## 5
# `fits` may already exist when this script is re-sourced in a long-running
# session; in that case previously computed fits are kept.
if (!("fits" %in% ls())) fits <- list()
if (!(firm_i %in% names(fits)) ) fits[[firm_i]] <- list()
# keep only the most recent nPeriods networks
if (nPeriods < length(nets)) nets <- nets[(length(nets)-nPeriods+1):length(nets)]
cat("\n------------ estimating TERGM for:",firm_i,'--------------\n')
cat(sprintf("Using %s cores\n", detectCores()))
## make MMC nets list
## (edge covariates stored as network attributes, pulled out as matrix lists)
mmc <- lapply(nets, function(net) as.matrix(net %n% 'mmc'))
cpc <- lapply(nets, function(net) as.matrix(net %n% 'coop'))
cpp <- lapply(nets, function(net) as.matrix(net %n% 'coop_past'))
cpa <- lapply(nets, function(net) as.matrix(net %n% 'coop') + as.matrix(net %n% 'coop_past') )
####################### DEFINE MODELS ###################################
m4_beta2 <- nets ~ edges + gwesp(0, fixed = T) + gwdegree(0, fixed=T) +
    nodematch("ipo_status", diff = F) +
    nodematch("state_code", diff = F) +
    nodecov("age") + absdiff("age") +
    edgecov(mmc) +
    ##edgecov(cpa) +
    ##edgecov(cpc) +
    ##edgecov(cpp) +
    memory(type = "stability", lag = 1) +
    timecov(transform = function(t) t) +
    nodecov("genidx_multilevel") +
    nodecov("cent_pow_n0_2") + absdiff("cent_pow_n0_2") +
    cycle(3) + cycle(4) + cycle(5)
################################ end models#######################
##
#  DEFINE MODEL and MODEL NAME TO COMPUTE
##
m_x <- 'm4_beta2'
##
# SET RESAMPLES
##
R <- 2000
## RUN TERGM
## (get(m_x) looks the chosen formula up by name in the current environment)
fits[[firm_i]][[m_x]] <- btergm(get(m_x), R=R, parallel = parallel, ncpus = ncpus)
## SAVE SERIALIZED
fits.file <- sprintf('/home/sdowning/compnet/results/amj_rnr/fit_%s_pd%s_R%s_%s.rds', firm_i, nPeriods, R, m_x)
saveRDS(fits, file=fits.file)
## SAVE FORMATTED REGRESSION TABLE
html.file <- sprintf('/home/sdowning/compnet/results/amj_rnr/%s_tergm_results_pd%s_R%s_%s.html', firm_i, nPeriods, R, m_x)
htmlreg(fits[[firm_i]], digits = 3, file=html.file)
#### SAVE GOODNESS OF FIT
##gf <- gof(fits[[firm_i]][[m_x]], nsim=1000,
##    statistics=c(dsp, esp, deg, geodesic, rocpr, walktrap.modularity))
##gof.file <- sprintf('/home/sdowning/compnet/results/amj_rnr/gof_%s_pd%s_R%s_%s.rds', firm_i, nPeriods, R, m_x)
##saveRDS(gf, file=gof.file)
cat('finished successfully.')
|
/R/awareness_AMJ_RNR_TERGM_m4-beta2.R
|
no_license
|
sdownin/compnet
|
R
| false
| false
| 2,440
|
r
|
## Bootstrapped TERGM (temporal ERGM) estimation for one focal firm's
## competition network: load a serialized list of networks, extract edge
## covariate matrices, define the model formula, estimate with btergm,
## then save the fitted object and an HTML regression table.
cat('\n\n');timestamp();cat('\n')
library(btergm)
library(parallel)
library(texreg)
data_dir <- '/home/sdowning/data/firm_nets_rnr'
firm_i <- 'qualtrics'  # focal firm whose network file is loaded
d <- 3                 # distance parameter encoded in the data file name
ncpus <- 4
# NOTE(review): this shadows the loaded `parallel` package name; it is the
# value passed to btergm's `parallel=` argument below.
parallel <- "multicore"
data_file <- file.path(data_dir,sprintf('%s_d%s.rds',firm_i,d))
nets <- readRDS(data_file)
nPeriods <- 11 ## 5
# `fits` may already exist when this script is re-sourced in a long-running
# session; in that case previously computed fits are kept.
if (!("fits" %in% ls())) fits <- list()
if (!(firm_i %in% names(fits)) ) fits[[firm_i]] <- list()
# keep only the most recent nPeriods networks
if (nPeriods < length(nets)) nets <- nets[(length(nets)-nPeriods+1):length(nets)]
cat("\n------------ estimating TERGM for:",firm_i,'--------------\n')
cat(sprintf("Using %s cores\n", detectCores()))
## make MMC nets list
## (edge covariates stored as network attributes, pulled out as matrix lists)
mmc <- lapply(nets, function(net) as.matrix(net %n% 'mmc'))
cpc <- lapply(nets, function(net) as.matrix(net %n% 'coop'))
cpp <- lapply(nets, function(net) as.matrix(net %n% 'coop_past'))
cpa <- lapply(nets, function(net) as.matrix(net %n% 'coop') + as.matrix(net %n% 'coop_past') )
####################### DEFINE MODELS ###################################
m4_beta2 <- nets ~ edges + gwesp(0, fixed = T) + gwdegree(0, fixed=T) +
    nodematch("ipo_status", diff = F) +
    nodematch("state_code", diff = F) +
    nodecov("age") + absdiff("age") +
    edgecov(mmc) +
    ##edgecov(cpa) +
    ##edgecov(cpc) +
    ##edgecov(cpp) +
    memory(type = "stability", lag = 1) +
    timecov(transform = function(t) t) +
    nodecov("genidx_multilevel") +
    nodecov("cent_pow_n0_2") + absdiff("cent_pow_n0_2") +
    cycle(3) + cycle(4) + cycle(5)
################################ end models#######################
##
#  DEFINE MODEL and MODEL NAME TO COMPUTE
##
m_x <- 'm4_beta2'
##
# SET RESAMPLES
##
R <- 2000
## RUN TERGM
## (get(m_x) looks the chosen formula up by name in the current environment)
fits[[firm_i]][[m_x]] <- btergm(get(m_x), R=R, parallel = parallel, ncpus = ncpus)
## SAVE SERIALIZED
fits.file <- sprintf('/home/sdowning/compnet/results/amj_rnr/fit_%s_pd%s_R%s_%s.rds', firm_i, nPeriods, R, m_x)
saveRDS(fits, file=fits.file)
## SAVE FORMATTED REGRESSION TABLE
html.file <- sprintf('/home/sdowning/compnet/results/amj_rnr/%s_tergm_results_pd%s_R%s_%s.html', firm_i, nPeriods, R, m_x)
htmlreg(fits[[firm_i]], digits = 3, file=html.file)
#### SAVE GOODNESS OF FIT
##gf <- gof(fits[[firm_i]][[m_x]], nsim=1000,
##    statistics=c(dsp, esp, deg, geodesic, rocpr, walktrap.modularity))
##gof.file <- sprintf('/home/sdowning/compnet/results/amj_rnr/gof_%s_pd%s_R%s_%s.rds', firm_i, nPeriods, R, m_x)
##saveRDS(gf, file=gof.file)
cat('finished successfully.')
|
\name{score.mopt_env}
\alias{score.mopt_env}
\title{compute the simulated score from
the chain. This is the matrix of the derivative of the
moments with respect to the parameters around the best parameter
value.}
\usage{
score.mopt_env(me)
}
\description{
compute the simulated score from the chain. This is
the matrix of the derivative of the moments with respect
to the parameters around the best parameter value.
}
|
/man/score.mopt_env.Rd
|
no_license
|
priscillafialho/mopt
|
R
| false
| false
| 435
|
rd
|
\name{score.mopt_env}
\alias{score.mopt_env}
\title{compute the simulated score from
the chain. This is the matrix of the derivative of the
moments with respect to the parameters around the best parameter
value.}
\usage{
score.mopt_env(me)
}
\description{
compute the simulated score from the chain. This is
the matrix of the derivative of the moments with respect
to the parameters around the best parameter value.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auto.R
\name{auto_dark_light_theme}
\alias{auto_dark_light_theme}
\alias{set_theme_light}
\alias{set_theme_dark}
\alias{use_theme_light}
\alias{use_theme_dark}
\alias{use_theme_toggle}
\alias{use_theme_auto}
\title{Automatically or manually toggle light and dark themes}
\usage{
set_theme_light(theme = NULL)
set_theme_dark(theme = NULL)
use_theme_light(quietly = FALSE)
use_theme_dark(quietly = FALSE)
use_theme_toggle(quietly = FALSE)
use_theme_auto(dark_start = "18:00", dark_end = "6:00", quietly = FALSE)
}
\arguments{
\item{theme}{The name of the theme, or \code{NULL} to use current theme.}
\item{quietly}{Suppress confirmation messages}
\item{dark_start}{Start time of dark mode, in 24-hour \code{"HH:MM"} format.}
\item{dark_end}{End time of dark mode, in 24-hour \code{"HH:MM"} format.}
}
\description{
These functions help manage switching between preferred dark and light
themes. Use \code{rsthemes::set_theme_light()} and \code{rsthemes::set_theme_dark()} to
declare your preferred light/dark themes. Set the preferred theme in your
\verb{~/.Rprofile} (see the section below) for ease of switching. Then use the
\code{use_theme_light()} or \code{use_theme_dark()} to switch to your preferred theme.
Switch between dark and light themes with \code{use_theme_toggle()} or
automatically set your theme to dark mode using \code{use_theme_auto()}.
}
\section{Functions}{
\itemize{
\item \code{set_theme_light}: Set default light theme
\item \code{set_theme_dark}: Set default dark theme
\item \code{use_theme_light}: Use default light theme
\item \code{use_theme_dark}: Use default dark theme
\item \code{use_theme_toggle}: Toggle between dark and light themes
\item \code{use_theme_auto}: Auto switch between dark and light themes
}}
\section{Set preferred theme in \code{.Rprofile}}{
Add the following to your \verb{~/.Rprofile} (see \code{\link[usethis:edit_r_profile]{usethis::edit_r_profile()}}) to
declare your default themes:\preformatted{if (interactive() && requireNamespace("rsthemes", quietly = TRUE)) \{
# Set preferred themes if not handled elsewhere..
rsthemes::set_theme_light("One Light \{rsthemes\}") # light theme
rsthemes::set_theme_dark("One Dark \{rsthemes\}") # dark theme
# Whenever the R session restarts inside RStudio...
setHook("rstudio.sessionInit", function(isNewSession) \{
# Automatically choose the correct theme based on time of day
rsthemes::use_theme_auto(dark_start = "18:00", dark_end = "6:00")
\}, action = "append")
\}
}
If you'd rather not use this approach, you can simply declare the global
options that declare the default themes, but you won't be able to use
\code{\link[=use_theme_auto]{use_theme_auto()}} at startup.\preformatted{# ~/.Rprofile
rsthemes::set_theme_light("One Light \{rsthemes\}")
rsthemes::set_theme_dark("One Dark \{rsthemes\}")
}\preformatted{# ~/.Rprofile
options(
rsthemes.theme_light = "One Light \{rsthemes\}",
rsthemes.theme_dark = "One Dark \{rsthemes\}"
)
}
}
\section{RStudio Addins}{
\pkg{rsthemes} includes four RStudio addins to help you easily switch between
light and dark modes. You can set the default dark or light theme to the
current theme. You can also toggle between light and dark mode or switch
to the automatically chosen light/dark theme based on time of day. You can
set a keyboard shortcut to \strong{Toggle Dark Mode} or
\strong{Auto Choose Dark or Light Theme} from the \emph{Modify Keyboard Shortcuts...}
window under the RStudio \emph{Tools} menu.
}
|
/man/auto_dark_light_theme.Rd
|
permissive
|
g66m/rsthemes
|
R
| false
| true
| 3,577
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auto.R
\name{auto_dark_light_theme}
\alias{auto_dark_light_theme}
\alias{set_theme_light}
\alias{set_theme_dark}
\alias{use_theme_light}
\alias{use_theme_dark}
\alias{use_theme_toggle}
\alias{use_theme_auto}
\title{Automatically or manually toggle light and dark themes}
\usage{
set_theme_light(theme = NULL)
set_theme_dark(theme = NULL)
use_theme_light(quietly = FALSE)
use_theme_dark(quietly = FALSE)
use_theme_toggle(quietly = FALSE)
use_theme_auto(dark_start = "18:00", dark_end = "6:00", quietly = FALSE)
}
\arguments{
\item{theme}{The name of the theme, or \code{NULL} to use current theme.}
\item{quietly}{Suppress confirmation messages}
\item{dark_start}{Start time of dark mode, in 24-hour \code{"HH:MM"} format.}
\item{dark_end}{End time of dark mode, in 24-hour \code{"HH:MM"} format.}
}
\description{
These functions help manage switching between preferred dark and light
themes. Use \code{rsthemes::set_theme_light()} and \code{rsthemes::set_theme_dark()} to
declare your preferred light/dark themes. Set the preferred theme in your
\verb{~/.Rprofile} (see the section below) for ease of switching. Then use the
\code{use_theme_light()} or \code{use_theme_dark()} to switch to your preferred theme.
Switch between dark and light themes with \code{use_theme_toggle()} or
automatically set your theme to dark mode using \code{use_theme_auto()}.
}
\section{Functions}{
\itemize{
\item \code{set_theme_light}: Set default light theme
\item \code{set_theme_dark}: Set default dark theme
\item \code{use_theme_light}: Use default light theme
\item \code{use_theme_dark}: Use default dark theme
\item \code{use_theme_toggle}: Toggle between dark and light themes
\item \code{use_theme_auto}: Auto switch between dark and light themes
}}
\section{Set preferred theme in \code{.Rprofile}}{
Add the following to your \verb{~/.Rprofile} (see \code{\link[usethis:edit_r_profile]{usethis::edit_r_profile()}}) to
declare your default themes:\preformatted{if (interactive() && requireNamespace("rsthemes", quietly = TRUE)) \{
# Set preferred themes if not handled elsewhere..
rsthemes::set_theme_light("One Light \{rsthemes\}") # light theme
rsthemes::set_theme_dark("One Dark \{rsthemes\}") # dark theme
# Whenever the R session restarts inside RStudio...
setHook("rstudio.sessionInit", function(isNewSession) \{
# Automatically choose the correct theme based on time of day
rsthemes::use_theme_auto(dark_start = "18:00", dark_end = "6:00")
\}, action = "append")
\}
}
If you'd rather not use this approach, you can simply declare the global
options that declare the default themes, but you won't be able to use
\code{\link[=use_theme_auto]{use_theme_auto()}} at startup.\preformatted{# ~/.Rprofile
rsthemes::set_theme_light("One Light \{rsthemes\}")
rsthemes::set_theme_dark("One Dark \{rsthemes\}")
}\preformatted{# ~/.Rprofile
options(
rsthemes.theme_light = "One Light \{rsthemes\}",
rsthemes.theme_dark = "One Dark \{rsthemes\}"
)
}
}
\section{RStudio Addins}{
\pkg{rsthemes} includes four RStudio addins to help you easily switch between
light and dark modes. You can set the default dark or light theme to the
current theme. You can also toggle between light and dark mode or switch
to the automatically chosen light/dark theme based on time of day. You can
set a keyboard shortcut to \strong{Toggle Dark Mode} or
\strong{Auto Choose Dark or Light Theme} from the \emph{Modify Keyboard Shortcuts...}
window under the RStudio \emph{Tools} menu.
}
|
# Setup: reshape2 (melt), ggplot2 (plots), gdata (read.xls for Excel import).
# Install only when missing -- unconditionally calling install.packages() on
# every run re-downloads packages and requires network access.
if (!requireNamespace("reshape2", quietly = TRUE)) install.packages("reshape2")
if (!requireNamespace("ggplot2", quietly = TRUE)) install.packages("ggplot2")
library(reshape2)
library(ggplot2)
# Use library() rather than require() for a hard dependency: require() returns
# FALSE on failure, which was silently discarded here; library() errors loudly.
library(gdata)
################################
# proportions for DAFi results #
################################
# Load DAFi-gated cell population proportions from each center (LJI, Emory A,
# Emory B, Mt. Sinai). Each sheet has populations in column 1 and samples in
# the remaining columns; after transposing, rows = samples, cols = populations.
# NOTE(review): column range 2:12 hard-codes 11 samples -- confirm against files.
lji_dafi <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_DAFi_new_2/LJI_props_4.xlsx",header= TRUE, sep = ",", sheet = 2)
rownames(lji_dafi)<-lji_dafi[,1]
lji_dafi<-lji_dafi[,2:12]
lji_dafi <- as.data.frame(t(lji_dafi))
emorya_dafi <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_DAFi_new_2/emoryA_props_2.xlsx",header= TRUE, sep = ",", sheet=2)
rownames(emorya_dafi)<-emorya_dafi[,1]
emorya_dafi<-emorya_dafi[,2:12]
emorya_dafi <- as.data.frame(t(emorya_dafi))
emoryb_dafi <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_DAFi_new_2/emoryB_props_2.xlsx",header= TRUE, sep = ",", sheet=2)
rownames(emoryb_dafi)<-emoryb_dafi[,1]
emoryb_dafi<-emoryb_dafi[,2:12]
emoryb_dafi <- as.data.frame(t(emoryb_dafi))
# Older Mt. Sinai file kept for reference:
#mtsinai_dafi <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_DAFi_new/Mtsinai_props_2.xlsx",header= TRUE, sep = ",")
mtsinai_dafi <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_DAFi_new_2/Mtsinai_props_4.xlsx",header= TRUE, sep = ",",sheet=2)
rownames(mtsinai_dafi)<-mtsinai_dafi[,1]
mtsinai_dafi<-mtsinai_dafi[,2:12]
mtsinai_dafi <- as.data.frame(t(mtsinai_dafi))
#Take an average of emory results for DAFi
# Average the two Emory replicates, then compute the coefficient of variation
# (CV) across the three centers for each (sample, population) cell.
r_cv <- row.names(emorya_dafi)
c_cv <- colnames(emorya_dafi)
emory <- c()
emory <- (emorya_dafi+emoryb_dafi)/2
cv_new <- c()
# 11 samples x 6 populations result grid.
cv_new <- data.frame(matrix(ncol = 6,nrow = 11))
rownames(cv_new) <- r_cv
colnames(cv_new) <- c_cv
# i/j initializations are redundant -- for() assigns the loop variable itself.
i=1
j=1
for (i in 1:9){
for (j in 1:6){
# sd(x/mean(x)) == sd(x)/mean(x), i.e. the coefficient of variation of the
# three centers' values for this sample/population cell.
cv_new[i,j]<-sd(c(emory[i,j],lji_dafi[i,j],mtsinai_dafi[i,j])/mean(c(emory[i,j],lji_dafi[i,j],mtsinai_dafi[i,j])))
}
}
# Sample 10 uses only two centers -- presumably Mt. Sinai data is missing for
# this sample; TODO confirm.
for (j in 1:6){
cv_new[10,j]<-sd(c(emory[10,j],lji_dafi[10,j])/mean(c(emory[10,j],lji_dafi[10,j])))
}
for (j in 1:6){
cv_new[11,j]<-sd(c(emory[11,j],lji_dafi[11,j],mtsinai_dafi[11,j])/mean(c(emory[11,j],lji_dafi[11,j],mtsinai_dafi[11,j])))
}
#####################################################################################################################
#without taking average#
# Same CV computation but treating Emory A and Emory B as separate replicates
# (LJI + Emory A + Emory B; Mt. Sinai not included here).
cv_dafi <- data.frame(matrix(ncol = 6,nrow = 11))
r_cv <- row.names(emorya_dafi)
c_cv <- colnames(emorya_dafi)
rownames(cv_dafi) <- r_cv
colnames(cv_dafi) <- c_cv
i=1
j=1
# NOTE(review): only rows 1:9 are filled -- rows 10 and 11 of cv_dafi stay NA,
# unlike cv_new above which handles them separately. Confirm this is intended.
for (i in 1:9) {
for (j in 1:6){
cv_dafi[i,j] <- sd(c(emorya_dafi[i,j],lji_dafi[i,j],emoryb_dafi[i,j]))/mean(c(emorya_dafi[i,j],lji_dafi[i,j],emoryb_dafi[i,j]))
}
}
#####################################################################################################################
##################################
# Manual gating cell proportions #
##################################
# Load manually-gated proportions for the same centers, same layout as the
# DAFi files (transpose so rows = samples, cols = populations).
# NOTE(review): the first three paths end in .csv but are read with
# gdata::read.xls(..., sheet = 2), which expects Excel workbooks -- confirm
# these files are actually Excel despite the extension.
lji_manual <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_manual/lji_manual.csv",header= TRUE, sep = ",", sheet = 2)
rownames(lji_manual)<-lji_manual[,1]
lji_manual<-lji_manual[,2:12]
lji_manual <- as.data.frame(t(lji_manual))
emorya_manual <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_manual/emorya_manual.csv",header= TRUE, sep = ",", sheet = 2)
rownames(emorya_manual)<-emorya_manual[,1]
emorya_manual<-emorya_manual[,2:12]
emorya_manual <- as.data.frame(t(emorya_manual))
emoryb_manual <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_manual/emoryb_manual.csv",header= TRUE, sep = ",", sheet = 2)
rownames(emoryb_manual)<-emoryb_manual[,1]
emoryb_manual<-emoryb_manual[,2:12]
emoryb_manual <- as.data.frame(t(emoryb_manual))
mtsinai_manual <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_manual/MtSinai_manual.xlsx",header=TRUE, sep = ",", sheet = 2)
rownames(mtsinai_manual)<-mtsinai_manual[,1]
mtsinai_manual<-mtsinai_manual[,2:12]
mtsinai_manual <- as.data.frame(t(mtsinai_manual))
#Taking an average of emory results for manual
# Mirror of the DAFi CV computation, for manual gating: average the two Emory
# replicates, then compute per-cell CV across the three centers.
emory_m <- c()
emory_m <- (emorya_manual+emoryb_manual)/2
cv_m <- c()
cv_m <- data.frame(matrix(ncol = 6,nrow = 11))
# r_cv / c_cv carry over from the DAFi section (same samples/populations).
rownames(cv_m) <- r_cv
colnames(cv_m) <- c_cv
i=1
j=1
for (i in 1:9){
for (j in 1:6){
# sd(x/mean(x)) == sd(x)/mean(x): coefficient of variation across centers.
cv_m[i,j]<-sd(c(emory_m[i,j],lji_manual[i,j],mtsinai_manual[i,j])/mean(c(emory_m[i,j],lji_manual[i,j],mtsinai_manual[i,j])))
}
}
# Sample 10: two centers only (Mt. Sinai excluded, as in the DAFi section).
for (j in 1:6){
cv_m[10,j]<-sd(c(emory_m[10,j],lji_manual[10,j])/mean(c(emory_m[10,j],lji_manual[10,j])))
}
for (j in 1:6){
cv_m[11,j]<-sd(c(emory_m[11,j],lji_manual[11,j],mtsinai_manual[11,j])/mean(c(emory_m[11,j],lji_manual[11,j],mtsinai_manual[11,j])))
}
#Plotting
# Side-by-side boxplots of Manual vs DAFi CV per population (Emory-averaged).
# melt(cv_m) yields (population, value) long form; the second melt's value
# column is bound on as the DAFi series.
cv_avg <- data.frame(melt(cv_m),melt(cv_new)[2])
names(cv_avg) <- c("population", "Manual" , "DAFi")
df2_new <- melt(cv_avg)
# quartz() opens a new macOS graphics device (macOS-only).
quartz()
ggplot(data = df2_new)+geom_boxplot(aes(x=population,y=value,fill=variable))+ggtitle("Coefficient of variability")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
#####################################################################################################################
#without taking average##
# Manual-gating CV with Emory A and B as separate replicates (LJI + both
# Emory runs; Mt. Sinai not included), mirroring cv_dafi above.
cv_manual <- data.frame(matrix(ncol = 6,nrow = 11))
r_cv_m <- row.names(emorya_manual)
c_cv_m <- colnames(emorya_manual)
rownames(cv_manual) <- r_cv_m
colnames(cv_manual) <- c_cv_m
i=1
j=1
# Unlike cv_dafi (rows 1:9 only), this fills all 11 rows.
for (i in 1:11) {
for (j in 1:6){
cv_manual[i,j] <- sd(c(emorya_manual[i,j],lji_manual[i,j],emoryb_manual[i,j]))/mean(c(emorya_manual[i,j],lji_manual[i,j],emoryb_manual[i,j]))
}
}
# Boxplots of DAFi vs Manual CV per population (non-averaged Emory variant).
cv <- data.frame(melt(cv_dafi),melt(cv_manual)[2])
names(cv) <- c("population", "DAFi" , "Manual")
df2 <- melt(cv)
quartz()
ggplot(data = df2)+geom_boxplot(aes(x=population,y=value,fill=variable))+ggtitle("Coefficient of variability")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
##############################################################################################################################################
# Scratch/exploratory section: base-graphics scatter of LJI vs Mt. Sinai for
# the first population, with a least-squares line.
plot(lji_dafi[,1],mtsinai_dafi[,1])
abline(lm(mtsinai_dafi[,1]~lji_dafi[,1]))
# Summary-stat workbooks from an older analysis directory (unused below).
lji_man <- read.xls("/Users/amandava/Desktop/HIPC_IOF_Analysis/Compare_centers/Manual_prop/stats_LJI_manual.xlsx")
emory_man <- read.xls("/Users/amandava/Desktop/HIPC_IOF_Analysis/Compare_centers/Manual_prop/stats_pB_manual.xlsx")
mtsinai_man <- read.xls("/Users/amandava/Desktop/HIPC_IOF_Analysis/Compare_centers/Manual_prop/stats_MtSinai_manual.xlsx")
# NOTE(review): `df` is never created in this script, so these two assignments
# error at runtime ("object 'df' not found") -- dead/broken scratch code.
df$x <- lji_dafi[1,]
df$y <- mtsinai_dafi[1,]
# NOTE(review): broken call -- `emory_dafi` is undefined (only emorya_dafi /
# emoryb_dafi / emory exist), and ggplot() is given aes() as its data argument
# with geom_point() in the mapping slot. Probably meant
# ggplot(mapping = aes(...)) + geom_point(shape = 1).
ggplot(aes(lji_dafi[,1],emory_dafi[,1]), geom_point(shape=1))
#########################################################################################################################################
# Demo/sanity-check block: build two 11x6 frames of N(0,1) noise shaped like
# the CV tables and draw the same style of boxplot.
cv1 <- data.frame(replicate(6,rnorm(11,mean = 0, sd =1)))
cv2 <- data.frame(replicate(6,rnorm(11,mean = 0, sd =1)))
# View() opens RStudio data viewers -- interactive use only.
View(cv2)
View(lji_dafi)
# NOTE(review): dimnames look transposed relative to the real CV tables --
# rows get lji_dafi's column (population) names and vice versa; works only
# because both frames happen to be 11x6. Confirm intent.
rownames(cv1) <- colnames(lji_dafi)
colnames(cv1) <- rownames(lji_dafi)
rownames(cv2) <- colnames(lji_dafi)
colnames(cv2) <- rownames(lji_dafi)
# Reuses the name `cv` from the earlier DAFi/Manual comparison.
cv <- data.frame(melt(cv1),melt(cv2)[2])
names(cv) <- c("population", "cv1" , "cv2")
df2 <- melt(cv)
quartz()
ggplot(data = df2)+geom_boxplot(aes(x=population,y=value,fill=variable),alpha=0.7)+ggtitle("Coefficient of variability")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
####################
####################
#comparing the proportions across the institutes for DAFi and manual
# Per-population comparison: for each population column, bind the three
# centers' proportions, pair the Manual and DAFi series, and draw boxplots
# grouped by center. Column 1 = monocytes, column 2 = B cells (per the
# ggtitle labels; confirm against the source workbooks).
#Monocytes
# NOTE(review): these blocks use emoryb_* only (single Emory replicate),
# while NK/T-cell blocks below use the averaged emory/emory_m -- confirm
# the inconsistency is intentional.
mono_dafi <- as.data.frame(cbind(lji_dafi[,1], emoryb_dafi[,1],mtsinai_dafi[,1]),row.names = TRUE,colnames = TRUE)
colnames(mono_dafi)<-c("lji","emory","mtsinai")
mono_manual <- as.data.frame(cbind(lji_manual[,1], emoryb_manual[,1],mtsinai_manual[,1]),row.names = TRUE,colnames = TRUE)
colnames(mono_manual)<-c("lji","emory","mtsinai")
mono_avg <- data.frame(melt(mono_manual),melt(mono_dafi)[2])
names(mono_avg)<-c("Center","Manual","DAFi")
monocytes <- melt(mono_avg)
quartz()
ggplot(data = na.omit(monocytes))+geom_boxplot(aes(x=Center,y=value,fill=variable),alpha=0.7)+ggtitle("Monocytes Population proportion ")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
#Bcells
bcells_dafi <- as.data.frame(cbind(lji_dafi[,2], emoryb_dafi[,2],mtsinai_dafi[,2]),row.names = TRUE,colnames = TRUE)
colnames(bcells_dafi)<-c("lji","emory","mtsinai")
bcells_manual <- as.data.frame(cbind(lji_manual[,2], emoryb_manual[,2],mtsinai_manual[,2]),row.names = TRUE,colnames = TRUE)
colnames(bcells_manual)<-c("lji","emory","mtsinai")
bcells_avg <- data.frame(melt(bcells_manual),melt(bcells_dafi)[2])
names(bcells_avg)<-c("Center","Manual","DAFi")
bcells <- melt(bcells_avg)
quartz()
ggplot(data = bcells)+geom_boxplot(aes(x=Center,y=value,fill=variable),alpha=0.7)+ggtitle("B-cell Population proportion ")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
#comparing the NK cell population across the institutes for DAFi and manual
# Column 3 = NK cells, column 4 = T cells (per ggtitle labels). These blocks
# use the Emory-averaged frames (emory / emory_m), unlike the monocyte and
# B-cell blocks above which use emoryb_* directly.
NKcell_dafi <- as.data.frame(cbind(lji_dafi[,3], emory[,3],mtsinai_dafi[,3]),row.names = TRUE,colnames = TRUE)
colnames(NKcell_dafi)<-c("lji","emory","mtsinai")
NKcell_manual <- as.data.frame(cbind(lji_manual[,3], emory_m[,3],mtsinai_manual[,3]),row.names = TRUE,colnames = TRUE)
colnames(NKcell_manual)<-c("lji","emory","mtsinai")
NKcell_avg <- data.frame(melt(NKcell_manual),melt(NKcell_dafi)[2])
names(NKcell_avg)<-c("Center","Manual","DAFi")
NKcell <- melt(NKcell_avg)
quartz()
ggplot(data = na.omit(NKcell))+geom_boxplot(aes(x=Center,y=value,fill=variable),alpha=0.7)+ggtitle("NK cell Population proportion ")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
#comparing the T cell population across the institutes for DAFi and manual
Tcell_dafi <- as.data.frame(cbind(lji_dafi[,4], emory[,4],mtsinai_dafi[,4]),row.names = TRUE,colnames = TRUE)
colnames(Tcell_dafi)<-c("lji","emory","mtsinai")
Tcell_manual <- as.data.frame(cbind(lji_manual[,4], emory_m[,4],mtsinai_manual[,4]),row.names = TRUE,colnames = TRUE)
colnames(Tcell_manual)<-c("lji","emory","mtsinai")
Tcell_avg <- data.frame(melt(Tcell_manual),melt(Tcell_dafi)[2])
names(Tcell_avg)<-c("Center","Manual","DAFi")
Tcell <- melt(Tcell_avg)
quartz()
ggplot(data = na.omit(Tcell))+geom_boxplot(aes(x=Center,y=value,fill=variable),alpha=0.7)+ggtitle("T cell Population proportion ")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
#CD4+ T cell
# Column 5 = CD4+ T cells, column 6 = CD8+ T cells (per ggtitle labels).
# Back to single-replicate emoryb_* frames here, like the monocyte/B-cell
# blocks -- see the consistency note above.
mydata_dafi <- as.data.frame(cbind(lji_dafi[,5], emoryb_dafi[,5],mtsinai_dafi[,5]),row.names = TRUE,colnames = TRUE)
colnames(mydata_dafi)<-c("lji","emory","mtsinai")
mydata_manual <- as.data.frame(cbind(lji_manual[,5], emoryb_manual[,5],mtsinai_manual[,5]),row.names = TRUE,colnames = TRUE)
colnames(mydata_manual)<-c("lji","emory","mtsinai")
mydata_avg <- data.frame(melt(mydata_manual),melt(mydata_dafi)[2])
names(mydata_avg)<-c("Center","Manual","DAFi")
mydata <- melt(mydata_avg)
quartz()
ggplot(data = na.omit(mydata))+geom_boxplot(aes(x=Center,y=value,fill=variable),alpha=0.7)+ggtitle("CD4+ T cell Population proportion ")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
#comparing the CD8+ T cell population across the institutes for DAFi and manual
T8cell_dafi <- as.data.frame(cbind(lji_dafi[,6], emoryb_dafi[,6],mtsinai_dafi[,6]),row.names = TRUE,colnames = TRUE)
colnames(T8cell_dafi)<-c("lji","emory","mtsinai")
T8cell_manual <- as.data.frame(cbind(lji_manual[,6], emoryb_manual[,6],mtsinai_manual[,6]),row.names = TRUE,colnames = TRUE)
colnames(T8cell_manual)<-c("lji","emory","mtsinai")
T8cell_avg <- data.frame(melt(T8cell_manual),melt(T8cell_dafi)[2])
names(T8cell_avg)<-c("Center","Manual","DAFi")
T8cell <- melt(T8cell_avg)
quartz()
ggplot(data = na.omit(T8cell))+geom_boxplot(aes(x=Center,y=value,fill=variable),alpha=0.7)+ggtitle("CD8+ T cell Population proportion ")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
################################
#Previous results
################################
# Load an earlier (pre-revision) set of DAFi proportions from CSVs, compute
# the same Emory-averaged CV (cv_p), and plot Manual vs new DAFi vs previous
# DAFi side by side.
lji_pre <- read.csv("/Users/amandava/Desktop/HIPC_IOF_amandava/HIPC_IOF_Analysis/Compare_centers/DAFi_prop/DAFi_LJI_prop.csv",header= TRUE, sep = ",")
rownames(lji_pre)<-lji_pre[,1]
lji_pre<-lji_pre[,2:12]
lji_pre <- as.data.frame(t(lji_pre))
emorya_pre <- read.csv("/Users/amandava/Desktop/HIPC_IOF_amandava/HIPC_IOF_Analysis/Compare_centers/DAFi_prop/DAFi_pA_prop.csv",header= TRUE, sep = ",")
rownames(emorya_pre)<-emorya_pre[,1]
emorya_pre<-emorya_pre[,2:12]
emorya_pre <- as.data.frame(t(emorya_pre))
emoryb_pre <- read.csv("/Users/amandava/Desktop/HIPC_IOF_amandava/HIPC_IOF_Analysis/Compare_centers/DAFi_prop/DAFi_pB_prop.csv",header= TRUE, sep = ",")
rownames(emoryb_pre)<-emoryb_pre[,1]
emoryb_pre<-emoryb_pre[,2:12]
emoryb_pre <- as.data.frame(t(emoryb_pre))
mtsinai_pre <- read.csv("/Users/amandava/Desktop/HIPC_IOF_amandava/HIPC_IOF_Analysis/Compare_centers/DAFi_prop/DAFi_MtSinai_prop.csv",header=TRUE, sep = ",")
rownames(mtsinai_pre)<-mtsinai_pre[,1]
mtsinai_pre<-mtsinai_pre[,2:12]
mtsinai_pre <- as.data.frame(t(mtsinai_pre))
#Taking an average of emory results for manual
emory_p <- c()
emory_p <- (emorya_pre+emoryb_pre)/2
cv_p <- c()
cv_p <- data.frame(matrix(ncol = 6,nrow = 11))
rownames(cv_p) <- r_cv
colnames(cv_p) <- c_cv
i=1
j=1
for (i in 1:9){
for (j in 1:6){
# sd(x/mean(x)) == sd(x)/mean(x): per-cell coefficient of variation.
cv_p[i,j]<-sd(c(emory_p[i,j],lji_pre[i,j],mtsinai_pre[i,j])/mean(c(emory_p[i,j],lji_pre[i,j],mtsinai_pre[i,j])))
}
}
# Sample 10: two centers only, as in the other CV sections.
for (j in 1:6){
cv_p[10,j]<-sd(c(emory_p[10,j],lji_pre[10,j])/mean(c(emory_p[10,j],lji_pre[10,j])))
}
for (j in 1:6){
cv_p[11,j]<-sd(c(emory_p[11,j],lji_pre[11,j],mtsinai_pre[11,j])/mean(c(emory_p[11,j],lji_pre[11,j],mtsinai_pre[11,j])))
}
#Plotting
# Three-way comparison: Manual, current DAFi, previous DAFi CV distributions.
cv_avg <- data.frame(melt(cv_m),melt(cv_new)[2],melt(cv_p)[2])
names(cv_avg) <- c("population", "Manual" , "DAFi","previous_DAFi")
df2_new <- melt(cv_avg)
quartz()
ggplot(data = df2_new)+geom_boxplot(aes(x=population,y=value,fill=variable))+ggtitle("Coefficient of variability")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
############################################################
# Linear regression analysis between DAFi and Manual gating
############################################################
# NOTE: par(mfrow=...) only affects base graphics; ggplot2/qplot output
# ignores it, so these plots are not actually paneled 3x3.
par(mfrow = c(3,3))
quartz()
for(i in 1:6){
  # FIX: ggplot objects are only auto-printed at top level; inside a for loop
  # the original expression was evaluated and discarded, drawing nothing.
  # Capturing the plot and calling print() makes the loop actually render.
  p <- qplot(lji_dafi[,i],emory[,i])+stat_smooth(method="lm", col="red") +
    scale_y_continuous(breaks = c(0.2,0.4,0.6,0.8,1.0),minor_breaks = NULL) +
    scale_x_continuous(breaks = c(0.2,0.4,0.6,0.8,1.0),minor_breaks = NULL)
  print(p)
}
# Individual fits (auto-printed at top level) with matching scatterplots.
lm(lji_dafi$Monocytes~emory$Monocytes)
qplot(lji_dafi$Monocytes,emory$Monocytes)+ stat_smooth(method="lm", col="red")+
  scale_y_continuous(breaks = c(0.2,0.4,0.6,0.8,1.0),minor_breaks = NULL) +
  scale_x_continuous(breaks = c(0.2,0.4,0.6,0.8,1.0),minor_breaks = NULL)
lm(lji_dafi$`NK cells`~emory$`NK cells`)
qplot(lji_dafi$`NK cells`, emory$`NK cells`) + stat_smooth(method="lm", col="red")+
  scale_y_continuous(breaks = c(0.2,0.4,0.6,0.8,1.0),minor_breaks = NULL) +
  scale_x_continuous(breaks = c(0.2,0.4,0.6,0.8,1.0),minor_breaks = NULL)
####Correlation analysis
# Scatter + regression line + Pearson correlation coefficient per population.
quartz(title="LJI")
# NOTE: par(mfrow) does not affect ggplot-based output (ggscatter).
par(mfrow=c(3,3))
#layout.show(6)
# NOTE(review): several problems in this loop --
#  * the loop index i is never used: every iteration plots column 5 of the
#    Mt. Sinai frames (title says "T cells"); presumably [,i] was intended.
#  * ggscatter() comes from the ggpubr package, which is never loaded in
#    this script -- add library(ggpubr) before running.
#  * like qplot above, the ggscatter object is not print()ed inside the
#    loop, so nothing is drawn.
for(i in 1:6){
my_data <- as.data.frame(cbind(mtsinai_dafi[,5],mtsinai_manual[,5]))
ggscatter(my_data,x="V1",y="V2", add="reg.line", conf.int = TRUE,
cor.coef = TRUE, cor.method = "pearson",xlab="DAFi", ylab="Manual",title="T cells")
}
# Close the active graphics device.
dev.off()
|
/CV_Plots.R
|
no_license
|
am794/PreprocessingAndPlotting
|
R
| false
| false
| 17,833
|
r
|
install.packages("reshape2")
install.packages("ggplot2")
library("reshape2")
library("ggplot2")
require(gdata)
################################
# proportions for DAFi results #
################################
lji_dafi <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_DAFi_new_2/LJI_props_4.xlsx",header= TRUE, sep = ",", sheet = 2)
rownames(lji_dafi)<-lji_dafi[,1]
lji_dafi<-lji_dafi[,2:12]
lji_dafi <- as.data.frame(t(lji_dafi))
emorya_dafi <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_DAFi_new_2/emoryA_props_2.xlsx",header= TRUE, sep = ",", sheet=2)
rownames(emorya_dafi)<-emorya_dafi[,1]
emorya_dafi<-emorya_dafi[,2:12]
emorya_dafi <- as.data.frame(t(emorya_dafi))
emoryb_dafi <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_DAFi_new_2/emoryB_props_2.xlsx",header= TRUE, sep = ",", sheet=2)
rownames(emoryb_dafi)<-emoryb_dafi[,1]
emoryb_dafi<-emoryb_dafi[,2:12]
emoryb_dafi <- as.data.frame(t(emoryb_dafi))
#mtsinai_dafi <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_DAFi_new/Mtsinai_props_2.xlsx",header= TRUE, sep = ",")
mtsinai_dafi <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_DAFi_new_2/Mtsinai_props_4.xlsx",header= TRUE, sep = ",",sheet=2)
rownames(mtsinai_dafi)<-mtsinai_dafi[,1]
mtsinai_dafi<-mtsinai_dafi[,2:12]
mtsinai_dafi <- as.data.frame(t(mtsinai_dafi))
#Take an average of emory results for DAFi
r_cv <- row.names(emorya_dafi)
c_cv <- colnames(emorya_dafi)
emory <- c()
emory <- (emorya_dafi+emoryb_dafi)/2
cv_new <- c()
cv_new <- data.frame(matrix(ncol = 6,nrow = 11))
rownames(cv_new) <- r_cv
colnames(cv_new) <- c_cv
i=1
j=1
for (i in 1:9){
for (j in 1:6){
cv_new[i,j]<-sd(c(emory[i,j],lji_dafi[i,j],mtsinai_dafi[i,j])/mean(c(emory[i,j],lji_dafi[i,j],mtsinai_dafi[i,j])))
}
}
for (j in 1:6){
cv_new[10,j]<-sd(c(emory[10,j],lji_dafi[10,j])/mean(c(emory[10,j],lji_dafi[10,j])))
}
for (j in 1:6){
cv_new[11,j]<-sd(c(emory[11,j],lji_dafi[11,j],mtsinai_dafi[11,j])/mean(c(emory[11,j],lji_dafi[11,j],mtsinai_dafi[11,j])))
}
#####################################################################################################################
#without taking average#
cv_dafi <- data.frame(matrix(ncol = 6,nrow = 11))
r_cv <- row.names(emorya_dafi)
c_cv <- colnames(emorya_dafi)
rownames(cv_dafi) <- r_cv
colnames(cv_dafi) <- c_cv
i=1
j=1
for (i in 1:9) {
for (j in 1:6){
cv_dafi[i,j] <- sd(c(emorya_dafi[i,j],lji_dafi[i,j],emoryb_dafi[i,j]))/mean(c(emorya_dafi[i,j],lji_dafi[i,j],emoryb_dafi[i,j]))
}
}
#####################################################################################################################
##################################
# Manual gating cell proportions #
##################################
lji_manual <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_manual/lji_manual.csv",header= TRUE, sep = ",", sheet = 2)
rownames(lji_manual)<-lji_manual[,1]
lji_manual<-lji_manual[,2:12]
lji_manual <- as.data.frame(t(lji_manual))
emorya_manual <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_manual/emorya_manual.csv",header= TRUE, sep = ",", sheet = 2)
rownames(emorya_manual)<-emorya_manual[,1]
emorya_manual<-emorya_manual[,2:12]
emorya_manual <- as.data.frame(t(emorya_manual))
emoryb_manual <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_manual/emoryb_manual.csv",header= TRUE, sep = ",", sheet = 2)
rownames(emoryb_manual)<-emoryb_manual[,1]
emoryb_manual<-emoryb_manual[,2:12]
emoryb_manual <- as.data.frame(t(emoryb_manual))
mtsinai_manual <- read.xls("/Users/amandava/Desktop/HIPC_IOF_amandava/IOF_new/props_manual/MtSinai_manual.xlsx",header=TRUE, sep = ",", sheet = 2)
rownames(mtsinai_manual)<-mtsinai_manual[,1]
mtsinai_manual<-mtsinai_manual[,2:12]
mtsinai_manual <- as.data.frame(t(mtsinai_manual))
#Taking an average of emory results for manual
emory_m <- c()
emory_m <- (emorya_manual+emoryb_manual)/2
cv_m <- c()
cv_m <- data.frame(matrix(ncol = 6,nrow = 11))
rownames(cv_m) <- r_cv
colnames(cv_m) <- c_cv
i=1
j=1
for (i in 1:9){
for (j in 1:6){
cv_m[i,j]<-sd(c(emory_m[i,j],lji_manual[i,j],mtsinai_manual[i,j])/mean(c(emory_m[i,j],lji_manual[i,j],mtsinai_manual[i,j])))
}
}
for (j in 1:6){
cv_m[10,j]<-sd(c(emory_m[10,j],lji_manual[10,j])/mean(c(emory_m[10,j],lji_manual[10,j])))
}
for (j in 1:6){
cv_m[11,j]<-sd(c(emory_m[11,j],lji_manual[11,j],mtsinai_manual[11,j])/mean(c(emory_m[11,j],lji_manual[11,j],mtsinai_manual[11,j])))
}
#Plotting
cv_avg <- data.frame(melt(cv_m),melt(cv_new)[2])
names(cv_avg) <- c("population", "Manual" , "DAFi")
df2_new <- melt(cv_avg)
quartz()
ggplot(data = df2_new)+geom_boxplot(aes(x=population,y=value,fill=variable))+ggtitle("Coefficient of variability")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
#####################################################################################################################
#without taking average##
# Coefficient of variation across the three individual manual datasets
# (Emory replicate A, LJI, Emory replicate B) without averaging the Emory
# replicates first. Dimension names are copied from the Emory-A table.
r_cv_m <- row.names(emorya_manual)
c_cv_m <- colnames(emorya_manual)
cv_manual <- data.frame(matrix(ncol = 6, nrow = 11))
rownames(cv_manual) <- r_cv_m
colnames(cv_manual) <- c_cv_m
for (row in 1:11) {
  for (col in 1:6) {
    obs <- c(emorya_manual[row, col], lji_manual[row, col], emoryb_manual[row, col])
    cv_manual[row, col] <- sd(obs) / mean(obs)
  }
}
# Boxplots of DAFi vs manual CVs (no replicate averaging).
# cv_dafi is built earlier in this script, outside this section.
cv <- data.frame(melt(cv_dafi),melt(cv_manual)[2])
names(cv) <- c("population", "DAFi" , "Manual")
df2 <- melt(cv)
# quartz() opens a macOS-only graphics device.
quartz()
ggplot(data = df2)+geom_boxplot(aes(x=population,y=value,fill=variable))+ggtitle("Coefficient of variability")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
##############################################################################################################################################
# Quick agreement check between two centers for population 1 (base graphics).
plot(lji_dafi[,1],mtsinai_dafi[,1])
abline(lm(mtsinai_dafi[,1]~lji_dafi[,1]))
# Manual summary-stat workbooks per center (loaded but not used below).
lji_man <- read.xls("/Users/amandava/Desktop/HIPC_IOF_Analysis/Compare_centers/Manual_prop/stats_LJI_manual.xlsx")
emory_man <- read.xls("/Users/amandava/Desktop/HIPC_IOF_Analysis/Compare_centers/Manual_prop/stats_pB_manual.xlsx")
mtsinai_man <- read.xls("/Users/amandava/Desktop/HIPC_IOF_Analysis/Compare_centers/Manual_prop/stats_MtSinai_manual.xlsx")
# NOTE(review): these assign whole data-frame ROWS ([1, ]) to df$x / df$y,
# and `df` is not created in this section -- [ ,1] may have been intended.
# Left unchanged pending confirmation.
df$x <- lji_dafi[1,]
df$y <- mtsinai_dafi[1,]
# FIX: the original called ggplot(aes(...), geom_point(...)), passing aes()
# as the data argument and the geom as the mapping, which is invalid.
# Build the plot with an explicit data frame and add the geom with `+`.
ggplot(data.frame(x = lji_dafi[, 1], y = emory_dafi[, 1]), aes(x, y)) +
  geom_point(shape = 1)
#########################################################################################################################################
# Synthetic CV tables (standard-normal noise) -- apparently placeholder data
# for exercising the boxplot code below. Note rnorm() makes this section
# non-reproducible unless set.seed() is called first.
cv1 <- data.frame(replicate(6,rnorm(11,mean = 0, sd =1)))
cv2 <- data.frame(replicate(6,rnorm(11,mean = 0, sd =1)))
# View() opens interactive data viewers (RStudio/R GUI only).
View(cv2)
View(lji_dafi)
# NOTE(review): cv1/cv2 are 11x6, so these assignments only succeed if
# lji_dafi has 11 columns and 6 rows (i.e. the un-transposed orientation);
# verify lji_dafi's shape at this point in the script.
rownames(cv1) <- colnames(lji_dafi)
colnames(cv1) <- rownames(lji_dafi)
rownames(cv2) <- colnames(lji_dafi)
colnames(cv2) <- rownames(lji_dafi)
# Stack both tables into long form for a grouped boxplot.
cv <- data.frame(melt(cv1),melt(cv2)[2])
names(cv) <- c("population", "cv1" , "cv2")
df2 <- melt(cv)
quartz()
ggplot(data = df2)+geom_boxplot(aes(x=population,y=value,fill=variable),alpha=0.7)+ggtitle("Coefficient of variability")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
####################
####################
#comparing the proportions across the institutes for DAFi and manual
# Each section below builds two 3-column tables (one per gating method,
# columns = centers) for a single population column, stacks them into long
# form, and draws grouped boxplots.
# NOTE(review): `row.names = TRUE, colnames = TRUE` are not meaningful
# arguments to as.data.frame() for a matrix input -- they are silently
# ignored; the calls work despite them, not because of them.
#Monocytes
mono_dafi <- as.data.frame(cbind(lji_dafi[,1], emoryb_dafi[,1],mtsinai_dafi[,1]),row.names = TRUE,colnames = TRUE)
colnames(mono_dafi)<-c("lji","emory","mtsinai")
mono_manual <- as.data.frame(cbind(lji_manual[,1], emoryb_manual[,1],mtsinai_manual[,1]),row.names = TRUE,colnames = TRUE)
colnames(mono_manual)<-c("lji","emory","mtsinai")
mono_avg <- data.frame(melt(mono_manual),melt(mono_dafi)[2])
names(mono_avg)<-c("Center","Manual","DAFi")
monocytes <- melt(mono_avg)
quartz()
ggplot(data = na.omit(monocytes))+geom_boxplot(aes(x=Center,y=value,fill=variable),alpha=0.7)+ggtitle("Monocytes Population proportion ")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
#Bcells
bcells_dafi <- as.data.frame(cbind(lji_dafi[,2], emoryb_dafi[,2],mtsinai_dafi[,2]),row.names = TRUE,colnames = TRUE)
colnames(bcells_dafi)<-c("lji","emory","mtsinai")
bcells_manual <- as.data.frame(cbind(lji_manual[,2], emoryb_manual[,2],mtsinai_manual[,2]),row.names = TRUE,colnames = TRUE)
colnames(bcells_manual)<-c("lji","emory","mtsinai")
bcells_avg <- data.frame(melt(bcells_manual),melt(bcells_dafi)[2])
names(bcells_avg)<-c("Center","Manual","DAFi")
bcells <- melt(bcells_avg)
quartz()
# NOTE(review): unlike the other population plots, this one does not wrap
# the data in na.omit() -- confirm whether that difference is intentional.
ggplot(data = bcells)+geom_boxplot(aes(x=Center,y=value,fill=variable),alpha=0.7)+ggtitle("B-cell Population proportion ")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
#comparing the NK cell population across the institutes for DAFi and manual
# NOTE(review): unlike the monocyte/B-cell sections (which use emoryb_dafi
# and emoryb_manual), the NK and T-cell sections pull the Emory DAFi column
# from `emory` and the Emory manual column from `emory_m` (the A/B replicate
# average). Confirm whether this mix of sources is intentional.
NKcell_dafi <- as.data.frame(cbind(lji_dafi[,3], emory[,3],mtsinai_dafi[,3]),row.names = TRUE,colnames = TRUE)
colnames(NKcell_dafi)<-c("lji","emory","mtsinai")
NKcell_manual <- as.data.frame(cbind(lji_manual[,3], emory_m[,3],mtsinai_manual[,3]),row.names = TRUE,colnames = TRUE)
colnames(NKcell_manual)<-c("lji","emory","mtsinai")
NKcell_avg <- data.frame(melt(NKcell_manual),melt(NKcell_dafi)[2])
names(NKcell_avg)<-c("Center","Manual","DAFi")
NKcell <- melt(NKcell_avg)
quartz()
ggplot(data = na.omit(NKcell))+geom_boxplot(aes(x=Center,y=value,fill=variable),alpha=0.7)+ggtitle("NK cell Population proportion ")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
#comparing the T cell population across the institutes for DAFi and manual
# Same structure as the NK section, for population column 4 (same source
# inconsistency applies -- see the note above the NK block).
Tcell_dafi <- as.data.frame(cbind(lji_dafi[,4], emory[,4],mtsinai_dafi[,4]),row.names = TRUE,colnames = TRUE)
colnames(Tcell_dafi)<-c("lji","emory","mtsinai")
Tcell_manual <- as.data.frame(cbind(lji_manual[,4], emory_m[,4],mtsinai_manual[,4]),row.names = TRUE,colnames = TRUE)
colnames(Tcell_manual)<-c("lji","emory","mtsinai")
Tcell_avg <- data.frame(melt(Tcell_manual),melt(Tcell_dafi)[2])
names(Tcell_avg)<-c("Center","Manual","DAFi")
Tcell <- melt(Tcell_avg)
quartz()
ggplot(data = na.omit(Tcell))+geom_boxplot(aes(x=Center,y=value,fill=variable),alpha=0.7)+ggtitle("T cell Population proportion ")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
#CD4+ T cell
# Population column 5; back to the emoryb_* sources used by the
# monocyte/B-cell sections. `mydata` is a generic name -- consider renaming
# to cd4cell_* for consistency with the other sections.
mydata_dafi <- as.data.frame(cbind(lji_dafi[,5], emoryb_dafi[,5],mtsinai_dafi[,5]),row.names = TRUE,colnames = TRUE)
colnames(mydata_dafi)<-c("lji","emory","mtsinai")
mydata_manual <- as.data.frame(cbind(lji_manual[,5], emoryb_manual[,5],mtsinai_manual[,5]),row.names = TRUE,colnames = TRUE)
colnames(mydata_manual)<-c("lji","emory","mtsinai")
mydata_avg <- data.frame(melt(mydata_manual),melt(mydata_dafi)[2])
names(mydata_avg)<-c("Center","Manual","DAFi")
mydata <- melt(mydata_avg)
quartz()
ggplot(data = na.omit(mydata))+geom_boxplot(aes(x=Center,y=value,fill=variable),alpha=0.7)+ggtitle("CD4+ T cell Population proportion ")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
#comparing the CD8+ T cell population across the institutes for DAFi and manual
# Population column 6, same structure as the CD4 section.
T8cell_dafi <- as.data.frame(cbind(lji_dafi[,6], emoryb_dafi[,6],mtsinai_dafi[,6]),row.names = TRUE,colnames = TRUE)
colnames(T8cell_dafi)<-c("lji","emory","mtsinai")
T8cell_manual <- as.data.frame(cbind(lji_manual[,6], emoryb_manual[,6],mtsinai_manual[,6]),row.names = TRUE,colnames = TRUE)
colnames(T8cell_manual)<-c("lji","emory","mtsinai")
T8cell_avg <- data.frame(melt(T8cell_manual),melt(T8cell_dafi)[2])
names(T8cell_avg)<-c("Center","Manual","DAFi")
T8cell <- melt(T8cell_avg)
quartz()
ggplot(data = na.omit(T8cell))+geom_boxplot(aes(x=Center,y=value,fill=variable),alpha=0.7)+ggtitle("CD8+ T cell Population proportion ")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
################################
#Previous results
################################
# Load the previous-run DAFi proportion tables for each center and transpose
# them to the same layout as the manual tables (rows = samples after t()).
lji_pre <- read.csv("/Users/amandava/Desktop/HIPC_IOF_amandava/HIPC_IOF_Analysis/Compare_centers/DAFi_prop/DAFi_LJI_prop.csv", header = TRUE, sep = ",")
rownames(lji_pre) <- lji_pre[, 1]
lji_pre <- lji_pre[, 2:12]
lji_pre <- as.data.frame(t(lji_pre))
emorya_pre <- read.csv("/Users/amandava/Desktop/HIPC_IOF_amandava/HIPC_IOF_Analysis/Compare_centers/DAFi_prop/DAFi_pA_prop.csv", header = TRUE, sep = ",")
rownames(emorya_pre) <- emorya_pre[, 1]
emorya_pre <- emorya_pre[, 2:12]
emorya_pre <- as.data.frame(t(emorya_pre))
emoryb_pre <- read.csv("/Users/amandava/Desktop/HIPC_IOF_amandava/HIPC_IOF_Analysis/Compare_centers/DAFi_prop/DAFi_pB_prop.csv", header = TRUE, sep = ",")
rownames(emoryb_pre) <- emoryb_pre[, 1]
emoryb_pre <- emoryb_pre[, 2:12]
emoryb_pre <- as.data.frame(t(emoryb_pre))
mtsinai_pre <- read.csv("/Users/amandava/Desktop/HIPC_IOF_amandava/HIPC_IOF_Analysis/Compare_centers/DAFi_prop/DAFi_MtSinai_prop.csv", header = TRUE, sep = ",")
rownames(mtsinai_pre) <- mtsinai_pre[, 1]
mtsinai_pre <- mtsinai_pre[, 2:12]
mtsinai_pre <- as.data.frame(t(mtsinai_pre))

# Average the two Emory replicates of the previous DAFi run.
emory_p <- (emorya_pre + emoryb_pre) / 2

# CV across centers (sd / mean), with the same special-casing as cv_m:
# row 10 lacks the Mt Sinai value.
cv_p <- data.frame(matrix(ncol = 6, nrow = 11))
rownames(cv_p) <- r_cv
colnames(cv_p) <- c_cv
for (i in 1:9) {
  for (j in 1:6) {
    vals <- c(emory_p[i, j], lji_pre[i, j], mtsinai_pre[i, j])
    # FIX: sd()/mean() parenthesis placement corrected -- the original
    # computed sd(x / mean(x)), equivalent only for positive means.
    cv_p[i, j] <- sd(vals) / mean(vals)
  }
}
for (j in 1:6) {
  vals <- c(emory_p[10, j], lji_pre[10, j])
  cv_p[10, j] <- sd(vals) / mean(vals)
}
for (j in 1:6) {
  vals <- c(emory_p[11, j], lji_pre[11, j], mtsinai_pre[11, j])
  cv_p[11, j] <- sd(vals) / mean(vals)
}
#Plotting
# Three-way CV comparison: manual, current DAFi (cv_new, built earlier in
# the script), and the previous DAFi run (cv_p).
cv_avg <- data.frame(melt(cv_m),melt(cv_new)[2],melt(cv_p)[2])
names(cv_avg) <- c("population", "Manual" , "DAFi","previous_DAFi")
df2_new <- melt(cv_avg)
# quartz() opens a macOS-only graphics device.
quartz()
ggplot(data = df2_new)+geom_boxplot(aes(x=population,y=value,fill=variable))+ggtitle("Coefficient of variability")+theme_bw()+theme(plot.title = element_text(size = 14, family = "Tahoma", face = "bold"),text = element_text(size = 12, family = "Tahoma"),
axis.title = element_text(face="bold"),axis.text.x=element_text(size = 11)) +scale_fill_brewer(palette = "Accent")
############################################################
# Linear regression analysis between DAFi and Manual gating
############################################################
# NOTE(review): par(mfrow) only affects base graphics; it does not panel
# ggplot2 output, so each plot below occupies the whole device.
par(mfrow = c(3, 3))
quartz()
for (i in 1:6) {
  # FIX: ggplot objects are not auto-printed inside a for loop; without an
  # explicit print() the original loop produced no output at all.
  print(
    qplot(lji_dafi[, i], emory[, i]) +
      stat_smooth(method = "lm", col = "red") +
      scale_y_continuous(breaks = c(0.2, 0.4, 0.6, 0.8, 1.0), minor_breaks = NULL) +
      scale_x_continuous(breaks = c(0.2, 0.4, 0.6, 0.8, 1.0), minor_breaks = NULL)
  )
}
# Individual fits/plots for selected populations (auto-printed when run
# interactively at top level).
lm(lji_dafi$Monocytes ~ emory$Monocytes)
qplot(lji_dafi$Monocytes, emory$Monocytes) + stat_smooth(method = "lm", col = "red") +
  scale_y_continuous(breaks = c(0.2, 0.4, 0.6, 0.8, 1.0), minor_breaks = NULL) +
  scale_x_continuous(breaks = c(0.2, 0.4, 0.6, 0.8, 1.0), minor_breaks = NULL)
lm(lji_dafi$`NK cells` ~ emory$`NK cells`)
qplot(lji_dafi$`NK cells`, emory$`NK cells`) + stat_smooth(method = "lm", col = "red") +
  scale_y_continuous(breaks = c(0.2, 0.4, 0.6, 0.8, 1.0), minor_breaks = NULL) +
  scale_x_continuous(breaks = c(0.2, 0.4, 0.6, 0.8, 1.0), minor_breaks = NULL)
####Correlation snslysis
# Correlation analysis: DAFi vs manual proportions per population.
quartz(title="LJI")
# NOTE(review): par(mfrow) does not affect ggplot/ggpubr output.
par(mfrow=c(3,3))
#layout.show(6)
for(i in 1:6){
  # FIX: the loop previously ignored its index and re-plotted column 5
  # ("T cells") six times; plot each population column instead, titled by
  # its column name.
  my_data <- as.data.frame(cbind(mtsinai_dafi[,i],mtsinai_manual[,i]))
  # FIX: ggplot-based objects are not auto-printed inside a for loop.
  print(ggscatter(my_data,x="V1",y="V2", add="reg.line", conf.int = TRUE,
                  cor.coef = TRUE, cor.method = "pearson",xlab="DAFi", ylab="Manual",
                  title=colnames(mtsinai_dafi)[i]))
}
dev.off()
|
# Ridge-density plot of questionnaire responses: one ridge per question,
# with questions ordered by mean response and the rounded means annotated.
library(ggplot2)
library(reshape2)
library(ggridges)

dat <- read.table("../Datasets/questionnaire.csv", sep = ";", header = TRUE)

# Reorder the question columns (everything after column 1, the ID) by mean.
question_order <- order(colMeans(dat[, -1])) + 1
dat <- dat[, c(1, question_order)]

# Per-question rounded means; ID doubles as the y position of each label.
cmn <- data.frame(ID = seq_len(ncol(dat) - 1),
                  value = round(colMeans(dat[, -1]), 1))

dat_long <- melt(dat, id.vars = "ID")

g1 <- ggplot(dat_long, aes(x = value, y = factor(variable))) +
  geom_density_ridges(alpha = 0.8, fill = "cornflowerblue") +
  xlab("probability (in %)") +
  ylab("expression") +
  geom_point() +
  geom_jitter(width = 1, height = 0.1) +
  geom_vline(xintercept = 50, lty = "dotted") +
  geom_text(data = cmn,
            aes(x = value, y = ID + 0.5, label = round(value, 1)),
            colour = "red")
plot(g1)
ggsave("questionnaire.pdf", width = 11, height = 7)
|
/Code/L01_ProbabilityQuestionnaire/visualize.R
|
no_license
|
anhnguyendepocen/hse_bayesian
|
R
| false
| false
| 721
|
r
|
# Ridge-density plot of questionnaire responses: one ridge per question,
# with questions ordered by mean response and the rounded means annotated.
library(ggplot2)
library(reshape2)
library(ggridges)

dat <- read.table("../Datasets/questionnaire.csv", sep = ";", header = TRUE)

# Reorder the question columns (everything after column 1, the ID) by mean.
question_order <- order(colMeans(dat[, -1])) + 1
dat <- dat[, c(1, question_order)]

# Per-question rounded means; ID doubles as the y position of each label.
cmn <- data.frame(ID = seq_len(ncol(dat) - 1),
                  value = round(colMeans(dat[, -1]), 1))

dat_long <- melt(dat, id.vars = "ID")

g1 <- ggplot(dat_long, aes(x = value, y = factor(variable))) +
  geom_density_ridges(alpha = 0.8, fill = "cornflowerblue") +
  xlab("probability (in %)") +
  ylab("expression") +
  geom_point() +
  geom_jitter(width = 1, height = 0.1) +
  geom_vline(xintercept = 50, lty = "dotted") +
  geom_text(data = cmn,
            aes(x = value, y = ID + 0.5, label = round(value, 1)),
            colour = "red")
plot(g1)
ggsave("questionnaire.pdf", width = 11, height = 7)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/docgen.R
\name{docgen}
\alias{docgen}
\title{Automatically generate package documentation from a gentest() result}
\usage{
docgen(gentest_result, title = attr(gentest_result, "function"))
}
\arguments{
\item{title}{String, the title of the documentation page.
Default: a function name stored in \code{gentest_result}.
The documentation file is saved as \code{XXX.Rd} -- where
\code{XXX} is determined by the function name stored in
the \code{gentest_result} -- in the sub-directory \code{man}
of the current working directory. This sub-directory needs to be
created first, if it does not yet exist.}
\item{gentest_result}{Value returned by \code{\link[gentest]{gentest}}.}
}
\description{
Generates automatically the documentation for a function tested with
\code{\link[gentest]{gentest}}. Takes into consideration only those
results which did not return errors during tests. The generated file is
written to the \code{man} sub-directory of the current working directory.
}
|
/man/docgen.Rd
|
no_license
|
alekrutkowski/gentest
|
R
| false
| false
| 983
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/docgen.R
\name{docgen}
\alias{docgen}
\title{Automatically generate package documentation from a gentest() result}
\usage{
docgen(gentest_result, title = attr(gentest_result, "function"))
}
\arguments{
\item{title}{String, the title of the documentation page.
Default: a function name stored in \code{gentest_result}.
The documentation file is saved as \code{XXX.Rd} -- where
\code{XXX} is determined by the function name stored in
the \code{gentest_result} -- in the sub-directory \code{man}
of the current working directory. This sub-directory needs to be
created first, if it does not yet exist.}
\item{gentest_result}{Value returned by \code{\link[gentest]{gentest}}.}
}
\description{
Generates automatically the documentation for a function tested with
\code{\link[gentest]{gentest}}. Takes into consideration only those
results which did not return errors during tests. The generated file is
written to the \code{man} sub-directory of the current working directory.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudbuild.R
\name{cr_build_wait}
\alias{cr_build_wait}
\title{Wait for a Build to run}
\usage{
cr_build_wait(op = .Last.value, projectId = cr_project_get(),
task_id = NULL)
}
\arguments{
\item{op}{The operation build object to wait for}
\item{projectId}{The projectId}
\item{task_id}{A possible RStudio job taskId to increment status upon}
}
\value{
A gar_Build object \link{Build}
}
\description{
This will repeatedly call \link{cr_build_status} whilst the status is STATUS_UNKNOWN, QUEUED or WORKING
}
\seealso{
Other Cloud Build functions: \code{\link{Build}},
\code{\link{RepoSource}}, \code{\link{Source}},
\code{\link{StorageSource}}, \code{\link{cr_build_make}},
\code{\link{cr_build_status}},
\code{\link{cr_build_upload_gcs}},
\code{\link{cr_build_write}},
\code{\link{cr_build_yaml}}, \code{\link{cr_build}}
}
\concept{Cloud Build functions}
|
/man/cr_build_wait.Rd
|
no_license
|
j450h1/googleCloudRunner
|
R
| false
| true
| 947
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudbuild.R
\name{cr_build_wait}
\alias{cr_build_wait}
\title{Wait for a Build to run}
\usage{
cr_build_wait(op = .Last.value, projectId = cr_project_get(),
task_id = NULL)
}
\arguments{
\item{op}{The operation build object to wait for}
\item{projectId}{The projectId}
\item{task_id}{A possible RStudio job taskId to increment status upon}
}
\value{
A gar_Build object \link{Build}
}
\description{
This will repeatedly call \link{cr_build_status} whilst the status is STATUS_UNKNOWN, QUEUED or WORKING
}
\seealso{
Other Cloud Build functions: \code{\link{Build}},
\code{\link{RepoSource}}, \code{\link{Source}},
\code{\link{StorageSource}}, \code{\link{cr_build_make}},
\code{\link{cr_build_status}},
\code{\link{cr_build_upload_gcs}},
\code{\link{cr_build_write}},
\code{\link{cr_build_yaml}}, \code{\link{cr_build}}
}
\concept{Cloud Build functions}
|
# 1. Merge the training and the test sets to create one data set.
# NOTE(review): setwd() ties the script to one machine's directory layout;
# kept for compatibility, but a relative or argument-driven path would be
# more portable.
setwd("~/Coursera/Assignments/3. Getting and Cleaning data/UCI HAR Dataset")

# Feature names and activity labels are shared by both splits.
features <- read.table('features.txt', header = FALSE)
activityType <- read.table('activity_labels.txt', header = FALSE)
colnames(activityType) <- c('activityId', 'activityType')

# ---- Train split ----
subjectTrain <- read.table('train/subject_train.txt', header = FALSE)
xTrain <- read.table('train/x_train.txt', header = FALSE)
yTrain <- read.table('train/y_train.txt', header = FALSE)
colnames(subjectTrain) <- "subjectId"
colnames(xTrain) <- features[, 2]
colnames(yTrain) <- "activityId"
trainingData <- cbind(yTrain, subjectTrain, xTrain)

# ---- Test split ----
subjectTest <- read.table('test/subject_test.txt', header = FALSE)
xTest <- read.table('test/x_test.txt', header = FALSE)
yTest <- read.table('test/y_test.txt', header = FALSE)
colnames(subjectTest) <- "subjectId"
colnames(xTest) <- features[, 2]
colnames(yTest) <- "activityId"
testData <- cbind(yTest, subjectTest, xTest)

# Stack the two splits into one data set.
finalData <- rbind(trainingData, testData)
colNames <- colnames(finalData)

# 2. Extract only the measurements on the mean and standard deviation.
# NOTE(review): the dots in these patterns are regex wildcards, so e.g.
# "mean.." also matches names like "meanFreq()". Patterns kept verbatim to
# preserve the original column selection; "-(mean|std)\\(\\)" would be the
# strict form.
logicalVector <- (grepl("activity..", colNames) |
                    grepl("subject..", colNames) |
                    grepl("mean..", colNames) |
                    grepl("std..", colNames))
finalData <- finalData[logicalVector]

# 3. Use descriptive activity names to name the activities in the data set.
finalData <- merge(finalData, activityType, by = "activityId", all.x = TRUE)
colNames <- colnames(finalData)

# 4. Appropriately label the data set with descriptive variable names.
for (i in seq_along(colNames)) {
  colNames[i] <- gsub("\\()", "", colNames[i])
  colNames[i] <- gsub("-std$", "StdDev", colNames[i])
  colNames[i] <- gsub("-mean", "Mean", colNames[i])
  colNames[i] <- gsub("^(t)", "time", colNames[i])
  colNames[i] <- gsub("^(f)", "freq", colNames[i])
  colNames[i] <- gsub("([Gg]ravity)", "Gravity", colNames[i])
  colNames[i] <- gsub("([Bb]ody[Bb]ody|[Bb]ody)", "Body", colNames[i])
  colNames[i] <- gsub("[Gg]yro", "Gyro", colNames[i])
  colNames[i] <- gsub("AccMag", "AccMagnitude", colNames[i])
  colNames[i] <- gsub("([Bb]odyaccjerkmag)", "BodyAccJerkMagnitude", colNames[i])
  colNames[i] <- gsub("JerkMag", "JerkMagnitude", colNames[i])
  colNames[i] <- gsub("GyroMag", "GyroMagnitude", colNames[i])
}
colnames(finalData) <- colNames

# 5. Create a second, independent tidy data set with the average of each
#    variable for each activity and each subject.
finalDataNoActivityType <- finalData[, names(finalData) != 'activityType']
# FIX: the original compared names(...) != c('activityId','subjectId'),
# which recycles the two-element vector across the whole name vector and
# only excluded the intended columns by positional accident; %in% expresses
# the intended set exclusion.
idCols <- c('activityId', 'subjectId')
tidyData <- aggregate(finalDataNoActivityType[, !(names(finalDataNoActivityType) %in% idCols)],
                      by = list(activityId = finalDataNoActivityType$activityId,
                                subjectId = finalDataNoActivityType$subjectId),
                      mean)
tidyData <- merge(tidyData, activityType, by = 'activityId', all.x = TRUE)
write.table(tidyData, 'tidy.txt', row.names = FALSE, sep = '\t')
|
/run_analysis.R
|
no_license
|
kchawala/Getting-and-Cleaning-Data-Course-Project
|
R
| false
| false
| 4,480
|
r
|
# 1. Merge the training and the test sets to create one data set.
# NOTE(review): setwd() ties the script to one machine's directory layout;
# kept for compatibility, but a relative or argument-driven path would be
# more portable.
setwd("~/Coursera/Assignments/3. Getting and Cleaning data/UCI HAR Dataset")

# Feature names and activity labels are shared by both splits.
features <- read.table('features.txt', header = FALSE)
activityType <- read.table('activity_labels.txt', header = FALSE)
colnames(activityType) <- c('activityId', 'activityType')

# ---- Train split ----
subjectTrain <- read.table('train/subject_train.txt', header = FALSE)
xTrain <- read.table('train/x_train.txt', header = FALSE)
yTrain <- read.table('train/y_train.txt', header = FALSE)
colnames(subjectTrain) <- "subjectId"
colnames(xTrain) <- features[, 2]
colnames(yTrain) <- "activityId"
trainingData <- cbind(yTrain, subjectTrain, xTrain)

# ---- Test split ----
subjectTest <- read.table('test/subject_test.txt', header = FALSE)
xTest <- read.table('test/x_test.txt', header = FALSE)
yTest <- read.table('test/y_test.txt', header = FALSE)
colnames(subjectTest) <- "subjectId"
colnames(xTest) <- features[, 2]
colnames(yTest) <- "activityId"
testData <- cbind(yTest, subjectTest, xTest)

# Stack the two splits into one data set.
finalData <- rbind(trainingData, testData)
colNames <- colnames(finalData)

# 2. Extract only the measurements on the mean and standard deviation.
# NOTE(review): the dots in these patterns are regex wildcards, so e.g.
# "mean.." also matches names like "meanFreq()". Patterns kept verbatim to
# preserve the original column selection; "-(mean|std)\\(\\)" would be the
# strict form.
logicalVector <- (grepl("activity..", colNames) |
                    grepl("subject..", colNames) |
                    grepl("mean..", colNames) |
                    grepl("std..", colNames))
finalData <- finalData[logicalVector]

# 3. Use descriptive activity names to name the activities in the data set.
finalData <- merge(finalData, activityType, by = "activityId", all.x = TRUE)
colNames <- colnames(finalData)

# 4. Appropriately label the data set with descriptive variable names.
for (i in seq_along(colNames)) {
  colNames[i] <- gsub("\\()", "", colNames[i])
  colNames[i] <- gsub("-std$", "StdDev", colNames[i])
  colNames[i] <- gsub("-mean", "Mean", colNames[i])
  colNames[i] <- gsub("^(t)", "time", colNames[i])
  colNames[i] <- gsub("^(f)", "freq", colNames[i])
  colNames[i] <- gsub("([Gg]ravity)", "Gravity", colNames[i])
  colNames[i] <- gsub("([Bb]ody[Bb]ody|[Bb]ody)", "Body", colNames[i])
  colNames[i] <- gsub("[Gg]yro", "Gyro", colNames[i])
  colNames[i] <- gsub("AccMag", "AccMagnitude", colNames[i])
  colNames[i] <- gsub("([Bb]odyaccjerkmag)", "BodyAccJerkMagnitude", colNames[i])
  colNames[i] <- gsub("JerkMag", "JerkMagnitude", colNames[i])
  colNames[i] <- gsub("GyroMag", "GyroMagnitude", colNames[i])
}
colnames(finalData) <- colNames

# 5. Create a second, independent tidy data set with the average of each
#    variable for each activity and each subject.
finalDataNoActivityType <- finalData[, names(finalData) != 'activityType']
# FIX: the original compared names(...) != c('activityId','subjectId'),
# which recycles the two-element vector across the whole name vector and
# only excluded the intended columns by positional accident; %in% expresses
# the intended set exclusion.
idCols <- c('activityId', 'subjectId')
tidyData <- aggregate(finalDataNoActivityType[, !(names(finalDataNoActivityType) %in% idCols)],
                      by = list(activityId = finalDataNoActivityType$activityId,
                                subjectId = finalDataNoActivityType$subjectId),
                      mean)
tidyData <- merge(tidyData, activityType, by = 'activityId', all.x = TRUE)
write.table(tidyData, 'tidy.txt', row.names = FALSE, sep = '\t')
|
#!/usr/bin/env Rscript
# Hierarchical clustering and DBSCAN on the built-in Orange growth data.
library(BBmisc)
library(cluster)
library(dbscan)
library(scatterplot3d)
library(factoextra)

orange <- data.frame(Orange)
head(orange)
summary(orange)

# Pairwise Euclidean distances over all columns (Tree, age, circumference).
dist_mat <- daisy(orange, metric = c("euclidean"))
summary(dist_mat)

# Complete-linkage dendrogram, cut into 3 clusters.
hc_complete <- hclust(dist_mat, method = "complete")
# FIX: the plot title said "Single method" although method = "complete"
# is used; the label now matches the linkage actually applied.
fviz_dend(hc_complete, k = 3, horiz = TRUE, main = "Complete method", labels_track_height = 2, rect = TRUE)

# Density-based clustering on the precomputed distance matrix, rendered to
# a PNG file.
png("dbsplot_4.9.png")
dbs <- fpc::dbscan(dist_mat, eps = 0.12, MinPts = 4, method = c("dist"), scale = FALSE)
#plot(dbs, orange, main = "DBSCAN", frame = FALSE)
scatterplot3d(x = orange$Tree, y = orange$age, z = orange$circumference, color = dbs$cluster + 1)
# FIX: the png device was never closed, so dbsplot_4.9.png was left
# unfinalized; dev.off() flushes and closes it.
dev.off()
#There are 2 outliers
|
/Lab 5/ex_4.9.r
|
no_license
|
UTBM-AGH-courses/agh-data-mining
|
R
| false
| false
| 676
|
r
|
#!/usr/bin/env Rscript
# Hierarchical clustering and DBSCAN on the built-in Orange growth data.
library(BBmisc)
library(cluster)
library(dbscan)
library(scatterplot3d)
library(factoextra)

orange <- data.frame(Orange)
head(orange)
summary(orange)

# Pairwise Euclidean distances over all columns (Tree, age, circumference).
dist_mat <- daisy(orange, metric = c("euclidean"))
summary(dist_mat)

# Complete-linkage dendrogram, cut into 3 clusters.
hc_complete <- hclust(dist_mat, method = "complete")
# FIX: the plot title said "Single method" although method = "complete"
# is used; the label now matches the linkage actually applied.
fviz_dend(hc_complete, k = 3, horiz = TRUE, main = "Complete method", labels_track_height = 2, rect = TRUE)

# Density-based clustering on the precomputed distance matrix, rendered to
# a PNG file.
png("dbsplot_4.9.png")
dbs <- fpc::dbscan(dist_mat, eps = 0.12, MinPts = 4, method = c("dist"), scale = FALSE)
#plot(dbs, orange, main = "DBSCAN", frame = FALSE)
scatterplot3d(x = orange$Tree, y = orange$age, z = orange$circumference, color = dbs$cluster + 1)
# FIX: the png device was never closed, so dbsplot_4.9.png was left
# unfinalized; dev.off() flushes and closes it.
dev.off()
#There are 2 outliers
|
6bc46efa0544ea965c54006c982b4bcc c2_BMC_p2_k8.qdimacs 8744 28194
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Mangassarian-Veneris/BMC/c2_BMC_p2_k8/c2_BMC_p2_k8.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 64
|
r
|
6bc46efa0544ea965c54006c982b4bcc c2_BMC_p2_k8.qdimacs 8744 28194
|
# Libraries and Data -----
library(tidyverse)
train <- read_csv('titanic/data/train.csv')
#test <- read_csv('titanic/data/test.csv')

# Cleaning -----
# Count missing values per column.
lapply(train, function(x) sum(is.na(x))) # Age, Cabin, Embarked

# Age
# People missing Age are disproportionately Pclass = 3.
# How do Pclass = 3 ages compare to Pclass = 1 or 2?
train %>%
  filter(!is.na(Age)) %>%
  group_by(Pclass) %>%
  summarize(count = n(),
            mean_surv = mean(Survived),
            mean_age = mean(Age),
            sd_age = sd(Age))
# Means look pretty different but also have decent variance;
# run an ANOVA to check for statistical significance.
# FIX: Pclass is numeric, so `Age ~ Pclass` fit a single linear slope
# (1 df) rather than the intended between-group ANOVA; factor() gives the
# group comparison.
fit <- aov(Age ~ factor(Pclass), data = train)
summary(fit)
# Very significant, so fill missing Age with the per-class mean.
train <- train %>%
  group_by(Pclass) %>%
  mutate(Age = if_else(is.na(Age), mean(Age, na.rm = TRUE), Age)) %>%
  ungroup()

# Cabin
train %>% count(Cabin) # level may mean something, or just proxy for class
# Cabin deck letter, summarized by passenger class.
train$cabin_level <- str_extract(train$Cabin, '[A-Z]')
train %>%
  group_by(Pclass, cabin_level) %>%
  summarize(count = n(),
            mean_surv = mean(Survived),
            mean_age = mean(Age),
            sd_age = sd(Age))
# A few things to note:
# - most missing cabins are Pclass 2 and 3
# - having a cabin number appears to matter little for Pclass 1
# - it does for Pclass 2 and 3, but the sample sizes are very small
# Correlation: cabin vs non-cabin.
train$has_cabin <- if_else(is.na(train$cabin_level), 0, 1)
cor(train$has_cabin, train$Survived)
# Not strongly correlated at all; leaving the NAs for now, may revisit
# during feature engineering.
# Embarked -- only two passengers affected; not handled for now.

# Feature Engineering -----
|
/titanic/EDA.R
|
no_license
|
tfaulk13/kaggle
|
R
| false
| false
| 1,871
|
r
|
# Libraries and Data -----
library(tidyverse)
train <- read_csv('titanic/data/train.csv')
#test <- read_csv('titanic/data/test.csv')

# Cleaning -----
# Count missing values per column.
lapply(train, function(x) sum(is.na(x))) # Age, Cabin, Embarked

# Age
# People missing Age are disproportionately Pclass = 3.
# How do Pclass = 3 ages compare to Pclass = 1 or 2?
train %>%
  filter(!is.na(Age)) %>%
  group_by(Pclass) %>%
  summarize(count = n(),
            mean_surv = mean(Survived),
            mean_age = mean(Age),
            sd_age = sd(Age))
# Means look pretty different but also have decent variance;
# run an ANOVA to check for statistical significance.
# FIX: Pclass is numeric, so `Age ~ Pclass` fit a single linear slope
# (1 df) rather than the intended between-group ANOVA; factor() gives the
# group comparison.
fit <- aov(Age ~ factor(Pclass), data = train)
summary(fit)
# Very significant, so fill missing Age with the per-class mean.
train <- train %>%
  group_by(Pclass) %>%
  mutate(Age = if_else(is.na(Age), mean(Age, na.rm = TRUE), Age)) %>%
  ungroup()

# Cabin
train %>% count(Cabin) # level may mean something, or just proxy for class
# Cabin deck letter, summarized by passenger class.
train$cabin_level <- str_extract(train$Cabin, '[A-Z]')
train %>%
  group_by(Pclass, cabin_level) %>%
  summarize(count = n(),
            mean_surv = mean(Survived),
            mean_age = mean(Age),
            sd_age = sd(Age))
# A few things to note:
# - most missing cabins are Pclass 2 and 3
# - having a cabin number appears to matter little for Pclass 1
# - it does for Pclass 2 and 3, but the sample sizes are very small
# Correlation: cabin vs non-cabin.
train$has_cabin <- if_else(is.na(train$cabin_level), 0, 1)
cor(train$has_cabin, train$Survived)
# Not strongly correlated at all; leaving the NAs for now, may revisit
# during feature engineering.
# Embarked -- only two passengers affected; not handled for now.

# Feature Engineering -----
|
#############################################################
## Stat 202A - Homework 4
## Author: Anurag Pande
## Date : 27-10-2016
## Description: This script implements stagewise regression
## (epsilon boosting)
#############################################################
#############################################################
## INSTRUCTIONS: Please fill in the missing lines of code
## only where specified. Do not change function names,
## function inputs or outputs. You can add examples at the
## end of the script (in the "Optional examples" section) to
## double-check your work, but MAKE SURE TO COMMENT OUT ALL
## OF YOUR EXAMPLES BEFORE SUBMITTING.
##
## Very important: Do not use the function "setwd" anywhere
## in your code. If you do, I will be unable to grade your
## work since R will attempt to change my working directory
## to one that does not exist.
#############################################################
######################################
## Function 1: Stagewise regression ##
######################################
swRegression <- function(X, Y, numIter = 3000, epsilon = 0.0001){
  # Perform stagewise regression (epsilon boosting) of Y on X.
  #
  # X: n x p matrix of explanatory variables.
  # Y: response vector (length n) or n x 1 matrix.
  # numIter: number of boosting iterations ("T" in class notes).
  # epsilon: update step size (should be small).
  #
  # Returns: p x numIter matrix; column t holds the coefficient vector
  # after iteration t.
  p <- ncol(X)
  beta <- matrix(0, nrow = p, ncol = 1)
  beta_all <- matrix(0, nrow = p, ncol = numIter)
  R <- Y  # current residual
  for (t in seq_len(numIter)) {
    # Inner product of each predictor with the residual. crossprod(X, R)
    # computes t(X) %*% R in one BLAS call, replacing the original
    # per-column sum(R * X[, j]) loop with identical values.
    db <- crossprod(X, R)
    # Greedy step: nudge only the predictor most correlated with the
    # residual, and remove that contribution from the residual.
    j <- which.max(abs(db))
    beta[j] <- beta[j] + db[j] * epsilon
    R <- R - X[, j] * db[j] * epsilon
    beta_all[, t] <- beta
  }
  # To visualize the coefficient paths:
  # matplot(colSums(abs(beta_all)), t(beta_all), type = "l")
  return(beta_all)
}
#n = 100
#p = 500
#s = 10
#X = matrix(rnorm(n*p),nrow = n)
#beta_true = matrix(rep(0,p), nrow = p)
#beta_true[1:s] = 1:s
#Y=X%*%beta_true + rnorm(n)
#swRegression(X,Y)
|
/HW4/Submission/StagewiseRegression.R
|
no_license
|
anurag1212/Statistics
|
R
| false
| false
| 2,328
|
r
|
#############################################################
## Stat 202A - Homework 4
## Author: Anurag Pande
## Date : 27-10-2016
## Description: This script implements stagewise regression
## (epsilon boosting)
#############################################################
#############################################################
## INSTRUCTIONS: Please fill in the missing lines of code
## only where specified. Do not change function names,
## function inputs or outputs. You can add examples at the
## end of the script (in the "Optional examples" section) to
## double-check your work, but MAKE SURE TO COMMENT OUT ALL
## OF YOUR EXAMPLES BEFORE SUBMITTING.
##
## Very important: Do not use the function "setwd" anywhere
## in your code. If you do, I will be unable to grade your
## work since R will attempt to change my working directory
## to one that does not exist.
#############################################################
######################################
## Function 1: Stagewise regression ##
######################################
swRegression <- function(X, Y, numIter = 3000, epsilon = 0.0001){
  # Perform stagewise regression (epsilon boosting) of Y on X.
  #
  # X:       n x p matrix of explanatory variables.
  # Y:       Response vector of length n.
  # numIter: Number of boosting iterations ("T" in class notes).
  # epsilon: Update step size (should be small).
  #
  # Returns a p x numIter matrix whose t-th column holds the coefficient
  # vector after t iterations.
  p <- ncol(X)
  beta <- numeric(p)
  beta_all <- matrix(0, nrow = p, ncol = numIter)
  R <- Y  # current residual vector
  # seq_len() (not 1:numIter) is safe when numIter == 0.
  for (t in seq_len(numIter)) {
    # Inner product of the residual with every column of X;
    # crossprod(X, R) == t(X) %*% R, replacing the original scalar loop over p.
    db <- crossprod(X, R)
    j <- which.max(abs(db))              # coordinate with largest correlation
    beta[j] <- beta[j] + db[j] * epsilon
    R <- R - X[, j] * db[j] * epsilon    # shrink residual along column j
    beta_all[, t] <- beta
  }
  # Optional diagnostic plot of the coefficient paths:
  # matplot(t(matrix(rep(1, p), nrow = 1) %*% abs(beta_all)), t(beta_all), type = 'l')
  beta_all
}
#n = 100
#p = 500
#s = 10
#X = matrix(rnorm(n*p),nrow = n)
#beta_true = matrix(rep(0,p), nrow = p)
#beta_true[1:s] = 1:s
#Y=X%*%beta_true + rnorm(n)
#swRegression(X,Y)
|
# Review
# object <- function(object)
# All of R is about understanding objects and functions.
#
# Objects are made up of 5 data structures:
# vectors, matrices, arrays - can only contain 1 type of information
# lists, and dataframes - can hold different types of information
# These structures can hold types information characterised as:
# logical, character, integer, numeric/double
# NA (NaN, Inf)
# There are many functions to help us better understand objects
class()          # the object's class
typeof()         # underlying storage type
str()            # compact structure summary
View()           # spreadsheet-style viewer
names()          # element / column names
attributes()     # all attributes at once
dim()            # dimensions -- fixed typo: there is no dims() in base R
length()
nrow()
is.character()
is.numeric()
is.factor()
is.logical()
# Subsetting
# Part of working with R is being table to take apart objects and rearrange the
# parts.
# Indexing
# One-dimension
vec <- sample(c(0:9), 100, replace = TRUE)
vec[2]
# Two dimensions
mat <- matrix(c(1, 2, 3, 3, 2, 1), ncol = 2)
mat[1, 2]  # fixed: `matrix[1, 2]` tried to index the matrix() *function*, not the object
# Lists
x <- list(a = c(1, 2), b = c(4, 4), c = c(6, 8), d = c(9, 11))
x[[1]]
x[[1]][1]
x[1]
# These give different results
class(x[[1]])
str(x[[1]])
class(x[1])
str(x[1])
attributes(x[1])
# $ for named elements in a list
x$a
class(x$a)
x$a[1]
# Selecting multiple elements
x <- letters
x[c(1, 2, 6)]
x <- sample(c(0:9), 100, replace = TRUE)
x[x < 5]
x < 5
d <- data_frame(number = sample(0:9, 100, replace = TRUE),
character = rep(c("a", "b"), 50))
lapply(d, class)
d[unlist(lapply(d, is.numeric))] %>% head()
d[ sapply(d, is.numeric)] %>% head()
# You can name elements in data structures besides lists.
x <- c( 1, 4, 6, 9)
str(x)
x <- c(a = 1, b = 4, c = 6, d = 9)
str(x)
names(x)
attributes(x)
attr(x, "description") <- "This is a named vector"
attributes(x)
# But $ only works with lists
x$a
x <- list(a = 1, b = 4, c = 6, d = 9)
x$a
# Combining objects can be tricky.
# Differnt data types willl typically reduce to the type with the lowest level
# of information
x <- c(1, "character")
x
class(x)
x <- c(1, TRUE, "character")
x
class(x)
x <- c(1, TRUE, FALSE)
x
class(x)
# Vectors can be combined to make matrices, but be careful
m <- rbind(sample(0:9, 100, replace = TRUE),
c("a", "b"))
class(m)
View(m)
# Dataframes will prevent you from doing this
m <- rbind(sample(0:9, 100, replace = TRUE),
c("a", "b")) %>% as.data.frame() # Not this way
m <- data_frame(sample(0:9, 100, replace = TRUE),
c("a", "b"))
# There are other functions to help switch between information types
as.character()
as.numeric()
as.factor()
# Factors
# Factors are a special kind of numeric variable with labels attached to each
# value, signifying categorical (nominal, ordered) data.
f <- sample(c("Yes", "No", "Maybe"), size = 100, replace = TRUE,
prob = c(0.3, 0.6, 0.1))
f.1 <- factor(f)
# The "levels" are the labels
levels(f.1)
table(f.1)
# Confirm the structure
str(f.1)
# The underlying numbers:
table(as.numeric(f.1))
# The order of the levels matters. By default, they will be in alphabetial order
sample(letters[c(1, 5, 8)], size = 100, replace = TRUE) %>%
factor() %>%
levels()
sample(letters[c(5, 8, 1)], size = 100, replace = TRUE) %>%
factor() %>%
class()
sample(letters[c(5, 8, 1)], size = 100, replace = TRUE) %>%
factor(levels = c("e", "h", "a"))
sample(letters[c(5, 8, 1)], size = 100, replace = TRUE) %>%
factor(levels = c("e", "h", "a"), ordered = TRUE) %>%
class()
sample(c(1, 2, 10, 20, 100), size = 100, replace = TRUE) %>%
factor() %>%
class()
sample(as.character(c(1, 2, 10, 20, 100)), size = 100, replace = TRUE) %>%
factor() %>%
class()
# Reordering levels
levels(f.1) <- c("Yes", "No", "Maybe")
table(f.1) # Bad!
levels(f.1) <- rev(levels(f.1)) # Switch it back
table(f.1)
# Do it with factor()
f.1 <- factor(f, levels = rev(levels(f.1)))
table(f.1) # Correct
f.1 <- relevel(f.1, ref = "Maybe")
table(f.1)
table(as.numeric(f.1)) # Convert to the underlying number
# Do it manually
f.1 <- factor(f, levels = c("Maybe", "Yes", "No"))
table(f.1) # Correct
# You need to use the exising levels
f.1 <- factor(f, levels = c("A", "B", "C")) # Bad
f.1 <- factor(f)
f.2 <- factor(f, labels = c("A", "B", "C")) # Use the labels option
table(f.1, f.2)
levels(f.2) # The labels become the levels forevermore
# Numbers as factors
f <- sample(c(10, 20, 50, 60, 65, 90), size = 100, replace = TRUE)
f.1 <- factor(f)
levels(f.1)
str(f.1)
f.1 %>% as.numeric() %>% table() # No
as.numeric(levels(f.1)[f.1]) %>% table() # Yes
f.1 <- cut(f, 4) # Equally spaced levels
table(f.1)
str(f.1)
f.1 <- cut(f, 4, labels = c("Low", "Med", "High", "Very High"))
table(f.1)
levels(f.1)
as.character(f.1)
# ~ equally sized levels
f.1 <- cut(f, breaks = quantile(f, 0:4/4))
table(f.1)
levels(f.1)
# User defined cuts
bmi <- rnorm(100, 26, 4)
qplot(bmi)
bmi <- cut(bmi, c(0, 18.5, 25, 30, max(bmi)),
labels = c("UW", "NW", "OW", "OB"))
table(bmi)
# Reordering levels based on other values
data <- data_frame(number = rnorm(100, 0, 1),
factor = factor(sample(letters[1:5], 100, replace = TRUE)))
levels(data$factor)
data <- group_by(data, factor) %>%
summarise(mean = mean(number)) %>%
full_join(data, by = "factor")
table(data$factor, data$mean)
ggplot(data, aes(x = factor, fill = mean)) +
geom_bar()
data$factor <- reorder(data$factor, data$mean)
levels(data$factor)
ggplot(data, aes(x = factor, fill = mean)) +
geom_bar()
data <- group_by(data, factor) %>%
summarise(count = n()) %>%
full_join(data, by = "factor")
data$factor <- reorder(data$factor, data$count)
ggplot(data, aes(x = factor, fill = mean)) +
geom_bar()
# Creating/Manipulating vectors (or variables)
paste( "text", "more text", "even more", sep = " x ")
paste0("text", "more text", "even more") # sep = ""
rep(c(1, 2, 3), times = 100)
rep(c(1, 2, 3), each = 3)
c(1:10)
seq(1:10)
seq(from = 1, to = 100, by = 2)
seq(from = 1, to = 100, length.out = 10)
x <- letters[3:21]
seq_along(x)
sample(letters, size = 100, replace = TRUE)   # fixed: sample() takes `size=`, not `n=`
sample(letters, size = 10, replace = FALSE)
rnorm(10, 0, 1) %>% qplot()
rnorm(10000, 0, 1) %>% qplot()
d <- data_frame(A = sample(c(0:9), size = 100, replace = TRUE),
B = sample(c(0:9), size = 100, replace = TRUE),
C = sample(c(0:9), size = 100, replace = TRUE),
D = sample(c(0:9), size = 100, replace = TRUE))
d$total <- rowSums(d[c(1:4)])
d$mean <- d$total / 4
d$mean2 <- rowMeans(d[c(1:4)])
ggplot(d, aes(x = mean, y = mean2)) + geom_point()
x <- c(c(1:10000), rep(c(1, 2, 3), each = 2), c(100001:10000000))
x[duplicated(x)]
x <- c(c(1:10), rep(c(1, 2, 3), each = 2))
unique(x)  # fixed: `x[unique(x)]` indexed by *value*; it was only correct here by accident
length(x) - length(unique(x))  # number of duplicated entries
# Tidy data
# Manipulating data frames
data <- data[, unlist(lapply(data, function(x) !all(is.na(x))))] # blank cols
data <- data[rowSums(is.na(data)) != ncol(data),] # Remove blank rows
|
/examples (1).R
|
permissive
|
dantalus/Rcode
|
R
| false
| false
| 7,556
|
r
|
# Review
# object <- function(object)
# All of R is about understanding objects and functions.
#
# Objects are made up of 5 data structures:
# vectors, matrices, arrays - can only contain 1 type of information
# lists, and dataframes - can hold different types of information
# These structures can hold types information characterised as:
# logical, character, integer, numeric/double
# NA (NaN, Inf)
# There are many functions to help us better understand objects
class()          # the object's class
typeof()         # underlying storage type
str()            # compact structure summary
View()           # spreadsheet-style viewer
names()          # element / column names
attributes()     # all attributes at once
dim()            # dimensions -- fixed typo: there is no dims() in base R
length()
nrow()
is.character()
is.numeric()
is.factor()
is.logical()
# Subsetting
# Part of working with R is being table to take apart objects and rearrange the
# parts.
# Indexing
# One-dimension
vec <- sample(c(0:9), 100, replace = TRUE)
vec[2]
# Two dimensions
mat <- matrix(c(1, 2, 3, 3, 2, 1), ncol = 2)
mat[1, 2]  # fixed: `matrix[1, 2]` tried to index the matrix() *function*, not the object
# Lists
x <- list(a = c(1, 2), b = c(4, 4), c = c(6, 8), d = c(9, 11))
x[[1]]
x[[1]][1]
x[1]
# These give different results
class(x[[1]])
str(x[[1]])
class(x[1])
str(x[1])
attributes(x[1])
# $ for named elements in a list
x$a
class(x$a)
x$a[1]
# Selecting multiple elements
x <- letters
x[c(1, 2, 6)]
x <- sample(c(0:9), 100, replace = TRUE)
x[x < 5]
x < 5
d <- data_frame(number = sample(0:9, 100, replace = TRUE),
character = rep(c("a", "b"), 50))
lapply(d, class)
d[unlist(lapply(d, is.numeric))] %>% head()
d[ sapply(d, is.numeric)] %>% head()
# You can name elements in data structures besides lists.
x <- c( 1, 4, 6, 9)
str(x)
x <- c(a = 1, b = 4, c = 6, d = 9)
str(x)
names(x)
attributes(x)
attr(x, "description") <- "This is a named vector"
attributes(x)
# But $ only works with lists
x$a
x <- list(a = 1, b = 4, c = 6, d = 9)
x$a
# Combining objects can be tricky.
# Differnt data types willl typically reduce to the type with the lowest level
# of information
x <- c(1, "character")
x
class(x)
x <- c(1, TRUE, "character")
x
class(x)
x <- c(1, TRUE, FALSE)
x
class(x)
# Vectors can be combined to make matrices, but be careful
m <- rbind(sample(0:9, 100, replace = TRUE),
c("a", "b"))
class(m)
View(m)
# Dataframes will prevent you from doing this
m <- rbind(sample(0:9, 100, replace = TRUE),
c("a", "b")) %>% as.data.frame() # Not this way
m <- data_frame(sample(0:9, 100, replace = TRUE),
c("a", "b"))
# There are other functions to help switch between information types
as.character()
as.numeric()
as.factor()
# Factors
# Factors are a special kind of numeric variable with labels attached to each
# value, signifying categorical (nominal, ordered) data.
f <- sample(c("Yes", "No", "Maybe"), size = 100, replace = TRUE,
prob = c(0.3, 0.6, 0.1))
f.1 <- factor(f)
# The "levels" are the labels
levels(f.1)
table(f.1)
# Confirm the structure
str(f.1)
# The underlying numbers:
table(as.numeric(f.1))
# The order of the levels matters. By default, they will be in alphabetial order
sample(letters[c(1, 5, 8)], size = 100, replace = TRUE) %>%
factor() %>%
levels()
sample(letters[c(5, 8, 1)], size = 100, replace = TRUE) %>%
factor() %>%
class()
sample(letters[c(5, 8, 1)], size = 100, replace = TRUE) %>%
factor(levels = c("e", "h", "a"))
sample(letters[c(5, 8, 1)], size = 100, replace = TRUE) %>%
factor(levels = c("e", "h", "a"), ordered = TRUE) %>%
class()
sample(c(1, 2, 10, 20, 100), size = 100, replace = TRUE) %>%
factor() %>%
class()
sample(as.character(c(1, 2, 10, 20, 100)), size = 100, replace = TRUE) %>%
factor() %>%
class()
# Reordering levels
levels(f.1) <- c("Yes", "No", "Maybe")
table(f.1) # Bad!
levels(f.1) <- rev(levels(f.1)) # Switch it back
table(f.1)
# Do it with factor()
f.1 <- factor(f, levels = rev(levels(f.1)))
table(f.1) # Correct
f.1 <- relevel(f.1, ref = "Maybe")
table(f.1)
table(as.numeric(f.1)) # Convert to the underlying number
# Do it manually
f.1 <- factor(f, levels = c("Maybe", "Yes", "No"))
table(f.1) # Correct
# You need to use the exising levels
f.1 <- factor(f, levels = c("A", "B", "C")) # Bad
f.1 <- factor(f)
f.2 <- factor(f, labels = c("A", "B", "C")) # Use the labels option
table(f.1, f.2)
levels(f.2) # The labels become the levels forevermore
# Numbers as factors
f <- sample(c(10, 20, 50, 60, 65, 90), size = 100, replace = TRUE)
f.1 <- factor(f)
levels(f.1)
str(f.1)
f.1 %>% as.numeric() %>% table() # No
as.numeric(levels(f.1)[f.1]) %>% table() # Yes
f.1 <- cut(f, 4) # Equally spaced levels
table(f.1)
str(f.1)
f.1 <- cut(f, 4, labels = c("Low", "Med", "High", "Very High"))
table(f.1)
levels(f.1)
as.character(f.1)
# ~ equally sized levels
f.1 <- cut(f, breaks = quantile(f, 0:4/4))
table(f.1)
levels(f.1)
# User defined cuts
bmi <- rnorm(100, 26, 4)
qplot(bmi)
bmi <- cut(bmi, c(0, 18.5, 25, 30, max(bmi)),
labels = c("UW", "NW", "OW", "OB"))
table(bmi)
# Reordering levels based on other values
data <- data_frame(number = rnorm(100, 0, 1),
factor = factor(sample(letters[1:5], 100, replace = TRUE)))
levels(data$factor)
data <- group_by(data, factor) %>%
summarise(mean = mean(number)) %>%
full_join(data, by = "factor")
table(data$factor, data$mean)
ggplot(data, aes(x = factor, fill = mean)) +
geom_bar()
data$factor <- reorder(data$factor, data$mean)
levels(data$factor)
ggplot(data, aes(x = factor, fill = mean)) +
geom_bar()
data <- group_by(data, factor) %>%
summarise(count = n()) %>%
full_join(data, by = "factor")
data$factor <- reorder(data$factor, data$count)
ggplot(data, aes(x = factor, fill = mean)) +
geom_bar()
# Creating/Manipulating vectors (or variables)
paste( "text", "more text", "even more", sep = " x ")
paste0("text", "more text", "even more") # sep = ""
rep(c(1, 2, 3), times = 100)
rep(c(1, 2, 3), each = 3)
c(1:10)
seq(1:10)
seq(from = 1, to = 100, by = 2)
seq(from = 1, to = 100, length.out = 10)
x <- letters[3:21]
seq_along(x)
sample(letters, size = 100, replace = TRUE)   # fixed: sample() takes `size=`, not `n=`
sample(letters, size = 10, replace = FALSE)
rnorm(10, 0, 1) %>% qplot()
rnorm(10000, 0, 1) %>% qplot()
d <- data_frame(A = sample(c(0:9), size = 100, replace = TRUE),
B = sample(c(0:9), size = 100, replace = TRUE),
C = sample(c(0:9), size = 100, replace = TRUE),
D = sample(c(0:9), size = 100, replace = TRUE))
d$total <- rowSums(d[c(1:4)])
d$mean <- d$total / 4
d$mean2 <- rowMeans(d[c(1:4)])
ggplot(d, aes(x = mean, y = mean2)) + geom_point()
x <- c(c(1:10000), rep(c(1, 2, 3), each = 2), c(100001:10000000))
x[duplicated(x)]
x <- c(c(1:10), rep(c(1, 2, 3), each = 2))
unique(x)  # fixed: `x[unique(x)]` indexed by *value*; it was only correct here by accident
length(x) - length(unique(x))  # number of duplicated entries
# Tidy data
# Manipulating data frames
data <- data[, unlist(lapply(data, function(x) !all(is.na(x))))] # blank cols
data <- data[rowSums(is.na(data)) != ncol(data),] # Remove blank rows
|
#:# libraries
library(digest)   # hashing of R objects (experiment id below)
library(mlr)      # machine-learning task/learner/resampling framework
library(OpenML)   # download of OpenML datasets
library(farff)    # ARFF reader used by OpenML
#:# config
set.seed(1)  # fixed seed so the CV split (and hence results) is reproducible
#:# data
# Fetch the OpenML dataset "fri_c2_500_25" (binary target "binaryClass").
dataset <- getOMLDataSet(data.name = "fri_c2_500_25")
head(dataset$data)
#:# preprocessing
# No preprocessing is applied; the raw data frame is used as-is.
head(dataset$data)
#:# model
# Binary classification task fitted with a MARS model ("classif.earth"),
# predicting class probabilities.
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.earth", par.vals = list(), predict.type = "prob")
#:# hash
#:# 5e04a6c8ecb3a5db8fb53680931e39e2
# Digest of the (task, learner) pair; acts as a unique id for this experiment
# and should match the hash recorded in the comment above.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation, scored with several binary-classification measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr  # aggregated (mean over folds) values of all measures
ACC
#:# session info
# Record package versions used for this run.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
/models/openml_fri_c2_500_25/classification_binaryClass/5e04a6c8ecb3a5db8fb53680931e39e2/code.R
|
no_license
|
pysiakk/CaseStudies2019S
|
R
| false
| false
| 692
|
r
|
#:# libraries
library(digest)   # hashing of R objects (experiment id below)
library(mlr)      # machine-learning task/learner/resampling framework
library(OpenML)   # download of OpenML datasets
library(farff)    # ARFF reader used by OpenML
#:# config
set.seed(1)  # fixed seed so the CV split (and hence results) is reproducible
#:# data
# Fetch the OpenML dataset "fri_c2_500_25" (binary target "binaryClass").
dataset <- getOMLDataSet(data.name = "fri_c2_500_25")
head(dataset$data)
#:# preprocessing
# No preprocessing is applied; the raw data frame is used as-is.
head(dataset$data)
#:# model
# Binary classification task fitted with a MARS model ("classif.earth"),
# predicting class probabilities.
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.earth", par.vals = list(), predict.type = "prob")
#:# hash
#:# 5e04a6c8ecb3a5db8fb53680931e39e2
# Digest of the (task, learner) pair; acts as a unique id for this experiment
# and should match the hash recorded in the comment above.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation, scored with several binary-classification measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr  # aggregated (mean over folds) values of all measures
ACC
#:# session info
# Record package versions used for this run.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
###############################################################
### FUNCTIONS ###
###############################################################
source("r/stats.R")
freq <- '
"4" = "Always",
"3" = "Usually",
"2" = "Sometimes/Never",
"1" = "Sometimes/Never",
"-7" = "Don\'t know/Non-response",
"-8" = "Don\'t know/Non-response",
"-9" = "Don\'t know/Non-response",
"-1" = "Inapplicable"'
freq_fmt <- '
proc format;
value freq
4 = "Always"
3 = "Usually"
1 - 2 = "Sometimes/Never"
-9 - -7 = "Don\'t know/Non-response"
-1 = "Inapplicable";
run;
'
load_data <- function(rows, cols, year, lang = "r") {
  # Build the data-loading code snippet for one table.
  #
  # rows: character vector of subgroup names (the table rows).
  # cols: column-variable name; its prefix (before "_") selects the survey file.
  # year: survey year (numeric or string).
  # lang: target language of the generated code ("r" or "sas").
  #
  # Returns the assembled code after year/language substitution via rsub().
  if (lang == "r") LANG <- "R" else LANG <- lang  # file extension only differs for R
  year <- as.numeric(year)
  yr <- substring(year, 3, 4)      # two-digit current year
  yb <- substring(year - 1, 3, 4)  # previous year
  ya <- substring(year + 1, 3, 4)  # next year
  prefix <- strsplit(cols, "_")[[1]][1]
  load_sg <- rows
  # Adult/child variables need the age variable loaded unless an age subgroup
  # is already among the requested rows. FIX: the original used `&` here,
  # producing a length(rows) vector inside if(), which errors in R >= 4.2
  # (and silently consulted only rows[1] before that); `&&` + any() makes the
  # condition an explicit scalar.
  load_agevar <- (prefix %in% c("adult", "child")) && !any(load_sg %in% age_subgrps)
  if (load_agevar) load_sg <- c("agevar", load_sg)
  code <- readSource(sprintf("../shared/%s/load/load_fyc.%s", lang, LANG))
  code <- code %>% add(subgrp_code(grps = load_sg, lang = lang))
  code <- code %>% add(readSource(sprintf("%s/grps/%s.%s", lang, cols, LANG)))
  code %>% rsub(type = lang, year = year, yy = yr, ya = ya, yb = yb)
}
##############################################
get_r_code <- function(rows, cols, stat = "", year = 2014) {
  # Assemble the full R code snippet for one table: package loading, data
  # loading, the survey design, and the svy/svyby estimation call.
  yr <- substring(year, 3, 4)
  grps <- rows[rows != "ind"]            # "ind" means no subgrouping
  by <- paste0(grps, collapse = "+")
  prefix <- strsplit(cols, "_")[[1]][1]
  # A variable-specific svyby template wins over the prefix-level one.
  svy <- if (cols %in% names(meps_svyby)) cols else prefix
  dsgn <- switch(prefix,
                 "adult" = "design_saq",
                 "diab"  = "design_diab",
                 "design_fyc")           # default design
  code <- readSource('../shared/r/load/load_pkg.R') %>%
    add(load_data(rows, cols, year, lang = "r")) %>%
    add(readSource(sprintf("../shared/r/svydesign/%s.R", dsgn)))
  # Select svy or svyby depending on subgroups
  meps_code <- if (length(grps) == 0) meps_svy[[svy]] else meps_svyby[[svy]]
  code %>%
    add(meps_code) %>%
    rsub(PUFdir = "C:/MEPS", get_file_names(year), yy = yr, FUN = 'svymean',
         by = by, formula = cols, freq = freq)
}
get_sas_code <- function(rows, cols, stat = "", year = 2014) {
  # Assemble the SAS code snippet for one table: data loading plus the
  # format/class/where fragments derived from the row subgroups.
  yr <- substring(year, 3, 4)
  code <- load_data(rows, cols, year, lang = "sas")
  subgrps <- rows[rows != 'ind']         # "ind" means no subgrouping
  if (length(subgrps) > 0) {
    fmt   <- paste(subgrps, paste0(subgrps, "."), collapse = " ")  # "var var." pairs
    grp   <- paste0(subgrps, "*")
    gp    <- subgrps
    where <- sprintf("and %s ne .", gp)  # drop rows with missing subgroup values
  } else {
    fmt <- grp <- gp <- where <- ""      # no subgroup fragments needed
  }
  code %>%
    rsub(type = 'sas', PUFdir = "C:\\\\MEPS", get_file_names(year),
         yy = yr, fmt = fmt, where = where, grp = grp, gp = gp,
         freq_fmt = freq_fmt)
}
|
/mepstrends/hc_tables/hc2_care/app_code.R
|
no_license
|
dancy0824/meps_summary_tables_v3
|
R
| false
| false
| 2,667
|
r
|
###############################################################
### FUNCTIONS ###
###############################################################
source("r/stats.R")
freq <- '
"4" = "Always",
"3" = "Usually",
"2" = "Sometimes/Never",
"1" = "Sometimes/Never",
"-7" = "Don\'t know/Non-response",
"-8" = "Don\'t know/Non-response",
"-9" = "Don\'t know/Non-response",
"-1" = "Inapplicable"'
freq_fmt <- '
proc format;
value freq
4 = "Always"
3 = "Usually"
1 - 2 = "Sometimes/Never"
-9 - -7 = "Don\'t know/Non-response"
-1 = "Inapplicable";
run;
'
load_data <- function(rows, cols, year, lang = "r") {
  # Build the data-loading code snippet for one table.
  #
  # rows: character vector of subgroup names (the table rows).
  # cols: column-variable name; its prefix (before "_") selects the survey file.
  # year: survey year (numeric or string).
  # lang: target language of the generated code ("r" or "sas").
  #
  # Returns the assembled code after year/language substitution via rsub().
  if (lang == "r") LANG <- "R" else LANG <- lang  # file extension only differs for R
  year <- as.numeric(year)
  yr <- substring(year, 3, 4)      # two-digit current year
  yb <- substring(year - 1, 3, 4)  # previous year
  ya <- substring(year + 1, 3, 4)  # next year
  prefix <- strsplit(cols, "_")[[1]][1]
  load_sg <- rows
  # Adult/child variables need the age variable loaded unless an age subgroup
  # is already among the requested rows. FIX: the original used `&` here,
  # producing a length(rows) vector inside if(), which errors in R >= 4.2
  # (and silently consulted only rows[1] before that); `&&` + any() makes the
  # condition an explicit scalar.
  load_agevar <- (prefix %in% c("adult", "child")) && !any(load_sg %in% age_subgrps)
  if (load_agevar) load_sg <- c("agevar", load_sg)
  code <- readSource(sprintf("../shared/%s/load/load_fyc.%s", lang, LANG))
  code <- code %>% add(subgrp_code(grps = load_sg, lang = lang))
  code <- code %>% add(readSource(sprintf("%s/grps/%s.%s", lang, cols, LANG)))
  code %>% rsub(type = lang, year = year, yy = yr, ya = ya, yb = yb)
}
##############################################
get_r_code <- function(rows, cols, stat = "", year = 2014) {
  # Assemble the full R code snippet for one table: package loading, data
  # loading, the survey design, and the svy/svyby estimation call.
  yr <- substring(year, 3, 4)
  grps <- rows[rows != "ind"]            # "ind" means no subgrouping
  by <- paste0(grps, collapse = "+")
  prefix <- strsplit(cols, "_")[[1]][1]
  # A variable-specific svyby template wins over the prefix-level one.
  svy <- if (cols %in% names(meps_svyby)) cols else prefix
  dsgn <- switch(prefix,
                 "adult" = "design_saq",
                 "diab"  = "design_diab",
                 "design_fyc")           # default design
  code <- readSource('../shared/r/load/load_pkg.R') %>%
    add(load_data(rows, cols, year, lang = "r")) %>%
    add(readSource(sprintf("../shared/r/svydesign/%s.R", dsgn)))
  # Select svy or svyby depending on subgroups
  meps_code <- if (length(grps) == 0) meps_svy[[svy]] else meps_svyby[[svy]]
  code %>%
    add(meps_code) %>%
    rsub(PUFdir = "C:/MEPS", get_file_names(year), yy = yr, FUN = 'svymean',
         by = by, formula = cols, freq = freq)
}
get_sas_code <- function(rows, cols, stat = "", year = 2014) {
  # Assemble the SAS code snippet for one table: data loading plus the
  # format/class/where fragments derived from the row subgroups.
  yr <- substring(year, 3, 4)
  code <- load_data(rows, cols, year, lang = "sas")
  subgrps <- rows[rows != 'ind']         # "ind" means no subgrouping
  if (length(subgrps) > 0) {
    fmt   <- paste(subgrps, paste0(subgrps, "."), collapse = " ")  # "var var." pairs
    grp   <- paste0(subgrps, "*")
    gp    <- subgrps
    where <- sprintf("and %s ne .", gp)  # drop rows with missing subgroup values
  } else {
    fmt <- grp <- gp <- where <- ""      # no subgroup fragments needed
  }
  code %>%
    rsub(type = 'sas', PUFdir = "C:\\\\MEPS", get_file_names(year),
         yy = yr, fmt = fmt, where = where, grp = grp, gp = gp,
         freq_fmt = freq_fmt)
}
|
#Must somehow add a column with wickets after each over
setwd("~/R Work")
ipl_deliveries <- read.csv("./deliveries.csv", stringsAsFactors = FALSE, header = TRUE)
#Which match is in which season?
matches <- read.csv("./matches.csv", header = TRUE, stringsAsFactors = FALSE)
match_season <- subset(matches, select = c("id", "winner", "season"))
ipl_ball <- merge(ipl_deliveries, match_season, by.x = "match_id", by.y = "id")
#Let's subset only the first innings part, because the two innings are gonna be different
innings_one <- ipl_ball[ipl_ball$inning == 1,]
over_by_over <- aggregate(innings_one$total_runs ~ innings_one$over + innings_one$inning + innings_one$match_id + innings_one$season, innings_one, sum)
wickets_by_over <- aggregate(innings_one$player_dismissed ~innings_one$over + innings_one$inning + innings_one$match_id + innings_one$season, innings_one, function(x)sum(x != ""))
cumulative_wickets <- ave(wickets_by_over$`innings_one$player_dismissed`, wickets_by_over$`innings_one$inning`, wickets_by_over$`innings_one$match_id`, FUN = cumsum)
cumulative_score <- ave(over_by_over$`innings_one$total_runs`, over_by_over$`innings_one$inning`, over_by_over$`innings_one$match_id`, FUN = cumsum)
over_by_over_wic <- cbind(over_by_over, cumulative_score, cumulative_wickets)
over_by_over_08_wic <- over_by_over_wic
score_by_over_wic <- split(over_by_over_08_wic, f = list(over_by_over_08_wic$`innings_one$match_id`, over_by_over_08_wic$`innings_one$inning`))
#Convert the awkward score_by_over_08 list to a data frame with all the scores
score_by_over_wic_df <- as.data.frame(score_by_over_wic[[1]][[1]])
colnames(score_by_over_wic_df) <- "Overs"
for(i in 1:636)
{
runs_wic <- as.data.frame(cbind(score_by_over_wic[[i]][1], score_by_over_wic[[i]][6], score_by_over_wic[[i]][7]))
colnames(runs_wic) <- c("Overs", "Runs", "Wickets")
score_by_over_wic_df <- merge(score_by_over_wic_df, runs_wic, by = "Overs", all = TRUE)
}
names_wic_df <- vector(mode = "character")
names_wic_df[1] <- "Overs"
for(i in 2:1273)
{
if(i %% 2 == 0)
{
names_wic_df[i] <- paste0("Match ", (i %/% 2), "Runs")
}
else
{
names_wic_df[i] <- paste0("Match", (i %/% 2), "Wickets" )
}
}
colnames(score_by_over_wic_df) <- names_wic_df
#Calculate the maximum score in each innings (might not always be the 20th over score) and maximum wickets
max_score_wic <- vector(mode = "integer")
max_score_wic[1] <- 20
for(i in 2:1273)
{
max_score_wic[i] <- max(score_by_over_wic_df[i], na.rm = TRUE)
}
score_by_over_wic_df <- rbind(score_by_over_wic_df, max_score_wic)
#Take transpose so the variable to predict is a column
t_score_wic_df <- as.data.frame((t(score_by_over_wic_df)))
#Let's separate out the wickets
wickets_set <- data.frame()
for(i in 2:1273)
{
if(i %% 2 != 0)
{
wickets_set <- rbind(wickets_set, t_score_wic_df[i,])
}
}
#Delete the wickets rows from the data frame
toDelete <- seq(3, 1273, 2)
t_score_wic_df <- t_score_wic_df[-toDelete,]
#cbind the wickets to the end of this
overs <- t_score_wic_df[1,]
wickets_set <- rbind(overs, wickets_set)
wickets_names <- vector(mode = "character")
for(i in 1:20)
{
wickets_names[i] <- paste0("Over", i, "Wickets")
}
wickets_names[21] <- "total_wickets"
colnames(wickets_set) <- wickets_names
runs_names <- vector(mode = "character")
for(i in 1:20)
{
runs_names[i] <- paste0("Over", i, "Runs")
}
runs_names[21] <- "total_runs"
colnames(t_score_wic_df) <- runs_names
t_score_wic_df <- cbind(t_score_wic_df, wickets_set)
#Now, build the linear model
# FIX: the data argument referred to t_score_08_df, an object never created in
# this script (stale name from an earlier version), so lm() errored. Use the
# data frame actually built above, with plain column names + data=.
linear_model_wic <- lm(total_runs ~ Over6Runs + Over6Wickets + Over15Runs + Over15Wickets,
                       data = t_score_wic_df)
summary(linear_model_wic)
|
/ipl_linear_regression_with_wickets.R
|
no_license
|
apoorvabhide/ipl_scores_prediction
|
R
| false
| false
| 3,734
|
r
|
#Must somehow add a column with wickets after each over
setwd("~/R Work")
ipl_deliveries <- read.csv("./deliveries.csv", stringsAsFactors = FALSE, header = TRUE)
#Which match is in which season?
matches <- read.csv("./matches.csv", header = TRUE, stringsAsFactors = FALSE)
match_season <- subset(matches, select = c("id", "winner", "season"))
ipl_ball <- merge(ipl_deliveries, match_season, by.x = "match_id", by.y = "id")
#Let's subset only the first innings part, because the two innings are gonna be different
innings_one <- ipl_ball[ipl_ball$inning == 1,]
over_by_over <- aggregate(innings_one$total_runs ~ innings_one$over + innings_one$inning + innings_one$match_id + innings_one$season, innings_one, sum)
wickets_by_over <- aggregate(innings_one$player_dismissed ~innings_one$over + innings_one$inning + innings_one$match_id + innings_one$season, innings_one, function(x)sum(x != ""))
cumulative_wickets <- ave(wickets_by_over$`innings_one$player_dismissed`, wickets_by_over$`innings_one$inning`, wickets_by_over$`innings_one$match_id`, FUN = cumsum)
cumulative_score <- ave(over_by_over$`innings_one$total_runs`, over_by_over$`innings_one$inning`, over_by_over$`innings_one$match_id`, FUN = cumsum)
over_by_over_wic <- cbind(over_by_over, cumulative_score, cumulative_wickets)
over_by_over_08_wic <- over_by_over_wic
score_by_over_wic <- split(over_by_over_08_wic, f = list(over_by_over_08_wic$`innings_one$match_id`, over_by_over_08_wic$`innings_one$inning`))
#Convert the awkward score_by_over_08 list to a data frame with all the scores
score_by_over_wic_df <- as.data.frame(score_by_over_wic[[1]][[1]])
colnames(score_by_over_wic_df) <- "Overs"
for(i in 1:636)
{
runs_wic <- as.data.frame(cbind(score_by_over_wic[[i]][1], score_by_over_wic[[i]][6], score_by_over_wic[[i]][7]))
colnames(runs_wic) <- c("Overs", "Runs", "Wickets")
score_by_over_wic_df <- merge(score_by_over_wic_df, runs_wic, by = "Overs", all = TRUE)
}
names_wic_df <- vector(mode = "character")
names_wic_df[1] <- "Overs"
for(i in 2:1273)
{
if(i %% 2 == 0)
{
names_wic_df[i] <- paste0("Match ", (i %/% 2), "Runs")
}
else
{
names_wic_df[i] <- paste0("Match", (i %/% 2), "Wickets" )
}
}
colnames(score_by_over_wic_df) <- names_wic_df
#Calculate the maximum score in each innings (might not always be the 20th over score) and maximum wickets
max_score_wic <- vector(mode = "integer")
max_score_wic[1] <- 20
for(i in 2:1273)
{
max_score_wic[i] <- max(score_by_over_wic_df[i], na.rm = TRUE)
}
score_by_over_wic_df <- rbind(score_by_over_wic_df, max_score_wic)
#Take transpose so the variable to predict is a column
t_score_wic_df <- as.data.frame((t(score_by_over_wic_df)))
#Let's separate out the wickets
wickets_set <- data.frame()
for(i in 2:1273)
{
if(i %% 2 != 0)
{
wickets_set <- rbind(wickets_set, t_score_wic_df[i,])
}
}
#Delete the wickets rows from the data frame
toDelete <- seq(3, 1273, 2)
t_score_wic_df <- t_score_wic_df[-toDelete,]
#cbind the wickets to the end of this
overs <- t_score_wic_df[1,]
wickets_set <- rbind(overs, wickets_set)
wickets_names <- vector(mode = "character")
for(i in 1:20)
{
wickets_names[i] <- paste0("Over", i, "Wickets")
}
wickets_names[21] <- "total_wickets"
colnames(wickets_set) <- wickets_names
runs_names <- vector(mode = "character")
for(i in 1:20)
{
runs_names[i] <- paste0("Over", i, "Runs")
}
runs_names[21] <- "total_runs"
colnames(t_score_wic_df) <- runs_names
t_score_wic_df <- cbind(t_score_wic_df, wickets_set)
#Now, build the linear model
# FIX: the data argument referred to t_score_08_df, an object never created in
# this script (stale name from an earlier version), so lm() errored. Use the
# data frame actually built above, with plain column names + data=.
linear_model_wic <- lm(total_runs ~ Over6Runs + Over6Wickets + Over15Runs + Over15Wickets,
                       data = t_score_wic_df)
summary(linear_model_wic)
|
bonf <- 3.23*(10^-7)  # significance threshold applied region-wide below
highlights <- function(X) {
  # Summarise a vector of p-values for one region.
  # Returns c(index of the top (smallest) p-value, that p-value,
  #           number of tests in the region, number of p-values below `bonf`).
  top <- min(X, na.rm = TRUE)   # TRUE, not T -- T is reassignable
  # Exact equality is safe here: `top` was taken from X itself.
  wh <- which(X == top)
  if (length(wh) > 1) {
    cat("more than one top hit: ", paste((X)[wh], collapse = ","), "\n")
  }
  next.row <- c(wh[1], top, length(X), length(which(X < bonf)))
  return(next.row)
}
conditional <- function(X) {
  # Same per-region summary as highlights(): c(top index, top p-value,
  # n tests, n below `bonf`). Differs only in that the tie message prints
  # the tied *indices* rather than their p-values.
  top <- min(X, na.rm = TRUE)   # TRUE, not T -- T is reassignable
  wh <- which(X == top)
  if (length(wh) > 1) {
    cat("more than one top hit: ", paste(wh, collapse = ","), "\n")
  }
  # FIX: use wh[1] (as highlights() does) so the returned row always has
  # length 4; the original c(wh, ...) grew with the number of tied hits,
  # which would corrupt any rbind/tapply of these rows.
  next.row <- c(wh[1], top, length(X), length(which(X < bonf)))
  return(next.row)
}
setwd("/chiswick/data/ncooper/iChipData")
library(reader)
require(genoset); source("~/github/plumbCNV/FunctionsCNVAnalysis.R")
if(T) {
doc.path <- "ImChip_T1D_20130913"
docs <- cat.path(doc.path,list.files(doc.path))
table1 <- reader(docs[1])
excl <- reader(docs[2])
prv(table1)
prv(excl)
table1a <- table1[-which(rownames(table1) %in% rownames(excl)),]
table1a <- table1a[order(table1a[,3]),]
table1a <- table1a[order(table1a[,2]),]
#table1a <- table1a[order(table1a[,11]),]
prv.large(table1a[,c(3,10,12,15,18)-1],rows=100,cols=7)
poz <- as.numeric(table1a[,3])
# iChip regions
if(F) {
print(load("/chiswick/data/ncooper/iChipData/dense.ic.regions.b36.RData"))
## gives regions.gr
cyto <- get.cyto(); cyto[["gene"]] <- rownames(cyto)
cy.ranges <- toGenomeOrder(as(regions.gr,"RangedData"))
cy.rangesX <- annot.cnv(cy.ranges,gs=cyto) ### YES!!!! ###
ichip.regions <- cy.rangesX
save(ichip.regions,file="/chiswick/data/ncooper/iChipData/ichip.regions.RData")
} else {
print(load("/chiswick/data/ncooper/iChipData/ichip.regions.RData"))
}
table.ranges <- RangedData(ranges=IRanges(start=poz,end=poz,names=table1a[,1]),
space=table1a[,2],OR=table1a[,9],p.value=table1a[,11],fam.OR=table1a[,12],
fam.p.value=table1a[,14],meta.OR=table1a[,15],meta.p.value=table1a[,17])
cyto <- get.cyto(); cyto[["gene"]] <- rownames(cyto)
gs <- ichip.regions
table.ranges1 <- annot.cnv(table.ranges,gs=gs)
gs[["gene"]] <- paste("EXT",gs[["gene"]],sep="_")
start(gs) <- pmax(1,(start(gs)-50000))
end(gs) <- end(gs)+50000
table.ranges2 <- annot.cnv(table.ranges,gs=gs)
gs2 <- cyto
gs2[["gene"]] <- paste("OTHER",gs2[["gene"]],sep="_")
table.ranges3 <- annot.cnv(table.ranges,gs=gs2)
table.ranges1[["gene"]][which(table.ranges1$gene=="")] <- table.ranges2[["gene"]][which(table.ranges1$gene=="")]
table.ranges1[["gene"]][which(table.ranges1$gene=="")] <- table.ranges3[["gene"]][which(table.ranges1$gene=="")]
table.ranges <- table.ranges1
MHC <- RangedData(ranges=IRanges(start=25000000,end=35000000),space=6)
remv <- queryHits(findOverlaps(table.ranges[6],MHC))
#new6 <- table.ranges[6][-remv,]
st6 <- (chrIndices(table.ranges)[6,1]-1)
TR <- table.ranges[-(remv+st6),]
if(length(queryHits(findOverlaps(TR[6],MHC)))) { cat("MHC not removed\n") } else { cat("MHC successfully removed\n") }
# chr(cy.ranges) <- gsub("chr","",chr(cy.ranges))
# cy.chr <- gsub("chr","",chr(cy.ranges))
# annot.cnv(table.ranges,gs=cy.ranges)
# table.rangesX <- annot.cnv(table.ranges,gs=cy.ranges)
topsnplist <- reader(fn="/chiswick/data/ncooper/iChipData/topsnplist.txt")
#table.ranges <- annot.cnv(table.ranges,gs=gs)
jj <- order(TR[["meta.p.value"]])
tt <- as.data.frame(TR)[jj,]
print(load("all.support.RData"))
kk2 <- all.support$SNP[match(topsnplist,all.support$dbSNP)]
tt <- tt[kk2,]
kk <- which(as.numeric(tt[["meta.p.value"]])<(10^-5))
tt <- tt[kk,]
prv.large(tt,rows=100)
}
if(T) {
## extract infuva-controls.txt 3 txt 1 2 5 6 12 1nplist.txt")
#table.ranges <- annot.cnv(table.ranges,gs=gs)
#do for each 'row' (regional summary)
out.list <- tapply(tt$meta.p.value,tt$gene,highlights) # main stats
out.snps <- tapply(tt$names,tt$gene,"[",1) # top snp (1st because sorted)
grp.snps <- tapply(tt$names,tt$gene,c) # snp list
# convert list to data.frame and format
out.frame <- cbind(sapply(out.list,"[",1),sapply(out.list,"[",2),sapply(out.list,"[",3),sapply(out.list,"[",4))
colnames(out.frame) <- c("whichSNP","best.p.value","region.hits","hits<bonferroni")
out.frame <- cbind(out.frame,out.frame[,3]-out.frame[,4])
colnames(out.frame)[5] <- c("hits>=bonferroni")
out.frame[,1] <- as.character(out.frame[,1])
out.frame[,1] <- out.snps
###
top.snps <- out.frame[as.numeric(out.frame[,"best.p.value"])<(10^-5),"whichSNP"]
names(top.snps)[1] <- "ChrX"
top.snps.dat <- out.frame[as.numeric(out.frame[,"best.p.value"])<(10^-5),]
save(top.snps, top.snps.dat, file="topSNPs.RData")
# add genes for each region
genes <- get.gene.annot()
bandz <- paste(chr(genes),genes[["band"]],sep="")
nmz <- rownames(out.frame)
nmz <- gsub("OTHER_","",nmz)
nmz <- gsub("EXT_","",nmz)
genz <- lapply(nmz,function(nmz) { genes[["gene"]][which(bandz %in% nmz)] })
## actually this is too many genes to look at!
## GATHER THE REGIONS ##
all.nms <- names(grp.snps)
three.list <- all.chr <- vector("list",3) # hold the normal, extended and other lists
all.pref1 <- substr(all.nms,1,2)
three.list[[1]] <- all.nms[!((all.pref1 %in% "OT") | (all.pref1 %in% "EX"))]
three.list[[2]] <- gsub("EXT_","",all.nms[all.pref1 %in% "EX"])
three.list[[3]] <- gsub("OTHER_","",all.nms[all.pref1 %in% "OT"])
for (cc in 1:length(three.list)) {
all.chr[[cc]] <- substr(three.list[[cc]],1,2)
selec <- which(substr(three.list[[cc]],2,2) %in% c("p","q"))
all.chr[[cc]][selec] <- substr(three.list[[cc]],1,1)[selec]
}
names(three.list) <- c("","EXT","OTHER")
## NB must choose rule to account for OTHER_ and EXT_
## NB: original code source was: "/home/chrisw/local/R/scripts/hapmap-rates.R"
#recomWindow <- function(chr,st,en=st,window=0.1,bp=0,do.plot=FALSE,
# add.plot=FALSE,do.lines=TRUE,...)
}
stop()
### FOLLOWUP ANALYSIS FOR SNPs ###
topsnplist <- reader(fn="/chiswick/data/ncooper/iChipData/topsnplist.txt")
topsnplist[topsnplist=="rs6679677"] <- "rs2476601"
######## do each chr ##########
#chrz <- 12 # add all chrs here
followup <- TRUE
if(followup) {
chrz <- 1:22
} else {
chrz <- narm(as.numeric(unique(unlist(all.chr))))
}
for(next.chr in chrz) {
# load data for one chromosome at a time
Header(paste("Chromosome",next.chr))
ofn <- "/chiswick/data/ncooper/iChipData/temp.ichip-data.RData"
chr <- next.chr; #st <- 1; en <- get.chr.lens()[next.chr]
###system(paste("~/github/iChip/load-ichip.R chr=",chr," file=",out.file=ofn,sep=""))
#system("~/github/iChip/load-ichip.R --chr",chr,"--start",st,"--end",en,"--file",out.file=ofn)
print(load(cat.path(fn=ofn,suf=next.chr,ext="RData")))
#annotated.snp.support, t1d.data, t1d.support, control.data, control.support
Ph <- rep(c(1,0),times=c(nrow(t1d.data),nrow(control.data)))
myData <- rbind(t1d.data,control.data)
covs <- FALSE
if(covs) {
region13 <- c(t1d.support$b58cregion,control.support$b58cregion)
the.sex <- c(t1d.support$sex + control.support$sex)
}
### NOW CYCLE THROUGH SNP GROUPS ON THIS CHR ####
# grpz <- 4 # work out which groups to analyse
if(followup) {
which.snps <- which(annotated.snp.support$dbSNP %in% topsnplist)
snps.next <- annotated.snp.support$dbSNP[which.snps]
snps.next.SNP <- annotated.snp.support$SNP[which.snps]
snps.locs <- annotated.snp.support$Pos[which.snps]
# Build a one-position range per top SNP on this chromosome, named by
# probe ID, so band annotation can be attached via annot.cnv() below.
# Fix: `startSnps.locs` and `endSnps.locs` were undefined names (the
# `start=` / `end=` argument labels were fused onto the variable name);
# both endpoints are the SNP position `snps.locs`.
snp.rd <- RangedData(ranges=IRanges(start=snps.locs,
end=snps.locs,names=annotated.snp.support$SNP[which.snps]),
space=rep(next.chr,length(snps.locs)))
snp.rd <- annot.cnv(snp.rd,gs=cyto); colnames(snp.rd) <- "band"
bands <- snp.rd$band
nxt.window <- lapply(snps.locs, function(X,...) { recomWindow(st=X,...) },chr=next.chr,window=1,bp=50000)
st.window <- lapply(nxt.window, "[",1)
en.window <- lapply(nxt.window, "[",2)
n.snps <- vector("list",length(st.window))
for(cc in 1:length(st.window)) {
n.snps[[cc]] <- which(annotated.snp.support$Chr==next.chr &
annotated.snp.support$Pos>=st.window[cc] &
annotated.snp.support$Pos<=en.window[cc] &
(!annotated.snp.support$SNP %in% rownames(excl))
)
}
grp.labs <- lapply(n.snps,function(X) { annotated.snp.support$SNP[X] })
grp.snps <- lapply(n.snps,function(X) { annotated.snp.support$dbSNP[X] })
for (cc in 1:length(grp.labs)) {
grp.snps[[cc]][is.na(grp.snps[[cc]])] <- grp.labs[[cc]][is.na(grp.snps[[cc]])]
grp.snps[[cc]][duplicated(grp.snps[[cc]])] <- grp.labs[[cc]][duplicated(grp.snps[[cc]])]
}
names(grp.snps) <- names(grp.labs) <- bands
grpz <- 1:length(bands)
} else {
mainhit <- three.list[[1]][which(all.chr[[1]]==paste(next.chr))]
nearhit <- paste("EXT",three.list[[2]][which(all.chr[[2]]==paste(next.chr))],sep="_")
outhit <- paste("OTHER",three.list[[3]][which(all.chr[[3]]==paste(next.chr))],sep="_")
grpz <- which(names(grp.snps) %in% mainhit)
}
if(length(grpz)==0) { next }
for(grp in grpz) {
the.sig <- NULL
cat("Testing group:",grp,", around SNP",snps.next.SNP[grp],"[",snps.next[grp],"]"," band",bands[grp],"\n")
if(!followup) {
snpid.list <- annotated.snp.support$SNP[match(grp.snps[[grp]],annotated.snp.support$dbSNP)]
} else {
snpid.list <- grp.labs[[grp]]
}
snpid.list <- gsub("-",".",snpid.list,fixed=T)
snpid.list <- gsub("1kg_","X1kg_",snpid.list,fixed=T)
snpid.list <- gsub(",","_",snpid.list,fixed=T)
grp.snps[[grp]] <- gsub("-",".",grp.snps[[grp]],fixed=T)
grp.snps[[grp]] <- gsub("1kg_","X1kg_",grp.snps[[grp]],fixed=T)
grp.snps[[grp]] <- gsub(",","_",grp.snps[[grp]],fixed=T)
#gsub("1kg_","X1kg_",snpid.list,fixed=T)
were.missing <- which(!snpid.list %in% colnames(myData))
# prv(snpid.list)
if(length(were.missing)>0){
cat("missing SNPs from grp list:\n"); print(grp.snps[[grp]][were.missing]) ; cat("\n")
snpid.list <- snpid.list[-were.missing]
rename <- grp.snps[[grp]][-were.missing]
} else {
rename <- grp.snps[[grp]]
}
# conditional analysis p value?
#sum(sapply(grp.snps,length))
#[1] 2319
#> .05/2319
#[1] 2.156102e-05
# prv(snpid.list)
if(length(snpid.list)<1) { warning("no snps left to test in this set"); next }
myDataFilt <- myData[,snpid.list]
colnames(myDataFilt) <- rename
if(followup) {
top.snp <- as.numeric(myDataFilt[,snps.next[grp]])-1
} else {
top.snp <- as.numeric(myDataFilt[,1])-1
}
#snp.rhs.tests(as.formula("Ph ~ 1"),snp.data=myDataFilt,allow.missing=.1,tests=rename)
#snp.rhs.tests(as.formula("Ph ~ top.snp"),snp.data=myDataFilt[,-1],allow.missing=.1,tests=rename[-1])
### now loop through until no further covariates are found ##
#if(is.null(the.sig)) { the.sig <- top.snp } # else it will be all the conditionally significant so far
found.conds <- TRUE
if(followup) {
excl.cols <- which(colnames(myDataFilt)==snps.next[grp])[1]
} else {
excl.cols <- 1
}
first <- T
sig.reg.snps <- snps.next[grp] #"top.snp" # snpid.list[1]
while(found.conds) {
## ## hack to convert selection to a matrix ##
## top.snp <- as.data.frame(myDataFilt[,excl.cols])
## for(jj in 1:ncol(top.snp)) {
## top.snp[,jj] <- as.numeric(top.snp[,jj])-1
## }
## top.snp <- as.matrix(top.snp)
##
# prv(top.snp)
terms <- paste(sig.reg.snps,collapse=" + ")
#print(colnames(myDataFilt)); print(terms)
if(covs) {
cond.res <- snp.rhs.tests(as.formula(paste("Ph ~ top.snp + strata(region13) + the.sex")),snp.data=myDataFilt[,],allow.missing=.1,tests=rename[-excl.cols])
} else {
cat("testing",terms,"[",excl.cols,"]\n")
top.snp[top.snp<0] <- NA
# prv.large(top.snp)
## print(summary(apply(top.snp,1,function(X) { length(which(is.na(X))) })))
mod.txt <- paste("Ph ~ top.snp")
for (dd in 1:length(sig.reg.snps)) {
if(!exists(sig.reg.snps[dd])) {
nuxt <- as.numeric(myDataFilt[,sig.reg.snps[dd]])-1
nuxt[nuxt<0] <- NA
assign(sig.reg.snps[dd],nuxt)
}
}
mod.txt <- paste("Pheno ~",terms)
## cond.res <- snp.rhs.tests(as.formula(mod.txt),snp.data=myDataFilt[,-excl.cols]) #,allow.missing=.1,uncertain=TRUE,tests=rename[-excl.cols])
cov.data1 <- myDataFilt[,-excl.cols];
cond.res <- list()
cov.data <- as.data.frame(cov.data1)
for(jj in 1:ncol(cov.data)) {
nuxt <- as.numeric(cov.data[,jj])-1
nuxt[nuxt<0] <- NA
cov.data[,jj] <- nuxt
# assign(colnames(cov.data)[jj], nuxt)
}
if(first) {
row.mis <- (apply(cov.data,1,function(X) { length(which(is.na(X)))/length(X) }))
}
col.mis <- apply(cov.data,2,function(X) { length(which(is.na(X)))/length(X) })
cov.data <- cov.data[row.mis<.03,col.mis<.03]
if(first){
Pheno <- Ph[row.mis<.03]
# Restrict each conditioning-SNP genotype vector (created via assign()
# above) to the rows retained by the missingness filter, so they line up
# with `Pheno` and `cov.data`.
# Fix: `terms` is one collapsed string ("snp1 + snp2 + ..."), so
# `terms[jj]` is NA for jj > 1 and a bogus name for jj == 1; the intended
# assign() target is the SNP's own name, `sig.reg.snps[jj]`.
for(jj in seq_along(sig.reg.snps)) {
assign(sig.reg.snps[jj], get(sig.reg.snps[jj])[row.mis<.03])
}
for(jj in 1:ncol(cov.data)) {
#print(colnames(cov.data)[jj])
assign(colnames(cov.data)[jj], cov.data[,jj])
}
}
#cov.data <- as.matrix(cov.data)
cat("testing",ncol(cov.data),"snps\n")
## cond.res <- snp.rhs.tests(as.formula(paste(mod.txt)), snp.data=cov.data1)
#print(cond.res)
# print(head(p.value(cond.res)[order(p.value(cond.res))],20) )
for (dd in 1:ncol(cov.data)) {
nxt <- glm(as.formula(paste(mod.txt,colnames(cov.data)[dd],sep=" + ")), family = binomial(logit),data=cov.data)
cond.res[[dd]] <- mysumfun(nxt,p.digits=250)[[1]][,2]
}
#print(cond.res)
}
if(is.list(cond.res)) { p.cond <- do.call("rbind",args=cond.res) } else { p.cond <- p.value(cond.res) }
if(!is.null(dim(p.cond))) { p.cond <- p.cond[,ncol(p.cond)] }
cond.sigs <- which(p.cond<=bonf)
if(length(cond.sigs)>0) {
naz <- is.na(p.cond)
the.p <- min(p.cond[!naz],na.rm=T)
the.min <- which(p.cond[!naz]==the.p)
if(length(the.min)>1) {
# cat("min length",length(the.min),"\n");
# the.chi <- max(chi.squared(cond.res)[!naz],na.rm=T)
# the.min <- which(chi.squared(cond.res)[!naz]==the.chi)
if(length(the.min)>1) { cat("min chi length",length(the.min),"\n"); the.min <- the.min[1] }
}
the.sig.lab <- rename[-excl.cols][the.min]
cat("new snp:",the.sig.lab," with conditional p value:",the.p,"\n")
the.sig <- myDataFilt[,the.sig.lab]
excl.cols <- c(excl.cols,which(colnames(myDataFilt)==the.sig.lab))
} else {
found.conds <- FALSE
}
sig.reg.snps <- colnames(myDataFilt)[excl.cols]
glm.result <- glm(as.formula(mod.txt), family = binomial(logit))
ii <- mysumfun(glm.result,p.digits=250,o.digits=3)
print(ii)
first <- FALSE
if(length(excl.cols)>5) { found.conds <- FALSE }
}
print(sig.reg.snps) # append this to the list of top/conditional snps
}
## add chr result
}
### OTHER STUFF
if(F) {
tab <- all.summary[order(all.summary[,"CHR"]),]
colnames(table1a)
cn <- c("CHR","MAF_UKcontrol","OR_CC","P_CC","OR_Fam","P_Fam","OR_Meta","P_Meta")
# Fix: `outframe` is undefined -- the object built above is `out.frame`,
# and it is a character matrix (not a data frame), so `$` would fail;
# use matrix column indexing by name instead.
snp.list <- out.frame[,"whichSNP"]
more.data <- table1a[match(snp.list,table1a[,1]),cn]
colnames(more.data)[2] <- "MAF"
all.summary <- cbind(out.frame,more.data)
print(all.summary[order(all.summary[,"CHR"]),])
# Summarise one chromosome's results table: the minimum p-value observed
# for each gene, returned (invisibly, as the original did via its final
# assignment) as a named 1-d array keyed by gene.
do.one.chr <- function(chr.tab) {
  per.gene.min <- tapply(chr.tab$p.value, chr.tab$gene, min, na.rm = TRUE)
  invisible(per.gene.min)
}
#chr.results.list <- lapply(tt,do.one.chr)
results.list <- do.one.chr(tt)
prv(results.list)
gen.list <- names(results.list)
gen.list.spl <- strsplit(gen.list,";",fixed=T)
prv(gs)
bands <- lapply(gen.list.spl,
function(X) {
ind <- match(X,gs$gene)
OO <- paste(unique(paste(chr(gs)[ind],gs$band[ind],sep="")),collapse=";")
return(OO) })
res2 <- data.frame(genes=substr(gen.list,1,20),p.value=as.numeric(results.list),band=unlist(bands))
res2 <- res2[order(res2[,2]),]
prv.large(res2,rows=50,cols=3)
unique(res2$band)
}
#table1:
# 1.Chr 2.causal 3.best 4.r2 5.all.M>m 6.MAF 7.OR 8.P 9.prev 10.All 11.OR 12.P 13.Ref
# 1: band - function lookup from location (chr=3, pos =4)
# 2: gene names - function lookup from location (chr=3, pos =4)
# 3: rsid - snpid in col2, some of which will need rsid lookup
# 4: r^2: could look in dataset at col2, versus c#9
# 5: col 5 > col 6 ?
# 6: col 7
# 7: col 10
# 8: col 12
# 9: prev snp?
# 10: prev snp alleles ?
# 11: prev snp OR ?
# 12: prev snp P ?
# 13: prev snp ref ?
|
/working.R
|
no_license
|
nicholasjcooper/iChip
|
R
| false
| false
| 17,142
|
r
|
# Genome-wide significance cutoff used throughout (Bonferroni-style).
bonf <- 3.23 * (10^-7)

# Summarise a vector of p-values for one region: index of the smallest
# p-value (first index on ties, with a console note), the value itself,
# the number of tests, and how many tests fall below `bonf`.
highlights <- function(X) {
  best <- min(X, na.rm = TRUE)
  ties <- which(X == best)
  if (length(ties) > 1) {
    cat("more than one top hit: ", paste((X)[ties], collapse = ","), "\n")
  }
  c(ties[1], best, length(X), length(which(X < bonf)))
}
# Companion to highlights() for conditional-analysis summaries. Returns
# c(index of best p-value, best p-value, number of tests, number of
# tests below `threshold`).
#
# Fix: the original used `wh` directly in the result, so a tie for the
# minimum produced a vector longer than 4 elements; take the first index
# (matching highlights()). The cutoff is now an explicit `threshold`
# argument whose default, the global `bonf`, preserves old call sites.
conditional <- function(X, threshold = bonf) {
  top <- min(X, na.rm = TRUE)
  wh <- which(X == top)
  if (length(wh) > 1) { cat("more than one top hit: ", paste(wh, collapse = ","), "\n") }
  next.row <- c(wh[1], top, length(X), length(which(X < threshold)))
  return(next.row)
}
setwd("/chiswick/data/ncooper/iChipData")
library(reader)
require(genoset); source("~/github/plumbCNV/FunctionsCNVAnalysis.R")
if(T) {
doc.path <- "ImChip_T1D_20130913"
docs <- cat.path(doc.path,list.files(doc.path))
table1 <- reader(docs[1])
excl <- reader(docs[2])
prv(table1)
prv(excl)
table1a <- table1[-which(rownames(table1) %in% rownames(excl)),]
table1a <- table1a[order(table1a[,3]),]
table1a <- table1a[order(table1a[,2]),]
#table1a <- table1a[order(table1a[,11]),]
prv.large(table1a[,c(3,10,12,15,18)-1],rows=100,cols=7)
poz <- as.numeric(table1a[,3])
# iChip regions
if(F) {
print(load("/chiswick/data/ncooper/iChipData/dense.ic.regions.b36.RData"))
## gives regions.gr
cyto <- get.cyto(); cyto[["gene"]] <- rownames(cyto)
cy.ranges <- toGenomeOrder(as(regions.gr,"RangedData"))
cy.rangesX <- annot.cnv(cy.ranges,gs=cyto) ### YES!!!! ###
ichip.regions <- cy.rangesX
save(ichip.regions,file="/chiswick/data/ncooper/iChipData/ichip.regions.RData")
} else {
print(load("/chiswick/data/ncooper/iChipData/ichip.regions.RData"))
}
table.ranges <- RangedData(ranges=IRanges(start=poz,end=poz,names=table1a[,1]),
space=table1a[,2],OR=table1a[,9],p.value=table1a[,11],fam.OR=table1a[,12],
fam.p.value=table1a[,14],meta.OR=table1a[,15],meta.p.value=table1a[,17])
cyto <- get.cyto(); cyto[["gene"]] <- rownames(cyto)
gs <- ichip.regions
table.ranges1 <- annot.cnv(table.ranges,gs=gs)
gs[["gene"]] <- paste("EXT",gs[["gene"]],sep="_")
start(gs) <- pmax(1,(start(gs)-50000))
end(gs) <- end(gs)+50000
table.ranges2 <- annot.cnv(table.ranges,gs=gs)
gs2 <- cyto
gs2[["gene"]] <- paste("OTHER",gs2[["gene"]],sep="_")
table.ranges3 <- annot.cnv(table.ranges,gs=gs2)
table.ranges1[["gene"]][which(table.ranges1$gene=="")] <- table.ranges2[["gene"]][which(table.ranges1$gene=="")]
table.ranges1[["gene"]][which(table.ranges1$gene=="")] <- table.ranges3[["gene"]][which(table.ranges1$gene=="")]
table.ranges <- table.ranges1
MHC <- RangedData(ranges=IRanges(start=25000000,end=35000000),space=6)
remv <- queryHits(findOverlaps(table.ranges[6],MHC))
#new6 <- table.ranges[6][-remv,]
st6 <- (chrIndices(table.ranges)[6,1]-1)
TR <- table.ranges[-(remv+st6),]
if(length(queryHits(findOverlaps(TR[6],MHC)))) { cat("MHC not removed\n") } else { cat("MHC successfully removed\n") }
# chr(cy.ranges) <- gsub("chr","",chr(cy.ranges))
# cy.chr <- gsub("chr","",chr(cy.ranges))
# annot.cnv(table.ranges,gs=cy.ranges)
# table.rangesX <- annot.cnv(table.ranges,gs=cy.ranges)
topsnplist <- reader(fn="/chiswick/data/ncooper/iChipData/topsnplist.txt")
#table.ranges <- annot.cnv(table.ranges,gs=gs)
jj <- order(TR[["meta.p.value"]])
tt <- as.data.frame(TR)[jj,]
print(load("all.support.RData"))
kk2 <- all.support$SNP[match(topsnplist,all.support$dbSNP)]
tt <- tt[kk2,]
kk <- which(as.numeric(tt[["meta.p.value"]])<(10^-5))
tt <- tt[kk,]
prv.large(tt,rows=100)
}
if(T) {
## extract infuva-controls.txt 3 txt 1 2 5 6 12 1nplist.txt")
#table.ranges <- annot.cnv(table.ranges,gs=gs)
#do for each 'row' (regional summary)
out.list <- tapply(tt$meta.p.value,tt$gene,highlights) # main stats
out.snps <- tapply(tt$names,tt$gene,"[",1) # top snp (1st because sorted)
grp.snps <- tapply(tt$names,tt$gene,c) # snp list
# convert list to data.frame and format
out.frame <- cbind(sapply(out.list,"[",1),sapply(out.list,"[",2),sapply(out.list,"[",3),sapply(out.list,"[",4))
colnames(out.frame) <- c("whichSNP","best.p.value","region.hits","hits<bonferroni")
out.frame <- cbind(out.frame,out.frame[,3]-out.frame[,4])
colnames(out.frame)[5] <- c("hits>=bonferroni")
out.frame[,1] <- as.character(out.frame[,1])
out.frame[,1] <- out.snps
###
top.snps <- out.frame[as.numeric(out.frame[,"best.p.value"])<(10^-5),"whichSNP"]
names(top.snps)[1] <- "ChrX"
top.snps.dat <- out.frame[as.numeric(out.frame[,"best.p.value"])<(10^-5),]
save(top.snps, top.snps.dat, file="topSNPs.RData")
# add genes for each region
genes <- get.gene.annot()
bandz <- paste(chr(genes),genes[["band"]],sep="")
nmz <- rownames(out.frame)
nmz <- gsub("OTHER_","",nmz)
nmz <- gsub("EXT_","",nmz)
genz <- lapply(nmz,function(nmz) { genes[["gene"]][which(bandz %in% nmz)] })
## actually this is too many genes to look at!
## GATHER THE REGIONS ##
all.nms <- names(grp.snps)
three.list <- all.chr <- vector("list",3) # hold the normal, extended and other lists
all.pref1 <- substr(all.nms,1,2)
three.list[[1]] <- all.nms[!((all.pref1 %in% "OT") | (all.pref1 %in% "EX"))]
three.list[[2]] <- gsub("EXT_","",all.nms[all.pref1 %in% "EX"])
three.list[[3]] <- gsub("OTHER_","",all.nms[all.pref1 %in% "OT"])
for (cc in 1:length(three.list)) {
all.chr[[cc]] <- substr(three.list[[cc]],1,2)
selec <- which(substr(three.list[[cc]],2,2) %in% c("p","q"))
all.chr[[cc]][selec] <- substr(three.list[[cc]],1,1)[selec]
}
names(three.list) <- c("","EXT","OTHER")
## NB must choose rule to account for OTHER_ and EXT_
## NB: original code source was: "/home/chrisw/local/R/scripts/hapmap-rates.R"
#recomWindow <- function(chr,st,en=st,window=0.1,bp=0,do.plot=FALSE,
# add.plot=FALSE,do.lines=TRUE,...)
}
stop()
### FOLLOWUP ANALYSIS FOR SNPs ###
topsnplist <- reader(fn="/chiswick/data/ncooper/iChipData/topsnplist.txt")
topsnplist[topsnplist=="rs6679677"] <- "rs2476601"
######## do each chr ##########
#chrz <- 12 # add all chrs here
followup <- TRUE
if(followup) {
chrz <- 1:22
} else {
chrz <- narm(as.numeric(unique(unlist(all.chr))))
}
for(next.chr in chrz) {
# load data for one chromosome at a time
Header(paste("Chromosome",next.chr))
ofn <- "/chiswick/data/ncooper/iChipData/temp.ichip-data.RData"
chr <- next.chr; #st <- 1; en <- get.chr.lens()[next.chr]
###system(paste("~/github/iChip/load-ichip.R chr=",chr," file=",out.file=ofn,sep=""))
#system("~/github/iChip/load-ichip.R --chr",chr,"--start",st,"--end",en,"--file",out.file=ofn)
print(load(cat.path(fn=ofn,suf=next.chr,ext="RData")))
#annotated.snp.support, t1d.data, t1d.support, control.data, control.support
Ph <- rep(c(1,0),times=c(nrow(t1d.data),nrow(control.data)))
myData <- rbind(t1d.data,control.data)
covs <- FALSE
if(covs) {
region13 <- c(t1d.support$b58cregion,control.support$b58cregion)
the.sex <- c(t1d.support$sex + control.support$sex)
}
### NOW CYCLE THROUGH SNP GROUPS ON THIS CHR ####
# grpz <- 4 # work out which groups to analyse
if(followup) {
which.snps <- which(annotated.snp.support$dbSNP %in% topsnplist)
snps.next <- annotated.snp.support$dbSNP[which.snps]
snps.next.SNP <- annotated.snp.support$SNP[which.snps]
snps.locs <- annotated.snp.support$Pos[which.snps]
# Build a one-position range per top SNP on this chromosome, named by
# probe ID, so band annotation can be attached via annot.cnv() below.
# Fix: `startSnps.locs` and `endSnps.locs` were undefined names (the
# `start=` / `end=` argument labels were fused onto the variable name);
# both endpoints are the SNP position `snps.locs`.
snp.rd <- RangedData(ranges=IRanges(start=snps.locs,
end=snps.locs,names=annotated.snp.support$SNP[which.snps]),
space=rep(next.chr,length(snps.locs)))
snp.rd <- annot.cnv(snp.rd,gs=cyto); colnames(snp.rd) <- "band"
bands <- snp.rd$band
nxt.window <- lapply(snps.locs, function(X,...) { recomWindow(st=X,...) },chr=next.chr,window=1,bp=50000)
st.window <- lapply(nxt.window, "[",1)
en.window <- lapply(nxt.window, "[",2)
n.snps <- vector("list",length(st.window))
for(cc in 1:length(st.window)) {
n.snps[[cc]] <- which(annotated.snp.support$Chr==next.chr &
annotated.snp.support$Pos>=st.window[cc] &
annotated.snp.support$Pos<=en.window[cc] &
(!annotated.snp.support$SNP %in% rownames(excl))
)
}
grp.labs <- lapply(n.snps,function(X) { annotated.snp.support$SNP[X] })
grp.snps <- lapply(n.snps,function(X) { annotated.snp.support$dbSNP[X] })
for (cc in 1:length(grp.labs)) {
grp.snps[[cc]][is.na(grp.snps[[cc]])] <- grp.labs[[cc]][is.na(grp.snps[[cc]])]
grp.snps[[cc]][duplicated(grp.snps[[cc]])] <- grp.labs[[cc]][duplicated(grp.snps[[cc]])]
}
names(grp.snps) <- names(grp.labs) <- bands
grpz <- 1:length(bands)
} else {
mainhit <- three.list[[1]][which(all.chr[[1]]==paste(next.chr))]
nearhit <- paste("EXT",three.list[[2]][which(all.chr[[2]]==paste(next.chr))],sep="_")
outhit <- paste("OTHER",three.list[[3]][which(all.chr[[3]]==paste(next.chr))],sep="_")
grpz <- which(names(grp.snps) %in% mainhit)
}
if(length(grpz)==0) { next }
for(grp in grpz) {
the.sig <- NULL
cat("Testing group:",grp,", around SNP",snps.next.SNP[grp],"[",snps.next[grp],"]"," band",bands[grp],"\n")
if(!followup) {
snpid.list <- annotated.snp.support$SNP[match(grp.snps[[grp]],annotated.snp.support$dbSNP)]
} else {
snpid.list <- grp.labs[[grp]]
}
snpid.list <- gsub("-",".",snpid.list,fixed=T)
snpid.list <- gsub("1kg_","X1kg_",snpid.list,fixed=T)
snpid.list <- gsub(",","_",snpid.list,fixed=T)
grp.snps[[grp]] <- gsub("-",".",grp.snps[[grp]],fixed=T)
grp.snps[[grp]] <- gsub("1kg_","X1kg_",grp.snps[[grp]],fixed=T)
grp.snps[[grp]] <- gsub(",","_",grp.snps[[grp]],fixed=T)
#gsub("1kg_","X1kg_",snpid.list,fixed=T)
were.missing <- which(!snpid.list %in% colnames(myData))
# prv(snpid.list)
if(length(were.missing)>0){
cat("missing SNPs from grp list:\n"); print(grp.snps[[grp]][were.missing]) ; cat("\n")
snpid.list <- snpid.list[-were.missing]
rename <- grp.snps[[grp]][-were.missing]
} else {
rename <- grp.snps[[grp]]
}
# conditional analysis p value?
#sum(sapply(grp.snps,length))
#[1] 2319
#> .05/2319
#[1] 2.156102e-05
# prv(snpid.list)
if(length(snpid.list)<1) { warning("no snps left to test in this set"); next }
myDataFilt <- myData[,snpid.list]
colnames(myDataFilt) <- rename
if(followup) {
top.snp <- as.numeric(myDataFilt[,snps.next[grp]])-1
} else {
top.snp <- as.numeric(myDataFilt[,1])-1
}
#snp.rhs.tests(as.formula("Ph ~ 1"),snp.data=myDataFilt,allow.missing=.1,tests=rename)
#snp.rhs.tests(as.formula("Ph ~ top.snp"),snp.data=myDataFilt[,-1],allow.missing=.1,tests=rename[-1])
### now loop through until no further covariates are found ##
#if(is.null(the.sig)) { the.sig <- top.snp } # else it will be all the conditionally significant so far
found.conds <- TRUE
if(followup) {
excl.cols <- which(colnames(myDataFilt)==snps.next[grp])[1]
} else {
excl.cols <- 1
}
first <- T
sig.reg.snps <- snps.next[grp] #"top.snp" # snpid.list[1]
while(found.conds) {
## ## hack to convert selection to a matrix ##
## top.snp <- as.data.frame(myDataFilt[,excl.cols])
## for(jj in 1:ncol(top.snp)) {
## top.snp[,jj] <- as.numeric(top.snp[,jj])-1
## }
## top.snp <- as.matrix(top.snp)
##
# prv(top.snp)
terms <- paste(sig.reg.snps,collapse=" + ")
#print(colnames(myDataFilt)); print(terms)
if(covs) {
cond.res <- snp.rhs.tests(as.formula(paste("Ph ~ top.snp + strata(region13) + the.sex")),snp.data=myDataFilt[,],allow.missing=.1,tests=rename[-excl.cols])
} else {
cat("testing",terms,"[",excl.cols,"]\n")
top.snp[top.snp<0] <- NA
# prv.large(top.snp)
## print(summary(apply(top.snp,1,function(X) { length(which(is.na(X))) })))
mod.txt <- paste("Ph ~ top.snp")
for (dd in 1:length(sig.reg.snps)) {
if(!exists(sig.reg.snps[dd])) {
nuxt <- as.numeric(myDataFilt[,sig.reg.snps[dd]])-1
nuxt[nuxt<0] <- NA
assign(sig.reg.snps[dd],nuxt)
}
}
mod.txt <- paste("Pheno ~",terms)
## cond.res <- snp.rhs.tests(as.formula(mod.txt),snp.data=myDataFilt[,-excl.cols]) #,allow.missing=.1,uncertain=TRUE,tests=rename[-excl.cols])
cov.data1 <- myDataFilt[,-excl.cols];
cond.res <- list()
cov.data <- as.data.frame(cov.data1)
for(jj in 1:ncol(cov.data)) {
nuxt <- as.numeric(cov.data[,jj])-1
nuxt[nuxt<0] <- NA
cov.data[,jj] <- nuxt
# assign(colnames(cov.data)[jj], nuxt)
}
if(first) {
row.mis <- (apply(cov.data,1,function(X) { length(which(is.na(X)))/length(X) }))
}
col.mis <- apply(cov.data,2,function(X) { length(which(is.na(X)))/length(X) })
cov.data <- cov.data[row.mis<.03,col.mis<.03]
if(first){
Pheno <- Ph[row.mis<.03]
# Restrict each conditioning-SNP genotype vector (created via assign()
# above) to the rows retained by the missingness filter, so they line up
# with `Pheno` and `cov.data`.
# Fix: `terms` is one collapsed string ("snp1 + snp2 + ..."), so
# `terms[jj]` is NA for jj > 1 and a bogus name for jj == 1; the intended
# assign() target is the SNP's own name, `sig.reg.snps[jj]`.
for(jj in seq_along(sig.reg.snps)) {
assign(sig.reg.snps[jj], get(sig.reg.snps[jj])[row.mis<.03])
}
for(jj in 1:ncol(cov.data)) {
#print(colnames(cov.data)[jj])
assign(colnames(cov.data)[jj], cov.data[,jj])
}
}
#cov.data <- as.matrix(cov.data)
cat("testing",ncol(cov.data),"snps\n")
## cond.res <- snp.rhs.tests(as.formula(paste(mod.txt)), snp.data=cov.data1)
#print(cond.res)
# print(head(p.value(cond.res)[order(p.value(cond.res))],20) )
for (dd in 1:ncol(cov.data)) {
nxt <- glm(as.formula(paste(mod.txt,colnames(cov.data)[dd],sep=" + ")), family = binomial(logit),data=cov.data)
cond.res[[dd]] <- mysumfun(nxt,p.digits=250)[[1]][,2]
}
#print(cond.res)
}
if(is.list(cond.res)) { p.cond <- do.call("rbind",args=cond.res) } else { p.cond <- p.value(cond.res) }
if(!is.null(dim(p.cond))) { p.cond <- p.cond[,ncol(p.cond)] }
cond.sigs <- which(p.cond<=bonf)
if(length(cond.sigs)>0) {
naz <- is.na(p.cond)
the.p <- min(p.cond[!naz],na.rm=T)
the.min <- which(p.cond[!naz]==the.p)
if(length(the.min)>1) {
# cat("min length",length(the.min),"\n");
# the.chi <- max(chi.squared(cond.res)[!naz],na.rm=T)
# the.min <- which(chi.squared(cond.res)[!naz]==the.chi)
if(length(the.min)>1) { cat("min chi length",length(the.min),"\n"); the.min <- the.min[1] }
}
the.sig.lab <- rename[-excl.cols][the.min]
cat("new snp:",the.sig.lab," with conditional p value:",the.p,"\n")
the.sig <- myDataFilt[,the.sig.lab]
excl.cols <- c(excl.cols,which(colnames(myDataFilt)==the.sig.lab))
} else {
found.conds <- FALSE
}
sig.reg.snps <- colnames(myDataFilt)[excl.cols]
glm.result <- glm(as.formula(mod.txt), family = binomial(logit))
ii <- mysumfun(glm.result,p.digits=250,o.digits=3)
print(ii)
first <- FALSE
if(length(excl.cols)>5) { found.conds <- FALSE }
}
print(sig.reg.snps) # append this to the list of top/conditional snps
}
## add chr result
}
### OTHER STUFF
if(F) {
tab <- all.summary[order(all.summary[,"CHR"]),]
colnames(table1a)
cn <- c("CHR","MAF_UKcontrol","OR_CC","P_CC","OR_Fam","P_Fam","OR_Meta","P_Meta")
# Fix: `outframe` is undefined -- the object built above is `out.frame`,
# and it is a character matrix (not a data frame), so `$` would fail;
# use matrix column indexing by name instead.
snp.list <- out.frame[,"whichSNP"]
more.data <- table1a[match(snp.list,table1a[,1]),cn]
colnames(more.data)[2] <- "MAF"
all.summary <- cbind(out.frame,more.data)
print(all.summary[order(all.summary[,"CHR"]),])
# Per-gene minimum p-value for one chromosome's results table. The value
# is returned invisibly (mirroring the original, whose last expression
# was an assignment) as a named 1-d array indexed by gene.
do.one.chr <- function(chr.tab) {
  invisible(tapply(chr.tab$p.value, chr.tab$gene, min, na.rm = TRUE))
}
#chr.results.list <- lapply(tt,do.one.chr)
results.list <- do.one.chr(tt)
prv(results.list)
gen.list <- names(results.list)
gen.list.spl <- strsplit(gen.list,";",fixed=T)
prv(gs)
bands <- lapply(gen.list.spl,
function(X) {
ind <- match(X,gs$gene)
OO <- paste(unique(paste(chr(gs)[ind],gs$band[ind],sep="")),collapse=";")
return(OO) })
res2 <- data.frame(genes=substr(gen.list,1,20),p.value=as.numeric(results.list),band=unlist(bands))
res2 <- res2[order(res2[,2]),]
prv.large(res2,rows=50,cols=3)
unique(res2$band)
}
#table1:
# 1.Chr 2.causal 3.best 4.r2 5.all.M>m 6.MAF 7.OR 8.P 9.prev 10.All 11.OR 12.P 13.Ref
# 1: band - function lookup from location (chr=3, pos =4)
# 2: gene names - function lookup from location (chr=3, pos =4)
# 3: rsid - snpid in col2, some of which will need rsid lookup
# 4: r^2: could look in dataset at col2, versus c#9
# 5: col 5 > col 6 ?
# 6: col 7
# 7: col 10
# 8: col 12
# 9: prev snp?
# 10: prev snp alleles ?
# 11: prev snp OR ?
# 12: prev snp P ?
# 13: prev snp ref ?
|
library(MiDA)
### Name: MiNTreesAjust
### Title: Ajust number of trees parameter for fitting generalized boosted
### regression models
### Aliases: MiNTreesAjust
### ** Examples
#get gene expression and specimen data
data("IMexpression");data("IMspecimen")
#sample expression matrix and specimen data for binary classification,
#only "NORM" and "EBV" specimens are left
SampleMatrix<-MiDataSample(IMexpression, IMspecimen$diagnosis,"norm", "ebv")
SampleSpecimen<-MiSpecimenSample(IMspecimen$diagnosis, "norm", "ebv")
#Fitting, low tuning for faster running. Test ntrees
set.seed(1)
ClassRes<-MiNTreesAjust(SampleMatrix, SampleSpecimen, test.frac = 5, times = 3,
ntrees = c(10, 20), shrinkage = 1, intdepth = 2)
ClassRes[[1]] # train accuracy
ClassRes[[2]] # test accuracy
|
/data/genthat_extracted_code/MiDA/examples/MiNTreesAjust.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 806
|
r
|
# Extracted example for MiDA::MiNTreesAjust -- tunes the number of boosting
# trees for a gradient-boosted classifier on microarray data.
# NOTE(review): requires the MiDA package and its bundled IMexpression /
# IMspecimen datasets; not runnable without them.
library(MiDA)
### Name: MiNTreesAjust
### Title: Ajust number of trees parameter for fitting generalized boosted
### regression models
### Aliases: MiNTreesAjust
### ** Examples
#get gene expression and specimen data
data("IMexpression");data("IMspecimen")
#sample expression matrix and specimen data for binary classification,
#only "NORM" and "EBV" specimens are left
SampleMatrix<-MiDataSample(IMexpression, IMspecimen$diagnosis,"norm", "ebv")
SampleSpecimen<-MiSpecimenSample(IMspecimen$diagnosis, "norm", "ebv")
#Fitting, low tuning for faster running. Test ntrees
# Seed fixed so the train/test resampling below is reproducible.
set.seed(1)
ClassRes<-MiNTreesAjust(SampleMatrix, SampleSpecimen, test.frac = 5, times = 3,
ntrees = c(10, 20), shrinkage = 1, intdepth = 2)
ClassRes[[1]] # train accuracy
ClassRes[[2]] # test accuracy
|
## makeCacheMatrix creates an object of type makeCacheMatrix() that
## stores a matrix (provided as a formal parameter by user) & provides
## 4 methods for operating on the object. [1] set() allows the user to
## reset the matrix. [2] get() allows the user to get the value of the matrix.
## [3] setinv() allows the function cacheSolve() to cache the inverse of the
## matrix. [4] getinv allows the user to get the value of the local var 'inv',
## which is the inverse that's cached by cacheSolve() when cacheSolve() is
## given an argument of type makeCacheMatrix().
## Constructor for a matrix wrapper that can memoise its inverse.
## Returns a list of four accessors:
##   set(y)    - replace the stored matrix and invalidate the cached inverse
##   get()     - return the stored matrix
##   setinv(i) - store a computed inverse (used by cacheSolve())
##   getinv()  - return the cached inverse, or NULL if none has been stored
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL  # matrix changed, so any cached inverse is stale
    },
    get = function() x,
    setinv = function(inv.cacheSolve) cached <<- inv.cacheSolve,
    getinv = function() cached
  )
}
## cacheSolve() caches the inverse of the matrix stored in the object of
## type makeCacheMatrix(). It's assumed that the matrix provided as a formal
## param to makeCacheMatrix is always invertible. See previous comment for
## further details. Example use of these 2 functions:
##
## matrix1 <- matrix(c(2,2,3,2), nrow = 2)
## myMatrix <- makeCacheMatrix(matrix1)
## cacheSolve(myMatrix)
## Return the inverse of the matrix held in a makeCacheMatrix() object,
## computing it at most once: a previously cached inverse is returned
## immediately (with a "getting cached data" message), otherwise solve()
## is called, the result stored via x$setinv(), and then returned.
## Assumes the stored matrix is invertible. `...` is forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinv(fresh)
  fresh
}
|
/cachematrix.R
|
no_license
|
fbgithub1/ProgrammingAssignment2
|
R
| false
| false
| 1,677
|
r
|
## makeCacheMatrix creates an object of type makeCacheMatrix() that
## stores a matrix (provided as a formal parameter by user) & provides
## 4 methods for operating on the object. [1] set() allows the user to
## reset the matrix. [2] get() allows the user to get the value of the matrix.
## [3] setinv() allows the function cacheSolve() to cache the inverse of the
## matrix. [4] getinv allows the user to get the value of the local var 'inv',
## which is the inverse that's cached by cacheSolve() when cacheSolve() is
## given an argument of type makeCacheMatrix().
## Create a caching wrapper around matrix `x`. The returned list exposes
## set/get for the matrix itself and setinv/getinv for a cached inverse;
## set() also clears any previously cached inverse, since it no longer
## corresponds to the stored matrix.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  replace_matrix <- function(y) {
    x <<- y
    inv <<- NULL
  }
  read_matrix <- function() x
  store_inverse <- function(inv.cacheSolve) inv <<- inv.cacheSolve
  read_inverse <- function() inv
  list(set = replace_matrix,
       get = read_matrix,
       setinv = store_inverse,
       getinv = read_inverse)
}
## cacheSolve() caches the inverse of the matrix stored in the object of
## type makeCacheMatrix(). It's assumed that the matrix provided as a formal
## param to makeCacheMatrix is always invertible. See previous comment for
## further details. Example use of these 2 functions:
##
## matrix1 <- matrix(c(2,2,3,2), nrow = 2)
## myMatrix <- makeCacheMatrix(matrix1)
## cacheSolve(myMatrix)
## Memoised matrix inversion for a makeCacheMatrix() object: reuse the
## cached inverse when one exists (announcing "getting cached data"),
## otherwise compute it with solve(), store it on the object, and return
## it. The stored matrix is assumed invertible; `...` goes to solve().
cacheSolve <- function(x, ...) {
  hit <- x$getinv()
  if (is.null(hit)) {
    hit <- solve(x$get(), ...)
    x$setinv(hit)
  } else {
    message("getting cached data")
  }
  hit
}
|
library(igraph)
library(foreach)
library(doParallel)
library(dplyr)
library(tidyr)
library(broom)
source('functions/data_generators.R')
source('functions/covariate_functions.R')
source('functions/response_functions.R')
source('functions/existing_estimators.R')
source('functions/proposed_estimators.R')
source('functions/variance_estimators.R')
source('functions/precompute_matrices.R')
calculate_true_ATE = function(param, g, pid) {
y1 = dynamic_time_response(w=rep(1, vcount(g)), g, param)
y0 = dynamic_time_response(w=rep(0, vcount(g)), g, param)
c(pid=pid, ATE=mean(y1) - mean(y0))
}
## Simulation driver: estimate the "true" ATE for every parameter setting
## on a pre-built small-world graph by brute-force Monte-Carlo simulation.
#load('data/caltech.Rdata')
# Loads the graph object `g_sm_large` used in the simulation below.
load('data/smallworld_large.Rdata')
set.seed(2018)
# Monte-Carlo replications per parameter setting.
n_reps = 5000
# Worker count for the doParallel backend driving %dopar% below.
n_cores = 50
registerDoParallel(cores=n_cores)
# Earlier parameter grid, kept for reference:
# params = purrr::cross(list(
#   b_intercept = 0,
#   b_direct = 1,
#   b_spill = c(0, 0.25, 0.5, 0.75, 1),#0.1, 0.2, 0.3, 0.4, 0.5),
#   max_t = c(2, 4),
#   is_probit = TRUE,
#   noise_sd = c(1)
# ))
# Full cross-product of model parameters: 5 spillover strengths x 2 horizons.
params = purrr::cross(list(
  b_intercept = -0.5,
  b_direct = 1,
  b_spill = c(0, 0.25, 0.5, 0.75, 1),#0.1, 0.2, 0.3, 0.4, 0.5),
  max_t = c(2, 4),
  is_probit = TRUE,
  noise_sd = 1 # c(1, 3)
))
print('Calculating true ATE...')
# Calculate true ATE by simulation.
# Outer loop (sequential, %do%) walks the parameter settings; inner loop
# (parallel, %dopar%) draws n_reps simulated ATEs, stacked row-wise, then
# the pipe averages the draws down to one row per parameter setting (pid).
true_ATE = foreach(i = 1:length(params), .combine=rbind) %do% {
  param = params[[i]]
  print(unlist(param))
  foreach(rep = 1:n_reps, .combine=rbind, .inorder=FALSE) %dopar% {
    calculate_true_ATE(param, g_sm_large, pid=i)
  }
} %>%
  data.frame %>%
  group_by(pid) %>%
  summarise(ATE=mean(ATE))
# Report elapsed time and persist the per-setting true ATEs.
print(proc.time())
write.csv(true_ATE, file='results/sim_lim/true_ATE_probit.csv', row.names=FALSE)
|
/sim_lim_truth.R
|
permissive
|
waterknows/regression-adjustments
|
R
| false
| false
| 1,610
|
r
|
library(igraph)
library(foreach)
library(doParallel)
library(dplyr)
library(tidyr)
library(broom)
source('functions/data_generators.R')
source('functions/covariate_functions.R')
source('functions/response_functions.R')
source('functions/existing_estimators.R')
source('functions/proposed_estimators.R')
source('functions/variance_estimators.R')
source('functions/precompute_matrices.R')
# One Monte-Carlo draw of the true average treatment effect (ATE) for a
# single parameter setting: simulate outcomes with everyone treated (w = 1)
# and with everyone untreated (w = 0), and return the difference in means.
#   param : list of response-model parameters (see the `params` grid below)
#   g     : igraph graph the response model runs on
#   pid   : integer tag identifying the parameter setting in the output
calculate_true_ATE = function(param, g, pid) {
  n_nodes <- vcount(g)
  outcome_treated <- dynamic_time_response(w = rep(1, n_nodes), g, param)
  outcome_control <- dynamic_time_response(w = rep(0, n_nodes), g, param)
  c(pid = pid, ATE = mean(outcome_treated) - mean(outcome_control))
}
## Simulation driver: estimate the "true" ATE for every parameter setting
## on a pre-built small-world graph by brute-force Monte-Carlo simulation.
#load('data/caltech.Rdata')
# Loads the graph object `g_sm_large` used in the simulation below.
load('data/smallworld_large.Rdata')
set.seed(2018)
# Monte-Carlo replications per parameter setting.
n_reps = 5000
# Worker count for the doParallel backend driving %dopar% below.
n_cores = 50
registerDoParallel(cores=n_cores)
# Earlier parameter grid, kept for reference:
# params = purrr::cross(list(
#   b_intercept = 0,
#   b_direct = 1,
#   b_spill = c(0, 0.25, 0.5, 0.75, 1),#0.1, 0.2, 0.3, 0.4, 0.5),
#   max_t = c(2, 4),
#   is_probit = TRUE,
#   noise_sd = c(1)
# ))
# Full cross-product of model parameters: 5 spillover strengths x 2 horizons.
params = purrr::cross(list(
  b_intercept = -0.5,
  b_direct = 1,
  b_spill = c(0, 0.25, 0.5, 0.75, 1),#0.1, 0.2, 0.3, 0.4, 0.5),
  max_t = c(2, 4),
  is_probit = TRUE,
  noise_sd = 1 # c(1, 3)
))
print('Calculating true ATE...')
# Calculate true ATE by simulation.
# Outer loop (sequential, %do%) walks the parameter settings; inner loop
# (parallel, %dopar%) draws n_reps simulated ATEs, stacked row-wise, then
# the pipe averages the draws down to one row per parameter setting (pid).
true_ATE = foreach(i = 1:length(params), .combine=rbind) %do% {
  param = params[[i]]
  print(unlist(param))
  foreach(rep = 1:n_reps, .combine=rbind, .inorder=FALSE) %dopar% {
    calculate_true_ATE(param, g_sm_large, pid=i)
  }
} %>%
  data.frame %>%
  group_by(pid) %>%
  summarise(ATE=mean(ATE))
# Report elapsed time and persist the per-setting true ATEs.
print(proc.time())
write.csv(true_ATE, file='results/sim_lim/true_ATE_probit.csv', row.names=FALSE)
|
## setup dataset with all the relevant data on the individual
## One row per author (Id): post count plus per-user medians of the language
## measures (WC, WPS, Sixltr, Number.Characters) and profile fields collapsed
## with max()/min() (presumably constant within a user -- verify upstream);
## factors are coerced via as.character() before max() to avoid level codes.
user_wc = data_orig0 %>%
  group_by(Id) %>%
  summarize(posts = n(),
            Female = max(Female),
            WC = median(WC),
            WPS = median(WPS),
            Sixltr = median(Sixltr),
            Number.Characters = median(Number.Characters),
            AcademicHierarchyStrict = max(AcademicHierarchyStrict),
            Job_Title_S = max(as.character(Job_Title_S)),
            Discipline = max(as.character(Discipline)),
            PhD_Year = max(as.character(PhD_Year)),
            PhD_Institution_SR_Bin = min(PhD_Institution_SR_Bin),
            Workplace_SR_Bin = min(Workplace_SR_Bin),
            Total_Citations = max(Total_Citations))
## Individual-level exploratory plots: how word count (WC), words per
## sentence (WPS) and six-letter-word rate (Sixltr) vary with job title,
## discipline, posting volume and total citations.
# Word count by job title, ordered by mean WC.
ggplot(user_wc, aes(reorder(Job_Title_S, WC, mean), WC)) +
  stat_summary() +
  xlab('Job Title') +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# Word count by discipline.
ggplot(user_wc, aes(reorder(Discipline, WC, mean), WC)) +
  stat_summary() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# Word count vs. number of posts, with a linear fit.
ggplot(user_wc, aes(posts, WC)) +
  stat_summary() +
  geom_smooth(method = 'lm')
# Word count vs. citations (log-log), coloured by academic rank.
ggplot(user_wc, aes(Total_Citations, WC, color = as.factor(AcademicHierarchyStrict))) +
  stat_summary_bin() +
  geom_smooth(method = 'lm') +
  scale_x_log10() +
  scale_y_log10()
# Words per sentence: same set of plots.
ggplot(user_wc, aes(reorder(Job_Title_S, WPS, mean), WPS)) +
  stat_summary() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_wc, aes(reorder(Discipline, WPS, mean), WPS)) +
  stat_summary() +
  xlab('Discipline') +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_wc, aes(posts, WPS)) +
  stat_summary() +
  geom_smooth(method = 'lm')
ggplot(user_wc, aes(Total_Citations, WPS, color = as.factor(AcademicHierarchyStrict))) +
  stat_summary_bin() +
  geom_smooth(method = 'lm') +
  scale_x_log10() +
  scale_y_log10()
# Six-letter-word rate: same set of plots.
ggplot(user_wc, aes(reorder(Job_Title_S, Sixltr, mean), Sixltr)) +
  stat_summary() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_wc, aes(reorder(Discipline, Sixltr, mean), Sixltr)) +
  stat_summary() +
  xlab('Discipline') +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_wc, aes(posts, Sixltr)) +
  stat_summary() +
  geom_smooth(method = 'lm')
ggplot(user_wc, aes(Total_Citations, Sixltr, color = as.factor(AcademicHierarchyStrict))) +
  stat_summary_bin() +
  geom_smooth(method = 'lm', se = F) +
  scale_x_log10() +
  scale_y_log10()
## Per-thread mean and sd of WC and WPS, joined back onto the post-level
## data below to z-score each user against the thread they posted in.
thread_normalizer = data_orig0 %>%
  group_by(ThreadId) %>%
  summarize(Thread_WC_mean = mean(WC),
            Thread_WPS_mean = mean(WPS),
            Thread_WC_sd = sd(WC),
            Thread_WPS_sd = sd(WPS))
## Thread-normalised per-user activity measures.
## Stage 1 (group by Id, ThreadId): z-score each user's mean WC/WPS against
## the thread-level mean/sd columns joined in from `thread_normalizer`.
## Stage 2 (group by Id): collapse to one row per user.
## BUG FIX: in stage 2, WC_max/WPS_max were previously computed *after*
## WC/WPS had been overwritten by their means; dplyr::summarize lets later
## expressions see freshly created columns, so the "max" columns silently
## equalled the means. The maxima are now computed first. (This moves
## WC_max/WPS_max earlier in the column order; downstream code selects
## columns by name, so callers are unaffected.)
user_thread_wc = data_orig0 %>%
  left_join(thread_normalizer, by = "ThreadId") %>%  # explicit key; same as the natural join
  group_by(Id, ThreadId) %>%
  summarize(posts = n(),
            Female = max(Female),
            # z-score the user's per-thread means against the thread distribution
            WC = (mean(WC) - mean(Thread_WC_mean)) / mean(Thread_WC_sd),
            WPS = (mean(WPS) - mean(Thread_WPS_mean)) / mean(Thread_WPS_sd),
            Sixltr = mean(Sixltr),
            Number.Characters = sum(Number.Characters),
            AcademicHierarchyStrict = max(AcademicHierarchyStrict),
            Job_Title_S = max(as.character(Job_Title_S)),
            Discipline = max(as.character(Discipline)),
            PhD_Institution_SR_Bin = min(PhD_Institution_SR_Bin),
            Workplace_SR_Bin = min(Workplace_SR_Bin),
            Total_Citations = max(Total_Citations),
            Citations_Year = max(Citations_Year),
            H_Index = max(H_Index),
            i10_Index = max(i10_Index)) %>%
  group_by(Id) %>%
  summarize(posts = sum(posts),
            Female = max(Female),
            WC_max = max(WC),    # max of the per-thread z-scores ...
            WPS_max = max(WPS),
            WC = mean(WC),       # ... taken before WC/WPS are overwritten
            WPS = mean(WPS),
            Sixltr = mean(Sixltr),
            Number.Characters = median(Number.Characters),
            AcademicHierarchyStrict = max(AcademicHierarchyStrict),
            Job_Title_S = max(as.character(Job_Title_S)),
            Discipline = max(as.character(Discipline)),
            PhD_Institution_SR_Bin = min(PhD_Institution_SR_Bin),
            Workplace_SR_Bin = min(Workplace_SR_Bin),
            Total_Citations = max(Total_Citations),
            Citations_Year = max(Citations_Year),
            H_Index = max(H_Index),
            i10_Index = max(i10_Index))
## Repeat the word-count plots, but on the thread-normalised per-user values.
ggplot(user_thread_wc, aes(Job_Title_S, WC)) +
  stat_summary() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_thread_wc, aes(Discipline, WC)) +
  stat_summary() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_thread_wc, aes(posts, WC)) +
  stat_summary() +
  geom_smooth(method = 'lm')
# Normalised WC vs. citations (log-log), by academic rank and by job title.
ggplot(user_thread_wc, aes(Total_Citations, WC, color = as.factor(AcademicHierarchyStrict))) +
  stat_summary_bin() +
  geom_smooth(method = 'lm') +
  scale_x_log10() +
  scale_y_log10()
ggplot(user_thread_wc, aes(Total_Citations, WC, color = Job_Title_S)) +
  stat_summary_bin() +
  geom_smooth(method = 'lm') +
  scale_x_log10() +
  scale_y_log10()
# Normalised WC against alternative citation indices.
ggplot(user_thread_wc, aes(H_Index, WC, color = Job_Title_S)) +
  stat_summary_bin()
ggplot(user_thread_wc, aes(i10_Index, WC, color = Job_Title_S)) +
  stat_summary_bin()
ggplot(user_thread_wc, aes(i10_Index, WC)) +
  stat_summary_bin()
## double checking the posts stats
# qplot((user_thread_wc = data_orig0 %>%
# group_by(Id,Thread) %>%
# summarize(posts = n(),
# Female = max(Female),
# WC = mean(WC),
# WPS = mean(WPS),
# Sixltr = mean(Sixltr),
# Number.Characters = sum(Number.Characters),
# AcademicHierarchyStrict = max(AcademicHierarchyStrict),
# Job_Title_S = max(Job_Title_S),
# Discipline = max(Discipline),
# PhD_Year = max(PhD_Year),
# PhD_Institution_SR_Bin = min(PhD_Institution_SR_Bin),
# Workplace_SR_Bin = min(Workplace_SR_Bin),
# Total_Citations = max(Total_Citations)))$posts)
## Now the same views for normalised words per sentence (WPS).
ggplot(user_thread_wc, aes(Job_Title_S, WPS)) +
  stat_summary() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_thread_wc, aes(Discipline, WPS)) +
  stat_summary() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_thread_wc, aes(posts, WPS)) +
  stat_summary_bin() +
  geom_smooth(method = 'lm')
ggplot(user_thread_wc, aes(Total_Citations, WPS, color = as.factor(AcademicHierarchyStrict))) +
  stat_summary_bin() +
  geom_smooth(method = 'lm') +
  scale_x_log10() +
  scale_y_log10()
## And six-letter-word rate -- note these use the raw `user_wc` table again.
ggplot(user_wc, aes(Job_Title_S, Sixltr)) +
  stat_summary() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_wc, aes(Discipline, Sixltr)) +
  stat_summary() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_wc, aes(posts, Sixltr)) +
  stat_summary() +
  geom_smooth(method = 'lm')
ggplot(user_wc, aes(Total_Citations, Sixltr, color = as.factor(AcademicHierarchyStrict))) +
  stat_summary_bin() +
  geom_smooth(method = 'lm') +
  scale_x_log10() +
  scale_y_log10()
ggplot(user_wc, aes(Total_Citations, Sixltr, color = as.factor(Discipline))) +
  stat_summary_bin() +
  geom_smooth(method = 'lm') +
  scale_x_log10() +
  scale_y_log10()
# Look by discipline.
ggplot(user_wc, aes(Total_Citations, WPS, color = as.factor(Discipline))) +
  stat_summary_bin() +
  geom_smooth(method = 'lm') +
  scale_x_log10() +
  scale_y_log10()
## Regression models relating language measures to status. glm() with
## family = 'gaussian' is equivalent to lm(); the `.` formulas use every
## remaining column of the data frame as a predictor.
# WPS on all thread-normalised covariates, excluding identifiers and WC terms.
model_wps = glm(WPS ~ . - Id - WPS_max - WC - WC_max, family = 'gaussian', user_thread_wc)
summary(model_wps)
# Discipline x citations-per-year interaction model for WPS.
model_wps_squared = glm(WPS ~ Discipline*Citations_Year, family = 'gaussian', user_thread_wc)
summary(model_wps_squared)
# Sixltr on the raw per-user table, dropping identifiers and collinear terms.
model_six = glm(Sixltr ~ . - Id - WPS - WC - PhD_Year - Discipline, family = 'gaussian', user_wc)
summary(model_six)
# Reduced Sixltr models.
model_six_squared = glm(Sixltr ~ Discipline + Total_Citations + AcademicHierarchyStrict, family = 'gaussian', user_wc)
summary(model_six_squared)
model_six_simp = glm(Sixltr ~ Total_Citations, family = 'gaussian', user_wc)
summary(model_six_simp)
|
/hypothesis_2.R
|
no_license
|
lots-of-things/edge_forum
|
R
| false
| false
| 8,031
|
r
|
## setup dataset with all the relevant data on the individual
user_wc = data_orig0 %>%
group_by(Id) %>%
summarize(posts = n(),
Female = max(Female),
WC = median(WC),
WPS = median(WPS),
Sixltr = median(Sixltr),
Number.Characters = median(Number.Characters),
AcademicHierarchyStrict = max(AcademicHierarchyStrict),
Job_Title_S = max(as.character(Job_Title_S)),
Discipline = max(as.character(Discipline)),
PhD_Year = max(as.character(PhD_Year)),
PhD_Institution_SR_Bin = min(PhD_Institution_SR_Bin),
Workplace_SR_Bin = min(Workplace_SR_Bin),
Total_Citations = max(Total_Citations))
#plot individual stats for word count
ggplot(user_wc,aes(reorder(Job_Title_S,WC,mean),WC))+
stat_summary()+
xlab('Job Title') +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_wc,aes(reorder(Discipline,WC,mean),WC))+
stat_summary()+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_wc,aes(posts,WC))+
stat_summary()+
geom_smooth(method='lm')
ggplot(user_wc,aes(Total_Citations,WC,color=as.factor(AcademicHierarchyStrict)))+
stat_summary_bin()+
geom_smooth(method='lm')+
scale_x_log10()+
scale_y_log10()
ggplot(user_wc,aes(reorder(Job_Title_S,WPS,mean),WPS))+
stat_summary()+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_wc,aes(reorder(Discipline,WPS,mean),WPS))+
stat_summary()+
xlab('Discipline')+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_wc,aes(posts,WPS))+
stat_summary()+
geom_smooth(method='lm')
ggplot(user_wc,aes(Total_Citations,WPS,color=as.factor(AcademicHierarchyStrict)))+
stat_summary_bin()+
geom_smooth(method='lm')+
scale_x_log10()+
scale_y_log10()
ggplot(user_wc,aes(reorder(Job_Title_S,Sixltr,mean),Sixltr))+
stat_summary()+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_wc,aes(reorder(Discipline,Sixltr,mean),Sixltr))+
stat_summary()+
xlab('Discipline')+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_wc,aes(posts,Sixltr))+
stat_summary()+
geom_smooth(method='lm')
ggplot(user_wc,aes(Total_Citations,Sixltr,color=as.factor(AcademicHierarchyStrict)))+
stat_summary_bin()+
geom_smooth(method='lm',se=F)+
scale_x_log10()+
scale_y_log10()
thread_normalizer = data_orig0 %>%
group_by(ThreadId) %>%
summarize(Thread_WC_mean = mean(WC),
Thread_WPS_mean = mean(WPS),
Thread_WC_sd = sd(WC),
Thread_WPS_sd = sd(WPS))
## Thread-normalised per-user activity measures.
## Stage 1 (group by Id, ThreadId): z-score each user's mean WC/WPS against
## the thread-level mean/sd columns joined in from `thread_normalizer`.
## Stage 2 (group by Id): collapse to one row per user.
## BUG FIX: in stage 2, WC_max/WPS_max were previously computed *after*
## WC/WPS had been overwritten by their means; dplyr::summarize lets later
## expressions see freshly created columns, so the "max" columns silently
## equalled the means. The maxima are now computed first. (This moves
## WC_max/WPS_max earlier in the column order; downstream code selects
## columns by name, so callers are unaffected.)
user_thread_wc = data_orig0 %>%
  left_join(thread_normalizer, by = "ThreadId") %>%  # explicit key; same as the natural join
  group_by(Id, ThreadId) %>%
  summarize(posts = n(),
            Female = max(Female),
            # z-score the user's per-thread means against the thread distribution
            WC = (mean(WC) - mean(Thread_WC_mean)) / mean(Thread_WC_sd),
            WPS = (mean(WPS) - mean(Thread_WPS_mean)) / mean(Thread_WPS_sd),
            Sixltr = mean(Sixltr),
            Number.Characters = sum(Number.Characters),
            AcademicHierarchyStrict = max(AcademicHierarchyStrict),
            Job_Title_S = max(as.character(Job_Title_S)),
            Discipline = max(as.character(Discipline)),
            PhD_Institution_SR_Bin = min(PhD_Institution_SR_Bin),
            Workplace_SR_Bin = min(Workplace_SR_Bin),
            Total_Citations = max(Total_Citations),
            Citations_Year = max(Citations_Year),
            H_Index = max(H_Index),
            i10_Index = max(i10_Index)) %>%
  group_by(Id) %>%
  summarize(posts = sum(posts),
            Female = max(Female),
            WC_max = max(WC),    # max of the per-thread z-scores ...
            WPS_max = max(WPS),
            WC = mean(WC),       # ... taken before WC/WPS are overwritten
            WPS = mean(WPS),
            Sixltr = mean(Sixltr),
            Number.Characters = median(Number.Characters),
            AcademicHierarchyStrict = max(AcademicHierarchyStrict),
            Job_Title_S = max(as.character(Job_Title_S)),
            Discipline = max(as.character(Discipline)),
            PhD_Institution_SR_Bin = min(PhD_Institution_SR_Bin),
            Workplace_SR_Bin = min(Workplace_SR_Bin),
            Total_Citations = max(Total_Citations),
            Citations_Year = max(Citations_Year),
            H_Index = max(H_Index),
            i10_Index = max(i10_Index))
## repoeat word count plots but summed over thread
ggplot(user_thread_wc,aes(Job_Title_S,WC))+
stat_summary()+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_thread_wc,aes(Discipline,WC))+
stat_summary()+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_thread_wc,aes(posts,WC))+
stat_summary()+
geom_smooth(method='lm')
ggplot(user_thread_wc,aes(Total_Citations,WC,color=as.factor(AcademicHierarchyStrict)))+
stat_summary_bin()+
geom_smooth(method='lm')+
scale_x_log10()+
scale_y_log10()
ggplot(user_thread_wc,aes(Total_Citations,WC,color=Job_Title_S))+
stat_summary_bin()+
geom_smooth(method='lm')+
scale_x_log10()+
scale_y_log10()
ggplot(user_thread_wc,aes(H_Index,WC,color=Job_Title_S))+
stat_summary_bin()
ggplot(user_thread_wc,aes(i10_Index,WC,color=Job_Title_S))+
stat_summary_bin()
ggplot(user_thread_wc,aes(i10_Index,WC))+
stat_summary_bin()
## double checking the posts stats
# qplot((user_thread_wc = data_orig0 %>%
# group_by(Id,Thread) %>%
# summarize(posts = n(),
# Female = max(Female),
# WC = mean(WC),
# WPS = mean(WPS),
# Sixltr = mean(Sixltr),
# Number.Characters = sum(Number.Characters),
# AcademicHierarchyStrict = max(AcademicHierarchyStrict),
# Job_Title_S = max(Job_Title_S),
# Discipline = max(Discipline),
# PhD_Year = max(PhD_Year),
# PhD_Institution_SR_Bin = min(PhD_Institution_SR_Bin),
# Workplace_SR_Bin = min(Workplace_SR_Bin),
# Total_Citations = max(Total_Citations)))$posts)
## now look at WPS
ggplot(user_thread_wc,aes(Job_Title_S,WPS))+
stat_summary()+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_thread_wc,aes(Discipline,WPS))+
stat_summary()+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_thread_wc,aes(posts,WPS))+
stat_summary_bin()+
geom_smooth(method='lm')
ggplot(user_thread_wc,aes(Total_Citations,WPS,color=as.factor(AcademicHierarchyStrict)))+
stat_summary_bin()+
geom_smooth(method='lm')+
scale_x_log10()+
scale_y_log10()
## and now Six letter words
ggplot(user_wc,aes(Job_Title_S,Sixltr))+
stat_summary()+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_wc,aes(Discipline,Sixltr))+
stat_summary()+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplot(user_wc,aes(posts,Sixltr))+
stat_summary()+
geom_smooth(method='lm')
ggplot(user_wc,aes(Total_Citations,Sixltr,color=as.factor(AcademicHierarchyStrict)))+
stat_summary_bin()+
geom_smooth(method='lm')+
scale_x_log10()+
scale_y_log10()
ggplot(user_wc,aes(Total_Citations,Sixltr,color=as.factor(Discipline)))+
stat_summary_bin()+
geom_smooth(method='lm')+
scale_x_log10()+
scale_y_log10()
#Look by discipline
ggplot(user_wc,aes(Total_Citations,WPS,color=as.factor(Discipline)))+
stat_summary_bin()+
geom_smooth(method='lm')+
scale_x_log10()+
scale_y_log10()
model_wps = glm(WPS ~ . - Id - WPS_max - WC - WC_max, family = 'gaussian', user_thread_wc )
summary(model_wps)
model_wps_squared = glm(WPS ~ Discipline*Citations_Year, family = 'gaussian', user_thread_wc )
summary(model_wps_squared)
model_six = glm(Sixltr ~ . - Id - WPS - WC - PhD_Year - Discipline, family = 'gaussian', user_wc )
summary(model_six)
model_six_squared = glm(Sixltr ~ Discipline + Total_Citations + AcademicHierarchyStrict , family = 'gaussian', user_wc )
summary(model_six_squared)
model_six_simp = glm(Sixltr ~ Total_Citations, family = 'gaussian', user_wc )
summary(model_six_simp)
|
library(MASS)
### Name: Null
### Title: Null Spaces of Matrices
### Aliases: Null
### Keywords: algebra
### ** Examples
# The function is currently defined as
# Given a matrix M, return a matrix N whose columns form an orthonormal
# basis with t(N) %*% M == 0: the trailing columns of the full Q factor
# of qr(M) -- those beyond the rank -- are orthogonal to col(M).
# When the rank is 0, the first ncol(M) columns of Q are returned instead.
function(M)
{
  tmp <- qr(M)
  set <- if(tmp$rank == 0L) seq_len(ncol(M)) else -seq_len(tmp$rank)
  qr.Q(tmp, complete = TRUE)[, set, drop = FALSE]
}
|
/data/genthat_extracted_code/MASS/examples/Null.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 323
|
r
|
library(MASS)
### Name: Null
### Title: Null Spaces of Matrices
### Aliases: Null
### Keywords: algebra
### ** Examples
# The function is currently defined as
# Given a matrix M, return a matrix N whose columns form an orthonormal
# basis with t(N) %*% M == 0: the trailing columns of the full Q factor
# of qr(M) -- those beyond the rank -- are orthogonal to col(M).
# When the rank is 0, the first ncol(M) columns of Q are returned instead.
function(M)
{
  tmp <- qr(M)
  set <- if(tmp$rank == 0L) seq_len(ncol(M)) else -seq_len(tmp$rank)
  qr.Q(tmp, complete = TRUE)[, set, drop = FALSE]
}
|
## plot2.R -- line plot of Global_active_power over time for the two analysis
## days (2007-02-01 and 2007-02-02), saved to plot2.png (480x480).
# Column classes: Date and Time as character, the remaining seven
# measurement fields as numeric.
cc <- c(rep("character", 2), rep("numeric", 7))
# Read only the header row as a character vector; the bulk read below skips
# past the unwanted date range, so the names must be re-attached afterwards.
cname <- as.character(read.csv("household_power_consumption.txt", sep = ";", na.strings = "?", nrows = 1, header = FALSE, stringsAsFactors = FALSE))
# The 2880 minute-level rows for the two target dates start at row 66638,
# so skip the preceding rows instead of reading the whole ~2M-row file.
dat <- read.csv(file = "household_power_consumption.txt", sep = ";", colClasses = cc, skip = 66637, header = FALSE, na.strings = "?", nrows = 2880)
## Alternative: read everything (nrows = 2075260) and
##   subset(dat, Date == "1/02/2007" | Date == "2/02/2007")
## -- noticeably slower (>30 s observed), hence the skip/nrows approach above.
# Re-attach the header names lost by skipping the first 66K rows.
colnames(dat) <- cname
# Parse Date, then combine Date + Time into a full timestamp.
dat$Date <- as.Date(dat$Date, "%d/%m/%Y")
dat$Time <- strptime(paste(dat$Date, dat$Time), format = "%Y-%m-%d %H:%M:%S")
# Single-panel layout.
par(mfrow = c(1, 1))
# FIX: the y-axis label previously read "Global Active Power(kilowatts" --
# missing the closing parenthesis and the space before the unit.
with(dat, plot(Time, Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = ""))
# Copy the screen device to a PNG file, then close the PNG device.
dev.copy(png, "plot2.png", height = 480, width = 480)
dev.off()
|
/plot2.R
|
no_license
|
ajoykum/ExData_Plotting1
|
R
| false
| false
| 2,035
|
r
|
## plot2.R -- line plot of Global_active_power over time for the two analysis
## days (2007-02-01 and 2007-02-02), saved to plot2.png (480x480).
# Column classes: Date and Time as character, the remaining seven
# measurement fields as numeric.
cc <- c(rep("character", 2), rep("numeric", 7))
# Read only the header row as a character vector; the bulk read below skips
# past the unwanted date range, so the names must be re-attached afterwards.
cname <- as.character(read.csv("household_power_consumption.txt", sep = ";", na.strings = "?", nrows = 1, header = FALSE, stringsAsFactors = FALSE))
# The 2880 minute-level rows for the two target dates start at row 66638,
# so skip the preceding rows instead of reading the whole ~2M-row file.
dat <- read.csv(file = "household_power_consumption.txt", sep = ";", colClasses = cc, skip = 66637, header = FALSE, na.strings = "?", nrows = 2880)
## Alternative: read everything (nrows = 2075260) and
##   subset(dat, Date == "1/02/2007" | Date == "2/02/2007")
## -- noticeably slower (>30 s observed), hence the skip/nrows approach above.
# Re-attach the header names lost by skipping the first 66K rows.
colnames(dat) <- cname
# Parse Date, then combine Date + Time into a full timestamp.
dat$Date <- as.Date(dat$Date, "%d/%m/%Y")
dat$Time <- strptime(paste(dat$Date, dat$Time), format = "%Y-%m-%d %H:%M:%S")
# Single-panel layout.
par(mfrow = c(1, 1))
# FIX: the y-axis label previously read "Global Active Power(kilowatts" --
# missing the closing parenthesis and the space before the unit.
with(dat, plot(Time, Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = ""))
# Copy the screen device to a PNG file, then close the PNG device.
dev.copy(png, "plot2.png", height = 480, width = 480)
dev.off()
|
######################### R Libraries Required #################################
################################################################################
library(shiny)
library(shinyWidgets)
library(shinydashboard)
library(dashboardthemes)
library(openxlsx)
library(readxl)
library(dplyr)
library(tidyr)
library(reshape2)
library(ggplot2)
library(lubridate)
library(sjlabelled)
library(lessR)
library(tidyverse)
library(zoo)
library(forecast)
############################### MAIN APP #######################################
## UI definition: shinydashboard page (header / sidebar / body) for the
## FitPal fitness tracker.
## Fixes in this revision:
##  * removed a trailing comma after the dashboard tab's fluidRow, which
##    passed an empty argument to tabItem() and errors on htmltools versions
##    that reject empty `...` arguments;
##  * corrected the user-facing typo "Excercise" -> "Exercise".
ui <- dashboardPage(
  # browser/tab title
  title = "Fitpal - Fitness Tracker",
  dashboardHeader(
    title = shinyDashboardLogo(
      theme = "purple_gradient",
      boldText = "FitPal:",
      mainText = "Fitness Tracker",
      badgeText = "v1.1"
    )),
  dashboardSidebar(
    # user panel; "text1" is rendered by the server with the entered name
    sidebarUserPanel(name = textOutput("text1"),
                     subtitle = a(href = "#",
                                  icon("circle",
                                       class = "text-success"),
                                  "Online")
    ),
    sidebarMenu(
      id = "tabs",
      # upload data & demographic information
      menuItem(text = "Update User Profile", tabName = "user_profile",
               icon = icon("id-card")),
      # data exploratory analysis & activity review
      menuItem(text = "View Activity Dashboard", tabName = "dashboard",
               icon = icon("dashboard")),
      # set goals
      menuItem(text = "Set Your Goals", tabName = "change_goals",
               icon = icon("bullseye")),
      # view progress
      menuItem(text = "Track Your Progress", icon = icon("bar-chart-o"),
               menuSubItem("Activity", tabName = "subitem1"),
               menuSubItem("Weight Loss", tabName = "subitem2")
      )
    ),
    width = "250px",
    collapsed = FALSE
  ),
  dashboardBody(
    # apply dashboard theme
    shinyDashboardThemes(
      theme = "blue_gradient"),
    tabItems(
      tabItem(
        # User profile & demographic input
        tabName = "user_profile",
        h3("User Profile", tagList(shiny::icon("id-card"))),
        fluidRow(
          box(textInput("name", "Name", placeholder = "Enter Your Name")),
          box(selectInput("gender", "Select Your Gender",
                          list("Male", "Female", "Transgender",
                               "Non-binary", "Prefer Not To Respond"))
          )
        ),
        fluidRow(
          box(numericInput("height", "Enter Your Height (in)",
                           value = 60, min = 20, max = 110, step = 1)),
          box(numericInput("weight", "Enter Your Weight (lb)",
                           value = 170, min = 5, max = 700, step = 1))
        ),
        # Data upload or sample-data selection
        h3("Health Data", tagList(shiny::icon("table"))),
        fluidRow(
          box(fileInput(inputId = "user_data",
                        label = "File upload:",
                        accept = ".xlsx"),
              # use the uploaded workbook as the data source
              actionButton(
                inputId = "use_uploaded",
                label = "Use Uploaded File",
                class = "btn-primary")
          ),
          # choose which sample sheet to preview
          box(selectInput("sample_data",
                          "Choose a sample sheet",
                          choices = c("Hourly Steps",
                                      "Hourly Calories",
                                      "Hourly Intensities",
                                      "Hourly Stand",
                                      "Daily Steps",
                                      "Daily Intensities",
                                      "Daily Activity",
                                      "Sleep Minutes",
                                      "Daily Sleep",
                                      "Daily Stand")),
              # download the sample workbook (doubles as an upload template)
              downloadButton("download_sample",
                             "Download Sample Data"),
              # use the bundled sample data as the data source
              actionButton(
                inputId = "use_sample",
                label = "Use Sample Data",
                class = "btn-primary")
          ),
          # preview of the selected sample sheet
          box(tableOutput("sample_table"), width = 12)
        )
      ),
      tabItem(
        # Review BMI graph and set goals
        tabName = "change_goals",
        # BMI graph
        h3("BMI Spectrum", tagList(shiny::icon("hand-holding-heart"))),
        fluidRow(
          box(
            plotOutput(outputId = "bmiplot"), width = 12
          )
        ),
        # Goal-setting knobs
        h3("Set Your Goals", tagList(shiny::icon("bullseye"))),
        fluidRow(
          box(
            knobInput(
              inputId = "steps_goal",
              label = "Daily Steps",
              value = 9000,
              min = 0,
              max = 100000,
              displayPrevious = TRUE,
              lineCap = "round",
              fgColor = "#428BCA",
              inputColor = "#428BCA"
            )
          ),
          box(
            knobInput(
              inputId = "stand_goal",
              label = "Daily Standing Hours",
              value = 6,
              min = 0,
              max = 24,
              displayPrevious = TRUE,
              lineCap = "round",
              fgColor = "#428BCA",
              inputColor = "#428BCA"
            )
          ),
          box(
            knobInput(
              inputId = "exercise_goal",
              label = "Daily Calories Burnt",
              value = 1800,
              min = 0,
              max = 4000,
              displayPrevious = TRUE,
              lineCap = "round",
              fgColor = "#428BCA",
              inputColor = "#428BCA"
            )
          ),
          box(
            knobInput(
              inputId = "sleep_goal",
              label = "Daily Sleep Hours",
              value = 8,
              min = 0,
              max = 24,
              displayPrevious = TRUE,
              lineCap = "round",
              fgColor = "#428BCA",
              inputColor = "#428BCA"
            )
          )
        ),
        # commit the four activity goals
        actionButton(
          inputId = "set_goal",
          label = "Update Goals",
          class = "btn-primary"),
        h3("Start Your Weight Loss Journey",
           tagList(shiny::icon("weight"))),
        fluidRow(
          box(
            knobInput(
              inputId = "weight_goal",
              label = "Set weight desired at the end of the month(lb)",
              value = 127,
              min = 60,
              max = 204,
              displayPrevious = TRUE,
              lineCap = "round",
              fgColor = "#428BCA",
              inputColor = "#428BCA"
            )
          )
        ),
        # commit the weight goal
        actionButton(
          inputId = "set_weight_goal",
          label = "Update Weight Goal",
          class = "btn-primary")
      ),
      tabItem(
        # Activity dashboard & exploratory analysis
        tabName = "dashboard",
        h3("Activity Dashboard", tagList(shiny::icon("tachometer-alt"))),
        fluidRow(
          # tab for steps analysis
          tabBox(
            title = tagList(shiny::icon("walking"), "Steps"),
            id = "steps", height = "auto",
            tabPanel(
              title = "Hourly",
              plotOutput(outputId = "hourlysteps")),
            tabPanel(
              title = "Weekly",
              plotOutput(outputId = "dailysteps")),
            tabPanel(
              title = "Historic",
              plotOutput(outputId = "historicsteps"))
          ),
          # tab for calories-burnt analysis
          tabBox(
            title = tagList(shiny::icon("running"), "Calories"),
            id = "cals", height = "auto",
            tabPanel(
              title = "Hourly",
              plotOutput(outputId = "hourlycalories")),
            tabPanel(
              title = "Weekly",
              plotOutput(outputId = "weeklycalories")),
            tabPanel(
              title = "Historic",
              plotOutput(outputId = "historiccalories"))
          ),
          # tab for intensity analysis
          tabBox(
            title = tagList(shiny::icon("dumbbell"), "Intensity"),
            id = "intensity", height = "auto",
            tabPanel(
              title = "Daily",
              plotOutput(outputId = "dailyintensity")),
            tabPanel(
              title = "Weekly",
              plotOutput(outputId = "weeklyintensity")),
            tabPanel(
              title = "Historic",
              plotOutput(outputId = "historicintensity"))
          ),
          # tab for distance analysis
          tabBox(
            title = tagList(shiny::icon("road"), "Distance"),
            id = "distance", height = "auto",
            tabPanel(
              title = "Daily",
              plotOutput(outputId = "dailydist")),
            tabPanel(
              title = "Weekly",
              plotOutput(outputId = "weeklydist")),
            tabPanel(
              title = "Historic",
              plotOutput(outputId = "historicdist"))
          ),
          # tab for sleep analysis
          tabBox(
            title = tagList(shiny::icon("bed"), "Sleep"),
            id = "sleep", height = "auto", width = 12,
            tabPanel(
              title = "Daily",
              plotOutput(outputId = "dailysleep")),
            tabPanel(
              title = "Weekly",
              plotOutput(outputId = "weeklysleep")),
            tabPanel(
              title = "Historic",
              plotOutput(outputId = "historicsleep"))
          )
        )
        # FIX: a trailing comma previously followed the fluidRow above,
        # passing an empty argument to tabItem().
      ),
      # NOTE(review): the original marked this tab "DELETE?" -- kept as-is.
      tabItem(
        tabName = "subitem1",
        h3("Don't Break The Chain", tagList(shiny::icon("calendar-check"))),
        fluidRow(
          box(
            h4("Historical Goal Completion"),
            plotOutput(outputId = "BreakChain")
          ),
          box(
            h4("Streak Count"),
            plotOutput(outputId = "activity_streak")
          )
        ),
        h3("Daily Tracker", tagList(shiny::icon("watch-fitness"))),
        fluidRow(
          box(
            h4("Daily Steps Goal"),
            plotOutput(outputId = "step_goal_plot")
          ),
          box(
            h4("Daily Stand Goal"),
            plotOutput(outputId = "stand_goal_plot")
          ),
          box(
            # FIX: label typo "Excercise" corrected
            h4("Daily Exercise Goal"),
            plotOutput(outputId = "calories_goal_plot")
          )
        )
      ),
      tabItem(
        tabName = "subitem2",
        h3("Historical Weight", tagList(shiny::icon("dumbbell"))),
        fluidRow(
          box(
            plotOutput(outputId = "weightLogInfo_plot"), width = 12
          )
        ),
        h3("Weight Loss Tracker", tagList(shiny::icon("bullseye"))),
        fluidRow(
          box(
            valueBoxOutput("calories", width = 12), width = 12
          ),
          box(
            valueBoxOutput("Target", width = 6),
            valueBoxOutput("Prediction", width = 6), width = 12
          )
        )
      )
    ) # tab items
  ) # end body
) # end dash page
server <- function(input, output, session) {
############################# USER PROFILE #####################################
sample_Input <- reactive({
input$use_sample
# select data set to show in preview
switch(input$sample_data,
"Hourly Steps" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="hourlySteps",
col_name=T)),
"Hourly Calories" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="hourlyCalories",
col_name=T)),
"Hourly Intensities" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="hourlyIntensities",
col_name=T)),
"Hourly Stand" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="hourlyStand",
col_name=T)),
"Daily Steps" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="dailySteps",
col_name=T)),
"Daily Intensities" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="dailyIntensities",
col_name=T)),
"Daily Activity" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="dailyActivity",
col_name=T)),
"Sleep Minutes" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="minuteSleep",
col_name=T)),
"Daily Sleep" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="sleepDay",
col_name=T)),
"Daily Stand" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="standDay",
col_name=T))
)
})
# sample data set sheet
output$sample_table <- renderTable({
head(sample_Input()[1:ncol(sample_Input())], n = 10)
})
# download template/sample data
sample <- loadWorkbook("Joined_Dataset_V2.xlsx")
output$download_sample <- downloadHandler(
filename = function() {
file <- "FitPal_Sample-Dataset_Template.xlsx"
},
content = function(file) {
# save all workbook including sheets
saveWorkbook(sample, file = file, overwrite = TRUE)
}
)
########################## ACTIVITY DASHVBOARD #################################
# Hourly Steps
# Hourly step counts for the last recorded day, taken from the uploaded
# workbook when one is active, otherwise from the bundled sample workbook.
hourlySteps <- reactive({
  # Referencing both buttons makes a click on either invalidate this reactive.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    hsteps <- read_excel(input$user_data$datapath, sheet = "hourlySteps")
  } else {
    # Sample-data fallback. Unlike the original `else if`, a plain `else`
    # can never leave `hsteps` undefined for an unexpected input state.
    hsteps <- read_excel("Joined_Dataset_V2.xlsx", sheet = "hourlySteps")
  }
  hsteps <- as.data.frame(hsteps)
  hsteps$ActivityHour <- as.POSIXct(hsteps$ActivityHour, format = "%Y-%m-%d")
  # Keep only the final day of the sample period (string coerced to POSIXct).
  # NOTE(review): date is hard-coded — TODO derive from max(ActivityHour).
  hsteps <- subset(hsteps, ActivityHour > "2016-05-12")
  hsteps
})
# Hourly steps graph: mean steps per hour, colour mapped to magnitude.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,
  output$hourlysteps <- renderPlot(
    hourlySteps() %>%
      group_by(ActivityHour) %>%
      summarize(StepTotal = mean(StepTotal)) %>%
      ggplot(aes(x = ActivityHour, y = StepTotal, fill = StepTotal)) +
      geom_col() +
      scale_fill_continuous(low = '#05fbd7', high = "#17667b") +
      labs(title = "Hourly Steps") +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4"))
  )
)
# Daily Steps
# Daily step totals for the last week of data (uploaded or sample workbook).
dailySteps <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    dsteps <- read_excel(input$user_data$datapath, sheet = "dailySteps")
  } else {
    # Sample-data fallback; can never leave `dsteps` undefined.
    dsteps <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailySteps")
  }
  dsteps <- as.data.frame(dsteps)
  dsteps$ActivityDay <- as.POSIXct(dsteps$ActivityDay, format = "%Y-%m-%d")
  # Final week of the sample period. NOTE(review): hard-coded date.
  dsteps <- subset(dsteps, ActivityDay > "2016-05-05")
  dsteps
})
# Daily steps graph: one bar per day of the last week.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,
  output$dailysteps <- renderPlot(
    ggplot(data = dailySteps(), aes(x = ActivityDay, y = StepTotal)) +
      geom_bar(stat = "identity", fill = "#27839a") +
      # NOTE(review): panel is "Daily Steps" but the title says "Weekly
      # Steps" — presumably "daily steps over the last week"; confirm intent.
      labs(title = "Weekly Steps",
           x = "Date", y = "Weekly Steps") +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4"))
  )
)
# Historic Steps
# Full daily step history, unfiltered (uploaded or sample workbook).
historicSteps <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    histsteps <- read_excel(input$user_data$datapath, sheet = "dailySteps")
  } else {
    # Sample-data fallback; can never leave `histsteps` undefined.
    histsteps <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailySteps")
  }
  # NOTE(review): unlike the other reactives, ActivityDay is not converted to
  # POSIXct here — presumably read_excel already returns a datetime; confirm.
  histsteps <- as.data.frame(histsteps)
  histsteps
})
# Historic steps graph: step totals over the whole recorded period.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,
  output$historicsteps <- renderPlot(
    ggplot(data = historicSteps(), aes(x = ActivityDay, y = StepTotal)) +
      geom_line(color = "#2cdeeb", size = 2) +
      xlab("Date") +
      ylab("Total Steps") +
      labs(title = "Historic Steps") +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4"))
  )
)
# Hourly Calories
# Hourly calorie burn for the last recorded day (uploaded or sample workbook).
hourlyCalories <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    hcal <- read_excel(input$user_data$datapath, sheet = "hourlyCalories")
  } else {
    # Sample-data fallback; can never leave `hcal` undefined.
    hcal <- read_excel("Joined_Dataset_V2.xlsx", sheet = "hourlyCalories")
  }
  hcal <- as.data.frame(hcal)
  hcal$ActivityHour <- as.POSIXct(hcal$ActivityHour, format = "%Y-%m-%d")
  # Final day of the sample period. NOTE(review): hard-coded date.
  hcal <- subset(hcal, ActivityHour > "2016-05-12")
  hcal
})
# Hourly calories graph: mean calories per hour, colour mapped to magnitude.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,
  output$hourlycalories <- renderPlot(
    hourlyCalories() %>%
      group_by(ActivityHour) %>%
      summarize(CaloriesTotal = mean(Calories)) %>%
      ggplot(aes(x = ActivityHour, y = CaloriesTotal, fill = CaloriesTotal)) +
      geom_col() +
      scale_fill_continuous(low = '#05fbd7', high = "#17667b") +
      labs(title = "Hourly Calories") +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4"))
  )
)
# Weekly Calories
# Daily calorie totals for the last week of data (uploaded or sample workbook).
weeklyCalories <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    wcal <- read_excel(input$user_data$datapath, sheet = "dailyActivity")
  } else {
    # Sample-data fallback; can never leave `wcal` undefined.
    wcal <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyActivity")
  }
  wcal <- as.data.frame(wcal)
  wcal$ActivityDate <- as.POSIXct(wcal$ActivityDate, format = "%Y-%m-%d")
  # Final week of the sample period. NOTE(review): hard-coded date.
  wcal <- subset(wcal, ActivityDate > "2016-05-06")
  wcal
})
# Weekly calories graph: one bar per day of the last week.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,
  output$weeklycalories <- renderPlot(
    ggplot(data = weeklyCalories(), aes(x = ActivityDate, y = Calories)) +
      geom_bar(stat = "identity", fill = "#27839a") +
      # Trailing space removed from the displayed title.
      labs(title = "Weekly Calories",
           x = "Date", y = "Weekly Calories") +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4"))
  )
)
# Historic Calories
# Full daily calorie history, unfiltered (uploaded or sample workbook).
historicCalories <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    histcal <- read_excel(input$user_data$datapath, sheet = "dailyActivity")
  } else {
    # Sample-data fallback; can never leave `histcal` undefined.
    histcal <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyActivity")
  }
  histcal <- as.data.frame(histcal)
  histcal
})
# Historic calories graph: calorie burn over the whole recorded period.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,
  output$historiccalories <- renderPlot(
    ggplot(historicCalories(), aes(x = ActivityDate, y = Calories)) +
      geom_line(color = "#2cdeeb", size = 2) +
      xlab("Date") +
      ylab("Calories Burned") +
      labs(title = "Historic Calories") +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4"))
  )
)
# Daily Intensities
# Hourly average intensity for the recent period (uploaded or sample workbook).
dailyIntensity <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    dint <- read_excel(input$user_data$datapath, sheet = "hourlyIntensities")
  } else {
    # Sample-data fallback; can never leave `dint` undefined.
    dint <- read_excel("Joined_Dataset_V2.xlsx", sheet = "hourlyIntensities")
  }
  dint <- as.data.frame(dint)
  dint$ActivityHour <- as.POSIXct(dint$ActivityHour, format = "%Y-%m-%d")
  # NOTE(review): hard-coded date window.
  dint <- subset(dint, ActivityHour > "2016-05-06")
  dint
})
# Daily intensity graph: average intensity per hour, colour-mapped.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,
  output$dailyintensity <- renderPlot(
    ggplot(data = dailyIntensity(),
           aes(x = ActivityHour, y = AverageIntensity, fill = AverageIntensity)) +
      geom_col() +
      scale_fill_continuous(low = '#05fbd7', high = "#17667b") +
      labs(title = "Daily Intensities") +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4"))
  )
)
# Weekly Intensities
# Active-minute breakdown (very / fairly / lightly) per day for the last week,
# melted to long format for a stacked-proportion bar chart.
weeklyIntensity <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    wint <- read_excel(input$user_data$datapath, sheet = "dailyIntensities")
  } else {
    # Sample-data fallback; can never leave `wint` undefined.
    wint <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyIntensities")
  }
  wint <- as.data.frame(wint)
  wint$ActivityDay <- as.POSIXct(wint$ActivityDay, format = "%Y-%m-%d")
  # NOTE(review): hard-coded date window.
  wint <- subset(wint, ActivityDay > "2016-05-06")
  wint <- wint %>% select(ActivityDay,
                          VeryActiveMinutes,
                          FairlyActiveMinutes,
                          LightlyActiveMinutes) %>% data.frame()
  # Long format: one row per (day, intensity category).
  wint <- melt(wint, id.vars = c("ActivityDay"))
  wint
})
# Weekly intensity graph: per-day proportions of each intensity category.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,
  output$weeklyintensity <- renderPlot(
    ggplot(data = weeklyIntensity(),
           aes(fill = variable, y = value, x = ActivityDay)) +
      geom_bar(position = position_fill(reverse = TRUE), stat = "identity") +
      labs(title = "Weekly Intensities") +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4")) +
      scale_colour_manual(values = c('LightlyActiveMinutes' = '#05fbd7',
                                     'FairlyActiveMinutes' = '#17667b',
                                     'VeryActiveMinutes' = "#f8766d"),
                          aesthetics = "fill")
  )
)
# Historic Intensities
# Full-history active-minute breakdown, melted to long format.
historicIntensity <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    hint <- read_excel(input$user_data$datapath, sheet = "dailyIntensities")
  } else {
    # Sample-data fallback; can never leave `hint` undefined.
    hint <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyIntensities")
  }
  hint <- as.data.frame(hint)
  hint <- hint %>% select(ActivityDay,
                          VeryActiveMinutes,
                          FairlyActiveMinutes,
                          LightlyActiveMinutes) %>% data.frame()
  # Long format: one row per (day, intensity category).
  hint <- melt(hint, id.vars = c("ActivityDay"))
  hint
})
# Historic intensity graph: proportions of each category over all days.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,
  output$historicintensity <- renderPlot(
    ggplot(data = historicIntensity(),
           aes(fill = variable, y = value, x = ActivityDay)) +
      geom_bar(position = position_fill(reverse = TRUE), stat = "identity") +
      labs(title = "Historic Intensities") +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4")) +
      scale_colour_manual(values = c('LightlyActiveMinutes' = '#05fbd7',
                                     'FairlyActiveMinutes' = '#17667b',
                                     'VeryActiveMinutes' = "#f8766d"),
                          aesthetics = "fill")
  )
)
# Weekly Distance
# Distance breakdown (very / moderately / lightly active) per day for the
# last week, melted to long format for a stacked-proportion bar chart.
weeklyDistance <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    wdist <- read_excel(input$user_data$datapath, sheet = "dailyActivity")
  } else {
    # Sample-data fallback; can never leave `wdist` undefined.
    wdist <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyActivity")
  }
  wdist <- as.data.frame(wdist)
  wdist$ActivityDate <- as.POSIXct(wdist$ActivityDate, format = "%Y-%m-%d")
  # NOTE(review): hard-coded date window.
  wdist <- subset(wdist, ActivityDate > "2016-05-06")
  wdist <- wdist %>% select(ActivityDate,
                            VeryActiveDistance,
                            ModeratelyActiveDistance,
                            LightActiveDistance) %>% data.frame()
  # Long format: one row per (day, distance category).
  wdist <- melt(wdist, id.vars = c("ActivityDate"))
  wdist
})
# Weekly distance graph: per-day proportions of each distance category.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,
  output$weeklydist <- renderPlot(
    ggplot(data = weeklyDistance(),
           aes(fill = variable, y = value, x = ActivityDate)) +
      geom_bar(position = position_fill(reverse = TRUE), stat = "identity") +
      labs(title = "Weekly Distance") +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4")) +
      scale_colour_manual(values = c('LightActiveDistance' = '#05fbd7',
                                     'ModeratelyActiveDistance' = '#17667b',
                                     'VeryActiveDistance' = "#f8766d"),
                          aesthetics = "fill")
  )
)
# Historic Distance
# Full-history distance breakdown, melted to long format.
historicDistance <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    hdist <- read_excel(input$user_data$datapath, sheet = "dailyActivity")
  } else {
    # Sample-data fallback; can never leave `hdist` undefined.
    hdist <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyActivity")
  }
  hdist <- as.data.frame(hdist)
  hdist <- hdist %>% select(ActivityDate,
                            VeryActiveDistance,
                            ModeratelyActiveDistance,
                            LightActiveDistance) %>% data.frame()
  # Long format: one row per (day, distance category).
  hdist <- melt(hdist, id.vars = c("ActivityDate"))
  hdist
})
# Historic distance graph: proportions of each category over all days.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,
  output$historicdist <- renderPlot(
    ggplot(data = historicDistance(),
           aes(fill = variable, y = value, x = ActivityDate)) +
      geom_bar(position = position_fill(reverse = TRUE), stat = "identity") +
      labs(title = "Historic Distance") +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4")) +
      scale_colour_manual(values = c('LightActiveDistance' = '#05fbd7',
                                     'ModeratelyActiveDistance' = '#17667b',
                                     'VeryActiveDistance' = "#f8766d"),
                          aesthetics = "fill")
  )
)
# Daily Sleep
# Per-hour asleep/awake indicator (Num: 1 = sleep minutes recorded in that
# hour, 0 = none) for a single hard-coded day of the sample period.
dailySleep <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    dsleep <- read_excel(input$user_data$datapath, sheet = "minuteSleep")
  } else {
    # Sample-data fallback; can never leave `dsleep` undefined.
    dsleep <- read_excel("Joined_Dataset_V2.xlsx", sheet = "minuteSleep")
  }
  # Split "date time" into day and clock parts, keep the distinct hours with
  # sleep minutes, and restrict to one day.
  # NOTE(review): the day is hard-coded — TODO derive from max(date).
  dsleep <- dsleep %>% separate(date, sep = " ", c("Days", "Time")) %>%
    separate(Time, sep = ":", c("Hours", "Mins", "Seconds"),
             extra = "merge", fill = "right") %>%
    group_by(Days) %>% summarise(Hours = as.numeric(unique(Hours))) %>%
    filter(Days == "2016-05-11")
  # Left-join onto a full hour grid so hours without sleep show as 0.
  # NOTE(review): clock hours parse as 0-23 but the grid is 1-24, so hour 0
  # can never match and hour 24 never occurs — confirm whether an off-by-one
  # shift is intended here.
  hours_list <- data.frame(Hour_number = 1:24)
  dsleep <- merge(hours_list, dsleep, by.x = "Hour_number",
                  by.y = "Hours", all.x = TRUE)
  dsleep <- dsleep %>% transmute(Hour_number = as.numeric(Hour_number),
                                 Num = ifelse(is.na(Days), 0, 1))
  dsleep
})
# Daily sleep graph: asleep/awake bar per hour of the day.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,
  output$dailysleep <- renderPlot(
    ggplot(data = dailySleep(), aes(x = Hour_number, y = Num)) +
      geom_bar(stat = "identity", fill = "#27839a") +
      labs(title = "Daily Sleep Trend", x = "Hour") +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4"))
  )
)
# Weekly Sleep
# Minutes asleep per night for the last week (uploaded or sample workbook).
weeklySleep <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    wsleep <- read_excel(input$user_data$datapath, sheet = "sleepDay")
  } else {
    # Sample-data fallback; can never leave `wsleep` undefined.
    wsleep <- read_excel("Joined_Dataset_V2.xlsx", sheet = "sleepDay")
  }
  wsleep$SleepDay <- as.POSIXct(wsleep$SleepDay, format = "%Y-%m-%d")
  # NOTE(review): hard-coded date window.
  wsleep <- subset(wsleep, SleepDay > "2016-05-05")
  wsleep <- as.data.frame(wsleep)
  wsleep
})
# Weekly sleep graph; the dashed line marks the 7-hour (420 min) guideline.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,
  output$weeklysleep <- renderPlot(
    ggplot(data = weeklySleep(), aes(x = SleepDay, y = TotalMinutesAsleep)) +
      geom_bar(stat = "identity", fill = "#27839a") +
      labs(title = "Weekly Sleep",
           x = "Date", y = "Weekly Sleep") +
      geom_abline(slope = 0, intercept = 420, col = "#f8766d", lty = 2) +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4"))
  )
)
# Historic Sleep
# Minutes asleep per night over the recorded period (after 2016-05-05).
historicSleep <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    hsleep <- read_excel(input$user_data$datapath, sheet = "sleepDay")
  } else {
    # Sample-data fallback; can never leave `hsleep` undefined.
    hsleep <- read_excel("Joined_Dataset_V2.xlsx", sheet = "sleepDay")
  }
  hsleep$SleepDay <- as.POSIXct(hsleep$SleepDay, format = "%Y-%m-%d")
  # NOTE(review): hard-coded date window.
  hsleep <- subset(hsleep, SleepDay > "2016-05-05")
  hsleep <- as.data.frame(hsleep)
  hsleep
})
# Historic sleep graph; dashed line marks the 7-hour (420 min) guideline.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,
  output$historicsleep <- renderPlot(
    ggplot(data = historicSleep(),
           aes(x = SleepDay, y = TotalMinutesAsleep)) +
      geom_line(color = "#2cdeeb", size = 2) +
      xlab("Date") +
      ylab("Sleep Time (in Minutes)") +
      geom_abline(slope = 0, intercept = 420, col = "#f8766d", lty = 2) +
      labs(title = "Historic Sleep") +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4"))
  )
)
############################ SET YOUR GOALS ####################################
# BMI Graph: plots the user's current, desired, and ideal BMI markers on a
# coloured BMI-category spectrum (underweight / normal / overweight / obese).
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,
  output$bmiplot <- renderPlot({
    # Current BMI from height (inches) and weight (pounds): lb/in^2 * 703.
    lb <- input$weight
    inches <- input$height
    bmi_output <- (lb / (inches^2)) * 703
    # Ideal weight = rounded midpoint of the recommended range per height.
    # NOTE(review): the original 20-branch if/else ladder mapped height 60
    # and every height from 62 to 76 inches to the SAME 97-127 lb range —
    # almost certainly copy-paste placeholders. The exact original values are
    # preserved below; TODO: replace with the real per-height CDC ranges.
    weight_ideal <- 100  # fallback (original behavior for non-integer heights)
    if (inches < 58) {
      weight_ideal <- round(mean(seq(from = 60, to = 114, by = 1)))
    } else if (inches > 76) {
      weight_ideal <- round(mean(seq(from = 160, to = 220, by = 1)))
    } else if (inches %in% 58:76) {
      # Heights whose range differs from the common 97-127 placeholder.
      special <- c("58" = round(mean(seq(from = 91, to = 118, by = 1))),
                   "59" = round(mean(seq(from = 94, to = 123, by = 1))),
                   "61" = round(mean(seq(from = 100, to = 131, by = 1))))
      key <- as.character(inches)
      weight_ideal <- if (key %in% names(special)) {
        special[[key]]
      } else {
        round(mean(seq(from = 97, to = 127, by = 1)))
      }
    }
    # Ideal and desired BMI at the user's height.
    bmi_ideal <- (weight_ideal / (inches^2)) * 703
    bmi_desired <- (input$weight_goal / (inches^2)) * 703
    # Category boundary curves for the BMI spectrum, clamped to the plot box.
    inch <- seq(from = 10, to = 120)
    # min-max
    lb_min <- numeric(length(inch)) + 10
    lb_max <- numeric(length(inch)) + 690
    # underweight boundary (BMI 18.5)
    lb_udr <- inch^2 * 18.5 / 703
    lb_udr[lb_udr > 690] <- 690
    lb_udr[lb_udr < 10] <- 10
    # overweight boundary (BMI 25)
    lb_ovr <- inch^2 * 25 / 703
    lb_ovr[lb_ovr > 690] <- 690
    lb_ovr[lb_ovr < 10] <- 10
    # obese boundary (BMI 30)
    lb_obe <- inch^2 * 30 / 703
    lb_obe[lb_obe > 690] <- 690
    lb_obe[lb_obe < 10] <- 10
    # Build the BMI polygon plot: one shaded band per category.
    ggplot(data = NULL, aes(x = 5, y = 700)) +
      xlim(10, 120) + ylim(5, 700) +
      labs(title = "BMI: Current vs. Desired vs. Ideal",
           x = "Height in Inches",
           y = "Weight in Pounds",
           caption = "Based on data from www.cdc.gov.
       Ideal BMI values are averages within recommended weight range.") +
      geom_polygon(aes(x = c(inch, rev(inch)),
                       y = c(lb_min, rev(lb_udr)),
                       fill = "#eb347d"),
                   alpha = 0.8) +
      geom_polygon(aes(x = c(inch, rev(inch)),
                       y = c(lb_udr, rev(lb_ovr)),
                       fill = "#34bdeb"),
                   alpha = 0.8) +
      geom_polygon(aes(x = c(inch, rev(inch)),
                       y = c(lb_ovr, rev(lb_obe)),
                       fill = "#9634eb"),
                   alpha = 0.8) +
      geom_polygon(aes(x = c(inch, rev(inch)),
                       y = c(lb_obe, rev(lb_max)),
                       fill = "#eb4034"),
                   alpha = 0.8) +
      # Annotated arrows marking the three BMI values.
      annotate(geom = "curve", x = input$height + 40, y = input$weight + 120,
               xend = input$height, yend = input$weight, curvature = .3,
               arrow = arrow(length = unit(2, "mm")),
               colour = "white", size = 1) +
      annotate(geom = "text", x = input$height + 48, y = input$weight + 120,
               label = paste("Your BMI (", round(bmi_output, 1), ")"
                             , sep = ""),
               colour = "white", size = 4.5, fontface = "bold") +
      annotate(geom = "curve", x = input$height + 30, y = weight_ideal + 100,
               xend = input$height, yend = weight_ideal, curvature = .3,
               arrow = arrow(length = unit(2, "mm")),
               colour = "black", size = 1) +
      annotate(geom = "text", x = input$height + 38, y = weight_ideal + 100,
               label = paste("Your Ideal BMI (", round(bmi_ideal, 1), ")"
                             , sep = ""),
               colour = "black", size = 4.5, fontface = "bold") +
      annotate(geom = "curve", x = input$height + 10, y = input$weight_goal + 80,
               xend = input$height, yend = input$weight_goal, curvature = .3,
               arrow = arrow(length = unit(2, "mm")),
               colour = "white", size = 1) +
      annotate(geom = "text", x = input$height + 18, y = input$weight_goal + 80,
               label = paste("Your Desired BMI (", round(bmi_desired, 1), ")"
                             , sep = ""),
               colour = "white", size = 4.5, fontface = "bold") +
      # Title, labels, and legend styling.
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            legend.position = "right",
            legend.title = element_text(size = 16, face = "bold"),
            legend.text = element_text(size = 12),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4")) +
      scale_fill_identity(name = "BMI Categories",
                          guide = "legend",
                          labels = c("Normal",
                                     "Overweight",
                                     "Underweight",
                                     "Obese"))
  })
)
############################# Track your activity ####################################
# Don't break the chain
# Historical goal-completion chart
# "Don't break the chain" data: one row per day x category (Move / Exercise /
# Sleep / Stand) with raw value, percent-of-goal, goal-met flag, and calendar
# helper columns for the tile chart.
df_activity_tall <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    D_dailyActivity <- read_excel(input$user_data$datapath, sheet = "dailyActivity")
    D_sleepDay <- read_excel(input$user_data$datapath, sheet = "sleepDay")
    D_standDay <- read_excel(input$user_data$datapath, sheet = "standDay")
  } else {
    # Sample-data fallback; can never leave the frames undefined.
    D_dailyActivity <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyActivity")
    D_sleepDay <- read_excel("Joined_Dataset_V2.xlsx", sheet = "sleepDay")
    D_standDay <- read_excel("Joined_Dataset_V2.xlsx", sheet = "standDay")
  }
  # Align date columns and inner-join the three sheets on the day.
  D_dailyActivity$ActivityDate <- as.POSIXct(D_dailyActivity$ActivityDate, format = "%Y-%m-%d")
  D_sleepDay$SleepDay <- as.POSIXct(D_sleepDay$SleepDay, format = "%Y-%m-%d")
  D_standDay$ActivityDate <- as.POSIXct(D_standDay$ActivityDate, format = "%Y-%m-%d")
  names(D_sleepDay)[names(D_sleepDay) == 'SleepDay'] <- "ActivityDate"
  d_merge <- merge(x = D_dailyActivity, y = D_sleepDay, by = "ActivityDate")
  d_merge <- merge(x = d_merge, y = D_standDay, by = "ActivityDate")
  d_merge <- d_merge %>% select(ActivityDate, TotalSteps, Calories, TotalMinutesAsleep, TotalStand)
  d_merge <- as.data.frame(d_merge)
  d_merge$ActivityDate <- as.POSIXct(d_merge$ActivityDate, format = "%Y-%m-%d")
  # Goals from the UI; sleep and stand goals are entered in hours.
  step_goal <- as.numeric(input$steps_goal)
  exercise_goal <- as.numeric(input$exercise_goal)
  sleep_goal <- as.numeric(input$sleep_goal) * 60
  stand_goal <- as.numeric(input$stand_goal) * 60
  # Per-day percent-of-goal values and TRUE/FALSE goal-met flags.
  df_activity_tidy <- d_merge %>%
    mutate(date = as.Date(ActivityDate)) %>%
    rename(move = TotalSteps,
           exercise = Calories,
           sleep = TotalMinutesAsleep,
           stand = TotalStand) %>%
    mutate(move_pct = move/step_goal,
           exercise_pct = exercise/exercise_goal,
           sleep_pct = sleep/sleep_goal,
           stand_pct = stand/stand_goal,
           move_bool = if_else(move_pct < 1, FALSE, TRUE),
           exercise_bool = if_else(exercise_pct < 1, FALSE, TRUE),
           sleep_bool = if_else(sleep_pct < 1, FALSE, TRUE),
           stand_bool = if_else(stand_pct < 1, FALSE, TRUE))
  # Long format: value / pct / boolean frames joined on (date, category).
  df_activity_tall_value <- df_activity_tidy %>%
    select(date, Move = move, Exercise = exercise, Sleep = sleep, Stand = stand) %>%
    gather(category, value, -date)
  df_activity_tall_pct <- df_activity_tidy %>%
    select(date, Move = move_pct, Exercise = exercise_pct, Sleep = sleep_pct, Stand = stand_pct) %>%
    gather(category, pct, -date)
  df_activity_tall_bool <- df_activity_tidy %>%
    select(date, Move = move_bool, Exercise = exercise_bool, Sleep = sleep_bool, Stand = stand_bool) %>%
    gather(category, boolean, -date)
  df_activity_tall <- df_activity_tall_value %>%
    left_join(df_activity_tall_pct, by = c("date", "category")) %>%
    left_join(df_activity_tall_bool, by = c("date", "category")) %>%
    # Calendar helpers: month start, week start, weekday, day-of-month.
    mutate(category = as_factor(category),
           month = ymd(paste(year(date), month(date), 1, sep = "-")),
           week = date - wday(date) + 1,
           wday = wday(date),
           day = day(date))
  df_activity_tall
})
# Goal-completion tile chart: one tile per day, green when the goal was met.
observeEvent(
  input$set_goal,
  output$BreakChain <- renderPlot(
    df_activity_tall() %>%
      ggplot(aes(x = wday, y = week, fill = boolean)) +
      geom_tile(col = "grey30", na.rm = FALSE) +
      theme(panel.grid.major = element_blank()) +
      scale_fill_manual(values = c("#f4f4f4", "mediumaquamarine")) +
      facet_wrap(~ category) +
      coord_fixed(ratio = 0.15) +
      # NOTE(review): logical `guides(fill = FALSE)` is deprecated in modern
      # ggplot2 (use "none"); kept for compatibility with the pinned version.
      guides(fill = FALSE) +
      labs(title = NULL,
           caption = 'Green:Goal Completed') +
      theme(axis.text.x = element_blank())
  )
)
# Streak count graph
# Consecutive-days-of-goal-met streaks per category.
# NOTE(review): the data preparation below duplicates df_activity_tall();
# consider having both reactives share a single prepared-data reactive.
df_activity_streak <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    D_dailyActivity <- read_excel(input$user_data$datapath, sheet = "dailyActivity")
    D_sleepDay <- read_excel(input$user_data$datapath, sheet = "sleepDay")
    D_standDay <- read_excel(input$user_data$datapath, sheet = "standDay")
  } else {
    # Sample-data fallback; can never leave the frames undefined.
    D_dailyActivity <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyActivity")
    D_sleepDay <- read_excel("Joined_Dataset_V2.xlsx", sheet = "sleepDay")
    D_standDay <- read_excel("Joined_Dataset_V2.xlsx", sheet = "standDay")
  }
  # Align date columns and inner-join the three sheets on the day.
  D_dailyActivity$ActivityDate <- as.POSIXct(D_dailyActivity$ActivityDate, format = "%Y-%m-%d")
  D_sleepDay$SleepDay <- as.POSIXct(D_sleepDay$SleepDay, format = "%Y-%m-%d")
  D_standDay$ActivityDate <- as.POSIXct(D_standDay$ActivityDate, format = "%Y-%m-%d")
  names(D_sleepDay)[names(D_sleepDay) == 'SleepDay'] <- "ActivityDate"
  d_merge <- merge(x = D_dailyActivity, y = D_sleepDay, by = "ActivityDate")
  d_merge <- merge(x = d_merge, y = D_standDay, by = "ActivityDate")
  d_merge <- d_merge %>% select(ActivityDate, TotalSteps, Calories, TotalMinutesAsleep, TotalStand)
  d_merge <- as.data.frame(d_merge)
  d_merge$ActivityDate <- as.POSIXct(d_merge$ActivityDate, format = "%Y-%m-%d")
  # Goals from the UI; sleep and stand goals are entered in hours.
  step_goal <- as.numeric(input$steps_goal)
  exercise_goal <- as.numeric(input$exercise_goal)
  sleep_goal <- as.numeric(input$sleep_goal) * 60
  stand_goal <- as.numeric(input$stand_goal) * 60
  # Per-day goal-met flags (only the booleans are needed for streaks — the
  # original also rebuilt the full value/pct "tall" frame here and never used
  # it; that dead computation has been removed).
  df_activity_tidy <- d_merge %>%
    mutate(date = as.Date(ActivityDate)) %>%
    rename(move = TotalSteps,
           exercise = Calories,
           sleep = TotalMinutesAsleep,
           stand = TotalStand) %>%
    mutate(move_pct = move/step_goal,
           exercise_pct = exercise/exercise_goal,
           sleep_pct = sleep/sleep_goal,
           stand_pct = stand/stand_goal,
           move_bool = if_else(move_pct < 1, FALSE, TRUE),
           exercise_bool = if_else(exercise_pct < 1, FALSE, TRUE),
           sleep_bool = if_else(sleep_pct < 1, FALSE, TRUE),
           stand_bool = if_else(stand_pct < 1, FALSE, TRUE))
  df_activity_tall_bool <- df_activity_tidy %>%
    select(date, Move = move_bool, Exercise = exercise_bool, Sleep = sleep_bool, Stand = stand_bool) %>%
    gather(category, boolean, -date)
  # Streak counter: `x`/`y` are run identifiers built from sign changes of
  # the boolean series; streak = position within the current TRUE run.
  df_activity_streak <- df_activity_tall_bool %>%
    mutate(category = as_factor(category)) %>%
    arrange(category, date) %>%
    group_by(category,
             x = cumsum(c(TRUE, diff(boolean) %in% c(-1))),
             y = cumsum(c(TRUE, diff(boolean) %in% c(-1, 1)))) %>%
    mutate(streak = if_else(boolean == FALSE, 0L, row_number())) %>%
    ungroup()
  df_activity_streak
})
# Streak line chart: streak length over time, one panel per category.
observeEvent(
  input$set_goal,
  output$activity_streak <- renderPlot(
    ggplot(df_activity_streak(), aes(x = date, y = streak, group = x, col = category)) +
      geom_line() +
      facet_grid(category ~ .) +
      # NOTE(review): logical guides() value is deprecated in modern ggplot2.
      guides(fill = FALSE) +
      labs(title = NULL)
  )
)
# Daily Tracker
# Daily steps goal chart: Achieved vs. Remaining share of today's step goal.
data_steps <- reactive({
  # Re-run when either data-source button is pressed.
  c(input$use_uploaded, input$use_sample)
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    data_steps <- read_excel(input$user_data$datapath, sheet = "hourlySteps")
  } else {
    # Sample-data fallback; can never leave `data_steps` undefined.
    data_steps <- read_excel("Joined_Dataset_V2.xlsx", sheet = "hourlySteps")
  }
  # NOTE(review): the tracked day is hard-coded to the sample period's last
  # full day — TODO derive it from max(date) so uploaded data works too.
  data_steps <- data_steps %>% mutate(date = as_date(ActivityHour)) %>% filter(date == "2016-05-12")
  total_steps <- sum(data_steps["StepTotal"])
  # Steps still to go; never negative (goal - achieved, floored at 0).
  target_steps <- as.numeric(input$steps_goal)
  if (target_steps >= total_steps) {
    target_steps <- target_steps - total_steps
  } else {
    target_steps <- 0
  }
  # Build Achieved/Remaining counts directly instead of materialising one
  # data-frame row PER STEP just to count the rows again (the original
  # rep()-based approach allocated tens of thousands of rows).
  counts <- c(Remaining = as.integer(target_steps),
              Achieved = as.integer(total_steps))
  counts <- counts[counts > 0]
  data_steps <- data.frame(Steps = names(counts), n = counts,
                           row.names = NULL) %>%
    arrange(desc(Steps)) %>%
    mutate(percentage = round(n / sum(n), 4) * 100,
           lab.pos = cumsum(percentage) - .5 * percentage)
  data_steps
})
# Donut chart of the step-goal progress.
observeEvent(
  input$set_goal,
  output$step_goal_plot <- renderPlot(
    ggplot(data = data_steps(),
           aes(x = 2, y = percentage, fill = Steps)) +
      geom_bar(stat = "identity") +
      coord_polar("y", start = 200) +
      geom_text(aes(y = lab.pos, label = paste(percentage, "%", sep = "")), col = "white") +
      theme_void() +
      scale_fill_brewer(palette = "Dark2") +
      xlim(.2, 2.5)
  )
)
#Daily stand goal chart
data_stand <- reactive({
c(input$use_uploaded, input$use_sample)
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
data_stand <- read_excel(input$user_data$datapath, sheet = "hourlyStand")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
data_stand <- read_excel("Joined_Dataset_V2.xlsx", sheet = "hourlyStand")
}
data_stand <- data_stand %>% mutate(date = as_date(ActivityHour)) %>% filter(date == "2016-05-12")
total_stand <- sum(data_stand["Stand"])
target_stand <- (as.numeric(input$stand_goal)*60)
if (target_stand >= total_stand) {
target_stand <- target_stand - total_stand
}
else {
target_stand <- 0
}
data_stand <- data.frame("Stand" = c(rep("Achieved", total_stand), rep("Remaining", target_stand)))
data_stand <- data_stand %>%
filter(Stand != "NA") %>%
group_by(Stand) %>%
count() %>%
ungroup()%>%
arrange(desc(Stand)) %>%
mutate(percentage = round(n/sum(n),4)*100,
lab.pos = cumsum(percentage)-.5*percentage)
data_stand
})
#
observeEvent(
input$set_goal,
output$stand_goal_plot <- renderPlot(
ggplot(data = data_stand(),
aes(x = 2, y = percentage, fill = Stand))+
geom_bar(stat = "identity")+
coord_polar("y", start = 200) +
geom_text(aes(y = lab.pos, label = paste(percentage,"%", sep = "")), col = "white") +
theme_void() +
scale_fill_brewer(palette = "Dark2")+
xlim(.2,2.5)
)
)
#daily calories chart
data_calories <- reactive({
c(input$use_uploaded, input$use_sample)
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
data_calories <- read_excel(input$user_data$datapath, sheet = "dailyCalories")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
data_calories <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyCalories")
}
data_calories <- data_calories %>% mutate(date = as_date(ActivityDay)) %>% filter(date == "2016-05-12")
total_calories <- sum(data_calories["Calories"])
target_calories <- as.numeric(input$exercise_goal)
if (target_calories >= total_calories){
target_calories <- target_calories - total_calories
}
else{
target_calories <-0
}
data_calories <- data.frame("Calories" = c(rep("Achieved", total_calories), rep("Remaining", target_calories)))
data_calories <- data_calories %>%
filter(Calories != "NA") %>%
group_by(Calories) %>%
count() %>%
ungroup()%>%
arrange(desc(Calories)) %>%
mutate(percentage = round(n/sum(n),4)*100,
lab.pos = cumsum(percentage)-.5*percentage)
data_calories
})
#
observeEvent(
input$set_goal,
output$calories_goal_plot <- renderPlot(
ggplot(data = data_calories(),
aes(x = 2, y = percentage, fill = Calories))+
geom_bar(stat = "identity")+
coord_polar("y", start = 200) +
geom_text(aes(y = lab.pos, label = paste(percentage,"%", sep = "")), col = "white") +
theme_void() +
scale_fill_brewer(palette = "Dark2")+
xlim(.2,2.5)
)
)
#Weight loss tab
#HIstorical Weight Chart
weightLogInfo <- reactive({
c(input$use_uploaded, input$use_sample)
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
weightLogInfo <- read_excel(input$user_data$datapath, sheet = "weightLogInfo")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
weightLogInfo <- read_excel("Joined_Dataset_V2.xlsx", sheet = "weightLogInfo")
}
weightLogInfo
})
observeEvent(
c(input$use_uploaded, input$use_sample),
output$weightLogInfo_plot <- renderPlot(
ggplot(weightLogInfo(), aes(x = Date, y = WeightPounds))+
geom_line(color = "#17667b", size = 2) +
xlab("Date")+
ylab("Weight (lb)")+
theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
axis.text = element_text(size = 12),
axis.title = element_text(size = 12, face = "bold"),
panel.background = element_rect(fill = "white"),
axis.line = element_line(size = 1, colour = "black"),
panel.grid.major = element_line(colour = "#f4f4f4")) +
stat_smooth(method = "lm", col = "red")
)
)
#Forecast - ARIMA Model
forcast_calories_burn <- reactive({
c(input$use_uploaded, input$use_sample)
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
data_calories_F <- read_excel(input$user_data$datapath, sheet = "dailyCalories")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
data_calories_F <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyCalories")
}
df<-data.frame(data_calories_F)
df<-select(df, -1)
z <- read.zoo(df, format = "%Y-%m-%d")
class(z)
tss<-as.ts(z)
tss2 <- log(tss)
moving_avg <- aggregate(tss,FUN=mean)
#lines(moving_avg,col='orange',lwd=2)
arima_1 <- auto.arima(tss)
accuracy(arima_1)
list<-forecast(arima_1,h=18)
forcast_calories_burn <- round(sum(list[["mean"]]), digits=0)
#forcast_weight_loss <- forcast_calories_burn/3500
forcast_calories_burn
})
#Weight Loss Tracker
observeEvent(
input$weight_goal,
output$calories <-
if (round(input$weight-(forcast_calories_burn()/3500), digits = 0) >= (input$weight_goal)) {
renderValueBox({
valueBox(value = tags$p("Based on your activity data you will burn " , forcast_calories_burn(), " calories by the end of the month", style = "font-size: 75%;"),
"Calories", icon = icon("exclamation-triangle"), color = "orange")
})
}
else {
renderValueBox({
valueBox(value = tags$p("Based on your activity data you will burn " , forcast_calories_burn(), " calories by the end of the month", style = "font-size: 75%;"),
"Calories", icon = icon("check-circle"), color = "purple")
})
}
)
observeEvent(
input$weight_goal,
output$Prediction <-
if (round(input$weight-(forcast_calories_burn()/3500), digits = 0) >= (input$weight_goal)) {
renderValueBox({
valueBox(value = tags$p("You are predicted to weigh ", round(input$weight-(forcast_calories_burn()/3500), digits = 0), " lbs by the end of the month" , style = "font-size: 50%;"),
#paste0("You will be ","lbs short of your goal at the end of the month"), style = "font-size: 50%;" ,
"Prediction", color = "yellow")
})
}
else {
renderValueBox({
valueBox(value = tags$p("You are predicted to weigh ", round(input$weight-(forcast_calories_burn()/3500), digits = 0), " lbs by the end of the month", style = "font-size: 50%;"),
#paste0("You will be ","lbs short of your goal at the end of the month"), style = "font-size: 50%;" ,
"Prediction", color = "green")
})
}
)
observeEvent(
input$weight_goal,
output$Target <- renderValueBox({
valueBox(value = tags$p("You Desired Weight is ", input$weight_goal, " lbs", style = "font-size: 50%;"),
#paste0("Your Desired Weight is"), , style = "font-size: 50%;" ,
"Target", color = "light-blue")
})
)
  # Display the user's name in the sidebar user panel.
  # isolate() keeps the name field from re-rendering the text on every
  # keystroke; the output only refreshes when a data-source button fires.
  observeEvent(
    c(input$use_uploaded, input$use_sample),
    output$text1 <- renderText({paste(isolate(input$name))})
  )
}
# Run the application (ui and server objects are defined earlier in the file)
shinyApp(ui = ui, server = server)
|
/fitpal_app.R
|
no_license
|
gargharshul/fitpal-fitness-tracker
|
R
| false
| false
| 63,868
|
r
|
######################### R Libraries Required #################################
################################################################################
library(shiny)
library(shinyWidgets)
library(shinydashboard)
library(dashboardthemes)
library(openxlsx)
library(readxl)
library(dplyr)
library(tidyr)
library(reshape2)
library(ggplot2)
library(lubridate)
library(sjlabelled)
library(lessR)
library(tidyverse)
library(zoo)
library(forecast)
############################### MAIN APP #######################################
# Dashboard UI: header with logo, sidebar navigation (profile / dashboard /
# goals / progress), and a themed body with one tabItem per sidebar entry.
ui <- dashboardPage(
  # dashboard title
  title = "Fitpal - Fitness Tracker",
  dashboardHeader(
    title = shinyDashboardLogo(
      theme = "purple_gradient",
      boldText = "FitPal:",
      mainText = "Fitness Tracker",
      badgeText = "v1.1"
    )),
  dashboardSidebar(
    # User panel at the top of the sidebar; text1 is filled in by the server.
    sidebarUserPanel(name = textOutput("text1"),
                     subtitle = a(href = "#",
                                  icon("circle",
                                       class = "text-success"),
                                  "Online")
    ),
    sidebarMenu(
      id = "tabs",
      # upload data & demographic information
      menuItem(text = "Update User Profile", tabName = "user_profile",
               icon = icon("id-card")),
      # data exploratory analysis & activity review
      menuItem(text = "View Activity Dashboard", tabName = "dashboard",
               icon = icon("dashboard")),
      # set goals
      menuItem(text = "Set Your Goals", tabName = "change_goals", icon =
                 icon("bullseye")),
      # view progress
      menuItem(text = "Track Your Progress", icon = icon("bar-chart-o"),
               menuSubItem("Activity", tabName = "subitem1"),
               menuSubItem("Weight Loss", tabName = "subitem2")
      )
    ),
    width = "250px",
    collapsed = FALSE
  ),
  dashboardBody(
    # upload theme
    shinyDashboardThemes(
      theme = "blue_gradient"),
    tabItems(
      tabItem(
        # User profile & demographic input
        tabName = "user_profile",
        h3("User Profile", tagList(shiny::icon("id-card"))),
        fluidRow(
          box(textInput("name", "Name", placeholder = "Enter Your Name")),
          box(selectInput("gender", "Select Your Gender",
                          list("Male", "Female", "Transgender",
                               "Non-binary","Prefer Not To Respond")
          )
          )
        ),
        fluidRow(
          box(numericInput("height", "Enter Your Height (in)",
                           value = 60 , min = 20, max = 110, step = 1)),
          box(numericInput("weight", "Enter Your Weight (lb)",
                           value = 170 , min = 5, max = 700, step = 1))
        ),
        # Data Upload or Data Sample Selection
        h3("Health Data", tagList(shiny::icon("table"))),
        fluidRow(
          box(fileInput(inputId = "user_data",
                        label = "File upload:",
                        accept = ".xlsx"),
              # Use uploaded data set
              actionButton(
                inputId = "use_uploaded",
                label = "Use Uploaded File",
                class = "btn-primary")
          ),
          # choose sample data set sheet to preview
          box(selectInput("sample_data",
                          "Choose a sample sheet",
                          choices = c("Hourly Steps",
                                      "Hourly Calories",
                                      "Hourly Intensities",
                                      "Hourly Stand",
                                      "Daily Steps",
                                      "Daily Intensities",
                                      "Daily Activity",
                                      "Sleep Minutes",
                                      "Daily Sleep",
                                      "Daily Stand")),
              # Download 'template' / sample data set
              downloadButton("download_sample",
                             "Download Sample Data"),
              # Use sample data set
              actionButton(
                inputId = "use_sample",
                label = "Use Sample Data",
                class = "btn-primary")
          ),
          # preview sample data set sheet selected
          box(tableOutput("sample_table"), width = 12)
        )
      ),
      tabItem(
        # Review BMI graph and Set Goals
        tabName = "change_goals",
        # BMI Graph
        h3("BMI Spectrum", tagList(shiny::icon("hand-holding-heart"))),
        fluidRow(
          box(
            plotOutput(outputId = "bmiplot"), width = 12
          )
        ),
        # Goal Setting: four knob widgets read by the goal-tracking reactives.
        h3("Set Your Goals", tagList(shiny::icon("bullseye"))),
        fluidRow(
          box(
            knobInput(
              inputId = "steps_goal",
              label = "Daily Steps",
              value = 9000,
              min = 0,
              max = 100000,
              displayPrevious = TRUE,
              lineCap = "round",
              fgColor = "#428BCA",
              inputColor = "#428BCA"
            )
          ),
          box(
            knobInput(
              inputId = "stand_goal",
              label = "Daily Standing Hours",
              value = 6,
              min = 0,
              max = 24,
              displayPrevious = TRUE,
              lineCap = "round",
              fgColor = "#428BCA",
              inputColor = "#428BCA"
            )
          ),
          box(
            knobInput(
              inputId = "exercise_goal",
              label = "Daily Calories Burnt",
              value = 1800,
              min = 0,
              max = 4000,
              displayPrevious = TRUE,
              lineCap = "round",
              fgColor = "#428BCA",
              inputColor = "#428BCA"
            )
          ),
          box(
            knobInput(
              inputId = "sleep_goal",
              label = "Daily Sleep Hours",
              value = 8,
              min = 0,
              max = 24,
              displayPrevious = TRUE,
              lineCap = "round",
              fgColor = "#428BCA",
              inputColor = "#428BCA"
            )
          )
        ),
        # Update goals
        actionButton(
          inputId = "set_goal",
          label = "Update Goals",
          class = "btn-primary"),
        h3("Start Your Weight Loss Journey",
           tagList(shiny::icon("weight"))),
        fluidRow(
          box(
            knobInput(
              inputId = "weight_goal",
              label = "Set weight desired at the end of the month(lb)",
              value = 127,
              min = 60,
              max = 204,
              displayPrevious = TRUE,
              lineCap = "round",
              fgColor = "#428BCA",
              inputColor = "#428BCA"
            )
          )
        ),
        actionButton(
          inputId = "set_weight_goal",
          label = "Update Weight Goal",
          class = "btn-primary")
      ),
      tabItem(
        # Activity Dashboard & Exploratory Analysis
        tabName = "dashboard",
        h3("Activity Dashboard", tagList(shiny::icon("tachometer-alt"))),
        fluidRow(
          # tab for steps analysis
          tabBox(
            title = tagList(shiny::icon("walking"), "Steps"),
            id = "steps", height = "auto",
            tabPanel(
              title = "Hourly",
              plotOutput(outputId = "hourlysteps")),
            tabPanel(
              title = "Weekly",
              plotOutput(outputId = "dailysteps")),
            tabPanel(title = "Historic",
                     plotOutput(outputId = "historicsteps"))
          ),
          # tab for calories burnt analysis
          tabBox(
            title = tagList(shiny::icon("running"), "Calories"),
            id = "cals", height = "auto",
            tabPanel(
              title = "Hourly",
              plotOutput(outputId = "hourlycalories")),
            tabPanel(
              title = "Weekly",
              plotOutput(outputId = "weeklycalories")),
            tabPanel(
              title = "Historic",
              plotOutput(outputId = "historiccalories"))
          ),
          # tab for intensity analysis
          tabBox(
            title = tagList(shiny::icon("dumbbell"), "Intensity"),
            id = "intensity", height = "auto",
            tabPanel(
              title = "Daily",
              plotOutput(outputId = "dailyintensity")),
            tabPanel(
              title = "Weekly",
              plotOutput(outputId = "weeklyintensity")),
            tabPanel(
              title = "Historic",
              plotOutput(outputId = "historicintensity"))
          ),
          # tab for distance analysis
          tabBox(
            title = tagList(shiny::icon("road"), "Distance"),
            id = "distance", height = "auto",
            tabPanel(
              title = "Daily",
              plotOutput(outputId = "dailydist")),
            tabPanel(
              title = "Weekly",
              plotOutput(outputId = "weeklydist")),
            tabPanel(
              title = "Historic",
              plotOutput(outputId = "historicdist"))
          ),
          # tab for sleep analysis
          tabBox(
            title = tagList(shiny::icon("bed"), "Sleep"),
            id = "sleep", height = "auto", width = 12,
            tabPanel(
              title = "Daily",
              plotOutput(outputId = "dailysleep")),
            tabPanel(
              title = "Weekly",
              plotOutput(outputId = "weeklysleep")),
            tabPanel(
              title = "Historic",
              plotOutput(outputId = "historicsleep"))
          )
        ),
        # NOTE(review): the comma above leaves an empty trailing argument in
        # this tabItem() call — confirm shiny/htmltools tolerates it.
      ),
      # DELETE?
      tabItem(
        tabName = "subitem1",
        h3("Don't Break The Chain", tagList(shiny::icon("calendar-check"))),
        fluidRow(
          box(
            h4("Historical Goal Completion"),
            plotOutput(outputId = "BreakChain")
          ),
          box(
            h4("Streak Count"),
            plotOutput(outputId = "activity_streak")
          )
        ),
        h3("Daily Tracker", tagList(shiny::icon("watch-fitness"))),
        fluidRow(
          box(
            h4("Daily Steps Goal"),
            plotOutput(outputId = "step_goal_plot")
          ),
          box(
            h4("Daily Stand Goal"),
            plotOutput(outputId = "stand_goal_plot")
          ),
          box(
            h4("Daily Excercise Goal"),
            plotOutput(outputId = "calories_goal_plot")
          )
        )
      ),
      tabItem(
        tabName = "subitem2",
        h3("Historical Weight", tagList(shiny::icon("dumbbell"))),
        fluidRow(
          box(
            plotOutput(outputId = "weightLogInfo_plot") , width = 12
          )
        ),
        h3("Weight Loss Tracker", tagList(shiny::icon("bullseye"))),
        fluidRow(
          box(
            valueBoxOutput("calories", width = 12) ,width = 12
          ),
          box(
            valueBoxOutput("Target", width = 6),
            valueBoxOutput("Prediction", width = 6),width = 12
          )
        )
      )
    ) # tab items
  ) # end body
) # end dash page
server <- function(input, output, session) {
############################# USER PROFILE #####################################
sample_Input <- reactive({
input$use_sample
# select data set to show in preview
switch(input$sample_data,
"Hourly Steps" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="hourlySteps",
col_name=T)),
"Hourly Calories" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="hourlyCalories",
col_name=T)),
"Hourly Intensities" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="hourlyIntensities",
col_name=T)),
"Hourly Stand" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="hourlyStand",
col_name=T)),
"Daily Steps" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="dailySteps",
col_name=T)),
"Daily Intensities" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="dailyIntensities",
col_name=T)),
"Daily Activity" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="dailyActivity",
col_name=T)),
"Sleep Minutes" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="minuteSleep",
col_name=T)),
"Daily Sleep" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="sleepDay",
col_name=T)),
"Daily Stand" = as.data.frame(read_excel("Joined_Dataset_V2.xlsx",
sheet="standDay",
col_name=T))
)
})
# sample data set sheet
output$sample_table <- renderTable({
head(sample_Input()[1:ncol(sample_Input())], n = 10)
})
# download template/sample data
sample <- loadWorkbook("Joined_Dataset_V2.xlsx")
output$download_sample <- downloadHandler(
filename = function() {
file <- "FitPal_Sample-Dataset_Template.xlsx"
},
content = function(file) {
# save all workbook including sheets
saveWorkbook(sample, file = file, overwrite = TRUE)
}
)
########################## ACTIVITY DASHVBOARD #################################
# Hourly Steps
hourlySteps <- reactive({
c(input$use_uploaded, input$use_sample)
# prepare the hourly steps data from either uploaded or sample data
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
hsteps <- read_excel(input$user_data$datapath, sheet = "hourlySteps")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
hsteps <- read_excel("Joined_Dataset_V2.xlsx", sheet = "hourlySteps")
}
hsteps <- as.data.frame(hsteps)
print(str(hsteps))
hsteps$ActivityHour <- as.POSIXct(hsteps$ActivityHour, format="%Y-%m-%d")
hsteps<- subset(hsteps, ActivityHour> "2016-05-12")
hsteps
})
# hourly steps graph
observeEvent(
c(input$use_uploaded, input$use_sample),
ignoreInit = T,
output$hourlysteps <- renderPlot(
hourlySteps() %>%
group_by(ActivityHour) %>%
summarize(StepTotal = mean(StepTotal)) %>%
ggplot(aes(x=ActivityHour, y= StepTotal, fill = StepTotal))+
geom_col()+
scale_fill_continuous(low = '#05fbd7', high = "#17667b") +
labs(title = "Hourly Steps") +
theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
axis.text = element_text(size = 12),
axis.title = element_text(size = 12, face = "bold"),
panel.background = element_rect(fill = "white"),
axis.line = element_line(size = 1, colour = "black"),
panel.grid.major = element_line(colour = "#f4f4f4"))
)
)
# Daily Steps
dailySteps <- reactive({
c(input$use_uploaded, input$use_sample)
# prepare the daily steps data from either uploaded or sample data
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
dsteps <- read_excel(input$user_data$datapath, sheet = "dailySteps")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
dsteps <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailySteps")
}
dsteps <- as.data.frame(dsteps)
print(str(dsteps))
dsteps$ActivityDay <- as.POSIXct(dsteps$ActivityDay, format = "%Y-%m-%d")
dsteps <- subset(dsteps, ActivityDay > "2016-05-05")
dsteps
})
# daily steps graph
observeEvent(
c(input$use_uploaded, input$use_sample),
ignoreInit = T,
output$dailysteps <- renderPlot(
ggplot(data = dailySteps(), aes(x = ActivityDay, y = StepTotal)) +
geom_bar(stat = "identity", fill = "#27839a") +
labs(title = "Weekly Steps",
x = "Date", y = "Weekly Steps") +
theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
axis.text = element_text(size = 12),
axis.title = element_text(size = 12, face = "bold"),
panel.background = element_rect(fill = "white"),
axis.line = element_line(size = 1, colour = "black"),
panel.grid.major = element_line(colour = "#f4f4f4"))
)
)
# Historic Steps
historicSteps <- reactive({
c(input$use_uploaded, input$use_sample)
# prepare the historic steps data from either uploaded or sample data
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
histsteps <- read_excel(input$user_data$datapath, sheet = "dailySteps")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
histsteps <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailySteps")
}
histsteps <- as.data.frame(histsteps)
print(str(histsteps))
histsteps
})
# historic steps graph
observeEvent(
c(input$use_uploaded, input$use_sample),
ignoreInit = T,
output$historicsteps <- renderPlot(
ggplot(data = historicSteps(), aes(x = ActivityDay, y = StepTotal)) +
geom_line(color = "#2cdeeb", size = 2) +
xlab("Date")+
ylab("Total Steps")+
labs(title = "Historic Steps") +
theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
axis.text = element_text(size = 12),
axis.title = element_text(size = 12, face = "bold"),
panel.background = element_rect(fill = "white"),
axis.line = element_line(size = 1, colour = "black"),
panel.grid.major = element_line(colour = "#f4f4f4"))
)
)
# Hourly Calories
hourlyCalories <- reactive({
c(input$use_uploaded, input$use_sample)
# prepare the hourly calories data from either uploaded or sample data
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
hcal <- read_excel(input$user_data$datapath, sheet = "hourlyCalories")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
hcal <- read_excel("Joined_Dataset_V2.xlsx", sheet = "hourlyCalories")
}
hcal <- as.data.frame(hcal)
print(str(hcal))
hcal$ActivityHour <- as.POSIXct(hcal$ActivityHour, format="%Y-%m-%d")
hcal<- subset(hcal, ActivityHour> "2016-05-12")
hcal
})
# hourly calories graph
observeEvent(
c(input$use_uploaded, input$use_sample),
ignoreInit = T,
output$hourlycalories <- renderPlot(
hourlyCalories() %>%
group_by(ActivityHour) %>%
summarize(CaloriesTotal = mean(Calories)) %>%
ggplot(aes(x=ActivityHour, y= CaloriesTotal, fill = CaloriesTotal))+
geom_col()+
scale_fill_continuous(low = '#05fbd7', high = "#17667b") +
labs(title = "Hourly Calories") +
theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
axis.text = element_text(size = 12),
axis.title = element_text(size = 12, face = "bold"),
panel.background = element_rect(fill = "white"),
axis.line = element_line(size = 1, colour = "black"),
panel.grid.major = element_line(colour = "#f4f4f4"))
)
)
# Weekly Calories
weeklyCalories <- reactive({
c(input$use_uploaded, input$use_sample)
# prepare the weekly calories data from either uploaded or sample data
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
wcal <- read_excel(input$user_data$datapath, sheet = "dailyActivity")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
wcal <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyActivity")
}
wcal <- as.data.frame(wcal)
print(str(wcal))
wcal$ActivityDate <-as.POSIXct(wcal$ActivityDate, format = "%Y-%m-%d")
wcal <- subset(wcal, ActivityDate > "2016-05-06")
wcal
})
# weekly calories graph
observeEvent(
c(input$use_uploaded, input$use_sample),
ignoreInit = T,
output$weeklycalories <- renderPlot(
ggplot(data = weeklyCalories(), aes(x = ActivityDate, y = Calories)) +
geom_bar(stat = "identity", fill = "#27839a") +
labs(title = "Weekly Calories ",
x = "Date", y = "Weekly Calories") +
theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
axis.text = element_text(size = 12),
axis.title = element_text(size = 12, face = "bold"),
panel.background = element_rect(fill = "white"),
axis.line = element_line(size = 1, colour = "black"),
panel.grid.major = element_line(colour = "#f4f4f4"))
)
)
# Historic Calories
historicCalories <- reactive({
c(input$use_uploaded, input$use_sample)
# prepare the historic calories data from either uploaded or sample data
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
histcal <- read_excel(input$user_data$datapath, sheet = "dailyActivity")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
histcal <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyActivity")
}
histcal <- as.data.frame(histcal)
print(str(histcal))
histcal
})
# historical calories graph
observeEvent(
c(input$use_uploaded, input$use_sample),
ignoreInit = T,
output$historiccalories <- renderPlot(
ggplot(historicCalories(), aes(x = ActivityDate, y = Calories))+
geom_line(color = "#2cdeeb", size = 2) +
xlab("Date")+
ylab("Calories Burned")+
labs(title = "Historic Calories") +
theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
axis.text = element_text(size = 12),
axis.title = element_text(size = 12, face = "bold"),
panel.background = element_rect(fill = "white"),
axis.line = element_line(size = 1, colour = "black"),
panel.grid.major = element_line(colour = "#f4f4f4"))
)
)
# Daily Intensities
dailyIntensity <- reactive({
c(input$use_uploaded, input$use_sample)
# prepare the daily intensities data from either uploaded or sample data
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
dint <- read_excel(input$user_data$datapath, sheet = "hourlyIntensities")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
dint <- read_excel("Joined_Dataset_V2.xlsx", sheet = "hourlyIntensities")
}
dint <- as.data.frame(dint)
print(str(dint))
dint$ActivityHour <- as.POSIXct(dint$ActivityHour, format = "%Y-%m-%d")
dint <- subset(dint, ActivityHour > "2016-05-06")
dint
})
# daily intensity graph
observeEvent(
c(input$use_uploaded, input$use_sample),
ignoreInit = T,
output$dailyintensity <- renderPlot(
ggplot(data = dailyIntensity(),
aes(x=ActivityHour, y= AverageIntensity, fill = AverageIntensity))+
geom_col()+
scale_fill_continuous(low = '#05fbd7', high = "#17667b") +
labs(title = "Daily Intensities") +
theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
axis.text = element_text(size = 12),
axis.title = element_text(size = 12, face = "bold"),
panel.background = element_rect(fill = "white"),
axis.line = element_line(size = 1, colour = "black"),
panel.grid.major = element_line(colour = "#f4f4f4"))
)
)
# Weekly Intensities
weeklyIntensity <- reactive({
c(input$use_uploaded, input$use_sample)
# prepare the weekly intensities data from either uploaded or sample data
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
wint <- read_excel(input$user_data$datapath, sheet = "dailyIntensities")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
wint <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyIntensities")
}
wint <- as.data.frame(wint)
print(str(wint))
wint$ActivityDay <- as.POSIXct(wint$ActivityDay, format = "%Y-%m-%d")
wint <- subset(wint, ActivityDay > "2016-05-06")
wint <- wint %>% select(ActivityDay,
VeryActiveMinutes,
FairlyActiveMinutes,
LightlyActiveMinutes) %>% data.frame()
wint <- melt(wint, id.vars = c("ActivityDay"))
wint
})
# weekly intensity graph
observeEvent(
c(input$use_uploaded, input$use_sample),
ignoreInit = T,
output$weeklyintensity <- renderPlot(
ggplot(data = weeklyIntensity(),
aes(fill = variable, y=value, x = ActivityDay)) +
geom_bar(position = position_fill(reverse = TRUE), stat="identity") +
labs(title = "Weekly Intensities") +
theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
axis.text = element_text(size = 12),
axis.title = element_text(size = 12, face = "bold"),
panel.background = element_rect(fill = "white"),
axis.line = element_line(size = 1, colour = "black"),
panel.grid.major = element_line(colour = "#f4f4f4")) +
scale_colour_manual(values = c('LightlyActiveMinutes' = '#05fbd7',
'FairlyActiveMinutes' = '#17667b',
'VeryActiveMinutes' = "#f8766d"),
aesthetics = "fill")
)
)
# Historic Intensities
historicIntensity <- reactive({
c(input$use_uploaded, input$use_sample)
# prepare the historic intensities data from either uploaded or sample data
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
hint <- read_excel(input$user_data$datapath, sheet = "dailyIntensities")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
hint <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyIntensities")
}
hint <- as.data.frame(hint)
print(str(hint))
hint <- hint %>% select(ActivityDay,
VeryActiveMinutes,
FairlyActiveMinutes,
LightlyActiveMinutes) %>% data.frame()
hint <- melt(hint, id.vars = c("ActivityDay"))
hint
})
# historic intensity graph
observeEvent(
c(input$use_uploaded, input$use_sample),
ignoreInit = T,
output$historicintensity <- renderPlot(
ggplot(data = historicIntensity(),
aes(fill = variable, y=value, x = ActivityDay)) +
geom_bar(position = position_fill(reverse = TRUE), stat="identity") +
labs(title = "Historic Intensities") +
theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
axis.text = element_text(size = 12),
axis.title = element_text(size = 12, face = "bold"),
panel.background = element_rect(fill = "white"),
axis.line = element_line(size = 1, colour = "black"),
panel.grid.major = element_line(colour = "#f4f4f4")) +
scale_colour_manual(values = c('LightlyActiveMinutes' = '#05fbd7',
'FairlyActiveMinutes' = '#17667b',
'VeryActiveMinutes' = "#f8766d"),
aesthetics = "fill")
)
)
# Weekly Distance
weeklyDistance <- reactive({
c(input$use_uploaded, input$use_sample)
# prepare the weekly distance data from either uploaded or sample data
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
wdist <- read_excel(input$user_data$datapath, sheet = "dailyActivity")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
wdist <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyActivity")
}
wdist <- as.data.frame(wdist)
print(str(wdist))
wdist$ActivityDate <- as.POSIXct(wdist$ActivityDate, format = "%Y-%m-%d")
wdist <- subset(wdist, ActivityDate > "2016-05-06")
wdist <- wdist %>% select(ActivityDate,
VeryActiveDistance,
ModeratelyActiveDistance,
LightActiveDistance) %>% data.frame()
wdist <- melt(wdist, id.vars = c("ActivityDate"))
wdist
})
# weekly distance graph
observeEvent(
c(input$use_uploaded, input$use_sample),
ignoreInit = T,
output$weeklydist <- renderPlot(
ggplot(data = weeklyDistance(),
aes(fill = variable, y=value, x = ActivityDate)) +
geom_bar(position = position_fill(reverse = TRUE), stat="identity") +
labs(title = "Weekly Distance") +
theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
axis.text = element_text(size = 12),
axis.title = element_text(size = 12, face = "bold"),
panel.background = element_rect(fill = "white"),
axis.line = element_line(size = 1, colour = "black"),
panel.grid.major = element_line(colour = "#f4f4f4")) +
scale_colour_manual(values = c('LightActiveDistance' = '#05fbd7',
'ModeratelyActiveDistance' = '#17667b',
'VeryActiveDistance' = "#f8766d"),
aesthetics = "fill")
)
)
# Historic Distance
historicDistance <- reactive({
c(input$use_uploaded, input$use_sample)
# prepare the historic distance data from either uploaded or sample data
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
hdist <- read_excel(input$user_data$datapath, sheet = "dailyActivity")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
hdist <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyActivity")
}
hdist <- as.data.frame(hdist)
print(str(hdist))
hdist <- hdist %>% select(ActivityDate,
VeryActiveDistance,
ModeratelyActiveDistance,
LightActiveDistance) %>% data.frame()
hdist <- melt(hdist, id.vars = c("ActivityDate"))
hdist
})
# historic distance graph
observeEvent(
c(input$use_uploaded, input$use_sample),
ignoreInit = T,
output$historicdist <- renderPlot(
ggplot(data = historicDistance(),
aes(fill = variable, y= value, x = ActivityDate)) +
geom_bar(position = position_fill(reverse = TRUE), stat="identity") +
labs(title = "Historic Distance") +
theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
axis.text = element_text(size = 12),
axis.title = element_text(size = 12, face = "bold"),
panel.background = element_rect(fill = "white"),
axis.line = element_line(size = 1, colour = "black"),
panel.grid.major = element_line(colour = "#f4f4f4")) +
scale_colour_manual(values = c('LightActiveDistance' = '#05fbd7',
'ModeratelyActiveDistance' = '#17667b',
'VeryActiveDistance' = "#f8766d"),
aesthetics = "fill")
)
)
# Daily Sleep
dailySleep <- reactive({
c(input$use_uploaded, input$use_sample)
# prepare the daily sleep data from either uploaded or sample data
if (is.null(input$user_data) == F & is.null(input$use_uploaded) == F) {
dsleep <- read_excel(input$user_data$datapath, sheet = "minuteSleep")
}
else if (is.null(input$user_data) == T | is.null(input$use_sample) == F) {
dsleep <- read_excel("Joined_Dataset_V2.xlsx", sheet = "minuteSleep")
}
dsleep <- dsleep %>% separate(date,sep = " ", c("Days", "Time")) %>%
separate(Time, sep = ":", c("Hours", "Mins", "Seconds"),
extra = "merge", fill = "right") %>%
group_by(Days) %>% summarise(Hours = as.numeric(unique(Hours))) %>%
filter(Days == "2016-05-11")
hours_list <- data.frame(Hour_number = seq(1:24))
dsleep <- merge(hours_list, dsleep, by.x = "Hour_number",
by.y = "Hours", all.x = T)
dsleep <- dsleep %>% transmute(Hour_number = as.numeric(Hour_number),
Num = ifelse(is.na(Days), 0, 1))
print(str(dsleep))
dsleep
})
# Daily sleep graph: one unit-height bar per hour the user was asleep.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,  # was `T`; always spell out TRUE/FALSE
  output$dailysleep <- renderPlot(
    ggplot(data = dailySleep(), aes(x = Hour_number, y = Num)) +
      geom_bar(stat = "identity", fill = "#27839a") +
      labs(title = "Daily Sleep Trend", x = "Hour") +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4"))
  )
)
# Weekly Sleep
# Loads the per-day sleep totals and keeps only the final week of the sample
# period (dates after 2016-05-05).
weeklySleep <- reactive({
  c(input$use_uploaded, input$use_sample)  # recompute on either source button
  # prepare the weekly sleep data from either uploaded or sample data
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    wsleep <- read_excel(input$user_data$datapath, sheet = "sleepDay")
  } else if (is.null(input$user_data) || !is.null(input$use_sample)) {
    wsleep <- read_excel("Joined_Dataset_V2.xlsx", sheet = "sleepDay")
  }
  wsleep$SleepDay <- as.POSIXct(wsleep$SleepDay, format = "%Y-%m-%d")
  wsleep <- subset(wsleep, SleepDay > "2016-05-05")  # hard-coded demo cutoff
  wsleep <- as.data.frame(wsleep)
  # (debug print(str(wsleep)) removed)
  wsleep
})
# Weekly sleep graph: bars of minutes asleep per day, with a dashed reference
# line at 420 minutes (7 hours).
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,  # was `T`; always spell out TRUE/FALSE
  output$weeklysleep <- renderPlot(
    ggplot(data = weeklySleep(), aes(x = SleepDay, y = TotalMinutesAsleep)) +
      geom_bar(stat = "identity", fill = "#27839a") +
      labs(title = "Weekly Sleep",
           x = "Date", y = "Weekly Sleep") +
      # horizontal guide at the 7-hour (420-minute) recommendation
      geom_abline(slope = 0, intercept = 420, col = "#f8766d", lty = 2) +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4"))
  )
)
# Historic Sleep
# Same preparation as weeklySleep() (duplicated in the original source;
# candidate for sharing one reactive).
historicSleep <- reactive({
  c(input$use_uploaded, input$use_sample)  # recompute on either source button
  # prepare the historic sleep data from either uploaded or sample data
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    hsleep <- read_excel(input$user_data$datapath, sheet = "sleepDay")
  } else if (is.null(input$user_data) || !is.null(input$use_sample)) {
    hsleep <- read_excel("Joined_Dataset_V2.xlsx", sheet = "sleepDay")
  }
  hsleep$SleepDay <- as.POSIXct(hsleep$SleepDay, format = "%Y-%m-%d")
  hsleep <- subset(hsleep, SleepDay > "2016-05-05")  # hard-coded demo cutoff
  hsleep <- as.data.frame(hsleep)
  # (debug print(str(hsleep)) removed)
  hsleep
})
# Historic sleep graph: line of minutes asleep per day with a dashed guide at
# 420 minutes (7 hours).
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,  # was `T`; always spell out TRUE/FALSE
  output$historicsleep <- renderPlot(
    ggplot(data = historicSleep(),
           aes(x = SleepDay, y = TotalMinutesAsleep)) +
      geom_line(color = "#2cdeeb", size = 2) +
      xlab("Date") +
      ylab("Sleep Time (in Minutes)") +
      geom_abline(slope = 0, intercept = 420, col = "#f8766d", lty = 2) +
      labs(title = "Historic Sleep") +
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4"))
  )
)
############################ SET YOUR GOALS ####################################
# BMI Graph: draws the BMI "spectrum" (underweight / normal / overweight /
# obese regions over a height x weight plane) and annotates the user's
# current, ideal, and desired positions on it.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  ignoreInit = TRUE,  # was `T`; always spell out TRUE/FALSE
  output$bmiplot <- renderPlot({
    # Current BMI from the user's inputs. CDC formula: BMI = 703 * lb / in^2.
    lb <- input$weight
    inches <- input$height
    bmi_output <- (lb / (inches^2)) * 703
    # Ideal weight: midpoint of the CDC healthy-BMI band (18.5-24.9) solved
    # for pounds at this height (lb = BMI * in^2 / 703). This replaces a
    # hand-typed per-inch lookup table in which every height from 62 to 76
    # inches carried the same copy-pasted range (97-127 lb), producing absurd
    # "ideal" weights for taller users.
    weight_ideal <- round(mean(c(18.5, 24.9)) * inches^2 / 703)
    # calculate ideal and desired BMI for the annotations below
    bmi_ideal <- (weight_ideal / (inches^2)) * 703
    bmi_desired <- (input$weight_goal / (inches^2)) * 703
    # Region boundary curves over the plotted height range, clamped to the
    # plotting window (10-690 lb) so the polygons never spill outside it.
    inch <- seq(from = 10, to = 120)
    lb_min <- numeric(length(inch)) + 10
    lb_max <- numeric(length(inch)) + 690
    clamp <- function(w) pmin(pmax(w, 10), 690)
    lb_udr <- clamp(inch^2 * 18.5 / 703)  # underweight/normal boundary (BMI 18.5)
    lb_ovr <- clamp(inch^2 * 25 / 703)    # normal/overweight boundary (BMI 25)
    lb_obe <- clamp(inch^2 * 30 / 703)    # overweight/obese boundary (BMI 30)
    # create BMI polygon plot: four stacked bands between the boundary curves
    ggplot(data = NULL, aes(x = 5, y = 700)) +
      xlim(10, 120) + ylim(5, 700) +
      labs(title = "BMI: Current vs. Desired vs. Ideal",
           x = "Height in Inches",
           y = "Weight in Pounds",
           caption = "Based on data from www.cdc.gov.
           Ideal BMI values are averages within recommended weight range.") +
      geom_polygon(aes(x = c(inch, rev(inch)),
                       y = c(lb_min, rev(lb_udr)),
                       fill = "#eb347d"),   # underweight band
                   alpha = 0.8) +
      geom_polygon(aes(x = c(inch, rev(inch)),
                       y = c(lb_udr, rev(lb_ovr)),
                       fill = "#34bdeb"),   # normal band
                   alpha = 0.8) +
      geom_polygon(aes(x = c(inch, rev(inch)),
                       y = c(lb_ovr, rev(lb_obe)),
                       fill = "#9634eb"),   # overweight band
                   alpha = 0.8) +
      geom_polygon(aes(x = c(inch, rev(inch)),
                       y = c(lb_obe, rev(lb_max)),
                       fill = "#eb4034"),   # obese band
                   alpha = 0.8) +
      # add arrow + text markers for the three BMI values
      annotate(geom = "curve", x = input$height + 40, y = input$weight + 120,
               xend = input$height, yend = input$weight, curvature = .3,
               arrow = arrow(length = unit(2, "mm")),
               colour = "white", size = 1) +
      annotate(geom = "text", x = input$height + 48, y = input$weight + 120,
               label = paste("Your BMI (", round(bmi_output, 1), ")"
                             , sep = ""),
               colour = "white", size = 4.5, fontface = "bold") +
      annotate(geom = "curve", x = input$height + 30, y = weight_ideal + 100,
               xend = input$height, yend = weight_ideal, curvature = .3,
               arrow = arrow(length = unit(2, "mm")),
               colour = "black", size = 1) +
      annotate(geom = "text", x = input$height + 38, y = weight_ideal + 100,
               label = paste("Your Ideal BMI (", round(bmi_ideal, 1), ")"
                             , sep = ""),
               colour = "black", size = 4.5, fontface = "bold") +
      annotate(geom = "curve", x = input$height + 10, y = input$weight_goal + 80,
               xend = input$height, yend = input$weight_goal, curvature = .3,
               arrow = arrow(length = unit(2, "mm")),
               colour = "white", size = 1) +
      annotate(geom = "text", x = input$height + 18, y = input$weight_goal + 80,
               label = paste("Your Desired BMI (", round(bmi_desired, 1), ")"
                             , sep = ""),
               colour = "white", size = 4.5, fontface = "bold") +
      # customize graph title, labels, and legend
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            legend.position = "right",
            legend.title = element_text(size = 16, face = "bold"),
            legend.text = element_text(size = 12),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4")) +
      # identity fill scale: labels follow the alphabetical order of the hex
      # values (#34bdeb, #9634eb, #eb347d, #eb4034)
      scale_fill_identity(name = "BMI Categories",
                          guide = "legend",
                          labels = c("Normal",
                                     "Overweight",
                                     "Underweight",
                                     "Obese"))
  })
)
############################# Track your activity ####################################
# Don't break the chain
# Historic goal-completion data: one row per (date, category) carrying the raw
# value, percent-of-goal, goal-met boolean, and calendar helper columns for
# the week-by-weekday tile chart.
df_activity_tall <- reactive({
  c(input$use_uploaded, input$use_sample)  # recompute on either source button
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    D_dailyActivity <- read_excel(input$user_data$datapath, sheet = "dailyActivity")
    D_sleepDay <- read_excel(input$user_data$datapath, sheet = "sleepDay")
    D_standDay <- read_excel(input$user_data$datapath, sheet = "standDay")
  } else if (is.null(input$user_data) || !is.null(input$use_sample)) {
    D_dailyActivity <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyActivity")
    D_sleepDay <- read_excel("Joined_Dataset_V2.xlsx", sheet = "sleepDay")
    D_standDay <- read_excel("Joined_Dataset_V2.xlsx", sheet = "standDay")
  }
  # Align the three sheets on ActivityDate and keep the four tracked metrics.
  D_dailyActivity$ActivityDate <- as.POSIXct(D_dailyActivity$ActivityDate, format = "%Y-%m-%d")
  D_sleepDay$SleepDay <- as.POSIXct(D_sleepDay$SleepDay, format = "%Y-%m-%d")
  D_standDay$ActivityDate <- as.POSIXct(D_standDay$ActivityDate, format = "%Y-%m-%d")
  names(D_sleepDay)[names(D_sleepDay) == 'SleepDay'] <- "ActivityDate"
  d_merge <- merge(x = D_dailyActivity, y = D_sleepDay, by = "ActivityDate")
  d_merge <- merge(x = d_merge, y = D_standDay, by = "ActivityDate")
  d_merge <- d_merge %>% select(ActivityDate, TotalSteps, Calories, TotalMinutesAsleep, TotalStand)
  d_merge <- as.data.frame(d_merge)
  d_merge$ActivityDate <- as.POSIXct(d_merge$ActivityDate, format = "%Y-%m-%d")
  # User goals; sleep and stand goals are entered in hours -> minutes.
  step_goal <- as.numeric(input$steps_goal)
  exercise_goal <- as.numeric(input$exercise_goal)
  sleep_goal <- as.numeric(input$sleep_goal) * 60
  stand_goal <- as.numeric(input$stand_goal) * 60
  # Percent of goal and goal-met flags per metric.
  df_activity_tidy <- d_merge %>%
    mutate(date = as.Date(ActivityDate)) %>%
    rename(move = TotalSteps,
           exercise = Calories,
           sleep = TotalMinutesAsleep,
           stand = TotalStand) %>%
    mutate(move_pct = move / step_goal,
           exercise_pct = exercise / exercise_goal,
           sleep_pct = sleep / sleep_goal,
           stand_pct = stand / stand_goal,
           # was if_else(x_pct < 1, FALSE, TRUE); direct comparison is
           # equivalent (NA propagates identically) and clearer
           move_bool = move_pct >= 1,
           exercise_bool = exercise_pct >= 1,
           sleep_bool = sleep_pct >= 1,
           stand_bool = stand_pct >= 1)
  # Pivot each measure to long form, then join into one tall frame.
  df_activity_tall_value <- df_activity_tidy %>%
    select(date, Move = move, Exercise = exercise, Sleep = sleep, Stand = stand) %>%
    gather(category, value, -date)
  df_activity_tall_pct <- df_activity_tidy %>%
    select(date, Move = move_pct, Exercise = exercise_pct, Sleep = sleep_pct, Stand = stand_pct) %>%
    gather(category, pct, -date)
  df_activity_tall_bool <- df_activity_tidy %>%
    select(date, Move = move_bool, Exercise = exercise_bool, Sleep = sleep_bool, Stand = stand_bool) %>%
    gather(category, boolean, -date)
  df_activity_tall <- df_activity_tall_value %>%
    left_join(df_activity_tall_pct, by = c("date", "category")) %>%
    left_join(df_activity_tall_bool, by = c("date", "category")) %>%
    mutate(category = as_factor(category),
           # calendar helpers for the tile chart (week starts on the
           # locale-dependent first weekday reported by wday())
           month = ymd(paste(year(date), month(date), 1, sep = "-")),
           week = date - wday(date) + 1,
           wday = wday(date),
           day = day(date))
  df_activity_tall
})
# "Don't break the chain" tile chart: one tile per (weekday, week), filled
# green when that day's goal was met, faceted by goal category. Re-registered
# each time the user presses the "set goal" button so new goal values from
# df_activity_tall() are picked up.
observeEvent(
  input$set_goal,
  output$BreakChain <- renderPlot(
    df_activity_tall() %>%
      ggplot(aes(x = wday, y = week, fill = boolean)) +
      # na.rm = FALSE keeps days with missing data visible as empty tiles
      geom_tile(col = "grey30", na.rm = FALSE) +
      theme(panel.grid.major = element_blank()) +
      # FALSE -> light grey, TRUE -> green (order of the two fill levels)
      scale_fill_manual(values = c("#f4f4f4", "mediumaquamarine")) +
      facet_wrap(~ category) +
      coord_fixed(ratio = 0.15) +
      # NOTE(review): guides(fill = FALSE) is deprecated in newer ggplot2;
      # guides(fill = "none") is the modern spelling
      guides(fill=FALSE) +
      labs(title = NULL,
           caption = 'Green:Goal Completed') +
      theme(axis.text.x = element_blank())
  )
)
# Streak count data: per goal category, the number of consecutive days the
# goal has been met (resets to 0 on a miss). Shares its load/merge/goal logic
# with df_activity_tall() — candidate for a shared helper reactive.
df_activity_streak <- reactive({
  c(input$use_uploaded, input$use_sample)  # recompute on either source button
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    D_dailyActivity <- read_excel(input$user_data$datapath, sheet = "dailyActivity")
    D_sleepDay <- read_excel(input$user_data$datapath, sheet = "sleepDay")
    D_standDay <- read_excel(input$user_data$datapath, sheet = "standDay")
  } else if (is.null(input$user_data) || !is.null(input$use_sample)) {
    D_dailyActivity <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyActivity")
    D_sleepDay <- read_excel("Joined_Dataset_V2.xlsx", sheet = "sleepDay")
    D_standDay <- read_excel("Joined_Dataset_V2.xlsx", sheet = "standDay")
  }
  # Align the three sheets on ActivityDate and keep the four tracked metrics.
  D_dailyActivity$ActivityDate <- as.POSIXct(D_dailyActivity$ActivityDate, format = "%Y-%m-%d")
  D_sleepDay$SleepDay <- as.POSIXct(D_sleepDay$SleepDay, format = "%Y-%m-%d")
  D_standDay$ActivityDate <- as.POSIXct(D_standDay$ActivityDate, format = "%Y-%m-%d")
  names(D_sleepDay)[names(D_sleepDay) == 'SleepDay'] <- "ActivityDate"
  d_merge <- merge(x = D_dailyActivity, y = D_sleepDay, by = "ActivityDate")
  d_merge <- merge(x = d_merge, y = D_standDay, by = "ActivityDate")
  d_merge <- d_merge %>% select(ActivityDate, TotalSteps, Calories, TotalMinutesAsleep, TotalStand)
  d_merge <- as.data.frame(d_merge)
  d_merge$ActivityDate <- as.POSIXct(d_merge$ActivityDate, format = "%Y-%m-%d")
  # User goals; sleep and stand goals are entered in hours -> minutes.
  step_goal <- as.numeric(input$steps_goal)
  exercise_goal <- as.numeric(input$exercise_goal)
  sleep_goal <- as.numeric(input$sleep_goal) * 60
  stand_goal <- as.numeric(input$stand_goal) * 60
  # Goal-met flags per metric (the streak only needs the booleans; the
  # original also built value/pct frames and a joined tall frame that were
  # never used here — removed as dead code).
  df_activity_tidy <- d_merge %>%
    mutate(date = as.Date(ActivityDate)) %>%
    rename(move = TotalSteps,
           exercise = Calories,
           sleep = TotalMinutesAsleep,
           stand = TotalStand) %>%
    mutate(move_pct = move / step_goal,
           exercise_pct = exercise / exercise_goal,
           sleep_pct = sleep / sleep_goal,
           stand_pct = stand / stand_goal,
           # was if_else(x_pct < 1, FALSE, TRUE); direct comparison equivalent
           move_bool = move_pct >= 1,
           exercise_bool = exercise_pct >= 1,
           sleep_bool = sleep_pct >= 1,
           stand_bool = stand_pct >= 1)
  df_activity_tall_bool <- df_activity_tidy %>%
    select(date, Move = move_bool, Exercise = exercise_bool, Sleep = sleep_bool, Stand = stand_bool) %>%
    gather(category, boolean, -date)
  # Group consecutive equal booleans: x increments on TRUE->FALSE edges and y
  # on any change, so row_number() inside a (category, x, y) group counts the
  # length of the current run; misses are forced to 0.
  df_activity_streak <- df_activity_tall_bool %>%
    mutate(category = as_factor(category)) %>%
    arrange(category, date) %>%
    group_by(category,
             x = cumsum(c(TRUE, diff(boolean) %in% c(-1))),
             y = cumsum(c(TRUE, diff(boolean) %in% c(-1, 1)))) %>%
    mutate(streak = if_else(boolean == FALSE, 0L, row_number())) %>%
    ungroup()
  df_activity_streak
})
# Streak line chart: per-category running streak length over time. group = x
# draws each streak run as its own line segment instead of connecting across
# resets. Re-registered when the "set goal" button fires.
observeEvent(
  input$set_goal,
  output$activity_streak <- renderPlot(
    ggplot(df_activity_streak(), aes(x = date, y = streak, group = x, col = category)) +
      geom_line() +
      facet_grid(category~.) +
      # NOTE(review): guides(fill = FALSE) is deprecated in newer ggplot2
      # (use "none"); fill is also unused here — colour drives the legend
      guides(fill=FALSE) +
      labs(title = NULL)
  )
)
#Daily Tracker
# Daily steps donut data: two slices (Achieved / Remaining) with percentages
# and label positions for the ring chart.
data_steps <- reactive({
  c(input$use_uploaded, input$use_sample)  # recompute on either source button
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    data_steps <- read_excel(input$user_data$datapath, sheet = "hourlySteps")
  } else if (is.null(input$user_data) || !is.null(input$use_sample)) {
    data_steps <- read_excel("Joined_Dataset_V2.xlsx", sheet = "hourlySteps")
  }
  # Total steps on the hard-coded demo day — TODO: derive latest date from data.
  data_steps <- data_steps %>% mutate(date = as_date(ActivityHour)) %>% filter(date == "2016-05-12")
  total_steps <- sum(data_steps["StepTotal"])
  # Steps still to go; 0 once the goal has been reached.
  remaining_steps <- max(as.numeric(input$steps_goal) - total_steps, 0)
  # Build the two-slice summary directly. The original materialised one data
  # frame row PER STEP via rep("Achieved", total_steps) and re-counted them —
  # tens of thousands of rows allocated just to recover these two totals.
  data_steps <- data.frame(Steps = c("Achieved", "Remaining"),
                           n = c(total_steps, remaining_steps)) %>%
    filter(n > 0) %>%          # empty slices never existed under count() either
    arrange(desc(Steps)) %>%   # "Remaining" first, matching the original order
    mutate(percentage = round(n / sum(n), 4) * 100,
           lab.pos = cumsum(percentage) - .5 * percentage)
  data_steps
})
# Daily steps donut: achieved vs remaining share of the daily step goal.
observeEvent(
  input$set_goal,
  output$step_goal_plot <- renderPlot(
    ggplot(data = data_steps(),
           aes(x = 2, y = percentage, fill = Steps))+
      geom_bar(stat = "identity")+
      # polar transform turns the stacked bar into a ring; start rotates it
      coord_polar("y", start = 200) +
      geom_text(aes(y = lab.pos, label = paste(percentage,"%", sep = "")), col = "white") +
      theme_void() +
      scale_fill_brewer(palette = "Dark2")+
      # widening the x range below the bar's x = 2 hollows out the centre
      xlim(.2,2.5)
  )
)
#Daily stand goal chart
# Daily stand donut data: two slices (Achieved / Remaining) against the stand
# goal (entered in hours, compared in minutes).
data_stand <- reactive({
  c(input$use_uploaded, input$use_sample)  # recompute on either source button
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    data_stand <- read_excel(input$user_data$datapath, sheet = "hourlyStand")
  } else if (is.null(input$user_data) || !is.null(input$use_sample)) {
    data_stand <- read_excel("Joined_Dataset_V2.xlsx", sheet = "hourlyStand")
  }
  # Total stand minutes on the hard-coded demo day.
  data_stand <- data_stand %>% mutate(date = as_date(ActivityHour)) %>% filter(date == "2016-05-12")
  total_stand <- sum(data_stand["Stand"])
  # Remaining minutes; 0 once the goal has been reached.
  remaining_stand <- max(as.numeric(input$stand_goal) * 60 - total_stand, 0)
  # Two-slice summary built directly instead of the original's
  # one-row-per-minute rep()/count() round trip.
  data_stand <- data.frame(Stand = c("Achieved", "Remaining"),
                           n = c(total_stand, remaining_stand)) %>%
    filter(n > 0) %>%
    arrange(desc(Stand)) %>%
    mutate(percentage = round(n / sum(n), 4) * 100,
           lab.pos = cumsum(percentage) - .5 * percentage)
  data_stand
})
#
# Daily stand donut: achieved vs remaining share of the daily stand goal.
observeEvent(
  input$set_goal,
  output$stand_goal_plot <- renderPlot(
    ggplot(data = data_stand(),
           aes(x = 2, y = percentage, fill = Stand))+
      geom_bar(stat = "identity")+
      # polar transform turns the stacked bar into a ring; start rotates it
      coord_polar("y", start = 200) +
      geom_text(aes(y = lab.pos, label = paste(percentage,"%", sep = "")), col = "white") +
      theme_void() +
      scale_fill_brewer(palette = "Dark2")+
      xlim(.2,2.5)
  )
)
#daily calories chart
# Daily calories donut data: two slices (Achieved / Remaining) against the
# calorie (exercise) goal.
data_calories <- reactive({
  c(input$use_uploaded, input$use_sample)  # recompute on either source button
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    data_calories <- read_excel(input$user_data$datapath, sheet = "dailyCalories")
  } else if (is.null(input$user_data) || !is.null(input$use_sample)) {
    data_calories <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyCalories")
  }
  # Calories burned on the hard-coded demo day.
  data_calories <- data_calories %>% mutate(date = as_date(ActivityDay)) %>% filter(date == "2016-05-12")
  total_calories <- sum(data_calories["Calories"])
  # Remaining calories; 0 once the goal has been reached.
  remaining_calories <- max(as.numeric(input$exercise_goal) - total_calories, 0)
  # Two-slice summary built directly instead of the original's
  # one-row-per-calorie rep()/count() round trip.
  data_calories <- data.frame(Calories = c("Achieved", "Remaining"),
                              n = c(total_calories, remaining_calories)) %>%
    filter(n > 0) %>%
    arrange(desc(Calories)) %>%
    mutate(percentage = round(n / sum(n), 4) * 100,
           lab.pos = cumsum(percentage) - .5 * percentage)
  data_calories
})
#
# Daily calories donut: achieved vs remaining share of the calorie goal.
observeEvent(
  input$set_goal,
  output$calories_goal_plot <- renderPlot(
    ggplot(data = data_calories(),
           aes(x = 2, y = percentage, fill = Calories))+
      geom_bar(stat = "identity")+
      # polar transform turns the stacked bar into a ring; start rotates it
      coord_polar("y", start = 200) +
      geom_text(aes(y = lab.pos, label = paste(percentage,"%", sep = "")), col = "white") +
      theme_void() +
      scale_fill_brewer(palette = "Dark2")+
      xlim(.2,2.5)
  )
)
#Weight loss tab
# Historical weight log: the raw weightLogInfo sheet from whichever data
# source (uploaded workbook or bundled sample) is active.
weightLogInfo <- reactive({
  # Touch both buttons so the reactive recomputes when either source fires.
  c(input$use_uploaded, input$use_sample)
  has_upload <- !is.null(input$user_data) && !is.null(input$use_uploaded)
  wants_sample <- is.null(input$user_data) || !is.null(input$use_sample)
  if (has_upload) {
    wlog <- read_excel(input$user_data$datapath, sheet = "weightLogInfo")
  } else if (wants_sample) {
    wlog <- read_excel("Joined_Dataset_V2.xlsx", sheet = "weightLogInfo")
  }
  wlog
})
# Historical weight chart: weight over time with a red linear trend line
# (stat_smooth(method = "lm") adds the fitted regression with its band).
observeEvent(
  c(input$use_uploaded, input$use_sample),
  output$weightLogInfo_plot <- renderPlot(
    ggplot(weightLogInfo(), aes(x = Date, y = WeightPounds))+
      geom_line(color = "#17667b", size = 2) +
      xlab("Date")+
      ylab("Weight (lb)")+
      theme(plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
            axis.text = element_text(size = 12),
            axis.title = element_text(size = 12, face = "bold"),
            panel.background = element_rect(fill = "white"),
            axis.line = element_line(size = 1, colour = "black"),
            panel.grid.major = element_line(colour = "#f4f4f4")) +
      stat_smooth(method = "lm", col = "red")
  )
)
#Forecast - ARIMA Model
# Fits an ARIMA model to the daily-calories series and returns the rounded
# total calories forecast for the next 18 days.
forcast_calories_burn <- reactive({
  c(input$use_uploaded, input$use_sample)  # recompute on either source button
  if (!is.null(input$user_data) && !is.null(input$use_uploaded)) {
    data_calories_F <- read_excel(input$user_data$datapath, sheet = "dailyCalories")
  } else if (is.null(input$user_data) || !is.null(input$use_sample)) {
    data_calories_F <- read_excel("Joined_Dataset_V2.xlsx", sheet = "dailyCalories")
  }
  # Drop the first column (presumably an id — TODO confirm); read.zoo then
  # uses the first remaining column as the date index.
  df <- data.frame(data_calories_F)
  df <- select(df, -1)
  z <- read.zoo(df, format = "%Y-%m-%d")
  tss <- as.ts(z)
  # Fit and forecast 18 days ahead; the sum of the point forecasts is the
  # projected total burn. (Removed dead code from the original: an unused
  # log-transform, an unused moving average, a bare class(z), a discarded
  # accuracy() call, and a result variable that shadowed base::list.)
  arima_1 <- auto.arima(tss)
  fc <- forecast(arima_1, h = 18)
  forcast_calories_burn <- round(sum(fc[["mean"]]), digits = 0)
  forcast_calories_burn
})
#Weight Loss Tracker
# "Calories" value box: reports the forecast monthly burn. The icon/colour
# pair flags whether the predicted end-of-month weight still sits at or above
# the goal (warning triangle / orange) or below it (check / purple).
# NOTE(review): the if() is evaluated when the observer fires, not inside the
# render function, so the box style only updates on weight_goal changes.
observeEvent(
  input$weight_goal,
  output$calories <-
    # predicted weight = current weight minus forecast burn at ~3500 kcal/lb
    if (round(input$weight-(forcast_calories_burn()/3500), digits = 0) >= (input$weight_goal)) {
      renderValueBox({
        valueBox(value = tags$p("Based on your activity data you will burn " , forcast_calories_burn(), " calories by the end of the month", style = "font-size: 75%;"),
                 "Calories", icon = icon("exclamation-triangle"), color = "orange")
      })
    }
    else {
      renderValueBox({
        valueBox(value = tags$p("Based on your activity data you will burn " , forcast_calories_burn(), " calories by the end of the month", style = "font-size: 75%;"),
                 "Calories", icon = icon("check-circle"), color = "purple")
      })
    }
)
# "Prediction" value box: predicted end-of-month weight (current weight minus
# forecast burn converted at ~3500 kcal per pound). Yellow when the prediction
# is still at/above the goal, green when the goal would be reached.
# NOTE(review): as with output$calories, the branch is chosen when the
# observer fires, not inside the render function.
observeEvent(
  input$weight_goal,
  output$Prediction <-
    if (round(input$weight-(forcast_calories_burn()/3500), digits = 0) >= (input$weight_goal)) {
      renderValueBox({
        valueBox(value = tags$p("You are predicted to weigh ", round(input$weight-(forcast_calories_burn()/3500), digits = 0), " lbs by the end of the month" , style = "font-size: 50%;"),
                 #paste0("You will be ","lbs short of your goal at the end of the month"), style = "font-size: 50%;" ,
                 "Prediction", color = "yellow")
      })
    }
    else {
      renderValueBox({
        valueBox(value = tags$p("You are predicted to weigh ", round(input$weight-(forcast_calories_burn()/3500), digits = 0), " lbs by the end of the month", style = "font-size: 50%;"),
                 #paste0("You will be ","lbs short of your goal at the end of the month"), style = "font-size: 50%;" ,
                 "Prediction", color = "green")
      })
    }
)
# "Target" value box: echoes the user's desired weight back to them.
# Fixed user-facing typo: "You Desired Weight" -> "Your Desired Weight".
observeEvent(
  input$weight_goal,
  output$Target <- renderValueBox({
    valueBox(value = tags$p("Your Desired Weight is ", input$weight_goal, " lbs", style = "font-size: 50%;"),
             "Target", color = "light-blue")
  })
)
# Display the user's name in the sidebar once a data source has been chosen.
# isolate() prevents re-rendering on every keystroke in the name field; the
# text only refreshes when one of the two source buttons fires.
observeEvent(
  c(input$use_uploaded, input$use_sample),
  output$text1 <- renderText({paste(isolate(input$name))})
)
}
# Run the application: bind the ui and server objects defined above.
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spread.R
\name{spread}
\alias{spread}
\title{Spread a key-value pair across multiple columns}
\usage{
spread(data, key, value, fill = NA, convert = FALSE, drop = TRUE, sep = NULL)
}
\arguments{
\item{data}{A data frame.}
\item{key, value}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to use
for \code{key} and \code{value}.}
\item{fill}{If set, missing values will be replaced with this value. Note
that there are two types of missingness in the input: explicit missing
values (i.e. \code{NA}), and implicit missings, rows that simply aren't
present. Both types of missing value will be replaced by \code{fill}.}
\item{convert}{If \code{TRUE}, \code{\link[=type.convert]{type.convert()}} with \code{as.is =
TRUE} will be run on each of the new columns. This is useful if the value
column was a mix of variables that was coerced to a string. If the class of
the value column was factor or date, note that will not be true of the new
columns that are produced, which are coerced to character before type
conversion.}
\item{drop}{If \code{FALSE}, will keep factor levels that don't appear in the
data, filling in missing combinations with \code{fill}.}
\item{sep}{If \code{NULL}, the column names will be taken from the values of
\code{key} variable. If non-\code{NULL}, the column names will be given
by \code{"<key_name><sep><key_value>"}.}
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}}
Development on \code{spread()} is complete, and for new code we recommend
switching to \code{pivot_wider()}, which is easier to use, more featureful, and
still under active development.
\code{df \%>\% spread(key, value)} is equivalent to
\code{df \%>\% pivot_wider(names_from = key, values_from = value)}
See more details in \code{vignette("pivot")}.
}
\examples{
stocks <- tibble(
time = as.Date("2009-01-01") + 0:9,
X = rnorm(10, 0, 1),
Y = rnorm(10, 0, 2),
Z = rnorm(10, 0, 4)
)
stocksm <- stocks \%>\% gather(stock, price, -time)
stocksm \%>\% spread(stock, price)
stocksm \%>\% spread(time, price)
# Spread and gather are complements
df <- tibble(x = c("a", "b"), y = c(3, 4), z = c(5, 6))
df \%>\%
spread(x, y) \%>\%
gather("x", "y", a:b, na.rm = TRUE)
# Use 'convert = TRUE' to produce variables of mixed type
df <- tibble(
row = rep(c(1, 51), each = 3),
var = rep(c("Sepal.Length", "Species", "Species_num"), 2),
value = c(5.1, "setosa", 1, 7.0, "versicolor", 2)
)
df \%>\% spread(var, value) \%>\% str()
df \%>\% spread(var, value, convert = TRUE) \%>\% str()
}
|
/man/spread.Rd
|
permissive
|
tidyverse/tidyr
|
R
| false
| true
| 2,713
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spread.R
\name{spread}
\alias{spread}
\title{Spread a key-value pair across multiple columns}
\usage{
spread(data, key, value, fill = NA, convert = FALSE, drop = TRUE, sep = NULL)
}
\arguments{
\item{data}{A data frame.}
\item{key, value}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to use
for \code{key} and \code{value}.}
\item{fill}{If set, missing values will be replaced with this value. Note
that there are two types of missingness in the input: explicit missing
values (i.e. \code{NA}), and implicit missings, rows that simply aren't
present. Both types of missing value will be replaced by \code{fill}.}
\item{convert}{If \code{TRUE}, \code{\link[=type.convert]{type.convert()}} with \code{as.is =
TRUE} will be run on each of the new columns. This is useful if the value
column was a mix of variables that was coerced to a string. If the class of
the value column was factor or date, note that will not be true of the new
columns that are produced, which are coerced to character before type
conversion.}
\item{drop}{If \code{FALSE}, will keep factor levels that don't appear in the
data, filling in missing combinations with \code{fill}.}
\item{sep}{If \code{NULL}, the column names will be taken from the values of
\code{key} variable. If non-\code{NULL}, the column names will be given
by \code{"<key_name><sep><key_value>"}.}
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}}
Development on \code{spread()} is complete, and for new code we recommend
switching to \code{pivot_wider()}, which is easier to use, more featureful, and
still under active development.
\code{df \%>\% spread(key, value)} is equivalent to
\code{df \%>\% pivot_wider(names_from = key, values_from = value)}
See more details in \code{vignette("pivot")}.
}
\examples{
stocks <- tibble(
time = as.Date("2009-01-01") + 0:9,
X = rnorm(10, 0, 1),
Y = rnorm(10, 0, 2),
Z = rnorm(10, 0, 4)
)
stocksm <- stocks \%>\% gather(stock, price, -time)
stocksm \%>\% spread(stock, price)
stocksm \%>\% spread(time, price)
# Spread and gather are complements
df <- tibble(x = c("a", "b"), y = c(3, 4), z = c(5, 6))
df \%>\%
spread(x, y) \%>\%
gather("x", "y", a:b, na.rm = TRUE)
# Use 'convert = TRUE' to produce variables of mixed type
df <- tibble(
row = rep(c(1, 51), each = 3),
var = rep(c("Sepal.Length", "Species", "Species_num"), 2),
value = c(5.1, "setosa", 1, 7.0, "versicolor", 2)
)
df \%>\% spread(var, value) \%>\% str()
df \%>\% spread(var, value, convert = TRUE) \%>\% str()
}
|
## Exploratory Data Analysis
## Course Project 1
## Plot 4
#
# Reads two days (2007-02-01 and 2007-02-02) of the household power
# consumption data and writes a 2x2 panel of line charts to plot4.png.
# The household_power_consumption.txt file must be in the working directory.
# Set language to English so automatically generated x-axis tic labels will be in English (Windows only)
Sys.setlocale("LC_TIME", "English")
# The two target days occupy 2880 rows starting after row 66637, so skip
# straight to them. Column names are recovered by reading just the header row.
header <- read.table("household_power_consumption.txt",
                     sep = ";", nrow = 1, header = TRUE)
data <- read.table("household_power_consumption.txt",  # Specify the source file name
                   sep = ";",                          # Source data semicolon-separated
                   skip = 66637, nrow = 2880,          # Select data range
                   col.names = colnames(header))       # Add column headers
# Combine Date + Time into one datetime column. Wrapped in as.POSIXct():
# strptime() returns POSIXlt, a list type that is awkward inside a data.frame.
data$Date <- as.POSIXct(strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S"))
# Make four line graphs and save to a png file
png(filename = "plot4.png", width = 480, height = 480)  # Create the png file
par(mfrow = c(2, 2))  # 2 x 2 panel layout, filled row-wise
# Top-left: global active power over time
plot(data$Date, data$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power")
# Top-right: voltage over time
plot(data$Date, data$Voltage,
     type = "l",
     xlab = "datetime",
     ylab = "Voltage")
# Bottom-left: the three sub-metering series overlaid, with legend
plot(data$Date, data$Sub_metering_1,
     type = "l",
     xlab = "",
     ylab = "Energy sub metering")
lines(data$Date, data$Sub_metering_2,
      col = "red")
lines(data$Date, data$Sub_metering_3,
      col = "blue")
legend("topright",
       legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       col = c("black","red","blue"),
       lwd = 1,
       bty = "n",
       cex = 0.95)
# Bottom-right: global reactive power over time
plot(data$Date, data$Global_reactive_power,
     type = "l",
     xlab = "datetime",
     ylab = "Global_reactive_power")
dev.off()  # Close the device so the file is written to disk
|
/figure/plot4.R
|
no_license
|
sdcmma/ExData_Plotting1
|
R
| false
| false
| 2,980
|
r
|
## Exploratory Data Analysis
## Course Project 1
## Plot 4
#
# Reads two days (2007-02-01 and 2007-02-02) of the household power
# consumption data and writes a 2x2 panel of line charts to plot4.png.
# The household_power_consumption.txt file must be in the working directory.
# Set language to English so automatically generated x-axis tic labels will be in English (Windows only)
Sys.setlocale("LC_TIME", "English")
# The two target days occupy 2880 rows starting after row 66637, so skip
# straight to them. Column names are recovered by reading just the header row.
header <- read.table("household_power_consumption.txt",
                     sep = ";", nrow = 1, header = TRUE)
data <- read.table("household_power_consumption.txt",  # Specify the source file name
                   sep = ";",                          # Source data semicolon-separated
                   skip = 66637, nrow = 2880,          # Select data range
                   col.names = colnames(header))       # Add column headers
# Combine Date + Time into one datetime column. Wrapped in as.POSIXct():
# strptime() returns POSIXlt, a list type that is awkward inside a data.frame.
data$Date <- as.POSIXct(strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S"))
# Make four line graphs and save to a png file
png(filename = "plot4.png", width = 480, height = 480)  # Create the png file
par(mfrow = c(2, 2))  # 2 x 2 panel layout, filled row-wise
# Top-left: global active power over time
plot(data$Date, data$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power")
# Top-right: voltage over time
plot(data$Date, data$Voltage,
     type = "l",
     xlab = "datetime",
     ylab = "Voltage")
# Bottom-left: the three sub-metering series overlaid, with legend
plot(data$Date, data$Sub_metering_1,
     type = "l",
     xlab = "",
     ylab = "Energy sub metering")
lines(data$Date, data$Sub_metering_2,
      col = "red")
lines(data$Date, data$Sub_metering_3,
      col = "blue")
legend("topright",
       legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       col = c("black","red","blue"),
       lwd = 1,
       bty = "n",
       cex = 0.95)
# Bottom-right: global reactive power over time
plot(data$Date, data$Global_reactive_power,
     type = "l",
     xlab = "datetime",
     ylab = "Global_reactive_power")
dev.off()  # Close the device so the file is written to disk
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/user_edit_module.R
\name{user_edit_module}
\alias{user_edit_module}
\title{user_edit_module}
\usage{
user_edit_module(
input,
output,
session,
modal_title,
user_to_edit,
open_modal_trigger,
existing_roles,
existing_users
)
}
\arguments{
\item{input}{Shiny server function input}
\item{output}{Shiny server function output}
\item{session}{Shiny server function session}
\item{modal_title}{the title for the modal}
\item{user_to_edit}{reactive - a one row data frame of the user to edit from the "app_users" table.}
\item{open_modal_trigger}{reactive - a trigger to open the modal}
\item{existing_roles}{reactive data frame of all roles for this app}
\item{existing_users}{reactive data frame of all users of this app. This is used to check that the user
does not add a user that already exists.}
}
\description{
user_edit_module
}
|
/man/user_edit_module.Rd
|
permissive
|
axionbio/polished
|
R
| false
| true
| 931
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/user_edit_module.R
\name{user_edit_module}
\alias{user_edit_module}
\title{user_edit_module}
\usage{
user_edit_module(
input,
output,
session,
modal_title,
user_to_edit,
open_modal_trigger,
existing_roles,
existing_users
)
}
\arguments{
\item{input}{Shiny server function input}
\item{output}{Shiny server function output}
\item{session}{Shiny server function session}
\item{modal_title}{the title for the modal}
\item{user_to_edit}{reactive - a one row data frame of the user to edit from the "app_users" table.}
\item{open_modal_trigger}{reactive - a trigger to open the modal}
\item{existing_roles}{reactive data frame of all roles for this app}
\item{existing_users}{reactive data frame of all users of this app. This is used to check that the user
does not add a user that already exists.}
}
\description{
user_edit_module
}
|
# testthat suite for dplyr's group_by() and grouping semantics across
# backends (data.frame, data.table, SQLite, PostgreSQL).
# NOTE(review): depends on test helpers temp_srcs()/temp_load() and internals
# such as dfloc()/grouped_df() defined elsewhere in the dplyr package; this
# file is not runnable standalone.
context("Group by")
# Shared fixture: 30 rows with two crossed grouping columns, loaded into
# every available backend.
df <- data.frame(x = rep(1:3, each = 10), y = rep(1:6, each = 5))
srcs <- temp_srcs(c("df", "dt", "sqlite", "postgres"))
tbls <- temp_load(srcs, df)
test_that("group_by with add = TRUE adds groups", {
  add_groups1 <- function(tbl) groups(group_by(tbl, x, y, add = TRUE))
  add_groups2 <- function(tbl) groups(group_by(group_by(tbl, x, add = TRUE), y,
    add = TRUE))
  expect_equal(add_groups1(tbls$df), list(quote(x), quote(y)))
  expect_equal(add_groups2(tbls$df), list(quote(x), quote(y)))
  expect_equal(add_groups1(tbls$dt), list(quote(x), quote(y)))
  expect_equal(add_groups2(tbls$dt), list(quote(x), quote(y)))
  expect_equal(add_groups1(tbls$sqlite), list(quote(x), quote(y)))
  expect_equal(add_groups2(tbls$sqlite), list(quote(x), quote(y)))
})
test_that("collect, collapse and compute preserve grouping", {
  g <- group_by(tbls$sqlite, x, y)
  expect_equal(groups(compute(g)), groups(g))
  expect_equal(groups(collapse(g)), groups(g))
  expect_equal(groups(collect(g)), groups(g))
})
test_that("joins preserve grouping", {
  for (tbl in tbls) {
    g <- group_by(tbl, x)
    expect_equal(groups(inner_join(g, g, by = c("x", "y"))), groups(g))
    expect_equal(groups(left_join(g, g, by = c("x", "y"))), groups(g))
    expect_equal(groups(semi_join(g, g, by = c("x", "y"))), groups(g))
    expect_equal(groups(anti_join(g, g, by = c("x", "y"))), groups(g))
  }
})
test_that("constructors drops groups", {
  dt <- lahman_dt() %>% tbl("Batting") %>% group_by(playerID)
  df <- lahman_df() %>% tbl("Batting") %>% group_by(playerID)
  expect_equal(groups(tbl_dt(dt)), NULL)
  expect_equal(groups(tbl_df(df)), NULL)
})
test_that("grouping by constant adds column (#410)", {
  grouped <- group_by(mtcars, "cyl") %>% summarise(foo = n())
  expect_equal(names(grouped), c('"cyl"', "foo"))
  expect_equal(nrow(grouped), 1L)
})
# Test full range of variable types --------------------------------------------
# One column per atomic type (logical, integer, Date, factor, double,
# POSIXct, character) to check that grouping round-trips each of them.
df_var <- data.frame(
  l = c(T, F),
  i = 1:2,
  d = Sys.Date() + 1:2,
  f = factor(letters[1:2]),
  num = 1:2 + 0.5,
  t = Sys.time() + 1:2,
  c = letters[1:2],
  stringsAsFactors = FALSE
)
srcs <- temp_srcs(c("df", "dt"))
var_tbls <- temp_load(srcs, df_var)
test_that("local group_by preserves variable types", {
  for(var in names(df_var)) {
    expected <- data.frame(unique(df_var[[var]]), n = 1L,
      stringsAsFactors = FALSE)
    names(expected)[1] <- var
    for(tbl in names(var_tbls)) {
      grouped <- group_by_(var_tbls[[tbl]], var)
      summarised <- summarise(grouped, n = n())
      expect_true(all.equal(summarised, expected),
        label = paste0("summarised_", tbl, "_", var))
    }
  }
})
test_that("mutate does not loose variables (#144)",{
  df <- tbl_df(data.frame(a = rep(1:4, 2), b = rep(1:4, each = 2), x = runif(8)))
  by_ab <- group_by(df, a, b)
  by_a <- summarise( by_ab, x = sum(x))
  by_a_quartile <- group_by(by_a, quartile = ntile(x, 4))
  expect_equal(names(by_a_quartile), c("a", "b", "x", "quartile" ))
})
test_that("group_by uses shallow copy", {
  m1 <- group_by(mtcars, cyl)
  expect_true(is.null(groups(mtcars)))
  expect_equal(dfloc(mtcars), dfloc(m1))
})
test_that("FactorVisitor handles NA. #183", {
  g <- group_by(MASS::survey, M.I)
  expect_equal(g$M.I, MASS::survey$M.I)
})
test_that("group_by orders by groups. #242", {
  df <- data.frame(a = sample(1:10, 100, replace = TRUE)) %>% group_by(a)
  expect_equal( attr(df, "labels")$a, 1:10 )
  df <- data.frame(a = sample(letters[1:10], 100, replace = TRUE), stringsAsFactors = FALSE) %>% group_by(a)
  expect_equal(attr(df, "labels")$a, letters[1:10] )
  df <- data.frame(a = sample(sqrt(1:10), 100, replace = TRUE)) %>% group_by(a)
  expect_equal(attr(df, "labels")$a, sqrt(1:10))
})
test_that("group_by uses the white list", {
  # POSIXlt columns are not on dplyr's supported-type white list.
  df <- data.frame( times = 1:5 )
  df$times <- as.POSIXlt( seq.Date( Sys.Date(), length.out = 5, by = "day" ) )
  expect_error(group_by(df, times))
})
test_that("group_by fails when lists are used as grouping variables (#276)",{
  df <- data.frame(x = 1:3)
  df$y <- list(1:2, 1:3, 1:4)
  expect_error(group_by(df,y))
})
# Data tables ------------------------------------------------------------------
test_that("original data table not modified by grouping", {
  dt <- data.table(x = 5:1)
  dt2 <- group_by(dt, x)
  dt2$y <- 1:5
  expect_equal(dt$x, 5:1)
  expect_equal(dt$y, NULL)
})
test_that("select(group_by(.)) implicitely adds grouping variables (#170)", {
  res <- mtcars %>% group_by(vs) %>% select(mpg)
  expect_equal(names(res), c("vs", "mpg"))
  res <- mtcars %>% tbl_dt() %>% group_by(vs) %>% select(mpg)
  expect_equal(names(res), c("vs", "mpg"))
})
test_that("grouped_df errors on empty vars (#398)",{
  # Stripping the grouping attributes must make downstream verbs fail loudly.
  m <- mtcars %>% group_by(cyl)
  attr(m, "vars") <- NULL
  attr(m, "indices") <- NULL
  expect_error( m %>% do(mpg = mean(.$mpg)) )
})
test_that("group_by only creates one group for NA (#401)", {
  x <- as.numeric(c(NA,NA,NA,10:1,10:1))
  w <- c(20,30,40,1:10,1:10)*10
  n_distinct(x) # 11 OK
  res <- data.frame(x=x,w=w) %>% group_by(x) %>% summarise(n=n())
  expect_equal(nrow(res), 11L)
})
test_that("data.table invalid .selfref issue (#475)", {
  dt <- data.table(x=1:5, y=6:10)
  expect_that((dt %>% group_by(x))[, z := 2L], not(gives_warning()))
  dt <- data.table(x=1:5, y=6:10)
  expect_that((dt %>% group_by(x) %>% summarise(z = y^2))[, foo := 1L], not(gives_warning()))
})
test_that("there can be 0 groups (#486)", {
  data <- data.frame(a = numeric(0), g = character(0)) %>% group_by(g)
  expect_equal(length(data$a), 0L)
  expect_equal(length(data$g), 0L)
  expect_equal(attr(data, "group_sizes"), integer(0))
})
test_that("group_by works with zero-row data frames (#486)", {
  # Every verb should keep (or predictably drop) grouping on empty input.
  dfg <- group_by(data.frame(a = numeric(0), b = numeric(0), g = character(0)), g)
  expect_equal(dim(dfg), c(0, 3))
  expect_equal(groups(dfg), list(quote(g)))
  expect_equal(group_size(dfg), integer(0))
  x <- summarise(dfg, n = n())
  expect_equal(dim(x), c(0, 2))
  expect_equal(groups(x), NULL)
  x <- mutate(dfg, c = b + 1)
  expect_equal(dim(x), c(0, 4))
  expect_equal(groups(x), list(quote(g)))
  expect_equal(group_size(x), integer(0))
  x <- filter(dfg, a == 100)
  expect_equal(dim(x), c(0, 3))
  expect_equal(groups(x), list(quote(g)))
  expect_equal(group_size(x), integer(0))
  x <- arrange(dfg, a, g)
  expect_equal(dim(x), c(0, 3))
  expect_equal(groups(x), list(quote(g)))
  expect_equal(group_size(x), integer(0))
  x <- select(dfg, a) # Only select 'a' column; should result in 'g' and 'a'
  expect_equal(dim(x), c(0, 2))
  expect_equal(groups(x), list(quote(g)))
  expect_equal(group_size(x), integer(0))
})
test_that("grouped_df requires a list of symbols (#665)", {
  features <- list("feat1", "feat2", "feat3")
  expect_error( grouped_df(data.frame(feat1=1, feat2=2, feat3=3), features) )
})
test_that("group_by gives meaningful message with unknow column (#716)",{
  expect_error( group_by(iris, wrong_name_of_variable), "unknown column" )
})
test_that("[ on grouped_df preserves grouping if subset includes grouping vars", {
  df <- data_frame(x = 1:5, ` ` = 6:10)
  by_x <- df %>% group_by(x)
  expect_equal(by_x %>% groups(), by_x %>% `[`(1:2) %>% groups)
  # non-syntactic name
  by_ns <- df %>% group_by(` `)
  expect_equal(by_ns %>% groups(), by_ns %>% `[`(1:2) %>% groups)
})
test_that("[ on grouped_df drops grouping if subset doesn't include grouping vars", {
  by_cyl <- mtcars %>% group_by(cyl)
  no_cyl <- by_cyl %>% `[`(c(1, 3))
  expect_equal(groups(no_cyl), NULL)
  expect_is(no_cyl, "tbl_df")
})
|
/tests/testthat/test-group-by.r
|
no_license
|
quzengyu/dplyr
|
R
| false
| false
| 7,561
|
r
|
# testthat suite for dplyr's group_by() and grouping semantics across
# backends (data.frame, data.table, SQLite, PostgreSQL).
# NOTE(review): this is a byte-identical second copy of the same test file
# found earlier in this dump. It depends on package-internal helpers
# (temp_srcs, temp_load, dfloc, grouped_df) and is not runnable standalone.
context("Group by")
df <- data.frame(x = rep(1:3, each = 10), y = rep(1:6, each = 5))
srcs <- temp_srcs(c("df", "dt", "sqlite", "postgres"))
tbls <- temp_load(srcs, df)
test_that("group_by with add = TRUE adds groups", {
  add_groups1 <- function(tbl) groups(group_by(tbl, x, y, add = TRUE))
  add_groups2 <- function(tbl) groups(group_by(group_by(tbl, x, add = TRUE), y,
    add = TRUE))
  expect_equal(add_groups1(tbls$df), list(quote(x), quote(y)))
  expect_equal(add_groups2(tbls$df), list(quote(x), quote(y)))
  expect_equal(add_groups1(tbls$dt), list(quote(x), quote(y)))
  expect_equal(add_groups2(tbls$dt), list(quote(x), quote(y)))
  expect_equal(add_groups1(tbls$sqlite), list(quote(x), quote(y)))
  expect_equal(add_groups2(tbls$sqlite), list(quote(x), quote(y)))
})
test_that("collect, collapse and compute preserve grouping", {
  g <- group_by(tbls$sqlite, x, y)
  expect_equal(groups(compute(g)), groups(g))
  expect_equal(groups(collapse(g)), groups(g))
  expect_equal(groups(collect(g)), groups(g))
})
test_that("joins preserve grouping", {
  for (tbl in tbls) {
    g <- group_by(tbl, x)
    expect_equal(groups(inner_join(g, g, by = c("x", "y"))), groups(g))
    expect_equal(groups(left_join(g, g, by = c("x", "y"))), groups(g))
    expect_equal(groups(semi_join(g, g, by = c("x", "y"))), groups(g))
    expect_equal(groups(anti_join(g, g, by = c("x", "y"))), groups(g))
  }
})
test_that("constructors drops groups", {
  dt <- lahman_dt() %>% tbl("Batting") %>% group_by(playerID)
  df <- lahman_df() %>% tbl("Batting") %>% group_by(playerID)
  expect_equal(groups(tbl_dt(dt)), NULL)
  expect_equal(groups(tbl_df(df)), NULL)
})
test_that("grouping by constant adds column (#410)", {
  grouped <- group_by(mtcars, "cyl") %>% summarise(foo = n())
  expect_equal(names(grouped), c('"cyl"', "foo"))
  expect_equal(nrow(grouped), 1L)
})
# Test full range of variable types --------------------------------------------
df_var <- data.frame(
  l = c(T, F),
  i = 1:2,
  d = Sys.Date() + 1:2,
  f = factor(letters[1:2]),
  num = 1:2 + 0.5,
  t = Sys.time() + 1:2,
  c = letters[1:2],
  stringsAsFactors = FALSE
)
srcs <- temp_srcs(c("df", "dt"))
var_tbls <- temp_load(srcs, df_var)
test_that("local group_by preserves variable types", {
  for(var in names(df_var)) {
    expected <- data.frame(unique(df_var[[var]]), n = 1L,
      stringsAsFactors = FALSE)
    names(expected)[1] <- var
    for(tbl in names(var_tbls)) {
      grouped <- group_by_(var_tbls[[tbl]], var)
      summarised <- summarise(grouped, n = n())
      expect_true(all.equal(summarised, expected),
        label = paste0("summarised_", tbl, "_", var))
    }
  }
})
test_that("mutate does not loose variables (#144)",{
  df <- tbl_df(data.frame(a = rep(1:4, 2), b = rep(1:4, each = 2), x = runif(8)))
  by_ab <- group_by(df, a, b)
  by_a <- summarise( by_ab, x = sum(x))
  by_a_quartile <- group_by(by_a, quartile = ntile(x, 4))
  expect_equal(names(by_a_quartile), c("a", "b", "x", "quartile" ))
})
test_that("group_by uses shallow copy", {
  m1 <- group_by(mtcars, cyl)
  expect_true(is.null(groups(mtcars)))
  expect_equal(dfloc(mtcars), dfloc(m1))
})
test_that("FactorVisitor handles NA. #183", {
  g <- group_by(MASS::survey, M.I)
  expect_equal(g$M.I, MASS::survey$M.I)
})
test_that("group_by orders by groups. #242", {
  df <- data.frame(a = sample(1:10, 100, replace = TRUE)) %>% group_by(a)
  expect_equal( attr(df, "labels")$a, 1:10 )
  df <- data.frame(a = sample(letters[1:10], 100, replace = TRUE), stringsAsFactors = FALSE) %>% group_by(a)
  expect_equal(attr(df, "labels")$a, letters[1:10] )
  df <- data.frame(a = sample(sqrt(1:10), 100, replace = TRUE)) %>% group_by(a)
  expect_equal(attr(df, "labels")$a, sqrt(1:10))
})
test_that("group_by uses the white list", {
  df <- data.frame( times = 1:5 )
  df$times <- as.POSIXlt( seq.Date( Sys.Date(), length.out = 5, by = "day" ) )
  expect_error(group_by(df, times))
})
test_that("group_by fails when lists are used as grouping variables (#276)",{
  df <- data.frame(x = 1:3)
  df$y <- list(1:2, 1:3, 1:4)
  expect_error(group_by(df,y))
})
# Data tables ------------------------------------------------------------------
test_that("original data table not modified by grouping", {
  dt <- data.table(x = 5:1)
  dt2 <- group_by(dt, x)
  dt2$y <- 1:5
  expect_equal(dt$x, 5:1)
  expect_equal(dt$y, NULL)
})
test_that("select(group_by(.)) implicitely adds grouping variables (#170)", {
  res <- mtcars %>% group_by(vs) %>% select(mpg)
  expect_equal(names(res), c("vs", "mpg"))
  res <- mtcars %>% tbl_dt() %>% group_by(vs) %>% select(mpg)
  expect_equal(names(res), c("vs", "mpg"))
})
test_that("grouped_df errors on empty vars (#398)",{
  m <- mtcars %>% group_by(cyl)
  attr(m, "vars") <- NULL
  attr(m, "indices") <- NULL
  expect_error( m %>% do(mpg = mean(.$mpg)) )
})
test_that("group_by only creates one group for NA (#401)", {
  x <- as.numeric(c(NA,NA,NA,10:1,10:1))
  w <- c(20,30,40,1:10,1:10)*10
  n_distinct(x) # 11 OK
  res <- data.frame(x=x,w=w) %>% group_by(x) %>% summarise(n=n())
  expect_equal(nrow(res), 11L)
})
test_that("data.table invalid .selfref issue (#475)", {
  dt <- data.table(x=1:5, y=6:10)
  expect_that((dt %>% group_by(x))[, z := 2L], not(gives_warning()))
  dt <- data.table(x=1:5, y=6:10)
  expect_that((dt %>% group_by(x) %>% summarise(z = y^2))[, foo := 1L], not(gives_warning()))
})
test_that("there can be 0 groups (#486)", {
  data <- data.frame(a = numeric(0), g = character(0)) %>% group_by(g)
  expect_equal(length(data$a), 0L)
  expect_equal(length(data$g), 0L)
  expect_equal(attr(data, "group_sizes"), integer(0))
})
test_that("group_by works with zero-row data frames (#486)", {
  dfg <- group_by(data.frame(a = numeric(0), b = numeric(0), g = character(0)), g)
  expect_equal(dim(dfg), c(0, 3))
  expect_equal(groups(dfg), list(quote(g)))
  expect_equal(group_size(dfg), integer(0))
  x <- summarise(dfg, n = n())
  expect_equal(dim(x), c(0, 2))
  expect_equal(groups(x), NULL)
  x <- mutate(dfg, c = b + 1)
  expect_equal(dim(x), c(0, 4))
  expect_equal(groups(x), list(quote(g)))
  expect_equal(group_size(x), integer(0))
  x <- filter(dfg, a == 100)
  expect_equal(dim(x), c(0, 3))
  expect_equal(groups(x), list(quote(g)))
  expect_equal(group_size(x), integer(0))
  x <- arrange(dfg, a, g)
  expect_equal(dim(x), c(0, 3))
  expect_equal(groups(x), list(quote(g)))
  expect_equal(group_size(x), integer(0))
  x <- select(dfg, a) # Only select 'a' column; should result in 'g' and 'a'
  expect_equal(dim(x), c(0, 2))
  expect_equal(groups(x), list(quote(g)))
  expect_equal(group_size(x), integer(0))
})
test_that("grouped_df requires a list of symbols (#665)", {
  features <- list("feat1", "feat2", "feat3")
  expect_error( grouped_df(data.frame(feat1=1, feat2=2, feat3=3), features) )
})
test_that("group_by gives meaningful message with unknow column (#716)",{
  expect_error( group_by(iris, wrong_name_of_variable), "unknown column" )
})
test_that("[ on grouped_df preserves grouping if subset includes grouping vars", {
  df <- data_frame(x = 1:5, ` ` = 6:10)
  by_x <- df %>% group_by(x)
  expect_equal(by_x %>% groups(), by_x %>% `[`(1:2) %>% groups)
  # non-syntactic name
  by_ns <- df %>% group_by(` `)
  expect_equal(by_ns %>% groups(), by_ns %>% `[`(1:2) %>% groups)
})
test_that("[ on grouped_df drops grouping if subset doesn't include grouping vars", {
  by_cyl <- mtcars %>% group_by(cyl)
  no_cyl <- by_cyl %>% `[`(c(1, 3))
  expect_equal(groups(no_cyl), NULL)
  expect_is(no_cyl, "tbl_df")
})
|
#' Create a Framework7 page
#'
#' Build a Framework7 page
#'
#' @param ... Slot for shinyMobile skeleton elements: \link{f7Appbar}, \link{f7SingleLayout},
#' \link{f7TabLayout}, \link{f7SplitLayout}.
#' @param init App configuration. See \link{f7Init}.
#' @param title Page title.
#' @param preloader Whether to display a preloader before the app starts.
#' FALSE by default.
#' @param loading_duration Preloader duration.
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
f7Page <- function(..., init = f7Init(skin = "auto", theme = "light"), title = NULL, preloader = FALSE,
                   loading_duration = 3){
  # Builds the full <html> document: head (viewport + PWA/splash meta tags),
  # body (optional preloader JS, then the app container), JS deps, and the
  # f7Init configuration tag.
  shiny::tags$html(
    # Head
    shiny::tags$head(
      shiny::tags$meta(charset = "utf-8"),
      shiny::tags$meta(
        name = "viewport",
        content = "
      width=device-width,
      initial-scale=1,
      maximum-scale=1,
      minimum-scale=1,
      user-scalable=no,
      viewport-fit=cover"
      ),
      # PWA (progressive web app) meta properties
      shiny::tags$meta(name = "apple-mobile-web-app-capable", content = "yes"),
      shiny::tags$meta(name = "apple-mobile-web-app-title", content = title),
      shiny::tags$meta(name = "apple-mobile-web-app-status-bar-style", content = "black-translucent"),
      shiny::tags$link(rel = "apple-touch-icon", href = "icons/apple-touch-icon.png"),
      shiny::tags$link(rel = "icon", href = "icons/favicon.png"),
      shiny::tags$link(rel = "manifest", href = "manifest.json"),
      # Splash screen images for iOS; they must live in the app's www folder.
      # One <link> per device resolution, selected via the media query.
      shiny::tags$link(href = "splashscreens/iphone5_splash.png", media = "(device-width: 320px) and (device-height: 568px) and (-webkit-device-pixel-ratio: 2)", rel = "apple-touch-startup-image"),
      shiny::tags$link(href = "splashscreens/iphone6_splash.png", media = "(device-width: 375px) and (device-height: 667px) and (-webkit-device-pixel-ratio: 2)", rel = "apple-touch-startup-image"),
      shiny::tags$link(href = "splashscreens/iphoneplus_splash.png", media = "(device-width: 621px) and (device-height: 1104px) and (-webkit-device-pixel-ratio: 3)", rel = "apple-touch-startup-image"),
      shiny::tags$link(href = "splashscreens/iphonex_splash.png", media = "(device-width: 375px) and (device-height: 812px) and (-webkit-device-pixel-ratio: 3)", rel = "apple-touch-startup-image"),
      shiny::tags$link(href = "splashscreens/iphonexr_splash.png", media = "(device-width: 414px) and (device-height: 896px) and (-webkit-device-pixel-ratio: 2)", rel = "apple-touch-startup-image"),
      shiny::tags$link(href = "splashscreens/iphonexsmax_splash.png", media = "(device-width: 414px) and (device-height: 896px) and (-webkit-device-pixel-ratio: 3)", rel = "apple-touch-startup-image"),
      shiny::tags$link(href = "splashscreens/ipad_splash.png", media = "(device-width: 768px) and (device-height: 1024px) and (-webkit-device-pixel-ratio: 2)", rel = "apple-touch-startup-image"),
      shiny::tags$link(href = "splashscreens/ipadpro1_splash.png", media = "(device-width: 834px) and (device-height: 1112px) and (-webkit-device-pixel-ratio: 2)", rel = "apple-touch-startup-image"),
      shiny::tags$link(href = "splashscreens/ipadpro3_splash.png", media = "(device-width: 834px) and (device-height: 1194px) and (-webkit-device-pixel-ratio: 2)", rel = "apple-touch-startup-image"),
      shiny::tags$link(href = "splashscreens/ipadpro2_splash.png", media = "(device-width: 1024px) and (device-height: 1366px) and (-webkit-device-pixel-ratio: 2)", rel = "apple-touch-startup-image"),
      shiny::tags$title(title)
    ),
    # Body
    addCSSDeps(
      shiny::tags$body(
        # preloader: inline JS shown for loading_duration seconds on startup
        onLoad = if (preloader) {
          duration <- loading_duration * 1000
          paste0(
            "$(function() {
             // Preloader
             app.dialog.preloader();
             setTimeout(function () {
              app.dialog.close();
             }, ", duration, ");
            });
           "
          )
        },
        shiny::tags$div(
          id = "app",
          ...
        )
      )
    ),
    # Framework7's JS does not work when placed in the head (unlike the
    # shinydashboard / bs4Dash convention), so the scripts are appended here.
    addJSDeps(),
    init
  )
}
#' Create a Framework7 single layout
#'
#' Build a Framework7 single layout
#'
#' @param ... Content.
#' @param navbar Slot for \link{f7Navbar}.
#' @param toolbar Slot for \link{f7Toolbar}.
#' @param panels Slot for \link{f7Panel}.
#' Wrap in \link[shiny]{tagList} if multiple panels.
#' @param appbar Slot for \link{f7Appbar}.
#'
#' @examples
#' if(interactive()){
#' library(shiny)
#' library(shinyMobile)
#' shiny::shinyApp(
#' ui = f7Page(
#' title = "My app",
#' f7SingleLayout(
#' navbar = f7Navbar(
#' title = "Single Layout",
#' hairline = FALSE,
#' shadow = TRUE
#' ),
#' toolbar = f7Toolbar(
#' position = "bottom",
#' f7Link(label = "Link 1", src = "https://www.google.com"),
#' f7Link(label = "Link 2", src = "https://www.google.com", external = TRUE)
#' ),
#' # main content
#' f7Shadow(
#' intensity = 10,
#' hover = TRUE,
#' f7Card(
#' title = "Card header",
#' f7Slider("obs", "Number of observations", 0, 1000, 500),
#' plotOutput("distPlot"),
#' footer = tagList(
#' f7Button(color = "blue", label = "My button", src = "https://www.google.com"),
#' f7Badge("Badge", color = "green")
#' )
#' )
#' )
#' )
#' ),
#' server = function(input, output) {
#' output$distPlot <- renderPlot({
#' dist <- rnorm(input$obs)
#' hist(dist)
#' })
#' }
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
f7SingleLayout <- function(..., navbar, toolbar = NULL,
                           panels = NULL, appbar = NULL) {
  # Scrollable content area that receives the caller's UI elements.
  page_content <- shiny::tags$div(
    class = "page-content",
    style = "background-color: gainsboro;",
    ...
  )
  # A single Framework7 "page": navbar on top, optional toolbar, then content.
  page <- shiny::tags$div(
    class = "page",
    navbar,
    toolbar,
    page_content
  )
  # Assemble the layout: optional app bar, optional side panels, main view.
  shiny::tagList(
    appbar,
    panels,
    shiny::tags$div(class = "view view-main", page)
  )
}
#' Create a Framework7 page with tab layout
#'
#' Build a Framework7 page with tab layout
#'
#' @param ... Slot for \link{f7Tabs}.
#' @param navbar Slot for \link{f7Navbar}.
#' @param panels Slot for \link{f7Panel}.
#' Wrap in \link[shiny]{tagList} if multiple panels.
#' @param appbar Slot for \link{f7Appbar}.
#'
#' @examples
#' if(interactive()){
#' library(shiny)
#' library(shinyMobile)
#' library(shinyWidgets)
#'
#' shiny::shinyApp(
#' ui = f7Page(
#' title = "My app",
#' init = f7Init(skin = "md", theme = "light"),
#' f7TabLayout(
#' tags$head(
#' tags$script(
#' "$(function(){
#' $('#tapHold').on('taphold', function () {
#' app.dialog.alert('Tap hold fired!');
#' });
#' });
#' "
#' )
#' ),
#' panels = tagList(
#' f7Panel(title = "Left Panel", side = "left", theme = "light", "Blabla", effect = "cover"),
#' f7Panel(title = "Right Panel", side = "right", theme = "dark", "Blabla", effect = "cover")
#' ),
#' navbar = f7Navbar(
#' title = "Tabs",
#' hairline = FALSE,
#' shadow = TRUE,
#' left_panel = TRUE,
#' right_panel = TRUE
#' ),
#' f7Tabs(
#' animated = FALSE,
#' swipeable = TRUE,
#' f7Tab(
#' tabName = "Tab 1",
#' icon = f7Icon("email"),
#' active = TRUE,
#' f7Shadow(
#' intensity = 10,
#' hover = TRUE,
#' f7Card(
#' title = "Card header",
#' f7Stepper(
#' "obs1",
#' "Number of observations",
#' min = 0,
#' max = 1000,
#' value = 500,
#' step = 100
#' ),
#' plotOutput("distPlot1"),
#' footer = tagList(
#' f7Button(inputId = "tapHold", label = "My button"),
#' f7Badge("Badge", color = "green")
#' )
#' )
#' )
#' ),
#' f7Tab(
#' tabName = "Tab 2",
#' icon = f7Icon("today"),
#' active = FALSE,
#' f7Shadow(
#' intensity = 10,
#' hover = TRUE,
#' f7Card(
#' title = "Card header",
#' f7Select(
#' inputId = "obs2",
#' label = "Distribution type:",
#' choices = c(
#' "Normal" = "norm",
#' "Uniform" = "unif",
#' "Log-normal" = "lnorm",
#' "Exponential" = "exp"
#' )
#' ),
#' plotOutput("distPlot2"),
#' footer = tagList(
#' f7Button(label = "My button", src = "https://www.google.com"),
#' f7Badge("Badge", color = "orange")
#' )
#' )
#' )
#' ),
#' f7Tab(
#' tabName = "Tab 3",
#' icon = f7Icon("cloud_upload"),
#' active = FALSE,
#' f7Shadow(
#' intensity = 10,
#' hover = TRUE,
#' f7Card(
#' title = "Card header",
#' f7SmartSelect(
#' inputId = "variable",
#' label = "Variables to show:",
#' c("Cylinders" = "cyl",
#' "Transmission" = "am",
#' "Gears" = "gear"),
#' multiple = TRUE,
#' selected = "cyl"
#' ),
#' tableOutput("data"),
#' footer = tagList(
#' f7Button(label = "My button", src = "https://www.google.com"),
#' f7Badge("Badge", color = "green")
#' )
#' )
#' )
#' )
#' )
#' )
#' ),
#' server = function(input, output) {
#' output$distPlot1 <- renderPlot({
#' dist <- rnorm(input$obs1)
#' hist(dist)
#' })
#'
#' output$distPlot2 <- renderPlot({
#' dist <- switch(
#' input$obs2,
#' norm = rnorm,
#' unif = runif,
#' lnorm = rlnorm,
#' exp = rexp,
#' rnorm
#' )
#'
#' hist(dist(500))
#' })
#'
#' output$data <- renderTable({
#' mtcars[, c("mpg", input$variable), drop = FALSE]
#' }, rownames = TRUE)
#' }
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
f7TabLayout <- function(..., navbar, panels = NULL, appbar = NULL) {
  # The "page" wrapper is required for tabs to swipe properly (not mentioned
  # in the Framework7 docs) and for the dark mode to apply correctly.
  # The navbar sits on top; the f7Tabs passed via ... generate their own
  # toolbar, so none is added here.
  page <- shiny::tags$div(
    class = "page",
    navbar,
    ...
  )
  # Assemble the layout: optional app bar, optional side panels, main view.
  shiny::tagList(
    appbar,
    panels,
    shiny::tags$div(class = "view view-main", page)
  )
}
#' Create a Framework7 split layout
#'
#' This is a modified version of the \link{f7SingleLayout}.
#' It is intended to be used with tablets.
#'
#' @param ... Content.
#' @param navbar Slot for \link{f7Navbar}.
#' @param sidebar Slot for \link{f7Panel}. Particularly we expect the following code:
#' \code{f7Panel(title = "Sidebar", side = "left", theme = "light", "Blabla", style = "reveal")}
#' @param toolbar Slot for \link{f7Toolbar}.
#' @param panels Slot for \link{f7Panel}. Expect only a right panel, for instance:
#' \code{f7Panel(title = "Left Panel", side = "right", theme = "light", "Blabla", style = "cover")}
#' @param appbar Slot for \link{f7Appbar}.
#'
#' @examples
#' if(interactive()){
#' library(shiny)
#' library(shinyMobile)
#' shiny::shinyApp(
#' ui = f7Page(
#' title = "My app",
#' f7SplitLayout(
#' sidebar = f7Panel(
#' inputId = "sidebar",
#' title = "Sidebar",
#' side = "left",
#' theme = "light",
#' f7PanelMenu(
#' id = "menu",
#' f7PanelItem(tabName = "tab1", title = "Tab 1", icon = f7Icon("email"), active = TRUE),
#' f7PanelItem(tabName = "tab2", title = "Tab 2", icon = f7Icon("home"))
#' ),
#' effect = "reveal"
#' ),
#' navbar = f7Navbar(
#' title = "Split Layout",
#' hairline = FALSE,
#' shadow = TRUE
#' ),
#' toolbar = f7Toolbar(
#' position = "bottom",
#' f7Link(label = "Link 1", src = "https://www.google.com"),
#' f7Link(label = "Link 2", src = "https://www.google.com", external = TRUE)
#' ),
#' # main content
#' f7Items(
#' f7Item(
#' tabName = "tab1",
#' f7Slider("obs", "Number of observations:",
#' min = 0, max = 1000, value = 500
#' ),
#' plotOutput("distPlot")
#' ),
#' f7Item(tabName = "tab2", "Tab 2 content")
#' )
#' )
#' ),
#' server = function(input, output) {
#'
#' observe({
#' print(input$menu)
#' })
#'
#' output$distPlot <- renderPlot({
#' dist <- rnorm(input$obs)
#' hist(dist)
#' })
#' }
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#' @export
f7SplitLayout <- function(..., navbar, sidebar, toolbar = NULL,
                          panels = NULL, appbar = NULL) {
  # Tablet-oriented layout: an always-visible left sidebar panel plus the
  # usual single-layout skeleton, held together by injected CSS/JS.
  # Wrap the main content and add left/right margins so it clears the sidebar.
  items <- shiny::div(...) %>% f7Margin(side = "left") %>% f7Margin(side = "right")
  # Force the sidebar panel open by default.
  # NOTE(review): sidebar[[2]] assumes the f7Panel tag of interest is the
  # second element of the object returned by f7Panel — confirm against
  # f7Panel's structure if that function changes.
  sidebar <- shiny::tagAppendAttributes(sidebar[[2]], class = "panel-in")
  # Give the sidebar's inner view a known id so the JavaScript below can
  # exclude it when selecting the other views.
  sidebar$children[[1]]$attribs$id <- "f7-sidebar-view"
  # Reuse the single layout, placing the sidebar ahead of any other panels.
  splitSkeleton <- f7SingleLayout(
    items,
    navbar = navbar,
    toolbar = toolbar,
    panels = shiny::tagList(
      sidebar,
      panels
    ),
    appbar = appbar
  )
  # CSS tweaks for the permanently visible left panel (injected once).
  splitTemplateCSS <- shiny::singleton(
    shiny::tags$style(
      '/* Left Panel right border when it is visible by breakpoint */
      .panel-left.panel-visible-by-breakpoint:before {
        position: absolute;
        right: 0;
        top: 0;
        height: 100%;
        width: 1px;
        background: rgba(0,0,0,0.1);
        content: "";
        z-index: 6000;
      }
      /* Hide navbar link which opens left panel when it is visible by breakpoint */
      .panel-left.panel-visible-by-breakpoint ~ .view .navbar .panel-open[data-panel="left"] {
        display: none;
      }
      /*
      Extra borders for main view and left panel for iOS theme when it behaves as panel (before breakpoint size)
      */
      .ios .panel-left:not(.panel-visible-by-breakpoint).panel-active ~ .view-main:before,
      .ios .panel-left:not(.panel-visible-by-breakpoint).panel-closing ~ .view-main:before {
        position: absolute;
        left: 0;
        top: 0;
        height: 100%;
        width: 1px;
        background: rgba(0,0,0,0.1);
        content: "";
        z-index: 6000;
      }
      '
    )
  )
  # JS that pins the sidebar open and shifts every other view right by the
  # sidebar width (260px), injected once.
  splitTemplateJS <- shiny::singleton(
    shiny::tags$script(
      "$(function() {
        $('#f7-sidebar').addClass('panel-visible-by-breakpoint');
        $('.view:not(\"#f7-sidebar-view\")').addClass('safe-areas');
        $('.view:not(\"#f7-sidebar-view\")').css('margin-left', '260px');
      });
      "
    )
  )
  shiny::tagList(splitTemplateCSS, splitTemplateJS, splitSkeleton)
}
#' Create a Framework7 wrapper for \link{f7Item}
#'
#' Build a Framework7 wrapper for \link{f7Item}
#'
#' @param ... Slot for wrapper for \link{f7Item}.
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
f7Items <- function(...){
  # Inner tab container; the "ios-edges" class is required for correct
  # iOS rendering of the tab panes passed in via ... (f7Item tags).
  tab_container <- shiny::tags$div(
    class = "tabs ios-edges",
    ...
  )
  # Framework7 expects an animated wrapper around the tabs container.
  shiny::tags$div(class = "tabs-animated-wrap", tab_container)
}
#' Create a Framework7 \link{f7Item}.
#'
#' Similar to \link{f7Tab} but for the \link{f7SplitLayout}.
#'
#' @inheritParams f7Tab
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
f7Item <- function(..., tabName) {
  # One tab pane for f7SplitLayout (the split-layout counterpart of f7Tab).
  # tabName serves as both the DOM id and the data-value used for selection.
  pane_attrs <- list(
    class = "page-content tab",
    id = tabName,
    `data-value` = tabName,
    style = "background-color: gainsboro;"
  )
  do.call(shiny::tags$div, c(pane_attrs, list(...)))
}
|
/R/f7Page.R
|
no_license
|
CristianPachacama/shinyMobile
|
R
| false
| false
| 16,961
|
r
|
#' Create a Framework7 page
#'
#' Build a Framework7 page
#'
#' @param ... Slot for shinyMobile skeleton elements: \link{f7Appbar}, \link{f7SingleLayout},
#' \link{f7TabLayout}, \link{f7SplitLayout}.
#' @param init App configuration. See \link{f7Init}.
#' @param title Page title.
#' @param preloader Whether to display a preloader before the app starts.
#' FALSE by default.
#' @param loading_duration Preloader duration.
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
f7Page <- function(..., init = f7Init(skin = "auto", theme = "light"), title = NULL, preloader = FALSE,
loading_duration = 3){
shiny::tags$html(
# Head
shiny::tags$head(
shiny::tags$meta(charset = "utf-8"),
shiny::tags$meta(
name = "viewport",
content = "
width=device-width,
initial-scale=1,
maximum-scale=1,
minimum-scale=1,
user-scalable=no,
viewport-fit=cover"
),
# PAW properties
shiny::tags$meta(name = "apple-mobile-web-app-capable", content = "yes"),
shiny::tags$meta(name = "apple-mobile-web-app-title", content = title),
shiny::tags$meta(name = "apple-mobile-web-app-status-bar-style", content = "black-translucent"),
shiny::tags$link(rel = "apple-touch-icon", href = "icons/apple-touch-icon.png"),
shiny::tags$link(rel = "icon", href = "icons/favicon.png"),
shiny::tags$link(rel = "manifest", href = "manifest.json"),
# Splatshscreen for IOS must be in a www folder
shiny::tags$link(href = "splashscreens/iphone5_splash.png", media = "(device-width: 320px) and (device-height: 568px) and (-webkit-device-pixel-ratio: 2)", rel = "apple-touch-startup-image"),
shiny::tags$link(href = "splashscreens/iphone6_splash.png", media = "(device-width: 375px) and (device-height: 667px) and (-webkit-device-pixel-ratio: 2)", rel = "apple-touch-startup-image"),
shiny::tags$link(href = "splashscreens/iphoneplus_splash.png", media = "(device-width: 621px) and (device-height: 1104px) and (-webkit-device-pixel-ratio: 3)", rel = "apple-touch-startup-image"),
shiny::tags$link(href = "splashscreens/iphonex_splash.png", media = "(device-width: 375px) and (device-height: 812px) and (-webkit-device-pixel-ratio: 3)", rel = "apple-touch-startup-image"),
shiny::tags$link(href = "splashscreens/iphonexr_splash.png", media = "(device-width: 414px) and (device-height: 896px) and (-webkit-device-pixel-ratio: 2)", rel = "apple-touch-startup-image"),
shiny::tags$link(href = "splashscreens/iphonexsmax_splash.png", media = "(device-width: 414px) and (device-height: 896px) and (-webkit-device-pixel-ratio: 3)", rel = "apple-touch-startup-image"),
shiny::tags$link(href = "splashscreens/ipad_splash.png", media = "(device-width: 768px) and (device-height: 1024px) and (-webkit-device-pixel-ratio: 2)", rel = "apple-touch-startup-image"),
shiny::tags$link(href = "splashscreens/ipadpro1_splash.png", media = "(device-width: 834px) and (device-height: 1112px) and (-webkit-device-pixel-ratio: 2)", rel = "apple-touch-startup-image"),
shiny::tags$link(href = "splashscreens/ipadpro3_splash.png", media = "(device-width: 834px) and (device-height: 1194px) and (-webkit-device-pixel-ratio: 2)", rel = "apple-touch-startup-image"),
shiny::tags$link(href = "splashscreens/ipadpro2_splash.png", media = "(device-width: 1024px) and (device-height: 1366px) and (-webkit-device-pixel-ratio: 2)", rel = "apple-touch-startup-image"),
shiny::tags$title(title)
),
# Body
addCSSDeps(
shiny::tags$body(
# preloader
onLoad = if (preloader) {
duration <- loading_duration * 1000
paste0(
"$(function() {
// Preloader
app.dialog.preloader();
setTimeout(function () {
app.dialog.close();
}, ", duration, ");
});
"
)
},
shiny::tags$div(
id = "app",
...
)
)
),
# A bit strange, but the framework7.js scripts do not
# work when placed in the head, as is traditionally done
# with shinydashboard or bs4Dash, so we include them here instead.
addJSDeps(),
init
)
}
#' Create a Framework7 single layout
#'
#' Build a Framework7 single layout
#'
#' @param ... Content.
#' @param navbar Slot for \link{f7Navbar}.
#' @param toolbar Slot for \link{f7Toolbar}.
#' @param panels Slot for \link{f7Panel}.
#' Wrap in \link[shiny]{tagList} if multiple panels.
#' @param appbar Slot for \link{f7Appbar}.
#'
#' @examples
#' if(interactive()){
#' library(shiny)
#' library(shinyMobile)
#' shiny::shinyApp(
#' ui = f7Page(
#' title = "My app",
#' f7SingleLayout(
#' navbar = f7Navbar(
#' title = "Single Layout",
#' hairline = FALSE,
#' shadow = TRUE
#' ),
#' toolbar = f7Toolbar(
#' position = "bottom",
#' f7Link(label = "Link 1", src = "https://www.google.com"),
#' f7Link(label = "Link 2", src = "https://www.google.com", external = TRUE)
#' ),
#' # main content
#' f7Shadow(
#' intensity = 10,
#' hover = TRUE,
#' f7Card(
#' title = "Card header",
#' f7Slider("obs", "Number of observations", 0, 1000, 500),
#' plotOutput("distPlot"),
#' footer = tagList(
#' f7Button(color = "blue", label = "My button", src = "https://www.google.com"),
#' f7Badge("Badge", color = "green")
#' )
#' )
#' )
#' )
#' ),
#' server = function(input, output) {
#' output$distPlot <- renderPlot({
#' dist <- rnorm(input$obs)
#' hist(dist)
#' })
#' }
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
f7SingleLayout <- function(..., navbar, toolbar = NULL,
                           panels = NULL, appbar = NULL) {
  # Innermost container: holds the user-supplied page content.
  content <- shiny::tags$div(
    class = "page-content",
    style = "background-color: gainsboro;",
    ...
  )
  # Page wrapper: top navbar, optional toolbar, then the content.
  page <- shiny::tags$div(
    class = "page",
    navbar,
    toolbar,
    content
  )
  # Appbar and panels must precede the main view in the DOM.
  shiny::tagList(
    appbar,
    panels,
    shiny::tags$div(class = "view view-main", page)
  )
}
#' Create a Framework7 page with tab layout
#'
#' Build a Framework7 page with tab layout
#'
#' @param ... Slot for \link{f7Tabs}.
#' @param navbar Slot for \link{f7Navbar}.
#' @param panels Slot for \link{f7Panel}.
#' Wrap in \link[shiny]{tagList} if multiple panels.
#' @param appbar Slot for \link{f7Appbar}.
#'
#' @examples
#' if(interactive()){
#' library(shiny)
#' library(shinyMobile)
#' library(shinyWidgets)
#'
#' shiny::shinyApp(
#' ui = f7Page(
#' title = "My app",
#' init = f7Init(skin = "md", theme = "light"),
#' f7TabLayout(
#' tags$head(
#' tags$script(
#' "$(function(){
#' $('#tapHold').on('taphold', function () {
#' app.dialog.alert('Tap hold fired!');
#' });
#' });
#' "
#' )
#' ),
#' panels = tagList(
#' f7Panel(title = "Left Panel", side = "left", theme = "light", "Blabla", effect = "cover"),
#' f7Panel(title = "Right Panel", side = "right", theme = "dark", "Blabla", effect = "cover")
#' ),
#' navbar = f7Navbar(
#' title = "Tabs",
#' hairline = FALSE,
#' shadow = TRUE,
#' left_panel = TRUE,
#' right_panel = TRUE
#' ),
#' f7Tabs(
#' animated = FALSE,
#' swipeable = TRUE,
#' f7Tab(
#' tabName = "Tab 1",
#' icon = f7Icon("email"),
#' active = TRUE,
#' f7Shadow(
#' intensity = 10,
#' hover = TRUE,
#' f7Card(
#' title = "Card header",
#' f7Stepper(
#' "obs1",
#' "Number of observations",
#' min = 0,
#' max = 1000,
#' value = 500,
#' step = 100
#' ),
#' plotOutput("distPlot1"),
#' footer = tagList(
#' f7Button(inputId = "tapHold", label = "My button"),
#' f7Badge("Badge", color = "green")
#' )
#' )
#' )
#' ),
#' f7Tab(
#' tabName = "Tab 2",
#' icon = f7Icon("today"),
#' active = FALSE,
#' f7Shadow(
#' intensity = 10,
#' hover = TRUE,
#' f7Card(
#' title = "Card header",
#' f7Select(
#' inputId = "obs2",
#' label = "Distribution type:",
#' choices = c(
#' "Normal" = "norm",
#' "Uniform" = "unif",
#' "Log-normal" = "lnorm",
#' "Exponential" = "exp"
#' )
#' ),
#' plotOutput("distPlot2"),
#' footer = tagList(
#' f7Button(label = "My button", src = "https://www.google.com"),
#' f7Badge("Badge", color = "orange")
#' )
#' )
#' )
#' ),
#' f7Tab(
#' tabName = "Tab 3",
#' icon = f7Icon("cloud_upload"),
#' active = FALSE,
#' f7Shadow(
#' intensity = 10,
#' hover = TRUE,
#' f7Card(
#' title = "Card header",
#' f7SmartSelect(
#' inputId = "variable",
#' label = "Variables to show:",
#' c("Cylinders" = "cyl",
#' "Transmission" = "am",
#' "Gears" = "gear"),
#' multiple = TRUE,
#' selected = "cyl"
#' ),
#' tableOutput("data"),
#' footer = tagList(
#' f7Button(label = "My button", src = "https://www.google.com"),
#' f7Badge("Badge", color = "green")
#' )
#' )
#' )
#' )
#' )
#' )
#' ),
#' server = function(input, output) {
#' output$distPlot1 <- renderPlot({
#' dist <- rnorm(input$obs1)
#' hist(dist)
#' })
#'
#' output$distPlot2 <- renderPlot({
#' dist <- switch(
#' input$obs2,
#' norm = rnorm,
#' unif = runif,
#' lnorm = rlnorm,
#' exp = rexp,
#' rnorm
#' )
#'
#' hist(dist(500))
#' })
#'
#' output$data <- renderTable({
#' mtcars[, c("mpg", input$variable), drop = FALSE]
#' }, rownames = TRUE)
#' }
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
f7TabLayout <- function(..., navbar, panels = NULL, appbar = NULL) {
  # The page wrapper is required for tabs to swipe properly (not
  # mentioned in the Framework7 docs) and for the dark mode to be
  # applied correctly.
  page <- shiny::tags$div(
    class = "page",
    # top navbar goes first
    navbar,
    # f7Tabs go here; their toolbar is generated automatically
    ...
  )
  shiny::tagList(
    # appbar goes here
    appbar,
    # panels go here
    panels,
    shiny::tags$div(class = "view view-main", page)
  )
}
#' Create a Framework7 split layout
#'
#' This is a modified version of the \link{f7SingleLayout}.
#' It is intended to be used with tablets.
#'
#' @param ... Content.
#' @param navbar Slot for \link{f7Navbar}.
#' @param sidebar Slot for \link{f7Panel}. Particularly we expect the following code:
#' \code{f7Panel(title = "Sidebar", side = "left", theme = "light", "Blabla", effect = "reveal")}
#' @param toolbar Slot for \link{f7Toolbar}.
#' @param panels Slot for \link{f7Panel}. Expect only a right panel, for instance:
#' \code{f7Panel(title = "Right Panel", side = "right", theme = "light", "Blabla", effect = "cover")}
#' @param appbar Slot for \link{f7Appbar}.
#'
#' @examples
#' if(interactive()){
#' library(shiny)
#' library(shinyMobile)
#' shiny::shinyApp(
#' ui = f7Page(
#' title = "My app",
#' f7SplitLayout(
#' sidebar = f7Panel(
#' inputId = "sidebar",
#' title = "Sidebar",
#' side = "left",
#' theme = "light",
#' f7PanelMenu(
#' id = "menu",
#' f7PanelItem(tabName = "tab1", title = "Tab 1", icon = f7Icon("email"), active = TRUE),
#' f7PanelItem(tabName = "tab2", title = "Tab 2", icon = f7Icon("home"))
#' ),
#' effect = "reveal"
#' ),
#' navbar = f7Navbar(
#' title = "Split Layout",
#' hairline = FALSE,
#' shadow = TRUE
#' ),
#' toolbar = f7Toolbar(
#' position = "bottom",
#' f7Link(label = "Link 1", src = "https://www.google.com"),
#' f7Link(label = "Link 2", src = "https://www.google.com", external = TRUE)
#' ),
#' # main content
#' f7Items(
#' f7Item(
#' tabName = "tab1",
#' f7Slider("obs", "Number of observations:",
#' min = 0, max = 1000, value = 500
#' ),
#' plotOutput("distPlot")
#' ),
#' f7Item(tabName = "tab2", "Tab 2 content")
#' )
#' )
#' ),
#' server = function(input, output) {
#'
#' observe({
#' print(input$menu)
#' })
#'
#' output$distPlot <- renderPlot({
#' dist <- rnorm(input$obs)
#' hist(dist)
#' })
#' }
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#' @export
f7SplitLayout <- function(..., navbar, sidebar, toolbar = NULL,
                          panels = NULL, appbar = NULL) {
  # Wrap the main content with left and right margins.
  items <- shiny::div(...) %>% f7Margin(side = "left") %>% f7Margin(side = "right")
  # sidebar[[2]] extracts the panel tag itself from the f7Panel output;
  # "panel-in" keeps the sidebar open by default.
  sidebar <- shiny::tagAppendAttributes(sidebar[[2]], class = "panel-in")
  # Tag the sidebar's inner view with a known id so the JavaScript below
  # can exclude it when styling the other views.
  sidebar$children[[1]]$attribs$id <- "f7-sidebar-view"
  # Reuse the single layout: the sidebar is injected as an extra panel.
  splitSkeleton <- f7SingleLayout(
    items,
    navbar = navbar,
    toolbar = toolbar,
    panels = shiny::tagList(
      sidebar,
      panels
    ),
    appbar = appbar
  )
  # CSS tweaks for the "visible by breakpoint" sidebar (border, hiding
  # the open-panel navbar link, extra iOS borders).
  splitTemplateCSS <- shiny::singleton(
    shiny::tags$style(
      '/* Left Panel right border when it is visible by breakpoint */
      .panel-left.panel-visible-by-breakpoint:before {
        position: absolute;
        right: 0;
        top: 0;
        height: 100%;
        width: 1px;
        background: rgba(0,0,0,0.1);
        content: "";
        z-index: 6000;
      }
      /* Hide navbar link which opens left panel when it is visible by breakpoint */
      .panel-left.panel-visible-by-breakpoint ~ .view .navbar .panel-open[data-panel="left"] {
        display: none;
      }
      /*
      Extra borders for main view and left panel for iOS theme when it behaves as panel (before breakpoint size)
      */
      .ios .panel-left:not(.panel-visible-by-breakpoint).panel-active ~ .view-main:before,
      .ios .panel-left:not(.panel-visible-by-breakpoint).panel-closing ~ .view-main:before {
        position: absolute;
        left: 0;
        top: 0;
        height: 100%;
        width: 1px;
        background: rgba(0,0,0,0.1);
        content: "";
        z-index: 6000;
      }
      '
    )
  )
  # Pin the sidebar open and shift every other view right by the
  # sidebar width (260px).
  splitTemplateJS <- shiny::singleton(
    shiny::tags$script(
      "$(function() {
        $('#f7-sidebar').addClass('panel-visible-by-breakpoint');
        $('.view:not(\"#f7-sidebar-view\")').addClass('safe-areas');
        $('.view:not(\"#f7-sidebar-view\")').css('margin-left', '260px');
      });
      "
    )
  )
  shiny::tagList(splitTemplateCSS, splitTemplateJS, splitSkeleton)
}
#' Create a Framework7 wrapper for \link{f7Item}
#'
#' Build a Framework7 wrapper for \link{f7Item}
#'
#' @param ... Slot for wrapper for \link{f7Item}.
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
f7Items <- function(...){
  # "ios-edges" is necessary to obtain the correct iOS rendering.
  inner <- shiny::tags$div(
    class = "tabs ios-edges",
    ...
  )
  shiny::tags$div(class = "tabs-animated-wrap", inner)
}
#' Create a Framework7 \link{f7Item}.
#'
#' Similar to \link{f7Tab} but for the \link{f7SplitLayout}.
#'
#' @inheritParams f7Tab
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
f7Item <- function(..., tabName) {
  # One tab pane of an f7SplitLayout. Both id and data-value carry the
  # tab name so the pane can be targeted from the side panel menu.
  itemAttrs <- list(
    class = "page-content tab",
    id = tabName,
    `data-value` = tabName,
    style = "background-color: gainsboro;"
  )
  do.call(shiny::tags$div, c(itemAttrs, list(...)))
}
|
# NOTE(review): clearing the workspace at the top of a script is
# discouraged (it silently destroys the caller's session state), but it
# is kept here to preserve the script's original behaviour.
remove(list = ls(all = TRUE))
gc()
library(xtable)
library(texreg)
library(yacca)
library(ggplot2)
library(reshape2)
library(Hmisc)   # was require(); library() errors loudly if missing
library(foreign)
library(corrgram)
library(xlsx)
library(dplyr)
library(nnet)
options(warn=1)
set.seed(55333)
## Specify directories (all relative to the project root)
data.dir <- "Tax data/"
network.data.dir <- "Network data/"
survey.data.dir <- "Survey data/"
survey.output.dir <- "SurveyTablesPlots/"
model.inputs.dir <- "Inputs/"
# Source the two project helper libraries. The path variable is named
# library.file (not "library") to avoid shadowing base::library.
library.dir <- "Survey code/SurveyLibrary/"
library.file <- file.path(library.dir, "library.R")
source(library.file)
library.dir <- "Library/"
library.file <- file.path(library.dir, "library.R")
source(library.file)
### Load effective tax and audit rate tables
eff.tax.rate <- read.csv("Tax data/US_Income_Tax_Rates_2016.csv", stringsAsFactors = F)
eff.audit.rate <- read.csv("Tax data/Audit_Rates_2015.csv", stringsAsFactors = F)
### Load survey data (ego attributes, alter table, cleaned survey)
ego.data <- readRDS(file=paste(survey.data.dir,"ego.data.Rdata",sep=""))
alters.table<-readRDS(file=paste(survey.data.dir,"EgoNet",".Rdata",sep=""))
SD.clean<-readRDS(file=paste(survey.data.dir,"SD.clean",".Rdata",sep=""))
### Load survey data analysis output from ALP_Survey_Analysis.R
model.param.tab<- readRDS(file=paste(survey.data.dir,"model.param.tab.Rdata",sep=""))
### Create population subsets used repeatedly for the summary tables:
### audited vs non-audited respondents, crossed with whether any of
### their alters was (or was thought to be) audited.
subsets <- data.frame(all=rep(T,nrow(ego.data)),
                      audited = ego.data$hheveraudited,
                      nonaudited = !(ego.data$hheveraudited),
                      selfemployed = ego.data$selfemployed,
                      not.selfemployed = !(ego.data$selfemployed))
# An ego is "in contact with audited alters" if at least one alter was
# reported audited ("Yes") or suspected audited ("I think so").
tmp <- alters.table$altertaxaudit
in.contact.with.audited.alters<- (tmp$Yes>0 | tmp$`I think so`>0)
subsets$nonaudited.alters.nonaudited <- subsets$nonaudited & !in.contact.with.audited.alters
subsets$nonaudited.alters.audited <- subsets$nonaudited & in.contact.with.audited.alters
subsets$audited.alters.nonaudited <- subsets$audited & !in.contact.with.audited.alters
subsets$audited.alters.audited <- subsets$audited & in.contact.with.audited.alters
### Select the survey fields used by the regressions below and derive
### the regression covariates (df is the master regression data frame).
selected.fields <- c("prim_key","currentlivingsituation" ,
                     "perceivedevasionratepopulation","perceivedevasionrate",
                     "perceivedpenaltyrate",
                     "perceivedauditrate",
                     "perceivedcaught",
                     "riskauditspenalties_a","riskauditspenalties_b",
                     "riskauditspenalties_c",
                     "perceivedtaxrate","tax.rate.threshold.min",
                     "tax.rate.threshold.max",
                     "c1.guessed.unmodified","c1.guessed",
                     "c1.guess.majority.int", "tax.rate.threshold.tri",
                     "servicestaxes",
                     "actor",
                     "s","m",
                     "perceivedaruprob",
                     "bombcrateramount")
df<- SD.clean[,selected.fields]
# Recode actor so that larger values mean a stronger effect
### NOTE: changed the sign and definition here
df$actor<- -(df$actor-3)
df$actor.logical<- df$actor>0
# Center servicestaxes on its neutral midpoint (3)
df$servicestaxes<- df$servicestaxes-3
# preptaxes: values < 3 = self-prepared, > 3 = other/unknown (set NA),
# so preptaxes.CPA == 1 means a professional preparer is used.
df$preptaxes.CPA <- SD.clean$preptaxes
tmp<-(df$preptaxes.CPA<3)
tmp[df$preptaxes.CPA>3] <- NA
df$preptaxes.CPA <- as.numeric(!(tmp) )
df <- merge(ego.data,df, by ="prim_key")
df$perceivedevasionratepopulation <- as.numeric(df$perceivedevasionratepopulation)
# Proportion of alters with whom the ego talks taxes; guard against
# division by zero (Inf) and 0/0 (NaN).
x<- df$n.alters/df$n.alters.tTaxes
x[x==Inf] <- NA
x[is.nan(x)] <-NA
df$prop.alters.tTaxes<-x
subsets$actor <- (df$actor>0)
# Map currentlivingsituation codes to tax filing statuses
# (1 = married filing jointly, 5 = single, otherwise head of household).
tmp<- rep("Head.of.Household",nrow(df))
tmp[df$currentlivingsituation==1] <- "Married.Filing.Jointly"
tmp[df$currentlivingsituation==5] <- "Single"
df$filing.status<- tmp
# Compute each respondent's effective tax rate from the 2013 brackets.
tax.rate.income.bracket.file <- paste(data.dir,"US_Income_Tax_Rates_2013",".csv",sep="")
tax.rate.income.bracket <- read.csv(file=tax.rate.income.bracket.file,head=TRUE,stringsAsFactors = FALSE)
tax.rate.income.bracket <- split(tax.rate.income.bracket,tax.rate.income.bracket$filing.status)
# Income in units of $100k (regression scaling)
df$income.val.5 <- df$income.val/10^5
income.info<- df[,c("income.val","filing.status")]
names(income.info) <- c("Income","filing.status")
income.tax <-
  apply(income.info,1,get.effective.tax.rate,tax.rate.income.bracket=tax.rate.income.bracket)
df$effectivetaxrate <- round(100*income.tax/ income.info$Income,0)
df$diff.perceived.effective.taxrate <- df$perceivedtaxrate - df$effectivetaxrate
# Impute/flag missing covariates (adds *.NA.Indicator columns)
df<-ALP.fill.out.missing.variables.for.regression(df)
write.csv(df, paste(survey.data.dir,"Survey_data_input_to_Regression_Analysis",".csv",sep=""),row.names = F)
########################################
###                                  ###
### Load demographic data for the ABM and add missing attributes.
### Loops over the two synthetic network populations (PN1, PN10);
### everything up to the closing brace runs once per population.
########################################
network.data.file <- "PN1"
for( network.data.file in c("PN1","PN10")){
model.pop.attributes<- readRDS(file=paste(network.data.dir,network.data.file,
                                          ".demog.person.RData",sep=""))
fields.to.keep<-colnames(model.pop.attributes)
### Set initial fields in model.pop.attributes with dummy values
### (population means from the survey); several are overwritten below
### by the regression-based assignments.
model.pop.attributes$calcage<- model.pop.attributes$age
model.pop.attributes$income.val.5<- model.pop.attributes$income/10^5
model.pop.attributes$gender <- as.factor(model.pop.attributes$gender)
levels(model.pop.attributes$gender) <- c("Female","Male")
model.pop.attributes$selfemployed <- model.pop.attributes$self.employed
# Mean alter self-employment, conditional on the ego's own status
model.pop.attributes$alterselfemployed[model.pop.attributes$self.employed]<-
  mean(df$alterselfemployed[df$selfemployed],na.rm=T)
model.pop.attributes$alterselfemployed[!(model.pop.attributes$self.employed)]<-
  mean(df$alterselfemployed[!(df$selfemployed)],na.rm=T)
model.pop.attributes$prop.alters.tTaxes <- mean(df$prop.alters.tTaxes,na.rm=T)
model.pop.attributes$actor <- mean(df$actor,na.rm=T)
model.pop.attributes$perceivedevasionratepopulation <- mean(df$perceivedevasionratepopulation,na.rm=T)
model.pop.attributes$everaudited <- F
model.pop.attributes$spouseaudit <- F
model.pop.attributes$altertaxaudit.tTaxes <- mean(df$altertaxaudit.tTaxes,na.rm=T)
model.pop.attributes$servicestaxes <- mean(df$servicestaxes,na.rm=T)
model.pop.attributes$tax.rate.threshold.max <- mean(df$tax.rate.threshold.max,na.rm=T)
model.pop.attributes$perceivedevasionrate <- mean(df$perceivedevasionrate,na.rm=T)
model.pop.attributes$perceivedpenaltyrate <- mean(df$perceivedpenaltyrate,na.rm=T)
# BUG FIX: the audit-rate dummy was initialized from the *penalty* rate
# column (copy-paste error); every other attribute takes the mean of its
# own survey column. This dummy feeds the CPA/actor predictions below
# before being replaced by the sampled distribution.
model.pop.attributes$perceivedauditrate <- mean(df$perceivedauditrate,na.rm=T)
# Missing-value indicator columns: none missing in the synthetic pop
NA.fields<-names(df)[grepl("NA.Indicator", names(df))]
model.pop.attributes[,NA.fields] <- FALSE
########################################
###                                  ###
### Regression: perceivedtaxrate
### Fit on the survey, then predict a perceived tax rate for every
### agent in the synthetic population.
########################################
dependent <- "perceivedtaxrate"
covariates <- c("calcage","gender","income.val.5",
                "everaudited","spouseaudit","selfemployed")
# Covariates deliberately excluded from this model:
# "prop.alters.tTaxes","altertaxaudit.tTaxes","alterselfemployed",
# "actor","servicestaxes","perceivedevasionratepopulation",
# "perceivedevasionrate","tax.rate.threshold.max")
lab<- "Linear Regression on perceivedtaxrate"
gillo<-ALP.glm(df,dependent,covariates,
               threshold = 0.05,lab=lab,
               survey.output.dir=survey.output.dir)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_perceivedtaxrate",".Rdata",sep=""))
# Predicted percentage, rounded, then rescaled to a fraction
model.pop.attributes$perceivedtaxrate<-round(predict(gillo,newdata=model.pop.attributes),0)/100
fields.to.keep<- c(fields.to.keep,"perceivedtaxrate")
#### Summary stats of the perceived tax rate for the audit subsets.
perceivedtaxrate <- SD.clean[,"perceivedtaxrate"]
tab <- list()
tab$all<- summaryfunctionFull(perceivedtaxrate)
tab$nonaudited.alters.nonaudited<-
  summaryfunctionFull(perceivedtaxrate[subsets$nonaudited.alters.nonaudited])
tab$nonaudited.alters.audited<-
  summaryfunctionFull(perceivedtaxrate[subsets$nonaudited.alters.audited])
tab$audited.alters.nonaudited<-
  summaryfunctionFull(perceivedtaxrate[subsets$audited.alters.nonaudited])
tab$audited.alters.audited<-summaryfunctionFull(perceivedtaxrate[subsets$audited.alters.audited])
### Save summary table as LaTeX
tab <- as.data.frame(do.call("rbind",tab))
tab <- cbind(rownames(tab),tab)
colnames(tab)[1] <- "Respondent subsample"
print(xtable(tab,auto=T,digits=1),
      file=paste(survey.output.dir,"perceivedtaxrate_summary",".tex",sep=""),
      include.rownames=F)
########################################
###                                  ###
### Regression: who uses a CPA / professional tax preparer
### (logistic model; prediction is stochastically rounded to 0/1)
########################################
dependent <- "preptaxes.CPA"
covariates <- c("calcage","gender","income.val.5","perceivedtaxrate",
                "everaudited","spouseaudit","perceivedauditrate",
                "prop.alters.tTaxes","altertaxaudit.tTaxes",
                "selfemployed","alterselfemployed",
                "actor","servicestaxes","perceivedevasionratepopulation",
                "perceivedevasionrate","tax.rate.threshold.max")
lab<- "Logistic Regression on whether respondent uses an tax expert to prepare"
# threshold = 0.0 keeps all covariates (no significance pruning)
gillo<-ALP.glm(df,dependent,covariates,family = binomial(link = "logit"),
               threshold = 0.0,lab=lab,
               survey.output.dir=survey.output.dir)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_CPA",".Rdata",sep=""))
# stoch.round turns each predicted probability into a Bernoulli draw
model.pop.attributes$preptaxes.CPA<-as.logical(sapply(predict(gillo,newdata=model.pop.attributes,type="response"),stoch.round))
fields.to.keep<- c(fields.to.keep,"preptaxes.CPA")
########################################
###                                  ###
### Regression: who can be influenced by an actor/high earner
### being audited (logistic model)
########################################
dependent <- "actor.logical"
covariates <- c("calcage","gender","income.val.5","perceivedtaxrate",
                "everaudited","spouseaudit","perceivedauditrate","perceivedpenaltyrate",
                "prop.alters.tTaxes","altertaxaudit.tTaxes",
                "selfemployed","alterselfemployed",
                "servicestaxes","perceivedevasionratepopulation",
                "perceivedevasionrate","tax.rate.threshold.max")
lab<- "Logistic Regression on whether respondent is affected by knowing an actor was audited"
gillo<-ALP.glm(df,dependent,covariates,family = binomial(link = "logit"),
               threshold = 0.05,lab=lab,
               survey.output.dir=survey.output.dir)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_actor_affect",".Rdata",sep=""))
# Stochastic rounding of the predicted probability gives a 0/1 draw
model.pop.attributes$actor.logical<-as.logical(sapply(predict(gillo,newdata=model.pop.attributes,type="response"),stoch.round) )
fields.to.keep<- c(fields.to.keep,"actor.logical")
########################################
###                                  ###
### Regression: perceived audit rate, then assignment of a sampled
### perceived-audit-rate distribution to the synthetic population.
########################################
dependent <- "perceivedauditrate"
covariates <- c("calcage","gender","income.val.5","perceivedtaxrate",
                "everaudited","spouseaudit",
                "prop.alters.tTaxes","altertaxaudit.tTaxes",
                "selfemployed","alterselfemployed",
                "actor","servicestaxes",
                "perceivedevasionrate","tax.rate.threshold.max")
lab<- "Linear Regression of the Perceived Audit Rate"
gillo<-ALP.glm(df,dependent,covariates,
               threshold = 0.05,lab=lab,
               survey.output.dir=survey.output.dir)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_perceivedauditrate",".Rdata",sep=""))
# Per-agent regression prediction; used below only to rank agents
reg.perceivedauditrate<-round(predict(gillo,newdata=model.pop.attributes,type="response"),0)
#### Summary stats of the perceived audit rate for different subsets.
perceivedauditrate <- SD.clean[,"perceivedauditrate"]
tab <- list()
tab$all<- summaryfunctionFull(perceivedauditrate[!is.na(perceivedauditrate)])
tab$nonaudited.alters.nonaudited<-
  summaryfunctionFull(perceivedauditrate[subsets$nonaudited.alters.nonaudited])
tab$nonaudited.alters.audited<-
  summaryfunctionFull(perceivedauditrate[subsets$nonaudited.alters.audited])
tab$audited.alters.nonaudited<-
  summaryfunctionFull(perceivedauditrate[subsets$audited.alters.nonaudited])
tab$audited.alters.audited<-summaryfunctionFull(perceivedauditrate[subsets$audited.alters.audited])
### Save summary table as LaTeX
tab <- as.data.frame(do.call("rbind",tab))
tab <- cbind(rownames(tab),tab)
colnames(tab)[1] <- "Respondent subsample"
print(xtable(tab,auto=T,digits=1),
      file=paste(survey.output.dir,"perceivedaudit_summary",".tex",sep=""),
      include.rownames=F)
#### Generate the distribution of perceived audit rate from respondents
## who were never audited and know no-one who was audited (the fit is
## done on log10 rates and transformed back below).
pa <- perceivedauditrate[subsets$nonaudited.alters.nonaudited]
r<- do.ALP.per.rate.analysis(per.rate=pa ,lab="Perceived Audit Rate",
                             scale.up=1, n.bins=50,text.size=12)
print(r)
log.per.audit.dist <- r$fit.log.rate
per.audit.dist<- log.per.audit.dist
per.audit.dist$x <- 10^(log.per.audit.dist$x)
n<- nrow(model.pop.attributes)
sampled.per.audit <-sample(per.audit.dist$x,n, replace = T, prob=per.audit.dist$y)
# Rescale the sample so its mean matches the survey mean; cap at 100%
rescale.to.match.mean <- mean(pa,na.rm=T)/mean(sampled.per.audit)
sampled.per.audit<- rescale.to.match.mean*sampled.per.audit
sampled.per.audit[sampled.per.audit>100] <- 100
summary(sampled.per.audit)
for(k in c("Income")){
  print(cor(sampled.per.audit,model.pop.attributes[,k]) )## cor should be ~ 0
}
#### Rearrange the distribution by assigning this perceived audit rate
### according to the regression model ranking - the assignment now
### carries the correlations with the significant covariates
sampled.per.audit<- ALP.get.rearranged.sampled.dist(sampled.per.audit,
                                                    reg.perceivedauditrate)
# Progressive rounding: coarser precision at larger rates
sampled.per.audit<- round(sampled.per.audit,3)
sampled.per.audit[sampled.per.audit>1] <- round(sampled.per.audit[sampled.per.audit>1],2)
sampled.per.audit[sampled.per.audit>5] <- round(sampled.per.audit[sampled.per.audit>5],1)
sampled.per.audit[sampled.per.audit>10] <- round(sampled.per.audit[sampled.per.audit>10],0)
# Store as a fraction (percentage / 100)
model.pop.attributes$perceivedauditrate <- sampled.per.audit/100
for(k in c("Income")){
  print(cor(model.pop.attributes$perceivedauditrate,model.pop.attributes[,k]))## cor is no longer 0.
}
fields.to.keep<- c(fields.to.keep,"perceivedauditrate")
########################################
###                                  ###
### Regression: perceived penalty rate, then assignment of a sampled
### perceived-penalty-rate distribution (same scheme as the audit rate).
########################################
dependent <- "perceivedpenaltyrate"
covariates <- c("calcage","gender","income.val.5","perceivedtaxrate",
                "everaudited","spouseaudit",
                "prop.alters.tTaxes","altertaxaudit.tTaxes",
                "selfemployed","alterselfemployed",
                "actor","servicestaxes","perceivedevasionratepopulation",
                "perceivedevasionrate","tax.rate.threshold.max")
lab<- "Linear Regression of the Percieved Penalty Rate"
gillo<-ALP.glm(df,dependent,covariates,
               threshold = 0.05,lab=lab,
               survey.output.dir=survey.output.dir)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_perceivedpenaltyrate",".Rdata",sep=""))
# Per-agent regression prediction; used below only to rank agents
reg.perceivedpenaltyrate<-round(predict(gillo,newdata=model.pop.attributes,type="response"),0)
#### Summary stats of the perceived penalty rate for different subsets.
perceivedpenaltyrate <- SD.clean$perceivedpenaltyrate
tab <- list()
tab$all<- summaryfunctionFull(perceivedpenaltyrate)
tab$nonaudited.alters.nonaudited<-
  summaryfunctionFull(perceivedpenaltyrate[subsets$nonaudited.alters.nonaudited])
tab$nonaudited.alters.audited<-
  summaryfunctionFull(perceivedpenaltyrate[subsets$nonaudited.alters.audited])
tab$audited.alters.nonaudited<-
  summaryfunctionFull(perceivedpenaltyrate[subsets$audited.alters.nonaudited])
tab$audited.alters.audited<-
  summaryfunctionFull(perceivedpenaltyrate[subsets$audited.alters.audited])
### Save summary table as LaTeX
tab <- as.data.frame(do.call("rbind",tab))
tab <- cbind(rownames(tab),tab)
colnames(tab)[1] <- "Respondent subsample"
print(xtable(tab,auto=T,digits=1),
      file=paste(survey.output.dir,"perceivedpenaltyrate_summary",".tex",sep=""),
      include.rownames=F)
#### Generate the distribution of perceived penalty rate from respondents
## never audited who know no-one that was audited
pp <- perceivedpenaltyrate[subsets$nonaudited.alters.nonaudited]
r<- do.ALP.per.rate.analysis(per.rate=pp ,lab="Percieved Penalty Rate",scale.up=1, n.bins=25,text.size=12)
print(r)
log.per.penalty.dist <- r$fit.log.rate
per.penalty.dist<- log.per.penalty.dist
per.penalty.dist$x <- 10^(log.per.penalty.dist$x)
n<- nrow(model.pop.attributes)
sampled.per.penalty <-sample(per.penalty.dist$x,n, replace = T, prob=per.penalty.dist$y)
# Rescale the sample so its mean matches the survey mean
rescale.to.match.mean <- mean(pp,na.rm=T)/mean(sampled.per.penalty)
sampled.per.penalty<- rescale.to.match.mean*sampled.per.penalty
summary(sampled.per.penalty)
cor(sampled.per.penalty,model.pop.attributes$Income) ## cor should be ~ 0
#### Rearrange the distribution by assigning this perceived penalty rate
### according to the regression model ranking - the assignment now
### carries the correlations with the significant covariates
sampled.per.penalty<- ALP.get.rearranged.sampled.dist(sampled.per.penalty,
                                                      reg.perceivedpenaltyrate)
# Store as a fraction (percentage / 100)
model.pop.attributes$perceivedpenaltyrate <- round(sampled.per.penalty,0)/100
for(k in c("Income")){
  print(cor(model.pop.attributes$perceivedpenaltyrate,model.pop.attributes[,k]))## cor is no longer 0.
}
fields.to.keep<- c(fields.to.keep,"perceivedpenaltyrate")
# Side-effect script: computes the calibration target for an increased
# perceived audit rate (uses the variables defined above)
source(paste("Survey code/","Calibration.Target.Increased.Perceived.Audit.Rate.R",sep=""))
########################################
###                                  ###
### Regression 3: how c_1 (i.e. tax.rate.threshold.max, the fairness
### threshold) depends on age, income and the perceived evasion rates.
########################################
### Note: "perceivedevasionratepopulation" and "perceivedevasionrate"
### are included to see how fairness (c_1) depends on perceived evasion.
dependent <- "tax.rate.threshold.max"
covariates <- c("calcage","gender","income.val.5","perceivedtaxrate",
                "everaudited","spouseaudit",
                "prop.alters.tTaxes","altertaxaudit.tTaxes",
                "selfemployed","alterselfemployed",
                "actor","servicestaxes","perceivedevasionratepopulation",
                "perceivedevasionrate",
                "perceivedauditrate","perceivedpenaltyrate")
lab<- "Linear Regression of the c1 threshold"
gillo<-ALP.glm(df,dependent,covariates,
               threshold = 0.05,lab=lab,
               survey.output.dir=survey.output.dir)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_tax.rate.threshold.max",".Rdata",sep=""))
reg.tax.rate.threshold.max <- round(predict(gillo,newdata=model.pop.attributes,type="response"),0)
#### Generate a c1 distribution per fit type stored in model.param.tab;
#### the last entry is skipped (rev(...)[-1]).
tab.label<- paste("ALP_c1_fit_corrected","all",sep="_")
for(type.c1.fit in rev(names(model.param.tab$c1))[-1] ){
  c1.dist <- model.param.tab$c1[[type.c1.fit]]
  n<- nrow(model.pop.attributes)
  sampled.c1<-sample(c1.dist$x,n, replace = T, prob=c1.dist$y)
  summary(sampled.c1)
  cor(sampled.c1,model.pop.attributes$perceivedtaxrate) ## cor should be ~ 0
  #### Rearrange the distribution: impose the target correlation with
  ### the perceived tax rate (rho value estimated from the survey)
  boh <- ALP.match.cor(model.pop.attributes$perceivedtaxrate,sampled.c1,rho=0.08762806,verbose=F)
  # Store as a fraction (percentage / 100)
  model.pop.attributes[,type.c1.fit]<-round(boh$y,0)/100
  for(k in c("perceivedtaxrate","Income")){
    print(cor(model.pop.attributes[,type.c1.fit],model.pop.attributes[,k]))
  }
  fields.to.keep<- c(fields.to.keep,type.c1.fit)
}
########################################
###                                  ###
### Regression 4: how m (behavioral model parameter) depends on
### demographics and perceptions; m is sampled from a PERT distribution
### and rearranged by the regression ranking.
########################################
dependent <- "m"
covariates <- c("calcage","gender","income.val.5","perceivedtaxrate",
                "everaudited","spouseaudit",
                "prop.alters.tTaxes","altertaxaudit.tTaxes",
                "selfemployed","alterselfemployed",
                "actor","servicestaxes","perceivedevasionratepopulation",
                #"perceivedevasionrate",
                "perceivedauditrate")
## note: removed "perceivedpenaltyrate" as it is colinear with "perceivedauditrate"
lab<- "Linear Regression of the m value"
gillo<-ALP.glm(df,dependent,covariates,
               threshold = 0.00,lab=lab,
               survey.output.dir=survey.output.dir,texreg.digits=10)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_m_value.max",".Rdata",sep=""))
reg.m <- predict(gillo,newdata=model.pop.attributes,type="response")
# df$m is refitted here so the later s-regression can use it as covariate
df$m <- predict(gillo,newdata=df,type="response")
#### Generate the distribution of m: PERT with quartiles/median taken
#### from the survey qP summary table
qP.parameters<- model.param.tab$qP$qP.summary
n<- nrow(model.pop.attributes)
sampled.m <- rpert(n, x.min=as.numeric(qP.parameters["m","1st Qu."]),
                   x.max= as.numeric(qP.parameters["m","3rd Qu."]),
                   x.mode=as.numeric(qP.parameters["m","Median"]))
for(k in c("Income","perceivedauditrate","perceivedpenaltyrate")){
  print(cor(sampled.m,model.pop.attributes[,k]))
}
#### Rearrange the distribution by assigning m according to the regression ranking
sampled.m <- ALP.get.rearranged.sampled.dist(sampled.m, reg.m)
model.pop.attributes$m <- sampled.m
for(k in c("Income","perceivedauditrate","perceivedpenaltyrate")){
  print(cor(model.pop.attributes$m,model.pop.attributes[,k]) )## cor is no longer 0.
}
fields.to.keep<- c(fields.to.keep,"m")
########################################
###                                  ###
### Regression 5: how s (behavioral model parameter) depends on
### demographics, perceptions and m; same PERT sampling/rearranging
### scheme as for m above.
########################################
dependent <- "s"
covariates <- c("calcage","gender","income.val.5","perceivedtaxrate",
                "everaudited","spouseaudit",
                "prop.alters.tTaxes","altertaxaudit.tTaxes",
                "selfemployed","alterselfemployed",
                "actor","servicestaxes","perceivedevasionratepopulation",
                "perceivedevasionrate",
                "perceivedauditrate","m")
lab<- "Linear Regression of the s value"
gillo<-ALP.glm(df,dependent,covariates,
               threshold = 0.00,lab=lab,
               survey.output.dir=survey.output.dir,texreg.digits=10)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_s_value.max",".Rdata",sep=""))
reg.s <- predict(gillo,newdata=model.pop.attributes,type="response")
#### Generate the distribution of s: PERT with quartiles/median taken
#### from the survey qP summary table
qP.parameters<- model.param.tab$qP$qP.summary
n<- nrow(model.pop.attributes)
sampled.s <- rpert(n, x.min=as.numeric(qP.parameters["s","1st Qu."]),
                   x.max= as.numeric(qP.parameters["s","3rd Qu."]),
                   x.mode=as.numeric(qP.parameters["s","Median"]))
for(k in c("Income","perceivedauditrate","perceivedpenaltyrate")){
  print(cor(sampled.s,model.pop.attributes[,k]))
}
#### Rearrange the distribution by assigning s according to the regression ranking
sampled.s <- ALP.get.rearranged.sampled.dist(sampled.s, reg.s)
model.pop.attributes$s <- sampled.s
for(k in c("Income","perceivedauditrate","perceivedpenaltyrate")){
  print(cor(model.pop.attributes$s,model.pop.attributes[,k]) )## cor is no longer 0.
}
fields.to.keep<- c(fields.to.keep,"s")
########################################
###                                  ###
### Write the augmented population back to the RData file
########################################
fields.to.keep<- unique(fields.to.keep)
model.pop.attributes <- model.pop.attributes[,fields.to.keep]
print(head(model.pop.attributes))
saveRDS(model.pop.attributes,file=paste(network.data.dir,network.data.file,
                                        ".demog.person.RData",sep=""))
# Superseded renaming code, kept for reference:
# colnames(model.pop.attributes)<-tolower(colnames(model.pop.attributes))
# model.pop.attributes$tax.ids <- 1:nrow(model.pop.attributes)
# colnames(model.pop.attributes)[match(c("perceivedauditrate","perceivedpenaltyrate"),
#                                       colnames(model.pop.attributes))]<-
#   c("per.audit.rate","per.penalty.rate")
########################################
###                                  ###
### Prepare population and network CSV inputs for the ABM
########################################
pop.data<-model.pop.attributes
names(pop.data) <- tolower(names(pop.data))
pop.data[, 'tax.ids'] <- 1:nrow(pop.data)
# Look up each agent's statutory tax rate from the 2016 bracket table,
# matching on filing status and the income interval.
pop.data[, 'tax.rate'] <- sapply(pop.data[, "tax.ids"], function(id){
  inc.f.stat <- pop.data[id, c("income", "filing.status")]
  sub.set <- eff.tax.rate[eff.tax.rate$filing.status == inc.f.stat$filing.status, ]
  interval.row <- findInterval(inc.f.stat$income, sub.set$min, rightmost.closed = T)
  sub.set[interval.row, 'tax.rate']
})
# Rename perception columns to the names the ABM expects
i <- which(names(pop.data) == "perceivedauditrate")
names(pop.data)[i] <- "per.audit.rate"
i <- which(names(pop.data) == "perceivedpenaltyrate")
names(pop.data)[i] <- "per.penalty.rate"
write.csv(pop.data,
          paste(model.inputs.dir,network.data.file,"_population_data",".csv",sep=""),
          row.names = FALSE)
# Contact network: translate person ids to the sequential tax ids
netw.data <- readRDS(file=paste(network.data.dir,network.data.file,
                                ".contact.RData",sep=""))
netw.data[, 'id1'] <- match(netw.data$Person.Id.1, pop.data$person.id)
netw.data[, 'id2'] <- match(netw.data$Person.Id.2, pop.data$person.id)
write.csv(netw.data,
          paste(model.inputs.dir,network.data.file,"_network_data",".csv",sep=""),
          row.names = F)
}
|
/Survey code/Behavioral_Regression_Models.R
|
no_license
|
Breakend/RIBSS_tax_evasion_ABM
|
R
| false
| false
| 26,403
|
r
|
## Behavioral regression models: fit survey-based regressions, then use them to
## impute behavioral attributes onto synthetic populations for the tax-evasion ABM.
remove(list = ls(all = TRUE))
gc()
library(xtable)
library(texreg)
library(yacca)
library(ggplot2)
library(reshape2)
require(Hmisc)
library(foreign)
library(corrgram)
library(xlsx)
library(dplyr)
library(nnet)
options(warn=1)
# Fixed seed: the rpert()/sample() draws below must be reproducible across runs.
set.seed(55333)
## Specify directories
data.dir <- "Tax data/"
network.data.dir <- "Network data/"
survey.data.dir <- "Survey data/"
survey.output.dir <- "SurveyTablesPlots/"
model.inputs.dir <- "Inputs/"
# Source the two project helper libraries (survey-specific first, then general).
# NOTE(review): `library` is reused here as a plain variable name, shadowing
# the base function between the two source() calls.
library.dir <- "Survey code/SurveyLibrary/"
library <- file.path(library.dir, "library.R")
source(library)
library.dir <- "Library/"
library <- file.path(library.dir, "library.R")
source(library)
### Load eff tax rates
eff.tax.rate <- read.csv("Tax data/US_Income_Tax_Rates_2016.csv", stringsAsFactors = F)
eff.audit.rate <- read.csv("Tax data/Audit_Rates_2015.csv", stringsAsFactors = F)
### Load Survey Data
ego.data <- readRDS(file=paste(survey.data.dir,"ego.data.Rdata",sep=""))
alters.table<-readRDS(file=paste(survey.data.dir,"EgoNet",".Rdata",sep=""))
SD.clean<-readRDS(file=paste(survey.data.dir,"SD.clean",".Rdata",sep=""))
### Load Survey data analysis output from ALP_Survey_Analysis.R
model.param.tab<- readRDS(file=paste(survey.data.dir,"model.param.tab.Rdata",sep=""))
### Create Pop Subsets
# Logical masks over respondents, crossed later by own-audit status and
# whether any alter (network contact) was audited.
subsets <- data.frame(all=rep(T,nrow(ego.data)),
audited = ego.data$hheveraudited,
nonaudited = !(ego.data$hheveraudited),
selfemployed = ego.data$selfemployed,
not.selfemployed = !(ego.data$selfemployed))
tmp <- alters.table$altertaxaudit
# Contact counts as "audited alter" if any alter answered Yes or "I think so".
in.contact.with.audited.alters<- (tmp$Yes>0 | tmp$`I think so`>0)
subsets$nonaudited.alters.nonaudited <- subsets$nonaudited & !in.contact.with.audited.alters
subsets$nonaudited.alters.audited <- subsets$nonaudited & in.contact.with.audited.alters
subsets$audited.alters.nonaudited <- subsets$audited & !in.contact.with.audited.alters
subsets$audited.alters.audited <- subsets$audited & in.contact.with.audited.alters
### Get selected fields for the regressions
selected.fields <- c("prim_key","currentlivingsituation" ,
"perceivedevasionratepopulation","perceivedevasionrate",
"perceivedpenaltyrate",
"perceivedauditrate",
"perceivedcaught",
"riskauditspenalties_a","riskauditspenalties_b",
"riskauditspenalties_c",
"perceivedtaxrate","tax.rate.threshold.min",
"tax.rate.threshold.max",
"c1.guessed.unmodified","c1.guessed",
"c1.guess.majority.int", "tax.rate.threshold.tri",
"servicestaxes",
"actor",
"s","m",
"perceivedaruprob",
"bombcrateramount")
df<- SD.clean[,selected.fields]
df$actor<- -(df$actor-3) ### NOTE: changed the sign and definition here
df$actor.logical<- df$actor>0
# Recenter the 1..5 Likert scale on 0.
df$servicestaxes<- df$servicestaxes-3
# preptaxes < 3 => self-prepared; > 3 => treated as missing; CPA = NOT self-prepared.
df$preptaxes.CPA <- SD.clean$preptaxes
tmp<-(df$preptaxes.CPA<3)
tmp[df$preptaxes.CPA>3] <- NA
df$preptaxes.CPA <- as.numeric(!(tmp) )
df <- merge(ego.data,df, by ="prim_key")
df$perceivedevasionratepopulation <- as.numeric(df$perceivedevasionratepopulation)
# Proportion of alters the ego talks taxes with; guard against 0/0 and x/0.
x<- df$n.alters/df$n.alters.tTaxes
x[x==Inf] <- NA
x[is.nan(x)] <-NA
df$prop.alters.tTaxes<-x
subsets$actor <- (df$actor>0)
# Map living-situation codes onto tax filing status (default Head.of.Household).
tmp<- rep("Head.of.Household",nrow(df))
tmp[df$currentlivingsituation==1] <- "Married.Filing.Jointly"
tmp[df$currentlivingsituation==5] <- "Single"
df$filing.status<- tmp
tax.rate.income.bracket.file <- paste(data.dir,"US_Income_Tax_Rates_2013",".csv",sep="")
tax.rate.income.bracket <- read.csv(file=tax.rate.income.bracket.file,head=TRUE,stringsAsFactors = FALSE)
tax.rate.income.bracket <- split(tax.rate.income.bracket,tax.rate.income.bracket$filing.status)
# Income in units of $100k, as used by all regressions below.
df$income.val.5 <- df$income.val/10^5
income.info<- df[,c("income.val","filing.status")]
names(income.info) <- c("Income","filing.status")
income.tax <-
apply(income.info,1,get.effective.tax.rate,tax.rate.income.bracket=tax.rate.income.bracket)
# Effective tax rate in whole percent.
df$effectivetaxrate <- round(100*income.tax/ income.info$Income,0)
df$diff.perceived.effective.taxrate <- df$perceivedtaxrate - df$effectivetaxrate
df<-ALP.fill.out.missing.variables.for.regression(df)
write.csv(df, paste(survey.data.dir,"Survey_data_input_to_Regression_Analysis",".csv",sep=""),row.names = F)
########################################
### ###
###  Load Demo data for the ABM and add missing attributes
########################################
# Process each synthetic population network in turn; the loop closes after the
# network CSVs are written at the end of the script.
network.data.file <- "PN1"
for( network.data.file in c("PN1","PN10")){
model.pop.attributes<- readRDS(file=paste(network.data.dir,network.data.file,
".demog.person.RData",sep=""))
fields.to.keep<-colnames(model.pop.attributes)
### set intial fields in model.pop.attributes with dummy values.
# Dummy values are survey means; several are later replaced by regression-based
# imputations further below.
model.pop.attributes$calcage<- model.pop.attributes$age
model.pop.attributes$income.val.5<- model.pop.attributes$income/10^5
model.pop.attributes$gender <- as.factor(model.pop.attributes$gender)
levels(model.pop.attributes$gender) <- c("Female","Male")
model.pop.attributes$selfemployed <- model.pop.attributes$self.employed
# Conditional means: alters' self-employment given ego's self-employment status.
model.pop.attributes$alterselfemployed[model.pop.attributes$self.employed]<-
mean(df$alterselfemployed[df$selfemployed],na.rm=T)
model.pop.attributes$alterselfemployed[!(model.pop.attributes$self.employed)]<-
mean(df$alterselfemployed[!(df$selfemployed)],na.rm=T)
model.pop.attributes$prop.alters.tTaxes <- mean(df$prop.alters.tTaxes,na.rm=T)
# Remaining dummy attributes: survey means used as placeholders until (and in
# some cases instead of) regression-based imputation below.
model.pop.attributes$actor <- mean(df$actor,na.rm=TRUE)
model.pop.attributes$perceivedevasionratepopulation <- mean(df$perceivedevasionratepopulation,na.rm=TRUE)
model.pop.attributes$everaudited <- FALSE
model.pop.attributes$spouseaudit <- FALSE
model.pop.attributes$altertaxaudit.tTaxes <- mean(df$altertaxaudit.tTaxes,na.rm=TRUE)
model.pop.attributes$servicestaxes <- mean(df$servicestaxes,na.rm=TRUE)
model.pop.attributes$tax.rate.threshold.max <- mean(df$tax.rate.threshold.max,na.rm=TRUE)
model.pop.attributes$perceivedevasionrate <- mean(df$perceivedevasionrate,na.rm=TRUE)
model.pop.attributes$perceivedpenaltyrate <- mean(df$perceivedpenaltyrate,na.rm=TRUE)
# FIX: was mean(df$perceivedpenaltyrate) — a copy-paste bug. The audit-rate
# placeholder must come from the audit-rate survey column; this dummy value is
# used as a covariate in the CPA/actor regressions before being overwritten by
# the sampled distribution later in the loop.
model.pop.attributes$perceivedauditrate <- mean(df$perceivedauditrate,na.rm=TRUE)
# Missing-data indicator columns created by ALP.fill.out.missing.variables...:
# synthetic agents have no missingness, so all indicators are FALSE.
NA.fields<-names(df)[grepl("NA.Indicator", names(df))]
model.pop.attributes[,NA.fields] <- FALSE
########################################
### ###
### Regression : perceivedtaxrate
########################################
dependent <- "perceivedtaxrate"
covariates <- c("calcage","gender","income.val.5",
"everaudited","spouseaudit","selfemployed")
# "prop.alters.tTaxes","altertaxaudit.tTaxes","alterselfemployed",
# "actor","servicestaxes","perceivedevasionratepopulation",
# "perceivedevasionrate","tax.rate.threshold.max")
lab<- "Linear Regression on perceivedtaxrate"
gillo<-ALP.glm(df,dependent,covariates,
threshold = 0.05,lab=lab,
survey.output.dir=survey.output.dir)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_perceivedtaxrate",".Rdata",sep=""))
# Impute on the synthetic population: predicted percent, rounded, then to a fraction.
model.pop.attributes$perceivedtaxrate<-round(predict(gillo,newdata=model.pop.attributes),0)/100
fields.to.keep<- c(fields.to.keep,"perceivedtaxrate")
#### Summary stats of the perceived tax rate for different subsets.
perceivedtaxrate <- SD.clean[,"perceivedtaxrate"]
tab <- list()
tab$all<- summaryfunctionFull(perceivedtaxrate)
tab$nonaudited.alters.nonaudited<-
summaryfunctionFull(perceivedtaxrate[subsets$nonaudited.alters.nonaudited])
tab$nonaudited.alters.audited<-
summaryfunctionFull(perceivedtaxrate[subsets$nonaudited.alters.audited])
tab$audited.alters.nonaudited<-
summaryfunctionFull(perceivedtaxrate[subsets$audited.alters.nonaudited])
tab$audited.alters.audited<-summaryfunctionFull(perceivedtaxrate[subsets$audited.alters.audited])
### Save summary table
tab <- as.data.frame(do.call("rbind",tab))
tab <- cbind(rownames(tab),tab)
colnames(tab)[1] <- "Respondent subsample"
print(xtable(tab,auto=T,digits=1),
file=paste(survey.output.dir,"perceivedtaxrate_summary",".tex",sep=""),
include.rownames=F)
########################################
### ###
### Regression : find who uses CPA
########################################
dependent <- "preptaxes.CPA"
covariates <- c("calcage","gender","income.val.5","perceivedtaxrate",
"everaudited","spouseaudit","perceivedauditrate",
"prop.alters.tTaxes","altertaxaudit.tTaxes",
"selfemployed","alterselfemployed",
"actor","servicestaxes","perceivedevasionratepopulation",
"perceivedevasionrate","tax.rate.threshold.max")
lab<- "Logistic Regression on whether respondent uses an tax expert to prepare"
gillo<-ALP.glm(df,dependent,covariates,family = binomial(link = "logit"),
threshold = 0.0,lab=lab,
survey.output.dir=survey.output.dir)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_CPA",".Rdata",sep=""))
# stoch.round turns each predicted probability into a 0/1 draw (stochastic rounding).
model.pop.attributes$preptaxes.CPA<-as.logical(sapply(predict(gillo,newdata=model.pop.attributes,type="response"),stoch.round))
fields.to.keep<- c(fields.to.keep,"preptaxes.CPA")
########################################
### ###
### Regression : find who can be influenced by actor/high earner being audited.
########################################
dependent <- "actor.logical"
covariates <- c("calcage","gender","income.val.5","perceivedtaxrate",
"everaudited","spouseaudit","perceivedauditrate","perceivedpenaltyrate",
"prop.alters.tTaxes","altertaxaudit.tTaxes",
"selfemployed","alterselfemployed",
"servicestaxes","perceivedevasionratepopulation",
"perceivedevasionrate","tax.rate.threshold.max")
lab<- "Logistic Regression on whether respondent is affected by knowing an actor was audited"
gillo<-ALP.glm(df,dependent,covariates,family = binomial(link = "logit"),
threshold = 0.05,lab=lab,
survey.output.dir=survey.output.dir)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_actor_affect",".Rdata",sep=""))
# Stochastic rounding of predicted probabilities into TRUE/FALSE.
model.pop.attributes$actor.logical<-as.logical(sapply(predict(gillo,newdata=model.pop.attributes,type="response"),stoch.round) )
fields.to.keep<- c(fields.to.keep,"actor.logical")
########################################
### ###
### Regression: Perceived audit depends
########################################
dependent <- "perceivedauditrate"
covariates <- c("calcage","gender","income.val.5","perceivedtaxrate",
"everaudited","spouseaudit",
"prop.alters.tTaxes","altertaxaudit.tTaxes",
"selfemployed","alterselfemployed",
"actor","servicestaxes",
"perceivedevasionrate","tax.rate.threshold.max")
lab<- "Linear Regression of the Perceived Audit Rate"
gillo<-ALP.glm(df,dependent,covariates,
threshold = 0.05,lab=lab,
survey.output.dir=survey.output.dir)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_perceivedauditrate",".Rdata",sep=""))
# Regression predictions kept aside; used below only to rank/rearrange samples.
reg.perceivedauditrate<-round(predict(gillo,newdata=model.pop.attributes,type="response"),0)
#### Summary stats of the perceived audit rate for different subsets.
perceivedauditrate <- SD.clean[,"perceivedauditrate"]
tab <- list()
tab$all<- summaryfunctionFull(perceivedauditrate[!is.na(perceivedauditrate)])
tab$nonaudited.alters.nonaudited<-
summaryfunctionFull(perceivedauditrate[subsets$nonaudited.alters.nonaudited])
tab$nonaudited.alters.audited<-
summaryfunctionFull(perceivedauditrate[subsets$nonaudited.alters.audited])
tab$audited.alters.nonaudited<-
summaryfunctionFull(perceivedauditrate[subsets$audited.alters.nonaudited])
tab$audited.alters.audited<-summaryfunctionFull(perceivedauditrate[subsets$audited.alters.audited])
### Save summary table
tab <- as.data.frame(do.call("rbind",tab))
tab <- cbind(rownames(tab),tab)
colnames(tab)[1] <- "Respondent subsample"
print(xtable(tab,auto=T,digits=1),
file=paste(survey.output.dir,"perceivedaudit_summary",".tex",sep=""),
include.rownames=F)
#### Generated distribution of perceived audit rate for those never audited and that know
## no-one that was audited using the survey
pa <- perceivedauditrate[subsets$nonaudited.alters.nonaudited]
r<- do.ALP.per.rate.analysis(per.rate=pa ,lab="Perceived Audit Rate",
scale.up=1, n.bins=50,text.size=12)
print(r)
# The fit is done on log10(rate); transform the support back to percent.
log.per.audit.dist <- r$fit.log.rate
per.audit.dist<- log.per.audit.dist
per.audit.dist$x <- 10^(log.per.audit.dist$x)
n<- nrow(model.pop.attributes)
sampled.per.audit <-sample(per.audit.dist$x,n, replace = T, prob=per.audit.dist$y)
# Rescale draws so their mean matches the survey subset mean, capped at 100%.
rescale.to.match.mean <- mean(pa,na.rm=T)/mean(sampled.per.audit)
sampled.per.audit<- rescale.to.match.mean*sampled.per.audit
sampled.per.audit[sampled.per.audit>100] <- 100
summary(sampled.per.audit)
for(k in c("Income")){
print(cor(sampled.per.audit,model.pop.attributes[,k]) )## cor should be ~ 0
}
#### Rearrange the distribution by assigning this perceived audit rate according to the
### regression model - this will now include the correlations with the significant covariates
sampled.per.audit<- ALP.get.rearranged.sampled.dist(sampled.per.audit,
reg.perceivedauditrate)
# Coarsen precision progressively at larger rates (3 -> 2 -> 1 -> 0 decimals).
sampled.per.audit<- round(sampled.per.audit,3)
sampled.per.audit[sampled.per.audit>1] <- round(sampled.per.audit[sampled.per.audit>1],2)
sampled.per.audit[sampled.per.audit>5] <- round(sampled.per.audit[sampled.per.audit>5],1)
sampled.per.audit[sampled.per.audit>10] <- round(sampled.per.audit[sampled.per.audit>10],0)
# Store as a fraction (percent / 100), replacing the dummy placeholder.
model.pop.attributes$perceivedauditrate <- sampled.per.audit/100
for(k in c("Income")){
print(cor(model.pop.attributes$perceivedauditrate,model.pop.attributes[,k]))## cor is no longer 0.
}
fields.to.keep<- c(fields.to.keep,"perceivedauditrate")
########################################
### ###
### Regression2: find how perceived penalty rate
########################################
dependent <- "perceivedpenaltyrate"
covariates <- c("calcage","gender","income.val.5","perceivedtaxrate",
"everaudited","spouseaudit",
"prop.alters.tTaxes","altertaxaudit.tTaxes",
"selfemployed","alterselfemployed",
"actor","servicestaxes","perceivedevasionratepopulation",
"perceivedevasionrate","tax.rate.threshold.max")
lab<- "Linear Regression of the Percieved Penalty Rate"
gillo<-ALP.glm(df,dependent,covariates,
threshold = 0.05,lab=lab,
survey.output.dir=survey.output.dir)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_perceivedpenaltyrate",".Rdata",sep=""))
# Predictions used below only to rank/rearrange sampled values.
reg.perceivedpenaltyrate<-round(predict(gillo,newdata=model.pop.attributes,type="response"),0)
#### Summary stats of the perceived penalty rate for different subsets.
perceivedpenaltyrate <- SD.clean$perceivedpenaltyrate
tab <- list()
tab$all<- summaryfunctionFull(perceivedpenaltyrate)
tab$nonaudited.alters.nonaudited<-
summaryfunctionFull(perceivedpenaltyrate[subsets$nonaudited.alters.nonaudited])
tab$nonaudited.alters.audited<-
summaryfunctionFull(perceivedpenaltyrate[subsets$nonaudited.alters.audited])
tab$audited.alters.nonaudited<-
summaryfunctionFull(perceivedpenaltyrate[subsets$audited.alters.nonaudited])
tab$audited.alters.audited<-
summaryfunctionFull(perceivedpenaltyrate[subsets$audited.alters.audited])
### Save summary table
tab <- as.data.frame(do.call("rbind",tab))
tab <- cbind(rownames(tab),tab)
colnames(tab)[1] <- "Respondent subsample"
print(xtable(tab,auto=T,digits=1),
file=paste(survey.output.dir,"perceivedpenaltyrate_summary",".tex",sep=""),
include.rownames=F)
#### Generated distribution of perceived penalty rate for those never penalized and that know
## no-one that was penalized using the survey
pp <- perceivedpenaltyrate[subsets$nonaudited.alters.nonaudited]
r<- do.ALP.per.rate.analysis(per.rate=pp ,lab="Percieved Penalty Rate",scale.up=1, n.bins=25,text.size=12)
print(r)
# Fit is on log10(rate); transform support back to percent.
log.per.penalty.dist <- r$fit.log.rate
per.penalty.dist<- log.per.penalty.dist
per.penalty.dist$x <- 10^(log.per.penalty.dist$x)
n<- nrow(model.pop.attributes)
sampled.per.penalty <-sample(per.penalty.dist$x,n, replace = T, prob=per.penalty.dist$y)
# Rescale draws so their mean matches the survey subset mean.
rescale.to.match.mean <- mean(pp,na.rm=T)/mean(sampled.per.penalty)
sampled.per.penalty<- rescale.to.match.mean*sampled.per.penalty
summary(sampled.per.penalty)
cor(sampled.per.penalty,model.pop.attributes$Income) ## cor should be ~ 0
#### Rearrange the distribution by assigning this perceived penalty rate according to the
### regression model - this will now include the correlations with the significant covariates
sampled.per.penalty<- ALP.get.rearranged.sampled.dist(sampled.per.penalty,
reg.perceivedpenaltyrate)
# Store as a fraction (percent / 100).
model.pop.attributes$perceivedpenaltyrate <- round(sampled.per.penalty,0)/100
for(k in c("Income")){
print(cor(model.pop.attributes$perceivedpenaltyrate,model.pop.attributes[,k]))## cor is no longer 0.
}
fields.to.keep<- c(fields.to.keep,"perceivedpenaltyrate")
source(paste("Survey code/","Calibration.Target.Increased.Perceived.Audit.Rate.R",sep=""))
########################################
### ###
### Regression 3: find how c_1 (i.e.) tax.rate.threshold.max
### depends on age, income.cat , "perceivedevasionratepopulation" "perceivedevasionrate"
###
########################################
### Note we include "perceivedevasionratepopulation" "perceivedevasionrate" to see how fairness (c_1)
### depends on these perceived evasion rates.
dependent <- "tax.rate.threshold.max"
covariates <- c("calcage","gender","income.val.5","perceivedtaxrate",
"everaudited","spouseaudit",
"prop.alters.tTaxes","altertaxaudit.tTaxes",
"selfemployed","alterselfemployed",
"actor","servicestaxes","perceivedevasionratepopulation",
"perceivedevasionrate",
"perceivedauditrate","perceivedpenaltyrate")
lab<- "Linear Regression of the c1 threshold"
gillo<-ALP.glm(df,dependent,covariates,
threshold = 0.05,lab=lab,
survey.output.dir=survey.output.dir)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_tax.rate.threshold.max",".Rdata",sep=""))
reg.tax.rate.threshold.max <- round(predict(gillo,newdata=model.pop.attributes,type="response"),0)
#### Generated distribution of c1
tab.label<- paste("ALP_c1_fit_corrected","all",sep="_")
# Impute every c1 fit variant except the last entry of model.param.tab$c1.
for(type.c1.fit in rev(names(model.param.tab$c1))[-1] ){
c1.dist <- model.param.tab$c1[[type.c1.fit]]
n<- nrow(model.pop.attributes)
sampled.c1<-sample(c1.dist$x,n, replace = T, prob=c1.dist$y)
summary(sampled.c1)
cor(sampled.c1,model.pop.attributes$perceivedtaxrate) ## cor should be ~ 0
#### Rearrange the distribution by assigning this c1 according to the
### regression model - this will now include the correlations with the
### significant covariates
# Impose the target correlation with perceivedtaxrate (rho from survey analysis).
boh <- ALP.match.cor(model.pop.attributes$perceivedtaxrate,sampled.c1,rho=0.08762806,verbose=F)
model.pop.attributes[,type.c1.fit]<-round(boh$y,0)/100
for(k in c("perceivedtaxrate","Income")){
print(cor(model.pop.attributes[,type.c1.fit],model.pop.attributes[,k]))
}
fields.to.keep<- c(fields.to.keep,type.c1.fit)
}
########################################
### ###
### Regression 4: find how m
### depends on age, income.cat
########################################
dependent <- "m"
covariates <- c("calcage","gender","income.val.5","perceivedtaxrate",
"everaudited","spouseaudit",
"prop.alters.tTaxes","altertaxaudit.tTaxes",
"selfemployed","alterselfemployed",
"actor","servicestaxes","perceivedevasionratepopulation",
#"perceivedevasionrate",
"perceivedauditrate")
## note removed ,"perceivedpenaltyrate" as it is colinear with "perceivedauditrate"
lab<- "Linear Regression of the m value"
gillo<-ALP.glm(df,dependent,covariates,
threshold = 0.00,lab=lab,
survey.output.dir=survey.output.dir,texreg.digits=10)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_m_value.max",".Rdata",sep=""))
reg.m <- predict(gillo,newdata=model.pop.attributes,type="response")
# m in df is also replaced by its fitted value, so the s regression below uses it.
df$m <- predict(gillo,newdata=df,type="response")
#### Generated distribution of m
# PERT draws parameterized by the survey quartiles (min=Q1, mode=median, max=Q3).
qP.parameters<- model.param.tab$qP$qP.summary
n<- nrow(model.pop.attributes)
sampled.m <- rpert(n, x.min=as.numeric(qP.parameters["m","1st Qu."]),
x.max= as.numeric(qP.parameters["m","3rd Qu."]),
x.mode=as.numeric(qP.parameters["m","Median"]))
for(k in c("Income","perceivedauditrate","perceivedpenaltyrate")){
print(cor(sampled.m,model.pop.attributes[,k]))
}
#### Rearrange the distribution by assigning m according to the regression
sampled.m <- ALP.get.rearranged.sampled.dist(sampled.m, reg.m)
model.pop.attributes$m <- sampled.m
for(k in c("Income","perceivedauditrate","perceivedpenaltyrate")){
print(cor(model.pop.attributes$m,model.pop.attributes[,k]) )## cor is no longer 0.
}
fields.to.keep<- c(fields.to.keep,"m")
########################################
### ###
### Regression 5: find how s
### depends on age, income.cat
########################################
dependent <- "s"
covariates <- c("calcage","gender","income.val.5","perceivedtaxrate",
"everaudited","spouseaudit",
"prop.alters.tTaxes","altertaxaudit.tTaxes",
"selfemployed","alterselfemployed",
"actor","servicestaxes","perceivedevasionratepopulation",
"perceivedevasionrate",
"perceivedauditrate","m")
lab<- "Linear Regression of the s value"
gillo<-ALP.glm(df,dependent,covariates,
threshold = 0.00,lab=lab,
survey.output.dir=survey.output.dir,texreg.digits=10)
saveRDS(gillo,file=paste(survey.output.dir,"Reg_s_value.max",".Rdata",sep=""))
reg.s <- predict(gillo,newdata=model.pop.attributes,type="response")
#### Generated distribution of s
# PERT draws from the survey quartiles, same recipe as for m above.
qP.parameters<- model.param.tab$qP$qP.summary
n<- nrow(model.pop.attributes)
sampled.s <- rpert(n, x.min=as.numeric(qP.parameters["s","1st Qu."]),
x.max= as.numeric(qP.parameters["s","3rd Qu."]),
x.mode=as.numeric(qP.parameters["s","Median"]))
for(k in c("Income","perceivedauditrate","perceivedpenaltyrate")){
print(cor(sampled.s,model.pop.attributes[,k]))
}
#### Rearrange the distribution by assigning s according to the regression
sampled.s <- ALP.get.rearranged.sampled.dist(sampled.s, reg.s)
model.pop.attributes$s <- sampled.s
for(k in c("Income","perceivedauditrate","perceivedpenaltyrate")){
print(cor(model.pop.attributes$s,model.pop.attributes[,k]) )## cor is no longer 0.
}
fields.to.keep<- c(fields.to.keep,"s")
########################################
### ###
### Write to File
########################################
fields.to.keep<- unique(fields.to.keep)
model.pop.attributes <- model.pop.attributes[,fields.to.keep]
print(head(model.pop.attributes))
# Overwrites the input .demog.person.RData with the augmented attributes.
saveRDS(model.pop.attributes,file=paste(network.data.dir,network.data.file,
".demog.person.RData",sep=""))
# colnames(model.pop.attributes)<-tolower(colnames(model.pop.attributes))
# model.pop.attributes$tax.ids <- 1:nrow(model.pop.attributes)
# colnames(model.pop.attributes)[match(c("perceivedauditrate","perceivedpenaltyrate"),
# colnames(model.pop.attributes))]<-
# c("per.audit.rate","per.penalty.rate")
########################################
### ###
### Prepare Population for input to the ABM
###
########################################
pop.data<-model.pop.attributes
names(pop.data) <- tolower(names(pop.data))
pop.data[, 'tax.ids'] <- 1:nrow(pop.data)
# Look up each agent's effective tax rate from the bracket table matching
# their filing status; findInterval picks the bracket row whose min <= income.
pop.data[, 'tax.rate'] <- sapply(pop.data[, "tax.ids"], function(id){
inc.f.stat <- pop.data[id, c("income", "filing.status")]
sub.set <- eff.tax.rate[eff.tax.rate$filing.status == inc.f.stat$filing.status, ]
interval.row <- findInterval(inc.f.stat$income, sub.set$min, rightmost.closed = T)
sub.set[interval.row, 'tax.rate']
})
# Rename the perception columns to the names the ABM expects.
i <- which(names(pop.data) == "perceivedauditrate")
names(pop.data)[i] <- "per.audit.rate"
i <- which(names(pop.data) == "perceivedpenaltyrate")
names(pop.data)[i] <- "per.penalty.rate"
write.csv(pop.data,
paste(model.inputs.dir,network.data.file,"_population_data",".csv",sep=""),
row.names = FALSE)
# Translate the contact network's person IDs into the ABM's integer tax.ids.
netw.data <- readRDS(file=paste(network.data.dir,network.data.file,
".contact.RData",sep=""))
netw.data[, 'id1'] <- match(netw.data$Person.Id.1, pop.data$person.id)
netw.data[, 'id2'] <- match(netw.data$Person.Id.2, pop.data$person.id)
write.csv(netw.data,
paste(model.inputs.dir,network.data.file,"_network_data",".csv",sep=""),
row.names = F)
}
|
#### THESE FUNCTIONS NEED MORE TESTING
#' Plots island area function through time
#'
#' @param totaltime total time of simulation
#' @param Apars a named list containing area parameters:
#' \itemize{
#' \item{[1]: maximum area}
#' \item{[2]: value from 0 to 1 indicating where in the island's history the
#' peak area is achieved}
#' \item{[3]: sharpness of peak}
#' \item{[4]: total island age}
#' }
#' @param island_ontogeny a string describing the type of island ontogeny. Can be \code{NULL},
#' \code{"beta"} for a beta function describing area through time,
#' or \code{"linear"} for a linear function
#' @param resolution numeric indicating resolution of plot. Should be > 0.
#' @family rates calculation
#'
#' @return a plot with the area size through time
#' @export
DAISIE_plot_area <- function(totaltime,
                             Apars,
                             island_ontogeny = "beta",
                             resolution) {
  testit::assert(DAISIE::is_island_ontogeny_input(island_ontogeny))
  island_ontogeny <- translate_island_ontogeny(
    island_ontogeny = island_ontogeny
  )
  testit::assert(are_area_params(Apars))
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("Package \"ggplot2\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  axis <- seq(0, totaltime, by = resolution)
  # Preallocate via vapply instead of growing `area` element by element.
  # Apars was already asserted valid above, so the loop-invariant assertion
  # is not repeated at every time point.
  area <- vapply(
    axis,
    function(timeval) {
      DAISIE::island_area(timeval = timeval,
                          Apars = Apars,
                          island_ontogeny = island_ontogeny)
    },
    numeric(1)
  )
  island_area_time <- data.frame(Area = area, Time = axis, Totaltime = totaltime)
  Time <- NULL; rm(Time) # nolint, fixes warning: no visible binding for global variable
  Area <- NULL; rm(Area) # nolint, fixes warning: no visible binding for global variable
  graphics::plot(ggplot2::ggplot(
    data = island_area_time,
    ggplot2::aes(x = Time, y = Area)) +
      ggplot2::ggtitle("Variation of island area during simulation") +
      ggplot2::theme_classic() +
      ggplot2::geom_line(size = 1.5, color = "darkgreen")
  )
  invisible(island_area_time)
}
#' Plots extinction rate function through time
#'
#' @param totaltime total time of simulation
#' @param K K (clade-level carrying capacity)
#' @param Apars a named list containing area parameters:
#' \itemize{
#' \item{[1]: maximum area}
#' \item{[2]: value from 0 to 1 indicating where in the island's history the
#' peak area is achieved}
#' \item{[3]: sharpness of peak}
#' \item{[4]: total island age}
#' }
#' @param Epars a numeric vector:
#' \itemize{
#' \item{[1]: minimum extinction when area is at peak}
#' \item{[2]: extinction rate when current area is 0.10 of maximum area}
#' }
#' @param island_ontogeny a string describing the type of island ontogeny. Can be \code{NULL},
#' \code{beta} for a beta function describing area through time,
#' or \code{linear} for a linear function
#' @param removed_timepoints starting position of time vector
#' @param resolution resolution of time axis
#'
#' @author Pedro Neves
#' @return per capita extinction rate through time plot and dataframe with extinction
#' at corresponding time
#' @export
DAISIE_plot_extinction <- function(totaltime,
                                   K,
                                   Apars,
                                   Epars,
                                   island_ontogeny = "beta",
                                   removed_timepoints,
                                   resolution) {
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("Package \"ggplot2\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  testit::assert(DAISIE::is_island_ontogeny_input(island_ontogeny))
  island_ontogeny <- translate_island_ontogeny(
    island_ontogeny = island_ontogeny
  )
  axis <- seq(0, totaltime, by = resolution)
  # Preallocate via vapply instead of growing ext_rate inside a loop.
  ext_rate <- vapply(
    axis,
    function(timeval) {
      DAISIE::get_ext_rate(
        timeval = timeval,
        Apars = Apars,
        Epars = Epars,
        mu = NA,
        K = K,
        extcutoff = 1100,
        island_spec = matrix(ncol = 1),
        island_ontogeny = island_ontogeny
      )
    },
    numeric(1)
  )
  # Drop the leading time points up to removed_timepoints (burn-in of the axis).
  keep <- removed_timepoints:length(ext_rate)
  ext_rate_time <- data.frame(Extinction = ext_rate[keep], Time = axis[keep])
  Time <- NULL; rm(Time) # nolint, fixes warning: no visible binding for global variable
  Extinction <- NULL; rm(Extinction) # nolint, fixes warning: no visible binding for global variable
  graphics::plot(ggplot2::ggplot(
    data = ext_rate_time,
    ggplot2::aes(x = Time, y = Extinction)) +
      ggplot2::ggtitle("Variation of per-capita extinction rate"
      ) +
      ggplot2::geom_line(size = 1, color = "red4") + ggplot2::ylim(0, 0.2))
  invisible(ext_rate_time)
}
#' Plot immigration rate through time
#'
#' @param totaltime total time of simulation
#' @param K K (clade-level carrying capacity)
#' @param Apars a named list containing area parameters as created by create_area_params:
#' \itemize{
#' \item{[1]: maximum area}
#' \item{[2]: value from 0 to 1 indicating where in the island's history the
#' peak area is achieved}
#' \item{[3]: sharpness of peak}
#' \item{[4]: total island age}
#' }
#' @param gam minimum per capita immigration rate
#' @param mainland_n number of mainland species. Set as 1 for clade-specific
#' diversity dependence
#' @param island_ontogeny a string describing the type of island ontogeny. Can be \code{NULL},
#' \code{beta} for a beta function describing area through time,
#' or \code{linear} for a linear function
#' @param removed_timepoints starting position of time vector
#' @param resolution resolution of time axis
#'
#' @author Pedro Neves
#' @return a plot with per capita immigration rate through time and dataframe with immigration
#' at corresponding time
#' @export
DAISIE_plot_immigration <- function(totaltime,
                                    K,
                                    Apars,
                                    gam,
                                    mainland_n,
                                    island_ontogeny = "beta",
                                    removed_timepoints,
                                    resolution) {
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("Package \"ggplot2\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  testit::assert(DAISIE::is_island_ontogeny_input(island_ontogeny))
  island_ontogeny <- translate_island_ontogeny(
    island_ontogeny = island_ontogeny
  )
  axis <- seq(0, totaltime, by = resolution)
  # Preallocate via vapply instead of growing immig_rate inside a loop.
  immig_rate <- vapply(
    axis,
    function(timeval) {
      get_immig_rate(
        timeval = timeval,
        totaltime = totaltime,
        Apars = Apars,
        gam = gam,
        K = K,
        # FIX: was hard-coded to 1, which silently ignored the mainland_n
        # argument documented above; now the caller's value is used.
        mainland_n = mainland_n,
        island_spec = matrix(ncol = 1),
        island_ontogeny = island_ontogeny
      )
    },
    numeric(1)
  )
  # Drop the leading time points up to removed_timepoints (burn-in of the axis).
  keep <- removed_timepoints:length(immig_rate)
  immig_rate_time <- data.frame(Immigration = immig_rate[keep], Time = axis[keep])
  Time <- NULL; rm(Time) # nolint, fixes warning: no visible binding for global variable
  Immigration <- NULL; rm(Immigration) # nolint, fixes warning: no visible binding for global variable
  graphics::plot(ggplot2::ggplot(data = immig_rate_time, ggplot2::aes(x = Time, y = Immigration)) +
                   ggplot2::ggtitle("Variation of per-capita immigration rate") +
                   ggplot2::geom_line(size = 1, color = "blue4") +
                   ggplot2::ylim(0, 0.002))
  invisible(immig_rate_time)
}
#' Plot cladogenesis rate through time
#'
#' @param totaltime total time of simulation
#' @param K K (clade-level carrying capacity)
#' @param Apars a named list containing area parameters as created by create_area_params:
#' \itemize{
#' \item{[1]: maximum area}
#' \item{[2]: value from 0 to 1 indicating where in the island's history the
#' peak area is achieved}
#' \item{[3]: sharpness of peak}
#' \item{[4]: total island age}
#' }
#' @param lac minimum per capita cladogenesis rate
#' @param island_ontogeny a string describing the type of island ontogeny. Can be \code{NULL},
#' \code{beta} for a beta function describing area through time,
#' or \code{linear} for a linear function
#' @param removed_timepoints starting position of time vector
#' @param resolution resolution of time axis
#'
#' @return a plot with per capita cladogenesis rate through time and dataframe with immigration
#' at corresponding time
#' @export
#'
#' @author Pedro Neves
DAISIE_plot_cladogenesis <- function(totaltime,
                                     K,
                                     Apars,
                                     lac,
                                     island_ontogeny = "beta",
                                     removed_timepoints,
                                     resolution) {
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("Package \"ggplot2\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  testit::assert(DAISIE::is_island_ontogeny_input(island_ontogeny))
  island_ontogeny <- translate_island_ontogeny(
    island_ontogeny = island_ontogeny
  )
  axis <- seq(0, totaltime, by = resolution)
  # Preallocate via vapply instead of growing clado_rate inside a loop.
  clado_rate <- vapply(
    axis,
    function(timeval) {
      get_clado_rate(timeval = timeval,
                     Apars = Apars,
                     lac = lac,
                     K = K,
                     island_spec = matrix(ncol = 1),
                     island_ontogeny = island_ontogeny)
    },
    numeric(1)
  )
  # Drop the leading time points up to removed_timepoints (burn-in of the axis).
  keep <- removed_timepoints:length(clado_rate)
  clado_rate_time <- data.frame(Cladogenesis = clado_rate[keep],
                                Time = axis[keep])
  Time <- NULL; rm(Time) # nolint, fixes warning: no visible binding for global variable
  Cladogenesis <- NULL; rm(Cladogenesis) # nolint, fixes warning: no visible binding for global variable
  graphics::plot(
    ggplot2::ggplot(data = clado_rate_time,
                    ggplot2::aes(x = Time, y = Cladogenesis)) +
      ggplot2::ggtitle("Variation of per-capita cladogenesis rate") +
      ggplot2::geom_line(size = 1, color = "darkorchid4")
  )
  invisible(clado_rate_time)
}
|
/R/DAISIE_plot_area.R
|
no_license
|
jorismoorkamp/DAISIE
|
R
| false
| false
| 10,497
|
r
|
#### THESE FUNCTIONS NEED MORE TESTING
#' Plots island area function through time
#'
#' @param totaltime total time of simulation
#' @param Apars a named list containing area parameters:
#' \itemize{
#'   \item{[1]: maximum area}
#'   \item{[2]: value from 0 to 1 indicating where in the island's history the
#'   peak area is achieved}
#'   \item{[3]: sharpness of peak}
#'   \item{[4]: total island age}
#' }
#' @param island_ontogeny a string describing the type of island ontogeny. Can be \code{NULL},
#' \code{"beta"} for a beta function describing area through time,
#' or \code{"linear"} for a linear function
#' @param resolution numeric indicating the step of the time axis. Should be > 0.
#' @family rates calculation
#'
#' @return a plot with the area size through time; the underlying dataframe is
#' returned invisibly
#' @export
DAISIE_plot_area <- function(totaltime,
                             Apars,
                             island_ontogeny = "beta",
                             resolution) {
  testit::assert(DAISIE::is_island_ontogeny_input(island_ontogeny))
  island_ontogeny <- translate_island_ontogeny(
    island_ontogeny = island_ontogeny
  )
  testit::assert(are_area_params(Apars))
  # ggplot2 is an optional dependency; fail early and clearly.
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("Package \"ggplot2\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  axis <- seq(0, totaltime, by = resolution)
  # Preallocate instead of growing with c(); Apars was already validated above,
  # so the per-iteration assert of the original was redundant and is removed.
  area <- numeric(length(axis))
  for (i in seq_along(axis)) {
    area[i] <- DAISIE::island_area(timeval = axis[i],
                                   Apars = Apars,
                                   island_ontogeny = island_ontogeny)
  }
  island_area_time <- data.frame(Area = area, Time = axis, Totaltime = totaltime)
  Time <- NULL; rm(Time) # nolint, fixes warning: no visible binding for global variable
  Area <- NULL; rm(Area) # nolint, fixes warning: no visible binding for global variable
  graphics::plot(ggplot2::ggplot(
    data = island_area_time,
    ggplot2::aes(x = Time, y = Area)) +
      ggplot2::ggtitle("Variation of island area during simulation") +
      ggplot2::theme_classic() +
      ggplot2::geom_line(size = 1.5, color = "darkgreen")
  )
  invisible(island_area_time)
}
#' Plots extinction rate function through time
#'
#' @param totaltime total time of simulation
#' @param K K (clade-level carrying capacity)
#' @param Apars a named list containing area parameters:
#' \itemize{
#'   \item{[1]: maximum area}
#'   \item{[2]: value from 0 to 1 indicating where in the island's history the
#'   peak area is achieved}
#'   \item{[3]: sharpness of peak}
#'   \item{[4]: total island age}
#' }
#' @param Epars a numeric vector:
#' \itemize{
#'   \item{[1]: minimum extinction when area is at peak}
#'   \item{[2]: extinction rate when current area is 0.10 of maximum area}
#' }
#' @param island_ontogeny a string describing the type of island ontogeny. Can be \code{NULL},
#' \code{beta} for a beta function describing area through time,
#' or \code{linear} for a linear function
#' @param removed_timepoints starting position of time vector
#' @param resolution resolution of time axis
#'
#' @author Pedro Neves
#' @return per capita extinction rate through time plot; the dataframe with the
#' extinction rate at each plotted time is returned invisibly
#' @export
DAISIE_plot_extinction <- function(totaltime,
                                   K,
                                   Apars,
                                   Epars,
                                   island_ontogeny = "beta",
                                   removed_timepoints,
                                   resolution) {
  # ggplot2 is an optional dependency; fail early and clearly.
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("Package \"ggplot2\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  testit::assert(DAISIE::is_island_ontogeny_input(island_ontogeny))
  island_ontogeny <- translate_island_ontogeny(
    island_ontogeny = island_ontogeny
  )
  axis <- seq(0, totaltime, by = resolution)
  # Preallocate instead of growing the vector with c() on every iteration.
  ext_rate <- numeric(length(axis))
  for (i in seq_along(axis)) {
    ext_rate[i] <- DAISIE::get_ext_rate(
      timeval = axis[i],
      Apars = Apars,
      Epars = Epars,
      mu = NA, # ontogeny mode: rate comes from Epars/area, not a constant mu
      K = K,
      extcutoff = 1100,
      island_spec = matrix(ncol = 1),
      island_ontogeny = island_ontogeny
    )
  }
  # Drop the leading timepoints the caller asked to remove (burn-in of the axis).
  keep <- removed_timepoints:length(axis)
  ext_rate_time <- data.frame(Extinction = ext_rate[keep], Time = axis[keep])
  Time <- NULL; rm(Time) # nolint, fixes warning: no visible binding for global variable
  Extinction <- NULL; rm(Extinction) # nolint, fixes warning: no visible binding for global variable
  graphics::plot(ggplot2::ggplot(
    data = ext_rate_time,
    ggplot2::aes(x = Time, y = Extinction)) +
      ggplot2::ggtitle("Variation of per-capita extinction rate"
      ) +
      ggplot2::geom_line(size = 1, color = "red4") + ggplot2::ylim(0, 0.2))
  invisible(ext_rate_time)
}
#' Plot immigration rate through time
#'
#' @param totaltime total time of simulation
#' @param K K (clade-level carrying capacity)
#' @param Apars a named list containing area parameters as created by create_area_params:
#' \itemize{
#'   \item{[1]: maximum area}
#'   \item{[2]: value from 0 to 1 indicating where in the island's history the
#'   peak area is achieved}
#'   \item{[3]: sharpness of peak}
#'   \item{[4]: total island age}
#' }
#' @param gam minimum per capita immigration rate
#' @param mainland_n number of mainland species. Set as 1 for clade-specific
#' diversity dependence
#' @param island_ontogeny a string describing the type of island ontogeny. Can be \code{NULL},
#' \code{beta} for a beta function describing area through time,
#' or \code{linear} for a linear function
#' @param removed_timepoints starting position of time vector
#' @param resolution resolution of time axis
#'
#' @author Pedro Neves
#' @return a plot with per capita immigration rate through time; the dataframe
#' with the immigration rate at each plotted time is returned invisibly
#' @export
DAISIE_plot_immigration <- function(totaltime,
                                    K,
                                    Apars,
                                    gam,
                                    mainland_n,
                                    island_ontogeny = "beta",
                                    removed_timepoints,
                                    resolution) {
  # ggplot2 is an optional dependency; fail early and clearly.
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("Package \"ggplot2\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  testit::assert(DAISIE::is_island_ontogeny_input(island_ontogeny))
  island_ontogeny <- translate_island_ontogeny(
    island_ontogeny = island_ontogeny
  )
  axis <- seq(0, totaltime, by = resolution)
  # Preallocate instead of growing the vector with c() on every iteration.
  immig_rate <- numeric(length(axis))
  for (i in seq_along(axis)) {
    immig_rate[i] <- get_immig_rate(
      timeval = axis[i],
      totaltime = totaltime,
      Apars = Apars,
      gam = gam,
      K = K,
      # Bug fix: the original hard-coded mainland_n = 1, silently ignoring the
      # documented mainland_n argument. Pass the caller's value through.
      mainland_n = mainland_n,
      island_spec = matrix(ncol = 1),
      island_ontogeny = island_ontogeny
    )
  }
  # Drop the leading timepoints the caller asked to remove (burn-in of the axis).
  keep <- removed_timepoints:length(axis)
  immig_rate_time <- data.frame(Immigration = immig_rate[keep], Time = axis[keep])
  Time <- NULL; rm(Time) # nolint, fixes warning: no visible binding for global variable
  Immigration <- NULL; rm(Immigration) # nolint, fixes warning: no visible binding for global variable
  graphics::plot(ggplot2::ggplot(data = immig_rate_time, ggplot2::aes(x = Time, y = Immigration)) +
      ggplot2::ggtitle("Variation of per-capita immigration rate") +
      ggplot2::geom_line(size = 1, color = "blue4") +
      ggplot2::ylim(0, 0.002))
  invisible(immig_rate_time)
}
#' Plot cladogenesis rate through time
#'
#' @param totaltime total time of simulation
#' @param K K (clade-level carrying capacity)
#' @param Apars a named list containing area parameters as created by create_area_params:
#' \itemize{
#'   \item{[1]: maximum area}
#'   \item{[2]: value from 0 to 1 indicating where in the island's history the
#'   peak area is achieved}
#'   \item{[3]: sharpness of peak}
#'   \item{[4]: total island age}
#' }
#' @param lac minimum per capita cladogenesis rate
#' @param island_ontogeny a string describing the type of island ontogeny. Can be \code{NULL},
#' \code{beta} for a beta function describing area through time,
#' or \code{linear} for a linear function
#' @param removed_timepoints starting position of time vector
#' @param resolution resolution of time axis
#'
#' @return a plot with per capita cladogenesis rate through time; the dataframe
#' with the cladogenesis rate at each plotted time is returned invisibly
#' @export
#'
#' @author Pedro Neves
DAISIE_plot_cladogenesis <- function(totaltime,
                                     K,
                                     Apars,
                                     lac,
                                     island_ontogeny = "beta",
                                     removed_timepoints,
                                     resolution) {
  # ggplot2 is an optional dependency; fail early and clearly.
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("Package \"ggplot2\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  testit::assert(DAISIE::is_island_ontogeny_input(island_ontogeny))
  island_ontogeny <- translate_island_ontogeny(
    island_ontogeny = island_ontogeny
  )
  axis <- seq(0, totaltime, by = resolution)
  # Preallocate instead of growing the vector with c() on every iteration.
  clado_rate <- numeric(length(axis))
  for (i in seq_along(axis)) {
    clado_rate[i] <- get_clado_rate(timeval = axis[i],
                                    Apars = Apars,
                                    lac = lac,
                                    K = K,
                                    island_spec = matrix(ncol = 1),
                                    island_ontogeny = island_ontogeny)
  }
  # Drop the leading timepoints the caller asked to remove (burn-in of the axis).
  keep <- removed_timepoints:length(axis)
  clado_rate_time <- data.frame(Cladogenesis = clado_rate[keep],
                                Time = axis[keep])
  Time <- NULL; rm(Time) # nolint, fixes warning: no visible binding for global variable
  Cladogenesis <- NULL; rm(Cladogenesis) # nolint, fixes warning: no visible binding for global variable
  graphics::plot(
    ggplot2::ggplot(data = clado_rate_time,
                    ggplot2::aes(x = Time, y = Cladogenesis)) +
      ggplot2::ggtitle("Variation of per-capita cladogenesis rate") +
      ggplot2::geom_line(size = 1, color = "darkorchid4")
  )
  invisible(clado_rate_time)
}
|
library(shiny)
library(shinythemes)
library(plotly)
source("analysis.R")
# Shiny UI for the "Sleep Deprivation" app: a themed navbarPage with the tabs
# Background, Trend, Brutal Reality, Causes (menu), Impact, Conclusion and
# About (menu). Output IDs referenced here (e.g. "us_timeline", "pie_chart",
# "sleep_impacts") are presumably rendered by the server built on analysis.R
# -- TODO confirm against the server definition.
ui <- fluidPage(
  navbarPage(
    theme = shinytheme("cyborg"),
    "Sleep Deprivation",
    # Background tab: static project overview and per-page descriptions.
    tabPanel(
      "Background",
      tags$div(
        h3("About This Project:"),
        p("In today's society, people sacrifice their sleep time for various
          reasons: studying for exams, finishing up work or tasks, suffering
          from anxiety or sleep disorders… etc. No matter what the reasons are,
          they all contribute to sleep deprivation. Since people not getting
          enough sleep has become a global phenomenon, it is important to learn
          about its effect and what sleep deprivation is like in society."),
        p("This project specifically focuses on the topic of sleep deprivation for adults aged 19 to 39 in the United States. The topics covered in this project include adults' sleeping trends from 2000 to 2015, negative impacts of sleep deprivation, the general sleeping hour in each state, and how much time people spend on different activities compared to their sleep time. Moreover, by presenting the information through different types of interactive and non-interactive graphs, our team hopes to provide a fun and interactive learning experience to the users."),
        h3("Page Description"),
        p("The Trend tab introduces the average sleep time for U.S adult aged 19 to 39 from 2000 to 2012, the information was collected by applying Actigraphic Study and Polysomnography Study. On the bottom, a map illustrates the average percentage of people who do not have a least 7 hours of sleep in each state in the U.S."),
        p("The Brutal Reality tab shows the fact that our performance is bad because of sleep deprivation. "),
        p("The Causes tab has two sub tabs. On the Multiple Factors tab, an interactive pie chart demonstrates the most common factors that cause U.S college students stay up so late at night based on the survey. On the Life tracking sample page, the interactive scatter plot compares different activity time with people’s sleep time."),
        p("The Impact tab introduces some possible impact on people’s mental and physical health if people are sleep deprived for long, as well as how sleep deprivation could affect students’ academic performance. The tab has an interactive scatterplot that allows user to choose their own interest of age group and multiple outcomes of sleep deprivation. On the bottom, an interactive bar graph gives users a glance of U.S college students’ average GPA affected by daytime sleepiness."),
        p("The conclusion tab summarize the biggest lesson of this project, strength and weakness, and some possible improvement that could be made to make this project better. The sleep time calculator at the bottom will calculate the time user should go to bed to get enough sleep based on their age and what time they plan to wake up."),
        p("The About tab has two sub tabs : About Tech and About us. The About Tech tab has the citation for all the resources used in the project. The About Us tab gives a brief introduction of each members in this project group.")
      )
    ),
    # Trend tab: radio-button choice of study method drives "us_timeline";
    # a static choropleth ("us_map_7") follows with explanatory text.
    tabPanel(
      "Trend",
      tags$div(
        h3("The current sleeping time trend in U.S. population"),
        sidebarLayout(
          sidebarPanel(
            radioButtons("StudiesMethods",
              label = h3("Studies' Methods"),
              choices = list(
                "Actigraphic Study" = 1,
                "Polysomnography Study" = 2,
                "Both Studies" = 3
              ),
              selected = 3
            ),
            hr(),
          ),
          mainPanel(
            plotOutput("us_timeline")
          )
        ),
        p("This graph represents the average sleep time in minutes for adults (18-39) in the U.S from 2000 to 2015. The study methods include *Actigraphic Study and *Polysomnography Study."),
        p("For Actigraphic Study, the trend experiences a drastic decline from the year 2000 (the highest point) to 2002, then it turns to a small and steady decline until 2006. Starting from 2006, the trend grows constantly for three years then keeps decreasing to its lowest point at the end."),
        p("For Polysomnography Study, the trend remains steady from the beginning year, 2000, to 2006. It increases by little for half of a year then goes down constantly with small fluctuation. Starting from 2009, when the trend reaches its lowest point, it goes upward slowly until the end."),
        p("Note:"),
        p("* Actigraphic Study: Actigraphy is a non-invasive technique used to assess cycles of activity and rest over several days to several weeks."),
        p("(2019, Stanford Health Care. https://stanfordhealthcare.org/medical-tests/s/sleep-disorder-tests/procedures/actigraphy.html)"),
        p("* Polysomnography Study: A polysomnogram continuously records brain waves during sleep, as well as a number of nerve and muscle functions during nighttime sleep."),
        p("(2019, Stanford Health Care. https://stanfordhealthcare.org/medical-tests/p/polysomnogram.html)"),
        h3("Geographic map of US adults sleeping <7 hours"),
        plotOutput("us_map_7"),
        p("The geographic map illustrates the percentage of people in each state in the U.S that sleep less than 7 hours. As the percentage of people gets higher, the black color goes darker. As shown in the graph, states that have the largest population of having less than 7 hours of sleep are mostly located in the Midwest. On the contrary, western states tend to have fewer people who do not get sufficient sleep."),
        p("This graph allows people to have a brief idea of a lack of sleep in each state at one glance. People can easily pick up the states which they want to have a closer look on sleeping issues accordingly to the information presented in the graph.")
      )
    ),
    # Brutal Reality tab: two plotly charts, one driven by a gender radio
    # button ("br1"), one by a perceived-sleep radio button ("br2").
    tabPanel(
      "Brutal Reality",
      titlePanel("The FACT that we are having less sleep hours can..."),
      p("The Brutal Reality tab shows people the certain fact that sleep deprivation is actually leading to a bad performance in life. It will be discussed in multiple ways, such as fatigue driving, test performances, and reaction times (RTs)."),
      sidebarLayout(
        sidebarPanel(
          radioButtons("Gender",
            label = h3("Gender"),
            choices = list(
              "Male" = 1,
              "Female" = 2
            ),
            selected = 1
          ),
          hr()
        ),
        mainPanel(
          tags$div(
            p("Separating by gender allows us to see how both are affected by gender deprivation. Since women and men are so different, the effects can also be different. We will be investigating how men and women feel after a not good night sleep. ")
          ),
          plotlyOutput("br1")
        )
      ),
      tags$hr(),
      sidebarLayout(
        sidebarPanel(
          radioButtons("Sleepdepriv",
            label = h3("Sleepdepriv"),
            choices = list(
              "Enough" = 1,
              "Tired" = 2
            ),
            selected = 1
          ),
          hr()
        ),
        mainPanel(
          tags$div(
            p("Americans are not getting enough sleep. It also depends how each person perceives what enough sleep is. Many people are tired during the day and cannot focus on their daily activities. By graphing enough and tired, we can see the affects of sleep deprivation.")
          ),
          plotlyOutput("br2")
        )
      )
    ),
    # Causes menu: a pie chart of self-reported factors and an
    # activity-vs-sleep scatter driven by a select input.
    navbarMenu(
      "Causes",
      tabPanel(
        "Multiple Factors",
        titlePanel("Why are we staying up so late?"),
        p("University students answered to a self-report of multiple causes of their sleep deprivation, which are categorized into the below pie chart: "),
        plotlyOutput("pie_chart"),
        p("The interactive pie chart demonstrates the most common factors that cause U.S college students stay up so late at night based on the survey. Although there are 14 categories presenting on this chart, it could be organized to 4."),
        p("The first one is Dorm/Sleeping environment’s poor quality, including tobacco smoke in sleeping room(7.97%), room’s bad air quality(6.89%), room scents(6.64%), and noise from next door (6.5%). The second category is student’s mental state and feelings, including stress (8.1%), fatigue (6.69%), sadness (6.64%), depression (7.97%), being patient (6.89)and finally, anxiety and tension (6.39%). The third category is student’s physical state, which are pain (7.87%) and strenuous physical activity (6.74%). The fourth category is family issue, making up 7.82 % of the pie chart. (The rest of the 6 percent is reported as other.)"),
        p("Overall, students’ poor sleeping/ resting environment makes up 28 % of the pie chart while student’s mental state takes up to 42.6 %, almost half of the sleep deprived students is affected by their mental states. Physical state occupied 14.61 percent of the chart and family issue has least effect on student sleep, which is only 7.82 percent. ")
      ),
      tabPanel(
        "Life Tracking Sample",
        titlePanel("Let's see what people in U.S. do during the day"),
        sidebarLayout(
          sidebarPanel(
            selectInput("select.activities",
              label = h4("Select an activity to compare with sleep's time:"),
              choices = list(
                "Cook" = "cook", "Eat" = "eat",
                "Math" = "math", "Music" = "music",
                "Pause" = "pause", "Prep" = "prep",
                "Uni" = "uni", "Meditation" = "meditation",
                "Special" = "special", "Work" = "work"
              ),
              selected = "cook"
            ),
            hr(),
          ),
          mainPanel(
            plotlyOutput("compared.bar")
          )
        ),
        h3("Let's see what people in U.S. do during the day:"),
        p("The interactive scatter plot compare different activity time with sleeping-time. The x-axis will be all kinds of different activities that can be chosen from the side bar. Y-axis will be the general sleep-time."),
        p("There are some interesting ups and downs for some graphs like “cook”, “eat”, “music”, and even “work”. We don’t know the causal or correlational factors of those bumps, which means further research need to be down with that."),
        p("But for math, it’s pretty clear that generally, if they do more math problems and use more of the brain, the less sleep people are getting. It can be an analog to doing homework as if in students’ life."),
        p("In general, all activities have a tendency to go down except for music and meditation. It seems that people have so many things to do in life, studying, social networking, hanging out with friends that they can not even have enough time to sleep."))
    ),
    # Impact tab: age-group/symptom radio buttons drive "sleep_impacts";
    # a GPA bar chart ("sleep_GPA") follows with commentary.
    tabPanel(
      "Impact",
      titlePanel("Risks taken for shortened sleep"),
      sidebarLayout(
        sidebarPanel(
          radioButtons("age",
            label = h3("Age Groups"),
            choices = list(
              "Young" = "Young",
              "Old" = "Old"
            ),
            selected = "Young"
          ),
          hr(),
          radioButtons("symptoms",
            label = h4("Possible Symptoms"),
            choices = list(
              "Anxiety" = "anxiety",
              "Depression" = "depression",
              "Panic" = "panic",
              "Worry" = "worry",
              "Health Problems" = "health"
            ),
            selected = "anxiety"
          )
        ),
        mainPanel(
          plotOutput("sleep_impacts")
        )
      ),
      p("This interactive scatterplot allows user to choose their own interest of age group and multiple outcomes of sleep deprivation. The dataset is retrieved from kaggle where the gathered data about 90 patients in different age groups and measured their physical and psychological health. In our plot, we used 6 different factors: "),
      p("Age group including old (65 - 75 years old) and young (20 - 30 years old) people. "),
      p("Anxiety and depression rating is from the hospital anxiety and depression scale. "),
      p("Worry, panic, and health problems are from Karolinska Sleep Questionnaire. "),
      p("For the younger group of people, they have an average rating of anxiety for about 2.92. Whereas the older age group only have 1.49. And for panic and worry, the younger group is higher for about .5 rating. For health problems, the younger group are worse by .7 rating for self-assessment. This contrast illustrates that there is a weird paradox in health problems. When young people are supposed to be strong and healthy, and older people are more prone to have health issues, the reality is in reverse."),
      p("The lack of sleep is strongly impacting people’s lives. With increasing anxiety and potential health problems. Because this lacking mostly influence mental functioning, and all human activities rely on those 3 pounds little brain, it’s crucial that people should pay more attention to sleeping issues and try to get rid of the malicious effect of not sleeping enough. "),
      h3("The relationship between sleep deprivation and student's GPA"),
      plotlyOutput("sleep_GPA"),
      p("This graph demonstrates U.S college students’ average GPA affected by daytime sleepiness. As shown above, students who report not feeling fatigued have a 3.24 GPA, which is 0.2 higher than sleepy students’ 3.04 GPA."),
      p("Academically, the lack of sleep could cause students trouble concentrating in class and performing their knowledge during exams. A lot of students sacrifice their sleep time and stay up late at night to study for exams or do assignments. However, this does not have positive long-term impacts on their academics. Not getting enough sleep at night results in a much lower GPA than students who get at least 7 hours of sleep. While working hard on schoolwork, students should always remember sleep is the biggest priority.")
    ),
    # Conclusion tab: summary text plus a bedtime-calculator sidebar
    # (two sliders feeding three text outputs).
    tabPanel(
      "Conclusion",
      tags$div(
        h3("Conclusion"),
        p("The strength of our project is that our resources for datasets are from authentic organizations, including American Academy of Sleep Medicine (AASM), National Alliance on Mental Illness (NAMI), and the Centers for Disease Control and Prevention (CDC). By using these credible sources, our group ensures that the information presented in our project is reliable. On the other hand, one of the weaknesses of our project is the narrow age range we covered. We only targeted people aged from 18 to 39, which makes up about 26.3 percent of the U.S population. he project could be more applicable to more users. The main lesson our team learned from this project is, the sleeping trend in the U.S in recent years is getting worse. We found out that the lack of sleep could affect not only students' academic performance but also human health. In the future, people could improve the project by extending the age group and provide suggestions to improve people’s sleeping time and sleeping quality. Our team would also like to look deeper into the sleeping trend in different countries other than the U.S to make a comparison, as well as how foreign governments approach sleeping issues.")
      ),
      tags$hr(),
      tags$div(
        h3("Suggested sleep-time according to your age:"),
        h5("The sleep time calculator allows the user to choose their age and the time they plan to wake up. Then, the calculator will suggest the time user should go to bed based on the information given."),
      ),
      sidebarLayout(
        sidebarPanel(
          h4("Sleep-time calculation"),
          sliderInput("age_years", "How old are you?",
            min = 1, max = 99,
            value = 19
          ),
          sliderInput("awake_time", "What time do you need to wake up?",
            min = 0, max = 23,
            value = 8
          )
        ),
        mainPanel(
          textOutput("years_old"),
          textOutput("awake"),
          textOutput("sleep_time")
        )
      )
    ),
    # About menu: citation list ("About Tech") and team bios with
    # image outputs ("About Us").
    navbarMenu(
      "About",
      tabPanel(
        "About Tech",
        titlePanel("Give credit to all the amazing sources!"),
        tags$div(
          p("[1] Cunningham, J. (2019). College students aren’t getting nearly enough sleep. Retrieved from http://sleepeducation.org/news/2019/07/18/college-students-are-not-getting-nearly-enough-sleep"),
          p("[2] Feraco, F. (2018). Sleep Deprivation. Retrieved from https://www.kaggle.com/feraco/sleep-deprivation#demdata_160225_pseudonymized.csv"),
          p("[3] Fusion 360. (2014). Sleepless Nights. Retrieved from https://visual.ly/community/infographic/health/sleepless-nights"),
          p("[4] Healthguru. (2012). Need More Sleep? The Facts On Sleeping Disorders. Retrieved from https://visual.ly/community/infographic/health/need-more-sleep-facts-sleeping-disorders"),
          p("[5] Lomuscio, M. (2019). Sleep Study. Retrieved from https://www.kaggle.com/mlomuscio/sleepstudypilot"),
          p("[6] Mental Health Guide for College Students. (2019). Retrieved from https://collegestats.org/resources/mental-health-guide/"),
          p("[7] Youngstedt, Shawn D et al. “Has adult sleep duration declined over the last 50+ years?.” Sleep medicine reviews vol. 28 (2016): 69-85. doi:10.1016/j.smrv.2015.08.004"),
          p("[8] “500 Cities: Local Data for Better Health, 2018 Release.” Centers for Disease Control and Prevention, Centers for Disease Control and Prevention, https://chronicdata.cdc.gov/500-Cities/500-Cities-Local-Data-for-Better-Health-2018-relea/6vp6-wxuq"),
          p("[9] Monideepa B. Becerra, Brittny S. Bol, Rochelle Granados & Christina Hassija (2018) Sleepless in school: The role of social determinants of sleep health among college students, Journal of American College Health, DOI: 10.1080/07448481.2018.1538148"),
          p("[10] Life tracking project dataset. Retrieved from: https://www.kaggle.com/maxschmidt94/life-tracking-project-dataset#life_total_data.csv"),
          p("[11] 2019 American Academy of Sleep Medicine. (n.d.). Make Time 2 Sleep. Retrieved December 4, 2019, from http://sleepeducation.org/healthysleep/Make-Time-2-Sleep-Bedtime-Calculator?fbclid=IwAR0YjgcVl6BzJW1CFOCoVq0s3niDqMt5Ju5NOzePy6Nm1OBt2halh21spGs."),
          p("[12] Altun, I., Cınar, N., & Dede, C. (2012). The contributing factors to poor sleep experiences in according to the university students: A cross-sectional study. Journal of research in medical sciences : the official journal of Isfahan University of Medical Sciences, 17(6), 557–561."),
          h5("And a special thanks to Andrey Butenko, our wonderful TA who helped us a lot through this course and on this project :) ")
        )
      ),
      tabPanel(
        "About Us",
        titlePanel("More Information on project members!"),
        fluidRow(
          column(
            8,
            h3("Phuong Vu"),
            p("Phuong Vu is an international student at the University of Washington who wants to study Informatics, and this is his second year at the UW. He enjoys writing code that would solve real-life tasks. During his free time, he loves traveling to new places to take artistic photos and creating videos.")
          ),
          column(4, imageOutput("pvu", height = 200))
        ),
        hr(),
        fluidRow(
          column(
            8,
            h3("Yu-Wen Chen"),
            p("Yu-Wen Chen is currently a Freshman at the University of Washington from Taoyuan, Taiwan. She enjoys creative problem solving and figuring things out with her team. Outside of the classroom, she loves spending time doing creative writing and reading Asian literature. Most importantly, she thinks her dog May-May is the cutest dog in the universe."),
          ),
          column(4, imageOutput("ychen", height = 200))
        ),
        hr(),
        fluidRow(
          column(
            8,
            h3("Hanzhi Cao"),
            p("Hanzhi Cao is an international student at the UW studying Psychology in her senior year. On one hand she loves psychology and would love to know more about the mysterious human kind. On the other hand, she is also into data field that people are generating data every day every second. She believes that efficiency is EVERYTHING, so her ultimate goal of life is to find a better way to improve human life experience."),
          ),
          column(4, imageOutput("hcao", height = 200))
        ),
        hr(),
        fluidRow(
          column(
            8,
            h3("Vivian Law"),
            p("Vivian Law is a junior student at the University of Washington. She is in the Early Childhood Family Studies major. She enjoys photography and trying different foods. She has a passion for children and for technology. She values her Taiwanese and Cantonese culture.")
          ),
          column(4, imageOutput("vlaw", height = 200))
        )
      )
    )
  )
)
|
/app_ui.R
|
no_license
|
lawv-1841143/info201agsleepdeprivation
|
R
| false
| false
| 21,167
|
r
|
# UI definition for the "Sleep Deprivation" Shiny app.
# Layout: a "cyborg"-themed navbarPage with Background, Trend, Brutal Reality,
# Causes (menu), Impact, Conclusion and About (menu) pages.
# All *Output() ids used here ("us_timeline", "us_map_7", "br1", "br2",
# "pie_chart", "compared.bar", "sleep_impacts", "sleep_GPA", the calculator
# text outputs and the team photos) must be rendered by the matching server
# function. NOTE(review): analysis.R is sourced for side effects — confirm it
# defines what this UI needs.
# Fix applied: trailing commas (e.g. `hr(),` directly before a closing `)`)
# produced empty arguments; htmltools happens to tolerate a trailing empty
# argument, but plain R calls do not, so they have been removed.
library(shiny)
library(shinythemes)
library(plotly)
source("analysis.R")
ui <- fluidPage(
  navbarPage(
    theme = shinytheme("cyborg"),
    "Sleep Deprivation",
    # --- Background: project motivation and per-page descriptions ---
    tabPanel(
      "Background",
      tags$div(
        h3("About This Project:"),
        p("In today's society, people sacrifice their sleep time for various
reasons: studying for exams, finishing up work or tasks, suffering
from anxiety or sleep disorders… etc. No matter what the reasons are,
they all contribute to sleep deprivation. Since people not getting
enough sleep has become a global phenomenon, it is important to learn
about its effect and what sleep deprivation is like in society."),
        p("This project specifically focuses on the topic of sleep deprivation for adults aged 19 to 39 in the United States. The topics covered in this project include adults' sleeping trends from 2000 to 2015, negative impacts of sleep deprivation, the general sleeping hour in each state, and how much time people spend on different activities compared to their sleep time. Moreover, by presenting the information through different types of interactive and non-interactive graphs, our team hopes to provide a fun and interactive learning experience to the users."),
        h3("Page Description"),
        p("The Trend tab introduces the average sleep time for U.S adult aged 19 to 39 from 2000 to 2012, the information was collected by applying Actigraphic Study and Polysomnography Study. On the bottom, a map illustrates the average percentage of people who do not have a least 7 hours of sleep in each state in the U.S."),
        p("The Brutal Reality tab shows the fact that our performance is bad because of sleep deprivation. "),
        p("The Causes tab has two sub tabs. On the Multiple Factors tab, an interactive pie chart demonstrates the most common factors that cause U.S college students stay up so late at night based on the survey. On the Life tracking sample page, the interactive scatter plot compares different activity time with people’s sleep time."),
        p("The Impact tab introduces some possible impact on people’s mental and physical health if people are sleep deprived for long, as well as how sleep deprivation could affect students’ academic performance. The tab has an interactive scatterplot that allows user to choose their own interest of age group and multiple outcomes of sleep deprivation. On the bottom, an interactive bar graph gives users a glance of U.S college students’ average GPA affected by daytime sleepiness."),
        p("The conclusion tab summarize the biggest lesson of this project, strength and weakness, and some possible improvement that could be made to make this project better. The sleep time calculator at the bottom will calculate the time user should go to bed to get enough sleep based on their age and what time they plan to wake up."),
        p("The About tab has two sub tabs : About Tech and About us. The About Tech tab has the citation for all the resources used in the project. The About Us tab gives a brief introduction of each members in this project group.")
      )
    ),
    # --- Trend: sleep-time timeline by study method + US choropleth ---
    tabPanel(
      "Trend",
      tags$div(
        h3("The current sleeping time trend in U.S. population"),
        sidebarLayout(
          sidebarPanel(
            radioButtons("StudiesMethods",
              label = h3("Studies' Methods"),
              choices = list(
                "Actigraphic Study" = 1,
                "Polysomnography Study" = 2,
                "Both Studies" = 3
              ),
              selected = 3
            ),
            hr()
          ),
          mainPanel(
            plotOutput("us_timeline")
          )
        ),
        p("This graph represents the average sleep time in minutes for adults (18-39) in the U.S from 2000 to 2015. The study methods include *Actigraphic Study and *Polysomnography Study."),
        p("For Actigraphic Study, the trend experiences a drastic decline from the year 2000 (the highest point) to 2002, then it turns to a small and steady decline until 2006. Starting from 2006, the trend grows constantly for three years then keeps decreasing to its lowest point at the end."),
        p("For Polysomnography Study, the trend remains steady from the beginning year, 2000, to 2006. It increases by little for half of a year then goes down constantly with small fluctuation. Starting from 2009, when the trend reaches its lowest point, it goes upward slowly until the end."),
        p("Note:"),
        p("* Actigraphic Study: Actigraphy is a non-invasive technique used to assess cycles of activity and rest over several days to several weeks."),
        p("(2019, Stanford Health Care. https://stanfordhealthcare.org/medical-tests/s/sleep-disorder-tests/procedures/actigraphy.html)"),
        p("* Polysomnography Study: A polysomnogram continuously records brain waves during sleep, as well as a number of nerve and muscle functions during nighttime sleep."),
        p("(2019, Stanford Health Care. https://stanfordhealthcare.org/medical-tests/p/polysomnogram.html)"),
        h3("Geographic map of US adults sleeping <7 hours"),
        plotOutput("us_map_7"),
        p("The geographic map illustrates the percentage of people in each state in the U.S that sleep less than 7 hours. As the percentage of people gets higher, the black color goes darker. As shown in the graph, states that have the largest population of having less than 7 hours of sleep are mostly located in the Midwest. On the contrary, western states tend to have fewer people who do not get sufficient sleep."),
        p("This graph allows people to have a brief idea of a lack of sleep in each state at one glance. People can easily pick up the states which they want to have a closer look on sleeping issues accordingly to the information presented in the graph.")
      )
    ),
    # --- Brutal Reality: effects split by gender and by perceived rest ---
    tabPanel(
      "Brutal Reality",
      titlePanel("The FACT that we are having less sleep hours can..."),
      p("The Brutal Reality tab shows people the certain fact that sleep deprivation is actually leading to a bad performance in life. It will be discussed in multiple ways, such as fatigue driving, test performances, and reaction times (RTs)."),
      sidebarLayout(
        sidebarPanel(
          radioButtons("Gender",
            label = h3("Gender"),
            choices = list(
              "Male" = 1,
              "Female" = 2
            ),
            selected = 1
          ),
          hr()
        ),
        mainPanel(
          tags$div(
            p("Separating by gender allows us to see how both are affected by gender deprivation. Since women and men are so different, the effects can also be different. We will be investigating how men and women feel after a not good night sleep. ")
          ),
          plotlyOutput("br1")
        )
      ),
      tags$hr(),
      sidebarLayout(
        sidebarPanel(
          radioButtons("Sleepdepriv",
            label = h3("Sleepdepriv"),
            choices = list(
              "Enough" = 1,
              "Tired" = 2
            ),
            selected = 1
          ),
          hr()
        ),
        mainPanel(
          tags$div(
            p("Americans are not getting enough sleep. It also depends how each person perceives what enough sleep is. Many people are tired during the day and cannot focus on their daily activities. By graphing enough and tired, we can see the affects of sleep deprivation.")
          ),
          plotlyOutput("br2")
        )
      )
    ),
    # --- Causes: pie chart of self-reported factors + activity comparison ---
    navbarMenu(
      "Causes",
      tabPanel(
        "Multiple Factors",
        titlePanel("Why are we staying up so late?"),
        p("University students answered to a self-report of multiple causes of their sleep deprivation, which are categorized into the below pie chart: "),
        plotlyOutput("pie_chart"),
        p("The interactive pie chart demonstrates the most common factors that cause U.S college students stay up so late at night based on the survey. Although there are 14 categories presenting on this chart, it could be organized to 4."),
        p("The first one is Dorm/Sleeping environment’s poor quality, including tobacco smoke in sleeping room(7.97%), room’s bad air quality(6.89%), room scents(6.64%), and noise from next door (6.5%). The second category is student’s mental state and feelings, including stress (8.1%), fatigue (6.69%), sadness (6.64%), depression (7.97%), being patient (6.89)and finally, anxiety and tension (6.39%). The third category is student’s physical state, which are pain (7.87%) and strenuous physical activity (6.74%). The fourth category is family issue, making up 7.82 % of the pie chart. (The rest of the 6 percent is reported as other.)"),
        p("Overall, students’ poor sleeping/ resting environment makes up 28 % of the pie chart while student’s mental state takes up to 42.6 %, almost half of the sleep deprived students is affected by their mental states. Physical state occupied 14.61 percent of the chart and family issue has least effect on student sleep, which is only 7.82 percent. ")
      ),
      tabPanel(
        "Life Tracking Sample",
        titlePanel("Let's see what people in U.S. do during the day"),
        sidebarLayout(
          sidebarPanel(
            selectInput("select.activities",
              label = h4("Select an activity to compare with sleep's time:"),
              choices = list(
                "Cook" = "cook", "Eat" = "eat",
                "Math" = "math", "Music" = "music",
                "Pause" = "pause", "Prep" = "prep",
                "Uni" = "uni", "Meditation" = "meditation",
                "Special" = "special", "Work" = "work"
              ),
              selected = "cook"
            ),
            hr()
          ),
          mainPanel(
            plotlyOutput("compared.bar")
          )
        ),
        h3("Let's see what people in U.S. do during the day:"),
        p("The interactive scatter plot compare different activity time with sleeping-time. The x-axis will be all kinds of different activities that can be chosen from the side bar. Y-axis will be the general sleep-time."),
        p("There are some interesting ups and downs for some graphs like “cook”, “eat”, “music”, and even “work”. We don’t know the causal or correlational factors of those bumps, which means further research need to be down with that."),
        p("But for math, it’s pretty clear that generally, if they do more math problems and use more of the brain, the less sleep people are getting. It can be an analog to doing homework as if in students’ life."),
        p("In general, all activities have a tendency to go down except for music and meditation. It seems that people have so many things to do in life, studying, social networking, hanging out with friends that they can not even have enough time to sleep.")
      )
    ),
    # --- Impact: symptoms by age group + GPA vs daytime sleepiness ---
    tabPanel(
      "Impact",
      titlePanel("Risks taken for shortened sleep"),
      sidebarLayout(
        sidebarPanel(
          radioButtons("age",
            label = h3("Age Groups"),
            choices = list(
              "Young" = "Young",
              "Old" = "Old"
            ),
            selected = "Young"
          ),
          hr(),
          radioButtons("symptoms",
            label = h4("Possible Symptoms"),
            choices = list(
              "Anxiety" = "anxiety",
              "Depression" = "depression",
              "Panic" = "panic",
              "Worry" = "worry",
              "Health Problems" = "health"
            ),
            selected = "anxiety"
          )
        ),
        mainPanel(
          plotOutput("sleep_impacts")
        )
      ),
      p("This interactive scatterplot allows user to choose their own interest of age group and multiple outcomes of sleep deprivation. The dataset is retrieved from kaggle where the gathered data about 90 patients in different age groups and measured their physical and psychological health. In our plot, we used 6 different factors: "),
      p("Age group including old (65 - 75 years old) and young (20 - 30 years old) people. "),
      p("Anxiety and depression rating is from the hospital anxiety and depression scale. "),
      p("Worry, panic, and health problems are from Karolinska Sleep Questionnaire. "),
      p("For the younger group of people, they have an average rating of anxiety for about 2.92. Whereas the older age group only have 1.49. And for panic and worry, the younger group is higher for about .5 rating. For health problems, the younger group are worse by .7 rating for self-assessment. This contrast illustrates that there is a weird paradox in health problems. When young people are supposed to be strong and healthy, and older people are more prone to have health issues, the reality is in reverse."),
      p("The lack of sleep is strongly impacting people’s lives. With increasing anxiety and potential health problems. Because this lacking mostly influence mental functioning, and all human activities rely on those 3 pounds little brain, it’s crucial that people should pay more attention to sleeping issues and try to get rid of the malicious effect of not sleeping enough. "),
      h3("The relationship between sleep deprivation and student's GPA"),
      plotlyOutput("sleep_GPA"),
      p("This graph demonstrates U.S college students’ average GPA affected by daytime sleepiness. As shown above, students who report not feeling fatigued have a 3.24 GPA, which is 0.2 higher than sleepy students’ 3.04 GPA."),
      p("Academically, the lack of sleep could cause students trouble concentrating in class and performing their knowledge during exams. A lot of students sacrifice their sleep time and stay up late at night to study for exams or do assignments. However, this does not have positive long-term impacts on their academics. Not getting enough sleep at night results in a much lower GPA than students who get at least 7 hours of sleep. While working hard on schoolwork, students should always remember sleep is the biggest priority.")
    ),
    # --- Conclusion: summary text + bedtime calculator ---
    tabPanel(
      "Conclusion",
      tags$div(
        h3("Conclusion"),
        p("The strength of our project is that our resources for datasets are from authentic organizations, including American Academy of Sleep Medicine (AASM), National Alliance on Mental Illness (NAMI), and the Centers for Disease Control and Prevention (CDC). By using these credible sources, our group ensures that the information presented in our project is reliable. On the other hand, one of the weaknesses of our project is the narrow age range we covered. We only targeted people aged from 18 to 39, which makes up about 26.3 percent of the U.S population. he project could be more applicable to more users. The main lesson our team learned from this project is, the sleeping trend in the U.S in recent years is getting worse. We found out that the lack of sleep could affect not only students' academic performance but also human health. In the future, people could improve the project by extending the age group and provide suggestions to improve people’s sleeping time and sleeping quality. Our team would also like to look deeper into the sleeping trend in different countries other than the U.S to make a comparison, as well as how foreign governments approach sleeping issues.")
      ),
      tags$hr(),
      tags$div(
        h3("Suggested sleep-time according to your age:"),
        h5("The sleep time calculator allows the user to choose their age and the time they plan to wake up. Then, the calculator will suggest the time user should go to bed based on the information given.")
      ),
      sidebarLayout(
        sidebarPanel(
          h4("Sleep-time calculation"),
          sliderInput("age_years", "How old are you?",
            min = 1, max = 99,
            value = 19
          ),
          sliderInput("awake_time", "What time do you need to wake up?",
            min = 0, max = 23,
            value = 8
          )
        ),
        mainPanel(
          textOutput("years_old"),
          textOutput("awake"),
          textOutput("sleep_time")
        )
      )
    ),
    # --- About: citations and team bios ---
    navbarMenu(
      "About",
      tabPanel(
        "About Tech",
        titlePanel("Give credit to all the amazing sources!"),
        tags$div(
          p("[1] Cunningham, J. (2019). College students aren’t getting nearly enough sleep. Retrieved from http://sleepeducation.org/news/2019/07/18/college-students-are-not-getting-nearly-enough-sleep"),
          p("[2] Feraco, F. (2018). Sleep Deprivation. Retrieved from https://www.kaggle.com/feraco/sleep-deprivation#demdata_160225_pseudonymized.csv"),
          p("[3] Fusion 360. (2014). Sleepless Nights. Retrieved from https://visual.ly/community/infographic/health/sleepless-nights"),
          p("[4] Healthguru. (2012). Need More Sleep? The Facts On Sleeping Disorders. Retrieved from https://visual.ly/community/infographic/health/need-more-sleep-facts-sleeping-disorders"),
          p("[5] Lomuscio, M. (2019). Sleep Study. Retrieved from https://www.kaggle.com/mlomuscio/sleepstudypilot"),
          p("[6] Mental Health Guide for College Students. (2019). Retrieved from https://collegestats.org/resources/mental-health-guide/"),
          p("[7] Youngstedt, Shawn D et al. “Has adult sleep duration declined over the last 50+ years?.” Sleep medicine reviews vol. 28 (2016): 69-85. doi:10.1016/j.smrv.2015.08.004"),
          p("[8] “500 Cities: Local Data for Better Health, 2018 Release.” Centers for Disease Control and Prevention, Centers for Disease Control and Prevention, https://chronicdata.cdc.gov/500-Cities/500-Cities-Local-Data-for-Better-Health-2018-relea/6vp6-wxuq"),
          p("[9] Monideepa B. Becerra, Brittny S. Bol, Rochelle Granados & Christina Hassija (2018) Sleepless in school: The role of social determinants of sleep health among college students, Journal of American College Health, DOI: 10.1080/07448481.2018.1538148"),
          p("[10] Life tracking project dataset. Retrieved from: https://www.kaggle.com/maxschmidt94/life-tracking-project-dataset#life_total_data.csv"),
          p("[11] 2019 American Academy of Sleep Medicine. (n.d.). Make Time 2 Sleep. Retrieved December 4, 2019, from http://sleepeducation.org/healthysleep/Make-Time-2-Sleep-Bedtime-Calculator?fbclid=IwAR0YjgcVl6BzJW1CFOCoVq0s3niDqMt5Ju5NOzePy6Nm1OBt2halh21spGs."),
          p("[12] Altun, I., Cınar, N., & Dede, C. (2012). The contributing factors to poor sleep experiences in according to the university students: A cross-sectional study. Journal of research in medical sciences : the official journal of Isfahan University of Medical Sciences, 17(6), 557–561."),
          h5("And a special thanks to Andrey Butenko, our wonderful TA who helped us a lot through this course and on this project :) ")
        )
      ),
      tabPanel(
        "About Us",
        titlePanel("More Information on project members!"),
        fluidRow(
          column(
            8,
            h3("Phuong Vu"),
            p("Phuong Vu is an international student at the University of Washington who wants to study Informatics, and this is his second year at the UW. He enjoys writing code that would solve real-life tasks. During his free time, he loves traveling to new places to take artistic photos and creating videos.")
          ),
          column(4, imageOutput("pvu", height = 200))
        ),
        hr(),
        fluidRow(
          column(
            8,
            h3("Yu-Wen Chen"),
            p("Yu-Wen Chen is currently a Freshman at the University of Washington from Taoyuan, Taiwan. She enjoys creative problem solving and figuring things out with her team. Outside of the classroom, she loves spending time doing creative writing and reading Asian literature. Most importantly, she thinks her dog May-May is the cutest dog in the universe.")
          ),
          column(4, imageOutput("ychen", height = 200))
        ),
        hr(),
        fluidRow(
          column(
            8,
            h3("Hanzhi Cao"),
            p("Hanzhi Cao is an international student at the UW studying Psychology in her senior year. On one hand she loves psychology and would love to know more about the mysterious human kind. On the other hand, she is also into data field that people are generating data every day every second. She believes that efficiency is EVERYTHING, so her ultimate goal of life is to find a better way to improve human life experience.")
          ),
          column(4, imageOutput("hcao", height = 200))
        ),
        hr(),
        fluidRow(
          column(
            8,
            h3("Vivian Law"),
            p("Vivian Law is a junior student at the University of Washington. She is in the Early Childhood Family Studies major. She enjoys photography and trying different foods. She has a passion for children and for technology. She values her Taiwanese and Cantonese culture.")
          ),
          column(4, imageOutput("vlaw", height = 200))
        )
      )
    )
  )
)
|
# Load the UCI image-segmentation training and test sets (CSV files expected
# in the current working directory) into data frames of descriptors.
# Fix: spell out `quote = FALSE` — `F` is an ordinary, reassignable binding.
print("Importing Data", quote = FALSE)
trainDescr <- read.csv("segmentation.csv")
testDescr <- read.csv("segmentation.test.csv")
|
/import.R
|
no_license
|
Salaudeen-Hafeez/uci_mlr_image_seg
|
R
| false
| false
| 121
|
r
|
# Load the UCI image-segmentation training and test sets (CSV files expected
# in the current working directory) into data frames of descriptors.
# Fix: spell out `quote = FALSE` — `F` is an ordinary, reassignable binding.
print("Importing Data", quote = FALSE)
trainDescr <- read.csv("segmentation.csv")
testDescr <- read.csv("segmentation.test.csv")
|
# The purpose of this script is to generate a heatmap of the genes
# in the intersection of the Module 10 RNA-seq features and the Seurat
# cell cycle gene list.
#
# The EdU positive proportion (from IF) is used as an annotation
# and to order samples.
###############################################################################
library(tidyverse)
library(ComplexHeatmap)
library(circlize)
# Input/output locations. All paths are relative to the repository's R/
# directory (see the setwd() guard further down in this script).
RNAseqMetadataFile <- "../RNAseq/Metadata/MDD_RNAseq_sampleMetadata.csv"
geneMetadataFile <- "../RNAseq/Metadata/MDD_RNAseq_geneAnnotations.csv"
# NOTE(review): RNAseqL3DataFile is defined but never read below — confirm
# whether it can be removed.
RNAseqL3DataFile <- "../RNAseq/Data/MDD_RNAseq_Level3.csv"
RNAseqL4DataFile <- "../RNAseq/Data/MDD_RNAseq_Level4.csv"
seuratGeneList <- "../RNAseq/misc/seurat_cellCycle_geneList.txt"
moduleGeneFile <- "../misc/MDD_multiomics14Modules_TFannotations_module10.csv"
dataIF <- "../IF/Data/MDD_IF_Level4.csv"
colFile <- "../misc/MDD_color_codes.csv"
outDir <- "../plots/MDD_manuscript_figures"
###############################################################################
# Wrapper around ComplexHeatmap::Heatmap with the column annotation, colour
# ramp and display defaults shared by the Level-4 figures in this script.
# NOTE(review): the default `ca = ha.RNA.L4` refers to a global built later in
# the file; it resolves lazily at call time, but only after ha.RNA.L4 exists.
HeatmapL4C <- function(mat, ca = ha.RNA.L4, featureName = "Z-score", bks = c(-3, 0, 3), showRow = FALSE, clust = FALSE, ...) {
# Blue/white/red ramp anchored at the requested break points.
ramp <- colorRamp2(bks, c("blue", "white", "red"))
Heatmap(
mat,
name = featureName,
col = ramp,
top_annotation = ca,
cluster_columns = clust,
cluster_row_slices = FALSE,
show_row_names = showRow,
show_column_names = FALSE,
row_names_gp = gpar(fontsize = 7),
...
)
}
###############################################################################
# Ensure the working directory is the repo's R/ subdirectory so the relative
# paths above resolve. NOTE(review): mutating the working directory from a
# script is fragile; consider absolute paths or here::here().
if(!grepl("R$", getwd())) {setwd("R")}
###############################################################################
# Read the shared MDD colour-code table and slice out the rows for each
# annotation track (row ranges follow the fixed layout of MDD_color_codes.csv).
col_raw <- read.csv(colFile,
stringsAsFactors = FALSE)
col <- list(
ligand = dplyr::slice(col_raw, 1:8),
experimentalTimePoint = dplyr::slice(col_raw, 10:15),
secondLigand = dplyr::slice(col_raw, 17:18),
collection = dplyr::slice(col_raw, 26:28)
)
# Convert each two-column slice into a named colour vector (label -> colour).
col <-
lapply(col, function(x) {
x <- setNames(x[, 2], x[, 1])
})
# Bare `col` prints the palette when the script is run interactively.
col
###############################################################################
# Module 10 multi-omics features and the Seurat cell-cycle list restricted to
# G1/S genes; the heatmap rows are their intersection.
moduleGenes <- read.csv(moduleGeneFile, stringsAsFactors = FALSE)
seuratList <- read.table(seuratGeneList,
header = TRUE, stringsAsFactors = FALSE) %>%
dplyr::rename(hgnc_symbol = "Gene") %>%
filter(Class == "G1/S")
seuratG1S_module10 <- base::intersect(moduleGenes$feature,
seuratList$hgnc_symbol)
###############################################################################
# Gene annotations used to map ensembl IDs to HGNC symbols.
at.RNA <- read_csv(geneMetadataFile)
# RNAseq sample metadata: keep QC-passing samples, drop bookkeeping columns,
# recode ligand/condition labels for display, and fix the factor level order
# (ligand, then time) used by the heatmap column annotation.
sa.RNA.L3 <-
read.csv(RNAseqMetadataFile,
stringsAsFactors = FALSE) %>%
mutate(ligand = fct_inorder(ligand),
experimentalTimePoint = fct_inorder(as.factor(experimentalTimePoint)),
experimentalCondition = fct_inorder(as.factor(experimentalCondition))) %>%
filter(RNAseq_QCpass) %>%
dplyr::select(-contains("RNA")) %>%
dplyr::select(-contains("sequencing")) %>%
dplyr::select(-contains("File")) %>%
dplyr::rename(Time = experimentalTimePoint,
Ligand = ligand) %>%
arrange(experimentalCondition) %>%
mutate(experimentalCondition = as.character(experimentalCondition)) %>%
mutate(Ligand = fct_recode(Ligand,
"CTRL" = "ctrl",
"BMP2+EGF" = "BMP2",
"IFNG+EGF" = "IFNG",
"TGFB+EGF" = "TGFB"),
experimentalCondition = fct_recode(experimentalCondition,
"CTRL_0" = "ctrl_0")) %>%
mutate(Ligand = fct_relevel(Ligand, "CTRL", "PBS",
"HGF", "OSM", "EGF",
"BMP2+EGF", "IFNG+EGF", "TGFB+EGF")) %>%
arrange(Ligand, Time) %>%
mutate(experimentalCondition = fct_inorder(experimentalCondition))
# NOTE(review): the second line below matches "ctrl_0" AFTER the first line
# has already rewritten it to "CTRL_0", so it can never fire; both are
# presumably redundant with the fct_recode above — confirm and simplify.
sa.RNA.L3$experimentalCondition[sa.RNA.L3$experimentalCondition == "ctrl_0"] <- "CTRL_0"
sa.RNA.L3$Ligand[sa.RNA.L3$experimentalCondition == "ctrl_0"] <- "CTRL"
# Level 4 RNAseq expression matrix (genes x experimental conditions).
RNAseqL4 <-
read.csv(RNAseqL4DataFile,
header = TRUE, row.names = 1) %>%
data.matrix
colnames(RNAseqL4)[1] <- "CTRL_0"
# Mean-centering RNAseq Level4 data and changing identifiers to gene symbols
# NOTE(review): t %>% scale %>% t both centers AND rescales each gene to unit
# variance (a per-gene z-score), i.e. more than the "mean-centering" the
# comment above suggests — confirm which is intended.
RNAseqL4C_s <-
RNAseqL4[, setdiff(colnames(RNAseqL4), "CTRL_0")] %>%
t %>%
scale %>%
t %>%
data.frame %>%
rownames_to_column("ensembl_gene_id") %>%
left_join(at.RNA) %>%
filter(hgnc_symbol != "") %>%
distinct(hgnc_symbol, .keep_all = TRUE) %>%
dplyr::select(-ensembl_gene_id) %>%
column_to_rownames("hgnc_symbol")
# EdU-positive proportions measured by IF in collection C2, keyed by
# experimental condition; used both as a column annotation and to order the
# heatmap columns.
eduDataC2 <-
read.csv(dataIF, row.names = 1, stringsAsFactors = FALSE) %>%
t() %>%
data.frame %>%
rownames_to_column("specimenName") %>%
filter(collection == "C2") %>%
mutate(experimentalCondition = sprintf("%s_%s",
ligand, experimentalTimePoint)) %>%
dplyr::select(experimentalCondition, contains("EdU"))
# NOTE(review): the IF table apparently yields "ctrl_ 0" (embedded space) for
# the control condition — confirm against MDD_IF_Level4.csv.
eduDataC2$experimentalCondition[eduDataC2$experimentalCondition == "ctrl_ 0"] <- "CTRL_0"
# Join EdU values onto the sample table; sort by increasing EdU proportion.
sa.RNA.joined <-
sa.RNA.L3 %>%
left_join(eduDataC2) %>%
arrange((Edu_Positive_Proportion))
# Heatmap column order: non-control conditions, in EdU order.
ordL4 <- sa.RNA.joined %>%
filter(Ligand != "CTRL") %>%
pull(experimentalCondition) %>%
unique
# Rename the first two colour tracks to match the annotation column names.
names(col)[1:2] <- c("Ligand", "Time")
# BUG FIX: after the rename above, `col$ligand` no longer exists ($ is
# case-sensitive and does not partial-match "Ligand"), so the original
# `names(col$ligand) <- ...` hit `names<-`(NULL, ...) and errored with
# "attempt to set an attribute on NULL". Name the Ligand colours instead.
names(col$Ligand) <- levels(sa.RNA.joined$Ligand)
# Continuous white->red ramp for the EdU annotation track (0-25%).
col_fun <- colorRamp2(c(0, 25), c("white", "red"))
ha.RNA.L4 <-
HeatmapAnnotation(df = sa.RNA.joined %>%
distinct(Ligand, Time, .keep_all = TRUE) %>%
filter(Ligand != "CTRL") %>%
dplyr::select(Ligand, Time, Edu_Positive_Proportion) %>%
dplyr::rename("EdU Positive Proportion" = Edu_Positive_Proportion) %>%
dplyr::mutate(`EdU Positive Proportion` = as.numeric(`EdU Positive Proportion`)),
col = c(col, list("EdU Positive Proportion" = col_fun)))
if (!dir.exists(outDir)) {dir.create(outDir, recursive = TRUE)}
# Figure 6F: module-10 G1/S genes, columns ordered by EdU positive proportion.
pdf(sprintf("%s/MDD_F6F.pdf", outDir),
height = 7, width = 9.5)
HeatmapL4C(RNAseqL4C_s[seuratG1S_module10, ordL4],
featureName = "Mean-centered\nlog2(fpkm + 1)",
showRow = TRUE, ca = ha.RNA.L4,
column_title = "G1/S Genes in Module 10")
dev.off()
|
/R/MDD_module10CellCycleHeatmap.R
|
no_license
|
MEP-LINCS/MDD
|
R
| false
| false
| 6,353
|
r
|
# The purpose of this script is to generate a heatmap of the genes
# in the intersection of the Module 10 RNA-seq features and the Seurat
# cell cycle gene list.
#
# The EdU positive proportion (from IF) is used as an annotation
# and to order samples.
###############################################################################
library(tidyverse)
library(ComplexHeatmap)
library(circlize)
# Input/output locations. All paths are relative to the repository's R/
# directory (see the setwd() guard further down in this script).
RNAseqMetadataFile <- "../RNAseq/Metadata/MDD_RNAseq_sampleMetadata.csv"
geneMetadataFile <- "../RNAseq/Metadata/MDD_RNAseq_geneAnnotations.csv"
# NOTE(review): RNAseqL3DataFile is defined but never read below — confirm
# whether it can be removed.
RNAseqL3DataFile <- "../RNAseq/Data/MDD_RNAseq_Level3.csv"
RNAseqL4DataFile <- "../RNAseq/Data/MDD_RNAseq_Level4.csv"
seuratGeneList <- "../RNAseq/misc/seurat_cellCycle_geneList.txt"
moduleGeneFile <- "../misc/MDD_multiomics14Modules_TFannotations_module10.csv"
dataIF <- "../IF/Data/MDD_IF_Level4.csv"
colFile <- "../misc/MDD_color_codes.csv"
outDir <- "../plots/MDD_manuscript_figures"
###############################################################################
# Wrapper around ComplexHeatmap::Heatmap with the column annotation, colour
# ramp and display defaults shared by the Level-4 figures in this script.
# NOTE(review): the default `ca = ha.RNA.L4` refers to a global built later in
# the file; it resolves lazily at call time, but only after ha.RNA.L4 exists.
HeatmapL4C <- function(mat, ca = ha.RNA.L4, featureName = "Z-score", bks = c(-3, 0, 3), showRow = FALSE, clust = FALSE, ...) {
# Blue/white/red ramp anchored at the requested break points.
ramp <- colorRamp2(bks, c("blue", "white", "red"))
Heatmap(
mat,
name = featureName,
col = ramp,
top_annotation = ca,
cluster_columns = clust,
cluster_row_slices = FALSE,
show_row_names = showRow,
show_column_names = FALSE,
row_names_gp = gpar(fontsize = 7),
...
)
}
###############################################################################
# Ensure the working directory is the repo's R/ subdirectory so the relative
# paths above resolve. NOTE(review): mutating the working directory from a
# script is fragile; consider absolute paths or here::here().
if(!grepl("R$", getwd())) {setwd("R")}
###############################################################################
# Read the shared MDD colour-code table and slice out the rows for each
# annotation track (row ranges follow the fixed layout of MDD_color_codes.csv).
col_raw <- read.csv(colFile,
stringsAsFactors = FALSE)
col <- list(
ligand = dplyr::slice(col_raw, 1:8),
experimentalTimePoint = dplyr::slice(col_raw, 10:15),
secondLigand = dplyr::slice(col_raw, 17:18),
collection = dplyr::slice(col_raw, 26:28)
)
# Convert each two-column slice into a named colour vector (label -> colour).
col <-
lapply(col, function(x) {
x <- setNames(x[, 2], x[, 1])
})
# Bare `col` prints the palette when the script is run interactively.
col
###############################################################################
# Module 10 multi-omics features and the Seurat cell-cycle list restricted to
# G1/S genes; the heatmap rows are their intersection.
moduleGenes <- read.csv(moduleGeneFile, stringsAsFactors = FALSE)
seuratList <- read.table(seuratGeneList,
header = TRUE, stringsAsFactors = FALSE) %>%
dplyr::rename(hgnc_symbol = "Gene") %>%
filter(Class == "G1/S")
seuratG1S_module10 <- base::intersect(moduleGenes$feature,
seuratList$hgnc_symbol)
###############################################################################
# Gene annotations used to map ensembl IDs to HGNC symbols.
at.RNA <- read_csv(geneMetadataFile)
# RNAseq sample metadata: keep QC-passing samples, drop bookkeeping columns,
# recode ligand/condition labels for display, and fix the factor level order
# (ligand, then time) used by the heatmap column annotation.
sa.RNA.L3 <-
read.csv(RNAseqMetadataFile,
stringsAsFactors = FALSE) %>%
mutate(ligand = fct_inorder(ligand),
experimentalTimePoint = fct_inorder(as.factor(experimentalTimePoint)),
experimentalCondition = fct_inorder(as.factor(experimentalCondition))) %>%
filter(RNAseq_QCpass) %>%
dplyr::select(-contains("RNA")) %>%
dplyr::select(-contains("sequencing")) %>%
dplyr::select(-contains("File")) %>%
dplyr::rename(Time = experimentalTimePoint,
Ligand = ligand) %>%
arrange(experimentalCondition) %>%
mutate(experimentalCondition = as.character(experimentalCondition)) %>%
mutate(Ligand = fct_recode(Ligand,
"CTRL" = "ctrl",
"BMP2+EGF" = "BMP2",
"IFNG+EGF" = "IFNG",
"TGFB+EGF" = "TGFB"),
experimentalCondition = fct_recode(experimentalCondition,
"CTRL_0" = "ctrl_0")) %>%
mutate(Ligand = fct_relevel(Ligand, "CTRL", "PBS",
"HGF", "OSM", "EGF",
"BMP2+EGF", "IFNG+EGF", "TGFB+EGF")) %>%
arrange(Ligand, Time) %>%
mutate(experimentalCondition = fct_inorder(experimentalCondition))
# NOTE(review): the second line below matches "ctrl_0" AFTER the first line
# has already rewritten it to "CTRL_0", so it can never fire; both are
# presumably redundant with the fct_recode above — confirm and simplify.
sa.RNA.L3$experimentalCondition[sa.RNA.L3$experimentalCondition == "ctrl_0"] <- "CTRL_0"
sa.RNA.L3$Ligand[sa.RNA.L3$experimentalCondition == "ctrl_0"] <- "CTRL"
# Level 4 RNAseq expression matrix (genes x experimental conditions).
RNAseqL4 <-
read.csv(RNAseqL4DataFile,
header = TRUE, row.names = 1) %>%
data.matrix
colnames(RNAseqL4)[1] <- "CTRL_0"
# Mean-centering RNAseq Level4 data and changing identifiers to gene symbols
# NOTE(review): t %>% scale %>% t both centers AND rescales each gene to unit
# variance (a per-gene z-score), i.e. more than the "mean-centering" the
# comment above suggests — confirm which is intended.
RNAseqL4C_s <-
RNAseqL4[, setdiff(colnames(RNAseqL4), "CTRL_0")] %>%
t %>%
scale %>%
t %>%
data.frame %>%
rownames_to_column("ensembl_gene_id") %>%
left_join(at.RNA) %>%
filter(hgnc_symbol != "") %>%
distinct(hgnc_symbol, .keep_all = TRUE) %>%
dplyr::select(-ensembl_gene_id) %>%
column_to_rownames("hgnc_symbol")
# EdU-positive proportions measured by IF in collection C2, keyed by
# experimental condition; used both as a column annotation and to order the
# heatmap columns.
eduDataC2 <-
read.csv(dataIF, row.names = 1, stringsAsFactors = FALSE) %>%
t() %>%
data.frame %>%
rownames_to_column("specimenName") %>%
filter(collection == "C2") %>%
mutate(experimentalCondition = sprintf("%s_%s",
ligand, experimentalTimePoint)) %>%
dplyr::select(experimentalCondition, contains("EdU"))
# NOTE(review): the IF table apparently yields "ctrl_ 0" (embedded space) for
# the control condition — confirm against MDD_IF_Level4.csv.
eduDataC2$experimentalCondition[eduDataC2$experimentalCondition == "ctrl_ 0"] <- "CTRL_0"
# Join EdU values onto the sample table; sort by increasing EdU proportion.
sa.RNA.joined <-
sa.RNA.L3 %>%
left_join(eduDataC2) %>%
arrange((Edu_Positive_Proportion))
# Heatmap column order: non-control conditions, in EdU order.
ordL4 <- sa.RNA.joined %>%
filter(Ligand != "CTRL") %>%
pull(experimentalCondition) %>%
unique
# Rename the first two colour tracks to match the annotation column names.
names(col)[1:2] <- c("Ligand", "Time")
# BUG FIX: after the rename above, `col$ligand` no longer exists ($ is
# case-sensitive and does not partial-match "Ligand"), so the original
# `names(col$ligand) <- ...` hit `names<-`(NULL, ...) and errored with
# "attempt to set an attribute on NULL". Name the Ligand colours instead.
names(col$Ligand) <- levels(sa.RNA.joined$Ligand)
# Continuous white->red ramp for the EdU annotation track (0-25%).
col_fun <- colorRamp2(c(0, 25), c("white", "red"))
ha.RNA.L4 <-
HeatmapAnnotation(df = sa.RNA.joined %>%
distinct(Ligand, Time, .keep_all = TRUE) %>%
filter(Ligand != "CTRL") %>%
dplyr::select(Ligand, Time, Edu_Positive_Proportion) %>%
dplyr::rename("EdU Positive Proportion" = Edu_Positive_Proportion) %>%
dplyr::mutate(`EdU Positive Proportion` = as.numeric(`EdU Positive Proportion`)),
col = c(col, list("EdU Positive Proportion" = col_fun)))
if (!dir.exists(outDir)) {dir.create(outDir, recursive = TRUE)}
# Figure 6F: module-10 G1/S genes, columns ordered by EdU positive proportion.
pdf(sprintf("%s/MDD_F6F.pdf", outDir),
height = 7, width = 9.5)
HeatmapL4C(RNAseqL4C_s[seuratG1S_module10, ordL4],
featureName = "Mean-centered\nlog2(fpkm + 1)",
showRow = TRUE, ca = ha.RNA.L4,
column_title = "G1/S Genes in Module 10")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meteo_distance.R
\name{deg2rad}
\alias{deg2rad}
\title{Convert from degrees to radians}
\usage{
deg2rad(deg)
}
\arguments{
\item{deg}{A numeric vector in units of degrees.}
}
\value{
The input numeric vector, converted to units of radians.
}
\description{
Convert from degrees to radians
}
|
/man/deg2rad.Rd
|
permissive
|
mpettis/rnoaa
|
R
| false
| true
| 368
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meteo_distance.R
\name{deg2rad}
\alias{deg2rad}
\title{Convert from degrees to radians}
\usage{
deg2rad(deg)
}
\arguments{
\item{deg}{A numeric vector in units of degrees.}
}
\value{
The input numeric vector, converted to units of radians.
}
\description{
Convert from degrees to radians
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mnist_27.R
\docType{data}
\name{mnist_27}
\alias{mnist_27}
\title{Useful example for illustrating machine learning algorithms based on MNIST data}
\format{
An object of class \code{list}.
}
\source{
\href{http://yann.lecun.com/exdb/mnist/}{http://yann.lecun.com/exdb/mnist/}
}
\usage{
data(mnist_27)
}
\description{
We only include a randomly selected set of 2s and 7s along with the two predictors based on the
proportion of dark pixels in the upper left and lower right quadrants respectively. The dataset is divided into
training and test sets.
}
\details{
\itemize{
\item train. A data frame containing training data: labels and predictors.
\item test. A data frame containing test data: labels and predictors.
\item index_train. The index of the original mnist training data used for the training set.
\item index_test. The index of the original mnist test data used for the test set.
\item true_p. A \code{data.frame} containing the
two predictors \code{x_1} and \code{x_2} and the conditional probability of being a 7
for \code{x_1}, \code{x_2}.
}
}
\examples{
data(mnist_27)
with(mnist_27$train, plot(x_1, x_2, col = as.numeric(y)))
}
\keyword{datasets}
|
/man/mnist_27.Rd
|
no_license
|
WinieraSutanto/dslabs
|
R
| false
| true
| 1,247
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mnist_27.R
\docType{data}
\name{mnist_27}
\alias{mnist_27}
\title{Useful example for illustrating machine learning algorithms based on MNIST data}
\format{
An object of class \code{list}.
}
\source{
\href{http://yann.lecun.com/exdb/mnist/}{http://yann.lecun.com/exdb/mnist/}
}
\usage{
data(mnist_27)
}
\description{
We only include a randomly selected set of 2s and 7s along with the two predictors based on the
proportion of dark pixels in the upper left and lower right quadrants respectively. The dataset is divided into
training and test sets.
}
\details{
\itemize{
\item train. A data frame containing training data: labels and predictors.
\item test. A data frame containing test data: labels and predictors.
\item index_train. The index of the original mnist training data used for the training set.
\item index_test. The index of the original mnist test data used for the test set.
\item true_p. A \code{data.frame} containing the
two predictors \code{x_1} and \code{x_2} and the conditional probability of being a 7
for \code{x_1}, \code{x_2}.
}
}
\examples{
data(mnist_27)
with(mnist_27$train, plot(x_1, x_2, col = as.numeric(y)))
}
\keyword{datasets}
|
# ---- command-line arguments ----
Args <- commandArgs(T)
input_pm = Args[1] # {Cancer_Type}/{Cancer_Type}.pm
Signature_num = as.numeric(Args[2]) # the number of signatures
mc = as.numeric(Args[3]) # the number of cores
outdir = Args[4] # the output dir
# Output path: basename with its trailing "pm" replaced by "est.sig.<K>.RData".
# sub("pm$", ...) anchors the match at the end of the name; the original
# gsub("pm", ...) replaced EVERY occurrence, mangling any cancer-type name
# that happens to contain "pm".
out_RData = sub("pm$", paste0(paste0("est.sig.",Signature_num), ".RData"), basename(input_pm))
out_RData = paste0(outdir, "/", out_RData)
# BUG FIX: library()'s second positional argument is `help`, not a second
# package, so the original call library(GenomicRanges, BSgenome...) never
# attached BSgenome.Hsapiens.UCSC.hg19. Attach each package explicitly.
library(GenomicRanges)
library(BSgenome.Hsapiens.UCSC.hg19)
library(pmsignature)
library(ggplot2)
library(cowplot)
library(reshape)
library(lsa)
library(parallel)
# G <- readMPFile(input_pm, numBases = 5, trDir = TRUE)
# BG_prob <- readBGFile(G)
# Pre-computed MutationFeatureData object G (and, presumably, BG_prob) --
# TODO confirm G.RData really contains BG_prob, which is used further down.
load("G.RData")
# Deciphering Signatures
# Source the development copies of the pmsignature internals so the
# *_parallel re-implementations below can call them (mySquareEM, convert*_F/Q).
for(i in list.files("/home/jintao/R/packages/pmsignature-devel/R", "*.R", full.names=T)){source(i)}
# Parallel replacement for pmsignature::getPMSignature(): runs `numInit`
# random restarts of the squared-EM fit concurrently via mclapply() and
# keeps the restart with the highest log-likelihood.
#
# Args:
#   mutationFeatureData: pmsignature MutationFeatureData S4 object.
#   K:       total number of signatures to estimate.
#   BG:      optional background signature; when supplied, one signature is
#            fixed to it and only K-1 free signatures are estimated.
#   numInit: number of random EM restarts.
#   tol, maxIter: convergence tolerance / iteration cap passed to mySquareEM().
# Returns: an "EstimatedParameters" S4 object for the best restart.
# NOTE(review): `mc` (the worker count) is read from the script's global
# scope rather than passed as an argument -- confirm it is always defined.
getPMSignature_parallel = function (mutationFeatureData, K, BG = NULL, numInit = 10, tol = 1e-04,
    maxIter = 10000)
{
    # With a background signature, only varK = K-1 signatures are free.
    if (!is.null(BG)) {
        isBG <- TRUE
        varK <- K - 1
    }else {
        isBG <- FALSE
        varK <- K
        BG <- 0
    }
    sampleNum <- length(slot(mutationFeatureData, "sampleList"))
    fdim <- slot(mutationFeatureData, "possibleFeatures")
    # Initialized here but overwritten after the parallel loop (kept for
    # parity with the original serial implementation).
    tempL <- -Inf
    tempPar <- c()
    # One independent random restart per worker; each returns
    # c(loglikelihood, parameter vector).
    ress = mclapply(1:numInit, function(kkk){
      # Random init of the signature array F: for each signature k and
      # feature dimension kk, draw Gamma(1) weights and normalize to a
      # probability distribution over that dimension's categories.
      F <- array(0, c(varK, length(fdim), max(fdim)))
      for (k in 1:varK) {
          for (kk in 1:length(fdim)) {
              F[k, kk, 1:fdim[kk]] <- rgamma(fdim[kk], rep(1,
                  fdim[kk]))
              F[k, kk, 1:fdim[kk]] <- F[k, kk, 1:fdim[kk]]/sum(F[k,
                  kk, 1:fdim[kk]])
          }
      }
      # Random init of per-sample signature weights Q, columns normalized.
      Q <- matrix(rgamma(sampleNum * K, 1, 1), K, sampleNum)
      Q <- sweep(Q, 2, apply(Q, 2, sum), `/`)
      # Re-parameterize (F, Q) onto the unconstrained scale expected by the
      # squared-EM routine.
      p0 <- c(convertToTurbo_F(as.vector(F), fdim, K, isBG),
          convertToTurbo_Q(as.vector(t(Q)), K, sampleNum))
      Y <- list(list(sampleNum, fdim, slot(mutationFeatureData,
          "featureVectorList"), slot(mutationFeatureData, "countData")),
          K, isBG, BG)
      res1 <- mySquareEM(p0, Y, tol = tol, maxIter = maxIter)
      # Per-restart progress line (note: printed from forked workers, so
      # output ordering is nondeterministic).
      cat(paste("#trial: ", sprintf("%2d", kkk), "; #iteration: ",
          sprintf("%4d", as.integer(res1$itr)), "; time(s): ",
          sprintf("%4.2f", res1$elapsed.time), "; convergence: ",
          res1$convergence, "; loglikelihood: ", sprintf("%.4f",
              res1$value.objfn), "\n", sep = ""))
      return(c(res1$value.objfn, res1$par))
    }, mc.cores = mc)
    # Keep the restart with the best log-likelihood; drop the leading
    # log-likelihood element to recover the raw parameter vector.
    tempL = max(sapply(ress, function(x){x[1]}))
    tempPar = unlist(ress[which.max(sapply(ress, function(x){x[1]}))])[-1]
    # Split the flat parameter vector back into F (signatures) and Q
    # (sample loadings) on the probability scale.
    lenF <- varK * (sum(fdim) - length(fdim))
    lenQ <- sampleNum * (K - 1)
    F <- convertFromTurbo_F(tempPar[1:lenF], fdim, K, isBG)
    Q <- convertFromTurbo_Q(tempPar[(lenF + 1):(lenF + lenQ)],
        K, sampleNum)
    dim(F) <- c(varK, length(fdim), max(fdim))
    dim(Q) <- c(sampleNum, K)
    # Package the result in pmsignature's EstimatedParameters class,
    # carrying over the metadata slots from the input object.
    return(new(Class = "EstimatedParameters", type = slot(mutationFeatureData,
        "type"), flankingBasesNum = slot(mutationFeatureData,
        "flankingBasesNum"), transcriptionDirection = slot(mutationFeatureData,
        "transcriptionDirection"), possibleFeatures = slot(mutationFeatureData,
        "possibleFeatures"), sampleList = slot(mutationFeatureData,
        "sampleList"), signatureNum = as.integer(K), isBackGround = isBG,
        signatureFeatureDistribution = F, sampleSignatureDistribution = Q,
        loglikelihood = tempL))
}
# Parallel bootstrap for an estimated signature model: resamples the
# mutation counts `bootNum` times, refits from the point estimate Param0,
# and accumulates the squared deviations of the refitted F and Q from F0/Q0.
#
# Args:
#   mutationFeatureData: pmsignature MutationFeatureData S4 object.
#   Param0:  EstimatedParameters from a previous fit (starting point).
#   bootNum: number of bootstrap replicates.
#   BG:      background signature; must match how Param0 was estimated.
#   tol, maxIter: convergence settings for mySquareEM().
# Returns: list(sqF, sqQ) -- arrays of per-replicate squared deviations,
#   sqF: bootNum x varK x length(fdim) x max(fdim); sqQ: bootNum x N x K.
# NOTE(review): `mc` (worker count) comes from the global scope.
bootPMSignature_parallel <- function(mutationFeatureData, Param0, bootNum = 10, BG = NULL,
                                    tol = 1e-2, maxIter = 10000) {
  K <- slot(Param0, "signatureNum")
  isBG <- slot(Param0, "isBackGround")
  # The background setting must agree with how Param0 was estimated.
  if (isBG == TRUE) {
    if (!is.null(BG)) {
      varK <- K - 1
    } else {
      stop(paste("The input parameter is estimated using a background signature.\n",
                 "Please specify the same background signature."))
    }
  } else {
    if (!is.null(BG)) {
      warning(paste("The input parameter is estimated without using a background signature.\n",
                    "Specified background signature is ignored."))
    }
    varK <- K
    BG <- 0
  }
  sampleNum <- length(slot(mutationFeatureData, "sampleList"))
  fdim <- slot(mutationFeatureData, "possibleFeatures")
  countData_org <- slot(mutationFeatureData, "countData")
  bootData <- countData_org
  # Point estimates the bootstrap deviations are measured against.
  F0 <- slot(Param0, "signatureFeatureDistribution")
  Q0 <- slot(Param0, "sampleSignatureDistribution")
  # Unused after refactor to mclapply; kept for parity with the original.
  tempL <- -Inf
  tempPar <- c()
  # Result arrays, filled in from the worker outputs below.
  sqF <- array(0, c(bootNum, varK, length(fdim), max(fdim)))
  sqQ <- array(0, c(bootNum, nrow(Q0), ncol(Q0)))
  sq_list = mclapply(1:bootNum, function(bbb){
    ##########
    # This part is under construction!!!!
    # bootData violates the validity rules of the mutation feature class... I don't like this..
    # Multinomial resample of the total mutation count across feature
    # columns (row 3 of countData holds the counts).
    tempG <- table(sample(1:length(countData_org[3,]), sum(countData_org[3,]), replace=TRUE, prob= countData_org[3,] / sum(countData_org[3,]) ))
    # Each forked worker mutates its own copy of bootData (fork semantics),
    # so this does not race across replicates.
    bootData[3, ] <- 0
    bootData[3, as.integer(names(tempG))] <- tempG
    ##########
    # Warm-start EM from the point estimate (F0, Q0) on the resampled data.
    p0 <- c(convertToTurbo_F(as.vector(F0), fdim, K, isBG), convertToTurbo_Q(as.vector(t(Q0)), K, sampleNum))
    Y <- list(list(sampleNum, fdim, slot(mutationFeatureData, "featureVectorList"), bootData), K, isBG, BG)
    res1 <- mySquareEM(p0, Y, tol = tol, maxIter = maxIter)
    # Per-replicate progress line (worker output; ordering nondeterministic).
    cat(paste("#trial: ", sprintf("%2d", bbb),
              "; #iteration: ", sprintf("%4d", as.integer(res1$itr)),
              "; time(s): ", sprintf("%4.2f", res1$elapsed.time),
              "; convergence: ", res1$convergence,
              "; loglikelihood: ", sprintf("%.4f", res1$value.objfn), "\n", sep=""
    ))
    tempPar <- res1$par
    # Recover F and Q for this replicate from the flat parameter vector.
    lenF <- varK * (sum(fdim) - length(fdim))
    lenQ <- sampleNum * (K - 1)
    F <- convertFromTurbo_F(res1$par[1:lenF], fdim, K, isBG)
    Q <- convertFromTurbo_Q(res1$par[(lenF + 1):(lenF + lenQ)], K, sampleNum)
    dim(F) <- c(varK, length(fdim), max(fdim))
    dim(Q) <- c(sampleNum, K)
    # Squared deviation from the point estimate, per signature / per sample.
    sqF_l = lapply(1:varK, function(k){(F[k,,] - F0[k,,])^2})
    sqQ_l = lapply(1:sampleNum, function(n){(Q[n,] - Q0[n,])^2})
    return(list(bbb, sqF_l, sqQ_l))
  }, mc.cores = mc)
  # Reassemble the per-replicate worker results into the output arrays,
  # indexing by the replicate id carried in a[[1]].
  for(i in 1:bootNum){
    a = sq_list[[i]]
    for(j in 1:length(a[[2]])){
      sqF[a[[1]], j, , ] <- a[[2]][[j]]
    }
    sqQ[a[[1]],,] <- matrix(unlist(a[[3]]), nrow=length(a[[3]]), byrow=T)
  }
  return(list(sqF, sqQ))
}
# Fit Signature_num signatures from the loaded mutation feature data G.
# NOTE(review): BG_prob is expected to come from G.RData (the readBGFile()
# call above is commented out) -- confirm it is actually saved there.
Param <- getPMSignature_parallel(G, K = Signature_num, numInit = 10, BG = BG_prob)
# Bootstrap error summary: mean of the squared deviations of the signature
# array F across 10 bootstrap replicates (element [[1]] of the result).
error = mean(bootPMSignature_parallel(G, Param, bootNum = 10, BG = BG_prob)[[1]])
likelihood = Param@loglikelihood
# Largest off-diagonal correlation between sample-wise signature loadings;
# subtracting 2*I pushes the diagonal (self-correlation = 1) to -1 so that
# max() only sees off-diagonal entries.
max.corr = max(cor(Param@sampleSignatureDistribution) - 2 * diag(rep(1, Signature_num)))
# Persist the entire workspace (fit, error, likelihood, max.corr, ...).
save.image(out_RData)
|
/pmsignature/getSig.all.R
|
no_license
|
ghxdghxd/MutSignatures
|
R
| false
| false
| 6,765
|
r
|
Args <- commandArgs(T)
input_pm = Args[1] # {Cancer_Type}/{Cancer_Type}.pm
Signature_num = as.numeric(Args[2]) # the number of signatures
mc = as.numeric(Args[3]) # the number of cores
outdir = Args[4] # the output dir
out_RData = gsub("pm", paste0(paste0("est.sig.",Signature_num), ".RData"), basename(input_pm))
out_RData = paste0(outdir, "/", out_RData)
library(GenomicRanges, BSgenome.Hsapiens.UCSC.hg19)
library(pmsignature)
library(ggplot2)
library(cowplot)
library(reshape)
library(lsa)
library(parallel)
# G <- readMPFile(input_pm, numBases = 5, trDir = TRUE)
# BG_prob <- readBGFile(G)
load("G.RData")
# Deciphering Signatures
for(i in list.files("/home/jintao/R/packages/pmsignature-devel/R", "*.R", full.names=T)){source(i)}
getPMSignature_parallel = function (mutationFeatureData, K, BG = NULL, numInit = 10, tol = 1e-04,
maxIter = 10000)
{
if (!is.null(BG)) {
isBG <- TRUE
varK <- K - 1
}else {
isBG <- FALSE
varK <- K
BG <- 0
}
sampleNum <- length(slot(mutationFeatureData, "sampleList"))
fdim <- slot(mutationFeatureData, "possibleFeatures")
tempL <- -Inf
tempPar <- c()
ress = mclapply(1:numInit, function(kkk){
F <- array(0, c(varK, length(fdim), max(fdim)))
for (k in 1:varK) {
for (kk in 1:length(fdim)) {
F[k, kk, 1:fdim[kk]] <- rgamma(fdim[kk], rep(1,
fdim[kk]))
F[k, kk, 1:fdim[kk]] <- F[k, kk, 1:fdim[kk]]/sum(F[k,
kk, 1:fdim[kk]])
}
}
Q <- matrix(rgamma(sampleNum * K, 1, 1), K, sampleNum)
Q <- sweep(Q, 2, apply(Q, 2, sum), `/`)
p0 <- c(convertToTurbo_F(as.vector(F), fdim, K, isBG),
convertToTurbo_Q(as.vector(t(Q)), K, sampleNum))
Y <- list(list(sampleNum, fdim, slot(mutationFeatureData,
"featureVectorList"), slot(mutationFeatureData, "countData")),
K, isBG, BG)
res1 <- mySquareEM(p0, Y, tol = tol, maxIter = maxIter)
cat(paste("#trial: ", sprintf("%2d", kkk), "; #iteration: ",
sprintf("%4d", as.integer(res1$itr)), "; time(s): ",
sprintf("%4.2f", res1$elapsed.time), "; convergence: ",
res1$convergence, "; loglikelihood: ", sprintf("%.4f",
res1$value.objfn), "\n", sep = ""))
return(c(res1$value.objfn, res1$par))
}, mc.cores = mc)
tempL = max(sapply(ress, function(x){x[1]}))
tempPar = unlist(ress[which.max(sapply(ress, function(x){x[1]}))])[-1]
lenF <- varK * (sum(fdim) - length(fdim))
lenQ <- sampleNum * (K - 1)
F <- convertFromTurbo_F(tempPar[1:lenF], fdim, K, isBG)
Q <- convertFromTurbo_Q(tempPar[(lenF + 1):(lenF + lenQ)],
K, sampleNum)
dim(F) <- c(varK, length(fdim), max(fdim))
dim(Q) <- c(sampleNum, K)
return(new(Class = "EstimatedParameters", type = slot(mutationFeatureData,
"type"), flankingBasesNum = slot(mutationFeatureData,
"flankingBasesNum"), transcriptionDirection = slot(mutationFeatureData,
"transcriptionDirection"), possibleFeatures = slot(mutationFeatureData,
"possibleFeatures"), sampleList = slot(mutationFeatureData,
"sampleList"), signatureNum = as.integer(K), isBackGround = isBG,
signatureFeatureDistribution = F, sampleSignatureDistribution = Q,
loglikelihood = tempL))
}
bootPMSignature_parallel <- function(mutationFeatureData, Param0, bootNum = 10, BG = NULL,
tol = 1e-2, maxIter = 10000) {
K <- slot(Param0, "signatureNum")
isBG <- slot(Param0, "isBackGround")
if (isBG == TRUE) {
if (!is.null(BG)) {
varK <- K - 1
} else {
stop(paste("The input parameter is estimated using a background signature.\n",
"Please specify the same background signature."))
}
} else {
if (!is.null(BG)) {
warning(paste("The input parameter is estimated without using a background signature.\n",
"Specified background signature is ignored."))
}
varK <- K
BG <- 0
}
sampleNum <- length(slot(mutationFeatureData, "sampleList"))
fdim <- slot(mutationFeatureData, "possibleFeatures")
countData_org <- slot(mutationFeatureData, "countData")
bootData <- countData_org
F0 <- slot(Param0, "signatureFeatureDistribution")
Q0 <- slot(Param0, "sampleSignatureDistribution")
tempL <- -Inf
tempPar <- c()
sqF <- array(0, c(bootNum, varK, length(fdim), max(fdim)))
sqQ <- array(0, c(bootNum, nrow(Q0), ncol(Q0)))
sq_list = mclapply(1:bootNum, function(bbb){
##########
# This part is under construction!!!!
# bootData violates the validity rules of the mutation feature class... I don't like this..
tempG <- table(sample(1:length(countData_org[3,]), sum(countData_org[3,]), replace=TRUE, prob= countData_org[3,] / sum(countData_org[3,]) ))
bootData[3, ] <- 0
bootData[3, as.integer(names(tempG))] <- tempG
##########
p0 <- c(convertToTurbo_F(as.vector(F0), fdim, K, isBG), convertToTurbo_Q(as.vector(t(Q0)), K, sampleNum))
Y <- list(list(sampleNum, fdim, slot(mutationFeatureData, "featureVectorList"), bootData), K, isBG, BG)
res1 <- mySquareEM(p0, Y, tol = tol, maxIter = maxIter)
cat(paste("#trial: ", sprintf("%2d", bbb),
"; #iteration: ", sprintf("%4d", as.integer(res1$itr)),
"; time(s): ", sprintf("%4.2f", res1$elapsed.time),
"; convergence: ", res1$convergence,
"; loglikelihood: ", sprintf("%.4f", res1$value.objfn), "\n", sep=""
))
tempPar <- res1$par
lenF <- varK * (sum(fdim) - length(fdim))
lenQ <- sampleNum * (K - 1)
F <- convertFromTurbo_F(res1$par[1:lenF], fdim, K, isBG)
Q <- convertFromTurbo_Q(res1$par[(lenF + 1):(lenF + lenQ)], K, sampleNum)
dim(F) <- c(varK, length(fdim), max(fdim))
dim(Q) <- c(sampleNum, K)
sqF_l = lapply(1:varK, function(k){(F[k,,] - F0[k,,])^2})
sqQ_l = lapply(1:sampleNum, function(n){(Q[n,] - Q0[n,])^2})
return(list(bbb, sqF_l, sqQ_l))
}, mc.cores = mc)
for(i in 1:bootNum){
a = sq_list[[i]]
for(j in 1:length(a[[2]])){
sqF[a[[1]], j, , ] <- a[[2]][[j]]
}
sqQ[a[[1]],,] <- matrix(unlist(a[[3]]), nrow=length(a[[3]]), byrow=T)
}
return(list(sqF, sqQ))
}
Param <- getPMSignature_parallel(G, K = Signature_num, numInit = 10, BG = BG_prob)
error = mean(bootPMSignature_parallel(G, Param, bootNum = 10, BG = BG_prob)[[1]])
likelihood = Param@loglikelihood
max.corr = max(cor(Param@sampleSignatureDistribution) - 2 * diag(rep(1, Signature_num)))
save.image(out_RData)
|
## Proportional hazards GSM with random effects
# Fits a generalized survival model with trial:treatment random effects via
# TMB (Laplace approximation over u), using rstpm2::stpm2 for initial values.
require(TMB)
require(foreign)
require(rstpm2)
# TMB C++ template: negative log-likelihood of a PH link GSM.
# h(i) < eps is handled with a quadratic penalty to keep the hazard positive.
# (The string below is compiled verbatim -- do not edit casually.)
src <- "#include <TMB.hpp>
template<class Type>
Type objective_function<Type>::operator() ()
{
  DATA_VECTOR(event);  // double
  DATA_MATRIX(X);
  DATA_MATRIX(XD);
  DATA_MATRIX(Z);
  DATA_SCALAR(eps); // boundary value for values that are too small or negative
  DATA_SCALAR(kappa); // scale for the quadratic penalty
  PARAMETER_VECTOR(beta);
  PARAMETER_VECTOR(u);
  PARAMETER(log_sigma);
  Type sigma = exp(log_sigma);
  vector<Type> eta = X*beta + Z*u;
  vector<Type> etaD = XD*beta;
  vector<Type> H = exp(eta);
  vector<Type> h = etaD*H;
  vector<Type> logl(event.size());
  Type pen = 0.0;
  for(int i=0; i<event.size(); ++i) {
    if (h(i)<eps) {
      logl(i) = event(i)*log(eps)-H(i);
      pen += h(i)*h(i)*kappa;
    } else {
      logl(i) = event(i)*log(h(i))-H(i);
    }
  }
  Type f = -sum(logl) + pen;
  for(int i=0; i<u.size(); ++i) {
    f -= dnorm(u(i), Type(0), sigma, true);
  }
  ADREPORT(sigma);
  return f;
}"
# Write, compile and load the TMB objective in the current directory.
write(src, file="phlink.cpp")
compile("phlink.cpp") # slow compilation
dyn.load(dynlib("phlink"))
##
# Example data set from the Stata stmixed package (fetched over HTTP).
stmixed <- read.dta("http://fmwww.bc.edu/repec/bocode/s/stmixed_example2.dta")
system.time(fit <- {
    # Fixed-effects GSM fit supplies starting values and design matrices.
    init <- stpm2(Surv(stime,event)~treat+factor(trial),data=stmixed,df=3) # initial values
    args <- init@args
    # Random-effects design: one treatment slope per trial, no intercept.
    Z <- model.matrix(~factor(trial):treat-1,stmixed)
    # u is integrated out via the Laplace approximation (random="u").
    f <- MakeADFun(data=list(X=args$X,XD=args$XD,Z=Z,event=as.double(stmixed$event),eps=1e-6,kappa=1.0),
                   parameters=list(beta=coef(init),u=rep(0,ncol(Z)),log_sigma=-4.0),
                   method="nlminb",
                   random="u",
                   DLL="phlink", silent=TRUE)
    nlminb(f$par,f$fn,f$gr)
})# FAST!
# Delta-method standard errors, including the ADREPORTed sigma.
summary(sdreport(f,fit$par))
|
/tmb-example.R
|
permissive
|
mclements/autodiff
|
R
| false
| false
| 1,784
|
r
|
## Proportional hazards GSM with random effects
require(TMB)
require(foreign)
require(rstpm2)
src <- "#include <TMB.hpp>
template<class Type>
Type objective_function<Type>::operator() ()
{
DATA_VECTOR(event); // double
DATA_MATRIX(X);
DATA_MATRIX(XD);
DATA_MATRIX(Z);
DATA_SCALAR(eps); // boundary value for values that are too small or negative
DATA_SCALAR(kappa); // scale for the quadratic penalty
PARAMETER_VECTOR(beta);
PARAMETER_VECTOR(u);
PARAMETER(log_sigma);
Type sigma = exp(log_sigma);
vector<Type> eta = X*beta + Z*u;
vector<Type> etaD = XD*beta;
vector<Type> H = exp(eta);
vector<Type> h = etaD*H;
vector<Type> logl(event.size());
Type pen = 0.0;
for(int i=0; i<event.size(); ++i) {
if (h(i)<eps) {
logl(i) = event(i)*log(eps)-H(i);
pen += h(i)*h(i)*kappa;
} else {
logl(i) = event(i)*log(h(i))-H(i);
}
}
Type f = -sum(logl) + pen;
for(int i=0; i<u.size(); ++i) {
f -= dnorm(u(i), Type(0), sigma, true);
}
ADREPORT(sigma);
return f;
}"
write(src, file="phlink.cpp")
compile("phlink.cpp") # slow compilation
dyn.load(dynlib("phlink"))
##
stmixed <- read.dta("http://fmwww.bc.edu/repec/bocode/s/stmixed_example2.dta")
system.time(fit <- {
init <- stpm2(Surv(stime,event)~treat+factor(trial),data=stmixed,df=3) # initial values
args <- init@args
Z <- model.matrix(~factor(trial):treat-1,stmixed)
f <- MakeADFun(data=list(X=args$X,XD=args$XD,Z=Z,event=as.double(stmixed$event),eps=1e-6,kappa=1.0),
parameters=list(beta=coef(init),u=rep(0,ncol(Z)),log_sigma=-4.0),
method="nlminb",
random="u",
DLL="phlink", silent=TRUE)
nlminb(f$par,f$fn,f$gr)
})# FAST!
summary(sdreport(f,fit$par))
|
# Shiny reactive: slices the global `data` frame down to the date range
# currently selected in input$dateRange and returns it wrapped in a list.
get_data <- reactive({
# variables
start_date <- input$dateRange[1]
end_date <- input$dateRange[2]
# data frames
# Rows with the_date in [start_date, end_date) -- end date exclusive.
requests <- subset(data, the_date >= start_date & the_date < end_date)
# NOTE(review): `requests` is computed but never used, and `calls` is not
# defined anywhere in this scope -- this likely should read
# list(calls = requests); confirm against the rest of the app.
info <- list(calls = calls)
return(info)
})
|
/twitch/scripts/reactive.r
|
no_license
|
Josh-D/tw_app
|
R
| false
| false
| 256
|
r
|
get_data <- reactive({
# variables
start_date <- input$dateRange[1]
end_date <- input$dateRange[2]
# data frames
requests <- subset(data, the_date >= start_date & the_date < end_date)
info <- list(calls = calls)
return(info)
})
|
# One-off project setup: initialise renv for this project and record the
# currently installed package versions in renv.lock without prompting.
renv::init()
renv::snapshot(prompt = FALSE)
|
/scripts/001_renv_init.R
|
no_license
|
MarcinKosinski/RonAWS
|
R
| false
| false
| 44
|
r
|
renv::init()
renv::snapshot(prompt = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parameters_beta.R
\name{parameters_beta}
\alias{parameters_beta}
\title{hook for beta/Gaussian equivalency tricks}
\usage{
parameters_beta(dat, ...)
}
\arguments{
\item{dat}{a vector of data, purportedly from a Beta distribution}
\item{...}{additional arguments to be passed on to MASS::fitdistr}
}
\value{
\if{html}{\out{<div class="sourceCode">}}\preformatted{ an object of class `fitdistr` (see MASS::fitdistr for more)
}\if{html}{\out{</div>}}
}
\description{
The canonical Beta distribution is expressed as
}
\details{
X ~ Beta(a, b)
where a > 0 is a shape parameter and b > 0 is another shape parameter.
\code{a} often represents the number of successes and \code{b} the number of failures
in a series of trials, whereupon the Beta represents the conjugate prior for
the binomial distribution where the success probability \code{p} is (a / (a + b)).
The MASS::fitdistr function requires start parameters for a Beta distribution
which we compute via the method of moments and then feed to MASS::fitdistr().
}
\examples{
set.seed(1234)
dat <- rbeta(n=1000, 17, 39)
parameters_beta(dat)
}
\seealso{
\if{html}{\out{<div class="sourceCode">}}\preformatted{ MASS::fitdistr
}\if{html}{\out{</div>}}
}
|
/man/parameters_beta.Rd
|
no_license
|
VanAndelInstitute/bifurcatoR
|
R
| false
| true
| 1,289
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parameters_beta.R
\name{parameters_beta}
\alias{parameters_beta}
\title{hook for beta/Gaussian equivalency tricks}
\usage{
parameters_beta(dat, ...)
}
\arguments{
\item{dat}{a vector of data, purportedly from a Beta distribution}
\item{...}{additional arguments to be passed on to MASS::fitdistr}
}
\value{
\if{html}{\out{<div class="sourceCode">}}\preformatted{ an object of class `fitdistr` (see MASS::fitdistr for more)
}\if{html}{\out{</div>}}
}
\description{
The canonical Beta distribution is expressed as
}
\details{
X ~ Beta(a, b)
where a > 0 is a shape parameter and b > 0 is another shape parameter.
\code{a} often represents the number of successes and \code{b} the number of failures
in a series of trials, whereupon the Beta represents the conjugate prior for
the binomial distribution where the success probability \code{p} is (a / (a + b)).
The MASS::fitdistr function requires start parameters for a Beta distribution
which we compute via the method of moments and then feed to MASS::fitdistr().
}
\examples{
set.seed(1234)
dat <- rbeta(n=1000, 17, 39)
parameters_beta(dat)
}
\seealso{
\if{html}{\out{<div class="sourceCode">}}\preformatted{ MASS::fitdistr
}\if{html}{\out{</div>}}
}
|
library(dplyr)
# Collect the best metric results across all datasets.
#
# Args:
#   metrics:  name of the result column to optimise (forwarded).
#   datasets: data frame of datasets; must contain a `did` column.
# Returns: the concatenation of DatasetBestMetrics() results over every
#   dataset id (NULL when `datasets` is empty).
# NOTE(review): DatasetBestMetrics() is not defined in this file -- only
# DatasetMetrics()/DatasetBestMetricsForAlgorithm() are; confirm it exists.
PlotAllDatasetBestMetrics <- function(metrics, datasets = globalDataSets) {
  # Map then concatenate once, instead of growing a vector with c() inside
  # a loop (which copies the accumulator on every iteration).
  per_dataset <- lapply(datasets$did, function(id) DatasetBestMetrics(id, metrics))
  do.call(c, per_dataset)
}
# Fraction of missing values in each meta-attribute column.
#
# Args:
#   meta_attributes: data frame, one meta-attribute per column.
# Returns: named numeric vector in [0, 1] -- the share of NA entries per
#   column, named after the columns.
MetaAttributesMissingValuesCoverage <- function(meta_attributes = globalQualities) {
  # vapply() is type-stable (always a numeric vector), unlike sapply().
  na_counts <- vapply(meta_attributes, function(col) sum(is.na(col)), numeric(1))
  na_counts / nrow(meta_attributes)
}
# ------------------------- helper functions -------------------------
# Implementations achieving the best (or worst) value of a metric on a dataset.
#
# Args:
#   did:     dataset id, resolved to runs via ResultForDataset().
#   metrics: name of the numeric result column to optimise.
#   max:     TRUE for the maximum of the metric, FALSE for the minimum.
# Returns: data frame with columns `implementation` (parameter suffix such
#   as "(...)" stripped) and `value`, or NA when no run attains the value.
DatasetMetrics <- function(did, metrics, max = TRUE) {
  results <- ResultForDataset(did)
  # Best/worst value of the requested metric column.
  if (max) {
    value <- max(results[, metrics])
  } else {
    value <- min(results[, metrics])
  }
  # Keep only the rows attaining that value. Base subsetting replaces the
  # deprecated dplyr::filter_() string interface; the explicit !is.na()
  # preserves filter()'s behavior of dropping NA comparisons.
  keep <- !is.na(results[[metrics]]) & results[[metrics]] == value
  results <- results[keep, , drop = FALSE]
  if (nrow(results) > 0) {
    unique_names <- unique(results[, "implementation"])
    # Drop everything after "(" so "classif.rpart(...)" -> "classif.rpart".
    unique_names <- sapply(strsplit(unique_names, "\\("), function(x) x[1])
    return(data.frame(stringsAsFactors = F, implementation = unique_names, value = rep(value, length(unique_names))))
  } else {
    return(NA)
  }
}
# Best value of `metrics` on dataset `did`, restricted to runs whose
# implementation name matches `algorithm` (regex via grepl); NA when the
# dataset has no runs or none match.
DatasetBestMetricsForAlgorithm <- function(did, metrics, algorithm) {
  runs <- ResultForDataset(did)
  if (nrow(runs) == 0) {
    return(NA)
  }
  matching <- runs[grepl(algorithm, runs$implementation), ]
  if (nrow(matching) == 0) {
    return(NA)
  }
  data.frame(stringsAsFactors = F,
             implementation = algorithm,
             value = max(matching[, metrics]))
}
# Task ids registered for the given dataset id.
TaskIdForDataset <- function(datasetId, tasks = globalTasks) {
  matching_tasks <- filter(tasks, did == datasetId)
  matching_tasks$task_id
}
# All run results whose task belongs to the given dataset.
ResultForDataset <- function(datasetId, runs = globalResults) {
  dataset_tasks <- TaskIdForDataset(datasetId)
  filter(runs, task.id %in% dataset_tasks)
}
# Compare predicted best-AUC algorithms against the actual best/worst AUC
# per dataset, draw a bar/point/line plot of real vs predicted vs minimum
# values, and return the combined data frame.
#
# Args:
#   predictions: data frame with columns `did` and `algorithm` (one
#     predicted algorithm per dataset); rows whose real best AUC >= 0.9
#     are removed from it inside the loop.
# Returns: data frame of predictions plus `pred`, `real`, `min` columns,
#   with rows missing either real or predicted values dropped.
PrepareData <- function(predictions) {
  did <- predictions$did
  values <- c()
  real_values <- c()
  min_values <- c()
  m <- NA
  for (id in did) {
    #print(DatasetBestMetricsForAlgorithm(id, "area.under.roc.curve", predictions[id, "algorithm"]))
    # NOTE(review): predictions[id, "algorithm"] indexes by row POSITION
    # using the dataset id -- this is only correct if row i holds did == i.
    val <- DatasetBestMetricsForAlgorithm(id, "area.under.roc.curve", predictions[id, "algorithm"])
    rel <- DatasetMetrics(id, "area.under.roc.curve")
    m <- DatasetMetrics(id, "area.under.roc.curve", max = FALSE)
    rrel <- NA
    #print(rel)
    # NOTE(review): when rel is a data frame, is.na(rel) is a matrix, so
    # `&&` gets a length > 1 condition (an error in R >= 4.3) -- confirm
    # the intended check (e.g. is.data.frame(rel)).
    if (!is.na(rel) && nrow(rel) != 0) {
      rrel <- rel$value[1]
    }
    # NOTE(review): rrel can still be NA here, making if (NA < 0.9) an
    # error ("missing value where TRUE/FALSE needed").
    if (rrel < 0.9) {
      # Collapse m from a data frame to its (first) best value.
      if (!is.na(m) && nrow(m) != 0) {
        m <- m$value[1]
      }
      # Record the predicted algorithm's AUC, or NA when it had no runs.
      if (is.data.frame(val) > 0) {
        values <- c(values, val$value)
      } else {
        values <- c(values, NA)
      }
      real_values <- c(real_values, rrel)
      min_values <- c(min_values, m)
    } else {
      # Easy datasets (best AUC >= 0.9) are dropped from the output.
      predictions <- predictions[predictions$did != id, ]
    }
  }
  d <- data.frame(predictions, pred = values, real = real_values, min = min_values)
  d <- d[!is.na(d$real) & !is.na(d$pred), ]
  # Side effect: bar chart of real best AUC, overlaid with predicted
  # values (points), minimum values (line), and 0.9 / 0.98 guides.
  bar <- barplot(d$real, col = "blue", pch = 20)
  points(bar, d$pred, col = "red", pch = 20)
  lines(bar, d$min, col = "yellow", pch = 20)
  abline(h = 0.9, col = "green")
  abline(h = 0.98, col = "orange")
  d
}
|
/ml-auto-configuration-rules/statistics.R
|
no_license
|
quepas/Oy-mate
|
R
| false
| false
| 2,953
|
r
|
library(dplyr)
PlotAllDatasetBestMetrics <- function(metrics, datasets = globalDataSets) {
did <- datasets$did
all_best <- c()
for (id in did) {
all_best <- c(all_best, DatasetBestMetrics(id, metrics))
}
all_best
}
MetaAttributesMissingValuesCoverage <- function(meta_attributes = globalQualities) {
sapply(meta_attributes, function(col) sum(is.na(col)))/nrow(meta_attributes)
}
# ------------------------- helper functions -------------------------
DatasetMetrics <- function(did, metrics, max = TRUE) {
results <- ResultForDataset(did)
if (max) {
value <- max(results[, metrics])
} else {
value <- min(results[, metrics])
}
results <- filter_(results, paste(metrics, "==", value))
if (nrow(results) > 0) {
unique_names <- unique(results[, "implementation"])
unique_names <- sapply(strsplit(unique_names, "\\("), function(x) x[1])
return(data.frame(stringsAsFactors = F, implementation = unique_names, value = rep(value, length(unique_names))))
} else {
return(NA)
}
}
DatasetBestMetricsForAlgorithm <- function(did, metrics, algorithm) {
results <- ResultForDataset(did)
if (nrow(results) > 0) {
results <- results[grepl(algorithm, results$implementation), ]
if (nrow(results) > 0) {
return(data.frame(stringsAsFactors = F, implementation = algorithm, value = max(results[, metrics])))
}
}
return(NA)
}
TaskIdForDataset <- function(datasetId, tasks = globalTasks) {
filter(tasks, did == datasetId)$task_id
}
ResultForDataset <- function(datasetId, runs = globalResults) {
tasks <- TaskIdForDataset(datasetId)
filter(runs, task.id %in% tasks)
}
PrepareData <- function(predictions) {
did <- predictions$did
values <- c()
real_values <- c()
min_values <- c()
m <- NA
for (id in did) {
#print(DatasetBestMetricsForAlgorithm(id, "area.under.roc.curve", predictions[id, "algorithm"]))
val <- DatasetBestMetricsForAlgorithm(id, "area.under.roc.curve", predictions[id, "algorithm"])
rel <- DatasetMetrics(id, "area.under.roc.curve")
m <- DatasetMetrics(id, "area.under.roc.curve", max = FALSE)
rrel <- NA
#print(rel)
if (!is.na(rel) && nrow(rel) != 0) {
rrel <- rel$value[1]
}
if (rrel < 0.9) {
if (!is.na(m) && nrow(m) != 0) {
m <- m$value[1]
}
if (is.data.frame(val) > 0) {
values <- c(values, val$value)
} else {
values <- c(values, NA)
}
real_values <- c(real_values, rrel)
min_values <- c(min_values, m)
} else {
predictions <- predictions[predictions$did != id, ]
}
}
d <- data.frame(predictions, pred = values, real = real_values, min = min_values)
d <- d[!is.na(d$real) & !is.na(d$pred), ]
bar <- barplot(d$real, col = "blue", pch = 20)
points(bar, d$pred, col = "red", pch = 20)
lines(bar, d$min, col = "yellow", pch = 20)
abline(h = 0.9, col = "green")
abline(h = 0.98, col = "orange")
d
}
|
# Batch job: allele-specific read counting for one TCGA sample (EA cohort),
# counting reads per exon for haplotype 1 and haplotype 2 BAM files.
i = 96 # index of the sample to process (hard-coded per job/array task)
library(isoform, lib.loc="/nas02/home/w/e/weisun/R/Rlibs/")
# Non-overlapping exon definitions used as the counting bins.
bedFile = "/nas02/home/w/e/weisun/research/data/human/Homo_sapiens.GRCh37.66.nonoverlap.exon.bed"
setwd("/lustre/scr/w/e/weisun/TCGA/bam/")
# Enumerate all haplotype-1 allele-specific BAM files via a shell glob.
cmd = "ls *_asCounts_hetSNP_EA_hap1.bam"
ffs = system(cmd, intern=TRUE)
length(ffs)
head(ffs)
# Sample names = file names with the haplotype-1 suffix stripped.
sams = gsub("_asCounts_hetSNP_EA_hap1.bam", "", ffs)
sam1 = sams[i]
cat(i, sam1, date(), "\n")
# Count reads per exon for haplotype 1 ...
bamFile = ffs[i]
outFile = sprintf("%s_asCounts_hap1.txt", sam1)
countReads(bamFile, bedFile, outFile)
# ... then for haplotype 2 (same file name with hap1 -> hap2).
bamFile = gsub("_hap1", "_hap2", ffs[i], fixed=TRUE)
outFile = sprintf("%s_asCounts_hap2.txt", sam1)
countReads(bamFile, bedFile, outFile)
|
/data_preparation/R_batch3/_step3/step3_countReads_EA.95.R
|
no_license
|
jasa-acs/Mapping-Tumor-Specific-Expression-QTLs-in-Impure-Tumor-Samples
|
R
| false
| false
| 650
|
r
|
i = 96
library(isoform, lib.loc="/nas02/home/w/e/weisun/R/Rlibs/")
bedFile = "/nas02/home/w/e/weisun/research/data/human/Homo_sapiens.GRCh37.66.nonoverlap.exon.bed"
setwd("/lustre/scr/w/e/weisun/TCGA/bam/")
cmd = "ls *_asCounts_hetSNP_EA_hap1.bam"
ffs = system(cmd, intern=TRUE)
length(ffs)
head(ffs)
sams = gsub("_asCounts_hetSNP_EA_hap1.bam", "", ffs)
sam1 = sams[i]
cat(i, sam1, date(), "\n")
bamFile = ffs[i]
outFile = sprintf("%s_asCounts_hap1.txt", sam1)
countReads(bamFile, bedFile, outFile)
bamFile = gsub("_hap1", "_hap2", ffs[i], fixed=TRUE)
outFile = sprintf("%s_asCounts_hap2.txt", sam1)
countReads(bamFile, bedFile, outFile)
|
# k-fold cross-validated naive Bayes binary classification: pools the
# held-out predictions from every fold and reports a single confusion
# matrix over the pooled results.
library(caret)
library(e1071)
rm(list = ls())
#variable
jumlah_fold = 5  # number of CV folds
#membaca data (read the data)
main_data = read.csv("data_2_class_seimbang.csv", stringsAsFactors = FALSE)
#membagi data menjadi k group (split data into k stratified folds)
print("Membuat kelompok")
fold_index = createFolds(main_data$class_label, k=jumlah_fold)
# Clear any accumulator left over from a previous run.
if(exists("predict_result")) {
  rm("predict_result")
}
for(fold_i in 1:jumlah_fold){
  print("---------------------------------")
  print(paste("iterasi ke-",fold_i))
  #membuat data training & testing - start (build train/test split)
  print(paste("Membuat data testing & training ke-", fold_i))
  test_index = as.numeric(unlist((fold_index[fold_i])))
  main_data.testing = main_data[test_index,]
  main_data.training = main_data[-test_index,]
  #membuat class_label menjadi kategori (coerce label to factor)
  main_data.training$class_label = as.factor(main_data.training$class_label)
  main_data.testing$class_label = as.factor(main_data.testing$class_label)
  #membuat data training & testing - end
  #membuat model klasifikasi (fit the classifier)
  print(paste("Training model klasifikasi ke-", fold_i))
  model_klasifikasi = naiveBayes(class_label~., main_data.training)
  #melakukan prediksi (predict on the held-out fold)
  # NOTE(review): [,-5] assumes class_label is column 5 -- confirm against
  # the CSV layout.
  print(paste("Melakukan prediksi ke-", fold_i))
  prediksi_klasifikasi = predict(model_klasifikasi, main_data.testing[,-5])
  #label dari data testing dan prediksi dari setiap iterasi dikumpulkan
  # (pool held-out labels and predictions across folds)
  # NOTE(review): the first fold creates a character matrix via cbind();
  # rbind.data.frame() then converts it -- with a single fold the `$`
  # accesses below would fail on the matrix.
  if(!exists("predict_result")){
    assign("predict_result", cbind(as.character(prediksi_klasifikasi),
                                   as.character(main_data.testing$class_label)))
  } else {
    predict_result = rbind.data.frame(predict_result,
                                      cbind(as.character(prediksi_klasifikasi),
                                            as.character(main_data.testing$class_label)))
  }
}
colnames(predict_result) = c("predict", "class_label")
predict_result$predict = as.factor(predict_result$predict)
predict_result$class_label = as.factor(predict_result$class_label)
#menghitung kinerja klasifikasi (compute classification performance)
#kinerja dihitung dengan menggunakan data label dari data testing dan prediksi
#yang telah dikumpulkan
# (performance is computed from the pooled held-out labels and predictions)
print("---------------------------------")
print("Menghitung kinerja klasifikasi")
kinerja_klasifikasi = confusionMatrix(predict_result$predict,
                                      predict_result$class_label, positive = "positive")
print(kinerja_klasifikasi)
|
/Naive Bayes/naive bayes_Binary Classification.R
|
no_license
|
nolaristiyanti/Cross-Validation
|
R
| false
| false
| 2,332
|
r
|
library(caret)
library(e1071)
rm(list = ls())
#variable
jumlah_fold = 5
#membaca data
main_data = read.csv("data_2_class_seimbang.csv", stringsAsFactors = FALSE)
#membagi data menjadi k group
print("Membuat kelompok")
fold_index = createFolds(main_data$class_label, k=jumlah_fold)
if(exists("predict_result")) {
rm("predict_result")
}
for(fold_i in 1:jumlah_fold){
print("---------------------------------")
print(paste("iterasi ke-",fold_i))
#membuat data training & testing - start
print(paste("Membuat data testing & training ke-", fold_i))
test_index = as.numeric(unlist((fold_index[fold_i])))
main_data.testing = main_data[test_index,]
main_data.training = main_data[-test_index,]
#membuat class_label menjadi kategori
main_data.training$class_label = as.factor(main_data.training$class_label)
main_data.testing$class_label = as.factor(main_data.testing$class_label)
#membuat data training & testing - end
#membuat model klasifikasi
print(paste("Training model klasifikasi ke-", fold_i))
model_klasifikasi = naiveBayes(class_label~., main_data.training)
#melakukan prediksi
print(paste("Melakukan prediksi ke-", fold_i))
prediksi_klasifikasi = predict(model_klasifikasi, main_data.testing[,-5])
#label dari data testing dan prediksi dari setiap iterasi dikumpulkan
if(!exists("predict_result")){
assign("predict_result", cbind(as.character(prediksi_klasifikasi),
as.character(main_data.testing$class_label)))
} else {
predict_result = rbind.data.frame(predict_result,
cbind(as.character(prediksi_klasifikasi),
as.character(main_data.testing$class_label)))
}
}
colnames(predict_result) = c("predict", "class_label")
predict_result$predict = as.factor(predict_result$predict)
predict_result$class_label = as.factor(predict_result$class_label)
#menghitung kinerja klasifikasi
#kinerja dihitung dengan menggunakan data label dari data testing dan prediksi
#yang telah dikumpulkan
print("---------------------------------")
print("Menghitung kinerja klasifikasi")
kinerja_klasifikasi = confusionMatrix(predict_result$predict,
predict_result$class_label, positive = "positive")
print(kinerja_klasifikasi)
|
\name{RegWhy.characterType.nonWordBoundary}
\alias{RegWhy.characterType.nonWordBoundary}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{nonWordBoundary
%% ~~function to do ... ~~
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
RegWhy.characterType.nonWordBoundary()
}
%- maybe also 'usage' for other objects documented here.
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function ()
{
return("\\B")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/R/RegWhy/man/RegWhy.characterType.nonWordBoundary.Rd
|
no_license
|
dmerson/RegWhyMultiLanguage
|
R
| false
| false
| 1,321
|
rd
|
\name{RegWhy.characterType.nonWordBoundary}
\alias{RegWhy.characterType.nonWordBoundary}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{nonWordBoundary
%% ~~function to do ... ~~
}
\description{
Returns the regular expression token for a non-word boundary, \code{"\\B"}.
}
\usage{
RegWhy.characterType.nonWordBoundary()
}
%- maybe also 'usage' for other objects documented here.
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function ()
{
return("\\B")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
library(tidyverse)
library(data.world)
library(stringr)
library(ISOweek)
library(lubridate)
library(patchwork)
library(grid)

# Dataset slug on data.world and a SQL pull of the full renewables table.
data_world <- "katebamford/makeover-monday"
renewables_datasheet_sql <- data.world::qry_sql("SELECT * FROM data")
renewables_data <- data.world::query(renewables_datasheet_sql, data_world)

# Keep rows with a known share_of_production, restrict to Austria in 2000,
# and flag whether each energy variable is renewable.
renewables_data %>%
  filter(!is.na(share_of_production)) %>%
  select(year, area, variable, share_of_production) %>%
  filter(area == "Austria", year == "2000") %>%
  # BUG FIX: mutate() was previously a separate statement (not piped into
  # the chain, so it had no data argument) and case_when() was missing its
  # formula right-hand side — both made the script error at runtime. The
  # completed rule marks the "Fossil " variable (original label includes a
  # trailing space) as non-renewable and everything else as renewable;
  # adjust if further non-renewable categories exist in the data.
  mutate(renewable = case_when(
    variable == "Fossil " ~ FALSE,
    TRUE ~ TRUE
  ))
|
/Week_5_makeover_monday.R
|
no_license
|
KateBamford/Makeover-Monday
|
R
| false
| false
| 539
|
r
|
library(tidyverse)
library(data.world)
library(stringr)
library(ISOweek)
library(lubridate)
library(patchwork)
library(grid)

# Dataset slug on data.world and a SQL pull of the full renewables table.
data_world <- "katebamford/makeover-monday"
renewables_datasheet_sql <- data.world::qry_sql("SELECT * FROM data")
renewables_data <- data.world::query(renewables_datasheet_sql, data_world)

# Keep rows with a known share_of_production, restrict to Austria in 2000,
# and flag whether each energy variable is renewable.
renewables_data %>%
  filter(!is.na(share_of_production)) %>%
  select(year, area, variable, share_of_production) %>%
  filter(area == "Austria", year == "2000") %>%
  # BUG FIX: mutate() was previously a separate statement (not piped into
  # the chain, so it had no data argument) and case_when() was missing its
  # formula right-hand side — both made the script error at runtime. The
  # completed rule marks the "Fossil " variable (original label includes a
  # trailing space) as non-renewable and everything else as renewable;
  # adjust if further non-renewable categories exist in the data.
  mutate(renewable = case_when(
    variable == "Fossil " ~ FALSE,
    TRUE ~ TRUE
  ))
|
library(stringr)
library(dplyr)
## ASA data read in and preproc ####
# Player-level ASA tables for the 2019 season. stringsAsFactors is spelled
# out as FALSE (T/F are ordinary reassignable variables in R and should not
# be used for logicals).
chain<-read.csv('./2019/ASA_PlayerxGChain_per96table.csv', stringsAsFactors = FALSE)
A3pass<-read.csv('./2019/ASApassingtable-attackthird.csv', stringsAsFactors = FALSE)
M3pass<-read.csv('./2019/ASApassingtable-middlethird.csv', stringsAsFactors = FALSE)
D3pass<-read.csv('./2019/ASApassingtable-defthird.csv', stringsAsFactors = FALSE)
shoot<-read.csv('./2019/ASAshootertable.csv', stringsAsFactors = FALSE)
totalpass<-read.csv('./2019/ASApassingtable-total.csv', stringsAsFactors = FALSE)
# Keep only players with at least 1200 minutes; note the minutes column is
# named "Minutes" in the chain table but "Min" everywhere else.
chain<-subset(chain, Minutes > 1200)
A3pass<-subset(A3pass, Min > 1200)
M3pass<-subset(M3pass, Min > 1200)
D3pass<-subset(D3pass, Min > 1200)
shoot<-subset(shoot, Min > 1200)
totalpass<-subset(totalpass, Min > 1200)
# Team-level totals, used later to express player stats as team shares.
TeamA3pass<-read.csv('./2019/ASAteampassingtable_A3.csv', stringsAsFactors = FALSE)
TeamM3pass<-read.csv('./2019/ASAteampassingtable_M3.csv', stringsAsFactors = FALSE)
TeamD3pass<-read.csv('./2019/ASAteampassingtable_D3.csv', stringsAsFactors = FALSE)
Teamshoot<-read.csv('./2019/ASAteamshooter.csv', stringsAsFactors = FALSE)
Teamtotalpass<-read.csv('./2019/ASAteampassingtable_total.csv', stringsAsFactors = FALSE)
## ASA player data ####
# Keep only per-96-minute rate columns plus identifier columns from each
# table (the matches() patterns are regexes, so ".96" matches any character
# followed by "96" — adequate here since the rate columns all end in .96).
A3pass<-select(A3pass, matches(".96|Player|Pos"))
M3pass<-select(M3pass, matches(".96|Player|Pos"))
D3pass<-select(D3pass, matches(".96|Player|Pos"))
shoot<-select(shoot, matches(".96|Player|Pos|Dist"))
chain<-select(chain, matches(".96|Player|Pos|Team|xB."))
totalpass<-select(totalpass, matches(".96|Player|Pos|Score"))
# Union of every player appearing in any table; `dat` is the single wide
# per-player summary table that the rest of the script fills in.
allplayers<-c(chain$Player, A3pass$Player, M3pass$Player, D3pass$Player, shoot$Player, totalpass$Player)
allplayers<-unique(allplayers)
dat<-data.frame(Player=allplayers,
                Pos=as.character(rep(NA, length(allplayers))),
                stringsAsFactors = F)
# Copy each player's stats from whichever source tables contain them.
# Pos is (re)assigned by several tables; the last matching table wins.
for (player in allplayers){
  #add Position and Minutes
  if(player %in% shoot$Player){
    #dat$InShooter[dat$Player==player]<-1
    dat$Pos[dat$Player==player]<-shoot$Pos[shoot$Player==player]
    dat$shots[dat$Player==player]<-shoot$Shots.96[shoot$Player==player]
    dat$KP[dat$Player==player]<-shoot$KeyP.96[shoot$Player==player]
    dat$xG[dat$Player==player]<-shoot$xG.96[shoot$Player==player]
    dat$xA[dat$Player==player]<-shoot$xA.96[shoot$Player==player]
    dat$xPlace[dat$Player==player]<-shoot$xPlace.96[shoot$Player==player]
    dat$ShotDist[dat$Player==player]<-shoot$Dist[shoot$Player==player]
    dat$KPDist[dat$Player==player]<-shoot$Dist.key[shoot$Player==player]
  }
  if(player %in% A3pass$Player){
    #dat$InA3[dat$Player==player]<-1
    dat$Pos[dat$Player==player]<-A3pass$Pos[A3pass$Player==player]
    dat$A3Passes[dat$Player==player]<-A3pass$Passes.96[A3pass$Player==player]
  }
  if(player %in% M3pass$Player){
    #dat$InM3[dat$Player==player]<-1
    dat$Pos[dat$Player==player]<-M3pass$Pos[M3pass$Player==player]
    dat$M3Passes[dat$Player==player]<-M3pass$Passes.96[M3pass$Player==player]
  }
  if(player %in% D3pass$Player){
    #dat$InD3[dat$Player==player]<-1
    dat$Pos[dat$Player==player]<-D3pass$Pos[D3pass$Player==player]
    dat$D3Passes[dat$Player==player]<-D3pass$Passes.96[D3pass$Player==player]
  }
  if(player %in% chain$Player){
    #dat$InChain[dat$Player==player]<-1
    dat$Pos[dat$Player==player]<-chain$Pos[chain$Player==player]
    dat$percChain[dat$Player==player]<-chain$TeamChain.[chain$Player==player]
    dat$xGChain[dat$Player==player]<-chain$xGChain.96[chain$Player==player]
    dat$xB[dat$Player==player]<-chain$xB.96[chain$Player==player]
    dat$Team[dat$Player==player]<-chain$Team[chain$Player==player]
    dat$ShotChainPerc[dat$Player==player]<-chain$PlayerShot.[chain$Player==player]
    dat$KPChainPerc[dat$Player==player]<-chain$PlayerKP.[chain$Player==player]
    dat$xBperc[dat$Player==player]<-chain$xB.[chain$Player==player]
  }
  if(player %in% totalpass$Player){
    dat$Vertical[dat$Player==player]<-totalpass$Vertical.96[totalpass$Player==player]
    dat$PassPct[dat$Player==player]<-totalpass$PassPct.96[totalpass$Player==player]
    dat$PassDistance[dat$Player==player]<-totalpass$Distance.96[totalpass$Player==player]
    #dat$TouchPerc[dat$Player==player]<-totalpass$Touch..96[totalpass$Player==player]
    dat$xPassPerc[dat$Player==player]<-totalpass$xPassPct.96[totalpass$Player==player]
    dat$Passes[dat$Player==player]<-totalpass$Passes.96[totalpass$Player==player]
    dat$PassScore[dat$Player==player]<-totalpass$Score.96[totalpass$Player==player]
  }
}
dat[is.na(dat)]<-0 #assuming missing vals mean zeros
# Drop goalkeepers from the summary.
dat<-subset(dat, Pos != "GK")
# Derived shares and per-event rates.
dat$A3perc<-dat$A3Passes/dat$Passes
dat$M3perc<-dat$M3Passes/dat$Passes
dat$D3perc<-dat$D3Passes/dat$Passes
dat$xGper<-dat$xG/dat$shots
dat$xAper<-dat$xA/dat$KP
# Division by zero above yields NaN; zero those out as well.
dat[is.na(dat)]<-0 #assuming missing vals mean zeros
## ASA team data ####
# Attach season team totals to every player row so player counting stats can
# be expressed as a share of team production. dat$Team comes from the chain
# table; players who appeared for two teams have a comma-separated string
# there, and for them the two teams' totals are averaged.
for (player in seq_len(nrow(dat))){
  playerteam<-dat$Team[player]
  if (!str_detect(playerteam, ',')){ #single team all season
    dat$teamA3pass[player]<-
      TeamA3pass$PassF[TeamA3pass$Team==playerteam]
    dat$teamM3pass[player]<-
      TeamM3pass$PassF[TeamM3pass$Team==playerteam]
    dat$teamD3pass[player]<-
      TeamD3pass$PassF[TeamD3pass$Team==playerteam]
    dat$teampass[player]<-
      Teamtotalpass$PassF[Teamtotalpass$Team==playerteam]
    dat$teamxG[player]<-
      Teamshoot$xGF[Teamshoot$Team==playerteam]
    dat$teamshots[player]<-
      Teamshoot$ShtF[Teamshoot$Team==playerteam]
  } else { #two teams
    team1<-str_split(playerteam, ',')[[1]][1]
    team2<-str_split(playerteam, ',')[[1]][2]
    # BUG FIX: mean(x, y) passes y to mean()'s `trim` argument and silently
    # ignores it, so only team1's total was ever used; mean(c(x, y))
    # actually averages the two team totals.
    dat$teamA3pass[player]<-
      mean(c(TeamA3pass$PassF[TeamA3pass$Team==team1],
             TeamA3pass$PassF[TeamA3pass$Team==team2]))
    dat$teamM3pass[player]<-
      mean(c(TeamM3pass$PassF[TeamM3pass$Team==team1],
             TeamM3pass$PassF[TeamM3pass$Team==team2]))
    dat$teamD3pass[player]<-
      mean(c(TeamD3pass$PassF[TeamD3pass$Team==team1],
             TeamD3pass$PassF[TeamD3pass$Team==team2]))
    dat$teampass[player]<-
      mean(c(Teamtotalpass$PassF[Teamtotalpass$Team==team1],
             Teamtotalpass$PassF[Teamtotalpass$Team==team2]))
    dat$teamxG[player]<-
      mean(c(Teamshoot$xGF[Teamshoot$Team==team1],
             Teamshoot$xGF[Teamshoot$Team==team2]))
    dat$teamshots[player]<-
      mean(c(Teamshoot$ShtF[Teamshoot$Team==team1],
             Teamshoot$ShtF[Teamshoot$Team==team2]))
  }#end two teams if
}
# Player share of team totals.
dat$A3teamperc<-dat$A3Passes/dat$teamA3pass
dat$M3teamperc<-dat$M3Passes/dat$teamM3pass
dat$D3teamperc<-dat$D3Passes/dat$teamD3pass
dat$passteamperc<-dat$Passes/dat$teampass
# BUG FIX: these two shares previously divided dat$Passes by the team
# totals (copy-paste error); a player's share of team xG/shots should use
# the player's own xG and shots.
dat$xGteamperc<-dat$xG/dat$teamxG
dat$shotteamperc<-dat$shots/dat$teamshots
## FBRef data read in and preproc ####
poss<-read.csv('./2019/FBRef-Possession.csv', stringsAsFactors = FALSE)
passtype<-read.csv('./2019/FBRef-PassType.csv')
fbshoot<-read.csv('./2019/FBRef-Shoot.csv')
fbpass<-read.csv('./2019/FBRef-Pass.csv')
# The first (unnamed) column from FBRef exports is the row rank.
colnames(poss)[1]<-'Rank'
colnames(passtype)[1]<-'Rank'
colnames(fbshoot)[1]<-'Rank'
colnames(fbpass)[1]<-'Rank'
#reformat player names
# Strip the FBRef id prefix ("id/First-Last" -> "First Last") and drop the
# bio/meta columns that are never used downstream. Factored into a helper
# because the identical pipeline was repeated for all four tables.
clean_fbref_names <- function(df) {
  tidyr::separate(df, Player, into=c(NA, "Player"), sep='/') %>%
    select(-c(Rank, Pos, Squad, Nation, Age, Born, Matches)) %>%
    mutate(Player=gsub("-", " ", Player))
}
poss<-clean_fbref_names(poss)
passtype<-clean_fbref_names(passtype)
fbshoot<-clean_fbref_names(fbshoot)
fbpass<-clean_fbref_names(fbpass)
#average together rows for two team players
twoplayers<-poss$Player[duplicated(poss$Player)]
# Collapse each two-team player to a single row by averaging their numeric
# stats; single-team rows pass through untouched.
average_two_team_rows <- function(df, dupe_names) {
  single<-df[!(df$Player %in% dupe_names),]
  dupes<-df[(df$Player %in% dupe_names),]
  rbind(single, aggregate(.~Player, data = dupes, FUN=mean))
}
poss<-average_two_team_rows(poss, twoplayers)
passtype<-average_two_team_rows(passtype, twoplayers)
passtype<-passtype[passtype$Player!='Marquinhos Pedroso',]
fbshoot<-average_two_team_rows(fbshoot, twoplayers)
fbshoot<-fbshoot[fbshoot$Player!='Marquinhos Pedroso',]
fbpass<-average_two_team_rows(fbpass, twoplayers)
fbpass<-fbpass[fbpass$Player!='Marquinhos Pedroso',]
# Players who appear in the passing table but never shot: append all-zero
# shooting rows so the four tables line up row-for-row in the cbind below.
noshotplayers<-passtype$Player[!(passtype$Player %in% fbshoot$Player)]
for (player in noshotplayers){
  fbshoot[nrow(fbshoot)+1,]<-
    c(player, rep(0, length(fbshoot)-1))
}
# Suffix the passing columns so they do not collide with same-named
# possession columns in the combined table, then restore the key column.
colnames(fbpass)<-paste0(colnames(fbpass), 'Pass')
colnames(fbpass)[1]<-'Player'
# Sort every table by player name and combine into one wide FBRef table.
fbref<-cbind(poss %>% arrange(Player),
             passtype %>% arrange(Player) %>% select(-Player),
             fbshoot %>% arrange(Player) %>% select(-Player),
             fbpass %>% arrange(Player) %>% select(-Player))
#find any differences in spellings or anything
allplayers[!(allplayers %in% fbref$Player)]
#replace (is there a better way to do this?)
# Data-driven answer to the question above: a single lookup table mapping
# each FBRef spelling to the ASA spelling used in `dat`, applied in one pass
# instead of 27 separate assignments. Same renames, same result.
fbref_matchnames<-fbref
asa_name_for<-c(
  "Mark Anthony Kaye"="Mark-Anthony Kaye",
  "Marky Delgado"="Marco Delgado",
  "Jorge Luis Corrales"="Jorge Corrales",
  "Kaku"="Alejandro Romero Gamarra",
  "Gonzalo Nicolas Martinez"="Gonzalo Martinez",
  "Boniek Garcia"="Oscar Boniek Garcia",
  "Jhegson Mendez"="Sebastian Mendez",
  "Adam Lundqvist"="Adam Lundkvist",
  "Christopher Mueller"="Chris Mueller",
  "Fabio Alvarez"="Favio Alvarez",
  "Ray Gaddis"="Raymon Gaddis",
  "Hwang In beom"="Hwang In-Beom",
  "Michael Amir Murillo"="Michael Murillo",
  "Kim Kee hee"="Kim Kee-Hee",
  "Harold Santiago Mosquera"="Santiago Mosquera",
  "Marcelo dos Santos Ferreira"="Marcelo",
  "Ali Adnan Kadhim"="Ali Adnan",
  "Antonio Delamea Mlinar"="Antonio Mlinar Delamea",
  "Cristian Casseres"="Cristian Casseres Jr",
  "Gerso Fernandes"="Gerso",
  "Thomas McNamara"="Tommy McNamara",
  "Steve Birnbaum"="Steven Birnbaum",
  "Fafa Picault"="Fabrice-Jean Picault",
  "Jake Nerwinski"="Jakob Nerwinski",
  "CJ Sapong"="C.J. Sapong",
  "Bradley Wright Phillips"="Bradley Wright-Phillips",
  "Dom Dwyer"="Dominic Dwyer"
)
to_rename<-fbref_matchnames$Player %in% names(asa_name_for)
fbref_matchnames$Player[to_rename]<-
  unname(asa_name_for[fbref_matchnames$Player[to_rename]])
# After the renames, list any ASA players still unmatched in FBRef
# (printed for manual inspection; ideally empty).
allplayers[!(allplayers %in% fbref_matchnames$Player)]
# Pull FBRef possession / pass-type / shooting / passing stats into `dat`.
# NOTE(review): if a player is missing from fbref_matchnames, the RHS of
# these assignments is length zero and the assignment errors — the check
# above is the only guard; confirm it prints character(0) before relying
# on this loop.
for (player in allplayers){
  dat$SuccDrib[dat$Player==player]<-fbref_matchnames$Succ[fbref_matchnames$Player==player]
  dat$AttDrib[dat$Player==player]<-fbref_matchnames$Att[fbref_matchnames$Player==player]
  dat$PlayerDrib[dat$Player==player]<-fbref_matchnames$X.Pl[fbref_matchnames$Player==player]
  dat$Carries[dat$Player==player]<-fbref_matchnames$Carries[fbref_matchnames$Player==player]
  dat$CarryPrgDis[dat$Player==player]<-fbref_matchnames$PrgDist[fbref_matchnames$Player==player]
  dat$TB[dat$Player==player]<-fbref_matchnames$TB[fbref_matchnames$Player==player]
  dat$PressPass[dat$Player==player]<-fbref_matchnames$Press[fbref_matchnames$Player==player]
  dat$SwPass[dat$Player==player]<-fbref_matchnames$Sw[fbref_matchnames$Player==player]
  # Open-play crosses: total crosses minus corner kicks.
  dat$Crs[dat$Player==player]<-
    fbref_matchnames$Crs[fbref_matchnames$Player==player] -
    fbref_matchnames$CK[fbref_matchnames$Player==player]
  dat$npxG[dat$Player==player]<-
    fbref_matchnames$npxG[fbref_matchnames$Player==player]
  dat$targeted[dat$Player==player]<-
    fbref_matchnames$Targ[fbref_matchnames$Player==player]
  dat$miscontrol[dat$Player==player]<-fbref_matchnames$Miscon[fbref_matchnames$Player==player]
  dat$passprg[dat$Player==player]<-fbref_matchnames$PrgDistPass[fbref_matchnames$Player==player]
  dat$penCrs[dat$Player==player]<-fbref_matchnames$CrsPAPass[fbref_matchnames$Player==player]
  # Pass-length mix as fractions of attempted passes (Att.1/2/3 presumably
  # correspond to short/medium/long attempts — TODO confirm against the
  # FBRef export's column order).
  dat$shortpasspct[dat$Player==player]<-
    fbref_matchnames$Att.1Pass[fbref_matchnames$Player==player]/
    fbref_matchnames$AttPass[fbref_matchnames$Player==player]
  dat$medpasspct[dat$Player==player]<-
    fbref_matchnames$Att.2Pass[fbref_matchnames$Player==player]/
    fbref_matchnames$AttPass[fbref_matchnames$Player==player]
  dat$longpasspct[dat$Player==player]<-
    fbref_matchnames$Att.3Pass[fbref_matchnames$Player==player]/
    fbref_matchnames$AttPass[fbref_matchnames$Player==player]
}
## write out ####
# Write the finished per-player summary table.
write.table(dat, '2019summary.txt', row.names = F)
|
/ASAclusters/preproc_ASA_fbref_2019.R
|
no_license
|
mimburgi/SoccerStuff
|
R
| false
| false
| 13,810
|
r
|
library(stringr)
library(dplyr)
## ASA data read in and preproc ####
# Player-level ASA tables for the 2019 season. stringsAsFactors is spelled
# out as FALSE (T/F are ordinary reassignable variables in R and should not
# be used for logicals).
chain<-read.csv('./2019/ASA_PlayerxGChain_per96table.csv', stringsAsFactors = FALSE)
A3pass<-read.csv('./2019/ASApassingtable-attackthird.csv', stringsAsFactors = FALSE)
M3pass<-read.csv('./2019/ASApassingtable-middlethird.csv', stringsAsFactors = FALSE)
D3pass<-read.csv('./2019/ASApassingtable-defthird.csv', stringsAsFactors = FALSE)
shoot<-read.csv('./2019/ASAshootertable.csv', stringsAsFactors = FALSE)
totalpass<-read.csv('./2019/ASApassingtable-total.csv', stringsAsFactors = FALSE)
# Keep only players with at least 1200 minutes; note the minutes column is
# named "Minutes" in the chain table but "Min" everywhere else.
chain<-subset(chain, Minutes > 1200)
A3pass<-subset(A3pass, Min > 1200)
M3pass<-subset(M3pass, Min > 1200)
D3pass<-subset(D3pass, Min > 1200)
shoot<-subset(shoot, Min > 1200)
totalpass<-subset(totalpass, Min > 1200)
# Team-level totals, used later to express player stats as team shares.
TeamA3pass<-read.csv('./2019/ASAteampassingtable_A3.csv', stringsAsFactors = FALSE)
TeamM3pass<-read.csv('./2019/ASAteampassingtable_M3.csv', stringsAsFactors = FALSE)
TeamD3pass<-read.csv('./2019/ASAteampassingtable_D3.csv', stringsAsFactors = FALSE)
Teamshoot<-read.csv('./2019/ASAteamshooter.csv', stringsAsFactors = FALSE)
Teamtotalpass<-read.csv('./2019/ASAteampassingtable_total.csv', stringsAsFactors = FALSE)
## ASA player data ####
# Keep only per-96-minute rate columns plus identifier columns from each
# table (the matches() patterns are regexes, so ".96" matches any character
# followed by "96" — adequate here since the rate columns all end in .96).
A3pass<-select(A3pass, matches(".96|Player|Pos"))
M3pass<-select(M3pass, matches(".96|Player|Pos"))
D3pass<-select(D3pass, matches(".96|Player|Pos"))
shoot<-select(shoot, matches(".96|Player|Pos|Dist"))
chain<-select(chain, matches(".96|Player|Pos|Team|xB."))
totalpass<-select(totalpass, matches(".96|Player|Pos|Score"))
# Union of every player appearing in any table; `dat` is the single wide
# per-player summary table that the rest of the script fills in.
allplayers<-c(chain$Player, A3pass$Player, M3pass$Player, D3pass$Player, shoot$Player, totalpass$Player)
allplayers<-unique(allplayers)
dat<-data.frame(Player=allplayers,
                Pos=as.character(rep(NA, length(allplayers))),
                stringsAsFactors = F)
# Copy each player's stats from whichever source tables contain them.
# Pos is (re)assigned by several tables; the last matching table wins.
for (player in allplayers){
  #add Position and Minutes
  if(player %in% shoot$Player){
    #dat$InShooter[dat$Player==player]<-1
    dat$Pos[dat$Player==player]<-shoot$Pos[shoot$Player==player]
    dat$shots[dat$Player==player]<-shoot$Shots.96[shoot$Player==player]
    dat$KP[dat$Player==player]<-shoot$KeyP.96[shoot$Player==player]
    dat$xG[dat$Player==player]<-shoot$xG.96[shoot$Player==player]
    dat$xA[dat$Player==player]<-shoot$xA.96[shoot$Player==player]
    dat$xPlace[dat$Player==player]<-shoot$xPlace.96[shoot$Player==player]
    dat$ShotDist[dat$Player==player]<-shoot$Dist[shoot$Player==player]
    dat$KPDist[dat$Player==player]<-shoot$Dist.key[shoot$Player==player]
  }
  if(player %in% A3pass$Player){
    #dat$InA3[dat$Player==player]<-1
    dat$Pos[dat$Player==player]<-A3pass$Pos[A3pass$Player==player]
    dat$A3Passes[dat$Player==player]<-A3pass$Passes.96[A3pass$Player==player]
  }
  if(player %in% M3pass$Player){
    #dat$InM3[dat$Player==player]<-1
    dat$Pos[dat$Player==player]<-M3pass$Pos[M3pass$Player==player]
    dat$M3Passes[dat$Player==player]<-M3pass$Passes.96[M3pass$Player==player]
  }
  if(player %in% D3pass$Player){
    #dat$InD3[dat$Player==player]<-1
    dat$Pos[dat$Player==player]<-D3pass$Pos[D3pass$Player==player]
    dat$D3Passes[dat$Player==player]<-D3pass$Passes.96[D3pass$Player==player]
  }
  if(player %in% chain$Player){
    #dat$InChain[dat$Player==player]<-1
    dat$Pos[dat$Player==player]<-chain$Pos[chain$Player==player]
    dat$percChain[dat$Player==player]<-chain$TeamChain.[chain$Player==player]
    dat$xGChain[dat$Player==player]<-chain$xGChain.96[chain$Player==player]
    dat$xB[dat$Player==player]<-chain$xB.96[chain$Player==player]
    dat$Team[dat$Player==player]<-chain$Team[chain$Player==player]
    dat$ShotChainPerc[dat$Player==player]<-chain$PlayerShot.[chain$Player==player]
    dat$KPChainPerc[dat$Player==player]<-chain$PlayerKP.[chain$Player==player]
    dat$xBperc[dat$Player==player]<-chain$xB.[chain$Player==player]
  }
  if(player %in% totalpass$Player){
    dat$Vertical[dat$Player==player]<-totalpass$Vertical.96[totalpass$Player==player]
    dat$PassPct[dat$Player==player]<-totalpass$PassPct.96[totalpass$Player==player]
    dat$PassDistance[dat$Player==player]<-totalpass$Distance.96[totalpass$Player==player]
    #dat$TouchPerc[dat$Player==player]<-totalpass$Touch..96[totalpass$Player==player]
    dat$xPassPerc[dat$Player==player]<-totalpass$xPassPct.96[totalpass$Player==player]
    dat$Passes[dat$Player==player]<-totalpass$Passes.96[totalpass$Player==player]
    dat$PassScore[dat$Player==player]<-totalpass$Score.96[totalpass$Player==player]
  }
}
dat[is.na(dat)]<-0 #assuming missing vals mean zeros
# Drop goalkeepers from the summary.
dat<-subset(dat, Pos != "GK")
# Derived shares and per-event rates.
dat$A3perc<-dat$A3Passes/dat$Passes
dat$M3perc<-dat$M3Passes/dat$Passes
dat$D3perc<-dat$D3Passes/dat$Passes
dat$xGper<-dat$xG/dat$shots
dat$xAper<-dat$xA/dat$KP
# Division by zero above yields NaN; zero those out as well.
dat[is.na(dat)]<-0 #assuming missing vals mean zeros
## ASA team data ####
# Attach season team totals to every player row so player counting stats can
# be expressed as a share of team production. dat$Team comes from the chain
# table; players who appeared for two teams have a comma-separated string
# there, and for them the two teams' totals are averaged.
for (player in seq_len(nrow(dat))){
  playerteam<-dat$Team[player]
  if (!str_detect(playerteam, ',')){ #single team all season
    dat$teamA3pass[player]<-
      TeamA3pass$PassF[TeamA3pass$Team==playerteam]
    dat$teamM3pass[player]<-
      TeamM3pass$PassF[TeamM3pass$Team==playerteam]
    dat$teamD3pass[player]<-
      TeamD3pass$PassF[TeamD3pass$Team==playerteam]
    dat$teampass[player]<-
      Teamtotalpass$PassF[Teamtotalpass$Team==playerteam]
    dat$teamxG[player]<-
      Teamshoot$xGF[Teamshoot$Team==playerteam]
    dat$teamshots[player]<-
      Teamshoot$ShtF[Teamshoot$Team==playerteam]
  } else { #two teams
    team1<-str_split(playerteam, ',')[[1]][1]
    team2<-str_split(playerteam, ',')[[1]][2]
    # BUG FIX: mean(x, y) passes y to mean()'s `trim` argument and silently
    # ignores it, so only team1's total was ever used; mean(c(x, y))
    # actually averages the two team totals.
    dat$teamA3pass[player]<-
      mean(c(TeamA3pass$PassF[TeamA3pass$Team==team1],
             TeamA3pass$PassF[TeamA3pass$Team==team2]))
    dat$teamM3pass[player]<-
      mean(c(TeamM3pass$PassF[TeamM3pass$Team==team1],
             TeamM3pass$PassF[TeamM3pass$Team==team2]))
    dat$teamD3pass[player]<-
      mean(c(TeamD3pass$PassF[TeamD3pass$Team==team1],
             TeamD3pass$PassF[TeamD3pass$Team==team2]))
    dat$teampass[player]<-
      mean(c(Teamtotalpass$PassF[Teamtotalpass$Team==team1],
             Teamtotalpass$PassF[Teamtotalpass$Team==team2]))
    dat$teamxG[player]<-
      mean(c(Teamshoot$xGF[Teamshoot$Team==team1],
             Teamshoot$xGF[Teamshoot$Team==team2]))
    dat$teamshots[player]<-
      mean(c(Teamshoot$ShtF[Teamshoot$Team==team1],
             Teamshoot$ShtF[Teamshoot$Team==team2]))
  }#end two teams if
}
# Player share of team totals.
dat$A3teamperc<-dat$A3Passes/dat$teamA3pass
dat$M3teamperc<-dat$M3Passes/dat$teamM3pass
dat$D3teamperc<-dat$D3Passes/dat$teamD3pass
dat$passteamperc<-dat$Passes/dat$teampass
# BUG FIX: these two shares previously divided dat$Passes by the team
# totals (copy-paste error); a player's share of team xG/shots should use
# the player's own xG and shots.
dat$xGteamperc<-dat$xG/dat$teamxG
dat$shotteamperc<-dat$shots/dat$teamshots
## FBRef data read in and preproc ####
poss<-read.csv('./2019/FBRef-Possession.csv', stringsAsFactors = FALSE)
passtype<-read.csv('./2019/FBRef-PassType.csv')
fbshoot<-read.csv('./2019/FBRef-Shoot.csv')
fbpass<-read.csv('./2019/FBRef-Pass.csv')
# The first (unnamed) column from FBRef exports is the row rank.
colnames(poss)[1]<-'Rank'
colnames(passtype)[1]<-'Rank'
colnames(fbshoot)[1]<-'Rank'
colnames(fbpass)[1]<-'Rank'
#reformat player names
# Strip the FBRef id prefix ("id/First-Last" -> "First Last") and drop the
# bio/meta columns that are never used downstream. Factored into a helper
# because the identical pipeline was repeated for all four tables.
clean_fbref_names <- function(df) {
  tidyr::separate(df, Player, into=c(NA, "Player"), sep='/') %>%
    select(-c(Rank, Pos, Squad, Nation, Age, Born, Matches)) %>%
    mutate(Player=gsub("-", " ", Player))
}
poss<-clean_fbref_names(poss)
passtype<-clean_fbref_names(passtype)
fbshoot<-clean_fbref_names(fbshoot)
fbpass<-clean_fbref_names(fbpass)
#average together rows for two team players
twoplayers<-poss$Player[duplicated(poss$Player)]
# Collapse each two-team player to a single row by averaging their numeric
# stats; single-team rows pass through untouched.
average_two_team_rows <- function(df, dupe_names) {
  single<-df[!(df$Player %in% dupe_names),]
  dupes<-df[(df$Player %in% dupe_names),]
  rbind(single, aggregate(.~Player, data = dupes, FUN=mean))
}
poss<-average_two_team_rows(poss, twoplayers)
passtype<-average_two_team_rows(passtype, twoplayers)
passtype<-passtype[passtype$Player!='Marquinhos Pedroso',]
fbshoot<-average_two_team_rows(fbshoot, twoplayers)
fbshoot<-fbshoot[fbshoot$Player!='Marquinhos Pedroso',]
fbpass<-average_two_team_rows(fbpass, twoplayers)
fbpass<-fbpass[fbpass$Player!='Marquinhos Pedroso',]
# Players who appear in the passing table but never shot: append all-zero
# shooting rows so the four tables line up row-for-row in the cbind below.
noshotplayers<-passtype$Player[!(passtype$Player %in% fbshoot$Player)]
for (player in noshotplayers){
  fbshoot[nrow(fbshoot)+1,]<-
    c(player, rep(0, length(fbshoot)-1))
}
# Suffix the passing columns so they do not collide with same-named
# possession columns in the combined table, then restore the key column.
colnames(fbpass)<-paste0(colnames(fbpass), 'Pass')
colnames(fbpass)[1]<-'Player'
# Sort every table by player name and combine into one wide FBRef table.
fbref<-cbind(poss %>% arrange(Player),
             passtype %>% arrange(Player) %>% select(-Player),
             fbshoot %>% arrange(Player) %>% select(-Player),
             fbpass %>% arrange(Player) %>% select(-Player))
#find any differences in spellings or anything
allplayers[!(allplayers %in% fbref$Player)]
#replace (is there a better way to do this?)
# Data-driven answer to the question above: a single lookup table mapping
# each FBRef spelling to the ASA spelling used in `dat`, applied in one pass
# instead of 27 separate assignments. Same renames, same result.
fbref_matchnames<-fbref
asa_name_for<-c(
  "Mark Anthony Kaye"="Mark-Anthony Kaye",
  "Marky Delgado"="Marco Delgado",
  "Jorge Luis Corrales"="Jorge Corrales",
  "Kaku"="Alejandro Romero Gamarra",
  "Gonzalo Nicolas Martinez"="Gonzalo Martinez",
  "Boniek Garcia"="Oscar Boniek Garcia",
  "Jhegson Mendez"="Sebastian Mendez",
  "Adam Lundqvist"="Adam Lundkvist",
  "Christopher Mueller"="Chris Mueller",
  "Fabio Alvarez"="Favio Alvarez",
  "Ray Gaddis"="Raymon Gaddis",
  "Hwang In beom"="Hwang In-Beom",
  "Michael Amir Murillo"="Michael Murillo",
  "Kim Kee hee"="Kim Kee-Hee",
  "Harold Santiago Mosquera"="Santiago Mosquera",
  "Marcelo dos Santos Ferreira"="Marcelo",
  "Ali Adnan Kadhim"="Ali Adnan",
  "Antonio Delamea Mlinar"="Antonio Mlinar Delamea",
  "Cristian Casseres"="Cristian Casseres Jr",
  "Gerso Fernandes"="Gerso",
  "Thomas McNamara"="Tommy McNamara",
  "Steve Birnbaum"="Steven Birnbaum",
  "Fafa Picault"="Fabrice-Jean Picault",
  "Jake Nerwinski"="Jakob Nerwinski",
  "CJ Sapong"="C.J. Sapong",
  "Bradley Wright Phillips"="Bradley Wright-Phillips",
  "Dom Dwyer"="Dominic Dwyer"
)
to_rename<-fbref_matchnames$Player %in% names(asa_name_for)
fbref_matchnames$Player[to_rename]<-
  unname(asa_name_for[fbref_matchnames$Player[to_rename]])
# After the renames, list any ASA players still unmatched in FBRef
# (printed for manual inspection; ideally empty).
allplayers[!(allplayers %in% fbref_matchnames$Player)]
# Pull FBRef possession / pass-type / shooting / passing stats into `dat`.
# NOTE(review): if a player is missing from fbref_matchnames, the RHS of
# these assignments is length zero and the assignment errors — the check
# above is the only guard; confirm it prints character(0) before relying
# on this loop.
for (player in allplayers){
  dat$SuccDrib[dat$Player==player]<-fbref_matchnames$Succ[fbref_matchnames$Player==player]
  dat$AttDrib[dat$Player==player]<-fbref_matchnames$Att[fbref_matchnames$Player==player]
  dat$PlayerDrib[dat$Player==player]<-fbref_matchnames$X.Pl[fbref_matchnames$Player==player]
  dat$Carries[dat$Player==player]<-fbref_matchnames$Carries[fbref_matchnames$Player==player]
  dat$CarryPrgDis[dat$Player==player]<-fbref_matchnames$PrgDist[fbref_matchnames$Player==player]
  dat$TB[dat$Player==player]<-fbref_matchnames$TB[fbref_matchnames$Player==player]
  dat$PressPass[dat$Player==player]<-fbref_matchnames$Press[fbref_matchnames$Player==player]
  dat$SwPass[dat$Player==player]<-fbref_matchnames$Sw[fbref_matchnames$Player==player]
  # Open-play crosses: total crosses minus corner kicks.
  dat$Crs[dat$Player==player]<-
    fbref_matchnames$Crs[fbref_matchnames$Player==player] -
    fbref_matchnames$CK[fbref_matchnames$Player==player]
  dat$npxG[dat$Player==player]<-
    fbref_matchnames$npxG[fbref_matchnames$Player==player]
  dat$targeted[dat$Player==player]<-
    fbref_matchnames$Targ[fbref_matchnames$Player==player]
  dat$miscontrol[dat$Player==player]<-fbref_matchnames$Miscon[fbref_matchnames$Player==player]
  dat$passprg[dat$Player==player]<-fbref_matchnames$PrgDistPass[fbref_matchnames$Player==player]
  dat$penCrs[dat$Player==player]<-fbref_matchnames$CrsPAPass[fbref_matchnames$Player==player]
  # Pass-length mix as fractions of attempted passes (Att.1/2/3 presumably
  # correspond to short/medium/long attempts — TODO confirm against the
  # FBRef export's column order).
  dat$shortpasspct[dat$Player==player]<-
    fbref_matchnames$Att.1Pass[fbref_matchnames$Player==player]/
    fbref_matchnames$AttPass[fbref_matchnames$Player==player]
  dat$medpasspct[dat$Player==player]<-
    fbref_matchnames$Att.2Pass[fbref_matchnames$Player==player]/
    fbref_matchnames$AttPass[fbref_matchnames$Player==player]
  dat$longpasspct[dat$Player==player]<-
    fbref_matchnames$Att.3Pass[fbref_matchnames$Player==player]/
    fbref_matchnames$AttPass[fbref_matchnames$Player==player]
}
## write out ####
# Write the finished per-player summary table.
write.table(dat, '2019summary.txt', row.names = F)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/itemfit.R
\name{orlando_itemf}
\alias{orlando_itemf}
\title{Orlando's statistic}
\usage{
orlando_itemf(patterns, G, zita, model)
}
\arguments{
\item{patterns}{matrix of patterns response, the frequency of each pattern and the latent traits.}
\item{G}{number of quadrature points.}
\item{zita}{matrix of estimations of the parameters of the items (discrimination, difficulty, guessing).}
\item{model}{type of model ( "1PL", "2PL", "3PL" ).}
}
\value{
Orlando's statistic, degrees of freedom and p-value for each item.
}
\description{
Calculate the values of the statistics S_x2 from
Maria Orlando and David Thissen.
}
\examples{
#Simulates a test and returns a list:
test <- simulateTest()
#the simulated data:
data <- test$test
#model:
mod=irtpp(dataset = data,model = "3PL")
#Convert parameters to a matrix
zz <- parameter.matrix(mod$z,byrow = FALSE)
#Estimating Latent Traits
p_mat <- mod$prob_mat
trait <- individual.traits(model="3PL", itempars = zz,method = "EAP",dataset = data,
probability_matrix = p_mat)
#Z3 PERSONFIT-Statistic
orlando_itemf(patterns = as.matrix(trait),G = 61,zita = mod$z,model = "3PL")
}
\author{
SICS Research, National University of Colombia \email{ammontenegrod@unal.edu.co}
}
\references{
Orlando, M. & Thissen, D. (2000). Likelihood-based item fit indices for dichotomous item
response theory models. \emph{Applied Psychological Measurement, 24}, 50-64.
}
\seealso{
\code{\link{z3_itemf}}
}
|
/man/orlando_itemf.Rd
|
no_license
|
cran/IRTpp
|
R
| false
| true
| 1,569
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/itemfit.R
\name{orlando_itemf}
\alias{orlando_itemf}
\title{Orlando's statistic}
\usage{
orlando_itemf(patterns, G, zita, model)
}
\arguments{
\item{patterns}{matrix of patterns response, the frequency of each pattern and the latent traits.}
\item{G}{number of quadrature points.}
\item{zita}{matrix of estimations of the parameters of the items (discrimination, difficulty, guessing).}
\item{model}{type of model ( "1PL", "2PL", "3PL" ).}
}
\value{
Orlando's statistic, degrees of freedom and p-value for each item.
}
\description{
Calculate the values of the statistics S_x2 from
Maria Orlando and David Thissen.
}
\examples{
#Simulates a test and returns a list:
test <- simulateTest()
#the simulated data:
data <- test$test
#model:
mod=irtpp(dataset = data,model = "3PL")
#Convert parameters to a matrix
zz <- parameter.matrix(mod$z,byrow = FALSE)
#Estimating Latent Traits
p_mat <- mod$prob_mat
trait <- individual.traits(model="3PL", itempars = zz,method = "EAP",dataset = data,
probability_matrix = p_mat)
#Z3 PERSONFIT-Statistic
orlando_itemf(patterns = as.matrix(trait),G = 61,zita = mod$z,model = "3PL")
}
\author{
SICS Research, National University of Colombia \email{ammontenegrod@unal.edu.co}
}
\references{
Orlando, M. & Thissen, D. (2000). Likelihood-based item fit indices for dichotomous item
response theory models. \emph{Applied Psychological Measurement, 24}, 50-64.
}
\seealso{
\code{\link{z3_itemf}}
}
|
library(arules)
library(arulesViz)
# Build the transactions object from a tab-separated "single"-format file
# (columns 1-2 = transaction id / item name; first line skipped as header)
transaksi <- read.transactions(file="https://storage.googleapis.com/dqlab-dataset/data_transaksi.txt", format="single", sep="\t", cols=c(1,2), skip=1)
# Display the list of transaction items
transaksi@itemInfo
# Display the list of transaction codes
transaksi@itemsetInfo
# View the transactions as a sparse item matrix
transaksi@data
# Display the relative item frequencies
itemFrequency(transaksi)
# Display the 3 most frequent items
data_item <- itemFrequency(transaksi, type="absolute")
# Sort data_item in decreasing order of frequency
data_item <- sort(data_item, decreasing=TRUE)
# Keep only the first 3 items
data_item <- data_item[1:3]
# Convert data_item to a data frame with columns Nama.Produk (product name) and Jumlah (count)
data_item <- data.frame("Nama Produk"=names(data_item), "Jumlah"=data_item, row.names=NULL)
print(data_item)
# Item-frequency bar chart
itemFrequencyPlot(transaksi)
# Inspect the itemsets of each transaction
inspect(transaksi)
# Generate association rules with apriori (default parameters)
apriori(transaksi)
# Generate association rules and store them in the variable mba
mba <- apriori(transaksi)
# View the rules with inspect()
inspect(mba)
# Filter rules whose rhs contains the item "Sirup" and display them
inspect(subset(mba, rhs %in% "Sirup"))
# Filter rules whose lhs contains the item "Gula" and display them
inspect(subset(mba, lhs %in% "Gula"))
# Filter rules with "Pet Food" on the lhs and "Sirup" on the rhs
inspect(subset(mba, lhs %in% "Pet Food" & rhs %in% "Sirup"))
# Generate rules with explicit support and confidence parameters
apriori(transaksi,parameter = list(supp = 0.1, confidence = 0.5))
# Inspect the resulting rules
mba <- apriori(transaksi,parameter = list(supp = 0.1, confidence = 0.5))
inspect(mba)
# Filter rules where the lhs or the rhs contains "Teh Celup"
inspect(subset(mba, lhs %in% "Teh Celup" | rhs %in% "Teh Celup"))
# Filter rules where lhs or rhs contains "Teh Celup" and lift is above 1
inspect(subset(mba, (lhs %in% "Teh Celup" | rhs %in% "Teh Celup") & lift>1))
# Filter rules whose lhs contains BOTH "Pet Food" and "Gula" (%ain% = match all)
inspect(subset(mba, (lhs %ain% c("Pet Food","Gula"))))
# Visualize the rules with lift > 1.1 as a graph
plot(subset(mba, lift>1.1), method="graph")
|
/mba_analysis.R
|
permissive
|
argaeryzal/market-basket-analysis
|
R
| false
| false
| 2,189
|
r
|
library(arules)
library(arulesViz)
# Membuat variabel transaksi
transaksi <- read.transactions(file="https://storage.googleapis.com/dqlab-dataset/data_transaksi.txt", format="single", sep="\t", cols=c(1,2), skip=1)
# Menampilkan daftar item transaksi
transaksi@itemInfo
# Menampilkan daftar kode transaksi
transaksi@itemsetInfo
# Tampilan transaksi dalam bentuk matrix
transaksi@data
# Menampilkan item frequency
itemFrequency(transaksi)
# Menampilkan 3 item terbanyak
data_item <- itemFrequency(transaksi, type="absolute")
#Melakukan sorting pada data_item
data_item <- sort(data_item, decreasing=TRUE)
#Mengambil 3 item pertama
data_item <- data_item[1:3]
#Konversi data_item menjadi data frame dengan kolom Nama_Produk dan Jumlah
data_item <- data.frame("Nama Produk"=names(data_item), "Jumlah"=data_item, row.names=NULL)
print(data_item)
# Grafik itemFrequency
itemFrequencyPlot(transaksi)
# Melihat Itemset per Transaksi dengan Inspect
inspect(transaksi)
# Menghasilkan Rules dengan Apriori
apriori(transaksi)
#Menghasilkan association rules dan disimpan sebagai variable mba
mba <- apriori(transaksi)
#Melihat isi dari rules dengan menggunakan fungsi inspect
inspect(mba)
#Filter rhs dengan item "Sirup" dan tampilkan
inspect(subset(mba, rhs %in% "Sirup"))
#Filter lhs dengan item "Gula" dan tampilkan
inspect(subset(mba, lhs %in% "Gula"))
#Filter lhs dengan item Pet Food dan rhs dengan item Sirup
inspect(subset(mba, lhs %in% "Pet Food" & rhs %in% "Sirup"))
#Menghasilkan Rules dengan Parameter Support dan Confidence
apriori(transaksi,parameter = list(supp = 0.1, confidence = 0.5))
#Inspeksi Rules Yang Dihasilkan
mba <- apriori(transaksi,parameter = list(supp = 0.1, confidence = 0.5))
inspect(mba)
#Filter dimana lhs atau rhs keduanya memiliki item Teh Celup
inspect(subset(mba, lhs %in% "Teh Celup" | rhs %in% "Teh Celup"))
#Filter dimana lhs atau rhs memiliki item Teh Celup dan Lift diatas 1
inspect(subset(mba, (lhs %in% "Teh Celup" | rhs %in% "Teh Celup") & lift>1))
#Filter dimana lhs memiliki item Gula dan Pet Food
inspect(subset(mba, (lhs %ain% c("Pet Food","Gula"))))
#Visualisasi Rules dengan Graph
plot(subset(mba, lift>1.1), method="graph")
|
#
# counting sort algorithm
# Introduction to Algorithms, 3rd edition, chapter 8 (counting sort)
#
# clean the workspace and memory
rm( list=ls() )
gc()
aaa <- sample(10,10,replace = T)
count_sort <- function( aaa ){
  ## Counting sort (CLRS 3rd ed., chapter 8).
  ## Sorts a vector of positive integers in O(n + k) time, where
  ## k = max(aaa). The backward pass over the input keeps the sort
  ## stable, exactly as in the textbook presentation.
  ##
  ## Args:
  ##   aaa: vector of positive integers (values in 1..k).
  ## Returns:
  ##   The elements of `aaa` in ascending order.
  # Robustness fix: the empty vector is already sorted; without this
  # guard, max() would warn and return -Inf, then rep() would error.
  if (length(aaa) == 0) {
    return(aaa)
  }
  k <- max(aaa)
  # Tally the occurrences of each value 1..k.
  counts <- rep(0, k)
  for (v in aaa) {
    counts[v] <- counts[v] + 1
  }
  # Turn tallies into cumulative positions: counts[v] is now the last
  # output slot occupied by value v.
  counts <- cumsum(counts)
  # Preallocate the output instead of growing it index by index.
  sorted <- vector(mode = mode(aaa), length = length(aaa))
  # Walk the input back to front so equal values keep their relative
  # order (stability).
  for (v in rev(aaa)) {
    sorted[counts[v]] <- v
    counts[v] <- counts[v] - 1
  }
  return(sorted)
}
bbb <- count_sort(aaa)
|
/books/cs/general-purpose-algorithms/introduction-to-algorithms/3rd-edition/chapter8/R/counting_sort.R
|
permissive
|
Winfredemalx54/algorithm-challenger-1
|
R
| false
| false
| 463
|
r
|
#
# counting sort algorithm
# introduction to algorithm, 3rd edition, chapter 3
#
# clean the workspace and memory
rm( list=ls() )
gc()
aaa <- sample(10,10,replace = T)
count_sort <- function( aaa ){
k <- max(aaa)
bbb <- NULL
ccc <- rep(0,k)
for(idx in aaa){
ccc[idx] <- ccc[idx] + 1
}
ccc <- cumsum(ccc)
aaar <- rev(aaa)
for(idx in aaar){
bbb[ccc[idx]] <- idx
ccc[idx] <- ccc[idx] - 1
}
return(bbb)
}
bbb <- count_sort(aaa)
|
\name{fdiscgammagpd}
\alias{fdiscgammagpd}
\title{Fit the discrete gamma-GPD spliced threshold model}
\usage{
fdiscgammagpd(x, useq, shift = NULL, pvector=NULL,
std.err = TRUE, method = "Nelder-Mead", ...)
}
\description{
This function takes count data and fits the gamma-GPD spliced threshold model to it. The model consists of a discrete truncated gamma as the bulk distribution, up to the threshold, and a discrete GPD at and above the threshold. The 'shift' is ideally the minimum count in the sample.
}
\arguments{
\item{x}{A vector of count data.}
\item{useq}{A vector of possible thresholds to search over. These should be discrete numbers.}
\item{shift}{The amount the distribution is shifted. It is recommended to use the minimum number in the count data when modeling the clone size distribution of the TCR repertoire.}
\item{pvector}{A vector of 5 elements corresponding to the initial parameter estimates. These 5 initial values are for the gamma shape and rate, the threshold, and the GPD sigma and xi. If they are not prespecified, the function computes pvector automatically.}
\item{std.err}{Logical. Should the standard errors on the estimates be computed from the Hessian matrix?}
\item{method}{Character string listing optimization method fed to \link[stats]{optim}. Defaults to Nelder-Mead.}
\item{...}{Other arguments passed to the function.}
}
\value{
\item{x}{Numerical vector of the original data input}
\item{shift}{Numeric specifying the original shift input.}
\item{init}{Numerical vector of the initial values of the parameter estimates. This is the same as pvector.}
\item{useq}{Numerical vector containing the thresholds the grid search was performed over.}
\item{nllhuseq}{Numerical vector of negative log likelihoods computed at each threshold in useq.}
\item{optim}{Output from optim for the bulk and tail distributions.}
\item{nllh}{The negative log likelihood corresponding to the maximum likelihood fitted distribution.}
\item{mle}{A numerical vector containing the estimates for phi, shape, rate, threshold, sigma, and xi.}
\item{fisherInformation}{The Fisher information matrix computed from the Hessian output from optim.}
}
\author{\email{hbk5086@psu.edu}}
\examples{
data("repertoires")
thresholds1 <- unique(round(quantile(repertoires[[1]], c(.75,.8,.85,.9,.95))))
thresholds2 <- unique(round(quantile(repertoires[[2]], c(.75,.8,.85,.9,.95))))
fit1 <- fdiscgammagpd(repertoires[[1]], useq = thresholds1,
shift = min(repertoires[[1]]))
fit2 <- fdiscgammagpd(repertoires[[2]], useq = thresholds2,
shift = min(repertoires[[2]]))
fit1
fit2
}
|
/man/fdiscgammagpd.Rd
|
permissive
|
hillarykoch/powerTCR
|
R
| false
| false
| 2,693
|
rd
|
\name{fdiscgammagpd}
\alias{fdiscgammagpd}
\title{Fit the discrete gamma-GPD spliced threshold model}
\usage{
fdiscgammagpd(x, useq, shift = NULL, pvector=NULL,
std.err = TRUE, method = "Nelder-Mead", ...)
}
\description{
This function takes count data and fits the gamma-GPD spliced threshold model to it. The model consists of a discrete truncated gamma as the bulk distribution, up to the threshold, and a discrete GPD at and above the threshold. The 'shift' is ideally the minimum count in the sample.
}
\arguments{
\item{x}{A vector of count data.}
\item{useq}{A vector of possible thresholds to search over. These should be discrete numbers.}
\item{shift}{The amount the distribution is shifted. It is recommended to use the minimum number in the count data when modeling the clone size distribution of the TCR repertoire.}
\item{pvector}{A vector of 5 elements corresponding to the initial parameter estimates. These 5 initial values are for the gamma shape and rate, the threshold, and the GPD sigma and xi. If they are not prespecified, the function computes pvector automatically.}
\item{std.err}{Logical. Should the standard errors on the estimates be computed from the Hessian matrix?}
\item{method}{Character string listing optimization method fed to \link[stats]{optim}. Defaults to Nelder-Mead.}
\item{...}{Other arguments passed to the function.}
}
\value{
\item{x}{Numerical vector of the original data input}
\item{shift}{Numeric specifying the original shift input.}
\item{init}{Numerical vector of the initial values of the parameter estimates. This is the same as pvector.}
\item{useq}{Numerical vector containing the thresholds the grid search was performed over.}
\item{nllhuseq}{Numerical vector of negative log likelihoods computed at each threshold in useq.}
\item{optim}{Output from optim for the bulk and tail distributions.}
\item{nllh}{The negative log likelihood corresponding to the maximum likelihood fitted distribution.}
\item{mle}{A numerical vector containing the estimates for phi, shape, rate, threshold, sigma, and xi.}
\item{fisherInformation}{The Fisher information matrix computed from the Hessian output from optim.}
}
\author{\email{hbk5086@psu.edu}}
\examples{
data("repertoires")
thresholds1 <- unique(round(quantile(repertoires[[1]], c(.75,.8,.85,.9,.95))))
thresholds2 <- unique(round(quantile(repertoires[[2]], c(.75,.8,.85,.9,.95))))
fit1 <- fdiscgammagpd(repertoires[[1]], useq = thresholds1,
shift = min(repertoires[[1]]))
fit2 <- fdiscgammagpd(repertoires[[2]], useq = thresholds2,
shift = min(repertoires[[2]]))
fit1
fit2
}
|
dataSubsetRaw <- read.table( "./household_power_consumption.txt"
, header = FALSE
, sep = ";"
, col.names = c( "Date"
, "Time"
, "Global_active_power"
, "Global_reactive_power"
, "Voltage"
, "Global_intensity"
, "Sub_metering_1"
, "Sub_metering_2"
, "Sub_metering_3"
)
, colClasses = c( "character"
, "character"
, "numeric"
, "numeric"
, "numeric"
, "numeric"
, "numeric"
, "numeric"
, "numeric"
)
, skip = 66637
, nrows = 2880
, na.strings = "?"
)
# Date and Time columns are concatenated, converted to POSIXct
# and added as column DateTime
# Date column is converted to date and added as column Date
# to the newly constructed data frame
# All but the columns Date and Time from the original dataset
# are appended to the data frame retaining names and formats
dataSubset <- data.frame( DateTime = strptime( paste( dataSubsetRaw$Date
, dataSubsetRaw$Time
)
, "%d/%m/%Y %H:%M:%S"
)
, Date = as.Date( dataSubsetRaw$Date, "%d/%m/%Y" )
, dataSubsetRaw[,3:9]
)
# Auto labeling uses localized names for weekday
# abbreviations. Since this is a German environment
# we need to switch to the US English locale
# to reproduce the exact result.
Sys.setlocale("LC_TIME", "English")
# opening the PNG graphics device and associating
# it with an output file of the required dimensions.
# The original files in the figure subdirectory
# have transparent background color, but I choose
# a white background here, because this resembles more
# closely the presentation of the master plots.
png( file = "plot2.png"
, width = 480
, height = 480
, units = "px"
, bg = "white"
)
# drawing the line diagram on the PNG device
# keeping most of the defaults
plot( dataSubset$DateTime
, dataSubset$Global_active_power
, type = "l"
, xlab = NA
, ylab = "Global Active Power (kilowatts)"
)
# close the PNG device and save the output file
dev.off()
# Switch back to German locale (on Windows)
Sys.setlocale("LC_TIME", "German")
# cleaning up the environment
rm(dataSubsetRaw, dataSubset)
|
/plot2.R
|
no_license
|
yas91/ExData_Plotting1
|
R
| false
| false
| 3,080
|
r
|
dataSubsetRaw <- read.table( "./household_power_consumption.txt"
, header = FALSE
, sep = ";"
, col.names = c( "Date"
, "Time"
, "Global_active_power"
, "Global_reactive_power"
, "Voltage"
, "Global_intensity"
, "Sub_metering_1"
, "Sub_metering_2"
, "Sub_metering_3"
)
, colClasses = c( "character"
, "character"
, "numeric"
, "numeric"
, "numeric"
, "numeric"
, "numeric"
, "numeric"
, "numeric"
)
, skip = 66637
, nrows = 2880
, na.strings = "?"
)
# Date an Time columns are concatenated, converted to POSIXct
# and added as column DateTime
# Date column is converted to date and added as column Date
# to the newly constructed data frame
# All but the columns Date and Time from the original dataset
# are appended to the data frame retaining names and formats
dataSubset <- data.frame( DateTime = strptime( paste( dataSubsetRaw$Date
, dataSubsetRaw$Time
)
, "%d/%m/%Y %H:%M:%S"
)
, Date = as.Date( dataSubsetRaw$Date, "%d/%m/%Y" )
, dataSubsetRaw[,3:9]
)
# Auto labeling uses localized names for weekday
# abbreviations. Since this a German environment
# we need to switch to the US English locale
# to reproduce the exact result.
Sys.setlocale("LC_TIME", "English")
# opening the PNG graphics device and associating
# it with an output file of the required dimensions.
# The original files in the figure subdirectory
# have transparent background color, but I choose
# a white backgroud here , because this resembles more
# closely the presentation of the master plots.
png( file = "plot2.png"
, width = 480
, height = 480
, units = "px"
, bg = "white"
)
# drawing the line diagram on the PNG device
# keeping most of the defaults
plot( dataSubset$DateTime
, dataSubset$Global_active_power
, type = "l"
, xlab = NA
, ylab = "Global Active Power (kilowatts)"
)
# close the PNG device and save the output file
dev.off()
# Switch back to German locale (on Windows)
Sys.setlocale("LC_TIME", "German")
# cleaning up the environment
rm(dataSubsetRaw, dataSubset)
|
test<-function(x){
## Demonstration function: shows that the `<<-` operator assigns to a
## variable in an enclosing environment (here: the global environment)
## instead of creating a local binding in the function's own frame.
##
## Args:
##   x: a value accepted by `^` (typically numeric).
## Returns:
##   `x` unchanged; as a side effect, global `a` is set to x^2.
a<<-x^2
## Side effect: binds `a` in the global environment via superassignment.
x
}
makeCacheMatrix <- function(x = numeric()) {
  ## Build a special "matrix" wrapper able to cache its inverse.
  ## Returns a list of four closures that share this call's environment:
  ##   set/get        - replace / read the wrapped matrix,
  ##   setinverse/getinverse - store / read the cached inverse.
  cached <- NULL

  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      cached <<- NULL
    },
    get = function() x,
    setinverse = function(solve) cached <<- solve,
    getinverse = function() cached
  )
}
y<<-makeCacheMatrix(x)
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" object produced by
  ## makeCacheMatrix(). The inverse is computed with solve() on the
  ## first call and cached inside the object; subsequent calls return
  ## the cached value without recomputing.
  ##
  ## Args:
  ##   x:   a list created by makeCacheMatrix() exposing get/set/
  ##        getinverse/setinverse closures.
  ##   ...: further arguments passed on to solve().
  ## Returns:
  ##   The matrix inverse of the data held by `x`.
  # Bug fix: the original body consulted the global variable `y`
  # instead of the argument `x`, so the function only ever worked for
  # one hard-coded object and ignored what the caller passed in.
  m <- x$getinverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setinverse(m)
  m
}
|
/assessment.R
|
no_license
|
Lchiffon/ExerciseByRprogramming
|
R
| false
| false
| 761
|
r
|
test<-function(x){
## it is a test function showing that
## the symbol "<<-" is give the value to
## a variance in global Environment instead of
## current enviroment
a<<-x^2
## give a value to a in global environment
x
}
makeCacheMatrix <- function(x = numeric()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(solve) m <<- solve
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
y<<-makeCacheMatrix(x)
cacheSolve <- function(x, ...) {
m <- y$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- y$get()
m <- solve(data, ...)
y$setinverse(m)
m
}
|
require(shiny)
require(shinydashboard)
require(SmarterPoland)
require(ggplot2)
require(dplyr)
require(readxl)
require(hablar)
dane10 <- read_excel('PM10.xlsx', skip = 1)
dane25 <- read_excel('PM25.xlsx', skip = 1)
Kody10 <- c('MzWarAlNiepo', 'MpKrakTelime', 'LdLodzRudzka', 'DsWrocOrzech', 'WpPoznChwial', 'PmGdyJozBema', 'ZpSzczecPrze', 'KpBydPlPozna', 'LbLubSliwins', 'PdBialWaszyn')
Kody25 <- c('MzWarWokalna', 'MpKrakBujaka', 'LdLodzLegion', 'DsWrocNaGrob', 'WpPoznPolank', 'PmGdaPowWiel', 'ZpSzczAndr01', 'KpBydBerling', 'LbLubSliwins', 'PdBialWarsza')
Miasta <- c('Warszawa', 'Kraków', 'Łódź', 'Wrocław', 'Poznań', 'Gdańsk', 'Szczecin', 'Bydgoszcz', 'Lublin', 'Białystok')
Dni = seq(from = as.Date('2017-01-01'), to = as.Date('2017-12-31'), by = 'day')
Stat <- c('Średnia', 'Minimum', 'Maksimum')
TN <- c('NIE', 'TAK')
PM10 <- data.frame(Data = Dni)
PM25 <- data.frame(Data = Dni)
for (i in 1:10){
temp <- dane10[,Kody10[i]]
temp <- temp[-(1:4),] %>% retype()
temp <- round(temp, 2)
colnames(temp) <- Miasta[i]
PM10 <- cbind(PM10, temp)
temp <- dane25[,Kody25[i]]
temp <- temp[-(1:4),] %>% retype()
temp <- round(temp, 2)
colnames(temp) <- Miasta[i]
PM25 <- cbind(PM25, temp)
}
stat10 <- t(apply(PM10[,-1], 2 , function(x) round(c(mean(x, na.rm = TRUE), min(x, na.rm = TRUE), max(x, na.rm = TRUE)), 2)))
stat10 <- cbind(Miasta, stat10, rep('PM 10', 10))
colnames(stat10) <- c('Miasta', 'Średnia', 'Minimum', 'Maksimum', 'PM')
row.names(stat10) <- NULL
stat10 <- data.frame(stat10) %>% retype()
stat25 <- t(apply(PM25[,-1], 2 , function(x) round(c(mean(x, na.rm = TRUE), min(x, na.rm = TRUE), max(x, na.rm = TRUE)), 2)))
stat25 <- cbind(Miasta, stat25, rep('PM 2.5', 10))
colnames(stat25) <- c('Miasta', 'Średnia', 'Minimum', 'Maksimum', 'PM')
row.names(stat25) <- NULL
stat25 <- data.frame(stat25) %>% retype()
stat <- rbind(stat10, stat25)
ui <- dashboardPage(
skin = 'red',
dashboardHeader(title = 'MENU'),
dashboardSidebar(
sidebarMenu(
h2('Wykres roczny: '),
selectizeInput(inputId = 'chosen_city_1',
label = 'Wybierz I miasto: ',
choices = Miasta),
selectizeInput(inputId = 'chosen_city_2',
label = 'Wybierz II miasto: ',
choices = c(Miasta[2], Miasta[1], Miasta[3:10])),
h2('Wykres słupkowy: '),
selectizeInput(inputId = 'chosen_stat',
label = 'Rodzaj statystyki: ',
choices = Stat),
checkboxGroupInput(inputId = 'chosen_citys',
label = 'Wybierz miasta: ',
choices = unique(Miasta),
selected = unique(Miasta)),
h2('Wyświetlić opis: '),
selectizeInput(inputId = 'chosen_describe',
label = 'Wyświetlić opis: ',
choices = TN)
)
),
dashboardBody(
fluidPage(
h1('Zanieczyszczenie powietrza w 10 największych miastach w Polsce'),
h1(' '),
textOutput('describe'),
h1(' '),
fluidRow(column(5, plotOutput('distPlot1')), column(5, plotOutput('distPlot2'))),
h1(' '),
fluidRow(column(10, plotOutput('distPlot3')))
)
)
)
server <- function(input, output) {
output[['distPlot1']] <- renderPlot({
city1 <- input[['chosen_city_1']]
city2 <- input[['chosen_city_2']]
pomiary <- cbind(PM25[,c('Data', city1)], PM25[ ,city2])
colnames(pomiary) <- c('Data', 'Wartosc1', 'Wartosc2')
ggplot(pomiary, aes(x = Data)) +
geom_line(aes(y = Wartosc1, colour = city1)) +
geom_line(aes(y = Wartosc2, colour = city2)) +
geom_hline(yintercept = 25, colour = 'blue', alpha = 0.8, size = 1) +
theme_minimal() +
theme(axis.line = element_line(color = 'black'),
panel.grid.major.x = element_blank(),
legend.title = element_blank(),
legend.text = element_text(size = 10),
axis.text.x = element_text(angle = 45, size = 10, hjust = 1, face = 'bold'),
axis.text.y = element_text(size = 10, face = 'bold'),
axis.title.x = element_text(size = 15, face = 'bold'),
axis.title.y = element_text(size = 15, face = 'bold'),
plot.title = element_text(size = 20, face = 'bold', hjust = 0.5)) +
labs(title = 'Pomiar stężenia PM 2.5 w powietrzu', x = 'Data pomiaru', y = 'Wartość PM 2.5 [ug/m3]')
})
output[['distPlot2']] <- renderPlot({
city1 <- input[['chosen_city_1']]
city2 <- input[['chosen_city_2']]
pomiary <- cbind(PM10[,c('Data', city1)], PM10[ ,city2])
colnames(pomiary) <- c('Data', 'Wartosc1', 'Wartosc2')
ggplot(pomiary, aes(x = Data)) +
geom_line(aes(y = Wartosc1, colour = city1)) +
geom_line(aes(y = Wartosc2, colour = city2)) +
geom_hline(yintercept = 50, colour = 'blue', alpha = 0.8, size = 1) +
theme_minimal() +
theme(axis.line = element_line(color = 'black'),
panel.grid.major.x = element_blank(),
legend.title = element_blank(),
legend.text = element_text(size = 10),
axis.text.x = element_text(angle = 45, size = 10, hjust = 1, face = 'bold'),
axis.text.y = element_text(size = 10, face = 'bold'),
axis.title.x = element_text(size = 15, face = 'bold'),
axis.title.y = element_text(size = 15, face = 'bold'),
plot.title = element_text(size = 20, face = 'bold', hjust = 0.5)) +
labs(title = 'Pomiar stężenia PM 10 w powietrzu', x = 'Data pomiaru', y = 'Wartość PM 10 [ug/m3]')
})
output[['distPlot3']] <- renderPlot({
citys <- input[['chosen_citys']]
choise <- input[['chosen_stat']]
temp <- stat %>% filter(Miasta %in% citys) %>% select(Miasta, choise, PM)
colnames(temp) <- c('Miasta','S','PM')
ggplot(data = temp, aes(x = Miasta, y = S), fill = PM) +
geom_bar(aes(fill = factor(PM)), position = 'dodge', stat = 'identity', width = 0.5) +
theme_minimal() +
theme(axis.line = element_line(color = 'black'),
panel.grid.major.x = element_blank(),
legend.title = element_blank(),
legend.text = element_text(size = 10),
axis.text.x = element_text(angle = 45, size = 10, hjust = 1, face = 'bold'),
axis.text.y = element_text(size = 10, face = 'bold'),
axis.title.x = element_text(size = 15, face = 'bold'),
axis.title.y = element_text(size = 15, face = 'bold'),
plot.title = element_text(size = 20, face = 'bold', hjust = 0.5)) +
labs(title = paste('Zanieczyszczenie powietrza -', tolower(choise), 'z całego roku'), x = 'Miasto', y = 'Stężenie [ug/m3]')
})
output[['describe']] <- renderText({
if (input[['chosen_describe']] == 'TAK'){
'Aplikacja pozwala poznać stan powietrza, a dokładnie stężenie pyłków zawieszonych PM 2.5 oraz PM 10 w 10 największych (pod względem mieszkańców) miastach Polski w 2017 roku.
Na pierwszych 2 wykresach można porównywać pomiary stężenia obu pyłków zawiszonych w powietrzu dla 2 wybranych miast.
Dodadkowo niebieską linią zaznaczone są dopuszczalne stężenia pyłków.
Natomiast wykres słupkowy prezentuje dla wybranych miast roczne zestawienie (średnia, minimum, maksimum) pomiarów stężenia.'
}
})
}
shinyApp(ui = ui, server = server)
|
/PraceDomowe/PD7/gr4/Michal_Maciag/PD7.R
|
no_license
|
Siemashko/TechnikiWizualizacjiDanych2018
|
R
| false
| false
| 7,684
|
r
|
require(shiny)
require(shinydashboard)
require(SmarterPoland)
require(ggplot2)
require(dplyr)
require(readxl)
require(hablar)
dane10 <- read_excel('PM10.xlsx', skip = 1)
dane25 <- read_excel('PM25.xlsx', skip = 1)
Kody10 <- c('MzWarAlNiepo', 'MpKrakTelime', 'LdLodzRudzka', 'DsWrocOrzech', 'WpPoznChwial', 'PmGdyJozBema', 'ZpSzczecPrze', 'KpBydPlPozna', 'LbLubSliwins', 'PdBialWaszyn')
Kody25 <- c('MzWarWokalna', 'MpKrakBujaka', 'LdLodzLegion', 'DsWrocNaGrob', 'WpPoznPolank', 'PmGdaPowWiel', 'ZpSzczAndr01', 'KpBydBerling', 'LbLubSliwins', 'PdBialWarsza')
Miasta <- c('Warszawa', 'Kraków', 'Łódź', 'Wrocław', 'Poznań', 'Gdańsk', 'Szczecin', 'Bydgoszcz', 'Lublin', 'Białystok')
Dni = seq(from = as.Date('2017-01-01'), to = as.Date('2017-12-31'), by = 'day')
Stat <- c('Średnia', 'Minimum', 'Maksimum')
TN <- c('NIE', 'TAK')
PM10 <- data.frame(Data = Dni)
PM25 <- data.frame(Data = Dni)
for (i in 1:10){
temp <- dane10[,Kody10[i]]
temp <- temp[-(1:4),] %>% retype()
temp <- round(temp, 2)
colnames(temp) <- Miasta[i]
PM10 <- cbind(PM10, temp)
temp <- dane25[,Kody25[i]]
temp <- temp[-(1:4),] %>% retype()
temp <- round(temp, 2)
colnames(temp) <- Miasta[i]
PM25 <- cbind(PM25, temp)
}
stat10 <- t(apply(PM10[,-1], 2 , function(x) round(c(mean(x, na.rm = TRUE), min(x, na.rm = TRUE), max(x, na.rm = TRUE)), 2)))
stat10 <- cbind(Miasta, stat10, rep('PM 10', 10))
colnames(stat10) <- c('Miasta', 'Średnia', 'Minimum', 'Maksimum', 'PM')
row.names(stat10) <- NULL
stat10 <- data.frame(stat10) %>% retype()
stat25 <- t(apply(PM25[,-1], 2 , function(x) round(c(mean(x, na.rm = TRUE), min(x, na.rm = TRUE), max(x, na.rm = TRUE)), 2)))
stat25 <- cbind(Miasta, stat25, rep('PM 2.5', 10))
colnames(stat25) <- c('Miasta', 'Średnia', 'Minimum', 'Maksimum', 'PM')
row.names(stat25) <- NULL
stat25 <- data.frame(stat25) %>% retype()
stat <- rbind(stat10, stat25)
ui <- dashboardPage(
skin = 'red',
dashboardHeader(title = 'MENU'),
dashboardSidebar(
sidebarMenu(
h2('Wykres roczny: '),
selectizeInput(inputId = 'chosen_city_1',
label = 'Wybierz I miasto: ',
choices = Miasta),
selectizeInput(inputId = 'chosen_city_2',
label = 'Wybierz II miasto: ',
choices = c(Miasta[2], Miasta[1], Miasta[3:10])),
h2('Wykres słupkowy: '),
selectizeInput(inputId = 'chosen_stat',
label = 'Rodzaj statystyki: ',
choices = Stat),
checkboxGroupInput(inputId = 'chosen_citys',
label = 'Wybierz miasta: ',
choices = unique(Miasta),
selected = unique(Miasta)),
h2('Wyświetlić opis: '),
selectizeInput(inputId = 'chosen_describe',
label = 'Wyświetlić opis: ',
choices = TN)
)
),
dashboardBody(
fluidPage(
h1('Zanieczyszczenie powietrza w 10 największych miastach w Polsce'),
h1(' '),
textOutput('describe'),
h1(' '),
fluidRow(column(5, plotOutput('distPlot1')), column(5, plotOutput('distPlot2'))),
h1(' '),
fluidRow(column(10, plotOutput('distPlot3')))
)
)
)
server <- function(input, output) {
output[['distPlot1']] <- renderPlot({
city1 <- input[['chosen_city_1']]
city2 <- input[['chosen_city_2']]
pomiary <- cbind(PM25[,c('Data', city1)], PM25[ ,city2])
colnames(pomiary) <- c('Data', 'Wartosc1', 'Wartosc2')
ggplot(pomiary, aes(x = Data)) +
geom_line(aes(y = Wartosc1, colour = city1)) +
geom_line(aes(y = Wartosc2, colour = city2)) +
geom_hline(yintercept = 25, colour = 'blue', alpha = 0.8, size = 1) +
theme_minimal() +
theme(axis.line = element_line(color = 'black'),
panel.grid.major.x = element_blank(),
legend.title = element_blank(),
legend.text = element_text(size = 10),
axis.text.x = element_text(angle = 45, size = 10, hjust = 1, face = 'bold'),
axis.text.y = element_text(size = 10, face = 'bold'),
axis.title.x = element_text(size = 15, face = 'bold'),
axis.title.y = element_text(size = 15, face = 'bold'),
plot.title = element_text(size = 20, face = 'bold', hjust = 0.5)) +
labs(title = 'Pomiar stężenia PM 2.5 w powietrzu', x = 'Data pomiaru', y = 'Wartość PM 2.5 [ug/m3]')
})
output[['distPlot2']] <- renderPlot({
city1 <- input[['chosen_city_1']]
city2 <- input[['chosen_city_2']]
pomiary <- cbind(PM10[,c('Data', city1)], PM10[ ,city2])
colnames(pomiary) <- c('Data', 'Wartosc1', 'Wartosc2')
ggplot(pomiary, aes(x = Data)) +
geom_line(aes(y = Wartosc1, colour = city1)) +
geom_line(aes(y = Wartosc2, colour = city2)) +
geom_hline(yintercept = 50, colour = 'blue', alpha = 0.8, size = 1) +
theme_minimal() +
theme(axis.line = element_line(color = 'black'),
panel.grid.major.x = element_blank(),
legend.title = element_blank(),
legend.text = element_text(size = 10),
axis.text.x = element_text(angle = 45, size = 10, hjust = 1, face = 'bold'),
axis.text.y = element_text(size = 10, face = 'bold'),
axis.title.x = element_text(size = 15, face = 'bold'),
axis.title.y = element_text(size = 15, face = 'bold'),
plot.title = element_text(size = 20, face = 'bold', hjust = 0.5)) +
labs(title = 'Pomiar stężenia PM 10 w powietrzu', x = 'Data pomiaru', y = 'Wartość PM 10 [ug/m3]')
})
output[['distPlot3']] <- renderPlot({
citys <- input[['chosen_citys']]
choise <- input[['chosen_stat']]
temp <- stat %>% filter(Miasta %in% citys) %>% select(Miasta, choise, PM)
colnames(temp) <- c('Miasta','S','PM')
ggplot(data = temp, aes(x = Miasta, y = S), fill = PM) +
geom_bar(aes(fill = factor(PM)), position = 'dodge', stat = 'identity', width = 0.5) +
theme_minimal() +
theme(axis.line = element_line(color = 'black'),
panel.grid.major.x = element_blank(),
legend.title = element_blank(),
legend.text = element_text(size = 10),
axis.text.x = element_text(angle = 45, size = 10, hjust = 1, face = 'bold'),
axis.text.y = element_text(size = 10, face = 'bold'),
axis.title.x = element_text(size = 15, face = 'bold'),
axis.title.y = element_text(size = 15, face = 'bold'),
plot.title = element_text(size = 20, face = 'bold', hjust = 0.5)) +
labs(title = paste('Zanieczyszczenie powietrza -', tolower(choise), 'z całego roku'), x = 'Miasto', y = 'Stężenie [ug/m3]')
})
output[['describe']] <- renderText({
if (input[['chosen_describe']] == 'TAK'){
'Aplikacja pozwala poznać stan powietrza, a dokładnie stężenie pyłków zawieszonych PM 2.5 oraz PM 10 w 10 największych (pod względem mieszkańców) miastach Polski w 2017 roku.
Na pierwszych 2 wykresach można porównywać pomiary stężenia obu pyłków zawiszonych w powietrzu dla 2 wybranych miast.
Dodadkowo niebieską linią zaznaczone są dopuszczalne stężenia pyłków.
Natomiast wykres słupkowy prezentuje dla wybranych miast roczne zestawienie (średnia, minimum, maksimum) pomiarów stężenia.'
}
})
}
shinyApp(ui = ui, server = server)
|
### The following file contains functions were constructed in the process of writing the manuscipt and are required
# to compile the PDF document.
#
# A cleaned subset of these functions have been packaged for dissemination as an R package designed to accompany
# the paper. While the package is already available through Github it is not possible to anonymize the source for the
# blinded review process, hence the are hosted in the BinPackageFunction.R file that also supplements the manuscript.
# ---------------------------------------------------------------------------------------------------------------
### Preliminaries
library(ggplot2)
library(mvtnorm)
library(plyr)
library(Hmisc)
#----------------------------------------------------
### Binning Functions
#----------------------------------------------------
## Standard 1d Rectangular Binning
StandRectBin1d <- function(xs, origin, width){
  ## Standard 1-d rectangular binning: map each observation in `xs` to
  ## the center of the bin of the given `width` containing it, on a grid
  ## anchored at `origin`. Bins are left-open/right-closed (via ceiling),
  ## so a point exactly at `origin` is pinned to the first bin's center.
  bin_index <- ceiling((xs - origin) / width)
  centers <- origin + width * (bin_index - 0.5)
  # A value equal to the origin would get index 0 above; place it in the
  # first bin instead.
  centers[xs == origin] <- origin + width / 2
  return(centers)
}
## Random 1d Rectangular Binning
# Establish bounding bin centers for each observation
# Then use Unif(0,1) draws compared to assignment probs to allocate
# Reassignment for values below first center (bx1) or
# above highest center (bxJ)
RandRectBin1d <- function(xs, origin, width){
  ## Random 1-d rectangular binning: each observation is assigned to one
  ## of its two bounding bin centers, with the closer center more likely
  ## (probability proportional to distance from the opposite center).
  ## NOTE: stochastic -- results depend on the RNG state (runif below).
  ##
  ## bx1 / bxJ are the first and last bin centers of the grid.
  bx1 <- origin + width/2
  bxJ <- origin + width*(floor((max(xs)-bx1)/width) + .5)
  # Lower and upper bounding bin centers for every observation.
  lbs <- bx1 + width*floor((xs-bx1)/width)
  ubs <- bx1 + width*ceiling((xs-bx1)/width)
  # initially assign all values to upper bound
  binxs <- ubs
  # then use runif draws to reassign to the lower center with probability
  # equal to the point's relative distance from the upper center
  plower <- (ubs - xs)/width
  lowerindex <- (plower > runif(length(xs), 0, 1))
  binxs[lowerindex] <- lbs[lowerindex]
  # Clamp: values below the first center (bx1) or above the highest
  # center (bxJ) are reassigned to those boundary centers.
  binxs[xs < bx1] <- bx1
  binxs[xs > bxJ] <- bxJ
  return(binxs)
}
## Quantile 1d Binning
# used for binning the counts values by quantile
# define vector of counts and number of bin
QuantBin1d <- function(cs, nbin){
  ## Quantile 1-d binning: map each count in `cs` to the midpoint
  ## quantile (bin "center") of one of `nbin` equal-probability bins.
  ## Bin j covers the (bounds[j], bounds[j+1]] quantile interval; any
  ## value at or below the first upper bound keeps the first center.
  center_probs <- seq(1/(2*nbin), 1 - 1/(2*nbin), by = 1/nbin)
  centers <- quantile(cs, center_probs)
  bounds <- quantile(cs, seq(0, 1, by = 1/nbin))
  # Start everything in the first bin, then promote values falling into
  # later quantile intervals.
  out <- rep(centers[1], length(cs))
  for (j in 2:length(centers)){
    in_bin <- bounds[j] < cs & cs <= bounds[j + 1]
    out[in_bin] <- centers[j]
  }
  return(out)
}
## 2d Rectangular Binning (for either Standard or Random)
# standard is straight forward extension of 1d binning
# random binning needs post processing to calculate minimum spatial loss
RectBin2d <- function(xs,ys, originx, originy, widthx, widthy, type="standard"){
  ## 2-d rectangular binning for paired observations (xs, ys).
  ##   type = "standard": each point goes to the center of the
  ##     widthx-by-widthy rectangle (grid anchored at originx/originy)
  ##     that contains it.
  ##   type = "random": points are first assigned stochastically between
  ##     their bounding centers per axis (RandRectBin1d); then, for each
  ##     pair of neighboring bins, points misplaced in opposite
  ##     directions are swapped back to their standard bins so the net
  ##     spatial loss is reduced.
  ## Returns a list of two data frames: per-bin centers with frequencies,
  ## and a summary row (origins, widths, total spatial loss).
  tempdat <- data.frame(xs = xs, ys=ys,
                        binxs = StandRectBin1d(xs,originx,widthx),
                        binys = StandRectBin1d(ys,originy,widthy))
  if(type=="random"){
    tempdat<- data.frame( tempdat,
                          randbinxs = RandRectBin1d(xs,originx,widthx),
                          randbinys = RandRectBin1d(ys,originy,widthy))
    # String keys identify each bin; randdist is the distance from each
    # point to its randomly assigned bin center.
    tempdat$binname <- paste(tempdat$binxs,tempdat$binys)
    tempdat$randbinname <- paste(tempdat$randbinxs,tempdat$randbinys)
    tempdat$randdist <- with(tempdat,sqrt((xs-randbinxs)^2 + (ys-randbinys)^2))
    tempdat$index <- 1:length(xs)
    # points where mismatch between standard and random binning
    mismatchindex <- which(tempdat$binxs != tempdat$randbinxs | tempdat$binys != tempdat$randbinys )
    mmdat <- tempdat[mismatchindex,]
    # identify which need to be swapped to standard for net spatial loss
    # loop over all neighboring bin pairs (shift all xs over by 1, then
    # all ys) -- nbrs enumerates each bin with its right and upper neighbor
    xbins <- seq(min(c(tempdat$binxs,tempdat$randbinxs)) , max(c(tempdat$binxs,tempdat$randbinxs)), by=widthx)
    ybins <- seq(min(c(tempdat$binys,tempdat$randbinys)) , max(c(tempdat$binys,tempdat$randbinys)), by=widthy)
    nbrs <- data.frame(binxs = rep(rep(xbins,length(ybins)),2) ,
                       binys = rep(rep(ybins,each=length(xbins)),2),
                       nbrsxs = c(rep(xbins+widthx,length(ybins)),rep(xbins,length(ybins))),
                       nbrsys = c(rep(ybins,each=length(xbins)), rep(ybins+widthy,each=length(xbins))) )
    nbrs$binname <- paste(nbrs$binxs,nbrs$binys)
    nbrs$nbrsname <- paste(nbrs$nbrsxs,nbrs$nbrsys)
    swapindex <- NULL
    for (i in 1:nrow(nbrs)){
      # id points in standard bin i assigned to bin j
      itoj <- which(mmdat$binname == nbrs$binname[i] & mmdat$randbinname == nbrs$nbrsname[i])
      # id points in standard bin j assigned to bin i
      jtoi <- which(mmdat$binname == nbrs$nbrsname[i] & mmdat$randbinname == nbrs$binname[i])
      # number to swap in bins i and j is equal to minimum misplaced
      nswap <- min(length(itoj), length(jtoi))
      # if there are points to swap, then pick the ones with largest distance
      # from point to random bin center
      if(nswap > 0){
        # NOTE(review): `[nswap]` selects only the nswap-th farthest point,
        # not the top nswap points; `[1:nswap]` may have been intended --
        # confirm against the manuscript before changing.
        swapindex <- c(swapindex,mmdat$index[itoj][order(mmdat$randdist[itoj],decreasing=TRUE)[nswap]])
        swapindex <- c(swapindex,mmdat$index[jtoi][order(mmdat$randdist[jtoi],decreasing=TRUE)[nswap]])
      }
    }
    swapindex <- unique(swapindex)
    # Points NOT selected for a swap keep their random assignment; the
    # swapped ones retain the standard bin centers already in binxs/binys.
    tempdat$binxs[!(tempdat$index %in% swapindex)] <- tempdat$randbinxs[!(tempdat$index %in% swapindex)]
    tempdat$binys[!(tempdat$index %in% swapindex)] <- tempdat$randbinys[!(tempdat$index %in% swapindex)]
  }
  # Per-bin frequency and spatial loss (sum of point-to-center distances);
  # ddply comes from the plyr package loaded at the top of the file.
  outdat <- ddply(tempdat, .(binxs,binys), summarise,
                  binfreq = length(xs),
                  binspatialloss = sum(sqrt((xs-binxs)^2+(ys-binys)^2)) )
  summarydata <- data.frame( originx = originx, originy = originy,
                             widthx = widthx, widthy = widthy,
                             totalSpatialLoss = sum(outdat$binspatialloss))
  templist <- list(bindat = outdat[,1:3],
                   summarydata)
  return(templist)
}
## Frequency Binning
# allows for standard or quantile binning of either bin counts or log bin counts (4 combinations)
# input requires binned data output, number of freq breaks and type of freq binning
# output of frequency bin values, labels and loss are attached the original then returned
## Frequency Binning
## Bins the per-cell counts (or log counts) into `ncolor` groups so a
## discrete colour scale can be used for shading.
##   binout   : list output of RectBin2d (bin data frame + summary row)
##   binType  : "standard" (equal-width) or "quantile" frequency bins
##   ncolor   : number of frequency groups (colours)
##   logCount : bin log(counts) instead of raw counts?
## Returns `binout` with freqgroup / freqlabel columns added to the bin
## data and totalFreqLoss added to the summary row.
freqBin <- function(binout, binType="standard", ncolor, logCount=FALSE){
  cs <- binout[[1]]$binfreq
  if (logCount) cs <- log(binout[[1]]$binfreq)
  if(binType=="standard"){
    width <- (max(cs)-min(cs))/ncolor
    binout[[1]]$freqgroup <- round(StandRectBin1d(cs, min(cs) , width),5)
    binout[[1]]$freqlabel <- paste("(",round(binout[[1]]$freqgroup - width/2,1),
                                   ",",round(binout[[1]]$freqgroup + width/2,1),"]",sep="")
    # Close the interval for the smallest frequency group.
    # BUGFIX: identify that group numerically. The original took min() of
    # the character labels, which is lexicographic and can pick the wrong
    # group (e.g. "(10,..." sorts before "(2,...").
    closeidx <- binout[[1]]$freqgroup == min(binout[[1]]$freqgroup)
    binout[[1]]$freqlabel[closeidx] <- paste("[",round(min(binout[[1]]$freqgroup)- width/2,1),
                                             ",",round(min(binout[[1]]$freqgroup) + width/2,1),"]",sep="")
  }
  if(binType=="quantile"){
    binout[[1]]$freqgroup <- as.numeric(round(QuantBin1d(cs, ncolor),5))
    quantbounds <- unique(quantile(cs, (0:ncolor)/ncolor))
    if (length(quantbounds)-1 < ncolor) warning("two or more quantiles of the data have same value due to many bins with equivalent frequencies, color will be rendered equivalently for bins with matching quantiles")
    binout[[1]]$freqlabel <- cut(cs, breaks=quantbounds, include.lowest=TRUE)
  }
  # Squared-error loss from representing counts by their group centers.
  binout[[2]]$totalFreqLoss <- sum((cs - binout[[1]]$freqgroup)^2)
  return(binout)
}
# #----------------------------------------------------
# #### Function Tests
# #----------------------------------------------------
#
# ## Try 1d binning functions
# xs <- runif(100,0,100)
# cs <- rexp(100,.1)
#
# # standard binning
# standbinnedxs <- StandRectBin1d(xs, origin=0, width=10)
# qplot(xs, standbinnedxs)
# # random binning
# randbinnedxs <- RandRectBin1d(xs, origin=0, width=10)
# qplot(xs, randbinnedxs)
# # quantile binning
# quantbinnedcs <- QuantBin1d(cs, 7)
# qplot(cs, quantbinnedcs)
#
#
# ### Try 2d rectangular binning with MVN data
# n=50000
# mu <- 50
# sig <- 15
# xnorm <- rnorm(n, mean=mu, sd=sig)
# ynorm <- rnorm(n, mean=mu, sd=sig)
# qplot(xnorm, ynorm)
# # create binned data for standard and random
# binout1 <- RectBin2d(xnorm,ynorm,0,0,10,10,type="standard")
# binout2 <- RectBin2d(xnorm,ynorm,0,0,10,10,type="random")
# # take a peek at binned data format
# head(binout1[[1]])
# head(binout2[[2]])
# # compare spatial losses
# binout1[[2]]
# binout2[[2]]
# #compare visualizations
# qplot(binxs, binys, geom="tile", fill=binfreq, data=binout1[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend")
# qplot(binxs, binys, geom="tile", fill=binfreq, data=binout2[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend")
# #compare with shade by log counts
# qplot(binxs, binys, geom="tile", fill=log(binfreq+1), data=binout1[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend")
# qplot(binxs, binys, geom="tile", fill=log(binfreq+1), data=binout2[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend")
#
#
#
# ### Try out frequency binning on the same data using standard/quantile/log-count binning
# binoutFreqGroups <- freqBin(binout1, binType="standard", ncolor=5, logCount=FALSE)
# binoutQuantFreqGroups <- freqBin(binout1, binType="quantile", ncolor=5, logCount=FALSE)
# binoutLogFreqGroups <- freqBin(binout1, binType="standard", ncolor=5, logCount=TRUE)
# # Peek into how frequency bins are defined
# head(binoutFreqGroups[[1]])
# head(binoutQuantFreqGroups[[1]])
# head(binoutLogFreqGroups[[1]])
# # Compare Frequency Losses (Note I dont know if these are all directly comparable after logging counts)
# binoutFreqGroups[[2]]
# binoutQuantFreqGroups[[2]]
# binoutLogFreqGroups[[2]]
# # Plot with Orginal Frequencies
# qplot(binxs, binys, geom="tile", fill=binfreq, data=binoutFreqGroups[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend")
# # Plot with Binned Frequencies
# qplot(binxs, binys, geom="tile", fill=freqgroup, data=binoutFreqGroups[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend", breaks=unique(binoutFreqGroups[[1]]$freqgroup))
# # Plot with Quantile Binned Frequencies
# qplot(binxs, binys, geom="tile", fill=freqgroup, data=binoutQuantFreqGroups[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend", breaks=unique(binoutQuantFreqGroups[[1]]$freqgroup))
# # Plot with LogCount Binned Frequencies
# qplot(binxs, binys, geom="tile", fill=freqgroup, data=binoutLogFreqGroups[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend", breaks=unique(binoutLogFreqGroups[[1]]$freqgroup))
|
/versionForJCGS/BinningLossFunctions.R
|
no_license
|
heike/dbaccess
|
R
| false
| false
| 10,994
|
r
|
### The following file contains functions that were constructed in the process of writing the manuscript and are required
# to compile the PDF document.
#
# A cleaned subset of these functions has been packaged for dissemination as an R package designed to accompany
# the paper. While the package is already available through Github it is not possible to anonymize the source for the
# blinded review process, hence they are hosted in the BinPackageFunction.R file that also supplements the manuscript.
# ---------------------------------------------------------------------------------------------------------------
### Preliminaries
library(ggplot2)
library(mvtnorm)
library(plyr)
library(Hmisc)
#----------------------------------------------------
### Binning Functions
#----------------------------------------------------
## Standard 1d Rectangular Binning
StandRectBin1d <- function(xs, origin, width) {
  # Standard 1d rectangular binning: map each value to the center of the
  # half-open bin (origin + (k-1)*width, origin + k*width] containing it.
  bin_index <- ceiling((xs - origin) / width)
  centers <- origin + width * (bin_index - 0.5)
  # A value exactly at the origin belongs to the first bin, not the zeroth.
  centers[xs == origin] <- origin + width / 2
  centers
}
## Random 1d Rectangular Binning
# Establish bounding bin centers for each observation
# Then use Unif(0,1) draws compared to assignment probs to allocate
# Reassignment for values below first center (bx1) or
# above highest center (bxJ)
RandRectBin1d <- function(xs, origin, width) {
  # Random 1d rectangular binning: each point lies between two neighbouring
  # bin centers and is allocated to one of them at random, with probability
  # proportional to its closeness to that center. Points lying outside the
  # span of centers are clamped to the first / last center.
  first_center <- origin + width / 2
  last_center <- origin + width * (floor((max(xs) - first_center) / width) + 0.5)
  shifted <- (xs - first_center) / width
  lower_centers <- first_center + width * floor(shifted)
  upper_centers <- first_center + width * ceiling(shifted)
  # Probability of dropping to the lower center grows with distance
  # from the upper center.
  p_lower <- (upper_centers - xs) / width
  use_lower <- runif(length(xs)) < p_lower
  out <- ifelse(use_lower, lower_centers, upper_centers)
  # Clamp observations outside the range of valid centers.
  out[xs < first_center] <- first_center
  out[xs > last_center] <- last_center
  out
}
## Quantile 1d Binning
# used for binning the counts values by quantile
# define vector of counts and number of bin
QuantBin1d <- function(cs, nbin) {
  # Quantile 1d binning of count values: replace each value by the center
  # (mid-probability quantile) of the quantile bin it falls into.
  # Bin i covers the half-open interval (bounds[i], bounds[i + 1]];
  # everything at or below the first upper bound keeps the first center.
  center_probs <- seq(1 / (2 * nbin), 1 - 1 / (2 * nbin), by = 1 / nbin)
  centers <- quantile(cs, center_probs)
  bounds <- quantile(cs, seq(0, 1, by = 1 / nbin))
  out <- rep(centers[1], length(cs))
  for (bin in seq_along(centers)[-1]) {
    in_bin <- bounds[bin] < cs & cs <= bounds[bin + 1]
    out[in_bin] <- centers[bin]
  }
  out
}
## 2d Rectangular Binning (for either Standard or Random)
# standard is straight forward extension of 1d binning
# random binning needs post processing to calculate minimum spatial loss
## 2d Rectangular Binning (for either Standard or Random)
## Standard binning is the straightforward extension of 1d binning; random
## binning stochastically assigns points to neighbouring bins and then
## post-processes swaps to minimise net spatial loss.
## Returns a two-element list:
##   [[1]] bindat: one row per occupied bin (binxs, binys, binfreq)
##   [[2]] summary: origins, widths and totalSpatialLoss
RectBin2d <- function(xs, ys, originx, originy, widthx, widthy, type="standard"){
  tempdat <- data.frame(xs = xs, ys = ys,
                        binxs = StandRectBin1d(xs, originx, widthx),
                        binys = StandRectBin1d(ys, originy, widthy))
  if(type == "random"){
    tempdat <- data.frame(tempdat,
                          randbinxs = RandRectBin1d(xs, originx, widthx),
                          randbinys = RandRectBin1d(ys, originy, widthy))
    tempdat$binname <- paste(tempdat$binxs, tempdat$binys)
    tempdat$randbinname <- paste(tempdat$randbinxs, tempdat$randbinys)
    tempdat$randdist <- with(tempdat, sqrt((xs - randbinxs)^2 + (ys - randbinys)^2))
    tempdat$index <- seq_along(xs)
    # Points where standard and random binning disagree.
    mismatchindex <- which(tempdat$binxs != tempdat$randbinxs | tempdat$binys != tempdat$randbinys)
    mmdat <- tempdat[mismatchindex, ]
    # Enumerate all neighbouring bin pairs (right neighbour, then upper
    # neighbour) to identify swaps that reduce net spatial loss.
    xbins <- seq(min(c(tempdat$binxs, tempdat$randbinxs)), max(c(tempdat$binxs, tempdat$randbinxs)), by = widthx)
    ybins <- seq(min(c(tempdat$binys, tempdat$randbinys)), max(c(tempdat$binys, tempdat$randbinys)), by = widthy)
    nbrs <- data.frame(binxs = rep(rep(xbins, length(ybins)), 2),
                       binys = rep(rep(ybins, each = length(xbins)), 2),
                       nbrsxs = c(rep(xbins + widthx, length(ybins)), rep(xbins, length(ybins))),
                       nbrsys = c(rep(ybins, each = length(xbins)), rep(ybins + widthy, each = length(xbins))))
    nbrs$binname <- paste(nbrs$binxs, nbrs$binys)
    nbrs$nbrsname <- paste(nbrs$nbrsxs, nbrs$nbrsys)
    swapindex <- NULL
    for (i in seq_len(nrow(nbrs))){
      # Points in standard bin i that were randomly assigned to bin j ...
      itoj <- which(mmdat$binname == nbrs$binname[i] & mmdat$randbinname == nbrs$nbrsname[i])
      # ... and points in standard bin j randomly assigned to bin i.
      jtoi <- which(mmdat$binname == nbrs$nbrsname[i] & mmdat$randbinname == nbrs$binname[i])
      # The number of points to revert in each direction is the minimum
      # of the two misplaced counts.
      nswap <- min(length(itoj), length(jtoi))
      if(nswap > 0){
        # BUGFIX: take the nswap points with the LARGEST distance to their
        # random bin center. The original indexed `[nswap]`, which selected
        # only the single nswap-th point rather than the top nswap points.
        swapindex <- c(swapindex, mmdat$index[itoj][order(mmdat$randdist[itoj], decreasing = TRUE)[seq_len(nswap)]])
        swapindex <- c(swapindex, mmdat$index[jtoi][order(mmdat$randdist[jtoi], decreasing = TRUE)[seq_len(nswap)]])
      }
    }
    swapindex <- unique(swapindex)
    # Points not selected for a swap keep their random bin assignment;
    # swapped points retain the standard assignment set above.
    keep <- !(tempdat$index %in% swapindex)
    tempdat$binxs[keep] <- tempdat$randbinxs[keep]
    tempdat$binys[keep] <- tempdat$randbinys[keep]
  }
  outdat <- ddply(tempdat, .(binxs, binys), summarise,
                  binfreq = length(xs),
                  binspatialloss = sum(sqrt((xs - binxs)^2 + (ys - binys)^2)))
  summarydata <- data.frame(originx = originx, originy = originy,
                            widthx = widthx, widthy = widthy,
                            totalSpatialLoss = sum(outdat$binspatialloss))
  templist <- list(bindat = outdat[, 1:3],
                   summarydata)
  return(templist)
}
## Frequency Binning
# allows for standard or quantile binning of either bin counts or log bin counts (4 combinations)
# input requires binned data output, number of freq breaks and type of freq binning
# output of frequency bin values, labels and loss are attached the original then returned
## Frequency Binning
## Bins the per-cell counts (or log counts) into `ncolor` groups so a
## discrete colour scale can be used for shading.
##   binout   : list output of RectBin2d (bin data frame + summary row)
##   binType  : "standard" (equal-width) or "quantile" frequency bins
##   ncolor   : number of frequency groups (colours)
##   logCount : bin log(counts) instead of raw counts?
## Returns `binout` with freqgroup / freqlabel columns added to the bin
## data and totalFreqLoss added to the summary row.
freqBin <- function(binout, binType="standard", ncolor, logCount=FALSE){
  cs <- binout[[1]]$binfreq
  if (logCount) cs <- log(binout[[1]]$binfreq)
  if(binType=="standard"){
    width <- (max(cs)-min(cs))/ncolor
    binout[[1]]$freqgroup <- round(StandRectBin1d(cs, min(cs) , width),5)
    binout[[1]]$freqlabel <- paste("(",round(binout[[1]]$freqgroup - width/2,1),
                                   ",",round(binout[[1]]$freqgroup + width/2,1),"]",sep="")
    # Close the interval for the smallest frequency group.
    # BUGFIX: identify that group numerically. The original took min() of
    # the character labels, which is lexicographic and can pick the wrong
    # group (e.g. "(10,..." sorts before "(2,...").
    closeidx <- binout[[1]]$freqgroup == min(binout[[1]]$freqgroup)
    binout[[1]]$freqlabel[closeidx] <- paste("[",round(min(binout[[1]]$freqgroup)- width/2,1),
                                             ",",round(min(binout[[1]]$freqgroup) + width/2,1),"]",sep="")
  }
  if(binType=="quantile"){
    binout[[1]]$freqgroup <- as.numeric(round(QuantBin1d(cs, ncolor),5))
    quantbounds <- unique(quantile(cs, (0:ncolor)/ncolor))
    if (length(quantbounds)-1 < ncolor) warning("two or more quantiles of the data have same value due to many bins with equivalent frequencies, color will be rendered equivalently for bins with matching quantiles")
    binout[[1]]$freqlabel <- cut(cs, breaks=quantbounds, include.lowest=TRUE)
  }
  # Squared-error loss from representing counts by their group centers.
  binout[[2]]$totalFreqLoss <- sum((cs - binout[[1]]$freqgroup)^2)
  return(binout)
}
# #----------------------------------------------------
# #### Function Tests
# #----------------------------------------------------
#
# ## Try 1d binning functions
# xs <- runif(100,0,100)
# cs <- rexp(100,.1)
#
# # standard binning
# standbinnedxs <- StandRectBin1d(xs, origin=0, width=10)
# qplot(xs, standbinnedxs)
# # random binning
# randbinnedxs <- RandRectBin1d(xs, origin=0, width=10)
# qplot(xs, randbinnedxs)
# # quantile binning
# quantbinnedcs <- QuantBin1d(cs, 7)
# qplot(cs, quantbinnedcs)
#
#
# ### Try 2d rectangular binning with MVN data
# n=50000
# mu <- 50
# sig <- 15
# xnorm <- rnorm(n, mean=mu, sd=sig)
# ynorm <- rnorm(n, mean=mu, sd=sig)
# qplot(xnorm, ynorm)
# # create binned data for standard and random
# binout1 <- RectBin2d(xnorm,ynorm,0,0,10,10,type="standard")
# binout2 <- RectBin2d(xnorm,ynorm,0,0,10,10,type="random")
# # take a peek at binned data format
# head(binout1[[1]])
# head(binout2[[2]])
# # compare spatial losses
# binout1[[2]]
# binout2[[2]]
# #compare visualizations
# qplot(binxs, binys, geom="tile", fill=binfreq, data=binout1[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend")
# qplot(binxs, binys, geom="tile", fill=binfreq, data=binout2[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend")
# #compare with shade by log counts
# qplot(binxs, binys, geom="tile", fill=log(binfreq+1), data=binout1[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend")
# qplot(binxs, binys, geom="tile", fill=log(binfreq+1), data=binout2[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend")
#
#
#
# ### Try out frequency binning on the same data using standard/quantile/log-count binning
# binoutFreqGroups <- freqBin(binout1, binType="standard", ncolor=5, logCount=FALSE)
# binoutQuantFreqGroups <- freqBin(binout1, binType="quantile", ncolor=5, logCount=FALSE)
# binoutLogFreqGroups <- freqBin(binout1, binType="standard", ncolor=5, logCount=TRUE)
# # Peek into how frequency bins are defined
# head(binoutFreqGroups[[1]])
# head(binoutQuantFreqGroups[[1]])
# head(binoutLogFreqGroups[[1]])
# # Compare Frequency Losses (Note I dont know if these are all directly comparable after logging counts)
# binoutFreqGroups[[2]]
# binoutQuantFreqGroups[[2]]
# binoutLogFreqGroups[[2]]
# # Plot with Orginal Frequencies
# qplot(binxs, binys, geom="tile", fill=binfreq, data=binoutFreqGroups[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend")
# # Plot with Binned Frequencies
# qplot(binxs, binys, geom="tile", fill=freqgroup, data=binoutFreqGroups[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend", breaks=unique(binoutFreqGroups[[1]]$freqgroup))
# # Plot with Quantile Binned Frequencies
# qplot(binxs, binys, geom="tile", fill=freqgroup, data=binoutQuantFreqGroups[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend", breaks=unique(binoutQuantFreqGroups[[1]]$freqgroup))
# # Plot with LogCount Binned Frequencies
# qplot(binxs, binys, geom="tile", fill=freqgroup, data=binoutLogFreqGroups[[1]]) +
# scale_fill_gradient(low="#56B1F7", high="#132B43", guide="legend", breaks=unique(binoutLogFreqGroups[[1]]$freqgroup))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/module-post.R
\name{post_clean_chance}
\alias{post_clean_chance}
\title{Scrub processed data with below-chance accuracy}
\usage{
post_clean_chance(
df,
app_type = c("classroom", "explorer"),
overall = TRUE,
cutoff_dprime = 0,
cutoff_2choice = 0.5,
cutoff_4choice = 0.25,
cutoff_5choice = 0.2,
extra_demos = NULL
)
}
\arguments{
\item{df}{a df, output by \code{\link{proc_by_module}}, containing processed
ACE data.}
\item{app_type}{character. What app type produced this data? One of
\code{c("classroom", "explorer")}. Must be specified.}
\item{overall}{Also scrub ".overall" data? Defaults to \code{TRUE}.}
\item{cutoff_dprime}{Minimum value of d' to allow, for relevant tasks
(ACE Tap and Trace, SAAT, Filter). Defaults to 0.}
\item{cutoff_2choice}{Minimum value of accuracy to allow, for 2-response tasks
(ACE Flanker, Boxed). Defaults to 0.5.}
\item{cutoff_4choice}{Minimum value of accuracy to allow, for 4-response tasks
(ACE Stroop, Task Switch). Defaults to 0.25.}
\item{cutoff_5choice}{Minimum value of accuracy to allow, for 5-response tasks
(ACE Color Selection). Defaults to 0.2.}
\item{extra_demos}{Character vector specifying any custom-added demographics
columns (beyond app defaults) to pass through the function. Defaults to \code{NULL}.}
}
\value{
a df, similar in structure to \code{proc}, but with below-cutoff values in
certain columns converted to \code{NA}.
}
\description{
User-friendly wrapper to replace below-chance records with \code{NA}
in ACE data processed with \code{\link{proc_by_module}}. Currently only
compatible with ACE (SEA not yet implemented).
}
|
/man/post_clean_chance.Rd
|
permissive
|
jessicayounger/aceR
|
R
| false
| true
| 1,690
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/module-post.R
\name{post_clean_chance}
\alias{post_clean_chance}
\title{Scrub processed data with below-chance accuracy}
\usage{
post_clean_chance(
df,
app_type = c("classroom", "explorer"),
overall = TRUE,
cutoff_dprime = 0,
cutoff_2choice = 0.5,
cutoff_4choice = 0.25,
cutoff_5choice = 0.2,
extra_demos = NULL
)
}
\arguments{
\item{df}{a df, output by \code{\link{proc_by_module}}, containing processed
ACE data.}
\item{app_type}{character. What app type produced this data? One of
\code{c("classroom", "explorer")}. Must be specified.}
\item{overall}{Also scrub ".overall" data? Defaults to \code{TRUE}.}
\item{cutoff_dprime}{Minimum value of d' to allow, for relevant tasks
(ACE Tap and Trace, SAAT, Filter). Defaults to 0.}
\item{cutoff_2choice}{Minimum value of accuracy to allow, for 2-response tasks
(ACE Flanker, Boxed). Defaults to 0.5.}
\item{cutoff_4choice}{Minimum value of accuracy to allow, for 4-response tasks
(ACE Stroop, Task Switch). Defaults to 0.25.}
\item{cutoff_5choice}{Minimum value of accuracy to allow, for 5-response tasks
(ACE Color Selection). Defaults to 0.2.}
\item{extra_demos}{Character vector specifying any custom-added demographics
columns (beyond app defaults) to pass through the function. Defaults to \code{NULL}.}
}
\value{
a df, similar in structure to \code{proc}, but with below-cutoff values in
certain columns converted to \code{NA}.
}
\description{
User-friendly wrapper to replace below-chance records with \code{NA}
in ACE data processed with \code{\link{proc_by_module}}. Currently only
compatible with ACE (SEA not yet implemented).
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gc_event.R
\name{gc_event}
\alias{gc_event}
\alias{gc_event_query}
\alias{gc_event_id}
\title{Retrieve a calendar event}
\usage{
gc_event_query(x, ..., verbose = TRUE)
gc_event_id(x, id, verbose = TRUE)
}
\arguments{
\item{x}{\code{googlecalendar} object representing the calendar for
where the event resides.}
\item{\dots}{Additional parameters to be passed as part of the HTTP
request. More information on these optional values is available
below.}
\item{verbose}{Logical indicating whether to print informative
messages.}
\item{id}{Event ID as a character string.}
}
\value{
Event metadata as an \code{event} object (a custom class
wrapping a named list).
}
\description{
Retrieves metadata for a Google Calendar event in the form of an
\code{event} object. This method filters information made available
through the Google Calendar API Events resource.
}
\details{
For \code{gc_event_query}, an arbitrary number of optional query
parameters may be supplied via \dots. A list of available parameters
can be found in
\href{https://developers.google.com/google-apps/calendar/v3/reference/events/list#request}{Events: List}.
Notable parameters include:
\itemize{
\item{\code{q} -- Free text search terms to find in any field.}
\item{\code{singleEvents} -- Logical indicating whether to expand
recurring events into instances and only return single one-off
events.}
\item{\code{timeMax} -- Exclusive upper bound of the start time
filter (formatted as an RFC3339 timestamp).}
\item{\code{timeMin} -- Inclusive lower bound of the end time
filter (formatted as an RFC3339 timestamp).}
}
For more information on the structure of an \code{event} object, see
the Google Calendar API
\href{https://developers.google.com/google-apps/calendar/v3/reference/events}{Events Resource Overview}.
}
\examples{
\dontrun{
gc_summary("Commitments") \%>\%
gc_event_id("lohlv4duqhqu8bh6kfok9ookmk")
gc_summary("Commitments") \%>\%
  gc_event_query(q = "Lunch with Will")
}
}
|
/man/gc_event.Rd
|
no_license
|
paddytobias/googlecalendar
|
R
| false
| true
| 2,074
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gc_event.R
\name{gc_event}
\alias{gc_event}
\alias{gc_event_query}
\alias{gc_event_id}
\title{Retrieve a calendar event}
\usage{
gc_event_query(x, ..., verbose = TRUE)
gc_event_id(x, id, verbose = TRUE)
}
\arguments{
\item{x}{\code{googlecalendar} object representing the calendar for
where the event resides.}
\item{\dots}{Additional parameters to be passed as part of the HTTP
request. More information on these optional values is available
below.}
\item{verbose}{Logical indicating whether to print informative
messages.}
\item{id}{Event ID as a character string.}
}
\value{
Event metadata as an \code{event} object (a custom class
wrapping a named list).
}
\description{
Retrieves metadata for a Google Calendar event in the form of an
\code{event} object. This method filters information made available
through the Google Calendar API Events resource.
}
\details{
For \code{gc_event_query}, an arbitrary number of optional query
parameters may be supplied via \dots. A list of available parameters
can be found in
\href{https://developers.google.com/google-apps/calendar/v3/reference/events/list#request}{Events: List}.
Notable parameters include:
\itemize{
\item{\code{q} -- Free text search terms to find in any field.}
\item{\code{singleEvents} -- Logical indicating whether to expand
recurring events into instances and only return single one-off
events.}
\item{\code{timeMax} -- Exclusive upper bound of the start time
filter (formatted as an RFC3339 timestamp).}
\item{\code{timeMin} -- Inclusive lower bound of the end time
filter (formatted as an RFC3339 timestamp).}
}
For more information on the structure of an \code{event} object, see
the Google Calendar API
\href{https://developers.google.com/google-apps/calendar/v3/reference/events}{Events Resource Overview}.
}
\examples{
\dontrun{
gc_summary("Commitments") \%>\%
gc_event_id("lohlv4duqhqu8bh6kfok9ookmk")
gc_summary("Commitments") \%>\%
  gc_event_query(q = "Lunch with Will")
}
}
|
#---------------------------------------------------------------------------------------------------------------
#---------- This code separates the different tiers in the summary_table, change the unit to kgal for everything
#---------------------------------------------------------------------------------------------------------------
# --- Environment setup -------------------------------------------------------
# NOTE(review): rm(list = ls()) only clears the global environment, not loaded
# packages or options; restarting the R session gives a truly clean state.
rm(list = ls()) #cleaning workspace, rm for remove
#dev.off() #clearing the plots
getwd()
# Packages used downstream; the spatial libraries appear unused in this
# script -- presumably shared boilerplate across the project (TODO confirm).
my_packages <- c("rgeos","raster","tidyverse","magrittr","haven","readxl","tools","usethis","RColorBrewer","ggrepel","sf","tictoc","lubridate")
#install.packages(my_packages, repos = "http://cran.rstudio.com")
lapply(my_packages, library, character.only = TRUE)
# Resolve masking explicitly: raster also exports select(); purrr::map is the
# iteration helper wanted here.
map<-purrr::map #For the nice iterations
select<-dplyr::select
#All variables capitalized ; values str_to_title ; remove accents from all variables and values
#-----------------------------------------------------------------------------------------------------------------
# Load the OWRS rate summary table (one row per utility rate schedule).
summary_table <- read_csv("Data/OWRS/summary_table.csv")
#Change billing frequency to monthly
unique(summary_table$bill_frequency)
#Going to normalize usage, tier thresholds and total bill by the bill frequency to get in month
#la 09/30/2019 updated service charge based on comparison of OWRS raw file and summary_table.csv entry
##for bi-monthly: Alameda County Water District 03/2018, Alhambra City Of, Bakman Water Company, Garden Grove City, Westminister City Of, Sweetwater Springs Water District
#- but with Pomona City of and Hayward City of - not sure what is going on there,
##for quarterly: South Tahoe Public Utility District checks out fine
##for annual:Glenbrook Water Cooperative - not found in OWRS in github, not verified
# Service charge -> monthly. NOTE(review): bi-monthly service charges are
# deliberately NOT halved, per the OWRS comparison note above -- confirm this
# remains the intended treatment.
summary_table %<>%
mutate(service_charge = case_when(bill_frequency == "Annually" ~ service_charge/12,
bill_frequency == "Bi-Monthly" | bill_frequency == "Bimonthly" | bill_frequency == "bimonthly" ~ service_charge,
bill_frequency == "Quarterly" ~ service_charge/3,
TRUE ~ service_charge))
# Commodity charge -> monthly (bi-monthly IS halved here, unlike service_charge).
summary_table %<>%
mutate(commodity_charge = case_when(bill_frequency == "Annually" ~ commodity_charge/12,
bill_frequency == "Bi-Monthly" | bill_frequency == "Bimonthly" | bill_frequency == "bimonthly" ~ commodity_charge/2,
bill_frequency == "Quarterly" ~ commodity_charge/3,
TRUE ~ commodity_charge))
# Total bill -> monthly.
summary_table %<>%
mutate(bill = case_when(bill_frequency == "Annually" ~ bill/12,
bill_frequency == "Bi-Monthly" | bill_frequency == "Bimonthly" | bill_frequency == "bimonthly" ~ bill/2,
bill_frequency == "Quarterly" ~ bill/3,
TRUE ~ bill))
# Sanity check: service + commodity should reconstruct the bill (2 sig figs).
summary_table %<>%
mutate(check = ifelse(signif(service_charge,2)+signif(commodity_charge,2)==signif(bill,2) ,1,0))
summary_table$check #differences are due to OWRS data
# Record that all monetary fields are now on a monthly basis.
summary_table %<>%
mutate(bill_frequency_update = "Monthly")
#Separating the different tiers
# tier_starts / tier_prices arrive as newline-delimited strings; convert the
# delimiter to "-" and split into one column per tier (up to 8 tiers).
# tier_starts carries a leading zero threshold, captured as tier_0; prices
# begin at tier_1_price. NOTE(review): a literal "-" inside any tier value
# would be split incorrectly -- assumed not to occur in OWRS data.
summary_table %<>%
  mutate(tier_starts = str_replace_all(tier_starts, "\n","-")) %>%
  separate(tier_starts,c("tier_0","tier_1","tier_2","tier_3","tier_4","tier_5","tier_6","tier_7","tier_8"),
           sep = "-", remove = FALSE) %>%
  mutate(tier_prices = str_replace_all(tier_prices, "\n","-")) %>%
  separate(tier_prices,c("tier_1_price","tier_2_price","tier_3_price","tier_4_price","tier_5_price","tier_6_price","tier_7_price","tier_8_price"),
           sep = "-", remove = FALSE)
glimpse(summary_table)
# The split columns come out as character; coerce to numeric.
summary_table %<>%
  mutate_at(c("tier_1","tier_2","tier_3","tier_4","tier_5","tier_6","tier_7","tier_8"), ~as.numeric(.)) %>%
  mutate_at(c("tier_1_price","tier_2_price","tier_3_price","tier_4_price","tier_5_price","tier_6_price","tier_7_price","tier_8_price"), ~as.numeric(.))
#Convert tiers to correct bill frequency
# (same monthly normalization as the charges above). funs() is deprecated
# since dplyr 0.8.0; use a purrr-style lambda instead.
summary_table %<>%
  mutate_at(c("tier_1","tier_2","tier_3","tier_4","tier_5","tier_6","tier_7","tier_8"),
            ~ case_when(bill_frequency == "Annually" ~ ./12,
                        bill_frequency == "Bi-Monthly" | bill_frequency == "Bimonthly" | bill_frequency == "bimonthly" ~ ./2,
                        bill_frequency == "Quarterly" ~ ./3,
                        TRUE ~ .))
#Convert everything to kgal
# 1 ccf = 748.052 gal = 0.748052 kgal.
# Volume thresholds (ccf -> kgal): multiply by 0.748052.
# Prices ($/ccf -> $/kgal): DIVIDE by 0.748052 -- a kgal is a larger volume
# than a ccf, so the per-unit price must increase. BUGFIX: the original code
# multiplied the prices by 0.748052 as well, understating every $/kgal rate.
# funs() is deprecated since dplyr 0.8.0; use purrr-style lambdas instead.
summary_table %<>%
  mutate_at(c("tier_1","tier_2","tier_3","tier_4","tier_5","tier_6","tier_7","tier_8"),
            ~ case_when(bill_unit == 'ccf' ~ . * .748052,
                        TRUE ~ .)) %>%
  mutate_at(c("tier_1_price","tier_2_price","tier_3_price","tier_4_price","tier_5_price","tier_6_price","tier_7_price","tier_8_price"),
            ~ case_when(bill_unit == 'ccf' ~ . / .748052,
                        TRUE ~ .))
# Record that volumes are now expressed in kgal.
summary_table %<>%
  mutate(bill_unit = "converted to kgal")
write_csv(summary_table, "Data/OWRS/summary_table_cleaned_wide_formatv2.csv")
#Gathering to get in long format
# NOTE(review): gather()/spread() are superseded by pivot_longer()/
# pivot_wider(); kept as-is to preserve exact behavior. tier_0 (the leading
# zero threshold) is not gathered and remains a wide column.
summary_table %<>%
gather(one_of("tier_1","tier_2","tier_3","tier_4","tier_5","tier_6","tier_7","tier_8",
"tier_1_price","tier_2_price","tier_3_price","tier_4_price","tier_5_price",
"tier_6_price","tier_7_price","tier_8_price"),
key = "Tier_number", value = "Tier_threshold")
# Split the gathered key into a tier index (first digit, 1-8) and a value
# type, then spread back so each row carries a tier's volume AND its price.
summary_table %<>%
distinct() %>%
mutate(value = if_else(str_detect(Tier_number, "_price"), "Tier_price", "Tier_volume"),
tier_number = str_extract(Tier_number, "\\d")) %>%
select(-Tier_number) %>%
spread(value, Tier_threshold)
summary_table %<>%
rename(Tier_number = tier_number)
write_csv(summary_table, "Data/OWRS/summary_table_cleanedv2.csv")
|
/Code/cleaning_summary_table.R
|
no_license
|
ianartmor/ca_owrs
|
R
| false
| false
| 6,087
|
r
|
#---------------------------------------------------------------------------------------------------------------
#---------- This code separates the different tiers in the summary_table, change the unit to kgal for everything
#---------------------------------------------------------------------------------------------------------------
# NOTE(review): rm(list = ls()) only clears the global environment (not loaded
# packages or options) and makes the script unsafe to source() from another
# session; running in a fresh R process is the safer habit.
rm(list = ls()) #cleaning workspace, rm for remove
#dev.off() #clearing the plots
getwd()
my_packages <- c("rgeos","raster","tidyverse","magrittr","haven","readxl","tools","usethis","RColorBrewer","ggrepel","sf","tictoc","lubridate")
#install.packages(my_packages, repos = "http://cran.rstudio.com")
# Attach every package in one pass; character.only is required because the
# package names are held in a character vector.
lapply(my_packages, library, character.only = TRUE)
# Pin generics that raster and friends would otherwise mask.
map<-purrr::map #For the nice iterations
select<-dplyr::select
#All variables capitalized ; values str_to_title ; remove accents from all variables and values
#-----------------------------------------------------------------------------------------------------------------
summary_table <- read_csv("Data/OWRS/summary_table.csv")
#Change billing frequency to monthly
# Inspect the billing frequencies present before normalising them.
unique(summary_table$bill_frequency)
#Going to normalize usage, tier thresholds and total bill by the bill frequency to get in month
#la 09/30/2019 updated service charge based on comparison of OWRS raw file and summary_table.csv entry
##for bi-monthly: Alameda County Water District 03/2018, Alhambra City Of, Bakman Water Company, Garden Grove City, Westminister City Of, Sweetwater Springs Water District
#- but with Pomona City of and Hayward City of - not sure what is going on there,
##for quarterly: South Tahoe Public Utility District checks out fine
##for annual:Glenbrook Water Cooperative - not found in OWRS in github, not verified
# Normalise all charges to a monthly basis. Per the raw-file review above,
# bi-monthly *service* charges are already effectively monthly and are left
# untouched, while bi-monthly commodity charges and total bills are halved.
bimonthly_labels <- c("Bi-Monthly", "Bimonthly", "bimonthly")
summary_table %<>%
  mutate(
    service_charge = case_when(
      bill_frequency == "Annually" ~ service_charge / 12,
      bill_frequency %in% bimonthly_labels ~ service_charge,
      bill_frequency == "Quarterly" ~ service_charge / 3,
      TRUE ~ service_charge
    ),
    commodity_charge = case_when(
      bill_frequency == "Annually" ~ commodity_charge / 12,
      bill_frequency %in% bimonthly_labels ~ commodity_charge / 2,
      bill_frequency == "Quarterly" ~ commodity_charge / 3,
      TRUE ~ commodity_charge
    ),
    bill = case_when(
      bill_frequency == "Annually" ~ bill / 12,
      bill_frequency %in% bimonthly_labels ~ bill / 2,
      bill_frequency == "Quarterly" ~ bill / 3,
      TRUE ~ bill
    )
  )
# Sanity check: service + commodity should reproduce the bill (2 sig. figs.).
summary_table %<>%
  mutate(check = ifelse(signif(service_charge, 2) + signif(commodity_charge, 2) == signif(bill, 2), 1, 0))
summary_table$check #differences are due to OWRS data
# Everything above is now expressed per month; record that.
summary_table %<>%
  mutate(bill_frequency_update = "Monthly")
#Separating the different tiers
# tier_starts / tier_prices arrive as newline-delimited strings; turn them
# into one column per tier (tier_0 is the lower bound of tier 1, usually 0).
summary_table %<>%
  mutate(tier_starts = str_replace_all(tier_starts, "\n", "-")) %>%
  separate(tier_starts, c("tier_0", "tier_1", "tier_2", "tier_3", "tier_4", "tier_5", "tier_6", "tier_7", "tier_8"),
           sep = "-", remove = FALSE) %>%
  mutate(tier_prices = str_replace_all(tier_prices, "\n", "-")) %>%
  separate(tier_prices, c("tier_1_price", "tier_2_price", "tier_3_price", "tier_4_price", "tier_5_price", "tier_6_price", "tier_7_price", "tier_8_price"),
           sep = "-", remove = FALSE)
glimpse(summary_table)
# Column groups reused below (tier_0 is intentionally left as character,
# matching the original behaviour).
tier_cols <- paste0("tier_", 1:8)
price_cols <- paste0(tier_cols, "_price")
# The separated columns are character; coerce to numeric.
# mutate_at()/funs() are deprecated -- across() is the supported equivalent.
summary_table %<>%
  mutate(across(all_of(c(tier_cols, price_cols)), as.numeric))
#Convert tiers to correct bill frequency
summary_table %<>%
  mutate(across(all_of(tier_cols),
                ~ case_when(bill_frequency == "Annually" ~ .x / 12,
                            bill_frequency %in% c("Bi-Monthly", "Bimonthly", "bimonthly") ~ .x / 2,
                            bill_frequency == "Quarterly" ~ .x / 3,
                            TRUE ~ .x)))
#Convert everything to kgal (1 ccf = 0.748052 kgal)
# NOTE(review): prices are also *multiplied* by 0.748052 here; for $/ccf ->
# $/kgal a division would normally be expected. Kept as-is to preserve the
# published output, but worth confirming against the raw OWRS units.
summary_table %<>%
  mutate(across(all_of(c(tier_cols, price_cols)),
                ~ case_when(bill_unit == 'ccf' ~ .x * .748052,
                            TRUE ~ .x)))
summary_table %<>%
  mutate(bill_unit = "converted to kgal")
write_csv(summary_table, "Data/OWRS/summary_table_cleaned_wide_formatv2.csv")
#Gathering to get in long format
# NOTE(review): gather()/spread() are superseded by pivot_longer()/pivot_wider();
# kept as-is because the row order of the written csv depends on them.
summary_table %<>%
gather(one_of("tier_1","tier_2","tier_3","tier_4","tier_5","tier_6","tier_7","tier_8",
"tier_1_price","tier_2_price","tier_3_price","tier_4_price","tier_5_price",
"tier_6_price","tier_7_price","tier_8_price"),
key = "Tier_number", value = "Tier_threshold")
# Split the stacked column back out: one row per (utility, tier) carrying a
# Tier_price and a Tier_volume column.
summary_table %<>%
distinct() %>%
mutate(value = if_else(str_detect(Tier_number, "_price"), "Tier_price", "Tier_volume"),
# The digit embedded in the old column name is the tier index (1-8).
tier_number = str_extract(Tier_number, "\\d")) %>%
select(-Tier_number) %>%
spread(value, Tier_threshold)
summary_table %<>%
rename(Tier_number = tier_number)
write_csv(summary_table, "Data/OWRS/summary_table_cleanedv2.csv")
|
### creating area averaged line plot
# For every masked chl-a raster in rasterdir, extract the sensor/product and
# date from the file name, compute the area-averaged log10 chl-a, and collect
# the results into one data frame that is written to csv and plotted.
rasterdir="/Users/heatherwelch/Dropbox/JPSS/global/alaska_rasters_mask/"
csvdir="/Users/heatherwelch/Dropbox/JPSS/global/alaska_csvs_mask/";dir.create(csvdir)
rasterlist=list.files(rasterdir,pattern = ".grd")
setwd(rasterdir)
# Pre-allocate one row per raster (seq_along is safe even if the folder is empty,
# unlike 1:length which yields c(1, 0)).
df=data.frame(number=seq_along(rasterlist)) %>% mutate(date=as.Date("2012-03-01"))%>% mutate(sensor=NA)%>% mutate(mean=NA)
for(i in seq_along(rasterlist)){
  name=rasterlist[i]
  ras=rasterlist[i] %>% raster()
  print(name)
  # Reset per file so an unrecognised name yields NA rather than silently
  # inheriting the previous file's sensor/date.
  sensor=NA
  date=as.Date(NA)
  # Sensor and date are encoded at fixed positions in each product's file name.
  if(grepl("erdMH1chlamday", name)){
    sensor="MODIS"
    date=substr(name,16,25) %>% as.Date()
  }
  if(grepl("nesdisVHNSQchlaMonthly", name)){
    sensor="VIIRS"
    date=substr(name,24,33) %>% as.Date()
  }
  if(grepl("AVW", name)){
    sensor="GlobColour Merged AVW"
    date=substr(name,23,32) %>% as.Date()
  }
  if(grepl("GSM", name)){
    sensor="GlobColour Merged GSM"
    date=substr(name,23,32) %>% as.Date()
  }
  if(grepl("ESACCI-OC-L3S", name)){
    sensor="OC-CCI"
    date=substr(name,15,24) %>% as.Date()
  }
  # Small additive offset keeps zero-chl cells finite under log10.
  avg=log10(ras+0.001) %>% cellStats(.,stat="mean",na.rm=TRUE)
  df$date[i]=date
  df$sensor[i]=sensor
  df$mean[i]=avg
}
df$sensor=as.factor(df$sensor)
dfAll=df
write.csv(dfAll,"/Users/heatherwelch/Dropbox/JPSS/global/alaska_csvs_mask/All_products_4km_alaska_log_mask.csv")
#### plotting ####
a=ggplot(dfAll,aes(x=date,y=mean)) +geom_line(aes(group=sensor,color=sensor,linetype=sensor),size=.5)+geom_point(aes(color=sensor))+
  scale_x_date(date_breaks="year",date_labels = "%Y",date_minor_breaks = "months")+
  theme(legend.position=c(.9,.9),legend.justification = c(.4,.4))+
  theme(axis.text = element_text(size=6),axis.title = element_text(size=6),legend.text=element_text(size=6),legend.title = element_text(size=6),strip.text.y = element_text(size = 6),strip.text.x = element_text(size = 6), strip.background = element_blank())+
  theme(legend.key.size = unit(.5,'lines'))+
  scale_color_manual("Product",values=c("VIIRS"="#d3ad06","MODIS"="#0066cc","OC-CCI"="red","GlobColour Merged GSM"="black","GlobColour Merged AVW"="darkgreen"))+ylab("log Chl-a (mg/m3)")+xlab("Year")+
  scale_linetype_manual("Product",values = c("VIIRS"="dashed","MODIS"="dashed","OC-CCI"="solid","GlobColour Merged GSM"="solid","GlobColour Merged AVW"="solid"))
print(a)
datatype="alaska_chla_4km_log_mask"
outputDir="/Users/heatherwelch/Dropbox/JPSS/plots_04.14.19/"
png(paste(outputDir,datatype,".png",sep=''),width=24,height=12,units='cm',res=400)
par(ps=10)
par(mar=c(4,4,1,1))
par(cex=1)
# print() is required: a bare `a` is not auto-printed when the script runs via
# source()/Rscript, which would have left the PNG blank.
print(a)
dev.off()
|
/code_03.05.19/alaska_chla_v2_log_masked.R
|
no_license
|
HeatherWelch/JPSS_VIIRS
|
R
| false
| false
| 2,522
|
r
|
### creating area averaged line plot
# For every masked chl-a raster in rasterdir, extract the sensor/product and
# date from the file name, compute the area-averaged log10 chl-a, and collect
# the results into one data frame that is written to csv and plotted.
rasterdir="/Users/heatherwelch/Dropbox/JPSS/global/alaska_rasters_mask/"
csvdir="/Users/heatherwelch/Dropbox/JPSS/global/alaska_csvs_mask/";dir.create(csvdir)
rasterlist=list.files(rasterdir,pattern = ".grd")
setwd(rasterdir)
# Pre-allocate one row per raster (seq_along is safe even if the folder is empty,
# unlike 1:length which yields c(1, 0)).
df=data.frame(number=seq_along(rasterlist)) %>% mutate(date=as.Date("2012-03-01"))%>% mutate(sensor=NA)%>% mutate(mean=NA)
for(i in seq_along(rasterlist)){
  name=rasterlist[i]
  ras=rasterlist[i] %>% raster()
  print(name)
  # Reset per file so an unrecognised name yields NA rather than silently
  # inheriting the previous file's sensor/date.
  sensor=NA
  date=as.Date(NA)
  # Sensor and date are encoded at fixed positions in each product's file name.
  if(grepl("erdMH1chlamday", name)){
    sensor="MODIS"
    date=substr(name,16,25) %>% as.Date()
  }
  if(grepl("nesdisVHNSQchlaMonthly", name)){
    sensor="VIIRS"
    date=substr(name,24,33) %>% as.Date()
  }
  if(grepl("AVW", name)){
    sensor="GlobColour Merged AVW"
    date=substr(name,23,32) %>% as.Date()
  }
  if(grepl("GSM", name)){
    sensor="GlobColour Merged GSM"
    date=substr(name,23,32) %>% as.Date()
  }
  if(grepl("ESACCI-OC-L3S", name)){
    sensor="OC-CCI"
    date=substr(name,15,24) %>% as.Date()
  }
  # Small additive offset keeps zero-chl cells finite under log10.
  avg=log10(ras+0.001) %>% cellStats(.,stat="mean",na.rm=TRUE)
  df$date[i]=date
  df$sensor[i]=sensor
  df$mean[i]=avg
}
df$sensor=as.factor(df$sensor)
dfAll=df
write.csv(dfAll,"/Users/heatherwelch/Dropbox/JPSS/global/alaska_csvs_mask/All_products_4km_alaska_log_mask.csv")
#### plotting ####
a=ggplot(dfAll,aes(x=date,y=mean)) +geom_line(aes(group=sensor,color=sensor,linetype=sensor),size=.5)+geom_point(aes(color=sensor))+
  scale_x_date(date_breaks="year",date_labels = "%Y",date_minor_breaks = "months")+
  theme(legend.position=c(.9,.9),legend.justification = c(.4,.4))+
  theme(axis.text = element_text(size=6),axis.title = element_text(size=6),legend.text=element_text(size=6),legend.title = element_text(size=6),strip.text.y = element_text(size = 6),strip.text.x = element_text(size = 6), strip.background = element_blank())+
  theme(legend.key.size = unit(.5,'lines'))+
  scale_color_manual("Product",values=c("VIIRS"="#d3ad06","MODIS"="#0066cc","OC-CCI"="red","GlobColour Merged GSM"="black","GlobColour Merged AVW"="darkgreen"))+ylab("log Chl-a (mg/m3)")+xlab("Year")+
  scale_linetype_manual("Product",values = c("VIIRS"="dashed","MODIS"="dashed","OC-CCI"="solid","GlobColour Merged GSM"="solid","GlobColour Merged AVW"="solid"))
print(a)
datatype="alaska_chla_4km_log_mask"
outputDir="/Users/heatherwelch/Dropbox/JPSS/plots_04.14.19/"
png(paste(outputDir,datatype,".png",sep=''),width=24,height=12,units='cm',res=400)
par(ps=10)
par(mar=c(4,4,1,1))
par(cex=1)
# print() is required: a bare `a` is not auto-printed when the script runs via
# source()/Rscript, which would have left the PNG blank.
print(a)
dev.off()
|
# Shiny server: converts a "Modern Time Unit" (MTU) selection into a Gregorian
# date, where time starts on 1/1/2000 and one MTU is 0.01 of a year.
shinyServer(function(input, output) {
  # Echo the MTU slider selection back to the UI.
  output$text1 <- renderText({
    paste("Modern Time Unit:", input$n)
  })
  # Echo the selected year offset back to the UI.
  output$text <- renderText({
    paste("Select a Year after the beginning of time (1/1/2000):", input$n_years)
  })
  # Combine year + fractional MTU and convert the total to a calendar date.
  output$text2 <- renderPrint({
    mtu_total <- as.numeric(input$n_years) + 0.01 * as.numeric(input$n)
    gregorian <- as.Date(mtu_total * 365.25 + 1, origin = "2000/01/01")
    paste("MTU:", mtu_total, " Date:", gregorian)
  })
})
|
/server.R
|
no_license
|
JCurrie2014/DataProducts
|
R
| false
| false
| 954
|
r
|
# Shiny server: converts a "Modern Time Unit" (MTU) selection into a Gregorian
# date, where time starts on 1/1/2000 and one MTU is 0.01 of a year.
shinyServer(function(input, output) {
  # Echo the MTU slider selection back to the UI.
  output$text1 <- renderText({
    paste("Modern Time Unit:", input$n)
  })
  # Echo the selected year offset back to the UI.
  output$text <- renderText({
    paste("Select a Year after the beginning of time (1/1/2000):", input$n_years)
  })
  # Combine year + fractional MTU and convert the total to a calendar date.
  output$text2 <- renderPrint({
    mtu_total <- as.numeric(input$n_years) + 0.01 * as.numeric(input$n)
    gregorian <- as.Date(mtu_total * 365.25 + 1, origin = "2000/01/01")
    paste("MTU:", mtu_total, " Date:", gregorian)
  })
})
|
# Exploratory multiple linear regression of Apple's adjusted close on the
# daily OHLC prices and volume (NYSE export).
apple_dataset <- read.csv("Apple_NYSE_NN.csv")
# Keep the relevant columns as a *data frame*. The original cbind() collapsed
# everything into an unnamed numeric matrix, so lm(..., data = ...) could not
# resolve the column names (and Date was silently coerced).
apple_stock_data <- apple_dataset[, c("Date", "Open", "High", "Low", "Close", "Volume", "Adj.Close")]
apple_stock_data.lm <- lm(Adj.Close ~ Open + High + Low + Volume + Close, data = apple_stock_data)
apple_stock_data.lm
summary(apple_stock_data.lm)
predict(apple_stock_data.lm)
predict(apple_stock_data.lm, level = 0.95)
# qqnorm() expects a numeric vector, not an lm object: use the residuals.
qqnorm(resid(apple_stock_data.lm))
#Equation
#Adj_close=112.6655044190974-0.0310677272107open+2.8484184978663high-2.5652874460933low-0.3175670409389close-0.0000003572271volume
# Analysis:
# NOTE(review): abline() on a multi-predictor fit only uses the first two
# coefficients, so the drawn line is not meaningful for this model.
abline(apple_stock_data.lm)
qqnorm(resid(apple_stock_data.lm))
predict(apple_stock_data.lm)
confint(apple_stock_data.lm)
# Raw residuals.
apple_stock_data.res=resid(apple_stock_data.lm)
apple_stock_data.res
plot(apple_stock_data.lm)
predict(apple_stock_data.lm, level = 0.95)
####Predict the value means it gives the range of values the actual values got deviated from theoretical.
# Standardised residuals -- must be computed before use (the original script
# referenced apple_stdres without ever defining it).
apple_stdres = rstandard(apple_stock_data.lm)
apple_stdres
qqnorm(apple_stdres,ylab = "Standardized Residuals", xlab = "Normal Scores", main = "apple stock standard residuals")
#Equation is ___
summary(apple_stock_data)
mean_aclose= mean(apple_stock_data$Adj.Close)
mean_aclose
abline(apple_stock_data.lm, col = "red")
plot(apple_stock_data.lm)
termplot(apple_stock_data.lm)
summary(apple_stock_data.lm)
apple_stock_data.lm
#gives the below info:
#Coefficients:
# (Intercept) Open High Low Close Volume
#1.127e+02 -3.107e-02 2.848e+00 -2.565e+00 -3.176e-01 -3.572e-07
#open, high, low, close, volume are the slopes.
#correlation to other stocks
#RNN algo
#Dow Jones
#some variable.names to use
|
/Apple_Stock_Analysis.R
|
no_license
|
cgajiwala/NYSE-Big-Data-Analytics-Project
|
R
| false
| false
| 1,850
|
r
|
# Exploratory multiple linear regression of Apple's adjusted close on the
# daily OHLC prices and volume (NYSE export).
apple_dataset <- read.csv("Apple_NYSE_NN.csv")
# Keep the relevant columns as a *data frame*. The original cbind() collapsed
# everything into an unnamed numeric matrix, so lm(..., data = ...) could not
# resolve the column names (and Date was silently coerced).
apple_stock_data <- apple_dataset[, c("Date", "Open", "High", "Low", "Close", "Volume", "Adj.Close")]
apple_stock_data.lm <- lm(Adj.Close ~ Open + High + Low + Volume + Close, data = apple_stock_data)
apple_stock_data.lm
summary(apple_stock_data.lm)
predict(apple_stock_data.lm)
predict(apple_stock_data.lm, level = 0.95)
# qqnorm() expects a numeric vector, not an lm object: use the residuals.
qqnorm(resid(apple_stock_data.lm))
#Equation
#Adj_close=112.6655044190974-0.0310677272107open+2.8484184978663high-2.5652874460933low-0.3175670409389close-0.0000003572271volume
# Analysis:
# NOTE(review): abline() on a multi-predictor fit only uses the first two
# coefficients, so the drawn line is not meaningful for this model.
abline(apple_stock_data.lm)
qqnorm(resid(apple_stock_data.lm))
predict(apple_stock_data.lm)
confint(apple_stock_data.lm)
# Raw residuals.
apple_stock_data.res=resid(apple_stock_data.lm)
apple_stock_data.res
plot(apple_stock_data.lm)
predict(apple_stock_data.lm, level = 0.95)
####Predict the value means it gives the range of values the actual values got deviated from theoretical.
# Standardised residuals -- must be computed before use (the original script
# referenced apple_stdres without ever defining it).
apple_stdres = rstandard(apple_stock_data.lm)
apple_stdres
qqnorm(apple_stdres,ylab = "Standardized Residuals", xlab = "Normal Scores", main = "apple stock standard residuals")
#Equation is ___
summary(apple_stock_data)
mean_aclose= mean(apple_stock_data$Adj.Close)
mean_aclose
abline(apple_stock_data.lm, col = "red")
plot(apple_stock_data.lm)
termplot(apple_stock_data.lm)
summary(apple_stock_data.lm)
apple_stock_data.lm
#gives the below info:
#Coefficients:
# (Intercept) Open High Low Close Volume
#1.127e+02 -3.107e-02 2.848e+00 -2.565e+00 -3.176e-01 -3.572e-07
#open, high, low, close, volume are the slopes.
#correlation to other stocks
#RNN algo
#Dow Jones
#some variable.names to use
|
## Script for doing the first project in Exploratory Data Analysis
##
## The File household_power_consumption.txt should be in the working directory
##
## Produces plot3.png: the three sub-metering series for 1-2 Feb 2007.
library(sqldf)
library(datasets)
library(lubridate)
file<-"household_power_consumption.txt"
# SQLite column types for read.csv2.sql ('char' = text, 'real' = numeric).
tipos<- list(Date='char', Time='char', Global_active_power='real', Global_reactive_power='real', Voltage='real', Global_intensity='real', Sub_metering_1='real',Sub_metering_2='real',Sub_metering_3='real')
# Filter to the two target dates inside SQLite so the full file is never
# loaded into R memory.
Only<-read.csv2.sql(file,sql="select * from file where Date='1/2/2007' or Date='2/2/2007'",field.types=tipos)
# Build POSIXct timestamps from the day/month/year date plus the time-of-day.
dh<-with(Only,paste(Date,Time))
x<-dmy_hms(dh)
Only$Global_active_power<-as.numeric(Only$Global_active_power)
png(file="plot3.png")
titulo="Energy sub metering"
# Draw an empty frame first, then add one coloured line per sub-meter.
plot(x,y=Only$Sub_metering_1,type="n",ylab=titulo,xlab="")
lines(x,y=Only$Sub_metering_1,type="l",col="black")
lines(x,y=Only$Sub_metering_2,type="l",col="red")
lines(x,y=Only$Sub_metering_3,type="l",col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"),lwd=1)
dev.off()
|
/plot3.R
|
no_license
|
Cuiteco/ExData_Plotting1
|
R
| false
| false
| 1,053
|
r
|
## Script for doing the first project in Exploratory Data Analysis
##
## The File household_power_consumption.txt should be in the working directory
##
## Produces plot3.png: the three sub-metering series for 1-2 Feb 2007.
library(sqldf)
library(datasets)
library(lubridate)
file<-"household_power_consumption.txt"
# SQLite column types for read.csv2.sql ('char' = text, 'real' = numeric).
tipos<- list(Date='char', Time='char', Global_active_power='real', Global_reactive_power='real', Voltage='real', Global_intensity='real', Sub_metering_1='real',Sub_metering_2='real',Sub_metering_3='real')
# Filter to the two target dates inside SQLite so the full file is never
# loaded into R memory.
Only<-read.csv2.sql(file,sql="select * from file where Date='1/2/2007' or Date='2/2/2007'",field.types=tipos)
# Build POSIXct timestamps from the day/month/year date plus the time-of-day.
dh<-with(Only,paste(Date,Time))
x<-dmy_hms(dh)
Only$Global_active_power<-as.numeric(Only$Global_active_power)
png(file="plot3.png")
titulo="Energy sub metering"
# Draw an empty frame first, then add one coloured line per sub-meter.
plot(x,y=Only$Sub_metering_1,type="n",ylab=titulo,xlab="")
lines(x,y=Only$Sub_metering_1,type="l",col="black")
lines(x,y=Only$Sub_metering_2,type="l",col="red")
lines(x,y=Only$Sub_metering_3,type="l",col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"),lwd=1)
dev.off()
|
#' Creates an adjacency matrix from a set of edges.
#'
#' The first step in the ignition workflow (if you are using your own edge set) is
#' to convert the edges into an adjacency matrix. This method provides a simple
#' framework for doing so.
#'
#' Gene names are upper-cased before matching, self-loops are dropped, and the
#' returned matrix is symmetric with gene names on both dimensions.
#'
#' @param edge.set A data frame with two columns. Each row is an interaction and each column
#' is a gene involved in the interaction. Optionally, the data frame can additionally contain
#' a third column containing a numeric confidence measure for each interaction. Interactions
#' with missing or negative confidence measures are ignored.
#' @return An adjacency matrix representation of the given edge set.
#' @examples
#' data(ignition.example.edges)
#' adj.mat = CreateAdjMatrix(ignition.example.edges)
#' @export
CreateAdjMatrix <- function(edge.set){
  weighted <- FALSE
  if (ncol(edge.set) == 3) {
    weighted <- TRUE
    # Drop interactions with missing or non-positive confidence.
    weight.vec <- as.numeric(edge.set[, 3])
    keep.ind <- (!is.na(weight.vec)) & (weight.vec > 0)
    edge.set <- edge.set[keep.ind, , drop = FALSE]
    weight.vec <- as.numeric(edge.set[, 3])
  }
  e1 <- toupper(as.character(edge.set[, 1]))
  e2 <- toupper(as.character(edge.set[, 2]))
  node.set <- unique(c(e1, e2))
  n.nodes <- length(node.set)
  adj.mat <- matrix(0.0, n.nodes, n.nodes, dimnames = list(node.set, node.set))
  # Base match() replaces the hash-package lookup: every endpoint is in
  # node.set by construction, so no NULL/NA checks are needed.
  idx1 <- match(e1, node.set)
  idx2 <- match(e2, node.set)
  # seq_along() (not 1:length) stays correct when all edges were filtered out.
  for (i in seq_along(e1)) {
    if (idx1[i] != idx2[i]) {  # skip self-loops
      w <- if (weighted) weight.vec[i] else 1
      adj.mat[idx1[i], idx2[i]] <- w
      adj.mat[idx2[i], idx1[i]] <- w
    }
  }
  return(adj.mat)
}
|
/R/create_adj_matrix.R
|
no_license
|
lancour/ignition
|
R
| false
| false
| 1,747
|
r
|
#' Creates an adjacency matrix from a set of edges.
#'
#' The first step in the ignition workflow (if you are using your own edge set) is
#' to convert the edges into an adjacency matrix. This method provides a simple
#' framework for doing so.
#'
#' Gene names are upper-cased before matching, self-loops are dropped, and the
#' returned matrix is symmetric with gene names on both dimensions.
#'
#' @param edge.set A data frame with two columns. Each row is an interaction and each column
#' is a gene involved in the interaction. Optionally, the data frame can additionally contain
#' a third column containing a numeric confidence measure for each interaction. Interactions
#' with missing or negative confidence measures are ignored.
#' @return An adjacency matrix representation of the given edge set.
#' @examples
#' data(ignition.example.edges)
#' adj.mat = CreateAdjMatrix(ignition.example.edges)
#' @export
CreateAdjMatrix <- function(edge.set){
  weighted <- FALSE
  if (ncol(edge.set) == 3) {
    weighted <- TRUE
    # Drop interactions with missing or non-positive confidence.
    weight.vec <- as.numeric(edge.set[, 3])
    keep.ind <- (!is.na(weight.vec)) & (weight.vec > 0)
    edge.set <- edge.set[keep.ind, , drop = FALSE]
    weight.vec <- as.numeric(edge.set[, 3])
  }
  e1 <- toupper(as.character(edge.set[, 1]))
  e2 <- toupper(as.character(edge.set[, 2]))
  node.set <- unique(c(e1, e2))
  n.nodes <- length(node.set)
  adj.mat <- matrix(0.0, n.nodes, n.nodes, dimnames = list(node.set, node.set))
  # Base match() replaces the hash-package lookup: every endpoint is in
  # node.set by construction, so no NULL/NA checks are needed.
  idx1 <- match(e1, node.set)
  idx2 <- match(e2, node.set)
  # seq_along() (not 1:length) stays correct when all edges were filtered out.
  for (i in seq_along(e1)) {
    if (idx1[i] != idx2[i]) {  # skip self-loops
      w <- if (weighted) weight.vec[i] else 1
      adj.mat[idx1[i], idx2[i]] <- w
      adj.mat[idx2[i], idx1[i]] <- w
    }
  }
  return(adj.mat)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/likelihoods.R
\name{sumcll}
\alias{sumcll}
\title{Calculate the complete data log-likelihood across all observations.}
\usage{
sumcll(..., z, pi_est)
}
\arguments{
\item{...}{all model parameters and data (par, Ntot, ns0,ns1,nu0,nu1)}
\item{z}{\code{matrix} of type \code{numeric} represents the posterior probabilities of the class assignments of each observation to each component. Rows sum to 1.}
\item{pi_est}{\code{numeric} the mixing proportions.}
}
\description{
Calculate the complete data log-likelihood across all observations.
}
\seealso{
\link{bbll} \link{MIMOSA2}
}
|
/man/sumcll.Rd
|
no_license
|
RGLab/MIMOSA2
|
R
| false
| true
| 659
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/likelihoods.R
\name{sumcll}
\alias{sumcll}
\title{Calculate the complete data log-likelihood across all observations.}
\usage{
sumcll(..., z, pi_est)
}
\arguments{
\item{...}{all model parameters and data (par, Ntot, ns0,ns1,nu0,nu1)}
\item{z}{\code{matrix} of type \code{numeric} represents the posterior probabilities of the class assignments of each observation to each component. Rows sum to 1.}
\item{pi_est}{\code{numeric} the mixing proportions.}
}
\description{
Calculate the complete data log-likelihood across all observations.
}
\seealso{
\link{bbll} \link{MIMOSA2}
}
|
#!/usr/bin/R -f
# This piece of code will test the package on main platforms
#
# Clone then
# Working copy lives on a ramdisk so repeated checks are fast.
pkgdir <- "/tmp/ramdisk/Rtesting"
# NOTE(review): system() returns the command's exit status, not the clone URL,
# so 'git_address' is a misleading name (the value is never used below).
git_address <- system(paste0("git clone https://github.com/spatial-ews/spatialwarnings ",
pkgdir))
# Check on local computer
devtools::check(pkg = pkgdir, cran = TRUE)
# Check on windows
# devtools::check_win_devel(pkg = pkgdir)
# devtools::check_win_release(pkg = pkgdir)
# devtools::check_win_oldrelease(pkg = pkgdir)
# Check on Solaris
rhub::check(pkgdir, platform = "solaris-x86-patched")
# Check on common platforms for CRAN
rhub::check_for_cran(pkgdir)
# => Results are sent by email
# Cleanup
unlink(pkgdir, recursive = TRUE)
# file.remove(dir(".", full.names = TRUE))
# file.remove(dir(".", full.names = TRUE))
|
/tools/pkg_check.R
|
permissive
|
W529/spatialwarnings
|
R
| false
| false
| 789
|
r
|
#!/usr/bin/R -f
# This piece of code will test the package on main platforms
#
# Clone then
# Working copy lives on a ramdisk so repeated checks are fast.
pkgdir <- "/tmp/ramdisk/Rtesting"
# NOTE(review): system() returns the command's exit status, not the clone URL,
# so 'git_address' is a misleading name (the value is never used below).
git_address <- system(paste0("git clone https://github.com/spatial-ews/spatialwarnings ",
pkgdir))
# Check on local computer
devtools::check(pkg = pkgdir, cran = TRUE)
# Check on windows
# devtools::check_win_devel(pkg = pkgdir)
# devtools::check_win_release(pkg = pkgdir)
# devtools::check_win_oldrelease(pkg = pkgdir)
# Check on Solaris
rhub::check(pkgdir, platform = "solaris-x86-patched")
# Check on common platforms for CRAN
rhub::check_for_cran(pkgdir)
# => Results are sent by email
# Cleanup
unlink(pkgdir, recursive = TRUE)
# file.remove(dir(".", full.names = TRUE))
# file.remove(dir(".", full.names = TRUE))
|
# Preliminary exploration joining Boston crime incidents with Airbnb listings
# and calendar prices, allocating both point sets to neighbourhood polygons.
library(data.table)
library(dplyr)
library(ggplot2) # learn ggplot2 T_T
options(digits=5)
# Shorthand alias for length(), used throughout this script.
len = length
## load in data
# read in crime data
# NOTE(review): 'c' shadows base::c as a data object. Calls such as
# c('long','lat') still resolve to the base function (R skips non-function
# bindings when dispatching a call), but renaming this data frame is safer.
c = read.csv('bostonCrime.csv')
ac = read.csv('airbnbCalendar.csv', as.is = T) # airbnb calendar
# Strip "$" and "," so the prices parse as numbers.
ac$price = as.numeric(gsub('\\$|\\,', '', ac$price))
ac = as.data.table(ac)
ac = unique(ac)
al = read.csv('airbnbListings.csv', as.is = T) # airbnb listing
# rename
names(c) = c('incident.id', 'offense.code', 'offense.group', 'offense.dis',
'dist', 'report.area', 'shooting', 'date', 'hr', 'yr', 'mon',
'day', 'ucr', 'street', 'lat', 'long', 'loc')
# remove unlocated data
i = which(c$long == -1 | is.na(c$long))
if(len(i)) c = c[-i, ]
# remove useless column
i = which(names(al) %in% c('listing_url', 'scrape_id', 'last_scraped', 'experiences_offered',
'thumbnail_url', 'medium_url', 'picture_url','xl_picture_url','host_url',
'host_thumbnail_url', 'host_picture_url','jurisdiction_names','license',
'requires_license', 'calendar_last_scraped', 'has_availability', 'calendar_updated',
'state'))
if(len(i)) al = al[, -i]
al = data.table(al)
## take a look of crime data
## time
# crime occurance time
ggplot(c, aes(hr)) +
geom_histogram(binwidth = 1, col = 'black', fill = 'lightblue')
ggplot(c, aes(mon)) +
geom_histogram(binwidth = 1, col = 'black', fill = 'lightblue')
# perhaps the data starts from Aug '15 to Jan '17
# percentage of shooting
table(c$shooting)[2] / nrow(c) * 100
# crime categories
unique(c$offense.group)
table(c$offense.group) # make a list
tmp = c %>% group_by(offense.group) %>% summarize(p = n()/nrow(c) * 100)
tmp = tmp[with(tmp, order(-p)), ]
ggplot(as.data.frame(tmp), aes(x = offense.group, y = p)) +
geom_bar(position="dodge",stat="identity") +
coord_flip()
## airbnb
# identify_verified percentage
tmp = al %>% group_by(host_identity_verified) %>% summarize(p = n()/nrow(al)) %>% rename(hid = host_identity_verified)
# NOTE(review): melt() can come from data.table (attached above), but
# percent_format() is from the scales package, which is never attached --
# confirm library(scales) is loaded before running this plot.
ggplot(melt(tmp), aes(x = variable, y= value,fill = value)) +
geom_bar(position = "fill",stat = "identity") +
scale_y_continuous(labels = percent_format())
##### build airbnb and crime data
# use ggmap to convert all long, lat into zip
m = read.csv('isd_neighborhood_districts.csv', as.is = T)
# Parse each district's "the_geom" polygon string into an n x 2 numeric matrix
# of vertices, stored in res keyed by district name.
res = list()
for(j in 1:len(m$Name)) {
x = gsub('\\(|\\)', '', m$the_geom[j])
x = strsplit(x, " |,")[[1]]
x = as.data.frame(matrix(unlist(x), ncol = 3, byrow = T), stringsAsFactors = F)
x$V1 = NULL
x = sapply(x, as.numeric)
res[[m$Name[j]]] = x
}
# allocate crime points into each counties
# NOTE(review): SDMTools::pnt.in.poly does the point-in-polygon tests below;
# the package has since been archived on CRAN.
library(SDMTools)
clist = list()
ctmp = c[, c('long', 'lat')]
c$county = NA
for(n in m$Name){
poly = res[[n]]
tmp = pnt.in.poly(ctmp[,c("long","lat")], poly)
i = which(tmp$pip != 0)
c[i, ]$county = n
}
# allocate airbnb member into region
al$county = NA
atmp = al[, c('id', 'longitude', 'latitude')]
for(n in m$Name) {
poly = res[[n]]
tmp = pnt.in.poly(atmp[, c('longitude', 'latitude')], poly)
i = which(tmp$pip != 0)
if(len(i)) al[i, ]$county = n
}
# see crime distribution
al = al[-which(is.na(al$county)),]
plot(al$longitude, al$latitude, col = 'blue', pch = '.')
points(c$long, c$lat, col = 'red', pch = '.')
# NOTE(review): 'x' at this point is the last polygon matrix left over from the
# parsing loop above, and '$' on a matrix errors -- this looks like leftover
# debugging code; verify before running the script top-to-bottom.
plot(x$longitude, x$latitude, pch = '.')
for(i in 1:len(m$Name)) {
points(res[[i]], pch = '.', type = 'l', col = 'black')
}
i = which(is.na(al$county))
plot(al[i, ]$longitude, al[i, ]$latitude, pch = '.')
# NOTE(review): 'tmp' here is whatever pnt.in.poly result the last loop left
# behind -- also debug output, not reproducible in a clean run.
for(i in c(0,1)){
points(tmp[which(tmp$pip == i), c('longitude', 'latitude')], col = 1 + i)
}
## create a county-based dataframe
# crime
c.summary = c %>% group_by(county, offense.group) %>% summarize(count = n())
# airbnb listing
# Keep only calendar rows where the listing was actually available.
i = which(ac$available != 'f')
ac = ac[i, ]
# Per-listing price summary statistics.
ac = ac %>% group_by(listing_id) %>%
summarize(p.mean = mean(price), p.med = median(price), p.max = max(price),
p.min = min(price), p.q25 = quantile(price, .25),
p.q75 = quantile(price, .75), p.sd = sd(price))
# combine the price
al$p.mean = ac[match(al$id, ac$listing_id),]$p.mean
# see price hist
pricedis = ggplot(al, aes(x=(p.mean))) +
geom_histogram(aes(y = ..density..,fill = ..count..), bins = 30) +
geom_density() +
scale_x_log10() +
labs(x = "Average Price") +
ggtitle('Airbnb Boston Area Price Distribution') +
theme(plot.title = element_text(hjust = 0.5,face="bold"))
ggsave('PriceDistribution.png')
# use price to create color
i = which(al$host_is_superhost == 't')
al[i, ]$host_is_superhost = 'o'
al[-i, ]$host_is_superhost = 'x'
|
/Project/preliminary.r
|
no_license
|
CHENPoHeng/UM-SI618
|
R
| false
| false
| 4,556
|
r
|
# Preliminary exploration joining Boston crime incidents with Airbnb listings
# and calendar prices, allocating both point sets to neighbourhood polygons.
library(data.table)
library(dplyr)
library(ggplot2) # learn ggplot2 T_T
options(digits=5)
# Shorthand alias for length(), used throughout this script.
len = length
## load in data
# read in crime data
# NOTE(review): 'c' shadows base::c as a data object. Calls such as
# c('long','lat') still resolve to the base function (R skips non-function
# bindings when dispatching a call), but renaming this data frame is safer.
c = read.csv('bostonCrime.csv')
ac = read.csv('airbnbCalendar.csv', as.is = T) # airbnb calendar
# Strip "$" and "," so the prices parse as numbers.
ac$price = as.numeric(gsub('\\$|\\,', '', ac$price))
ac = as.data.table(ac)
ac = unique(ac)
al = read.csv('airbnbListings.csv', as.is = T) # airbnb listing
# rename
names(c) = c('incident.id', 'offense.code', 'offense.group', 'offense.dis',
'dist', 'report.area', 'shooting', 'date', 'hr', 'yr', 'mon',
'day', 'ucr', 'street', 'lat', 'long', 'loc')
# remove unlocated data
i = which(c$long == -1 | is.na(c$long))
if(len(i)) c = c[-i, ]
# remove useless column
i = which(names(al) %in% c('listing_url', 'scrape_id', 'last_scraped', 'experiences_offered',
'thumbnail_url', 'medium_url', 'picture_url','xl_picture_url','host_url',
'host_thumbnail_url', 'host_picture_url','jurisdiction_names','license',
'requires_license', 'calendar_last_scraped', 'has_availability', 'calendar_updated',
'state'))
if(len(i)) al = al[, -i]
al = data.table(al)
## take a look of crime data
## time
# crime occurance time
ggplot(c, aes(hr)) +
geom_histogram(binwidth = 1, col = 'black', fill = 'lightblue')
ggplot(c, aes(mon)) +
geom_histogram(binwidth = 1, col = 'black', fill = 'lightblue')
# perhaps the data starts from Aug '15 to Jan '17
# percentage of shooting
table(c$shooting)[2] / nrow(c) * 100
# crime categories
unique(c$offense.group)
table(c$offense.group) # make a list
tmp = c %>% group_by(offense.group) %>% summarize(p = n()/nrow(c) * 100)
tmp = tmp[with(tmp, order(-p)), ]
ggplot(as.data.frame(tmp), aes(x = offense.group, y = p)) +
geom_bar(position="dodge",stat="identity") +
coord_flip()
## airbnb
# identify_verified percentage
tmp = al %>% group_by(host_identity_verified) %>% summarize(p = n()/nrow(al)) %>% rename(hid = host_identity_verified)
# NOTE(review): melt() can come from data.table (attached above), but
# percent_format() is from the scales package, which is never attached --
# confirm library(scales) is loaded before running this plot.
ggplot(melt(tmp), aes(x = variable, y= value,fill = value)) +
geom_bar(position = "fill",stat = "identity") +
scale_y_continuous(labels = percent_format())
##### build airbnb and crime data
# use ggmap to convert all long, lat into zip
m = read.csv('isd_neighborhood_districts.csv', as.is = T)
# Parse each district's "the_geom" polygon string into an n x 2 numeric matrix
# of vertices, stored in res keyed by district name.
res = list()
for(j in 1:len(m$Name)) {
x = gsub('\\(|\\)', '', m$the_geom[j])
x = strsplit(x, " |,")[[1]]
x = as.data.frame(matrix(unlist(x), ncol = 3, byrow = T), stringsAsFactors = F)
x$V1 = NULL
x = sapply(x, as.numeric)
res[[m$Name[j]]] = x
}
# allocate crime points into each counties
# NOTE(review): SDMTools::pnt.in.poly does the point-in-polygon tests below;
# the package has since been archived on CRAN.
library(SDMTools)
clist = list()
ctmp = c[, c('long', 'lat')]
c$county = NA
for(n in m$Name){
poly = res[[n]]
tmp = pnt.in.poly(ctmp[,c("long","lat")], poly)
i = which(tmp$pip != 0)
c[i, ]$county = n
}
# allocate airbnb member into region
al$county = NA
atmp = al[, c('id', 'longitude', 'latitude')]
for(n in m$Name) {
poly = res[[n]]
tmp = pnt.in.poly(atmp[, c('longitude', 'latitude')], poly)
i = which(tmp$pip != 0)
if(len(i)) al[i, ]$county = n
}
# see crime distribution
al = al[-which(is.na(al$county)),]
plot(al$longitude, al$latitude, col = 'blue', pch = '.')
points(c$long, c$lat, col = 'red', pch = '.')
# NOTE(review): 'x' at this point is the last polygon matrix left over from the
# parsing loop above, and '$' on a matrix errors -- this looks like leftover
# debugging code; verify before running the script top-to-bottom.
plot(x$longitude, x$latitude, pch = '.')
for(i in 1:len(m$Name)) {
points(res[[i]], pch = '.', type = 'l', col = 'black')
}
i = which(is.na(al$county))
plot(al[i, ]$longitude, al[i, ]$latitude, pch = '.')
# NOTE(review): 'tmp' here is whatever pnt.in.poly result the last loop left
# behind -- also debug output, not reproducible in a clean run.
for(i in c(0,1)){
points(tmp[which(tmp$pip == i), c('longitude', 'latitude')], col = 1 + i)
}
## create a county-based dataframe
# crime
c.summary = c %>% group_by(county, offense.group) %>% summarize(count = n())
# airbnb listing
# Keep only calendar rows where the listing was actually available.
i = which(ac$available != 'f')
ac = ac[i, ]
# Per-listing price summary statistics.
ac = ac %>% group_by(listing_id) %>%
summarize(p.mean = mean(price), p.med = median(price), p.max = max(price),
p.min = min(price), p.q25 = quantile(price, .25),
p.q75 = quantile(price, .75), p.sd = sd(price))
# combine the price
al$p.mean = ac[match(al$id, ac$listing_id),]$p.mean
# see price hist
pricedis = ggplot(al, aes(x=(p.mean))) +
geom_histogram(aes(y = ..density..,fill = ..count..), bins = 30) +
geom_density() +
scale_x_log10() +
labs(x = "Average Price") +
ggtitle('Airbnb Boston Area Price Distribution') +
theme(plot.title = element_text(hjust = 0.5,face="bold"))
ggsave('PriceDistribution.png')
# use price to create color
i = which(al$host_is_superhost == 't')
al[i, ]$host_is_superhost = 'o'
al[-i, ]$host_is_superhost = 'x'
|
## Course project plot 3: the three sub-metering series for 2007-02-01/02.
## Run required libraries
library(lubridate)
library(dplyr)
## Download and unzip data file
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
if(!file.exists("./data")){dir.create("./data")}
download.file(fileUrl, destfile = "./data/powerData.zip", mode = "wb")
unzip("./data/powerData.zip", exdir = "./data")
## Read in the data ("?" marks missing values in this file); 'powerRaw'
## avoids shadowing base R's data() with a variable named 'data'.
powerRaw <- read.table("./data/household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", colClasses = c("character", "character", rep("numeric", 7)))
## Combine Date and Time into a single POSIXct timestamp (day/month/year order)
powerRaw$timeStamp <- dmy_hms(paste(powerRaw$Date, powerRaw$Time))
## tbl_df() is deprecated; as_tibble() (re-exported by dplyr) replaces it.
powerData <- as_tibble(powerRaw)
powerData$Date <- dmy(powerData$Date)
## Subsetting of the data for the dates in question (2007-02-01 and 2007-02-02)
powerDataFeb <- subset(powerData, Date > as.Date("2007-01-31") & Date < as.Date("2007-02-03"))
## Make plot 3 (Sub-meter readings over time) using R Base Plotting:
## empty frame first, then one coloured line per sub-meter.
png("plot3.png")
plot(powerDataFeb$timeStamp, powerDataFeb$Sub_metering_1, ylab = "Energy Sub-metering", xlab = "", type = "n")
lines(powerDataFeb$timeStamp, powerDataFeb$Sub_metering_1, type = "l", col = "black")
lines(powerDataFeb$timeStamp, powerDataFeb$Sub_metering_2, type = "l", col = "red")
lines(powerDataFeb$timeStamp, powerDataFeb$Sub_metering_3, type = "l", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), pch = c(95, 95, 95), col = c("black", "red", "blue"))
dev.off()
|
/plot3.R
|
no_license
|
kylelackinger/ExData_Plotting1
|
R
| false
| false
| 1,470
|
r
|
## Plot 3: household sub-metering over 2007-02-01 .. 2007-02-02 (base graphics).

## Load the libraries this script depends on
library(lubridate)
library(dplyr)

## Fetch the raw data archive and extract it into ./data
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
if (!file.exists("./data")) {
  dir.create("./data")
}
download.file(fileUrl, destfile = "./data/powerData.zip", mode = "wb")
unzip("./data/powerData.zip", exdir = "./data")

## Import: semicolon-separated, "?" marks missing values; Date and Time read
## as character, the remaining seven columns as numeric
data <- read.table("./data/household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", colClasses = c("character", "character", rep("numeric", 7)))

## Build a POSIXct timestamp from the separate Date and Time columns
timeStamp <- paste(data$Date, data$Time)
data$timeStamp <- dmy_hms(timeStamp)
powerData <- tbl_df(data)
powerData$Date <- dmy(powerData$Date)

## Keep only the two days of interest
powerDataFeb <- subset(powerData, Date > as.Date("2007-01-31") & Date < as.Date("2007-02-03"))

## Draw the three sub-meter series into plot3.png
png("plot3.png")
plot(powerDataFeb$timeStamp, powerDataFeb$Sub_metering_1, ylab = "Energy Sub-metering", xlab = "", type = "n")
sub_cols <- c(Sub_metering_1 = "black", Sub_metering_2 = "red", Sub_metering_3 = "blue")
for (nm in names(sub_cols)) {
  lines(powerDataFeb$timeStamp, powerDataFeb[[nm]], type = "l", col = sub_cols[[nm]])
}
legend("topright", legend = names(sub_cols), pch = c(95, 95, 95), col = unname(sub_cols))
dev.off()
|
##########################################################
# Metropolis sampler estimating mu and sigma of a normal model
# at the same time, via component-wise random-walk updates.

# Simulated data: n observations from Normal(mean = 7, sd = 0.5)
n <- 30
y <- rnorm( n , mean=7 , sd=0.5 )
# Log-priors; very diffuse, effectively uninformative
prior.mu <- function(theta) dnorm( theta , mean=7 , sd=1000 , log=TRUE )
prior.sigma <- function(theta) dgamma( theta , shape=0.001 , rate=0.001 , log=TRUE )
# Initial chain states
k.mu <- mean(y)
k.sigma <- sd(y)
num.samples <- 20000
sample.mu <- rep(0,num.samples)
sample.sigma <- rep(0,num.samples)
# Random-walk proposal standard deviations
step.mu <- 1/10
step.sigma <- 1/10
for ( i in seq_len(num.samples) ) {
  # record history
  sample.mu[i] <- k.mu
  sample.sigma[i] <- k.sigma
  # propose jumps for both parameters
  prop.mu <- k.mu + rnorm( 1 , mean=0 , sd=step.mu )
  prop.sigma <- k.sigma + rnorm( 1 , mean=0 , sd=step.sigma )
  prop.sigma <- max( 0.001 , prop.sigma ) # must ensure positive variance
  # Metropolis update for mu (log posterior = log likelihood + log priors)
  pr.prop <- sum( dnorm( y , mean=prop.mu , sd=k.sigma , log=TRUE ) ) + prior.mu(prop.mu) + prior.sigma(k.sigma)
  pr.here <- sum( dnorm( y , mean=k.mu , sd=k.sigma , log=TRUE ) ) + prior.mu(k.mu) + prior.sigma(k.sigma)
  pr.accept <- exp( pr.prop - pr.here )
  k.mu <- ifelse( runif(1) < pr.accept , prop.mu , k.mu )
  # Metropolis update for sigma
  # BUG FIX: recompute pr.here with the *current* k.mu. The original reused
  # pr.here from the mu step, so whenever the mu proposal was accepted the
  # sigma acceptance ratio was computed against a stale state.
  pr.here <- sum( dnorm( y , mean=k.mu , sd=k.sigma , log=TRUE ) ) + prior.mu(k.mu) + prior.sigma(k.sigma)
  pr.prop <- sum( dnorm( y , mean=k.mu , sd=prop.sigma , log=TRUE ) ) + prior.mu(k.mu) + prior.sigma(prop.sigma)
  pr.accept <- exp( pr.prop - pr.here )
  k.sigma <- ifelse( runif(1) < pr.accept , prop.sigma , k.sigma )
}
# Trace plots of the two chains
par(mfrow=c(2,2))
plot( sample.mu , col="slateblue" , main="samples of mu" , type="l" )
plot( sample.sigma , col="slateblue" , main="samples of sigma" , type="l" )
# Discard the first 5000 iterations as burn-in
sample.mu <- sample.mu[-(1:5000)]
sample.sigma <- sample.sigma[-(1:5000)]
# Posterior density estimates
plot( density(sample.mu) , col="slateblue" , lwd=2 , main="posterior mu" )
plot( density(sample.sigma) , col="slateblue" , lwd=2 , main="posterior sigma" )
# Diagnostics: within-chain autocorrelation and between-chain cross-correlation
par(mfrow=c(3,1))
acf( sample.mu )
acf( sample.sigma )
ccf( sample.mu , sample.sigma ) # cross-correlation
|
/OldStuff/metropolis2.r
|
no_license
|
npp97-field/Working-Code
|
R
| false
| false
| 2,146
|
r
|
##########################################################
# Metropolis sampler estimating mu and sigma of a normal model
# at the same time, via component-wise random-walk updates.

# Simulated data: n observations from Normal(mean = 7, sd = 0.5)
n <- 30
y <- rnorm( n , mean=7 , sd=0.5 )
# Log-priors; very diffuse, effectively uninformative
prior.mu <- function(theta) dnorm( theta , mean=7 , sd=1000 , log=TRUE )
prior.sigma <- function(theta) dgamma( theta , shape=0.001 , rate=0.001 , log=TRUE )
# Initial chain states
k.mu <- mean(y)
k.sigma <- sd(y)
num.samples <- 20000
sample.mu <- rep(0,num.samples)
sample.sigma <- rep(0,num.samples)
# Random-walk proposal standard deviations
step.mu <- 1/10
step.sigma <- 1/10
for ( i in seq_len(num.samples) ) {
  # record history
  sample.mu[i] <- k.mu
  sample.sigma[i] <- k.sigma
  # propose jumps for both parameters
  prop.mu <- k.mu + rnorm( 1 , mean=0 , sd=step.mu )
  prop.sigma <- k.sigma + rnorm( 1 , mean=0 , sd=step.sigma )
  prop.sigma <- max( 0.001 , prop.sigma ) # must ensure positive variance
  # Metropolis update for mu (log posterior = log likelihood + log priors)
  pr.prop <- sum( dnorm( y , mean=prop.mu , sd=k.sigma , log=TRUE ) ) + prior.mu(prop.mu) + prior.sigma(k.sigma)
  pr.here <- sum( dnorm( y , mean=k.mu , sd=k.sigma , log=TRUE ) ) + prior.mu(k.mu) + prior.sigma(k.sigma)
  pr.accept <- exp( pr.prop - pr.here )
  k.mu <- ifelse( runif(1) < pr.accept , prop.mu , k.mu )
  # Metropolis update for sigma
  # BUG FIX: recompute pr.here with the *current* k.mu. The original reused
  # pr.here from the mu step, so whenever the mu proposal was accepted the
  # sigma acceptance ratio was computed against a stale state.
  pr.here <- sum( dnorm( y , mean=k.mu , sd=k.sigma , log=TRUE ) ) + prior.mu(k.mu) + prior.sigma(k.sigma)
  pr.prop <- sum( dnorm( y , mean=k.mu , sd=prop.sigma , log=TRUE ) ) + prior.mu(k.mu) + prior.sigma(prop.sigma)
  pr.accept <- exp( pr.prop - pr.here )
  k.sigma <- ifelse( runif(1) < pr.accept , prop.sigma , k.sigma )
}
# Trace plots of the two chains
par(mfrow=c(2,2))
plot( sample.mu , col="slateblue" , main="samples of mu" , type="l" )
plot( sample.sigma , col="slateblue" , main="samples of sigma" , type="l" )
# Discard the first 5000 iterations as burn-in
sample.mu <- sample.mu[-(1:5000)]
sample.sigma <- sample.sigma[-(1:5000)]
# Posterior density estimates
plot( density(sample.mu) , col="slateblue" , lwd=2 , main="posterior mu" )
plot( density(sample.sigma) , col="slateblue" , lwd=2 , main="posterior sigma" )
# Diagnostics: within-chain autocorrelation and between-chain cross-correlation
par(mfrow=c(3,1))
acf( sample.mu )
acf( sample.sigma )
ccf( sample.mu , sample.sigma ) # cross-correlation
|
# google sheets : https://docs.google.com/spreadsheets/d/1AxvMtxLIJUWyfXUgM_LLo5XV5Bj7ODqcu6JMEtBelE4/edit?usp=sharing
# Scrape a summary table of every company listed on the Shanghai (SH) and
# Shenzhen (SZ) stock exchanges from hq.gucheng.com: name, ticker, market
# segment, location, industry, floating shares, reporting date and key
# financials (revenue, net profit, gross margin, net assets, ROE, BPS, EPS,
# PER, PBR, CPS).
# save_csv_file: if TRUE, also write the table to "SH_SZ_Table.csv".
# The result is displayed with View(); no useful value is returned.
SHSZ_table = function(save_csv_file = FALSE){
library(rvest)     # HTML parsing/scraping (also re-exports %>%)
library(httr)      # HTTP requests (GET)
library(progress)  # console progress bar
# Anchor nodes of the master listing page, one per company
url = "https://hq.gucheng.com/gpdmylb.html" %>%
read_html() %>%
html_nodes("#stock_index_right > div > section > a")
# Link text ends in "(<6-digit code>)"; split it into name and code
name = url %>% html_text() %>% substr(.,1,(nchar(.))-8) %>% gsub(" ","",x=.)
code = url %>% html_text() %>% substr(.,(nchar(.))-6,(nchar(.))-1)
url2 = url %>% html_attr("href")  # per-company detail-page URLs
# Result frame. Korean column names (kept verbatim), in order: company name,
# ticker, market segment, location, industry, floating shares, reporting
# date, revenue (100M CNY), net profit (100M CNY), gross margin,
# net assets (100M CNY), then the labelled ratios.
table = data.frame("회사명"= name,
"종목코드" = code,
"시장구분" = NA,
"소재지"=NA,
"업종"=NA,
"유통주식수" = NA,
"기준일" = NA,
"매출액(억위안)" = NA,
"순이익(억위안)" = NA,
"매출총이익률" =NA,
"순자산(억위안)" = NA,
"ROE" = NA,
"BPS" = NA,
"EPS" = NA,
"PER" = NA,
"PBR" = NA,
"CPS" = NA)
# Classify the market segment from the ticker prefix
for (i in 1:nrow(table)) {
if(substr(table[i,2],1,3)=='000'){
table[i,3] = "SZ-A" # SZ-A: Shenzhen A-shares
} else if(substr(table[i,2],1,3)=="200") {
table[i,3] = "SZ-B" # SZ-B: Shenzhen B-shares
} else if(substr(table[i,2],1,3)=="002") {
table[i,3] = "SZ-ZXB" # SZ-ZXB: Shenzhen SME board
} else if(substr(table[i,2],1,3)=="300") {
table[i,3] = "SZ-CYB" # SZ-CYB: Shenzhen ChiNext (growth) board
} else if(substr(table[i,2],1,2)=="60"){
table[i,3] = "SH-A" # SH-A: Shanghai A-shares
} else if(substr(table[i,2],1,2)=="90"){
table[i,3] = "SH-B" # SH-B: Shanghai B-shares
} else if(substr(table[i,2],1,3)=="399"){
table[i,3] = "SZ-INDEX" # Shenzhen index
} else if(substr(table[i,2],1,3)=="010"){
table[i,3] = "BOND"
} else if(substr(table[i,2],1,3)=="019"){
table[i,3] = "BOND"
} else {
table[i,3] = "INDEX"
}
}
pb = progress_bar$new(total = nrow(table))
# NOTE(review): this View() looks like a debugging leftover -- it pops up the
# half-filled table before any scraping happens.
View(table)
# Visit each company page and fill columns 4-17. try() keeps a single failed
# page from aborting the whole run; that row simply stays NA.
for (i in 1:nrow(table)) {
try({
uu = GET(url2[i])
# Financial-summary table node
tb = uu %>%
read_html() %>%
html_nodes(css = 'div.hq_wrap_right > section:nth-child(9) > div > table')
# Company-info panel node
cominfo = uu %>%
read_html() %>%
html_nodes(css='#hq_wrap > div.hq_wrap_right > section.stock_company > div > div.stock_company_info.clearfix')
# Column 4: location (strip the leading label text)
table[i,4] = cominfo %>%
html_nodes('p:nth-child(2)') %>%
html_text(trim = TRUE) %>%
substr(.,6,nchar(.))
# Column 5: industry
table[i,5] = cominfo %>%
html_nodes('p:nth-child(3)') %>%
html_text(trim = TRUE) %>%
substr(.,6,nchar(.))
# Column 6: floating shares
table[i,6] = cominfo %>%
html_nodes('p:nth-child(8)') %>%
html_text(trim = TRUE) %>%
substr(.,6,nchar(.))
# Column 7: reporting date
table[i,7] = tb %>%
html_node(css = 'tbody > tr.stock_table_tr > td:nth-child(2) > div') %>%
html_text(trim = TRUE)
# Column 8: revenue
table[i,8] = tb %>%
html_node(css = 'tbody > tr:nth-child(7) > td:nth-child(5) > div') %>%
html_text(trim = TRUE)
# Column 9: net profit
table[i,9] = tb %>%
html_node(css = 'tbody > tr:nth-child(2) > td:nth-child(6) > div') %>%
html_text(trim = TRUE)
# Column 10: gross margin
table[i,10] = tb %>%
html_node(css = 'tbody > tr:nth-child(3) > td:nth-child(4) > div') %>%
html_text(trim = TRUE)
# Column 11: net assets
table[i,11] = tb %>%
html_node(css = 'tbody > tr:nth-child(7) > td:nth-child(2) > div') %>%
html_text(trim = TRUE)
# Column 12: ROE
table[i,12] = tb %>%
html_node(css = 'tbody > tr:nth-child(2) > td:nth-child(3) > div') %>%
html_text(trim = TRUE)
# Column 13: BPS
table[i,13] = tb %>%
html_node(css = 'tbody > tr:nth-child(3) > td:nth-child(2) > div') %>%
html_text(trim = TRUE)
# Column 14: EPS
# NOTE(review): this CSS selector is identical to the ROE one above
# (tr:nth-child(2) > td:nth-child(3)), so EPS duplicates ROE -- likely a
# copy-paste slip; verify against the page layout.
table[i,14] = tb %>%
html_node(css = 'tbody > tr:nth-child(2) > td:nth-child(3) > div') %>%
html_text(trim = TRUE)
# Column 15: PER
table[i,15] = tb %>%
html_node(css = 'tbody > tr:nth-child(6) > td:nth-child(3) > div') %>%
html_text(trim = TRUE)
# Column 16: PBR
table[i,16] = tb %>%
html_node(css = 'tbody > tr:nth-child(4) > td:nth-child(5) > div') %>%
html_text(trim = TRUE)
# Column 17: CPS
table[i,17] = tb %>%
html_node(css = 'tbody > tr:nth-child(5) > td:nth-child(4) > div') %>%
html_text(trim = TRUE)
},silent = T)
pb$tick()
}
# Localize units in the text columns: CN "股" -> KR "주" (shares),
# "年"/"月" -> "년"/"월" (year/month)
table[,6] = table[,6] %>%
gsub("股","주",x=.)
table[,7] = table[,7] %>%
gsub("年","년",x=.) %>%
gsub("月","월",x=.)
# Save and/or display the finished table
if(save_csv_file == TRUE){
write.csv(table,"SH_SZ_Table.csv")
print("csv파일로 저장되었습니다. 파일명:SH_SZ_Table.csv")
View(table)
} else {
print("파일을 저장하지 않습니다.")
View(table)
}
}
|
/[중국] 전 상장기업 재무요약.R
|
no_license
|
Hong-Sung-Hyun/R-code-for-finance
|
R
| false
| false
| 5,001
|
r
|
# google sheets : https://docs.google.com/spreadsheets/d/1AxvMtxLIJUWyfXUgM_LLo5XV5Bj7ODqcu6JMEtBelE4/edit?usp=sharing
# Scrape a summary table of every company listed on the Shanghai (SH) and
# Shenzhen (SZ) stock exchanges from hq.gucheng.com: name, ticker, market
# segment, location, industry, floating shares, reporting date and key
# financials (revenue, net profit, gross margin, net assets, ROE, BPS, EPS,
# PER, PBR, CPS).
# save_csv_file: if TRUE, also write the table to "SH_SZ_Table.csv".
# The result is displayed with View(); no useful value is returned.
SHSZ_table = function(save_csv_file = FALSE){
library(rvest)     # HTML parsing/scraping (also re-exports %>%)
library(httr)      # HTTP requests (GET)
library(progress)  # console progress bar
# Anchor nodes of the master listing page, one per company
url = "https://hq.gucheng.com/gpdmylb.html" %>%
read_html() %>%
html_nodes("#stock_index_right > div > section > a")
# Link text ends in "(<6-digit code>)"; split it into name and code
name = url %>% html_text() %>% substr(.,1,(nchar(.))-8) %>% gsub(" ","",x=.)
code = url %>% html_text() %>% substr(.,(nchar(.))-6,(nchar(.))-1)
url2 = url %>% html_attr("href")  # per-company detail-page URLs
# Result frame. Korean column names (kept verbatim), in order: company name,
# ticker, market segment, location, industry, floating shares, reporting
# date, revenue (100M CNY), net profit (100M CNY), gross margin,
# net assets (100M CNY), then the labelled ratios.
table = data.frame("회사명"= name,
"종목코드" = code,
"시장구분" = NA,
"소재지"=NA,
"업종"=NA,
"유통주식수" = NA,
"기준일" = NA,
"매출액(억위안)" = NA,
"순이익(억위안)" = NA,
"매출총이익률" =NA,
"순자산(억위안)" = NA,
"ROE" = NA,
"BPS" = NA,
"EPS" = NA,
"PER" = NA,
"PBR" = NA,
"CPS" = NA)
# Classify the market segment from the ticker prefix
for (i in 1:nrow(table)) {
if(substr(table[i,2],1,3)=='000'){
table[i,3] = "SZ-A" # SZ-A: Shenzhen A-shares
} else if(substr(table[i,2],1,3)=="200") {
table[i,3] = "SZ-B" # SZ-B: Shenzhen B-shares
} else if(substr(table[i,2],1,3)=="002") {
table[i,3] = "SZ-ZXB" # SZ-ZXB: Shenzhen SME board
} else if(substr(table[i,2],1,3)=="300") {
table[i,3] = "SZ-CYB" # SZ-CYB: Shenzhen ChiNext (growth) board
} else if(substr(table[i,2],1,2)=="60"){
table[i,3] = "SH-A" # SH-A: Shanghai A-shares
} else if(substr(table[i,2],1,2)=="90"){
table[i,3] = "SH-B" # SH-B: Shanghai B-shares
} else if(substr(table[i,2],1,3)=="399"){
table[i,3] = "SZ-INDEX" # Shenzhen index
} else if(substr(table[i,2],1,3)=="010"){
table[i,3] = "BOND"
} else if(substr(table[i,2],1,3)=="019"){
table[i,3] = "BOND"
} else {
table[i,3] = "INDEX"
}
}
pb = progress_bar$new(total = nrow(table))
# NOTE(review): this View() looks like a debugging leftover -- it pops up the
# half-filled table before any scraping happens.
View(table)
# Visit each company page and fill columns 4-17. try() keeps a single failed
# page from aborting the whole run; that row simply stays NA.
for (i in 1:nrow(table)) {
try({
uu = GET(url2[i])
# Financial-summary table node
tb = uu %>%
read_html() %>%
html_nodes(css = 'div.hq_wrap_right > section:nth-child(9) > div > table')
# Company-info panel node
cominfo = uu %>%
read_html() %>%
html_nodes(css='#hq_wrap > div.hq_wrap_right > section.stock_company > div > div.stock_company_info.clearfix')
# Column 4: location (strip the leading label text)
table[i,4] = cominfo %>%
html_nodes('p:nth-child(2)') %>%
html_text(trim = TRUE) %>%
substr(.,6,nchar(.))
# Column 5: industry
table[i,5] = cominfo %>%
html_nodes('p:nth-child(3)') %>%
html_text(trim = TRUE) %>%
substr(.,6,nchar(.))
# Column 6: floating shares
table[i,6] = cominfo %>%
html_nodes('p:nth-child(8)') %>%
html_text(trim = TRUE) %>%
substr(.,6,nchar(.))
# Column 7: reporting date
table[i,7] = tb %>%
html_node(css = 'tbody > tr.stock_table_tr > td:nth-child(2) > div') %>%
html_text(trim = TRUE)
# Column 8: revenue
table[i,8] = tb %>%
html_node(css = 'tbody > tr:nth-child(7) > td:nth-child(5) > div') %>%
html_text(trim = TRUE)
# Column 9: net profit
table[i,9] = tb %>%
html_node(css = 'tbody > tr:nth-child(2) > td:nth-child(6) > div') %>%
html_text(trim = TRUE)
# Column 10: gross margin
table[i,10] = tb %>%
html_node(css = 'tbody > tr:nth-child(3) > td:nth-child(4) > div') %>%
html_text(trim = TRUE)
# Column 11: net assets
table[i,11] = tb %>%
html_node(css = 'tbody > tr:nth-child(7) > td:nth-child(2) > div') %>%
html_text(trim = TRUE)
# Column 12: ROE
table[i,12] = tb %>%
html_node(css = 'tbody > tr:nth-child(2) > td:nth-child(3) > div') %>%
html_text(trim = TRUE)
# Column 13: BPS
table[i,13] = tb %>%
html_node(css = 'tbody > tr:nth-child(3) > td:nth-child(2) > div') %>%
html_text(trim = TRUE)
# Column 14: EPS
# NOTE(review): this CSS selector is identical to the ROE one above
# (tr:nth-child(2) > td:nth-child(3)), so EPS duplicates ROE -- likely a
# copy-paste slip; verify against the page layout.
table[i,14] = tb %>%
html_node(css = 'tbody > tr:nth-child(2) > td:nth-child(3) > div') %>%
html_text(trim = TRUE)
# Column 15: PER
table[i,15] = tb %>%
html_node(css = 'tbody > tr:nth-child(6) > td:nth-child(3) > div') %>%
html_text(trim = TRUE)
# Column 16: PBR
table[i,16] = tb %>%
html_node(css = 'tbody > tr:nth-child(4) > td:nth-child(5) > div') %>%
html_text(trim = TRUE)
# Column 17: CPS
table[i,17] = tb %>%
html_node(css = 'tbody > tr:nth-child(5) > td:nth-child(4) > div') %>%
html_text(trim = TRUE)
},silent = T)
pb$tick()
}
# Localize units in the text columns: CN "股" -> KR "주" (shares),
# "年"/"月" -> "년"/"월" (year/month)
table[,6] = table[,6] %>%
gsub("股","주",x=.)
table[,7] = table[,7] %>%
gsub("年","년",x=.) %>%
gsub("月","월",x=.)
# Save and/or display the finished table
if(save_csv_file == TRUE){
write.csv(table,"SH_SZ_Table.csv")
print("csv파일로 저장되었습니다. 파일명:SH_SZ_Table.csv")
View(table)
} else {
print("파일을 저장하지 않습니다.")
View(table)
}
}
|
# this is the start of a new file
# should remember to add output to every branch

# rankall(): for every state, return the hospital holding the given rank on
# 30-day mortality for one of "heart attack", "heart failure" or "pneumonia".
# num is a numeric rank, "best" (rank 1) or "worst" (last non-NA rank).
# Returns a data frame (hospital, state, outcome, mortality) ordered by the
# abbreviated state name. Stops with "invalid outcome" for other outcomes.
rankall <- function(outcome, num = "best") {
  ## set working directory (side effect kept from the original script)
  setwd("/Users/lingwei/Desktop/coursera/rprog_data_ProgAssignment3-data")
  ## Read outcome data; "Not Available" becomes NA so the mortality columns
  ## are parsed as numeric
  outcome_by_state <- read.csv("outcome-of-care-measures.csv", na.strings = "Not Available", header = TRUE, stringsAsFactors = FALSE)
  ## Check that the requested outcome is valid
  if (!(outcome %in% c("heart attack", "heart failure", "pneumonia"))) {
    stop("invalid outcome")
  }
  ## Map the outcome name to its mortality-rate column index
  index <- switch(outcome,
                  "heart attack" = 11,
                  "heart failure" = 17,
                  23)
  if (num == "best") {
    num <- 1
  }
  outcome_all <- data.frame()
  ## For each state, find the hospital of the given rank
  for (state in unique(outcome_by_state$State)) {
    state_data <- outcome_by_state[outcome_by_state$State == state, ]
    ## Sort by mortality then hospital name (alphabetical tie-break), keeping
    ## only the hospital-name, mortality and state columns
    state_data <- state_data[order(state_data[, index], state_data[, 2]), c(2, index, 7)]
    colnames(state_data) <- c("hospital", "outcome", "state")
    ## Resolve the requested rank to a row number.
    ## BUG FIX: the original called complete.cases(state_data[, index]) after
    ## the frame had been cut down to 3 columns (column 11/17/23 no longer
    ## exists -> "undefined columns" error), and indexed the hospital column
    ## with the literal string "worst" (-> NA row).
    if (num == "worst") {
      rank_row <- sum(complete.cases(state_data[, "outcome"]))
    } else {
      rank_row <- num
    }
    s <- state_data[rank_row, "outcome"]
    outcome_all <- rbind(outcome_all,
                         cbind(state_data[rank_row, "hospital"], state, outcome, s))
  }
  ## Return a data frame with the hospital names and the
  ## 30-day death rate, ordered by (abbreviated) state name
  return(outcome_all[order(outcome_all$state), ])
}
# This is another way to loop over states
# NOTE(review): rankall_2 is an unfinished draft and will not run as written;
# see the inline notes below. Kept verbatim pending completion.
rankall_2 <- function(outcome, num = "best") {
## set working directory
setwd("/Users/lingwei/Desktop/coursera/rprog_data_ProgAssignment3-data")
## Read outcome data
## add na.strings = "Not Available", to avoid the problem of not getting NA
outcome_by_state <- read.csv("outcome-of-care-measures.csv", na.strings = "Not Available", header = TRUE, stringsAsFactors = FALSE)
## filter out data of three outcomes and shuffle data by hospital names and mortality
## Return hospital name in that state with the given rank
## (NOTE(review): "pheumonia" below is a typo for "pneumonia" in a runtime
## column name -- left untouched; fix alongside any code that reads it)
data_mortality <- outcome_by_state[, c(2, 7, 11, 17, 23)]
names(data_mortality) <- c("hospital", "state", "heart attack", "heart failure", "pheumonia")
## Check that state and outcome are valid
if (outcome %in% c("heart attack","heart failure","pneumonia") == FALSE){
stop("invalid outcome")
}
else{
## For each state, find the hospital of the given rank
## NOTE(review): incomplete call -- `State` is unqualified (presumably
## outcome_by_state$State) and the FUN argument of lapply() is missing,
## so this line fails at run time.
lapply(split(outcome_by_state, State), )
## NOTE(review): find_hospital references state_data and index, neither of
## which is defined in its scope, and the function is never called.
find_hospital <- function(state, num, outcome){
# shuffle state data
state_data<- state_data[order(state_data[, outcome], state_data[, "hospital"]), ]
if (num == "worst"){
num_1 <- sum(complete.cases(state_data[, index]))
s <- state_data[num_1, outcome]
}
else {
s <- state_data[num, outcome]
}
}
## Return a data frame with the hospital names and the
## 30-day death rate
## (abbreviated) state name
## NOTE(review): outcome_all is never created anywhere in this function.
return(outcome_all[order(outcome_all$state), ])
}
}
|
/rankall.R
|
no_license
|
Lingwei-oy/ProgrammingAssignment3
|
R
| false
| false
| 5,701
|
r
|
# this is the start of a new file
# should remember to add output to every branch

# rankall(): for every state, return the hospital holding the given rank on
# 30-day mortality for one of "heart attack", "heart failure" or "pneumonia".
# num is a numeric rank, "best" (rank 1) or "worst" (last non-NA rank).
# Returns a data frame (hospital, state, outcome, mortality) ordered by the
# abbreviated state name. Stops with "invalid outcome" for other outcomes.
rankall <- function(outcome, num = "best") {
  ## set working directory (side effect kept from the original script)
  setwd("/Users/lingwei/Desktop/coursera/rprog_data_ProgAssignment3-data")
  ## Read outcome data; "Not Available" becomes NA so the mortality columns
  ## are parsed as numeric
  outcome_by_state <- read.csv("outcome-of-care-measures.csv", na.strings = "Not Available", header = TRUE, stringsAsFactors = FALSE)
  ## Check that the requested outcome is valid
  if (!(outcome %in% c("heart attack", "heart failure", "pneumonia"))) {
    stop("invalid outcome")
  }
  ## Map the outcome name to its mortality-rate column index
  index <- switch(outcome,
                  "heart attack" = 11,
                  "heart failure" = 17,
                  23)
  if (num == "best") {
    num <- 1
  }
  outcome_all <- data.frame()
  ## For each state, find the hospital of the given rank
  for (state in unique(outcome_by_state$State)) {
    state_data <- outcome_by_state[outcome_by_state$State == state, ]
    ## Sort by mortality then hospital name (alphabetical tie-break), keeping
    ## only the hospital-name, mortality and state columns
    state_data <- state_data[order(state_data[, index], state_data[, 2]), c(2, index, 7)]
    colnames(state_data) <- c("hospital", "outcome", "state")
    ## Resolve the requested rank to a row number.
    ## BUG FIX: the original called complete.cases(state_data[, index]) after
    ## the frame had been cut down to 3 columns (column 11/17/23 no longer
    ## exists -> "undefined columns" error), and indexed the hospital column
    ## with the literal string "worst" (-> NA row).
    if (num == "worst") {
      rank_row <- sum(complete.cases(state_data[, "outcome"]))
    } else {
      rank_row <- num
    }
    s <- state_data[rank_row, "outcome"]
    outcome_all <- rbind(outcome_all,
                         cbind(state_data[rank_row, "hospital"], state, outcome, s))
  }
  ## Return a data frame with the hospital names and the
  ## 30-day death rate, ordered by (abbreviated) state name
  return(outcome_all[order(outcome_all$state), ])
}
# This is another way to loop over states
# NOTE(review): rankall_2 is an unfinished draft and will not run as written;
# see the inline notes below. Kept verbatim pending completion.
rankall_2 <- function(outcome, num = "best") {
## set working directory
setwd("/Users/lingwei/Desktop/coursera/rprog_data_ProgAssignment3-data")
## Read outcome data
## add na.strings = "Not Available", to avoid the problem of not getting NA
outcome_by_state <- read.csv("outcome-of-care-measures.csv", na.strings = "Not Available", header = TRUE, stringsAsFactors = FALSE)
## filter out data of three outcomes and shuffle data by hospital names and mortality
## Return hospital name in that state with the given rank
## (NOTE(review): "pheumonia" below is a typo for "pneumonia" in a runtime
## column name -- left untouched; fix alongside any code that reads it)
data_mortality <- outcome_by_state[, c(2, 7, 11, 17, 23)]
names(data_mortality) <- c("hospital", "state", "heart attack", "heart failure", "pheumonia")
## Check that state and outcome are valid
if (outcome %in% c("heart attack","heart failure","pneumonia") == FALSE){
stop("invalid outcome")
}
else{
## For each state, find the hospital of the given rank
## NOTE(review): incomplete call -- `State` is unqualified (presumably
## outcome_by_state$State) and the FUN argument of lapply() is missing,
## so this line fails at run time.
lapply(split(outcome_by_state, State), )
## NOTE(review): find_hospital references state_data and index, neither of
## which is defined in its scope, and the function is never called.
find_hospital <- function(state, num, outcome){
# shuffle state data
state_data<- state_data[order(state_data[, outcome], state_data[, "hospital"]), ]
if (num == "worst"){
num_1 <- sum(complete.cases(state_data[, index]))
s <- state_data[num_1, outcome]
}
else {
s <- state_data[num, outcome]
}
}
## Return a data frame with the hospital names and the
## 30-day death rate
## (abbreviated) state name
## NOTE(review): outcome_all is never created anywhere in this function.
return(outcome_all[order(outcome_all$state), ])
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CentSim2D.R
\name{CSarc.dens.test}
\alias{CSarc.dens.test}
\title{A test of segregation/association based on arc density of Central Similarity Proximity Catch Digraph
(CS-PCD) for 2D data}
\usage{
CSarc.dens.test(
Xp,
Yp,
t,
ch.cor = FALSE,
alternative = c("two.sided", "less", "greater"),
conf.level = 0.95
)
}
\arguments{
\item{Xp}{A set of 2D points which constitute the vertices of the CS-PCD.}
\item{Yp}{A set of 2D points which constitute the vertices of the Delaunay triangles.}
\item{t}{A positive real number which serves as the expansion parameter in CS proximity region.}
\item{ch.cor}{A logical argument for convex hull correction, default \code{ch.cor=FALSE},
recommended when both \code{Xp} and \code{Yp} have the same rectangular support.}
\item{alternative}{Type of the alternative hypothesis in the test, one of \code{"two.sided"}, \code{"less"}, \code{"greater"}.}
\item{conf.level}{Level of the confidence interval, default is \code{0.95}, for the arc density of CS-PCD based on
the 2D data set \code{Xp}.}
}
\value{
A \code{list} with the elements
\item{statistic}{Test statistic}
\item{p.value}{The \eqn{p}-value for the hypothesis test for the corresponding \code{alternative}}
\item{conf.int}{Confidence interval for the arc density at the given confidence level \code{conf.level} and
depends on the type of \code{alternative}.}
\item{estimate}{Estimate of the parameter, i.e., arc density}
\item{null.value}{Hypothesized value for the parameter, i.e., the null arc density, which is usually the
mean arc density under uniform distribution.}
\item{alternative}{Type of the alternative hypothesis in the test, one of \code{"two.sided"}, \code{"less"}, \code{"greater"}}
\item{method}{Description of the hypothesis test}
\item{data.name}{Name of the data set}
}
\description{
An object of class \code{"htest"} (i.e., hypothesis test) function which performs a hypothesis test of complete spatial
randomness (CSR) or uniformity of \code{Xp} points in the convex hull of \code{Yp} points against the alternatives
of segregation (where \code{Xp} points cluster away from \code{Yp} points) and association (where \code{Xp} points cluster around
\code{Yp} points) based on the normal approximation of the arc density of the CS-PCD for uniform 2D data
in the convex hull of \code{Yp} points.
The function yields the test statistic, \eqn{p}-value for the corresponding \code{alternative},
the confidence interval, estimate and null value for the parameter of interest (which is the arc density),
and method and name of the data set used.
Under the null hypothesis of uniformity of \code{Xp} points in the convex hull of \code{Yp} points, arc density
of CS-PCD whose vertices are \code{Xp} points equals to its expected value under the uniform distribution and
\code{alternative} could be two-sided, or left-sided (i.e., data is accumulated around the \code{Yp} points, or association)
or right-sided (i.e., data is accumulated around the centers of the triangles, or segregation).
CS proximity region is constructed with the expansion parameter \eqn{t>0} and \eqn{CM}-edge regions
(i.e., the test is not available for a general center \eqn{M} at this version of the function).
**Caveat:** This test is currently a conditional test, where \code{Xp} points are assumed to be random, while \code{Yp} points are
assumed to be fixed (i.e., the test is conditional on \code{Yp} points).
Furthermore, the test is a large sample test when \code{Xp} points are substantially larger than \code{Yp} points,
say at least 5 times more.
This test is more appropriate when the supports of \code{Xp} and \code{Yp} have a substantial overlap.
Currently, the \code{Xp} points outside the convex hull of \code{Yp} points are handled with a convex hull correction factor
(see the description below and the function code).
However, in the special case of no \code{Xp} points in the convex hull of \code{Yp} points, arc density is taken to be 1,
as this is clearly a case of segregation. Removing the conditioning and extending it to the case of non-concurring supports is
an ongoing line of research of the author of the package.
\code{ch.cor} is for convex hull correction (default is \code{"no convex hull correction"}, i.e., \code{ch.cor=FALSE})
which is recommended when both \code{Xp} and \code{Yp} have the same rectangular support.
See also (\insertCite{ceyhan:Phd-thesis,ceyhan:arc-density-CS,ceyhan:test2014;textual}{pcds}).
}
\examples{
\dontrun{
#nx is number of X points (target) and ny is number of Y points (nontarget)
nx<-100; ny<-5; #try also nx<-40; ny<-10 or nx<-1000; ny<-10;
set.seed(1)
Xp<-cbind(runif(nx),runif(nx))
Yp<-cbind(runif(ny,0,.25),runif(ny,0,.25))+cbind(c(0,0,0.5,1,1),c(0,1,.5,0,1))
#try also Yp<-cbind(runif(ny,0,1),runif(ny,0,1))
plotDelaunay.tri(Xp,Yp,xlab="",ylab = "")
CSarc.dens.test(Xp,Yp,t=.5)
CSarc.dens.test(Xp,Yp,t=.5,ch=TRUE)
#try also t=1.0 and 1.5 above
}
}
\references{
\insertAllCited{}
}
\seealso{
\code{\link{PEarc.dens.test}} and \code{\link{CSarc.dens.test1D}}
}
\author{
Elvan Ceyhan
}
|
/man/CSarc.dens.test.Rd
|
no_license
|
elvanceyhan/pcds
|
R
| false
| true
| 5,135
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CentSim2D.R
\name{CSarc.dens.test}
\alias{CSarc.dens.test}
\title{A test of segregation/association based on arc density of Central Similarity Proximity Catch Digraph
(CS-PCD) for 2D data}
\usage{
CSarc.dens.test(
Xp,
Yp,
t,
ch.cor = FALSE,
alternative = c("two.sided", "less", "greater"),
conf.level = 0.95
)
}
\arguments{
\item{Xp}{A set of 2D points which constitute the vertices of the CS-PCD.}
\item{Yp}{A set of 2D points which constitute the vertices of the Delaunay triangles.}
\item{t}{A positive real number which serves as the expansion parameter in CS proximity region.}
\item{ch.cor}{A logical argument for convex hull correction, default \code{ch.cor=FALSE},
recommended when both \code{Xp} and \code{Yp} have the same rectangular support.}
\item{alternative}{Type of the alternative hypothesis in the test, one of \code{"two.sided"}, \code{"less"}, \code{"greater"}.}
\item{conf.level}{Level of the confidence interval, default is \code{0.95}, for the arc density of CS-PCD based on
the 2D data set \code{Xp}.}
}
\value{
A \code{list} with the elements
\item{statistic}{Test statistic}
\item{p.value}{The \eqn{p}-value for the hypothesis test for the corresponding \code{alternative}}
\item{conf.int}{Confidence interval for the arc density at the given confidence level \code{conf.level} and
depends on the type of \code{alternative}.}
\item{estimate}{Estimate of the parameter, i.e., arc density}
\item{null.value}{Hypothesized value for the parameter, i.e., the null arc density, which is usually the
mean arc density under uniform distribution.}
\item{alternative}{Type of the alternative hypothesis in the test, one of \code{"two.sided"}, \code{"less"}, \code{"greater"}}
\item{method}{Description of the hypothesis test}
\item{data.name}{Name of the data set}
}
\description{
An object of class \code{"htest"} (i.e., hypothesis test) function which performs a hypothesis test of complete spatial
randomness (CSR) or uniformity of \code{Xp} points in the convex hull of \code{Yp} points against the alternatives
of segregation (where \code{Xp} points cluster away from \code{Yp} points) and association (where \code{Xp} points cluster around
\code{Yp} points) based on the normal approximation of the arc density of the CS-PCD for uniform 2D data
in the convex hull of \code{Yp} points.
The function yields the test statistic, \eqn{p}-value for the corresponding \code{alternative},
the confidence interval, estimate and null value for the parameter of interest (which is the arc density),
and method and name of the data set used.
Under the null hypothesis of uniformity of \code{Xp} points in the convex hull of \code{Yp} points, arc density
of CS-PCD whose vertices are \code{Xp} points equals to its expected value under the uniform distribution and
\code{alternative} could be two-sided, or left-sided (i.e., data is accumulated around the \code{Yp} points, or association)
or right-sided (i.e., data is accumulated around the centers of the triangles, or segregation).
CS proximity region is constructed with the expansion parameter \eqn{t>0} and \eqn{CM}-edge regions
(i.e., the test is not available for a general center \eqn{M} at this version of the function).
**Caveat:** This test is currently a conditional test, where \code{Xp} points are assumed to be random, while \code{Yp} points are
assumed to be fixed (i.e., the test is conditional on \code{Yp} points).
Furthermore, the test is a large sample test when \code{Xp} points are substantially larger than \code{Yp} points,
say at least 5 times more.
This test is more appropriate when the supports of \code{Xp} and \code{Yp} have a substantial overlap.
Currently, the \code{Xp} points outside the convex hull of \code{Yp} points are handled with a convex hull correction factor
(see the description below and the function code).
However, in the special case of no \code{Xp} points in the convex hull of \code{Yp} points, arc density is taken to be 1,
as this is clearly a case of segregation. Removing the conditioning and extending it to the case of non-concurring supports is
an ongoing line of research of the author of the package.
\code{ch.cor} is for convex hull correction (default is \code{"no convex hull correction"}, i.e., \code{ch.cor=FALSE})
which is recommended when both \code{Xp} and \code{Yp} have the same rectangular support.
See also (\insertCite{ceyhan:Phd-thesis,ceyhan:arc-density-CS,ceyhan:test2014;textual}{pcds}).
}
\examples{
\dontrun{
#nx is number of X points (target) and ny is number of Y points (nontarget)
nx<-100; ny<-5; #try also nx<-40; ny<-10 or nx<-1000; ny<-10;
set.seed(1)
Xp<-cbind(runif(nx),runif(nx))
Yp<-cbind(runif(ny,0,.25),runif(ny,0,.25))+cbind(c(0,0,0.5,1,1),c(0,1,.5,0,1))
#try also Yp<-cbind(runif(ny,0,1),runif(ny,0,1))
plotDelaunay.tri(Xp,Yp,xlab="",ylab = "")
CSarc.dens.test(Xp,Yp,t=.5)
CSarc.dens.test(Xp,Yp,t=.5,ch=TRUE)
#try also t=1.0 and 1.5 above
}
}
\references{
\insertAllCited{}
}
\seealso{
\code{\link{PEarc.dens.test}} and \code{\link{CSarc.dens.test1D}}
}
\author{
Elvan Ceyhan
}
|
rm(list=ls())
load('Scaling/ProcBWorkspace.RData')
library('tidyverse')
library('lme4')
library('lmerTest')
# rename the columns so that everything is the same
colnames(Coral)[which(colnames(Coral)=='Volume'|colnames(Coral)=='SA')]<-c('FinalVol','FinalSA')
colnames(Rubble)[which(colnames(Rubble)=='Volume'|colnames(Rubble)=='SA')]<-c('FinalVol','FinalSA')
colnames(Sand)[which(colnames(Sand)=='Vol'|colnames(Sand)=='SA')]<-c('FinalVol','FinalSA')
varlist <-c("AFDW","DW","FinalVol","FinalSA") #custom list of vars
# sum by aquarium and sample ID for experiment 1
biolist<-list(Coral, Algae, Rubble, Sand)
substrate<-c('Coral','Algae','Rubble','Sand')
e<-vector('list',4)
for(i in 1:length(biolist)){
# get sums for experiment 1 by individuals
a<- biolist[[i]] %>%group_by(Aq_Ex1, SampleID) %>%
summarise_at(varlist, sum) # sum by aquarium number
# sum for experiment 2
b<-biolist[[i]] %>%
group_by(Aq_Ex2, SampleID) %>%
summarise_at(varlist, sum)
# get the total sum by aquarium
c<-biolist[[i]] %>%
group_by(Aq_Ex1) %>%
summarise_at(varlist, funs(total=sum))
# merge everything
d<-left_join(a,b)
d<-left_join(d,c)
# calculate proportion of aquarium for each sample
proportions<-d[,varlist]/d[,c("AFDW_total",'DW_total','FinalVol_total',"FinalSA_total")]
PropData<-cbind(data.frame(d),proportions)
colnames(PropData)[12:15]<-c('propSA','propAFDW','propDW','propVolume')
PropData$Substrate<-rep(substrate[i], nrow(d))
e[[i]]<-PropData
e[[i]][,'Aq_Ex1']<-as.integer(e[[i]][,'Aq_Ex1'])
}
# bring everything together
ScalDat<-rbind(e[[1]],e[[2]],e[[3]],e[[4]])
### add normalization for residence time
#experiment 1
ResData.1<-AllData[AllData$Experiment==1,c('Aquarium','ResTime.mean')]
# make the column names the same
colnames(ResData.1)[c(1,2)]<-c('Aq_Ex1','ResTime1')
# make it one value per aquarium (mean of ResTime.mean per aquarium)
ResData.1 <-ResData.1 %>%
group_by(Aq_Ex1) %>%
summarise_at('ResTime1',mean)
#join with the scaled data
ScalDat<-left_join(ScalDat,ResData.1)
# experiment 2 residence time
ResData.2<-AllData[AllData$Experiment==2,c('Aquarium','ResTime.mean')]
# make the column names the same
colnames(ResData.2)[c(1,2)]<-c('Aq_Ex2','ResTime2')
ResData.2 <-ResData.2 %>%
group_by(Aq_Ex2) %>%
summarise_at('ResTime2',mean)
#join with the scaled data
ScalDat<-left_join(ScalDat,ResData.2)
# Calculate N and P uptake.
# NutCalc comes from the loaded workspace; presumably it converts the
# header/tank concentration difference, residence time, and biomass into an
# uptake rate — confirm against its definition.
AllData$N.uptake<-NutCalc(AllData$HeaderN, AllData$TankN, AllData$ResTime.mean, AllData$AFDW)
AllData$P.uptake<-NutCalc(AllData$HeaderP, AllData$TankP, AllData$ResTime.mean, AllData$AFDW)
# Experiment one data
DataEx1 <- AllData[AllData$Experiment==1,]
colnames(DataEx1)[1] <- "Aq_Ex1"  # rename first column (aquarium ID) for the join
# merge with the NEC and NEP data
ScalDat<-left_join(DataEx1,ScalDat,by=c("Aq_Ex1","Substrate"))
#### light normalization #####
## run the light script
# NOTE(review): this sourced script is expected to define CumLight, sub, and
# Nuts used below — confirm.
source('Scaling/lightnormalization.R')
## average NCP and NEC rates by substrate and incubation tank
TankRates<-AllData %>%
group_by(Substrate,Tank, NutLevel) %>%
summarise_at(c('NCP.AFDW','NEC.AFDW'), mean)
# create an empty dataframe to put the regression coefficients in
# (12 rows = 4 substrates x 3 nutrient levels)
coefs.sub.nut<-data.frame(matrix(NA, nrow=12, ncol=6))
colnames(coefs.sub.nut)<-c('Substrate','NutLevel','NCP.Intercept','NCP.Slope','NEC.Intercept','NEC.Slope')
coefs.sub.nut$Substrate<-rep(sub[1:4],3)
coefs.sub.nut$NutLevel<-c(rep(as.character(Nuts[1]),4),
rep(as.character(Nuts[2]),4),rep(as.character(Nuts[3]),4))
# create a dataframe for just the substrate coefficients (one row per substrate)
coefs.sub<-data.frame(matrix(NA, nrow=4, ncol=5))
colnames(coefs.sub)<-c('Substrate','NCP.Intercept','NCP.Slope','NEC.Intercept','NEC.Slope')
coefs.sub$Substrate<-sub[1:4]
# Light by NCP plot across substrates and nutrients.
# Fit NCP ~ cumulative light separately for each substrate x nutrient cell and
# store the intercept/slope into coefs.sub.nut[, 3:4].
png('Scaling/plots/LightNormNCP.png',width = 10000, height = 12000, res = 800 )
par(mfrow=c(4,3))
for (i in 1:4){
for (j in 1:3){
plot(CumLight[1,], TankRates$NCP.AFDW[TankRates$Substrate==sub[i] & TankRates$NutLevel == Nuts[j]],
cex.lab = 2,cex.main = 2, cex.axis = 1.5, cex = 1.5,main = paste(sub[i], Nuts[j]),
pch = 19,xlab = 'Cumulative PAR', ylab = 'mean NCP', ylim =c(min(TankRates$NCP.AFDW),
max(TankRates$NCP.AFDW)))
mod<-lm(TankRates$NCP.AFDW[TankRates$Substrate==sub[i] & TankRates$NutLevel == Nuts[j]]~CumLight[1,])
lines(CumLight[1,],predict(mod))
legend('topleft',c(paste('r2 = ',round(summary(mod)$r.squared,2)),paste('slope = ',formatC(coef(mod)[2], format = "e", digits = 2))), bty = 'n', cex = 2)
coefs.sub.nut[coefs.sub.nut$Substrate==sub[i] & coefs.sub.nut$NutLevel == Nuts[j],3:4]<-coef(mod)
}
}
dev.off()
## same figures, but only across substrates (nutrient levels pooled)
## average NCP and NEC rates by substrate and incubation tank
TankRates.sub<-AllData %>%
group_by(Substrate,Tank) %>%
summarise_at(c('NCP.AFDW','NEC.AFDW'), mean)
#NCP: per-substrate fits; intercept/slope stored into coefs.sub[, 2:3]
png('Scaling/plots/LightNormNCPSub.png',width = 10000, height = 12000, res = 800 )
par(mfrow=c(2,2))
for (i in 1:4){
plot(CumLight[1,], TankRates.sub$NCP.AFDW[TankRates.sub$Substrate==sub[i]], ylim = c(min(TankRates.sub$NCP.AFDW), max(TankRates.sub$NCP.AFDW)),
cex.lab = 2,cex.main = 2, cex.axis = 1.5, cex = 1.5,main = paste(sub[i]), pch = 19,xlab = 'Cumulative PAR', ylab = 'mean NCP')
mod<-lm(TankRates.sub$NCP.AFDW[TankRates.sub$Substrate==sub[i]]~CumLight[1,])
lines(CumLight[1,],predict(mod))
legend('topleft',c(paste('r2 = ',round(summary(mod)$r.squared,2)),paste('slope = ',formatC(coef(mod)[2], format = "e", digits = 2))), bty = 'n', cex = 2)
coefs.sub[coefs.sub$Substrate==sub[i],2:3]<-coef(mod)
}
dev.off()
#NEC: per-substrate fits; intercept/slope stored into coefs.sub[, 4:5]
png('Scaling/plots/LightNormNECSub.png',width = 10000, height = 12000, res = 800 )
par(mfrow=c(2,2))
for (i in 1:4){
plot(CumLight[1,], TankRates.sub$NEC.AFDW[TankRates.sub$Substrate==sub[i]], ylim = c(min(TankRates.sub$NEC.AFDW), max(TankRates.sub$NEC.AFDW)),
cex.lab = 2,cex.main = 2, cex.axis = 1.5, cex = 1.5,main = paste(sub[i]), pch = 19,xlab = 'Cumulative PAR', ylab = 'mean NCC')
mod<-lm(TankRates.sub$NEC.AFDW[TankRates.sub$Substrate==sub[i]]~CumLight[1,])
lines(CumLight[1,],predict(mod))
legend('topleft',c(paste('r2 = ',round(summary(mod)$r.squared,2)),paste('slope = ',formatC(coef(mod)[2], format = "e", digits = 2))), bty = 'n', cex = 2)
coefs.sub[coefs.sub$Substrate==sub[i],4:5]<-coef(mod)
}
dev.off()
# Light by NEC plot across substrates and nutrients; coefficients stored into
# coefs.sub.nut[, 5:6]
png('Scaling/plots/LightNormNEC.png',width = 10000, height = 12000, res = 800 )
par(mfrow=c(4,3))
for (i in 1:4){
for (j in 1:3){
plot(CumLight[1,], TankRates$NEC.AFDW[TankRates$Substrate==sub[i] & TankRates$NutLevel == Nuts[j]], ylim = c(min(TankRates$NEC.AFDW), max(TankRates$NEC.AFDW)),
cex.lab = 2,cex.main = 2, cex.axis = 1.5, cex = 1.5,main = paste(sub[i], Nuts[j]), pch = 19,xlab = 'Cumulative PAR', ylab = 'mean NCC')
mod<-lm(TankRates$NEC.AFDW[TankRates$Substrate==sub[i] & TankRates$NutLevel == Nuts[j]]~CumLight[1,])
lines(CumLight[1,],predict(mod))
legend('topleft',c(paste('r2 = ',round(summary(mod)$r.squared,2)),paste('slope = ',formatC(coef(mod)[2], format = "e", digits = 2))), bty = 'n', cex = 2)
coefs.sub.nut[coefs.sub.nut$Substrate==sub[i] & coefs.sub.nut$NutLevel == Nuts[j],5:6]<-coef(mod)
}
}
dev.off()
# calculate estimated rate based on proportion of weight (AFDW)
ScalDat$NN.wtd<-ScalDat$N.uptake*ScalDat$propAFDW
ScalDat$PO.wtd<-ScalDat$P.uptake*ScalDat$propAFDW
ScalDat$NCP.wtd<-ScalDat$NCP.AFDW*ScalDat$propAFDW
ScalDat$NEC.wtd<-ScalDat$NEC.AFDW*ScalDat$propAFDW
### normalize the NEC and NCP data to constant light. ##
# Take the relationship between cumulative light and the rate (NCP, NEC). Put in the delta light
# between day 1 and 2 into the equation for each bin x substrate. The NCP value calculated from this
# equation is how much the NCP changed between day 1 and 2 due to differences in light. This value gets
# subtracted from the predicted NCP to account for light effects. This needs to be done on the average daily NCP and NEC
# based on how I ran the regressions.
# delta cumulative light between day 1 and 2
deltaLight<-data.frame(as.matrix(CumLight[1,] - CumLight[2,]))
deltaLight$Tank<-c(1,2,3)
colnames(deltaLight)[1]<-'deltaPAR'
# now calculate the predicted
DataEx2 <- AllData[AllData$Experiment==2,]
colnames(DataEx2)[1] <- "Aq_Ex2"
# NOTE(review): magic column index 49 — assumed to be the date/time column of
# AllData; verify it still holds if AllData's columns change.
colnames(DataEx2)[49] <- "DateTimeEx2"
#take the daily averages of the weighted predicted rates
ScalDat_Ave<-ScalDat %>%
group_by(Aq_Ex2, NutLevel, Substrate, Tank, Experiment) %>%
summarise(NN.pred = mean(NN.wtd), PO.pred = mean(PO.wtd), NCP.pred = mean(NCP.wtd), NEC.pred = mean(NEC.wtd))%>%
left_join(.,deltaLight)
# NOTE(review): drops rows from 121 onward — presumably trailing rows not in
# experiment 2; confirm the row count this relies on.
ScalDat_Ave<-ScalDat_Ave[-c(121:nrow(ScalDat_Ave)),]
ScalDat_Ave.sub<-ScalDat_Ave
# add light coefs for substrate and nutrients
ScalDat_Ave<-left_join(ScalDat_Ave, coefs.sub.nut)
# just substrate coefficients
ScalDat_Ave.sub<-left_join(ScalDat_Ave.sub, coefs.sub)
#scale to light with both nutrients and substrate
ScalDat_Ave$NCP.pred.light<-with(ScalDat_Ave,NCP.pred+(NCP.Intercept+deltaPAR*NCP.Slope))
ScalDat_Ave$NEC.pred.light<-with(ScalDat_Ave,NEC.pred+(NEC.Intercept+deltaPAR*NEC.Slope))
#scale to light with only substrate coeffs
ScalDat_Ave.sub$NCP.pred.light<-with(ScalDat_Ave.sub,NCP.pred+(NCP.Intercept+deltaPAR*NCP.Slope))
ScalDat_Ave.sub$NEC.pred.light<-with(ScalDat_Ave.sub,NEC.pred+(NEC.Intercept+deltaPAR*NEC.Slope))
# calculate observed - predicted for both substrate and nutrient coefficients
# sum across all 4 substrates in each aquarium
ScalDat_Avesum<-ScalDat_Ave %>%
group_by(Aq_Ex2, NutLevel) %>%
summarise(NCP.pred = sum(NCP.pred.light), NEC.pred = sum(NEC.pred.light))
# average the observed data
DataEx2_Ave<-DataEx2 %>% group_by(Aq_Ex2, NutLevel, Tank) %>%
summarise(NCP.mean = mean(NCP.AFDW), NEC.mean = mean(NEC.AFDW))
# bring together the observed and predicted
DataEx2_Ave<-left_join(ScalDat_Avesum, DataEx2_Ave)
# calculate observed - predicted
DataEx2_Ave <- mutate(DataEx2_Ave,
NCP.diff = NCP.mean - NCP.pred,
NEC.diff = NEC.mean - NEC.pred)
# calculate observed - predicted for just substrate coefficients
# sum across all 4 substrates in each aquarium
ScalDat_Avesum.sub<-ScalDat_Ave.sub %>%
group_by(Aq_Ex2, NutLevel) %>%
summarise(NCP.pred = sum(NCP.pred.light), NEC.pred = sum(NEC.pred.light))
# average the observed data
DataEx2_Ave.sub<-DataEx2 %>% group_by(Aq_Ex2, NutLevel, Tank) %>%
summarise(NCP.mean = mean(NCP.AFDW), NEC.mean = mean(NEC.AFDW))
# bring together the observed and predicted
DataEx2_Ave.sub<-left_join(ScalDat_Avesum.sub, DataEx2_Ave.sub)
# calculate observed - predicted
DataEx2_Ave.sub <- mutate(DataEx2_Ave.sub,
NCP.diff = NCP.mean - NCP.pred,
NEC.diff = NEC.mean - NEC.pred)
### stats #####
# Mixed-effects models: does nutrient level affect the scaling mismatch
# (observed - predicted), with tank as a random intercept?
# does nutrients affect scaling with nutrient and substrate light coeffs?
mod1.NEC.diff <- lmer(NEC.diff ~ NutLevel
+ (1|Tank),data=DataEx2_Ave)
mod1.NCP.diff <- lmer(NCP.diff ~ NutLevel
+ (1|Tank),data=DataEx2_Ave)
# does nutrients affect scaling with substrate only light coeffs?
mod1.NEC.diff.sub <- lmer(NEC.diff ~ NutLevel
+ (1|Tank),data=DataEx2_Ave.sub)
mod1.NCP.diff.sub <- lmer(NCP.diff ~ NutLevel
+ (1|Tank),data=DataEx2_Ave.sub)
# plot #####
# Observed - predicted means (+/- SE) by nutrient level.
# First: using the nutrient-and-substrate light coefficients.
Uptake.diff.means<-DataEx2_Ave %>%
group_by(NutLevel) %>%
summarise(.,NCP.diff.mean = mean(NCP.diff), NCP.diff.SE = sd(NCP.diff)/sqrt(n()),NEC.diff.mean = mean(NEC.diff), NEC.diff.SE = sd(NEC.diff)/sqrt(n()) )
## NCP (black) with NEC (grey) overlaid on the same axes
#par(mfrow= c(1,2))
par(mfrow= c(1,1))
plot(1:3, Uptake.diff.means$NCP.diff.mean,
pch = 19, cex = 2, ylim = c(-10,10), xaxt = 'n', ylab = 'Observed - predicted', xlab = '',type = 'b', lty=2)
segments(1:3, Uptake.diff.means$NCP.diff.mean+Uptake.diff.means$NCP.diff.SE,
1:3, Uptake.diff.means$NCP.diff.mean-Uptake.diff.means$NCP.diff.SE)
abline(h=0)
#NEC
axis(1, at = c(1:3), c("Ambient","Medium","High"))
points(1:3, Uptake.diff.means$NEC.diff.mean, pch = 19, cex = 2, ylim = c(-10,10), xaxt = 'n', xlab = '', main = 'NEC', col='grey',type = 'b', lty=2)
segments(1:3, Uptake.diff.means$NEC.diff.mean+Uptake.diff.means$NEC.diff.SE,
1:3, Uptake.diff.means$NEC.diff.mean-Uptake.diff.means$NEC.diff.SE)
legend('bottomleft', c('NCP','NCC'), pch = 19, col = c('black','grey'), bty = 'n')
# Second: same figure using the substrate-only light coefficients.
Uptake.diff.means<-DataEx2_Ave.sub %>%
group_by(NutLevel) %>%
summarise(.,NCP.diff.mean = mean(NCP.diff), NCP.diff.SE = sd(NCP.diff)/sqrt(n()),NEC.diff.mean = mean(NEC.diff), NEC.diff.SE = sd(NEC.diff)/sqrt(n()) )
## NCP
#par(mfrow= c(1,2))
par(mfrow= c(1,1))
plot(1:3, Uptake.diff.means$NCP.diff.mean,
pch = 19, cex = 2, ylim = c(-5,12), xaxt = 'n', ylab = 'Observed - predicted', xlab = '',type = 'b', lty=2)
segments(1:3, Uptake.diff.means$NCP.diff.mean+Uptake.diff.means$NCP.diff.SE,
1:3, Uptake.diff.means$NCP.diff.mean-Uptake.diff.means$NCP.diff.SE)
abline(h=0)
#NEC
axis(1, at = c(1:3), c("Ambient","Medium","High"))
points(1:3, Uptake.diff.means$NEC.diff.mean, pch = 19, cex = 2, ylim = c(-10,10), xaxt = 'n', xlab = '', main = 'NEC', col='grey',type = 'b', lty=2)
segments(1:3, Uptake.diff.means$NEC.diff.mean+Uptake.diff.means$NEC.diff.SE,
1:3, Uptake.diff.means$NEC.diff.mean-Uptake.diff.means$NEC.diff.SE)
legend('bottomleft', c('NCP','NCC'), pch = 19, col = c('black','grey'), bty = 'n')
########################### stop ################################
#### without scaling to light or averaging over the day
# Sum the weighted predicted rates across substrates per aquarium/time point.
ScalDat_Aqua<-ScalDat %>%
group_by(Aq_Ex2, DateTime,NutLevel) %>%
summarise(NN.pred = sum(NN.wtd), PO.pred = sum(PO.wtd), NCP.pred = sum(NCP.wtd), NEC.pred = sum(NEC.wtd))
ScalDat_Aqua$DateTimeEx2<-ScalDat_Aqua$DateTime+60*60*48 # get the correct time (shift by 48 h)
DataEx2 <- left_join(DataEx2,ScalDat_Aqua,by=c("Aq_Ex2","DateTimeEx2","NutLevel"))
# NOTE(review): DateTimeEx2 is not yet a factor here, so levels()<- only
# attaches a 'levels' attribute; the as.factor() call below does not apply
# these 1:7 labels. Likely this relabelling was meant to happen AFTER the
# factor conversion — confirm intent.
levels(DataEx2$DateTimeEx2)<- c(1:7)
# calculate observed - predicted
DataEx2 <- mutate(DataEx2,NN.diff = N.uptake - NN.pred,
PO.diff = P.uptake - PO.pred,
NCP.diff = NCP.AFDW - NCP.pred,
NEC.diff = NEC.AFDW - NEC.pred)
DataEx2$DateTimeEx2<- as.factor(DataEx2$DateTimeEx2)
DataEx2$DayNight<- as.factor(DataEx2$DayNight)
# do we see differences in N uptake by nutrients or day night between predicted and observed?
mod1.NN.diff <- lmer(NN.diff ~ DayNight*NutLevel
+ (1|Tank)
+ (DayNight|DateTimeEx2),data=DataEx2)
# Means and SEs of observed - predicted by day/night x nutrient level
Uptake.diff.means<-DataEx2 %>%
group_by(DayNight, NutLevel) %>%
summarise(.,NN.diff.mean = mean(NN.diff),NN.diff.SE = sd(NN.diff)/sqrt(n()), PO.diff.mean = mean(PO.diff),PO.diff.SE = sd(PO.diff)/sqrt(n()),
NCP.diff.mean = mean(NCP.diff), NCP.diff.SE = sd(NCP.diff)/sqrt(n()),NEC.diff.mean = mean(NEC.diff), NEC.diff.SE = sd(NEC.diff)/sqrt(n()) )
par(mfrow= c(1,2))
plot(rep(1:3,2), Uptake.diff.means$NN.diff.mean, col = Uptake.diff.means$DayNight,
pch = 19, cex = 2, ylim = c(-0.2,1), xaxt = 'n', ylab = 'Difference in uptake rate', xlab = '', main = 'N+N')
segments(rep(1:3,2), Uptake.diff.means$NN.diff.mean+Uptake.diff.means$NN.diff.SE,
rep(1:3,2), Uptake.diff.means$NN.diff.mean-Uptake.diff.means$NN.diff.SE,col = Uptake.diff.means$DayNight)
abline(h=0)
axis(1, at = c(1:3), c("Ambient","Medium","High"))
plot(rep(1:3,2), Uptake.diff.means$PO.diff.mean, col = Uptake.diff.means$DayNight,
pch = 19, cex = 2, ylim = c(-0.2,0.15), xaxt = 'n', ylab = 'Difference in uptake rate', xlab = '', main = 'PO')
segments(rep(1:3,2), Uptake.diff.means$PO.diff.mean+Uptake.diff.means$PO.diff.SE,
rep(1:3,2), Uptake.diff.means$PO.diff.mean-Uptake.diff.means$PO.diff.SE,col = Uptake.diff.means$DayNight)
abline(h=0)
axis(1, at = c(1:3), c("Ambient","Medium","High"))
# averages by nutrients without day night
Uptake.diff.means<-DataEx2 %>%
group_by(NutLevel) %>%
summarise(.,NN.diff.mean = mean(NN.diff),NN.diff.SE = sd(NN.diff)/sqrt(n()), PO.diff.mean = mean(PO.diff),PO.diff.SE = sd(PO.diff)/sqrt(n()),
NCP.diff.mean = mean(NCP.diff), NCP.diff.SE = sd(NCP.diff)/sqrt(n()),NEC.diff.mean = mean(NEC.diff), NEC.diff.SE = sd(NEC.diff)/sqrt(n()) )
par(mfrow= c(1,2))
plot(1:3, Uptake.diff.means$NN.diff.mean,
pch = 19, cex = 2, ylim = c(0,0.7), xaxt = 'n', ylab = 'Difference in uptake rate', xlab = '', main = 'N+N')
segments(1:3, Uptake.diff.means$NN.diff.mean+Uptake.diff.means$NN.diff.SE,
1:3, Uptake.diff.means$NN.diff.mean-Uptake.diff.means$NN.diff.SE)
abline(h=0)
axis(1, at = c(1:3), c("Ambient","Medium","High"))
plot(1:3, Uptake.diff.means$PO.diff.mean, pch = 19, cex = 2, ylim = c(-0.05,0.05), xaxt = 'n', ylab = 'Difference in uptake rate', xlab = '', main = 'PO')
segments(1:3, Uptake.diff.means$PO.diff.mean+Uptake.diff.means$PO.diff.SE,
1:3, Uptake.diff.means$PO.diff.mean-Uptake.diff.means$PO.diff.SE)
abline(h=0)
axis(1, at = c(1:3), c("Ambient","Medium","High"))
# do we see differences in N uptake by nutrients between predicted and observed?
mod2.NN.diff <- lmer(NN.diff ~ NutLevel
+ (1|Tank)
+ (1|DateTimeEx2),data=DataEx2)
mod2.PO.diff <- lmer(PO.diff ~ NutLevel
+ (1|Tank)
+ (1|DateTimeEx2),data=DataEx2)
## NCP (black) with NEC (grey) overlaid
par(mfrow= c(1,1))
plot(1:3, Uptake.diff.means$NCP.diff.mean,
pch = 19, cex = 2, ylim = c(-5,5), xaxt = 'n', ylab = 'Difference in rate', xlab = '', lty=2, type ='b')
segments(1:3, Uptake.diff.means$NCP.diff.mean+Uptake.diff.means$NCP.diff.SE,
1:3, Uptake.diff.means$NCP.diff.mean-Uptake.diff.means$NCP.diff.SE)
abline(h=0)
#NEC
axis(1, at = c(1:3), c("Ambient","Medium","High"))
points(1:3, Uptake.diff.means$NEC.diff.mean, pch = 19, cex = 2, ylim = c(-1,1), xaxt = 'n', xlab = '', col = 'grey', type = 'b', lty=2)
segments(1:3, Uptake.diff.means$NEC.diff.mean+Uptake.diff.means$NEC.diff.SE,
1:3, Uptake.diff.means$NEC.diff.mean-Uptake.diff.means$NEC.diff.SE)
abline(h=0)
#axis(1, at = c(1:3), c("Ambient","Medium","High"))
legend('bottomleft',c('NCP','NCC'), pch = 19, col = c('black','grey'), bty = 'n')
|
/Scaling/NutrientUptakeAnalysis.R
|
no_license
|
donahuem/CRANE-R
|
R
| false
| false
| 18,264
|
r
|
rm(list=ls())
load('Scaling/ProcBWorkspace.RData')
library('tidyverse')
library('lme4')
library('lmerTest')
# rename the columns so that everything is the same
colnames(Coral)[which(colnames(Coral)=='Volume'|colnames(Coral)=='SA')]<-c('FinalVol','FinalSA')
colnames(Rubble)[which(colnames(Rubble)=='Volume'|colnames(Rubble)=='SA')]<-c('FinalVol','FinalSA')
colnames(Sand)[which(colnames(Sand)=='Vol'|colnames(Sand)=='SA')]<-c('FinalVol','FinalSA')
varlist <- c("AFDW", "DW", "FinalVol", "FinalSA")  # biomass/size variables to aggregate
# For each substrate: sum the biomass variables by aquarium and sample ID, then
# express each sample as a proportion of its experiment-1 aquarium total.
biolist <- list(Coral, Algae, Rubble, Sand)
substrate <- c('Coral', 'Algae', 'Rubble', 'Sand')
e <- vector('list', length(biolist))
for (i in seq_along(biolist)) {
  # sums for experiment 1 by individual sample
  sums_ex1 <- biolist[[i]] %>%
    group_by(Aq_Ex1, SampleID) %>%
    summarise_at(varlist, sum)  # sum by aquarium number
  # sums for experiment 2
  sums_ex2 <- biolist[[i]] %>%
    group_by(Aq_Ex2, SampleID) %>%
    summarise_at(varlist, sum)
  # aquarium-level totals for experiment 1
  # (funs() is deprecated in dplyr; list() is the supported replacement)
  totals_ex1 <- biolist[[i]] %>%
    group_by(Aq_Ex1) %>%
    summarise_at(varlist, list(total = sum))
  # merge sample sums with experiment-2 sums and aquarium totals
  d <- left_join(sums_ex1, sums_ex2)
  d <- left_join(d, totals_ex1)
  # proportion of the aquarium total contributed by each sample,
  # in varlist order: AFDW, DW, FinalVol, FinalSA
  proportions <- d[, varlist] / d[, paste0(varlist, "_total")]
  PropData <- cbind(data.frame(d), proportions)
  # BUG FIX: the proportion columns appended above are in varlist order
  # (AFDW, DW, FinalVol, FinalSA). The original names
  # c('propSA','propAFDW','propDW','propVolume') were shifted by one position,
  # so e.g. propAFDW actually held the DW proportion. Name them to match.
  colnames(PropData)[12:15] <- c('propAFDW', 'propDW', 'propVolume', 'propSA')
  PropData$Substrate <- rep(substrate[i], nrow(d))
  e[[i]] <- PropData
  e[[i]][, 'Aq_Ex1'] <- as.integer(e[[i]][, 'Aq_Ex1'])
}
# bring everything together
ScalDat <- rbind(e[[1]], e[[2]], e[[3]], e[[4]])
### add normalization for residence time
#experiment 1
ResData.1<-AllData[AllData$Experiment==1,c('Aquarium','ResTime.mean')]
# make the column names the same
colnames(ResData.1)[c(1,2)]<-c('Aq_Ex1','ResTime1')
# make it one value per aquarium
ResData.1 <-ResData.1 %>%
group_by(Aq_Ex1) %>%
summarise_at('ResTime1',mean)
#join with the scaled data
ScalDat<-left_join(ScalDat,ResData.1)
# experiment 2 residence time
ResData.2<-AllData[AllData$Experiment==2,c('Aquarium','ResTime.mean')]
# make the column names the same
colnames(ResData.2)[c(1,2)]<-c('Aq_Ex2','ResTime2')
ResData.2 <-ResData.2 %>%
group_by(Aq_Ex2) %>%
summarise_at('ResTime2',mean)
#join with the scaled data
ScalDat<-left_join(ScalDat,ResData.2)
# Calculate N uptake
AllData$N.uptake<-NutCalc(AllData$HeaderN, AllData$TankN, AllData$ResTime.mean, AllData$AFDW)
AllData$P.uptake<-NutCalc(AllData$HeaderP, AllData$TankP, AllData$ResTime.mean, AllData$AFDW)
# Experiment one data
DataEx1 <- AllData[AllData$Experiment==1,]
colnames(DataEx1)[1] <- "Aq_Ex1"
# merge with the NEC and NEP data
ScalDat<-left_join(DataEx1,ScalDat,by=c("Aq_Ex1","Substrate"))
#### light normalization #####
## run the light script
source('Scaling/lightnormalization.R')
## average NCP and NEC rates by substrate and incubation tank
TankRates<-AllData %>%
group_by(Substrate,Tank, NutLevel) %>%
summarise_at(c('NCP.AFDW','NEC.AFDW'), mean)
# create an empty dataframe to put the coefficients in
coefs.sub.nut<-data.frame(matrix(NA, nrow=12, ncol=6))
colnames(coefs.sub.nut)<-c('Substrate','NutLevel','NCP.Intercept','NCP.Slope','NEC.Intercept','NEC.Slope')
coefs.sub.nut$Substrate<-rep(sub[1:4],3)
coefs.sub.nut$NutLevel<-c(rep(as.character(Nuts[1]),4),
rep(as.character(Nuts[2]),4),rep(as.character(Nuts[3]),4))
# create a dataframe for just the substrate coefficients
coefs.sub<-data.frame(matrix(NA, nrow=4, ncol=5))
colnames(coefs.sub)<-c('Substrate','NCP.Intercept','NCP.Slope','NEC.Intercept','NEC.Slope')
coefs.sub$Substrate<-sub[1:4]
# Light by NCP plot across substrates and nutrients
png('Scaling/plots/LightNormNCP.png',width = 10000, height = 12000, res = 800 )
par(mfrow=c(4,3))
for (i in 1:4){
for (j in 1:3){
plot(CumLight[1,], TankRates$NCP.AFDW[TankRates$Substrate==sub[i] & TankRates$NutLevel == Nuts[j]],
cex.lab = 2,cex.main = 2, cex.axis = 1.5, cex = 1.5,main = paste(sub[i], Nuts[j]),
pch = 19,xlab = 'Cumulative PAR', ylab = 'mean NCP', ylim =c(min(TankRates$NCP.AFDW),
max(TankRates$NCP.AFDW)))
mod<-lm(TankRates$NCP.AFDW[TankRates$Substrate==sub[i] & TankRates$NutLevel == Nuts[j]]~CumLight[1,])
lines(CumLight[1,],predict(mod))
legend('topleft',c(paste('r2 = ',round(summary(mod)$r.squared,2)),paste('slope = ',formatC(coef(mod)[2], format = "e", digits = 2))), bty = 'n', cex = 2)
coefs.sub.nut[coefs.sub.nut$Substrate==sub[i] & coefs.sub.nut$NutLevel == Nuts[j],3:4]<-coef(mod)
}
}
dev.off()
## same figures, but only across substrates
## average NCP and NEC rates by substrate and incubation tank
TankRates.sub<-AllData %>%
group_by(Substrate,Tank) %>%
summarise_at(c('NCP.AFDW','NEC.AFDW'), mean)
#NCP
png('Scaling/plots/LightNormNCPSub.png',width = 10000, height = 12000, res = 800 )
par(mfrow=c(2,2))
for (i in 1:4){
plot(CumLight[1,], TankRates.sub$NCP.AFDW[TankRates.sub$Substrate==sub[i]], ylim = c(min(TankRates.sub$NCP.AFDW), max(TankRates.sub$NCP.AFDW)),
cex.lab = 2,cex.main = 2, cex.axis = 1.5, cex = 1.5,main = paste(sub[i]), pch = 19,xlab = 'Cumulative PAR', ylab = 'mean NCP')
mod<-lm(TankRates.sub$NCP.AFDW[TankRates.sub$Substrate==sub[i]]~CumLight[1,])
lines(CumLight[1,],predict(mod))
legend('topleft',c(paste('r2 = ',round(summary(mod)$r.squared,2)),paste('slope = ',formatC(coef(mod)[2], format = "e", digits = 2))), bty = 'n', cex = 2)
coefs.sub[coefs.sub$Substrate==sub[i],2:3]<-coef(mod)
}
dev.off()
#NEC
png('Scaling/plots/LightNormNECSub.png',width = 10000, height = 12000, res = 800 )
par(mfrow=c(2,2))
for (i in 1:4){
plot(CumLight[1,], TankRates.sub$NEC.AFDW[TankRates.sub$Substrate==sub[i]], ylim = c(min(TankRates.sub$NEC.AFDW), max(TankRates.sub$NEC.AFDW)),
cex.lab = 2,cex.main = 2, cex.axis = 1.5, cex = 1.5,main = paste(sub[i]), pch = 19,xlab = 'Cumulative PAR', ylab = 'mean NCC')
mod<-lm(TankRates.sub$NEC.AFDW[TankRates.sub$Substrate==sub[i]]~CumLight[1,])
lines(CumLight[1,],predict(mod))
legend('topleft',c(paste('r2 = ',round(summary(mod)$r.squared,2)),paste('slope = ',formatC(coef(mod)[2], format = "e", digits = 2))), bty = 'n', cex = 2)
coefs.sub[coefs.sub$Substrate==sub[i],4:5]<-coef(mod)
}
dev.off()
# Light by NEC plot across substrates and nutrients
png('Scaling/plots/LightNormNEC.png',width = 10000, height = 12000, res = 800 )
par(mfrow=c(4,3))
for (i in 1:4){
for (j in 1:3){
plot(CumLight[1,], TankRates$NEC.AFDW[TankRates$Substrate==sub[i] & TankRates$NutLevel == Nuts[j]], ylim = c(min(TankRates$NEC.AFDW), max(TankRates$NEC.AFDW)),
cex.lab = 2,cex.main = 2, cex.axis = 1.5, cex = 1.5,main = paste(sub[i], Nuts[j]), pch = 19,xlab = 'Cumulative PAR', ylab = 'mean NCC')
mod<-lm(TankRates$NEC.AFDW[TankRates$Substrate==sub[i] & TankRates$NutLevel == Nuts[j]]~CumLight[1,])
lines(CumLight[1,],predict(mod))
legend('topleft',c(paste('r2 = ',round(summary(mod)$r.squared,2)),paste('slope = ',formatC(coef(mod)[2], format = "e", digits = 2))), bty = 'n', cex = 2)
coefs.sub.nut[coefs.sub.nut$Substrate==sub[i] & coefs.sub.nut$NutLevel == Nuts[j],5:6]<-coef(mod)
}
}
dev.off()
# calculate estimated rate based on proportion of weight
ScalDat$NN.wtd<-ScalDat$N.uptake*ScalDat$propAFDW
ScalDat$PO.wtd<-ScalDat$P.uptake*ScalDat$propAFDW
ScalDat$NCP.wtd<-ScalDat$NCP.AFDW*ScalDat$propAFDW
ScalDat$NEC.wtd<-ScalDat$NEC.AFDW*ScalDat$propAFDW
### normalize the NEC and NCP data to constant light. ##
# Take the relationship between cumulative light and the rate (NCP, NEC). Put in the delta light
# between day 1 and 2 into the equation for each bin x substrate. The NCP value calculated from this
# equation is how much the NCP changed between day 1 and 2 due to differences in light. This value gets
#subtracted from the predicted NCP to account for light effects. THis need to be done on the average daily NCP and NEC
# based on how I ran the regressions
# delta cumulative light between day 1 ans 2
deltaLight<-data.frame(as.matrix(CumLight[1,] - CumLight[2,]))
deltaLight$Tank<-c(1,2,3)
colnames(deltaLight)[1]<-'deltaPAR'
# now calculate the predicted
DataEx2 <- AllData[AllData$Experiment==2,]
colnames(DataEx2)[1] <- "Aq_Ex2"
colnames(DataEx2)[49] <- "DateTimeEx2"
#take the daily averages
ScalDat_Ave<-ScalDat %>%
group_by(Aq_Ex2, NutLevel, Substrate, Tank, Experiment) %>%
summarise(NN.pred = mean(NN.wtd), PO.pred = mean(PO.wtd), NCP.pred = mean(NCP.wtd), NEC.pred = mean(NEC.wtd))%>%
left_join(.,deltaLight)
ScalDat_Ave<-ScalDat_Ave[-c(121:nrow(ScalDat_Ave)),]
ScalDat_Ave.sub<-ScalDat_Ave
# add light coefs for substrate and nutrients
ScalDat_Ave<-left_join(ScalDat_Ave, coefs.sub.nut)
# just substrate coefficients
ScalDat_Ave.sub<-left_join(ScalDat_Ave.sub, coefs.sub)
#scale to light with both nutrients and substrate
ScalDat_Ave$NCP.pred.light<-with(ScalDat_Ave,NCP.pred+(NCP.Intercept+deltaPAR*NCP.Slope))
ScalDat_Ave$NEC.pred.light<-with(ScalDat_Ave,NEC.pred+(NEC.Intercept+deltaPAR*NEC.Slope))
#scale to light with only substrate coeffs
ScalDat_Ave.sub$NCP.pred.light<-with(ScalDat_Ave.sub,NCP.pred+(NCP.Intercept+deltaPAR*NCP.Slope))
ScalDat_Ave.sub$NEC.pred.light<-with(ScalDat_Ave.sub,NEC.pred+(NEC.Intercept+deltaPAR*NEC.Slope))
# calculate observed - predicted for both substrate and nutrient coefficients
# sum across all 4 substrates in each aquarium
ScalDat_Avesum<-ScalDat_Ave %>%
group_by(Aq_Ex2, NutLevel) %>%
summarise(NCP.pred = sum(NCP.pred.light), NEC.pred = sum(NEC.pred.light))
# average the observed data
DataEx2_Ave<-DataEx2 %>% group_by(Aq_Ex2, NutLevel, Tank) %>%
summarise(NCP.mean = mean(NCP.AFDW), NEC.mean = mean(NEC.AFDW))
# bring together the observed and predicted
DataEx2_Ave<-left_join(ScalDat_Avesum, DataEx2_Ave)
# calculate observed - predicted
DataEx2_Ave <- mutate(DataEx2_Ave,
NCP.diff = NCP.mean - NCP.pred,
NEC.diff = NEC.mean - NEC.pred)
# calculate observed - predicted for just substrate coefficients
# sum across all 4 substrates in each aquarium
ScalDat_Avesum.sub<-ScalDat_Ave.sub %>%
group_by(Aq_Ex2, NutLevel) %>%
summarise(NCP.pred = sum(NCP.pred.light), NEC.pred = sum(NEC.pred.light))
# average the observed data
DataEx2_Ave.sub<-DataEx2 %>% group_by(Aq_Ex2, NutLevel, Tank) %>%
summarise(NCP.mean = mean(NCP.AFDW), NEC.mean = mean(NEC.AFDW))
# bring together the observed and predicted
DataEx2_Ave.sub<-left_join(ScalDat_Avesum.sub, DataEx2_Ave.sub)
# calculate observed - predicted
DataEx2_Ave.sub <- mutate(DataEx2_Ave.sub,
NCP.diff = NCP.mean - NCP.pred,
NEC.diff = NEC.mean - NEC.pred)
### stats #####
# does nutrients affect scaling with nutrient and substrate light coeffs?
mod1.NEC.diff <- lmer(NEC.diff ~ NutLevel
+ (1|Tank),data=DataEx2_Ave)
mod1.NCP.diff <- lmer(NCP.diff ~ NutLevel
+ (1|Tank),data=DataEx2_Ave)
# does nutrients affect scaling with substrate only light coeffs?
mod1.NEC.diff.sub <- lmer(NEC.diff ~ NutLevel
+ (1|Tank),data=DataEx2_Ave.sub)
mod1.NCP.diff.sub <- lmer(NCP.diff ~ NutLevel
+ (1|Tank),data=DataEx2_Ave.sub)
# plot #####
# nutrient and substrate light coefficients
Uptake.diff.means<-DataEx2_Ave %>%
group_by(NutLevel) %>%
summarise(.,NCP.diff.mean = mean(NCP.diff), NCP.diff.SE = sd(NCP.diff)/sqrt(n()),NEC.diff.mean = mean(NEC.diff), NEC.diff.SE = sd(NEC.diff)/sqrt(n()) )
## NCP
#par(mfrow= c(1,2))
par(mfrow= c(1,1))
plot(1:3, Uptake.diff.means$NCP.diff.mean,
pch = 19, cex = 2, ylim = c(-10,10), xaxt = 'n', ylab = 'Observed - predicted', xlab = '',type = 'b', lty=2)
segments(1:3, Uptake.diff.means$NCP.diff.mean+Uptake.diff.means$NCP.diff.SE,
1:3, Uptake.diff.means$NCP.diff.mean-Uptake.diff.means$NCP.diff.SE)
abline(h=0)
#NEC
axis(1, at = c(1:3), c("Ambient","Medium","High"))
points(1:3, Uptake.diff.means$NEC.diff.mean, pch = 19, cex = 2, ylim = c(-10,10), xaxt = 'n', xlab = '', main = 'NEC', col='grey',type = 'b', lty=2)
segments(1:3, Uptake.diff.means$NEC.diff.mean+Uptake.diff.means$NEC.diff.SE,
1:3, Uptake.diff.means$NEC.diff.mean-Uptake.diff.means$NEC.diff.SE)
legend('bottomleft', c('NCP','NCC'), pch = 19, col = c('black','grey'), bty = 'n')
# substrate light coefficients
Uptake.diff.means<-DataEx2_Ave.sub %>%
group_by(NutLevel) %>%
summarise(.,NCP.diff.mean = mean(NCP.diff), NCP.diff.SE = sd(NCP.diff)/sqrt(n()),NEC.diff.mean = mean(NEC.diff), NEC.diff.SE = sd(NEC.diff)/sqrt(n()) )
## NCP
#par(mfrow= c(1,2))
par(mfrow= c(1,1))
plot(1:3, Uptake.diff.means$NCP.diff.mean,
pch = 19, cex = 2, ylim = c(-5,12), xaxt = 'n', ylab = 'Observed - predicted', xlab = '',type = 'b', lty=2)
segments(1:3, Uptake.diff.means$NCP.diff.mean+Uptake.diff.means$NCP.diff.SE,
1:3, Uptake.diff.means$NCP.diff.mean-Uptake.diff.means$NCP.diff.SE)
abline(h=0)
#NEC
axis(1, at = c(1:3), c("Ambient","Medium","High"))
points(1:3, Uptake.diff.means$NEC.diff.mean, pch = 19, cex = 2, ylim = c(-10,10), xaxt = 'n', xlab = '', main = 'NEC', col='grey',type = 'b', lty=2)
segments(1:3, Uptake.diff.means$NEC.diff.mean+Uptake.diff.means$NEC.diff.SE,
1:3, Uptake.diff.means$NEC.diff.mean-Uptake.diff.means$NEC.diff.SE)
legend('bottomleft', c('NCP','NCC'), pch = 19, col = c('black','grey'), bty = 'n')
########################### stop ################################
#### without scaling to light or averaging over the day
ScalDat_Aqua<-ScalDat %>%
group_by(Aq_Ex2, DateTime,NutLevel) %>%
summarise(NN.pred = sum(NN.wtd), PO.pred = sum(PO.wtd), NCP.pred = sum(NCP.wtd), NEC.pred = sum(NEC.wtd))
ScalDat_Aqua$DateTimeEx2<-ScalDat_Aqua$DateTime+60*60*48 # get the correct time
DataEx2 <- left_join(DataEx2,ScalDat_Aqua,by=c("Aq_Ex2","DateTimeEx2","NutLevel"))
levels(DataEx2$DateTimeEx2)<- c(1:7)
# calculate observed - predicted
DataEx2 <- mutate(DataEx2,NN.diff = N.uptake - NN.pred,
PO.diff = P.uptake - PO.pred,
NCP.diff = NCP.AFDW - NCP.pred,
NEC.diff = NEC.AFDW - NEC.pred)
DataEx2$DateTimeEx2<- as.factor(DataEx2$DateTimeEx2)
DataEx2$DayNight<- as.factor(DataEx2$DayNight)
# do we see differences in N uptake by nutrients or day night between predicted and observed?
mod1.NN.diff <- lmer(NN.diff ~ DayNight*NutLevel
+ (1|Tank)
+ (DayNight|DateTimeEx2),data=DataEx2)
Uptake.diff.means<-DataEx2 %>%
group_by(DayNight, NutLevel) %>%
summarise(.,NN.diff.mean = mean(NN.diff),NN.diff.SE = sd(NN.diff)/sqrt(n()), PO.diff.mean = mean(PO.diff),PO.diff.SE = sd(PO.diff)/sqrt(n()),
NCP.diff.mean = mean(NCP.diff), NCP.diff.SE = sd(NCP.diff)/sqrt(n()),NEC.diff.mean = mean(NEC.diff), NEC.diff.SE = sd(NEC.diff)/sqrt(n()) )
par(mfrow= c(1,2))
plot(rep(1:3,2), Uptake.diff.means$NN.diff.mean, col = Uptake.diff.means$DayNight,
pch = 19, cex = 2, ylim = c(-0.2,1), xaxt = 'n', ylab = 'Difference in uptake rate', xlab = '', main = 'N+N')
segments(rep(1:3,2), Uptake.diff.means$NN.diff.mean+Uptake.diff.means$NN.diff.SE,
rep(1:3,2), Uptake.diff.means$NN.diff.mean-Uptake.diff.means$NN.diff.SE,col = Uptake.diff.means$DayNight)
abline(h=0)
axis(1, at = c(1:3), c("Ambient","Medium","High"))
plot(rep(1:3,2), Uptake.diff.means$PO.diff.mean, col = Uptake.diff.means$DayNight,
pch = 19, cex = 2, ylim = c(-0.2,0.15), xaxt = 'n', ylab = 'Difference in uptake rate', xlab = '', main = 'PO')
segments(rep(1:3,2), Uptake.diff.means$PO.diff.mean+Uptake.diff.means$PO.diff.SE,
rep(1:3,2), Uptake.diff.means$PO.diff.mean-Uptake.diff.means$PO.diff.SE,col = Uptake.diff.means$DayNight)
abline(h=0)
axis(1, at = c(1:3), c("Ambient","Medium","High"))
# averages by nutrients without day night
Uptake.diff.means<-DataEx2 %>%
group_by(NutLevel) %>%
summarise(.,NN.diff.mean = mean(NN.diff),NN.diff.SE = sd(NN.diff)/sqrt(n()), PO.diff.mean = mean(PO.diff),PO.diff.SE = sd(PO.diff)/sqrt(n()),
NCP.diff.mean = mean(NCP.diff), NCP.diff.SE = sd(NCP.diff)/sqrt(n()),NEC.diff.mean = mean(NEC.diff), NEC.diff.SE = sd(NEC.diff)/sqrt(n()) )
par(mfrow= c(1,2))
plot(1:3, Uptake.diff.means$NN.diff.mean,
pch = 19, cex = 2, ylim = c(0,0.7), xaxt = 'n', ylab = 'Difference in uptake rate', xlab = '', main = 'N+N')
segments(1:3, Uptake.diff.means$NN.diff.mean+Uptake.diff.means$NN.diff.SE,
1:3, Uptake.diff.means$NN.diff.mean-Uptake.diff.means$NN.diff.SE)
abline(h=0)
axis(1, at = c(1:3), c("Ambient","Medium","High"))
plot(1:3, Uptake.diff.means$PO.diff.mean, pch = 19, cex = 2, ylim = c(-0.05,0.05), xaxt = 'n', ylab = 'Difference in uptake rate', xlab = '', main = 'PO')
segments(1:3, Uptake.diff.means$PO.diff.mean+Uptake.diff.means$PO.diff.SE,
1:3, Uptake.diff.means$PO.diff.mean-Uptake.diff.means$PO.diff.SE)
abline(h=0)
axis(1, at = c(1:3), c("Ambient","Medium","High"))
# do we see differences in N uptake by nutrients between predicted and observed?
mod2.NN.diff <- lmer(NN.diff ~ NutLevel
+ (1|Tank)
+ (1|DateTimeEx2),data=DataEx2)
mod2.PO.diff <- lmer(PO.diff ~ NutLevel
+ (1|Tank)
+ (1|DateTimeEx2),data=DataEx2)
## NCP
par(mfrow= c(1,1))
plot(1:3, Uptake.diff.means$NCP.diff.mean,
pch = 19, cex = 2, ylim = c(-5,5), xaxt = 'n', ylab = 'Difference in rate', xlab = '', lty=2, type ='b')
segments(1:3, Uptake.diff.means$NCP.diff.mean+Uptake.diff.means$NCP.diff.SE,
1:3, Uptake.diff.means$NCP.diff.mean-Uptake.diff.means$NCP.diff.SE)
abline(h=0)
#NEC
axis(1, at = c(1:3), c("Ambient","Medium","High"))
points(1:3, Uptake.diff.means$NEC.diff.mean, pch = 19, cex = 2, ylim = c(-1,1), xaxt = 'n', xlab = '', col = 'grey', type = 'b', lty=2)
segments(1:3, Uptake.diff.means$NEC.diff.mean+Uptake.diff.means$NEC.diff.SE,
1:3, Uptake.diff.means$NEC.diff.mean-Uptake.diff.means$NEC.diff.SE)
abline(h=0)
#axis(1, at = c(1:3), c("Ambient","Medium","High"))
legend('bottomleft',c('NCP','NCC'), pch = 19, col = c('black','grey'), bty = 'n')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.