content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
library(tidyverse)
library(randomForest)
library(glmnet)
library(tictoc)
# Prep: load the data, convert every categorical column to a factor, and
# allocate the result matrices.
# This is in braces just so that I don't need to run it line by line.
{
car6 <- as_tibble(read.csv('Cars6.csv'))
# Categorical predictors that model.matrix()/randomForest need as factors.
# (Replaces 19 repetitive car6$x <- as.factor(car6$x) assignments.)
factor_cols <- c('state_id', 'car_origin', 'engine_cylinders', 'engine_type',
                 'exterior_color', 'fleet', 'franchise_dealer',
                 'franchise_make', 'fuel_type', 'has_accidents',
                 'interior_color', 'isCab', 'is_new', 'listing_color',
                 'make_name', 'model_name', 'salvage', 'transmission',
                 'wheel_system')
car6 <- car6 %>% mutate(across(all_of(factor_cols), as.factor))
n1 <- nrow(car6)
# 100 trials x 4 models (Lasso, Ridge, Elastic, RF).
# NA init makes any unfilled cell obvious (was a 1:400 placeholder).
comp_time <- matrix(NA_real_, nrow = 100, ncol = 4) # store time
v.err <- matrix(NA_real_, nrow = 100, ncol = 4)     # store test R^2
t.err <- matrix(NA_real_, nrow = 100, ncol = 4)     # store train R^2
}
# Actual work on carS starts here:
{
# Subsample 8000 rows, drop ultra-rare factor levels, build trial-1 split.
set.seed(317)
carS <- car6[sample(1:n1, 8000), ]
# carS %>% group_by(model_name) %>% summarise(Count = n()) %>%
#   arrange(desc(Count)) %>% ungroup() %>% print(n = Inf)
# I take out a few rows because these only appear a handful of times, but they
# would still add more factors and may be influenced by high variance.
# I left in outliers because all of these models should mitigate the influence.
carS <- carS %>% filter(
  !(franchise_make %in% c('Alfa Romeo','Shelby','Rolls-Royce','Aston Martin',
                          'Bentley','Porsche','Merecedes-Benz','Jaguar','FIAT',
                          'Chrysler','Volvo','MINI','Lexus','Maserati','Land Rover',
                          'Genesis','Audi')),
  !(make_name %in% c('Hummer','Suzuki','Isuzu','Mazda','Mitsubishi','Cadillac',
                     'Lincoln','Subaru')),
  !(model_name %in% c('Silverado 2500','Sierra 2500HD','Silverado Classic 1500HD',
                      'i-Series','B-Series','Silverado 2500HD','F-150 Heritage',
                      'Sonoma','Silverado SS','Blackwood','Equator','Silverado 1500HD',
                      'Sierra Classic 1500','Baja','Raider','S-10',
                      'Sierra 1500 Limited','Escalade EXT','H3T','Mark LT',
                      'Sierra Classic 1500HD','Silverado Classic 1500')),
  !(state_id %in% c('KS','OK','AZ','CO','HI','NV','CA'))
)
n <- dim(carS)[1]
p <- dim(carS)[2] - 1  # predictor count (every column except price)
K <- 100               # number of repeated train/test trials
# Trial 1:
set.seed(100)
spliter <- sample(1:n, n/5)  # ~20% of rows become the test set
train <- carS[-spliter, ]
test <- carS[spliter, ]
price.tr <- train$price
price.val <- test$price
# in general, t in a variable name means training,
# v stands for validate, but I use it to mean test
# Labels for the residual tibble built later, which stacks residuals as
# c(<test>, <train>).  Bug fix: this was previously computed BEFORE price.tr
# existed (use-before-definition) and labelled the first chunk 'Train',
# mislabelling the residual boxplots.
test_or_train <- c(rep('Test', length(price.val)),
                   rep('Train', length(price.tr)))
m.tr <- model.matrix(price ~ ., data = train)[, -1]
m.val <- model.matrix(price ~ ., data = test)[, -1]
par(mfrow = c(1, 1))
}
# Regression
# Lasso
{
# Lasso (alpha = 1): CV fit, coefficients, R^2 on test/train, residuals.
tic('Lasso')
tic('Lasso CV Creation')
mod.las <- cv.glmnet(m.tr, price.tr, alpha = 1)
q <- toc()
plot(mod.las, main = 'Lasso', sub = paste('Seconds to compute: ', q$toc - q$tic))
lambda <- mod.las$lambda.min
beta.las <- as.matrix(predict(mod.las, s = lambda, type = 'coefficients'))[-1]
pred.las <- predict(mod.las, s = lambda, newx = m.val)
tr.las <- predict(mod.las, s = lambda, newx = m.tr)
# R^2 = 1 - SSE/SST; SST must be centred on the OBSERVED prices.
# (Bug fix: previously centred on mean(predictions), which biases the metric.)
v.err[1,1] <- 1 - mean((pred.las - price.val)^2) / mean((mean(price.val)-price.val)^2)
t.err[1,1] <- 1 - mean((tr.las - price.tr)^2) / mean((mean(price.tr)-price.tr)^2)
toc()
las.v.resid <- pred.las - price.val
las.t.resid <- tr.las - price.tr
}
# Ridge
{
# Ridge (alpha = 0): CV fit, coefficients, R^2 on test/train, residuals.
tic('Ridge')
tic('Ridge CV creation')
mod.rid <- cv.glmnet(m.tr, price.tr, alpha = 0)
q <- toc()
plot(mod.rid, main = 'Ridge', sub = paste('Seconds to compute: ', q$toc - q$tic))
lambda <- mod.rid$lambda.min
beta.rid <- as.matrix(predict(mod.rid, s = lambda, type = 'coefficients'))[-1]
pred.rid <- predict(mod.rid, s = lambda, newx = m.val)
tr.rid <- predict(mod.rid, s = lambda, newx = m.tr)
# R^2 = 1 - SSE/SST with SST centred on observed prices
# (bug fix: previously centred on mean(predictions))
v.err[1,2] <- 1 - mean((pred.rid - price.val)^2) / mean((mean(price.val)-price.val)^2)
t.err[1,2] <- 1 - mean((tr.rid - price.tr)^2) / mean((mean(price.tr)-price.tr)^2)
toc()
rid.v.resid <- pred.rid - price.val
rid.t.resid <- tr.rid - price.tr
}
# Elastic
{
# Elastic net (alpha = 0.5): CV fit, coefficients, R^2, residuals.
tic('Elastic')
tic('Elastic CV creation')
mod.els <- cv.glmnet(m.tr, price.tr, alpha = 0.5)
q <- toc()
plot(mod.els, main = 'Elastic', sub = paste('Seconds to compute: ', q$toc - q$tic))
lambda <- mod.els$lambda.min
beta.els <- as.matrix(predict(mod.els, s = lambda, type = 'coefficients'))[-1]
pred.els <- predict(mod.els, s = lambda, newx = m.val)
tr.els <- predict(mod.els, s = lambda, newx = m.tr)
# R^2 = 1 - SSE/SST with SST centred on observed prices
# (bug fix: previously centred on mean(predictions))
v.err[1,3] <- 1 - mean((pred.els - price.val)^2) / mean((mean(price.val)-price.val)^2)
t.err[1,3] <- 1 - mean((tr.els - price.tr)^2) / mean((mean(price.tr)-price.tr)^2)
toc()
els.v.resid <- pred.els - price.val
els.t.resid <- tr.els - price.tr
}
# Random Forest
{
# Random forest: fit, R^2 on test/train, variable importance, residuals.
tic('Random Forest')
rf.car <- randomForest(x = m.tr, y = price.tr, mtry = floor(sqrt(p)),
                       importance = TRUE)
y.hat.test <- predict(rf.car, newdata = m.val)
y.hat.train <- predict(rf.car, newdata = m.tr)
# R^2 = 1 - SSE/SST with SST centred on observed prices
# (bug fix: previously centred on mean(predictions))
v.err[1,4] <- 1 - mean((y.hat.test-price.val)^2) / mean((mean(price.val)-price.val)^2)
t.err[1,4] <- 1 - mean((y.hat.train-price.tr)^2) / mean((mean(price.tr)-price.tr)^2)
rf.import <- importance(rf.car)  # importance matrix; column 2 plotted later
toc()
rf.v.resid <- y.hat.test - price.val
rf.t.resid <- y.hat.train - price.tr
}
# bar plot for coefficients (unordered)
# Map every model-matrix column name back to its source variable so the bar
# plots can be coloured by predictor group.
# Patterns are listed in PRIORITY order — e.g. 'bed_length' before 'length',
# 'wheelbase' before 'wheel_system' — reproducing the first-match-wins
# behaviour of the original 40-branch if/else chain.  (Also drops the
# hard-coded 236: the table now sizes itself from colnames(m.tr).)
group_patterns <- c(
  'back_legroom', 'engine_displacement', 'franchise_make', 'height', 'is_new',
  'maximum_seating', 'seller_rating', 'wheelbase', 'bed_length', 'engine_type',
  'front_legroom', 'highway_fuel_', 'length', 'mileage', 'sp_id', 'width',
  'city_fuel_', 'exterior_color', 'fuel_tank', 'horsepower', 'listed_month',
  'model_name', 'torque', 'year', 'daysonmarket', 'fleet', 'fuel_type',
  'interior_color', 'listing_color', 'owner_count', 'transmission',
  'state_id', 'engine_cylinders', 'franchise_dealer', 'has_accidents',
  'isCab', 'make_name', 'salvage', 'wheel_system', 'car_origin'
)
bar_label <- tibble('Coefficient' = colnames(m.tr),
                    'group' = NA_character_)
# Assign lowest-priority patterns first so higher-priority patterns overwrite
# them — equivalent to the first matching branch winning in an if/else chain.
for (g in rev(group_patterns)) {
  bar_label$group[grepl(g, bar_label$Coefficient)] <- g
}
# Coefficient/importance table: one row per model-matrix column, with the
# fitted coefficients from each penalised model and the RF importance.
predictors <- tibble('Predictors' = colnames(m.tr), 'Lasso' = beta.las,
                     'Elastic' = beta.els, 'Ridge' = beta.rid,
                     'Group' = bar_label$group,
                     'Random Forest' = as.matrix(rf.import[,2]))
# 15 distinct colours recycled over the 40 predictor groups, defined once
# instead of repeating the 40-entry literal in every scale_fill_manual call.
group_colors <- rep(c('#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
                      '#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
                      '#808000','#800000','#00FF00','#008000','#FF00FF'),
                    length.out = 40)
# Bar plots of coefficients (unordered)
ggplot(data = predictors, aes(x = Predictors, y = Lasso, fill = Group)) +
  geom_bar(stat = 'identity') + ggtitle('Lasso Coefficients') +
  theme(legend.text = element_text(size = 6)) +
  scale_fill_manual(values = group_colors)
ggplot(data = predictors, aes(x = Predictors, y = Ridge, fill = Group)) +
  geom_bar(stat = 'identity') + ggtitle('Ridge Coefficients') +
  scale_fill_manual(values = group_colors)
ggplot(data = predictors, aes(x = Predictors, y = Elastic, fill = Group)) +
  geom_bar(stat = 'identity') + ggtitle('Elastic Coefficients') +
  scale_fill_manual(values = group_colors)
ggplot(data = predictors, aes(x = Predictors, y = `Random Forest`,
                              fill = Group)) +
  geom_bar(stat = 'identity') + ggtitle('Random Forest Importance') +
  scale_fill_manual(values = group_colors)
# Bar plots of coefficients (ordered by the Elastic coefficient)
ggplot(data = predictors, aes(x = reorder(Predictors, -Elastic),
                              y = Lasso, fill = Group)) +
  geom_bar(stat = 'identity') + ggtitle('Lasso Coefficients') +
  theme(axis.text.x = element_text(angle = 90, size = 4, color = 'black'),
        legend.text = element_text(size = 6)) +
  scale_fill_manual(values = group_colors)
ggplot(data = predictors, aes(x = reorder(Predictors, -Elastic),
                              y = Ridge, fill = Group)) +
  geom_bar(stat = 'identity') + ggtitle('Ridge Coefficients') +
  theme(axis.text.x = element_text(angle = 90, size = 4, color = 'black'),
        legend.text = element_text(size = 6)) +
  scale_fill_manual(values = group_colors)
ggplot(data = predictors, aes(x = reorder(Predictors, -Elastic),
                              y = Elastic, fill = Group)) +
  geom_bar(stat = 'identity') + ggtitle('Elastic Coefficients') +
  theme(axis.text.x = element_text(angle = 90, size = 4, color = 'black'),
        legend.text = element_text(size = 6)) +
  scale_fill_manual(values = group_colors)
# Title fixed: RF reports importance, not coefficients.
ggplot(data = predictors, aes(x = reorder(Predictors, -Elastic),
                              y = `Random Forest`, fill = Group)) +
  geom_bar(stat = 'identity') + ggtitle('Random Forest Importance') +
  theme(axis.text.x = element_text(angle = 90, size = 4, color = 'black'),
        legend.text = element_text(size = 6)) +
  scale_fill_manual(values = group_colors)
# box plot for residuals is something like this
# Residual boxplots
{
# Boxplots of (prediction - actual) residuals from trial 1, split by
# test vs. train, one panel per model.
# NOTE(review): 'Type' assumes test_or_train lines up with the
# c(<test residuals>, <train residuals>) stacking below — confirm that its
# construction labels the test chunk first.
# I don't know why these are so similar, but I 100% made sure that these are different
# Proof: sum(las.v.resid == rid.v.resid) = 0
# They are very similar box plots, but the individual points ARE different
my.resid <- tibble('Type' = test_or_train,
'Lasso' = c(las.v.resid,las.t.resid),
'Ridge' = c(rid.v.resid,rid.t.resid),
'Elastic' = c(els.v.resid,els.t.resid),
'Forest' = c(rf.v.resid,rf.t.resid))
# 2x2 grid: one residual boxplot per model; outliers suppressed for scale
par(mfrow=c(2,2))
boxplot(Lasso~Type, data = my.resid, outline = F, main = 'Lasso Residuals')
boxplot(Ridge~Type, data = my.resid, outline = F, main = 'Ridge Residuals')
boxplot(Elastic~Type, data = my.resid, outline =F, main = 'Elastic Residuals')
boxplot(Forest~Type, data = my.resid, outline = F, main = 'Random Forest Residuals')
# restore single-panel layout for later plots
par(mfrow=c(1,1))
}
# Trials 2-100: repeat the split / fit / score cycle for all four models,
# filling rows 2..K of v.err (test R^2) and t.err (train R^2).
for (i in 2:K) {
  set.seed(i)  # seed = trial index, so each trial is reproducible
  spliter <- sample(1:n, n/5)
  train <- carS[-spliter, ]
  test <- carS[spliter, ]
  price.tr <- train$price
  price.val <- test$price
  m.tr <- model.matrix(price ~ ., data = train)[, -1]
  m.val <- model.matrix(price ~ ., data = test)[, -1]
  # Per-observation SST centred on the OBSERVED prices, shared by all four
  # models below (bug fix: previously centred on mean(predictions)).
  sst.val <- mean((mean(price.val) - price.val)^2)
  sst.tr <- mean((mean(price.tr) - price.tr)^2)
  # Lasso
  mod.las <- cv.glmnet(m.tr, price.tr, alpha = 1)
  cat(1)  # crude progress marker, printed once per trial
  lambda <- mod.las$lambda.min
  pred.las <- predict(mod.las, s = lambda, newx = m.val)
  tr.las <- predict(mod.las, s = lambda, newx = m.tr)
  v.err[i, 1] <- 1 - mean((pred.las - price.val)^2) / sst.val
  t.err[i, 1] <- 1 - mean((tr.las - price.tr)^2) / sst.tr
  # Ridge
  mod.rid <- cv.glmnet(m.tr, price.tr, alpha = 0)
  lambda <- mod.rid$lambda.min
  pred.rid <- predict(mod.rid, s = lambda, newx = m.val)
  tr.rid <- predict(mod.rid, s = lambda, newx = m.tr)
  v.err[i, 2] <- 1 - mean((pred.rid - price.val)^2) / sst.val
  t.err[i, 2] <- 1 - mean((tr.rid - price.tr)^2) / sst.tr
  # Elastic net
  mod.els <- cv.glmnet(m.tr, price.tr, alpha = 0.5)
  lambda <- mod.els$lambda.min
  pred.els <- predict(mod.els, s = lambda, newx = m.val)
  tr.els <- predict(mod.els, s = lambda, newx = m.tr)
  v.err[i, 3] <- 1 - mean((pred.els - price.val)^2) / sst.val
  t.err[i, 3] <- 1 - mean((tr.els - price.tr)^2) / sst.tr
  # Random forest
  rf.car <- randomForest(x = m.tr, y = price.tr, mtry = floor(sqrt(p)),
                         importance = TRUE)
  y.hat.test <- predict(rf.car, newdata = m.val)
  y.hat.train <- predict(rf.car, newdata = m.tr)
  v.err[i, 4] <- 1 - mean((y.hat.test - price.val)^2) / sst.val
  t.err[i, 4] <- 1 - mean((y.hat.train - price.tr)^2) / sst.tr
}
# R^2 info: distribution of test/train R^2 across the K trials.
# (Titles fixed: the stored values are R^2, not residuals; the unused
# `data =` argument to boxplot() was dropped.)
{par(mfrow = c(1, 2))
colnames(t.err) <- c('Lasso', 'Ridge', 'Elastic', 'RF')
colnames(v.err) <- c('Lasso', 'Ridge', 'Elastic', 'RF')
boxplot(t.err, outline = FALSE, main = 'Training R^2', ylim = c(.85, .92))
boxplot(v.err, outline = FALSE, main = 'Testing R^2', ylim = c(.85, .92))
par(mfrow = c(1, 1))
}
# 90% empirical confidence interval of test R^2 for each model: the 5th and
# 95th percentiles across the K = 100 trials.  (Replaces manual sort-and-index
# of the 6th/94th order statistics, which only approximated these quantiles.)
quantile(v.err[, 1], c(0.05, 0.95))  # Lasso
quantile(v.err[, 2], c(0.05, 0.95))  # Ridge
quantile(v.err[, 3], c(0.05, 0.95))  # Elastic
quantile(v.err[, 4], c(0.05, 0.95))  # Random forest
| /9890 Final Project BC TW.R | no_license | BrianContreras317/9890-Project | R | false | false | 20,890 | r | library(tidyverse)
library(randomForest)
library(glmnet)
library(tictoc)
# prep stuff
# this is in a brackets just so that I don't need to run it line by line
{
car6 <- as_tibble(read.csv('Cars6.csv'))
car6$state_id <- as.factor(car6$state_id)
car6$car_origin <- as.factor(car6$car_origin)
car6$engine_cylinders <- as.factor(car6$engine_cylinders)
car6$engine_type <- as.factor(car6$engine_type)
car6$exterior_color <- as.factor(car6$exterior_color)
car6$fleet <- as.factor(car6$fleet)
car6$franchise_dealer <- as.factor(car6$franchise_dealer)
car6$franchise_make <- as.factor(car6$franchise_make)
car6$fuel_type <- as.factor(car6$fuel_type)
car6$has_accidents <- as.factor(car6$has_accidents)
car6$interior_color <- as.factor(car6$interior_color)
car6$isCab <- as.factor(car6$isCab)
car6$is_new <- as.factor(car6$is_new)
car6$listing_color <- as.factor(car6$listing_color)
car6$make_name <- as.factor(car6$make_name)
car6$model_name <- as.factor(car6$model_name)
car6$salvage <- as.factor(car6$salvage)
car6$transmission <- as.factor(car6$transmission)
car6$wheel_system <- as.factor(car6$wheel_system)
n1 <- dim(car6)[1]
comp_time <- matrix(1:400, nrow = 100) # store time
v.err <- matrix(1:400, nrow = 100) # store test residuals
t.err <- matrix(1:400, nrow = 100) # store train residuals
}
# Actual work on carS starts here:
{
set.seed(317)
carS <- car6[sample(1:n1,8000),]
# carS %>% group_by(model_name) %>% summarise(Count = n()) %>%
# arrange(desc(Count)) %>% ungroup() %>% print(n = Inf)
# I take out a few rows because these only appear a handful of times, but they
# would still add more factors and may be influenced by high variance
# I left in out liers because all of these models should mitigate the influence
carS <- carS %>% filter(
!(franchise_make %in% c('Alfa Romeo','Shelby','Rolls-Royce','Aston Martin',
'Bentley','Porsche','Merecedes-Benz','Jaguar','FIAT',
'Chrysler','Volvo','MINI','Lexus','Maserati','Land Rover',
'Genesis','Audi')),
!(make_name %in% c('Hummer','Suzuki','Isuzu','Mazda','Mitsubishi','Cadillac',
'Lincoln','Subaru')),
!(model_name %in% c('Silverado 2500','Sierra 2500HD','Silverado Classic 1500HD',
'i-Series','B-Series','Silverado 2500HD','F-150 Heritage',
'Sonoma','Silverado SS','Blackwood','Equator','Silverado 1500HD',
'Sierra Classic 1500','Baja','Raider','S-10',
'Sierra 1500 Limited','Escalade EXT','H3T','Mark LT',
'Sierra Classic 1500HD','Silverado Classic 1500')),
!(state_id %in% c('KS','OK','AZ','CO','HI','NV','CA'))
)
n <- dim(carS)[1]
p <- dim(carS)[2]-1
K <- 100
test_or_train <- ifelse(1:n <= length(price.tr), 'Train', 'Test')
# Trial 1:
set.seed(100)
spliter <- sample(1:n, n/5)
train <- carS[-spliter,]
test <- carS[spliter,]
price.tr <- train$price
price.val <- test$price
# in general, t in a variable name means training,
# v stands for validate, but I use it to mean test
m.tr <- model.matrix(price ~ ., data = train)[,-1]
m.val <- model.matrix(price ~ ., data = test)[,-1]
par(mfrow=c(1,1))
}
# Regression
# Lasso
{
tic('Lasso')
tic('Lasso CV Creation')
mod.las <- cv.glmnet(m.tr,price.tr, alpha=1)
q <- toc()
plot(mod.las, main = 'Lasso', sub = paste ('Seconds to compute: ',q$toc-q$tic))
lambda <- mod.las$lambda.min
beta.las <- as.matrix(predict(mod.las, s = lambda, type = 'coefficients'))[-1]
pred.las <- predict(mod.las, s = lambda, newx = m.val)
tr.las <- predict(mod.las, s = lambda, newx = m.tr)
v.err[1,1] <- 1 - mean((pred.las - price.val)^2) / mean((mean(pred.las)-price.val)^2)
t.err[1,1] <- 1 - mean((tr.las - price.tr)^2) / mean((mean(tr.las)-price.tr)^2)
toc()
las.v.resid <- pred.las-price.val
las.t.resid <- tr.las-price.tr
}
# Ridge
{
tic('Ridge')
tic('Ridge CV creation')
mod.rid <- cv.glmnet(m.tr,price.tr, alpha=0)
q <- toc()
plot(mod.rid, main = 'Ridge', sub = paste ('Seconds to compute: ',q$toc-q$tic))
lambda <- mod.rid$lambda.min
beta.rid <- as.matrix(predict(mod.rid, s = lambda, type = 'coefficients'))[-1]
pred.rid <- predict(mod.rid, s = lambda, newx = m.val)
tr.rid <- predict(mod.rid, s = lambda, newx = m.tr)
v.err[1,2] <- 1- mean((pred.rid- price.val)^2) / mean((mean(pred.rid)-price.val)^2)
t.err[1,2] <- 1- mean((tr.rid - price.tr)^2) / mean((mean(tr.rid)-price.tr)^2)
toc()
rid.v.resid <- pred.rid-price.val
rid.t.resid <- tr.rid-price.tr
}
# Elastic
{
tic('Elastic')
tic('Elastic CV creation')
mod.els <- cv.glmnet(m.tr,price.tr, alpha=0.5)
q <- toc()
plot(mod.els, main = 'Elastic', sub = paste ('Seconds to compute: ',q$toc-q$tic))
lambda <- mod.els$lambda.min
beta.els <- as.matrix(predict(mod.els, s = lambda, type = 'coefficients'))[-1]
pred.els <- predict(mod.els, s = lambda, newx = m.val)
tr.els <- predict(mod.els, s = lambda, newx = m.tr)
v.err[1,3] <- 1 - mean((pred.els- price.val)^2) / mean((mean(pred.els)-price.val)^2)
t.err[1,3] <- 1 - mean((tr.els - price.tr)^2) / mean((mean(tr.els)-price.tr)^2)
toc()
els.v.resid <- pred.els-price.val
els.t.resid <- tr.els-price.tr
}
# Random Forest
{
tic('Random Forest')
rf.car <- randomForest(x=m.tr, y=price.tr,mtry=floor(sqrt(p)), importance=T)
y.hat.test <- predict(rf.car, newdata = m.val)
y.hat.train <- predict(rf.car, newdata = m.tr)
v.err[1,4] <- 1 - mean((y.hat.test-price.val)^2) / mean((mean(y.hat.test)-price.val)^2)
t.err[1,4] <- 1 - mean((y.hat.train-price.tr)^2) / mean((mean(y.hat.train)-price.tr)^2)
rf.import <- importance(rf.car)
toc()
rf.v.resid <- y.hat.test-price.val
rf.t.resid <- y.hat.train-price.tr
}
# bar plot for coefficients (unordered)
bar_label <- tibble('Coefficient' = colnames(m.tr), 'group' = 1:236)
for (i in 1:236){
if (grepl('back_legroom',bar_label$Coefficient[i])) {
bar_label$group[i] = 'back_legroom'}
else if (grepl('engine_displacement',bar_label$Coefficient[i])){
bar_label$group[i] = 'engine_displacement'}
else if (grepl('franchise_make',bar_label$Coefficient[i])) {
bar_label$group[i] = 'franchise_make'}
else if (grepl('height',bar_label$Coefficient[i])) {
bar_label$group[i] = 'height'}
else if (grepl('is_new',bar_label$Coefficient[i])) {
bar_label$group[i] = 'is_new'}
else if (grepl('maximum_seating',bar_label$Coefficient[i])) {
bar_label$group[i] = 'maximum_seating'}
else if (grepl('seller_rating',bar_label$Coefficient[i])) {
bar_label$group[i] = 'seller_rating'}
else if (grepl('wheelbase',bar_label$Coefficient[i])) {
bar_label$group[i] = 'wheelbase'}
else if (grepl('bed_length',bar_label$Coefficient[i])) {
bar_label$group[i] = 'bed_length'}
else if (grepl('engine_type',bar_label$Coefficient[i])) {
bar_label$group[i] = 'engine_type'}
else if (grepl('front_legroom',bar_label$Coefficient[i])) {
bar_label$group[i] = 'front_legroom'}
else if (grepl('highway_fuel_',bar_label$Coefficient[i])) {
bar_label$group[i] = 'highway_fuel_'}
else if (grepl('length',bar_label$Coefficient[i])) {
bar_label$group[i] = 'length'}
else if (grepl('mileage',bar_label$Coefficient[i])) {
bar_label$group[i] = 'mileage'}
else if (grepl('sp_id',bar_label$Coefficient[i])) {
bar_label$group[i] = 'sp_id'}
else if (grepl('width',bar_label$Coefficient[i])) {
bar_label$group[i] = 'width'}
else if (grepl('city_fuel_',bar_label$Coefficient[i])) {
bar_label$group[i] = 'city_fuel_'}
else if (grepl('exterior_color',bar_label$Coefficient[i])) {
bar_label$group[i] = 'exterior_color'}
else if (grepl('fuel_tank',bar_label$Coefficient[i])) {
bar_label$group[i] = 'fuel_tank'}
else if (grepl('horsepower',bar_label$Coefficient[i])) {
bar_label$group[i] = 'horsepower'}
else if (grepl('listed_month',bar_label$Coefficient[i])) {
bar_label$group[i] = 'listed_month'}
else if (grepl('model_name',bar_label$Coefficient[i])) {
bar_label$group[i] = 'model_name'}
else if (grepl('torque',bar_label$Coefficient[i])) {
bar_label$group[i] = 'torque'}
else if (grepl('year',bar_label$Coefficient[i])) {
bar_label$group[i] = 'year'}
else if (grepl('daysonmarket',bar_label$Coefficient[i])) {
bar_label$group[i] = 'daysonmarket'}
else if (grepl('fleet',bar_label$Coefficient[i])) {
bar_label$group[i] = 'fleet'}
else if (grepl('fuel_type',bar_label$Coefficient[i])) {
bar_label$group[i] = 'fuel_type'}
else if (grepl('interior_color',bar_label$Coefficient[i])) {
bar_label$group[i] = 'interior_color'}
else if (grepl('listing_color',bar_label$Coefficient[i])) {
bar_label$group[i] = 'listing_color'}
else if (grepl('owner_count',bar_label$Coefficient[i])) {
bar_label$group[i] = 'owner_count'}
else if (grepl('transmission',bar_label$Coefficient[i])) {
bar_label$group[i] = 'transmission'}
else if (grepl('state_id',bar_label$Coefficient[i])) {
bar_label$group[i] = 'state_id'}
else if (grepl('engine_cylinders',bar_label$Coefficient[i])) {
bar_label$group[i] = 'engine_cylinders'}
else if (grepl('franchise_dealer',bar_label$Coefficient[i])) {
bar_label$group[i] = 'franchise_dealer'}
else if (grepl('has_accidents',bar_label$Coefficient[i])) {
bar_label$group[i] = 'has_accidents'}
else if (grepl('isCab',bar_label$Coefficient[i])) {
bar_label$group[i] = 'isCab'}
else if (grepl('make_name',bar_label$Coefficient[i])) {
bar_label$group[i] = 'make_name'}
else if (grepl('salvage',bar_label$Coefficient[i])) {
bar_label$group[i] = 'salvage'}
else if (grepl('wheel_system',bar_label$Coefficient[i])) {
bar_label$group[i] = 'wheel_system'}
else if (grepl('car_origin',bar_label$Coefficient[i])) {
bar_label$group[i] = 'car_origin'}
}
predictors <- tibble('Predictors' = colnames(m.tr), 'Lasso' = beta.las,
'Elastic' = beta.els, 'Ridge' = beta.rid, 'Group' = bar_label$group,
'Random Forest' = as.matrix(rf.import[,2]))
ggplot(data = predictors, aes(x = Predictors, y = Lasso, fill = Group)) +
geom_bar(stat='identity') + ggtitle('Lasso Coefficients') +
theme(legend.text = element_text(size = 6)) +
scale_fill_manual(values = c('#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A'))
ggplot(data = predictors, aes(x = Predictors, y = Ridge,
fill = Group)) +
geom_bar(stat='identity') + ggtitle('Ridge Coefficients') +
# theme(axis.text.x = element_text(angle=90, size=4, color = 'black'),
# legend.text = element_text(size = 6)) +
scale_fill_manual(values = c('#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A'))
ggplot(data = predictors, aes(x = Predictors, y = Elastic,
fill = Group)) +
geom_bar(stat='identity') + ggtitle('Elastic Coefficients') +
# theme(axis.text.x = element_text(angle=90, size=4, color = 'black'),
# legend.text = element_text(size = 6)) +
scale_fill_manual(values = c('#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A'))
ggplot(data = predictors, aes(x = Predictors, y = `Random Forest`,
fill = Group)) +
geom_bar(stat='identity') + ggtitle('Random Forest Importance') +
# theme(axis.text.x = element_text(angle=90, size=4, color = 'black'),
# legend.text = element_text(size = 6)) +
scale_fill_manual(values = c('#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A'))
# bar plot for coefficients (ordered)
# Lasso coefficients per predictor, colored by predictor group.
# NOTE(review): bars in all four plots are ordered by the Elastic column so
# they share a common x-axis ordering -- confirm this is intentional.
ggplot(data = predictors, aes(x = reorder(Predictors, -Elastic),
y = Lasso, fill = Group)) +
geom_bar(stat='identity') + ggtitle('Lasso Coefficients') +
theme(axis.text.x = element_text(angle=90, size=4, color = 'black'),
legend.text = element_text(size = 6)) +
# 40 manual fill colors (a 10-color palette repeated) to cover every group
scale_fill_manual(values = c('#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A'))
# Ridge coefficients per predictor (same ordering and palette).
ggplot(data = predictors, aes(x = reorder(Predictors, -Elastic),
y = Ridge, fill = Group)) +
geom_bar(stat='identity') + ggtitle('Ridge Coefficients') +
theme(axis.text.x = element_text(angle=90, size=4, color = 'black'),
legend.text = element_text(size = 6)) +
scale_fill_manual(values = c('#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A'))
# Elastic-net coefficients per predictor.
ggplot(data = predictors, aes(x = reorder(Predictors, -Elastic),
y = Elastic, fill = Group)) +
geom_bar(stat='identity') + ggtitle('Elastic Coefficients') +
theme(axis.text.x = element_text(angle=90, size=4, color = 'black'),
legend.text = element_text(size = 6)) +
scale_fill_manual(values = c('#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A'))
# Random forest values per predictor.
# NOTE(review): random forests have no "coefficients"; the `Random Forest`
# column presumably holds importance scores -- the title may be misleading.
ggplot(data = predictors, aes(x = reorder(Predictors, -Elastic),
y = `Random Forest`, fill = Group)) +
geom_bar(stat='identity') + ggtitle('Random Forest Coefficients') +
theme(axis.text.x = element_text(angle=90, size=4, color = 'black'),
legend.text = element_text(size = 6)) +
scale_fill_manual(values = c('#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A',
'#808000','#800000','#00FF00','#008000','#FF00FF',
'#FF0000','#EAC117','#00FFFF','#808080','#0000FF',
'#000000','#ADD8E6','#FFA500','#800080','#A52A2A'))
# box plot for residuals is something like this
# Residual boxplots
{
# I don't know why these are so similar, but I 100% made sure that these are different
# Proof: sum(las.v.resid == rid.v.resid) = 0
# They are very similar box plots, but the individual points ARE different
# One tibble per model column; 'Type' flags whether a row comes from the
# test (validation) or training residual vectors.
my.resid <- tibble('Type' = test_or_train,
'Lasso' = c(las.v.resid,las.t.resid),
'Ridge' = c(rid.v.resid,rid.t.resid),
'Elastic' = c(els.v.resid,els.t.resid),
'Forest' = c(rf.v.resid,rf.t.resid))
# 2x2 grid of boxplots; outliers suppressed for readability
par(mfrow=c(2,2))
boxplot(Lasso~Type, data = my.resid, outline = F, main = 'Lasso Residuals')
boxplot(Ridge~Type, data = my.resid, outline = F, main = 'Ridge Residuals')
boxplot(Elastic~Type, data = my.resid, outline =F, main = 'Elastic Residuals')
boxplot(Forest~Type, data = my.resid, outline = F, main = 'Random Forest Residuals')
par(mfrow=c(1,1))    # restore single-panel layout
}
# Trials 2-100: repeated 80/20 train/test splits.  For each trial the four
# models are refit and a pseudo-R^2 statistic is stored in v.err (test) and
# t.err (train); columns: 1 = Lasso, 2 = Ridge, 3 = Elastic net, 4 = RF.
# FIX: removed a stray `cat(1)` debug print left inside the loop.
# NOTE(review): the denominator uses mean(predictions) rather than
# mean(observed price) -- confirm this is the intended baseline.
for (i in 2:K) {
  set.seed(i)                       # reproducible split per trial
  spliter <- sample(1:n, n/5)       # hold out 20% of rows for testing
  train <- carS[-spliter,]
  test <- carS[spliter,]
  price.tr <- train$price
  price.val <- test$price
  m.tr <- model.matrix(price ~ ., data = train)[,-1]
  m.val <- model.matrix(price ~ ., data = test)[,-1]
  # Lasso (alpha = 1), lambda chosen by cross-validation
  mod.las <- cv.glmnet(m.tr, price.tr, alpha = 1)
  lambda <- mod.las$lambda.min
  pred.las <- predict(mod.las, s = lambda, newx = m.val)
  tr.las <- predict(mod.las, s = lambda, newx = m.tr)
  v.err[i,1] <- 1 - mean((pred.las - price.val)^2) / mean((mean(pred.las)-price.val)^2)
  t.err[i,1] <- 1 - mean((tr.las - price.tr)^2) / mean((mean(tr.las)-price.tr)^2)
  # Ridge (alpha = 0)
  mod.rid <- cv.glmnet(m.tr, price.tr, alpha = 0)
  lambda <- mod.rid$lambda.min
  pred.rid <- predict(mod.rid, s = lambda, newx = m.val)
  tr.rid <- predict(mod.rid, s = lambda, newx = m.tr)
  v.err[i,2] <- 1 - mean((pred.rid - price.val)^2) / mean((mean(pred.rid)-price.val)^2)
  t.err[i,2] <- 1 - mean((tr.rid - price.tr)^2) / mean((mean(tr.rid)-price.tr)^2)
  # Elastic net (alpha = 0.5)
  mod.els <- cv.glmnet(m.tr, price.tr, alpha = 0.5)
  lambda <- mod.els$lambda.min
  pred.els <- predict(mod.els, s = lambda, newx = m.val)
  tr.els <- predict(mod.els, s = lambda, newx = m.tr)
  v.err[i,3] <- 1 - mean((pred.els - price.val)^2) / mean((mean(pred.els)-price.val)^2)
  t.err[i,3] <- 1 - mean((tr.els - price.tr)^2) / mean((mean(tr.els)-price.tr)^2)
  # Random forest with the regression default mtry = floor(sqrt(p))
  rf.car <- randomForest(x = m.tr, y = price.tr, mtry = floor(sqrt(p)), importance = TRUE)
  y.hat.test <- predict(rf.car, newdata = m.val)
  y.hat.train <- predict(rf.car, newdata = m.tr)
  v.err[i,4] <- 1 - mean((y.hat.test-price.val)^2) / mean((mean(y.hat.test)-price.val)^2)
  t.err[i,4] <- 1 - mean((y.hat.train-price.tr)^2) / mean((mean(y.hat.train)-price.tr)^2)
}
# R^2 info
# Despite the matrix names (t.err / v.err), the stored values are
# pseudo-R^2 statistics, not raw residuals.
{par(mfrow=c(1,2))
colnames(t.err) <- c('Lasso','Ridge',"Elastic","RF")
colnames(v.err) <- c('Lasso','Ridge',"Elastic","RF")
# NOTE(review): the titles say "Residuals" but the values plotted are R^2;
# consider renaming the plot titles.
boxplot(t.err,data = t.err, outline = F, main = 'Training Residuals', ylim = c(.85,.92))
boxplot(v.err,data = v.err, outline = F, main = 'Testing Residuals', ylim = c(.85,.92))
par(mfrow=c(1,1))
}
# 90% CI of R^2
# Empirical interval from the 100 trials.  NOTE(review): the 6th and 94th
# order statistics of 100 values only approximate the 5%/95% quantiles;
# quantile(x, c(.05, .95)) would be the standard choice.
lr2 <- sort(v.err[,1])
c(lr2[6],lr2[94])
rr2 <- sort(v.err[,2])
c(rr2[6],rr2[94])
er2 <- sort(v.err[,3])
c(er2[6],er2[94])
fr2 <- sort(v.err[,4])
c(fr2[6],fr2[94])
|
fig09x006 <- function() {
  #
  # Figure 9.6: horizontal boxplots of fish mass by species, with box
  # widths proportional to each species' relative sample size.
  # Requires textbook R data set Fishcatch.RData, which contains the
  # dataframe Fishcatch (looked up in the calling environment).
  #
  graphics.off()
  # NOTE(review): windows() is Windows-only; dev.new() would be portable.
  windows(width = 4.5, height = 4.5, pointsize = 12)
  par(fin = c(4.45, 4.45), pin = c(4.45, 4.45),
      mai = c(0.85, 1.25, 0.25, 0.25), xaxs = "r")
  #
  fc <- Fishcatch
  #
  # Order the species factor so the boxes appear in the desired sequence.
  fc$Species <- ordered(fc$Species,
                        levels = c(1, 6, 2, 7, 3, 4, 5),
                        labels = c("Bream", "Pike", "Ide", "Perch",
                                   "Roach", "Silver Bream", "Smelt"))
  #
  # Count = 1 for every non-missing weight, 0 for NA weights; the
  # per-species sums then give relative sample sizes for the box widths.
  fc$Count <- fc$Weight
  fc$Count[fc$Count >= 0] <- 1
  fc$Count[is.na(fc$Count)] <- 0   # idiom fix: was is.na(...) == TRUE
  sums <- tapply(fc[, "Count"],
                 INDEX = fc[, "Species"], FUN = sum, na.rm = TRUE)
  sums <- sums / max(sums)         # normalize so the widest box equals 1
  #
  boxplot(Weight ~ Species, data = fc,
          ylim = c(0, 2000), xlab = "Mass (g)",
          horizontal = TRUE, las = 1, width = sums)
  #
  dev.copy2eps(file = "fig09x006.eps")
  dev.copy2pdf(file = "fig09x006.pdf")
}
| /graphicsforstatistics_2e_figures_scripts_r/Chapter 9/fig09x006.R | no_license | saqibarfeen/coding_time | R | false | false | 775 | r | fig09x006<-function(){
#
# Requires textbook R data set Fishcatch.RData
# which contains dataframe Fishcatch
#
# Produces Figure 9.6: horizontal boxplots of fish mass by species,
# with box widths proportional to each species' relative sample size.
# NOTE(review): windows() is Windows-only; dev.new() would be portable.
graphics.off()
windows(width=4.5,height=4.5,pointsize=12)
par(fin=c(4.45,4.45),pin=c(4.45,4.45),
mai=c(0.85,1.25,0.25,0.25),xaxs="r")
#
fc<-Fishcatch
#
# order the species factor so the boxes appear in the desired sequence
fc$Species<-ordered(fc$Species,
levels=c(1,6,2,7,3,4,5),
labels=c("Bream","Pike","Ide","Perch",
"Roach","Silver Bream","Smelt"))
#
# Count = 1 for non-missing weights, 0 for NA weights; per-species sums
# then give the relative sample sizes used as box widths
# (note: is.na(...) == TRUE is redundant; is.na(...) alone suffices)
fc$Count<-fc$Weight
fc$Count[fc$Count >= 0]<-1
fc$Count[is.na(fc$Count)== TRUE]<-0
sums<-tapply(fc[,"Count"],
INDEX=fc[,"Species"],FUN=sum,na.rm=TRUE)
sums<-sums/max(sums)  # normalize so the widest box equals 1
#
boxplot(Weight ~ Species,data=fc,
ylim=c(0,2000),xlab="Mass (g)",
horizontal=TRUE,las=1,width=sums)
#
dev.copy2eps(file="fig09x006.eps")
dev.copy2pdf(file="fig09x006.pdf")
}
|
library(rjags)
library(coda)
library(BEST)
library(stats)
source("DBDA2E-utilities.R")

# Simulation setup ------------------------------------------------------
M <- 15          # number of students
N <- 40          # number of questions per exam
pGuess <- 0.5    # per-question success probability for a guessing student
aStudy <- 16     # Beta(aStudy, bStudy) distribution of the per-question
bStudy <- 4      #   success probability for a studied student
observations <- rep(0, M)

# Generate one score (total correct answers out of N) per student.
for (student in seq_len(M)) {
  # Assign the student to a group: 0 = guessing, 1 = studied
  # (equal probability; the positional 1 is the 'size' argument).
  group <- sample(x = c(0, 1), size = 1, prob = c(0.5, 0.5))
  if (group == 1) {
    # Studied: draw this student's success probability from Beta(16, 4).
    answerProb <- rbeta(1, aStudy, bStudy)
  } else {
    # Guessing: fixed 50% chance per question.
    answerProb <- pGuess
  }
  # Total correct answers is a single binomial count out of N questions.
  correctAnswers <- rbinom(n = 1, prob = answerProb, size = N)
  observations[student] <- correctAnswers
}

observations
hist(observations, breaks = 10)
| /ex2Sampler.R | permissive | MelleStarke/Bayes-Stat-Group-Ass-1 | R | false | false | 855 | r | library(rjags)
library(coda)
library(BEST)
library(stats)
source("DBDA2E-utilities.R")
#number of students
M <- 15
#number of questions per exam
N <-40
#per-question success probability for a guessing student
pGuess <- 0.5
#Beta(aStudy, bStudy) distribution of the per-question success
#probability for a studied student
aStudy <- 16
bStudy <- 4
observations <- rep(0,M)
#generate data: one total score (correct answers out of N) per student
for(i in 1:M){
#group: 0 is guessing, 1 is studied, chosen with equal probability
#(note: 'x' is named, so the positional 1 matches the 'size' argument)
group <- sample(1,x = c(0,1), prob = c(0.5,0.5))
if(group == 1){ #studied
# pick from distribution of probabilities for correct answer
answerProb <- rbeta(1,aStudy,bStudy)
}
else{ #guessing
answerProb <- pGuess
}
#draw the student's total number of correct answers out of N questions
#(a single binomial count, not 40 individual 0/1 answers)
correctAnswers <- rbinom(n=1,prob=answerProb, size=N)
observations[i] <- correctAnswers
}
observations
hist(observations, breaks = 10)
|
install.packages("ggplot2")   # NOTE(review): one-time setup; usually not left in a script
library(ggplot2)

data <- read.table(file = "age_distance.txt", header = TRUE)
Age <- data[, 1]
Distance <- data[, 2]

# Fit the simple linear regression once, up front.  FIX: the original
# called abline(coef(Reg)) in section b before Reg was defined (in
# section e), which errors in a fresh session.
Reg <- lm(Distance ~ Age)

# a. Stem-and-leaf plots for "Age" and "Distance"; comment on the plots.
stem(Age)
stem(Distance)

# b. Scatterplot of "Age" (x-axis) vs "Distance" (y-axis), with the
# fitted regression line; look for outliers/influential observations.
plot(Age, Distance, type = "p", col = "yellowgreen",
     main = "Age-Distance Relationship", xlab = "Age", ylab = "Distance",
     pch = 10)
cor(Distance, Age)
abline(coef(Reg), col = "red")

# c. Mean and standard deviation of each variable, then the Pearson
# correlation coefficient.
mean(Age)
mean(Distance)
sd(Age)
sd(Distance)
cor(Age, Distance, method = "pearson")

# d. (answered in the write-up)

# e. Residual plot (residuals vs fitted values); comment on the plot.
resid(Reg)   # list of residuals
residPlot <- ggplot(aes(x = .fitted, y = .resid), data = Reg) +
  geom_point() + geom_hline(yintercept = 0) +
  labs(x = "fitted value", y = "Residual")
residPlot

# f. Normal probability plot of the residuals; comment on the plot.
qqnorm(resid(Reg))
qqline(resid(Reg))

# g. Estimated error variance (sigma-hat squared) from the fitted model.
summary(Reg)
(summary(Reg)$sigma)^2
| /HW_2.R | no_license | CookieNotSession/Statistics_R | R | false | false | 1,356 | r | install.packages("ggplot2")
library(ggplot2)
data <- read.table(file = "age_distance.txt" , header = T)
Age <- data[,1]
Distance <- data[,2]
# a. Stem-and-leaf plots for "Age" and "Distance"; comment on the two plots.
stem(Age)
stem(Distance)
# b. Scatterplot of "Age" (x-axis) vs "Distance" (y-axis); comment on the
# plot and look for outliers or influential observations.
plot(Age,Distance,type="p",col="yellowgreen",main="Age-Distance Relationship",xlab="Age",ylab="Distance",pch=10)
cor(Distance,Age)
# NOTE(review): Reg is only defined below in section e, so this line fails
# in a fresh session -- the lm() fit should be moved above this point.
abline(coef(Reg),col = "red")
# c. Mean and standard deviation of each variable, then the Pearson
# correlation coefficient.
mean(Age)
mean(Distance)
sd(Age)
sd(Distance)
cor(Age,Distance,method="pearson")
# d. (answered in the write-up)
# e. Residual plot; comment on the plot.
Reg <- lm(Distance~Age) #Create a linear model
resid(Reg) #List of residuals
residPlot <- ggplot(aes(x = .fitted, y = .resid),data = Reg) + geom_point() + geom_hline(yintercept = 0) + labs (x = "fitted value", y = "Residual") # residuals vs fitted scatterplot
residPlot
# f. Normal probability plot of the residuals; comment on the plot.
qqnorm(resid(Reg)) # A quantile normal plot - good for checking normality
qqline(resid(Reg))
# g. Estimated error variance (sigma-hat squared) from the fitted model.
summary(Reg)
(summary(Reg)$sigma)**2
|
###############################################################################
# __ ___ ____ _____________________
# / |/ / / __ )/ _/_ __/ ____/ ___/
# / /|_/ /_____/ __ |/ / / / / __/ \__ \
# / / / /_____/ /_/ // / / / / /___ ___/ /
# /_/ /_/ /_____/___/ /_/ /_____//____/
#
# MASH-MICRO
# MBITES-Male: Survival
# MASH-MICRO Team
# September 18, 2017
#
###############################################################################
###############################################################################
# Resting Surival
###############################################################################
#' MBITES-Male: Resting Survival for \code{\link{MosquitoMale}}
#'
#' Run generic resting survival probabilities for bouts (launch to launch).
#' * This method is bound to \code{MosquitoMale$surviveResting()}.
#'
mbitesMale_surviveResting <- function(){
  # Dead mosquitoes have nothing left to survive.
  if(!self$isAlive()){
    return(invisible(NULL))
  }
  # A uniform draw below the current site's resting hazard kills the
  # mosquito: mark it for transition to the dead state "D".
  if(runif(1) < self$get_restHaz()){
    private$stateNew = "D"
  }
}
#' MBITES-Male: Get Resting Hazards for \code{\link{MosquitoMale}}
#'
#' Get resting hazards for \code{\link{mbitesGeneric_surviveResting}}.
#' * This method is bound to \code{MosquitoMale$get_restHaz()}.
#'
mbitesMale_get_restHaz <- function(){
  # The resting hazard depends on the type of site currently occupied.
  site = private$pSetNow
  if(site == "m"){
    # mating site
    return(private$LandscapePointer$get_MatingSites(private$locNow)$get_haz())
  }
  if(site == "s"){
    # sugar-feeding site
    return(private$LandscapePointer$get_SugarSites(private$locNow)$get_haz())
  }
  # any other point-set code is a programming error
  stop(cat("illegal point set for MBITES-Male: ",site,"\n",sep=""))
}
###############################################################################
# Flight Survival
###############################################################################
#' MBITES-Male: Flight Survival for \code{\link{MosquitoMale}}
#'
#' Run generic flight survival probabilities for bouts (launch to launch).
#' Depending on settings from M-BITES parameters, senescence and/or tattering may also be simulated.
#' * This method is bound to \code{MosquitoMale$surviveFlight()}.
#'
mbitesMale_surviveFlight <- function(){
  # Dead mosquitoes are skipped.
  if(!self$isAlive()){
    return(invisible(NULL))
  }
  # Start from the baseline survival probability for the current state.
  surviveProb = self$get_surviveFlightProb()
  # Optional wing tattering: accumulate damage, then discount survival.
  if(private$MalePopPointer$get_MBITES_PAR("TATTER")){
    private$damage = private$damage + self$rTatterSize()
    surviveProb = surviveProb * self$pTatter()
  }
  # Optional senescence discount.
  if(private$MalePopPointer$get_MBITES_PAR("SENESCE")){
    surviveProb = surviveProb * self$pSenesce()
  }
  # The mosquito dies with probability 1 - surviveProb.
  if(runif(1) < 1 - surviveProb){
    private$stateNew = "D"
  }
}
#' MBITES-Male: Probability of Death due to Senescence for \code{\link{MosquitoMale}}
#'
#' probability of death due to senescence given by \deqn{ \frac{2+sns.b}{1+sns.b} - \frac{e^{sns.a\times age}}{sns.b+e^{sns.a\times age}} }
#' * This method is bound to \code{MosquitoMale$pSenesce()}.
#'
mbitesMale_pSenesce <- function(){
  # Survival discount due to senescence:
  #   (2+b)/(1+b) - exp(a*age) / (b + exp(a*age))
  # with a = sns.a, b = sns.b and age = current time - birth day.
  age = private$tNow - private$bDay
  # BUG FIX: the formula previously read private$age while the locally
  # computed age above was discarded; use the local value so the discount
  # actually depends on the mosquito's age.  (TODO confirm no separate
  # private$age field was intended.)
  sns.a = private$MalePopPointer$get_MBITES_PAR("sns.a")
  sns.b = private$MalePopPointer$get_MBITES_PAR("sns.b")
  return((2+sns.b)/(1+sns.b) - exp(sns.a*age)/(sns.b + exp(sns.a*age)))
}
#' MBITES-Generic: Wing Tattering for \code{\link{MosquitoFemale}}
#'
#' Draw from a zero-inflated Beta distribution for additive wing damage from tattering.
#' Wing damage is given by \deqn{ \left\{\begin{matrix}
#' x=0; P(ttsz.p)
#' \\
#' x\sim Beta(ttsz.a,ttsz.b); P(1-ttsz.p)
#' \end{matrix}\right. }
#' * This method is bound to \code{MosquitoMale$rTatterSize()}.
#' @md
mbitesMale_rTatterSize <- function(){
  # Zero-inflated Beta draw: with probability ttsz.p there is no new
  # damage; otherwise the damage increment is Beta(ttsz.a, ttsz.b).
  zeroProb = private$MalePopPointer$get_MBITES_PAR("ttsz.p")
  if(runif(1) < zeroProb){
    return(0)
  }
  rbeta(1,
        private$MalePopPointer$get_MBITES_PAR("ttsz.a"),
        private$MalePopPointer$get_MBITES_PAR("ttsz.b"))
}
#' MBITES-Generic: Probability of Death due to Wing Tattering for \code{\link{MosquitoFemale}}
#'
#' probability of death due to tattering given by \deqn{ \frac{2+ttr.b}{1+ttr.b} - \frac{e^{damage\times ttr.a}}{ttr.b+e^{damage\times ttr.a}} }
#' * This method is bound to \code{MosquitoFemale$pTatter()}.
#' @md
mbitesMale_pTatter <- function(){
  # Survival discount due to accumulated wing damage:
  #   (2+b)/(1+b) - exp(a*damage) / (b + exp(a*damage))
  # with a = ttr.a, b = ttr.b.
  a = private$MalePopPointer$get_MBITES_PAR("ttr.a")
  b = private$MalePopPointer$get_MBITES_PAR("ttr.b")
  eDam = exp(private$damage * a)
  (2 + b)/(1 + b) - eDam/(b + eDam)
}
#' MBITES-Male: Get Baseline Survival Probability for \code{\link{MosquitoMale}}
#'
#' Get baseline flight survival probability for \code{\link{mbitesGeneric_surviveFlight}}.
#' * This method is bound to \code{MosquitoMale$get_surviveFlightProb()}.
#'
mbitesMale_get_surviveFlightProb <- function(){
  # Baseline flight survival depends on the current behavioral state:
  # M (mating), S (sugar feeding), R (resting).
  state = private$state
  if(!(state %in% c("M","S","R"))){
    stop("illegal behavioral state for MBITES-Male")
  }
  # Parameter names follow the "<state>_surv" convention.
  private$MalePopPointer$get_MBITES_PAR(paste0(state, "_surv"))
}
| /MASH-MICRO/R/MBITES-Male-Survival.R | no_license | QianZhang7/MASH-Main | R | false | false | 4,989 | r | ###############################################################################
# __ ___ ____ _____________________
# / |/ / / __ )/ _/_ __/ ____/ ___/
# / /|_/ /_____/ __ |/ / / / / __/ \__ \
# / / / /_____/ /_/ // / / / / /___ ___/ /
# /_/ /_/ /_____/___/ /_/ /_____//____/
#
# MASH-MICRO
# MBITES-Male: Survival
# MASH-MICRO Team
# September 18, 2017
#
###############################################################################
###############################################################################
# Resting Surival
###############################################################################
#' MBITES-Male: Resting Survival for \code{\link{MosquitoMale}}
#'
#' Run generic resting survival probabilities for bouts (launch to launch).
#' * This method is bound to \code{MosquitoMale$surviveResting()}.
#'
mbitesMale_surviveResting <- function(){
if(self$isAlive()){
# die during rest with probability equal to the current site's hazard
if(runif(1) < self$get_restHaz()){
private$stateNew = "D"
}
}
}
#' MBITES-Male: Get Resting Hazards for \code{\link{MosquitoMale}}
#'
#' Get resting hazards for \code{\link{mbitesGeneric_surviveResting}}.
#' * This method is bound to \code{MosquitoMale$get_restHaz()}.
#'
mbitesMale_get_restHaz <- function(){
# dispatch on the current point set: m = mating site, s = sugar site
switch(private$pSetNow,
m = {return(private$LandscapePointer$get_MatingSites(private$locNow)$get_haz())},
s = {return(private$LandscapePointer$get_SugarSites(private$locNow)$get_haz())},
{stop(cat("illegal point set for MBITES-Male: ",private$pSetNow,"\n",sep=""))}
)
}
###############################################################################
# Flight Survival
###############################################################################
#' MBITES-Male: Flight Survival for \code{\link{MosquitoMale}}
#'
#' Run generic flight survival probabilities for bouts (launch to launch).
#' Depending on settings from M-BITES parameters, senescence and/or tattering may also be simulated.
#' * This method is bound to \code{MosquitoMale$surviveFlight()}.
#'
mbitesMale_surviveFlight <- function(){
if(self$isAlive()){
# baseline survival probability for the current behavioral state
p = self$get_surviveFlightProb()
# optional wing tattering: accumulate damage, then discount survival
if(private$MalePopPointer$get_MBITES_PAR("TATTER")){
private$damage = private$damage + self$rTatterSize()
p = p * self$pTatter()
}
# optional senescence discount
if(private$MalePopPointer$get_MBITES_PAR("SENESCE")){
p = p * self$pSenesce()
}
# the mosquito dies with probability 1 - p
if(runif(1) < 1-p){
private$stateNew = "D"
}
}
}
#' MBITES-Male: Probability of Death due to Senescence for \code{\link{MosquitoMale}}
#'
#' probability of death due to senescence given by \deqn{ \frac{2+sns.b}{1+sns.b} - \frac{e^{sns.a\times age}}{sns.b+e^{sns.a\times age}} }
#' * This method is bound to \code{MosquitoMale$pSenesce()}.
#'
mbitesMale_pSenesce <- function(){
  # Survival discount due to senescence:
  #   (2+b)/(1+b) - exp(a*age) / (b + exp(a*age))
  # with a = sns.a, b = sns.b and age = current time - birth day.
  age = private$tNow - private$bDay
  # BUG FIX: the formula previously read private$age while the locally
  # computed age above was discarded; use the local value so the discount
  # actually depends on the mosquito's age.  (TODO confirm no separate
  # private$age field was intended.)
  sns.a = private$MalePopPointer$get_MBITES_PAR("sns.a")
  sns.b = private$MalePopPointer$get_MBITES_PAR("sns.b")
  return((2+sns.b)/(1+sns.b) - exp(sns.a*age)/(sns.b + exp(sns.a*age)))
}
#' MBITES-Generic: Wing Tattering for \code{\link{MosquitoFemale}}
#'
#' Draw from a zero-inflated Beta distribution for additive wing damage from tattering.
#' Wing damage is given by \deqn{ \left\{\begin{matrix}
#' x=0; P(ttsz.p)
#' \\
#' x\sim Beta(ttsz.a,ttsz.b); P(1-ttsz.p)
#' \end{matrix}\right. }
#' * This method is bound to \code{MosquitoMale$rTatterSize()}.
#' @md
mbitesMale_rTatterSize <- function(){
# zero-inflated Beta draw: with probability ttsz.p there is no new damage,
# otherwise the damage increment is Beta(ttsz.a, ttsz.b)
if(runif(1) < private$MalePopPointer$get_MBITES_PAR("ttsz.p")){
return(0)
} else {
return(rbeta(1,private$MalePopPointer$get_MBITES_PAR("ttsz.a"),private$MalePopPointer$get_MBITES_PAR("ttsz.b")))
}
}
#' MBITES-Generic: Probability of Death due to Wing Tattering for \code{\link{MosquitoFemale}}
#'
#' probability of death due to tattering given by \deqn{ \frac{2+ttr.b}{1+ttr.b} - \frac{e^{damage\times ttr.a}}{ttr.b+e^{damage\times ttr.a}} }
#' * This method is bound to \code{MosquitoFemale$pTatter()}.
#' @md
mbitesMale_pTatter <- function(){
# survival discount from accumulated wing damage:
# (2+b)/(1+b) - exp(a*damage)/(b + exp(a*damage)), a = ttr.a, b = ttr.b
return((2+private$MalePopPointer$get_MBITES_PAR("ttr.b"))/(1+private$MalePopPointer$get_MBITES_PAR("ttr.b")) - exp(private$damage*private$MalePopPointer$get_MBITES_PAR("ttr.a"))/(private$MalePopPointer$get_MBITES_PAR("ttr.b") + exp(private$damage*private$MalePopPointer$get_MBITES_PAR("ttr.a"))))
}
#' MBITES-Male: Get Baseline Survival Probability for \code{\link{MosquitoMale}}
#'
#' Get baseline flight survival probability for \code{\link{mbitesGeneric_surviveFlight}}.
#' * This method is bound to \code{MosquitoMale$get_surviveFlightProb()}.
#'
mbitesMale_get_surviveFlightProb <- function(){
# baseline survival depends on the behavioral state:
# M = mating, S = sugar feeding, R = resting
switch(private$state,
M = {return(private$MalePopPointer$get_MBITES_PAR("M_surv"))},
S = {return(private$MalePopPointer$get_MBITES_PAR("S_surv"))},
R = {return(private$MalePopPointer$get_MBITES_PAR("R_surv"))},
{stop("illegal behavioral state for MBITES-Male")}
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cytofkit_shinyAPP.R
\name{cytofkitShinyAPP}
\alias{cytofkitShinyAPP}
\title{A Shiny APP to interactively visualize the analysis results}
\usage{
cytofkitShinyAPP(RData = NULL, onServer = FALSE)
}
\arguments{
\item{RData}{Either the RData object file or the data object itself; if missing, the RData file needs to be loaded from within the Shiny app}
\item{onServer}{Logical value; if \code{TRUE}, sets the shinyApp host to 0.0.0.0 so that other clients can access it, otherwise it defaults to 127.0.0.1 (localhost)}
}
\value{
Opens shinyApp session for data visualisation
}
\description{
Takes the RData object file saved by cytofkit as input, automatically loads the data, and allows exploration of the analysis results with interactive controls
}
\examples{
d <- system.file('extdata', package = 'cytofkit2')
Rdata <- list.files(d, pattern = '.RData$', full.names = TRUE)
#only for interactive sessions, remove hash to run
#cytofkitShinyAPP(Rdata)
}
\author{
Hao Chen
}
| /man/cytofkitShinyAPP.Rd | no_license | JinmiaoChenLab/cytofkit2 | R | false | true | 1,038 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cytofkit_shinyAPP.R
\name{cytofkitShinyAPP}
\alias{cytofkitShinyAPP}
\title{A Shiny APP to interactively visualize the analysis results}
\usage{
cytofkitShinyAPP(RData = NULL, onServer = FALSE)
}
\arguments{
\item{RData}{Either the RData object file or the data object itself; if missing, the RData file needs to be loaded from within the Shiny app}
\item{onServer}{Logical value; if \code{TRUE}, sets the shinyApp host to 0.0.0.0 so that other clients can access it, otherwise it defaults to 127.0.0.1 (localhost)}
}
\value{
Opens shinyApp session for data visualisation
}
\description{
Takes the RData object file saved by cytofkit as input, automatically loads the data, and allows exploration of the analysis results with interactive controls
}
\examples{
d <- system.file('extdata', package = 'cytofkit2')
Rdata <- list.files(d, pattern = '.RData$', full.names = TRUE)
#only for interactive sessions, remove hash to run
#cytofkitShinyAPP(Rdata)
}
\author{
Hao Chen
}
|
---
title: Template for .Rmd
date: "`r paste('last updated',
format(lubridate::now(), '%H:%M, %d %B %Y'))`"
output:
pdf_document:
latex_engine: xelatex
toc: TRUE
toc_depth: 4
fontsize: 10pt
geometry: margin=0.4in,top=0.25in
TAGS: &, &&
---
#### Logic: & vs &&
#### & is vectorized: it compares 2 boolean vectors element-by-element
# simple examples of elementwise &
{
x = c(1,2,3)
y = c(1,2,-3)
(x > 0) & (x < 4)
# [1] TRUE TRUE TRUE
( (-2:2) >= 0) & ( (-2:2) <=0)
# [1] FALSE FALSE TRUE FALSE FALSE
( c(T,F,F) ) & (c(T,T,T))
# [1] TRUE FALSE FALSE
}
#### && short-circuits: the 2nd operand is not evaluated when the 1st is FALSE
#### && is scalar: given vectors it historically used only the 1st elements
#### (since R 4.3 a length > 1 operand is an error)
{
if ( (1>0) && (T)) print('hi')
if ( (1<0) && (T)) print('hi')
# NULL
## lazy: x is never evaluated here because 1<0 short-circuits, so no
## error even though x has length 3 and && would reject it if evaluated
if ( (1<0) && (x)) print('hi')
# NULL
}
| /BASE/103_base_&_&&_logic.R | no_license | jimrothstein/try_things_here | R | false | false | 841 | r | ---
title: Template for .Rmd
date: "`r paste('last updated',
format(lubridate::now(), '%H:%M, %d %B %Y'))`"
output:
pdf_document:
latex_engine: xelatex
toc: TRUE
toc_depth: 4
fontsize: 10pt
geometry: margin=0.4in,top=0.25in
TAGS: &, &&
---
#### Logic: & vs &&
#### & is vectorized: it compares 2 boolean vectors element-by-element
# simple examples of elementwise &
{
x = c(1,2,3)
y = c(1,2,-3)
(x > 0) & (x < 4)
# [1] TRUE TRUE TRUE
( (-2:2) >= 0) & ( (-2:2) <=0)
# [1] FALSE FALSE TRUE FALSE FALSE
( c(T,F,F) ) & (c(T,T,T))
# [1] TRUE FALSE FALSE
}
#### && short-circuits: the 2nd operand is not evaluated when the 1st is FALSE
#### && is scalar: given vectors it historically used only the 1st elements
#### (since R 4.3 a length > 1 operand is an error)
{
if ( (1>0) && (T)) print('hi')
if ( (1<0) && (T)) print('hi')
# NULL
## lazy: x is never evaluated here because 1<0 short-circuits, so no
## error even though x has length 3 and && would reject it if evaluated
if ( (1<0) && (x)) print('hi')
# NULL
}
|
###################################################################
#
# This function is part of WACSgen 1.0
# Copyright © 2013,2014,2015, D. Allard, BioSP,
# and Ronan Trépos MIA-T, INRA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details. http://www.gnu.org
#
###################################################################
#' Performs validations of WACS simulations
#'
#' The validation is based on different types of
#' statistics computed on WACS data, WACS parameters and WACS simulations.
#'
#' @export
#'
#' @param what Type of validation. Possible choices are:
#' \tabular{ll}{
#' \env{="Sim"} \tab Compares a simulation run to data \cr
#' \env{="Rain"}\tab qq-plots of rainfall, per season \cr
#' \env{="MeanSd"}\tab Compares monthly mean and standard deviations \cr
#' \env{="BiVar"}\tab Compares monthly variate correlations \cr
#' \env{="CorTemp"}\tab Compares monthly temporal correlations \cr
#' \env{="SumBase"}\tab Compares sums above a threshold \cr
#' \env{="Persistence"}\tab Compares persistence of a variable above (or below) a threshold \cr
#' }
#' @param wacsdata WACS data obtained when calling \link{WACSdata}
#'
#' @param wacspar WACS parameters estimated when calling \link{WACSestim}
#'
#' @param wacssimul WACS simulation obtained when calling \link{WACSsimul}
#'
#' @param varname Variable on which the validation is performed
#'
#' @param varname2 Second variable on which validation is performed (only needed if \code{what=BiVar})
#'
#' @param base Threshold used for "SumBase" and "Persistence"
#'
#' @param above Boolean value used for "Persistence":
#' TRUE if data is considered above threshold;
#' FALSE otherwise
#'
#' @param months Months to which the analysis should be restricted (only for "SumBase" and "Persistence")
#'
#' @return A list containing all information needed for plots; contains also the type of validation, as a class
#'
#' @examples
#' \dontrun{
#' ## Simple example
#' data(ClimateSeries)
#' ThisData = WACSdata(ClimateSeries)
#' ThisPar = WACSestim(ThisData)
#' ThisSim = WACSsimul(ThisPar, from="1995-01-01", to="2012-12-31")
#' Val1 = WACSvalid(what="Sim",wacsdata = ThisData,
#' wacspar = ThisPar, wacssimul = ThisSim, varname="tmin")
#' Val2 = WACSvalid(what="MeanSd",wacsdata = ThisData,
#' wacssimul = ThisSim, varname="RG")
#' Val3 = WACSvalid(what="SumBase", wacsdata = ThisData,
#' wacssimul = ThisSim, varname="tmoy", base=5, month=2:5)
#' Val4 = WACSvalid(what="Persistence",wacsdata = ThisData,
#' wacssimul = ThisSim, varname="tmin", base=0, above=FALSE)
#' }
#' @note
#'
#' If \code{what=sim}, data and simulations are displayed as a function of the day of the year, from 1 to 365.
#' Smoothed versions of daily average and daily envelopes (defined by average +/- 2. standard deviations) are also displayed.
#'
#'
#' If \code{what=rain}, qq-plots and superimposition of histograms and models of rain are produced for each season.
#'
#'
#' If \code{what=MeanSd}, boxplots of monthly means and monthly standard deviations are compared.
#' The median value of the monthly mean, resp. monthly standard deviation, of the data are displayed
#' on top of the boxplots computed on the simulations.
#'
#'
#' #' If \code{what=BiVar}, boxplots of monthly correlations coefficients between \code{varname} and \code{varname2}
#' are compared. The median value of the correlation coefficient computed on the data is displayed
#' on top of the boxplots computed on the simulations.
#'
#'
#' If \code{what=CorTemp}, boxplots of monthly 1-day auto correlation are compared. The median value of the
#' auto-correlation coefficient computed on the data is displayed
#' on top of the boxplots computed on the simulations.
#'
#'
#' If \code{what=SumBase}, boxplots of the sum of the variable \code{varname} above a given threshold, \code{base},
#' is computed during the months provided in the variable \code{months}.
#'
#'
#' If \code{what=Persistence}, histograms of consecutive days of the variable \code{varname} above (or below) a given
#' threshold, \code{base}, are compared. If \code{above=TRUE}, consecutive days above the threshold are computed,
#' whereas days below the threshold are computed if \code{above=FALSE}. Months can be selected
#' with the variable \code{months}.
#'
#'
WACSvalid = function(what="Sim",
                     wacsdata=NULL,
                     wacspar=NULL,
                     wacssimul=NULL,
                     varname=NULL,
                     varname2=NULL,
                     base = 0,
                     above=TRUE,
                     months=1:12)
{
  ## ---- Argument checking ----
  if (!(what %in% c("Sim","Rain","MeanSd","BiVar","CorTemp","SumBase","Persistence"))){
    stop ("[WACSvalid: 'what' should belong to one of the following:
          'Sim','Rain','MeanSd','BiVar','CorTemp','SumBase','Persistence']")
  }
  # FIX: use inherits() rather than comparing class(x) with "!=": class()
  # can return a vector of length > 1, which makes the comparison
  # unreliable (and an error inside if() since R 4.2).
  if(!inherits(wacsdata, "WACSdata")){
    stop ("[WACSvalid] Data should be of class 'WACSdata', as generated by calling WACSdata")
  }
  if(!is.null(wacssimul) && !inherits(wacssimul, "WACSsimul")){
    stop ("[WACSvalid] Simulation should be of class 'WACSsimul', as generated by calling WACSsimul")
  }
  if (what != "Rain"){
    # 'tmoy' is accepted even though it is not a stored variable name.
    if (!(varname %in% wacsdata$mapping$wacs_names) && !(varname=="tmoy")){
      stop ("[WACSvalid: varname must be one of the variable name of WACSdata]")
    }
    # When temperatures are stored as (tmin, trange), rebuild 'tmax' on
    # both the data and the simulation before validating tmax or tmoy.
    if( ((varname=="tmax") && wacsdata$Trange) || ((varname=="tmoy") && wacsdata$Trange) ){
      tmax = wacsdata$data$tmin + wacsdata$data$trange
      wacsdata$data = cbind(wacsdata$data,tmax)
      tmax = wacssimul$sim$tmin + wacssimul$sim$trange
      wacssimul$sim = cbind(wacssimul$sim,tmax)
    }
  }
  #
  # Dispatch on the requested type of validation; each branch first checks
  # that the inputs it needs were supplied.
  #
  ############################# VALIDATING RAIN
  if (what == "Rain") {
    if (is.null(wacsdata) || is.null(wacspar)) {
      stop ("[WACSvalid] for 'Rain' you must provide wacsdata and wacspar")
    }
    res = wacsvalid.Rain(wacsdata,wacspar)
  }
  ############################# VALIDATING ONE SIMULATION RUN
  if (what == "Sim"){
    if (is.null(wacsdata) || is.null(wacssimul) || is.null(wacspar) || is.null(varname)) {
      stop ("[WACSvalid] for 'Sim' you must provide wacsdata, wacssimul, wacspar
            and varname");
    }
    res = wacsvalid.Sim(wacsdata,wacspar,wacssimul,varname)
  }
  ############################# VALIDATING Means and Standard Deviations
  if (what == "MeanSd") {
    if (is.null(wacsdata) || is.null(wacssimul) || is.null(varname)) {
      stop ("[WACSvalid] for 'MeanSd' you must provide wacsdata, wacssimul
            and varname");
    }
    res = wacsvalid.MeanSd(wacsdata,wacssimul,varname)
  }
  ############################# VALIDATING Bivariate correlation
  if (what == "BiVar") {
    if (is.null(wacsdata) || is.null(wacssimul) || is.null(varname) || is.null(varname2)) {
      stop ("[WACSvalid] for 'BiVar' you must provide wacsdata,
            wacssimul, varname and varname2")}
    # Same tmax reconstruction as above, but for the second variable.
    if ((varname2=="tmax") && wacsdata$Trange){
      tmax = wacsdata$data$tmin + wacsdata$data$trange
      wacsdata$data = cbind(wacsdata$data,tmax)
      tmax = wacssimul$sim$tmin + wacssimul$sim$trange
      wacssimul$sim = cbind(wacssimul$sim,tmax)
    }
    res=wacsvalid.BiVar(wacsdata,wacssimul,varname,varname2)
  }
  ############################# VALIDATING Temporal Correlations
  if (what == "CorTemp") {
    if (is.null(wacsdata) || is.null(wacssimul) || is.null(varname)) {
      stop ("[WACSvalid] for 'CorTemp' you must provide wacsdata,
            wacssimul and varname");
    }
    res = wacsvalid.CorTemp(wacsdata,wacssimul,varname)
  }
  ############################# VALIDATING Sum above base
  if (what == "SumBase") {
    if ( is.null(wacsdata) || is.null(wacssimul) || is.null(varname) || is.null(base) || is.null(months) ) {
      stop ("[WACSvalid] for 'SumBase' you must provide wacsdata,
            wacssimul, varname, base, months");
    }
    res = wacsvalid.SumBase(wacsdata,wacssimul,varname,base,months)
  }
  ############################# VALIDATING Persistence
  if (what == "Persistence") {
    if ( is.null(wacsdata) || is.null(wacssimul) || is.null(varname) || is.null(base) || is.null(months) || is.null(above)) {
      stop ("[WACSvalid] for 'Persistence' you must provide wacsdata,
            wacssimul, varname, base, above, months");
    }
    res = wacsvalid.Persistence(wacsdata,wacssimul,varname,base,above,months)
  }
  # Tag the result so plotting methods can label the two series.
  res$labels = c("Observed","Simulated")
  return(res)
}
| /R/WACSvalid.R | no_license | cran/WACS | R | false | false | 9,203 | r | ###################################################################
#
# This function is part of WACSgen 1.0
# Copyright © 2013,2014,2015, D. Allard, BioSP,
# and Ronan Trépos MIA-T, INRA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details. http://www.gnu.org
#
###################################################################
#' Performs validations of WACS simulations
#'
#' The validation is based on different types of
#' statistics computed on WACS data, WACS parameters and WACS simulations.
#'
#' @export
#'
#' @param what Type of validation. Possible choices are:
#' \tabular{ll}{
#' \env{="Sim"} \tab Compares a simulation run to data \cr
#' \env{="Rain"}\tab qq-plots of rainfall, per season \cr
#' \env{="MeanSd"}\tab Compares monthly mean and standard deviations \cr
#' \env{="BiVar"}\tab Compares monthly variate correlations \cr
#' \env{="CorTemp"}\tab Compares monthly temporal correlations \cr
#' \env{="SumBase"}\tab Compares sums above a threshold \cr
#' \env{="Persistence"}\tab Compares persistence of a variable above (or below) a threshold \cr
#' }
#' @param wacsdata WACS data obtained when calling \link{WACSdata}
#'
#' @param wacspar WACS parameters estimated when calling \link{WACSestim}
#'
#' @param wacssimul WACS simulation obtained when calling \link{WACSsimul}
#'
#' @param varname Variable on which the validation is performed
#'
#' @param varname2 Second variable on which validation is performed (only needed if \code{what=BiVar})
#'
#' @param base Threshold used for "SumBase" and "Persistence"
#'
#' @param above Boolean value used for "Persistence":
#' TRUE if data is considered above threshold;
#' FALSE otherwise
#'
#' @param months Months to which the analysis should be restricted (only for "SumBase" and "Persistence")
#'
#' @return A list containing all information needed for plots; contains also the type of validation, as a class
#'
#' @examples
#' \dontrun{
#' ## Simple example
#' data(ClimateSeries)
#' ThisData = WACSdata(ClimateSeries)
#' ThisPar = WACSestim(ThisData)
#' ThisSim = WACSsimul(ThisPar, from="1995-01-01", to="2012-12-31")
#' Val1 = WACSvalid(what="Sim",wacsdata = ThisData,
#' wacspar = ThisPar, wacssimul = ThisSim, varname="tmin")
#' Val2 = WACSvalid(what="MeanSd",wacsdata = ThisData,
#' wacssimul = ThisSim, varname="RG")
#' Val3 = WACSvalid(what="SumBase", wacsdata = ThisData,
#' wacssimul = ThisSim, varname="tmoy", base=5, month=2:5)
#' Val4 = WACSvalid(what="Persistence",wacsdata = ThisData,
#' wacssimul = ThisSim, varname="tmin", base=0, above=FALSE)
#' }
#' @note
#'
#' If \code{what=sim}, data and simulations are displayed as a function of the day of the year, from 1 to 365.
#' Smoothed versions of daily average and daily envelopes (defined by average +/- 2. standard deviations) are also displayed.
#'
#'
#' If \code{what=rain}, qq-plots and superimposition of histograms and models of rain are produced for each season.
#'
#'
#' If \code{what=MeanSd}, boxplots of monthly means and monthly standard deviations are compared.
#' The median value of the monthly mean, resp. monthly standard deviation, of the data are displayed
#' on top of the boxplots computed on the simulations.
#'
#'
#' If \code{what=BiVar}, boxplots of monthly correlation coefficients between \code{varname} and \code{varname2}
#' are compared. The median value of the correlation coefficient computed on the data is displayed
#' on top of the boxplots computed on the simulations.
#'
#'
#' If \code{what=CorTemp}, boxplots of monthly 1-day auto correlation are compared. The median value of the
#' auto-correlation coefficient computed on the data is displayed
#' on top of the boxplots computed on the simulations.
#'
#'
#' If \code{what=SumBase}, boxplots of the sum of the variable \code{varname} above a given threshold, \code{base},
#' is computed during the months provided in the variable \code{months}.
#'
#'
#' If \code{what=Persistence}, histograms of consecutive days of the variable \code{varname} above (or below) a given
#' threshold, \code{base}, are compared. If \code{above=TRUE}, consecutive days above the threshold are computed,
#' whereas days below the threshold are computed if \code{above=FALSE}. Months can be selected
#' with the variable \code{months}.
#'
#'
WACSvalid = function(what="Sim",
                     wacsdata=NULL,
                     wacspar=NULL,
                     wacssimul=NULL,
                     varname=NULL,
                     varname2=NULL,
                     base = 0,
                     above=TRUE,
                     months=1:12)
{
  # Some checking
  if (!(what %in% c("Sim","Rain","MeanSd","BiVar","CorTemp","SumBase","Persistence"))){
    stop ("[WACSvalid: 'what' should belong to one of the following:
          'Sim','Rain','MeanSd','BiVar','CorTemp','SumBase','Persistence']")
  }
  # Use inherits() rather than class(x) != "...": class() may return a
  # vector of length > 1 (e.g. for objects with multiple classes), which
  # would break or warn in an if() condition.
  if(!inherits(wacsdata, "WACSdata")){
    stop ("[WACSvalid] Data should be of class 'WACSdata', as generated by calling WACSdata")
  }
  if(!is.null(wacssimul) && !inherits(wacssimul, "WACSsimul")){
    stop ("[WACSvalid] Simulation should be of class 'WACSsimul', as generated by calling WACSsimul")
  }
  if (what != "Rain"){
    if (!(varname %in% wacsdata$mapping$wacs_names) & !(varname=="tmoy")){
      stop ("[WACSvalid: varname must be one of the variable name of WACSdata]")
    }
    # Creating 'tmax' if Trange = TRUE: when temperatures are stored as
    # (tmin, trange), reconstruct tmax = tmin + trange on both the data
    # and the simulation before validating.
    if( ((varname=="tmax") && wacsdata$Trange) || ((varname=="tmoy") && wacsdata$Trange) ){
      tmax = wacsdata$data$tmin + wacsdata$data$trange
      wacsdata$data = cbind(wacsdata$data,tmax)
      tmax = wacssimul$sim$tmin + wacssimul$sim$trange
      wacssimul$sim = cbind(wacssimul$sim,tmax)
    }
  }
  #
  # Different types of validations; exactly one branch matches because
  # 'what' was validated above.
  #
  ############################# VALIDATING RAIN
  if (what == "Rain") {
    if (is.null(wacsdata) || is.null(wacspar)) {
      stop ("[WACSvalid] for 'Rain' you must provide wacsdata and wacspar")
    }
    res = wacsvalid.Rain(wacsdata,wacspar)
  }
  ############################# VALIDATING ONE SIMULATION RUN
  if (what == "Sim"){
    if (is.null(wacsdata) || is.null(wacssimul) || is.null(wacspar) || is.null(varname)) {
      stop ("[WACSvalid] for 'Sim' you must provide wacsdata, wacssimul, wacspar
                  and varname");
    }
    res = wacsvalid.Sim(wacsdata,wacspar,wacssimul,varname)
  }
  ############################# VALIDATING Means and Standard Deviations
  if (what == "MeanSd") {
    if (is.null(wacsdata) || is.null(wacssimul) || is.null(varname)) {
      stop ("[WACSvalid] for 'MeanSd' you must provide wacsdata, wacssimul
                  and varname");
    }
    res = wacsvalid.MeanSd(wacsdata,wacssimul,varname)
  }
  ############################# VALIDATING Bivariate correlation
  if (what == "BiVar") {
    if (is.null(wacsdata) || is.null(wacssimul) || is.null(varname) || is.null(varname2)) {
      stop ("[WACSvalid] for 'BiVar' you must provide wacsdata,
                  wacssimul, varname and varname2")}
    # Reconstruct tmax for the second variable as well if needed.
    if ((varname2=="tmax") && wacsdata$Trange){
      tmax = wacsdata$data$tmin + wacsdata$data$trange
      wacsdata$data = cbind(wacsdata$data,tmax)
      tmax = wacssimul$sim$tmin + wacssimul$sim$trange
      wacssimul$sim = cbind(wacssimul$sim,tmax)
    }
    res=wacsvalid.BiVar(wacsdata,wacssimul,varname,varname2)
  }
  ############################# VALIDATING Temporal Correlations
  if (what == "CorTemp") {
    if (is.null(wacsdata) || is.null(wacssimul) || is.null(varname)) {
      stop ("[WACSvalid] for 'CorTemp' you must provide wacsdata,
                  wacssimul and varname");
    }
    res = wacsvalid.CorTemp(wacsdata,wacssimul,varname)
  }
  ############################# VALIDATING Sum above base
  if (what == "SumBase") {
    if ( is.null(wacsdata) || is.null(wacssimul) || is.null(varname) || is.null(base) || is.null(months) ) {
      stop ("[WACSvalid] for 'SumBase' you must provide wacsdata,
                  wacssimul, varname, base, months");
    }
    res = wacsvalid.SumBase(wacsdata,wacssimul,varname,base,months)
  }
  ############################# VALIDATING Persistence
  if (what == "Persistence") {
    if ( is.null(wacsdata) || is.null(wacssimul) || is.null(varname) || is.null(base) || is.null(months) || is.null(above)) {
      stop ("[WACSvalid] for 'Persistence' you must provide wacsdata,
                  wacssimul, varname, base, above, months");
    }
    res = wacsvalid.Persistence(wacsdata,wacssimul,varname,base,above,months)
  }
  # Labels used by the plotting methods for the two series compared.
  res$labels = c("Observed","Simulated")
  return(res)
}
|
library(shapr)
context("test-predictions.R")
# Unit test for shapr::prediction(): fits a small linear model on the
# Boston housing data, assembles a hand-made 'explainer' list, and checks
# the structure and contents of the returned object.
test_that("Test prediction", {
  # Example -----------
  # Training data: all but the last n_xtest rows of Boston.
  data("Boston", package = "MASS")
  dt_train <- data.table::as.data.table(Boston)
  features <- c("lstat", "rm", "dis", "indus")
  n_combinations <- 10
  n_features <- 4
  prediction_zero <- .5
  n_xtest <- 8
  # Hand-built explainer: fitted lm, held-out test features, and a
  # placeholder weight matrix W of ones.
  explainer <- list()
  explainer$model <- stats::lm(formula = "medv ~ lstat + rm + dis + indus", data = head(dt_train, -n_xtest))
  explainer$x_test <- tail(dt_train[, .SD, .SDcols = features], n_xtest)
  explainer$W <- matrix(1, nrow = n_features + 1, ncol = n_combinations)
  # Stack the training data 4 times and tag each row with an observation
  # id, a combination id, and a random weight, as prediction() expects.
  dt <- dt_train[rep(1:.N, 4)]
  dt[, id := rep_len(1:n_xtest, .N)]
  dt[, id_combination := rep_len(1:n_combinations, .N), id]
  dt[, w := runif(.N)]
  x <- prediction(dt, prediction_zero, explainer)
  # Test -----------
  lnms <- c("dt", "model", "p", "x_test")
  expect_equal(class(x), c("shapr", "list"))
  expect_equal(names(x), lnms)
  expect_equal(x$model, explainer$model)
  expect_equal(x$x_test, explainer$x_test)
  # Predicted values should match predict_model() on the test rows.
  expect_equal(x$p, predict_model(explainer$model, explainer$x_test))
  expect_true(data.table::is.data.table(x$dt))
  expect_equal(ncol(x$dt), n_features + 1)
  expect_equal(nrow(x$dt), nrow(explainer$x_test))
  expect_equal(colnames(x$dt), c("none", features))
  # Test errors: prediction() should fail when some ids are missing.
  expect_error(prediction(dt[id < n_xtest], prediction_zero, explainer))
})
| /tests/testthat/test-predictions.R | permissive | martinju/shapr | R | false | false | 1,372 | r | library(shapr)
context("test-predictions.R")
# Unit test for shapr::prediction(): fits a small linear model on the
# Boston housing data, assembles a hand-made 'explainer' list, and checks
# the structure and contents of the returned object.
test_that("Test prediction", {
  # Example -----------
  # Training data: all but the last n_xtest rows of Boston.
  data("Boston", package = "MASS")
  dt_train <- data.table::as.data.table(Boston)
  features <- c("lstat", "rm", "dis", "indus")
  n_combinations <- 10
  n_features <- 4
  prediction_zero <- .5
  n_xtest <- 8
  # Hand-built explainer: fitted lm, held-out test features, and a
  # placeholder weight matrix W of ones.
  explainer <- list()
  explainer$model <- stats::lm(formula = "medv ~ lstat + rm + dis + indus", data = head(dt_train, -n_xtest))
  explainer$x_test <- tail(dt_train[, .SD, .SDcols = features], n_xtest)
  explainer$W <- matrix(1, nrow = n_features + 1, ncol = n_combinations)
  # Stack the training data 4 times and tag each row with an observation
  # id, a combination id, and a random weight, as prediction() expects.
  dt <- dt_train[rep(1:.N, 4)]
  dt[, id := rep_len(1:n_xtest, .N)]
  dt[, id_combination := rep_len(1:n_combinations, .N), id]
  dt[, w := runif(.N)]
  x <- prediction(dt, prediction_zero, explainer)
  # Test -----------
  lnms <- c("dt", "model", "p", "x_test")
  expect_equal(class(x), c("shapr", "list"))
  expect_equal(names(x), lnms)
  expect_equal(x$model, explainer$model)
  expect_equal(x$x_test, explainer$x_test)
  # Predicted values should match predict_model() on the test rows.
  expect_equal(x$p, predict_model(explainer$model, explainer$x_test))
  expect_true(data.table::is.data.table(x$dt))
  expect_equal(ncol(x$dt), n_features + 1)
  expect_equal(nrow(x$dt), nrow(explainer$x_test))
  expect_equal(colnames(x$dt), c("none", features))
  # Test errors: prediction() should fail when some ids are missing.
  expect_error(prediction(dt[id < n_xtest], prediction_zero, explainer))
})
#' Interpolate concentrations between measurements or extrapolate
#' concentrations after the last measurement.
#'
#' \code{interpolate.conc} and \code{extrapolate.conc} returns an
#' interpolated (or extrapolated) concentration.
#' \code{interp.extrap.conc} will choose whether interpolation or
#' extrapolation is required and will also operate on many
#' concentrations. These will typically be used to estimate the
#' concentration between two measured concentrations or after the last
#' measured concentration. Of note, these functions will not
#' extrapolate prior to the first point.
#'
#' @param conc Measured concentrations
#' @param time Time of the concentration measurement
#' @param time.dose Time of the dose
#' @param time.out Time when interpolation is requested (vector for
#' \code{interp.extrap.conc}, scalar otherwise)
#' @param lambda.z The elimination rate constant. \code{NA} will
#' prevent extrapolation.
#' @param clast The last observed concentration above the limit of
#' quantification. If not given, \code{clast} is calculated from
#' \code{\link{pk.calc.clast.obs}}
#' @param conc.origin The concentration before the first measurement.
#' \code{conc.origin} is typically used to set predose values to zero
#' (default), set a predose concentration for endogenous compounds, or
#' set predose concentrations to \code{NA} if otherwise unknown.
#' @param options List of changes to the default
#' \code{\link{PKNCA.options}} for calculations.
#' @param interp.method The method for interpolation (either 'lin up/log
#' down' or 'linear')
#' @param extrap.method The method for extrapolation: "AUCinf",
#' "AUClast", or "AUCall". See details for usage.
#' @param conc.blq How to handle BLQ values. (See
#' \code{\link{clean.conc.blq}} for usage instructions.)
#' @param conc.na How to handle NA concentrations. (See
#' \code{\link{clean.conc.na}})
#' @param route.dose What is the route of administration
#' ("intravascular" or "extravascular"). See the details below for
#' how this parameter is used.
#' @param duration.dose What is the duration of administration? See the
#' details below for how this parameter is used.
#' @param out.after Should interpolation occur from the data before
#' (\code{FALSE}) or after (\code{TRUE}) the interpolated point? See
#' the details below for how this parameter is used. It only has a
#' meaningful effect at the instant of an IV bolus dose.
#' @param check Run \code{\link{check.conc.time}},
#' \code{\link{clean.conc.blq}}, and \code{\link{clean.conc.na}}?
#' @param ... Additional arguments passed to \code{interpolate.conc} or
#' \code{extrapolate.conc}.
#' @return The interpolated or extrapolated concentration value as a
#' scalar float.
#' @details
#' \describe{
#' \item{extrap.method}{
#' \describe{
#' \item{'AUCinf'}{Use lambda.z to extrapolate beyond the last point with the half-life.}
#' \item{'AUCall'}{If the last point is above the limit of quantification or missing, this is identical to 'AUCinf'. If the last point is below the limit of quantification, then linear interpolation between the Clast and the next BLQ is used for that interval and all additional points are extrapolated as 0.}
#' \item{'AUClast'}{Extrapolates all points after the last above the limit of quantification as 0.}
#' }
#' }
#' }
#'
#' \code{duration.dose} and \code{out.after} are ignored if
#' \code{route.dose == "extravascular"}. \code{out.after} is ignored
#' if \code{duration.dose > 0}.
#'
#' \code{route.dose} and \code{duration.dose} affect how
#' interpolation/extrapolation of the concentration occurs at the time
#' of dosing. If \code{route.dose == "intravascular"} and
#' \code{duration.dose == 0} then extrapolation occurs for an IV bolus
#' using \code{\link{pk.calc.c0}} with the data after dosing. Otherwise
#' (either \code{route.dose == "extravascular"} or \code{duration.dose >
#' 0}), extrapolation occurs using the concentrations before dosing and
#' estimating the half-life (or more precisely, estimating
#' \code{lambda.z}). Finally, \code{out.after} can change the
#' direction of interpolation in cases with \code{route.dose ==
#' "intravascular"} and \code{duration.dose == 0}. When
#' \code{out.after == FALSE} interpolation occurs only with data
#' before the dose (as is the case for \code{route.dose ==
#' "extravascular"}), but if \code{out.after == TRUE}
#' interpolation occurs from the data after dosing.
#'
#' @seealso \code{\link{pk.calc.clast.obs}},
#' \code{\link{pk.calc.half.life}}, \code{\link{pk.calc.c0}}
#' @export
interp.extrap.conc <- function(conc, time, time.out,
                               lambda.z=NA,
                               clast=pk.calc.clast.obs(conc, time),
                               options=list(),
                               interp.method=PKNCA.choose.option("auc.method", options),
                               extrap.method="AUCinf",
                               ...,
                               conc.blq=PKNCA.choose.option("conc.blq", options),
                               conc.na=PKNCA.choose.option("conc.na", options),
                               check=TRUE) {
  ## Optionally validate inputs and apply the BLQ/NA cleaning rules.
  if (check) {
    check.conc.time(conc, time)
    data <- clean.conc.blq(conc, time,
                           conc.blq=conc.blq, conc.na=conc.na,
                           check=FALSE)
  } else {
    data <- data.frame(conc, time)
  }
  tlast <- pk.calc.tlast(data$conc, data$time, check=FALSE)
  if (length(time.out) < 1)
    stop("time.out must be a vector with at least one element")
  ## Preallocate as numeric NA: rep(NA, n) would yield a logical vector
  ## until the first assignment coerces it.
  ret <- rep(NA_real_, length(time.out))
  ## seq_along() is safe for zero-length input, unlike 1:length(x).
  for (i in seq_along(time.out)) {
    if (is.na(time.out[i])) {
      ## Leave ret[i] as NA for missing requested times.
      warning("An interpolation/extrapolation time is NA")
    } else if (time.out[i] <= tlast) {
      ## At or before Tlast: interpolate between observations.
      ret[i] <- interpolate.conc(data$conc, data$time,
                                 time.out[i],
                                 interp.method=interp.method,
                                 conc.blq=conc.blq,
                                 conc.na=conc.na,
                                 check=FALSE)
    } else {
      ## After Tlast: extrapolate according to extrap.method.
      ret[i] <- extrapolate.conc(data$conc, data$time,
                                 time.out[i],
                                 lambda.z=lambda.z,
                                 clast=clast,
                                 extrap.method=extrap.method,
                                 check=FALSE)
    }
  }
  ret
}
#' @describeIn interp.extrap.conc Interpolate concentrations through Tlast (inclusive)
#' @export
interpolate.conc <- function(conc, time, time.out,
                             options=list(),
                             interp.method=PKNCA.choose.option("auc.method", options),
                             conc.blq=PKNCA.choose.option("conc.blq", options),
                             conc.na=PKNCA.choose.option("conc.na", options),
                             conc.origin=0,
                             ...,
                             check=TRUE) {
  ## Check the inputs and optionally clean the BLQ/NA values.
  if (check) {
    check.conc.time(conc, time)
    data <- clean.conc.blq(conc, time,
                           conc.blq=conc.blq, conc.na=conc.na,
                           check=FALSE)
  } else {
    data <- data.frame(conc, time)
  }
  # Ensure that conc.origin is valid: a single number or NA.
  if (length(conc.origin) != 1) {
    stop("conc.origin must be a scalar")
  }
  if (!(is.na(conc.origin) | (is.numeric(conc.origin) & !is.factor(conc.origin)))) {
    stop("conc.origin must be NA or a number (and not a factor)")
  }
  ## Only a single interpolation time is supported per call; vectors are
  ## handled by interp.extrap.conc().
  if (length(time.out) != 1) {
    stop("Can only interpolate for one time point per function call")
  }
  tlast <- pk.calc.tlast(data$conc, data$time, check=FALSE)
  if (time.out < min(data$time)) {
    ## Before the first measurement: return the origin concentration
    ## (0 by default; may be set for endogenous compounds or to NA).
    ret <- conc.origin
  } else if (time.out > tlast) {
    stop("interpolate.conc only works through Tlast, please use interp.extrap.conc to combine both interpolation and extrapolation.")
  } else if (!(tolower(interp.method) %in% c("lin up/log down", "linear"))) {
    stop("interp.method must be one of 'linear' or 'lin up/log down'")
  } else if (time.out %in% data$time) {
    ## Exact time match: return the cleaned concentration at that time.
    ## Index data$conc, not the raw 'conc' argument: after BLQ/NA
    ## cleaning, 'data' may have fewer rows than 'conc', so masking
    ## 'conc' with a mask built from data$time would silently recycle
    ## and could return the wrong value.
    ret <- data$conc[data$time == time.out]
  } else {
    ## Bracket time.out with the nearest observation before and after.
    time.1 <- max(data$time[data$time <= time.out])
    time.2 <- min(data$time[time.out <= data$time])
    conc.1 <- data$conc[data$time == time.1]
    conc.2 <- data$conc[data$time == time.2]
    interp.method <- tolower(interp.method)
    ## Scalar comparisons below use ||/&& (short-circuit) rather than
    ## the vectorized |/& operators.
    if (is.na(conc.1) || is.na(conc.2)) {
      ret <- NA_real_
    } else if (interp.method == "linear" ||
               (interp.method == "lin up/log down" &&
                (conc.1 <= 0 || conc.2 <= 0 || conc.1 <= conc.2))) {
      ## Do linear interpolation if:
      ##   linear interpolation is selected, or
      ##   lin up/log down interpolation is selected and
      ##     one concentration is <= 0 (log undefined) or
      ##     the profile is not decreasing on this interval.
      ret <- conc.1+(time.out-time.1)/(time.2-time.1)*(conc.2-conc.1)
    } else if (interp.method == "lin up/log down") {
      ## Log-linear (exponential) interpolation on the declining phase.
      ret <- exp(log(conc.1)+
                 (time.out-time.1)/(time.2-time.1)*(log(conc.2)-log(conc.1)))
    } else {
      stop("You should never see this error. Please report this as a bug with a reproducible example.") # nocov
    }
  }
  ret
}
#' @describeIn interp.extrap.conc Extrapolate concentrations after Tlast
#' @export
# Extrapolate a single concentration after Tlast according to
# extrap.method ("AUCinf", "AUClast", or "AUCall"); see the roxygen block
# above for the meaning of each method.
extrapolate.conc <- function(conc, time, time.out,
                             lambda.z=NA, clast=pk.calc.clast.obs(conc, time),
                             extrap.method="AUCinf",
                             options=list(),
                             conc.na=PKNCA.choose.option("conc.na", options),
                             conc.blq=PKNCA.choose.option("conc.blq", options),
                             ...,
                             check=TRUE) {
  if (check) {
    check.conc.time(conc, time)
    # NOTE(review): conc.blq is accepted by this function but not
    # forwarded to clean.conc.blq here (interpolate.conc does forward
    # it) -- confirm whether the default BLQ rule is intended.
    data <- clean.conc.blq(conc, time, conc.na=conc.na, check=FALSE)
  } else {
    data <- data.frame(conc, time)
  }
  # Method comparison is case-insensitive.
  extrap.method <- tolower(extrap.method)
  if (!(extrap.method %in%
        c("aucinf", "aucall", "auclast")))
    stop("extrap.method must be one of 'AUCinf', 'AUClast', or 'AUCall'")
  if (length(time.out) != 1)
    stop("Only one time.out value may be estimated at once.")
  tlast <- pk.calc.tlast(data$conc, data$time, check=FALSE)
  if (is.na(tlast)) {
    ## If there are no observed concentrations, return NA
    ret <- NA
  } else if (time.out <= tlast) {
    stop("extrapolate.conc can only work beyond Tlast, please use interp.extrap.conc to combine both interpolation and extrapolation.")
  } else {
    ## Start the interpolation
    if (extrap.method %in% "aucinf") {
      ## If AUCinf is requested, extrapolate using the half-life
      ## (monoexponential decay from Clast; NA lambda.z yields NA).
      ret <- clast*exp(-lambda.z*(time.out - tlast))
    } else if (extrap.method %in% "auclast" |
               (extrap.method %in% "aucall" &
                tlast == max(data$time))) {
      ## If AUClast is requested or AUCall is requested and there are
      ## no BLQ at the end, we are already certain that we are after
      ## Tlast, so the answer is 0.
      ret <- 0
    } else if (extrap.method %in% "aucall") {
      ## If the last non-missing concentration is below the limit of
      ## quantification, extrapolate with the triangle method of
      ## AUCall.
      time.prev <- max(data$time[data$time <= time.out])
      conc.prev <- data$conc[data$time %in% time.prev]
      if (conc.prev %in% 0) {
        ## If we are already BLQ, then we have confirmed that there
        ## are no more ALQ measurements (because we are beyond
        ## Tlast) and therefore we can extrapolate as 0.
        ret <- 0
      } else {
        if (time.prev != max(data$time)) {
          time.next <- min(data$time[data$time >= time.out])
          conc.next <- data$conc[data$time %in% time.next]
        }
        ## If we are not already BLQ, then we have confirmed that we
        ## are in the triangle extrapolation region and need to draw
        ## a line.
        # NOTE(review): time.next is only defined inside the if() above;
        # if time.prev == max(data$time) with conc.prev != 0 (possible
        # only with uncleaned input, check=FALSE) this would fail with
        # "object 'time.next' not found". conc.next is computed but
        # unused: the line is drawn down to 0 (BLQ) at time.next.
        ret <- (time.out - time.prev)/(time.next - time.prev)*conc.prev
      }
    } else {
      stop("Invalid extrap.method caught too late (seeing this error indicates a software bug)") # nocov
    }
  }
  ret
}
# Canonical event names used by interp.extrap.conc.dose (defined at the
# file level so that the tests can reference the same constants). Each
# list element's value equals its own name.
event_choices_interp.extrap.conc.dose <-
  local({
    event_names <- c("conc_dose_iv_bolus_after", "conc_dose",
                     "dose_iv_bolus_after", "dose", "conc",
                     "output_only", "none")
    as.list(stats::setNames(event_names, event_names))
  })
#' @importFrom dplyr case_when
#' @describeIn interp.extrap.conc Interpolate and extrapolate
#' concentrations without interpolating or extrapolating beyond doses.
#' @export
# Dose-aware interpolation/extrapolation: builds a single merged timeline
# of concentrations, doses, and requested output times, classifies every
# row into an event type, and then fills in each requested concentration
# using the first matching rule from the file-level
# interp.extrap.conc.dose.select list.
interp.extrap.conc.dose <- function(conc, time,
                                    time.dose, route.dose="extravascular", duration.dose=NA,
                                    time.out, out.after=FALSE,
                                    options=list(),
                                    conc.blq=PKNCA.choose.option("conc.blq", options),
                                    conc.na=PKNCA.choose.option("conc.na", options),
                                    ...,
                                    check=TRUE) {
  # Optionally validate and clean the concentration-time data.
  if (check) {
    check.conc.time(conc, time)
    data_conc <-
      clean.conc.blq(conc, time,
                     conc.blq=conc.blq, conc.na=conc.na,
                     check=FALSE)
  } else {
    data_conc <- data.frame(conc, time)
  }
  # Check other inputs
  if (!is.character(route.dose)) {
    route.dose <- as.character(route.dose)
  }
  if (!(all(route.dose %in% c("extravascular", "intravascular")))) {
    stop("route.dose must be either 'extravascular' or 'intravascular'")
  }
  if (!(length(route.dose) %in% c(1, length(time.dose)))) {
    stop("route.dose must either be a scalar or the same length as time.dose")
  }
  if (!all(is.na(duration.dose) | (is.numeric(duration.dose) & !is.factor(duration.dose)))) {
    stop("duration.dose must be NA or a number.")
  }
  if (!(length(duration.dose) %in% c(1, length(time.dose)))) {
    stop("duration.dose must either be a scalar or the same length as time.dose")
  }
  # Generate a single timeline
  # Concentrations are assumed to occur before dosing
  data_conc$out_after <- FALSE
  # An IV bolus is an intravascular dose with zero duration; such rows
  # are duplicated below into a "before" and an "after" instant.
  data_dose <-
    merge(
      data.frame(dose=TRUE,
                 time=time.dose,
                 route=route.dose,
                 duration=duration.dose,
                 iv_bolus=route.dose %in% "intravascular" & duration.dose %in% 0,
                 stringsAsFactors=FALSE),
      # Expand IV bolus dosing to have a before and after concentration
      data.frame(iv_bolus=c(FALSE, TRUE, TRUE),
                 out_after=c(FALSE, FALSE, TRUE)),
      all.x=TRUE)
  # Requested output times; out_order preserves the caller's ordering so
  # the result can be re-sorted at the end.
  data_out <-
    data.frame(out=TRUE,
               out_after=out.after,
               out_order=1:length(time.out),
               time=time.out)
  data_all <-
    merge(merge(data_conc,
                data_dose,
                all=TRUE),
          data_out,
          all=TRUE)
  # Normalize NA flags introduced by the outer merges to FALSE.
  data_all$dose_event <- data_all$dose %in% TRUE
  data_all$conc_event <- !is.na(data_all$conc)
  data_all$iv_bolus <- data_all$iv_bolus %in% TRUE
  data_all$out <- data_all$out %in% TRUE
  # dose_count: running number of doses at or before each row.
  data_all$dose_count <- cumsum(data_all$dose_event)
  # A concentration measured at a dose time (not flagged out_after)
  # belongs to the previous dosing interval.
  mask_include_before <- data_all$dose_event & data_all$conc_event & !data_all$out_after
  data_all$dose_count_prev <- data_all$dose_count - mask_include_before
  # Classify each row; case_when picks the first TRUE condition.
  data_all$event <- dplyr::case_when(
    data_all$dose_event & data_all$conc_event & data_all$iv_bolus & data_all$out_after~event_choices_interp.extrap.conc.dose$conc_dose_iv_bolus_after,
    data_all$dose_event & data_all$conc_event~event_choices_interp.extrap.conc.dose$conc_dose,
    data_all$dose_event & data_all$iv_bolus & data_all$out_after~event_choices_interp.extrap.conc.dose$dose_iv_bolus_after,
    data_all$dose_event~event_choices_interp.extrap.conc.dose$dose,
    data_all$conc_event~event_choices_interp.extrap.conc.dose$conc,
    data_all$out~event_choices_interp.extrap.conc.dose$output_only, # interpolation/extrapolation-only row
    TRUE~"unknown") # should never happen
  if (any(mask_unknown <- data_all$event %in% "unknown")) {
    # All events should be accounted for
    stop("Unknown event in interp.extrap.conc.dose at time(s): ",
         paste(unique(data_all$time[mask_unknown]), collapse=", ")) # nocov
  }
  # Remove "output_only" from event_before and event_after
  # simple_locf: last-observation-carried-forward, skipping missing_val
  # entries; used to find the nearest real event on either side.
  simple_locf <- function(x, missing_val) {
    mask_found <- !(x %in% missing_val)
    ret <- x[mask_found]
    ret[cumsum(mask_found)]
  }
  data_all$event_before <- simple_locf(c(event_choices_interp.extrap.conc.dose$none, data_all$event[-nrow(data_all)]),
                                       "output_only")
  data_all$event_after <- rev(simple_locf(rev(c(data_all$event[-1], event_choices_interp.extrap.conc.dose$none)),
                                          "output_only"))
  # Loop through the methods until all have been tested or no missing
  # values remain.
  data_all$method <- NA_character_
  for (nm in names(interp.extrap.conc.dose.select)) {
    mask <- is.na(data_all$method) &
      do.call(interp.extrap.conc.dose.select[[nm]]$select, list(x=data_all))
    if (any(mask)) {
      if ("warning" %in% names(interp.extrap.conc.dose.select[[nm]])) {
        # Rules with a 'warning' element cannot compute a value; warn
        # and mark the rows as handled so later rules skip them.
        warning(sprintf("%s: %d data points",
                        interp.extrap.conc.dose.select[[nm]]$warning,
                        sum(mask)))
        data_all$method[mask] <- nm
      } else {
        # Fill rows one at a time so later rows may use values imputed
        # for earlier rows.
        for (current_idx in which(mask)) {
          data_all$conc[current_idx] <-
            do.call(interp.extrap.conc.dose.select[[nm]]$value,
                    list(data_all=data_all,
                         current_idx=current_idx,
                         options=options,
                         ...))
          data_all$method[current_idx] <- nm
        }
      }
    }
  }
  if (any(mask_no_method <- is.na(data_all$method))) {
    # This should never happen, all eventualities should be covered
    stop("No method for imputing concentration at time(s): ",
         paste(unique(data_all$time[mask_no_method]), collapse=", ")) # nocov
  }
  # Filter to the requested time points and output
  data_out <- data_all[data_all$out,,drop=FALSE]
  data_out <- data_out[order(data_out$out_order),,drop=FALSE]
  ret <- data_out$conc
  # The per-point rule name is attached for inspection by the caller.
  attr(ret, "Method") <- data_out$method
  ret
}
# Dose-aware interpolation/extrapolation helpers ####
# Impossible combinations ####

# Select timeline rows whose (event_before, event, event_after) triple
# cannot occur: code-restricted neighbors, "none" events, and invalid
# sequences around an IV-bolus "after" instant.
iecd_impossible_select <- function(x) {
  bolus_after <- c("conc_dose_iv_bolus_after", "dose_iv_bolus_after")
  restricted_in_code <-
    x$event_before %in% "output_only" | # Restricted in code
    x$event %in% "none" | # "none" events do not occur, they are before or after the timeline
    x$event_after %in% "output_only" # Restricted in code
  invalid_bolus_sequence <-
    (x$event %in% "output_only" &
       x$event_after %in% bolus_after) |
    (x$event_before %in% bolus_after &
       x$event %in% bolus_after) |
    (x$event %in% bolus_after &
       x$event_after %in% bolus_after) |
    (x$event_before %in% bolus_after &
       x$event %in% c("none", "output_only") &
       x$event_after %in% bolus_after)
  restricted_in_code | invalid_bolus_sequence
}

# Value for an impossible combination: always an error describing the
# offending event triple (these rows should never be reached).
iecd_impossible_value <- function(data_all, current_idx, ...) {
  stop(
    sprintf(
      "Impossible combination requested for interp.extrap.conc.dose. event_before: %s, event: %s, event_after: %s",
      data_all$event_before[current_idx],
      data_all$event[current_idx],
      data_all$event_after[current_idx])) # nocov
}
# Observed concentration ####
# Select rows that carry a measured concentration and are not already
# claimed by the "Impossible combinations" rule (looked up from the
# file-level interp.extrap.conc.dose.select list).
iecd_observed_select <- function(x) {
  x$event %in% c("conc_dose_iv_bolus_after", "conc_dose", "conc") &
    !do.call(interp.extrap.conc.dose.select[["Impossible combinations"]]$select, list(x=x))
}
# Value for an observed concentration: return it unchanged.
iecd_observed_value <- function(data_all, current_idx, ...) {
  data_all$conc[current_idx]
}
# Before all events ####

# Select output rows that precede every real event on the timeline
# (nothing before them) and are not one of the impossible pairings with
# what follows.
iecd_before_select <- function(x) {
  nothing_before <- x$event_before %in% "none"
  event_allowed <-
    !(x$event %in% c("conc_dose_iv_bolus_after", "conc_dose", "conc",
                     "dose_iv_bolus_after", "none"))
  # These neighbor combinations are impossible and excluded here.
  impossible_neighbors <-
    x$event_after %in% "output_only" |
    (x$event %in% "output_only" &
       x$event_after %in% c("conc_dose_iv_bolus_after", "dose_iv_bolus_after"))
  nothing_before & event_allowed & !impossible_neighbors
}

# Before the first event the concentration defaults to conc.origin
# (0 unless overridden, e.g. for endogenous compounds).
iecd_before_value <- function(data_all, current_idx, conc.origin=0, ...) {
  conc.origin
}
# Interpolation ####
# Select output-only rows bracketed by concentrations within a dosing
# interval. NOTE(review): the final clause excludes conc_dose -> conc_dose
# spans; presumably those are handled by another rule -- confirm against
# the full interp.extrap.conc.dose.select list.
iecd_interp_select <- function(x) {
  x$event_before %in% c("conc_dose_iv_bolus_after", "conc_dose", "conc") &
    x$event %in% c("output_only") &
    x$event_after %in% c("conc_dose", "conc") &
    !(x$event_before %in% "conc_dose" &
        x$event_after %in% "conc_dose")
}
# Interpolate using only the non-missing concentrations within the same
# dosing interval as the requested row.
iecd_interp_value <- function(data_all, current_idx, ...) {
  tmp_conc <- data_all[!is.na(data_all$conc) &
                         data_all$dose_count %in% data_all$dose_count[current_idx],]
  interpolate.conc(conc=tmp_conc$conc, time=tmp_conc$time,
                   time.out=data_all$time[current_idx],
                   check=FALSE, ...)
}
# Extrapolation ####

# Select rows after a concentration where extrapolation is needed:
# either an output-only request followed by a dose (or nothing), or a
# dose row itself that is not immediately followed by an output row.
iecd_extrap_select <- function(x) {
  conc_before <- x$event_before %in% c("conc_dose_iv_bolus_after", "conc")
  output_case <-
    x$event %in% "output_only" &
    x$event_after %in% c("dose", "none")
  dose_case <-
    x$event %in% "dose" &
    !(x$event_after %in% "output_only")
  conc_before & (output_case | dose_case)
}
# Extrapolate from the last non-missing concentration before the
# requested time, using extrapolate.conc (monoexponential via lambda.z
# unless the last value is BLQ).
iecd_extrap_value <- function(data_all, current_idx, lambda.z, ...) {
  # Last row before the requested time with a known concentration.
  last_conc <- data_all[data_all$time < data_all$time[current_idx] &
                          !is.na(data_all$conc),]
  last_conc <- last_conc[nrow(last_conc),]
  if (last_conc$conc %in% 0) {
    # BLQ continues to be BLQ
    0
  } else {
    # lambda.z has no default; treat a missing argument as NA (which
    # makes extrapolate.conc return NA rather than erroring).
    if (missing(lambda.z)) {
      lambda.z <- NA_real_
    }
    # Build the call explicitly so that a caller-supplied 'clast' in
    # '...' wins; otherwise default clast to the last concentration.
    args <- list(conc=last_conc$conc[nrow(last_conc)],
                 time=last_conc$time[nrow(last_conc)],
                 time.out=data_all$time[current_idx], lambda.z=lambda.z,
                 ...)
    if (!("clast" %in% names(args))) {
      args$clast <- last_conc$conc[nrow(last_conc)]
    }
    do.call(extrapolate.conc, args)
  }
}
# Immediately after an IV bolus with a concentration next ####
# Select the "after" instant of an IV bolus when a concentration follows
# (so C0 can be back-extrapolated from the post-dose data).
iecd_iv_conc_select <- function(x) {
  !(x$event_before %in% c("conc_dose_iv_bolus_after", "dose_iv_bolus_after", "output_only")) &
    x$event %in% "dose_iv_bolus_after" &
    x$event_after %in% c("conc_dose", "conc")
}
# Estimate C0 at the bolus time from the concentrations in this dosing
# interval using pk.calc.c0 (log-slope back-extrapolation, falling back
# to the first post-dose concentration).
iecd_iv_conc_value <- function(data_all, current_idx, ...) {
  # Concentrations belonging to this interval (including one measured at
  # the next dose time, via dose_count_prev).
  tmp_conc <- data_all[data_all$conc_event &
                         (data_all$dose_count %in% data_all$dose_count[current_idx] |
                            data_all$dose_count_prev %in% data_all$dose_count[current_idx]),]
  tmp_dose <- data_all[data_all$dose_event &
                         data_all$dose_count %in% data_all$dose_count[current_idx],]
  pk.calc.c0(conc=tmp_conc$conc, time=tmp_conc$time,
             time.dose=tmp_dose$time,
             method=c("logslope", "c1"))
}
# Immediately after an IV bolus without a concentration next ####

# Select rows at (or immediately following) an IV bolus when no
# concentration is available afterward to back-extrapolate C0 from.
iecd_iv_noconc_select <- function(x) {
  current_bolus_no_conc <-
    x$event %in% "dose_iv_bolus_after" &
    x$event_before %in% c("conc", "conc_dose", "dose", "none") &
    x$event_after %in% c("dose", "none")
  previous_bolus_no_conc <-
    x$event %in% "dose" &
    x$event_before %in% "dose_iv_bolus_after" &
    x$event_after %in% c("conc_dose_iv_bolus_after", "conc_dose",
                         "dose_iv_bolus_after", "dose", "conc", "none")
  current_bolus_no_conc | previous_bolus_no_conc
}
# After an IV bolus with a concentration next ####

# Select output-only rows that fall between an IV bolus "after" instant
# and a following measured concentration.
iecd_afteriv_conc_select <- function(x) {
  after_bolus <- x$event_before %in% "dose_iv_bolus_after"
  requesting_output <- x$event %in% "output_only"
  conc_follows <- x$event_after %in% c("conc_dose", "conc")
  after_bolus & requesting_output & conc_follows
}
iecd_afteriv_conc_value <- function(data_all, current_idx, ...) {
  # Interpolate between the back-extrapolated C0 at the dose time and the
  # observed post-dose concentrations.
  tmp_conc <- data_all[data_all$conc_event &
                         (data_all$dose_count %in% data_all$dose_count[current_idx] |
                            data_all$dose_count_prev %in% data_all$dose_count[current_idx]),
                       c("conc", "time")]
  tmp_dose <- data_all[data_all$dose_event &
                         data_all$dose_count %in% data_all$dose_count[current_idx],]
  # Prepend C0 (at the dose time) so the interval starting at the dose is
  # covered by the interpolation grid.
  tmp_conc <- rbind(
    data.frame(
      time=tmp_dose$time,
      conc=
        pk.calc.c0(conc=tmp_conc$conc, time=tmp_conc$time,
                   time.dose=tmp_dose$time,
                   method=c("logslope", "c1"))),
    tmp_conc)
  interpolate.conc(conc=tmp_conc$conc,
                   time=tmp_conc$time,
                   time.out=data_all$time[current_idx],
                   check=FALSE, ...)
}
# After an IV bolus without a concentration next ####
iecd_afteriv_noconc_select <- function(x) {
  # Output-only point after an IV bolus with nothing but a dose (or the end
  # of data) following: there is no concentration to interpolate toward.
  x$event_before %in% c("dose_iv_bolus_after") &
    x$event %in% "output_only" &
    x$event_after %in% c("dose", "none")
}
# Doses with no concentrations between ####
iecd_dose_noconc_select <- function(x) {
  # Two doses with no concentration between them, either at the second dose
  # itself or at an output-only point bracketed by doses.
  dose_current <-
    x$event_before %in% c("conc_dose", "dose") &
    x$event %in% "dose" &
    !(x$event_after %in% "output_only")
  dose_around <-
    x$event_before %in% c("dose", "conc_dose") &
    x$event %in% "output_only" &
    x$event_after %in% c("dose", "conc_dose")
  dose_current | dose_around
}
# Dose as the last event in the timeline and requesting a concentration after ####
iecd_dose_last_select <- function(x) {
  # A dose is the final event on the timeline and a concentration is
  # requested after it.
  x$event_before %in% c("conc_dose", "dose") &
    x$event %in% "output_only" &
    x$event_after %in% "none"
}
# Dose before, concentration after without a dose ####
iecd_dose_conc_select <- function(x) {
  # Output point after a dose (with no concentration at the dose) and before
  # the next measured concentration.
  x$event_before %in% "dose" &
    x$event %in% "output_only" &
    x$event_after %in% "conc"
}
iecd_dose_conc_value <- function(data_all, current_idx, ...) {
  # Restrict to dose and concentration rows (dropping output-only rows) and
  # interpolate across them; yields NA if the concentration at the dose time
  # is not estimable.
  data_tmp <- data_all[data_all$dose_event | data_all$conc_event,]
  interpolate.conc(conc=data_tmp$conc, time=data_tmp$time,
                   time.out=data_all$time[current_idx], ...,
                   check=FALSE)
}
# Ordered dispatch table for interp.extrap.conc.dose.  Each entry names a
# selector function (matching rows by their before/current/after event
# pattern) and either a value function that computes the concentration or a
# warning (in which case the value remains NA).  Entries are tried in order
# and the first match wins, so ordering matters.
interp.extrap.conc.dose.select <-
  list(
    "Impossible combinations"=
      list(
        select="iecd_impossible_select",
        value="iecd_impossible_value",
        description="The event combination cannot exist."),
    "Observed concentration"=
      list(
        select="iecd_observed_select",
        value="iecd_observed_value",
        description="Copy the input concentration at the given time to the output."),
    "Before all events"=
      list(
        select="iecd_before_select",
        value="iecd_before_value",
        description=paste("Interpolation before any events is NA or zero (0)",
                          "depending on the value of conc.origin. conc.origin",
                          "defaults to zero which is the implicit assumption",
                          "that a complete washout occurred and there is no",
                          "endogenous source of the analyte.")),
    "Interpolation"=
      list(
        select="iecd_interp_select",
        value="iecd_interp_value",
        description=paste("With concentrations before and after and not an IV",
                          "bolus before, interpolate between observed concentrations.")),
    "Extrapolation"=
      list(
        select="iecd_extrap_select",
        value="iecd_extrap_value",
        description="Extrapolate from a concentration to a dose"),
    "Immediately after an IV bolus with a concentration next"=
      list(
        select="iecd_iv_conc_select",
        value="iecd_iv_conc_value",
        description=paste("Calculate C0 for the time immediately after an IV",
                          "bolus. First, attempt using log slope",
                          "back-extrapolation. If that fails, use the first",
                          "concentration after the dose as C0.")),
    "Immediately after an IV bolus without a concentration next"=
      list(
        select="iecd_iv_noconc_select",
        warning="Cannot interpolate immediately after an IV bolus without a concentration next.",
        description="Cannot calculate C0 without a concentration after an IV bolus; return NA."),
    "After an IV bolus with a concentration next"=
      list(
        select="iecd_afteriv_conc_select",
        value="iecd_afteriv_conc_value",
        description=paste("First, calculate C0 using log slope back-extrapolation",
                          "(falling back to the first post-dose concentration",
                          "if that fails). Then, interpolate between C0 and",
                          "the first post-dose concentration.")),
    "After an IV bolus without a concentration next"=
      list(
        select="iecd_afteriv_noconc_select",
        warning="Cannot interpolate after an IV bolus without a concentration next.",
        description=paste("Between an IV bolus and anything other than a",
                          "concentration, interpolation cannot occur. Return NA")),
    "Doses with no concentrations between"=
      list(
        select="iecd_dose_noconc_select",
        warning="Cannot interpolate between two doses or after a dose without a concentration after the first dose.",
        description="Two doses with no concentrations between them, return NA."),
    "Dose as the last event in the timeline and requesting a concentration after"=
      list(
        select="iecd_dose_last_select",
        warning="Cannot extrapolate from a dose without any concentrations after it.",
        description=paste("Cannot estimate the concentration after a dose",
                          "without concentrations after the dose, return NA.")),
    "Dose before, concentration after without a dose"=list(
      select="iecd_dose_conc_select",
      value="iecd_dose_conc_value",
      description="If the concentration at the dose is estimable, interpolate. Otherwise, NA."))
| /R/interpolate.conc.R | no_license | ksl31/pknca | R | false | false | 30,996 | r | #' Interpolate concentrations between measurements or extrapolate
#' concentrations after the last measurement.
#'
#' \code{interpolate.conc} and \code{extrapolate.conc} returns an
#' interpolated (or extrapolated) concentration.
#' \code{interp.extrap.conc} will choose whether interpolation or
#' extrapolation is required and will also operate on many
#' concentrations. These will typically be used to estimate the
#' concentration between two measured concentrations or after the last
#' measured concentration. Of note, these functions will not
#' extrapolate prior to the first point.
#'
#' @param conc Measured concentrations
#' @param time Time of the concentration measurement
#' @param time.dose Time of the dose
#' @param time.out Time when interpolation is requested (vector for
#' \code{interp.extrap.conc}, scalar otherwise)
#' @param lambda.z The elimination rate constant. \code{NA} will
#' prevent extrapolation.
#' @param clast The last observed concentration above the limit of
#' quantification. If not given, \code{clast} is calculated from
#' \code{\link{pk.calc.clast.obs}}
#' @param conc.origin The concentration before the first measurement.
#' \code{conc.origin} is typically used to set predose values to zero
#' (default), set a predose concentration for endogenous compounds, or
#' set predose concentrations to \code{NA} if otherwise unknown.
#' @param options List of changes to the default
#' \code{\link{PKNCA.options}} for calculations.
#' @param interp.method The method for interpolation (either 'lin up/log
#' down' or 'linear')
#' @param extrap.method The method for extrapolation: "AUCinf",
#' "AUClast", or "AUCall". See details for usage.
#' @param conc.blq How to handle BLQ values. (See
#' \code{\link{clean.conc.blq}} for usage instructions.)
#' @param conc.na How to handle NA concentrations. (See
#' \code{\link{clean.conc.na}})
#' @param route.dose What is the route of administration
#' ("intravascular" or "extravascular"). See the details below for
#' how this parameter is used.
#' @param duration.dose What is the duration of administration? See the
#' details below for how this parameter is used.
#' @param out.after Should interpolation occur from the data before
#' (\code{FALSE}) or after (\code{TRUE}) the interpolated point? See
#' the details below for how this parameter is used. It only has a
#' meaningful effect at the instant of an IV bolus dose.
#' @param check Run \code{\link{check.conc.time}},
#' \code{\link{clean.conc.blq}}, and \code{\link{clean.conc.na}}?
#' @param ... Additional arguments passed to \code{interpolate.conc} or
#' \code{extrapolate.conc}.
#' @return The interpolated or extrapolated concentration value as a
#' scalar float.
#' @details
#' \describe{
#' \item{extrap.method}{
#' \describe{
#' \item{'AUCinf'}{Use lambda.z to extrapolate beyond the last point with the half-life.}
#' \item{'AUCall'}{If the last point is above the limit of quantification or missing, this is identical to 'AUCinf'. If the last point is below the limit of quantification, then linear interpolation between the Clast and the next BLQ is used for that interval and all additional points are extrapolated as 0.}
#' \item{'AUClast'}{Extrapolates all points after the last above the limit of quantification as 0.}
#' }
#' }
#' }
#'
#' \code{duration.dose} and \code{direction.out} are ignored if
#' \code{route.dose == "extravascular"}. \code{direction.out} is ignored
#' if \code{duration.dose > 0}.
#'
#' \code{route.dose} and \code{duration.dose} affect how
#' interpolation/extrapolation of the concentration occurs at the time
#' of dosing. If \code{route.dose == "intravascular"} and
#' \code{duration.dose == 0} then extrapolation occurs for an IV bolus
#' using \code{\link{pk.calc.c0}} with the data after dosing. Otherwise
#' (either \code{route.dose == "extravascular"} or \code{duration.dose >
#' 0}), extrapolation occurs using the concentrations before dosing and
#' estimating the half-life (or more precisely, estimating
#' \code{lambda.z}). Finally, \code{direction.out} can change the
#' direction of interpolation in cases with \code{route.dose ==
#' "intravascular"} and \code{duration.dose == 0}. When
#' \code{direction.out == "before"} interpolation occurs only with data
#' before the dose (as is the case for \code{route.dose ==
#' "extravascular"}), but if \code{direction.out == "after"}
#' interpolation occurs from the data after dosing.
#'
#' @seealso \code{\link{pk.calc.clast.obs}},
#' \code{\link{pk.calc.half.life}}, \code{\link{pk.calc.c0}}
#' @export
interp.extrap.conc <- function(conc, time, time.out,
                               lambda.z=NA,
                               clast=pk.calc.clast.obs(conc, time),
                               options=list(),
                               interp.method=PKNCA.choose.option("auc.method", options),
                               extrap.method="AUCinf",
                               ...,
                               conc.blq=PKNCA.choose.option("conc.blq", options),
                               conc.na=PKNCA.choose.option("conc.na", options),
                               check=TRUE) {
  # Optionally validate and clean the inputs before dispatching.
  if (check) {
    check.conc.time(conc, time)
    data <- clean.conc.blq(conc, time,
                           conc.blq=conc.blq, conc.na=conc.na,
                           check=FALSE)
  } else {
    data <- data.frame(conc, time)
  }
  tlast <- pk.calc.tlast(data$conc, data$time, check=FALSE)
  if (length(time.out) < 1) {
    stop("time.out must be a vector with at least one element")
  }
  ret <- rep(NA, length(time.out))
  # Dispatch each requested time to interpolation (at or before Tlast) or
  # extrapolation (after Tlast).
  for (idx in seq_along(time.out)) {
    current_time <- time.out[idx]
    if (is.na(current_time)) {
      warning("An interpolation/extrapolation time is NA")
    } else if (current_time <= tlast) {
      ret[idx] <- interpolate.conc(data$conc, data$time,
                                   current_time,
                                   interp.method=interp.method,
                                   conc.blq=conc.blq,
                                   conc.na=conc.na,
                                   check=FALSE)
    } else {
      ret[idx] <- extrapolate.conc(data$conc, data$time,
                                   current_time,
                                   lambda.z=lambda.z,
                                   clast=clast,
                                   extrap.method=extrap.method,
                                   check=FALSE)
    }
  }
  ret
}
#' @describeIn interp.extrap.conc Interpolate concentrations through Tlast (inclusive)
#' @export
interpolate.conc <- function(conc, time, time.out,
                             options=list(),
                             interp.method=PKNCA.choose.option("auc.method", options),
                             conc.blq=PKNCA.choose.option("conc.blq", options),
                             conc.na=PKNCA.choose.option("conc.na", options),
                             conc.origin=0,
                             ...,
                             check=TRUE) {
  ## Check the inputs
  if (check) {
    check.conc.time(conc, time)
    data <- clean.conc.blq(conc, time,
                           conc.blq=conc.blq, conc.na=conc.na,
                           check=FALSE)
  } else {
    data <- data.frame(conc, time)
  }
  # Ensure that conc.origin is valid
  if (length(conc.origin) != 1) {
    stop("conc.origin must be a scalar")
  }
  if (!(is.na(conc.origin) | (is.numeric(conc.origin) & !is.factor(conc.origin)))) {
    stop("conc.origin must be NA or a number (and not a factor)")
  }
  ## Verify that we are interpolating between the first concentration
  ## and the last above LOQ concentration
  if (length(time.out) != 1) {
    stop("Can only interpolate for one time point per function call")
  }
  tlast <- pk.calc.tlast(data$conc, data$time, check=FALSE)
  if (time.out < min(data$time)) {
    # Before the first measurement: the assumed pre-first-event concentration
    ret <- conc.origin
  } else if (time.out > tlast) {
    stop("interpolate.conc can only work through Tlast, please use interp.extrap.conc to combine both interpolation and extrapolation.")
  } else if (!(tolower(interp.method) %in% c("lin up/log down", "linear"))) {
    stop("interp.method must be one of 'linear' or 'lin up/log down'")
  } else if (time.out %in% data$time) {
    ## Exact time match: return the concentration at that time.
    ## Bug fix: index the CLEANED data, not the raw input vector.  After
    ## clean.conc.blq/clean.conc.na, rows of `data` may not line up with
    ## `conc`, so `conc[time.out == data$time]` could recycle the (shorter)
    ## mask and return the wrong value.
    ret <- data$conc[data$time == time.out]
  } else {
    ## Find the last time before and the first time after the output
    ## time.
    time.1 <- max(data$time[data$time <= time.out])
    time.2 <- min(data$time[time.out <= data$time])
    conc.1 <- data$conc[data$time == time.1]
    conc.2 <- data$conc[data$time == time.2]
    interp.method <- tolower(interp.method)
    if (is.na(conc.1) | is.na(conc.2)) {
      # Cannot interpolate across a missing concentration
      ret <- NA_real_
    } else if ((interp.method == "linear") |
               (interp.method == "lin up/log down" &
                ((conc.1 <= 0 | conc.2 <= 0) |
                 (conc.1 <= conc.2)))) {
      ## Do linear interpolation if:
      ##   linear interpolation is selected or
      ##   lin up/log down interpolation is selected and
      ##     one concentration is <= 0 or
      ##     the concentrations are non-decreasing
      ret <- conc.1+(time.out-time.1)/(time.2-time.1)*(conc.2-conc.1)
    } else if (interp.method == "lin up/log down") {
      # Log-linear (exponential) interpolation on the declining phase
      ret <- exp(log(conc.1)+
                 (time.out-time.1)/(time.2-time.1)*(log(conc.2)-log(conc.1)))
    } else {
      stop("You should never see this error. Please report this as a bug with a reproducible example.") # nocov
    }
  }
  ret
}
#' @describeIn interp.extrap.conc Extrapolate concentrations after Tlast
#' @export
extrapolate.conc <- function(conc, time, time.out,
                             lambda.z=NA, clast=pk.calc.clast.obs(conc, time),
                             extrap.method="AUCinf",
                             options=list(),
                             conc.na=PKNCA.choose.option("conc.na", options),
                             conc.blq=PKNCA.choose.option("conc.blq", options),
                             ...,
                             check=TRUE) {
  if (check) {
    check.conc.time(conc, time)
    data <- clean.conc.blq(conc, time, conc.na=conc.na, check=FALSE)
  } else {
    data <- data.frame(conc, time)
  }
  extrap.method <- tolower(extrap.method)
  if (!(extrap.method %in%
        c("aucinf", "aucall", "auclast"))) {
    stop("extrap.method must be one of 'AUCinf', 'AUClast', or 'AUCall'")
  }
  if (length(time.out) != 1) {
    stop("Only one time.out value may be estimated at once.")
  }
  tlast <- pk.calc.tlast(data$conc, data$time, check=FALSE)
  if (is.na(tlast)) {
    ## If there are no observed concentrations, return NA.
    ## NA_real_ (not logical NA) keeps the return type numeric across
    ## branches.
    ret <- NA_real_
  } else if (time.out <= tlast) {
    stop("extrapolate.conc can only work beyond Tlast, please use interp.extrap.conc to combine both interpolation and extrapolation.")
  } else {
    ## Start the extrapolation
    if (extrap.method %in% "aucinf") {
      ## If AUCinf is requested, extrapolate using the half-life
      ret <- clast*exp(-lambda.z*(time.out - tlast))
    } else if (extrap.method %in% "auclast" |
               (extrap.method %in% "aucall" &
                tlast == max(data$time))) {
      ## If AUClast is requested or AUCall is requested and there are
      ## no BLQ at the end, we are already certain that we are after
      ## Tlast, so the answer is 0.
      ret <- 0
    } else if (extrap.method %in% "aucall") {
      ## AUCall with trailing BLQ values: triangle method between Tlast
      ## and the first BLQ time.
      time.prev <- max(data$time[data$time <= time.out])
      conc.prev <- data$conc[data$time %in% time.prev]
      if (conc.prev %in% 0) {
        ## If we are already BLQ, then we have confirmed that there
        ## are no more ALQ measurements (because we are beyond
        ## Tlast) and therefore we can extrapolate as 0.
        ret <- 0
      } else {
        ## conc.prev > 0 implies time.prev == tlast, and this branch is
        ## only reached when tlast < max(data$time), so a later (BLQ)
        ## point always exists.  (Previously time.next was assigned
        ## inside a conditional yet used outside it, and conc.next was
        ## computed but never used.)
        time.next <- min(data$time[data$time >= time.out])
        ret <- (time.out - time.prev)/(time.next - time.prev)*conc.prev
      }
    } else {
      stop("Invalid extrap.method caught too late (seeing this error indicates a software bug)") # nocov
    }
  }
  ret
}
# Choices for events in interp.extrap.conc.dose. This is included here to assist with testing later.
# Canonical event labels used by interp.extrap.conc.dose; a named list whose
# names equal their values so entries can be referenced by name.  Included
# here (rather than inline strings) to assist with testing later.
event_choices_interp.extrap.conc.dose <-
  local({
    events <- c("conc_dose_iv_bolus_after", "conc_dose", "dose_iv_bolus_after",
                "dose", "conc", "output_only", "none")
    setNames(as.list(events), events)
  })
#' @importFrom dplyr case_when
#' @describeIn interp.extrap.conc Interpolate and extrapolate
#' concentrations without interpolating or extrapolating beyond doses.
#' @export
interp.extrap.conc.dose <- function(conc, time,
                                    time.dose, route.dose="extravascular", duration.dose=NA,
                                    time.out, out.after=FALSE,
                                    options=list(),
                                    conc.blq=PKNCA.choose.option("conc.blq", options),
                                    conc.na=PKNCA.choose.option("conc.na", options),
                                    ...,
                                    check=TRUE) {
  # Clean/validate the concentration inputs (optionally), mirroring
  # interpolate.conc and extrapolate.conc.
  if (check) {
    check.conc.time(conc, time)
    data_conc <-
      clean.conc.blq(conc, time,
                     conc.blq=conc.blq, conc.na=conc.na,
                     check=FALSE)
  } else {
    data_conc <- data.frame(conc, time)
  }
  # Check other inputs
  if (!is.character(route.dose)) {
    route.dose <- as.character(route.dose)
  }
  if (!(all(route.dose %in% c("extravascular", "intravascular")))) {
    stop("route.dose must be either 'extravascular' or 'intravascular'")
  }
  if (!(length(route.dose) %in% c(1, length(time.dose)))) {
    stop("route.dose must either be a scalar or the same length as time.dose")
  }
  if (!all(is.na(duration.dose) | (is.numeric(duration.dose) & !is.factor(duration.dose)))) {
    stop("duration.dose must be NA or a number.")
  }
  if (!(length(duration.dose) %in% c(1, length(time.dose)))) {
    stop("duration.dose must either be a scalar or the same length as time.dose")
  }
  # Generate a single timeline
  # Concentrations are assumed to occur before dosing
  data_conc$out_after <- FALSE
  data_dose <-
    merge(
      data.frame(dose=TRUE,
                 time=time.dose,
                 route=route.dose,
                 duration=duration.dose,
                 iv_bolus=route.dose %in% "intravascular" & duration.dose %in% 0,
                 stringsAsFactors=FALSE),
      # Expand IV bolus dosing to have a before and after concentration
      data.frame(iv_bolus=c(FALSE, TRUE, TRUE),
                 out_after=c(FALSE, FALSE, TRUE)),
      all.x=TRUE)
  data_out <-
    data.frame(out=TRUE,
               out_after=out.after,
               # seq_along() (not 1:length()) so an empty time.out yields a
               # zero-row data.frame instead of the c(1, 0) recycling bug.
               out_order=seq_along(time.out),
               time=time.out)
  # Merge concentrations, doses, and output requests into one event table.
  data_all <-
    merge(merge(data_conc,
                data_dose,
                all=TRUE),
          data_out,
          all=TRUE)
  data_all$dose_event <- data_all$dose %in% TRUE
  data_all$conc_event <- !is.na(data_all$conc)
  data_all$iv_bolus <- data_all$iv_bolus %in% TRUE
  data_all$out <- data_all$out %in% TRUE
  # Number the dosing intervals; dose_count_prev shifts a concentration
  # measured at (but before) a dose into the previous interval.
  data_all$dose_count <- cumsum(data_all$dose_event)
  mask_include_before <- data_all$dose_event & data_all$conc_event & !data_all$out_after
  data_all$dose_count_prev <- data_all$dose_count - mask_include_before
  # Classify every row by what happens at that time.
  data_all$event <- dplyr::case_when(
    data_all$dose_event & data_all$conc_event & data_all$iv_bolus & data_all$out_after~event_choices_interp.extrap.conc.dose$conc_dose_iv_bolus_after,
    data_all$dose_event & data_all$conc_event~event_choices_interp.extrap.conc.dose$conc_dose,
    data_all$dose_event & data_all$iv_bolus & data_all$out_after~event_choices_interp.extrap.conc.dose$dose_iv_bolus_after,
    data_all$dose_event~event_choices_interp.extrap.conc.dose$dose,
    data_all$conc_event~event_choices_interp.extrap.conc.dose$conc,
    data_all$out~event_choices_interp.extrap.conc.dose$output_only, # interpolation/extrapolation-only row
    TRUE~"unknown") # should never happen
  if (any(mask_unknown <- data_all$event %in% "unknown")) {
    # All events should be accounted for
    stop("Unknown event in interp.extrap.conc.dose at time(s): ",
         paste(unique(data_all$time[mask_unknown]), collapse=", ")) # nocov
  }
  # Remove "output_only" from event_before and event_after by carrying the
  # last non-output event forward (or backward for event_after).
  simple_locf <- function(x, missing_val) {
    mask_found <- !(x %in% missing_val)
    ret <- x[mask_found]
    ret[cumsum(mask_found)]
  }
  data_all$event_before <- simple_locf(c(event_choices_interp.extrap.conc.dose$none, data_all$event[-nrow(data_all)]),
                                       "output_only")
  data_all$event_after <- rev(simple_locf(rev(c(data_all$event[-1], event_choices_interp.extrap.conc.dose$none)),
                                          "output_only"))
  # Loop through the methods until all have been tested or no missing
  # values remain.  Methods with a "warning" element leave the value as NA.
  data_all$method <- NA_character_
  for (nm in names(interp.extrap.conc.dose.select)) {
    mask <- is.na(data_all$method) &
      do.call(interp.extrap.conc.dose.select[[nm]]$select, list(x=data_all))
    if (any(mask)) {
      if ("warning" %in% names(interp.extrap.conc.dose.select[[nm]])) {
        warning(sprintf("%s: %d data points",
                        interp.extrap.conc.dose.select[[nm]]$warning,
                        sum(mask)))
        data_all$method[mask] <- nm
      } else {
        for (current_idx in which(mask)) {
          data_all$conc[current_idx] <-
            do.call(interp.extrap.conc.dose.select[[nm]]$value,
                    list(data_all=data_all,
                         current_idx=current_idx,
                         options=options,
                         ...))
          data_all$method[current_idx] <- nm
        }
      }
    }
  }
  if (any(mask_no_method <- is.na(data_all$method))) {
    # This should never happen, all eventualities should be covered
    stop("No method for imputing concentration at time(s): ",
         paste(unique(data_all$time[mask_no_method]), collapse=", ")) # nocov
  }
  # Filter to the requested time points, in request order; record the method
  # used for each point in the "Method" attribute.
  data_out <- data_all[data_all$out,,drop=FALSE]
  data_out <- data_out[order(data_out$out_order),,drop=FALSE]
  ret <- data_out$conc
  attr(ret, "Method") <- data_out$method
  ret
}
# Dose-aware interpolation/extrapolation functions ####
# Impossible combinations ####
iecd_impossible_select <- function(x) {
  # Event patterns that timeline construction can never produce; reaching a
  # value function for one of these indicates a bug elsewhere.
  bolus <- c("conc_dose_iv_bolus_after", "dose_iv_bolus_after")
  x$event_before %in% "output_only" | # Restricted in code
    x$event %in% "none" | # "none" events do not occur, they are before or after the timeline
    x$event_after %in% "output_only" | # Restricted in code
    (x$event %in% "output_only" & x$event_after %in% bolus) |
    (x$event_before %in% bolus & x$event %in% bolus) |
    (x$event %in% bolus & x$event_after %in% bolus) |
    (x$event_before %in% bolus &
       x$event %in% c("none", "output_only") &
       x$event_after %in% bolus)
}
iecd_impossible_value <- function(data_all, current_idx, ...) {
  # Reaching this is a programming error in event classification, not a
  # problem with the user's data.
  msg <- sprintf(
    "Impossible combination requested for interp.extrap.conc.dose. event_before: %s, event: %s, event_after: %s",
    data_all$event_before[current_idx],
    data_all$event[current_idx],
    data_all$event_after[current_idx])
  stop(msg) # nocov
}
# Observed concentration ####
iecd_observed_select <- function(x) {
  # A concentration was measured at this time: use it directly unless the
  # surrounding event pattern is classified as impossible.
  has_conc <- x$event %in% c("conc_dose_iv_bolus_after", "conc_dose", "conc")
  impossible <- do.call(interp.extrap.conc.dose.select[["Impossible combinations"]]$select,
                        list(x=x))
  has_conc & !impossible
}
iecd_observed_value <- function(data_all, current_idx, ...) {
  # Pass the measured concentration through unchanged.
  data_all$conc[[current_idx]]
}
# Before all events ####
iecd_before_select <- function(x) {
  # Points before any dose or concentration on the timeline, excluding rows
  # that carry their own observation and patterns handled as impossible.
  before_timeline <- x$event_before %in% "none"
  not_observed <- !(x$event %in% c("conc_dose_iv_bolus_after", "conc_dose", "conc",
                                   "dose_iv_bolus_after", "none"))
  impossible <- x$event_after %in% "output_only" |
    (x$event %in% "output_only" &
       x$event_after %in% c("conc_dose_iv_bolus_after", "dose_iv_bolus_after"))
  before_timeline & not_observed & !impossible
}
iecd_before_value <- function(data_all, current_idx, conc.origin=0, ...) {
  # Before any event the concentration is the assumed pre-treatment level;
  # zero by default (complete washout, no endogenous analyte).
  conc.origin
}
# Interpolation ####
iecd_interp_select <- function(x) {
  # Output-only point bracketed by concentrations (not immediately after an
  # IV bolus); spanning from one dose+conc row to another is excluded since
  # a dose lies in between.
  conc_before <- x$event_before %in% c("conc_dose_iv_bolus_after", "conc_dose", "conc")
  conc_after <- x$event_after %in% c("conc_dose", "conc")
  spans_dose <- x$event_before %in% "conc_dose" & x$event_after %in% "conc_dose"
  conc_before & x$event %in% "output_only" & conc_after & !spans_dose
}
iecd_interp_value <- function(data_all, current_idx, ...) {
  # Interpolate using only the non-missing concentrations within the same
  # dosing interval as the requested time point.
  tmp_conc <- data_all[!is.na(data_all$conc) &
                         data_all$dose_count %in% data_all$dose_count[current_idx],]
  interpolate.conc(conc=tmp_conc$conc, time=tmp_conc$time,
                   time.out=data_all$time[current_idx],
                   check=FALSE, ...)
}
# Extrapolation ####
iecd_extrap_select <- function(x) {
  # Extrapolation applies when a concentration precedes the point and nothing
  # but a dose (or the end of the timeline) follows it, whether the point is
  # an output-only row or a dose row.
  after_conc <- x$event_before %in% c("conc_dose_iv_bolus_after", "conc")
  at_output <- x$event %in% "output_only" & x$event_after %in% c("dose", "none")
  at_dose <- x$event %in% "dose" & !(x$event_after %in% "output_only")
  after_conc & (at_output | at_dose)
}
iecd_extrap_value <- function(data_all, current_idx, lambda.z, ...) {
  # Extrapolate from the last non-missing concentration before the requested
  # time.  If lambda.z was not supplied, pass NA so extrapolate.conc returns
  # NA instead of erroring.
  last_conc <- data_all[data_all$time < data_all$time[current_idx] &
                          !is.na(data_all$conc),]
  last_conc <- last_conc[nrow(last_conc),]
  if (last_conc$conc %in% 0) {
    # BLQ continues to be BLQ
    0
  } else {
    if (missing(lambda.z)) {
      lambda.z <- NA_real_
    }
    args <- list(conc=last_conc$conc[nrow(last_conc)],
                 time=last_conc$time[nrow(last_conc)],
                 time.out=data_all$time[current_idx], lambda.z=lambda.z,
                 ...)
    # Default clast to the extrapolation-source concentration unless the
    # caller supplied one via ...
    if (!("clast" %in% names(args))) {
      args$clast <- last_conc$conc[nrow(last_conc)]
    }
    do.call(extrapolate.conc, args)
  }
}
# Immediately after an IV bolus with a concentration next ####
iecd_iv_conc_select <- function(x) {
  # The point is the instant after an IV bolus, a concentration (possibly
  # with a dose) comes next, and the preceding event is not itself an
  # IV-bolus "after" marker or an output-only row.
  ok_before <- !(x$event_before %in% c("conc_dose_iv_bolus_after",
                                       "dose_iv_bolus_after",
                                       "output_only"))
  at_bolus <- x$event %in% "dose_iv_bolus_after"
  conc_next <- x$event_after %in% c("conc_dose", "conc")
  ok_before & at_bolus & conc_next
}
iecd_iv_conc_value <- function(data_all, current_idx, ...) {
  # Estimate C0 just after an IV bolus from the concentrations belonging to
  # this dosing interval (or measured at the moment of this dose) and the
  # dose time.  pk.calc.c0 tries log-slope back-extrapolation first, then
  # falls back to the first post-dose concentration.
  tmp_conc <- data_all[data_all$conc_event &
                         (data_all$dose_count %in% data_all$dose_count[current_idx] |
                            data_all$dose_count_prev %in% data_all$dose_count[current_idx]),]
  tmp_dose <- data_all[data_all$dose_event &
                         data_all$dose_count %in% data_all$dose_count[current_idx],]
  pk.calc.c0(conc=tmp_conc$conc, time=tmp_conc$time,
             time.dose=tmp_dose$time,
             method=c("logslope", "c1"))
}
# Immediately after an IV bolus without a concentration next ####
iecd_iv_noconc_select <- function(x) {
  # An IV bolus with no concentration measured before the next dose (or the
  # end of data): C0 cannot be estimated.  The bolus may be the current
  # event or the immediately-preceding one.
  bolus_now <-
    x$event %in% "dose_iv_bolus_after" &
    x$event_before %in% c("conc", "conc_dose", "dose", "none") &
    x$event_after %in% c("dose", "none")
  bolus_prior <-
    x$event %in% "dose" &
    x$event_before %in% "dose_iv_bolus_after" &
    x$event_after %in% c("conc_dose_iv_bolus_after", "conc_dose",
                         "dose_iv_bolus_after", "dose", "conc", "none")
  bolus_now | bolus_prior
}
# After an IV bolus with a concentration next ####
iecd_afteriv_conc_select <- function(x) {
  # Output-only time point between an IV bolus and the next measured
  # concentration.
  x$event %in% "output_only" &
    x$event_before %in% "dose_iv_bolus_after" &
    x$event_after %in% c("conc_dose", "conc")
}
iecd_afteriv_conc_value <- function(data_all, current_idx, ...) {
  # Interpolate between the back-extrapolated C0 at the dose time and the
  # observed post-dose concentrations.
  tmp_conc <- data_all[data_all$conc_event &
                         (data_all$dose_count %in% data_all$dose_count[current_idx] |
                            data_all$dose_count_prev %in% data_all$dose_count[current_idx]),
                       c("conc", "time")]
  tmp_dose <- data_all[data_all$dose_event &
                         data_all$dose_count %in% data_all$dose_count[current_idx],]
  # Prepend C0 (at the dose time) so the interval starting at the dose is
  # covered by the interpolation grid.
  tmp_conc <- rbind(
    data.frame(
      time=tmp_dose$time,
      conc=
        pk.calc.c0(conc=tmp_conc$conc, time=tmp_conc$time,
                   time.dose=tmp_dose$time,
                   method=c("logslope", "c1"))),
    tmp_conc)
  interpolate.conc(conc=tmp_conc$conc,
                   time=tmp_conc$time,
                   time.out=data_all$time[current_idx],
                   check=FALSE, ...)
}
# After an IV bolus without a concentration next ####
iecd_afteriv_noconc_select <- function(x) {
  # Output-only point after an IV bolus with nothing but a dose (or the end
  # of data) following: there is no concentration to interpolate toward.
  x$event %in% "output_only" &
    x$event_before %in% "dose_iv_bolus_after" &
    x$event_after %in% c("dose", "none")
}
# Doses with no concentrations between ####
iecd_dose_noconc_select <- function(x) {
  # Two doses with no concentration between them: either the point is the
  # second dose itself or an output-only point bracketed by doses.
  at_second_dose <-
    x$event %in% "dose" &
    x$event_before %in% c("conc_dose", "dose") &
    !(x$event_after %in% "output_only")
  between_doses <-
    x$event %in% "output_only" &
    x$event_before %in% c("dose", "conc_dose") &
    x$event_after %in% c("dose", "conc_dose")
  at_second_dose | between_doses
}
# Dose as the last event in the timeline and requesting a concentration after ####
iecd_dose_last_select <- function(x) {
  # A dose is the final event on the timeline and a concentration is
  # requested after it.
  x$event %in% "output_only" &
    x$event_before %in% c("conc_dose", "dose") &
    x$event_after %in% "none"
}
# Dose before, concentration after without a dose ####
iecd_dose_conc_select <- function(x) {
  # Output point after a dose (with no concentration at the dose) and before
  # the next measured concentration.
  x$event %in% "output_only" &
    x$event_before %in% "dose" &
    x$event_after %in% "conc"
}
iecd_dose_conc_value <- function(data_all, current_idx, ...) {
  # Restrict to dose and concentration rows (dropping output-only rows) and
  # interpolate across them; yields NA if the concentration at the dose time
  # is not estimable.
  data_tmp <- data_all[data_all$dose_event | data_all$conc_event,]
  interpolate.conc(conc=data_tmp$conc, time=data_tmp$time,
                   time.out=data_all$time[current_idx], ...,
                   check=FALSE)
}
# Ordered dispatch table for interp.extrap.conc.dose.  Each entry names a
# selector function (matching rows by their before/current/after event
# pattern) and either a value function that computes the concentration or a
# warning (in which case the value remains NA).  Entries are tried in order
# and the first match wins, so ordering matters.
interp.extrap.conc.dose.select <-
  list(
    "Impossible combinations"=
      list(
        select="iecd_impossible_select",
        value="iecd_impossible_value",
        description="The event combination cannot exist."),
    "Observed concentration"=
      list(
        select="iecd_observed_select",
        value="iecd_observed_value",
        description="Copy the input concentration at the given time to the output."),
    "Before all events"=
      list(
        select="iecd_before_select",
        value="iecd_before_value",
        description=paste("Interpolation before any events is NA or zero (0)",
                          "depending on the value of conc.origin. conc.origin",
                          "defaults to zero which is the implicit assumption",
                          "that a complete washout occurred and there is no",
                          "endogenous source of the analyte.")),
    "Interpolation"=
      list(
        select="iecd_interp_select",
        value="iecd_interp_value",
        description=paste("With concentrations before and after and not an IV",
                          "bolus before, interpolate between observed concentrations.")),
    "Extrapolation"=
      list(
        select="iecd_extrap_select",
        value="iecd_extrap_value",
        description="Extrapolate from a concentration to a dose"),
    "Immediately after an IV bolus with a concentration next"=
      list(
        select="iecd_iv_conc_select",
        value="iecd_iv_conc_value",
        description=paste("Calculate C0 for the time immediately after an IV",
                          "bolus. First, attempt using log slope",
                          "back-extrapolation. If that fails, use the first",
                          "concentration after the dose as C0.")),
    "Immediately after an IV bolus without a concentration next"=
      list(
        select="iecd_iv_noconc_select",
        warning="Cannot interpolate immediately after an IV bolus without a concentration next.",
        description="Cannot calculate C0 without a concentration after an IV bolus; return NA."),
    "After an IV bolus with a concentration next"=
      list(
        select="iecd_afteriv_conc_select",
        value="iecd_afteriv_conc_value",
        description=paste("First, calculate C0 using log slope back-extrapolation",
                          "(falling back to the first post-dose concentration",
                          "if that fails). Then, interpolate between C0 and",
                          "the first post-dose concentration.")),
    "After an IV bolus without a concentration next"=
      list(
        select="iecd_afteriv_noconc_select",
        warning="Cannot interpolate after an IV bolus without a concentration next.",
        description=paste("Between an IV bolus and anything other than a",
                          "concentration, interpolation cannot occur. Return NA")),
    "Doses with no concentrations between"=
      list(
        select="iecd_dose_noconc_select",
        warning="Cannot interpolate between two doses or after a dose without a concentration after the first dose.",
        description="Two doses with no concentrations between them, return NA."),
    "Dose as the last event in the timeline and requesting a concentration after"=
      list(
        select="iecd_dose_last_select",
        warning="Cannot extrapolate from a dose without any concentrations after it.",
        description=paste("Cannot estimate the concentration after a dose",
                          "without concentrations after the dose, return NA.")),
    "Dose before, concentration after without a dose"=list(
      select="iecd_dose_conc_select",
      value="iecd_dose_conc_value",
      description="If the concentration at the dose is estimable, interpolate. Otherwise, NA."))
|
library(shiny)

# Default the date picker to today.
date1 <- Sys.Date()

ui <- fluidPage(
  mainPanel(
    img(src = "ButtersSmile.png", height = 150, width = 150),
    div("Scott's Riddle Corner!",
        style = "font-family: 'times'; font-size:48pt; color:red"),
    dateInput("date1", "Date:", value = date1, format = "mm/dd/yyyy"),
    textInput("riddle", "This week's riddle:", value = "???"),
    # Shiny input IDs must be unique within the UI; the original reused
    # "riddle" for both text inputs, which is invalid and makes the second
    # input unreliable.
    textInput("answer", "What's your answer?", value = "Give it your best shot.")
  )
)

# Server: no outputs; stop the R process when the browser session ends so a
# double-clicked/Rscript launch does not linger.
server <- function(input, output, session) {
  session$onSessionEnded(function() {
    stopApp()
  })
}

# Run the application
shinyApp(ui = ui, server = server)
| /R/RiddleCorner/app.R | no_license | zuehlkescott5/Practice | R | false | false | 699 | r |
library(shiny)

# Default the date picker to today.
date1 <- Sys.Date()

ui <- fluidPage(
  mainPanel(
    img(src = "ButtersSmile.png", height = 150, width = 150),
    div("Scott's Riddle Corner!",
        style = "font-family: 'times'; font-size:48pt; color:red"),
    dateInput("date1", "Date:", value = date1, format = "mm/dd/yyyy"),
    textInput("riddle", "This week's riddle:", value = "???"),
    # Shiny input IDs must be unique within the UI; the original reused
    # "riddle" for both text inputs, which is invalid and makes the second
    # input unreliable.
    textInput("answer", "What's your answer?", value = "Give it your best shot.")
  )
)

# Server: no outputs; stop the R process when the browser session ends so a
# double-clicked/Rscript launch does not linger.
server <- function(input, output, session) {
  session$onSessionEnded(function() {
    stopApp()
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
# Extracted examples for psych::cohen.d and related effect-size helpers.
library(psych)
### Name: cohen.d
### Title: Find Cohen d and confidence intervals
### Aliases: cohen.d d.robust cohen.d.ci d.ci cohen.d.by d2r r2d d2t t2d
###   m2t
### Keywords: models multivariate
### ** Examples
# Effect sizes for gender across all variables of the built-in sat.act data,
# then the same broken down by education level.
cohen.d(sat.act,"gender")
cd <- cohen.d.by(sat.act,"gender","education")
summary(cd) #summarize the output
#now show several examples of confidence intervals
#one group (d vs 0)
#consider the t from the cushny data set
t2d( -4.0621,n1=10)
d.ci(-1.284549,n1=10) #the confidence interval of the effect of drug on sleep
#two groups
d.ci(.62,n=64) #equal group size
d.ci(.62,n1=35,n2=29) #unequal group size
| /data/genthat_extracted_code/psych/examples/cohen.d.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 632 | r | library(psych)
### Name: cohen.d
### Title: Find Cohen d and confidence intervals
### Aliases: cohen.d d.robust cohen.d.ci d.ci cohen.d.by d2r r2d d2t t2d
### m2t
### Keywords: models multivariate
### ** Examples
# Effect size of gender on the sat.act variables, overall and broken
# down by education level.
cohen.d(sat.act, "gender")
cd <- cohen.d.by(sat.act, "gender", "education")
summary(cd)  # summarize the output
# Several examples of confidence intervals follow.
# One group (d vs 0): convert the t statistic from the cushny data set to d,
# then get the confidence interval of the effect of drug on sleep.
t2d(-4.0621, n1 = 10)
d.ci(-1.284549, n1 = 10)
# Two groups: equal group size, then unequal group sizes.
d.ci(.62, n = 64)
d.ci(.62, n1 = 35, n2 = 29)
|
#' @title Maps a function over lists or vectors in parallel.
#'
#' @description
#' Uses the parallelization mode and the other options specified in
#' \code{\link{parallelStart}}.
#'
#' Libraries and source file can be initialized on slaves with
#' \code{\link{parallelLibrary}} and \code{\link{parallelSource}}.
#'
#' Large objects can be separately exported via \code{\link{parallelExport}},
#' they can be simply used under their exported name in slave body code.
#'
#' Regarding error handling, see the argument \code{impute.error}.
#'
#' @param fun [\code{function}]\cr
#' Function to map over \code{...}.
#' @param ... [any]\cr
#' Arguments to vectorize over (list or vector).
#' @param more.args [\code{list}]\cr
#' A list of other arguments passed to \code{fun}.
#' Default is empty list.
#' @param simplify [\code{logical(1)}]\cr
#' Should the result be simplified?
#' See \code{\link{sapply}}.
#' Default is \code{FALSE}.
#' @param use.names [\code{logical(1)}]\cr
#' Should result be named by first vector if that is
#' of class character?
#' Default is \code{FALSE}.
#' @param impute.error [\code{NULL} | \code{function(x)}]\cr
#' This argument can be used for improved error handling.
#' \code{NULL} means that, if an exception is generated on one of the slaves, it is also
#' thrown on the master. Usually all slave jobs will have to terminate until this exception on
#' the master can be thrown.
#' If you pass a constant value or a function, all jobs are guaranteed to return a result object,
#' without generating an exception on the master for slave errors.
#' In case of an error,
#' this is a \code{\link{simpleError}} object containing the error message.
#' If you passed a constant object, the error-objects will be substituted with this object.
#' If you passed a function, it will be used to operate
#' on these error-objects (it will ONLY be applied to the error results).
#' For example, using \code{identity} would keep and return the \code{simpleError}-object,
#' or \code{function(x) 99} would impute a constant value
#' (which could be achieved more easily by simply passing \code{99}).
#' Default is \code{NULL}.
#' @param level [\code{character(1)}]\cr
#' If a (non-missing) level is specified in \code{\link{parallelStart}},
#' this call is only parallelized if the level specified here matches.
#' Useful if this function is used in a package.
#' Default is \code{NA}.
#' @param show.info [\code{logical(1)}]\cr
#' Verbose output on console?
#' Can be used to override setting from options / \code{\link{parallelStart}}.
#' Default is NA which means no overriding.
#' @return Result.
#' @export
#' @examples
#' parallelStart()
#' parallelMap(identity, 1:2)
#' parallelStop()
parallelMap = function(fun, ..., more.args = list(), simplify = FALSE, use.names = FALSE,
  impute.error = NULL, level = NA_character_, show.info = NA) {
  # Validate user arguments up front (checkmate-style assertions).
  assertFunction(fun)
  assertList(more.args)
  assertFlag(simplify)
  assertFlag(use.names)
  # if it is a constant value construct function to impute
  if (!is.null(impute.error)) {
    if (is.function(impute.error))
      impute.error.fun = impute.error
    else
      impute.error.fun = function(x) impute.error
  }
  assertString(level, na.ok = TRUE)
  assertFlag(show.info, na.ok = TRUE)
  if (!is.na(level) && level %nin% unlist(getPMOption("registered.levels", list())))
    stopf("Level '%s' not registered", level)
  # Pull the parallelization settings that parallelStart() stored in options.
  cpus = getPMOptCpus()
  load.balancing = getPMOptLoadBalancing()
  logging = getPMOptLogging()
  # use NA to encode "no logging" in logdir
  logdir = ifelse(logging, getNextLogDir(), NA_character_)
  # Sequential path: local mode, non-matching level, or already on a slave
  # (no nested parallelization).
  if (isModeLocal() || !isParallelizationLevel(level) || getPMOptOnSlave()) {
    if (!is.null(impute.error)) {
      # so we behave in local mode as in parallelSlaveWrapper
      fun2 = function (...) {
        res = try(fun(...), silent = getOption("parallelMap.suppress.local.errors"))
        if (is.error(res)) {
          res = list(try.object = res)
          class(res) = "parallelMapErrorWrapper"
        }
        return(res)
      }
    } else {
      fun2 = fun
    }
    # Make objects exported via parallelExport() visible in fun's environment.
    assignInFunctionNamespace(fun, env = PKG_LOCAL_ENV)
    res = mapply(fun2, ..., MoreArgs = more.args, SIMPLIFY = FALSE, USE.NAMES = FALSE)
  } else {
    iters = seq_along(..1)
    showInfoMessage("Mapping in parallel%s: mode = %s; level = %s; cpus = %i; elements = %i.",
      ifelse(load.balancing, " (load balanced)", ""), getPMOptMode(),
      level, getPMOptCpus(), length(iters), show.info = show.info)
    # Dispatch on the backend selected in parallelStart().
    if (isModeMulticore()) {
      more.args = c(list(.fun = fun, .logdir = logdir), more.args)
      res = MulticoreClusterMap(slaveWrapper, ..., .i = iters, MoreArgs = more.args, mc.cores = cpus,
        SIMPLIFY = FALSE, USE.NAMES = FALSE)
    } else if (isModeSocket() || isModeMPI()) {
      more.args = c(list(.fun = fun, .logdir = logdir), more.args)
      if (load.balancing) {
        res = clusterMapLB(cl = NULL, slaveWrapper, ..., .i = iters, MoreArgs = more.args)
      } else {
        res = clusterMap(cl = NULL, slaveWrapper, ..., .i = iters, MoreArgs = more.args, SIMPLIFY = FALSE, USE.NAMES = FALSE)
      }
    } else if (isModeBatchJobs()) {
      # don't log extra in BatchJobs (the batch system writes its own logs)
      more.args = c(list(.fun = fun, .logdir = NA_character_), more.args)
      suppressMessages({
        reg = getBatchJobsReg()
        BatchJobs:::dbRemoveJobs(reg, BatchJobs::getJobIds(reg))
        BatchJobs::batchMap(reg, slaveWrapper, ..., more.args = more.args)
        # increase max.retries a bit, we don't want to abort here prematurely
        # if no resources set we submit with the default ones from the bj conf
        BatchJobs::submitJobs(reg, resources = getPMOptBatchJobsResources(), max.retries = 15)
        ok = BatchJobs::waitForJobs(reg, stop.on.error = is.null(impute.error))
      })
      # copy log files of terminated jobs to designated dir
      if (!is.na(logdir)) {
        term = BatchJobs::findTerminated(reg)
        fns = BatchJobs::getLogFiles(reg, term)
        dests = file.path(logdir, sprintf("%05i.log", term))
        file.copy(from = fns, to = dests)
      }
      ids = BatchJobs::getJobIds(reg)
      ids.err = BatchJobs::findErrors(reg)
      ids.exp = BatchJobs::findExpired(reg)
      ids.done = BatchJobs::findDone(reg)
      ids.notdone = c(ids.err, ids.exp)
      # construct notdone error messages
      msgs = rep("Job expired!", length(ids.notdone))
      # NOTE(review): msgs has length(ids.notdone), with the error jobs in the
      # first positions, but this indexes by the job ids themselves (ids.err).
      # That is only correct when job ids coincide with positions 1..k —
      # confirm against the registry's id scheme; otherwise this should index
      # by position, e.g. seq_along(ids.err).
      msgs[ids.err] = BatchJobs::getErrorMessages(reg, ids.err)
      # handle errors (no impute): kill other jobs + stop on master
      if (is.null(impute.error) && length(c(ids.notdone)) > 0) {
        extra.msg = sprintf("Please note that remaining jobs were killed when 1st error occurred to save cluster time.\nIf you want to further debug errors, your BatchJobs registry is here:\n%s",
          reg$file.dir)
        onsys = BatchJobs::findOnSystem(reg)
        suppressMessages(
          BatchJobs::killJobs(reg, onsys)
        )
        # re-query: some jobs may still be running despite the kill attempt
        onsys = BatchJobs::findOnSystem(reg)
        if (length(onsys) > 0L)
          warningf("Still %i jobs from operation on system! kill them manually!", length(onsys))
        if (length(ids.notdone) > 0L)
          stopWithJobErrorMessages(ids.notdone, msgs, extra.msg)
      }
      # if we reached this line and error occurred, we have impute.error != NULL (NULL --> stop before)
      res = vector("list", length(ids))
      res[ids.done] = BatchJobs::loadResults(reg, simplify = FALSE, use.names = FALSE)
      res[ids.notdone] = lapply(msgs, function(s) impute.error.fun(simpleError(s)))
    } else if (isModeBatchtools()) {
      # don't log extra in batchtools
      more.args = insert(more.args, list(.fun = fun, .logdir = NA_character_))
      old = getOption("batchtools.verbose")
      options(batchtools.verbose = FALSE)
      on.exit(options(batchtools.verbose = old))
      reg = getBatchtoolsReg()
      if (nrow(reg$status) > 0L)
        batchtools::clearRegistry(reg = reg)
      ids = batchtools::batchMap(fun = slaveWrapper, ..., more.args = more.args, reg = reg)
      batchtools::submitJobs(ids = ids, resources = getPMOptBatchtoolsResources(), reg = reg)
      ok = batchtools::waitForJobs(ids = ids, stop.on.error = is.null(impute.error), reg = reg)
      # copy log files of terminated jobs to designated directory
      if (!is.na(logdir)) {
        x = batchtools::findStarted(reg = reg)
        x$log.file = file.path(reg$file.dir, "logs", sprintf("%s.log", x$job.hash))
        .mapply(function(id, fn) writeLines(batchtools::getLog(id, reg = reg), con = fn), x, NULL)
      }
      if (ok) {
        res = batchtools::reduceResultsList(ids, reg = reg)
      } else {
        if (is.null(impute.error)) {
          extra.msg = sprintf("Please note that remaining jobs were killed when 1st error occurred to save cluster time.\nIf you want to further debug errors, your batchtools registry is here:\n%s",
            reg$file.dir)
          batchtools::killJobs(reg = reg)
          ids.notdone = batchtools::findNotDone(reg = reg)
          stopWithJobErrorMessages(
            inds = ids.notdone$job.id,
            batchtools::getErrorMessages(ids.notdone, missing.as.error = TRUE, reg = reg)$message,
            extra.msg)
        } else { # if we reached this line and error occurred, we have impute.error != NULL (NULL --> stop before)
          res = batchtools::findJobs(reg = reg)
          res$result = list()
          ids.complete = batchtools::findDone(reg = reg)
          ids.incomplete = batchtools::findNotDone(reg = reg)
          res[ids.complete, "result" := batchtools::reduceResultsList(ids.complete, reg = reg), with = FALSE]
          # NOTE(review): this line assigns into 'ids' (the batchMap id table),
          # not 'res', and indexes by ids.complete while taking the error
          # messages of ids.incomplete. It looks like it should read
          # res[ids.incomplete, ...] — confirm against upstream parallelMap
          # before relying on the imputed-error results of this branch.
          ids[ids.complete, "result" := lapply(batchtools::getErrorMessages(ids.incomplete, reg = reg)$message, simpleError), with = FALSE]
        }
      }
    }
  }
  # handle potential errors in res, depending on user setting
  if (is.null(impute.error)) {
    checkResultsAndStopWithErrorsMessages(res)
  } else {
    res = lapply(res, function(x) {
      if (inherits(x, "parallelMapErrorWrapper"))
        impute.error.fun(attr(x$try.object, "condition"))
      else
        x
    })
  }
  if (use.names && is.character(..1)) {
    names(res) = ..1
  }
  if (!use.names) {
    names(res) = NULL
  }
  # NOTE(review): simplify was asserted above as a logical flag, so
  # simplify == "array" is always FALSE here; higher-dimensional
  # simplification is unreachable — confirm intent.
  if (isTRUE(simplify) && length(res) > 0L)
    res = simplify2array(res, higher = (simplify == "array"))
  # count number of mapping operations for log dir
  options(parallelMap.nextmap = (getPMOptNextMap() + 1L))
  return(res)
}
# Runs one mapped element on a slave.
#
# .i:      index of the element (used only to name the log file).
# .fun:    the user function to apply.
# .logdir: directory for per-job logs; NA_character_ disables logging.
#
# Returns .fun(...)'s value, or a "parallelMapErrorWrapper" list wrapping the
# try-error so the master can handle failures itself (clusterMap would
# otherwise act on a raw error object).
slaveWrapper = function(..., .i, .fun, .logdir = NA_character_) {
  if (!is.na(.logdir)) {
    options(warning.length = 8170L, warn = 1L)
    .fn = file.path(.logdir, sprintf("%05i.log", .i))
    .fn = file(.fn, open = "wt")
    .start.time = as.integer(Sys.time())
    sink(.fn)
    sink(.fn, type = "message")
    # BUG FIX: restore BOTH sinks (the message sink was previously never
    # reset) and close the log connection (previously leaked). Registered
    # with add = TRUE semantics below so it is not discarded.
    on.exit({
      sink(type = "message")
      sink(NULL)
      close(.fn)
    }, add = TRUE)
  }
  # make sure we don't parallelize any further
  options(parallelMap.on.slave = TRUE)
  # just make sure, we should not have changed anything on the master
  # except for BatchJobs / interactive.
  # BUG FIX: add = TRUE — the original call here silently replaced the sink
  # cleanup handler registered above.
  on.exit(options(parallelMap.on.slave = FALSE), add = TRUE)
  # wrap in try block so we can handle error on master
  res = try(.fun(...))
  if (is.error(res)) {
    res = list(try.object = res)
    class(res) = "parallelMapErrorWrapper"
  }
  if (!is.na(.logdir)) {
    .end.time = as.integer(Sys.time())
    print(gc())
    # still inside the sinks, so this lands in the per-job log file
    message(sprintf("Job time in seconds: %i", .end.time - .start.time))
  }
  return(res)
}
# Injects objects into the environment of `fun` so they are visible during
# the call: first everything found in `env` (e.g. objects exported via
# parallelExport() into PKG_LOCAL_ENV), then every named element of `li`.
# Entries from `li` win when a name occurs in both.
assignInFunctionNamespace = function(fun, li = list(), env = new.env()) {
  target = environment(fun)
  for (nm in ls(env)) {
    assign(nm, get(nm, envir = env), envir = target)
  }
  for (nm in names(li)) {
    assign(nm, li[[nm]], envir = target)
  }
}
| /R/parallelMap.R | no_license | shadogray/parallelMap | R | false | false | 12,002 | r | #' @title Maps a function over lists or vectors in parallel.
#'
#' @description
#' Uses the parallelization mode and the other options specified in
#' \code{\link{parallelStart}}.
#'
#' Libraries and source file can be initialized on slaves with
#' \code{\link{parallelLibrary}} and \code{\link{parallelSource}}.
#'
#' Large objects can be separately exported via \code{\link{parallelExport}},
#' they can be simply used under their exported name in slave body code.
#'
#' Regarding error handling, see the argument \code{impute.error}.
#'
#' @param fun [\code{function}]\cr
#' Function to map over \code{...}.
#' @param ... [any]\cr
#' Arguments to vectorize over (list or vector).
#' @param more.args [\code{list}]\cr
#' A list of other arguments passed to \code{fun}.
#' Default is empty list.
#' @param simplify [\code{logical(1)}]\cr
#' Should the result be simplified?
#' See \code{\link{sapply}}.
#' Default is \code{FALSE}.
#' @param use.names [\code{logical(1)}]\cr
#' Should result be named by first vector if that is
#' of class character?
#' Default is \code{FALSE}.
#' @param impute.error [\code{NULL} | \code{function(x)}]\cr
#' This argument can be used for improved error handling.
#' \code{NULL} means that, if an exception is generated on one of the slaves, it is also
#' thrown on the master. Usually all slave jobs will have to terminate until this exception on
#' the master can be thrown.
#' If you pass a constant value or a function, all jobs are guaranteed to return a result object,
#' without generating an exception on the master for slave errors.
#' In case of an error,
#' this is a \code{\link{simpleError}} object containing the error message.
#' If you passed a constant object, the error-objects will be substituted with this object.
#' If you passed a function, it will be used to operate
#' on these error-objects (it will ONLY be applied to the error results).
#' For example, using \code{identity} would keep and return the \code{simpleError}-object,
#' or \code{function(x) 99} would impute a constant value
#' (which could be achieved more easily by simply passing \code{99}).
#' Default is \code{NULL}.
#' @param level [\code{character(1)}]\cr
#' If a (non-missing) level is specified in \code{\link{parallelStart}},
#' this call is only parallelized if the level specified here matches.
#' Useful if this function is used in a package.
#' Default is \code{NA}.
#' @param show.info [\code{logical(1)}]\cr
#' Verbose output on console?
#' Can be used to override setting from options / \code{\link{parallelStart}}.
#' Default is NA which means no overriding.
#' @return Result.
#' @export
#' @examples
#' parallelStart()
#' parallelMap(identity, 1:2)
#' parallelStop()
parallelMap = function(fun, ..., more.args = list(), simplify = FALSE, use.names = FALSE,
  impute.error = NULL, level = NA_character_, show.info = NA) {
  # Validate user arguments up front (checkmate-style assertions).
  assertFunction(fun)
  assertList(more.args)
  assertFlag(simplify)
  assertFlag(use.names)
  # if it is a constant value construct function to impute
  if (!is.null(impute.error)) {
    if (is.function(impute.error))
      impute.error.fun = impute.error
    else
      impute.error.fun = function(x) impute.error
  }
  assertString(level, na.ok = TRUE)
  assertFlag(show.info, na.ok = TRUE)
  if (!is.na(level) && level %nin% unlist(getPMOption("registered.levels", list())))
    stopf("Level '%s' not registered", level)
  # Pull the parallelization settings that parallelStart() stored in options.
  cpus = getPMOptCpus()
  load.balancing = getPMOptLoadBalancing()
  logging = getPMOptLogging()
  # use NA to encode "no logging" in logdir
  logdir = ifelse(logging, getNextLogDir(), NA_character_)
  # Sequential path: local mode, non-matching level, or already on a slave
  # (no nested parallelization).
  if (isModeLocal() || !isParallelizationLevel(level) || getPMOptOnSlave()) {
    if (!is.null(impute.error)) {
      # so we behave in local mode as in parallelSlaveWrapper
      fun2 = function (...) {
        res = try(fun(...), silent = getOption("parallelMap.suppress.local.errors"))
        if (is.error(res)) {
          res = list(try.object = res)
          class(res) = "parallelMapErrorWrapper"
        }
        return(res)
      }
    } else {
      fun2 = fun
    }
    # Make objects exported via parallelExport() visible in fun's environment.
    assignInFunctionNamespace(fun, env = PKG_LOCAL_ENV)
    res = mapply(fun2, ..., MoreArgs = more.args, SIMPLIFY = FALSE, USE.NAMES = FALSE)
  } else {
    iters = seq_along(..1)
    showInfoMessage("Mapping in parallel%s: mode = %s; level = %s; cpus = %i; elements = %i.",
      ifelse(load.balancing, " (load balanced)", ""), getPMOptMode(),
      level, getPMOptCpus(), length(iters), show.info = show.info)
    # Dispatch on the backend selected in parallelStart().
    if (isModeMulticore()) {
      more.args = c(list(.fun = fun, .logdir = logdir), more.args)
      res = MulticoreClusterMap(slaveWrapper, ..., .i = iters, MoreArgs = more.args, mc.cores = cpus,
        SIMPLIFY = FALSE, USE.NAMES = FALSE)
    } else if (isModeSocket() || isModeMPI()) {
      more.args = c(list(.fun = fun, .logdir = logdir), more.args)
      if (load.balancing) {
        res = clusterMapLB(cl = NULL, slaveWrapper, ..., .i = iters, MoreArgs = more.args)
      } else {
        res = clusterMap(cl = NULL, slaveWrapper, ..., .i = iters, MoreArgs = more.args, SIMPLIFY = FALSE, USE.NAMES = FALSE)
      }
    } else if (isModeBatchJobs()) {
      # don't log extra in BatchJobs (the batch system writes its own logs)
      more.args = c(list(.fun = fun, .logdir = NA_character_), more.args)
      suppressMessages({
        reg = getBatchJobsReg()
        BatchJobs:::dbRemoveJobs(reg, BatchJobs::getJobIds(reg))
        BatchJobs::batchMap(reg, slaveWrapper, ..., more.args = more.args)
        # increase max.retries a bit, we don't want to abort here prematurely
        # if no resources set we submit with the default ones from the bj conf
        BatchJobs::submitJobs(reg, resources = getPMOptBatchJobsResources(), max.retries = 15)
        ok = BatchJobs::waitForJobs(reg, stop.on.error = is.null(impute.error))
      })
      # copy log files of terminated jobs to designated dir
      if (!is.na(logdir)) {
        term = BatchJobs::findTerminated(reg)
        fns = BatchJobs::getLogFiles(reg, term)
        dests = file.path(logdir, sprintf("%05i.log", term))
        file.copy(from = fns, to = dests)
      }
      ids = BatchJobs::getJobIds(reg)
      ids.err = BatchJobs::findErrors(reg)
      ids.exp = BatchJobs::findExpired(reg)
      ids.done = BatchJobs::findDone(reg)
      ids.notdone = c(ids.err, ids.exp)
      # construct notdone error messages
      msgs = rep("Job expired!", length(ids.notdone))
      # NOTE(review): msgs has length(ids.notdone), with the error jobs in the
      # first positions, but this indexes by the job ids themselves (ids.err).
      # That is only correct when job ids coincide with positions 1..k —
      # confirm against the registry's id scheme; otherwise this should index
      # by position, e.g. seq_along(ids.err).
      msgs[ids.err] = BatchJobs::getErrorMessages(reg, ids.err)
      # handle errors (no impute): kill other jobs + stop on master
      if (is.null(impute.error) && length(c(ids.notdone)) > 0) {
        extra.msg = sprintf("Please note that remaining jobs were killed when 1st error occurred to save cluster time.\nIf you want to further debug errors, your BatchJobs registry is here:\n%s",
          reg$file.dir)
        onsys = BatchJobs::findOnSystem(reg)
        suppressMessages(
          BatchJobs::killJobs(reg, onsys)
        )
        # re-query: some jobs may still be running despite the kill attempt
        onsys = BatchJobs::findOnSystem(reg)
        if (length(onsys) > 0L)
          warningf("Still %i jobs from operation on system! kill them manually!", length(onsys))
        if (length(ids.notdone) > 0L)
          stopWithJobErrorMessages(ids.notdone, msgs, extra.msg)
      }
      # if we reached this line and error occurred, we have impute.error != NULL (NULL --> stop before)
      res = vector("list", length(ids))
      res[ids.done] = BatchJobs::loadResults(reg, simplify = FALSE, use.names = FALSE)
      res[ids.notdone] = lapply(msgs, function(s) impute.error.fun(simpleError(s)))
    } else if (isModeBatchtools()) {
      # don't log extra in batchtools
      more.args = insert(more.args, list(.fun = fun, .logdir = NA_character_))
      old = getOption("batchtools.verbose")
      options(batchtools.verbose = FALSE)
      on.exit(options(batchtools.verbose = old))
      reg = getBatchtoolsReg()
      if (nrow(reg$status) > 0L)
        batchtools::clearRegistry(reg = reg)
      ids = batchtools::batchMap(fun = slaveWrapper, ..., more.args = more.args, reg = reg)
      batchtools::submitJobs(ids = ids, resources = getPMOptBatchtoolsResources(), reg = reg)
      ok = batchtools::waitForJobs(ids = ids, stop.on.error = is.null(impute.error), reg = reg)
      # copy log files of terminated jobs to designated directory
      if (!is.na(logdir)) {
        x = batchtools::findStarted(reg = reg)
        x$log.file = file.path(reg$file.dir, "logs", sprintf("%s.log", x$job.hash))
        .mapply(function(id, fn) writeLines(batchtools::getLog(id, reg = reg), con = fn), x, NULL)
      }
      if (ok) {
        res = batchtools::reduceResultsList(ids, reg = reg)
      } else {
        if (is.null(impute.error)) {
          extra.msg = sprintf("Please note that remaining jobs were killed when 1st error occurred to save cluster time.\nIf you want to further debug errors, your batchtools registry is here:\n%s",
            reg$file.dir)
          batchtools::killJobs(reg = reg)
          ids.notdone = batchtools::findNotDone(reg = reg)
          stopWithJobErrorMessages(
            inds = ids.notdone$job.id,
            batchtools::getErrorMessages(ids.notdone, missing.as.error = TRUE, reg = reg)$message,
            extra.msg)
        } else { # if we reached this line and error occurred, we have impute.error != NULL (NULL --> stop before)
          res = batchtools::findJobs(reg = reg)
          res$result = list()
          ids.complete = batchtools::findDone(reg = reg)
          ids.incomplete = batchtools::findNotDone(reg = reg)
          res[ids.complete, "result" := batchtools::reduceResultsList(ids.complete, reg = reg), with = FALSE]
          # NOTE(review): this line assigns into 'ids' (the batchMap id table),
          # not 'res', and indexes by ids.complete while taking the error
          # messages of ids.incomplete. It looks like it should read
          # res[ids.incomplete, ...] — confirm against upstream parallelMap
          # before relying on the imputed-error results of this branch.
          ids[ids.complete, "result" := lapply(batchtools::getErrorMessages(ids.incomplete, reg = reg)$message, simpleError), with = FALSE]
        }
      }
    }
  }
  # handle potential errors in res, depending on user setting
  if (is.null(impute.error)) {
    checkResultsAndStopWithErrorsMessages(res)
  } else {
    res = lapply(res, function(x) {
      if (inherits(x, "parallelMapErrorWrapper"))
        impute.error.fun(attr(x$try.object, "condition"))
      else
        x
    })
  }
  if (use.names && is.character(..1)) {
    names(res) = ..1
  }
  if (!use.names) {
    names(res) = NULL
  }
  # NOTE(review): simplify was asserted above as a logical flag, so
  # simplify == "array" is always FALSE here; higher-dimensional
  # simplification is unreachable — confirm intent.
  if (isTRUE(simplify) && length(res) > 0L)
    res = simplify2array(res, higher = (simplify == "array"))
  # count number of mapping operations for log dir
  options(parallelMap.nextmap = (getPMOptNextMap() + 1L))
  return(res)
}
# Runs one mapped element on a slave.
#
# .i:      index of the element (used only to name the log file).
# .fun:    the user function to apply.
# .logdir: directory for per-job logs; NA_character_ disables logging.
#
# Returns .fun(...)'s value, or a "parallelMapErrorWrapper" list wrapping the
# try-error so the master can handle failures itself (clusterMap would
# otherwise act on a raw error object).
slaveWrapper = function(..., .i, .fun, .logdir = NA_character_) {
  if (!is.na(.logdir)) {
    options(warning.length = 8170L, warn = 1L)
    .fn = file.path(.logdir, sprintf("%05i.log", .i))
    .fn = file(.fn, open = "wt")
    .start.time = as.integer(Sys.time())
    sink(.fn)
    sink(.fn, type = "message")
    # BUG FIX: restore BOTH sinks (the message sink was previously never
    # reset) and close the log connection (previously leaked). Registered
    # with add = TRUE semantics below so it is not discarded.
    on.exit({
      sink(type = "message")
      sink(NULL)
      close(.fn)
    }, add = TRUE)
  }
  # make sure we don't parallelize any further
  options(parallelMap.on.slave = TRUE)
  # just make sure, we should not have changed anything on the master
  # except for BatchJobs / interactive.
  # BUG FIX: add = TRUE — the original call here silently replaced the sink
  # cleanup handler registered above.
  on.exit(options(parallelMap.on.slave = FALSE), add = TRUE)
  # wrap in try block so we can handle error on master
  res = try(.fun(...))
  if (is.error(res)) {
    res = list(try.object = res)
    class(res) = "parallelMapErrorWrapper"
  }
  if (!is.na(.logdir)) {
    .end.time = as.integer(Sys.time())
    print(gc())
    # still inside the sinks, so this lands in the per-job log file
    message(sprintf("Job time in seconds: %i", .end.time - .start.time))
  }
  return(res)
}
# Injects objects into the environment of `fun` so they are visible during
# the call: first everything found in `env` (e.g. objects exported via
# parallelExport() into PKG_LOCAL_ENV), then every named element of `li`.
# Entries from `li` win when a name occurs in both.
assignInFunctionNamespace = function(fun, li = list(), env = new.env()) {
  target = environment(fun)
  for (nm in ls(env)) {
    assign(nm, get(nm, envir = env), envir = target)
  }
  for (nm in names(li)) {
    assign(nm, li[[nm]], envir = target)
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mobr_boxplots.R
\name{calc_PIE}
\alias{calc_PIE}
\title{Calculate probability of interspecific encounter (PIE)}
\usage{
calc_PIE(x, ENS = FALSE)
}
\arguments{
\item{x}{can either be a: 1) mob_in object, 2) community matrix-like
object in which rows represent plots and columns represent species, or 3)
a vector which contains the abundance of each species.}
\item{ENS}{Boolean that determines if the effective number of species should
be returned or the raw PIE value. Defaults to FALSE}
}
\description{
\code{calc_PIE} returns the probability of interspecific encounter (PIE)
which is also known as Simpson's evenness index and Gini-Simpson index. For \code{ENS=TRUE},
PIE will be converted to an asymptotic effective number of species (S_PIE).
}
\details{
The formula of Hurlbert (1971) is used to calculate PIE:
\eqn{PIE = N /(N - 1) * (1 - \sum p_i^2)}
where N is the total number of individuals and \eqn{p_i} is the relative abundance
of species i. This formulation uses sampling without replacement and it is
sometimes referred to as the bias corrected formulation of PIE.
For \code{ENS = TRUE}, S_PIE will be returned which represents the species richness of
a hypothetical community with equally-abundant species and infinitely many individuals
corresponding to the observed value of PIE. It is computed as
\eqn{S_PIE = 1 /(1 - PIE)}, which is equal to the
asymptotic estimator for Hill numbers of diversity order 2 provided by Chao et al (2014).
Note that S_PIE is undefined for communities with exactly one individual per species.
The code in this function borrows heavily from the function vegan::diversity()
but computes a different quantity. The function vegan::diversity() computes
PIE when sampling with replacement is assumed. The difference between the two
formulations will decrease as N becomes large. Jari Oksanen and Bob O'Hara are
the original authors of the function vegan::diversity().
}
\examples{
data(inv_comm)
calc_PIE(inv_comm)
calc_PIE(inv_comm, ENS=TRUE)
}
\references{
Hurlbert, S. H. (1971) The nonconcept of species diversity: a critique and
alternative parameters. Ecology 52, 577-586.
Chao, A., Gotelli, N. J., Hsieh, T. C., Sander, E. L., Ma, K. H., Colwell, R. K., & Ellison, A. M. (2014).
Rarefaction and extrapolation with Hill numbers: A framework for sampling and estimation in species diversity studies.
Ecological Monographs 84(1), 45-67.
}
\author{
Dan McGlinn, Thore Engel
}
| /man/calc_PIE.Rd | permissive | cran/mobr | R | false | true | 2,568 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mobr_boxplots.R
\name{calc_PIE}
\alias{calc_PIE}
\title{Calculate probability of interspecific encounter (PIE)}
\usage{
calc_PIE(x, ENS = FALSE)
}
\arguments{
\item{x}{can either be a: 1) mob_in object, 2) community matrix-like
object in which rows represent plots and columns represent species, or 3)
a vector which contains the abundance of each species.}
\item{ENS}{Boolean that determines if the effective number of species should
be returned or the raw PIE value. Defaults to FALSE}
}
\description{
\code{calc_PIE} returns the probability of interspecific encounter (PIE)
which is also known as Simpson's evenness index and Gini-Simpson index. For \code{ENS=TRUE},
PIE will be converted to an asymptotic effective number of species (S_PIE).
}
\details{
The formula of Hurlbert (1971) is used to calculate PIE:
\eqn{PIE = N /(N - 1) * (1 - \sum p_i^2)}
where N is the total number of individuals and \eqn{p_i} is the relative abundance
of species i. This formulation uses sampling without replacement and it is
sometimes referred to as the bias corrected formulation of PIE.
For \code{ENS = TRUE}, S_PIE will be returned which represents the species richness of
a hypothetical community with equally-abundant species and infinitely many individuals
corresponding to the observed value of PIE. It is computed as
\eqn{S_PIE = 1 /(1 - PIE)}, which is equal to the
asymptotic estimator for Hill numbers of diversity order 2 provided by Chao et al (2014).
Note that S_PIE is undefined for communities with exactly one individual per species.
The code in this function borrows heavily from the function vegan::diversity()
but computes a different quantity. The function vegan::diversity() computes
PIE when sampling with replacement is assumed. The difference between the two
formulations will decrease as N becomes large. Jari Oksanen and Bob O'Hara are
the original authors of the function vegan::diversity().
}
\examples{
data(inv_comm)
calc_PIE(inv_comm)
calc_PIE(inv_comm, ENS=TRUE)
}
\references{
Hurlbert, S. H. (1971) The nonconcept of species diversity: a critique and
alternative parameters. Ecology 52, 577-586.
Chao, A., Gotelli, N. J., Hsieh, T. C., Sander, E. L., Ma, K. H., Colwell, R. K., & Ellison, A. M. (2014).
Rarefaction and extrapolation with Hill numbers: A framework for sampling and estimation in species diversity studies.
Ecological Monographs 84(1), 45-67.
}
\author{
Dan McGlinn, Thore Engel
}
|
# Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of Covid19EstimationHydroxychloroquine2
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Upload results to OHDSI server
#'
#' @details
#' This function uploads the 'AllResults_<databaseId>.zip' to the OHDSI SFTP server. Before sending, you can inspect the zip file,
#' wich contains (zipped) CSV files. You can send the zip file from a different computer than the one on which is was created.
#'
#' @param privateKeyFileName A character string denoting the path to the RSA private key provided by the study coordinator.
#' @param userName A character string containing the user name provided by the study coordinator.
#' @param outputFolder Name of local folder to place results; make sure to use forward slashes
#' (/). Do not use a folder on a network drive since this greatly impacts
#' performance.
#'
#' @export
uploadResults <- function(outputFolder, privateKeyFileName, userName) {
  # Locate the single results archive produced by execute().
  fileName <- list.files(outputFolder, "^Results.*.zip$", full.names = TRUE)
  if (length(fileName) == 0) {
    stop("Could not find results file in folder. Did you run (and complete) execute?")
  }
  # BUG FIX: the original repeated the `== 0` test here, so multiple matching
  # archives were never detected and an ambiguous vector of file names was
  # passed on to the upload.
  if (length(fileName) > 1) {
    stop("Multiple results files found. Don't know which one to upload")
  }
  OhdsiSharing::sftpUploadFile(privateKeyFileName = privateKeyFileName,
                               userName = userName,
                               remoteFolder = "Covid19EstimationHydroxychloroquine2",
                               fileName = fileName)
  ParallelLogger::logInfo("Finished uploading")
}
#
# This file is part of Covid19EstimationHydroxychloroquine2
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Upload results to OHDSI server
#'
#' @details
#' This function uploads the 'AllResults_<databaseId>.zip' to the OHDSI SFTP server. Before sending, you can inspect the zip file,
#' wich contains (zipped) CSV files. You can send the zip file from a different computer than the one on which is was created.
#'
#' @param privateKeyFileName A character string denoting the path to the RSA private key provided by the study coordinator.
#' @param userName A character string containing the user name provided by the study coordinator.
#' @param outputFolder Name of local folder to place results; make sure to use forward slashes
#' (/). Do not use a folder on a network drive since this greatly impacts
#' performance.
#'
#' @export
uploadResults <- function(outputFolder, privateKeyFileName, userName) {
  # Locate the single results archive produced by execute().
  fileName <- list.files(outputFolder, "^Results.*.zip$", full.names = TRUE)
  if (length(fileName) == 0) {
    stop("Could not find results file in folder. Did you run (and complete) execute?")
  }
  # BUG FIX: the original repeated the `== 0` test here, so multiple matching
  # archives were never detected and an ambiguous vector of file names was
  # passed on to the upload.
  if (length(fileName) > 1) {
    stop("Multiple results files found. Don't know which one to upload")
  }
  OhdsiSharing::sftpUploadFile(privateKeyFileName = privateKeyFileName,
                               userName = userName,
                               remoteFolder = "Covid19EstimationHydroxychloroquine2",
                               fileName = fileName)
  ParallelLogger::logInfo("Finished uploading")
}
######Understory Non-Tree Variables######
#Percentage cover 1m, upper and lower extent of height
#Abbreviates row headings in veg plot measurements
names(sstp1)[7:8]<-c("Cov","Bas")
#Reshaped 1 meter small tree plot veg measurements so that each stp record year has its
#own associated Forb, Grass, Low Shrub, High Shrub and Polyveg measurements
#stp<-reshape(sstp1, direction="wide",idvar=
#              c("Installation","Plot","STP","Year_Measurement"),
#            timevar="Lifeform",v.names=c("Cov","Bas","Top"))
#Manual data repair: one 2015 GS plot-5 POPR record lacked a lifeform code;
#POPR is a grass, so assign "G" before reshaping.
sstp1$Lifeform[sstp1$Installation=="GS" & sstp1$Plot==5 & sstp1$STP==4 &
                 sstp1$Year_Measurement==2015 & sstp1$Species_Primary=="POPR" &
                 !is.na(sstp1$Species_Primary)] <- "G"
#Wide reshape: one row per Installation/Plot/STP/year, with Cov/Bas/Top
#columns per lifeform (Cov.F, Bas.F, Top.F, ...); NA-lifeform rows dropped.
stp<-reshape(sstp1[!is.na(sstp1$Lifeform),], direction="wide",
             idvar=c("Installation","Plot","STP","Year_Measurement"),
             timevar="Lifeform",
             drop=c("Species_Primary","Species_Secondary","ID"),
             v.names=c("Cov","Bas","Top"))
#Note: no base and top height measurements for polyveg lifeform in 1m and 4m
#Makes NAs within coverage, base, and top veg meas columns = 0
#(reshape output columns all have "." as their 4th character, e.g. "Cov.F")
veg.names<-names(stp[,substring(names(stp),4,4)=="."])
for(i in veg.names) {
  stp[i][is.na(stp[i])] <- 0
}
#Merges plot history (trt etc) and stp 1m veg meas
veg_record<- merge(splot, stp,by=c("Installation","Plot"))
#Merges annual small tree growth records with 1m veg records for each year
annual.gr3<- merge(annual.gr[,!(names(annual.gr) %in% c("ID.y"))],
                   veg_record,
                   by=c("Installation","Plot","STP","Year_Measurement"))
#4m data not collected until 2007, ok to compare to 1m data
#that was collected throughout study (1998)?
#cuts tree records from ~5,500 to ~1,000
#Reshapes 4 meter veg measurements
#names(sstp4)[7:9]<-c("Cov4","Bas4","Top4")
#stp4<-reshape(sstp4, direction="wide",idvar=
#              c("Installation","Plot","STP","Year_Measurement"),
#            timevar="Lifeform",v.names=c("Cov4","Bas4","Top4"))
#Makes NAs within coverage, base, and top veg meas columns = 0
#veg.names<-names(stp4[,substring(names(stp4),4,4)=="."])
#for(i in veg.names) {
#  stp4[i][is.na(stp4[i])] <- 0
#}
#Merges plot history (trt etc) and stp 1m veg meas
#veg_record4<- merge(splot, stp4,by=c("Installation","Plot"))
#Merges annual small tree growth records with 4m veg records for each year
#annual.gr3<- merge(annual.gr3, veg_record4,by=c("Installation","Plot","STP","Year_Measurement"))
##1m S, F, and PLOV diffs##
#Vegetation "depth" on the 1 m plot: top minus base height per record
sstp1$diff.1m<-sstp1$Top-sstp1$Bas
#Assigns zeros to NA values (where no veg present)
veg.names<-"diff.1m"
for(i in veg.names) {
  sstp1[i][is.na(sstp1[i])] <- 0
}
#Aggregates 1m depth (diff.1m) to the plot level, by lifeform
agg.1m.data <-aggregate(sstp1$diff.1m,
                        by=list("Installation"=sstp1$Installation,
                                "Plot"=sstp1$Plot,
                                "Year_Measurement"=sstp1$Year_Measurement,
                                "Lifeform"=sstp1$Lifeform),FUN=mean)
agg.1m.data1<-reshape(agg.1m.data, direction="wide",idvar=
                        c("Installation","Plot","Year_Measurement"),
                      timevar="Lifeform",v.names="x")
veg.names<-names(agg.1m.data1[,substring(names(agg.1m.data1),2,2)=="."])
for(i in veg.names) {
  agg.1m.data1[i][is.na(agg.1m.data1[i])] <- 0
}
#NOTE(review): positional renaming assumes the reshape always yields lifeform
#columns in the order F, G, HS, LS (cols 4-7) with POLV at col 9; column 8 is
#left with its reshape name -- verify if the lifeform set ever changes.
names(agg.1m.data1)[4:7]<-c("diff.F.1m","diff.G.1m","diff.HS.1m","diff.LS.1m")
names(agg.1m.data1)[9]<-c("diff.POLV.1m")
#Merges aggregated 1m data to the "big" df, this is where .gr4 is created
annual.gr4<-merge(annual.gr3[,!(names(annual.gr3) %in% c("ID.y"))],
                  agg.1m.data1,by=c("Installation","Plot","Year_Measurement"))
##Transect Data##
names(stran)[9:10]<-c("basT","topT")
#Assigns zeros to NA values (transect points where no veg present)
veg.T.names<-names(stran[,substring(names(stran),4,4)=="T"])
for(i in veg.T.names) {
  stran[i][is.na(stran[i])] <- 0
}
#Count transect observation number
#If not figure out missing
#probably supposed to be zero
#calculate difference in top and base meas
stran$diffT<-stran$topT-stran$basT
#Aggregates transect data (mean veg depth per lifeform) to the STP level
agg.tran.data <-aggregate(stran$diffT,
                          by=list("Installation"=stran$Installation,
                                  "Plot"=stran$Plot,
                                  "STP"=stran$Transect,
                                  "Year_Measurement"=stran$Year_Measurement,
                                  "Lifeform"=stran$Lifeform1),FUN=mean)#total/number of points
#Reshapes transect data so each stp is a row
agg.tran.data1<-reshape(agg.tran.data, direction="wide",idvar=
                          c("Installation","Plot","STP","Year_Measurement"),
                        timevar="Lifeform",v.names="x")
names(agg.tran.data1)[5:8]<-c("diff.F","diff.G","diff.HS","diff.LS")
tran.names<-names(agg.tran.data1[,substring(names(agg.tran.data1),5,5)=="."])
for(i in tran.names) {
  agg.tran.data1[i][is.na(agg.tran.data1[i])] <- 0
}
#Merges aggregated transect data to the "big" df
annual.gr4<-merge(annual.gr4,
                  agg.tran.data1,by=c("Installation","Plot","STP","Year_Measurement"))
##Transect Grass Cover Data##
agg.tran.data.G <-aggregate(stranco$Pct_Grass,
                            by=list("Installation"=stranco$Installation,
                                    "Plot"=stranco$Plot,
                                    "STP"=stranco$Transect,
                                    "Year_Measurement"=stranco$Year_Measurement
                            ),FUN=mean)#total/number of points
names(agg.tran.data.G)[5]<-("tran.G")
#Merges aggregated transect data to the "big" df
annual.gr4<-merge(annual.gr4,agg.tran.data.G,by=c("Installation","Plot","STP","Year_Measurement"))
#code to remove all .y variables from df
#(merge suffixes: drop any column whose name ends in ".y", scanning name
#positions 2..17 for the suffix)
y.names<-numeric(0)
for(i in 3:18){
  y.names<-c(y.names,names(annual.gr4[,substring(names(annual.gr4),i-1,i)==".y"]))
}
annual.gr4<-annual.gr4[,! names(annual.gr4) %in% y.names]
#unsure what "NA" or "NULL" lifeforms translates to
#protocol seems to have changed in later years of the study in
#favor of not distinguishing between shrubs and forbs
#Transect grass data: mean grass top height per STP
agg.grass.data <-aggregate(strangr$Top,
                           by=list("Installation"=strangr$Installation,
                                   "Plot"=strangr$Plot,
                                   "STP"=strangr$Transect,
                                   "Year_Measurement"=strangr$Year_Measurement),FUN=mean)
names(agg.grass.data)[5]<-"grass.ht"
agg.grass.data[5][is.na(agg.grass.data[5])] <- 0
annual.gr4<-merge(annual.gr4,agg.grass.data,by=c("Installation","Plot","STP","Year_Measurement"))
#Removes trees with -Inf ht_annual...check the annual ht function
annual.gr4$inf.ht<-is.infinite(annual.gr4$ht_annual)
annual.gr4<-annual.gr4[!annual.gr4$inf.ht==TRUE,]
#Function for height difference between top height of tallest shrub on each
#stp vegplot
#Find diff between init tree height and max shrub height
# Difference between a tree's total height and the tallest shrub/forb top
# height (Top.LS / Top.HS / Top.F) recorded on the matching 1 m STP veg plot.
# Reads the global `veg_record` data frame built earlier in this script.
# NOTE: if no veg record matches, max() of nothing is -Inf (with a warning),
# so the result is +Inf -- identical to the original behaviour.
init_tree_shrub_ht_diff <- function(Installation, Plot, STP, Year, height) {
  keep <- veg_record$Installation == Installation &
    veg_record$Plot == Plot &
    veg_record$STP == STP &
    veg_record$Year_Measurement == Year
  plot_veg <- veg_record[keep, ]
  tallest <- max(plot_veg$Top.LS, plot_veg$Top.HS, plot_veg$Top.F)
  height - tallest
}
#Row-wise fill of treeminus: tree height minus tallest shrub/forb on the
#matching 1 m veg plot, via init_tree_shrub_ht_diff defined above.
annual.gr4$treeminus<-0
annual.gr4$Installation<-as.character(annual.gr4$Installation)
veg_record$Installation<-as.character(veg_record$Installation)
#NOTE(review): 1:nrow(...) misbehaves when the data frame has zero rows;
#seq_len(nrow(...)) would be safer.
for(i in 1:nrow(annual.gr4)){
  annual.gr4$treeminus[i]<-init_tree_shrub_ht_diff(
    annual.gr4$Installation[i],
    annual.gr4$Plot[i],
    annual.gr4$STP[i],
    annual.gr4$Year_Measurement[i],
    annual.gr4$Height_Total[i]
  )
}
#Function for height difference between top height of tallest shrub on each
#stp vegplot
#Find diff between init tree height and max shrub height
#Maximum (FUN=max) transect top height per STP and lifeform
agg.tran.data.max <-aggregate(stran$topT,
                              by=list("Installation"=stran$Installation,
                                      "Plot"=stran$Plot,
                                      "STP"=stran$Transect,
                                      "Year_Measurement"=stran$Year_Measurement,
                                      "Lifeform"=stran$Lifeform1),FUN=max)#total/number of points
#Reshapes transect data so each stp is a row
agg.tran.data.max1<-reshape(agg.tran.data.max, direction="wide",idvar=
                              c("Installation","Plot","STP","Year_Measurement"),
                            timevar="Lifeform",v.names="x")
#Assigns zeros to NA values (transect points where no veg present)
agg.T.names<-names(agg.tran.data.max1[,substring(names(agg.tran.data.max1),1,1)=="x"])
for(i in agg.T.names) {
  agg.tran.data.max1[i][is.na(agg.tran.data.max1[i])] <- 0
}
# Transect-based counterpart of init_tree_shrub_ht_diff, reading the global
# `agg.tran.data.max1` (per-STP maxima of transect top heights).
# IMPORTANT: despite the name, the subtraction by `height` was disabled in the
# original (kept commented out), so this returns only the maximum vegetation
# height across lifeforms (x.F, x.HS, x.LS, x.S); `height` is accepted but
# unused. Behaviour is preserved exactly.
init_tree_shrub_ht_diff_trans <- function(Installation, Plot, STP, Year, height) {
  keep <- agg.tran.data.max1$Installation == Installation &
    agg.tran.data.max1$Plot == Plot &
    agg.tran.data.max1$STP == STP &
    agg.tran.data.max1$Year_Measurement == Year
  plot_max <- agg.tran.data.max1[keep, ]
  # tree.ht.minus.shrub <- height - max.ht.shrub  # disabled in the original
  max(plot_max$x.F, plot_max$x.HS, plot_max$x.LS, plot_max$x.S)
}
#Row-wise fill of treeminus_trans. NOTE(review): per the function above, this
#column holds the max transect veg height, not a tree-minus-veg difference.
annual.gr4$treeminus_trans<-0
agg.tran.data.max1$Installation<-as.character(agg.tran.data.max1$Installation)
#NOTE(review): 1:nrow(...) misbehaves on an empty data frame; seq_len() is safer.
for(i in 1:nrow(annual.gr4)){
  annual.gr4$treeminus_trans[i]<-init_tree_shrub_ht_diff_trans(
    annual.gr4$Installation[i],
    annual.gr4$Plot[i],
    annual.gr4$STP[i],
    annual.gr4$Year_Measurement[i],
    annual.gr4$Height_Total[i]
  )
}
#Removes 6th stp plots from analysis
#Makes separate dataframe for withheld (validation) data
annual.gr6<-annual.gr4[annual.gr4$STP_rand==6,]
annual.gr4<-annual.gr4[!annual.gr4$STP_rand==6,]
#Creates crown ratio variable for annual gr4
annual.gr4$cratio<- annual.gr4$CrownLength/annual.gr4$Height_Total
#Removes tree records with negative or missing CR
annual.gr4 <- annual.gr4[!is.na(annual.gr4$cratio) & annual.gr4$cratio>=0,]
#Selects tree record of annual growth from similar installations,
#this is where .grUV is created (`sim` is defined elsewhere in the project)
annual.grUV<-annual.gr4[annual.gr4$Installation %in% sim, ]
#good point to look at not-yet removed damaged trees here with .gr4
allDamage<-annual.gr4
# #GAM for 1m polyveg cover
# gam.1m.polv<-gam(ht_annual~s(srHeight_Total)+s(Cov.POLV),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.1m.polv)
#
# par(mfrow=c(2,4),mar=c(4,4,1,2))
# plot(gam.1m.polv,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for 1m polyveg diff
# #gam.1m.polv.diff<-gam(ht_annual~s(srHeight_Total)+s(annual.grUV$diff.POLV.1m),data=annual.grUV, family=gaussian(link="identity"))
# #summary(gam.4m.polv)
# #Not enough (non-NA) data to do anything meaningful
#
# #GAM for 1m F diff
# gam.1m.F.diff<-gam(ht_annual~s(srHeight_Total)+s(annual.grUV$diff.F.1m),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.1m.F.diff)
#
# plot(gam.1m.F.diff,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for 1m LS diff
# gam.1m.LS.diff<-gam(ht_annual~s(srHeight_Total)+s(annual.grUV$diff.LS.1m),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.1m.LS.diff)
#
# plot(gam.1m.LS.diff,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for 1m HS diff
# gam.1m.HS.diff<-gam(ht_annual~s(srHeight_Total)+s(annual.grUV$diff.HS.1m),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.1m.HS.diff)
#
# plot(gam.1m.HS.diff,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
# #Print#
#
# #GAM for LS transect data
# gam.tran.LS<-gam(ht_annual~s(srHeight_Total)+s(diff.LS),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.tran.LS)
#
# par(mfrow=c(2,4),mar=c(4,4,1,2))
# plot(gam.tran.LS,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for HS transect data
# gam.tran.HS<-gam(ht_annual~s(srHeight_Total)+s(diff.HS),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.tran.HS)
#
# par(mfrow=c(2,4),mar=c(4,4,1,2))
# plot(gam.tran.HS,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for Forb transect data
# gam.tran.F<-gam(ht_annual~s(srHeight_Total)+s(diff.F),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.tran.F)
#
# plot(gam.tran.F,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for Grass transect data
# gam.tran.GR<-gam(ht_annual~s(srHeight_Total)+s(grass.ht),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.tran.GR)
#
# plot(gam.tran.GR,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
# #Print#
#
# #GAM for Grass transect data
# gam.tran.GR.cov<-gam(ht_annual~s(srHeight_Total)+s(tran.G),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.tran.GR.cov)
#
# plot(gam.tran.GR.cov,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
#
#
# #GAM for 1m HS cover
# gam.tran.HS.cov<-gam(ht_annual~s(srHeight_Total)+s(Cov.HS),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.tran.HS.cov)
#
# par(mfrow=c(2,4),mar=c(4,4,1,2))
# plot(gam.tran.HS.cov,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for 1m Grass cover
# gam.G.cov<-gam(ht_annual~s(srHeight_Total)+s(Cov.G),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.G.cov)
#
# plot(gam.G.cov,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for 1m Forb cover
# gam.F.cov<-gam(ht_annual~s(srHeight_Total)+s(Cov.F),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.F.cov)
#
# plot(gam.F.cov,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for 1m LS cover
# gam.LS.cov<-gam(ht_annual~s(srHeight_Total)+s(Cov.LS),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.LS.cov)
#
# plot(gam.LS.cov,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
# ##Print##
#
# #GAM for 1m Grass diff
# gam.G.diff.cov<-gam(ht_annual~s(srHeight_Total)+s(diff.G.1m),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.G.diff.cov)
#
# par(mfrow=c(2,4),mar=c(4,4,1,2))
# plot(gam.G.diff.cov,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for init tree ht veg diff
# init.diff.1m<-gam(ht_annual~srHeight_Total+annual.grUV$cratio+s(annual.grUV$Cov.POLV),data=annual.grUV, family=gaussian(link="identity"))
# summary(init.diff.1m)
#
#
# AIC(init.diff.1m)
# par(mfrow=c(2,4),mar=c(4,4,1,2))
# plot(init.diff.1m,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for init tree ht veg diff trans
# init.diff.1m.trans<-gam(ht_annual~s(srHeight_Total)+s(treeminus_trans),data=annual.grUV, family=gaussian(link="identity"))
# summary(init.diff.1m.trans)
#
# par(mfrow=c(2,4),mar=c(4,4,1,2))
# plot(init.diff.1m.trans,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#Labels for the 18 candidate understory variables, in the same order the
#AIC/n values are appended in the quantreg section below, so the rows of
#UV.aic line up with these names.
veg.variable<-c("Nothing","Comb.cov","F.cov","LS.cov","HS.cov",
                "F.depth","LS.depth","HS.depth",
                "F.vol","LS.vol","HS.vol",
                "F.tran","LS.tran","HS.tran","G.tran.depth",
                "G.tran.cov","mx.vg.diff.1m","mx.vg.diff.tr"
)
#Removes all Dead tree records (already done in annualization routine)
# annual.grUV<-annual.grUV[!annual.grUV$Damage %in% damageRemoved]
# annual.grUV<-annual.grUV[(annual.grUV$Height_Total!=0 & !is.na(annual.grUV$Height_Total)),]
###Quantreg
#Candidate-variable screening with median (tau = 0.5) quantile regressions:
#every model is ht_annual ~ srHeight_Total + <one veg variable> + cratio.
#AIC and sample size are accumulated in the order given by veg.variable above.
library(quantreg)
#QR for nothing (baseline model with no understory variable)
#NOTE(review): `==T` is redundant; `!is.na(x)` alone is the idiomatic form.
annual.grUV<-annual.grUV[!is.na(annual.grUV$cratio)==T,]
qrCW.noth<- rq(ht_annual~srHeight_Total+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-AIC(qrCW.noth)[1]
nlqmm.list.UV<-length(qrCW.noth$y)
#QR for 1m combined cover (forb + low shrub + high shrub)
annual.grUV$cov.comb<-annual.grUV$Cov.F+annual.grUV$Cov.LS+annual.grUV$Cov.HS
qrCW.1m.polv<- rq(ht_annual~srHeight_Total+cov.comb+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.polv)[1])
nlqmm.list.UV<-c(nlqmm.list.UV,length(qrCW.1m.polv$y))
#QR for 1m Forb cover
qrCW.1m.F<- rq(ht_annual~srHeight_Total+Cov.F+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.F)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.F$y))
#QR for 1m LOW Shrub cover
qrCW.1m.LS<- rq(ht_annual~srHeight_Total+Cov.LS+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.LS)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.LS$y))
#QR for 1m High Shrub cover
qrCW.1m.HS<- rq(ht_annual~srHeight_Total+Cov.HS+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.HS)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.HS$y))
#QR for 1m Grass cover
# qrCW.1m.G<- rq(ht_annual~srHeight_Total+Cov.G+cratio,random=~1,group=conc,tau=c(.5),data=annual.grUV)
# summary(qrCW.1m.G)
# aic list.vegCW<-c(aic list.vegCW,AIC(qrCW.1m.G)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.G$y))
#QR for 1m Forb diff
#NOTE(review): the object names qrCW.1m.F/.LS/.HS are reused below for the
#"diff" models, silently overwriting the cover fits above; the AIC values
#were already captured, so the screening results are unaffected.
qrCW.1m.F<- rq(ht_annual~srHeight_Total+diff.F.1m+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.F)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.F$y))
#QR for 1m LOW Shrub diff
qrCW.1m.LS<- rq(ht_annual~srHeight_Total+diff.LS.1m+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.LS)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.LS$y))
#QR for 1m High Shrub diff
qrCW.1m.HS<- rq(ht_annual~srHeight_Total+diff.HS.1m+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.HS)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.HS$y))
#QR for 1m Forb Volume (depth x cover / 100)
annual.grUV$F.volume<-(annual.grUV$diff.F.1m*annual.grUV$Cov.F)/100
qrCW.1m.F.vol<- rq(ht_annual~srHeight_Total+F.volume+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.F.vol)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.F.vol$y))
#QR for 1m Low Shrub Volume
annual.grUV$LS.volume<-(annual.grUV$diff.LS.1m*annual.grUV$Cov.LS)/100
qrCW.1m.LS.vol<- rq(ht_annual~srHeight_Total+LS.volume+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.LS.vol)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.LS.vol$y))
#QR for 1m High Shrub Volume
annual.grUV$HS.volume<-(annual.grUV$diff.HS.1m*annual.grUV$Cov.HS)/100
qrCW.1m.HS.vol<- rq(ht_annual~srHeight_Total+HS.volume+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.HS.vol)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.HS.vol$y))
#QR for 1m Grass diff
#qrCW.1m.G<- rq(ht_annual~srHeight_Total+diff.G.1m+cratio,tau=c(.5),data=annual.grUV)
#summary(qrCW.1m.G)
#aic list.vegCW<-c(aic list.vegCW,AIC(qrCW.1m.G)[1])
#nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.G$y))
#QR for Forb transect cover
qrCW.forb.tran<- rq(ht_annual~srHeight_Total+diff.F+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.forb.tran)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.forb.tran$y))
#QR for LS transect cover
qr.LS.tran<- rq(ht_annual~srHeight_Total+diff.LS+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qr.LS.tran)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qr.LS.tran$y))
#QR for HS transect cover
qr.HS.tran<- rq(ht_annual~srHeight_Total+diff.HS+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qr.HS.tran)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qr.HS.tran$y))
#QR for both Forb and Shrub Transect
#qrCW.forb.shrub.tran<- rq(ht_annual~srHeight_Total+diff.F+diff.S+cratio,tau=c(.5),data=annual.grUV)
#summary(qrCW.forb.shrub.tranCW)
#aic list.vegCW<-c(aic list.vegCW,AIC(qrCW.forb.shrub.tran)[1])
#QR for transect grass height
qrCW.tran.gr<- rq(ht_annual~srHeight_Total+grass.ht+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.tran.gr)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.tran.gr$y))
#QR for transect grass cover
qrCW.tran.gr.cov<- rq(ht_annual~srHeight_Total+tran.G+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.tran.gr.cov)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.tran.gr.cov$y))
#QR for init tree height max veg difference (1m veg plot)
qrCW.1m.max.vg.diff<- rq(ht_annual~srHeight_Total+treeminus+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.max.vg.diff)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.max.vg.diff$y))
#QR for init tree height max veg difference (transect)
qrCW.1m.max.vg.diff.tran<- rq(ht_annual~srHeight_Total+treeminus_trans+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.max.vg.diff.tran)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.max.vg.diff.tran$y))
#Assemble the comparison table: one row per candidate variable with its
#sample size and AIC, labelled by veg.variable.
UV.aic<-as.data.frame(cbind(nlqmm.list.UV,aic.list.vegCW))
UV.aic$aic.list.vegCW<-as.numeric(UV.aic$aic.list.vegCW)
veg.variable<-as.data.frame(veg.variable)
UV.aic<-cbind(veg.variable,UV.aic)
###CR VAR LQMM
# library(lqmm)
#
# #QR for nothing
# annual.grUV<-annual.grUV[!is.na(annual.grUV$cratio)==T,]
#
# qrCW.noth<-lqmm(ht_annual~srHeight_Total+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.noth)
# aic.lqmm.list.vegCW<-AIC(qrCW.noth)[1]
# nlqmm.list.UV<-length(qrCW.noth$y)
#
#
# #QR for 1m polyveg cover
# qrCW.1m.polv<-lqmm(ht_annual~srHeight_Total+Cov.POLV+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.polv)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.polv)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV,length(qrCW.1m.polv$y))
#
#
# #QR for 1m Forb cover
# qrCW.1m.F<-lqmm(ht_annual~srHeight_Total+Cov.F+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.F)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.F)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.F$y))
#
# #QR for 1m LOW Shrub cover
# qrCW.1m.LS<-lqmm(ht_annual~srHeight_Total+Cov.LS+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.LS)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.LS)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.LS$y))
#
# #QR for 1m High Shrub cover
# qrCW.1m.HS<-lqmm(ht_annual~srHeight_Total+Cov.HS+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.HS)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.HS)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.HS$y))
#
# #QR for 1m Grass cover
# # qrCW.1m.G<-lqmm(ht_annual~srHeight_Total+Cov.G+cratio,random=~1,group=conc,tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.G)
# # aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.G)[1])
# # nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.G$y))
#
# #QR for 1m Forb diff
# #qrCW.1m.F<-lqmm(ht_annual~srHeight_Total+diff.F.1m+cratio,tau=c(.5),data=annual.grUV)
# #summary(qrCW.1m.F)
# #aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.F)[1])
# #nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.F$y))
#
# #QR for 1m LOW Shrub diff
# qrCW.1m.LS<-lqmm(ht_annual~srHeight_Total+diff.LS.1m+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.LS)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.LS)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.LS$y))
#
# #QR for 1m High Shrub diff
# qrCW.1m.HS<-lqmm(ht_annual~srHeight_Total+diff.HS.1m+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.HS)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.HS)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.HS$y))
#
# #QR for 1m Grass diff
# #qrCW.1m.G<-lqmm(ht_annual~srHeight_Total+diff.G.1m+cratio,tau=c(.5),data=annual.grUV)
# #summary(qrCW.1m.G)
# #aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.G)[1])
# #nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.G$y))
#
#
# #QR for Forb transect cover
# #qrCW.forb.tran<-lqmm(ht_annual~srHeight_Total+diff.F+cratio,tau=c(.5),data=annual.grUV)
# #summary(qrCW.forb.tran)
# #aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.forb.tran)[1])
# #nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.forb.tran$y))
#
# #QR for LS transect cover
# qr.LS.tran<-lqmm(ht_annual~srHeight_Total+diff.LS+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qr.LS.tran)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qr.LS.tran)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qr.LS.tran$y))
#
# #QR for HS transect cover
# qr.HS.tran<-lqmm(ht_annual~srHeight_Total+diff.HS+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qr.HS.tran)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qr.HS.tran)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qr.HS.tran$y))
#
# #QR for both Forb and Shrub Transect
# #qrCW.forb.shrub.tran<-lqmm(ht_annual~srHeight_Total+diff.F+diff.S+cratio,tau=c(.5),data=annual.grUV)
# #summary(qrCW.forb.shrub.tranCW)
# #aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.forb.shrub.tran)[1])
#
# #QR for transect grass height
# qrCW.tran.gr<-lqmm(ht_annual~srHeight_Total+grass.ht+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.tran.gr)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.tran.gr)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.tran.gr$y))
#
# #QR for transect grass cover
# qrCW.tran.gr.cov<-lqmm(ht_annual~srHeight_Total+tran.G+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.tran.gr.cov)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.tran.gr.cov)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.tran.gr.cov$y))
#
# #QR for init tree height max veg difference (1m veg plot)
# qrCW.1m.max.vg.diff<-lqmm(ht_annual~srHeight_Total+treeminus+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.max.vg.diff)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.max.vg.diff)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.max.vg.diff$y))
#
# #QR for init tree height max veg difference (transect)
# qrCW.1m.max.vg.diff.tran<-lqmm(ht_annual~srHeight_Total+treeminus_trans+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.max.vg.diff.tran)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.max.vg.diff.tran)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.max.vg.diff.tran$y))
#
#
#
# UV.aic<-as.data.frame(cbind(nlqmm.list.UV,aic.lqmm.list.vegCW))
# UV.aic$aic.lqmm.list.vegCW<-as.numeric(UV.aic$aic.lqmm.list.vegCW)
#
# veg.variable<-as.data.frame(veg.variable)
#
# UV.aic<-cbind(veg.variable,UV.aic)
| /1.3.UV.variable.selection.2016.dec.16.r | no_license | kirkmire/INGY | R | false | false | 27,977 | r | ######Understory Non-Tree Variables######
#Percentage cover 1m, upper and lower extent of height
#Abbreviates row headings in veg plot measurements
names(sstp1)[7:8]<-c("Cov","Bas")
#Reshaped 1 meter small tree plot veg measurements so that each stp record year has its
#own associated Forb, Grass, Low Shrub, High Shrub and Polyveg measurements
#stp<-reshape(sstp1, direction="wide",idvar=
#              c("Installation","Plot","STP","Year_Measurement"),
#            timevar="Lifeform",v.names=c("Cov","Bas","Top"))
#Manual data repair: one 2015 GS plot-5 POPR record lacked a lifeform code;
#POPR is a grass, so assign "G" before reshaping.
sstp1$Lifeform[sstp1$Installation=="GS" & sstp1$Plot==5 & sstp1$STP==4 &
                 sstp1$Year_Measurement==2015 & sstp1$Species_Primary=="POPR" &
                 !is.na(sstp1$Species_Primary)] <- "G"
#Wide reshape: one row per Installation/Plot/STP/year, with Cov/Bas/Top
#columns per lifeform (Cov.F, Bas.F, Top.F, ...); NA-lifeform rows dropped.
stp<-reshape(sstp1[!is.na(sstp1$Lifeform),], direction="wide",
             idvar=c("Installation","Plot","STP","Year_Measurement"),
             timevar="Lifeform",
             drop=c("Species_Primary","Species_Secondary","ID"),
             v.names=c("Cov","Bas","Top"))
#Note: no base and top height measurements for polyveg lifeform in 1m and 4m
#Makes NAs within coverage, base, and top veg meas columns = 0
#(reshape output columns all have "." as their 4th character, e.g. "Cov.F")
veg.names<-names(stp[,substring(names(stp),4,4)=="."])
for(i in veg.names) {
  stp[i][is.na(stp[i])] <- 0
}
#Merges plot history (trt etc) and stp 1m veg meas
veg_record<- merge(splot, stp,by=c("Installation","Plot"))
#Merges annual small tree growth records with 1m veg records for each year
annual.gr3<- merge(annual.gr[,!(names(annual.gr) %in% c("ID.y"))],
                   veg_record,
                   by=c("Installation","Plot","STP","Year_Measurement"))
#4m data not collected until 2007, ok to compare to 1m data
#that was collected throughout study (1998)?
#cuts tree records from ~5,500 to ~1,000
#Reshapes 4 meter veg measurements
#names(sstp4)[7:9]<-c("Cov4","Bas4","Top4")
#stp4<-reshape(sstp4, direction="wide",idvar=
#              c("Installation","Plot","STP","Year_Measurement"),
#            timevar="Lifeform",v.names=c("Cov4","Bas4","Top4"))
#Makes NAs within coverage, base, and top veg meas columns = 0
#veg.names<-names(stp4[,substring(names(stp4),4,4)=="."])
#for(i in veg.names) {
#  stp4[i][is.na(stp4[i])] <- 0
#}
#Merges plot history (trt etc) and stp 1m veg meas
#veg_record4<- merge(splot, stp4,by=c("Installation","Plot"))
#Merges annual small tree growth records with 4m veg records for each year
#annual.gr3<- merge(annual.gr3, veg_record4,by=c("Installation","Plot","STP","Year_Measurement"))
##1m S, F, and PLOV diffs##
#Vegetation "depth" on the 1 m plot: top minus base height per record
sstp1$diff.1m<-sstp1$Top-sstp1$Bas
#Assigns zeros to NA values (where no veg present)
veg.names<-"diff.1m"
for(i in veg.names) {
  sstp1[i][is.na(sstp1[i])] <- 0
}
#Aggregates 1m depth (diff.1m) to the plot level, by lifeform
agg.1m.data <-aggregate(sstp1$diff.1m,
                        by=list("Installation"=sstp1$Installation,
                                "Plot"=sstp1$Plot,
                                "Year_Measurement"=sstp1$Year_Measurement,
                                "Lifeform"=sstp1$Lifeform),FUN=mean)
agg.1m.data1<-reshape(agg.1m.data, direction="wide",idvar=
                        c("Installation","Plot","Year_Measurement"),
                      timevar="Lifeform",v.names="x")
veg.names<-names(agg.1m.data1[,substring(names(agg.1m.data1),2,2)=="."])
for(i in veg.names) {
  agg.1m.data1[i][is.na(agg.1m.data1[i])] <- 0
}
#NOTE(review): positional renaming assumes the reshape always yields lifeform
#columns in the order F, G, HS, LS (cols 4-7) with POLV at col 9; column 8 is
#left with its reshape name -- verify if the lifeform set ever changes.
names(agg.1m.data1)[4:7]<-c("diff.F.1m","diff.G.1m","diff.HS.1m","diff.LS.1m")
names(agg.1m.data1)[9]<-c("diff.POLV.1m")
#Merges aggregated 1m data to the "big" df, this is where .gr4 is created
annual.gr4<-merge(annual.gr3[,!(names(annual.gr3) %in% c("ID.y"))],
                  agg.1m.data1,by=c("Installation","Plot","Year_Measurement"))
##Transect Data##
names(stran)[9:10]<-c("basT","topT")
#Assigns zeros to NA values (transect points where no veg present)
veg.T.names<-names(stran[,substring(names(stran),4,4)=="T"])
for(i in veg.T.names) {
  stran[i][is.na(stran[i])] <- 0
}
#Count transect observation number
#If not figure out missing
#probably supposed to be zero
#calculate difference in top and base meas
stran$diffT<-stran$topT-stran$basT
#Aggregates transect data (mean veg depth per lifeform) to the STP level
agg.tran.data <-aggregate(stran$diffT,
                          by=list("Installation"=stran$Installation,
                                  "Plot"=stran$Plot,
                                  "STP"=stran$Transect,
                                  "Year_Measurement"=stran$Year_Measurement,
                                  "Lifeform"=stran$Lifeform1),FUN=mean)#total/number of points
#Reshapes transect data so each stp is a row
agg.tran.data1<-reshape(agg.tran.data, direction="wide",idvar=
                          c("Installation","Plot","STP","Year_Measurement"),
                        timevar="Lifeform",v.names="x")
names(agg.tran.data1)[5:8]<-c("diff.F","diff.G","diff.HS","diff.LS")
tran.names<-names(agg.tran.data1[,substring(names(agg.tran.data1),5,5)=="."])
for(i in tran.names) {
  agg.tran.data1[i][is.na(agg.tran.data1[i])] <- 0
}
#Merges aggregated transect data to the "big" df
annual.gr4<-merge(annual.gr4,
                  agg.tran.data1,by=c("Installation","Plot","STP","Year_Measurement"))
##Transect Grass Cover Data##
agg.tran.data.G <-aggregate(stranco$Pct_Grass,
                            by=list("Installation"=stranco$Installation,
                                    "Plot"=stranco$Plot,
                                    "STP"=stranco$Transect,
                                    "Year_Measurement"=stranco$Year_Measurement
                            ),FUN=mean)#total/number of points
names(agg.tran.data.G)[5]<-("tran.G")
#Merges aggregated transect data to the "big" df
annual.gr4<-merge(annual.gr4,agg.tran.data.G,by=c("Installation","Plot","STP","Year_Measurement"))
#code to remove all .y variables from df
#(merge suffixes: drop any column whose name ends in ".y", scanning name
#positions 2..17 for the suffix)
y.names<-numeric(0)
for(i in 3:18){
  y.names<-c(y.names,names(annual.gr4[,substring(names(annual.gr4),i-1,i)==".y"]))
}
annual.gr4<-annual.gr4[,! names(annual.gr4) %in% y.names]
#unsure what "NA" or "NULL" lifeforms translates to
#protocol seems to have changed in later years of the study in
#favor of not distinguishing between shrubs and forbs
#Transect grass data: mean grass top height per STP
agg.grass.data <-aggregate(strangr$Top,
                           by=list("Installation"=strangr$Installation,
                                   "Plot"=strangr$Plot,
                                   "STP"=strangr$Transect,
                                   "Year_Measurement"=strangr$Year_Measurement),FUN=mean)
names(agg.grass.data)[5]<-"grass.ht"
agg.grass.data[5][is.na(agg.grass.data[5])] <- 0
annual.gr4<-merge(annual.gr4,agg.grass.data,by=c("Installation","Plot","STP","Year_Measurement"))
#Removes trees with -Inf ht_annual...check the annual ht function
annual.gr4$inf.ht<-is.infinite(annual.gr4$ht_annual)
annual.gr4<-annual.gr4[!annual.gr4$inf.ht==TRUE,]
#Function for height difference between top height of tallest shrub on each
#stp vegplot
#Find diff between init tree height and max shrub height
# Difference between a tree's height and the tallest shrub/forb recorded on
# the same vegplot-year, looked up in the global `veg_record` data frame.
# Returns height - max(Top.LS, Top.HS, Top.F) over the matching rows.
# Note: if no row matches, max() over empty vectors yields -Inf (with a
# warning), so the result is +Inf.
init_tree_shrub_ht_diff <- function(Installation, Plot, STP, Year, height) {
  is_match <- veg_record$Installation == Installation &
    veg_record$Plot == Plot &
    veg_record$STP == STP &
    veg_record$Year_Measurement == Year
  plot_veg <- veg_record[is_match, ]
  tallest_shrub <- max(plot_veg$Top.LS, plot_veg$Top.HS, plot_veg$Top.F)
  height - tallest_shrub
}
# Row-wise fill of treeminus: for each tree record, the difference between
# its total height and the tallest shrub/forb on the same vegplot-year.
annual.gr4$treeminus<-0
# Coerce the Installation key to character on both sides so the ==
# comparisons inside the lookup function are not factor-level dependent.
annual.gr4$Installation<-as.character(annual.gr4$Installation)
veg_record$Installation<-as.character(veg_record$Installation)
for(i in 1:nrow(annual.gr4)){
annual.gr4$treeminus[i]<-init_tree_shrub_ht_diff(
annual.gr4$Installation[i],
annual.gr4$Plot[i],
annual.gr4$STP[i],
annual.gr4$Year_Measurement[i],
annual.gr4$Height_Total[i]
)
}
#Function for height difference between top height of tallest shrub on each
#stp vegplot
#Find diff between init tree height and max shrub height
# Maximum top height per transect and lifeform (FUN=max here, unlike the
# mean-based aggregations above; the trailing comment is a copy-paste leftover).
agg.tran.data.max <-aggregate(stran$topT,
by=list("Installation"=stran$Installation,
"Plot"=stran$Plot,
"STP"=stran$Transect,
"Year_Measurement"=stran$Year_Measurement,
"Lifeform"=stran$Lifeform1),FUN=max)#total/number of points
#Reshapes transect data so each stp is a row
agg.tran.data.max1<-reshape(agg.tran.data.max, direction="wide",idvar=
c("Installation","Plot","STP","Year_Measurement"),
timevar="Lifeform",v.names="x")
#Assigns zeros to NA values (transect points where no veg present)
# Reshaped columns keep reshape()'s default "x.<Lifeform>" names here,
# so every column starting with "x" is a lifeform column.
agg.T.names<-names(agg.tran.data.max1[,substring(names(agg.tran.data.max1),1,1)=="x"])
for(i in agg.T.names) {
agg.tran.data.max1[i][is.na(agg.tran.data.max1[i])] <- 0
}
# Tallest vegetation (x.F / x.HS / x.LS / x.S columns) recorded on the
# matching transect row(s) of the global `agg.tran.data.max1` table.
# NOTE: despite the name, the difference to `height` is NOT taken -- that
# subtraction was deliberately disabled upstream, so `height` is accepted
# but unused and the raw maximum shrub height is returned.
init_tree_shrub_ht_diff_trans <- function(Installation, Plot, STP, Year, height) {
  keep <- agg.tran.data.max1$Installation == Installation &
    agg.tran.data.max1$Plot == Plot &
    agg.tran.data.max1$STP == STP &
    agg.tran.data.max1$Year_Measurement == Year
  tran_veg <- agg.tran.data.max1[keep, ]
  max(tran_veg$x.F, tran_veg$x.HS, tran_veg$x.LS, tran_veg$x.S)
}
# Row-wise fill of treeminus_trans using the transect-based lookup above.
annual.gr4$treeminus_trans<-0
agg.tran.data.max1$Installation<-as.character(agg.tran.data.max1$Installation)
for(i in 1:nrow(annual.gr4)){
annual.gr4$treeminus_trans[i]<-init_tree_shrub_ht_diff_trans(
annual.gr4$Installation[i],
annual.gr4$Plot[i],
annual.gr4$STP[i],
annual.gr4$Year_Measurement[i],
annual.gr4$Height_Total[i]
)
}
#Removes 6th stp plots from analysis
#Makes separate dataframe for withheld (hold-out) data
annual.gr6<-annual.gr4[annual.gr4$STP_rand==6,]
annual.gr4<-annual.gr4[!annual.gr4$STP_rand==6,]
#Creates crown ratio variable for annual gr4
annual.gr4$cratio<- annual.gr4$CrownLength/annual.gr4$Height_Total
#Removes tree records with negative CR (and records where cratio is NA)
annual.gr4 <- annual.gr4[!is.na(annual.gr4$cratio) & annual.gr4$cratio>=0,]
#Selects tree record of annual growth from similar installations,
#this is where .grUV is created
# `sim` is a vector of installation codes defined elsewhere in the script.
annual.grUV<-annual.gr4[annual.gr4$Installation %in% sim, ]
#good point to look at not-yet removed damaged trees here with .gr4
allDamage<-annual.gr4
#
# #GAM for 1m polyveg cover
# gam.1m.polv<-gam(ht_annual~s(srHeight_Total)+s(Cov.POLV),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.1m.polv)
#
# par(mfrow=c(2,4),mar=c(4,4,1,2))
# plot(gam.1m.polv,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for 1m polyveg diff
# #gam.1m.polv.diff<-gam(ht_annual~s(srHeight_Total)+s(annual.grUV$diff.POLV.1m),data=annual.grUV, family=gaussian(link="identity"))
# #summary(gam.4m.polv)
# #Not enough (non-NA) data to do anything meaningful
#
# #GAM for 1m F diff
# gam.1m.F.diff<-gam(ht_annual~s(srHeight_Total)+s(annual.grUV$diff.F.1m),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.1m.F.diff)
#
# plot(gam.1m.F.diff,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for 1m LS diff
# gam.1m.LS.diff<-gam(ht_annual~s(srHeight_Total)+s(annual.grUV$diff.LS.1m),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.1m.LS.diff)
#
# plot(gam.1m.LS.diff,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for 1m HS diff
# gam.1m.HS.diff<-gam(ht_annual~s(srHeight_Total)+s(annual.grUV$diff.HS.1m),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.1m.HS.diff)
#
# plot(gam.1m.HS.diff,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
# #Print#
#
# #GAM for LS transect data
# gam.tran.LS<-gam(ht_annual~s(srHeight_Total)+s(diff.LS),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.tran.LS)
#
# par(mfrow=c(2,4),mar=c(4,4,1,2))
# plot(gam.tran.LS,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for HS transect data
# gam.tran.HS<-gam(ht_annual~s(srHeight_Total)+s(diff.HS),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.tran.HS)
#
# par(mfrow=c(2,4),mar=c(4,4,1,2))
# plot(gam.tran.HS,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for Forb transect data
# gam.tran.F<-gam(ht_annual~s(srHeight_Total)+s(diff.F),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.tran.F)
#
# plot(gam.tran.F,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for Grass transect data
# gam.tran.GR<-gam(ht_annual~s(srHeight_Total)+s(grass.ht),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.tran.GR)
#
# plot(gam.tran.GR,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
# #Print#
#
# #GAM for Grass transect data
# gam.tran.GR.cov<-gam(ht_annual~s(srHeight_Total)+s(tran.G),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.tran.GR.cov)
#
# plot(gam.tran.GR.cov,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
#
#
# #GAM for 1m HS cover
# gam.tran.HS.cov<-gam(ht_annual~s(srHeight_Total)+s(Cov.HS),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.tran.HS.cov)
#
# par(mfrow=c(2,4),mar=c(4,4,1,2))
# plot(gam.tran.HS.cov,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for 1m Grass cover
# gam.G.cov<-gam(ht_annual~s(srHeight_Total)+s(Cov.G),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.G.cov)
#
# plot(gam.G.cov,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for 1m Forb cover
# gam.F.cov<-gam(ht_annual~s(srHeight_Total)+s(Cov.F),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.F.cov)
#
# plot(gam.F.cov,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for 1m LS cover
# gam.LS.cov<-gam(ht_annual~s(srHeight_Total)+s(Cov.LS),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.LS.cov)
#
# plot(gam.LS.cov,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
# ##Print##
#
# #GAM for 1m Grass diff
# gam.G.diff.cov<-gam(ht_annual~s(srHeight_Total)+s(diff.G.1m),data=annual.grUV, family=gaussian(link="identity"))
# summary(gam.G.diff.cov)
#
# par(mfrow=c(2,4),mar=c(4,4,1,2))
# plot(gam.G.diff.cov,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for init tree ht veg diff
# init.diff.1m<-gam(ht_annual~srHeight_Total+annual.grUV$cratio+s(annual.grUV$Cov.POLV),data=annual.grUV, family=gaussian(link="identity"))
# summary(init.diff.1m)
#
#
# AIC(init.diff.1m)
# par(mfrow=c(2,4),mar=c(4,4,1,2))
# plot(init.diff.1m,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
#
# #GAM for init tree ht veg diff trans
# init.diff.1m.trans<-gam(ht_annual~s(srHeight_Total)+s(treeminus_trans),data=annual.grUV, family=gaussian(link="identity"))
# summary(init.diff.1m.trans)
#
# par(mfrow=c(2,4),mar=c(4,4,1,2))
# plot(init.diff.1m.trans,residuals=T,se=T,pch=".",ask=F,cex.lab=1.5)
### Median (tau = 0.5) quantile-regression screening of candidate vegetation
### covariates for annual height growth.  Every model shares the baseline
### predictors srHeight_Total + cratio and adds one vegetation variable.
### For each fit the AIC is appended to aic.list.vegCW and the observation
### count to nlqmm.list.UV, in the same order as the 18 labels below
### (one label per fit, including the "Nothing" baseline).
veg.variable<-c("Nothing","Comb.cov","F.cov","LS.cov","HS.cov",
"F.depth","LS.depth","HS.depth",
"F.vol","LS.vol","HS.vol",
"F.tran","LS.tran","HS.tran","G.tran.depth",
"G.tran.cov","mx.vg.diff.1m","mx.vg.diff.tr"
)
#Removes all Dead tree records (already done in annualization routine)
# annual.grUV<-annual.grUV[!annual.grUV$Damage %in% damageRemoved]
# annual.grUV<-annual.grUV[(annual.grUV$Height_Total!=0 & !is.na(annual.grUV$Height_Total)),]
###Quantreg
library(quantreg)
#QR for nothing (baseline model: no vegetation covariate)
# Drop records with missing crown ratio so all models fit on the same rows.
annual.grUV<-annual.grUV[!is.na(annual.grUV$cratio)==T,]
qrCW.noth<- rq(ht_annual~srHeight_Total+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-AIC(qrCW.noth)[1]
nlqmm.list.UV<-length(qrCW.noth$y)
#QR for 1m combined cover
annual.grUV$cov.comb<-annual.grUV$Cov.F+annual.grUV$Cov.LS+annual.grUV$Cov.HS
qrCW.1m.polv<- rq(ht_annual~srHeight_Total+cov.comb+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.polv)[1])
nlqmm.list.UV<-c(nlqmm.list.UV,length(qrCW.1m.polv$y))
#QR for 1m Forb cover
qrCW.1m.F<- rq(ht_annual~srHeight_Total+Cov.F+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.F)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.F$y))
#QR for 1m LOW Shrub cover
qrCW.1m.LS<- rq(ht_annual~srHeight_Total+Cov.LS+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.LS)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.LS$y))
#QR for 1m High Shrub cover
qrCW.1m.HS<- rq(ht_annual~srHeight_Total+Cov.HS+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.HS)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.HS$y))
#QR for 1m Grass cover
# qrCW.1m.G<- rq(ht_annual~srHeight_Total+Cov.G+cratio,random=~1,group=conc,tau=c(.5),data=annual.grUV)
# summary(qrCW.1m.G)
# aic list.vegCW<-c(aic list.vegCW,AIC(qrCW.1m.G)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.G$y))
# NOTE(review): the "diff" models below reuse the names qrCW.1m.F/.LS/.HS
# from the "cover" models above, overwriting those fits.  The cover models'
# AIC and n were already captured, so the result tables are unaffected, but
# only the diff fits remain available afterwards.
#QR for 1m Forb diff
qrCW.1m.F<- rq(ht_annual~srHeight_Total+diff.F.1m+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.F)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.F$y))
#QR for 1m LOW Shrub diff
qrCW.1m.LS<- rq(ht_annual~srHeight_Total+diff.LS.1m+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.LS)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.LS$y))
#QR for 1m High Shrub diff
qrCW.1m.HS<- rq(ht_annual~srHeight_Total+diff.HS.1m+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.HS)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.HS$y))
#QR for 1m Forb Volume
# Volume proxy = veg depth * percent cover / 100.
annual.grUV$F.volume<-(annual.grUV$diff.F.1m*annual.grUV$Cov.F)/100
qrCW.1m.F.vol<- rq(ht_annual~srHeight_Total+F.volume+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.F.vol)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.F.vol$y))
#QR for 1m Low Shrub Volume
annual.grUV$LS.volume<-(annual.grUV$diff.LS.1m*annual.grUV$Cov.LS)/100
qrCW.1m.LS.vol<- rq(ht_annual~srHeight_Total+LS.volume+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.LS.vol)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.LS.vol$y))
#QR for 1m High Shrub Volume
annual.grUV$HS.volume<-(annual.grUV$diff.HS.1m*annual.grUV$Cov.HS)/100
qrCW.1m.HS.vol<- rq(ht_annual~srHeight_Total+HS.volume+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.HS.vol)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.HS.vol$y))
#QR for 1m Grass diff
#qrCW.1m.G<- rq(ht_annual~srHeight_Total+diff.G.1m+cratio,tau=c(.5),data=annual.grUV)
#summary(qrCW.1m.G)
#aic list.vegCW<-c(aic list.vegCW,AIC(qrCW.1m.G)[1])
#nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.G$y))
#QR for Forb transect cover
qrCW.forb.tran<- rq(ht_annual~srHeight_Total+diff.F+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.forb.tran)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.forb.tran$y))
#QR for LS transect cover
qr.LS.tran<- rq(ht_annual~srHeight_Total+diff.LS+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qr.LS.tran)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qr.LS.tran$y))
#QR for HS transect cover
qr.HS.tran<- rq(ht_annual~srHeight_Total+diff.HS+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qr.HS.tran)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qr.HS.tran$y))
#QR for both Forb and Shrub Transect
#qrCW.forb.shrub.tran<- rq(ht_annual~srHeight_Total+diff.F+diff.S+cratio,tau=c(.5),data=annual.grUV)
#summary(qrCW.forb.shrub.tranCW)
#aic list.vegCW<-c(aic list.vegCW,AIC(qrCW.forb.shrub.tran)[1])
#QR for transect grass height
qrCW.tran.gr<- rq(ht_annual~srHeight_Total+grass.ht+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.tran.gr)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.tran.gr$y))
#QR for transect grass cover
qrCW.tran.gr.cov<- rq(ht_annual~srHeight_Total+tran.G+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.tran.gr.cov)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.tran.gr.cov$y))
#QR for init tree height max veg difference (1m veg plot)
qrCW.1m.max.vg.diff<- rq(ht_annual~srHeight_Total+treeminus+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.max.vg.diff)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.max.vg.diff$y))
#QR for init tree height max veg difference (transect)
qrCW.1m.max.vg.diff.tran<- rq(ht_annual~srHeight_Total+treeminus_trans+cratio,tau=c(.5),data=annual.grUV)
aic.list.vegCW<-c(aic.list.vegCW,AIC(qrCW.1m.max.vg.diff.tran)[1])
nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.max.vg.diff.tran$y))
# Assemble the comparison table: one row per candidate variable with its
# sample size and AIC (cbind of two numerics; as.numeric is a no-op guard).
UV.aic<-as.data.frame(cbind(nlqmm.list.UV,aic.list.vegCW))
UV.aic$aic.list.vegCW<-as.numeric(UV.aic$aic.list.vegCW)
veg.variable<-as.data.frame(veg.variable)
UV.aic<-cbind(veg.variable,UV.aic)
###CR VAR LQMM
# library(lqmm)
#
# #QR for nothing
# annual.grUV<-annual.grUV[!is.na(annual.grUV$cratio)==T,]
#
# qrCW.noth<-lqmm(ht_annual~srHeight_Total+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.noth)
# aic.lqmm.list.vegCW<-AIC(qrCW.noth)[1]
# nlqmm.list.UV<-length(qrCW.noth$y)
#
#
# #QR for 1m polyveg cover
# qrCW.1m.polv<-lqmm(ht_annual~srHeight_Total+Cov.POLV+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.polv)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.polv)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV,length(qrCW.1m.polv$y))
#
#
# #QR for 1m Forb cover
# qrCW.1m.F<-lqmm(ht_annual~srHeight_Total+Cov.F+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.F)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.F)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.F$y))
#
# #QR for 1m LOW Shrub cover
# qrCW.1m.LS<-lqmm(ht_annual~srHeight_Total+Cov.LS+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.LS)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.LS)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.LS$y))
#
# #QR for 1m High Shrub cover
# qrCW.1m.HS<-lqmm(ht_annual~srHeight_Total+Cov.HS+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.HS)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.HS)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.HS$y))
#
# #QR for 1m Grass cover
# # qrCW.1m.G<-lqmm(ht_annual~srHeight_Total+Cov.G+cratio,random=~1,group=conc,tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.G)
# # aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.G)[1])
# # nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.G$y))
#
# #QR for 1m Forb diff
# #qrCW.1m.F<-lqmm(ht_annual~srHeight_Total+diff.F.1m+cratio,tau=c(.5),data=annual.grUV)
# #summary(qrCW.1m.F)
# #aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.F)[1])
# #nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.F$y))
#
# #QR for 1m LOW Shrub diff
# qrCW.1m.LS<-lqmm(ht_annual~srHeight_Total+diff.LS.1m+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.LS)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.LS)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.LS$y))
#
# #QR for 1m High Shrub diff
# qrCW.1m.HS<-lqmm(ht_annual~srHeight_Total+diff.HS.1m+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.HS)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.HS)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.HS$y))
#
# #QR for 1m Grass diff
# #qrCW.1m.G<-lqmm(ht_annual~srHeight_Total+diff.G.1m+cratio,tau=c(.5),data=annual.grUV)
# #summary(qrCW.1m.G)
# #aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.G)[1])
# #nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.G$y))
#
#
# #QR for Forb transect cover
# #qrCW.forb.tran<-lqmm(ht_annual~srHeight_Total+diff.F+cratio,tau=c(.5),data=annual.grUV)
# #summary(qrCW.forb.tran)
# #aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.forb.tran)[1])
# #nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.forb.tran$y))
#
# #QR for LS transect cover
# qr.LS.tran<-lqmm(ht_annual~srHeight_Total+diff.LS+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qr.LS.tran)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qr.LS.tran)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qr.LS.tran$y))
#
# #QR for HS transect cover
# qr.HS.tran<-lqmm(ht_annual~srHeight_Total+diff.HS+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qr.HS.tran)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qr.HS.tran)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qr.HS.tran$y))
#
# #QR for both Forb and Shrub Transect
# #qrCW.forb.shrub.tran<-lqmm(ht_annual~srHeight_Total+diff.F+diff.S+cratio,tau=c(.5),data=annual.grUV)
# #summary(qrCW.forb.shrub.tranCW)
# #aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.forb.shrub.tran)[1])
#
# #QR for transect grass height
# qrCW.tran.gr<-lqmm(ht_annual~srHeight_Total+grass.ht+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.tran.gr)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.tran.gr)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.tran.gr$y))
#
# #QR for transect grass cover
# qrCW.tran.gr.cov<-lqmm(ht_annual~srHeight_Total+tran.G+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.tran.gr.cov)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.tran.gr.cov)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.tran.gr.cov$y))
#
# #QR for init tree height max veg difference (1m veg plot)
# qrCW.1m.max.vg.diff<-lqmm(ht_annual~srHeight_Total+treeminus+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.max.vg.diff)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.max.vg.diff)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.max.vg.diff$y))
#
# #QR for init tree height max veg difference (transect)
# qrCW.1m.max.vg.diff.tran<-lqmm(ht_annual~srHeight_Total+treeminus_trans+cratio,random=~1,
# na.action=na.exclude,
# group=conc,control=list(LP_tol_ll=1e-01,LP_max_iter=1000,method="df"),tau=c(.5),data=annual.grUV)
# # summary(qrCW.1m.max.vg.diff.tran)
# aic.lqmm.list.vegCW<-c(aic.lqmm.list.vegCW,AIC(qrCW.1m.max.vg.diff.tran)[1])
# nlqmm.list.UV<-c(nlqmm.list.UV, length(qrCW.1m.max.vg.diff.tran$y))
#
#
#
# UV.aic<-as.data.frame(cbind(nlqmm.list.UV,aic.lqmm.list.vegCW))
# UV.aic$aic.lqmm.list.vegCW<-as.numeric(UV.aic$aic.lqmm.list.vegCW)
#
# veg.variable<-as.data.frame(veg.variable)
#
# UV.aic<-cbind(veg.variable,UV.aic)
|
# Add expectile "basket" contours to an EXISTING scatterplot of x vs y.
# The function only calls lines()/points(), so a plot must already be open
# on the current device.  One closed contour is drawn per expectile level
# in pp, plus a cross at the mean point (mx, my).
#
# x, y            numeric vectors of equal length (the plotted coordinates)
# n               number of directions used to trace each contour
# pp              expectile levels; one contour per element
# lty, colr, lwd  line type, colour and width passed to lines()
ScatterplotBaskets <-
function (x = Headdimensions.data[, 5], y = Headdimensions.data[,
    6], n = 36, pp = c(0.6, 0.7, 0.8, 0.9, 0.95, 0.98, 0.99,
    0.995, 0.998), lty = 1, colr = "blue", lwd = 1)
{
    if (length(x) != length(y))
        stop("x and y are not of the same length! \n")
    # Scalar a-expectile of x via iteratively reweighted means
    # (at most 20 iterations, stopping once the estimate is stationary).
    expectile <- function(x, a) {
        u <- mean(x)
        for (it in 1:20) {
            w <- a * (x > u) + (1 - a) * (x <= u)
            u0 <- u
            u <- sum(w * x)/sum(w)
            if (u == u0)
                break
        }
        return(u)
    }
    # Trace the p-expectile contour of the (x, y) cloud: project the data
    # onto n + 2 directions, take the directional expectile a[j] of each
    # projection, then intersect consecutive supporting lines to obtain
    # the contour vertices (qx, qy).
    exp_conts <- function(x, y, p = 0.9, n = 12) {
        a <- sn <- cs <- rep(0, n + 2)
        for (j in 1:(n + 2)) {
            h <- 2 * pi * j/n
            sn[j] <- sin(h)
            cs[j] <- cos(h)
            z <- cs[j] * x + sn[j] * y
            a[j] <- expectile(z, p)
        }
        qx <- qy <- rep(0, n + 1)
        S <- cbind(sn, cs)
        for (j in 1:(n + 1)) {
            r <- j:(j + 1)
            u <- solve(S[r, ], a[r])
            qy[j] <- u[1]
            qx[j] <- u[2]
        }
        return(list(x = qx, y = qy))
    }
    # Standardise both coordinates so contours are computed on a common
    # scale, then map back to data units when drawing.
    mx <- mean(x)
    sx <- sd(x)
    my <- mean(y)
    sy <- sd(y)
    xs <- (x - mx)/sx
    ys <- (y - my)/sy
    par(mfrow = c(1, 1))
    # One contour per requested expectile level.  (A dead per-direction
    # loop that recomputed unused directional expectiles was removed;
    # exp_conts() already does all the work.)
    for (p in pp) {
        qp <- exp_conts(xs, ys, p = p, n = n)
        lines(qp$x * sx + mx, qp$y * sy + my, col = colr,
            lty = lty, lwd = lwd)
    }
    points(mx, my, pch = 3, cex = 2)
}
| /R/ScatterplotBaskets.R | no_license | carelvdmerwe/UBbipl3 | R | false | false | 2,003 | r | ScatterplotBaskets <-
function (x = Headdimensions.data[, 5], y = Headdimensions.data[,
6], n = 36, pp = c(0.6, 0.7, 0.8, 0.9, 0.95, 0.98, 0.99,
0.995, 0.998), lty = 1, colr = "blue", lwd = 1)
{
# Adds expectile "basket" contours (one per level in pp) and a mean-point
# cross to an already-open scatterplot of x vs y; only lines()/points()
# are called, so a plot must exist on the current device.
if (length(x) != length(y))
stop("x and y are not of the same length! \n")
# Scalar a-expectile of x via iteratively reweighted means (max 20 iters,
# early exit once the estimate is stationary).
expectile <- function(x, a) {
u <- mean(x)
for (it in 1:20) {
w <- a * (x > u) + (1 - a) * (x <= u)
u0 <- u
u <- sum(w * x)/sum(w)
if (u == u0)
break
}
return(u)
}
# Trace the p-expectile contour: directional expectiles a[j] over n + 2
# directions, then intersect consecutive supporting lines for vertices.
exp_conts <- function(x, y, p = 0.9, n = 12) {
a <- sn <- cs <- rep(0, n + 2)
a0 <- 0
for (j in 1:(n + 2)) {
h <- 2 * pi * j/n
sn[j] <- sin(h)
cs[j] <- cos(h)
z <- cs[j] * x + sn[j] * y
a[j] <- expectile(z, p)
}
qx <- qy <- rep(0, n + 1)
S <- cbind(sn, cs)
for (j in 1:(n + 1)) {
r <- j:(j + 1)
u <- solve(S[r, ], a[r])
qy[j] <- u[1]
qx[j] <- u[2]
}
return(list(x = qx, y = qy))
}
# Standardise coordinates so contours are computed on a common scale.
mx <- mean(x)
sx <- sd(x)
my <- mean(y)
sy <- sd(y)
xs <- (x - mx)/sx
ys <- (y - my)/sy
m <- length(x)
par(mfrow = c(1, 1))
n <- n
phis <- (1:(n + 1)) * 2 * pi/n
sn <- sin(phis)
cs <- cos(phis)
uqs <- 0 * phis
pp <- pp
np <- length(pp)
cols <- rainbow(np)
j <- 0
for (p in pp) {
j <- j + 1
k <- 0
# NOTE(review): this inner loop's results (uqs, xq, yq, g, v) are never
# used -- the contour is recomputed by exp_conts() below; dead code.
for (phi in phis) {
k <- k + 1
u <- xs * cs[k] + ys * sn[k]
v <- -xs * sn[k] + ys * cs[k]
uq <- expectile(u, p)
xq <- uq * cs[k]
yq <- uq * sn[k]
uqs[k] <- uq
g <- 10 * c(-1, 1)
}
# Map the standardised contour back to data units and draw it.
qp <- exp_conts(xs, ys, p = p, n = n)
lines(qp$x * sx + mx, qp$y * sy + my, pch = 15, col = colr,
lty = lty, lwd = lwd)
}
points(mx, my, pch = 3, cex = 2)
}
|
#' @title Get Tree-Ring Borders
#' @description Identify tree-ring borders
#' @param x an object of class "xRing"
#' @param k integer; width of the rolling window
#' @param minTrw integer; width of the narrowest tree-ring, rings narrower than this value will not be considered
#' @param threshold the minimum difference between the local maximum and minimum density to detect tree-ring borders
#' @param addLastBorder logical; if \code{FALSE} the last border is not added. If \code{TRUE} the last border is placed at the position of the last value.
#' @details
#' This function uses local maximum and minimum densities in order to detect tree-ring borders.
#' @return The \code{getBorders} function returns an object of class "xRing" including the following elements:
#' @return \code{names} a \code{string} giving the series name
#' @return \code{span} the first and last year
#' @return \code{trw} a \code{data.frame} with tree-ring width
#' @return \code{limits} a \code{vector} with the position of the tree-ring borders
#' @return \code{years} a \code{vector} with the calendar year
#' @return \code{profile.raw} a \code{vector} with the raw X-ray values
#' @return \code{profile} a \code{vector} with the the smoothed X-ray values (if is supplied in the input)
#' @export
#' @examples
#'
#' data("PaPiRaw")
#' data("PaPiSpan")
#' AFO1001a <- toxRing(PaPiRaw, PaPiSpan, "AFO1001a")
#' AFO1001a <- getBorders(AFO1001a)
#'
#' AFO1001a <- toxRing(PaPiRaw, seriesName = "AFO1001a")
#' AFO1001a <- getBorders(AFO1001a)
#'
getBorders <- function(x,
k = 3,
minTrw = 3,
threshold = 0.215,
addLastBorder = FALSE) {
# Detect tree-ring borders on the (smoothed) density profile of x and
# derive ring widths and calendar years.  Relies on package-internal
# helpers: border() (candidate border positions -- presumably from local
# max/min density; confirm in package source), dif() (a diff-like step
# function) and as.xRing() (class constructor).
extractedProfile <- x$profile
lastYear <- x$span[[2]]
if (is.na(lastYear)) {
# Unknown end year: assume the series ends last calendar year and mark
# the guess with "#" in the progress message.
lastYear <- as.integer(format(Sys.time(), "%Y")) - 1
message(paste(x$name, lastYear, "#"))
x$span[2] <- lastYear
} else {
message(paste(x$name, lastYear))
}
lastBorder <- NULL
if (addLastBorder) {
# Optionally close the final ring at the end of the profile.
lastBorder <- length(extractedProfile)
}
# Keep two copies: `limits` may have narrow-ring borders removed below,
# `Limits` preserves the full detected set.
limits <-
Limits <-
c(
border(x = extractedProfile, k = k, threshold = threshold),
lastBorder
)
limits0 <- NA
# Borders whose following ring is narrower than minTrw (the -1 shifts the
# index to the earlier border of the narrow pair).
problems <- which(dif(limits) < minTrw) - 1
if (length(problems) > 0) {
# Single-pass removal.  NOTE(review): merging two rings can itself
# produce a ring < minTrw; this is not re-checked.  `limits.problems`
# is computed but never used afterwards in this function.
limits <- Limits[-problems]
limits.problems <- Limits[which(dif(Limits) < minTrw) - 1]
}
# One calendar year per ring, counting back from lastYear.
years <- lastYear - (length(limits[-1]):0)
# Ring widths = successive border distances, labelled "<series>.trw".
x$trw <-
as.data.frame(matrix(diff(limits), dimnames = list(years[-1], paste0(x$name, ".trw"))))
x$limits <- limits
x$years <- years
x$limits0 <- limits0
if (is.na(x$span[1])) {
# Backfill the unknown first year from the derived year vector.
x$span[1] <- years[1]
}
as.xRing(x)
}
| /R/getBorders.R | no_license | cran/xRing | R | false | false | 2,653 | r | #' @title Get Tree-Ring Borders
#' @description Identify tree-ring borders
#' @param x an object of class "xRing"
#' @param k integer; width of the rolling window
#' @param minTrw integer; width of the narrowest tree-ring, rings narrower than this value will not be considered
#' @param threshold the minimum difference between the local maximum and minimum density to detect tree-ring borders
#' @param addLastBorder logical; if \code{FALSE} the last border is not added. If \code{TRUE} the last border is placed at the position of the last value.
#' @details
#' This function uses local maximum and minimum densities in order to detect tree-ring borders.
#' @return The \code{getBorders} function returns an object of class "xRing" including the following elements:
#' @return \code{names} a \code{string} giving the series name
#' @return \code{span} the first and last year
#' @return \code{trw} a \code{data.frame} with tree-ring width
#' @return \code{limits} a \code{vector} with the position of the tree-ring borders
#' @return \code{years} a \code{vector} with the calendar year
#' @return \code{profile.raw} a \code{vector} with the raw X-ray values
#' @return \code{profile} a \code{vector} with the the smoothed X-ray values (if is supplied in the input)
#' @export
#' @examples
#'
#' data("PaPiRaw")
#' data("PaPiSpan")
#' AFO1001a <- toxRing(PaPiRaw, PaPiSpan, "AFO1001a")
#' AFO1001a <- getBorders(AFO1001a)
#'
#' AFO1001a <- toxRing(PaPiRaw, seriesName = "AFO1001a")
#' AFO1001a <- getBorders(AFO1001a)
#'
getBorders <- function(x,
k = 3,
minTrw = 3,
threshold = 0.215,
addLastBorder = FALSE) {
# Detect tree-ring borders on the density profile and derive ring widths
# and calendar years.  Uses package-internal helpers border(), dif() and
# as.xRing() (defined elsewhere in the xRing package).
extractedProfile <- x$profile
lastYear <- x$span[[2]]
if (is.na(lastYear)) {
# Unknown end year: assume the series ends last calendar year; the "#"
# in the message flags the guess.
lastYear <- as.integer(format(Sys.time(), "%Y")) - 1
message(paste(x$name, lastYear, "#"))
x$span[2] <- lastYear
} else {
message(paste(x$name, lastYear))
}
lastBorder <- NULL
if (addLastBorder) {
# Optionally close the final ring at the profile's last sample.
lastBorder <- length(extractedProfile)
}
# `Limits` keeps the full detected set; `limits` may be pruned below.
limits <-
Limits <-
c(
border(x = extractedProfile, k = k, threshold = threshold),
lastBorder
)
limits0 <- NA
# Index (shifted by -1) of borders starting rings narrower than minTrw.
problems <- which(dif(limits) < minTrw) - 1
if (length(problems) > 0) {
# Single-pass narrow-ring removal; `limits.problems` is computed but
# not used afterwards (review note).
limits <- Limits[-problems]
limits.problems <- Limits[which(dif(Limits) < minTrw) - 1]
}
# One calendar year per ring, counting back from lastYear.
years <- lastYear - (length(limits[-1]):0)
x$trw <-
as.data.frame(matrix(diff(limits), dimnames = list(years[-1], paste0(x$name, ".trw"))))
x$limits <- limits
x$years <- years
x$limits0 <- limits0
if (is.na(x$span[1])) {
x$span[1] <- years[1]
}
as.xRing(x)
}
|
library(libamtrack)
### Name: AT.effective.charge.from.E.MeV.u
### Title: AT.effective.charge.from.E.MeV.u
### Aliases: AT.effective.charge.from.E.MeV.u
### ** Examples
# Charge pick-up of several nuclids depending on ion energy.
# data.frame() recycles the 5 particle names across the 50 energies,
# giving one (energy, particle) pair per row.
df <- data.frame( E.MeV.u = 10^seq(-1, 2, length.out = 50),
            particle.name = c("1H", "3He", "6Li", "12C",
            "16O"),
            effective.charge = 0)
# seq_len() (rather than 1:nrow(df)) stays correct even for a zero-row frame.
for (i in seq_len(nrow(df))) {
    df$effective.charge[i] <- AT.effective.charge.from.E.MeV.u(
        E.MeV.u = df$E.MeV.u[i],
        # [1] mirrors the original example: keep only the first matched
        # particle number returned by the name lookup.
        particle.no = AT.particle.no.from.particle.name(df$particle.name[i])[1])
}
| /data/genthat_extracted_code/libamtrack/examples/AT.effective.charge.from.E.MeV.u.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 704 | r | library(libamtrack)
### Name: AT.effective.charge.from.E.MeV.u
### Title: AT.effective.charge.from.E.MeV.u
### Aliases: AT.effective.charge.from.E.MeV.u
### ** Examples
# Charge pick-up of several nuclids depending on ion energy
# data.frame() recycles the 5 particle names across the 50 energies,
# giving one (energy, particle) pair per row.
df <- data.frame( E.MeV.u = 10^seq(-1, 2, length.out = 50),
            particle.name = c("1H", "3He", "6Li", "12C",
            "16O"),
            effective.charge = 0)
# NOTE(review): 1:nrow(df) misbehaves on a zero-row frame; seq_len(nrow(df))
# is the safer idiom.
for(i in 1:nrow(df)){
    df$effective.charge[i] <- AT.effective.charge.from.E.MeV.u( E.MeV.u =
    df$E.MeV.u[i],
    # [1]: keep only the first matched particle number from the name lookup.
    particle.no =
    AT.particle.no.from.particle.name(df$particle.name[i])[1])
}
|
context("DOCX shape dimensions")
library(xml2)
# Shared checker for all four shape tests below: renders `draw_fun()` into a
# DOCX DrawingML fragment via dml_docx() and asserts that the produced shape
# carries a well-formed a:xfrm with non-negative integer EMU offset (a:off)
# and extent (a:ext) attributes.
expect_shape_has_dimensions <- function(draw_fun) {
  file <- tempfile()
  dml_docx(file = file, bg = "transparent")
  plot.new()
  draw_fun()
  dev.off()
  doc <- read_xml(file)
  ns <- xml_ns(doc)
  # xml_find_first() is the non-deprecated spelling of xml_find_one().
  xfrm_node <- xml_find_first(doc, ".//wps:wsp/wps:spPr/a:xfrm", ns = ns)
  expect_is(object = xfrm_node, class = "xml_node")
  off_node <- xml_find_first(doc, ".//wps:wsp/wps:spPr/a:xfrm/a:off", ns = ns)
  expect_true(grepl("^[0-9]+$", xml_attr(off_node, "x")))
  expect_true(grepl("^[0-9]+$", xml_attr(off_node, "y")))
  ext_node <- xml_find_first(doc, ".//wps:wsp/wps:spPr/a:xfrm/a:ext", ns = ns)
  expect_true(grepl("^[0-9]+$", xml_attr(ext_node, "cx")))
  expect_true(grepl("^[0-9]+$", xml_attr(ext_node, "cy")))
}

test_that("rect has dimensions", {
  expect_shape_has_dimensions(function() rect(0.2, 0.2, 0.8, 0.8))
})

test_that("lines has dimensions", {
  expect_shape_has_dimensions(function() {
    lines(par("usr")[1:2], par("usr")[3:4], lwd = 1)
  })
})

test_that("polygon has dimensions", {
  expect_shape_has_dimensions(function() {
    x <- par("usr")[1:2]
    y <- par("usr")[3:4]
    # Trailing NA matches the original fixture (polygon path terminator).
    polygon(c(x[1], x[1], x[2], x[2], NA), c(y[2], y[1], y[1], y[2], NA),
            lwd = 1)
  })
})

test_that("text has dimensions", {
  expect_shape_has_dimensions(function() text(x = .5, y = .5, labels = "test"))
})
| /rvg/tests/testthat/test-docx-xfrm.R | no_license | ingted/R-Examples | R | false | false | 3,397 | r | context("DOCX shape dimensions")
library(xml2)
# A rect() drawn on the dml_docx device must be exported with an <a:xfrm>
# transform node whose offset (a:off) and extent (a:ext) attributes are all
# plain non-negative integers.
test_that("rect has dimensions", {
  file <- tempfile()
  dml_docx( file = file, bg = "transparent" )
  plot.new()
  rect(0.2, 0.2, 0.8, 0.8)
  dev.off()
  doc <- read_xml(file)
  # The shape must carry a transform node...
  xfrm_node <- xml_find_one(doc, ".//wps:wsp/wps:spPr/a:xfrm", ns = xml_ns( doc ))
  expect_is(object = xfrm_node, class = "xml_node")
  # ...whose offset attributes are integer strings...
  off_node <- xml_find_one(doc, ".//wps:wsp/wps:spPr/a:xfrm/a:off", ns = xml_ns( doc ))
  offx <- xml_attr(off_node, "x")
  offy <- xml_attr(off_node, "y")
  expect_true( grepl("^[0-9]+$", offx ) )
  expect_true( grepl("^[0-9]+$", offy ) )
  # ...and whose extent (cx = width, cy = height) attributes are too.
  ext_node <- xml_find_one(doc, ".//wps:wsp/wps:spPr/a:xfrm/a:ext", ns = xml_ns( doc ))
  cx <- xml_attr(ext_node, "cx")
  cy <- xml_attr(ext_node, "cy")
  expect_true( grepl("^[0-9]+$", cx ) )
  expect_true( grepl("^[0-9]+$", cy ) )
})
# A lines() segment spanning the plot region on the dml_docx device must be
# exported with an <a:xfrm> transform node whose offset (a:off) and extent
# (a:ext) attributes are all plain non-negative integers.
test_that("lines has dimensions", {
  file <- tempfile()
  dml_docx( file = file, bg = "transparent" )
  plot.new()
  # Diagonal across the whole user coordinate region.
  lines(par("usr")[1:2], par("usr")[3:4], lwd = 1)
  dev.off()
  doc <- read_xml(file)
  # The shape must carry a transform node...
  xfrm_node <- xml_find_one(doc, ".//wps:wsp/wps:spPr/a:xfrm", ns = xml_ns( doc ))
  expect_is(object = xfrm_node, class = "xml_node")
  # ...whose offset attributes are integer strings...
  off_node <- xml_find_one(doc, ".//wps:wsp/wps:spPr/a:xfrm/a:off", ns = xml_ns( doc ))
  offx <- xml_attr(off_node, "x")
  offy <- xml_attr(off_node, "y")
  expect_true( grepl("^[0-9]+$", offx ) )
  expect_true( grepl("^[0-9]+$", offy ) )
  # ...and whose extent (cx = width, cy = height) attributes are too.
  ext_node <- xml_find_one(doc, ".//wps:wsp/wps:spPr/a:xfrm/a:ext", ns = xml_ns( doc ))
  cx <- xml_attr(ext_node, "cx")
  cy <- xml_attr(ext_node, "cy")
  expect_true( grepl("^[0-9]+$", cx ) )
  expect_true( grepl("^[0-9]+$", cy ) )
})
test_that("polygon has dimensions", {
file <- tempfile()
dml_docx( file = file, bg = "transparent" )
plot.new()
x <- par("usr")[1:2]
y <- par("usr")[3:4]
x <- c(x[1], x[1], x[2], x[2], NA )
y <- c(y[2], y[1], y[1], y[2], NA )
polygon(x, y, lwd = 1)
dev.off()
doc <- read_xml(file)
xfrm_node <- xml_find_one(doc, ".//wps:wsp/wps:spPr/a:xfrm", ns = xml_ns( doc ))
expect_is(object = xfrm_node, class = "xml_node")
off_node <- xml_find_one(doc, ".//wps:wsp/wps:spPr/a:xfrm/a:off", ns = xml_ns( doc ))
offx <- xml_attr(off_node, "x")
offy <- xml_attr(off_node, "y")
expect_true( grepl("^[0-9]+$", offx ) )
expect_true( grepl("^[0-9]+$", offy ) )
ext_node <- xml_find_one(doc, ".//wps:wsp/wps:spPr/a:xfrm/a:ext", ns = xml_ns( doc ))
cx <- xml_attr(ext_node, "cx")
cy <- xml_attr(ext_node, "cy")
expect_true( grepl("^[0-9]+$", cx ) )
expect_true( grepl("^[0-9]+$", cy ) )
})
test_that("text has dimensions", {
file <- tempfile()
dml_docx( file = file, bg = "transparent" )
plot.new()
text(x = .5, y = .5, labels = "test")
dev.off()
doc <- read_xml(file)
xfrm_node <- xml_find_one(doc, ".//wps:wsp/wps:spPr/a:xfrm", ns = xml_ns( doc ))
expect_is(object = xfrm_node, class = "xml_node")
off_node <- xml_find_one(doc, ".//wps:wsp/wps:spPr/a:xfrm/a:off", ns = xml_ns( doc ))
offx <- xml_attr(off_node, "x")
offy <- xml_attr(off_node, "y")
expect_true( grepl("^[0-9]+$", offx ) )
expect_true( grepl("^[0-9]+$", offy ) )
ext_node <- xml_find_one(doc, ".//wps:wsp/wps:spPr/a:xfrm/a:ext", ns = xml_ns( doc ))
cx <- xml_attr(ext_node, "cx")
cy <- xml_attr(ext_node, "cy")
expect_true( grepl("^[0-9]+$", cx ) )
expect_true( grepl("^[0-9]+$", cy ) )
})
|
\name{fm.get_num_tuples}
\alias{fm.get_num_tuples}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Function for exporting number of tuples
}
\description{Returns the number of tuples.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
fm.get_num_tuples(envsp=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{envsp}{Structure required for the sparse representation, which stores the relevant values (k-tuples). It is obtained from \code{fm.PrepareSparseFM} (see the example below for the full call).}
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
\item{output}{The output is the number of tuples.}
}
\author{
%% ~~who you are~~
Gleb Beliakov, Andrei Kelarev, Quan Vu, Daniela L. Calderon, Deakin University
}
\examples{
n <- 3
envsp <- fm.PrepareSparseFM(n, vector(), vector())
envsp <- fm.add_singletons_sparse(c(0.2,0.1,0.2),envsp)
envsp <- fm.add_tuple_sparse(c(1,2,3),0.4,envsp);
fm.get_num_tuples(envsp)
envsp <-fm.FreeSparseFM(envsp)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ get num tuples }
\keyword{ Tuples }% __ONLY ONE__ keyword per line | /man/fm.get_num_tuples.Rd | no_license | cran/Rfmtool | R | false | false | 1,345 | rd | \name{fm.get_num_tuples}
\alias{fm.get_num_tuples}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Function for exporting number of tuples
}
\description{Returns the number of tuples.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
fm.get_num_tuples(envsp=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{envsp}{Structure required for sparse representation which stores the relevant values (k-tuples). It is obtained from fm.PrepareSparseFM(n).}
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
\item{output}{The output is the number of tuples.}
}
\author{
%% ~~who you are~~
Gleb Beliakov, Andrei Kelarev, Quan Vu, Daniela L. Calderon, Deakin University
}
\examples{
n <- 3
envsp <- fm.PrepareSparseFM(n, vector(), vector())
envsp <- fm.add_singletons_sparse(c(0.2,0.1,0.2),envsp)
envsp <- fm.add_tuple_sparse(c(1,2,3),0.4,envsp);
fm.get_num_tuples(envsp)
envsp <-fm.FreeSparseFM(envsp)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ get num tuples }
\keyword{ Tuples }% __ONLY ONE__ keyword per line |
# Extract text strings
# Flag tweets whose text mentions moving house, using two keyword passes:
# first a short pattern (returning matching row indices via grep), then a
# much longer pattern (returning a logical mask via grepl).
# NOTE(review): the first pass reads `t.out$text` but the second reads
# `t_out$text` -- presumably these should be the same data frame; confirm
# which name is defined upstream before running.
# NOTE(review): `ignore.case = T` uses the reassignable shorthand T;
# prefer TRUE.
sel <- grep("new house|new flat|moving house|move house|moving home|move home|#newhouse|#newflat|#movinghouse|#movehouse|#movinghome|#movehome", t.out$text, ignore.case = T )
summary(sel)
# Inspect the matching rows interactively.
t.out[sel,]
# Second, much broader pass: synonyms (gaff, crib, digs, pad, ...), dwelling
# types (bungalow, cottage, semi-detached, ...), packing/removals phrases,
# and purchase/rental phrases, each with a matching hashtag form.
sel <- grepl("new house|#newhouse|old house|#oldhouse|new home|#newhome|old home|#oldhome|new flat|#newflat|old flat|#oldflat|moving house|#movinghouse|move house|#movehouse|moving home|#movinghome|move home|#movehome|packing to move|packing up everything|unpacking everything|removals van|#packingtomove|#packingupeverything|#unpackingeverything|#removalsvan|bought a house|house bought|moved house|house sold|#boughtahouse|#housebought|#movedhouse|#housesold|first rent|#firstrent|new gaff|new housing|new accommodation|new crib|new bungalow|new apartment|new semi detached|new semi-detached|new detached|new cottage|new digs|new dwelling|new residence|new pad|new homes|new home's|new houses|new house's|#newgaff|#newhousing|#newaccommodation|#newcrib|#newbungalow|#newapartment|#newsemidetached|#newdetached|#newcottage|#newdigs|#newdwelling|#newresidence|#newpad|#newhomes|#newhouses|old gaff|old housing|old accommodation|old crib|old bungalow|old apartment|old semi detached|old semi-detached|old detached|old cottage|old digs|old dwelling|old residence|old pad|old homes|old home's|old houses|old house's|#oldgaff|#oldhousing|#oldaccommodation|#oldcrib|#oldbungalow|#oldapartment|#oldsemidetached|#olddetached|#oldcottage|#olddigs|#olddwelling|#oldresidence|#oldpad|#oldhomes|#oldhouses", t_out$text, ignore.case = T)
# ForceChannels should fold a stack of 300 2x2 frames into a 4-D array with
# the requested number of channels, and error when the frame count is not a
# multiple of the channel count.
test_that("ForceChannels works", {
  library(magrittr)
  # Build a 2 x 2 x 300 array by abind-ing 300 random 2x2 matrices.
  x <- lapply(1:300, function(x) matrix(runif(4), nrow = 2)) %>%
    Reduce(function(x, y) abind::abind(x, y, along = 3),
           .)
  # 300 frames / 6 channels = 50 frames per channel.
  expect_equal(ForceChannels(x, 6) %>% dim, c(2, 2, 6, 50))
  # 300 is not divisible by 7, so this must error mentioning "multiple".
  expect_error(ForceChannels(x, 7), "multiple")
})
# Stack2DTifs should combine two 2-D TIFFs into a single 3-D stack, and
# reject inputs whose dimensions differ or that are not 2-dimensional.
test_that("Stack2DTifs works", {
  # Work in a temp dir and restore the original working directory on exit.
  cwd <- getwd()
  on.exit(setwd(cwd))
  setwd(tempdir())
  # NOTE(review): 0:8 has 9 elements for a 2x2x2 (8-cell) array, so the
  # last value is silently dropped by recycling rules -- presumably
  # intentional, but 0:7 would say it explicitly.
  img <- array(0:8, dim = rep(2, 3))
  WriteIntImage(img[, , 1], "50_1.tif")
  WriteIntImage(img[, , 2], "50_2.tif")
  Stack2DTifs(c("50_1.tif", "50_2.tif"), "50_1_2")
  if (!stringr::str_detect(tolower(Sys.info()['sysname']), "windows")) {
    # these fail on windows due to an issue with readTIFF(..., as.is = TRUE)
    expect_equal(ReadImageData("50_1_2.tif"),
                 abind::abind(img[, , 1], img[, , 2], along = 3),
                 check.attributes = FALSE)
  }
  suppressWarnings(file.remove(list.files()))
  # Mixing a 2-D slice with a 2-frame (3-D) image: dimensions disagree.
  img <- ReadImageData(system.file("extdata", "50.tif",
                                   package = "nandb"))
  WriteIntImage(img[, , 1], "50_1.tif")
  WriteIntImage(img[, , 1:2], "50_2.tif")
  expect_error(Stack2DTifs(c("50_1.tif", "50_2.tif"), "50_1_2"), "same dim")
  suppressWarnings(file.remove(list.files()))
  # Two 3-D inputs: both must be rejected as not 2-dimensional.
  img <- ReadImageData(system.file("extdata", "50.tif",
                                   package = "nandb"))
  WriteIntImage(img[, , 1:2], "50_1.tif")
  WriteIntImage(img[, , 1:2], "50_2.tif")
  expect_error(Stack2DTifs(c("50_1.tif", "50_2.tif"), "50_1_2"), "2-dim")
  suppressWarnings(file.remove(list.files()))
})
# WriteIntImage should round-trip an integer image through a TIFF file, and
# should substitute the `na` replacement value when the image contains NA
# while still returning the original (NA-containing) image invisibly.
test_that("WriteIntImage works", {
  img <- ReadImageData(system.file("extdata", "50.tif",
                                   package = "nandb"))
  # Work in a temp dir and restore the original working directory on exit.
  cwd <- getwd()
  on.exit(setwd(cwd))
  setwd(tempdir())
  WriteIntImage(img, "50.tif")
  if (!stringr::str_detect(tolower(Sys.info()['sysname']), "windows")) {
    # these fail on windows due to an issue with readTIFF(..., as.is = TRUE)
    expect_equal(ReadImageData("50.tif"), img, check.attributes = FALSE)
  }
  # With one NA pixel and na = pi, the function's return value should still
  # equal the input image (NA preserved in the returned copy).
  img[33] <- NA
  expect_equal(WriteIntImage(img, "pimg.tiff", na = pi), img)
  suppressWarnings(file.remove(list.files()))
})
# ReadImageData should agree with EBImage's reader, validate its second
# argument, rescale images flagged as needing it, and undo the 256x scaling
# applied when a written value exceeded the 8-bit range.
test_that("ReadImageData works", {
  # Work in a temp dir and restore the original working directory on exit.
  cwd <- getwd()
  on.exit(setwd(cwd))
  setwd(tempdir())
  # NOTE(review): this local name shadows base::file.path for the rest of
  # the test; a different name (e.g. tif_path) would be safer.
  file.path <- system.file('extdata', '50.tif', package = 'nandb')
  expect_equal(EBImage::imageData(EBImage::readImage(file.path, as.is = TRUE)),
               ReadImageData(file.path, 3))
  # Non-integer second argument must be rejected.
  expect_error(ReadImageData(file.path, TRUE), "integer")
  expect_equal(max(ReadImageData(system.file("extdata", "needs_rescaling.tif",
                                             package = "nandb"))),
               6)
  # 256 exceeds the 8-bit max, so the written file is scaled; reading it
  # back should return the original values divided by 256.
  badwrite <- WriteIntImage(matrix(c(rep(0, 3), 256), nrow = 2), "badwrite.tif")
  if (!stringr::str_detect(tolower(Sys.info()['sysname']), "windows")) {
    # these fail on windows due to an issue with readTIFF(..., as.is = TRUE)
    expect_equal(ReadImageData("badwrite.tif"), badwrite / 256)
  }
  suppressWarnings(file.remove(list.files()))
})
# WriteImageTxt should dump each 2-D slice of an image to a CSV whose values
# round-trip via ReadImageTxt, reject inputs without slice dimensions, and
# WriteIntImage should validate integer range.
test_that("WriteImageTxt works", {
  # Work in a temp dir and restore the original working directory on exit.
  cwd <- getwd()
  on.exit(setwd(cwd))
  setwd(tempdir())
  img <- ReadImageData(system.file('extdata', '50.tif', package = 'nandb'))
  # Sanity check: concatenated CSV contents preserve the overall mean.
  expect_equal(mean(unlist(WriteImageTxt(img, 'temp'))), mean(img))
  expect_error(WriteImageTxt(1:3, "abc"), "dimension")
  # WriteIntImage input validation: integers only, non-negative only.
  expect_error(WriteIntImage(matrix(0.5), "a"), "integer")
  expect_error(WriteIntImage(matrix(-1), "a"), "negative")
  WriteIntImage(matrix(2^9), "16bit")
  # NOTE(review): no file path is supplied here -- presumably the max-value
  # check fires before the path argument is touched (lazy evaluation);
  # confirm the error really matches "The maximum value".
  expect_error(WriteIntImage(matrix(2 ^ 17)),
               "The maximum value")
  expect_equal(mean(unlist(WriteImageTxt(img, 'temp'))), mean(img))
  # Slice 1 of the written image must read back equal to the original slice.
  img_01 <- ReadImageTxt("temp_01.csv")
  expect_equal(img_01, img[, , 1], check.attributes = FALSE)
  # 4-D input: one CSV per (i, j) slice pair, named fourD_i_j.csv.
  four.d <- array(1:(2^4), dim = rep(2, 4))
  WriteImageTxt(four.d, "fourD")
  for (i in 1:2) {
    for (j in 1:2) {
      expect_equal(ReadImageTxt(paste0("fourD_", i, "_", j, ".csv")),
                   four.d[, , i, j], check.attributes = FALSE)
    }
  }
  suppressWarnings(file.remove(list.files()))
})
# Bin2Tiff / Bin2TiffFolder should convert a raw .bin file into a .tif with
# the given height/width/bit depth, leaving the original .bin in place and
# preserving the pixel values byte for byte.
test_that("Bin2Tiff works", {
  # Work in a temp dir and restore the original working directory on exit.
  cwd <- getwd()
  on.exit(setwd(cwd))
  setwd(tempdir())
  dir.create("temp_dir")
  expect_true(file.copy(system.file("extdata", "b.bin", package = "nandb"),
                        "temp_dir"))
  Bin2Tiff("temp_dir/b.bin", height = 2, width = 2, bits = 8)
  # Folder variant run over the same directory (b.tif already exists).
  Bin2TiffFolder("temp_dir", height = 2, width = 2, bits = 8)
  if (!stringr::str_detect(tolower(Sys.info()['sysname']), "windows")) {
    # these fail on windows due to an issue with readTIFF(..., as.is = TRUE)
    expect_equal(list.files("temp_dir"), c("b.bin", "b.tif"))
    setwd("temp_dir")
    # The 4 raw bytes of the .bin must equal the 2x2 TIFF's pixel values.
    expect_equal(readBin("b.bin", "int", size = 1, n = 4),
                 as.vector(ReadImageData("b.tif")))
  }
  setwd("..")
  filesstrings::dir.remove("temp_dir")
})
| /junk/test_io.R | no_license | rorynolan/nandb | R | false | false | 4,726 | r | test_that("ForceChannels works", {
library(magrittr)
x <- lapply(1:300, function(x) matrix(runif(4), nrow = 2)) %>%
Reduce(function(x, y) abind::abind(x, y, along = 3),
.)
expect_equal(ForceChannels(x, 6) %>% dim, c(2, 2, 6, 50))
expect_error(ForceChannels(x, 7), "multiple")
})
test_that("Stack2DTifs works", {
cwd <- getwd()
on.exit(setwd(cwd))
setwd(tempdir())
img <- array(0:8, dim = rep(2, 3))
WriteIntImage(img[, , 1], "50_1.tif")
WriteIntImage(img[, , 2], "50_2.tif")
Stack2DTifs(c("50_1.tif", "50_2.tif"), "50_1_2")
if (!stringr::str_detect(tolower(Sys.info()['sysname']), "windows")) {
# these fail on windows due to an issue with readTIFF(..., as.is = TRUE)
expect_equal(ReadImageData("50_1_2.tif"),
abind::abind(img[, , 1], img[, , 2], along = 3),
check.attributes = FALSE)
}
suppressWarnings(file.remove(list.files()))
img <- ReadImageData(system.file("extdata", "50.tif",
package = "nandb"))
WriteIntImage(img[, , 1], "50_1.tif")
WriteIntImage(img[, , 1:2], "50_2.tif")
expect_error(Stack2DTifs(c("50_1.tif", "50_2.tif"), "50_1_2"), "same dim")
suppressWarnings(file.remove(list.files()))
img <- ReadImageData(system.file("extdata", "50.tif",
package = "nandb"))
WriteIntImage(img[, , 1:2], "50_1.tif")
WriteIntImage(img[, , 1:2], "50_2.tif")
expect_error(Stack2DTifs(c("50_1.tif", "50_2.tif"), "50_1_2"), "2-dim")
suppressWarnings(file.remove(list.files()))
})
test_that("WriteIntImage works", {
img <- ReadImageData(system.file("extdata", "50.tif",
package = "nandb"))
cwd <- getwd()
on.exit(setwd(cwd))
setwd(tempdir())
WriteIntImage(img, "50.tif")
if (!stringr::str_detect(tolower(Sys.info()['sysname']), "windows")) {
# these fail on windows due to an issue with readTIFF(..., as.is = TRUE)
expect_equal(ReadImageData("50.tif"), img, check.attributes = FALSE)
}
img[33] <- NA
expect_equal(WriteIntImage(img, "pimg.tiff", na = pi), img)
suppressWarnings(file.remove(list.files()))
})
test_that("ReadImageData works", {
cwd <- getwd()
on.exit(setwd(cwd))
setwd(tempdir())
file.path <- system.file('extdata', '50.tif', package = 'nandb')
expect_equal(EBImage::imageData(EBImage::readImage(file.path, as.is = TRUE)),
ReadImageData(file.path, 3))
expect_error(ReadImageData(file.path, TRUE), "integer")
expect_equal(max(ReadImageData(system.file("extdata", "needs_rescaling.tif",
package = "nandb"))),
6)
badwrite <- WriteIntImage(matrix(c(rep(0, 3), 256), nrow = 2), "badwrite.tif")
if (!stringr::str_detect(tolower(Sys.info()['sysname']), "windows")) {
# these fail on windows due to an issue with readTIFF(..., as.is = TRUE)
expect_equal(ReadImageData("badwrite.tif"), badwrite / 256)
}
suppressWarnings(file.remove(list.files()))
})
test_that("WriteImageTxt works", {
cwd <- getwd()
on.exit(setwd(cwd))
setwd(tempdir())
img <- ReadImageData(system.file('extdata', '50.tif', package = 'nandb'))
expect_equal(mean(unlist(WriteImageTxt(img, 'temp'))), mean(img))
expect_error(WriteImageTxt(1:3, "abc"), "dimension")
expect_error(WriteIntImage(matrix(0.5), "a"), "integer")
expect_error(WriteIntImage(matrix(-1), "a"), "negative")
WriteIntImage(matrix(2^9), "16bit")
expect_error(WriteIntImage(matrix(2 ^ 17)),
"The maximum value")
expect_equal(mean(unlist(WriteImageTxt(img, 'temp'))), mean(img))
img_01 <- ReadImageTxt("temp_01.csv")
expect_equal(img_01, img[, , 1], check.attributes = FALSE)
four.d <- array(1:(2^4), dim = rep(2, 4))
WriteImageTxt(four.d, "fourD")
for (i in 1:2) {
for (j in 1:2) {
expect_equal(ReadImageTxt(paste0("fourD_", i, "_", j, ".csv")),
four.d[, , i, j], check.attributes = FALSE)
}
}
suppressWarnings(file.remove(list.files()))
})
test_that("Bin2Tiff works", {
cwd <- getwd()
on.exit(setwd(cwd))
setwd(tempdir())
dir.create("temp_dir")
expect_true(file.copy(system.file("extdata", "b.bin", package = "nandb"),
"temp_dir"))
Bin2Tiff("temp_dir/b.bin", height = 2, width = 2, bits = 8)
Bin2TiffFolder("temp_dir", height = 2, width = 2, bits = 8)
if (!stringr::str_detect(tolower(Sys.info()['sysname']), "windows")) {
# these fail on windows due to an issue with readTIFF(..., as.is = TRUE)
expect_equal(list.files("temp_dir"), c("b.bin", "b.tif"))
setwd("temp_dir")
expect_equal(readBin("b.bin", "int", size = 1, n = 4),
as.vector(ReadImageData("b.tif")))
}
setwd("..")
filesstrings::dir.remove("temp_dir")
})
|
\name{is.ClinicalExperiment}
\alias{is.ClinicalExperiment}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Checks whether an object is a ClinicalExperiment object }
\description{
Checks whether an object is a ClinicalExperiment object.
}
\usage{
is.ClinicalExperiment(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ any object }
}
\details{
}
\value{
Returns \code{TRUE} if \code{x} is a ClinicalExperiment object, and \code{FALSE} otherwise.
}
\references{ }
\author{ Balasubramanian Narasimhan }
\note{ }
\seealso{ \code{\link{ClinicalExperiment}} }
\examples{
expt <- ClinicalExperiment(number.of.factors = 3,
number.of.factor.levels = c(2, 2, 3),
number.of.treatments = 3)
is.ClinicalExperiment(expt)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ design }
\keyword{ distribution }% __ONLY ONE__ keyword per line
| /man/is.ClinicalExperiment.Rd | no_license | atrihub/SRS | R | false | false | 910 | rd | \name{is.ClinicalExperiment}
\alias{is.ClinicalExperiment}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Checks if an object a ClinicalExperiment object }
\description{
Checks if an object a ClinicalExperiment object
}
\usage{
is.ClinicalExperiment(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ any object }
}
\details{
}
\value{
Returns true or false
}
\references{ }
\author{ Balasubramanian Narasimhan }
\note{ }
\seealso{ \code{\link{ClinicalExperiment}} }
\examples{
expt <- ClinicalExperiment(number.of.factors = 3,
number.of.factor.levels = c(2, 2, 3),
number.of.treatments = 3)
is.ClinicalExperiment(expt)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ design }
\keyword{ distribution }% __ONLY ONE__ keyword per line
|
# Auto-generated fuzzer regression harness: replays a fixed argument list
# (scalar bounds a/b plus an integer vector x containing extremes and NA)
# against the internal grattan:::anyOutside and prints the result structure.
testlist <- list(a = -63745L, b = -1L, x = c(-50780161L, -32768L, 2097164L, 16777215L, -10616833L, -163L, -14024705L, 117440511L, -1L, -1L, -1L, -63998L, -1L, -49153L, 671088639L, -1L, -1L, -1L, -1L, -1L, 65536L, 63996L, -114819308L, 336860180L, 336860180L, 336860180L, 336860180L, 336860180L, 336860180L, NA, 336860180L, 336860180L, 336860180L, 343932928L, 536873984L, -1L, 1560281088L, 704643071L, 100859903L, -1L, 721364991L, 570425343L, 2470399L, -65281L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
# Monte Carlo study of OLS slope bias and t-test size in the regression
#   y[t] = mu + phi * x[t-1] + e[t]
# with an exogenous uniform regressor x. Swap in the commented lines to
# study the classic AR(1) small-sample bias instead (y regressed on its
# own lag). The former top-level rm(list = ls()) was removed: scripts
# should not clear the caller's workspace.
#
# Arguments:
#   totrep - number of Monte Carlo replications
#   Tsize  - length of each simulated series
#   mu     - intercept of the data-generating process
#   phi    - true slope, also the null value of the t-test
#   alpha  - nominal size of the two-sided t-test
#   seed   - optional RNG seed for reproducibility
# Returns a list with:
#   slopes      - the totrep OLS slope estimates
#   bias        - mean(slopes) - phi
#   reject_rate - percentage of reps rejecting H0: slope == phi at level alpha
ar1_bias_sim <- function(totrep = 1000, Tsize = 30, mu = 0.1, phi = 0.95,
                         alpha = 0.05, seed = NULL) {
  if (!is.null(seed)) set.seed(seed)
  slopes <- numeric(totrep)          # preallocated, not grown in the loop
  rej <- 0L
  for (irep in seq_len(totrep)) {
    e <- rnorm(Tsize)
    y <- numeric(Tsize)
    # NOTE(review): this start-up value uses the AR(1) stationary standard
    # deviation sqrt(1 / (1 - phi^2)); it is only meaningful for the
    # commented autoregressive variant, but is kept for identical behaviour.
    y[1] <- mu + sqrt(1 / (1 - phi^2)) * e[1]
    x <- runif(Tsize)
    for (t in 2:Tsize) {
      y[t] <- mu + phi * x[t - 1] + e[t]
      # y[t] <- mu + phi * y[t - 1] + e[t]   # AR(1) variant
    }
    # OLS of y[2:T] on an intercept and the lagged regressor.
    xx <- cbind(1, x[1:(Tsize - 1)])
    # xx <- cbind(1, y[1:(Tsize - 1)])       # AR(1) variant
    yy <- y[2:Tsize]
    betahat <- solve(t(xx) %*% xx) %*% t(xx) %*% yy
    ehat <- yy - xx %*% betahat
    df <- nrow(xx) - ncol(xx)
    sigmahat <- sum(ehat^2) / df             # residual variance estimate
    varbeta <- sigmahat * solve(t(xx) %*% xx)
    # Two-sided t-test of H0: slope == phi.
    tstat <- (betahat[2] - phi) / sqrt(varbeta[2, 2])
    pval <- 2 * (1 - pt(abs(tstat), df))
    if (pval < alpha) rej <- rej + 1L
    slopes[irep] <- betahat[2]
  }
  list(slopes = slopes,
       bias = mean(slopes) - phi,
       reject_rate = 100 * rej / totrep)
}

res <- ar1_bias_sim()
print(res$bias)         # mean slope estimate minus true phi
print(res$reject_rate)  # empirical rejection rate (%) of the nominal 5% test
totrep <- 1000
stats <- matrix(0, totrep, 1)
rej <- 0
Tsize <- 30
for (irep in 1:totrep){
y <- matrix(0, Tsize, 1)
e <- rnorm(Tsize)
mu <- 0.1
phi <- 0.95
y[1] <- mu + sqrt(1/(1-phi^2))*e[1]
x <- runif(Tsize)
for (t in 2:Tsize){
y[t] <- mu + phi*x[t-1] + e[t]
# y[t] <- mu + phi*y[t-1] + e[t]
}
xx <- cbind(1,x[ 1 : (Tsize-1)] )
# xx <- cbind(1,y[ 1 : (Tsize-1)] )
yy <- y[2:Tsize]
betahat <- solve( t(xx) %*% xx) %*% t(xx) %*% yy
ehat <- yy - xx %*% betahat
df <- dim(xx)[1] - dim(xx)[2]
sigmahat<- t(ehat)%*%ehat/df
varbeta <- matrix(sigmahat,2,2) * solve( t(xx) %*% xx)
tstat <- (betahat[2]-phi)/sqrt(varbeta[2,2])
pval <- 2*(1- pt(abs(tstat),df))
if (pval < 0.05) rej <- rej +1
stats[irep] <- betahat[2]
}
print(mean(stats)-phi)
print(100*rej/totrep) |
#' Add a documentation title here
#'
#' Add a subtitle / brief summary here
#'
#' NOTE(review): this is an unfilled roxygen template for the `lef_anole`
#' dataset -- the title, summary, citation, and URL placeholders below must
#' be replaced with real content before release.
#'
#' @source {Enter dataset citation here}
#' \url{Add link for data package here}
"lef_anole"
| /R/lef_anole_doc.R | permissive | karenezhao/alohakez | R | false | false | 174 | r | #' Add a documentation title here
#'
#' Add a subtitle / brief summary here
#'
#' @source {Enter dataset citation here}
#' \url{Add link for data package here}
"lef_anole"
|
c5983bda0f9b5f3ccb9fdf89fedab00e tlc04-uniform-depth-120.qdimacs 42593 113601 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Miller-Marin/trafficlight-controller/tlc04-uniform-depth-120/tlc04-uniform-depth-120.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 77 | r | c5983bda0f9b5f3ccb9fdf89fedab00e tlc04-uniform-depth-120.qdimacs 42593 113601 |
# Per-chromosome insulation-score track plots for sample hTERT-TP2-R1.
# For each chromosome present in the cooltools insulation table, writes
# <outdir>/<chrom>.png containing a line plot of
# log2_insulation_score_500000 along the chromosome with a horizontal
# reference line at 0. The 24 copy-pasted per-chromosome stanzas of the
# original are replaced by one helper and a loop; the output files,
# dimensions, and plot contents are unchanged.
require(scales)
library(ggplot2)

# Plot one chromosome's insulation profile and write it to a PNG.
#   da     - data frame with columns chrom, start,
#            log2_insulation_score_500000
#   chrom  - chromosome name to subset on (also used as the x-axis label)
#   outdir - directory receiving <chrom>.png
plot_insulation_track <- function(da, chrom, outdir) {
  chrom_data <- da[which(da$chrom == chrom), ]
  png(file.path(outdir, paste0(chrom, ".png")), width = 4000, height = 1000)
  p <- ggplot(chrom_data, aes(x = start, y = log2_insulation_score_500000)) +
    geom_line() +
    geom_hline(yintercept = 0) +
    xlab(chrom) +
    scale_x_continuous(labels = comma) +
    theme(axis.text = element_text(size = 20),
          axis.title = element_text(size = 40))
  # ggplot objects are only auto-printed at top level; inside a function or
  # loop they must be print()ed explicitly for the device to receive them.
  print(p)
  dev.off()
}

chroms <- c(paste0("chr", 1:22), "chrX", "chrY")

da <- read.csv("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP2-R1.tsv", sep = "\t")
for (chrom in chroms) {
  plot_insulation_track(
    da, chrom,
    "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP2-R1")
}
# hTERT-TP4-R1-1: load the cooltools insulation table and write one
# insulation-score line plot per chromosome (chr1..chr22, chrX, chrY).
# print() is required so the plot renders when the script is source()d
# (bare top-level ggplot objects are only auto-printed interactively).
da <- read.csv("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP4-R1-1.tsv", sep = "\t")
out_dir <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP4-R1-1"
chroms <- paste0("chr", c(1:22, "X", "Y"))
for (chr in chroms) {
  # Subset the insulation table to one chromosome.
  newdata <- da[da$chrom == chr, ]
  png(file.path(out_dir, paste0(chr, ".png")), width = 4000, height = 1000)
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +   # reference line: insulation score of 0
      xlab(chr) +
      scale_x_continuous(labels = comma) +
      theme(axis.text = element_text(size = 20),
            axis.title = element_text(size = 40))
  )
  dev.off()
}
# hTERT-TP4-R1-2: load the cooltools insulation table and write one
# insulation-score line plot per chromosome (chr1..chr22, chrX, chrY).
# print() is required so the plot renders when the script is source()d
# (bare top-level ggplot objects are only auto-printed interactively).
da <- read.csv("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP4-R1-2.tsv", sep = "\t")
out_dir <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP4-R1-2"
chroms <- paste0("chr", c(1:22, "X", "Y"))
for (chr in chroms) {
  # Subset the insulation table to one chromosome.
  newdata <- da[da$chrom == chr, ]
  png(file.path(out_dir, paste0(chr, ".png")), width = 4000, height = 1000)
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +   # reference line: insulation score of 0
      xlab(chr) +
      scale_x_continuous(labels = comma) +
      theme(axis.text = element_text(size = 20),
            axis.title = element_text(size = 40))
  )
  dev.off()
}
# hTERT-TP5-R1: load the cooltools insulation table and write one
# insulation-score line plot per chromosome (chr1..chr22, chrX, chrY).
# print() is required so the plot renders when the script is source()d
# (bare top-level ggplot objects are only auto-printed interactively).
da <- read.csv("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP5-R1.tsv", sep = "\t")
out_dir <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP5-R1"
chroms <- paste0("chr", c(1:22, "X", "Y"))
for (chr in chroms) {
  # Subset the insulation table to one chromosome.
  newdata <- da[da$chrom == chr, ]
  png(file.path(out_dir, paste0(chr, ".png")), width = 4000, height = 1000)
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +   # reference line: insulation score of 0
      xlab(chr) +
      scale_x_continuous(labels = comma) +
      theme(axis.text = element_text(size = 20),
            axis.title = element_text(size = 40))
  )
  dev.off()
}
# hTERT-TP6-R1: load the cooltools insulation table and write one
# insulation-score line plot per chromosome. This block covers chr1..chr9;
# the remaining chromosomes for this sample follow below. print() is
# required so the plot renders when the script is source()d (bare
# top-level ggplot objects are only auto-printed interactively).
da <- read.csv("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1.tsv", sep = "\t")
out_dir <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1"
chroms <- paste0("chr", 1:9)
for (chr in chroms) {
  # Subset the insulation table to one chromosome.
  newdata <- da[da$chrom == chr, ]
  png(file.path(out_dir, paste0(chr, ".png")), width = 4000, height = 1000)
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +   # reference line: insulation score of 0
      xlab(chr) +
      scale_x_continuous(labels = comma) +
      theme(axis.text = element_text(size = 20),
            axis.title = element_text(size = 40))
  )
  dev.off()
}
newdata = da[which(da$chrom=='chr10'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1/chr10.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr10")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr11'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1/chr11.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr11")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr12'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1/chr12.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr12")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr13'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1/chr13.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr13")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr14'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1/chr14.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr14")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr15'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1/chr15.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr15")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr16'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1/chr16.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr16")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr17'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1/chr17.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr17")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr18'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1/chr18.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr18")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr19'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1/chr19.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr19")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr20'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1/chr20.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr20")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr21'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1/chr21.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr21")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr22'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1/chr22.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr22")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chrX'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1/chrX.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chrX")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chrY'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1/chrY.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chrY")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
# Sample PDL25-TP2-R1: plot the log2 insulation score (500 kb window,
# cooltools output at 100 bp resolution) along each chromosome, writing one
# PNG per chromosome into the sample's output directory.
#
# Replaces 24 copy-pasted subset/png/ggplot/dev.off stanzas with a single
# loop. NOTE: inside a loop a ggplot object is not auto-printed, so print()
# is required — this also makes the script safe under source(), where even
# top-level ggplot calls are not auto-printed.
da <- read.csv(
  "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL25-TP2-R1.tsv",
  sep = "\t"
)
out_dir <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL25-TP2-R1"
for (chr in c(paste0("chr", 1:22), "chrX", "chrY")) {
  newdata <- da[which(da$chrom == chr), ]
  png(file.path(out_dir, paste0(chr, ".png")), width = 4000, height = 1000)
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +  # reference line: insulation ratio of 1
      xlab(chr) +
      scale_x_continuous(labels = comma) +  # genomic coords with thousands separators
      theme(axis.text = element_text(size = 20),
            axis.title = element_text(size = 40))
  )
  dev.off()
}
# Sample PDL33-TP4-R1: plot the log2 insulation score (500 kb window,
# cooltools output at 100 bp resolution) along each chromosome, writing one
# PNG per chromosome into the sample's output directory.
#
# Replaces 24 copy-pasted subset/png/ggplot/dev.off stanzas with a single
# loop. NOTE: inside a loop a ggplot object is not auto-printed, so print()
# is required — this also makes the script safe under source(), where even
# top-level ggplot calls are not auto-printed.
da <- read.csv(
  "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL33-TP4-R1.tsv",
  sep = "\t"
)
out_dir <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL33-TP4-R1"
for (chr in c(paste0("chr", 1:22), "chrX", "chrY")) {
  newdata <- da[which(da$chrom == chr), ]
  png(file.path(out_dir, paste0(chr, ".png")), width = 4000, height = 1000)
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +  # reference line: insulation ratio of 1
      xlab(chr) +
      scale_x_continuous(labels = comma) +  # genomic coords with thousands separators
      theme(axis.text = element_text(size = 20),
            axis.title = element_text(size = 40))
  )
  dev.off()
}
# Sample PDL37-TP5-R1: plot the log2 insulation score (500 kb window,
# cooltools output at 100 bp resolution) along chr1-chr17, writing one PNG
# per chromosome into the sample's output directory. (The remaining
# chromosomes for this sample are handled by the statements that follow.)
#
# Replaces 17 copy-pasted subset/png/ggplot/dev.off stanzas with a single
# loop. NOTE: inside a loop a ggplot object is not auto-printed, so print()
# is required — this also makes the script safe under source(), where even
# top-level ggplot calls are not auto-printed.
da <- read.csv(
  "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL37-TP5-R1.tsv",
  sep = "\t"
)
out_dir <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL37-TP5-R1"
for (chr in paste0("chr", 1:17)) {
  newdata <- da[which(da$chrom == chr), ]
  png(file.path(out_dir, paste0(chr, ".png")), width = 4000, height = 1000)
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +  # reference line: insulation ratio of 1
      xlab(chr) +
      scale_x_continuous(labels = comma) +  # genomic coords with thousands separators
      theme(axis.text = element_text(size = 20),
            axis.title = element_text(size = 40))
  )
  dev.off()
}
newdata = da[which(da$chrom=='chr18'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL37-TP5-R1/chr18.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr18")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr19'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL37-TP5-R1/chr19.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr19")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr20'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL37-TP5-R1/chr20.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr20")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr21'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL37-TP5-R1/chr21.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr21")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr22'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL37-TP5-R1/chr22.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr22")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chrX'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL37-TP5-R1/chrX.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chrX")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chrY'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL37-TP5-R1/chrY.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chrY")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
# ---- Sample PDL46-TP6-R1 ------------------------------------------------
# Read the cooltools insulation table (100 kb resolution, 500 kb window)
# and draw one line plot of the log2 insulation score per chromosome,
# saved as <sample_dir>/<chrom>.png.
pdl46_base <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500"
pdl46_id <- "PDL46-TP6-R1"
# `da` is intentionally left at top level: each sample section reassigns it.
da <- read.csv(file.path(pdl46_base, paste0(pdl46_id, ".tsv")), sep = "\t")
for (chrom in c(paste0("chr", 1:22), "chrX", "chrY")) {
  newdata <- da[da$chrom == chrom, ]
  png(file.path(pdl46_base, pdl46_id, paste0(chrom, ".png")),
      width = 4000, height = 1000)
  # print() is required: ggplot objects are NOT auto-printed inside a loop
  # (or under source()); without it the PNG files come out blank.
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +
      xlab(chrom) +
      scale_x_continuous(labels = comma) +  # comma-formatted bp coordinates
      theme(axis.text = element_text(size = 20),
            axis.title = element_text(size = 40))
  )
  dev.off()
}
# Dependencies: scales supplies the comma() axis-label formatter used for
# genomic coordinates; ggplot2 does the plotting.
# library() (not require()) so a missing package fails loudly at startup.
library(scales)
library(ggplot2)
# ---- Sample hTERT-TP2-R1 ------------------------------------------------
# Read the cooltools insulation table (100 kb resolution, 500 kb window)
# and draw one line plot of the log2 insulation score per chromosome,
# saved as <sample_dir>/<chrom>.png.
htert_tp2_base <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500"
htert_tp2_id <- "hTERT-TP2-R1"
# `da` is intentionally left at top level: each sample section reassigns it.
da <- read.csv(file.path(htert_tp2_base, paste0(htert_tp2_id, ".tsv")),
               sep = "\t")
for (chrom in c(paste0("chr", 1:22), "chrX", "chrY")) {
  newdata <- da[da$chrom == chrom, ]
  png(file.path(htert_tp2_base, htert_tp2_id, paste0(chrom, ".png")),
      width = 4000, height = 1000)
  # print() is required: ggplot objects are NOT auto-printed inside a loop
  # (or under source()); without it the PNG files come out blank.
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +
      xlab(chrom) +
      scale_x_continuous(labels = comma) +  # comma-formatted bp coordinates
      theme(axis.text = element_text(size = 20),
            axis.title = element_text(size = 40))
  )
  dev.off()
}
# ---- Sample hTERT-TP4-R1-1 ----------------------------------------------
# Read the cooltools insulation table (100 kb resolution, 500 kb window)
# and draw one line plot of the log2 insulation score per chromosome,
# saved as <sample_dir>/<chrom>.png.
htert_tp4_1_base <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500"
htert_tp4_1_id <- "hTERT-TP4-R1-1"
# `da` is intentionally left at top level: each sample section reassigns it.
da <- read.csv(file.path(htert_tp4_1_base, paste0(htert_tp4_1_id, ".tsv")),
               sep = "\t")
for (chrom in c(paste0("chr", 1:22), "chrX", "chrY")) {
  newdata <- da[da$chrom == chrom, ]
  png(file.path(htert_tp4_1_base, htert_tp4_1_id, paste0(chrom, ".png")),
      width = 4000, height = 1000)
  # print() is required: ggplot objects are NOT auto-printed inside a loop
  # (or under source()); without it the PNG files come out blank.
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +
      xlab(chrom) +
      scale_x_continuous(labels = comma) +  # comma-formatted bp coordinates
      theme(axis.text = element_text(size = 20),
            axis.title = element_text(size = 40))
  )
  dev.off()
}
# Insulation-score line plots for sample hTERT-TP4-R1-2.
# Reads the cooltools insulation table (100 bp bins, 500 kb window) and writes
# one PNG per chromosome showing the log2 insulation score along the
# chromosome with a y = 0 reference line.
da <- read.csv(
  "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP4-R1-2.tsv",
  sep = "\t"
)
out_dir <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP4-R1-2"
for (chr in c(paste0("chr", 1:22), "chrX", "chrY")) {
  newdata <- da[which(da$chrom == chr), ]
  png(file.path(out_dir, paste0(chr, ".png")), width = 4000, height = 1000)
  # ggplot objects are only auto-printed at the top level of an interactive
  # session; inside a loop (or a sourced script) print() is required,
  # otherwise the PNG comes out blank.
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +
      xlab(chr) +
      scale_x_continuous(labels = comma) +
      theme(
        axis.text = element_text(size = 20),
        axis.title = element_text(size = 40)
      )
  )
  dev.off()
}
# Insulation-score line plots for sample hTERT-TP5-R1.
# Reads the cooltools insulation table (100 bp bins, 500 kb window) and writes
# one PNG per chromosome showing the log2 insulation score along the
# chromosome with a y = 0 reference line.
da <- read.csv(
  "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP5-R1.tsv",
  sep = "\t"
)
out_dir <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP5-R1"
for (chr in c(paste0("chr", 1:22), "chrX", "chrY")) {
  newdata <- da[which(da$chrom == chr), ]
  png(file.path(out_dir, paste0(chr, ".png")), width = 4000, height = 1000)
  # ggplot objects are only auto-printed at the top level of an interactive
  # session; inside a loop (or a sourced script) print() is required,
  # otherwise the PNG comes out blank.
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +
      xlab(chr) +
      scale_x_continuous(labels = comma) +
      theme(
        axis.text = element_text(size = 20),
        axis.title = element_text(size = 40)
      )
  )
  dev.off()
}
# Insulation-score line plots for sample hTERT-TP6-R1.
# Reads the cooltools insulation table (100 bp bins, 500 kb window) and writes
# one PNG per chromosome showing the log2 insulation score along the
# chromosome with a y = 0 reference line.
da <- read.csv(
  "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1.tsv",
  sep = "\t"
)
out_dir <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/hTERT-TP6-R1"
for (chr in c(paste0("chr", 1:22), "chrX", "chrY")) {
  newdata <- da[which(da$chrom == chr), ]
  png(file.path(out_dir, paste0(chr, ".png")), width = 4000, height = 1000)
  # ggplot objects are only auto-printed at the top level of an interactive
  # session; inside a loop (or a sourced script) print() is required,
  # otherwise the PNG comes out blank.
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +
      xlab(chr) +
      scale_x_continuous(labels = comma) +
      theme(
        axis.text = element_text(size = 20),
        axis.title = element_text(size = 40)
      )
  )
  dev.off()
}
# Per-chromosome insulation-score line plots for sample PDL25-TP2-R1.
# Collapses 24 copy-pasted {subset, png, ggplot, dev.off} stanzas into one
# loop. print() around the ggplot call is required: when this script is
# source()'d non-interactively, top-level ggplot objects are not
# auto-printed and every PNG would come out blank.
base_dir <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500"
da <- read.csv(file.path(base_dir, "PDL25-TP2-R1.tsv"), sep = "\t")
for (chr in paste0("chr", c(1:22, "X", "Y"))) {
  # which() preserves the original NA-dropping subset behavior
  newdata <- da[which(da$chrom == chr), ]
  png(file.path(base_dir, "PDL25-TP2-R1", paste0(chr, ".png")),
      width = 4000, height = 1000)
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +
      xlab(chr) +
      scale_x_continuous(labels = comma) +
      theme(axis.text = element_text(size = 20),
            axis.title = element_text(size = 40))
  )
  dev.off()
}
# Per-chromosome insulation-score line plots for sample PDL33-TP4-R1.
# Collapses 24 copy-pasted {subset, png, ggplot, dev.off} stanzas into one
# loop. print() around the ggplot call is required: when this script is
# source()'d non-interactively, top-level ggplot objects are not
# auto-printed and every PNG would come out blank.
base_dir <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500"
da <- read.csv(file.path(base_dir, "PDL33-TP4-R1.tsv"), sep = "\t")
for (chr in paste0("chr", c(1:22, "X", "Y"))) {
  # which() preserves the original NA-dropping subset behavior
  newdata <- da[which(da$chrom == chr), ]
  png(file.path(base_dir, "PDL33-TP4-R1", paste0(chr, ".png")),
      width = 4000, height = 1000)
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +
      xlab(chr) +
      scale_x_continuous(labels = comma) +
      theme(axis.text = element_text(size = 20),
            axis.title = element_text(size = 40))
  )
  dev.off()
}
# Per-chromosome insulation-score line plots for sample PDL37-TP5-R1.
# Collapses 24 copy-pasted {subset, png, ggplot, dev.off} stanzas into one
# loop. print() around the ggplot call is required: when this script is
# source()'d non-interactively, top-level ggplot objects are not
# auto-printed and every PNG would come out blank.
base_dir <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500"
da <- read.csv(file.path(base_dir, "PDL37-TP5-R1.tsv"), sep = "\t")
for (chr in paste0("chr", c(1:22, "X", "Y"))) {
  # which() preserves the original NA-dropping subset behavior
  newdata <- da[which(da$chrom == chr), ]
  png(file.path(base_dir, "PDL37-TP5-R1", paste0(chr, ".png")),
      width = 4000, height = 1000)
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +
      xlab(chr) +
      scale_x_continuous(labels = comma) +
      theme(axis.text = element_text(size = 20),
            axis.title = element_text(size = 40))
  )
  dev.off()
}
da = read.csv("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL46-TP6-R1.tsv",sep = "\t")
newdata = da[which(da$chrom=='chr1'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL46-TP6-R1/chr1.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr1")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr2'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL46-TP6-R1/chr2.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr2")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr3'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL46-TP6-R1/chr3.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr3")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr4'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL46-TP6-R1/chr4.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr4")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr5'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL46-TP6-R1/chr5.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr5")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
newdata = da[which(da$chrom=='chr6'),]
png("/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL46-TP6-R1/chr6.png",width=4000,height=1000)
ggplot(newdata,aes(x=start,y=log2_insulation_score_500000))+geom_line()+ geom_hline(yintercept = 0) + xlab("chr6")+scale_x_continuous(labels = comma)+theme(axis.text=element_text(size=20),axis.title=element_text(size=40))
dev.off()
# Plot the 500 kb log2 insulation score along each chromosome (chr7-chr22,
# chrX, chrY) and save each as a wide PNG. This loop replaces 18 copy-pasted
# per-chromosome blocks. NOTE: inside a loop, ggplot objects do not autoprint,
# so the plot must be wrapped in print() to be rendered onto the png device.
# Assumes `da` (with columns chrom, start, log2_insulation_score_500000) and
# ggplot2/scales are already loaded earlier in the script.
out_dir <- "/home/snehal/Downloads/Calico/cooltool_analysis/Insulation_res100_win500/PDL46-TP6-R1"
for (chrom_name in c(paste0("chr", 7:22), "chrX", "chrY")) {
  newdata <- da[which(da$chrom == chrom_name), ]
  png(file.path(out_dir, paste0(chrom_name, ".png")), width = 4000, height = 1000)
  print(
    ggplot(newdata, aes(x = start, y = log2_insulation_score_500000)) +
      geom_line() +
      geom_hline(yintercept = 0) +
      xlab(chrom_name) +
      scale_x_continuous(labels = comma) +
      theme(axis.text = element_text(size = 20),
            axis.title = element_text(size = 40))
  )
  dev.off()
}
|
#' Build a column matrix of z-variable names.
#'
#' @param n Integer; half the number of rows in the returned matrix.
#' @param type Either "unequal" (one distinct label per row: z1..z(2n))
#'   or "equal" (n copies of "z1" followed by n copies of "z2").
#' @return A (2n x 1) character matrix of variable names.
getZCol <- function(n, type) {
  if (type == "unequal") {
    # One distinct label per row: z1, z2, ..., z(2n).
    nameList <- paste0("z", seq_len(2 * n))
  } else if (type == "equal") {
    # Two groups of n identical labels.
    nameList <- c(rep("z1", n), rep("z2", n))
  } else {
    # The original silently returned NULL for an unknown type; fail loudly.
    stop("type must be either 'unequal' or 'equal'")
  }
  matrix(nameList, ncol = 1)
}
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plan-todo.R
\name{post_todo}
\alias{post_todo}
\title{Post to-do list (issues) to GitHub repository}
\usage{
post_todo(ref, todo, distinct = TRUE)
}
\arguments{
\item{ref}{Repository reference (list) created by \code{create_repo_ref()}}
\item{todo}{To-do R list structure as read with \code{read_todo()}}
\item{distinct}{Logical value to denote whether issues with the same title
as a current open issue should be allowed. Passed to \code{get_issues()}}
}
\value{
Number (identifier) of posted issue
}
\description{
Post custom to-do lists (i.e. issues) based on yaml read in by \code{read_todo}.
Please see the "Building Custom Plans" vignette for details.
}
\details{
Currently has a known bug in that it cannot be used to introduce new labels.
}
\examples{
\dontrun{
# This example uses example file included in pkg
# You should be able to run example as-is after creating your own repo reference
file_path <- system.file("extdata", "todo.yml", package = "projmgr", mustWork = TRUE)
my_todo <- read_todo(file_path)
post_todo(ref, my_todo)
}
}
\seealso{
Other plans and todos:
\code{\link{post_plan}()},
\code{\link{read_plan}()},
\code{\link{read_todo}()},
\code{\link{report_plan}()},
\code{\link{report_todo}()},
\code{\link{template_yaml}()}
}
\concept{plans and todos}
| /man/post_todo.Rd | permissive | emilyriederer/projmgr | R | false | true | 1,351 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plan-todo.R
\name{post_todo}
\alias{post_todo}
\title{Post to-do list (issues) to GitHub repository}
\usage{
post_todo(ref, todo, distinct = TRUE)
}
\arguments{
\item{ref}{Repository reference (list) created by \code{create_repo_ref()}}
\item{todo}{To-do R list structure as read with \code{read_todo()}}
\item{distinct}{Logical value to denote whether issues with the same title
as a current open issue should be allowed. Passed to \code{get_issues()}}
}
\value{
Number (identifier) of posted issue
}
\description{
Post custom to-do lists (i.e. issues) based on yaml read in by \code{read_todo}.
Please see the "Building Custom Plans" vignette for details.
}
\details{
Currently has a known bug in that it cannot be used to introduce new labels.
}
\examples{
\dontrun{
# This example uses example file included in pkg
# You should be able to run example as-is after creating your own repo reference
file_path <- system.file("extdata", "todo.yml", package = "projmgr", mustWork = TRUE)
my_todo <- read_todo(file_path)
post_todo(ref, my_todo)
}
}
\seealso{
Other plans and todos:
\code{\link{post_plan}()},
\code{\link{read_plan}()},
\code{\link{read_todo}()},
\code{\link{report_plan}()},
\code{\link{report_todo}()},
\code{\link{template_yaml}()}
}
\concept{plans and todos}
|
#' Miscellaneous Functions for Community Ecology
#'
#' This is just a small collection of miscellaneous functions
#' that may be useful, primarily for community ecology analyses,
#' particularly for paleoecological data. They are here mainly for
#' pedagogical reasons (i.e. for students) as they don't appear
#' to be available in other ecology-focused packages.
#' @param x The community abundance matrix. Must be a matrix with two dimensions for
#' \code{pairwiseSpearmanRho}; if a vector is supplied to \code{HurlbertPIE}, then
#' it is treated as if it was matrix with a single row and number of columns equal to
#' its length. Taxonomic units are assumed to be the columns and sites (samples)
#' are assumed to be the rows, for both functions.
#' @param dropAbsent Should absent taxa be dropped? Must be one of either 'bothAbsent' (drop taxa
#' absent in both sites for a given pairwise comparison),'eitherAbsent' (drop taxa absent in either
#' site), or 'noDrop' (drop none of the taxa). The default 'bothAbsent' is recommended, see examples.
#' @param asDistance Should the rho coefficients be rescaled on a scale similar to
#' dissimilarity metrics, i.e. bounded 0 to 1, with 1 representing maximum dissimilarity (i.e.
#' a Spearman rho correlation of -1)? ( dissimilarity = (1 - rho) / 2 )
#' @param diag Should the diagonal of the output distance matrix be included?
#' @param upper Should the upper triangle of the output distance matrix be included?
#' @param na.rm Should taxa listed with NA values be dropped from a pair-wise site comparison?
#' If FALSE, the returned value for that site pair will be NA if NAs are present.
#' @param nAnalyze Allows users to select that PIE be calculated only on \code{nAnalyze} most
#' abundant taxa in a site sample. \code{nAnalyze} must be a vector of length 1, consisting of
#' whole-number value at least equal to 2. By default, \code{nAnalyze = Inf} so all taxa are
#' accepted. Note that if there are less taxa in a sample than nAnalyze, the number present will
#' be used.
#' @details
#' \code{pairwiseSpearmanRho} returns Spearman rho correlation coefficients
#' based on the rank abundances of taxa (columns) within sites (rows) from
#' the input matrix, by internally wrapping the function \code{cor.test}.
#' It allows for various options that ultimately allow
#' for dropping taxa not shared between two sites (the default), as well as
#' several other options. This allows the rho coefficient to behave like the
#' Bray-Curtis distance, in that it is not affected by the number of taxa absent
#' in both sites.
#'
#' \code{pairwiseSpearmanRho} can also rescale the rho coefficients with (1-rho)/2
#' to provide a measure similar to a dissimilarity metric, bounded between 0 and 1.
#' This function was written so several arguments would be in a similar format to
#' the \code{vegan} library function \code{vegdist}. If used to obtain rho
#' rescaled as a dissimilarity, the default output will be the lower triangle of
#' a distance matrix object, just as is returned by default by \code{vegdist}.
#' This behavior can be modified via the arguments for including the diagonal
#' and upper triangle of the matrix. Otherwise, a full matrix is returned (by default)
#' if the \code{asDistance} argument is not enabled.
#'
#' \code{HurlbertPIE} provides the Probability of Interspecific Encounter metric for
#' relative community abundance data, a commonly used metric for evenness of community
#' abundance data based on derivations in Hurlbert (1971). An optional argument allows
#' users to apply Hurlbert's PIE to only a subselection of the most abundant taxa.
#' @return
#' \code{pairwiseSpearmanRho} will return either a full matrix (the default) or (if
#' \code{asDistance} is true, a distance matrix, with only the lower triangle
#' shown (by default). See details.
#'
#' \code{HurlbertPIE} returns a named vector of PIE values for the input data.
#' @aliases communityEcology pairwiseSpearmanRho HurlbertPIE
#' @seealso
#' Example dataset: \code{\link{kanto}}
#' @name communityEcology
#' @author David W. Bapst
#' @references
#' Hurlbert, S. H. 1971. The nonconcept of species diversity:
#' a critique and alternative parameters. \emph{Ecology} 52(4):577-586.
#' @examples
#'
#' # let's load some example data:
#' # a classic dataset collected by Satoshi and Okido from the Kanto region
#'
#' data(kanto)
#'
#' rhoBothAbsent<-pairwiseSpearmanRho(kanto,dropAbsent="bothAbsent")
#'
#' #other dropping options
#' rhoEitherAbsent<-pairwiseSpearmanRho(kanto,dropAbsent="eitherAbsent")
#' rhoNoDrop<-pairwiseSpearmanRho(kanto,dropAbsent="noDrop")
#'
#' #compare
#' layout(1:3)
#' lim<-c(-1,1)
#' plot(rhoBothAbsent, rhoEitherAbsent, xlim=lim, ylim=lim)
#' abline(0,1)
#' plot(rhoBothAbsent, rhoNoDrop, xlim=lim, ylim=lim)
#' abline(0,1)
#' plot(rhoEitherAbsent, rhoNoDrop, xlim=lim, ylim=lim)
#' abline(0,1)
#' layout(1)
#'
#' #using dropAbsent="eitherAbsent" reduces the number of taxa so much that
#' # the number of taxa present drops too low to be useful
#' #dropping none of the taxa restricts the rho measures to high coefficients
#' # due to the many shared 0s for absent taxa
#'
#' #############
#'
#' # Try the rho coefficients as a rescaled dissimilarity
#' rhoDist<-pairwiseSpearmanRho(kanto,asDistance=TRUE,dropAbsent="bothAbsent")
#'
#' # What happens if we use these in typical distance matrix based analyses?
#'
#' # Cluster analysis
#' clustRes<-hclust(rhoDist)
#' plot(clustRes)
#'
#' # Principal Coordinates Analysis
#' pcoRes <- pcoa(rhoDist,correction="lingoes")
#' scores <- pcoRes$vectors
#' #plot the PCO
#' plot(scores,type="n")
#' text(labels=rownames(kanto),scores[,1],scores[,2],cex=0.5)
#'
#' ##################################
#'
#' # measuring evenness with Hurlbert's PIE
#'
#' kantoPIE<-HurlbertPIE(kanto)
#'
#' #histogram
#' hist(kantoPIE)
#' #evenness of the kanto data is fairly high
#'
#' #barplot
#' parX<-par(mar=c(7,5,3,3))
#' barplot(kantoPIE,las=3,cex.names=0.7,
#' ylab="Hurlbert's PIE",ylim=c(0.5,1),xpd=FALSE)
#' par(parX)
#'
#' #and we can see that the Tower has extremely low unevenness
#' #...overly high abundance of ghosts?
#'
#' #let's look at evenness of 5 most abundant taxa
#' kantoPIE_5<-HurlbertPIE(kanto,nAnalyze=5)
#'
#' #barplot
#' parX<-par(mar=c(7,5,3,3))
#' barplot(kantoPIE_5,las=3,cex.names=0.7,
#' ylab="Hurlbert's PIE for 5 most abundant taxa",ylim=c(0.5,1),xpd=FALSE)
#' par(parX)
#' @rdname communityEcology
#' @export
pairwiseSpearmanRho <- function(x, dropAbsent = "bothAbsent", asDistance = FALSE,
    diag = NULL, upper = NULL, na.rm = FALSE) {
  # For ecology: species are columns, samples (sites) are rows.
  if (length(dim(x)) != 2) {
    stop("x must be a matrix, with samples as rows")
  }
  dropPar <- c("bothAbsent", "eitherAbsent", "noDrop")
  dropAbsent <- dropPar[pmatch(dropAbsent, dropPar)]
  # Default matrix layout differs between the distance-style and
  # full-matrix outputs, matching vegan::vegdist conventions.
  if (asDistance) {
    if (is.null(diag)) { diag <- FALSE }
    if (is.null(upper)) { upper <- FALSE }
  } else {
    if (is.null(diag)) { diag <- TRUE }
    if (is.null(upper)) { upper <- TRUE }
  }
  if (is.na(dropAbsent)) {
    stop(paste0("dropAbsent must be one of ",
      paste(dropPar, collapse = ", ")))
  }
  nSites <- nrow(x)
  # Preallocate as numeric (the original used a logical NA matrix);
  # seq_len() also makes a 0-row input a no-op instead of an error.
  rhos <- matrix(NA_real_, nSites, nSites)
  # Compute only the lower triangle (j <= i); each value is mirrored
  # into [j, i] as it is filled in, so the result is symmetric.
  for (i in seq_len(nSites)) {
    for (j in seq_len(i)) {
      # If NAs are not being removed and either sample has any,
      # the comparison is undefined.
      if (!na.rm && any(is.na(x[c(i, j), ]))) {
        rhos[i, j] <- rhos[j, i] <- NA
      } else {
        if (na.rm) {
          selector <- !is.na(x[i, ]) & !is.na(x[j, ])
        } else {
          selector <- rep(TRUE, ncol(x))
        }
        # Optionally drop taxa absent in both (or either) site.
        if (dropAbsent == "bothAbsent") {
          selector <- selector & (x[i, ] != 0 | x[j, ] != 0)
        }
        if (dropAbsent == "eitherAbsent") {
          selector <- selector & (x[i, ] != 0 & x[j, ] != 0)
        }
        if (sum(selector) < 2) {
          # Fewer than two comparable taxa: correlation is undefined.
          rhos[i, j] <- rhos[j, i] <- NA
        } else {
          # suppressWarnings: cor.test warns about tied ranks when
          # computing exact p-values; only the estimate is used here.
          rho <- suppressWarnings(
            cor.test(x[i, selector], x[j, selector],
              method = "spearman")$estimate
          )
          rhos[i, j] <- rhos[j, i] <- rho
        }
      }
    }
  }
  colnames(rhos) <- rownames(rhos) <- rownames(x)
  if (asDistance) {
    # Rescale rho (-1..1) to a dissimilarity bounded 0..1.
    rhos <- (1 - rhos) / 2
    result <- as.dist(rhos)
    attr(result, "Diag") <- diag
    attr(result, "Upper") <- upper
  } else {
    result <- rhos
  }
  return(result)
}
#' @rdname communityEcology
#' @export
HurlbertPIE <- function(x, nAnalyze = Inf) {
  # A bare vector is treated as a single sample (one-row matrix).
  if (is.vector(x)) {
    x <- matrix(x, 1, length(x))
  }
  # nAnalyze must be a single whole number >= 2 (Inf means "use all taxa").
  if (length(nAnalyze) != 1 || !is.numeric(nAnalyze) || nAnalyze < 2) {
    stop("nAnalyze must be a numeric vector of length 1, with a value at least equal to 2")
  }
  if (!is.infinite(nAnalyze)) {
    if (!is.integer(nAnalyze)) {
      nAnalyze2 <- as.integer(nAnalyze)
      if (nAnalyze2 != nAnalyze) {
        stop("nAnalyze must be a whole number")
      }
      nAnalyze <- nAnalyze2
    }
  }
  # Preallocate instead of growing the result inside the loop;
  # seq_len() also keeps a 0-row input from erroring.
  PIE <- numeric(nrow(x))
  for (i in seq_len(nrow(x))) {
    samp <- x[i, ]
    # Keep only the nAnalyze most abundant taxa
    # (ties in rank() may retain fewer than nAnalyze).
    if (!is.infinite(nAnalyze)) {
      samp <- samp[rank(-samp) < (nAnalyze + 1)]
    }
    # Drop absent taxa before counting diversity.
    samp <- samp[samp > 0]
    diversity <- length(samp)
    if (diversity > 1) {
      # Hurlbert's (1971) PIE with the D/(D-1) small-sample correction.
      PIE[i] <- diversity / (diversity - 1) * (1 - sum((samp / sum(samp))^2))
    } else {
      # Zero or one taxon: no interspecific encounters are possible.
      PIE[i] <- 0
    }
  }
  names(PIE) <- rownames(x)
  return(PIE)
}
| /paleotree/R/communityEcology.R | no_license | ingted/R-Examples | R | false | false | 9,553 | r | #' Miscellaneous Functions for Community Ecology
#'
#' This is just a small collection of miscellaneous functions
#' that may be useful, primarily for community ecology analyses,
#' particularly for paleoecological data. They are here mainly for
#' pedagogical reasons (i.e. for students) as they don't appear
#' to be available in other ecology-focused packages.
#' @param x The community abundance matrix. Must be a matrix with two dimensions for
#' \code{pairwiseSpearmanRho}; if a vector is supplied to \code{HurlbertPIE}, then
#' it is treated as if it was matrix with a single row and number of columns equal to
#' its length. Taxonomic units are assumed to be the columns and sites (samples)
#' are assumed to be the rows, for both functions.
#' @param dropAbsent Should absent taxa be dropped? Must be one of either 'bothAbsent' (drop taxa
#' absent in both sites for a given pairwise comparison),'eitherAbsent' (drop taxa absent in either
#' site), or 'noDrop' (drop none of the taxa). The default 'bothAbsent' is recommended, see examples.
#' @param asDistance Should the rho coefficients be rescaled on a scale similar to
#' dissimilarity metrics, i.e. bounded 0 to 1, with 1 representing maximum dissimilarity (i.e.
#' a Spearman rho correlation of -1)? ( dissimilarity = (1 - rho) / 2 )
#' @param diag Should the diagonal of the output distance matrix be included?
#' @param upper Should the upper triangle of the output distance matrix be included?
#' @param na.rm Should taxa listed with NA values be dropped from a pair-wise site comparison?
#' If FALSE, the returned value for that site pair will be NA if NAs are present.
#' @param nAnalyze Allows users to select that PIE be calculated only on \code{nAnalyze} most
#' abundant taxa in a site sample. \code{nAnalyze} must be a vector of length 1, consisting of
#' whole-number value at least equal to 2. By default, \code{nAnalyze = Inf} so all taxa are
#' accepted. Note that if there are less taxa in a sample than nAnalyze, the number present will
#' be used.
#' @details
#' \code{pairwiseSpearmanRho} returns Spearman rho correlation coefficients
#' based on the rank abundances of taxa (columns) within sites (rows) from
#' the input matrix, by internally wrapping the function \code{cor.test}.
#' It allows for various options that ultimately allow
#' for dropping taxa not shared between two sites (the default), as well as
#' several other options. This allows the rho coefficient to behave like the
#' Bray-Curtis distance, in that it is not affected by the number of taxa absent
#' in both sites.
#'
#' \code{pairwiseSpearmanRho} can also rescale the rho coefficients with (1-rho)/2
#' to provide a measure similar to a dissimilarity metric, bounded between 0 and 1.
#' This function was written so several arguments would be in a similar format to
#' the \code{vegan} library function \code{vegdist}. If used to obtain rho
#' rescaled as a dissimilarity, the default output will be the lower triangle of
#' a distance matrix object, just as is returned by default by \code{vegdist}.
#' This behavior can be modified via the arguments for including the diagonal
#' and upper triangle of the matrix. Otherwise, a full matrix is returned (by default)
#' if the \code{asDistance} argument is not enabled.
#'
#' \code{HurlbertPIE} provides the Probability of Interspecific Encounter metric for
#' relative community abundance data, a commonly used metric for evenness of community
#' abundance data based on derivations in Hurlbert (1971). An optional argument allows
#' users to apply Hurlbert's PIE to only a subselection of the most abundant taxa.
#' @return
#' \code{pairwiseSpearmanRho} will return either a full matrix (the default) or (if
#' \code{asDistance} is true, a distance matrix, with only the lower triangle
#' shown (by default). See details.
#'
#' \code{HurlbertPIE} returns a named vector of PIE values for the input data.
#' @aliases communityEcology pairwiseSpearmanRho HurlbertPIE
#' @seealso
#' Example dataset: \code{\link{kanto}}
#' @name communityEcology
#' @author David W. Bapst
#' @references
#' Hurlbert, S. H. 1971. The nonconcept of species diversity:
#' a critique and alternative parameters. \emph{Ecology} 52(4):577-586.
#' @examples
#'
#' # let's load some example data:
#' # a classic dataset collected by Satoshi and Okido from the Kanto region
#'
#' data(kanto)
#'
#' rhoBothAbsent<-pairwiseSpearmanRho(kanto,dropAbsent="bothAbsent")
#'
#' #other dropping options
#' rhoEitherAbsent<-pairwiseSpearmanRho(kanto,dropAbsent="eitherAbsent")
#' rhoNoDrop<-pairwiseSpearmanRho(kanto,dropAbsent="noDrop")
#'
#' #compare
#' layout(1:3)
#' lim<-c(-1,1)
#' plot(rhoBothAbsent, rhoEitherAbsent, xlim=lim, ylim=lim)
#' abline(0,1)
#' plot(rhoBothAbsent, rhoNoDrop, xlim=lim, ylim=lim)
#' abline(0,1)
#' plot(rhoEitherAbsent, rhoNoDrop, xlim=lim, ylim=lim)
#' abline(0,1)
#' layout(1)
#'
#' #using dropAbsent="eitherAbsent" reduces the number of taxa so much that
#' # the number of taxa present drops too low to be useful
#' #dropping none of the taxa restricts the rho measures to high coefficients
#' # due to the many shared 0s for absent taxa
#'
#' #############
#'
#' # Try the rho coefficients as a rescaled dissimilarity
#' rhoDist<-pairwiseSpearmanRho(kanto,asDistance=TRUE,dropAbsent="bothAbsent")
#'
#' # What happens if we use these in typical distance matrix based analyses?
#'
#' # Cluster analysis
#' clustRes<-hclust(rhoDist)
#' plot(clustRes)
#'
#' # Principal Coordinates Analysis
#' pcoRes <- pcoa(rhoDist,correction="lingoes")
#' scores <- pcoRes$vectors
#' #plot the PCO
#' plot(scores,type="n")
#' text(labels=rownames(kanto),scores[,1],scores[,2],cex=0.5)
#'
#' ##################################
#'
#' # measuring evenness with Hurlbert's PIE
#'
#' kantoPIE<-HurlbertPIE(kanto)
#'
#' #histogram
#' hist(kantoPIE)
#' #evenness of the kanto data is fairly high
#'
#' #barplot
#' parX<-par(mar=c(7,5,3,3))
#' barplot(kantoPIE,las=3,cex.names=0.7,
#' ylab="Hurlbert's PIE",ylim=c(0.5,1),xpd=FALSE)
#' par(parX)
#'
#' #and we can see that the Tower has extremely low unevenness
#' #...overly high abundance of ghosts?
#'
#' #let's look at evenness of 5 most abundant taxa
#' kantoPIE_5<-HurlbertPIE(kanto,nAnalyze=5)
#'
#' #barplot
#' parX<-par(mar=c(7,5,3,3))
#' barplot(kantoPIE_5,las=3,cex.names=0.7,
#' ylab="Hurlbert's PIE for 5 most abundant taxa",ylim=c(0.5,1),xpd=FALSE)
#' par(parX)
#' @rdname communityEcology
#' @export
pairwiseSpearmanRho <- function(x, dropAbsent = "bothAbsent", asDistance = FALSE,
    diag = NULL, upper = NULL, na.rm = FALSE) {
  # For ecology: species are columns, samples (sites) are rows.
  if (length(dim(x)) != 2) {
    stop("x must be a matrix, with samples as rows")
  }
  dropPar <- c("bothAbsent", "eitherAbsent", "noDrop")
  dropAbsent <- dropPar[pmatch(dropAbsent, dropPar)]
  # Default matrix layout differs between the distance-style and
  # full-matrix outputs, matching vegan::vegdist conventions.
  if (asDistance) {
    if (is.null(diag)) { diag <- FALSE }
    if (is.null(upper)) { upper <- FALSE }
  } else {
    if (is.null(diag)) { diag <- TRUE }
    if (is.null(upper)) { upper <- TRUE }
  }
  if (is.na(dropAbsent)) {
    stop(paste0("dropAbsent must be one of ",
      paste(dropPar, collapse = ", ")))
  }
  nSites <- nrow(x)
  # Preallocate as numeric (the original used a logical NA matrix);
  # seq_len() also makes a 0-row input a no-op instead of an error.
  rhos <- matrix(NA_real_, nSites, nSites)
  # Compute only the lower triangle (j <= i); each value is mirrored
  # into [j, i] as it is filled in, so the result is symmetric.
  for (i in seq_len(nSites)) {
    for (j in seq_len(i)) {
      # If NAs are not being removed and either sample has any,
      # the comparison is undefined.
      if (!na.rm && any(is.na(x[c(i, j), ]))) {
        rhos[i, j] <- rhos[j, i] <- NA
      } else {
        if (na.rm) {
          selector <- !is.na(x[i, ]) & !is.na(x[j, ])
        } else {
          selector <- rep(TRUE, ncol(x))
        }
        # Optionally drop taxa absent in both (or either) site.
        if (dropAbsent == "bothAbsent") {
          selector <- selector & (x[i, ] != 0 | x[j, ] != 0)
        }
        if (dropAbsent == "eitherAbsent") {
          selector <- selector & (x[i, ] != 0 & x[j, ] != 0)
        }
        if (sum(selector) < 2) {
          # Fewer than two comparable taxa: correlation is undefined.
          rhos[i, j] <- rhos[j, i] <- NA
        } else {
          # suppressWarnings: cor.test warns about tied ranks when
          # computing exact p-values; only the estimate is used here.
          rho <- suppressWarnings(
            cor.test(x[i, selector], x[j, selector],
              method = "spearman")$estimate
          )
          rhos[i, j] <- rhos[j, i] <- rho
        }
      }
    }
  }
  colnames(rhos) <- rownames(rhos) <- rownames(x)
  if (asDistance) {
    # Rescale rho (-1..1) to a dissimilarity bounded 0..1.
    rhos <- (1 - rhos) / 2
    result <- as.dist(rhos)
    attr(result, "Diag") <- diag
    attr(result, "Upper") <- upper
  } else {
    result <- rhos
  }
  return(result)
}
#' @rdname communityEcology
#' @export
HurlbertPIE <- function(x, nAnalyze = Inf) {
  # A bare vector is treated as a single sample (one-row matrix).
  if (is.vector(x)) {
    x <- matrix(x, 1, length(x))
  }
  # nAnalyze must be a single whole number >= 2 (Inf means "use all taxa").
  if (length(nAnalyze) != 1 || !is.numeric(nAnalyze) || nAnalyze < 2) {
    stop("nAnalyze must be a numeric vector of length 1, with a value at least equal to 2")
  }
  if (!is.infinite(nAnalyze)) {
    if (!is.integer(nAnalyze)) {
      nAnalyze2 <- as.integer(nAnalyze)
      if (nAnalyze2 != nAnalyze) {
        stop("nAnalyze must be a whole number")
      }
      nAnalyze <- nAnalyze2
    }
  }
  # Preallocate instead of growing the result inside the loop;
  # seq_len() also keeps a 0-row input from erroring.
  PIE <- numeric(nrow(x))
  for (i in seq_len(nrow(x))) {
    samp <- x[i, ]
    # Keep only the nAnalyze most abundant taxa
    # (ties in rank() may retain fewer than nAnalyze).
    if (!is.infinite(nAnalyze)) {
      samp <- samp[rank(-samp) < (nAnalyze + 1)]
    }
    # Drop absent taxa before counting diversity.
    samp <- samp[samp > 0]
    diversity <- length(samp)
    if (diversity > 1) {
      # Hurlbert's (1971) PIE with the D/(D-1) small-sample correction.
      PIE[i] <- diversity / (diversity - 1) * (1 - sum((samp / sum(samp))^2))
    } else {
      # Zero or one taxon: no interspecific encounters are possible.
      PIE[i] <- 0
    }
  }
  names(PIE) <- rownames(x)
  return(PIE)
}
|
#' Load Atlantis scenario output
#'
#' NOTE: Modified by Robert Wildermuth to interface with old NEUS codebase 10/21/2020.
#'
#' Reads in data generated from an Atlantis scenario and returns a list
#' containing the desired information. The list contains the 'truth' as known
#' from the Atlantis scenario. The truth can later be sampled
#' from to create a data set with observation error.
#' Currently, the \code{run_truth} depends on the following files
#' being in your working directory (\code{dir}):
#' \itemize{
#' \item{"functionalGroups.csv"}
#' \item{"[...]TOTCATCH.nc"}
#' \item{"[...]DietCheck.txt"}
#' },
#' where [...] specifies the entry used for the \code{scenario} argument.
#'
#' @family run functions
#' @author Sean Lucey, Kelli Faye Johnson
#'
#' @template scenario
#' @template dir
#' @template file_fgs
#' @template file_bgm
#' @template select_groups
#' @template file_init
#' @template file_biolprm
#' @template file_runprm
#' @template verbose
#' @template save
#'
#' @return Returns a list object.
#' @export
#' @examples
#' d <- system.file("extdata", "SETAS_Example", package = "atlantisom")
#' groups <- load_fgs(dir = d, "Functional_groups.csv")
#' truth <- run_truth(scenario = "outputs",
#' dir = d,
#' file_fgs = "Functional_groups.csv",
#' file_bgm = "Geography.bgm",
#' select_groups = groups[groups$IsTurnedOn > 0, "Name"],
#' file_init = "Initial_condition.nc",
#' file_biolprm = "Biology.prm",
#' file_runprm = "Run_settings.xml",
#' file_fish = "Fisheries.csv")
#' str(truth)
#' rm(truth)
#'
run_truth <- function(scenario, dir = getwd(),
file_fgs, file_bgm, select_groups, file_init, file_biolprm,
file_runprm = "NEUSv1",
#file_fish,
verbose = FALSE, save = TRUE, annage = FALSE){
# Read in information
# Read in the functional groups csv since that is used by many functions
fgs <- load_fgs(dir = dir, file_fgs = file_fgs)
# Read in the biomass pools
# NOTE(review): 'fgs' is passed the file NAME (file_fgs), not the fgs object
# loaded above -- presumably load_bps re-reads the csv itself; confirm.
bps <- load_bps(dir = dir, fgs = file_fgs, file_init = file_init)
# Read in the biological parameters
biol <- load_biolprm(dir = dir, file_biolprm = file_biolprm)
# "NEUSv1" is a sentinel value of file_runprm marking the legacy NEUS v1.0
# Atlantis codebase, which has no run-settings file to parse.
if(file_runprm == "NEUSv1"){
# Use GF's stop-gap function instead
runprm <- load_neus_v1_runprm()
} else {
# Read in the run parameters
runprm <- load_runprm(dir = dir, file_runprm = file_runprm)
}
# Build the expected Atlantis output file names from the scenario prefix.
nc_catch <- paste0(scenario, 'CATCH.nc')
dietcheck <- paste0(scenario, 'DietCheck.txt')
nc_out <- paste0(scenario, ".nc")
nc_prod <- paste0(scenario, "PROD.nc")
file_catchfish <- file.path(dir,
paste0(scenario, "CatchPerFishery.txt"))
file_catch <- paste0(scenario, "Catch.txt")
# if(annage){
# if(!file.exists(paste0(file.path(dir,paste0(scenario, 'ANNAGEBIO.nc'))))){
# stop("ANNAGEBIO.nc file not found")
# }
# if(!file.exists(paste0(file.path(dir,paste0(scenario, 'ANNAGECATCH.nc'))))){
# stop("ANNAGECATCH.nc file not found")
# }
# nc_annagebio <- paste0(scenario, 'ANNAGEBIO.nc')
# nc_annagecatch <- paste0(scenario, 'ANNAGECATCH.nc')
# }
# Get the boundary boxes
allboxes <- load_box(dir = dir, file_bgm = file_bgm)
boxes <- get_boundary(allboxes)
#Extract from NetCDF files
# Need: dir, file_nc, bps, fgs, select_groups, select_variable,
# check_acronyms, bboxes
# Numbers per group from the main nc output.
nums <- load_nc(dir = dir,
file_nc = nc_out,
bps = bps,
fgs = fgs,
select_groups = select_groups,
select_variable = "Nums",
check_acronyms = TRUE,
bboxes = boxes)
if(verbose) message("Numbers read in.")
# Reserve nitrogen per group from the main nc output.
resn <- load_nc(dir = dir,
file_nc = nc_out,
bps = bps,
fgs = fgs,
select_groups = select_groups,
select_variable = "ResN",
check_acronyms = TRUE,
bboxes = boxes)
if(verbose) message("Reserve nitrogen read in.")
# Structural nitrogen per group from the main nc output.
structn <- load_nc(dir = dir,
file_nc = nc_out,
bps = bps,
fgs = fgs,
select_groups = select_groups,
select_variable = "StructN",
check_acronyms = TRUE,
bboxes = boxes)
if(verbose) message("Structural nitrogen read in.")
# eat <- load_nc(dir = dir,
# file_nc = nc_prod,
# bps = bps,
# fgs = fgs,
# select_groups = select_groups,
# select_variable = "Eat",
# check_acronyms = TRUE,
# bboxes = boxes)
# if(verbose) message("Eaten read in.")
#
# grazing <- load_nc(dir = dir,
# file_nc = nc_prod,
# bps = bps,
# fgs = fgs,
# select_groups = select_groups,
# select_variable = "Grazing",
# check_acronyms = TRUE,
# bboxes = boxes)
# if(verbose) message("Grazing read in.")
# vol <- load_nc_physics(dir = dir,
# file_nc = nc_out,
# physic_variables = "volume",
# aggregate_layers = FALSE,
# bboxes = boxes)
# if(verbose) message("Volume read in.")
# Catch in numbers from the CATCH nc output.
catch <- load_nc(dir = dir,
file_nc = nc_catch,
bps = bps,
fgs = fgs,
select_groups = select_groups,
select_variable = "Catch",
check_acronyms = TRUE,
bboxes = boxes)
if(verbose) message("Catch read in.")
# if(annage){
# numsage <- load_nc_annage(dir = dir,
# file_nc = nc_annagebio,
# bps = bps,
# fgs = fgs,
# biolprm = biol,
# select_groups = select_groups,
# select_variable = "Nums",
# check_acronyms = TRUE,
# bboxes = boxes,
# verbose = TRUE)
# if(verbose) message("Numbers read in from ANNAGEBIO.")
# Weight output seems wrong compared with standard nc weights
# Don't include until we can sort this out
# weightage <- load_nc_annage(dir = dir,
# file_nc = nc_annagebio,
# bps = bps,
# fgs = fgs,
# biolprm = biol,
# select_groups = select_groups,
# select_variable = "Weight",
# check_acronyms = TRUE,
# bboxes = boxes,
# verbose = TRUE)
# if(verbose) message("Weight read in from ANNAGEBIO.")
# catchage <- load_nc_annage(dir = dir,
# file_nc = nc_annagecatch,
# file_fish = file_fish,
# bps = bps,
# fgs = fgs,
# biolprm = biol,
# select_groups = select_groups,
# select_variable = "Catch",
# check_acronyms = TRUE,
# bboxes = boxes,
# verbose = TRUE)
# if(verbose) message("Catch read in from ANNAGECATCH.")
# discage <- load_nc_annage(dir = dir,
# file_nc = nc_annagecatch,
# file_fish = file_fish,
# bps = bps,
# fgs = fgs,
# biolprm = biol,
# select_groups = select_groups,
# select_variable = "Discard",
# check_acronyms = TRUE,
# bboxes = boxes,
# verbose = TRUE)
# if(verbose) message("Discard read in from ANNAGECATCH.")
#
# }
# May 2019 this is the catch in nums correction needed for legacy atlantis codebases
# check for logfile, send warning if not found.
# if(file.exists(paste0(file.path(dir, "log.txt")))){
# #if found compare codedate and do catch numbers correction if necessary
# logfile <- paste0(file.path(dir, "log.txt"))
# codedate <- system(paste0("grep 'Atlantis SVN' ", logfile), intern = TRUE)
# codedate <- as.Date(stringr::str_extract(codedate, "\\d+[- \\/.]\\d+[- \\/.]\\d+"))
# if(codedate < "2015-12-15"){
# Legacy (pre-Dec-2015 / NEUS v1) CATCH.nc units are wrong; correct by
# dividing by (86400 * number of water-column layers in the box), see below.
if(file_runprm == "NEUSv1"){
if(verbose) message("Catch numbers correction needed for this codebase, starting")
# read in initial conditions NC file
at_init <- RNetCDF::open.nc(con = file.path(dir, file_init))
# Get info from netcdf file! (Filestructure and all variable names)
var_names_ncdf <- sapply(seq_len(RNetCDF::file.inq.nc(at_init)$nvars - 1),
function(x) RNetCDF::var.inq.nc(at_init, x)$name)
numlayers <- RNetCDF::var.get.nc(ncfile = at_init, variable = "numlayers")
# !!RW: provides three columns with box layer numbers. Not sure what column needed
RNetCDF::close.nc(at_init)
# are these in box order??? if so make a box-numlayer lookup
layerbox.lookup <- data.frame(polygon=c(0:(allboxes$nbox - 1)), numlayers)
# merge() joins on the shared column name(s) -- assumes 'catch' carries a
# 'polygon' column matching the lookup.
catch.tmp <- merge(catch, layerbox.lookup)
# divide the numbers at age by (86400 * number_water_column_layers_in_the_box)
# replace truth$catch atoutput with correction
# NOTE(review): rename()/mutate() are unqualified here -- assumes dplyr is
# attached; X1 is taken as the layer-count column (see RW note above) --
# TODO confirm the right column.
catch <- catch.tmp %>%
rename(numlayers = X1) %>%
mutate(atoutput = atoutput / (86400 * numlayers)) %>%
dplyr::select(species, agecl, polygon, time, atoutput)
if(verbose) message("Catch numbers corrected")
}
# else{
# message("Codebase later than December 2015, no correction needed")
# }
# }else{
# warning(strwrap(prefix = " ", initial = "",
# "log.txt file not found; catch in numbers correction not done. For Atlantis SVN dates prior to December 2015, CATCH.nc output units were incorrect. Correction requires presence of log.txt file in the directory."))
# }
# Per-fishery catch: reshape the wide table (one column per species code,
# first two columns skipped) to long format, keep positive catches, map
# species codes to names, and rescale time to output time steps.
catchfish <- read.table(file_catchfish, header = TRUE)
over <- colnames(catchfish)[-(1:2)]
catchfish <- reshape(catchfish, direction = "long",
varying = over, v.names = "catch",
timevar = "species", times = over)
rownames(catchfish) <- 1:NROW(catchfish)
catchfish <- catchfish[catchfish$catch > 0,
-which(colnames(catchfish) == "id")]
catchfish$species <- fgs$Name[match(catchfish$species, fgs$Code)]
colnames(catchfish) <- tolower(colnames(catchfish))
catchfish$time <- catchfish$time / runprm$toutfinc
if(verbose) message("Catch per fishery read in.")
# Get catch from txt. Sum per species and compare with values from nc-file!
catch_all <- load_catch(dir = dir, file_catch = file_catch, fgs = fgs)
# over <- colnames(catch_all)[(colnames(catch_all) %in% fgs$Code)]
# catch_all <- reshape(catch_all[, c("Time", over)], direction = "long",
# varying = over, v.names = "catch",
# timevar = "species", times = over)
# rownames(catch_all) <- 1:NROW(catch_all)
# catch_all <- catch_all[catch_all$catch > 0,
# -which(colnames(catch_all) == "id")]
# catch_all$species <- fgs$Name[match(catch_all$species, fgs$Code)]
# colnames(catch_all) <- tolower(colnames(catch_all))
# Rescale time to output time steps.
catch_all$time <- catch_all$time / runprm$toutfinc
if(verbose) message("Catch for all fisheries in biomass read in.")
# diet <- load_diet_comp(dir = dir, file_diet = dietcheck, fgs = fgs)
# May 2019 let's not do the catch calcs until they are corrected
# if(verbose) message("Start calc_functions")
# # catchbio <- calc_biomass_age(nums = catch,
# # resn = resn, structn = structn, biolprm = biol)
# biomass_eaten <- calc_pred_diet(dietcomp = diet,
# eat = eat, grazing = grazing, vol = vol, biolprm = biol,
# runprm = runprm)
# biomass_ages <- calc_biomass_age(nums = nums,
# resn = resn, structn = structn, biolprm = biol)
# NEUS v1 only: convert catch-at-age numbers to biomass and total it per
# species and time step.
if(file_runprm == "NEUSv1"){
bio_catch <- calc_biomass_age(nums = catch,
resn = resn, structn = structn, biolprm = biol)
bio_catch <- aggregate(atoutput ~ species + time,
data = bio_catch, sum)
# todo: check that the biomass of the catches are correct
# also should catch in biomass be exported as well
# as catch in numbers?
# RW: time steps don't always match, so commenting out to deal with this out of fxn for now
# check <- merge(catch_all, bio_catch,
# by = c("species", "time"))
# check$check <- with(check, atoutput / catch)
}
# SKG May 2019, no export of catch in biomass for now
# does not match catch.txt output file
# read that in separately instead
# Assemble the 'truth' list; the NEUS v1 variant additionally exports
# bio_catch for later conversion-proportion calculations.
if(file_runprm == "NEUSv1"){
result <- list(
# "biomass_eaten" = biomass_eaten,
# "biomass_ages" = biomass_ages,
# RW: add 'bio_catch' to calculate conversion proportion later
"bio_catch" = bio_catch,
"catch" = catch,
"catch_all" = catch_all,
"nums" = nums,
"resn" = resn,
"structn" = structn,
"biolprm" = biol,
"fgs" = fgs)
} else {
result <- list(
"catch" = catch,
"catch_all" = catch_all,
"nums" = nums,
"resn" = resn,
"structn" = structn,
"biolprm" = biol,
"fgs" = fgs)
}
# if(annage){
# result <- list("biomass_eaten" = biomass_eaten,
# "biomass_ages" = biomass_ages,
# "catch" = catch,
# "catch_all" = catch_all,
# "nums" = nums,
# "numsage" = numsage,
# "catchage" = catchage,
# "discage" = discage,
# "resn" = resn,
# "structn" = structn,
# "biolprm" = biol,
# "fgs" = fgs)
# }
if(verbose) message("Start writing to HDD.")
# Optionally persist the result next to the model output as
# <scenario>run_truth.RData.
if(save) {
save(result,
file = file.path(dir, paste0(scenario, "run_truth.RData")))
}
# Return invisibly so interactive calls don't print the large list.
invisible(result)
} | /run_truth.R | no_license | rwildermuth/omNEUS | R | false | false | 14,925 | r |
#'
#' NOTE: Modified by Robert Wildermuth to interface with old NEUS codebase 10/21/2020.
#'
#' Reads in data generated from an Atlantis scenario and returns a list
#' containing the desired information. The list contains the 'truth' as known
#' from the Atlantis scenario. The truth can later be sampled
#' from to create a data set with observation error.
#' Currently, the \code{run_truth} depends on the following files
#' being in your working directory (\code{dir}):
#' \itemize{
#' \item{"functionalGroups.csv"}
#' \item{"[...]TOTCATCH.nc"}
#' \item{"[...]DietCheck.txt"}
#' },
#' where [...] specifies the entry used for the \code{scenario} argument.
#'
#' @family run functions
#' @author Sean Lucey, Kelli Faye Johnson
#'
#' @template scenario
#' @template dir
#' @template file_fgs
#' @template file_bgm
#' @template select_groups
#' @template file_init
#' @template file_biolprm
#' @template file_runprm
#' @template verbose
#' @template save
#'
#' @return Returns a list object.
#' @export
#' @examples
#' d <- system.file("extdata", "SETAS_Example", package = "atlantisom")
#' groups <- load_fgs(dir = d, "Functional_groups.csv")
#' truth <- run_truth(scenario = "outputs",
#' dir = d,
#' file_fgs = "Functional_groups.csv",
#' file_bgm = "Geography.bgm",
#' select_groups = groups[groups$IsTurnedOn > 0, "Name"],
#' file_init = "Initial_condition.nc",
#' file_biolprm = "Biology.prm",
#' file_runprm = "Run_settings.xml",
#' file_fish = "Fisheries.csv")
#' str(truth)
#' rm(truth)
#'
run_truth <- function(scenario, dir = getwd(),
file_fgs, file_bgm, select_groups, file_init, file_biolprm,
file_runprm = "NEUSv1",
#file_fish,
verbose = FALSE, save = TRUE, annage = FALSE){
# Read in information
# Read in the functional groups csv since that is used by many functions
fgs <- load_fgs(dir = dir, file_fgs = file_fgs)
# Read in the biomass pools
# NOTE(review): 'fgs' is passed the file NAME (file_fgs), not the fgs object
# loaded above -- presumably load_bps re-reads the csv itself; confirm.
bps <- load_bps(dir = dir, fgs = file_fgs, file_init = file_init)
# Read in the biological parameters
biol <- load_biolprm(dir = dir, file_biolprm = file_biolprm)
# "NEUSv1" is a sentinel value of file_runprm marking the legacy NEUS v1.0
# Atlantis codebase, which has no run-settings file to parse.
if(file_runprm == "NEUSv1"){
# Use GF's stop-gap function instead
runprm <- load_neus_v1_runprm()
} else {
# Read in the run parameters
runprm <- load_runprm(dir = dir, file_runprm = file_runprm)
}
# Build the expected Atlantis output file names from the scenario prefix.
nc_catch <- paste0(scenario, 'CATCH.nc')
dietcheck <- paste0(scenario, 'DietCheck.txt')
nc_out <- paste0(scenario, ".nc")
nc_prod <- paste0(scenario, "PROD.nc")
file_catchfish <- file.path(dir,
paste0(scenario, "CatchPerFishery.txt"))
file_catch <- paste0(scenario, "Catch.txt")
# if(annage){
# if(!file.exists(paste0(file.path(dir,paste0(scenario, 'ANNAGEBIO.nc'))))){
# stop("ANNAGEBIO.nc file not found")
# }
# if(!file.exists(paste0(file.path(dir,paste0(scenario, 'ANNAGECATCH.nc'))))){
# stop("ANNAGECATCH.nc file not found")
# }
# nc_annagebio <- paste0(scenario, 'ANNAGEBIO.nc')
# nc_annagecatch <- paste0(scenario, 'ANNAGECATCH.nc')
# }
# Get the boundary boxes
allboxes <- load_box(dir = dir, file_bgm = file_bgm)
boxes <- get_boundary(allboxes)
#Extract from NetCDF files
# Need: dir, file_nc, bps, fgs, select_groups, select_variable,
# check_acronyms, bboxes
# Numbers per group from the main nc output.
nums <- load_nc(dir = dir,
file_nc = nc_out,
bps = bps,
fgs = fgs,
select_groups = select_groups,
select_variable = "Nums",
check_acronyms = TRUE,
bboxes = boxes)
if(verbose) message("Numbers read in.")
# Reserve nitrogen per group from the main nc output.
resn <- load_nc(dir = dir,
file_nc = nc_out,
bps = bps,
fgs = fgs,
select_groups = select_groups,
select_variable = "ResN",
check_acronyms = TRUE,
bboxes = boxes)
if(verbose) message("Reserve nitrogen read in.")
# Structural nitrogen per group from the main nc output.
structn <- load_nc(dir = dir,
file_nc = nc_out,
bps = bps,
fgs = fgs,
select_groups = select_groups,
select_variable = "StructN",
check_acronyms = TRUE,
bboxes = boxes)
if(verbose) message("Structural nitrogen read in.")
# eat <- load_nc(dir = dir,
# file_nc = nc_prod,
# bps = bps,
# fgs = fgs,
# select_groups = select_groups,
# select_variable = "Eat",
# check_acronyms = TRUE,
# bboxes = boxes)
# if(verbose) message("Eaten read in.")
#
# grazing <- load_nc(dir = dir,
# file_nc = nc_prod,
# bps = bps,
# fgs = fgs,
# select_groups = select_groups,
# select_variable = "Grazing",
# check_acronyms = TRUE,
# bboxes = boxes)
# if(verbose) message("Grazing read in.")
# vol <- load_nc_physics(dir = dir,
# file_nc = nc_out,
# physic_variables = "volume",
# aggregate_layers = FALSE,
# bboxes = boxes)
# if(verbose) message("Volume read in.")
# Catch in numbers from the CATCH nc output.
catch <- load_nc(dir = dir,
file_nc = nc_catch,
bps = bps,
fgs = fgs,
select_groups = select_groups,
select_variable = "Catch",
check_acronyms = TRUE,
bboxes = boxes)
if(verbose) message("Catch read in.")
# if(annage){
# numsage <- load_nc_annage(dir = dir,
# file_nc = nc_annagebio,
# bps = bps,
# fgs = fgs,
# biolprm = biol,
# select_groups = select_groups,
# select_variable = "Nums",
# check_acronyms = TRUE,
# bboxes = boxes,
# verbose = TRUE)
# if(verbose) message("Numbers read in from ANNAGEBIO.")
# Weight output seems wrong compared with standard nc weights
# Don't include until we can sort this out
# weightage <- load_nc_annage(dir = dir,
# file_nc = nc_annagebio,
# bps = bps,
# fgs = fgs,
# biolprm = biol,
# select_groups = select_groups,
# select_variable = "Weight",
# check_acronyms = TRUE,
# bboxes = boxes,
# verbose = TRUE)
# if(verbose) message("Weight read in from ANNAGEBIO.")
# catchage <- load_nc_annage(dir = dir,
# file_nc = nc_annagecatch,
# file_fish = file_fish,
# bps = bps,
# fgs = fgs,
# biolprm = biol,
# select_groups = select_groups,
# select_variable = "Catch",
# check_acronyms = TRUE,
# bboxes = boxes,
# verbose = TRUE)
# if(verbose) message("Catch read in from ANNAGECATCH.")
# discage <- load_nc_annage(dir = dir,
# file_nc = nc_annagecatch,
# file_fish = file_fish,
# bps = bps,
# fgs = fgs,
# biolprm = biol,
# select_groups = select_groups,
# select_variable = "Discard",
# check_acronyms = TRUE,
# bboxes = boxes,
# verbose = TRUE)
# if(verbose) message("Discard read in from ANNAGECATCH.")
#
# }
# May 2019 this is the catch in nums correction needed for legacy atlantis codebases
# check for logfile, send warning if not found.
# if(file.exists(paste0(file.path(dir, "log.txt")))){
# #if found compare codedate and do catch numbers correction if necessary
# logfile <- paste0(file.path(dir, "log.txt"))
# codedate <- system(paste0("grep 'Atlantis SVN' ", logfile), intern = TRUE)
# codedate <- as.Date(stringr::str_extract(codedate, "\\d+[- \\/.]\\d+[- \\/.]\\d+"))
# if(codedate < "2015-12-15"){
# Legacy (pre-Dec-2015 / NEUS v1) CATCH.nc units are wrong; correct by
# dividing by (86400 * number of water-column layers in the box), see below.
if(file_runprm == "NEUSv1"){
if(verbose) message("Catch numbers correction needed for this codebase, starting")
# read in initial conditions NC file
at_init <- RNetCDF::open.nc(con = file.path(dir, file_init))
# Get info from netcdf file! (Filestructure and all variable names)
var_names_ncdf <- sapply(seq_len(RNetCDF::file.inq.nc(at_init)$nvars - 1),
function(x) RNetCDF::var.inq.nc(at_init, x)$name)
numlayers <- RNetCDF::var.get.nc(ncfile = at_init, variable = "numlayers")
# !!RW: provides three columns with box layer numbers. Not sure what column needed
RNetCDF::close.nc(at_init)
# are these in box order??? if so make a box-numlayer lookup
layerbox.lookup <- data.frame(polygon=c(0:(allboxes$nbox - 1)), numlayers)
# merge() joins on the shared column name(s) -- assumes 'catch' carries a
# 'polygon' column matching the lookup.
catch.tmp <- merge(catch, layerbox.lookup)
# divide the numbers at age by (86400 * number_water_column_layers_in_the_box)
# replace truth$catch atoutput with correction
# NOTE(review): rename()/mutate() are unqualified here -- assumes dplyr is
# attached; X1 is taken as the layer-count column (see RW note above) --
# TODO confirm the right column.
catch <- catch.tmp %>%
rename(numlayers = X1) %>%
mutate(atoutput = atoutput / (86400 * numlayers)) %>%
dplyr::select(species, agecl, polygon, time, atoutput)
if(verbose) message("Catch numbers corrected")
}
# else{
# message("Codebase later than December 2015, no correction needed")
# }
# }else{
# warning(strwrap(prefix = " ", initial = "",
# "log.txt file not found; catch in numbers correction not done. For Atlantis SVN dates prior to December 2015, CATCH.nc output units were incorrect. Correction requires presence of log.txt file in the directory."))
# }
# Per-fishery catch: reshape the wide table (one column per species code,
# first two columns skipped) to long format, keep positive catches, map
# species codes to names, and rescale time to output time steps.
catchfish <- read.table(file_catchfish, header = TRUE)
over <- colnames(catchfish)[-(1:2)]
catchfish <- reshape(catchfish, direction = "long",
varying = over, v.names = "catch",
timevar = "species", times = over)
rownames(catchfish) <- 1:NROW(catchfish)
catchfish <- catchfish[catchfish$catch > 0,
-which(colnames(catchfish) == "id")]
catchfish$species <- fgs$Name[match(catchfish$species, fgs$Code)]
colnames(catchfish) <- tolower(colnames(catchfish))
catchfish$time <- catchfish$time / runprm$toutfinc
if(verbose) message("Catch per fishery read in.")
# Get catch from txt. Sum per species and compare with values from nc-file!
catch_all <- load_catch(dir = dir, file_catch = file_catch, fgs = fgs)
# over <- colnames(catch_all)[(colnames(catch_all) %in% fgs$Code)]
# catch_all <- reshape(catch_all[, c("Time", over)], direction = "long",
# varying = over, v.names = "catch",
# timevar = "species", times = over)
# rownames(catch_all) <- 1:NROW(catch_all)
# catch_all <- catch_all[catch_all$catch > 0,
# -which(colnames(catch_all) == "id")]
# catch_all$species <- fgs$Name[match(catch_all$species, fgs$Code)]
# colnames(catch_all) <- tolower(colnames(catch_all))
# Rescale time to output time steps.
catch_all$time <- catch_all$time / runprm$toutfinc
if(verbose) message("Catch for all fisheries in biomass read in.")
# diet <- load_diet_comp(dir = dir, file_diet = dietcheck, fgs = fgs)
# May 2019 let's not do the catch calcs until they are corrected
# if(verbose) message("Start calc_functions")
# # catchbio <- calc_biomass_age(nums = catch,
# # resn = resn, structn = structn, biolprm = biol)
# biomass_eaten <- calc_pred_diet(dietcomp = diet,
# eat = eat, grazing = grazing, vol = vol, biolprm = biol,
# runprm = runprm)
# biomass_ages <- calc_biomass_age(nums = nums,
# resn = resn, structn = structn, biolprm = biol)
# NEUS v1 only: convert catch-at-age numbers to biomass and total it per
# species and time step.
if(file_runprm == "NEUSv1"){
bio_catch <- calc_biomass_age(nums = catch,
resn = resn, structn = structn, biolprm = biol)
bio_catch <- aggregate(atoutput ~ species + time,
data = bio_catch, sum)
# todo: check that the biomass of the catches are correct
# also should catch in biomass be exported as well
# as catch in numbers?
# RW: time steps don't always match, so commenting out to deal with this out of fxn for now
# check <- merge(catch_all, bio_catch,
# by = c("species", "time"))
# check$check <- with(check, atoutput / catch)
}
# SKG May 2019, no export of catch in biomass for now
# does not match catch.txt output file
# read that in separately instead
# Assemble the 'truth' list; the NEUS v1 variant additionally exports
# bio_catch for later conversion-proportion calculations.
if(file_runprm == "NEUSv1"){
result <- list(
# "biomass_eaten" = biomass_eaten,
# "biomass_ages" = biomass_ages,
# RW: add 'bio_catch' to calculate conversion proportion later
"bio_catch" = bio_catch,
"catch" = catch,
"catch_all" = catch_all,
"nums" = nums,
"resn" = resn,
"structn" = structn,
"biolprm" = biol,
"fgs" = fgs)
} else {
result <- list(
"catch" = catch,
"catch_all" = catch_all,
"nums" = nums,
"resn" = resn,
"structn" = structn,
"biolprm" = biol,
"fgs" = fgs)
}
# if(annage){
# result <- list("biomass_eaten" = biomass_eaten,
# "biomass_ages" = biomass_ages,
# "catch" = catch,
# "catch_all" = catch_all,
# "nums" = nums,
# "numsage" = numsage,
# "catchage" = catchage,
# "discage" = discage,
# "resn" = resn,
# "structn" = structn,
# "biolprm" = biol,
# "fgs" = fgs)
# }
if(verbose) message("Start writing to HDD.")
# Optionally persist the result next to the model output as
# <scenario>run_truth.RData.
if(save) {
save(result,
file = file.path(dir, paste0(scenario, "run_truth.RData")))
}
# Return invisibly so interactive calls don't print the large list.
invisible(result)
} | /testnewversion.R | permissive | oliviaAB/multiomics_networks_simulation | R | false | false | 8,708 | r |
####################################################################################################################################################3
####################################################################################################################################################
# Interactive test: build a tiny (G = 3) in silico system, create a population
# of 15 individuals, then run the serial and parallel stochastic simulation
# drivers and compare their outputs.
setwd("~/winData/multiomics_networks_simulation")
#setwd("~/GitHub/multiomics_networks_simulation")
# Provides insilicosystemargs(), createInSilicoSystem(), createPopulation(),
# the simulateSystemStochastic*() drivers and the Julia evaluator helpers
# used below -- presumably also loads tic()/toc(); confirm.
source("network_generation.R")
#anotherev = newJuliaEvaluator()
mysystemargs = insilicosystemargs(G = 3, RD.NC.outdeg.exp = 3, PC.PTM.p = 0.5)
# empty = T: build the system without generating content -- TODO confirm what
# 'empty' skips in createInSilicoSystem().
insilicosystem = createInSilicoSystem(mysystemargs, empty = T)
# plotGlobalSystem(insilicosystem, show = T)
# plotRegulationSystem(insilicosystem, show = T)
myindivargs = insilicoindividualargs()
insilicopopulation = createPopulation(15, insilicosystem, myindivargs)
tic()
res = simulateSystemStochastic(insilicosystem, insilicopopulation, simtime = 3600, nepochs = 20, ntrialsPerInd = 1, simalgorithm = "ODM", returnStochModel = F)
toc()
tic()
# NOTE(review): the parallel run uses simtime = 1 while the serial run above
# uses simtime = 3600, so only the first epoch (initial state) is comparable.
res2 = simulateSystemStochasticParallel(insilicosystem, insilicopopulation, simtime = 1, nepochs = 20, ntrialsPerInd = 1, simalgorithm = "ODM", returnStochModel = F)
toc()
# Compare the first row of each individual's result table between the serial
# and parallel drivers.
sapply(1:length(insilicopopulation$individualsList), function(i){identical(res$resTable[[i]][1,], res2[[i]][1,])})
# resTable = res$resTable
plotExpressionProfiles(insilicosystem, insilicopopulation, res2)
# Run one complete simulation (system generation + population creation +
# stochastic simulation) for simulation index i, using the i-th pre-created
# Julia evaluator from the global evList. Returns a named list bundling the
# simulation id, the system, the population and the simulation result.
myfunc <- function(i) {
  # Evaluator lifecycle (creation/removal) is managed outside this function.
  ev_i <- evList[[i]]
  sys_args <- insilicosystemargs(G = 3, RD.NC.outdeg.exp = 3, PC.PTM.p = 0.5)
  system_i <- createInSilicoSystem(sys_args, ev = ev_i)
  indiv_args <- insilicoindividualargs()
  population_i <- createPopulation(2, system_i, indiv_args)
  sim_res <- simulateSystemStochastic(system_i, population_i,
                                      simtime = 1, nepochs = 20,
                                      ntrialsPerInd = 1,
                                      simalgorithm = "ODM",
                                      returnStochModel = F, ev = ev_i)
  list("simID" = i,
       "insilicosystem" = system_i,
       "insilicopopulation" = population_i,
       "res" = sim_res)
}
# Pool of Julia evaluators for the parallel simulations below.
# NOTE(review): nsim = 100 simulations but only 50 evaluators are created;
# myfunc() indexes evList[[i]], so i > 50 will fail -- confirm intended.
nsim = 100
evList = sapply(1:50, function(x){
myev = newJuliaEvaluator()
print(myev)
print(showConnections())
return(myev)})
test = mclapply(1:nsim, myfunc, mc.cores = (detectCores()-1))
sapply(evList, removeJuliaEvaluator)
# ------------
setwd("~/winData/multiomics_networks_simulation")
# One-time Julia setup: install ClobberingReload if missing, add a worker
# process, and load the project's Julia functions on all workers.
juliaCommand("
if !haskey(Pkg.installed(), \"ClobberingReload\")
Pkg.clone(\"git://github.com/cstjean/ClobberingReload.jl.git\")
end
")
juliaCommand("addprocs(1)")
juliaCommand("@everywhere sinclude(\"julia_functions.jl\")")
# ------------
# Low-level reproduction of a stochastic simulation call through the XR/Julia
# interface, used to debug a saved case (noerror.RData provides
# insilicosystem and insilicopopulation).
setwd("~/winData/multiomics_networks_simulation")
source("network_generation.R")
load("/home/oangelin/Documents/noerror.RData")
stochmodel = createStochSystem(insilicosystem, insilicopopulation$indargs, returnList = F)
evaluator = XR::getInterface(getClass("JuliaInterface"))
# Build the Julia call as a string and send it over the evaluator connection.
expr = gettextf("%s(%s)","stochasticsimulation", evaluator$ServerArglist(stochmodel$JuliaObject, insilicopopulation$individualsList[[1]]$QTLeffects, insilicopopulation$individualsList[[1]]$InitVar, df2list(insilicosystem$genes), 0.00001, modelname = "Ind1", ntrials = 1, nepochs = 1, simalgorithm = "SSA"))
#expr = "jat()"
key = RJulia()$ProxyName()
cmd = jsonlite::toJSON(c("eval", expr, key, T))
writeLines(cmd, evaluator$connection)
#evaluator$ServerQuit()
# Poll the connection until the Julia server replies (up to 10 tries, 1 s
# apart).
for(try in 1:10) {
value <- readLines(evaluator$connection, 1)
#print(value)
if(length(value) == 0) # But shouldn't happen?
Sys.sleep(1)
else
break
}
test = XR::valueFromServer(value, key, T, evaluator)
# ------------
mysystemargs = insilicosystemargs(G = 10, PC.PTM.form.p = 0)
myinsilicosystemEmpty = createInSilicoSystem(mysystemargs, empty = T)
plotGlobalSystem(myinsilicosystemEmpty, show = T)
# NOTE(review): 'myinsilicosystem' is not defined in this section (only
# myinsilicosystemEmpty) -- relies on a leftover object in the workspace.
plotRegulationSystem(myinsilicosystem, c("TC"))
myindivargs = insilicoindividualargs()
mypopulation = createPopulation(20, myinsilicosystemEmpty, myindivargs)
test = createStochSystem(myinsilicosystemEmpty, myindivargs, returnList = T)
##############################################
# Build one large (G = 500) system and save diagnostic plots to a shared
# folder (hard-coded VirtualBox path).
library(tictoc)
setwd("~/winData/multiomics_networks_simulation")
#setwd("~/GitHub/multiomics_networks_simulation")
source("network_generation.R")
mysystemargs = insilicosystemargs(G = 500)
tic(); myinsilicosystem = createInSilicoSystem(mysystemargs); toc()
## plotmosystem
test = plotGlobalSystem(myinsilicosystem, show = T)
ggsave("/media/sf_data/globalPanel1.png", plot = test$globalPanel1, width = 33.9, height = 19.1, units = "cm")
ggsave("/media/sf_data/globalPanel2.png", plot = test$globalPanel2, width = 33.9, height = 19.1, units = "cm")
ggsave("/media/sf_data/globalPanel3.png", plot = test$globalPanel3, width = 33.9, height = 19.1, units = "cm")
ggsave("/media/sf_data/globalPanel4.png", plot = test$globalPanel4, width = 33.9, height = 19.1, units = "cm")
test2 = plotRegulationSystem(myinsilicosystem, c("TC"))
ggsave("/media/sf_data/TCPanel1.png", plot = test2$TCPanel1, width = 33.9, height = 19.1, units = "cm")
ggsave("/media/sf_data/TCPanel2.png", plot = test2$TCPanel2, width = 33.9, height = 19.1, units = "cm")
ggsave("/media/sf_data/TCPanel3.png", plot = test2$TCPanel3, width = 33.9, height = 19.1, units = "cm")
##############################################
# Benchmark: time system generation for several system sizes, nsim replicates
# each, keeping the generated genes and edges for later summaries.
library(tictoc)
setwd("~/winData/multiomics_networks_simulation")
#setwd("~/GitHub/multiomics_networks_simulation")
source("network_generation.R")
nsim = 100
sizesim = c(50, 100, 200, 500)
# NOTE(review): the result lists are later indexed by the size value s
# (50..500), not by position, so they grow to length 500 with NULL gaps --
# works, but wasteful; confirm intended.
size_telapsed = vector("list", length(sizesim))
size_simnod = vector("list", length(sizesim))
size_simedg = vector("list", length(sizesim))
for(s in sizesim){
print(s)
mysystemargs = insilicosystemargs(G = s)
telapsed = vector("numeric", nsim)
simnod = vector("list", nsim)
simedg = vector("list", nsim)
for(i in 1:nsim){
tic(); myinsilicosystem = createInSilicoSystem(mysystemargs); itel = toc(quiet = T)
telapsed[i] = itel$toc - itel$tic
simnod[[i]] = myinsilicosystem$genes
simedg[[i]] = myinsilicosystem$mosystem$edg
}
size_telapsed[[s]] = telapsed
size_simnod[[s]] = simnod
size_simedg[[s]] = simedg
}
#plot(rep(sizesim, rep(nsim, length(sizesim))), unlist(size_telapsed), xlab = "System size (G: number of nodes)", ylab = c("Running time (sec)"))
runningtime = data.frame("G" = rep(sizesim, rep(nsim, length(sizesim))), "runningtime" = unlist(size_telapsed))
runtime = ggplot(runningtime, aes(x = G, y = runningtime)) + geom_point() + xlab("System size (number of genes)") + ylab("Running time (s)")
ggsave("/media/sf_data/runtime.png", plot = runtime, width = 33.9, height = 19.1, units = "cm")
# Summarise each simulated system: ratio of protein-coding genes and number
# of regulatory edges, per system size.
ressimdf = data.frame("simid" = numeric(), "G" = numeric(), "ratioPC" = numeric(), "E" = numeric())
for(s in sizesim){
ratioPC = sapply(size_simnod[[s]], function(x){sum(x$coding =="PC")/nrow(x)})
E = sapply(size_simedg[[s]], nrow)
ressimdf = rbind(ressimdf, data.frame("simid" = 1:nsim, "G" = rep(s, nsim), "ratioPC" = ratioPC, "E" = E))
}
g1 = ggplot(ressimdf, aes(x = ratioPC)) + geom_histogram() + facet_grid(G~.) + xlab("Ratio of protein coding genes in the system")
g2 = ggplot(ressimdf, aes(x = E)) + geom_histogram() + facet_grid(G~.) + annotate("text", x = 5000, y = 70, label = sapply(sizesim, function(x){paste("mean =", mean(ressimdf[ressimdf$G == x, "E"]), sep = " ")})) + xlab("Number of regulatory interactions")
sevsimplot2 = ggarrange(g1, g2, ncol = 2)
ggsave("/media/sf_data/sevsimplot2.png", plot = sevsimplot2, width = 33.9, height = 19.1, units = "cm")
#################################################################################################################
#################################################################################################################
# Sanity test of running many Julia evaluators in parallel: each child gets a
# unique port (base port + simulation id) and sleeps a random duration on the
# Julia side before shutting its evaluator down.
setwd("~/winData/multiomics_networks_simulation")
#setwd("~/GitHub/multiomics_networks_simulation")
source("network_generation.R")
mybaseport = RJulia()$port
cat("Starting simulations at", format(Sys.time(), usetz = T), "\n")
tic()
test = mclapply(1:20, function(i){
myev = newJuliaEvaluator(port = mybaseport + i) ## create a new Julia evaluator with a port number equal to mybaseport + i (id of the simulation)
sleeprand = sample(1:50, 1)
#print(paste0("Port ", mybaseport+i," sleeping ", sleeprand, " seconds\n"))
juliaCommand("println(\"port %s sleeping for %s seconds\")", mybaseport + i, sleeprand, evaluator = myev)
juliaCommand("sleep(%s)", sleeprand, evaluator = myev)
juliaCommand("println(\"port %s done\")", mybaseport + i, evaluator = myev)
#print(paste0("Port ", mybaseport+i," done!"))
removeJuliaEvaluator(myev)
return(sleeprand)
}, mc.cores = detectCores()-1)
toc()
########################################################################3
| /testnewversion.R | permissive | oliviaAB/multiomics_networks_simulation | R | false | false | 8,708 | r |
####################################################################################################################################################3
####################################################################################################################################################
# Interactive test: build a tiny (G = 3) in silico system, create a population
# of 15 individuals, then run the serial and parallel stochastic simulation
# drivers and compare their outputs.
setwd("~/winData/multiomics_networks_simulation")
#setwd("~/GitHub/multiomics_networks_simulation")
# Provides insilicosystemargs(), createInSilicoSystem(), createPopulation(),
# the simulateSystemStochastic*() drivers and the Julia evaluator helpers
# used below -- presumably also loads tic()/toc(); confirm.
source("network_generation.R")
#anotherev = newJuliaEvaluator()
mysystemargs = insilicosystemargs(G = 3, RD.NC.outdeg.exp = 3, PC.PTM.p = 0.5)
# empty = T: build the system without generating content -- TODO confirm what
# 'empty' skips in createInSilicoSystem().
insilicosystem = createInSilicoSystem(mysystemargs, empty = T)
# plotGlobalSystem(insilicosystem, show = T)
# plotRegulationSystem(insilicosystem, show = T)
myindivargs = insilicoindividualargs()
insilicopopulation = createPopulation(15, insilicosystem, myindivargs)
tic()
res = simulateSystemStochastic(insilicosystem, insilicopopulation, simtime = 3600, nepochs = 20, ntrialsPerInd = 1, simalgorithm = "ODM", returnStochModel = F)
toc()
tic()
# NOTE(review): the parallel run uses simtime = 1 while the serial run above
# uses simtime = 3600, so only the first epoch (initial state) is comparable.
res2 = simulateSystemStochasticParallel(insilicosystem, insilicopopulation, simtime = 1, nepochs = 20, ntrialsPerInd = 1, simalgorithm = "ODM", returnStochModel = F)
toc()
# Compare the first row of each individual's result table between the serial
# and parallel drivers.
sapply(1:length(insilicopopulation$individualsList), function(i){identical(res$resTable[[i]][1,], res2[[i]][1,])})
# resTable = res$resTable
plotExpressionProfiles(insilicosystem, insilicopopulation, res2)
# Run one complete simulation (system generation + population creation +
# stochastic simulation) for simulation index i, using the i-th pre-created
# Julia evaluator from the global evList. Returns a named list bundling the
# simulation id, the system, the population and the simulation result.
myfunc <- function(i) {
  # Evaluator lifecycle (creation/removal) is managed outside this function.
  ev_i <- evList[[i]]
  sys_args <- insilicosystemargs(G = 3, RD.NC.outdeg.exp = 3, PC.PTM.p = 0.5)
  system_i <- createInSilicoSystem(sys_args, ev = ev_i)
  indiv_args <- insilicoindividualargs()
  population_i <- createPopulation(2, system_i, indiv_args)
  sim_res <- simulateSystemStochastic(system_i, population_i,
                                      simtime = 1, nepochs = 20,
                                      ntrialsPerInd = 1,
                                      simalgorithm = "ODM",
                                      returnStochModel = F, ev = ev_i)
  list("simID" = i,
       "insilicosystem" = system_i,
       "insilicopopulation" = population_i,
       "res" = sim_res)
}
nsim = 100
evList = sapply(1:50, function(x){
myev = newJuliaEvaluator()
print(myev)
print(showConnections())
return(myev)})
test = mclapply(1:nsim, myfunc, mc.cores = (detectCores()-1))
sapply(evList, removeJuliaEvaluator)
# ------------
setwd("~/winData/multiomics_networks_simulation")
juliaCommand("
if !haskey(Pkg.installed(), \"ClobberingReload\")
Pkg.clone(\"git://github.com/cstjean/ClobberingReload.jl.git\")
end
")
juliaCommand("addprocs(1)")
juliaCommand("@everywhere sinclude(\"julia_functions.jl\")")
# ------------
setwd("~/winData/multiomics_networks_simulation")
source("network_generation.R")
load("/home/oangelin/Documents/noerror.RData")
stochmodel = createStochSystem(insilicosystem, insilicopopulation$indargs, returnList = F)
evaluator = XR::getInterface(getClass("JuliaInterface"))
expr = gettextf("%s(%s)","stochasticsimulation", evaluator$ServerArglist(stochmodel$JuliaObject, insilicopopulation$individualsList[[1]]$QTLeffects, insilicopopulation$individualsList[[1]]$InitVar, df2list(insilicosystem$genes), 0.00001, modelname = "Ind1", ntrials = 1, nepochs = 1, simalgorithm = "SSA"))
#expr = "jat()"
key = RJulia()$ProxyName()
cmd = jsonlite::toJSON(c("eval", expr, key, T))
writeLines(cmd, evaluator$connection)
#evaluator$ServerQuit()
for(try in 1:10) {
value <- readLines(evaluator$connection, 1)
#print(value)
if(length(value) == 0) # But shouldn't happen?
Sys.sleep(1)
else
break
}
test = XR::valueFromServer(value, key, T, evaluator)
# ------------
# Build a small 10-gene system with empty = T and PC.PTM.form.p = 0
# (presumably disabling protein PTM formation -- confirm), then exercise
# the plotting / population / stochastic-model helpers on it.
mysystemargs = insilicosystemargs(G = 10, PC.PTM.form.p = 0)
myinsilicosystemEmpty = createInSilicoSystem(mysystemargs, empty = T)
plotGlobalSystem(myinsilicosystemEmpty, show = T)
# NOTE(review): this plots myinsilicosystem, not myinsilicosystemEmpty --
# presumably left over from an earlier session; confirm which was intended.
plotRegulationSystem(myinsilicosystem, c("TC"))
myindivargs = insilicoindividualargs()
mypopulation = createPopulation(20, myinsilicosystemEmpty, myindivargs)
test = createStochSystem(myinsilicosystemEmpty, myindivargs, returnList = T)
##############################################
# Generate a full 500-gene system, timing its construction, and save the
# global and TC-regulation diagnostic panels to the shared folder.
library(tictoc)
setwd("~/winData/multiomics_networks_simulation")
#setwd("~/GitHub/multiomics_networks_simulation")
source("network_generation.R")
mysystemargs = insilicosystemargs(G = 500)
tic(); myinsilicosystem = createInSilicoSystem(mysystemargs); toc()
## plotmosystem
test = plotGlobalSystem(myinsilicosystem, show = T)
ggsave("/media/sf_data/globalPanel1.png", plot = test$globalPanel1, width = 33.9, height = 19.1, units = "cm")
ggsave("/media/sf_data/globalPanel2.png", plot = test$globalPanel2, width = 33.9, height = 19.1, units = "cm")
ggsave("/media/sf_data/globalPanel3.png", plot = test$globalPanel3, width = 33.9, height = 19.1, units = "cm")
ggsave("/media/sf_data/globalPanel4.png", plot = test$globalPanel4, width = 33.9, height = 19.1, units = "cm")
test2 = plotRegulationSystem(myinsilicosystem, c("TC"))
ggsave("/media/sf_data/TCPanel1.png", plot = test2$TCPanel1, width = 33.9, height = 19.1, units = "cm")
ggsave("/media/sf_data/TCPanel2.png", plot = test2$TCPanel2, width = 33.9, height = 19.1, units = "cm")
ggsave("/media/sf_data/TCPanel3.png", plot = test2$TCPanel3, width = 33.9, height = 19.1, units = "cm")
##############################################
# Timing experiment: build nsim systems at each size in sizesim, recording
# elapsed construction time plus each system's gene table and edge list,
# then plot running time and per-simulation summaries.
library(tictoc)
setwd("~/winData/multiomics_networks_simulation")
#setwd("~/GitHub/multiomics_networks_simulation")
source("network_generation.R")
nsim = 100
sizesim = c(50, 100, 200, 500)
size_telapsed = vector("list", length(sizesim))
size_simnod = vector("list", length(sizesim))
size_simedg = vector("list", length(sizesim))
for(s in sizesim){
print(s)
mysystemargs = insilicosystemargs(G = s)
telapsed = vector("numeric", nsim)
simnod = vector("list", nsim)
simedg = vector("list", nsim)
for(i in 1:nsim){
tic(); myinsilicosystem = createInSilicoSystem(mysystemargs); itel = toc(quiet = T)
telapsed[i] = itel$toc - itel$tic
simnod[[i]] = myinsilicosystem$genes
simedg[[i]] = myinsilicosystem$mosystem$edg
}
# NOTE(review): the result lists were allocated with length(sizesim) == 4
# but are indexed by the size *value* s (50...500), which silently grows
# them to length 500 with NULL gaps. unlist()/[[s]] below still work, but
# indexing by match(s, sizesim) would be cleaner.
size_telapsed[[s]] = telapsed
size_simnod[[s]] = simnod
size_simedg[[s]] = simedg
}
#plot(rep(sizesim, rep(nsim, length(sizesim))), unlist(size_telapsed), xlab = "System size (G: number of nodes)", ylab = c("Running time (sec)"))
# Scatter plot of running time against system size.
runningtime = data.frame("G" = rep(sizesim, rep(nsim, length(sizesim))), "runningtime" = unlist(size_telapsed))
runtime = ggplot(runningtime, aes(x = G, y = runningtime)) + geom_point() + xlab("System size (number of genes)") + ylab("Running time (s)")
ggsave("/media/sf_data/runtime.png", plot = runtime, width = 33.9, height = 19.1, units = "cm")
# Per-simulation summaries: fraction of protein-coding genes and edge count.
ressimdf = data.frame("simid" = numeric(), "G" = numeric(), "ratioPC" = numeric(), "E" = numeric())
for(s in sizesim){
ratioPC = sapply(size_simnod[[s]], function(x){sum(x$coding =="PC")/nrow(x)})
E = sapply(size_simedg[[s]], nrow)
ressimdf = rbind(ressimdf, data.frame("simid" = 1:nsim, "G" = rep(s, nsim), "ratioPC" = ratioPC, "E" = E))
}
# Histograms faceted by system size; ggarrange is presumably ggpubr's -- confirm.
g1 = ggplot(ressimdf, aes(x = ratioPC)) + geom_histogram() + facet_grid(G~.) + xlab("Ratio of protein coding genes in the system")
g2 = ggplot(ressimdf, aes(x = E)) + geom_histogram() + facet_grid(G~.) + annotate("text", x = 5000, y = 70, label = sapply(sizesim, function(x){paste("mean =", mean(ressimdf[ressimdf$G == x, "E"]), sep = " ")})) + xlab("Number of regulatory interactions")
sevsimplot2 = ggarrange(g1, g2, ncol = 2)
ggsave("/media/sf_data/sevsimplot2.png", plot = sevsimplot2, width = 33.9, height = 19.1, units = "cm")
#################################################################################################################
#################################################################################################################
# Concurrency sanity check: each of 20 forked workers opens its own Julia
# evaluator on a distinct port (base port + worker id), sleeps a random
# number of seconds *inside Julia*, then shuts its evaluator down.
setwd("~/winData/multiomics_networks_simulation")
#setwd("~/GitHub/multiomics_networks_simulation")
source("network_generation.R")
mybaseport = RJulia()$port
cat("Starting simulations at", format(Sys.time(), usetz = T), "\n")
tic()
test = mclapply(1:20, function(i){
myev = newJuliaEvaluator(port = mybaseport + i) ## create a new Julia evaluator with a port number equal to mybaseport + i (id of the simulation)
sleeprand = sample(1:50, 1)
#print(paste0("Port ", mybaseport+i," sleeping ", sleeprand, " seconds\n"))
juliaCommand("println(\"port %s sleeping for %s seconds\")", mybaseport + i, sleeprand, evaluator = myev)
juliaCommand("sleep(%s)", sleeprand, evaluator = myev)
juliaCommand("println(\"port %s done\")", mybaseport + i, evaluator = myev)
#print(paste0("Port ", mybaseport+i," done!"))
removeJuliaEvaluator(myev)
return(sleeprand)
}, mc.cores = detectCores()-1)
toc()
########################################################################3
|
# Scratch notes on package management in R / RStudio.
# 1. Alternatively: go to Tools -> Install Packages... and install from the dialog.
install.packages("LiblineaR")
library()#with no arguments: lists the packages that are installed
search()#lists the packages (and other environments) currently attached
require("LiblineaR")#loads an installed package (returns FALSE instead of erroring if missing)
detach("package:LiblineaR",unload=TRUE)
remove.packages("LiblineaR")
# A package can also be unloaded by unticking it in the Packages pane,
# and installed via the Packages pane -> Install.
data()
library(help="datasets")
str(iris) #structure of the iris data frame
iris # prints the iris dataset
| /pakages_install.R | no_license | KeshavSharma-IT/R | R | false | false | 527 | r | #1.go to tools and click on install selevt package and install
# NOTE(review): duplicate copy of the package-management notes (dataset
# text-column residue); its first comment line is fused into the preceding
# metadata row.
install.packages("LiblineaR")
library()#with no arguments: lists the packages that are installed
search()#lists the packages (and other environments) currently attached
require("LiblineaR")#loads an installed package (returns FALSE instead of erroring if missing)
detach("package:LiblineaR",unload=TRUE)
remove.packages("LiblineaR")
# A package can also be unloaded by unticking it in the Packages pane,
# and installed via the Packages pane -> Install.
data()
library(help="datasets")
str(iris) #structure of the iris data frame
iris # prints the iris dataset
|
# im(): lattice imitation dynamics on an n x n grid. Each round, every cell
# adopts the strategy of its highest-scoring 3x3 neighbour (itself included;
# ties broken by neighbour order). Returns the proportion of "cooperators"
# (cells with value < 2) after each of `iter` rounds, initial state included.
#
# Args:
#   n    - side length of the square lattice
#   d    - initial density of cells seeded with value 3 (the other strategy)
#   iter - number of imitation rounds
#   b    - score multiplier applied to cells in state > 1
#          (presumably the defector-advantage payoff parameter -- confirm)
#
# NOTE(review): filter2() is not base R -- presumably EBImage::filter2
# (2-D linear filtering); confirm which package is attached, and what its
# boundary behaviour is at the lattice edges.
im = function(n,d =0.1,iter = 250,b=1.9)
{
#generate initial matrix and simulation history storage
cur_mat = matrix(0,n,n)
cur_mat[] = 3*rbinom(n^2,1,d)
# NOTE(review): re-assigning n from the matrix dimension is a no-op here.
n = dim(cur_mat)[1]
nbd_mat = matrix(1,3,3)
storage = array(0,c(n,n,(iter+1)))
props = rep(0,(iter+1))
storage[,,1] = cur_mat
#generate filters: fil2k has a single 1 and, when convolved, reads the
#value of the k-th of the 8 surrounding neighbours for every cell at once
for(i in 1:8)
{
nam = paste("fil2",i,sep="")
temp_mat=matrix(0,3,3)
assign(nam,temp_mat)
}
fil21[1,1]=1
fil22[1,2]=1
fil23[1,3]=1
fil24[2,1]=1
fil25[2,3]=1
fil26[3,1]=1
fil27[3,2]=1
fil28[3,3]=1
for (r in 1:iter) #rounds
{
#generate score_mat: sum_mat counts cells with value > 1 in each 3x3
#block (self included), so (9 - sum_mat) counts the other strategy;
#cells with value > 1 have their score scaled by b
score_mat = matrix(0,n,n)
sum_mat = round(filter2((1*(cur_mat>1)),nbd_mat))
score_mat = ((((1*(cur_mat>1)))*(9-sum_mat)*b) + ((1-(1*(cur_mat>1)))*(9-sum_mat)))
#find highest score: score_i is each cell's i-th neighbour's score
for (i in 1:8)
{
assign(paste("score",i,sep=""),round(filter2(score_mat,get(paste("fil2",i,sep=""))),5))
}
highscore_mat = pmax(score_mat,score1,score2,score3,score4,score5,score6,score7,score8)
#find highest scoring neighbour (0 = keep own strategy; otherwise the
#index of the first neighbour attaining the maximum)
nbr_mat = matrix(0,n,n)
for(i in 1:8)
{
nbr_mat = nbr_mat + (nbr_mat==0)*i*(highscore_mat == get(paste("score",i,sep="")))
}
#generate neighbours move matrix: move_i is 1 where the i-th neighbour
#currently holds the >1 strategy
for(i in 1:8)
{
assign(paste("move",i,sep=""),round(filter2((1*(cur_mat > 1)),get(paste("fil2",i,sep="")))))
}
nbr_move_mat = matrix(0,n,n)
for(i in 1:8)
{
nbr_move_mat = nbr_move_mat + (nbr_mat == i)*(get(paste("move",i,sep="")))
}
nbr_move_mat = nbr_move_mat + (nbr_mat == 0)*(cur_mat > 1)*1
cur_mat = 3*(cur_mat > 1)
#generate new current matrix: cells that changed strategy this round end
#up in states 1/2, unchanged cells in states 0/3 (values < 2 are counted
#as cooperators below)
cur_mat = cur_mat + 2*(cur_mat == 0)*nbr_move_mat - 2*(cur_mat == 3)*(1 - nbr_move_mat)
storage[,,(r+1)] = cur_mat
print(r)
}
for(i in 1:(iter+1)) #find proportion of cooperation
{
props[i] = sum(1*(storage[,,i]<2))/(n^2)
}
return(props)
} | /Imitation.R | no_license | RWilliamson1994/R-Code-for-Project-III | R | false | false | 2,013 | r | im = function(n,d =0.1,iter = 250,b=1.9)
{
# NOTE(review): duplicate copy of the im() lattice-imitation function above
# (dataset text-column residue); its `im = function(...)` header line is
# fused into the preceding metadata row. See the first copy for full
# documentation of the algorithm.
#generate initial matrix and simulation history storage
cur_mat = matrix(0,n,n)
cur_mat[] = 3*rbinom(n^2,1,d)
n = dim(cur_mat)[1]
nbd_mat = matrix(1,3,3)
storage = array(0,c(n,n,(iter+1)))
props = rep(0,(iter+1))
storage[,,1] = cur_mat
#generate filters (fil2k selects the k-th of the 8 neighbours)
for(i in 1:8)
{
nam = paste("fil2",i,sep="")
temp_mat=matrix(0,3,3)
assign(nam,temp_mat)
}
fil21[1,1]=1
fil22[1,2]=1
fil23[1,3]=1
fil24[2,1]=1
fil25[2,3]=1
fil26[3,1]=1
fil27[3,2]=1
fil28[3,3]=1
for (r in 1:iter) #rounds
{
#generate score_mat
score_mat = matrix(0,n,n)
sum_mat = round(filter2((1*(cur_mat>1)),nbd_mat))
score_mat = ((((1*(cur_mat>1)))*(9-sum_mat)*b) + ((1-(1*(cur_mat>1)))*(9-sum_mat)))
#find highest score
for (i in 1:8)
{
assign(paste("score",i,sep=""),round(filter2(score_mat,get(paste("fil2",i,sep=""))),5))
}
highscore_mat = pmax(score_mat,score1,score2,score3,score4,score5,score6,score7,score8)
#find highest scoring neighbour
nbr_mat = matrix(0,n,n)
for(i in 1:8)
{
nbr_mat = nbr_mat + (nbr_mat==0)*i*(highscore_mat == get(paste("score",i,sep="")))
}
#generate neighbours move matrix
for(i in 1:8)
{
assign(paste("move",i,sep=""),round(filter2((1*(cur_mat > 1)),get(paste("fil2",i,sep="")))))
}
nbr_move_mat = matrix(0,n,n)
for(i in 1:8)
{
nbr_move_mat = nbr_move_mat + (nbr_mat == i)*(get(paste("move",i,sep="")))
}
nbr_move_mat = nbr_move_mat + (nbr_mat == 0)*(cur_mat > 1)*1
cur_mat = 3*(cur_mat > 1)
#generate new current matrix
cur_mat = cur_mat + 2*(cur_mat == 0)*nbr_move_mat - 2*(cur_mat == 3)*(1 - nbr_move_mat)
storage[,,(r+1)] = cur_mat
print(r)
}
for(i in 1:(iter+1)) #find proportion of cooperation
{
props[i] = sum(1*(storage[,,i]<2))/(n^2)
}
return(props)
} |
library(mederrRank)
### Name: mixnegbinom.em
### Title: Expectation-Maximization Algorithm for the Mixture of Negative
### Binomial Distributions
### Aliases: mixnegbinom.em
### Keywords: EM Empirical Bayes Medication Errors Optimal Bayesian Ranking
### ranking
### ** Examples
## Not run:
##D data("simdata", package = "mederrRank")
##D summary(simdata)
##D
##D fit <- bhm.mcmc(simdata, nsim = 1000, burnin = 500, scale.factor = 1.1)
##D resamp <- bhm.resample(fit, simdata, p.resample = .1,
##D k = c(3, 6, 10, 30, 60, Inf), eta = c(.5, .8, 1, 1.25, 2))
##D fit2 <- bhm.constr.resamp(fit, resamp, k = 3, eta = .8)
##D plot(fit, fit2, simdata)
##D
##D theta0 <- c(10, 6, 100, 100, .1)
##D ans <- mixnegbinom.em(simdata, theta0, 50000, 0.01,
##D se = TRUE, stratified = TRUE)
##D ans$theta
##D ans$se
##D
##D summary(fit2, ans, simdata)
## End(Not run)
| /data/genthat_extracted_code/mederrRank/examples/mixnegbinom.em.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 870 | r | library(mederrRank)
### Name: mixnegbinom.em
### Title: Expectation-Maximization Algorithm for the Mixture of Negative
### Binomial Distributions
### Aliases: mixnegbinom.em
### Keywords: EM Empirical Bayes Medication Errors Optimal Bayesian Ranking
### ranking
### ** Examples
## Not run:
##D data("simdata", package = "mederrRank")
##D summary(simdata)
##D
##D fit <- bhm.mcmc(simdata, nsim = 1000, burnin = 500, scale.factor = 1.1)
##D resamp <- bhm.resample(fit, simdata, p.resample = .1,
##D k = c(3, 6, 10, 30, 60, Inf), eta = c(.5, .8, 1, 1.25, 2))
##D fit2 <- bhm.constr.resamp(fit, resamp, k = 3, eta = .8)
##D plot(fit, fit2, simdata)
##D
##D theta0 <- c(10, 6, 100, 100, .1)
##D ans <- mixnegbinom.em(simdata, theta0, 50000, 0.01,
##D se = TRUE, stratified = TRUE)
##D ans$theta
##D ans$se
##D
##D summary(fit2, ans, simdata)
## End(Not run)
|
# run_analysis.R downloads, combines, and cleans the University of California Irvine's
# Human Activity Recognition (HAR) dataset created from Galaxy S smartphones.
#
# Output: a tidy dataset holding the mean of every mean()/std() measurement,
# averaged over each (activity, subject_id) pair, written out as .txt and .csv.
#
# Load necessary packages
library(data.table)
library(reshape2)
# Source: UCI-HAR Dataset.
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# Download the zipped data file (skipped if it is already present).
zipfile <- "./getdata-projectfiles-UCI HAR Dataset.zip"
if (!file.exists(zipfile)) {
  download.file(url, destfile = zipfile, mode = "wb")
}
# Unzip once into the expected folder.
zipfolder <- "./UCI HAR Dataset"
if (!file.exists(zipfolder)) {
  unzip(zipfile)
}
# Activity lookup: after dropping the numeric ID column (V1), row position
# equals the activity ID, so names can be looked up by integer index.
dt_activity_names <- data.table(read.table(file.path(zipfolder, "activity_labels.txt"), stringsAsFactors = FALSE))
dt_activity_names$V1 <- NULL
setnames(dt_activity_names, 1, "activity")
dt_activity_names$activity <- tolower(dt_activity_names$activity)
# Feature names: one row per measurement column of X_train.txt / X_test.txt.
dt_feature_names <- data.table(read.table(file.path(zipfolder, "features.txt"), stringsAsFactors = FALSE))
dt_feature_names$V1 <- NULL
setnames(dt_feature_names, 1, "name")
# Keep only the mean() and std() measurements; the measurement files are read
# with default column names V1..V561, so build the matching column names.
measurement_features <- grep("^[t|f].*-mean\\(\\)|^[t|f].*-std\\(\\)", dt_feature_names$name)
measurement_columns <- paste0("V", measurement_features)
# Make feature names more descriptive.
dt_feature_names$name <- sub("BodyBody", "body_", dt_feature_names$name)
dt_feature_names$name <- sub("Body", "body_", dt_feature_names$name)
dt_feature_names$name <- sub("^[t]", "time_", dt_feature_names$name)
dt_feature_names$name <- sub("^[f]", "frequency_", dt_feature_names$name)
dt_feature_names$name <- sub("Acc", "accelerometer_", dt_feature_names$name)
dt_feature_names$name <- sub("Gravity", "gravity_motion_", dt_feature_names$name)
dt_feature_names$name <- sub("Jerk", "jerk_motion_", dt_feature_names$name)
dt_feature_names$name <- sub("Gyro", "gyroscope_", dt_feature_names$name)
dt_feature_names$name <- sub("-mean\\(\\)-X", "x_MEAN", dt_feature_names$name)
dt_feature_names$name <- sub("-mean\\(\\)-Y", "y_MEAN", dt_feature_names$name)
dt_feature_names$name <- sub("-mean\\(\\)-Z", "z_MEAN", dt_feature_names$name)
dt_feature_names$name <- sub("-std\\(\\)-X", "x_STD", dt_feature_names$name)
dt_feature_names$name <- sub("-std\\(\\)-Y", "y_STD", dt_feature_names$name)
dt_feature_names$name <- sub("-std\\(\\)-Z", "z_STD", dt_feature_names$name)
dt_feature_names$name <- sub("Mag-mean\\(\\)", "magnitude_MEAN", dt_feature_names$name)
dt_feature_names$name <- sub("Mag-std\\(\\)", "magnitude_STD", dt_feature_names$name)
# Descriptive names of the *selected* columns only.
# BUG FIX: the original renamed the selected columns with the first N names
# of the FULL 561-feature list, mislabelling every column after the first
# feature that grep() skipped. Index by measurement_features instead.
measurement_names <- dt_feature_names$name[measurement_features]
# Read one data split ("train" or "test") and return a data.table of
# subject_id + activity + the selected, descriptively named measurements.
read_split <- function(split) {
  split_dir <- file.path(zipfolder, split)
  # Subject IDs, one per observation.
  dt_subject <- data.table(read.table(file.path(split_dir, paste0("subject_", split, ".txt")), stringsAsFactors = FALSE))
  setnames(dt_subject, 1, "subject_id")
  # Activity IDs, mapped to descriptive names by integer index (the original
  # indexed the whole data.table here, producing a non-atomic column).
  dt_labels <- data.table(read.table(file.path(split_dir, paste0("y_", split, ".txt")), stringsAsFactors = FALSE))
  setnames(dt_labels, 1, "activity")
  dt_labels$activity <- dt_activity_names$activity[dt_labels$activity]
  # Measurements: keep only the mean()/std() columns, then rename them.
  dt_measurements <- data.table(read.table(file.path(split_dir, paste0("X_", split, ".txt")), stringsAsFactors = FALSE))
  dt_measurements <- subset(dt_measurements, select = measurement_columns)
  setnames(dt_measurements, measurement_names)
  cbind(dt_subject, dt_labels, dt_measurements)
}
# Merge the Training and Test data tables.
dt_merged_data <- rbind(read_split("train"), read_split("test"))
setkeyv(dt_merged_data, c("activity", "subject_id"))
# Tidy dataset: one row per (activity, subject_id) pair (180 observations),
# holding the mean of each measurement variable.
dt_tidydata <- dt_merged_data[, lapply(.SD, mean), by = list(activity, subject_id)]
# Write comma-separated flat text files of the tidy data table.
txt_file_tidydata <- "./UCI HAR Dataset/tidydata.txt"
csv_file_tidydata <- "./UCI HAR Dataset/tidydata.csv"
write.table(dt_tidydata, file = txt_file_tidydata, sep = ",", row.names = F)
write.table(dt_tidydata, file = csv_file_tidydata, sep = ",", row.names = F) | /run_analysis.R | no_license | jjennings-m/TidyData | R | false | false | 6,065 | r | # run_analysis.R downloads, combines, and cleans the University of California Irvine's
# Human Activity Recognition (HAR) dataset created from Galaxy S smartphones.
# NOTE(review): this appears to be a duplicate copy of run_analysis.R
# (dataset text-column residue); its first header line is fused into the
# preceding metadata row.
#
# Load necessary packages
library(data.table)
library(reshape2)
# Source: UCI-HAR Dataset.
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# Set working directory and download zipped data file.
# setwd("~/GitHub/")
zipfile <- "./getdata-projectfiles-UCI HAR Dataset.zip"
if (!file.exists(zipfile)) {
download.file(url, destfile = zipfile, mode = "wb")
}
# Create a folder to unzip the file to.
zipfolder <- "./UCI HAR Dataset"
if (!file.exists(zipfolder)) {
unzip(zipfile)
}
# Create data table of activity names and create a column to describe the activity.
# (After dropping V1, row position equals the numeric activity ID.)
activity_names <- paste(zipfolder, "/activity_labels.txt", sep = "")
dt_activity_names <- data.table(read.table(activity_names, stringsAsFactors = F))
dt_activity_names$V1 <- NULL
setnames(dt_activity_names, 1, "activity")
dt_activity_names$activity <- tolower(dt_activity_names$activity)
# Create data table of feature names and add a column name for description.
feature_names <- paste(zipfolder, "/features.txt", sep = "")
dt_feature_names <- data.table(read.table(feature_names, stringsAsFactors = F))
dt_feature_names$V1 <- NULL
setnames(dt_feature_names, 1, "name")
# Load the mean and standard deviation measurements, transform rows into columns, discard the garbage.
measurement_features <- grep("^[t|f].*-mean\\(\\)|^[t|f].*-std\\(\\)", dt_feature_names$name)
measurement_columns <- paste("V", measurement_features, sep = "")
dt_features <- dt_feature_names[measurement_features,]
# Make feature names more descriptive.
dt_feature_names$name <- sub("BodyBody", "body_", dt_feature_names$name)
dt_feature_names$name <- sub("Body", "body_", dt_feature_names$name)
dt_feature_names$name <- sub("^[t]", "time_", dt_feature_names$name)
dt_feature_names$name <- sub("^[f]", "frequency_", dt_feature_names$name)
dt_feature_names$name <- sub("Acc", "accelerometer_", dt_feature_names$name)
dt_feature_names$name <- sub("Gravity", "gravity_motion_", dt_feature_names$name)
dt_feature_names$name <- sub("Jerk", "jerk_motion_", dt_feature_names$name)
dt_feature_names$name <- sub("Gyro", "gyroscope_", dt_feature_names$name)
dt_feature_names$name <- sub("-mean\\(\\)-X", "x_MEAN", dt_feature_names$name)
dt_feature_names$name <- sub("-mean\\(\\)-Y", "y_MEAN", dt_feature_names$name)
dt_feature_names$name <- sub("-mean\\(\\)-Z", "z_MEAN", dt_feature_names$name)
dt_feature_names$name <- sub("-std\\(\\)-X", "x_STD", dt_feature_names$name)
dt_feature_names$name <- sub("-std\\(\\)-Y", "y_STD", dt_feature_names$name)
dt_feature_names$name <- sub("-std\\(\\)-Z","z_STD",dt_feature_names$name)
dt_feature_names$name <- sub("Mag-mean\\(\\)", "magnitude_MEAN", dt_feature_names$name)
dt_feature_names$name <- sub("Mag-std\\(\\)", "magnitude_STD", dt_feature_names$name)
# Extract the Subject IDs from the training file and create a Subject ID column.
subject_training <- paste(zipfolder, "/train/subject_train.txt", sep = "")
dt_subject_training <- data.table(read.table(subject_training, stringsAsFactors = F))
setnames(dt_subject_training, 1, "subject_id")
# Extract the Activity IDs from training file, create an Activity ID column, and
# and replace Activity IDs with Activity Names.
labels_training <- paste(zipfolder, "/train/y_train.txt", sep = "")
dt_labels_training <- data.table(read.table(labels_training, stringsAsFactors = F))
setnames(dt_labels_training, 1, "activity")
dt_labels_training$activity <- dt_activity_names[dt_labels_training$activity,]
# Combine all of the Training data tabels into new data table for future merging.
training_data <- paste(zipfolder, "/train/X_train.txt", sep = "")
dt_training_data <- data.table(read.table(training_data, stringsAsFactors = F))
dt_training_data <- subset(dt_training_data, select = measurement_columns)
# NOTE(review): dt_feature_names$name[i] indexes the FULL 561-feature list,
# not the selected subset (measurement_features / dt_features), so columns
# after the first feature skipped by grep() get the wrong names; should be
# dt_feature_names$name[measurement_features[i]].
for (i in 1:length(dt_training_data)) {
setnames(dt_training_data, i, dt_feature_names$name[i])
}
dt_training_data <- cbind(dt_subject_training, dt_labels_training, dt_training_data)
# Extract the Subject IDs from the test file and create a Subject ID column.
subject_test <- paste(zipfolder, "/test/subject_test.txt", sep = "")
dt_subject_test <- data.table(read.table(subject_test, stringsAsFactors = F))
setnames(dt_subject_test, 1, "subject_id")
# Extract the Activity IDs from test file, create an Activity ID column, and
# and replace Activity IDs with Activity Names.
labels_test <- paste(zipfolder, "/test/y_test.txt", sep = "")
dt_labels_test <- data.table(read.table(labels_test, stringsAsFactors = F))
setnames(dt_labels_test, 1, "activity")
dt_labels_test$activity <- dt_activity_names[dt_labels_test$activity,]
# Combine all of the Test data tabels into new data table for future merging.
test_data <- paste(zipfolder, "/test/X_test.txt", sep = "")
dt_test_data <- data.table(read.table(test_data, stringsAsFactors = F))
dt_test_data <- subset(dt_test_data, select = measurement_columns)
# NOTE(review): same full-list indexing issue as in the training loop above.
for (i in 1:length(dt_test_data)) {
setnames(dt_test_data, i, dt_feature_names$name[i])
}
dt_test_data <- cbind(dt_subject_test, dt_labels_test, dt_test_data)
# Merge the Training and Test data tables.
dt_merged_data <- rbind(dt_training_data, dt_test_data)
setkeyv(dt_merged_data, c("activity", "subject_id"))
# Create tidy data table from the combined data table,
# calculating the means of each variable by activity and subject_id pair.
# This tidy dataset consists of 180 observations (1 observation per activity and subject_id pair).
dt_tidydata <- dt_merged_data[, lapply(.SD, mean), by = list(activity, subject_id)]
# Create comma-separated flat text file of the tidy data table in the current working directory.
txt_file_tidydata <- "./UCI HAR Dataset/tidydata.txt"
csv_file_tidydata <- "./UCI HAR Dataset/tidydata.csv"
write.table(dt_tidydata, file = txt_file_tidydata, sep = ",", row.names = F)
write.table(dt_tidydata, file = csv_file_tidydata, sep = ",", row.names = F) |
#' @name dunnetts1
#' @title Dunnett's One-tailed
#' @param targetcolumn Character string, name of response column to be tested
#' @param alpha Significance level (numeric) to be used
#' @param direction Direction of the anticipated difference
#' @param data Input dataframe.
#' @keywords internal
#' @export
dunnetts1 <- function (targetcolumn, alpha, direction, data) {
# Bind the grouping factor locally so the model formula can resolve it
# alongside the data[, targetcolumn] response.
dose_fac <- data$dose_fac
# Fit the one-way ANOVA of the response on the dose factor.
data.anova <- aov(data[,targetcolumn] ~ dose_fac, data=data)
# One-tailed Dunnett contrasts of every dose level vs control, in the
# requested direction (multcomp::glht / mcp).
# NOTE(review): `alpha` is never referenced in this body -- confirm whether
# it was meant to be forwarded to the summary/formatting step.
onetaildunnetts.glht <- glht(data.anova, linfct=mcp(dose_fac = "Dunnett"), alternative=direction)
# Retrieve simplified output
dunnetts1summary <- summary(onetaildunnetts.glht)
levels <- levels(data$dose_fac)
output <- dunnetts.format(dunnetts1summary, "Dunnett's One-tailed Multiple Comparisons Test", levels)
return(output)
} | /R/dunnetts1.r | no_license | cran/drsmooth | R | false | false | 822 | r | #' @name dunnetts1
#' @title Dunnett's One-tailed
#' @description Fits a one-way ANOVA of the response on \code{dose_fac} and
#'   runs a one-tailed Dunnett multiple-comparisons test of every dose level
#'   against the control level.
#' @param targetcolumn Character string, name of response column to be tested
#' @param alpha Significance level (numeric) to be used (not referenced in
#'   this implementation; retained for interface compatibility)
#' @param direction Direction of the anticipated difference
#' @param data Input dataframe.
#' @keywords internal
#' @export
dunnetts1 <- function (targetcolumn, alpha, direction, data) {
  # Bind the grouping factor locally so both the response and the grouping
  # term resolve when the model formula is evaluated.
  dose_fac <- data$dose_fac
  fitted_anova <- aov(data[, targetcolumn] ~ dose_fac, data = data)
  # One-tailed Dunnett contrasts in the requested direction.
  dunnett_fit <- glht(fitted_anova, linfct = mcp(dose_fac = "Dunnett"), alternative = direction)
  dunnett_summary <- summary(dunnett_fit)
  dose_levels <- levels(data$dose_fac)
  dunnetts.format(dunnett_summary, "Dunnett's One-tailed Multiple Comparisons Test", dose_levels)
}
# UI half of the filter-tabs Shiny module: a "Filter by" heading plus a
# placeholder that the module server fills with per-column filter boxes.
filterTabsInput <- function(id) {
  namespaced <- NS(id)
  tagList(h3("Filter by"), uiOutput(namespaced("filter_boxes")))
}
# Module server function
# Returns a reactive yielding `source_data` filtered down to the rows whose
# factor columns match the user's checkbox selections. Also renders one
# collapsible checkbox box per factor column into "filter_boxes".
filterTabs <- function(input, output, session, source_data) {
# Names of the factor columns of source_data (only these are filterable).
filter_box_columns <- function() {
factors <- unlist(purrr::map(names(source_data),
.f = function(x) is.factor(source_data[[x]])))
return(names(source_data)[factors])
}
# One collapsed box per factor column, each holding a checkbox group with
# every level of that column pre-selected. Input IDs are namespaced with
# the module session so they match the input[[...]] lookups below.
output$filter_boxes <- renderUI({
purrr::map(filter_box_columns(),
.f = function(x) box(title = x,
collapsible = TRUE,
collapsed = TRUE,
width = NULL,
checkboxGroupInput(
inputId = session$ns(x),
label = "Show",
choices = levels(
source_data[[x]]),
selected = levels(
source_data[[x]])
)))
})
# Build one `%in%` filter expression per factor column and splice them all
# into a single dplyr::filter() call (multiple expressions are ANDed).
filtered_source_data <- reactive({
filters <- purrr::map(filter_box_columns(),
.f = function(x) {
enquo_x <- enquo(x)
rlang::expr(.data[[x]] %in% input[[!!enquo_x]])
})
dplyr::filter(source_data, !!! filters)
})
return(filtered_source_data)
}
| /filter_tabs.R | no_license | md0u80c9/FrailtyDashboard | R | false | false | 1,221 | r |
filterTabsInput <- function(id) {
# Create a namespace function using the provided id
ns <- NS(id)
tagList(
h3("Filter by"),
uiOutput(ns("filter_boxes"))
)
}
# Module server function
filterTabs <- function(input, output, session, source_data) {
filter_box_columns <- function() {
factors <- unlist(purrr::map(names(source_data),
.f = function(x) is.factor(source_data[[x]])))
return(names(source_data)[factors])
}
output$filter_boxes <- renderUI({
purrr::map(filter_box_columns(),
.f = function(x) box(title = x,
collapsible = TRUE,
collapsed = TRUE,
width = NULL,
checkboxGroupInput(
inputId = session$ns(x),
label = "Show",
choices = levels(
source_data[[x]]),
selected = levels(
source_data[[x]])
)))
})
filtered_source_data <- reactive({
filters <- purrr::map(filter_box_columns(),
.f = function(x) {
enquo_x <- enquo(x)
rlang::expr(.data[[x]] %in% input[[!!enquo_x]])
})
dplyr::filter(source_data, !!! filters)
})
return(filtered_source_data)
}
|
#' module_emissions_L152.MACC
#'
#' Create Marginal Abatement Cost Curves, in percent reduction by 1990 USD abatement costs from EPA cost curves.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{L152.MAC_pct_R_S_Proc_EPA}. The corresponding file in the
#' original data system was \code{L152.MACC.R} (emissions level1).
#' @details Create Marginal abatement cost curves, in percent reduction by 1990 USD costs from EPA cost curves.
#' Choose between 2020 or 2030 data in constants file - emissions.EPA_MACC_YEAR.
#' @importFrom assertthat assert_that
#' @importFrom dplyr filter mutate select
#' @importFrom tidyr gather spread
#' @author RMH May 2017
module_emissions_L152.MACC <- function(command, ...) {
# Chunk driver interface: `command` selects declaring inputs, declaring
# outputs, or actually building the output data.
if(command == driver.DECLARE_INPUTS) {
return(c(FILE = "emissions/EPA_MACC_baselines_MtCO2e",
FILE = "emissions/EPA_MACC_2020_MtCO2e",
FILE = "emissions/EPA_MACC_2030_MtCO2e"))
} else if(command == driver.DECLARE_OUTPUTS) {
return(c("L152.MAC_pct_R_S_Proc_EPA"))
} else if(command == driver.MAKE) {
Process <- EPA_region <- cost_2010USD_tCO2e <- reduction_MtCO2e <- Sector <-
EPA_region_code <- cost_1990USD_tCe <- year <- baseline_MtCO2e <-
reduction_pct <- NULL # silence package check.
all_data <- list(...)[[1]]
# Load required inputs
EPA_MACC_baselines_MtCO2e_in <- get_data(all_data, "emissions/EPA_MACC_baselines_MtCO2e")
EPA_MACC_2020_MtCO2e <- get_data(all_data, "emissions/EPA_MACC_2020_MtCO2e")
EPA_MACC_2030_MtCO2e <- get_data(all_data, "emissions/EPA_MACC_2030_MtCO2e")
# Assign MACC data based on MACC curve year assumption (emissions.EPA_MACC_YEAR)
if(emissions.EPA_MACC_YEAR %in% c(2020, 2030)) {
if(emissions.EPA_MACC_YEAR == 2020) EPA_MACC_MtCO2e <- EPA_MACC_2020_MtCO2e
if(emissions.EPA_MACC_YEAR == 2030) EPA_MACC_MtCO2e <- EPA_MACC_2030_MtCO2e
} else {
stop("MAC curve year needs to be either 2020 or 2030")
}
# Make processes and region names consistent
EPA_MACC_baselines_MtCO2e <- EPA_MACC_baselines_MtCO2e_in %>%
mutate(Process = sub("\\&", "and", Process),
EPA_region = sub("\\&", "and", EPA_region),
EPA_region = sub("World", "Global", EPA_region),
EPA_region = sub("Global Total", "Global", EPA_region))
EPA_MACC_MtCO2e <- EPA_MACC_MtCO2e %>%
mutate(Process = sub("\\&", "and", Process))
# Convert MAC curves to long form
# Convert from 2010$/tCO2e to 1990$/tC
# NOTE(review): tidyr::gather()/spread() used here are superseded by
# pivot_longer()/pivot_wider() -- consider updating when convenient.
L152.EPA_MACC_MtCO2e <- EPA_MACC_MtCO2e %>%
gather(cost_2010USD_tCO2e, reduction_MtCO2e, -Sector, -Process, -EPA_region, -EPA_region_code) %>%
mutate(cost_2010USD_tCO2e = as.numeric(cost_2010USD_tCO2e),
cost_1990USD_tCe = round(cost_2010USD_tCO2e * emissions.CONV_C_CO2 * gdp_deflator(1990, base_year = 2010), 0)) %>%
select(-cost_2010USD_tCO2e)
# For in abatement and basebline data:
# Combine aluminum and magnesium processes: define function, then call in both instances
combine_Al_Mg <- function(x) {
x %>%
mutate(Process = sub("Primary Aluminum Production", "Aluminum and Magnesium Production", Process),
Process = sub("Magnesium Manufacturing", "Aluminum and Magnesium Production", Process))
}
# Abatement data
# (sum reductions within each group after merging the Al/Mg processes)
L152.EPA_MACC_MtCO2e <- L152.EPA_MACC_MtCO2e %>%
ungroup %>%
combine_Al_Mg %>%
group_by(Sector, Process, EPA_region, EPA_region_code, cost_1990USD_tCe) %>%
summarize_at(vars(reduction_MtCO2e), sum)
# Baseline data
# Also filter for only EPA MACC year
L152.EPA_MACC_baselines_MtCO2e <- EPA_MACC_baselines_MtCO2e %>%
combine_Al_Mg %>%
gather_years(value_col = "baseline_MtCO2e") %>%
filter(year == emissions.EPA_MACC_YEAR) %>%
group_by(Sector, Process, EPA_region) %>%
summarize_at(vars(baseline_MtCO2e), sum)
# Match in the baseline emissions quantities to abatement tibble then calculate abatement percentages
# Use left_join - there should be NAs (i.e., there are sectors where the baseline is zero) - then drop those NAs
# (ie. MAC curves in regions where the sector/process does not exist - the baseline is zero)
L152.EPA_MACC_percent_MtCO2e <- L152.EPA_MACC_MtCO2e %>%
left_join(L152.EPA_MACC_baselines_MtCO2e ,
by = c("Sector", "Process", "EPA_region")) %>%
mutate(reduction_pct = reduction_MtCO2e / baseline_MtCO2e) %>%
filter(!is.na(reduction_pct)) %>%
ungroup %>%
select(-EPA_region_code, -reduction_MtCO2e, -baseline_MtCO2e)
# Select reduction percentage data for the given tax levels,
# tax levels in emissions.MAC_TAXES are simply a range of costs in $1990 USD so we aren't retaining superfluous detail
# create a new df with all rows for all costs for each unique Sector-Process-Region,
# then add reduction percentages at those costs
# (spread() leaves one wide column per 1990USD/tC tax level)
L152.MAC_pct_R_S_Proc_EPA <- L152.EPA_MACC_percent_MtCO2e %>%
select(Sector, Process, EPA_region) %>%
unique %>%
repeat_add_columns(tibble(cost_1990USD_tCe = round(emissions.MAC_TAXES * emissions.CONV_C_CO2 * gdp_deflator(1990, base_year = 2010), 0))) %>%
left_join_error_no_match(L152.EPA_MACC_percent_MtCO2e,
by = c("Sector", "Process", "EPA_region", "cost_1990USD_tCe")) %>%
spread(cost_1990USD_tCe, reduction_pct)
# ===================================================
# Produce outputs
# Attach the standard output metadata (title/units/comments/precursors).
L152.MAC_pct_R_S_Proc_EPA <- L152.MAC_pct_R_S_Proc_EPA %>%
add_title("Marginal abatement cost curves by EPA region / EPA sector / process") %>%
add_units("%") %>%
add_comments("Marginal abatement cost curves, in percent reduction by 1990 USD abatement costs from EPA cost curves") %>%
add_legacy_name("L152.MAC_pct_R_S_Proc_EPA") %>%
add_precursors("emissions/EPA_MACC_baselines_MtCO2e",
"emissions/EPA_MACC_2020_MtCO2e",
"emissions/EPA_MACC_2030_MtCO2e")
return_data(L152.MAC_pct_R_S_Proc_EPA)
} else {
stop("Unknown command")
}
}
| /input/gcamdata/R/zchunk_L152.MACC.R | permissive | thuliuyang/gcam-core | R | false | false | 6,323 | r | #' module_emissions_L152.MACC
#'
#' Create Marginal Abatement Cost Curves, in percent reduction by 1990 USD abatement costs from EPA cost curves.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{L152.MAC_pct_R_S_Proc_EPA}. The corresponding file in the
#' original data system was \code{L152.MACC.R} (emissions level1).
#' @details Create Marginal abatement cost curves, in percent reduction by 1990 USD costs from EPA cost curves.
#' Choose between 2020 or 2030 data in constants file - emissions.EPA_MACC_YEAR.
#' @importFrom assertthat assert_that
#' @importFrom dplyr filter mutate select
#' @importFrom tidyr gather spread
#' @author RMH May 2017
module_emissions_L152.MACC <- function(command, ...) {
  if(command == driver.DECLARE_INPUTS) {
    return(c(FILE = "emissions/EPA_MACC_baselines_MtCO2e",
             FILE = "emissions/EPA_MACC_2020_MtCO2e",
             FILE = "emissions/EPA_MACC_2030_MtCO2e"))
  } else if(command == driver.DECLARE_OUTPUTS) {
    return(c("L152.MAC_pct_R_S_Proc_EPA"))
  } else if(command == driver.MAKE) {
    # Dummy bindings for the non-standard-evaluation column names used in the
    # dplyr/tidyr calls below
    Process <- EPA_region <- cost_2010USD_tCO2e <- reduction_MtCO2e <- Sector <-
      EPA_region_code <- cost_1990USD_tCe <- year <- baseline_MtCO2e <-
      reduction_pct <- NULL # silence package check.
    all_data <- list(...)[[1]]
    # Load required inputs
    EPA_MACC_baselines_MtCO2e_in <- get_data(all_data, "emissions/EPA_MACC_baselines_MtCO2e")
    EPA_MACC_2020_MtCO2e <- get_data(all_data, "emissions/EPA_MACC_2020_MtCO2e")
    EPA_MACC_2030_MtCO2e <- get_data(all_data, "emissions/EPA_MACC_2030_MtCO2e")
    # Assign MACC data based on MACC curve year assumption (emissions.EPA_MACC_YEAR)
    if(emissions.EPA_MACC_YEAR %in% c(2020, 2030)) {
      if(emissions.EPA_MACC_YEAR == 2020) EPA_MACC_MtCO2e <- EPA_MACC_2020_MtCO2e
      if(emissions.EPA_MACC_YEAR == 2030) EPA_MACC_MtCO2e <- EPA_MACC_2030_MtCO2e
    } else {
      stop("MAC curve year needs to be either 2020 or 2030")
    }
    # Make processes and region names consistent
    # ("&" -> "and"; "World" and "Global Total" -> "Global")
    EPA_MACC_baselines_MtCO2e <- EPA_MACC_baselines_MtCO2e_in %>%
      mutate(Process = sub("\\&", "and", Process),
             EPA_region = sub("\\&", "and", EPA_region),
             EPA_region = sub("World", "Global", EPA_region),
             EPA_region = sub("Global Total", "Global", EPA_region))
    EPA_MACC_MtCO2e <- EPA_MACC_MtCO2e %>%
      mutate(Process = sub("\\&", "and", Process))
    # Convert MAC curves to long form
    # Convert from 2010$/tCO2e to 1990$/tC
    # (emissions.CONV_C_CO2 presumably converts the per-tCO2e basis to per-tC and
    # gdp_deflator(1990, base_year = 2010) rebases 2010$ to 1990$ -- confirm
    # against the package constants if this is ever changed)
    L152.EPA_MACC_MtCO2e <- EPA_MACC_MtCO2e %>%
      gather(cost_2010USD_tCO2e, reduction_MtCO2e, -Sector, -Process, -EPA_region, -EPA_region_code) %>%
      mutate(cost_2010USD_tCO2e = as.numeric(cost_2010USD_tCO2e),
             cost_1990USD_tCe = round(cost_2010USD_tCO2e * emissions.CONV_C_CO2 * gdp_deflator(1990, base_year = 2010), 0)) %>%
      select(-cost_2010USD_tCO2e)
    # For abatement and baseline data:
    # Combine aluminum and magnesium processes: define function, then call in both instances
    combine_Al_Mg <- function(x) {
      x %>%
        mutate(Process = sub("Primary Aluminum Production", "Aluminum and Magnesium Production", Process),
               Process = sub("Magnesium Manufacturing", "Aluminum and Magnesium Production", Process))
    }
    # Abatement data: sum reductions over the combined processes at each cost point
    L152.EPA_MACC_MtCO2e <- L152.EPA_MACC_MtCO2e %>%
      ungroup %>%
      combine_Al_Mg %>%
      group_by(Sector, Process, EPA_region, EPA_region_code, cost_1990USD_tCe) %>%
      summarize_at(vars(reduction_MtCO2e), sum)
    # Baseline data
    # Also filter for only EPA MACC year
    L152.EPA_MACC_baselines_MtCO2e <- EPA_MACC_baselines_MtCO2e %>%
      combine_Al_Mg %>%
      gather_years(value_col = "baseline_MtCO2e") %>%
      filter(year == emissions.EPA_MACC_YEAR) %>%
      group_by(Sector, Process, EPA_region) %>%
      summarize_at(vars(baseline_MtCO2e), sum)
    # Match in the baseline emissions quantities to abatement tibble then calculate abatement percentages
    # Use left_join - there should be NAs (i.e., there are sectors where the baseline is zero) - then drop those NAs
    # (ie. MAC curves in regions where the sector/process does not exist - the baseline is zero)
    L152.EPA_MACC_percent_MtCO2e <- L152.EPA_MACC_MtCO2e %>%
      left_join(L152.EPA_MACC_baselines_MtCO2e ,
                by = c("Sector", "Process", "EPA_region")) %>%
      mutate(reduction_pct = reduction_MtCO2e / baseline_MtCO2e) %>%
      filter(!is.na(reduction_pct)) %>%
      ungroup %>%
      select(-EPA_region_code, -reduction_MtCO2e, -baseline_MtCO2e)
    # Select reduction percentage data for the given tax levels,
    # tax levels in emissions.MAC_TAXES are simply a range of costs in $1990 USD so we aren't retaining superfluous detail
    # create a new df with all rows for all costs for each unique Sector-Process-Region,
    # then add reduction percentages at those costs
    L152.MAC_pct_R_S_Proc_EPA <- L152.EPA_MACC_percent_MtCO2e %>%
      select(Sector, Process, EPA_region) %>%
      unique %>%
      repeat_add_columns(tibble(cost_1990USD_tCe = round(emissions.MAC_TAXES * emissions.CONV_C_CO2 * gdp_deflator(1990, base_year = 2010), 0))) %>%
      left_join_error_no_match(L152.EPA_MACC_percent_MtCO2e,
                               by = c("Sector", "Process", "EPA_region", "cost_1990USD_tCe")) %>%
      spread(cost_1990USD_tCe, reduction_pct)
    # ===================================================
    # Produce outputs: attach gcamdata metadata and return the final tibble
    L152.MAC_pct_R_S_Proc_EPA <- L152.MAC_pct_R_S_Proc_EPA %>%
      add_title("Marginal abatement cost curves by EPA region / EPA sector / process") %>%
      add_units("%") %>%
      add_comments("Marginal abatement cost curves, in percent reduction by 1990 USD abatement costs from EPA cost curves") %>%
      add_legacy_name("L152.MAC_pct_R_S_Proc_EPA") %>%
      add_precursors("emissions/EPA_MACC_baselines_MtCO2e",
                     "emissions/EPA_MACC_2020_MtCO2e",
                     "emissions/EPA_MACC_2030_MtCO2e")
    return_data(L152.MAC_pct_R_S_Proc_EPA)
  } else {
    stop("Unknown command")
  }
}
|
# Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly. The
# following two functions are used to cache the inverse of a matrix.
# makeCacheMatrix creates a list containing functions to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
# Construct a caching wrapper around a matrix. The returned value is a list
# of four closures that share the enclosed state:
#   setMatrix(m)    -- store a new matrix and invalidate any cached inverse
#   getMatrix()     -- retrieve the stored matrix
#   cacheInverse(i) -- store a computed inverse in the cache
#   getInverse()    -- retrieve the cached inverse, or NULL if none is cached
makeCacheMatrix <- function(x = numeric()) {
  # NULL means "no inverse cached yet"
  inv_cache <- NULL
  list(
    setMatrix = function(newValue) {
      # replacing the matrix invalidates the previously cached inverse
      x <<- newValue
      inv_cache <<- NULL
    },
    getMatrix = function() x,
    cacheInverse = function(solve) inv_cache <<- solve,
    getInverse = function() inv_cache
  )
}
# The following function returns the inverse of the matrix. It first checks if
# the inverse has already been computed. If so, it gets the result and skips the
# computation. If not, it computes the inverse and stores it in the cache via
# the cacheInverse function.
# This function assumes that the matrix is always invertible.
# Return the inverse of the matrix held in a makeCacheMatrix() wrapper `y`.
# If the inverse has been computed before it is served from the cache;
# otherwise it is computed with solve(), stored via y$cacheInverse(), and
# returned. Assumes the stored matrix is invertible (solve() errors otherwise).
# NOTE(review): the `...` argument is accepted but never forwarded to
# solve(); consider solve(data, ...) if extra arguments are intended.
cacheSolve <- function(y, ...) {
  # get the cached value
  inverse <- y$getInverse()
  # if a cached value exists return it and skip recomputation
  if(!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  # otherwise get the matrix, calculate the inverse and store it in
  # the cache
  data <- y$getMatrix()
  inverse <- solve(data)
  y$cacheInverse(inverse)
  # return the inverse
  inverse
} | /cachematrix.R | no_license | avinish1/ProgrammingAssignment2 | R | false | false | 1,917 | r | # Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly. The
# following two functions are used to cache the inverse of a matrix.
# makeCacheMatrix creates a list containing functions to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
# Wrap a matrix together with a memo slot for its inverse.
# Returns a list of accessor closures over the shared environment:
# setMatrix / getMatrix manage the matrix, cacheInverse / getInverse
# manage the memoised inverse (NULL until cacheInverse is called).
makeCacheMatrix <- function(x = numeric()) {
  # memo holds the cached inverse; NULL signals "nothing cached"
  memo <- NULL
  setMatrix <- function(newValue) {
    # a new matrix makes any previously cached inverse stale
    x <<- newValue
    memo <<- NULL
  }
  getMatrix <- function() x
  cacheInverse <- function(solve) memo <<- solve
  getInverse <- function() memo
  # expose the four accessors as a named list
  list(setMatrix = setMatrix, getMatrix = getMatrix,
       cacheInverse = cacheInverse, getInverse = getInverse)
}
# The following function returns the inverse of the matrix. It first checks if
# the inverse has already been computed. If so, it gets the result and skips the
# computation. If not, it computes the inverse and stores it in the cache via
# the cacheInverse function.
# This function assumes that the matrix is always invertible.
# Return the inverse of the matrix held in a makeCacheMatrix() wrapper `y`,
# computing it at most once: cached results are reused, fresh results are
# computed with solve() and stored via y$cacheInverse().
# Assumes the stored matrix is invertible (solve() errors otherwise).
# NOTE(review): `...` is accepted but not forwarded to solve();
# consider solve(data, ...) if extra arguments are intended.
cacheSolve <- function(y, ...) {
  # get the cached value
  inverse <- y$getInverse()
  # if a cached value exists return it and skip recomputation
  if(!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  # otherwise get the matrix, calculate the inverse and store it in
  # the cache
  data <- y$getMatrix()
  inverse <- solve(data)
  y$cacheInverse(inverse)
  # return the inverse
  inverse
} |
library(ggplot2)
## Compare PM2.5 emissions from motor-vehicle sources in Baltimore City
## (fips 24510) vs. Los Angeles County (fips 06037) over time.
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SRC <- readRDS("Source_Classification_Code.rds")
# Sector names that mention "vehicle" (case-insensitive)
VEH <- grep("vehicle", SRC$EI.Sector, value = TRUE, ignore.case = TRUE)
SRC.VEH <- subset(SRC, SRC$EI.Sector %in% VEH, select = SCC)
# Keep only the two counties of interest, then only vehicle source codes
BC.LA <- subset(NEI, fips == "24510" | fips == "06037")
NEI.VEH <- subset(BC.LA, BC.LA$SCC %in% SRC.VEH$SCC)
# Total emissions per county per year
pd <- aggregate(NEI.VEH[c("Emissions")], list(fips = NEI.VEH$fips, year = NEI.VEH$year), sum)
# Human-readable county labels for the legend (fixes "Los Angles" typo);
# direct column assignment avoids the chained-subset replacement idiom
pd$city <- NA_character_
pd$city[pd$fips == "06037"] <- "Los Angeles County"
pd$city[pd$fips == "24510"] <- "Baltimore City"
png('plot6.png')
p <- ggplot(pd, aes(x=year, y=Emissions, colour=city)) +
 geom_point(alpha=.3) +
 geom_smooth(alpha=.2, size=1, method="loess")
print(p)
dev.off()
| /plot6.R | no_license | fukazawa/ExData_Plotting2 | R | false | false | 817 | r | library(ggplot2)
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SRC <- readRDS("Source_Classification_Code.rds")
# Vehicle-related source sectors (case-insensitive match)
VEH <- grep("vehicle", SRC$EI.Sector, value = TRUE, ignore.case = TRUE)
SRC.VEH <- subset(SRC, SRC$EI.Sector %in% VEH, select = SCC)
# Restrict to Baltimore City (24510) and Los Angeles County (06037)
BC.LA <- subset(NEI, fips == "24510" | fips == "06037")
NEI.VEH <- subset(BC.LA, BC.LA$SCC %in% SRC.VEH$SCC)
# Total emissions per county per year
pd <- aggregate(NEI.VEH[c("Emissions")], list(fips = NEI.VEH$fips, year = NEI.VEH$year), sum)
# Legend labels per fips code (fixes "Los Angles" typo); direct column
# assignment replaces the fragile chained-subset idiom
pd$city <- NA_character_
pd$city[pd$fips == "06037"] <- "Los Angeles County"
pd$city[pd$fips == "24510"] <- "Baltimore City"
png('plot6.png')
p <- ggplot(pd, aes(x=year, y=Emissions, colour=city)) +
  geom_point(alpha=.3) +
  geom_smooth(alpha=.2, size=1, method="loess")
print(p)
dev.off()
|
#' Load the data from VGIS Oracle database
#'
#'
#' @description This function is to read the data from VGIS Oracle database
#'
#' @param userName character, Specifies a valid user name in VGIS Oracle database.
#' @param passWord character, Specifies the password to the user name.
#' @param saveThem logical, Specifies whether the loaded data should be saved or returned.
#' The default value is FALSE, which means the function will not save files
#' for you.
#' @param savePath character, Specifies the path that directs to the VRI original data source, i.e.,
#' \code{//Mayhem/GIS_TIB/RDW/RDW_Data2/Work_Areas/VRI_ASCII_PROD/vri_sa}.
#'
#' @return no files
#' @export
#'
#' @rdname loadVGIS
#' @author Yong Luo
loadVGIS <- function(userName, passWord,
                     saveThem = FALSE, savePath = "."){
  # Every per-table loader takes the identical argument set, so forward the
  # credentials and output options through one small helper.
  run_loader <- function(loader) {
    loader(userName = userName,
           passWord = passWord,
           saveThem = saveThem,
           savePath = savePath)
  }
  # Run all table loaders except the last one, in the original order.
  for (loader in list(loadVGISSample, loadVGISCrew, loadVGISSampleAccess,
                      loadVGISPlot, loadVGISRange, loadVGISCWD,
                      loadVGISTreeC, loadVGISLossIndicator, loadVGISSiteTree,
                      loadVGISStumpSTree, loadVGISTreeI, loadVGISEcology,
                      loadVGISVeg, loadVGISSuccession, loadVGISNotes)) {
    run_loader(loader)
  }
  # The final loader is the last expression, so the function's return value
  # (and its visibility) matches the sequential original.
  run_loader(loadVGISPhoto)
}
| /R/loadVGIS.R | permissive | bcgov/FAIBOracle | R | false | false | 3,346 | r | #' Load the data from VGIS Oracle database
#'
#'
#' @description This function is to read the data from VGIS Oracle database
#'
#' @param userName character, Specifies a valid user name in VGIS Oracle database.
#' @param passWord character, Specifies the password to the user name.
#' @param saveThem logical, Specifies whether the loaded data should be saved or returned.
#' The default value is FALSE, which means the function will not save files
#' for you.
#' @param savePath character, Specifies the path that directs to the VRI original data source, i.e.,
#' \code{//Mayhem/GIS_TIB/RDW/RDW_Data2/Work_Areas/VRI_ASCII_PROD/vri_sa}.
#'
#' @return no files
#' @export
#'
#' @rdname loadVGIS
#' @author Yong Luo
loadVGIS <- function(userName, passWord,
                     saveThem = FALSE, savePath = "."){
  # Load each VGIS table in turn; every loader shares the same
  # credential and output arguments.
  loadVGISSample(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
  loadVGISCrew(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
  loadVGISSampleAccess(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
  loadVGISPlot(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
  loadVGISRange(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
  loadVGISCWD(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
  loadVGISTreeC(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
  loadVGISLossIndicator(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
  loadVGISSiteTree(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
  loadVGISStumpSTree(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
  loadVGISTreeI(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
  loadVGISEcology(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
  loadVGISVeg(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
  loadVGISSuccession(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
  loadVGISNotes(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
  # Last call's value is the function's return value, as in the original.
  loadVGISPhoto(userName = userName, passWord = passWord, saveThem = saveThem, savePath = savePath)
}
|
#' R wrapper for luchtmeetnet api
#'
#' @param obj The object to query
#' @param params The params to pass to the query
#' @param verbose Whether to print out stuffs
#' @return A data.frame with reponse data
#'
#' @examples
#' luchtmeetnet_get_data("measurements", params = list(start = "2020-01-01T00:00:00", end = "2020-01-06T23:00:00", formula = "PM25"))
#'
#' @export
#' @importFrom dplyr %>%
luchtmeetnet_get_data <- function(obj, params = list(), verbose = FALSE) {
luchtmeetnet_request <- function (url, params) {
next_page = 1
last_page = -1
current_page = 0
pages <- list()
while (current_page != last_page) {
params$page <- next_page
pparams <- paste(names(params), params, sep = "=", collapse = "&")
if (verbose) message("Downloading from {url} with {pparams}" %>% glue::glue())
r <- httr::RETRY("GET", url, query = params, times = 10)
httr::stop_for_status(r)
body <- httr::content(r)
if(is.null(names(body$data))) {
pages[[next_page]] = body$data %>%
dplyr::bind_rows()
} else {
# not a paginated list
return(body$data)
}
next_page <- body$pagination$next_page
last_page <- body$pagination$last_page
current_page <- body$pagination$current_page
}
return(pages %>% jsonlite::rbind_pages())
}
url <- "https://api.luchtmeetnet.nl/open_api/{obj}/" %>% glue::glue()
luchtmeetnet_request(url, params)
}
| /R/get_data.R | no_license | markbaas/luchtmeetnetr | R | false | false | 1,510 | r | #' R wrapper for luchtmeetnet api
#'
#' @param obj The object to query
#' @param params The params to pass to the query
#' @param verbose Whether to print out stuffs
#' @return A data.frame with reponse data
#'
#' @examples
#' luchtmeetnet_get_data("measurements", params = list(start = "2020-01-01T00:00:00", end = "2020-01-06T23:00:00", formula = "PM25"))
#'
#' @export
#' @importFrom dplyr %>%
luchtmeetnet_get_data <- function(obj, params = list(), verbose = FALSE) {
luchtmeetnet_request <- function (url, params) {
next_page = 1
last_page = -1
current_page = 0
pages <- list()
while (current_page != last_page) {
params$page <- next_page
pparams <- paste(names(params), params, sep = "=", collapse = "&")
if (verbose) message("Downloading from {url} with {pparams}" %>% glue::glue())
r <- httr::RETRY("GET", url, query = params, times = 10)
httr::stop_for_status(r)
body <- httr::content(r)
if(is.null(names(body$data))) {
pages[[next_page]] = body$data %>%
dplyr::bind_rows()
} else {
# not a paginated list
return(body$data)
}
next_page <- body$pagination$next_page
last_page <- body$pagination$last_page
current_page <- body$pagination$current_page
}
return(pages %>% jsonlite::rbind_pages())
}
url <- "https://api.luchtmeetnet.nl/open_api/{obj}/" %>% glue::glue()
luchtmeetnet_request(url, params)
}
|
\name{plot.simrnet}
\alias{plot.simrnet}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Box and whisker plot of RMSE and R-square
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Box-and-whisker plot of the RMSE and R-squared values obtained by the \code{simrnet} function.
}
\usage{
\method{plot}{simrnet}(x, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
The list of RMSE and R-squared values obtained by the \code{simrnet} function.
}
\item{...}{
Additional arguments of details in boxplot.
}
% \item{\dots}{Other graphical parameters to plot}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
%\value{
%}
\references{
Okinaga, Y., Kyogoku, D., Kondo, S., Nagano, J. A. and Hirose, K. The Accuracy of Lasso Estimation of Scale-Free Network, \emph{manuscript.}
}
\author{
Kei Hirose and Yuichi Okinaga\cr
\email{mail@keihirose.com}
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{simrnet} object.}
\examples{
data(nagano2019)
attach(nagano2019)
rho <- seq(0.1, 0.9, by = 0.1)
pars <- genpar(X,Y,rho)
result <- simrnet(pars,times.sim=5)
plot(result)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
| /man/plot.simrnet.Rd | no_license | keihirose/simrnet | R | false | false | 1,316 | rd | \name{plot.simrnet}
\alias{plot.simrnet}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Box and whisker plot of RMSE and R-square
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Box-and-whisker plot of the RMSE and R-squared values obtained by the \code{simrnet} function.
}
\usage{
\method{plot}{simrnet}(x, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
The list of RMSE and R-squared values obtained by the \code{simrnet} function.
}
\item{...}{
Additional arguments of details in boxplot.
}
% \item{\dots}{Other graphical parameters to plot}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
%\value{
%}
\references{
Okinaga, Y., Kyogoku, D., Kondo, S., Nagano, J. A. and Hirose, K. The Accuracy of Lasso Estimation of Scale-Free Network, \emph{manuscript.}
}
\author{
Kei Hirose and Yuichi Okinaga\cr
\email{mail@keihirose.com}
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{simrnet} object.}
\examples{
data(nagano2019)
attach(nagano2019)
rho <- seq(0.1, 0.9, by = 0.1)
pars <- genpar(X,Y,rho)
result <- simrnet(pars,times.sim=5)
plot(result)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
|
##########################
# Script for preparing the R workspace for modeling COTS population...
#
# Authors: Kevin Shoemaker, Sam Matthews, Camille Mellin, Damien Fordham
#
# NOTE: this is the only script that refers to specific users and file structures.
#
# 07 April 2015 -- started scripting
##########################
#############################
# LOAD PACKAGES
#############################
# note: 'loadPackages' should install each package from CRAN automatically if it is not already installed
loadPackages() # load all packages into the global environment
###############################
# LOAD PROJECTION FOR READING IN SPATIAL DATA
###############################
projection <- "+proj=longlat +datum=WGS84" #"+proj=lcc +lat_1=33 +lat_2=45 +lat_0=39 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +units=m +no_defs"
#############
# END SCRIPT
############# | /ARCHIVE/COTSModel_Initialization.R | no_license | sammatthews990/COTS_Model | R | false | false | 888 | r | ##########################
# Script for preparing the R workspace for modeling COTS population...
#
# Authors: Kevin Shoemaker, Sam Matthews, Camille Mellin, Damien Fordham
#
# NOTE: this is the only script that refers to specific users and file structures.
#
# 07 April 2015 -- started scripting
##########################
#############################
# LOAD PACKAGES
#############################
# note: 'loadPackages' should install each package from CRAN automatically if it is not already installed
loadPackages() # load all packages into the global environment
###############################
# LOAD PROJECTION FOR READING IN SPATIAL DATA
###############################
projection <- "+proj=longlat +datum=WGS84" #"+proj=lcc +lat_1=33 +lat_2=45 +lat_0=39 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +units=m +no_defs"
#############
# END SCRIPT
############# |
# Migration project analysis script in R, plus visualizations
# Matt Motyl
# 7/6/2019
| /analysisandviz.R | no_license | mattmotyl/migration | R | false | false | 87 | r | # Migration project analysis script in R, plus visualizations
# Matt Motyl
# 7/6/2019
|
# Exploratory facetted plots of parasite intensities against one predictor.
#
# Melts `df` so each parasite count column in `Parasites` becomes rows of a
# single "EPG" value column, normalises EPG by the cFDM column
# (presumably faecal dry mass -- TODO confirm units with the data owner),
# and plots log(EPG + 1) against the column named by `x`, facetted by parasite:
#   * numeric `x`          -> scatter plus geom_smooth(method = Method)
#   * factor/character `x` -> violins, dodged points and mean +/- SE bars
# `Aesthetic` names the column mapped to point shape / linetype.
# Relies on the globals MainParasites, ParasiteColours and THEME.
# Returns a ggplot object (invisibly NULL for unsupported `x` types).
Explote<-function(x,df,Aesthetic="variable",Method="lm",Parasites=MainParasites){
  require(reshape2);require(ggplot2);require(grid)
  # Wide -> long: every column that is not a parasite column is an id variable
  RDeer_long<-melt(df,
                   id.vars=colnames(df)[!colnames(df)%in%Parasites],
                   value.name="EPG")
  RDeer_long$EPG<-RDeer_long$EPG/RDeer_long$cFDM
  X<-RDeer_long[,x]
  data<-RDeer_long
  # One colour per parasite; seq_along() is safe for an empty Parasites
  # vector, where 1:length(Parasites) would yield c(1, 0)
  ParasiteColoursUse<-ParasiteColours[seq_along(Parasites)]
  AESTHETIC<-data[,Aesthetic]
  # is.numeric()/is.factor() return scalars, so use scalar && / || here
  if(is.numeric(X)){
    ggplot(data,aes(X,log(EPG+1),colour=variable,shape=AESTHETIC))+
      facet_wrap(~variable)+
      geom_point()+
      geom_smooth(method=Method,aes(lty=AESTHETIC))+
      THEME+theme(strip.background = element_rect(fill = "white", color = "grey", size = 0))+
      scale_color_manual(values=ParasiteColoursUse)+
      xlab(x)
  }else if(is.factor(X)||is.character(X)){
    # NOTE(review): fun.y is deprecated in ggplot2 >= 3.3.0 (use `fun`);
    # left unchanged here to avoid requiring a newer ggplot2.
    ggplot(data,aes(X,log(EPG+1),colour=variable,shape=AESTHETIC))+
      facet_wrap(~variable)+
      geom_violin(aes(lty=AESTHETIC))+
      geom_point(position=position_dodge(w=0.9))+
      THEME+theme(strip.background = element_rect(fill = "white", color = "grey", size = 0))+
      scale_color_manual(values=ParasiteColoursUse)+
      xlab(x)+
      stat_summary(fun.data=mean_se, fun.args = list(mult=1),geom="errorbar",width=0.35,colour="black",position=position_dodge(w=0.9))+
      stat_summary(fun.y = "mean", geom = "point",size= 1.5,colour="black",shape=3,position=position_dodge(w=0.9))
  }
}
| /R/Explote.R | no_license | gfalbery/ggregplot | R | false | false | 1,467 | r | Explote<-function(x,df,Aesthetic="variable",Method="lm",Parasites=MainParasites){
  # Exploratory facetted plot of parasite intensities (log(EPG + 1)) against
  # the predictor column `x`; relies on globals MainParasites,
  # ParasiteColours and THEME.
  require(reshape2);require(ggplot2);require(grid)
  # Wide -> long: every column that is not a parasite column is an id variable
  RDeer_long<-melt(df,
                   id.vars=colnames(df)[!colnames(df)%in%Parasites],
                   value.name="EPG")
  # Normalise counts by the cFDM column (presumably faecal dry mass --
  # TODO confirm with data owner)
  RDeer_long$EPG<-RDeer_long$EPG/RDeer_long$cFDM
  X<-RDeer_long[,x]
  data<-RDeer_long
  # One colour per parasite, drawn from the global palette
  ParasiteColoursUse<-ParasiteColours[1:length(Parasites)]
  AESTHETIC<-data[,Aesthetic]
  if(is.numeric(X)){
    # Numeric predictor: scatter + smoother per parasite facet
    ggplot(data,aes(X,log(EPG+1),colour=variable,shape=AESTHETIC))+
      facet_wrap(~variable)+
      geom_point()+
      geom_smooth(method=Method,aes(lty=AESTHETIC))+
      THEME+theme(strip.background = element_rect(fill = "white", color = "grey", size = 0))+
      scale_color_manual(values=ParasiteColoursUse)+
      xlab(x)
  }else{
    if(is.factor(X)|is.character(X)){
      # Categorical predictor: violins, dodged points, and mean +/- SE bars
      ggplot(data,aes(X,log(EPG+1),colour=variable,shape=AESTHETIC))+
        facet_wrap(~variable)+
        geom_violin(aes(lty=AESTHETIC))+
        geom_point(position=position_dodge(w=0.9))+
        THEME+theme(strip.background = element_rect(fill = "white", color = "grey", size = 0))+
        scale_color_manual(values=ParasiteColoursUse)+
        xlab(x)+
        stat_summary(fun.data=mean_se, fun.args = list(mult=1),geom="errorbar",width=0.35,colour="black",position=position_dodge(w=0.9))+
        stat_summary(fun.y = "mean", geom = "point",size= 1.5,colour="black",shape=3,position=position_dodge(w=0.9))
    }
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LD.R
\name{LD.1KG}
\alias{LD.1KG}
\title{Compute LD from 1000 Genomes}
\usage{
LD.1KG(
locus_dir,
subset_DT,
LD_reference = "1KGphase1",
superpopulation = "EUR",
vcf_folder = NULL,
remote_LD = T,
LD_block = F,
LD_block_size = 0.7,
remove_correlates = F,
remove_tmps = T,
fillNA = 0,
download_method = "wget",
nThread = 4,
conda_env = "echoR",
verbose = T
)
}
\arguments{
\item{LD_reference}{Which linkage disequilibrium reference panel do you want to use.
Options include:
\describe{
\item{"UKB"}{A pre-caclulated LD reference matrix from a subset of caucasian British individuals from the UK Biobank. See \href{https://www.biorxiv.org/content/10.1101/807792v2}{Wiessbrod et al. (2019)} for more details.}
\item{"1KGphase1"}{Download a subset of the 1000 Genomes Project Phase 1 vcf and calculate LD on the fly with plink.}
\item{"1KGphase3"}{Download a subset of the 1000 Genomes Project Phase 3 vcf and calculate LD on the fly with plink.}
\item{"<path>/*.vcf" or "<path>/*.vcf.gz"}{Alternatively, users can provide their own custom panel by supplying a list of \emph{.vcf} file path (one per locus) which \pkg{echolocatoR} will use to compute LD (using \emph{plink}).}
}}
\item{superpopulation}{Subset your LD reference panel by superopulation.
Setting the superpopulation is not currently possible when \code{LD_reference="UKB"}.
\href{https://www.internationalgenome.org/faq/which-populations-are-part-your-study/}{1KGphase1 options} include:
\describe{
\item{"AFR"}{African [descent]}
\item{"AMR"}{Ad-mixed American}
\item{"EAS"}{East Asian}
\item{"EUR"}{European}
\item{"SAS"}{South Asian}
}}
\item{remote_LD}{When acquiring LD matrixes,
the default is to delete the full vcf or npz files after \pkg{echolocatoR} has extracted the necssary subset.
However, if you wish to keep these full files (which can be quite large) set \code{remote_LD=T}.}
\item{LD_block}{Calculate LD blocks with \emph{plink} and only include the block to which the lead SNP belongs.}
\item{LD_block_size}{Adjust the granularity of block sizes when \code{LD_block=T}.}
\item{remove_correlates}{A named list, where the names are the RSIDs of SNPs
whose LD correlates you wish to remove,
and the value is the absolute r2 threshold you wish to filter at for each RSID respectively
(e.g. \code{ remove_correlates = c("rs76904798"=.2, "rs10000737"=.8)}).
This will also remove the SNPs in \code{remove_correlates} themselves.}
\item{remove_tmps}{Whether to remove any temporary files (e.g. FINEMAP output files) after the pipeline is done running.}
\item{fillNA}{When pairwise LD (r) between two SNPs is \code{NA}, replace with 0.}
\item{conda_env}{The name of a conda environment to use.}
\item{verbose}{Whether \pkg{echolocatoR} should be verbose or silent.}
}
\description{
Downloads a subset vcf of the 1KG database that matches your locus coordinates.
Then uses \emph{plink} to calculate LD on the fly.
}
\details{
This approach is taken, because other API query tools have limitations with the window size being queried.
This approach does not have this limitations, allowing you to fine-map loci more completely.
}
\examples{
\dontrun{
data("BST1"); data("locus_dir");
BST1 <- limit_SNPs(max_snps = 500, subset_DT = BST1)
LD_matrix <- LD.1KG(locus_dir=file.path("~/Desktop",locus_dir), subset_DT=BST1, LD_reference="1KGphase1")
## Kunkle et al 2019
locus_dir <- "/sc/arion/projects/pd-omics/brian/Fine_Mapping/Data/GWAS/Kunkle_2019/ACE"
}
}
\seealso{
Other LD:
\code{\link{LD.1KG_download_vcf}()},
\code{\link{LD.LD_blocks}()},
\code{\link{LD.UKBiobank}()},
\code{\link{LD.calculate_LD}()},
\code{\link{LD.construct_subset_vcf_name}()},
\code{\link{LD.custom_panel}()},
\code{\link{LD.dprime_table}()},
\code{\link{LD.filter_LD}()},
\code{\link{LD.filter_vcf_gaston}()},
\code{\link{LD.filter_vcf}()},
\code{\link{LD.get_locus_vcf_folder}()},
\code{\link{LD.index_vcf}()},
\code{\link{LD.leadSNP_block}()},
\code{\link{LD.load_or_create}()},
\code{\link{LD.plink_LD}()},
\code{\link{LD.plink_file}()},
\code{\link{LD.plot_LD}()},
\code{\link{LD.query_vcf}()},
\code{\link{LD.rds_to_npz}()},
\code{\link{LD.read_bin}()},
\code{\link{LD.read_ld_table}()},
\code{\link{LD.run_plink_LD}()},
\code{\link{LD.save_LD_matrix}()},
\code{\link{LD.snpstats_get_LD}()},
\code{\link{LD.snpstats_get_MAF}()},
\code{\link{LD.translate_population}()},
\code{\link{LD.vcf_to_bed}()},
\code{\link{LDlinkR.LDproxy_batch}()},
\code{\link{popDat_1KGphase1}},
\code{\link{popDat_1KGphase3}},
\code{\link{saveSparse}()}
}
\concept{LD}
\keyword{internal}
| /man/LD.1KG.Rd | permissive | alexMarCar/echolocatoR | R | false | true | 4,634 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LD.R
\name{LD.1KG}
\alias{LD.1KG}
\title{Compute LD from 1000 Genomes}
\usage{
LD.1KG(
locus_dir,
subset_DT,
LD_reference = "1KGphase1",
superpopulation = "EUR",
vcf_folder = NULL,
remote_LD = T,
LD_block = F,
LD_block_size = 0.7,
remove_correlates = F,
remove_tmps = T,
fillNA = 0,
download_method = "wget",
nThread = 4,
conda_env = "echoR",
verbose = T
)
}
\arguments{
\item{LD_reference}{Which linkage disequilibrium reference panel do you want to use.
Options include:
\describe{
\item{"UKB"}{A pre-caclulated LD reference matrix from a subset of caucasian British individuals from the UK Biobank. See \href{https://www.biorxiv.org/content/10.1101/807792v2}{Wiessbrod et al. (2019)} for more details.}
\item{"1KGphase1"}{Download a subset of the 1000 Genomes Project Phase 1 vcf and calculate LD on the fly with plink.}
\item{"1KGphase3"}{Download a subset of the 1000 Genomes Project Phase 3 vcf and calculate LD on the fly with plink.}
\item{"<path>/*.vcf" or "<path>/*.vcf.gz"}{Alternatively, users can provide their own custom panel by supplying a list of \emph{.vcf} file path (one per locus) which \pkg{echolocatoR} will use to compute LD (using \emph{plink}).}
}}
\item{superpopulation}{Subset your LD reference panel by superopulation.
Setting the superpopulation is not currently possible when \code{LD_reference="UKB"}.
\href{https://www.internationalgenome.org/faq/which-populations-are-part-your-study/}{1KGphase1 options} include:
\describe{
\item{"AFR"}{African [descent]}
\item{"AMR"}{Ad-mixed American}
\item{"EAS"}{East Asian}
\item{"EUR"}{European}
\item{"SAS"}{South Asian}
}}
\item{remote_LD}{When acquiring LD matrixes,
the default is to delete the full vcf or npz files after \pkg{echolocatoR} has extracted the necssary subset.
However, if you wish to keep these full files (which can be quite large) set \code{remote_LD=T}.}
\item{LD_block}{Calculate LD blocks with \emph{plink} and only include the block to which the lead SNP belongs.}
\item{LD_block_size}{Adjust the granularity of block sizes when \code{LD_block=T}.}
\item{remove_correlates}{A named list, where the names are the RSIDs of SNPs
whose LD correlates you wish to remove,
and the value is the absolute r2 threshold you wish to filter at for each RSID respectively
(e.g. \code{ remove_correlates = c("rs76904798"=.2, "rs10000737"=.8)}).
This will also remove the SNPs in \code{remove_correlates} themselves.}
\item{remove_tmps}{Whether to remove any temporary files (e.g. FINEMAP output files) after the pipeline is done running.}
\item{fillNA}{When pairwise LD (r) between two SNPs is \code{NA}, replace with 0.}
\item{conda_env}{The name of a conda environment to use.}
\item{verbose}{Whether \pkg{echolocatoR} should be verbose or silent.}
}
\description{
Downloads a subset vcf of the 1KG database that matches your locus coordinates.
Then uses \emph{plink} to calculate LD on the fly.
}
\details{
This approach is taken, because other API query tools have limitations with the window size being queried.
This approach does not have this limitations, allowing you to fine-map loci more completely.
}
\examples{
\dontrun{
data("BST1"); data("locus_dir");
BST1 <- limit_SNPs(max_snps = 500, subset_DT = BST1)
LD_matrix <- LD.1KG(locus_dir=file.path("~/Desktop",locus_dir), subset_DT=BST1, LD_reference="1KGphase1")
## Kunkle et al 2019
locus_dir <- "/sc/arion/projects/pd-omics/brian/Fine_Mapping/Data/GWAS/Kunkle_2019/ACE"
}
}
\seealso{
Other LD:
\code{\link{LD.1KG_download_vcf}()},
\code{\link{LD.LD_blocks}()},
\code{\link{LD.UKBiobank}()},
\code{\link{LD.calculate_LD}()},
\code{\link{LD.construct_subset_vcf_name}()},
\code{\link{LD.custom_panel}()},
\code{\link{LD.dprime_table}()},
\code{\link{LD.filter_LD}()},
\code{\link{LD.filter_vcf_gaston}()},
\code{\link{LD.filter_vcf}()},
\code{\link{LD.get_locus_vcf_folder}()},
\code{\link{LD.index_vcf}()},
\code{\link{LD.leadSNP_block}()},
\code{\link{LD.load_or_create}()},
\code{\link{LD.plink_LD}()},
\code{\link{LD.plink_file}()},
\code{\link{LD.plot_LD}()},
\code{\link{LD.query_vcf}()},
\code{\link{LD.rds_to_npz}()},
\code{\link{LD.read_bin}()},
\code{\link{LD.read_ld_table}()},
\code{\link{LD.run_plink_LD}()},
\code{\link{LD.save_LD_matrix}()},
\code{\link{LD.snpstats_get_LD}()},
\code{\link{LD.snpstats_get_MAF}()},
\code{\link{LD.translate_population}()},
\code{\link{LD.vcf_to_bed}()},
\code{\link{LDlinkR.LDproxy_batch}()},
\code{\link{popDat_1KGphase1}},
\code{\link{popDat_1KGphase3}},
\code{\link{saveSparse}()}
}
\concept{LD}
\keyword{internal}
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252168713e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615778845-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 348 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252168713e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
###
shinyUI(fluidPage(
titlePanel(""),
fluidRow(column(6, align="center", offset = 3,
uiOutput("ui1"))),
fluidRow(column(6, align="center", offset = 3,
uiOutput("ui2"))),
fluidRow(column(6, align="center", offset = 3,div(style="height:40px"),
uiOutput("ui3"))),
fluidRow(column(6, align="center", offset = 3,div(style="height:30px"),
uiOutput("ui4")))
)) | /ui.R | no_license | luzius/dynamic-interface-with-rshiny | R | false | false | 448 | r | ###
shinyUI(fluidPage(
titlePanel(""),
fluidRow(column(6, align="center", offset = 3,
uiOutput("ui1"))),
fluidRow(column(6, align="center", offset = 3,
uiOutput("ui2"))),
fluidRow(column(6, align="center", offset = 3,div(style="height:40px"),
uiOutput("ui3"))),
fluidRow(column(6, align="center", offset = 3,div(style="height:30px"),
uiOutput("ui4")))
)) |
writeLines(sprintf("var url ='https://www.americanexpress.com/us/credit-cards/view-all-personal-cards/';
var page = new WebPage()
var fs = require('fs');
page.open(url, function (status) {
just_wait();
});
function just_wait() {
setTimeout(function() {
fs.write('1.html', page.content, 'w');
phantom.exit();
}, 20000);
}"), con="scrape.js")
system("phantomjs scrape.js")
pg <- read_html("1.html")
amexCardName <- pg %>%
html_nodes(xpath='//*[@itemprop="name"]/a') %>%
html_text()
amexCardName <- gsub('[^A-z&-+\']', ' ', amexCardName)
amexCardName <- str_trim(gsub('\\s{2,}', ' ', amexCardName))
amexCardName <- amexCardName[2:length(amexCardName)]
amexCardOffer <- pg %>%
html_nodes("[itemprop=offers]") %>%
html_text()
amexCardOffer <- gsub('\n', '', amexCardOffer)
amexCardOffer <- str_trim(amexCardOffer)
amexCardOffer <- amexCardOffer[seq(from=1, to=39, by=2)]
if(length(amexCardOffer)>length(amexCardName)){
amexCardOffer <- amexCardOffer[-1]
}
amexLinks <- pg %>%
html_nodes(xpath="//*[@itemprop='url']") %>%
html_attr("content")
if(length(amexLinks)>length(amexCardName)){
amexLinks <- amexLinks[-1]
}
amex <- data.frame(CardName = amexCardName,
Issuer = 'Amex',
Program = 'American Express Membership Rewards',
Link = amexLinks,
IntroOffer = amexCardOffer, stringsAsFactors = FALSE)
amex$Cash <- as.numeric(ifelse(grepl('\\$\\d{3} back after', tolower(amex$IntroOffer)),
gsub('.*?\\$(\\d{3}) back after.*', '\\1', tolower(amex$IntroOffer)), 0))
amex$Cash <- as.numeric(ifelse(grepl('\\$\\d{2,3} [S|s]tatement', amex$IntroOffer),
gsub('.*?\\$(\\d{2,3}).*', '\\1', amex$IntroOffer), amex$Cash))
amex$Points <- ifelse(grepl(' \\d{1,3},\\d{3}', amex$IntroOffer),
gsub('.*? (\\d{1,3},\\d{3}).*', '\\1', amex$IntroOffer), 0)
amex$Points <- ifelse(grepl(' \\d{1,2},\\d{3} bonus', amex$IntroOffer),
gsub('.*? (\\d{1,2},\\d{3}) bonus.*', '\\1', amex$IntroOffer), amex$Points)
amex$Points <- as.numeric(gsub(',','',amex$Points))
amex$Nights <- ifelse(grepl('nights', amex$IntroOffer),
2, 0)
amexCredit <- c()
amexIntro <- c()
amexIMG <- c()
i <- 1
for(l in amex$Link){
writeLines(paste0(i))
writeLines(sprintf("var url ='%s';
var page = new WebPage()
var fs = require('fs');
page.open(url, function (status) {
just_wait();
});
function just_wait() {
setTimeout(function() {
fs.write('1.html', page.content, 'w');
phantom.exit();
}, 7000);
}", l), con="scrape_amex.js")
system("phantomjs scrape_amex.js")
url <- read_html("1.html")
intro <- url %>%
html_nodes(xpath='//*[@id="overview"]/section[2]/div/div[1]/div[1]/p') %>%
html_text()
amexIntro <- c(amexIntro, intro)
credit <- url %>%
html_nodes(xpath="//*[@class='card-detail__features-list']") %>%
html_text()
credit <- credit[grepl('Airline Fee Credit', credit)]
credit <- ifelse(length(credit)==0, 0, credit)
credit <- as.numeric(gsub('.*?\\$(\\d{3}).*', '\\1', credit))
amexCredit <- c(amexCredit, credit)
img <- url %>%
html_nodes(xpath="//*[@itemprop='image']") %>%
html_attr("content")
amexIMG <- c(amexIMG, img)
i <- i + 1
}
amex$Credit <- amexCredit
amexFee <- pg %>%
html_nodes('[itemprop=description]') %>%
html_text()
amexFee <- gsub('\\s{2,}', ' ', amexFee)
amexFee <- str_trim(amexFee)
if(length(amexFee)>length(amexCardName)){
amexFee <- amexFee[-1]
}
amexFee <- gsub('.*?Annual Fee(.*)', '\\1', amexFee)
amex$FeeWaived1stYr <- ifelse(grepl('\\$0.*first year', amexFee), 1, 0)
amex$Fee <- as.numeric(ifelse(grepl('\\$\\d{2,3}', amexFee),
gsub('.*?\\$(\\d{2,3}).*', '\\1', amexFee), 0))
amex$Fee <- ifelse(grepl('No Annual Fee', amexFee), 0, amex$Fee)
amex$Fee <- ifelse(amex$Fee>450, 0, amex$Fee)
amexSpend <- gsub(',', '', amex$IntroOffer)
amexSpend <- ifelse(grepl('\\$(\\d{3,4}) [i|o]n purchases', amexSpend),
gsub('.*?\\$(\\d{3,4}) [i|o]n purchases.*', '\\1', amexSpend), 0)
amex$Spend <- as.numeric(gsub('\\D', '', amexSpend))
amex <- amex[amex$CardName!='Serve from American Express', ]
amex$img <- amexIMG
for(p in rates[,1][rates[,1]!='American']){
amex$Program <- ifelse(grepl(p, amex$CardName), p, amex$Program)
}
for(p in rates[,1][rates[,1]!='American']){
amex$Program <- ifelse(grepl(p, amex$IntroOffer), p, amex$Program)
}
| /scraper/amex.R | no_license | catshark9/Credit-Card-App | R | false | false | 5,005 | r | writeLines(sprintf("var url ='https://www.americanexpress.com/us/credit-cards/view-all-personal-cards/';
var page = new WebPage()
var fs = require('fs');
page.open(url, function (status) {
just_wait();
});
function just_wait() {
setTimeout(function() {
fs.write('1.html', page.content, 'w');
phantom.exit();
}, 20000);
}"), con="scrape.js")
system("phantomjs scrape.js")
pg <- read_html("1.html")
amexCardName <- pg %>%
html_nodes(xpath='//*[@itemprop="name"]/a') %>%
html_text()
amexCardName <- gsub('[^A-z&-+\']', ' ', amexCardName)
amexCardName <- str_trim(gsub('\\s{2,}', ' ', amexCardName))
amexCardName <- amexCardName[2:length(amexCardName)]
amexCardOffer <- pg %>%
html_nodes("[itemprop=offers]") %>%
html_text()
amexCardOffer <- gsub('\n', '', amexCardOffer)
amexCardOffer <- str_trim(amexCardOffer)
amexCardOffer <- amexCardOffer[seq(from=1, to=39, by=2)]
if(length(amexCardOffer)>length(amexCardName)){
amexCardOffer <- amexCardOffer[-1]
}
amexLinks <- pg %>%
html_nodes(xpath="//*[@itemprop='url']") %>%
html_attr("content")
if(length(amexLinks)>length(amexCardName)){
amexLinks <- amexLinks[-1]
}
amex <- data.frame(CardName = amexCardName,
Issuer = 'Amex',
Program = 'American Express Membership Rewards',
Link = amexLinks,
IntroOffer = amexCardOffer, stringsAsFactors = FALSE)
amex$Cash <- as.numeric(ifelse(grepl('\\$\\d{3} back after', tolower(amex$IntroOffer)),
gsub('.*?\\$(\\d{3}) back after.*', '\\1', tolower(amex$IntroOffer)), 0))
amex$Cash <- as.numeric(ifelse(grepl('\\$\\d{2,3} [S|s]tatement', amex$IntroOffer),
gsub('.*?\\$(\\d{2,3}).*', '\\1', amex$IntroOffer), amex$Cash))
amex$Points <- ifelse(grepl(' \\d{1,3},\\d{3}', amex$IntroOffer),
gsub('.*? (\\d{1,3},\\d{3}).*', '\\1', amex$IntroOffer), 0)
amex$Points <- ifelse(grepl(' \\d{1,2},\\d{3} bonus', amex$IntroOffer),
gsub('.*? (\\d{1,2},\\d{3}) bonus.*', '\\1', amex$IntroOffer), amex$Points)
amex$Points <- as.numeric(gsub(',','',amex$Points))
amex$Nights <- ifelse(grepl('nights', amex$IntroOffer),
2, 0)
amexCredit <- c()
amexIntro <- c()
amexIMG <- c()
i <- 1
for(l in amex$Link){
writeLines(paste0(i))
writeLines(sprintf("var url ='%s';
var page = new WebPage()
var fs = require('fs');
page.open(url, function (status) {
just_wait();
});
function just_wait() {
setTimeout(function() {
fs.write('1.html', page.content, 'w');
phantom.exit();
}, 7000);
}", l), con="scrape_amex.js")
system("phantomjs scrape_amex.js")
url <- read_html("1.html")
intro <- url %>%
html_nodes(xpath='//*[@id="overview"]/section[2]/div/div[1]/div[1]/p') %>%
html_text()
amexIntro <- c(amexIntro, intro)
credit <- url %>%
html_nodes(xpath="//*[@class='card-detail__features-list']") %>%
html_text()
credit <- credit[grepl('Airline Fee Credit', credit)]
credit <- ifelse(length(credit)==0, 0, credit)
credit <- as.numeric(gsub('.*?\\$(\\d{3}).*', '\\1', credit))
amexCredit <- c(amexCredit, credit)
img <- url %>%
html_nodes(xpath="//*[@itemprop='image']") %>%
html_attr("content")
amexIMG <- c(amexIMG, img)
i <- i + 1
}
amex$Credit <- amexCredit
amexFee <- pg %>%
html_nodes('[itemprop=description]') %>%
html_text()
amexFee <- gsub('\\s{2,}', ' ', amexFee)
amexFee <- str_trim(amexFee)
if(length(amexFee)>length(amexCardName)){
amexFee <- amexFee[-1]
}
amexFee <- gsub('.*?Annual Fee(.*)', '\\1', amexFee)
amex$FeeWaived1stYr <- ifelse(grepl('\\$0.*first year', amexFee), 1, 0)
amex$Fee <- as.numeric(ifelse(grepl('\\$\\d{2,3}', amexFee),
gsub('.*?\\$(\\d{2,3}).*', '\\1', amexFee), 0))
amex$Fee <- ifelse(grepl('No Annual Fee', amexFee), 0, amex$Fee)
amex$Fee <- ifelse(amex$Fee>450, 0, amex$Fee)
amexSpend <- gsub(',', '', amex$IntroOffer)
amexSpend <- ifelse(grepl('\\$(\\d{3,4}) [i|o]n purchases', amexSpend),
gsub('.*?\\$(\\d{3,4}) [i|o]n purchases.*', '\\1', amexSpend), 0)
amex$Spend <- as.numeric(gsub('\\D', '', amexSpend))
amex <- amex[amex$CardName!='Serve from American Express', ]
amex$img <- amexIMG
for(p in rates[,1][rates[,1]!='American']){
amex$Program <- ifelse(grepl(p, amex$CardName), p, amex$Program)
}
for(p in rates[,1][rates[,1]!='American']){
amex$Program <- ifelse(grepl(p, amex$IntroOffer), p, amex$Program)
}
|
library(tidyverse)
#Set working directory to where the Git repo lives on your computer
setwd("~/GitHub/deltaoutflowviz")
##############
#Read the csv from the CDEC webpage
#https://cdec.water.ca.gov/reportapp/javareports?name=WSIHIST
WSI_HIST<-read.csv("WSIHIST.csv")
str(WSI_HIST)
#We're only interested in Sac Valley Index (at least for now, as it makes up 80% of flow into the Delta on average)
#Remove NA from data
WSI_HIST_sac<-WSI_HIST[complete.cases(WSI_HIST$Sac_Index),]
#Add color list
WSI_HIST_sac <- WSI_HIST_sac %>%
mutate(Color = as.character(case_when(Sac_WY %in% "W" ~ "dodgerblue",
Sac_WY %in% "AN" ~ "lightskyblue",
Sac_WY %in% "BN" ~ "gold1",
Sac_WY %in% "D" ~ "darkorange2",
Sac_WY %in% "C" ~ "red"))) %>%
filter(WY>=1928) #Subset to just 1928 and on in order to match with Denise's figure
###############
#Create figure
tiff(filename="Water_index_timeseries.tiff",
units="in", bg="white", height=2.4, width=8, res=600, pointsize=6.8,
compression="lzw")
par(mar=c(0.5,3,0,0) + 0.2, mgp=c(2,0.8,0), lend=1, lwd=0.5)
use_seq <- seq(1, nrow(WSI_HIST_sac), by=4)
WSI_HIST_sac$YearLabel <- NA
WSI_HIST_sac$YearLabel[use_seq] <- WSI_HIST_sac$WY[use_seq]
WSI_HIST_sac$Dummy <- ""
aa <- barplot(height=WSI_HIST_sac$Sac_Index, names.arg=WSI_HIST_sac$Dummy, ylim=c(-1,16),
yaxt="n", bty="n", space=0.2, xlab="",
ylab=" Index Value", col=NA, border=NA)
a <- barplot(height=WSI_HIST_sac$Sac_Index, names.arg=WSI_HIST_sac$Dummy, ylim=c(-1,16),
xlim=c(0,(max(aa) + 10)),
yaxt="n", bty="n", space=0.2, xlab="",
ylab=" Index Value", col=NA, border=NA)
## Put water year type cutoff lines in the background:
## http://cdec.water.ca.gov/reportapp/javareports?name=WSIHIST
index_cutoffs <- c(5.4,6.5,7.8,9.2)
text_y <- c(index_cutoffs[1] - 0.6, mean(index_cutoffs[1:2]),
mean(index_cutoffs[2:3]), mean(index_cutoffs[3:4]),
index_cutoffs[4] + 0.6)
text_lab <- c("Critically Dry", "Dry", "Below Normal", "Above Normal", "Wet")
abline(h=index_cutoffs, lty=2, col="gray50")
text(x=120.9, y=text_y, labels=text_lab, adj=1)
barplot(height=WSI_HIST_sac$Sac_Index, names.arg=WSI_HIST_sac$Dummy, ylim=c(-1,16),
xlim=c(0,(max(aa) + 2)),
yaxt="n", bty="n", space=0.2, xlab="",
ylab="", col=WSI_HIST_sac$Color, border="black",
add=TRUE)
axis(side=1, at=a, labels=WSI_HIST_sac$YearLabel, pos=0, lwd=0.5)
axis(side=2, at=seq(0, 15, by=5), lwd=0.5)
dev.off() | /R_SFEWS_Figure_A.R | no_license | Denise-Colombano/deltaoutflowviz | R | false | false | 2,790 | r | library(tidyverse)
#Set working directory to where the Git repo lives on your computer
setwd("~/GitHub/deltaoutflowviz")
##############
#Read the csv from the CDEC webpage
#https://cdec.water.ca.gov/reportapp/javareports?name=WSIHIST
WSI_HIST<-read.csv("WSIHIST.csv")
str(WSI_HIST)
#We're only interested in Sac Valley Index (at least for now, as it makes up 80% of flow into the Delta on average)
#Remove NA from data
WSI_HIST_sac<-WSI_HIST[complete.cases(WSI_HIST$Sac_Index),]
#Add color list
WSI_HIST_sac <- WSI_HIST_sac %>%
mutate(Color = as.character(case_when(Sac_WY %in% "W" ~ "dodgerblue",
Sac_WY %in% "AN" ~ "lightskyblue",
Sac_WY %in% "BN" ~ "gold1",
Sac_WY %in% "D" ~ "darkorange2",
Sac_WY %in% "C" ~ "red"))) %>%
filter(WY>=1928) #Subset to just 1928 and on in order to match with Denise's figure
###############
#Create figure
tiff(filename="Water_index_timeseries.tiff",
units="in", bg="white", height=2.4, width=8, res=600, pointsize=6.8,
compression="lzw")
par(mar=c(0.5,3,0,0) + 0.2, mgp=c(2,0.8,0), lend=1, lwd=0.5)
use_seq <- seq(1, nrow(WSI_HIST_sac), by=4)
WSI_HIST_sac$YearLabel <- NA
WSI_HIST_sac$YearLabel[use_seq] <- WSI_HIST_sac$WY[use_seq]
WSI_HIST_sac$Dummy <- ""
aa <- barplot(height=WSI_HIST_sac$Sac_Index, names.arg=WSI_HIST_sac$Dummy, ylim=c(-1,16),
yaxt="n", bty="n", space=0.2, xlab="",
ylab=" Index Value", col=NA, border=NA)
a <- barplot(height=WSI_HIST_sac$Sac_Index, names.arg=WSI_HIST_sac$Dummy, ylim=c(-1,16),
xlim=c(0,(max(aa) + 10)),
yaxt="n", bty="n", space=0.2, xlab="",
ylab=" Index Value", col=NA, border=NA)
## Put water year type cutoff lines in the background:
## http://cdec.water.ca.gov/reportapp/javareports?name=WSIHIST
index_cutoffs <- c(5.4,6.5,7.8,9.2)
text_y <- c(index_cutoffs[1] - 0.6, mean(index_cutoffs[1:2]),
mean(index_cutoffs[2:3]), mean(index_cutoffs[3:4]),
index_cutoffs[4] + 0.6)
text_lab <- c("Critically Dry", "Dry", "Below Normal", "Above Normal", "Wet")
abline(h=index_cutoffs, lty=2, col="gray50")
text(x=120.9, y=text_y, labels=text_lab, adj=1)
barplot(height=WSI_HIST_sac$Sac_Index, names.arg=WSI_HIST_sac$Dummy, ylim=c(-1,16),
xlim=c(0,(max(aa) + 2)),
yaxt="n", bty="n", space=0.2, xlab="",
ylab="", col=WSI_HIST_sac$Color, border="black",
add=TRUE)
axis(side=1, at=a, labels=WSI_HIST_sac$YearLabel, pos=0, lwd=0.5)
axis(side=2, at=seq(0, 15, by=5), lwd=0.5)
dev.off() |
# Code for plotting out data for exploratory purposes
library(dplyr)
library(tidyr)
library(ggplot2)
theme_set(theme_classic())
library(gganimate)
library(mice)
library(janitor)
library(patchwork)
# atmospheric data plots ####
# Sdep maps
str(Sdep_avg)
Sdep_avg_long <- melt(Sdep_avg, id.vars = c("x","y"))
Sdep_avg_long$year <- substring(Sdep_avg_long$variable, 9,12)
ggplot(Sdep_avg_long, aes(x = x, y = y, fill = value)) +
geom_tile() +
scale_fill_viridis_c(name = "") +
coord_fixed() +
facet_wrap(~year, nrow = 5) +
labs(x = "",y = "", title = "Sdep grid average kgS / ha") +
theme(axis.text = element_blank(), axis.ticks = element_blank())
ggsave("Sdep grid average maps.png", path = "Outputs/Graphs/",
width = 28, height = 28, units = "cm")
# animated version
Sdep_avg_long %>%
mutate(year = as.integer(year)) %>%
ggplot(aes(x = x, y = y, fill = value)) +
geom_tile() +
scale_fill_viridis_c(name = "Sdep (kgS/ha)") +
coord_fixed() +
theme(axis.text = element_blank(), axis.ticks = element_blank()) +
labs(title = 'Year: {frame_time}', x = '', y = '') +
transition_time(year) +
ease_aes('linear')
str(Sdep_for)
Sdep_for_long <- melt(Sdep_for, id.vars = c("x","y"))
Sdep_for_long$year <- substring(Sdep_for_long$variable, 8,11)
ggplot(Sdep_for_long, aes(x = x, y = y, fill = value)) +
geom_tile() +
scale_fill_viridis_c(name = "") +
coord_fixed() +
facet_wrap(~year, nrow = 5) +
labs(x = "",y = "", title = "Sdep forest kgS / ha") +
theme(axis.text = element_blank(), axis.ticks = element_blank())
ggsave("Sdep forest maps.png", path = "Outputs/Graphs/",
width = 28, height = 28, units = "cm")
str(Sdep_moo)
Sdep_moo_long <- melt(Sdep_moo, id.vars = c("x","y"))
Sdep_moo_long$year <- substring(Sdep_moo_long$variable, 6,9)
ggplot(Sdep_moo_long, aes(x = x, y = y, fill = value)) +
geom_tile() +
scale_fill_viridis_c(name = "") +
coord_fixed() +
facet_wrap(~year, nrow = 5) +
labs(x = "",y = "", title = "Sdep moorland kgS / ha") +
theme(axis.text = element_blank(), axis.ticks = element_blank())
ggsave("Sdep moorland maps.png", path = "Outputs/Graphs/",
width = 28, height = 28, units = "cm")
# Ndep maps
str(Ndep_avg)
Ndep_avg_long <- melt(Ndep_avg, id.vars = c("x","y"))
Ndep_avg_long$year <- substring(Ndep_avg_long$variable, 9,12)
ggplot(Ndep_avg_long, aes(x = x, y = y, fill = value)) +
geom_tile() +
scale_fill_viridis_c(name = "") +
coord_fixed() +
facet_wrap(~year, nrow = 5) +
labs(x = "",y = "", title = "Ndep grid average kgN / ha") +
theme(axis.text = element_blank(), axis.ticks = element_blank())
ggsave("Ndep grid average maps.png", path = "Outputs/Graphs/",
width = 28, height = 28, units = "cm")
str(Ndep_for)
Ndep_for_long <- melt(Ndep_for, id.vars = c("x","y"))
Ndep_for_long$year <- substring(Ndep_for_long$variable, 8,11)
ggplot(Ndep_for_long, aes(x = x, y = y, fill = value)) +
geom_tile() +
scale_fill_viridis_c(name = "") +
coord_fixed() +
facet_wrap(~year, nrow = 5) +
labs(x = "",y = "", title = "Ndep forest kgN / ha") +
theme(axis.text = element_blank(), axis.ticks = element_blank())
ggsave("Ndep forest maps.png", path = "Outputs/Graphs/",
width = 28, height = 28, units = "cm")
str(Ndep_moo)
Ndep_moo_long <- melt(Ndep_moo, id.vars = c("x","y"))
Ndep_moo_long$year <- substring(Ndep_moo_long$variable, 6,9)
ggplot(Ndep_moo_long, aes(x = x, y = y, fill = value)) +
geom_tile() +
scale_fill_viridis_c(name = "") +
coord_fixed() +
facet_wrap(~year, nrow = 5) +
labs(x = "",y = "", title = "Ndep moorland kgN / ha") +
theme(axis.text = element_blank(), axis.ticks = element_blank())
ggsave("Ndep moorland maps.png", path = "Outputs/Graphs/",
width = 28, height = 28, units = "cm")
# Plot Sdep and Ndep against each other
colnames(Sdep_avg_long) <- c("x","y","variable","Sdep","year")
colnames(Ndep_avg_long) <- c("x","y","variable","Ndep","year")
AtDep_avg <- merge(Sdep_avg_long, Ndep_avg_long, by = c("x","y","variable","year"))
str(AtDep_avg)
AtDep_avg$year <- as.integer(AtDep_avg$year)
AtDep_avg <- na.omit(AtDep_avg)
AtDep_avg$square <- paste(AtDep_avg$x, AtDep_avg$y, sep = "_")
ggplot(AtDep_avg, aes(x = Sdep, y = Ndep)) +
geom_point(aes(group = square)) +
labs(title = 'Year: {frame_time}', x = 'Sdep (kgS/ha)', y = 'Ndep (kgN/ha)') +
transition_time(year) +
ease_aes('linear')
AtDep_avg %>%
pivot_longer(Sdep:Ndep, names_to = "Element",
values_to = "measure") %>%
mutate(Element = ifelse(Element == "Sdep", "Sdep (kgS/ha)", "Ndep (kgN/ha)")) %>%
ggplot(aes(x = x, y = y, fill = measure)) +
geom_tile() +
scale_fill_viridis_c(name = "", na.value = "white") +
coord_fixed() +
facet_wrap(~Element) +
theme(axis.text = element_blank(), axis.ticks = element_blank(),
axis.line = element_blank()) +
labs(title = 'Year: {frame_time}', x = '', y = '') +
transition_time(year) +
ease_aes('linear')
# forest
colnames(Sdep_for_long) <- c("x","y","variable","Sdep","year")
colnames(Ndep_for_long) <- c("x","y","variable","Ndep","year")
AtDep_for <- merge(Sdep_for_long, Ndep_for_long, by = c("x","y","variable","year"))
str(AtDep_for)
AtDep_for$year <- as.integer(AtDep_for$year)
AtDep_for <- na.omit(AtDep_for)
AtDep_for$square <- paste(AtDep_for$x, AtDep_for$y, sep = "_")
ggplot(AtDep_for, aes(x = Sdep, y = Ndep)) +
geom_point(aes(group = square)) +
labs(title = 'Year: {frame_time}', x = 'Sdep (kgS/ha)', y = 'Ndep (kgN/ha)') +
transition_time(year) +
ease_aes('linear')
AtDep_for %>%
pivot_longer(Sdep:Ndep, names_to = "Element",
values_to = "measure") %>%
mutate(Element = ifelse(Element == "Sdep", "Sdep (kgS/ha)", "Ndep (kgN/ha)")) %>%
ggplot(aes(x = x, y = y, fill = measure)) +
geom_tile() +
scale_fill_viridis_c(name = "", na.value = "white") +
coord_fixed() +
facet_wrap(~Element) +
theme(axis.text = element_blank(), axis.ticks = element_blank(),
axis.line = element_blank()) +
labs(title = 'Year: {frame_time}', x = '', y = '') +
transition_time(year) +
ease_aes('linear')
# moorland
colnames(Sdep_moo_long) <- c("x","y","variable","Sdep","year")
colnames(Ndep_moo_long) <- c("x","y","variable","Ndep","year")
AtDep_moo <- merge(Sdep_moo_long, Ndep_moo_long, by = c("x","y","variable","year"))
str(AtDep_moo)
AtDep_moo$year <- as.integer(AtDep_moo$year)
AtDep_moo <- na.omit(AtDep_moo)
AtDep_moo$square <- paste(AtDep_moo$x, AtDep_moo$y, sep = "_")
ggplot(AtDep_moo, aes(x = Sdep, y = Ndep)) +
geom_point(alpha = 0.2, aes(group = square)) +
labs(title = 'Year: {frame_time}', x = 'Sdep (kgS/ha)', y = 'Ndep (kgN/ha)') +
transition_time(year) +
ease_aes('linear')
AtDep_moo %>%
pivot_longer(Sdep:Ndep, names_to = "Element",
values_to = "measure") %>%
mutate(Element = ifelse(Element == "Sdep", "Sdep (kgS/ha)", "Ndep (kgN/ha)")) %>%
ggplot(aes(x = x, y = y, fill = measure)) +
geom_tile() +
scale_fill_viridis_c(name = "", na.value = "white") +
coord_fixed() +
facet_wrap(~Element) +
theme(axis.text = element_blank(), axis.ticks = element_blank(),
axis.line = element_blank()) +
labs(title = 'Year: {frame_time}', x = '', y = '') +
transition_time(year) +
ease_aes('linear')
# Cumulative deposition per square ####
AtDepavg_cumdep_sq <- AtDep_avg %>%
mutate(year_cat = ifelse(year %in% 1970:1978, "70_78",
ifelse(year %in% 1979:1998, "79_98",
ifelse(year %in% 1999:2007, "99_07",
ifelse(year %in% 2008:2018, "08_18",NA))))) %>%
group_by(x,y,square, year_cat) %>%
summarise(Sdep = sum(Sdep),
Ndep = sum(Ndep)) %>%
ungroup() %>%
pivot_wider(names_from = year_cat,
values_from = c(Sdep, Ndep))
psych::pairs.panels(select(AtDepavg_cumdep_sq,-x,-y,-square))
# merge with CS plots
dep_x <- AtDepavg_cumdep_sq$x
dep_y <- AtDepavg_cumdep_sq$y
CS_m <- CS07_PLOTS %>% select(plot_x = POINT_X,
plot_y = POINT_Y, REP_ID)
for(i in 1:nrow(CS_m)) {
CS_m[i,"x"] <- dep_x[which.min(abs(dep_x - CS_m$plot_x[i]))]
CS_m[i,"y"] <- dep_y[which.min(abs(dep_y - CS_m$plot_y[i]))]
}
CS_Atdep <- left_join(CS_m, AtDepavg_cumdep_sq)
summary(CS_Atdep)
psych::multi.hist(select_if(CS_Atdep, is.numeric))
AtDepfor_cumdep_sq <- AtDep_for %>%
mutate(year_cat = ifelse(year %in% 1970:1978, "70_78",
ifelse(year %in% 1979:1998, "79_98",
ifelse(year %in% 1999:2007, "99_07",
ifelse(year %in% 2008:2018, "08_18",NA))))) %>%
group_by(x,y,square, year_cat) %>%
summarise(Sdep = sum(Sdep),
Ndep = sum(Ndep)) %>%
ungroup() %>%
pivot_wider(names_from = year_cat,
values_from = c(Sdep, Ndep))
# difference since 1970
AtDepavg_diff_sq <- AtDep_avg %>%
mutate(year_cat = ifelse(year %in% 1970:1978, "70_78",
ifelse(year %in% 1979:1998, "79_98",
ifelse(year %in% 1999:2007, "99_07",
ifelse(year %in% 2008:2018, "08_18",NA))))) %>%
group_by(x,y,square, year_cat) %>%
summarise(Sdep = sum(Sdep),
Ndep = sum(Ndep)) %>%
ungroup() %>%
pivot_wider(names_from = year_cat,
values_from = c(Sdep, Ndep))
psych::pairs.panels(select(AtDepavg_cumdep_sq,-x,-y,-square))
# merge with CS plots
dep_x <- AtDepavg_cumdep_sq$x
dep_y <- AtDepavg_cumdep_sq$y
# Soil pH data ####
# data manipulation
str(CS78_PH)
str(CS98_PH)
str(CS07_PH)
str(CS16_PH)
str(UK19_PH)
CS78_PH$REP_ID <- paste(CS78_PH$SQUARE_NUM,CS78_PH$REP_NUM, sep = "X")
CS98_PH$REP_ID <- paste(CS98_PH$SQUARE_NUM,CS98_PH$REP_NUM, sep = "X")
CS07_PH$REP_ID <- paste(CS07_PH$SQUARE_NUM,CS07_PH$REP_NUM, sep = "X")
CS16_PH$REP_ID <- paste(CS16_PH$SQUARE_NUM,CS16_PH$REP_NUM, sep = "X")
UK19_PH$REP_ID <- paste(UK19_PH$SQUARE_NUM,UK19_PH$REP_NUM, sep = "X")
PH <- full_join(select(CS78_PH, REP_ID, PH1978),
select(CS98_PH, REP_ID, PH2000 = PHF2000)) %>%
full_join(select(CS07_PH, REP_ID, PH2007 = PH2007_IN_WATER)) %>%
full_join(select(CS16_PH, REP_ID, PH2016 = PH_DIW)) %>%
full_join(select(UK19_PH, REP_ID, PH2019 = PH_DIW))
str(PH)
summary(PH)
mice::md.pattern(PH)
# histograms
PH_long <- pivot_longer(PH, starts_with("PH"),
values_to = "pH",
values_drop_na = TRUE)
ggplot(PH_long, aes(x = pH)) +
geom_histogram() +
facet_wrap(~name, scales = "free_y")
PH_long$year <- as.integer(substring(PH_long$name, 3,6))
ggplot(PH_long, aes(x = year, y = pH, group = REP_ID)) +
geom_line(alpha = 0.2, col = "dodgerblue2")+
geom_jitter(alpha = 0.2, width = 1, height = 0, shape = 16)
# calculate differences between survey years
PH <- PH %>%
mutate(diff7898 = PH2000 - PH1978,
diff7807 = PH2007 - PH1978,
diff7816 = PH2016 - PH1978,
diff7819 = PH2019 - PH1978,
diff9807 = PH2007 - PH2000,
diff9816 = PH2016 - PH2000,
diff9819 = PH2019 - PH2000,
diff0716 = PH2016 - PH2007,
diff0719 = PH2019 - PH2007) %>%
mutate(diff0718 = ifelse(!is.na(diff0719), diff0719,
ifelse(!is.na(diff0716), diff0716, NA)),
diff7818 = ifelse(!is.na(diff7819), diff7819,
ifelse(!is.na(diff7816), diff7816, NA)),
diff9818 = ifelse(!is.na(diff9819), diff9819,
ifelse(!is.na(diff9816), diff9816, NA)))
summary(PH)
PH_diff_long <- PH %>%
select(REP_ID, starts_with("diff")) %>%
pivot_longer(starts_with("diff"),
values_to = "pH",
values_drop_na = TRUE) %>%
mutate(name = as.factor(name)) %>%
mutate(name = forcats::fct_inorder(name))
ggplot(PH_diff_long, aes(x = pH)) +
geom_histogram() +
facet_wrap(~name, scales = "free_y") +
geom_vline(xintercept = 0)
# select only most recent change and convert into wide format for plotting
PH_Diff_wide <- select(PH, REP_ID, diff0718) %>%
na.omit() %>%
left_join(select(CS07_PLOTS, REP_ID, POINT_X, POINT_Y))
summary(PH_Diff_wide)
ggplot(PH_Diff_wide, aes(x = POINT_X, y = POINT_Y, colour = diff0718)) +
geom_jitter(width = 5000, height = 5000) +
coord_fixed() +
scale_colour_distiller(palette = "RdBu", direction = 1,
limits = c(-1,1)*max(abs(PH_Diff_wide$diff0718)),
name = "pH change", na.value = "white") +
theme_dark()
# ** pH maps ####
library(sf)
library(leaflet)
# convert to sf object
CS_PH_loc <- PH_Diff_wide %>%
select(POINT_X, POINT_Y) %>%
as.matrix() %>%
st_multipoint(dim="XY") %>%
st_sfc(crs = 27700) %>%
st_transform(crs = 4326) %>%
st_cast("POINT")
CS_PH_loc <- st_sf(cbind(select(PH_Diff_wide, REP_ID, pH_change = diff0718),CS_PH_loc))
# Create variable for colouring of points. first cut the continuous variable
# into bins - these bins are now factors
CS_PH_loc$pH_lev <- cut(CS_PH_loc$pH_change,
c(-3,-1.5,-1,-0.5,0,0.5,1,1.5,3))
pHCol <- colorFactor(palette = 'RdBu', CS_PH_loc$pH_lev)
# add random jitter to points so not overplotting
CS_PH_loc_jitter <- st_jitter(CS_PH_loc, factor = 0.005)
# read in UK boundary shapefile
UK_boundary <- st_read("../../../GBR_adm/GBR_adm0.shp")
# plot interactively
leaflet() %>%
addPolygons(data = UK_boundary, stroke = FALSE,
color = "black") %>%
addCircleMarkers(data = CS_PH_loc_jitter, radius = 5,
label = CS_PH_loc$REP_ID,
color = ~pHCol(CS_PH_loc$pH_lev),
fillOpacity = 1, stroke = FALSE) %>%
addLegend('topright', pal = pHCol, values = CS_PH_loc$pH_lev,
title = 'pH change',
opacity = 1)
# plot histograms of difference between survey years wrapping together 16 and 19
PH_diff_long %>% filter(name %in%
c("diff7807","diff9807","diff7898",
"diff7818","diff9818","diff0718")) %>%
ggplot(aes(x = pH)) +
geom_histogram() +
facet_wrap(~name) +
geom_vline(xintercept = 0)
ggsave("pH change histograms facet by survey comparison.png",
path = "Outputs/Graphs/",
width = 20, height = 12, units = "cm")
# remove 18 variables for consistency later in script
PH_diff_long <- filter(PH_diff_long,
names %in% c("diff7898",
"diff7807",
"diff7816",
"diff7819",
"diff9807",
"diff9816",
"diff9819",
"diff0716",
"diff0719"))
# ** breakdown by AVC data ####
# AVC data manipulation
# One row per plot per survey; unique() removes duplicate plot records.
hab07 <- select(CS07_IBD, REP_ID = REP_ID07, AVC07) %>%
unique()
hab98 <- select(CS98_IBD, REP_ID = REP_ID98, AVC98) %>%
unique()
hab78 <- select(CS78_IBD, REP_ID = REP_ID78, AVC78) %>%
unique()
# create combined AVC variable, if 07 has AVC use that otherwise use 98 then 78.
# There are only 3 sites with no AVC data and I can't see how to get theirs as
# they don't appear in 2016/19.
hab <- full_join(hab07, hab98) %>% full_join(hab78) %>%
mutate_if(is.factor, as.character) %>%
mutate(AVC = ifelse(!is.na(AVC07), AVC07,
ifelse(!is.na(AVC98), AVC98,
ifelse(!is.na(AVC78), AVC78, NA)))) %>%
mutate(AVC_desc = recode(AVC,
`1` = "Crops/Weeds",
`2` = "Tall herb/grass",
`3` = "Fertile grassland",
`4` = "Infertile grassland",
`5` = "Lowland wooded",
`6` = "Upland wooded",
`7` = "Moorland grass/mosaic",
`8` = "Heath/bog"))
# calculate total change in pH over survey years
# NOTE: rowSums(..., na.rm = TRUE) returns 0 when all three diffs are NA, so
# change_dir == 0 below mixes "no net change" with "no data"; the two ifelse
# lines backfill such rows from single-interval diffs where available.
PH$change_dir <- rowSums(select(PH, diff7898, diff9807, diff0719), na.rm = TRUE)
summary(PH$change_dir)
filter(PH,change_dir == 0) %>% select(starts_with("diff")) %>%
summary()
PH$change_dir <- ifelse(PH$change_dir == 0 & !is.na(PH$diff7807), PH$diff7807, PH$change_dir)
PH$change_dir <- ifelse(PH$change_dir == 0 & !is.na(PH$diff7819), PH$diff7819, PH$change_dir)
# Combine pH and AVC data and convert to long format
PH_long_hab <- left_join(PH, select(BH_IMP, REP_ID, Management)) %>%
droplevels() %>%
select(-starts_with("diff")) %>%
pivot_longer(starts_with("PH"),
names_to = c("Variable","year"),
names_sep = "_",
values_to = "pH",
values_drop_na = TRUE) %>%
filter(!is.na(Management )) %>%
mutate(year = as.numeric(year))
# plots of pH change over time
PH_long_hab %>%
ggplot(aes(x = year, y = pH, group = REP_ID)) +
geom_line(alpha = 0.5, aes(colour = change_dir) )+
geom_jitter(size = 0.2, width = 1, height = 0, shape = 16, alpha = 0.8,
aes(colour = change_dir)) +
facet_wrap(~Management, nrow = 2) +
# symmetric colour limits so zero change sits at the palette midpoint
scale_colour_distiller(palette = "RdBu", direction = 1,
limits = c(-1,1)*max(abs(PH_long_hab$change_dir)),
name = "pH change", na.value = "white") +
theme_dark()
ggsave("pH change over time facetted by Management.png", path = "Outputs/Graphs/",
width = 12, height = 12, units = "cm")
PH_long_hab %>%
ggplot(aes(x = year, y = pH)) +
geom_line(alpha = 0.5, aes(colour = change_dir, group = REP_ID))+
geom_jitter(size = 0.2, width = 1, height = 0,
shape = 16, alpha = 0.1,
colour = "grey50") +
geom_boxplot(fill= NA, aes(group = year), outlier.shape = NA) +
facet_wrap(~Management, nrow = 2) +
# geom_smooth(formula = y ~ poly(x,3)) +
scale_colour_distiller(palette = "RdBu", direction = 1,
limits = c(-1,1)*max(abs(PH_long_hab$change_dir)),
name = "pH change", na.value = "white") +
# theme_dark() +
NULL
ggsave("pH change over time boxplots facetted by management.png", path = "Outputs/Graphs/",
width = 12, height = 15, units = "cm")
# combine ph difference and AVC data
PH_diff_long <- left_join(PH_diff_long,
select(hab, REP_ID, AVC = AVC_desc)) %>%
droplevels()
table(PH_diff_long$AVC)
# janitor::get_dupes - check the join has not fanned out rows
get_dupes(PH_diff_long, REP_ID, name)
PH_diff_long %>%
filter(!is.na(AVC)) %>%
filter(name %in% c("diff7807","diff7898","diff9807","diff7818","diff9818","diff0718")) %>%
ggplot(aes(x = pH)) +
geom_histogram() +
geom_vline(xintercept = 0) +
facet_grid(AVC ~ name, scales = "free_y")
ggsave("pH difference histograms facetted by AVC and year.png",
path = "Outputs/Graphs/", width = 28, height = 24, units = "cm")
# ** Soil pH in CaCl2 ####
# Only have pH in CaCl2 data for 2007 onwards
str(CS78_PH)
str(CS98_PH)
str(CS07_PH)
str(CS16_PH)
str(UK19_PH)
# data manipulation
# pH_change prefers the 2019 resurvey and falls back to 2016.
PHC <- full_join(select(CS07_PH, REP_ID, PHC2007 = PH2007_IN_CACL2),
select(CS16_PH, REP_ID, PHC2016 = PH_CACL2)) %>%
full_join(select(UK19_PH, REP_ID, PHC2019 = PH_CACL)) %>%
mutate(pH_change = ifelse(!is.na(PHC2019), PHC2019 - PHC2007,
ifelse(!is.na(PHC2016), PHC2016 - PHC2007, NA))) %>%
left_join(unique(select(hab, REP_ID, AVC = AVC_desc)))
str(PHC)
summary(PHC)
# mice::md.pattern - missing-data pattern overview
md.pattern(PHC)
# histograms of pH CaCl2 change
PHC %>% filter(!is.na(AVC)) %>%
ggplot(aes(x = pH_change)) + geom_histogram() +
geom_vline(xintercept = 0) +
facet_wrap(~AVC, nrow = 2)
ggsave("pH CaCl2 change 07 to 1619 facet by AVC.png", path = "Outputs/Graphs/",
width = 28, height = 12, units = "cm")
# side-by-side comparison of CaCl2 and DIW pH change on matched axes
p1 <-PHC %>%
ggplot(aes(x = pH_change)) + geom_histogram() +
geom_vline(xintercept = 0)+
labs(x = "pH change", title = bquote("pH (CaCl"[2]*")")) +
scale_x_continuous(limits = c(-3,3))+
scale_y_continuous(limits = c(0,110), expand = c(0,0))
p2 <- PH_diff_long %>% filter(name %in% c("diff0716","diff0719")) %>%
ggplot(aes(x = pH)) + geom_histogram() +
geom_vline(xintercept = 0) +
labs(x = "", title = "pH (DIW)")+
scale_x_continuous(limits = c(-3,3))+
scale_y_continuous(limits = c(0,110), expand = c(0,0))
# patchwork: stack p2 over p1
p2/p1
ggsave("pH change 07 to 1619 DIW and CaCl2.png", path = "Outputs/Graphs/",
width = 15, height = 18, units = "cm")
# data manipulation into long format
# One row per plot per survey year; year extracted from the PHCyyyy names.
PHC_long <- PHC %>%
pivot_longer(starts_with("PHC"), names_to = "year",
names_prefix = "PHC", values_to = "pH_CaCl2",
values_drop_na = TRUE)
str(PHC_long)
# boxplot/line/scatter plot of pH CaCl2 change over time
PHC_long %>% mutate(year = as.numeric(year)) %>%
filter(!is.na(AVC)) %>%
ggplot(aes(x = year, y = pH_CaCl2)) +
geom_jitter(shape = 16, size = 0.5, alpha = 0.5,
width = 1, height = 0) +
geom_boxplot(aes(group = year), fill = NA) +
geom_line(aes(group = REP_ID, colour = pH_change), alpha = 0.5) +
facet_wrap(~AVC, nrow = 2) +
# FIX: was c(-1,1)*abs(max(...)) - abs/max in the wrong order (asymmetric
# limits if max is negative) and no na.rm, so a single NA pH_change made the
# limits NA. Now matches the max(abs(...)) pattern used elsewhere in this
# script, keeping zero change at the palette midpoint.
scale_colour_distiller(palette = "RdBu", direction = 1,
limits = c(-1,1)*max(abs(PHC_long$pH_change), na.rm = TRUE))
ggsave("pH CaCl2 over time boxplots facet by AVC.png",
path = "Outputs/Graphs/",
width =28, height = 15, units = "cm")
# combine pH in CaCl2 and DIW and plot against each other
# DIW change prefers 2019 over 2016, mirroring the CaCl2 pH_change rule above.
phc_wide_diff <- PH %>%
mutate(pH_diw_change = ifelse(!is.na(diff0719),diff0719,
ifelse(!is.na(diff0716), diff0716, NA))) %>%
select(REP_ID, PH2007, PH2016, PH2019, pH_diw_change) %>%
full_join(PHC)
ggplot(phc_wide_diff, aes(x = pH_diw_change, y = pH_change)) +
geom_abline(intercept = 0,slope = 1, colour = "grey") +
geom_vline(xintercept = 0, colour = "grey") +
geom_hline(yintercept = 0, colour = "grey") +
geom_point() +
# geom_smooth(method = "lm") +
labs(x = "pH (DIW) change", y = bquote("pH (CaCl"[2]*") change"))
ggsave("pH change over time DIW vs CaCl2 scatterplot.png",
path = "Outputs/Graphs/",
width = 15, height = 15, units = "cm")
# there is one sample with a NA for AVC so removing
phc_wide_diff %>% filter(!is.na(AVC)) %>%
ggplot(aes(x = pH_diw_change, y = pH_change)) +
geom_abline(intercept = 0,slope = 1, colour = "grey") +
geom_vline(xintercept = 0, colour = "grey") +
geom_hline(yintercept = 0, colour = "grey") +
facet_wrap(~AVC, nrow = 2) +
geom_point() +
# geom_smooth(method = "lm") +
labs(x = "pH (DIW) change", y = bquote("pH (CaCl"[2]*") change"))
ggsave("pH change over time DIW vs CaCl2 scatterplots facet by AVC.png",
path = "Outputs/Graphs/",
width = 28, height = 15, units = "cm")
# NOTE(review): the underscore column names (PHC_2007, PH_2007, ...) differ
# from the PH2007/PH2016 style selected from PH above - confirm PH actually
# carries both naming conventions at this point in the script.
PH %>%
select(REP_ID, PHC_2007, PH_2007, PH_2019, PHC_2019) %>%
pivot_longer(starts_with("PH"),
names_to = c("Variable","Year"),
names_sep = "_") %>%
na.omit() %>%
pivot_wider(names_from = "Variable",
values_from = "value") %>%
ggplot(aes(x = PH, y = PHC, colour = Year)) +
geom_point() +
geom_abline(slope = 1, intercept = 0) +
coord_fixed() +
facet_wrap(~Year) +
labs(x = "pH (DIW)", y = bquote("pH CaCl"[2]))
# Plant Ellenberg scores ####
str(CS19_SP)
table(CS19_SP$PLOT_TYPE)
unique(CS19_SP[CS19_SP$PLOT_TYPE=="XX","REP_ID"])
table(CS19_SP[CS19_SP$PLOT_TYPE=="X","PLOTYEAR"])
str(SPECIES_LIB_TRAITS)
filter(SPECIES_LIB_CODES, COLUMN_NAME == "GROWTH_FORM")
# Mean (unweighted) Ellenberg scores per plot from the 2019 species records;
# XX plots are merged into their X equivalents via the REP_ID gsub.
CS18_ELL <- filter(CS19_SP, PLOT_TYPE %in% c("X","XX")) %>%
mutate(REP_ID = paste0(SQUARE,PLOT_TYPE,PLOT_NUMBER)) %>%
mutate(REP_ID = gsub("XX","X",REP_ID)) %>%
left_join(select(SPECIES_LIB_TRAITS, BRC_NUMBER,
starts_with("EBER"),
GROWTH_FORM)) %>%
filter(GROWTH_FORM %in% c("f","fe","g","m","s","ss","w")) %>% # filter to vascular plants
mutate(across(starts_with("EBER"), na_if, y = 0)) %>% # set 0 values to NA
group_by(REP_ID) %>%
summarise(across(starts_with("EBER"), mean, na.rm = TRUE,
.names = "{col}18")) %>%
rename_with(~gsub("EBERG","",.x))
summary(CS18_ELL)
# sanity check 2018 means against the 2016 GMEP-derived values
test <- full_join(CS18_ELL, GM16_IBD, by = c("REP_ID" = "REP_ID16"))
plot(N18 ~ N16, test);abline(0,1)
# Recompute 1998 Ellenberg means from raw species data for comparison with the
# published CS98_IBD values.
CS98_ELL <- CS98_SP %>%
select(REP_ID, BRC_NUMBER, TOTAL_COVER) %>%
unique() %>%
filter(TOTAL_COVER > 0) %>%
left_join(select(SPECIES_LIB_TRAITS, BRC_NUMBER,
starts_with("EBER"),
GROWTH_FORM)) %>%
# filter(GROWTH_FORM %in% c("f","fe","g","m","s","ss","w")) %>% # filter to vascular plants
mutate(across(starts_with("EBER"), na_if, y = 0)) %>% # set 0 values to NA
group_by(REP_ID) %>%
# NOTE(review): the lambda divides each column's sum by the count of
# non-missing EBERGN (the N score column) rather than that column's own
# non-missing count - relies on dplyr data masking to see EBERGN inside the
# across() function; confirm this denominator is intended for all columns.
summarise(across(starts_with("EBER"), function(x) sum(x, na.rm=TRUE)/length(na.omit(EBERGN)),
.names = "{col}98_new")) %>%
rename_with(~gsub("EBERG","",.x))
test <- full_join(CS98_ELL, CS98_IBD, by = c("REP_ID" = "REP_ID98"))
#par(mfrow=c(2,2))
plot(R98_new ~ R98, test);abline(0,1)
plot(N98_new ~ N98, test);abline(0,1)
plot(W98_new ~ F98, test);abline(0,1)
plot(L98_new ~ L98, test);abline(0,1)
par(mfrow=c(1,1))
summary(CS18_ELL)
# Compare Ellenberg scores from inner (4m2, SM_) vs whole (400m2, WH_) plots.
# X_Ell_inner / X_Ell_whole are built earlier in the script (outside this view).
X_Ell_comp <- full_join(X_Ell_inner, X_Ell_whole) %>%
left_join(hab) %>%
mutate(R_diff = SM_R - WH_R,
N_diff = SM_N - WH_N,
W_diff = SM_W - WH_W,
L_diff = SM_L - WH_L)
# p1 kept for the combined figure saved further below
p1 <- ggplot(X_Ell_comp, aes(x = WH_R, y = SM_R)) +
geom_point(size=0.5) +
geom_abline(intercept = 0, slope = 1) +
facet_wrap(~AVC_desc) +
labs(x = bquote("Ellenberg R 400m"^2~"plot"),
y = bquote("Ellenberg R 4m"^2~"plot"))
ggplot(X_Ell_comp, aes(x = WH_N, y = SM_N)) +
geom_point() +
geom_abline(intercept = 0, slope = 1) +
facet_wrap(~AVC_desc)
ggplot(X_Ell_comp, aes(x = R_diff)) +
geom_histogram() +
geom_vline(xintercept = 0) +
facet_wrap(~AVC_desc)
ggplot(X_Ell_comp, aes(x = WH_R, y = SM_R)) +
geom_point() +
geom_abline(intercept = 0, slope = 1) +
facet_grid(Year~AVC_desc) +
theme_bw()
ggplot(X_Ell_comp, aes(x = N_diff)) +
geom_histogram() +
geom_vline(xintercept = 0) +
facet_wrap(~AVC_desc)
X_Ell_comp %>%
select(Year, REP_ID, ends_with("diff"), AVC_desc) %>%
pivot_longer(ends_with("diff"), names_to = "Ellenberg") %>%
ggplot(aes(x = value)) +
geom_histogram() +
geom_vline(xintercept = 0) +
scale_x_continuous(limits = c(-2.5,2.5)) +
facet_grid(Ellenberg~AVC_desc)
# weighted Ellenberg comparison
X_wEll_comp <- full_join(X_wEll_inner, X_wEll_whole) %>%
left_join(hab) %>%
mutate(R_diff = SM_R - WH_R,
N_diff = SM_N - WH_N,
W_diff = SM_W - WH_W,
L_diff = SM_L - WH_L)
p2 <- ggplot(filter(X_wEll_comp, Year != 1978), aes(x = WH_R, y = SM_R)) +
geom_point(size=0.5) +
geom_abline(intercept = 0, slope = 1) +
facet_wrap(~AVC_desc) +
labs(x = bquote("Ellenberg R 400m"^2~"plot"),
y = bquote("Ellenberg R 4m"^2~"plot"))
ggplot(filter(X_wEll_comp, Year != 1978), aes(x = R_diff)) +
geom_histogram() +
geom_vline(xintercept = 0) +
facet_wrap(~AVC_desc)
X_wEll_comp %>%
select(Year, REP_ID, ends_with("diff"), AVC_desc) %>%
pivot_longer(ends_with("diff"), names_to = "Ellenberg") %>%
ggplot(aes(x = value)) +
geom_histogram() +
geom_vline(xintercept = 0) +
scale_x_continuous(limits = c(-2.5,2.5)) +
facet_grid(Ellenberg~AVC_desc)
# Welch two-sample t-tests: small vs whole plot Ellenberg R (results below)
t.test(X_wEll_comp$SM_R,X_wEll_comp$WH_R)
# t = -0.47853, df = 18816, p-value = 0.6323
t.test(X_Ell_comp$SM_R,X_Ell_comp$WH_R)
# t = -3.3001, df = 19015, p-value = 0.0009682
# Per-AVC t-tests of small vs whole plot cover-weighted Ellenberg R.
# seq_along() instead of 1:length(x) so an empty AVC vector cannot produce
# the c(1, 0) iteration bug. Printed results recorded in the comments below.
x <- na.omit(unique(X_wEll_comp$AVC_desc))
for(i in seq_along(x)){
dat <- filter(X_wEll_comp, AVC_desc == x[i])
print(paste(x[i],"p =",round(t.test(dat$SM_R,dat$WH_R)$p.value,5)))
}
# [1] "Tall herb/grass p = 0.85153"
# [1] "Crops/Weeds p = 0.04266"
# [1] "Fertile grassland p = 0.92217"
# [1] "Heath/bog p = 2e-05"
# [1] "Moorland grass/mosaic p = 0.12709"
# [1] "Upland wooded p = 0.74556"
# [1] "Infertile grassland p = 0.92811"
# [1] "Lowland wooded p = 0.39651"
# Same per-AVC t-tests for the unweighted Ellenberg R scores.
# seq_along() replaces the 1:length(x) anti-pattern (safe on empty input).
x <- na.omit(unique(X_Ell_comp$AVC_desc))
for(i in seq_along(x)){
dat <- filter(X_Ell_comp, AVC_desc == x[i])
print(paste(x[i],"p =",round(t.test(dat$SM_R,dat$WH_R)$p.value,5)))
}
# [1] "Tall herb/grass p = 0.91431"
# [1] "Crops/Weeds p = 0.06944"
# [1] "Fertile grassland p = 0.05292"
# [1] "Heath/bog p = 0"
# [1] "Moorland grass/mosaic p = 0"
# [1] "Upland wooded p = 0.00329"
# [1] "Infertile grassland p = 0.06137"
# [1] "Lowland wooded p = 0.96831"
# Combined Ellenberg R figure (patchwork): unweighted vs cover-weighted
p1 + ggtitle("Unweighted") + p2 + ggtitle("Cover weighted")
ggsave("Ellenberg R plot size comparison.png", path = "Outputs/Graphs/",
width = 24, height = 12, units = "cm")
# Repeat the plot-size comparison for Ellenberg N (fertility)
p1 <- ggplot(filter(X_Ell_comp, Year != 1978), aes(x = WH_N, y = SM_N)) +
geom_point(size=0.5) +
geom_abline(intercept = 0, slope = 1) +
facet_wrap(~AVC_desc) +
labs(x = bquote("Ellenberg N 400m"^2~"plot"),
y = bquote("Ellenberg N 4m"^2~"plot")) +
ggtitle("Unweighted")
p2 <- ggplot(filter(X_wEll_comp, Year != 1978), aes(x = WH_N, y = SM_N)) +
geom_point(size=0.5) +
geom_abline(intercept = 0, slope = 1) +
facet_wrap(~AVC_desc) +
labs(x = bquote("Ellenberg N 400m"^2~"plot"),
y = bquote("Ellenberg N 4m"^2~"plot")) +
ggtitle("Cover weighted")
p1 + p2
ggsave("Ellenberg N plot size comparison.png", path = "Outputs/Graphs/",
width = 24, height = 12, units = "cm")
# Overall small vs whole plot t-tests for Ellenberg N (results below)
t.test(X_wEll_comp$SM_N,X_wEll_comp$WH_N)
# t = -0.12149, df = 18823, p-value = 0.9033
t.test(X_Ell_comp$SM_N,X_Ell_comp$WH_N)
# t = -1.621, df = 19043, p-value = 0.105
# correlations
# Per-AVC t-tests of small vs whole plot cover-weighted Ellenberg N.
# seq_along() replaces the 1:length(x) anti-pattern (safe on empty input).
x <- na.omit(unique(X_wEll_comp$AVC_desc))
for(i in seq_along(x)){
dat <- filter(X_wEll_comp, AVC_desc == x[i])
print(paste(x[i],"p =",round(t.test(dat$SM_N,dat$WH_N)$p.value,5)))
}
# [1] "Tall herb/grass p = 0.8639"
# [1] "Crops/Weeds p = 0.04501"
# [1] "Fertile grassland p = 0.83626"
# [1] "Heath/bog p = 0.03957"
# [1] "Moorland grass/mosaic p = 0.07602"
# [1] "Upland wooded p = 0.63365"
# [1] "Infertile grassland p = 0.38431"
# [1] "Lowland wooded p = 0.31157"
# Same per-AVC Ellenberg N t-tests for the unweighted scores.
# seq_along() replaces the 1:length(x) anti-pattern (safe on empty input).
x <- na.omit(unique(X_Ell_comp$AVC_desc))
for(i in seq_along(x)){
dat <- filter(X_Ell_comp, AVC_desc == x[i])
print(paste(x[i],"p =",round(t.test(dat$SM_N,dat$WH_N)$p.value,5)))
}
# [1] "Tall herb/grass p = 0.91795"
# [1] "Crops/Weeds p = 0.05538"
# [1] "Fertile grassland p = 0.38703"
# [1] "Heath/bog p = 0"
# [1] "Moorland grass/mosaic p = 0"
# [1] "Upland wooded p = 0.01399"
# [1] "Infertile grassland p = 0.41287"
# [1] "Lowland wooded p = 0.62069"
# Data manipulation
str(CS07_IBD)
str(CS98_IBD)
str(CS78_IBD)
str(GM16_IBD)
str(CS18_ELL)
# get GMEP data to have CS REP_ID
# Lookup table translating GMEP square numbering to CS square numbering.
GMEP_CS_match <- CS16_PH %>%
select(SQUARE_NUM, GMEP_NUM, PLOT_TYPE, REP_NUM) %>%
filter(!is.na(GMEP_NUM)) %>%
mutate(CS_REP_ID = paste0(SQUARE_NUM, PLOT_TYPE, REP_NUM),
GMEP_REP_ID = paste0(GMEP_NUM, PLOT_TYPE, REP_NUM)) %>%
select(CS_REP_ID, GMEP_REP_ID)
# Get GMEP data into similar format to CS data
# NOTE: overwrites GM16_IBD in place - rerunning this chunk without reloading
# the original GM16_IBD will fail (PH/FERT columns already renamed).
GM16_IBD <- GM16_IBD %>%
right_join(GMEP_CS_match, by = c("REP_ID" = "GMEP_REP_ID")) %>%
select(REP_ID16 = CS_REP_ID, R16 = PH, N16 = FERT, L16 = LIGHT, F16 = WET)
# Combine IBD files for the different years
IBD_comb <- full_join(CS07_IBD, CS98_IBD, by = c("REP_ID07" = "REP_ID98")) %>%
full_join(CS78_IBD, by = c("REP_ID07" = "REP_ID78")) %>%
full_join(GM16_IBD, by = c("REP_ID07" = "REP_ID16")) %>%
full_join(CS18_ELL, by = c("REP_ID07" = "REP_ID"))
# Use AVC data from 2007 if it is there, otherwise 98 or 78
IBD_comb$AVC <- ifelse(!is.na(IBD_comb$AVC07), IBD_comb$AVC07,
ifelse(!is.na(IBD_comb$AVC98), IBD_comb$AVC98,
IBD_comb$AVC78))
summary(IBD_comb$AVC)
# get plot type from REP_ID
# Strip all digits, leaving the letter code (e.g. "X") embedded in the ID.
IBD_comb$PLOT_TYPE <- gsub("[^a-zA-Z]", "", IBD_comb$REP_ID07)
summary(as.factor(IBD_comb$PLOT_TYPE))
# Calculate difference in Ell R over the years
# NOTE: this overwrites any earlier ELL object; columns are now
# REP_ID/Ellenberg/R1978...R2019 plus the diff columns.
ELL <- X_Ell %>%
select(Year, REP_ID, contains("_R_")) %>%
pivot_longer(contains("_R_"), names_to = "Ellenberg") %>%
mutate(Year = as.character(Year)) %>%
pivot_wider(names_from = Year,
names_prefix = "R") %>%
mutate(diff7890 = R1990 - R1978,
diff9098 = R1998 - R1990,
diff9807 = R2007 - R1998,
diff0719 = R2019 - R2007
)
# Calculate overall Ell R change
# Same caveat as pH change_dir: na.rm = TRUE makes all-NA rows sum to 0.
ELL$Rchange <- rowSums(select(ELL, diff7890, diff9098,
diff9807, diff0719), na.rm = TRUE)
summary(ELL$Rchange)
filter(ELL, Rchange == 0) %>% select(starts_with("diff")) %>%
summary()
# Convert Ell R change into long format
ELL_diff_long <- ELL %>%
select(REP_ID, starts_with("diff")) %>%
droplevels %>%
pivot_longer(starts_with("diff"), values_to = "Ell_R") %>%
filter(!is.na(Ell_R))
# NOTE(review): ELL_diff_long only keeps REP_ID and the diff columns above,
# so the filter/facet on AVC below will error unless AVC has been joined in
# elsewhere - looks like a join with `hab` is missing here.
ELL_diff_long %>%
filter(!is.na(AVC)) %>%
ggplot(aes(x = Ell_R)) +
geom_histogram() +
geom_vline(xintercept = 0) +
facet_grid(name ~ AVC, scales = "free_y")
ggsave("Ellenberg R change histograms all plots facetted AVC year.png",
path = "Outputs/Graphs/", width = 28, height = 20, units ="cm")
# Convert Ellenberg R scores file to long format
# NOTE(review): REP_ID07, PLOT_TYPE.x, AVC_desc, R07/R98/R78/R16 are not
# columns of ELL as rebuilt above (which has R1978-style names) - this looks
# like stale code written against an earlier version of ELL.
ELL_R_LONG <- ELL %>% select(REP_ID = REP_ID07,
PLOT_TYPE = PLOT_TYPE.x,
AVC = AVC_desc, R07, R98, R78, R16, Rchange) %>%
filter(PLOT_TYPE == "X") %>%
droplevels() %>%
select(-PLOT_TYPE) %>%
pivot_longer(cols = c(R07,R98,R78,R16), names_to = "year",
names_prefix = "R") %>%
mutate(year = ifelse(year == "07", 2007,
ifelse(year == "98", 1998,
ifelse(year == "78", 1978,
ifelse(year == "16", 2016, NA)))))
str(ELL_R_LONG)
summary(ELL_R_LONG$AVC)
# Ellenberg R score change over time boxplot/scatter/line graph
ELL %>%
select(-starts_with("diff")) %>%
pivot_longer(R1978:R2019,
names_to = "year",
names_prefix = "R",
names_transform = list(year = as.integer)) %>%
left_join(BH_IMP) %>%
filter(!is.na(Management)) %>%
mutate(Management = recode(Management,
"High" = "High intensity",
"Low" = "Low intensity"),
Ellenberg = recode(Ellenberg,
"SM_R_UW" = "Small unweighted",
"SM_R_W" = "Small weighted",
"WH_R_UW" = "Full unweighted",
"WH_R_W"= "Full weighted")) %>%
ggplot(aes(x = year, y = value)) +
geom_line(alpha = 0.5, aes(group = REP_ID, colour = Rchange))+
geom_jitter(size = 0.2, width = 1, height = 0,
shape = 16, alpha = 0.1,
colour = "grey50") +
geom_boxplot(fill= NA, aes(group = year), outlier.shape = NA, width = 3) +
facet_grid(Ellenberg~Management) +
labs(y = "Ellenberg R") +
# geom_smooth(formula = y ~ poly(x,3)) +
# symmetric limits so zero change is the palette midpoint
scale_colour_distiller(palette = "RdBu", direction = 1,
limits = c(-1,1)*max(abs(ELL$Rchange)),
name = "Ell R change", na.value = "white") +
# theme_dark() +
NULL
ggsave("Ellenberg R change over time X plots boxplots facetted by Management.png",
path = "Outputs/Graphs/", width = 15, height = 20, units = "cm")
str(X_Ell)
str(BH_IMP)
# Reshape so small and full plot scores sit side by side for comparison,
# split by cover weighting and management intensity.
plot_dat <- left_join(X_Ell, BH_IMP) %>%
filter(!is.na(Management)) %>%
select(REP_ID, Year, contains("_R_"), Management) %>%
pivot_longer(contains("_R_"),
names_to = c("PlotSize","Score","Weighting"),
names_sep = "_") %>%
mutate(PlotSize = recode(PlotSize,
"SM" = "Small",
"WH" = "Full"),
Weighting = recode(Weighting,
"UW" = "Unweighted",
"W" = "Weighted"),
Management = recode(Management,
"High" = "High intensity management",
"Low" = "Low intensity management")) %>%
pivot_wider(names_from = PlotSize,
values_from = value)
ggplot(plot_dat, aes(x = Full, y = Small)) +
geom_point(alpha = 0.25, colour = "dodgerblue3") +
geom_abline(slope = 1, intercept = 0) +
coord_fixed() +
facet_grid(Management~Weighting) +
labs(x = "Full size plot", y = "Smaller size plot")
ggsave("Ellenberg R plot size comparison by weighting and management.png",
path = "Outputs/Graphs/", width = 12, height = 12, units ="cm")
# Combined pH and ellenberg R ####
# change graphs - combine difference stats
# NOTE(review): select(-AVC) assumes ELL_diff_long carries AVC, but as built
# above it only has REP_ID/name/Ell_R - likely stale code; verify before use.
ph_ell_comb <- ELL_diff_long %>%
select(-AVC) %>%
filter(grepl("X", REP_ID)) %>%
full_join(filter(PH_diff_long, name %in% unique(ELL_diff_long$name)))
str(ph_ell_comb)
md.pattern(ph_ell_comb)
# scatter plot of pH change against Ellenberg R change facetted by survey year
# comparison and AVC
ph_ell_comb %>%
filter(!is.na(AVC)) %>%
ggplot(aes(x = pH, y = Ell_R)) +
geom_point() +
facet_grid(name~AVC) +
geom_smooth(method = "lm")
ggsave("Ellenberg R vs pH change by AVC and survey.png",
path = "Outputs/Graphs/", width = 28, height = 20, units = "cm")
# pH in CaCl2 and Ellenberg R
phc_ell_comb <- ELL_diff_long %>%
filter(grepl("X", REP_ID)) %>%
filter(name %in% c("diff0716","diff0719")) %>%
full_join(na.omit(select(PHC, REP_ID, pH_change)))
str(phc_ell_comb)
md.pattern(phc_ell_comb)
# scatter plot of pH CaCl2 vs Ellenberg R change, no facets
phc_ell_comb %>%
# filter(!is.na(AVC)) %>%
ggplot(aes(x = pH_change, y = Ell_R)) +
geom_point() +
# facet_wrap(~AVC) +
geom_smooth(method = "lm")
# combine pH and Ellenberg R scores in wide format
str(PH)
str(ELL)
# differences
# NOTE(review): REP_ID07 / diff7807 are not columns of ELL as rebuilt above -
# like ELL_R_LONG this appears written against an earlier version of ELL.
ph_ell_wide_diff <- ELL %>%
filter(REP_ID07 %in% PH$REP_ID) %>%
select(REP_ID = REP_ID07, diff7807, diff7898, diff9807) %>%
full_join(select(PH, REP_ID,diff7807, diff7898, diff9807),
suffix = c("_ellR","_ph"), by = "REP_ID")
md.pattern(ph_ell_wide_diff)
psych::pairs.panels(select_if(ph_ell_wide_diff, is.numeric))
# actual pH and Ellenberg R values
ph_ell_wide <- ELL %>%
filter(REP_ID07 %in% PH$REP_ID) %>%
select(REP_ID = REP_ID07, R78, R98, R07, R16) %>%
full_join(select(PH, REP_ID, PH1978, PH2000, PH2007, PH2016, PH2019),
suffix = c("_ellR","_phw"), by = "REP_ID")
md.pattern(ph_ell_wide)
psych::pairs.panels(select_if(ph_ell_wide, is.numeric),
ellipses = FALSE, rug = FALSE,
method = "spearman")
# strongest correlation tends to be current year
# Parse variable ("PH"/"R") and year out of the column names, then collapse
# 2000->1998 and 2016/2019->2018 so surveys can be compared in one panel each.
ph_ell_long <- ph_ell_wide %>%
pivot_longer(ends_with(c("78","98","00","07","16","19")),
values_drop_na = TRUE) %>%
mutate(year = sapply(strsplit(name, "[A-Z]{1,2}"),"[",2),
variable = sapply(strsplit(name, "[0-9]{1,4}"),"[",1)) %>%
mutate(year = as.numeric(recode(year, "98" = "1998",
"78" = "1978",
"07" = "2007",
"16" = "2018",
"19" = "2018",
"2000" = "1998",
"2016" = "2018",
"2019" = "2018")),
variable = recode(variable,
"PH" = "Soil_pH",
"R" = "Ell_R")) %>%
select(-name) %>%
unique() %>%
pivot_wider(names_from = variable,
values_from = value)
ggplot(ph_ell_long, aes(x = Soil_pH, y = Ell_R)) +
geom_point(size = 1) +
facet_wrap(~year) +
labs(x = "Soil pH", y = "Ellenberg R")
ggsave("Ellenberg R vs Soil pH by year.png", path = "Outputs/Graphs",
width = 15, height = 15, units = "cm")
# checking if sigmoidal curve seems appropriate
# Generate example logistic curves over the observed pH range for overlay on
# the pH vs Ellenberg R scatter plots below.
x <- seq(3.5, 9, 0.1)
# Two symmetric logistic curves: scale / (1 + exp(-rate * (x - mid))) + offset
y <- 4.5 / (1 + exp(-1.5 * (x - 5))) + 2
y2 <- 4.5 / (1 + exp(-2 * (x - 4.5))) + 2
dat <- data.frame(x, y, y2)
# asymmetrical sigmoidal curve (generalised logistic); parameters kept as
# globals c1-c5 so they can be tweaked interactively
c1 <- 4
c2 <- 1.5
c3 <- 3.5
c4 <- 2.5
c5 <- 6
y3 <- c1 / ((1 + exp(-c2 * (x - c3)))^c5) + c4
dat <- data.frame(x, y, y2, y3)
# Overlay the candidate sigmoid curves (dat) on the observed pH vs Ell R data
ggplot(ph_ell_long, aes(x = Soil_pH, y = Ell_R)) +
geom_point(size = 1) +
facet_wrap(~year) +
geom_smooth() +
geom_line(data = dat, aes(x = x, y = y),
colour = "purple", size = 1.5) +
geom_line(data = dat, aes(x = x, y = y2),
colour = "red", size = 1.5) +
geom_line(data = dat, aes(x = x, y = y3),
colour = "orange", size = 1.5) +
labs(x = "Soil pH", y = "Ellenberg R")
# seems reasonable - interestingly in 1978 the Ellenberg R value for any given
# pH is higher. So in model need to make sure that c3 varies by year, and not
# sure about the other parameters.
# other monotonic curves instead
# monomolecular/Mitscherlich law/von Bertalanffy law.
a <- 7
b <- 0
c <- 0.3
ym1 <- a - (a-b)*exp(-c*x)
plot(x,ym1)
# von bertalanffy
a <- 7
b <- 0.5
c <- -2
yb <- a*(1-exp(-b*(x-c)))
plot(x,yb)
# Michaelis Menten
a <- 9
b <- 4
ym <- a*x/(b + x)
# y2/y3 from the sigmoid section are reused for comparison alongside yb/ym
dat <- data.frame(x,ym,y2,y3,yb)
ggplot(ph_ell_long, aes(x = Soil_pH, y = Ell_R)) +
geom_point(size = 1) +
facet_wrap(~year) +
geom_smooth() +
geom_line(data = dat, aes(x = x, y = yb),
colour = "purple", size = 1.5) +
geom_line(data = dat, aes(x = x, y = y2),
colour = "red", size = 1.5) +
geom_line(data = dat, aes(x = x, y = y3),
colour = "orange", size = 1.5) +
labs(x = "Soil pH", y = "Ellenberg R")
# include CaCl2 but only recent years for graphical simplicity
phr_ell_wide <- ELL %>%
filter(REP_ID07 %in% PH$REP_ID) %>%
select(REP_ID = REP_ID07, R07, R16) %>%
full_join(select(PH, REP_ID, PH2007, PH2016),
suffix = c("_ellR","_phw"), by = "REP_ID") %>%
full_join(select(PHC, REP_ID, PHC2007, PHC2016))
psych::pairs.panels(select_if(phr_ell_wide, is.numeric),
ellipses = FALSE, rug = FALSE,
method = "spearman")
# Soil moisture ####
# Combine the 1998/2007 tier 4 moisture data with the 2019 survey, pick one
# REP_ID per plot (07 preferred), and standardise repeat-plot IDs.
MOISTURE <- CS_tier4 %>%
mutate(REP_ID = ifelse(!is.na(REP_ID07), REP_ID07,
ifelse(!is.na(REP_ID98), REP_ID98,
ifelse(!is.na(REP_ID78), REP_ID78, NA)))) %>%
select(REP_ID, MOISTURE_CONTENT_07, MOISTURE_CONTENT_98) %>%
full_join(select(UK19_WET, REP_ID, MOISTURE_CONTENT_19 = `g_water/wet_weight_of_soil`)) %>%
# mutate(VWC_19 = 100*VWC_19) %>%
pivot_longer(starts_with("MOISTURE"), names_to = "variable",
values_to = "Moisture") %>%
mutate(Year = ifelse(variable == "MOISTURE_CONTENT_07", 2007,
ifelse(variable == "MOISTURE_CONTENT_98", 1998,
ifelse(variable == "MOISTURE_CONTENT_19", 2019, NA)))) %>%
left_join(select(CS_REP_ID, REPEAT_PLOT_ID, REP_ID = Y07)) %>%
mutate(REP_ID = ifelse(!is.na(REPEAT_PLOT_ID), REPEAT_PLOT_ID, REP_ID)) %>%
select(-REPEAT_PLOT_ID)
ggplot(MOISTURE, aes(x = Moisture)) +
geom_histogram() +
facet_wrap(~Year, nrow = 3, scales = "free_y") +
labs(x = "Soil moisture (%)") +
scale_x_continuous(limits = c(0,100)) +
scale_y_continuous(expand = expansion(mult = c(0,0.1)))
ggsave("Soil moisture g per wet soil histograms.png", path = "Outputs/Graphs/",
width = 12, height = 15, units = "cm")
# Join long-format pH to soil moisture by plot and year; 2000 pH measurements
# are treated as the 1998 survey.
MOISTURE_PH <- PH_long %>%
mutate(year = ifelse(year == 2000, 1998, year)) %>%
full_join(rename(MOISTURE, year = Year)) %>%
filter(year %in% c(1998, 2007, 2019))
# pH vs moisture with a log-linear fit per survey year.
# FIX: formula was 'y ~ log(x) + log(x)' - the duplicated term is silently
# dropped by lm(), so the fit is unchanged; now matches the facetted plot
# below which already uses 'y ~ log(x)'.
ggplot(MOISTURE_PH, aes(x = Moisture, y = pH,
colour = as.factor(year), fill = as.factor(year))) +
geom_point(alpha = 0.5, size = 0.8) +
geom_smooth(formula = 'y ~ log(x)', method = "lm") +
scale_colour_brewer(palette = "Dark2", name = "Year") +
scale_fill_brewer(palette = "Dark2", name = "Year") +
labs(x = "Soil moisture (g/g wet soil)") +
scale_y_continuous(limits = c(3.3,9.2))
ggsave("Soil moisture vs pH log line.png", path = "Outputs/Graphs",
width =14, height = 12, units = "cm")
# Same pH vs moisture relationship, facetted by survey year
ggplot(MOISTURE_PH, aes(x = Moisture, y = pH)) +
geom_point(alpha = 0.5, size = 0.8, colour = "dodgerblue2") +
geom_smooth(colour = "black", formula = 'y ~ log(x)', method = "lm") +
facet_wrap(~year) +
labs(x = "Soil moisture (g/g wet soil)") +
scale_y_continuous(limits = c(3.3,9.2))
ggsave("Soil moisture vs pH facet log line.png", path = "Outputs/Graphs",
width =18, height = 12, units = "cm")
library(brms)
MOISTURE_PH$Year_cat <- as.factor(MOISTURE_PH$year)
# Inspect default priors for a non-linear double-exponential pH ~ moisture
# model (exploratory only; no fit is run here).
get_prior(bf(pH ~ a*exp(b*Moisture) + c*exp(d*Moisture) + e,
a + b + c + d + e ~1, nl = TRUE),
data = MOISTURE_PH)
# CHess soil moisture
# Month columns are letter-prefixed (AJan..LDec) so that as.factor sorts them
# chronologically; month_num therefore runs 1-12 in calendar order.
soil_moist_long <- soil_moist %>%
# `colnames<-`(c(paste0(LETTERS[1:14],colnames(soil_moist)))) %>%
pivot_longer(AJan:LDec, names_to = "Month", values_to = "Moisture") %>%
mutate(month_num = as.numeric(as.factor(Month)))
ggplot(soil_moist_long, aes(x = Measting, y = Nnorthing, fill = Moisture)) +
geom_tile() +
coord_fixed() +
facet_wrap(~month_num) +
theme(axis.ticks = element_blank(),
axis.line = element_blank(),
axis.text = element_blank(),
axis.title = element_blank())
ggsave("JULES soil moisture maps 2007.png",
path = "Outputs/Graphs/", width = 30, height = 30, units = "cm")
# Rainfall data ####
# Distribution of pre-survey rainfall by survey year
ggplot(cs_survey_rainfall, aes(x = mean_rainfall)) +
geom_histogram() +
facet_wrap(~Year) +
labs(x = "Average monthly rainfall for 4 months pre-survey")
p1 <- ggplot(cs_survey_rainfall, aes(x = mean_rainfall)) +
geom_histogram() +
facet_wrap(~Year, ncol = 1, scales = "free_y") +
scale_x_continuous(expand = c(0,0)) +
labs(x = "Average monthly rainfall for 4 months pre-survey")
p2 <- ggplot(cs_survey_rainfall, aes(x = sum_rainfall)) +
geom_histogram() +
facet_wrap(~Year, ncol = 1, scales = "free_y") +
scale_x_continuous(expand = c(0,0)) +
labs(x = "Total rainfall for 4 months pre-survey")
p1 + p2
# sum should be ~4x mean over a 4-month window; slope-4 line checks this
ggplot(cs_survey_rainfall, aes(x = mean_rainfall, y = sum_rainfall)) +
geom_point() +
geom_abline(slope=4, intercept = 0) +
facet_wrap(~Year)
# basically the same
p1
ggsave("Average monthly rainfall for 4 months pre-survey.png",
path = "Outputs/Graphs/",width = 12, height = 18, units = "cm")
# Rainfall and soil moisture ####
rain_moist <- cs_survey_rainfall %>% ungroup() %>%
mutate(Year = as.numeric(Year)) %>%
full_join(MOISTURE)
summary(rain_moist)
mice::md.pattern(rain_moist)
# which survey years lack rainfall data?
rain_moist %>% filter(is.na(mean_rainfall)) %>% .$Year %>% table()
ggplot(rain_moist, aes(x = mean_rainfall, y = Moisture,
colour = as.factor(Year))) +
geom_point()
ggplot(rain_moist, aes(x = mean_rainfall, y = Moisture)) +
geom_point() +
facet_wrap(~Year)
# add broad habitat and management context
rain_moist_hab <- left_join(rain_moist, BH_comb)
summary(rain_moist_hab)
ggplot(rain_moist_hab, aes(x = mean_rainfall, y = Moisture)) +
geom_point() +
facet_wrap(~BH_DESC)
rain_moist_man <- left_join(rain_moist, BH_IMP)
summary(rain_moist_man)
ggplot(na.omit(rain_moist_man),
aes(x = mean_rainfall, y = Moisture)) +
geom_point(aes(colour = Management, fill = Management)) +
geom_smooth(method="lm", colour = "black") +
geom_smooth(method="lm", aes(colour = Management, fill = Management)) +
facet_wrap(~Year)
# compare rainfall to sample and EO soil moisture
rain_moist_hab <- left_join(rain_moist_hab, cs_loc_moist07_long)
ggplot(rain_moist_hab, aes(x = mean_moisture, y = Moisture)) +
geom_point(aes(colour = month))
# 2007 only: join modelled (JULES/EO) moisture for the sampling month
rain_moist_hab07 <- rain_moist_hab %>%
filter(Year == 2007) %>%
full_join(cs_loc_moist07_sample_month)
ggplot(rain_moist_hab07, aes(x = eo_moisture, y = Moisture)) +
geom_point(aes(colour = month))
ggplot(rain_moist_hab07, aes(x = eo_moisture, y = Moisture)) +
geom_point() +
labs(x = "JULES soil moisture", y = "Field soil moisture")
ggsave("Field moisture compared to JULES soil moisture.png",
path = "Outputs/Graphs/", width = 20, height = 12, units = "cm")
ggplot(rain_moist_hab07, aes(x = eo_moisture, y = mean_moisture)) +
geom_point()
ggplot(rain_moist_hab07, aes(x = mean_rainfall, y = Moisture)) +
geom_point(aes(colour = eo_moisture))
# Attach 2007 plot grid references (easting/northing) for mapping
rain_moist_hab07 <- left_join(rename(rain_moist_hab07, REPEAT_PLOT_ID = REP_ID),
filter(plot_locations, YEAR == "y07"))
ggplot(rain_moist_hab07, aes(x = eo_moisture, y = Moisture)) +
geom_point(aes(colour = N_10_FIG_1M))
# Three maps on a shared layout: modelled moisture, field moisture, rainfall
p1_eo <- ggplot(filter(rain_moist_hab07, !is.na(eo_moisture)),
aes(x = E_10_FIG_1M, y = N_10_FIG_1M)) +
geom_point(aes(colour = eo_moisture)) +
coord_fixed() +
theme(axis.line = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()) +
scale_color_continuous(name = "") +
ggtitle("JULES soil moisture")
p2_meas <- ggplot(filter(rain_moist_hab07, !is.na(Moisture)),
aes(x = E_10_FIG_1M, y = N_10_FIG_1M)) +
geom_point(aes(colour = Moisture)) +
coord_fixed() +
theme(axis.line = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()) +
scale_color_continuous(name = "") +
ggtitle("Field soil moisture")
p3_rain <- ggplot(filter(rain_moist_hab07, !is.na(mean_rainfall)),
aes(x = E_10_FIG_1M, y = N_10_FIG_1M)) +
geom_point(aes(colour = mean_rainfall)) +
coord_fixed() +
theme(axis.line = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()) +
scale_color_continuous(name = "") +
ggtitle("Mean rainfall")
p1_eo + p2_meas + p3_rain
ggsave("Map of Soil moisture and rainfall over CS 2007.png",
path = "Outputs/Graphs", width = 30, height = 15, units = "cm")
# Corresponding marginal distributions (p1_eo etc. reused as histograms)
p1_eo <- ggplot(rain_moist_hab07, aes(x = eo_moisture)) +
geom_histogram(bins = 40)
p2_meas <- ggplot(rain_moist_hab07, aes(x = Moisture)) +
geom_histogram(bins = 40)
p3_rain <- ggplot(rain_moist_hab07, aes(x = mean_rainfall)) +
geom_histogram(bins = 40)
p1_eo + p2_meas + p3_rain
psych::pairs.panels(select(rain_moist_hab07, eo_moisture, Moisture,
mean_rainfall), rug=FALSE)
# How well do modelled moisture + rainfall predict field moisture?
summary(lm(Moisture ~ eo_moisture + mean_rainfall, data = rain_moist_hab07))
summary(lm(Moisture ~ mean_moisture + mean_rainfall, data = rain_moist_hab07))
# NOTE(review): PH_moisture is used here before any visible definition - it
# must be created earlier in the script (outside this chunk).
ggplot(PH_moisture,
aes(x = mean_moisture, y = PH_2007)) +
geom_point()
ggplot(PH_moisture,
aes(x = Moisture, y = PH_2007)) +
geom_point()
summary(lm(Moisture ~ mean_moisture + mean_rainfall, data = rain_moist_hab07))
# add 2007 carbon/nitrogen percentages for use as covariates
PH_moisture <- left_join(PH_moisture,
select(CS07_CN, REP_ID = REP_ID07,
C_PERCENT, N_PERCENT))
summary(lm(PH_2007 ~ log(C_PERCENT) + eo_moisture,
data = PH_moisture))
summary(lm(PHC_2007 ~ Moisture + log(C_PERCENT),
data = PH_moisture))
ggplot(PH_moisture,
aes(x = C_PERCENT, y = PH_2007)) +
geom_point() +
scale_x_log10()
# pH and atmospheric deposition ####
# ph_atdep is rebuilt several times below, once per pH-change variable,
# each joined to the deposition data for the matching survey year.
ph_atdep <- PH %>% select(REP_ID, diff7807) %>%
mutate(Year = 2007) %>%
left_join(CS_plot_atdep)
ggplot(ph_atdep, aes(x = Sdep, y = diff7807))+
geom_point()
ph_atdep <- PH %>%
select(REP_ID, diff7819) %>%
na.omit() %>%
mutate(Year = 2018) %>%
left_join(CS_plot_atdep)
ggplot(ph_atdep, aes(x = Sdep, y = diff7819))+
geom_point()
# 2007 -> 2019 change in both DIW (diff0719) and CaCl2 (diffc0719) pH
ph_atdep <- PH %>%
mutate(diffc0719 = PHC_2019 - PHC_2007) %>%
select(REP_ID, diff0719, diffc0719) %>%
filter(!is.na(diff0719)) %>%
mutate(Year = 2018) %>%
left_join(CS_plot_atdep)
ggplot(ph_atdep, aes(x = Sdep, y = diff0719))+
geom_point()
ggplot(ph_atdep, aes(x = Sdep, y = diffc0719))+
geom_point()
# absolute 2007 pH levels against sulphur deposition
ph_atdep <- PH %>% select(REP_ID, PH_2007, PHC_2007) %>%
mutate(Year = 2007) %>%
left_join(CS_plot_atdep)
ggplot(ph_atdep, aes(x = Sdep, y = PH_2007))+
geom_point()
ggplot(ph_atdep, aes(x = Sdep, y = PHC_2007))+
geom_point()
# soil N against N deposition
summary(CS07_CN)
# C:N ratio for 2007 and 1998, joined to cumulative N deposition
CN_atdep <- CS07_CN %>%
mutate(CN_ratio = C_PERCENT/N_PERCENT,
Year = 2007,
REP_ID = REP_ID07) %>%
select(REP_ID, Year, CN_ratio) %>%
full_join(mutate(CS98_CN,
CN_ratio = C_PERCENT/N_PERCENT,
Year = 1998,
REP_ID = REP_ID98)) %>%
left_join(CS_plot_atdep)
ggplot(CN_atdep,
aes(x = Ndep, y = CN_ratio)) +
geom_point() +
labs(x = "Cumulative N deposition", y = "C:N") +
facet_wrap(~Year + Habitat, nrow = 3)
# Soil nitrogen ###
# Investigating which total N based metric is most related to mineralisable
# N/nitrate. Options are N% or N:C, there are multiple metrics of mineralisable
# N but nitrate is known to be related to NPP.
# Build plot-level table: mineralisable N measurements keyed to REP_ID,
# with repeat-plot IDs substituted where available, joined to total C/N
Nitrogen <- CS07_MINN %>%
mutate(REP_ID = paste0(SQUARE_NUM, PLOT_TYPE, REP_NUM)) %>%
left_join(select(CS_REP_ID, REPEAT_PLOT_ID, REP_ID = Y07)) %>%
mutate(REP_ID = ifelse(!is.na(REPEAT_PLOT_ID), REPEAT_PLOT_ID, REP_ID)) %>%
select(-REPEAT_PLOT_ID) %>%
left_join(CS07_CN)
ggplot(Nitrogen, aes(x = N_PERCENT, y = NE_NMINTOT_SOIL)) +
geom_point() +
labs(x = "Total N (%)", y = "Total Mineralisable N (mg N / g dry soil)")
p3 <- ggplot(Nitrogen, aes(x = N_PERCENT, y = NE_NMINTOT_SOM)) +
geom_point() +
labs(x = "Total N (%)", y = "Total Mineralisable N (mg N / g LOI)") +
scale_y_log10()
# Spearman correlations recorded as comments below each call
cor(Nitrogen$N_PERCENT, Nitrogen$NE_NMINTOT_SOM, method = "spearman",
use = "complete.obs")
# [1] -0.5087721
ggplot(Nitrogen, aes(x = N_PERCENT, y = NE_NH4N_SOM)) +
geom_point() +
labs(x = "Total N (%)", y = "Total NH4 (mg N / g LOI)") +
scale_y_log10()
p2<- ggplot(Nitrogen, aes(x = N_PERCENT, y = NE_NO3N_SOM)) +
geom_point() +
labs(x = "Total N (%)", y = "Total NO3 (mg N / g LOI)") +
scale_y_log10()
cor(Nitrogen$N_PERCENT, Nitrogen$NE_NO3N_SOM, method = "spearman",
use = "complete.obs")
# [1] -0.5469838
# N:C (note: inverse of the usual C:N) used as alternative predictor
Nitrogen$NC_ratio <- Nitrogen$N_PERCENT/Nitrogen$C_PERCENT
# Same set of scatterplots/correlations but using N:C as predictor;
# p1-p4 collected and saved via patchwork below
ggplot(Nitrogen, aes(x = NC_ratio, y = NE_NMINTOT_SOIL)) +
geom_point() +
labs(x = "Total N:C", y = "Total Mineralisable N (mg N / g dry soil)") +
scale_y_log10()
p4 <- ggplot(Nitrogen, aes(x = NC_ratio, y = NE_NMINTOT_SOM)) +
geom_point() +
labs(x = "Total N:C", y = "Total Mineralisable N (mg N / g LOI)") +
scale_y_log10()
cor(Nitrogen$NC_ratio, Nitrogen$NE_NMINTOT_SOM, method = "spearman",
use = "complete.obs")
# 0.5645039
ggplot(Nitrogen, aes(x = NC_ratio, y = NE_NH4N_SOM)) +
geom_point() +
labs(x = "Total N:C", y = "Total NH4 (mg N / g LOI)") +
scale_y_log10()
cor(Nitrogen$NC_ratio, Nitrogen$NE_NH4N_SOM, method = "spearman",
use = "complete.obs")
# [1] 0.1507093
p1 <- ggplot(Nitrogen, aes(x = NC_ratio, y = NE_NO3N_SOM)) +
geom_point() +
labs(x = "Total N:C", y = "Total NO3 (mg N / g LOI)") +
scale_y_log10()
cor(Nitrogen$NC_ratio, Nitrogen$NE_NO3N_SOM, method = "spearman",
use = "complete.obs")
# 0.608635
# patchwork composition (patchwork assumed loaded earlier in file)
p2+p1+p3+p4
ggsave("Total N and mineralisable N by LOI.png",
path = "Outputs/Graphs/", width = 25, height = 20, units = "cm")
# Add 2007 pH to the Nitrogen table and explore pH vs N metrics
Nitrogen <- left_join(Nitrogen, select(CS07_PH, -BATCH_NUM))
ggplot(Nitrogen, aes(x = NC_ratio, y = PH2007_IN_WATER)) +
geom_point() +
labs(x = "Total N:C", y = "pH") +
scale_y_log10()
ggplot(Nitrogen, aes(x = N_PERCENT, y = C_PERCENT)) +
geom_point() +
labs(x = "Total N", y = "Total C") +
scale_y_log10()
ggplot(Nitrogen, aes(x = NC_ratio, y = NE_NMINTOT_SOM,
colour = PH2007_IN_WATER)) +
geom_point() +
labs(x = "Total N:C", y = "Total Mineralisable N (mg N / g LOI)") +
scale_y_log10()
# Conditioning plots: same pair of variables, both orientations
coplot(NC_ratio ~ log(NE_NMINTOT_SOM) | PH2007_IN_WATER, Nitrogen)
coplot(log(NE_NMINTOT_SOM) ~ NC_ratio | PH2007_IN_WATER, Nitrogen)
# Four-panel figure: pH against each N metric with a linear smooth
p1 <- ggplot(Nitrogen, aes(x = NC_ratio, y = PH2007_IN_WATER)) +
geom_point() +
labs(x = "Total N:C", y = "pH") +
geom_smooth(method = "lm", fill = "#3366FF")
p2 <- ggplot(Nitrogen, aes(x = NE_NMINTOT_SOM, y = PH2007_IN_WATER)) +
geom_point() +
labs(x = "Total Mineralisable N (mg N / g LOI)", y = "pH") +
scale_x_log10() +
geom_smooth(method = "lm", fill = "#3366FF")
p3 <- ggplot(Nitrogen, aes(x = NE_NMINTOT_SOIL, y = PH2007_IN_WATER)) +
geom_point() +
labs(x = "Total Mineralisable N (mg N / g soil)", y = "pH") +
scale_x_log10() +
geom_smooth(method = "lm", fill = "#3366FF")
p4 <- ggplot(Nitrogen, aes(x = NE_NO3N_SOM, y = PH2007_IN_WATER)) +
geom_point() +
labs(x = "Total NO3-N (mg N / g LOI)", y = "pH") +
scale_x_log10() +
geom_smooth(method = "lm", fill = "#3366FF")
p1 + p2 + p3 + p4
ggsave("pH and N measurements.png", path = "Outputs/Graphs",
width = 25, height = 20, units = "cm")
# Repeat the pH-vs-N panels split by Management type (from 2007 broad
# habitat / improvement table BH_IMP)
Nitrogen2 <- Nitrogen %>%
left_join(filter(BH_IMP, Year == 2007)) %>%
filter(!is.na(Management))
p1 <- ggplot(Nitrogen2, aes(x = NC_ratio, y = PH2007_IN_WATER, colour = Management)) +
geom_point() +
labs(x = "Total N:C", y = "pH") +
geom_smooth(method = "lm", aes(fill = Management))
p2 <- ggplot(Nitrogen2, aes(x = NE_NMINTOT_SOM, y = PH2007_IN_WATER, colour = Management)) +
geom_point() +
labs(x = "Total Mineralisable N (mg N / g LOI)", y = "pH") +
scale_x_log10() +
geom_smooth(method = "lm", aes(fill = Management))
p3 <- ggplot(Nitrogen2, aes(x = NE_NMINTOT_SOIL, y = PH2007_IN_WATER, colour = Management)) +
geom_point() +
labs(x = "Total Mineralisable N (mg N / g soil)", y = "pH") +
scale_x_log10() +
geom_smooth(method = "lm", aes(fill = Management))
p4 <- ggplot(Nitrogen2, aes(x = NE_NO3N_SOM, y = PH2007_IN_WATER, colour = Management)) +
geom_point() +
labs(x = "Total NO3-N (mg N / g LOI)", y = "pH") +
scale_x_log10() +
geom_smooth(method = "lm", aes(fill = Management))
# single shared legend across the four panels
p1 + p2 + p3 + p4 + plot_layout(guides="collect")
ggsave("pH and N measurements by Management type.png", path = "Outputs/Graphs",
width = 25, height = 20, units = "cm")
# Ellenberg R scores (four variants: full/small quadrat x weighted/unweighted)
# against N:C, coloured by Management
Nitrogen3 <- Nitrogen2 %>%
left_join(filter(X_Ell, Year == 2007) %>%
select(REP_ID, contains("_R_")))
p1 <- ggplot(Nitrogen3, aes(x = NC_ratio, y = WH_R_W, colour = Management)) +
geom_point() +
labs(x = "Total N:C", y = "Ellenberg R (full X weighted)") +
geom_smooth(method = "lm", aes(fill = Management))
p2 <- ggplot(Nitrogen3, aes(x = NC_ratio, y = WH_R_UW, colour = Management)) +
geom_point() +
labs(x = "Total N:C", y = "Ellenberg R (full X unweighted)") +
geom_smooth(method = "lm", aes(fill = Management))
p3 <- ggplot(Nitrogen3, aes(x = NC_ratio, y = SM_R_W, colour = Management)) +
geom_point() +
labs(x = "Total N:C", y = "Ellenberg R (small X weighted)") +
geom_smooth(method = "lm", aes(fill = Management))
p4 <- ggplot(Nitrogen3, aes(x = NC_ratio, y = SM_R_UW, colour = Management)) +
geom_point() +
labs(x = "Total N:C", y = "Ellenberg R (small X unweighted)") +
geom_smooth(method = "lm", aes(fill = Management))
p1+p2+p3+p4+ plot_layout(guides="collect")
ggsave("Ellenberg R and NC by Management type.png", path = "Outputs/Graphs",
width = 25, height = 20, units = "cm")
# Moisture and rainfall and pH ####
# Look at whether soil moisture actually predicted by change in rainfall to
# evaluate whether change in rainfall is a useful metric
rain_moist <- cs_survey_rainfall %>% ungroup() %>%
mutate(Year = as.numeric(Year)) %>%
full_join(MOISTURE)
# Between-survey differences in mean rainfall and soil moisture,
# reshaped so each row is REP_ID x time period
test <- rain_moist %>% select(-sum_rainfall) %>%
pivot_wider(names_from = Year, values_from = c(mean_rainfall, Moisture)) %>%
mutate(Moisture_diff9807 = Moisture_2007 - Moisture_1998,
Moisture_diff0719 = Moisture_2019 - Moisture_2007,
rain_diff9807 = mean_rainfall_2007 - mean_rainfall_1998,
rain_diff0719 = mean_rainfall_2019 - mean_rainfall_2007) %>%
select(REP_ID, contains("diff")) %>%
pivot_longer(contains("diff"), names_to = c("Variable","Time_period"),
names_sep = "_diff") %>%
pivot_wider(names_from = Variable, values_from = value) %>%
na.omit()
# Also combine with LOI as moisture highly dependent on SOM
# (2016 LOI used as a stand-in where 2019 is missing)
test <- LOI %>%
mutate(LOI_2019 = ifelse(!is.na(LOI_2019), LOI_2019, LOI_2016)) %>%
mutate(LOI_diff9807 = LOI_2007 - LOI_1998,
LOI_diff0719 = LOI_2019 - LOI_2007) %>%
select(REP_ID, contains("diff")) %>%
pivot_longer(contains("diff"), names_to = c("Variable","Time_period"),
names_sep = "_diff") %>%
pivot_wider(names_from = Variable, values_from = value) %>%
right_join(test)
ggplot(test, aes(x = rain, y = Moisture)) +
geom_point() +
geom_smooth(method = "lm")
ggplot(test, aes(x = LOI, y = Moisture)) +
geom_point() +
geom_smooth(method = "lm")
summary(lm(Moisture ~ rain+LOI, test))
# Moisture difference predicted by rain + LOI difference (19%)
# however we want to predict change in pH
test2 <- PH_diff_long %>%
mutate(Time_period = gsub("diff","",as.character(name))) %>%
right_join(test)
ggplot(test2, aes(x = rain, y = pH)) +
geom_point() +
geom_smooth(method = "lm")
ggplot(test2, aes(x = LOI, y = pH)) +
geom_point() +
geom_smooth(method = "lm")
ggplot(test2, aes(x = Moisture, y = pH)) +
geom_point() +
geom_smooth(method = "lm")
summary(lm(pH ~ Moisture*rain*LOI, test2))
summary(lm(pH ~ Moisture, test2))
summary(lm(pH ~ rain, test2))
summary(lm(pH ~ LOI, test2))
# pretty low variance explained, change in rainfall explained a lot more than
# change in soil moisture or LOI but still low
# Ran the above for different time windows of rainfall calculation and looked at
# correlation. This supported initial assumption that 4 months was best based on
# evidence provided by Don about the upwater monitoring network
# NOTE: the four identical cor() calls below were each run after manually
# regenerating `test2` with a different rainfall window; the recorded
# results are in the comments
# 2 month calculation
cor(test2$pH, test2$rain, use = "complete.obs")
# 0.05777657
# 3 month calculation
cor(test2$pH, test2$rain, use = "complete.obs")
# [1] 0.08958804
# 4 month calculation
cor(test2$pH, test2$rain, use = "complete.obs")
#[1] 0.1373099
# 5 month calculation
cor(test2$pH, test2$rain, use = "complete.obs")
# [1] 0.1050855
# Species functional graphs ####
str(Sp_Ell)
# Proportion of species with each function (1 = yes) per Ellenberg R score,
# spread wide for inspection
Sp_Ell %>%
na.omit() %>%
pivot_longer(BUTTLARVFOOD:Low_grass, names_to = "Function",
values_to = "Count") %>%
mutate(Count = ifelse(Count == 1, "Yes","No")) %>%
group_by(EBERGR, Function) %>%
count(Count) %>%
ungroup() %>% group_by(Function, EBERGR) %>%
mutate(prop = n/sum(n, na.rm = TRUE)) %>%
filter(Count == "Yes") %>%
select(-n,-Count) %>%
pivot_wider(names_from = Function, values_from = prop)
# Stacked bar chart: number of species per Ellenberg R score, split into
# functional vs non-functional for three plant functions.
# Uses a purrr-style lambda in across() because passing extra arguments
# through `...` (across(cols, sum, na.rm = TRUE)) is deprecated in dplyr.
p1 <- Sp_Ell %>%
  select(BRC_NUMBER, EBERGR, BUTTLARVFOOD,
         KgSughacovyr, Low_grass) %>%
  unique() %>%
  filter(!is.na(EBERGR)) %>%
  # Total = 1 per species so sum(Total) gives species count per R score
  mutate(Total = 1) %>%
  group_by(EBERGR) %>%
  summarise(across(BUTTLARVFOOD:Total, ~ sum(.x, na.rm = TRUE))) %>%
  pivot_longer(c(BUTTLARVFOOD:Low_grass), names_to = "Function",
               values_to = "Yes_Functional") %>%
  mutate(Not_functional = Total - Yes_Functional) %>%
  pivot_longer(c(Not_functional, Yes_Functional), names_to = "Something",
               values_to = "Count") %>%
  mutate(Function = recode(Function,
                           "BUTTLARVFOOD" = "Butterfly larvae food",
                           "KgSughacovyr" = "Nectar producing",
                           "Low_grass" = "Lowland grass indicators")) %>%
  ggplot(aes(x = EBERGR, y = Count, fill = Something)) +
  geom_bar(stat = "identity") +
  scale_fill_manual(values = c("grey","black")) +
  facet_wrap(~Function, ncol = 1) +
  labs(x = "Ellenberg R", y = "Number of plant species") +
  theme_minimal() +
  theme(legend.position = "none",
        panel.grid = element_blank()) +
  NULL
# p2: per-plot proportion of species with each function vs Ellenberg R
p2 <- Ell_F %>%
select(Year, REP_ID, WH_R, F_BUTTLARVFOOD,
F_KgSughacovyr,
F_Low_grass) %>%
pivot_longer(starts_with("F_"), names_to = "Function",
values_to = "value") %>%
mutate(Function = recode(Function,
"F_BUTTLARVFOOD" = "Butterfly larvae food",
"F_KgSughacovyr" = "Nectar producing",
"F_Low_grass" = "Lowland grass indicators")) %>%
ggplot(aes(x = WH_R, y = value))+
geom_point(alpha = 0.1, colour = "#0072B2") +
facet_wrap(~Function, ncol = 1) +
labs(x = "Ellenberg R", y = "Proportion of plant species") +
theme_minimal() +
# geom_smooth() +
theme(axis.line = element_line(colour = "grey"),
panel.grid = element_blank()) +
NULL
# p3: per-plot group richness (Fr_ columns) vs Ellenberg R
p3 <- Ell_F %>%
select(Year, REP_ID, WH_R, Fr_BUTTLARVFOOD,
Fr_KgSughacovyr,
Fr_Low_grass) %>%
pivot_longer(starts_with("Fr"), names_to = "Function",
values_to = "value") %>%
mutate(Function = recode(Function,
"Fr_BUTTLARVFOOD" = "Butterfly larvae food",
"Fr_KgSughacovyr" = "Nectar producing",
"Fr_Low_grass" = "Lowland grass indicators")) %>%
ggplot(aes(x = WH_R, y = value))+
geom_point(alpha = 0.1, colour = "#0072B2") +
facet_wrap(~Function, ncol = 1) +
labs(x = "Ellenberg R", y = "Group richness") +
theme_minimal() +
# geom_smooth() +
theme(axis.line = element_line(colour = "grey"),
panel.grid = element_blank()) +
NULL
p1 + p2 + p3
ggsave("Ellenberg R and plant functions.png",
path = "Outputs/Graphs/",
width = 25, height = 15, units= "cm")
p1+p3
ggsave("Ellenberg R and plant functions cols 1 and 3.png",
path = "Outputs/Graphs/",
width = 18, height = 15, units= "cm")
# v2: same stacked bar chart but for ALL function columns (Coastal:Arable).
# across() lambda replaces the deprecated extra-argument form
# (across(cols, sum, na.rm = TRUE)).
p1 <- Sp_Ell %>%
  select(-starts_with("nrec"), -contains("name"),
         -Sphag_SPECIES) %>%
  unique() %>%
  filter(!is.na(EBERGR)) %>%
  # Total = 1 per species so sum(Total) gives species count per R score
  mutate(Total = 1) %>%
  group_by(EBERGR) %>%
  summarise(across(Coastal:Total, ~ sum(.x, na.rm = TRUE))) %>%
  pivot_longer(c(Coastal:Arable), names_to = "Function",
               values_to = "Yes_Functional") %>%
  mutate(Not_functional = Total - Yes_Functional) %>%
  pivot_longer(c(Not_functional, Yes_Functional), names_to = "Something",
               values_to = "Count") %>%
  ggplot(aes(x = EBERGR, y = Count, fill = Something)) +
  geom_bar(position = "stack", stat = "identity") +
  scale_fill_manual(values = c("grey","black")) +
  facet_wrap(~Function, ncol = 1) +
  labs(x = "Ellenberg R", y = "Number of plant species") +
  theme_minimal() +
  theme(legend.position = "none",
        panel.grid = element_blank()) +
  NULL
# v2 p2: proportion of species for every F_ function column vs Ellenberg R
p2 <- Ell_F %>%
select(Year, REP_ID, WH_R, starts_with("F_")) %>%
select(-F_Sphag_SPECIES) %>%
filter(!is.na(WH_R)) %>%
pivot_longer(starts_with("F_"), names_to = "Function",
values_to = "value") %>%
ggplot(aes(x = WH_R, y = value))+
geom_point(alpha = 0.1, colour = "#0072B2") +
facet_wrap(~Function, ncol = 1) +
labs(x = "Ellenberg R", y = "Proportion of plant species") +
theme_minimal() +
# geom_smooth() +
theme(axis.line = element_line(colour = "grey"),
panel.grid = element_blank()) +
NULL
# v2 p3: group richness for every Fr_ column vs Ellenberg R
# (the recode keys here don't match the Fr_ column names so most labels
# pass through unchanged - TODO confirm this is intended)
p3 <- Ell_F %>%
select(Year, REP_ID, WH_R, starts_with("Fr")) %>%
select(-Fr_Sphag_SPECIES) %>%
filter(!is.na(WH_R)) %>%
pivot_longer(starts_with("Fr"), names_to = "Function",
values_to = "value") %>%
mutate(Function = recode(Function,
"Fr_Butt" = "Butterfly larvae food",
"Fr_Nectar" = "Nectar producing",
"Fr_Lgrass" = "Lowland grass indicators")) %>%
ggplot(aes(x = WH_R, y = value))+
geom_point(alpha = 0.1, colour = "#0072B2") +
facet_wrap(~Function, ncol = 1,
scales = "free_y") +
labs(x = "Ellenberg R", y = "Group richness") +
theme_minimal() +
# geom_smooth() +
theme(axis.line = element_line(colour = "grey"),
panel.grid = element_blank()) +
NULL
p1 + p2 + p3
ggsave("Ellenberg R and plant functions v2.png",
path = "Outputs/Graphs/",
width = 25, height = 100, units= "cm")
# v3: stacked bar chart for an expanded set of seven plant functions.
# across() lambda replaces the deprecated extra-argument form
# (across(cols, sum, na.rm = TRUE)).
p1 <- Sp_Ell %>%
  select(BRC_NUMBER, EBERGR, BUTTLARVFOOD,
         KgSughacovyr, Low_grass, Cwr, BIRDFOOD,
         Forage_grasses, IW) %>%
  unique() %>%
  filter(!is.na(EBERGR)) %>%
  # Total = 1 per species so sum(Total) gives species count per R score
  mutate(Total = 1) %>%
  group_by(EBERGR) %>%
  summarise(across(BUTTLARVFOOD:Total, ~ sum(.x, na.rm = TRUE))) %>%
  pivot_longer(c(BUTTLARVFOOD:IW), names_to = "Function",
               values_to = "Yes_Functional") %>%
  mutate(Not_functional = Total - Yes_Functional) %>%
  pivot_longer(c(Not_functional, Yes_Functional), names_to = "Something",
               values_to = "Count") %>%
  mutate(Function = recode(Function,
                           "BUTTLARVFOOD" = "Butterfly larvae food",
                           "KgSughacovyr" = "Nectar producing",
                           "Low_grass" = "Lowland grass indicators",
                           "BIRDFOOD" = "Bird food",
                           "Cwr" = "Crop wild relatives",
                           "Forage_grasses" = "Forage grasses",
                           "IW" = "Injurious weeds")) %>%
  ggplot(aes(x = EBERGR, y = Count, fill = Something)) +
  geom_bar(stat = "identity") +
  scale_fill_manual(values = c("grey","black")) +
  facet_wrap(~Function, ncol = 1) +
  labs(x = "Ellenberg R", y = "Number of plant species") +
  theme_minimal() +
  theme(legend.position = "none",
        panel.grid = element_blank()) +
  NULL
# v3 p3: group richness for the seven expanded functions vs Ellenberg R.
# Fix: Fr_Low_grass was listed twice in the select(); the duplicate is
# removed (dplyr selects a column once regardless, so behavior is identical).
p3 <- Ell_F %>%
  select(Year, REP_ID, WH_R, Fr_BUTTLARVFOOD,
         Fr_KgSughacovyr,
         Fr_Low_grass, Fr_Cwr, Fr_BIRDFOOD,
         Fr_Forage_grasses, Fr_IW) %>%
  pivot_longer(starts_with("Fr"), names_to = "Function",
               values_to = "value") %>%
  mutate(Function = recode(Function,
                           "Fr_BUTTLARVFOOD" = "Butterfly larvae food",
                           "Fr_KgSughacovyr" = "Nectar producing",
                           "Fr_Low_grass" = "Lowland grass indicators",
                           "Fr_BIRDFOOD" = "Bird food",
                           "Fr_Cwr" = "Crop wild relatives",
                           "Fr_Forage_grasses" = "Forage grasses",
                           "Fr_IW" = "Injurious weeds")) %>%
  ggplot(aes(x = WH_R, y = value))+
  geom_point(alpha = 0.1, colour = "#0072B2") +
  facet_wrap(~Function, ncol = 1, scales = "free_y") +
  labs(x = "Ellenberg R", y = "Group richness") +
  theme_minimal() +
  # geom_smooth() +
  theme(axis.line = element_line(colour = "grey"),
        panel.grid = element_blank()) +
  NULL
p1 + p3
ggsave("Ellenberg R and plant functions v3.png",
       path = "Outputs/Graphs/",
       width = 15, height = 25, units= "cm")
# Nectar value vs Ellenberg R
ggplot(X_Ell_nect, aes(x = WH_R, y = Nectar)) +
geom_point(alpha = 0.1) +
scale_y_log10()
# Bayesian model of butterfly-food proportion against Ellenberg R with
# year-by-square random effects and an AR(1) term over survey years
library(brms)
# weakly-informative priors for all parameter classes
mod_pr <- c(prior(normal(0,1), class = "b"),
prior(student_t(3, 0, 1), class = "Intercept"),
prior(student_t(3, 0, 1), class = "sd"),
prior(student_t(3, 0, 1), class = "sigma"),
prior(normal(0,1), class = "ar"))
str(Ell_F)
# SQUARE = leading digits of REP_ID (everything before the first letter);
# YRnm = integer year index needed by ar()
Ell_F <- Ell_F %>% ungroup() %>%
mutate(SQUARE = sapply(strsplit(REP_ID, "[A-Z]"),"[",1),
YR = as.factor(Year),
YRnm = as.integer(YR))
buttmod <- brm(F_Butt ~ WH_R + (YR|YR*SQUARE) +
ar(time = YRnm, gr = REP_ID),
data = Ell_F, prior = mod_pr, cores = 4,
iter = 4000)
| /02_Exploratory_graphs.R | no_license | fseaton/Sdep-analysis | R | false | false | 70,967 | r | # Code for plotting out data for exploratory purposes
library(dplyr)
library(tidyr)
library(ggplot2)
theme_set(theme_classic())
library(gganimate)
library(mice)
library(janitor)
library(patchwork)
# atmospheric data plots ####
# Sdep maps
str(Sdep_avg)
# NOTE(review): melt() is from reshape2/data.table, which is not loaded in
# the visible library() calls above - confirm it is attached elsewhere
Sdep_avg_long <- melt(Sdep_avg, id.vars = c("x","y"))
# year is characters 9-12 of the column name (e.g. "Sdepavg_1970")
Sdep_avg_long$year <- substring(Sdep_avg_long$variable, 9,12)
ggplot(Sdep_avg_long, aes(x = x, y = y, fill = value)) +
geom_tile() +
scale_fill_viridis_c(name = "") +
coord_fixed() +
facet_wrap(~year, nrow = 5) +
labs(x = "",y = "", title = "Sdep grid average kgS / ha") +
theme(axis.text = element_blank(), axis.ticks = element_blank())
ggsave("Sdep grid average maps.png", path = "Outputs/Graphs/",
width = 28, height = 28, units = "cm")
# animated version
Sdep_avg_long %>%
mutate(year = as.integer(year)) %>%
ggplot(aes(x = x, y = y, fill = value)) +
geom_tile() +
scale_fill_viridis_c(name = "Sdep (kgS/ha)") +
coord_fixed() +
theme(axis.text = element_blank(), axis.ticks = element_blank()) +
labs(title = 'Year: {frame_time}', x = '', y = '') +
transition_time(year) +
ease_aes('linear')
# Forest-specific deposition (different name prefix, hence offsets 8-11)
str(Sdep_for)
Sdep_for_long <- melt(Sdep_for, id.vars = c("x","y"))
Sdep_for_long$year <- substring(Sdep_for_long$variable, 8,11)
ggplot(Sdep_for_long, aes(x = x, y = y, fill = value)) +
geom_tile() +
scale_fill_viridis_c(name = "") +
coord_fixed() +
facet_wrap(~year, nrow = 5) +
labs(x = "",y = "", title = "Sdep forest kgS / ha") +
theme(axis.text = element_blank(), axis.ticks = element_blank())
ggsave("Sdep forest maps.png", path = "Outputs/Graphs/",
width = 28, height = 28, units = "cm")
# Moorland-specific deposition (offsets 6-9 for its name prefix)
str(Sdep_moo)
Sdep_moo_long <- melt(Sdep_moo, id.vars = c("x","y"))
Sdep_moo_long$year <- substring(Sdep_moo_long$variable, 6,9)
ggplot(Sdep_moo_long, aes(x = x, y = y, fill = value)) +
geom_tile() +
scale_fill_viridis_c(name = "") +
coord_fixed() +
facet_wrap(~year, nrow = 5) +
labs(x = "",y = "", title = "Sdep moorland kgS / ha") +
theme(axis.text = element_blank(), axis.ticks = element_blank())
ggsave("Sdep moorland maps.png", path = "Outputs/Graphs/",
width = 28, height = 28, units = "cm")
# Ndep maps
# Same three map panels as Sdep above, for N deposition
str(Ndep_avg)
Ndep_avg_long <- melt(Ndep_avg, id.vars = c("x","y"))
Ndep_avg_long$year <- substring(Ndep_avg_long$variable, 9,12)
ggplot(Ndep_avg_long, aes(x = x, y = y, fill = value)) +
geom_tile() +
scale_fill_viridis_c(name = "") +
coord_fixed() +
facet_wrap(~year, nrow = 5) +
labs(x = "",y = "", title = "Ndep grid average kgN / ha") +
theme(axis.text = element_blank(), axis.ticks = element_blank())
ggsave("Ndep grid average maps.png", path = "Outputs/Graphs/",
width = 28, height = 28, units = "cm")
str(Ndep_for)
Ndep_for_long <- melt(Ndep_for, id.vars = c("x","y"))
Ndep_for_long$year <- substring(Ndep_for_long$variable, 8,11)
ggplot(Ndep_for_long, aes(x = x, y = y, fill = value)) +
geom_tile() +
scale_fill_viridis_c(name = "") +
coord_fixed() +
facet_wrap(~year, nrow = 5) +
labs(x = "",y = "", title = "Ndep forest kgN / ha") +
theme(axis.text = element_blank(), axis.ticks = element_blank())
ggsave("Ndep forest maps.png", path = "Outputs/Graphs/",
width = 28, height = 28, units = "cm")
str(Ndep_moo)
Ndep_moo_long <- melt(Ndep_moo, id.vars = c("x","y"))
Ndep_moo_long$year <- substring(Ndep_moo_long$variable, 6,9)
ggplot(Ndep_moo_long, aes(x = x, y = y, fill = value)) +
geom_tile() +
scale_fill_viridis_c(name = "") +
coord_fixed() +
facet_wrap(~year, nrow = 5) +
labs(x = "",y = "", title = "Ndep moorland kgN / ha") +
theme(axis.text = element_blank(), axis.ticks = element_blank())
ggsave("Ndep moorland maps.png", path = "Outputs/Graphs/",
width = 28, height = 28, units = "cm")
# Plot Sdep and Ndep against each other
# Rename value columns so the two long tables can be merged on grid cell/year
colnames(Sdep_avg_long) <- c("x","y","variable","Sdep","year")
colnames(Ndep_avg_long) <- c("x","y","variable","Ndep","year")
AtDep_avg <- merge(Sdep_avg_long, Ndep_avg_long, by = c("x","y","variable","year"))
str(AtDep_avg)
AtDep_avg$year <- as.integer(AtDep_avg$year)
AtDep_avg <- na.omit(AtDep_avg)
# square = unique grid-cell id used as the animation group
AtDep_avg$square <- paste(AtDep_avg$x, AtDep_avg$y, sep = "_")
# animated S vs N scatter over years
ggplot(AtDep_avg, aes(x = Sdep, y = Ndep)) +
geom_point(aes(group = square)) +
labs(title = 'Year: {frame_time}', x = 'Sdep (kgS/ha)', y = 'Ndep (kgN/ha)') +
transition_time(year) +
ease_aes('linear')
# animated side-by-side maps of both elements
AtDep_avg %>%
pivot_longer(Sdep:Ndep, names_to = "Element",
values_to = "measure") %>%
mutate(Element = ifelse(Element == "Sdep", "Sdep (kgS/ha)", "Ndep (kgN/ha)")) %>%
ggplot(aes(x = x, y = y, fill = measure)) +
geom_tile() +
scale_fill_viridis_c(name = "", na.value = "white") +
coord_fixed() +
facet_wrap(~Element) +
theme(axis.text = element_blank(), axis.ticks = element_blank(),
axis.line = element_blank()) +
labs(title = 'Year: {frame_time}', x = '', y = '') +
transition_time(year) +
ease_aes('linear')
# forest
# same merge + animations for forest-specific deposition
colnames(Sdep_for_long) <- c("x","y","variable","Sdep","year")
colnames(Ndep_for_long) <- c("x","y","variable","Ndep","year")
AtDep_for <- merge(Sdep_for_long, Ndep_for_long, by = c("x","y","variable","year"))
str(AtDep_for)
AtDep_for$year <- as.integer(AtDep_for$year)
AtDep_for <- na.omit(AtDep_for)
AtDep_for$square <- paste(AtDep_for$x, AtDep_for$y, sep = "_")
ggplot(AtDep_for, aes(x = Sdep, y = Ndep)) +
geom_point(aes(group = square)) +
labs(title = 'Year: {frame_time}', x = 'Sdep (kgS/ha)', y = 'Ndep (kgN/ha)') +
transition_time(year) +
ease_aes('linear')
AtDep_for %>%
pivot_longer(Sdep:Ndep, names_to = "Element",
values_to = "measure") %>%
mutate(Element = ifelse(Element == "Sdep", "Sdep (kgS/ha)", "Ndep (kgN/ha)")) %>%
ggplot(aes(x = x, y = y, fill = measure)) +
geom_tile() +
scale_fill_viridis_c(name = "", na.value = "white") +
coord_fixed() +
facet_wrap(~Element) +
theme(axis.text = element_blank(), axis.ticks = element_blank(),
axis.line = element_blank()) +
labs(title = 'Year: {frame_time}', x = '', y = '') +
transition_time(year) +
ease_aes('linear')
# moorland
# same merge + animations for moorland-specific deposition
colnames(Sdep_moo_long) <- c("x","y","variable","Sdep","year")
colnames(Ndep_moo_long) <- c("x","y","variable","Ndep","year")
AtDep_moo <- merge(Sdep_moo_long, Ndep_moo_long, by = c("x","y","variable","year"))
str(AtDep_moo)
AtDep_moo$year <- as.integer(AtDep_moo$year)
AtDep_moo <- na.omit(AtDep_moo)
AtDep_moo$square <- paste(AtDep_moo$x, AtDep_moo$y, sep = "_")
ggplot(AtDep_moo, aes(x = Sdep, y = Ndep)) +
geom_point(alpha = 0.2, aes(group = square)) +
labs(title = 'Year: {frame_time}', x = 'Sdep (kgS/ha)', y = 'Ndep (kgN/ha)') +
transition_time(year) +
ease_aes('linear')
AtDep_moo %>%
pivot_longer(Sdep:Ndep, names_to = "Element",
values_to = "measure") %>%
mutate(Element = ifelse(Element == "Sdep", "Sdep (kgS/ha)", "Ndep (kgN/ha)")) %>%
ggplot(aes(x = x, y = y, fill = measure)) +
geom_tile() +
scale_fill_viridis_c(name = "", na.value = "white") +
coord_fixed() +
facet_wrap(~Element) +
theme(axis.text = element_blank(), axis.ticks = element_blank(),
axis.line = element_blank()) +
labs(title = 'Year: {frame_time}', x = '', y = '') +
transition_time(year) +
ease_aes('linear')
# Cumulative deposition per square ####
# Sum S and N deposition within four survey-aligned eras, one column per
# element x era after pivot_wider
AtDepavg_cumdep_sq <- AtDep_avg %>%
mutate(year_cat = ifelse(year %in% 1970:1978, "70_78",
ifelse(year %in% 1979:1998, "79_98",
ifelse(year %in% 1999:2007, "99_07",
ifelse(year %in% 2008:2018, "08_18",NA))))) %>%
group_by(x,y,square, year_cat) %>%
summarise(Sdep = sum(Sdep),
Ndep = sum(Ndep)) %>%
ungroup() %>%
pivot_wider(names_from = year_cat,
values_from = c(Sdep, Ndep))
psych::pairs.panels(select(AtDepavg_cumdep_sq,-x,-y,-square))
# merge with CS plots
# Match each CS plot to its nearest deposition grid cell by independent
# nearest x and nearest y (not true nearest-neighbour distance)
dep_x <- AtDepavg_cumdep_sq$x
dep_y <- AtDepavg_cumdep_sq$y
CS_m <- CS07_PLOTS %>% select(plot_x = POINT_X,
plot_y = POINT_Y, REP_ID)
for(i in 1:nrow(CS_m)) {
CS_m[i,"x"] <- dep_x[which.min(abs(dep_x - CS_m$plot_x[i]))]
CS_m[i,"y"] <- dep_y[which.min(abs(dep_y - CS_m$plot_y[i]))]
}
CS_Atdep <- left_join(CS_m, AtDepavg_cumdep_sq)
summary(CS_Atdep)
psych::multi.hist(select_if(CS_Atdep, is.numeric))
# Same era sums for forest-specific deposition
AtDepfor_cumdep_sq <- AtDep_for %>%
mutate(year_cat = ifelse(year %in% 1970:1978, "70_78",
ifelse(year %in% 1979:1998, "79_98",
ifelse(year %in% 1999:2007, "99_07",
ifelse(year %in% 2008:2018, "08_18",NA))))) %>%
group_by(x,y,square, year_cat) %>%
summarise(Sdep = sum(Sdep),
Ndep = sum(Ndep)) %>%
ungroup() %>%
pivot_wider(names_from = year_cat,
values_from = c(Sdep, Ndep))
# difference since 1970
# NOTE(review): this block is an exact copy of the cumulative-sum pipeline
# above - it computes era sums, not differences, and the pairs.panels and
# dep_x/dep_y lines below also reference the *cumdep* table. Looks like an
# unfinished copy-paste; confirm intent before relying on AtDepavg_diff_sq.
AtDepavg_diff_sq <- AtDep_avg %>%
mutate(year_cat = ifelse(year %in% 1970:1978, "70_78",
ifelse(year %in% 1979:1998, "79_98",
ifelse(year %in% 1999:2007, "99_07",
ifelse(year %in% 2008:2018, "08_18",NA))))) %>%
group_by(x,y,square, year_cat) %>%
summarise(Sdep = sum(Sdep),
Ndep = sum(Ndep)) %>%
ungroup() %>%
pivot_wider(names_from = year_cat,
values_from = c(Sdep, Ndep))
psych::pairs.panels(select(AtDepavg_cumdep_sq,-x,-y,-square))
# merge with CS plots
dep_x <- AtDepavg_cumdep_sq$x
dep_y <- AtDepavg_cumdep_sq$y
# Soil pH data ####
# data manipulation
str(CS78_PH)
str(CS98_PH)
str(CS07_PH)
str(CS16_PH)
str(UK19_PH)
# Standard REP_ID key: square number + "X" + repeat number, for every survey
CS78_PH$REP_ID <- paste(CS78_PH$SQUARE_NUM,CS78_PH$REP_NUM, sep = "X")
CS98_PH$REP_ID <- paste(CS98_PH$SQUARE_NUM,CS98_PH$REP_NUM, sep = "X")
CS07_PH$REP_ID <- paste(CS07_PH$SQUARE_NUM,CS07_PH$REP_NUM, sep = "X")
CS16_PH$REP_ID <- paste(CS16_PH$SQUARE_NUM,CS16_PH$REP_NUM, sep = "X")
UK19_PH$REP_ID <- paste(UK19_PH$SQUARE_NUM,UK19_PH$REP_NUM, sep = "X")
# One wide table: DIW pH per plot per survey year
PH <- full_join(select(CS78_PH, REP_ID, PH1978),
select(CS98_PH, REP_ID, PH2000 = PHF2000)) %>%
full_join(select(CS07_PH, REP_ID, PH2007 = PH2007_IN_WATER)) %>%
full_join(select(CS16_PH, REP_ID, PH2016 = PH_DIW)) %>%
full_join(select(UK19_PH, REP_ID, PH2019 = PH_DIW))
str(PH)
summary(PH)
# missingness pattern across survey years
mice::md.pattern(PH)
# histograms
PH_long <- pivot_longer(PH, starts_with("PH"),
values_to = "pH",
values_drop_na = TRUE)
ggplot(PH_long, aes(x = pH)) +
geom_histogram() +
facet_wrap(~name, scales = "free_y")
# year parsed from column names like "PH1978"
PH_long$year <- as.integer(substring(PH_long$name, 3,6))
ggplot(PH_long, aes(x = year, y = pH, group = REP_ID)) +
geom_line(alpha = 0.2, col = "dodgerblue2")+
geom_jitter(alpha = 0.2, width = 1, height = 0, shape = 16)
# calculate differences between survey years
# diffXXYY = pH in 20YY minus pH in 19XX/20XX; the *18 columns coalesce
# 2019 and 2016 so "most recent" change is available for every plot
PH <- PH %>%
mutate(diff7898 = PH2000 - PH1978,
diff7807 = PH2007 - PH1978,
diff7816 = PH2016 - PH1978,
diff7819 = PH2019 - PH1978,
diff9807 = PH2007 - PH2000,
diff9816 = PH2016 - PH2000,
diff9819 = PH2019 - PH2000,
diff0716 = PH2016 - PH2007,
diff0719 = PH2019 - PH2007) %>%
mutate(diff0718 = ifelse(!is.na(diff0719), diff0719,
ifelse(!is.na(diff0716), diff0716, NA)),
diff7818 = ifelse(!is.na(diff7819), diff7819,
ifelse(!is.na(diff7816), diff7816, NA)),
diff9818 = ifelse(!is.na(diff9819), diff9819,
ifelse(!is.na(diff9816), diff9816, NA)))
summary(PH)
# Long format, factor levels kept in column order for consistent facets
PH_diff_long <- PH %>%
select(REP_ID, starts_with("diff")) %>%
pivot_longer(starts_with("diff"),
values_to = "pH",
values_drop_na = TRUE) %>%
mutate(name = as.factor(name)) %>%
mutate(name = forcats::fct_inorder(name))
ggplot(PH_diff_long, aes(x = pH)) +
geom_histogram() +
facet_wrap(~name, scales = "free_y") +
geom_vline(xintercept = 0)
# select only most recent change and convert into wide format for plotting
PH_Diff_wide <- select(PH, REP_ID, diff0718) %>%
na.omit() %>%
left_join(select(CS07_PLOTS, REP_ID, POINT_X, POINT_Y))
summary(PH_Diff_wide)
# Map of most-recent pH change; jitter obscures exact plot locations
ggplot(PH_Diff_wide, aes(x = POINT_X, y = POINT_Y, colour = diff0718)) +
geom_jitter(width = 5000, height = 5000) +
coord_fixed() +
scale_colour_distiller(palette = "RdBu", direction = 1,
limits = c(-1,1)*max(abs(PH_Diff_wide$diff0718)),
name = "pH change", na.value = "white") +
theme_dark()
theme_dark()
# ** pH maps ####
library(sf)
library(leaflet)
# convert to sf object
CS_PH_loc <- PH_Diff_wide %>%
select(POINT_X, POINT_Y) %>%
as.matrix() %>%
st_multipoint(dim="XY") %>%
st_sfc(crs = 27700) %>%
st_transform(crs = 4326) %>%
st_cast("POINT")
CS_PH_loc <- st_sf(cbind(select(PH_Diff_wide, REP_ID, pH_change = diff0718),CS_PH_loc))
# Create variable for colouring of points. first cut the continuous variable
# into bins - these bins are now factors
CS_PH_loc$pH_lev <- cut(CS_PH_loc$pH_change,
c(-3,-1.5,-1,-0.5,0,0.5,1,1.5,3))
pHCol <- colorFactor(palette = 'RdBu', CS_PH_loc$pH_lev)
# add random jitter to points so not overplotting
CS_PH_loc_jitter <- st_jitter(CS_PH_loc, factor = 0.005)
# read in UK boundary shapefile
UK_boundary <- st_read("../../../GBR_adm/GBR_adm0.shp")
# plot interactively
leaflet() %>%
addPolygons(data = UK_boundary, stroke = FALSE,
color = "black") %>%
addCircleMarkers(data = CS_PH_loc_jitter, radius = 5,
label = CS_PH_loc$REP_ID,
color = ~pHCol(CS_PH_loc$pH_lev),
fillOpacity = 1, stroke = FALSE) %>%
addLegend('topright', pal = pHCol, values = CS_PH_loc$pH_lev,
title = 'pH change',
opacity = 1)
# plot histograms of difference between survey years wrapping together 16 and 19
PH_diff_long %>% filter(name %in%
c("diff7807","diff9807","diff7898",
"diff7818","diff9818","diff0718")) %>%
ggplot(aes(x = pH)) +
geom_histogram() +
facet_wrap(~name) +
geom_vline(xintercept = 0)
ggsave("pH change histograms facet by survey comparison.png",
path = "Outputs/Graphs/",
width = 20, height = 12, units = "cm")
# remove 18 variables for consistency later in script
# BUG FIX: the filter previously tested `names` (the base R function)
# instead of the `name` column created by pivot_longer() above, which
# errors at runtime ("cannot coerce type 'builtin'").
PH_diff_long <- filter(PH_diff_long,
                       name %in% c("diff7898",
                                   "diff7807",
                                   "diff7816",
                                   "diff7819",
                                   "diff9807",
                                   "diff9816",
                                   "diff9819",
                                   "diff0716",
                                   "diff0719"))
# ** breakdown by AVC data ####
# AVC data manipulation
hab07 <- select(CS07_IBD, REP_ID = REP_ID07, AVC07) %>%
unique()
hab98 <- select(CS98_IBD, REP_ID = REP_ID98, AVC98) %>%
unique()
hab78 <- select(CS78_IBD, REP_ID = REP_ID78, AVC78) %>%
unique()
# create combined AVC variable, if 07 has AVC use that otherwise use 98 then 78.
# There are only 3 sites with no AVC data and I can't see how to get theirs as
# they don't appear in 2016/19.
hab <- full_join(hab07, hab98) %>% full_join(hab78) %>%
mutate_if(is.factor, as.character) %>%
mutate(AVC = ifelse(!is.na(AVC07), AVC07,
ifelse(!is.na(AVC98), AVC98,
ifelse(!is.na(AVC78), AVC78, NA)))) %>%
mutate(AVC_desc = recode(AVC,
`1` = "Crops/Weeds",
`2` = "Tall herb/grass",
`3` = "Fertile grassland",
`4` = "Infertile grassland",
`5` = "Lowland wooded",
`6` = "Upland wooded",
`7` = "Moorland grass/mosaic",
`8` = "Heath/bog"))
# calculate total change in pH over survey years
# change_dir = sum of the three consecutive-survey changes; where that is 0
# (all three NA, since na.rm drops them) fall back to the longest available
# single-span difference
PH$change_dir <- rowSums(select(PH, diff7898, diff9807, diff0719), na.rm = TRUE)
summary(PH$change_dir)
filter(PH,change_dir == 0) %>% select(starts_with("diff")) %>%
summary()
PH$change_dir <- ifelse(PH$change_dir == 0 & !is.na(PH$diff7807), PH$diff7807, PH$change_dir)
PH$change_dir <- ifelse(PH$change_dir == 0 & !is.na(PH$diff7819), PH$diff7819, PH$change_dir)
# Combine pH and AVC data and convert to long format
PH_long_hab <- left_join(PH, select(BH_IMP, REP_ID, Management)) %>%
droplevels() %>%
select(-starts_with("diff")) %>%
pivot_longer(starts_with("PH"),
names_to = c("Variable","year"),
names_sep = "_",
values_to = "pH",
values_drop_na = TRUE) %>%
filter(!is.na(Management )) %>%
mutate(year = as.numeric(year))
# plots of pH change over time
# Trajectories per plot coloured by overall direction of change
PH_long_hab %>%
ggplot(aes(x = year, y = pH, group = REP_ID)) +
geom_line(alpha = 0.5, aes(colour = change_dir) )+
geom_jitter(size = 0.2, width = 1, height = 0, shape = 16, alpha = 0.8,
aes(colour = change_dir)) +
facet_wrap(~Management, nrow = 2) +
scale_colour_distiller(palette = "RdBu", direction = 1,
limits = c(-1,1)*max(abs(PH_long_hab$change_dir)),
name = "pH change", na.value = "white") +
theme_dark()
ggsave("pH change over time facetted by Management.png", path = "Outputs/Graphs/",
width = 12, height = 12, units = "cm")
# Same but with per-year boxplots overlaid
PH_long_hab %>%
ggplot(aes(x = year, y = pH)) +
geom_line(alpha = 0.5, aes(colour = change_dir, group = REP_ID))+
geom_jitter(size = 0.2, width = 1, height = 0,
shape = 16, alpha = 0.1,
colour = "grey50") +
geom_boxplot(fill= NA, aes(group = year), outlier.shape = NA) +
facet_wrap(~Management, nrow = 2) +
# geom_smooth(formula = y ~ poly(x,3)) +
scale_colour_distiller(palette = "RdBu", direction = 1,
limits = c(-1,1)*max(abs(PH_long_hab$change_dir)),
name = "pH change", na.value = "white") +
# theme_dark() +
NULL
ggsave("pH change over time boxplots facetted by management.png", path = "Outputs/Graphs/",
width = 12, height = 15, units = "cm")
# combine ph difference and AVC data
PH_diff_long <- left_join(PH_diff_long,
select(hab, REP_ID, AVC = AVC_desc)) %>%
droplevels()
table(PH_diff_long$AVC)
# sanity check for duplicate REP_ID x comparison rows (janitor)
get_dupes(PH_diff_long, REP_ID, name)
PH_diff_long %>%
filter(!is.na(AVC)) %>%
filter(name %in% c("diff7807","diff7898","diff9807","diff7818","diff9818","diff0718")) %>%
ggplot(aes(x = pH)) +
geom_histogram() +
geom_vline(xintercept = 0) +
facet_grid(AVC ~ name, scales = "free_y")
ggsave("pH difference histograms facetted by AVC and year.png",
path = "Outputs/Graphs/", width = 28, height = 24, units = "cm")
# ** Soil pH in CaCl2 ####
# Only have pH in CaCl2 data for 2007 onwards
str(CS78_PH)
str(CS98_PH)
str(CS07_PH)
str(CS16_PH)
str(UK19_PH)
# data manipulation
# Wide table of CaCl2 pH by year; pH_change prefers the 2019-2007 difference
# and falls back to 2016-2007 when 2019 is missing.
PHC <- full_join(select(CS07_PH, REP_ID, PHC2007 = PH2007_IN_CACL2),
select(CS16_PH, REP_ID, PHC2016 = PH_CACL2)) %>%
full_join(select(UK19_PH, REP_ID, PHC2019 = PH_CACL)) %>%
mutate(pH_change = ifelse(!is.na(PHC2019), PHC2019 - PHC2007,
ifelse(!is.na(PHC2016), PHC2016 - PHC2007, NA))) %>%
left_join(unique(select(hab, REP_ID, AVC = AVC_desc)))
str(PHC)
summary(PHC)
# mice::md.pattern — visualise the missingness structure
md.pattern(PHC)
# histograms of pH CaCl2 change
PHC %>% filter(!is.na(AVC)) %>%
ggplot(aes(x = pH_change)) + geom_histogram() +
geom_vline(xintercept = 0) +
facet_wrap(~AVC, nrow = 2)
ggsave("pH CaCl2 change 07 to 1619 facet by AVC.png", path = "Outputs/Graphs/",
width = 28, height = 12, units = "cm")
# side-by-side comparison of CaCl2 vs DIW change distributions (stacked below
# via patchwork p2/p1)
p1 <-PHC %>%
ggplot(aes(x = pH_change)) + geom_histogram() +
geom_vline(xintercept = 0)+
labs(x = "pH change", title = bquote("pH (CaCl"[2]*")")) +
scale_x_continuous(limits = c(-3,3))+
scale_y_continuous(limits = c(0,110), expand = c(0,0))
p2 <- PH_diff_long %>% filter(name %in% c("diff0716","diff0719")) %>%
ggplot(aes(x = pH)) + geom_histogram() +
geom_vline(xintercept = 0) +
labs(x = "", title = "pH (DIW)")+
scale_x_continuous(limits = c(-3,3))+
scale_y_continuous(limits = c(0,110), expand = c(0,0))
p2/p1
ggsave("pH change 07 to 1619 DIW and CaCl2.png", path = "Outputs/Graphs/",
width = 15, height = 18, units = "cm")
# data manipulation into long format
# One row per plot x year of CaCl2 pH; the per-plot pH_change column is
# carried along for colouring the trajectories below.
PHC_long <- PHC %>%
pivot_longer(starts_with("PHC"), names_to = "year",
names_prefix = "PHC", values_to = "pH_CaCl2",
values_drop_na = TRUE)
str(PHC_long)
# boxplot/line/scatter plot of pH CaCl2 change over time
# fix: colour limits previously used abs(max(pH_change)), which (a) returns NA
# because pH_change contains NAs, and (b) differs from max(abs(.)) when the
# largest decrease exceeds the largest increase. Now matches the
# max(abs(...)) convention used for the other change plots in this file.
PHC_long %>% mutate(year = as.numeric(year)) %>%
filter(!is.na(AVC)) %>%
ggplot(aes(x = year, y = pH_CaCl2)) +
geom_jitter(shape = 16, size = 0.5, alpha = 0.5,
width = 1, height = 0) +
geom_boxplot(aes(group = year), fill = NA) +
geom_line(aes(group = REP_ID, colour = pH_change), alpha = 0.5) +
facet_wrap(~AVC, nrow = 2) +
scale_colour_distiller(palette = "RdBu", direction = 1,
limits = c(-1,1)*max(abs(PHC_long$pH_change), na.rm = TRUE))
ggsave("pH CaCl2 over time boxplots facet by AVC.png",
path = "Outputs/Graphs/",
width =28, height = 15, units = "cm")
# combine pH in CaCl2 and DIW and plot against each other
# DIW change prefers 2019-2007, falling back to 2016-2007, mirroring the
# pH_change logic used for the CaCl2 data above.
phc_wide_diff <- PH %>%
mutate(pH_diw_change = ifelse(!is.na(diff0719),diff0719,
ifelse(!is.na(diff0716), diff0716, NA))) %>%
select(REP_ID, PH2007, PH2016, PH2019, pH_diw_change) %>%
full_join(PHC)
# 1:1 line plus zero reference lines; points off the diagonal show method
# disagreement between DIW and CaCl2 measurements
ggplot(phc_wide_diff, aes(x = pH_diw_change, y = pH_change)) +
geom_abline(intercept = 0,slope = 1, colour = "grey") +
geom_vline(xintercept = 0, colour = "grey") +
geom_hline(yintercept = 0, colour = "grey") +
geom_point() +
# geom_smooth(method = "lm") +
labs(x = "pH (DIW) change", y = bquote("pH (CaCl"[2]*") change"))
ggsave("pH change over time DIW vs CaCl2 scatterplot.png",
path = "Outputs/Graphs/",
width = 15, height = 15, units = "cm")
# there is one sample with a NA for AVC so removing
phc_wide_diff %>% filter(!is.na(AVC)) %>%
ggplot(aes(x = pH_diw_change, y = pH_change)) +
geom_abline(intercept = 0,slope = 1, colour = "grey") +
geom_vline(xintercept = 0, colour = "grey") +
geom_hline(yintercept = 0, colour = "grey") +
facet_wrap(~AVC, nrow = 2) +
geom_point() +
# geom_smooth(method = "lm") +
labs(x = "pH (DIW) change", y = bquote("pH (CaCl"[2]*") change"))
ggsave("pH change over time DIW vs CaCl2 scatterplots facet by AVC.png",
path = "Outputs/Graphs/",
width = 28, height = 15, units = "cm")
# Paired DIW vs CaCl2 values per year: pivot to (Variable, Year), then widen
# so each row has both PH and PHC for the same plot/year.
# NOTE(review): uses PH columns named PHC_2007/PH_2007/... (underscore form),
# unlike PHC2007 etc. above — presumably added to PH elsewhere; confirm.
PH %>%
select(REP_ID, PHC_2007, PH_2007, PH_2019, PHC_2019) %>%
pivot_longer(starts_with("PH"),
names_to = c("Variable","Year"),
names_sep = "_") %>%
na.omit() %>%
pivot_wider(names_from = "Variable",
values_from = "value") %>%
ggplot(aes(x = PH, y = PHC, colour = Year)) +
geom_point() +
geom_abline(slope = 1, intercept = 0) +
coord_fixed() +
facet_wrap(~Year) +
labs(x = "pH (DIW)", y = bquote("pH CaCl"[2]))
# Plant Ellenberg scores ####
# Interactive checks of the 2019 species data and trait library before
# building plot-level Ellenberg means.
str(CS19_SP)
table(CS19_SP$PLOT_TYPE)
unique(CS19_SP[CS19_SP$PLOT_TYPE=="XX","REP_ID"])
table(CS19_SP[CS19_SP$PLOT_TYPE=="X","PLOTYEAR"])
str(SPECIES_LIB_TRAITS)
# look up the code meanings for GROWTH_FORM (used to filter to vascular plants)
filter(SPECIES_LIB_CODES, COLUMN_NAME == "GROWTH_FORM")
# Plot-level mean Ellenberg scores for the 2018/19 survey.
# X and XX plots are merged under a single "X" REP_ID; scores of 0 are treated
# as missing; means are per plot over vascular plant species only, with
# columns suffixed "18" (e.g. EBERGN -> N18 after the EBERG prefix is removed).
CS18_ELL <- filter(CS19_SP, PLOT_TYPE %in% c("X","XX")) %>%
mutate(REP_ID = paste0(SQUARE,PLOT_TYPE,PLOT_NUMBER)) %>%
mutate(REP_ID = gsub("XX","X",REP_ID)) %>%
left_join(select(SPECIES_LIB_TRAITS, BRC_NUMBER,
starts_with("EBER"),
GROWTH_FORM)) %>%
filter(GROWTH_FORM %in% c("f","fe","g","m","s","ss","w")) %>% # filter to vascular plants
# fix: pass explicit anonymous functions to across() — supplying a bare
# function plus extra arguments (na_if, y = 0 / mean, na.rm = TRUE) through
# across(...) is deprecated as of dplyr 1.1.0; behaviour is unchanged
mutate(across(starts_with("EBER"), function(x) na_if(x, 0))) %>% # set 0 values to NA
group_by(REP_ID) %>%
summarise(across(starts_with("EBER"), function(x) mean(x, na.rm = TRUE),
.names = "{col}18")) %>%
rename_with(~gsub("EBERG","",.x))
summary(CS18_ELL)
# Sanity check: 2018 Ellenberg N against the 2016 GMEP values on shared plots
test <- full_join(CS18_ELL, GM16_IBD, by = c("REP_ID" = "REP_ID16"))
plot(N18 ~ N16, test);abline(0,1)
# Recompute 1998 plot-level Ellenberg scores from the raw species data to
# compare against the archived CS98_IBD values.
# NOTE(review): the denominator is length(na.omit(EBERGN)) for EVERY column —
# i.e. each score's sum is divided by the count of non-missing N scores, not
# of its own column. If unintentional, this should be
# sum(x, na.rm=TRUE)/length(na.omit(x)) (= mean(x, na.rm=TRUE)) — confirm.
CS98_ELL <- CS98_SP %>%
select(REP_ID, BRC_NUMBER, TOTAL_COVER) %>%
unique() %>%
filter(TOTAL_COVER > 0) %>%
left_join(select(SPECIES_LIB_TRAITS, BRC_NUMBER,
starts_with("EBER"),
GROWTH_FORM)) %>%
# filter(GROWTH_FORM %in% c("f","fe","g","m","s","ss","w")) %>% # filter to vascular plants
mutate(across(starts_with("EBER"), na_if, y = 0)) %>% # set 0 values to NA
group_by(REP_ID) %>%
summarise(across(starts_with("EBER"), function(x) sum(x, na.rm=TRUE)/length(na.omit(EBERGN)),
.names = "{col}98_new")) %>%
rename_with(~gsub("EBERG","",.x))
# Compare recomputed (_new) scores against archived IBD values; note the W
# (moisture) score is compared against the archived F column.
test <- full_join(CS98_ELL, CS98_IBD, by = c("REP_ID" = "REP_ID98"))
#par(mfrow=c(2,2))
plot(R98_new ~ R98, test);abline(0,1)
plot(N98_new ~ N98, test);abline(0,1)
plot(W98_new ~ F98, test);abline(0,1)
plot(L98_new ~ L98, test);abline(0,1)
par(mfrow=c(1,1))
summary(CS18_ELL)
# Compare unweighted Ellenberg scores between the inner (SM_, 4m2) and whole
# (WH_, 400m2) plot areas. X_Ell_inner/X_Ell_whole are built elsewhere.
X_Ell_comp <- full_join(X_Ell_inner, X_Ell_whole) %>%
left_join(hab) %>%
mutate(R_diff = SM_R - WH_R,
N_diff = SM_N - WH_N,
W_diff = SM_W - WH_W,
L_diff = SM_L - WH_L)
# p1 kept for the combined "Unweighted"/"Cover weighted" figure below
p1 <- ggplot(X_Ell_comp, aes(x = WH_R, y = SM_R)) +
geom_point(size=0.5) +
geom_abline(intercept = 0, slope = 1) +
facet_wrap(~AVC_desc) +
labs(x = bquote("Ellenberg R 400m"^2~"plot"),
y = bquote("Ellenberg R 4m"^2~"plot"))
ggplot(X_Ell_comp, aes(x = WH_N, y = SM_N)) +
geom_point() +
geom_abline(intercept = 0, slope = 1) +
facet_wrap(~AVC_desc)
ggplot(X_Ell_comp, aes(x = R_diff)) +
geom_histogram() +
geom_vline(xintercept = 0) +
facet_wrap(~AVC_desc)
ggplot(X_Ell_comp, aes(x = WH_R, y = SM_R)) +
geom_point() +
geom_abline(intercept = 0, slope = 1) +
facet_grid(Year~AVC_desc) +
theme_bw()
ggplot(X_Ell_comp, aes(x = N_diff)) +
geom_histogram() +
geom_vline(xintercept = 0) +
facet_wrap(~AVC_desc)
# histograms of all four score differences, faceted score x AVC
X_Ell_comp %>%
select(Year, REP_ID, ends_with("diff"), AVC_desc) %>%
pivot_longer(ends_with("diff"), names_to = "Ellenberg") %>%
ggplot(aes(x = value)) +
geom_histogram() +
geom_vline(xintercept = 0) +
scale_x_continuous(limits = c(-2.5,2.5)) +
facet_grid(Ellenberg~AVC_desc)
# weighted Ellenberg comparison
# Same inner-vs-whole plot comparison as above, but for cover-weighted scores.
X_wEll_comp <- full_join(X_wEll_inner, X_wEll_whole) %>%
left_join(hab) %>%
mutate(R_diff = SM_R - WH_R,
N_diff = SM_N - WH_N,
W_diff = SM_W - WH_W,
L_diff = SM_L - WH_L)
# p2 kept for the combined figure below; 1978 excluded
p2 <- ggplot(filter(X_wEll_comp, Year != 1978), aes(x = WH_R, y = SM_R)) +
geom_point(size=0.5) +
geom_abline(intercept = 0, slope = 1) +
facet_wrap(~AVC_desc) +
labs(x = bquote("Ellenberg R 400m"^2~"plot"),
y = bquote("Ellenberg R 4m"^2~"plot"))
ggplot(filter(X_wEll_comp, Year != 1978), aes(x = R_diff)) +
geom_histogram() +
geom_vline(xintercept = 0) +
facet_wrap(~AVC_desc)
X_wEll_comp %>%
select(Year, REP_ID, ends_with("diff"), AVC_desc) %>%
pivot_longer(ends_with("diff"), names_to = "Ellenberg") %>%
ggplot(aes(x = value)) +
geom_histogram() +
geom_vline(xintercept = 0) +
scale_x_continuous(limits = c(-2.5,2.5)) +
facet_grid(Ellenberg~AVC_desc)
# Overall t-tests of small vs whole plot Ellenberg R (weighted, then
# unweighted); results recorded in the comments below each call.
t.test(X_wEll_comp$SM_R,X_wEll_comp$WH_R)
# t = -0.47853, df = 18816, p-value = 0.6323
t.test(X_Ell_comp$SM_R,X_Ell_comp$WH_R)
# t = -3.3001, df = 19015, p-value = 0.0009682
# Per-AVC t-tests, weighted scores
x <- na.omit(unique(X_wEll_comp$AVC_desc))
for(i in 1:length(x)){
dat <- filter(X_wEll_comp, AVC_desc == x[i])
print(paste(x[i],"p =",round(t.test(dat$SM_R,dat$WH_R)$p.value,5)))
}
# [1] "Tall herb/grass p = 0.85153"
# [1] "Crops/Weeds p = 0.04266"
# [1] "Fertile grassland p = 0.92217"
# [1] "Heath/bog p = 2e-05"
# [1] "Moorland grass/mosaic p = 0.12709"
# [1] "Upland wooded p = 0.74556"
# [1] "Infertile grassland p = 0.92811"
# [1] "Lowland wooded p = 0.39651"
# Per-AVC t-tests, unweighted scores
x <- na.omit(unique(X_Ell_comp$AVC_desc))
for(i in 1:length(x)){
dat <- filter(X_Ell_comp, AVC_desc == x[i])
print(paste(x[i],"p =",round(t.test(dat$SM_R,dat$WH_R)$p.value,5)))
}
# [1] "Tall herb/grass p = 0.91431"
# [1] "Crops/Weeds p = 0.06944"
# [1] "Fertile grassland p = 0.05292"
# [1] "Heath/bog p = 0"
# [1] "Moorland grass/mosaic p = 0"
# [1] "Upland wooded p = 0.00329"
# [1] "Infertile grassland p = 0.06137"
# [1] "Lowland wooded p = 0.96831"
# Combined R comparison figure (patchwork): unweighted vs cover-weighted
p1 + ggtitle("Unweighted") + p2 + ggtitle("Cover weighted")
ggsave("Ellenberg R plot size comparison.png", path = "Outputs/Graphs/",
width = 24, height = 12, units = "cm")
# Same comparison for Ellenberg N (p1/p2 reassigned here)
p1 <- ggplot(filter(X_Ell_comp, Year != 1978), aes(x = WH_N, y = SM_N)) +
geom_point(size=0.5) +
geom_abline(intercept = 0, slope = 1) +
facet_wrap(~AVC_desc) +
labs(x = bquote("Ellenberg N 400m"^2~"plot"),
y = bquote("Ellenberg N 4m"^2~"plot")) +
ggtitle("Unweighted")
p2 <- ggplot(filter(X_wEll_comp, Year != 1978), aes(x = WH_N, y = SM_N)) +
geom_point(size=0.5) +
geom_abline(intercept = 0, slope = 1) +
facet_wrap(~AVC_desc) +
labs(x = bquote("Ellenberg N 400m"^2~"plot"),
y = bquote("Ellenberg N 4m"^2~"plot")) +
ggtitle("Cover weighted")
p1 + p2
ggsave("Ellenberg N plot size comparison.png", path = "Outputs/Graphs/",
width = 24, height = 12, units = "cm")
# Overall and per-AVC t-tests for Ellenberg N, mirroring the R tests above
t.test(X_wEll_comp$SM_N,X_wEll_comp$WH_N)
# t = -0.12149, df = 18823, p-value = 0.9033
t.test(X_Ell_comp$SM_N,X_Ell_comp$WH_N)
# t = -1.621, df = 19043, p-value = 0.105
# correlations
x <- na.omit(unique(X_wEll_comp$AVC_desc))
for(i in 1:length(x)){
dat <- filter(X_wEll_comp, AVC_desc == x[i])
print(paste(x[i],"p =",round(t.test(dat$SM_N,dat$WH_N)$p.value,5)))
}
# [1] "Tall herb/grass p = 0.8639"
# [1] "Crops/Weeds p = 0.04501"
# [1] "Fertile grassland p = 0.83626"
# [1] "Heath/bog p = 0.03957"
# [1] "Moorland grass/mosaic p = 0.07602"
# [1] "Upland wooded p = 0.63365"
# [1] "Infertile grassland p = 0.38431"
# [1] "Lowland wooded p = 0.31157"
x <- na.omit(unique(X_Ell_comp$AVC_desc))
for(i in 1:length(x)){
dat <- filter(X_Ell_comp, AVC_desc == x[i])
print(paste(x[i],"p =",round(t.test(dat$SM_N,dat$WH_N)$p.value,5)))
}
# [1] "Tall herb/grass p = 0.91795"
# [1] "Crops/Weeds p = 0.05538"
# [1] "Fertile grassland p = 0.38703"
# [1] "Heath/bog p = 0"
# [1] "Moorland grass/mosaic p = 0"
# [1] "Upland wooded p = 0.01399"
# [1] "Infertile grassland p = 0.41287"
# [1] "Lowland wooded p = 0.62069"
# Data manipulation
str(CS07_IBD)
str(CS98_IBD)
str(CS78_IBD)
str(GM16_IBD)
str(CS18_ELL)
# get GMEP data to have CS REP_ID
# Build a lookup from GMEP plot IDs to CS plot IDs via the shared square/plot
# numbering in the 2016 pH file.
GMEP_CS_match <- CS16_PH %>%
select(SQUARE_NUM, GMEP_NUM, PLOT_TYPE, REP_NUM) %>%
filter(!is.na(GMEP_NUM)) %>%
mutate(CS_REP_ID = paste0(SQUARE_NUM, PLOT_TYPE, REP_NUM),
GMEP_REP_ID = paste0(GMEP_NUM, PLOT_TYPE, REP_NUM)) %>%
select(CS_REP_ID, GMEP_REP_ID)
# Get GMEP data into similar format to CS data
# (overwrites GM16_IBD in place — not idempotent on re-run)
GM16_IBD <- GM16_IBD %>%
right_join(GMEP_CS_match, by = c("REP_ID" = "GMEP_REP_ID")) %>%
select(REP_ID16 = CS_REP_ID, R16 = PH, N16 = FERT, L16 = LIGHT, F16 = WET)
# Combine IBD files for the different years, keyed on the 2007 REP_ID
IBD_comb <- full_join(CS07_IBD, CS98_IBD, by = c("REP_ID07" = "REP_ID98")) %>%
full_join(CS78_IBD, by = c("REP_ID07" = "REP_ID78")) %>%
full_join(GM16_IBD, by = c("REP_ID07" = "REP_ID16")) %>%
full_join(CS18_ELL, by = c("REP_ID07" = "REP_ID"))
# Use AVC data from 2007 if it is there, otherwise 98 or 78
# NOTE(review): if the AVC columns are factors, ifelse() returns the
# underlying integer codes rather than labels — verify column types here.
IBD_comb$AVC <- ifelse(!is.na(IBD_comb$AVC07), IBD_comb$AVC07,
ifelse(!is.na(IBD_comb$AVC98), IBD_comb$AVC98,
IBD_comb$AVC78))
summary(IBD_comb$AVC)
# get plot type from REP_ID
# strip all digits, leaving the letter code (e.g. "X")
IBD_comb$PLOT_TYPE <- gsub("[^a-zA-Z]", "", IBD_comb$REP_ID07)
summary(as.factor(IBD_comb$PLOT_TYPE))
# Calculate difference in Ell R over the years
# Widen X_Ell to one column of Ellenberg R per year (R1978...R2019), then
# compute consecutive-survey differences.
ELL <- X_Ell %>%
select(Year, REP_ID, contains("_R_")) %>%
pivot_longer(contains("_R_"), names_to = "Ellenberg") %>%
mutate(Year = as.character(Year)) %>%
pivot_wider(names_from = Year,
names_prefix = "R") %>%
mutate(diff7890 = R1990 - R1978,
diff9098 = R1998 - R1990,
diff9807 = R2007 - R1998,
diff0719 = R2019 - R2007
)
# Calculate overall Ell R change
# NOTE(review): as with pH change_dir, rowSums(na.rm = TRUE) gives 0 for
# rows where every diff is NA.
ELL$Rchange <- rowSums(select(ELL, diff7890, diff9098,
diff9807, diff0719), na.rm = TRUE)
summary(ELL$Rchange)
filter(ELL, Rchange == 0) %>% select(starts_with("diff")) %>%
summary()
# Convert Ell R change into long format
ELL_diff_long <- ELL %>%
select(REP_ID, starts_with("diff")) %>%
droplevels %>%
pivot_longer(starts_with("diff"), values_to = "Ell_R") %>%
filter(!is.na(Ell_R))
# NOTE(review): ELL_diff_long as built just above contains only REP_ID, name
# and Ell_R — the filter/facet on AVC below appears to rely on an earlier
# version of this object that carried AVC; confirm before re-running cold.
ELL_diff_long %>%
filter(!is.na(AVC)) %>%
ggplot(aes(x = Ell_R)) +
geom_histogram() +
geom_vline(xintercept = 0) +
facet_grid(name ~ AVC, scales = "free_y")
ggsave("Ellenberg R change histograms all plots facetted AVC year.png",
path = "Outputs/Graphs/", width = 28, height = 20, units ="cm")
# Convert Ellenberg R scores file to long format
# NOTE(review): this selects REP_ID07/PLOT_TYPE.x/AVC_desc/R07/R98/R78/R16,
# none of which exist on the ELL object built above (which has REP_ID and
# R1978...R2019) — this section looks stale relative to the current ELL
# pipeline; confirm which definition of ELL it was written against.
ELL_R_LONG <- ELL %>% select(REP_ID = REP_ID07,
PLOT_TYPE = PLOT_TYPE.x,
AVC = AVC_desc, R07, R98, R78, R16, Rchange) %>%
filter(PLOT_TYPE == "X") %>%
droplevels() %>%
select(-PLOT_TYPE) %>%
pivot_longer(cols = c(R07,R98,R78,R16), names_to = "year",
names_prefix = "R") %>%
mutate(year = ifelse(year == "07", 2007,
ifelse(year == "98", 1998,
ifelse(year == "78", 1978,
ifelse(year == "16", 2016, NA)))))
str(ELL_R_LONG)
summary(ELL_R_LONG$AVC)
# Ellenberg R score change over time boxplot/scatter/line graph
# NOTE(review): the recode of an "Ellenberg" column below has no matching
# column in this pipe (pivot_longer here only creates year/value) — also
# likely stale; verify.
ELL %>%
select(-starts_with("diff")) %>%
pivot_longer(R1978:R2019,
names_to = "year",
names_prefix = "R",
names_transform = list(year = as.integer)) %>%
left_join(BH_IMP) %>%
filter(!is.na(Management)) %>%
mutate(Management = recode(Management,
"High" = "High intensity",
"Low" = "Low intensity"),
Ellenberg = recode(Ellenberg,
"SM_R_UW" = "Small unweighted",
"SM_R_W" = "Small weighted",
"WH_R_UW" = "Full unweighted",
"WH_R_W"= "Full weighted")) %>%
ggplot(aes(x = year, y = value)) +
geom_line(alpha = 0.5, aes(group = REP_ID, colour = Rchange))+
geom_jitter(size = 0.2, width = 1, height = 0,
shape = 16, alpha = 0.1,
colour = "grey50") +
geom_boxplot(fill= NA, aes(group = year), outlier.shape = NA, width = 3) +
facet_grid(Ellenberg~Management) +
labs(y = "Ellenberg R") +
# geom_smooth(formula = y ~ poly(x,3)) +
scale_colour_distiller(palette = "RdBu", direction = 1,
limits = c(-1,1)*max(abs(ELL$Rchange)),
name = "Ell R change", na.value = "white") +
# theme_dark() +
NULL
ggsave("Ellenberg R change over time X plots boxplots facetted by Management.png",
path = "Outputs/Graphs/", width = 15, height = 20, units = "cm")
str(X_Ell)
str(BH_IMP)
# Small vs full plot Ellenberg R, split by weighting and management intensity.
# Column names of form SM_R_UW are split into PlotSize/Score/Weighting, then
# widened so each row pairs the Small and Full values.
plot_dat <- left_join(X_Ell, BH_IMP) %>%
filter(!is.na(Management)) %>%
select(REP_ID, Year, contains("_R_"), Management) %>%
pivot_longer(contains("_R_"),
names_to = c("PlotSize","Score","Weighting"),
names_sep = "_") %>%
mutate(PlotSize = recode(PlotSize,
"SM" = "Small",
"WH" = "Full"),
Weighting = recode(Weighting,
"UW" = "Unweighted",
"W" = "Weighted"),
Management = recode(Management,
"High" = "High intensity management",
"Low" = "Low intensity management")) %>%
pivot_wider(names_from = PlotSize,
values_from = value)
ggplot(plot_dat, aes(x = Full, y = Small)) +
geom_point(alpha = 0.25, colour = "dodgerblue3") +
geom_abline(slope = 1, intercept = 0) +
coord_fixed() +
facet_grid(Management~Weighting) +
labs(x = "Full size plot", y = "Smaller size plot")
ggsave("Ellenberg R plot size comparison by weighting and management.png",
path = "Outputs/Graphs/", width = 12, height = 12, units ="cm")
# Combined pH and ellenberg R ####
# change graphs - combine difference stats
# Join Ellenberg R change (X plots only) to pH change for matching
# survey-pair periods.
ph_ell_comb <- ELL_diff_long %>%
select(-AVC) %>%
filter(grepl("X", REP_ID)) %>%
full_join(filter(PH_diff_long, name %in% unique(ELL_diff_long$name)))
str(ph_ell_comb)
md.pattern(ph_ell_comb)
# scatter plot of pH change against Ellenberg R change facetted by survey year
# comparison and AVC
ph_ell_comb %>%
filter(!is.na(AVC)) %>%
ggplot(aes(x = pH, y = Ell_R)) +
geom_point() +
facet_grid(name~AVC) +
geom_smooth(method = "lm")
ggsave("Ellenberg R vs pH change by AVC and survey.png",
path = "Outputs/Graphs/", width = 28, height = 20, units = "cm")
# pH in CaCl2 and Ellenberg R
phc_ell_comb <- ELL_diff_long %>%
filter(grepl("X", REP_ID)) %>%
filter(name %in% c("diff0716","diff0719")) %>%
full_join(na.omit(select(PHC, REP_ID, pH_change)))
str(phc_ell_comb)
md.pattern(phc_ell_comb)
# scatter plot of pH CaCl2 vs Ellenberg R change, no facets
phc_ell_comb %>%
# filter(!is.na(AVC)) %>%
ggplot(aes(x = pH_change, y = Ell_R)) +
geom_point() +
# facet_wrap(~AVC) +
geom_smooth(method = "lm")
# combine pH and Ellenberg R scores in wide format
str(PH)
str(ELL)
# differences
# NOTE(review): diff7807 / R78-style columns referenced here do not exist on
# the ELL object as built above — another section written against an earlier
# ELL definition; verify before re-running cold.
ph_ell_wide_diff <- ELL %>%
filter(REP_ID07 %in% PH$REP_ID) %>%
select(REP_ID = REP_ID07, diff7807, diff7898, diff9807) %>%
full_join(select(PH, REP_ID,diff7807, diff7898, diff9807),
suffix = c("_ellR","_ph"), by = "REP_ID")
md.pattern(ph_ell_wide_diff)
psych::pairs.panels(select_if(ph_ell_wide_diff, is.numeric))
# actual pH and Ellenberg R values
ph_ell_wide <- ELL %>%
filter(REP_ID07 %in% PH$REP_ID) %>%
select(REP_ID = REP_ID07, R78, R98, R07, R16) %>%
full_join(select(PH, REP_ID, PH1978, PH2000, PH2007, PH2016, PH2019),
suffix = c("_ellR","_phw"), by = "REP_ID")
md.pattern(ph_ell_wide)
psych::pairs.panels(select_if(ph_ell_wide, is.numeric),
ellipses = FALSE, rug = FALSE,
method = "spearman")
# strongest correlation tends to be current year
# Long format pairing pH and Ellenberg R per plot/year; survey campaigns are
# collapsed to common years (2000->1998, 2016/2019->2018) so the two
# variables can be matched.
ph_ell_long <- ph_ell_wide %>%
pivot_longer(ends_with(c("78","98","00","07","16","19")),
values_drop_na = TRUE) %>%
mutate(year = sapply(strsplit(name, "[A-Z]{1,2}"),"[",2),
variable = sapply(strsplit(name, "[0-9]{1,4}"),"[",1)) %>%
mutate(year = as.numeric(recode(year, "98" = "1998",
"78" = "1978",
"07" = "2007",
"16" = "2018",
"19" = "2018",
"2000" = "1998",
"2016" = "2018",
"2019" = "2018")),
variable = recode(variable,
"PH" = "Soil_pH",
"R" = "Ell_R")) %>%
select(-name) %>%
unique() %>%
pivot_wider(names_from = variable,
values_from = value)
ggplot(ph_ell_long, aes(x = Soil_pH, y = Ell_R)) +
geom_point(size = 1) +
facet_wrap(~year) +
labs(x = "Soil pH", y = "Ellenberg R")
ggsave("Ellenberg R vs Soil pH by year.png", path = "Outputs/Graphs",
width = 15, height = 15, units = "cm")
# checking if sigmoidal curve seems appropriate
# Hand-tuned candidate curves overlaid on the pH vs Ellenberg R scatter to
# judge a plausible functional form for later modelling.
# y: 4-parameter logistic; y2: same family, steeper/shifted.
x <- seq(3.5,9,0.1)
c1 <- 4.5
c2 <- 1.5
c3 <- 5
c4 <- 2
y <- c1/(1 + exp(-c2*(x - c3))) + c4
c1 <- 4.5
c2 <- 2
c3 <- 4.5
c4 <- 2
y2 <- c1/(1 + exp(-c2*(x - c3))) + c4
# (superseded immediately by the data.frame including y3 below)
dat <- data.frame(x,y,y2)
# asymmetrical sigmoidal curve
# y3: 5-parameter (Richards-type) logistic with asymmetry exponent c5
c1 <- 4
c2 <- 1.5
c3 <- 3.5
c4 <- 2.5
c5 <- 6
y3 <- c1/((1 + exp(-c2*(x - c3)))^c5) + c4
dat <- data.frame(x,y,y2,y3)
ggplot(ph_ell_long, aes(x = Soil_pH, y = Ell_R)) +
geom_point(size = 1) +
facet_wrap(~year) +
geom_smooth() +
geom_line(data = dat, aes(x = x, y = y),
colour = "purple", size = 1.5) +
geom_line(data = dat, aes(x = x, y = y2),
colour = "red", size = 1.5) +
geom_line(data = dat, aes(x = x, y = y3),
colour = "orange", size = 1.5) +
labs(x = "Soil pH", y = "Ellenberg R")
# seems reasonable - interestingly in 1978 the Ellenberg R value for any given
# pH is higher. So in model need to make sure that c3 varies by year, and not
# sure about the other parameters.
# other monotonic curves instead
# monomolecular/Mitscherlich law/von Bertalanffy law.
# (note: assigning to `c` shadows base::c as a variable name; function calls
# like c(-1,1) still resolve, but it is easy to trip over interactively)
a <- 7
b <- 0
c <- 0.3
ym1 <- a - (a-b)*exp(-c*x)
plot(x,ym1)
# von bertalanffy
a <- 7
b <- 0.5
c <- -2
yb <- a*(1-exp(-b*(x-c)))
plot(x,yb)
# Michaelis Menten
a <- 9
b <- 4
ym <- a*x/(b + x)
dat <- data.frame(x,ym,y2,y3,yb)
ggplot(ph_ell_long, aes(x = Soil_pH, y = Ell_R)) +
geom_point(size = 1) +
facet_wrap(~year) +
geom_smooth() +
geom_line(data = dat, aes(x = x, y = yb),
colour = "purple", size = 1.5) +
geom_line(data = dat, aes(x = x, y = y2),
colour = "red", size = 1.5) +
geom_line(data = dat, aes(x = x, y = y3),
colour = "orange", size = 1.5) +
labs(x = "Soil pH", y = "Ellenberg R")
# include CaCl2 but only recent years for graphical simplicity
phr_ell_wide <- ELL %>%
filter(REP_ID07 %in% PH$REP_ID) %>%
select(REP_ID = REP_ID07, R07, R16) %>%
full_join(select(PH, REP_ID, PH2007, PH2016),
suffix = c("_ellR","_phw"), by = "REP_ID") %>%
full_join(select(PHC, REP_ID, PHC2007, PHC2016))
psych::pairs.panels(select_if(phr_ell_wide, is.numeric),
ellipses = FALSE, rug = FALSE,
method = "spearman")
# Soil moisture ####
# Long-format soil moisture per plot/year from the tier-4 CS data (1998,
# 2007) plus the 2019 UKCEH survey; REP_ID is harmonised to the repeat-plot
# ID where one exists.
MOISTURE <- CS_tier4 %>%
mutate(REP_ID = ifelse(!is.na(REP_ID07), REP_ID07,
ifelse(!is.na(REP_ID98), REP_ID98,
ifelse(!is.na(REP_ID78), REP_ID78, NA)))) %>%
select(REP_ID, MOISTURE_CONTENT_07, MOISTURE_CONTENT_98) %>%
full_join(select(UK19_WET, REP_ID, MOISTURE_CONTENT_19 = `g_water/wet_weight_of_soil`)) %>%
# mutate(VWC_19 = 100*VWC_19) %>%
pivot_longer(starts_with("MOISTURE"), names_to = "variable",
values_to = "Moisture") %>%
mutate(Year = ifelse(variable == "MOISTURE_CONTENT_07", 2007,
ifelse(variable == "MOISTURE_CONTENT_98", 1998,
ifelse(variable == "MOISTURE_CONTENT_19", 2019, NA)))) %>%
left_join(select(CS_REP_ID, REPEAT_PLOT_ID, REP_ID = Y07)) %>%
mutate(REP_ID = ifelse(!is.na(REPEAT_PLOT_ID), REPEAT_PLOT_ID, REP_ID)) %>%
select(-REPEAT_PLOT_ID)
# per-year moisture distributions
ggplot(MOISTURE, aes(x = Moisture)) +
geom_histogram() +
facet_wrap(~Year, nrow = 3, scales = "free_y") +
labs(x = "Soil moisture (%)") +
scale_x_continuous(limits = c(0,100)) +
scale_y_continuous(expand = expansion(mult = c(0,0.1)))
ggsave("Soil moisture g per wet soil histograms.png", path = "Outputs/Graphs/",
width = 12, height = 15, units = "cm")
# Join long-format pH to soil moisture, collapsing the 2000 lab campaign into
# the 1998 survey year, and keep only the years with both measurements.
MOISTURE_PH <- PH_long %>%
mutate(year = ifelse(year == 2000, 1998, year)) %>%
full_join(rename(MOISTURE, year = Year)) %>%
filter(year %in% c(1998, 2007, 2019))
# pH against moisture with one log-linear fit per survey year
# (fix: the smoothing formula previously read 'y ~ log(x) + log(x)'; lm()
# silently drops the duplicated term, so the fit is identical — the duplicate
# was a typo and is removed to match the faceted plot below)
ggplot(MOISTURE_PH, aes(x = Moisture, y = pH,
colour = as.factor(year), fill = as.factor(year))) +
geom_point(alpha = 0.5, size = 0.8) +
geom_smooth(formula = 'y ~ log(x)', method = "lm") +
scale_colour_brewer(palette = "Dark2", name = "Year") +
scale_fill_brewer(palette = "Dark2", name = "Year") +
labs(x = "Soil moisture (g/g wet soil)") +
scale_y_continuous(limits = c(3.3,9.2))
ggsave("Soil moisture vs pH log line.png", path = "Outputs/Graphs",
width =14, height = 12, units = "cm")
# same relationship faceted by year
ggplot(MOISTURE_PH, aes(x = Moisture, y = pH)) +
geom_point(alpha = 0.5, size = 0.8, colour = "dodgerblue2") +
geom_smooth(colour = "black", formula = 'y ~ log(x)', method = "lm") +
facet_wrap(~year) +
labs(x = "Soil moisture (g/g wet soil)") +
scale_y_continuous(limits = c(3.3,9.2))
ggsave("Soil moisture vs pH facet log line.png", path = "Outputs/Graphs",
width =18, height = 12, units = "cm")
library(brms)
MOISTURE_PH$Year_cat <- as.factor(MOISTURE_PH$year)
# inspect default priors for a double-exponential nonlinear pH ~ moisture
# model (exploratory — no model is fitted here)
get_prior(bf(pH ~ a*exp(b*Moisture) + c*exp(d*Moisture) + e,
a + b + c + d + e ~1, nl = TRUE),
data = MOISTURE_PH)
# CHess soil moisture
# Reshape the gridded JULES/CHESS moisture data to long format; month order
# comes from the A..L prefixes, so as.numeric(as.factor(Month)) gives 1-12.
soil_moist_long <- soil_moist %>%
# `colnames<-`(c(paste0(LETTERS[1:14],colnames(soil_moist)))) %>%
pivot_longer(AJan:LDec, names_to = "Month", values_to = "Moisture") %>%
mutate(month_num = as.numeric(as.factor(Month)))
# monthly UK maps of modelled soil moisture
ggplot(soil_moist_long, aes(x = Measting, y = Nnorthing, fill = Moisture)) +
geom_tile() +
coord_fixed() +
facet_wrap(~month_num) +
theme(axis.ticks = element_blank(),
axis.line = element_blank(),
axis.text = element_blank(),
axis.title = element_blank())
ggsave("JULES soil moisture maps 2007.png",
path = "Outputs/Graphs/", width = 30, height = 30, units = "cm")
# Rainfall data ####
# Distributions of pre-survey rainfall; mean and sum over the 4-month window
# turn out to be essentially proportional (see abline check below).
ggplot(cs_survey_rainfall, aes(x = mean_rainfall)) +
geom_histogram() +
facet_wrap(~Year) +
labs(x = "Average monthly rainfall for 4 months pre-survey")
p1 <- ggplot(cs_survey_rainfall, aes(x = mean_rainfall)) +
geom_histogram() +
facet_wrap(~Year, ncol = 1, scales = "free_y") +
scale_x_continuous(expand = c(0,0)) +
labs(x = "Average monthly rainfall for 4 months pre-survey")
p2 <- ggplot(cs_survey_rainfall, aes(x = sum_rainfall)) +
geom_histogram() +
facet_wrap(~Year, ncol = 1, scales = "free_y") +
scale_x_continuous(expand = c(0,0)) +
labs(x = "Total rainfall for 4 months pre-survey")
p1 + p2
# sum should be ~4x mean (4-month window) — confirmed by slope-4 line
ggplot(cs_survey_rainfall, aes(x = mean_rainfall, y = sum_rainfall)) +
geom_point() +
geom_abline(slope=4, intercept = 0) +
facet_wrap(~Year)
# basically the same
p1
ggsave("Average monthly rainfall for 4 months pre-survey.png",
path = "Outputs/Graphs/",width = 12, height = 18, units = "cm")
# Rainfall and soil moisture ####
rain_moist <- cs_survey_rainfall %>% ungroup() %>%
mutate(Year = as.numeric(Year)) %>%
full_join(MOISTURE)
summary(rain_moist)
mice::md.pattern(rain_moist)
# which years lack rainfall data?
rain_moist %>% filter(is.na(mean_rainfall)) %>% .$Year %>% table()
ggplot(rain_moist, aes(x = mean_rainfall, y = Moisture,
colour = as.factor(Year))) +
geom_point()
ggplot(rain_moist, aes(x = mean_rainfall, y = Moisture)) +
geom_point() +
facet_wrap(~Year)
# add broad habitat and management context
rain_moist_hab <- left_join(rain_moist, BH_comb)
summary(rain_moist_hab)
ggplot(rain_moist_hab, aes(x = mean_rainfall, y = Moisture)) +
geom_point() +
facet_wrap(~BH_DESC)
rain_moist_man <- left_join(rain_moist, BH_IMP)
summary(rain_moist_man)
# overall vs per-management fits of moisture on rainfall, by year
ggplot(na.omit(rain_moist_man),
aes(x = mean_rainfall, y = Moisture)) +
geom_point(aes(colour = Management, fill = Management)) +
geom_smooth(method="lm", colour = "black") +
geom_smooth(method="lm", aes(colour = Management, fill = Management)) +
facet_wrap(~Year)
# compare rainfall to sample and EO soil moisture
rain_moist_hab <- left_join(rain_moist_hab, cs_loc_moist07_long)
ggplot(rain_moist_hab, aes(x = mean_moisture, y = Moisture)) +
geom_point(aes(colour = month))
# 2007 only: modelled (JULES) moisture at the sampling month vs field values
rain_moist_hab07 <- rain_moist_hab %>%
filter(Year == 2007) %>%
full_join(cs_loc_moist07_sample_month)
ggplot(rain_moist_hab07, aes(x = eo_moisture, y = Moisture)) +
geom_point(aes(colour = month))
ggplot(rain_moist_hab07, aes(x = eo_moisture, y = Moisture)) +
geom_point() +
labs(x = "JULES soil moisture", y = "Field soil moisture")
ggsave("Field moisture compared to JULES soil moisture.png",
path = "Outputs/Graphs/", width = 20, height = 12, units = "cm")
ggplot(rain_moist_hab07, aes(x = eo_moisture, y = mean_moisture)) +
geom_point()
ggplot(rain_moist_hab07, aes(x = mean_rainfall, y = Moisture)) +
geom_point(aes(colour = eo_moisture))
# attach 2007 plot coordinates for mapping
rain_moist_hab07 <- left_join(rename(rain_moist_hab07, REPEAT_PLOT_ID = REP_ID),
filter(plot_locations, YEAR == "y07"))
ggplot(rain_moist_hab07, aes(x = eo_moisture, y = Moisture)) +
geom_point(aes(colour = N_10_FIG_1M))
# Side-by-side GB maps: modelled moisture, field moisture, mean rainfall
p1_eo <- ggplot(filter(rain_moist_hab07, !is.na(eo_moisture)),
aes(x = E_10_FIG_1M, y = N_10_FIG_1M)) +
geom_point(aes(colour = eo_moisture)) +
coord_fixed() +
theme(axis.line = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()) +
scale_color_continuous(name = "") +
ggtitle("JULES soil moisture")
p2_meas <- ggplot(filter(rain_moist_hab07, !is.na(Moisture)),
aes(x = E_10_FIG_1M, y = N_10_FIG_1M)) +
geom_point(aes(colour = Moisture)) +
coord_fixed() +
theme(axis.line = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()) +
scale_color_continuous(name = "") +
ggtitle("Field soil moisture")
p3_rain <- ggplot(filter(rain_moist_hab07, !is.na(mean_rainfall)),
aes(x = E_10_FIG_1M, y = N_10_FIG_1M)) +
geom_point(aes(colour = mean_rainfall)) +
coord_fixed() +
theme(axis.line = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()) +
scale_color_continuous(name = "") +
ggtitle("Mean rainfall")
p1_eo + p2_meas + p3_rain
ggsave("Map of Soil moisture and rainfall over CS 2007.png",
path = "Outputs/Graphs", width = 30, height = 15, units = "cm")
# matching histograms of the three quantities
p1_eo <- ggplot(rain_moist_hab07, aes(x = eo_moisture)) +
geom_histogram(bins = 40)
p2_meas <- ggplot(rain_moist_hab07, aes(x = Moisture)) +
geom_histogram(bins = 40)
p3_rain <- ggplot(rain_moist_hab07, aes(x = mean_rainfall)) +
geom_histogram(bins = 40)
p1_eo + p2_meas + p3_rain
psych::pairs.panels(select(rain_moist_hab07, eo_moisture, Moisture,
mean_rainfall), rug=FALSE)
# how well do modelled moisture and rainfall predict field moisture?
summary(lm(Moisture ~ eo_moisture + mean_rainfall, data = rain_moist_hab07))
summary(lm(Moisture ~ mean_moisture + mean_rainfall, data = rain_moist_hab07))
# pH against moisture metrics (PH_moisture is built elsewhere in the file)
ggplot(PH_moisture,
aes(x = mean_moisture, y = PH_2007)) +
geom_point()
ggplot(PH_moisture,
aes(x = Moisture, y = PH_2007)) +
geom_point()
# NOTE(review): duplicate of the lm fitted just above — presumably left over
# from interactive work
summary(lm(Moisture ~ mean_moisture + mean_rainfall, data = rain_moist_hab07))
# add 2007 soil carbon/nitrogen to test C as a pH covariate
PH_moisture <- left_join(PH_moisture,
select(CS07_CN, REP_ID = REP_ID07,
C_PERCENT, N_PERCENT))
summary(lm(PH_2007 ~ log(C_PERCENT) + eo_moisture,
data = PH_moisture))
summary(lm(PHC_2007 ~ Moisture + log(C_PERCENT),
data = PH_moisture))
ggplot(PH_moisture,
aes(x = C_PERCENT, y = PH_2007)) +
geom_point() +
scale_x_log10()
# pH and atmospheric deposition ####
# Each re-assignment of ph_atdep pairs a different pH change (or level)
# with the plot-level deposition data for one reference Year.
ph_atdep <- PH %>% select(REP_ID, diff7807) %>%
mutate(Year = 2007) %>%
left_join(CS_plot_atdep)
ggplot(ph_atdep, aes(x = Sdep, y = diff7807))+
geom_point()
ph_atdep <- PH %>%
select(REP_ID, diff7819) %>%
na.omit() %>%
mutate(Year = 2018) %>%
left_join(CS_plot_atdep)
ggplot(ph_atdep, aes(x = Sdep, y = diff7819))+
geom_point()
# 2007->2019 change in both DIW (diff0719) and CaCl2 (diffc0719) pH vs S dep
ph_atdep <- PH %>%
mutate(diffc0719 = PHC_2019 - PHC_2007) %>%
select(REP_ID, diff0719, diffc0719) %>%
filter(!is.na(diff0719)) %>%
mutate(Year = 2018) %>%
left_join(CS_plot_atdep)
ggplot(ph_atdep, aes(x = Sdep, y = diff0719))+
geom_point()
ggplot(ph_atdep, aes(x = Sdep, y = diffc0719))+
geom_point()
# absolute 2007 pH levels vs S deposition
ph_atdep <- PH %>% select(REP_ID, PH_2007, PHC_2007) %>%
mutate(Year = 2007) %>%
left_join(CS_plot_atdep)
ggplot(ph_atdep, aes(x = Sdep, y = PH_2007))+
geom_point()
ggplot(ph_atdep, aes(x = Sdep, y = PHC_2007))+
geom_point()
# soil N against N deposition
summary(CS07_CN)
# C:N ratio for 2007 and 1998 surveys joined to cumulative N deposition
CN_atdep <- CS07_CN %>%
mutate(CN_ratio = C_PERCENT/N_PERCENT,
Year = 2007,
REP_ID = REP_ID07) %>%
select(REP_ID, Year, CN_ratio) %>%
full_join(mutate(CS98_CN,
CN_ratio = C_PERCENT/N_PERCENT,
Year = 1998,
REP_ID = REP_ID98)) %>%
left_join(CS_plot_atdep)
# NOTE(review): Habitat is not created in this pipeline — it is assumed to
# come in via CS_plot_atdep or an earlier join; confirm it exists here.
ggplot(CN_atdep,
aes(x = Ndep, y = CN_ratio)) +
geom_point() +
labs(x = "Cumulative N deposition", y = "C:N") +
facet_wrap(~Year + Habitat, nrow = 3)
# Soil nitrogen ####
# Investigating which total N based metric is most related to mineralisable
# N/nitrate. Options are N% or N:C, there are multiple metrics of mineralisable
# N but nitrate is known to be related to NPP.
# Mineralisable N joined to total C/N, with REP_ID harmonised to the
# repeat-plot ID where one exists (same pattern as MOISTURE above).
Nitrogen <- CS07_MINN %>%
mutate(REP_ID = paste0(SQUARE_NUM, PLOT_TYPE, REP_NUM)) %>%
left_join(select(CS_REP_ID, REPEAT_PLOT_ID, REP_ID = Y07)) %>%
mutate(REP_ID = ifelse(!is.na(REPEAT_PLOT_ID), REPEAT_PLOT_ID, REP_ID)) %>%
select(-REPEAT_PLOT_ID) %>%
left_join(CS07_CN)
# Scatterplots + Spearman correlations of each candidate total-N metric
# (N% and N:C) against mineralisable N / NH4 / NO3, per dry soil and per LOI.
# p1-p4 are collected into one figure below; observed rho values recorded
# in comments.
ggplot(Nitrogen, aes(x = N_PERCENT, y = NE_NMINTOT_SOIL)) +
geom_point() +
labs(x = "Total N (%)", y = "Total Mineralisable N (mg N / g dry soil)")
p3 <- ggplot(Nitrogen, aes(x = N_PERCENT, y = NE_NMINTOT_SOM)) +
geom_point() +
labs(x = "Total N (%)", y = "Total Mineralisable N (mg N / g LOI)") +
scale_y_log10()
cor(Nitrogen$N_PERCENT, Nitrogen$NE_NMINTOT_SOM, method = "spearman",
use = "complete.obs")
# [1] -0.5087721
ggplot(Nitrogen, aes(x = N_PERCENT, y = NE_NH4N_SOM)) +
geom_point() +
labs(x = "Total N (%)", y = "Total NH4 (mg N / g LOI)") +
scale_y_log10()
p2<- ggplot(Nitrogen, aes(x = N_PERCENT, y = NE_NO3N_SOM)) +
geom_point() +
labs(x = "Total N (%)", y = "Total NO3 (mg N / g LOI)") +
scale_y_log10()
cor(Nitrogen$N_PERCENT, Nitrogen$NE_NO3N_SOM, method = "spearman",
use = "complete.obs")
# [1] -0.5469838
Nitrogen$NC_ratio <- Nitrogen$N_PERCENT/Nitrogen$C_PERCENT
ggplot(Nitrogen, aes(x = NC_ratio, y = NE_NMINTOT_SOIL)) +
geom_point() +
labs(x = "Total N:C", y = "Total Mineralisable N (mg N / g dry soil)") +
scale_y_log10()
p4 <- ggplot(Nitrogen, aes(x = NC_ratio, y = NE_NMINTOT_SOM)) +
geom_point() +
labs(x = "Total N:C", y = "Total Mineralisable N (mg N / g LOI)") +
scale_y_log10()
cor(Nitrogen$NC_ratio, Nitrogen$NE_NMINTOT_SOM, method = "spearman",
use = "complete.obs")
# 0.5645039
ggplot(Nitrogen, aes(x = NC_ratio, y = NE_NH4N_SOM)) +
geom_point() +
labs(x = "Total N:C", y = "Total NH4 (mg N / g LOI)") +
scale_y_log10()
cor(Nitrogen$NC_ratio, Nitrogen$NE_NH4N_SOM, method = "spearman",
use = "complete.obs")
# [1] 0.1507093
p1 <- ggplot(Nitrogen, aes(x = NC_ratio, y = NE_NO3N_SOM)) +
geom_point() +
labs(x = "Total N:C", y = "Total NO3 (mg N / g LOI)") +
scale_y_log10()
cor(Nitrogen$NC_ratio, Nitrogen$NE_NO3N_SOM, method = "spearman",
use = "complete.obs")
# 0.608635
# 2x2 panel of the LOI-normalised comparisons (patchwork)
p2+p1+p3+p4
ggsave("Total N and mineralisable N by LOI.png",
path = "Outputs/Graphs/", width = 25, height = 20, units = "cm")
Nitrogen <- left_join(Nitrogen, select(CS07_PH, -BATCH_NUM))
ggplot(Nitrogen, aes(x = NC_ratio, y = PH2007_IN_WATER)) +
geom_point() +
labs(x = "Total N:C", y = "pH") +
scale_y_log10()
ggplot(Nitrogen, aes(x = N_PERCENT, y = C_PERCENT)) +
geom_point() +
labs(x = "Total N", y = "Total C") +
scale_y_log10()
ggplot(Nitrogen, aes(x = NC_ratio, y = NE_NMINTOT_SOM,
colour = PH2007_IN_WATER)) +
geom_point() +
labs(x = "Total N:C", y = "Total Mineralisable N (mg N / g LOI)") +
scale_y_log10()
coplot(NC_ratio ~ log(NE_NMINTOT_SOM) | PH2007_IN_WATER, Nitrogen)
coplot(log(NE_NMINTOT_SOM) ~ NC_ratio | PH2007_IN_WATER, Nitrogen)
p1 <- ggplot(Nitrogen, aes(x = NC_ratio, y = PH2007_IN_WATER)) +
geom_point() +
labs(x = "Total N:C", y = "pH") +
geom_smooth(method = "lm", fill = "#3366FF")
p2 <- ggplot(Nitrogen, aes(x = NE_NMINTOT_SOM, y = PH2007_IN_WATER)) +
geom_point() +
labs(x = "Total Mineralisable N (mg N / g LOI)", y = "pH") +
scale_x_log10() +
geom_smooth(method = "lm", fill = "#3366FF")
p3 <- ggplot(Nitrogen, aes(x = NE_NMINTOT_SOIL, y = PH2007_IN_WATER)) +
geom_point() +
labs(x = "Total Mineralisable N (mg N / g soil)", y = "pH") +
scale_x_log10() +
geom_smooth(method = "lm", fill = "#3366FF")
p4 <- ggplot(Nitrogen, aes(x = NE_NO3N_SOM, y = PH2007_IN_WATER)) +
geom_point() +
labs(x = "Total NO3-N (mg N / g LOI)", y = "pH") +
scale_x_log10() +
geom_smooth(method = "lm", fill = "#3366FF")
p1 + p2 + p3 + p4
ggsave("pH and N measurements.png", path = "Outputs/Graphs",
width = 25, height = 20, units = "cm")
Nitrogen2 <- Nitrogen %>%
left_join(filter(BH_IMP, Year == 2007)) %>%
filter(!is.na(Management))
p1 <- ggplot(Nitrogen2, aes(x = NC_ratio, y = PH2007_IN_WATER, colour = Management)) +
geom_point() +
labs(x = "Total N:C", y = "pH") +
geom_smooth(method = "lm", aes(fill = Management))
p2 <- ggplot(Nitrogen2, aes(x = NE_NMINTOT_SOM, y = PH2007_IN_WATER, colour = Management)) +
geom_point() +
labs(x = "Total Mineralisable N (mg N / g LOI)", y = "pH") +
scale_x_log10() +
geom_smooth(method = "lm", aes(fill = Management))
p3 <- ggplot(Nitrogen2, aes(x = NE_NMINTOT_SOIL, y = PH2007_IN_WATER, colour = Management)) +
geom_point() +
labs(x = "Total Mineralisable N (mg N / g soil)", y = "pH") +
scale_x_log10() +
geom_smooth(method = "lm", aes(fill = Management))
p4 <- ggplot(Nitrogen2, aes(x = NE_NO3N_SOM, y = PH2007_IN_WATER, colour = Management)) +
geom_point() +
labs(x = "Total NO3-N (mg N / g LOI)", y = "pH") +
scale_x_log10() +
geom_smooth(method = "lm", aes(fill = Management))
p1 + p2 + p3 + p4 + plot_layout(guides="collect")
ggsave("pH and N measurements by Management type.png", path = "Outputs/Graphs",
width = 25, height = 20, units = "cm")
Nitrogen3 <- Nitrogen2 %>%
left_join(filter(X_Ell, Year == 2007) %>%
select(REP_ID, contains("_R_")))
p1 <- ggplot(Nitrogen3, aes(x = NC_ratio, y = WH_R_W, colour = Management)) +
geom_point() +
labs(x = "Total N:C", y = "Ellenberg R (full X weighted)") +
geom_smooth(method = "lm", aes(fill = Management))
p2 <- ggplot(Nitrogen3, aes(x = NC_ratio, y = WH_R_UW, colour = Management)) +
geom_point() +
labs(x = "Total N:C", y = "Ellenberg R (full X unweighted)") +
geom_smooth(method = "lm", aes(fill = Management))
p3 <- ggplot(Nitrogen3, aes(x = NC_ratio, y = SM_R_W, colour = Management)) +
geom_point() +
labs(x = "Total N:C", y = "Ellenberg R (small X weighted)") +
geom_smooth(method = "lm", aes(fill = Management))
p4 <- ggplot(Nitrogen3, aes(x = NC_ratio, y = SM_R_UW, colour = Management)) +
geom_point() +
labs(x = "Total N:C", y = "Ellenberg R (small X unweighted)") +
geom_smooth(method = "lm", aes(fill = Management))
p1+p2+p3+p4+ plot_layout(guides="collect")
ggsave("Ellenberg R and NC by Management type.png", path = "Outputs/Graphs",
width = 25, height = 20, units = "cm")
# Moisture and rainfall and pH ####
# Look at whether soil moisture actually predicted by change in rainfall to
# evaluate whether change in rainfall is a useful metric
rain_moist <- cs_survey_rainfall %>% ungroup() %>%
mutate(Year = as.numeric(Year)) %>%
full_join(MOISTURE)
test <- rain_moist %>% select(-sum_rainfall) %>%
pivot_wider(names_from = Year, values_from = c(mean_rainfall, Moisture)) %>%
mutate(Moisture_diff9807 = Moisture_2007 - Moisture_1998,
Moisture_diff0719 = Moisture_2019 - Moisture_2007,
rain_diff9807 = mean_rainfall_2007 - mean_rainfall_1998,
rain_diff0719 = mean_rainfall_2019 - mean_rainfall_2007) %>%
select(REP_ID, contains("diff")) %>%
pivot_longer(contains("diff"), names_to = c("Variable","Time_period"),
names_sep = "_diff") %>%
pivot_wider(names_from = Variable, values_from = value) %>%
na.omit()
# Also combine with LOI as moisture highly dependent on SOM
test <- LOI %>%
mutate(LOI_2019 = ifelse(!is.na(LOI_2019), LOI_2019, LOI_2016)) %>%
mutate(LOI_diff9807 = LOI_2007 - LOI_1998,
LOI_diff0719 = LOI_2019 - LOI_2007) %>%
select(REP_ID, contains("diff")) %>%
pivot_longer(contains("diff"), names_to = c("Variable","Time_period"),
names_sep = "_diff") %>%
pivot_wider(names_from = Variable, values_from = value) %>%
right_join(test)
ggplot(test, aes(x = rain, y = Moisture)) +
geom_point() +
geom_smooth(method = "lm")
ggplot(test, aes(x = LOI, y = Moisture)) +
geom_point() +
geom_smooth(method = "lm")
summary(lm(Moisture ~ rain+LOI, test))
# Moisture difference predicted by rain + LOI difference (19%)
# however we want to predict change in pH
test2 <- PH_diff_long %>%
mutate(Time_period = gsub("diff","",as.character(name))) %>%
right_join(test)
ggplot(test2, aes(x = rain, y = pH)) +
geom_point() +
geom_smooth(method = "lm")
ggplot(test2, aes(x = LOI, y = pH)) +
geom_point() +
geom_smooth(method = "lm")
ggplot(test2, aes(x = Moisture, y = pH)) +
geom_point() +
geom_smooth(method = "lm")
summary(lm(pH ~ Moisture*rain*LOI, test2))
summary(lm(pH ~ Moisture, test2))
summary(lm(pH ~ rain, test2))
summary(lm(pH ~ LOI, test2))
# pretty low variance explained, change in rainfall explained a lot more than
# change in soil moisture or LOI but still low
# Sensitivity check on the rainfall accumulation window. The four cor()
# calls below are textually identical: each was run interactively after
# rebuilding test2 with a different rainfall window (2-5 months), and the
# printed result was pasted in as a comment. The recorded correlations
# support the initial assumption that a 4-month window is best, based on
# evidence provided by Don about the upwater monitoring network
# (presumably the UK Upland Waters Monitoring Network -- confirm).
# 2 month calculation
cor(test2$pH, test2$rain, use = "complete.obs")
# 0.05777657
# 3 month calculation
cor(test2$pH, test2$rain, use = "complete.obs")
# [1] 0.08958804
# 4 month calculation
cor(test2$pH, test2$rain, use = "complete.obs")
# [1] 0.1373099
# 5 month calculation
cor(test2$pH, test2$rain, use = "complete.obs")
# [1] 0.1050855
# Species functional graphs ####
str(Sp_Ell)
Sp_Ell %>%
na.omit() %>%
pivot_longer(BUTTLARVFOOD:Low_grass, names_to = "Function",
values_to = "Count") %>%
mutate(Count = ifelse(Count == 1, "Yes","No")) %>%
group_by(EBERGR, Function) %>%
count(Count) %>%
ungroup() %>% group_by(Function, EBERGR) %>%
mutate(prop = n/sum(n, na.rm = TRUE)) %>%
filter(Count == "Yes") %>%
select(-n,-Count) %>%
pivot_wider(names_from = Function, values_from = prop)
# Stacked bar chart: for each Ellenberg R score, the number of plant
# species that do vs do not provide each of three functions
# (butterfly larval food, nectar production, lowland grass indication).
p1 <- Sp_Ell %>%
  select(BRC_NUMBER, EBERGR, BUTTLARVFOOD,
         KgSughacovyr, Low_grass) %>%
  unique() %>%
  filter(!is.na(EBERGR)) %>%
  # Total = 1 per species so that summing Total counts species per score.
  mutate(Total = 1) %>%
  group_by(EBERGR) %>%
  # Explicit lambda: passing extra arguments through `...` in across()
  # (e.g. across(..., sum, na.rm = TRUE)) is deprecated as of dplyr 1.1.0.
  summarise(across(BUTTLARVFOOD:Total, ~ sum(.x, na.rm = TRUE))) %>%
  pivot_longer(c(BUTTLARVFOOD:Low_grass), names_to = "Function",
               values_to = "Yes_Functional") %>%
  mutate(Not_functional = Total - Yes_Functional) %>%
  # "Something" is the functional/non-functional split used as the fill.
  pivot_longer(c(Not_functional, Yes_Functional), names_to = "Something",
               values_to = "Count") %>%
  mutate(Function = recode(Function,
                           "BUTTLARVFOOD" = "Butterfly larvae food",
                           "KgSughacovyr" = "Nectar producing",
                           "Low_grass" = "Lowland grass indicators")) %>%
  ggplot(aes(x = EBERGR, y = Count, fill = Something)) +
  geom_bar(stat = "identity") +
  scale_fill_manual(values = c("grey", "black")) +
  facet_wrap(~Function, ncol = 1) +
  labs(x = "Ellenberg R", y = "Number of plant species") +
  theme_minimal() +
  theme(legend.position = "none",
        panel.grid = element_blank()) +
  NULL
p2 <- Ell_F %>%
select(Year, REP_ID, WH_R, F_BUTTLARVFOOD,
F_KgSughacovyr,
F_Low_grass) %>%
pivot_longer(starts_with("F_"), names_to = "Function",
values_to = "value") %>%
mutate(Function = recode(Function,
"F_BUTTLARVFOOD" = "Butterfly larvae food",
"F_KgSughacovyr" = "Nectar producing",
"F_Low_grass" = "Lowland grass indicators")) %>%
ggplot(aes(x = WH_R, y = value))+
geom_point(alpha = 0.1, colour = "#0072B2") +
facet_wrap(~Function, ncol = 1) +
labs(x = "Ellenberg R", y = "Proportion of plant species") +
theme_minimal() +
# geom_smooth() +
theme(axis.line = element_line(colour = "grey"),
panel.grid = element_blank()) +
NULL
p3 <- Ell_F %>%
select(Year, REP_ID, WH_R, Fr_BUTTLARVFOOD,
Fr_KgSughacovyr,
Fr_Low_grass) %>%
pivot_longer(starts_with("Fr"), names_to = "Function",
values_to = "value") %>%
mutate(Function = recode(Function,
"Fr_BUTTLARVFOOD" = "Butterfly larvae food",
"Fr_KgSughacovyr" = "Nectar producing",
"Fr_Low_grass" = "Lowland grass indicators")) %>%
ggplot(aes(x = WH_R, y = value))+
geom_point(alpha = 0.1, colour = "#0072B2") +
facet_wrap(~Function, ncol = 1) +
labs(x = "Ellenberg R", y = "Group richness") +
theme_minimal() +
# geom_smooth() +
theme(axis.line = element_line(colour = "grey"),
panel.grid = element_blank()) +
NULL
p1 + p2 + p3
ggsave("Ellenberg R and plant functions.png",
path = "Outputs/Graphs/",
width = 25, height = 15, units= "cm")
p1+p3
ggsave("Ellenberg R and plant functions cols 1 and 3.png",
path = "Outputs/Graphs/",
width = 18, height = 15, units= "cm")
p1 <- Sp_Ell %>%
select(-starts_with("nrec"),-contains("name"),
-Sphag_SPECIES) %>%
unique() %>%
filter(!is.na(EBERGR)) %>%
mutate(Total = 1) %>%
group_by(EBERGR) %>%
summarise(across(Coastal:Total, sum, na.rm = TRUE)) %>%
pivot_longer(c(Coastal:Arable), names_to = "Function",
values_to = "Yes_Functional") %>%
mutate(Not_functional = Total - Yes_Functional) %>%
pivot_longer(c(Not_functional, Yes_Functional), names_to = "Something",
values_to = "Count") %>%
# mutate(Count = ifelse(Count == 1, "Yes","No")) %>%
ggplot(aes(x = EBERGR, y = Count, fill = Something)) +
geom_bar(position = "stack", stat = "identity") +
scale_fill_manual(values = c("grey","black")) +
facet_wrap(~Function, ncol = 1) +
labs(x = "Ellenberg R", y = "Number of plant species") +
theme_minimal() +
theme(legend.position = "none",
panel.grid = element_blank()) +
NULL
p2 <- Ell_F %>%
select(Year, REP_ID, WH_R, starts_with("F_")) %>%
select(-F_Sphag_SPECIES) %>%
filter(!is.na(WH_R)) %>%
pivot_longer(starts_with("F_"), names_to = "Function",
values_to = "value") %>%
ggplot(aes(x = WH_R, y = value))+
geom_point(alpha = 0.1, colour = "#0072B2") +
facet_wrap(~Function, ncol = 1) +
labs(x = "Ellenberg R", y = "Proportion of plant species") +
theme_minimal() +
# geom_smooth() +
theme(axis.line = element_line(colour = "grey"),
panel.grid = element_blank()) +
NULL
p3 <- Ell_F %>%
select(Year, REP_ID, WH_R, starts_with("Fr")) %>%
select(-Fr_Sphag_SPECIES) %>%
filter(!is.na(WH_R)) %>%
pivot_longer(starts_with("Fr"), names_to = "Function",
values_to = "value") %>%
mutate(Function = recode(Function,
"Fr_Butt" = "Butterfly larvae food",
"Fr_Nectar" = "Nectar producing",
"Fr_Lgrass" = "Lowland grass indicators")) %>%
ggplot(aes(x = WH_R, y = value))+
geom_point(alpha = 0.1, colour = "#0072B2") +
facet_wrap(~Function, ncol = 1,
scales = "free_y") +
labs(x = "Ellenberg R", y = "Group richness") +
theme_minimal() +
# geom_smooth() +
theme(axis.line = element_line(colour = "grey"),
panel.grid = element_blank()) +
NULL
p1 + p2 + p3
ggsave("Ellenberg R and plant functions v2.png",
path = "Outputs/Graphs/",
width = 25, height = 100, units= "cm")
p1 <- Sp_Ell %>%
select(BRC_NUMBER, EBERGR, BUTTLARVFOOD,
KgSughacovyr,Low_grass,Cwr,BIRDFOOD,
Forage_grasses, IW) %>%
unique() %>%
filter(!is.na(EBERGR)) %>%
mutate(Total = 1) %>%
group_by(EBERGR) %>%
summarise(across(BUTTLARVFOOD:Total, sum, na.rm = TRUE)) %>%
pivot_longer(c(BUTTLARVFOOD:IW), names_to = "Function",
values_to = "Yes_Functional") %>%
mutate(Not_functional = Total - Yes_Functional) %>%
pivot_longer(c(Not_functional, Yes_Functional), names_to = "Something",
values_to = "Count") %>%
mutate(Function = recode(Function,
"BUTTLARVFOOD" = "Butterfly larvae food",
"KgSughacovyr" = "Nectar producing",
"Low_grass" = "Lowland grass indicators",
"BIRDFOOD" = "Bird food",
"Cwr" = "Crop wild relatives",
"Forage_grasses" = "Forage grasses",
"IW" = "Injurious weeds")) %>%
ggplot(aes(x = EBERGR, y = Count, fill = Something)) +
geom_bar(stat = "identity") +
scale_fill_manual(values = c("grey","black")) +
facet_wrap(~Function, ncol = 1) +
labs(x = "Ellenberg R", y = "Number of plant species") +
theme_minimal() +
theme(legend.position = "none",
panel.grid = element_blank()) +
NULL
p3 <- Ell_F %>%
select(Year, REP_ID, WH_R, Fr_BUTTLARVFOOD,
Fr_KgSughacovyr,
Fr_Low_grass,Fr_Low_grass,Fr_Cwr,Fr_BIRDFOOD,
Fr_Forage_grasses, Fr_IW) %>%
pivot_longer(starts_with("Fr"), names_to = "Function",
values_to = "value") %>%
mutate(Function = recode(Function,
"Fr_BUTTLARVFOOD" = "Butterfly larvae food",
"Fr_KgSughacovyr" = "Nectar producing",
"Fr_Low_grass" = "Lowland grass indicators",
"Fr_BIRDFOOD" = "Bird food",
"Fr_Cwr" = "Crop wild relatives",
"Fr_Forage_grasses" = "Forage grasses",
"Fr_IW" = "Injurious weeds")) %>%
ggplot(aes(x = WH_R, y = value))+
geom_point(alpha = 0.1, colour = "#0072B2") +
facet_wrap(~Function, ncol = 1, scales = "free_y") +
labs(x = "Ellenberg R", y = "Group richness") +
theme_minimal() +
# geom_smooth() +
theme(axis.line = element_line(colour = "grey"),
panel.grid = element_blank()) +
NULL
p1 + p3
ggsave("Ellenberg R and plant functions v3.png",
path = "Outputs/Graphs/",
width = 15, height = 25, units= "cm")
ggplot(X_Ell_nect, aes(x = WH_R, y = Nectar)) +
geom_point(alpha = 0.1) +
scale_y_log10()
library(brms)
mod_pr <- c(prior(normal(0,1), class = "b"),
prior(student_t(3, 0, 1), class = "Intercept"),
prior(student_t(3, 0, 1), class = "sd"),
prior(student_t(3, 0, 1), class = "sigma"),
prior(normal(0,1), class = "ar"))
str(Ell_F)
Ell_F <- Ell_F %>% ungroup() %>%
mutate(SQUARE = sapply(strsplit(REP_ID, "[A-Z]"),"[",1),
YR = as.factor(Year),
YRnm = as.integer(YR))
buttmod <- brm(F_Butt ~ WH_R + (YR|YR*SQUARE) +
ar(time = YRnm, gr = REP_ID),
data = Ell_F, prior = mod_pr, cores = 4,
iter = 4000)
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin R wrappers over the compiled routines registered by the gridkernel
# package. This file is generated by Rcpp::compileAttributes(); make any
# changes in the C++ sources, not in these wrappers. Argument semantics
# (e.g. that x is a numeric grid and k a kernel) are inferred from the
# names -- confirm against the C++ definitions.
calckernelc <- function(x, k, row, col) {
  .Call(`_gridkernel_calckernelc`, x, k, row, col)
}

kernelsmoothc <- function(x, k) {
  .Call(`_gridkernel_kernelsmoothc`, x, k)
}

upscalec <- function(x, factor, maxPNA) {
  # maxPNA presumably bounds the allowed proportion of NA cells per
  # aggregated block -- TODO confirm in the C++ source.
  .Call(`_gridkernel_upscalec`, x, factor, maxPNA)
}
| /R/RcppExports.R | no_license | ethanplunkett/gridkernel | R | false | false | 405 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
calckernelc <- function(x, k, row, col) {
.Call(`_gridkernel_calckernelc`, x, k, row, col)
}
kernelsmoothc <- function(x, k) {
.Call(`_gridkernel_kernelsmoothc`, x, k)
}
upscalec <- function(x, factor, maxPNA) {
.Call(`_gridkernel_upscalec`, x, factor, maxPNA)
}
|
library(dplyr)
library(ggplot2)
library(svglite)
# Load the data: raw script lines of "The Office".
data <- data.frame(read.csv("../data/the-office-lines - scripts.csv", sep=",", header = TRUE))
# Data preparation: drop deleted lines, count lines per speaker,
# then keep the 12 characters with the most lines.
data <- data[data$deleted == FALSE, c("scene", "speaker")]
data <- data %>% count(speaker)
data <- data[order(data$n, decreasing = TRUE),]
data <- data[1:12,]
# Generate the plot: bar chart of line counts per character.
# Axis labels and title are intentionally in Polish.
p <- ggplot(data = data, aes(x = factor(speaker, levels = data$speaker))) +
  geom_bar(aes(y = n), stat = "identity", fill = "midnightblue") +
  labs(x = "Bohater serialu",
       y = "Liczba linii tekstu",
       title = "Najważniejsi bohaterowie") +
  theme(axis.text.x = element_text(family = "cambria", color="azure4", size=14, angle = 40),
        axis.text.y = element_text(family = "cambria", color="azure4", size = 14),
        axis.title.x = element_text(family = "cambria", size = 16),
        axis.title.y = element_text(family = "cambria", size = 16),
        plot.title = element_text(family = "cambria", size = 18),
        panel.background = element_rect(fill = "transparent"),
        plot.background = element_rect(fill = "transparent", color = NA))
# Save the plot (left commented out so sourcing the script has no side effects).
#ggsave("lines_share.svg", bg = "transparent", p)
| /Projekt1/BrzozowskiPolakowskiSiemaszko/Lines_share.R | no_license | Siemashko/TechnikiWizualizacjiDanych2018 | R | false | false | 1,202 | r | library(dplyr)
library(ggplot2)
library(svglite)
#Wczytanie danych
data <- data.frame(read.csv("../data/the-office-lines - scripts.csv", sep=",", header = TRUE))
#Obróbka danych
data <- data[data$deleted == FALSE, c("scene", "speaker")]
data <- data %>% count(speaker)
data <- data[order(data$n, decreasing = TRUE),]
data <- data[1:12,]
#Wygenerowanie wykresu
p <- ggplot(data = data, aes(x = factor(speaker, levels = data$speaker))) +
geom_bar(aes(y = n), stat = "identity", fill = "midnightblue") +
labs(x = "Bohater serialu",
y = "Liczba linii tekstu",
title = "Najważniejsi bohaterowie") +
theme(axis.text.x = element_text(family = "cambria", color="azure4", size=14, angle = 40),
axis.text.y = element_text(family = "cambria", color="azure4", size = 14),
axis.title.x = element_text(family = "cambria", size = 16),
axis.title.y = element_text(family = "cambria", size = 16),
plot.title = element_text(family = "cambria", size = 18),
panel.background = element_rect(fill = "transparent"),
plot.background = element_rect(fill = "transparent", color = NA))
#Zapisanie wykresu
#ggsave("lines_share.svg", bg = "transparent", p)
|
#' @title Assertions
#' @name tar_assert
#' @family utilities to extend targets
#' @description These functions assert the correctness of user inputs
#' and generate custom error conditions as needed. Useful
#' for writing packages built on top of `targets`.
#' @param x R object, input to be validated. The kind of object depends on the
#' specific assertion function called.
#' @param msg Character of length 1, a message to be printed to the console
#' if `x` is invalid.
#' @param choices Character vector of choices of `x` for certain assertions.
#' @param threshold Numeric of length 1, lower/upper bound for
#' assertions like `tar_assert_le()`/`tar_assert_ge()`.
#' @param y R object, value to compare against `x`.
#' @param class Character vector of expected class names.
#' @param package Character of length 1, name of an R package.
#' @param path Character, file path.
#' @param pattern Character of length 1, a `grep` pattern for certain
#' assertions.
#' @examples
#' tar_assert_chr("123")
#' try(tar_assert_chr(123))
NULL
tar_assert_callr_function <- function(callr_function) {
if (!is.null(callr_function)) {
tar_assert_function(
callr_function,
"callr_function must be a function or NULL."
)
}
}
#' @export
#' @rdname tar_assert
tar_assert_chr <- function(x, msg = NULL) {
  # Assert that x is a character vector; otherwise signal a validation
  # error with `msg` (or a default naming the offending argument).
  if (is.character(x)) {
    return(invisible())
  }
  fallback <- paste(deparse(substitute(x)), "must be a character.")
  tar_throw_validate(msg %|||% fallback)
}
tar_assert_chr_no_delim <- function(x, msg = NULL) {
tar_assert_chr(x, paste(deparse(substitute(x)), "must be a character"))
if (any(grepl("|", x, fixed = TRUE) | grepl("*", x, fixed = TRUE))) {
default <- paste(deparse(substitute(x)), "must not contain | or *")
tar_throw_validate(msg %|||% default)
}
}
tar_assert_correct_fields <- function(object, constructor) {
tar_assert_identical_chr(
sort(names(object)),
sort(names(formals(constructor)))
)
}
tar_assert_target_dag <- function(x, msg = NULL) {
if (!inherits(x, "igraph") || !igraph::is_dag(x)) {
default <- paste(
"dependency graph contains a cycle.",
"If target x depends on target y, then",
"target y must not depend on target x,",
"either directly or indirectly."
)
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_dbl <- function(x, msg = NULL) {
  # Assert that x is numeric (integer or double); otherwise signal a
  # validation error with `msg` (or a default naming the argument).
  if (is.numeric(x)) {
    return(invisible())
  }
  fallback <- paste(deparse(substitute(x)), "must be numeric.")
  tar_throw_validate(msg %|||% fallback)
}
#' @export
#' @rdname tar_assert
tar_assert_df <- function(x, msg = NULL) {
  # Assert that x is a data frame; otherwise signal a validation error
  # with `msg` (or a default naming the argument).
  if (is.data.frame(x)) {
    return(invisible())
  }
  fallback <- paste(deparse(substitute(x)), "must be a data frame.")
  tar_throw_validate(msg %|||% fallback)
}
#' @export
#' @rdname tar_assert
tar_assert_equal_lengths <- function(x, msg = NULL) {
lengths <- map_int(x, length)
if (length(unique(lengths)) > 1L) {
targets::tar_throw_validate(msg %|||% "x must have equal-length elements.")
}
}
#' @export
#' @rdname tar_assert
tar_assert_envir <- function(x, msg = NULL) {
if (!is.environment(x)) {
default <- paste(deparse(substitute(x)), "must be an environment.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_expr <- function(x, msg = NULL) {
if (!is.expression(x)) {
default <- paste(deparse(substitute(x)), "must be an expression.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_flag <- function(x, choices, msg = NULL) {
  # Assert that x is a length-1 character string contained in `choices`.
  # `label` captures the caller's expression for error messages.
  label <- deparse(substitute(x))
  tar_assert_chr(x, msg %|||% paste(label, "must be a character"))
  tar_assert_scalar(x, msg %|||% paste(label, "must have length 1"))
  if (all(x %in% choices)) {
    return(invisible())
  }
  tar_throw_validate(
    msg %|||% paste(
      label,
      "equals",
      deparse(x),
      "but must be in",
      deparse(choices)
    )
  )
}
tar_assert_format <- function(format) {
tar_assert_scalar(format)
tar_assert_chr(format)
store_assert_format_setting(as_class(format))
}
#' @export
#' @rdname tar_assert
tar_assert_function <- function(x, msg = NULL) {
if (!is.function(x)) {
tar_throw_validate(msg %|||% "input must be a function.")
}
}
#' @export
#' @rdname tar_assert
tar_assert_ge <- function(x, threshold, msg = NULL) {
  # Assert that every element of x is greater than or equal to threshold.
  # Bug fix: the error message previously said "less than or equal to"
  # (copy-pasted from tar_assert_le()), the opposite of what is checked.
  if (any(x < threshold)) {
    default <- paste(
      deparse(substitute(x)),
      "must be greater than or equal to",
      threshold
    )
    tar_throw_validate(msg %|||% default)
  }
}
#' @export
#' @rdname tar_assert
tar_assert_identical <- function(x, y, msg = NULL) {
if (!identical(x, y)) {
default <- paste(
deparse(substitute(x)),
"and",
deparse(substitute(y)),
"must be identical."
)
tar_throw_validate(msg %|||% default)
}
}
tar_assert_identical_chr <- function(x, y, msg = NULL) {
if (!identical(x, y)) {
msg_x <- paste0(deparse(x), collapse = "")
msg_y <- paste0(deparse(y), collapse = "")
tar_throw_validate(msg %|||% paste(msg_x, "and", msg_y, "not identical."))
}
}
#' @export
#' @rdname tar_assert
tar_assert_in <- function(x, choices, msg = NULL) {
  # Assert that every element of x is among `choices`; otherwise signal
  # a validation error showing the offending value and the valid set.
  if (all(x %in% choices)) {
    return(invisible())
  }
  fallback <- paste(
    deparse(substitute(x)),
    "equals",
    deparse(x),
    "but must be in",
    deparse(choices)
  )
  tar_throw_validate(msg %|||% fallback)
}
#' @export
#' @rdname tar_assert
tar_assert_not_dirs <- function(x, msg = NULL) {
lapply(x, tar_assert_not_dir, msg = msg)
}
#' @export
#' @rdname tar_assert
tar_assert_not_dir <- function(x, msg = NULL) {
if (dir.exists(x)) {
tar_throw_validate(msg %|||% paste(deparse(x), "must not be a directory."))
}
}
#' @export
#' @rdname tar_assert
tar_assert_not_in <- function(x, choices, msg = NULL) {
if (any(x %in% choices)) {
tar_throw_validate(msg %|||% paste(deparse(x), "is in", deparse(choices)))
}
}
#' @export
#' @rdname tar_assert
tar_assert_inherits <- function(x, class, msg = NULL) {
  # Assert that x inherits from the given class(es).
  # Bug fix: the default message contained a stray literal "x"
  # ("foo x does not inherit from ..."), and paste() produced a
  # vector-valued message when `class` had length > 1; the classes are
  # now collapsed into a single string.
  if (!inherits(x, class)) {
    default <- paste(
      deparse(substitute(x)),
      "does not inherit from",
      paste(class, collapse = ", ")
    )
    tar_throw_validate(msg %|||% default)
  }
}
#' @export
#' @rdname tar_assert
tar_assert_int <- function(x, msg = NULL) {
if (!is.integer(x)) {
default <- paste(deparse(substitute(x)), "must have mode integer.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_internet <- function(msg = NULL) {
tar_assert_package("curl")
if (!curl::has_internet()) {
# This line cannot be covered in automated tests
# because internet is usually on.
tar_throw_run("no internet") # nocov
}
}
#' @export
#' @rdname tar_assert
tar_assert_lang <- function(x, msg = NULL) {
if (!is.language(x)) {
tar_throw_validate(msg %|||% "x must be a language object")
}
}
#' @export
#' @rdname tar_assert
tar_assert_le <- function(x, threshold, msg = NULL) {
if (any(x > threshold)) {
default <- paste(
deparse(substitute(x)),
"must be less than or equal to",
threshold
)
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_list <- function(x, msg = NULL) {
  # Assert that x is a list; otherwise signal a validation error.
  # Bug fix: `default` was computed but never used -- the function threw
  # the hard-coded string "x must be a list." instead of a message naming
  # the caller's argument.
  if (!is.list(x)) {
    default <- paste(deparse(substitute(x)), "must be a list.")
    tar_throw_validate(msg %|||% default)
  }
}
#' @export
#' @rdname tar_assert
tar_assert_lgl <- function(x, msg = NULL) {
if (!is.logical(x)) {
default <- paste(deparse(substitute(x)), "must be logical.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_name <- function(x) {
tar_assert_chr(x)
tar_assert_scalar(x)
if (!nzchar(x)) {
tar_throw_validate("name must be a nonempty string.")
}
if (!identical(x, make.names(x))) {
tar_throw_validate(x, " is not a valid symbol name.")
}
if (grepl("\\.$", x)) {
tar_throw_validate(x, " ends with a dot.")
}
}
#' @export
#' @rdname tar_assert
tar_assert_names <- function(x, msg = NULL) {
  # Assert that every element of x is a syntactically valid R name.
  # Fixes the message typo "x must legal symbol names" and makes the
  # default consistent with sibling assertions by deparsing the caller's
  # argument instead of hard-coding "x".
  if (any(x != make.names(x, unique = FALSE))) {
    default <- paste(deparse(substitute(x)), "must be legal symbol names.")
    tar_throw_validate(msg %|||% default)
  }
}
#' @export
#' @rdname tar_assert
tar_assert_nonempty <- function(x, msg = NULL) {
  # Assert that x has at least one element; otherwise signal a
  # validation error with `msg` (or a default naming the argument).
  if (length(x) > 0) {
    return(invisible())
  }
  fallback <- paste(deparse(substitute(x)), "must be nonempty.")
  tar_throw_validate(msg %|||% fallback)
}
tar_assert_none_na <- function(x, msg = NULL) {
if (anyNA(x)) {
default <- paste(deparse(substitute(x)), "must have no missing values.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_not_expr <- function(x, msg = NULL) {
if (is.expression(x)) {
tar_throw_validate(msg %|||% "x must not be an expression object")
}
}
#' @export
#' @rdname tar_assert
tar_assert_nzchar <- function(x, msg = NULL) {
if (any(!nzchar(x))) {
default <- paste(deparse(substitute(x)), "has empty character strings.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_package <- function(package) {
tryCatch(
rlang::check_installed(package),
error = function(e) {
tar_throw_validate(conditionMessage(e))
}
)
}
#' @export
#' @rdname tar_assert
tar_assert_path <- function(path, msg = NULL) {
missing <- !file.exists(path)
if (any(missing)) {
tar_throw_validate(
msg %|||% paste0(
"missing files: ",
paste(path[missing], collapse = ", ")
)
)
}
}
#' @export
#' @rdname tar_assert
tar_assert_match <- function(x, pattern, msg = NULL) {
if (!grepl(pattern = pattern, x = x)) {
default <- paste(deparse(substitute(x)), "does not match pattern", pattern)
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_nonmissing <- function(x, msg = NULL) {
if (rlang::is_missing(x)) {
default <- paste(deparse(substitute(x)), "is missing with no default.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_positive <- function(x, msg = NULL) {
if (any(x <= 0)) {
default <- paste(deparse(substitute(x)), "is not all positive.")
tar_throw_validate(msg %|||% default)
}
}
tar_assert_resources <- function(resources) {
  # Validate the `resources` argument of tar_target()/tar_option_set():
  # a named list produced by tar_resources(), in which every element is a
  # recognized resource group of class "tar_resources". Unknown or
  # hand-built groups trigger deprecation warnings (legacy unstructured
  # lists). Fixes the warning-text typo "Unstructrued".
  tar_assert_list(resources, "resources must be list. Use tar_resources().")
  if (length(resources)) {
    tar_assert_nonempty(names(resources), "resources list must have names.")
    tar_assert_nzchar(names(resources), "resources names must be nonempty")
    tar_assert_unique(names(resources), "resources names must be unique.")
  }
  for (name in names(resources)) {
    if (!(name %in% names(formals(tar_resources)))) {
      # Group name not among tar_resources() arguments: legacy usage.
      tar_warn_deprecate(
        "found non-standard resource group ",
        name,
        " in resources list. Unstructured resources lists are deprecated ",
        "in targets >= 0.5.0.9000 (2021-06-07). Use tar_resources() ",
        "and various tar_resources_*() helper functions to create the ",
        "resources argument to tar_target() and tar_option_set()."
      )
    } else if (!inherits(resources[[name]], "tar_resources")) {
      # Known group name, but the element was not built by a helper.
      tar_warn_deprecate(
        "found incorrectly formatted resource group ",
        name,
        " in resources list. Unstructured resources lists are deprecated ",
        "in targets >= 0.5.0.9000 (2021-06-07). Use tar_resources_clustermq()",
        " and various other tar_resources_*() helper functions to create ",
        "arguments to tar_resources()."
      )
    }
  }
}
#' @export
#' @rdname tar_assert
tar_assert_scalar <- function(x, msg = NULL) {
  # Assert that x has exactly one element; otherwise signal a validation
  # error with `msg` (or a default naming the argument).
  if (length(x) == 1) {
    return(invisible())
  }
  fallback <- paste(deparse(substitute(x)), "must have length 1.")
  tar_throw_validate(msg %|||% fallback)
}
tar_assert_store <- function(store) {
tar_assert_path(
store,
paste(
"data store path", store, "not found.",
"utility functions like tar_read() and tar_progress() require a",
"data store (default: _targets/) produced by tar_make() or similar."
)
)
}
#' @export
#' @rdname tar_assert
tar_assert_target <- function(x, msg = NULL) {
msg <- msg %|||% paste(
"Found a non-target object. The target script file (default: _targets.R)",
"must end with a list of tar_target() objects (recommended)",
"or a tar_pipeline() object (deprecated)."
)
tar_assert_inherits(x = x, class = "tar_target", msg = msg)
}
#' @export
#' @rdname tar_assert
tar_assert_target_list <- function(x) {
msg <- paste(
"The target script file (default: _targets.R)",
"must end with a list of tar_target() objects (recommended)",
"or a tar_pipeline() object (deprecated). Each element of the target list",
"must be a target object or nested list of target objects."
)
tar_assert_list(x, msg = msg)
map(x, tar_assert_target, msg = msg)
}
tar_assert_script <- function(script) {
  # Validate the target script file (default: _targets.R). Checks, in order:
  #   1. The file exists.
  #   2. The script does not call exported tar_*() functions that would
  #      source the target script again and cause infinite recursion.
  #   3. The script does not use devtools/pkgload loaders (load_all() etc.),
  #      which place user functions where dependency tracking may miss them.
  msg <- paste0(
    "could not find file ",
    script,
    ". Main functions like tar_make() require a target script file ",
    "(default: _targets.R) to define the pipeline. ",
    "Functions tar_edit() and tar_script() can help. "
  )
  tar_assert_path(script, msg)
  # All symbols referenced anywhere in the script, including called
  # functions (functions = TRUE).
  vars <- all.vars(parse(file = script), functions = TRUE)
  # Stems of exported tar_*() functions that source the target script
  # (tar_make*, tar_glimpse, tar_outdated, ...). Calling any of them from
  # inside the script would recurse, so they are forbidden below.
  exclude <- c(
    "glimpse",
    "make",
    "manifest",
    "network",
    "outdated",
    "prune",
    "renv",
    "sitrep",
    "validate",
    "visnetwork"
  )
  # Expand the stems into the actual exported function names, e.g.
  # "^tar_make" also matches tar_make_clustermq() and tar_make_future().
  pattern <- paste(paste0("^tar_", exclude), collapse = "|")
  choices <- grep(pattern, getNamespaceExports("targets"), value = TRUE)
  msg <- paste(
    "The target script file",
    script,
    "must not call tar_make() or similar functions",
    "that would source the target script again and cause infinite recursion."
  )
  tar_assert_not_in(vars, choices, msg)
  # %s below is filled in with the specific loader name via sprintf().
  msg <- paste(
    "Do not use %s() from {devtools} or {pkgload} to load",
    "packages or custom functions/globals for {targets}. If you do,",
    "custom functions will go to a package environment where {targets}",
    "may not track them, and the loaded data will not be available in",
    "parallel workers created by tar_make_clustermq() or tar_make_future().",
    "Read https://books.ropensci.org/targets/practices.html#loading-and-configuring-r-packages", # nolint
    "and https://books.ropensci.org/targets/practices.html#packages-based-invalidation", # nolint
    "for the correct way to load packages for {targets} pipelines.",
    "Warnings like this one are important, but if you must suppress them, ",
    "you can do so with Sys.setenv(TAR_WARN = \"false\")."
  )
  # Warn once per loader found, unless the user opted out via TAR_WARN.
  for (loader in c("load_all", "load_code", "load_data", "load_dll")) {
    if (!identical(Sys.getenv("TAR_WARN"), "false") && loader %in% vars) {
      tar_warn_validate(sprintf(msg, loader))
    }
  }
}
#' @export
#' @rdname tar_assert
tar_assert_true <- function(x, msg = NULL) {
  # Assert that x evaluates to TRUE; otherwise signal a validation error.
  # Fixes the message typo "does not evaluate not TRUE".
  if (!x) {
    default <- paste(
      deparse(substitute(x)),
      "does not evaluate to TRUE."
    )
    tar_throw_validate(msg %|||% default)
  }
}
#' @export
#' @rdname tar_assert
tar_assert_unique <- function(x, msg = NULL) {
  # Assert that x has no duplicated elements.
  if (anyDuplicated(x)) {
    dups <- paste(unique(x[duplicated(x)]), collapse = ", ")
    default <- paste(
      deparse(substitute(x)),
      "has duplicated entries:"
    )
    # Bug fix: append the duplicated values even when a custom msg is
    # supplied. Callers such as tar_assert_unique_targets() pass a message
    # ending in ":" and previously the offending values were dropped.
    tar_throw_validate(paste(msg %|||% default, dups))
  }
}
#' @export
#' @rdname tar_assert
# Assert that the vector of target names x contains no duplicates.
tar_assert_unique_targets <- function(x) {
  tar_assert_unique(x, "duplicated target names:")
}
# nocov start
# tested in tests/interactive/test-tar_watch.R
# Assert that the suggested packages are installed — presumably the ones
# needed by the tar_watch() Shiny app (per the test file referenced above).
tar_assert_watch_packages <- function() {
  pkgs <- c(
    "bs4Dash",
    "DT",
    "gt",
    "markdown",
    "pingr",
    "shiny",
    "shinybusy",
    "shinyWidgets",
    "visNetwork"
  )
  tar_assert_package(pkgs)
}
# nocov end
| /R/utils_assert.R | permissive | billdenney/targets | R | false | false | 15,613 | r | #' @title Assertions
#' @name tar_assert
#' @family utilities to extend targets
#' @description These functions assert the correctness of user inputs
#' and generate custom error conditions as needed. Useful
#' for writing packages built on top of `targets`.
#' @param x R object, input to be validated. The kind of object depends on the
#' specific assertion function called.
#' @param msg Character of length 1, a message to be printed to the console
#' if `x` is invalid.
#' @param choices Character vector of choices of `x` for certain assertions.
#' @param threshold Numeric of length 1, lower/upper bound for
#' assertions like `tar_assert_le()`/`tar_assert_ge()`.
#' @param y R object, value to compare against `x`.
#' @param class Character vector of expected class names.
#' @param package Character of length 1, name of an R package.
#' @param path Character, file path.
#' @param pattern Character of length 1, a `grep` pattern for certain
#' assertions.
#' @examples
#' tar_assert_chr("123")
#' try(tar_assert_chr(123))
NULL
tar_assert_callr_function <- function(callr_function) {
if (!is.null(callr_function)) {
tar_assert_function(
callr_function,
"callr_function must be a function or NULL."
)
}
}
#' @export
#' @rdname tar_assert
tar_assert_chr <- function(x, msg = NULL) {
  # Assert that x is a character vector; throw a validation error otherwise.
  if (is.character(x)) {
    return(invisible())
  }
  tar_throw_validate(
    msg %|||% paste(deparse(substitute(x)), "must be a character.")
  )
}
tar_assert_chr_no_delim <- function(x, msg = NULL) {
tar_assert_chr(x, paste(deparse(substitute(x)), "must be a character"))
if (any(grepl("|", x, fixed = TRUE) | grepl("*", x, fixed = TRUE))) {
default <- paste(deparse(substitute(x)), "must not contain | or *")
tar_throw_validate(msg %|||% default)
}
}
tar_assert_correct_fields <- function(object, constructor) {
tar_assert_identical_chr(
sort(names(object)),
sort(names(formals(constructor)))
)
}
tar_assert_target_dag <- function(x, msg = NULL) {
if (!inherits(x, "igraph") || !igraph::is_dag(x)) {
default <- paste(
"dependency graph contains a cycle.",
"If target x depends on target y, then",
"target y must not depend on target x,",
"either directly or indirectly."
)
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_dbl <- function(x, msg = NULL) {
if (!is.numeric(x)) {
default <- paste(deparse(substitute(x)), "must be numeric.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_df <- function(x, msg = NULL) {
if (!is.data.frame(x)) {
default <- paste(deparse(substitute(x)), "must be a data frame.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_equal_lengths <- function(x, msg = NULL) {
  # Assert that all elements of x have the same length.
  lengths <- map_int(x, length)
  if (length(unique(lengths)) > 1L) {
    # Consistency fix: call tar_throw_validate() unqualified like every other
    # assertion in this file, and name the offending object in the default
    # message the way the sibling assertions do.
    default <- paste(deparse(substitute(x)), "must have equal-length elements.")
    tar_throw_validate(msg %|||% default)
  }
}
#' @export
#' @rdname tar_assert
tar_assert_envir <- function(x, msg = NULL) {
if (!is.environment(x)) {
default <- paste(deparse(substitute(x)), "must be an environment.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_expr <- function(x, msg = NULL) {
if (!is.expression(x)) {
default <- paste(deparse(substitute(x)), "must be an expression.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_flag <- function(x, choices, msg = NULL) {
tar_assert_chr(
x,
msg %|||% paste(deparse(substitute(x)), "must be a character")
)
tar_assert_scalar(
x,
msg %|||% paste(deparse(substitute(x)), "must have length 1")
)
if (!all(x %in% choices)) {
msg <- msg %|||% paste(
deparse(substitute(x)),
"equals",
deparse(x),
"but must be in",
deparse(choices)
)
tar_throw_validate(msg)
}
}
tar_assert_format <- function(format) {
tar_assert_scalar(format)
tar_assert_chr(format)
store_assert_format_setting(as_class(format))
}
#' @export
#' @rdname tar_assert
tar_assert_function <- function(x, msg = NULL) {
if (!is.function(x)) {
tar_throw_validate(msg %|||% "input must be a function.")
}
}
#' @export
#' @rdname tar_assert
tar_assert_ge <- function(x, threshold, msg = NULL) {
  # Assert that every element of x is greater than or equal to threshold.
  if (any(x < threshold)) {
    # Bug fix: the default message previously said "less than or equal to",
    # which contradicts the >= check this function performs.
    default <- paste(
      deparse(substitute(x)),
      "must be greater than or equal to",
      threshold
    )
    tar_throw_validate(msg %|||% default)
  }
}
#' @export
#' @rdname tar_assert
tar_assert_identical <- function(x, y, msg = NULL) {
if (!identical(x, y)) {
default <- paste(
deparse(substitute(x)),
"and",
deparse(substitute(y)),
"must be identical."
)
tar_throw_validate(msg %|||% default)
}
}
tar_assert_identical_chr <- function(x, y, msg = NULL) {
if (!identical(x, y)) {
msg_x <- paste0(deparse(x), collapse = "")
msg_y <- paste0(deparse(y), collapse = "")
tar_throw_validate(msg %|||% paste(msg_x, "and", msg_y, "not identical."))
}
}
#' @export
#' @rdname tar_assert
tar_assert_in <- function(x, choices, msg = NULL) {
if (!all(x %in% choices)) {
msg <- msg %|||% paste(
deparse(substitute(x)),
"equals",
deparse(x),
"but must be in",
deparse(choices)
)
tar_throw_validate(msg)
}
}
#' @export
#' @rdname tar_assert
tar_assert_not_dirs <- function(x, msg = NULL) {
lapply(x, tar_assert_not_dir, msg = msg)
}
#' @export
#' @rdname tar_assert
tar_assert_not_dir <- function(x, msg = NULL) {
if (dir.exists(x)) {
tar_throw_validate(msg %|||% paste(deparse(x), "must not be a directory."))
}
}
#' @export
#' @rdname tar_assert
tar_assert_not_in <- function(x, choices, msg = NULL) {
if (any(x %in% choices)) {
tar_throw_validate(msg %|||% paste(deparse(x), "is in", deparse(choices)))
}
}
#' @export
#' @rdname tar_assert
tar_assert_inherits <- function(x, class, msg = NULL) {
  # Assert that x inherits from the given class.
  if (!inherits(x, class)) {
    # Bug fix: removed a stray literal "x" that followed the deparsed
    # expression in the default message.
    default <- paste(deparse(substitute(x)), "does not inherit from", class)
    tar_throw_validate(msg %|||% default)
  }
}
#' @export
#' @rdname tar_assert
tar_assert_int <- function(x, msg = NULL) {
if (!is.integer(x)) {
default <- paste(deparse(substitute(x)), "must have mode integer.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_internet <- function(msg = NULL) {
tar_assert_package("curl")
if (!curl::has_internet()) {
# This line cannot be covered in automated tests
# because internet is usually on.
tar_throw_run("no internet") # nocov
}
}
#' @export
#' @rdname tar_assert
tar_assert_lang <- function(x, msg = NULL) {
if (!is.language(x)) {
tar_throw_validate(msg %|||% "x must be a language object")
}
}
#' @export
#' @rdname tar_assert
tar_assert_le <- function(x, threshold, msg = NULL) {
if (any(x > threshold)) {
default <- paste(
deparse(substitute(x)),
"must be less than or equal to",
threshold
)
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_list <- function(x, msg = NULL) {
  # Assert that x is a list.
  if (!is.list(x)) {
    # Bug fix: the computed default was previously ignored in favor of a
    # hard-coded "x must be a list." string.
    default <- paste(deparse(substitute(x)), "must be a list.")
    tar_throw_validate(msg %|||% default)
  }
}
#' @export
#' @rdname tar_assert
tar_assert_lgl <- function(x, msg = NULL) {
if (!is.logical(x)) {
default <- paste(deparse(substitute(x)), "must be logical.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_name <- function(x) {
tar_assert_chr(x)
tar_assert_scalar(x)
if (!nzchar(x)) {
tar_throw_validate("name must be a nonempty string.")
}
if (!identical(x, make.names(x))) {
tar_throw_validate(x, " is not a valid symbol name.")
}
if (grepl("\\.$", x)) {
tar_throw_validate(x, " ends with a dot.")
}
}
#' @export
#' @rdname tar_assert
tar_assert_names <- function(x, msg = NULL) {
  # Assert that every element of x is a syntactically legal symbol name.
  if (any(x != make.names(x, unique = FALSE))) {
    # Grammar fix: message previously read "x must legal symbol names."
    tar_throw_validate(msg %|||% "x must contain legal symbol names.")
  }
}
#' @export
#' @rdname tar_assert
tar_assert_nonempty <- function(x, msg = NULL) {
if (!length(x)) {
default <- paste(deparse(substitute(x)), "must be nonempty.")
tar_throw_validate(msg %|||% default)
}
}
tar_assert_none_na <- function(x, msg = NULL) {
if (anyNA(x)) {
default <- paste(deparse(substitute(x)), "must have no missing values.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_not_expr <- function(x, msg = NULL) {
if (is.expression(x)) {
tar_throw_validate(msg %|||% "x must not be an expression object")
}
}
#' @export
#' @rdname tar_assert
tar_assert_nzchar <- function(x, msg = NULL) {
if (any(!nzchar(x))) {
default <- paste(deparse(substitute(x)), "has empty character strings.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_package <- function(package) {
tryCatch(
rlang::check_installed(package),
error = function(e) {
tar_throw_validate(conditionMessage(e))
}
)
}
#' @export
#' @rdname tar_assert
tar_assert_path <- function(path, msg = NULL) {
missing <- !file.exists(path)
if (any(missing)) {
tar_throw_validate(
msg %|||% paste0(
"missing files: ",
paste(path[missing], collapse = ", ")
)
)
}
}
#' @export
#' @rdname tar_assert
tar_assert_match <- function(x, pattern, msg = NULL) {
if (!grepl(pattern = pattern, x = x)) {
default <- paste(deparse(substitute(x)), "does not match pattern", pattern)
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_nonmissing <- function(x, msg = NULL) {
if (rlang::is_missing(x)) {
default <- paste(deparse(substitute(x)), "is missing with no default.")
tar_throw_validate(msg %|||% default)
}
}
#' @export
#' @rdname tar_assert
tar_assert_positive <- function(x, msg = NULL) {
if (any(x <= 0)) {
default <- paste(deparse(substitute(x)), "is not all positive.")
tar_throw_validate(msg %|||% default)
}
}
# Validate the resources argument of tar_target()/tar_option_set():
# must be a named list produced by tar_resources(), with each element a
# "tar_resources" object. Emits deprecation warnings for legacy formats.
tar_assert_resources <- function(resources) {
  tar_assert_list(resources, "resources must be list. Use tar_resources().")
  if (length(resources)) {
    tar_assert_nonempty(names(resources), "resources list must have names.")
    tar_assert_nzchar(names(resources), "resources names must be nonempty")
    tar_assert_unique(names(resources), "resources names must be unique.")
  }
  for (name in names(resources)) {
    if (!(name %in% names(formals(tar_resources)))) {
      # Typo fix in user-facing warning: "Unstructrued" -> "Unstructured".
      tar_warn_deprecate(
        "found non-standard resource group ",
        name,
        " in resources list. Unstructured resources lists are deprecated ",
        "in targets >= 0.5.0.9000 (2021-06-07). Use tar_resources() ",
        "and various tar_resources_*() helper functions to create the ",
        "resources argument to tar_target() and tar_option_set()."
      )
    } else if (!inherits(resources[[name]], "tar_resources")) {
      tar_warn_deprecate(
        "found incorrectly formatted resource group ",
        name,
        " in resources list. Unstructured resources lists are deprecated ",
        "in targets >= 0.5.0.9000 (2021-06-07). Use tar_resources_clustermq()",
        " and various other tar_resources_*() helper functions to create ",
        "arguments to tar_resources()."
      )
    }
  }
}
#' @export
#' @rdname tar_assert
tar_assert_scalar <- function(x, msg = NULL) {
if (length(x) != 1) {
default <- paste(deparse(substitute(x)), "must have length 1.")
tar_throw_validate(msg %|||% default)
}
}
tar_assert_store <- function(store) {
tar_assert_path(
store,
paste(
"data store path", store, "not found.",
"utility functions like tar_read() and tar_progress() require a",
"data store (default: _targets/) produced by tar_make() or similar."
)
)
}
#' @export
#' @rdname tar_assert
tar_assert_target <- function(x, msg = NULL) {
msg <- msg %|||% paste(
"Found a non-target object. The target script file (default: _targets.R)",
"must end with a list of tar_target() objects (recommended)",
"or a tar_pipeline() object (deprecated)."
)
tar_assert_inherits(x = x, class = "tar_target", msg = msg)
}
#' @export
#' @rdname tar_assert
tar_assert_target_list <- function(x) {
msg <- paste(
"The target script file (default: _targets.R)",
"must end with a list of tar_target() objects (recommended)",
"or a tar_pipeline() object (deprecated). Each element of the target list",
"must be a target object or nested list of target objects."
)
tar_assert_list(x, msg = msg)
map(x, tar_assert_target, msg = msg)
}
# Validate a target script file (default: _targets.R) before it is sourced.
# Checks that the file exists, that it does not call pipeline-running
# functions (which would recurse), and warns about devtools/pkgload loaders.
tar_assert_script <- function(script) {
  path_msg <- paste0(
    "could not find file ",
    script,
    ". Main functions like tar_make() require a target script file ",
    "(default: _targets.R) to define the pipeline. ",
    "Functions tar_edit() and tar_script() can help. "
  )
  tar_assert_path(script, path_msg)
  # All symbols referenced by the script, including called functions.
  vars <- all.vars(parse(file = script), functions = TRUE)
  exclude <- c(
    "glimpse", "make", "manifest", "network", "outdated",
    "prune", "renv", "sitrep", "validate", "visnetwork"
  )
  pattern <- paste(paste0("^tar_", exclude), collapse = "|")
  choices <- grep(pattern, getNamespaceExports("targets"), value = TRUE)
  recursion_msg <- paste(
    "The target script file",
    script,
    "must not call tar_make() or similar functions",
    "that would source the target script again and cause infinite recursion."
  )
  tar_assert_not_in(vars, choices, recursion_msg)
  # sprintf() template: %s is replaced with the offending loader name.
  loader_msg <- paste(
    "Do not use %s() from {devtools} or {pkgload} to load",
    "packages or custom functions/globals for {targets}. If you do,",
    "custom functions will go to a package environment where {targets}",
    "may not track them, and the loaded data will not be available in",
    "parallel workers created by tar_make_clustermq() or tar_make_future().",
    "Read https://books.ropensci.org/targets/practices.html#loading-and-configuring-r-packages", # nolint
    "and https://books.ropensci.org/targets/practices.html#packages-based-invalidation", # nolint
    "for the correct way to load packages for {targets} pipelines.",
    "Warnings like this one are important, but if you must suppress them, ",
    "you can do so with Sys.setenv(TAR_WARN = \"false\")."
  )
  loaders <- c("load_all", "load_code", "load_data", "load_dll")
  if (!identical(Sys.getenv("TAR_WARN"), "false")) {
    for (loader in intersect(loaders, vars)) {
      tar_warn_validate(sprintf(loader_msg, loader))
    }
  }
}
#' @export
#' @rdname tar_assert
tar_assert_true <- function(x, msg = NULL) {
  # Throw a validation error unless the condition x is TRUE.
  if (!x) {
    # Bug fix: default message previously read "does not evaluate not TRUE."
    default <- paste(
      deparse(substitute(x)),
      "does not evaluate to TRUE."
    )
    tar_throw_validate(msg %|||% default)
  }
}
#' @export
#' @rdname tar_assert
tar_assert_unique <- function(x, msg = NULL) {
  # Assert that x has no duplicated elements.
  if (anyDuplicated(x)) {
    dups <- paste(unique(x[duplicated(x)]), collapse = ", ")
    default <- paste(
      deparse(substitute(x)),
      "has duplicated entries:"
    )
    # Bug fix: append the duplicated values even when a custom msg is
    # supplied. Callers such as tar_assert_unique_targets() pass a message
    # ending in ":" and previously the offending values were dropped.
    tar_throw_validate(paste(msg %|||% default, dups))
  }
}
#' @export
#' @rdname tar_assert
# Assert that the vector of target names x contains no duplicates.
tar_assert_unique_targets <- function(x) {
  tar_assert_unique(x, "duplicated target names:")
}
# nocov start
# tested in tests/interactive/test-tar_watch.R
# Assert that the suggested packages are installed — presumably the ones
# needed by the tar_watch() Shiny app (per the test file referenced above).
tar_assert_watch_packages <- function() {
  pkgs <- c(
    "bs4Dash",
    "DT",
    "gt",
    "markdown",
    "pingr",
    "shiny",
    "shinybusy",
    "shinyWidgets",
    "visNetwork"
  )
  tar_assert_package(pkgs)
}
# nocov end
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ICC.R
\name{ICC}
\alias{ICC}
\title{An ICC function}
\usage{
ICC(meta, infor, df_name = "Vital")
}
\arguments{
\item{infor, }{input data frame: metabolites infor}
\item{df_name, }{name for the input data}
\item{df_input, }{input data frame: metabolites data}
}
\value{
A scatter plot drawn with base R graphics.
}
\description{
This function allows you to get ICC.
}
| /man/ICC.Rd | no_license | cautree/qa | R | false | true | 436 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ICC.R
\name{ICC}
\alias{ICC}
\title{An ICC function}
\usage{
ICC(meta, infor, df_name = "Vital")
}
\arguments{
\item{infor, }{input data frame: metabolites infor}
\item{df_name, }{name for the input data}
\item{df_input, }{input data frame: metabolites data}
}
\value{
A scatter plot drawn with base R graphics.
}
\description{
This function allows you to get ICC.
}
|
#' gmirror
#'
#' Create mirrored Manhattan plots for GWAS
#' Dependencies: ggplot2, gridExtra
#' Suggested: ggrepel
#' @param top data frame, must contain SNP, CHR, POS, pvalue, optional Shape
#' @param bottom data frame, must contain SNP, CHR, POS, pvalue, optional Shape
#' @param tline list of pvalues to draw red threshold lines in top plot
#' @param bline list of pvalues to draw red threshold lines in bottom plot
#' @param chroms list of chromosomes to plot in the order desired, default c(1:22, "X", "Y")
#' @param log10 plot -log10() of pvalue column, logical
#' @param yaxis label for y-axis in the format c("top", "bottom"), automatically set if log10=TRUE
#' @param opacity opacity of points, from 0 to 1, useful for dense plots
#' @param annotate_snp vector of RSIDs to annotate
#' @param annotate_p list of pvalue thresholds to annotate in the order of c(p_top, p_bottom)
#' @param toptitle optional string for top plot title
#' @param bottomtitle optional string for bottom plot title
#' @param chrcolor1 first alternating color for chromosome
#' @param chrcolor2 second alternating color for chromosome
#' @param highlight_snp vector of snps to highlight
#' @param highlight_p list of pvalue thresholds to highlight in the order of c(p_top, p_bottom)
#' @param highlighter color to highlight
#' @param freey allow y-axes to scale with the data
#' @param background variegated or white
#' @param chrblocks logical, turns on x-axis chromosome marker blocks
#' @param file file name of saved image
#' @param type plot type/extension
#' @param hgt height of plot in inches
#' @param hgtratio height ratio of plots, equal to top plot proportion
#' @param wi width of plot in inches
#' @param res resolution of plot in pixels per inch
#' @return png image
#' @import ggplot2
#' @importFrom gridExtra arrangeGrob grid.arrange
#' @export
#' @examples
#' data(gwas.t)
#' data(gwas.b)
#' gmirror(top=gwas.t, bottom=gwas.b, tline=0.05/nrow(gwas.t), bline=0.05/nrow(gwas.b),
#' toptitle="GWAS Comparison Example: Data 1", bottomtitle = "GWAS Comparison Example: Data 2",
#' highlight_p = c(0.05/nrow(gwas.t), 0.05/nrow(gwas.b)), highlighter="green")
gmirror <- function(top, bottom, tline, bline, chroms = c(1:22, "X", "Y"), log10=TRUE,
                    yaxis, opacity=1, annotate_snp, annotate_p, toptitle=NULL,
                    bottomtitle=NULL, highlight_snp, highlight_p, highlighter="red",
                    chrcolor1="#AAAAAA", chrcolor2="#4D4D4D", freey=FALSE,
                    background="variegated", chrblocks=FALSE, file="gmirror",
                    type="png", hgt=7, hgtratio=0.5, wi=12, res=300 ){
  # Combine the two GWAS result sets, tagging each row with its panel.
  topn <- names(top)
  bottomn <- names(bottom)
  top$Location <- "Top"
  bottom$Location <- "Bottom"
  # Both inputs must share the same metadata columns so rbind() is valid.
  if(!identical(topn, bottomn)){stop("Please ensure both inputs have the same metadata columns.")}
  d <- as.data.frame(rbind(top, bottom))
  d$POS <- as.numeric(as.character(d$POS))
  d$CHR <- droplevels(factor(d$CHR, levels = as.character(chroms)))
  d <- d[d$CHR %in% chroms, ]
  d_order <- d[order(d$CHR, d$POS), ]
  d_order$pos_index <- seq.int(nrow(d_order))
  d_order_sub <- d_order[, c("SNP", "CHR", "POS", "pvalue", "pos_index")]
  # Per-chromosome position limits, used for axis labels and shading bands.
  maxRows <- by(d_order_sub, d_order_sub$CHR, function(x) x[which.max(x$pos_index),])
  minRows <- by(d_order_sub, d_order_sub$CHR, function(x) x[which.min(x$pos_index),])
  milimits <- do.call(rbind, minRows)
  malimits <- do.call(rbind, maxRows)
  lims <- merge(milimits, malimits, by="CHR")
  names(lims) <- c("Color", "snpx", "px", "posx", "posmin", "snpy", "py", "posy", "posmax")
  lims$av <- (lims$posmin + lims$posmax)/2
  lims <- lims[order(lims$Color),]
  lims$shademap <- rep(c("shade_ffffff", "shade_ebebeb"), length.out=nrow(lims), each=1)
  # Alternating chromosome colors plus the two background shade colors.
  nchrcolors <- nlevels(factor(lims$Color))
  colnames(d_order)[2] <- "Color"
  newcols <-c(rep(x=c(chrcolor1, chrcolor2), length.out=nchrcolors, each=1), "#FFFFFF", "#EBEBEB")
  names(newcols) <-c(levels(factor(lims$Color)), "shade_ffffff", "shade_ebebeb")
  # y-axis values and labels, optionally on the -log10 scale.
  if(log10==TRUE){
    d_order$pval <- -log10(d_order$pvalue)
    yaxislab1 <- expression(paste("-log"[10], "(p-value)", sep=""))
    yaxislab2 <- expression(paste("-log"[10], "(p-value)", sep=""))
    if(!missing(tline)) {tredline <- -log10(tline)}
    if(!missing(bline)) {bredline <- -log10(bline)}
  } else {
    d_order$pval <- d_order$pvalue
    yaxislab1 <- yaxis[1]
    yaxislab2 <- yaxis[2]
    if(!missing(tline)) {tredline <- tline}
    if(!missing(bline)) {bredline <- bline}
  }
  yaxismax1 <- ifelse(freey==FALSE, max(d_order$pval[which(d_order$pval< Inf)]), max(d_order$pval[which(d_order$pval< Inf) & d_order$Location=="Top"]))
  yaxismax2 <- ifelse(freey==FALSE, max(d_order$pval[which(d_order$pval< Inf)]), max(d_order$pval[which(d_order$pval< Inf) & d_order$Location=="Bottom"]))
  yaxismin1 <- ifelse(freey==FALSE, 0, min(d_order$pval[d_order$Location=="Top"]))
  yaxismin2 <- ifelse(freey==FALSE, 0, min(d_order$pval[d_order$Location=="Bottom"]))
  # Background band layers. Fix: build the ggplot layers directly instead of
  # the original eval(parse(text = ...)) on a code string (same output,
  # no dynamic code evaluation). NULL layers are no-ops when added to a plot.
  if(background=="white"){
    backpanel1 <- NULL
    backpanel2 <- NULL
  } else {
    backpanel1 <- geom_rect(data = lims, aes(xmin = posmin-.5, xmax = posmax+.5, ymin = yaxismin1, ymax = Inf, fill=factor(shademap)), alpha = 0.5)
    backpanel2 <- geom_rect(data = lims, aes(xmin = posmin-.5, xmax = posmax+.5, ymin = yaxismin2, ymax = Inf, fill=factor(shademap)), alpha = 0.5)
  }
  # TOP PLOT
  p1 <- ggplot() + backpanel1
  # Add shape aesthetic only if the input carries a Shape column.
  if("Shape" %in% topn){
    p1 <- p1 + geom_point(data=d_order[d_order$Location=="Top",], aes(x=pos_index, y=pval, color=factor(Color), shape=factor(Shape)), alpha=opacity)
  } else {
    p1 <- p1 + geom_point(data=d_order[d_order$Location=="Top",], aes(x=pos_index, y=pval, color=factor(Color)), alpha=opacity)
  }
  p1 <- p1 + scale_x_continuous(breaks=lims$av, labels=lims$Color, expand=c(0,0))
  if(chrblocks==TRUE){
    p1 <- p1 + geom_rect(data = lims, aes(xmin = posmin-.5, xmax = posmax+.5, ymin = -Inf, ymax = min(d_order$pval), fill=as.factor(Color)), alpha = 1)
  }
  p1 <- p1 + scale_colour_manual(name = "Color", values = newcols) + scale_fill_manual(name = "Color", values = newcols)
  p1 <- p1 + theme(panel.grid.minor.x = element_blank(), panel.grid.major.x=element_blank(), axis.title.x=element_blank(), legend.position="top", legend.title=element_blank())
  # BOTTOM PLOT
  p2 <- ggplot() + backpanel2
  if("Shape" %in% bottomn){
    p2 <- p2 + geom_point(data=d_order[d_order$Location=="Bottom",], aes(x=pos_index, y=pval, color=factor(Color), shape=factor(Shape)), alpha=opacity)
  } else {
    p2 <- p2 + geom_point(data=d_order[d_order$Location=="Bottom",], aes(x=pos_index, y=pval, color=factor(Color)), alpha=opacity)
  }
  p2 <- p2 + scale_x_continuous(breaks=lims$av, labels=lims$Color, expand=c(0,0))
  if(chrblocks==TRUE){
    p2 <- p2 + geom_rect(data = lims, aes(xmin = posmin-.5, xmax = posmax+.5, ymin = -Inf, ymax = min(d_order$pval), fill=as.factor(Color)), alpha = 1)
  }
  p2 <- p2 + scale_colour_manual(name = "Color", values = newcols) + scale_fill_manual(name = "Color", values = newcols)
  p2 <- p2 + theme(axis.text.x=element_text(angle=90), panel.grid.minor.x = element_blank(), panel.grid.major.x=element_blank(), axis.title.x=element_blank(), legend.position="bottom", legend.title=element_blank())
  # Highlight requested SNPs by name.
  if(!missing(highlight_snp)){
    if("Shape" %in% topn){
      p1 <- p1 + geom_point(data=d_order[d_order$SNP %in% highlight_snp & d_order$Location=="Top", ], aes(x=pos_index, y=pval, shape=Shape), colour=highlighter)
      p1 <- p1 + guides(shape = guide_legend(override.aes = list(colour = "black")))
    } else {
      p1 <- p1 + geom_point(data=d_order[d_order$SNP %in% highlight_snp & d_order$Location=="Top", ], aes(x=pos_index, y=pval), colour=highlighter)
    }
    if("Shape" %in% bottomn){
      p2 <- p2 + geom_point(data=d_order[d_order$SNP %in% highlight_snp & d_order$Location=="Bottom", ], aes(x=pos_index, y=pval, shape=Shape), colour=highlighter)
      p2 <- p2 + guides(shape = guide_legend(override.aes = list(colour = "black")))
    } else {
      p2 <- p2 + geom_point(data=d_order[d_order$SNP %in% highlight_snp & d_order$Location=="Bottom", ], aes(x=pos_index, y=pval), colour=highlighter)
    }
  }
  # Highlight SNPs below per-panel pvalue thresholds c(p_top, p_bottom).
  if(!missing(highlight_p)){
    if("Shape" %in% topn){
      p1 <- p1 + geom_point(data=d_order[d_order$pvalue < highlight_p[1] & d_order$Location=="Top", ], aes(x=pos_index, y=pval, shape=Shape), colour=highlighter)
      p1 <- p1 + guides(shape = guide_legend(override.aes = list(colour = "black")))
    } else {
      p1 <- p1 + geom_point(data=d_order[d_order$pvalue < highlight_p[1] & d_order$Location=="Top", ], aes(x=pos_index, y=pval), colour=highlighter)
    }
    if("Shape" %in% bottomn){
      p2 <- p2 + geom_point(data=d_order[d_order$pvalue < highlight_p[2] & d_order$Location=="Bottom", ], aes(x=pos_index, y=pval, shape=Shape), colour=highlighter)
      p2 <- p2 + guides(shape = guide_legend(override.aes = list(colour = "black")))
    } else {
      p2 <- p2 + geom_point(data=d_order[d_order$pvalue < highlight_p[2] & d_order$Location=="Bottom", ], aes(x=pos_index, y=pval), colour=highlighter)
    }
  }
  # Red threshold lines (seq_along is safe for any vector length).
  if(!missing(tline)){
    for(i in seq_along(tline)){
      p1 <- p1 + geom_hline(yintercept = tredline[i], colour="red")
    }
  }
  if(!missing(bline)){
    for(i in seq_along(bline)){
      p2 <- p2 + geom_hline(yintercept = bredline[i], colour="red")
    }
  }
  # Text annotation by pvalue threshold; ggrepel is optional.
  if(!missing(annotate_p)){
    if (!requireNamespace("ggrepel", quietly = TRUE)) {
      print("Consider installing 'ggrepel' for improved text annotation")
      p1 <- p1 + geom_text(data=d_order[d_order$pvalue < annotate_p[1] & d_order$Location=="Top",], aes(pos_index,pval,label=SNP))
      p2 <- p2 + geom_text(data=d_order[d_order$pvalue < annotate_p[2] & d_order$Location=="Bottom",], aes(pos_index,pval,label=SNP))
    } else {
      p1 <- p1 + ggrepel::geom_text_repel(data=d_order[d_order$pvalue < annotate_p[1] & d_order$Location=="Top",], aes(pos_index,pval,label=SNP))
      p2 <- p2 + ggrepel::geom_text_repel(data=d_order[d_order$pvalue < annotate_p[2] & d_order$Location=="Bottom",], aes(pos_index,pval,label=SNP))
    }
  }
  # Text annotation by SNP name; ggrepel is optional.
  if(!missing(annotate_snp)){
    if (!requireNamespace("ggrepel", quietly = TRUE)){
      print("Consider installing 'ggrepel' for improved text annotation")
      p1 <- p1 + geom_text(data=d_order[d_order$SNP %in% annotate_snp & d_order$Location=="Top",], aes(pos_index,pval,label=SNP))
      p2 <- p2 + geom_text(data=d_order[d_order$SNP %in% annotate_snp & d_order$Location=="Bottom",], aes(pos_index,pval,label=SNP))
    } else {
      p1 <- p1 + ggrepel::geom_text_repel(data=d_order[d_order$SNP %in% annotate_snp & d_order$Location=="Top",], aes(pos_index,pval,label=SNP))
      p2 <- p2 + ggrepel::geom_text_repel(data=d_order[d_order$SNP %in% annotate_snp & d_order$Location=="Bottom",], aes(pos_index,pval,label=SNP))
    }
  }
  # Axis titles.
  p1 <- p1 + ylab(yaxislab1)
  p2 <- p2 + ylab(yaxislab2)
  # Final y-axis formatting; the bottom panel is reversed to mirror the top.
  if(chrblocks==TRUE){
    if(freey==TRUE){
      print("Sorry, drawing chrblocks with freey=TRUE is currently unsupported and will be ignored.")
    } else {
      p1 <- p1+theme(axis.text.x = element_text(vjust=1),axis.ticks.x = element_blank())+ylim(c(yaxismin1,yaxismax1))
      p2 <- p2+scale_y_reverse(limits=c(yaxismax2, yaxismin2)) + theme(axis.text.x = element_blank(),axis.ticks.x = element_blank())
    }
  } else {
    p1 <- p1+theme(axis.text.x = element_text(vjust=1),axis.ticks.x = element_blank())+ scale_y_continuous(limits=c(yaxismin1, yaxismax1),expand=expansion(mult=c(0,0.1)))
    p2 <- p2+scale_y_reverse(limits=c(yaxismax2,yaxismin2), expand=expansion(mult=c(0.1,0))) + theme(axis.text.x = element_blank(),axis.ticks.x = element_blank())
  }
  if(background=="white"){
    p1 <- p1 + theme(panel.background = element_rect(fill="white"))
    p2 <- p2 + theme(panel.background = element_rect(fill="white"))
  }
  p1 <- p1 + guides(fill="none", color="none")
  p2 <- p2 + guides(fill="none", color="none")
  # Save the stacked plots to disk and return the combined grob.
  print(paste0("Saving plot to ", file, ".", type))
  p <- grid.arrange(arrangeGrob(p1, top=toptitle), arrangeGrob(p2, bottom=bottomtitle), padding=0, heights=c(hgtratio,1-hgtratio))
  ggsave(p, filename=paste0(file, ".", type), dpi=res, units="in", height=hgt, width=wi)
  return(p)
}
| /R/gmirror.R | no_license | anastasia-lucas/hudson | R | false | false | 12,604 | r | #' gmirror
#'
#' Create mirrored Manhattan plots for GWAS
#' Dependencies: ggplot2, gridExtra
#' Suggested: ggrepel
#' @param top data frame, must contain SNP, CHR, POS, pvalue, optional Shape
#' @param bottom data frame, must contain SNP, CHR, POS, pvalue, optional Shape
#' @param tline list of pvalues to draw red threshold lines in top plot
#' @param bline list of pvalues to draw red threshold lines in bottom plot
#' @param chroms list of chromosomes to plot in the order desired, default c(1:22, "X", "Y")
#' @param log10 plot -log10() of pvalue column, logical
#' @param yaxis label for y-axis in the format c("top", "bottom"), automatically set if log10=TRUE
#' @param opacity opacity of points, from 0 to 1, useful for dense plots
#' @param annotate_snp vector of RSIDs to annotate
#' @param annotate_p list of pvalue thresholds to annotate in the order of c(p_top, p_bottom)
#' @param toptitle optional string for top plot title
#' @param bottomtitle optional string for bottom plot title
#' @param chrcolor1 first alternating color for chromosome
#' @param chrcolor2 second alternating color for chromosome
#' @param highlight_snp vector of snps to highlight
#' @param highlight_p list of pvalue thresholds to highlight in the order of c(p_top, p_bottom)
#' @param highlighter color to highlight
#' @param freey allow y-axes to scale with the data
#' @param background variegated or white
#' @param chrblocks logical, turns on x-axis chromosome marker blocks
#' @param file file name of saved image
#' @param type plot type/extension
#' @param hgt height of plot in inches
#' @param hgtratio height ratio of plots, equal to top plot proportion
#' @param wi width of plot in inches
#' @param res resolution of plot in pixels per inch
#' @return png image
#' @import ggplot2
#' @importFrom gridExtra arrangeGrob grid.arrange
#' @export
#' @examples
#' data(gwas.t)
#' data(gwas.b)
#' gmirror(top=gwas.t, bottom=gwas.b, tline=0.05/nrow(gwas.t), bline=0.05/nrow(gwas.b),
#' toptitle="GWAS Comparison Example: Data 1", bottomtitle = "GWAS Comparison Example: Data 2",
#' highlight_p = c(0.05/nrow(gwas.t), 0.05/nrow(gwas.b)), highlighter="green")
# Draw a mirrored Manhattan plot: `top` results plotted upward and `bottom`
# results plotted downward on a shared genomic x-axis. Both inputs must have
# identical column names and include SNP, CHR, POS and pvalue (an optional
# Shape column is honoured when present). The combined figure is saved to
# <file>.<type> and returned. NOTE(review): relies on ggplot2 plus
# gridExtra's grid.arrange()/arrangeGrob() being attached by the package.
gmirror <- function(top, bottom, tline, bline, chroms = c(1:22, "X", "Y"),log10=TRUE,
                    yaxis, opacity=1, annotate_snp, annotate_p, toptitle=NULL,
                    bottomtitle=NULL, highlight_snp, highlight_p, highlighter="red",
                    chrcolor1="#AAAAAA", chrcolor2="#4D4D4D", freey=FALSE,
                    background="variegated", chrblocks=FALSE, file="gmirror",
                    type="png", hgt=7, hgtratio=0.5, wi=12, res=300 ){
  #Sort data
  topn <- names(top)
  bottomn <- names(bottom)
  # Tag each row with its panel before stacking the two data sets
  top$Location <- "Top"
  bottom$Location <- "Bottom"
  # Check file formats: both inputs must carry the same metadata columns
  if(!identical(topn, bottomn)){stop("Please ensure both inputs have the same metadata columns.")}
  d <- as.data.frame(rbind(top, bottom))
  d$POS <- as.numeric(as.character(d$POS))
  # Keep only requested chromosomes, ordered as given in `chroms`
  d$CHR <- droplevels(factor(d$CHR, levels = as.character(chroms)))
  d <- d[d$CHR %in% chroms, ]
  d_order <- d[order(d$CHR, d$POS), ]
  # pos_index is the plotting x-coordinate: a dense rank along the genome
  d_order$pos_index <- seq.int(nrow(d_order))
  d_order_sub <- d_order[, c("SNP", "CHR", "POS", "pvalue", "pos_index")]
  #Set up dataframe with color and position info
  # First/last pos_index per chromosome give the block limits used for
  # background shading and axis labels
  maxRows <- by(d_order_sub, d_order_sub$CHR, function(x) x[which.max(x$pos_index),])
  minRows <- by(d_order_sub, d_order_sub$CHR, function(x) x[which.min(x$pos_index),])
  milimits <- do.call(rbind, minRows)
  malimits <- do.call(rbind, maxRows)
  lims <- merge(milimits, malimits, by="CHR")
  names(lims) <- c("Color", "snpx", "px", "posx", "posmin", "snpy", "py", "posy", "posmax")
  # Midpoint of each chromosome block -> position of its x-axis label
  lims$av <- (lims$posmin + lims$posmax)/2
  lims <- lims[order(lims$Color),]
  # Alternate the two background shades chromosome by chromosome
  lims$shademap <- rep(c("shade_ffffff", "shade_ebebeb"), length.out=nrow(lims), each=1)
  #Set up colors
  nchrcolors <- nlevels(factor(lims$Color))
  #Color by CHR
  # NOTE(review): assumes CHR is the 2nd column of d_order — TODO confirm
  colnames(d_order)[2] <- "Color"
  # Point colours alternate chrcolor1/chrcolor2; the last two entries are the
  # background shade fills referenced by lims$shademap
  newcols <-c(rep(x=c(chrcolor1, chrcolor2), length.out=nchrcolors, each=1), "#FFFFFF", "#EBEBEB")
  names(newcols) <-c(levels(factor(lims$Color)), "shade_ffffff", "shade_ebebeb")
  #Info for y-axis
  if(log10==TRUE){
    d_order$pval <- -log10(d_order$pvalue)
    yaxislab1 <- expression(paste("-log"[10], "(p-value)", sep=""))
    yaxislab2 <- expression(paste("-log"[10], "(p-value)", sep=""))
    # Threshold lines are supplied on the raw p-value scale; transform to match
    if(!missing(tline)) {tredline <- -log10(tline)}
    if(!missing(bline)) {bredline <- -log10(bline)}
  } else {
    d_order$pval <- d_order$pvalue
    # Without the log transform the caller supplies both axis labels in `yaxis`
    yaxislab1 <- yaxis[1]
    yaxislab2 <- yaxis[2]
    if(!missing(tline)) {tredline <- tline}
    if(!missing(bline)) {bredline <- bline}
  }
  # Shared y-range unless freey=TRUE; Inf values (p == 0 under -log10) excluded
  yaxismax1 <- ifelse(freey==FALSE, max(d_order$pval[which(d_order$pval< Inf)]), max(d_order$pval[which(d_order$pval< Inf) & d_order$Location=="Top"]))
  yaxismax2 <- ifelse(freey==FALSE, max(d_order$pval[which(d_order$pval< Inf)]), max(d_order$pval[which(d_order$pval< Inf) & d_order$Location=="Bottom"]))
  yaxismin1 <- ifelse(freey==FALSE, 0, min(d_order$pval[d_order$Location=="Top"]))
  yaxismin2 <- ifelse(freey==FALSE, 0, min(d_order$pval[d_order$Location=="Bottom"]))
  #Theme options
  # NOTE(review): the background layer is built as a string and injected via
  # eval(parse(text=...)); a plain if() adding the geom_rect layer would be
  # safer, but is left unchanged here to preserve behavior exactly.
  backpanel1 <- ifelse(background=="white", "NULL", "geom_rect(data = lims, aes(xmin = posmin-.5, xmax = posmax+.5, ymin = yaxismin1, ymax = Inf, fill=factor(shademap)), alpha = 0.5)" )
  backpanel2 <- ifelse(background=="white", "NULL", "geom_rect(data = lims, aes(xmin = posmin-.5, xmax = posmax+.5, ymin = yaxismin2, ymax = Inf, fill=factor(shademap)), alpha = 0.5)" )
  #Start plotting
  #TOP PLOT
  p1 <- ggplot() + eval(parse(text=backpanel1))
  #Add shape info if available
  if("Shape" %in% topn){
    p1 <- p1 + geom_point(data=d_order[d_order$Location=="Top",], aes(x=pos_index, y=pval, color=factor(Color), shape=factor(Shape)), alpha=opacity)
  } else {
    p1 <- p1 + geom_point(data=d_order[d_order$Location=="Top",], aes(x=pos_index, y=pval, color=factor(Color)), alpha=opacity)
  }
  p1 <- p1 + scale_x_continuous(breaks=lims$av, labels=lims$Color, expand=c(0,0))
  # Optional solid blocks marking chromosome extents along the axis
  if(chrblocks==TRUE){
    p1 <- p1 + geom_rect(data = lims, aes(xmin = posmin-.5, xmax = posmax+.5, ymin = -Inf, ymax = min(d_order$pval), fill=as.factor(Color)), alpha = 1)
  }
  p1 <- p1 + scale_colour_manual(name = "Color", values = newcols) + scale_fill_manual(name = "Color", values = newcols)
  p1 <- p1 + theme(panel.grid.minor.x = element_blank(), panel.grid.major.x=element_blank(), axis.title.x=element_blank(), legend.position="top", legend.title=element_blank())
  #BOTTOM PLOT
  p2 <- ggplot() + eval(parse(text=backpanel2))
  #Add shape info if available
  if("Shape" %in% bottomn){
    p2 <- p2 + geom_point(data=d_order[d_order$Location=="Bottom",], aes(x=pos_index, y=pval, color=factor(Color), shape=factor(Shape)), alpha=opacity)
  } else {
    p2 <- p2 + geom_point(data=d_order[d_order$Location=="Bottom",], aes(x=pos_index, y=pval, color=factor(Color)), alpha=opacity)
  }
  p2 <- p2 + scale_x_continuous(breaks=lims$av, labels=lims$Color, expand=c(0,0))
  if(chrblocks==TRUE){
    p2 <- p2 + geom_rect(data = lims, aes(xmin = posmin-.5, xmax = posmax+.5, ymin = -Inf, ymax = min(d_order$pval), fill=as.factor(Color)), alpha = 1)
  }
  p2 <- p2 + scale_colour_manual(name = "Color", values = newcols) + scale_fill_manual(name = "Color", values = newcols)
  p2 <- p2 + theme(axis.text.x=element_text(angle=90), panel.grid.minor.x = element_blank(), panel.grid.major.x=element_blank(), axis.title.x=element_blank(), legend.position="bottom", legend.title=element_blank())
  #Highlight if given
  # Re-draw the selected points on top of the base layer in `highlighter` colour
  if(!missing(highlight_snp)){
    if("Shape" %in% topn){
      p1 <- p1 + geom_point(data=d_order[d_order$SNP %in% highlight_snp & d_order$Location=="Top", ], aes(x=pos_index, y=pval, shape=Shape), colour=highlighter)
      p1 <- p1 + guides(shape = guide_legend(override.aes = list(colour = "black")))
    } else {
      p1 <- p1 + geom_point(data=d_order[d_order$SNP %in% highlight_snp & d_order$Location=="Top", ], aes(x=pos_index, y=pval), colour=highlighter)
    }
    if("Shape" %in% bottomn){
      p2 <- p2 + geom_point(data=d_order[d_order$SNP %in% highlight_snp & d_order$Location=="Bottom", ], aes(x=pos_index, y=pval, shape=Shape), colour=highlighter)
      p2 <- p2 + guides(shape = guide_legend(override.aes = list(colour = "black")))
    } else {
      p2 <- p2 + geom_point(data=d_order[d_order$SNP %in% highlight_snp & d_order$Location=="Bottom", ], aes(x=pos_index, y=pval), colour=highlighter)
    }
  }
  # highlight_p[1] applies to the top panel, highlight_p[2] to the bottom
  if(!missing(highlight_p)){
    if("Shape" %in% topn){
      p1 <- p1 + geom_point(data=d_order[d_order$pvalue < highlight_p[1] & d_order$Location=="Top", ], aes(x=pos_index, y=pval, shape=Shape), colour=highlighter)
      p1 <- p1 + guides(shape = guide_legend(override.aes = list(colour = "black")))
    } else {
      p1 <- p1 + geom_point(data=d_order[d_order$pvalue < highlight_p[1] & d_order$Location=="Top", ], aes(x=pos_index, y=pval), colour=highlighter)
    }
    if("Shape" %in% bottomn){
      p2 <- p2 + geom_point(data=d_order[d_order$pvalue < highlight_p[2] & d_order$Location=="Bottom", ], aes(x=pos_index, y=pval, shape=Shape), colour=highlighter)
      p2 <- p2 + guides(shape = guide_legend(override.aes = list(colour = "black")))
    } else {
      p2 <- p2 + geom_point(data=d_order[d_order$pvalue < highlight_p[2] & d_order$Location=="Bottom", ], aes(x=pos_index, y=pval), colour=highlighter)
    }
  }
  #Add pvalue threshold line
  # tline/bline may be vectors: one horizontal red line per threshold
  if(!missing(tline)){
    for(i in 1:length(tline)){
      p1 <- p1 + geom_hline(yintercept = tredline[i], colour="red")
    }
  }
  if(!missing(bline)){
    for(i in 1:length(bline)){
      p2 <- p2 + geom_hline(yintercept = bredline[i], colour="red")
    }
  }
  #Annotate
  # ggrepel is optional: fall back to plain geom_text when it is not installed
  if(!missing(annotate_p)){
    if (!requireNamespace(c("ggrepel"), quietly = TRUE)==TRUE) {
      print("Consider installing 'ggrepel' for improved text annotation")
      p1 <- p1 + geom_text(data=d_order[d_order$pvalue < annotate_p[1] & d_order$Location=="Top",], aes(pos_index,pval,label=SNP))
      p2 <- p2 + geom_text(data=d_order[d_order$pvalue < annotate_p[2] & d_order$Location=="Bottom",], aes(pos_index,pval,label=SNP))
    } else {
      p1 <- p1 + ggrepel::geom_text_repel(data=d_order[d_order$pvalue < annotate_p[1] & d_order$Location=="Top",], aes(pos_index,pval,label=SNP))
      p2 <- p2 + ggrepel::geom_text_repel(data=d_order[d_order$pvalue < annotate_p[2] & d_order$Location=="Bottom",], aes(pos_index,pval,label=SNP))
    }
  }
  if(!missing(annotate_snp)){
    if (!requireNamespace(c("ggrepel"), quietly = TRUE)==TRUE){
      print("Consider installing 'ggrepel' for improved text annotation")
      p1 <- p1 + geom_text(data=d_order[d_order$SNP %in% annotate_snp & d_order$Location=="Top",], aes(pos_index,pval,label=SNP))
      p2 <- p2 + geom_text(data=d_order[d_order$SNP %in% annotate_snp & d_order$Location=="Bottom",], aes(pos_index,pval,label=SNP))
    } else {
      p1 <- p1 + ggrepel::geom_text_repel(data=d_order[d_order$SNP %in% annotate_snp & d_order$Location=="Top",], aes(pos_index,pval,label=SNP))
      p2 <- p2 + ggrepel::geom_text_repel(data=d_order[d_order$SNP %in% annotate_snp & d_order$Location=="Bottom",], aes(pos_index,pval,label=SNP))
    }
  }
  #Add title and y axis title
  p1 <- p1 + ylab(yaxislab1)
  p2 <- p2 + ylab(yaxislab2)
  #Format
  # The bottom panel uses a reversed y-scale so it mirrors the top panel
  if(chrblocks==TRUE){
    if(freey==TRUE){
      print("Sorry, drawing chrblocks with freey=TRUE is currently unsupported and will be ignored.")
    } else {
      p1 <- p1+theme(axis.text.x = element_text(vjust=1),axis.ticks.x = element_blank())+ylim(c(yaxismin1,yaxismax1))
      p2 <- p2+scale_y_reverse(limits=c(yaxismax2, yaxismin2)) + theme(axis.text.x = element_blank(),axis.ticks.x = element_blank())
    }
  } else {
    p1 <- p1+theme(axis.text.x = element_text(vjust=1),axis.ticks.x = element_blank())+ scale_y_continuous(limits=c(yaxismin1, yaxismax1),expand=expansion(mult=c(0,0.1)))
    p2 <- p2+scale_y_reverse(limits=c(yaxismax2,yaxismin2), expand=expansion(mult=c(0.1,0))) + theme(axis.text.x = element_blank(),axis.ticks.x = element_blank())
  }
  if(background=="white"){
    p1 <- p1 + theme(panel.background = element_rect(fill="white"))
    p2 <- p2 + theme(panel.background = element_rect(fill="white"))
  }
  p1 <- p1 + guides(fill="none", color="none")
  p2 <- p2 + guides(fill="none", color="none")
  #Save
  print(paste0("Saving plot to ", file, ".", type))
  # Stack the two panels; hgtratio controls the top panel's share of the height
  p <- grid.arrange(arrangeGrob(p1, top=toptitle), arrangeGrob(p2, bottom=bottomtitle), padding=0, heights=c(hgtratio,1-hgtratio))
  ggsave(p, filename=paste0(file, ".", type), dpi=res, units="in", height=hgt, width=wi)
  return(p)
}
|
\name{difftable}
\alias{difftable}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Table of multivariate differences
}
\description{
Builds a table crossing the modalities of a treatment variable, summarising the multivariate differences between them
}
\usage{
difftable(mat,vep=NULL,axes=c(1,2),var.col=NULL,trt="zone",test="hotelling")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{mat}{
the data matrix; it must contain one column whose name matches \code{trt}, with the remaining columns holding the numerical variables.
}
\item{vep}{
when vep is not NULL, a preliminary operation multiplies mat [,var.col] by the vep matrix (useful to do the test only on the first principal components of mat).
}
\item{axes}{
vep can contain all eigenvectors. If it is the case, axes allows the user to select only some eigenvectors (1:2 for the first two eigenvectors, 1:5 for the first five eigenvectors...)
}
\item{var.col}{
indicates the number of columns of the numerical variables. If NULL, all columns but trt are selected.
}
\item{trt}{
name of the column of \code{mat} containing the treatment (grouping) variable.
}
\item{test}{
"hotelling" or "sri". Indicates the type of test to detect multivariate differences. "hotelling" is the usual hotelling T2 test whereas "sri" corresponds to the Srivastava's Test, allowing a test of differences even if there is more variables than observations.
}
}
\value{A table containing the p-values is returned.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\link{hotelling.test}, \link{sri.test}
}
\examples{
data(df.scaled)
\donttest{difftable(df.scaled[,-5],trt="zone")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
| /man/difftable.Rd | no_license | cran/multifluo | R | false | false | 1,685 | rd | \name{difftable}
\alias{difftable}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Table of multivariate differences
}
\description{
Builds a table crossing the modalities of a treatment variable, summarising the multivariate differences between them
}
\usage{
difftable(mat,vep=NULL,axes=c(1,2),var.col=NULL,trt="zone",test="hotelling")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{mat}{
the data matrix; it must contain one column whose name matches \code{trt}, with the remaining columns holding the numerical variables.
}
\item{vep}{
when vep is not NULL, a preliminary operation multiplies mat [,var.col] by the vep matrix (useful to do the test only on the first principal components of mat).
}
\item{axes}{
vep can contain all eigenvectors. If it is the case, axes allows the user to select only some eigenvectors (1:2 for the first two eigenvectors, 1:5 for the first five eigenvectors...)
}
\item{var.col}{
indicates the number of columns of the numerical variables. If NULL, all columns but trt are selected.
}
\item{trt}{
name of the column of \code{mat} containing the treatment (grouping) variable.
}
\item{test}{
"hotelling" or "sri". Indicates the type of test to detect multivariate differences. "hotelling" is the usual hotelling T2 test whereas "sri" corresponds to the Srivastava's Test, allowing a test of differences even if there is more variables than observations.
}
}
\value{A table containing the p-values is returned.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\link{hotelling.test}, \link{sri.test}
}
\examples{
data(df.scaled)
\donttest{difftable(df.scaled[,-5],trt="zone")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
|
snippet Header_Script "R Header Template" b
#' ------------------------------------------------
#' Project: ${1:TT R session 1}
#' Script: ${2:TT R session 1}
#' Author: ${3:Takesure Tozooneyi }
#' Date: ${4:`r paste(date())`}
#' ------------------------------------------------
snippet Folders "Create project folders" b
Folder_names <- c("Raw Data", "Data", "Figures", "Tables", "Analysis", "Literature", "Paper", "Templates", "Slides", "Website")
ifelse(!dir.exists(Folder_names), sapply(Folder_names, dir.create), "Folder Exists")
# Installing R packages ---------------------------------------------------
# NOTE(review): install.packages() runs on every execution of the script;
# consider moving these to a one-time setup or guarding with
# requireNamespace() checks.
install.packages('gtsummary')
install.packages('tidyverse')
install.packages('readxl')
install.packages('janitor')
install.packages('openxlsx')
install.packages('gt')
install.packages('arsenal')
install.packages('questionr')
# Uploading the libraries -------------------------------------------------
library(gtsummary)
library(tidyverse)
library(readxl)
library(janitor)
library(openxlsx)
library(gt)
library(arsenal)
library(questionr)
# Loading Data ------------------------------------------------------------
# Main survey sheet; clean_names() normalises column names to snake_case.
sam_data_001 <- read_excel("sam_data_001.xlsx") %>%
  clean_names()
# Labeling variables -----------------------------------------------------
# labels<- presumably comes from the arsenal package — attaches display
# labels used by summary tables. (Fixed: `<-` for assignment, "children" typo.)
labels(sam_data_001) <- c(
  age = "Age of respondent, yrs",
  agehousehold = "HH Age, yrs",
  size = "Size of household",
  female = "No. of females",
  male = "No. of males",
  members_under5 = "No. of children under 5 yrs",
  members5to17 = "No. of hh members 5-17 yrs",
  members18to65 = "No. of hh members 18-65 yrs",
  members66 = "No. of hh members 66+ yrs",
  genderhousehold = "Gender of HH",
  marital_status = " Marital status of HH",
  sec4b1 = "Religion")
# Primary key table: gender of household head + row index, used later to
# join child sheets back to the main sheet.
pt <- sam_data_001 %>%
  select(genderhousehold, index)
# Demographic Summary -----------------------------------------------------
# NOTE(review): positional select(7:18) is fragile if the sheet layout
# changes — confirm columns 7-18 are the demographic block.
dem <- sam_data_001 %>%
  select(7:18, sec4b1)
# Treating missing values -------------------------------------------------
# Missing counts are treated as zero household members.
dem <- dem %>%
  replace_na(
    list(
      agehousehold = 0,
      size = 0,
      female = 0,
      male = 0,
      members_under5 = 0,
      members5to17 = 0,
      members18to65 = 0,
      members66 = 0
    )
  )
# Household Roster --------------------------------------------------------
# Sheet 2: one row per household member.
sam_data_002 <- read_excel("sam_data_001.xlsx", sheet = 2) %>%
  clean_names()
# Drop the "section1_" prefix the form builder adds to column names.
names(sam_data_002) <- gsub("section1_", "", names(sam_data_002))
roster <- sam_data_002 %>%
  select(2:6, 8) %>%
  replace_na(list(sec1q3 = 0))
# Household Food Security -------------------------------------------------
fs <- sam_data_001 %>%
  select(genderhousehold, contains("sec2"))
# Drop two unneeded columns (positional; confirm against sheet layout).
fs <- fs %>%
  select(-3, -11)
names(fs) <- gsub("sec2q2_", "", names(fs))
# (Fixed label typos: "were there years", "your household is")
labels(fs) <-
  c(sec2q1 = "In the past 5 years, were there years you did not have enough food to meet your family needs ",
    sec2q3 = "How many meals excluding snacks do you normally have in a day",
    sec2q4 = "Compared to 5 years ago, your household is: ")
# Major shocks and risks --------------------------------------------------
sam_data_003 <- read_excel("sam_data_001.xlsx", sheet = 3) %>%
  clean_names()
# Join table 3 with the primary table (to link table with gender of household)
shocks <-
  left_join(pt, sam_data_003, by = c("index" = "parent_index")) %>%
  select(genderhousehold, sec3a_sec3a1)
labels(shocks) <-
  c(genderhousehold = "Gender of the head of household",
    sec3a_sec3a1 = "Which shocks did you experience")
# Shocks and social networks ----------------------------------------------
# Sheet 4 is read without clean_names() so the "sec3b/" prefix survives for
# the gsub below.
sam_data_004 <- read_excel("sam_data_001.xlsx", sheet = 4)
names(sam_data_004) <- gsub("sec3b/", "", names(sam_data_004))
adaptation <- sam_data_004 %>%
  select(sec3b1, 5:18)
# Shocks and local knowledge systems --------------------------------------
sam_data_005 <- read_excel("sam_data_001.xlsx", sheet = 5) %>%
  clean_names()
names(sam_data_005) <- gsub("sec4a_", "", names(sam_data_005))
iks <- sam_data_005 %>%
  select(1:2)
# (Fixed label typo: "Are there ways")
labels(iks) <- c(sec4a1 = "Changes in weather pattern",
                 sec4a2 = "Are there ways that local people predict or know about weather other than through radio")
iks2 <- sam_data_001 %>%
  select(genderhousehold, sec4a4, sec4a6)
# (Fixed label typos: "information", "agricultural")
labels(iks2) <- c(genderhousehold = "Gender of household head",
                  sec4a4 = "In your view, are the local weather prediction systems useful?",
                  sec4a6 = "Have you used this information to plan your agricultural activities?")
# Sacred sites ------------------------------------------------------------
sacred_sites <- sam_data_001 %>%
  select(sec4b1, sec4b2)
# Sacred animals, birds, and trees ----------------------------------------
sam_data_006 <- read_excel("sam_data_001.xlsx", sheet = 6) %>%
  clean_names()
# Recoding sam_data_006$section4c_sec4c1b into sam_data_006$sectio --------
# Harmonise free-text spellings of species names. Right-hand sides are raw
# data values and must match the spreadsheet exactly — do not "fix" them.
sam_data_006$section4c_sec4c1b_rec <-
  fct_recode(
    sam_data_006$section4c_sec4c1b,
    "Mariti" = "Manti",
    "Mishumbi, Mukwakwa" = "Mishumbi ne Mukwakwa",
    "Jackal" = "Gava",
    "Mariti, Mbira" = "Mariti Mbira",
    "Mbira" = "mbira",
    "Mariti, Kowiro" = "Mariti, kowiro",
    "Baboon, Jackal, Mbira" = "Mbira, Gava, Gudo",
    "Baboon, Chikovo" = "Baboon, chikovo",
    "Mariti, Dendera, Kowiro" = "Riti/Dendera,kowiro",
    "Jackal" = "Gava/Jackle",
    "Jichidza" = "Majijidza",
    "Mariti, Kowiro" = "Kowiri, Mariti",
    "Mariti, Kowiro" = "Mariti , kowiro",
    "Jackal, Fox" = "Gava/Fox",
    "Dendera" = "Matendera",
    "Mariti" = "Riti",
    "Kowiro, Owls, Masongano" = "Kowiro, Owls Masongano",
    "Owl" = "Owls",
    "Hyena" = "Hyenas",
    "Baboon" = "Baboons",
    "Mukamba, Mukwa, Mutuwa trees" = "Mukamba tree(Mukwa tree),Mutuwa",
    "Mutuwa, Mubvumira, Mukamba trees" = "Mutuwa/mubvumira/ mukamba trees"
  )
abt <- sam_data_006 %>%
  select(1:2)
labels(abt) <-
  c(section4c_sec4c1a = "Is there a sacred bird, tree, animal?",
    section4c_sec4c1b = "Indicate the name")
# Access to services and programs --------------------------------------
# Sheet 7: services/programmes accessed; joined to pt for the gender of
# the household head.
sam_data_007 <- read_excel("sam_data_001.xlsx", sheet = 7) %>%
  clean_names()
sam_data_007 <-
  left_join(pt, sam_data_007, by = c("index" = "parent_index")) %>%
  select(
    genderhousehold,
    section5_sec5q1,
    section5_sec5q2,
    section5_sec5q3,
    section5_sec5q4,
    section5_sec5q5,
    section5_sec5q6,
    section5_sec5q7,
    section5_sec5q8,
    section5_sec5q9
  )
# Harmonise free-text support-provider names into a small set of categories.
# Right-hand sides are raw data values — do not alter their spelling.
sam_data_007$section5_sec5q2 <-
  fct_recode(
    sam_data_007$section5_sec5q2,
    "Social Welfare" = "Social walfare",
    "Government" = "Gvt",
    "BEAM" = "Beam",
    "Social Welfare" = "Social welfare",
    "Agritex" = "Agritex Officers",
    "Vet Services" = "Veterinary Services",
    "Vet Services" = "Vetinary services",
    "Agritex" = "Agritex Officer",
    "Vet Services" = "Veterinary Officers",
    "Vet Services" = "Veterinary",
    "Donors" = "USAID",
    "Agritex" = "Agritex officers",
    "Donors" = "NAC",
    "Agritex" = "Arex",
    "Donors" = "NGO",
    "Agritex" = "Agritex offices",
    "Vet Services" = "Verterinary services",
    "Donors" = "Usaid",
    "Donors" = "WfP",
    "Donors" = "Donor",
    "Government" = "GVT",
    "Donors" = "Dor",
    "Vet Services" = "Vertinary"
  )
sam_data_007 <- sam_data_007 %>%
  rename(
    `Programmes` = section5_sec5q1,
    `Source` = section5_sec5q2,
    `Did_HH_Travel` = section5_sec5q3,
    `HH_Member_Travelled` = section5_sec5q4,
    `Where_travelled` = section5_sec5q5,
    `Form_of_Transport` = section5_sec5q6,
    `Distance_in_km` = section5_sec5q7,
    `Duration_in_minutes` = section5_sec5q8,
    `Cost_of_Transport` = section5_sec5q9
  )
# (Fixed: `<-` for assignment; label typo/grammar "travel to get support")
labels(sam_data_007) <- c(
  Programmes = "Services and programmes",
  Source = "Who provided support?",
  Did_HH_Travel = "Did hh member travel to get support?",
  HH_Member_Travelled = "Who travelled?",
  Where_travelled = "Where did he/she travel?",
  Form_of_Transport = "Form of transport used",
  Distance_in_km = "Distance in kilometres",
  Duration_in_minutes = "Time taken in minutes",
  Cost_of_Transport = "Cost of transport"
)
# Land --------------------------------------------------------------------
# Sheet 8: one row per plot owned/used by the household.
sam_data_008 <- read_excel("sam_data_001.xlsx", sheet = 8) %>%
  clean_names()
# Drop unused columns (positional; confirm against sheet layout).
sam_data_008 <- sam_data_008 %>%
  select(c(-3, -11:-17, -20:-39))
# (Fixed: `<-` for assignment)
labels(sam_data_008) <- c(
  plot_questions_plot_id = "Plot Number",
  plot_questions_sec6a2 = "Type of Land",
  plot_questions_sec6a4 = "Irrigated",
  plot_questions_sec6a5 = "Tenure",
  plot_questions_sec6a6 = "Land Acquired Through",
  plot_questions_sec6a7 = "Principal Use",
  plot_questions_sec6a8 = "Crops Rotated",
  plot_questions_sec6a9a = "Degradation",
  plot_questions_sec6a9b = "Form of Degradation",
  plot_questions_sec6a10 = "Extent of Degradation",
  plot_questions_sec6a11 = "Manager"
)
# NOTE(review): labels are attached before the rename below, so they are
# keyed by the original column names — confirm they survive the rename.
sam_data_008 <- sam_data_008 %>%
  rename(
    `Plot_Number` = plot_questions_plot_id,
    `Type_of_Land` = plot_questions_sec6a2,
    `Irrigated` = plot_questions_sec6a4,
    `Tenure` = plot_questions_sec6a5,
    `Land_Acquired_Through` = plot_questions_sec6a6,
    `Principal_Use` = plot_questions_sec6a7,
    `Crops_Rotated` = plot_questions_sec6a8,
    `Degradation` = plot_questions_sec6a9a,
    `Form_of_Degradation` = plot_questions_sec6a9b,
    `Extent_of_Degradation` = plot_questions_sec6a10,
    `Who_Manage_the_field` = plot_questions_sec6a11
  )
# Crop Production - Last Rainy Season (2018/2019) -------------------------
# Sheet 9: one row per crop per plot for the last rainy season.
sam_data_009 <- read_excel("sam_data_001.xlsx", sheet = 9) %>%
  clean_names() %>% select(
    sec6b_sec6b2,
    sec6b_sec6b3,
    sec6b_sec6b4,
    sec6b_sec6b5,
    sec6b_sec6b6,
    sec6b_sec6b7,
    sec6b_sec6b8,
    sec6b_sec6b8a,
    sec6b_sec6b8b,
    sec6b_sec6b9,
    sec6b_sec6b9a,
    sec6b_sec6b9b
  )
sam_data_009 <- sam_data_009 %>%
  rename(
    `Plot_Id` = sec6b_sec6b2,
    `Crop` = sec6b_sec6b3,
    `Type_of_Crop_Stand` = sec6b_sec6b4,
    `Entire_Plot` = sec6b_sec6b5,
    `Area_Under_Crop` = sec6b_sec6b6,
    `Seed_Variety` = sec6b_sec6b7
  )
# Recoding sam_data_009$sec6b_sec6b8 into sam_data_009$Total_Harve --------
# Map the harvest-unit label to its weight in kg (e.g. "90Kg Bag" -> "90").
# Right-hand sides must match the raw data values exactly.
sam_data_009$Total_Harvest_in_kgs <-
  fct_recode(
    sam_data_009$sec6b_sec6b8,
    "90" = "90Kg Bag",
    "25" = "25 Kg Bag",
    "1" = "Kilogram",
    "50" = "50 Kg Bag",
    "75" = "75Kg Bag"
  )
sam_data_009$Expected_Harvest_in_kgs <-
  fct_recode(
    sam_data_009$sec6b_sec6b9,
    "90" = "90Kg Bag",
    "25" = "25 Kg Bag",
    "1" = "Kilogram",
    "50" = "50 Kg Bag",
    "75" = "75Kg Bag")
## Recoding sam_data_009$Total_Harvest_in_kgs into sam_data_009$Total_Harvest_in_kgs_rec
# NOTE(review): remapping "25" -> "25.0" etc. before as.numeric() is
# numerically a no-op; the factor could be converted directly.
sam_data_009$Total_Harvest_in_kgs_rec <- fct_recode(sam_data_009$Total_Harvest_in_kgs,
  "25.0" = "25",
  "50.0" = "50",
  "75.0" = "75",
  "90.0" = "90",
  "1.0" = "1"
)
# Missing unit -> level "0" so the later product evaluates to 0 kg.
# NOTE(review): fct_explicit_na() is superseded by fct_na_value_to_level()
# in forcats >= 1.0 — consider updating when the dependency allows.
sam_data_009$Total_Harvest_in_kgs_rec <- fct_explicit_na(sam_data_009$Total_Harvest_in_kgs_rec, "0")
# Factor -> numeric must go through as.character() to recover the label,
# not the internal level code.
sam_data_009$Total_Harvest_in_kgs_rec <- as.numeric(as.character(sam_data_009$Total_Harvest_in_kgs_rec))
## Recoding sam_data_009$sec6b_sec6b8a into sam_data_009$sec6b_sec6b8a_rec
# sec6b_sec6b8a is presumably the number of units harvested — TODO confirm.
sam_data_009$sec6b_sec6b8a_rec <- as.character(sam_data_009$sec6b_sec6b8a)
sam_data_009$sec6b_sec6b8a_rec <- fct_recode(sam_data_009$sec6b_sec6b8a_rec,
  "1.0" = "1",
  "2.0" = "2",
  "0.0" = "0",
  "3.0" = "3",
  "4.0" = "4",
  "6.0" = "6",
  "7.0" = "7",
  "500.0" = "500",
  "350.0" = "350",
  "260.0" = "260",
  "200.0" = "200",
  "100.0" = "100",
  "50.0" = "50",
  "9.0" = "9",
  "5.0" = "5",
  "20.0" = "20",
  "15.0" = "15",
  "12.0" = "12"
)
sam_data_009$sec6b_sec6b8a_rec <- fct_explicit_na(sam_data_009$sec6b_sec6b8a_rec, "0.0")
sam_data_009$sec6b_sec6b8a_rec <- as.numeric(as.character(sam_data_009$sec6b_sec6b8a_rec))
# Total harvest in kg = kg per unit * number of units.
sam_data_009$Total_Harvest_in_kgs_ <- (sam_data_009$Total_Harvest_in_kgs_rec * sam_data_009$sec6b_sec6b8a_rec)
# Crop production ---------------------------------------------------------
# Sheet 10: long-run trends in crop choice.
sam_data_010 <- read_excel("sam_data_001.xlsx", sheet = 10) %>%
  clean_names()
names(sam_data_010) <- gsub("sec6c_", "", names(sam_data_010))
crops <- sam_data_010 %>% select(1:3, 29:30)
# (Fixed label typo: "crop")
labels(crops) <- c(
  sec6c1 = "Name of crop grown",
  sec6c2 = "Trend in proportion under this crop",
  sec6c3 = "Explain the change in land size",
  sec6c4 = "Production levels in 2009",
  sec6c5 = "Explain the change in production"
)
# Crop production - changing preferences ----------------------------------
# Sheet 11: crops dropped since 1980 and why.
sam_data_011 <- read_excel("sam_data_001.xlsx", sheet = 11) %>%
  clean_names()
crops_1980 <- sam_data_011 %>%
  select(1:2)
labels(crops_1980) <- c(sec6d_sec6d1 = "Crop dropped since 1980",
                        sec6d_sec6d2 = "Why was the crop dropped")
# Crop production and changing preferences adopted ------------------------
sam_data_012 <- read_excel("sam_data_001.xlsx", sheet = 12) %>%
  clean_names()
crops_1980a <- sam_data_012 %>%
  select(1:2)
labels(crops_1980a) <- c(sec6dd_sec6d3 = "Crop dropped since 1980",
                         sec6dd_sec6d4 = "Why was the crop dropped")
# Watershed condition and management --------------------------------------
sam_data_013 <- read_excel("sam_data_001.xlsx", sheet = 13) %>%
  clean_names() %>% select(sec7a_sec7a1,
                           sec7a_sec7a2,
                           sec7a_sec7a3,
                           sec7a_sec7a4)
sam_data_013 <- sam_data_013 %>%
  rename(
    `Type_of_change` = sec7a_sec7a1,
    `Observed_in_comm` = sec7a_sec7a2,
    `Observed_in_own_field` = sec7a_sec7a3,
    `Prevention_measure` = sec7a_sec7a4
  )
# (Fixed bug: label key was "Prevention_measures", which does not match the
# renamed column "Prevention_measure", so the label never attached.
# Also fixed: `<-` for assignment.)
labels(sam_data_013) <- c(
  Type_of_change = "Type of change",
  Observed_in_comm = "Change observed in community",
  Observed_in_own_field = "Change observed in own field",
  Prevention_measure = "Prevention measure"
)
| /TT R session 1.R | no_license | taktozo/TTsession1 | R | false | false | 14,795 | r | snippet Header_Script "R Header Template" b
#' ------------------------------------------------
#' Project: ${1:TT R session 1}
#' Script: ${2:TT R session 1}
#' Author: ${3:Takesure Tozooneyi }
#' Date: ${4:`r paste(date())`}
#' ------------------------------------------------
snippet Folders "Create project folders" b
Folder_names <- c("Raw Data", "Data", "Figures", "Tables", "Analysis", "Literature", "Paper", "Templates", "Slides", "Website")
ifelse(!dir.exists(Folder_names), sapply(Folder_names, dir.create), "Folder Exists")
# Installing R packages ---------------------------------------------------
install.packages('gtsummary')
install.packages('tidyverse')
install.packages('readxl')
install.packages('janitor')
install.packages('openxlsx')
install.packages('gt')
install.packages('arsenal')
install.packages('questionr')
# Uploading the libraries -------------------------------------------------
library(gtsummary)
library(tidyverse)
library(readxl)
library(janitor)
library(openxlsx)
library(gt)
library(arsenal)
library(questionr)
# Loading Data ------------------------------------------------------------
sam_data_001 <- read_excel("sam_data_001.xlsx") %>%
clean_names()
# Labeling variables -----------------------------------------------------
labels(sam_data_001) = c(
age = "Age of respondent, yrs",
agehousehold = "HH Age, yrs",
size = "Size of household",
female = "No. of females",
male = "No. of males",
members_under5 = "No. of childern under 5 yrs",
members5to17 = "No. of hh members 5-17 yrs",
members18to65 = "No. of hh members 18-65 yrs",
members66 = "No. of hh members 66+ yrs",
genderhousehold = "Gender of HH",
marital_status = " Marital status of HH",
sec4b1 = "Religion")
pt <- sam_data_001 %>%
select(genderhousehold, index)
# Demographic Summary -----------------------------------------------------
dem <- sam_data_001 %>%
select(7:18, sec4b1)
# Treating missing values -------------------------------------------------
dem <- dem %>%
replace_na(
list(
agehousehold = 0,
size = 0,
female = 0,
male = 0,
members_under5 = 0,
members5to17 = 0,
members18to65 = 0,
members66 = 0
)
)
# Household Roster --------------------------------------------------------
sam_data_002 <- read_excel("sam_data_001.xlsx", sheet = 2) %>%
clean_names()
names(sam_data_002) <- gsub("section1_", "", names(sam_data_002))
roster <- sam_data_002 %>%
select(2:6, 8) %>%
replace_na(list (sec1q3 = 0))
# Household Food Security -------------------------------------------------
fs <- sam_data_001 %>%
select(genderhousehold, contains("sec2"))
fs <- fs %>%
select(-3, -11)
names(fs) <- gsub("sec2q2_", "", names(fs))
labels(fs) <-
c(sec2q1 = "In the past 5 years, where there years you did not have enough food to meet your family needs ",
sec2q3 = "How many meals excluding snacks do you normally have in a day",
sec2q4 = "Compared to 5 years ago, your households is: ")
# Major shocks and risks --------------------------------------------------
# Sheet 3 is a child table: one row per shock reported by a household.
sam_data_003 <- read_excel("sam_data_001.xlsx", sheet = 3) %>%
clean_names()
# Join sheet 3 with the primary key table so every shock row carries the
# gender of the household head (linked via the parent row index).
shocks <-
left_join(pt, sam_data_003, by = c("index" = "parent_index")) %>%
select(genderhousehold, sec3a_sec3a1)
labels(shocks) <-
c(genderhousehold = "Gender of the head of household",
sec3a_sec3a1 = "Which shocks did you experience")
# Shocks and social networks ----------------------------------------------
# NOTE(review): unlike the other sheets, sheet 4 is not passed through
# clean_names(); presumably the manual "sec3b/" strip below is sufficient --
# confirm the remaining column names are already syntactic.
sam_data_004 <- read_excel("sam_data_001.xlsx", sheet = 4)
names(sam_data_004) <- gsub("sec3b/", "", names(sam_data_004))
adaptation <- sam_data_004 %>%
select(sec3b1, 5:18)
# Shocks and local knowledge systems -----------------------------------------
# Sheet 5: indigenous knowledge systems (IKS) for weather prediction.
sam_data_005 <- read_excel("sam_data_001.xlsx", sheet = 5) %>%
  clean_names()
names(sam_data_005) <- gsub("sec4a_", "", names(sam_data_005))
iks <- sam_data_005 %>%
  select(1:2)
# Fix label wording: "Are the ways" -> "Are there ways".
labels(iks) <- c(
  sec4a1 = "Changes in weather pattern",
  sec4a2 = "Are there ways that local people predict or know about weather other than through radio"
)
iks2 <- sam_data_001 %>%
  select(genderhousehold, sec4a4, sec4a6)
# Fix label typos: "informatoon" -> "information", "agricutural" -> "agricultural".
labels(iks2) <- c(
  genderhousehold = "Gender of household head",
  sec4a4 = "In your view, are the local weather prediction systems useful?",
  sec4a6 = "Have you used this information to plan your agricultural activities?"
)

# Sacred sites ---------------------------------------------------------------
sacred_sites <- sam_data_001 %>%
  select(sec4b1, sec4b2)
# Sacred animals, birds, and trees ----------------------------------------
# Sheet 6 lists sacred animals/birds/trees named by respondents.
sam_data_006 <- read_excel("sam_data_001.xlsx", sheet = 6) %>%
clean_names()
# Recoding sam_data_006$section4c_sec4c1b into a harmonized factor: free-text
# answers are collapsed so spelling variants and Shona/English synonyms
# (e.g. "Gava" = "Jackal") share a single level. Unlisted responses pass
# through unchanged (fct_recode keeps unmentioned levels as-is).
sam_data_006$section4c_sec4c1b_rec <-
fct_recode(
sam_data_006$section4c_sec4c1b,
"Mariti" = "Manti",
"Mishumbi, Mukwakwa" = "Mishumbi ne Mukwakwa",
"Jackal" = "Gava",
"Mariti, Mbira" = "Mariti Mbira",
"Mbira" = "mbira",
"Mariti, Kowiro" = "Mariti, kowiro",
"Baboon, Jackal, Mbira" = "Mbira, Gava, Gudo",
"Baboon, Chikovo" = "Baboon, chikovo",
"Mariti, Dendera, Kowiro" = "Riti/Dendera,kowiro",
"Jackal" = "Gava/Jackle",
"Jichidza" = "Majijidza",
"Mariti, Kowiro" = "Kowiri, Mariti",
"Mariti, Kowiro" = "Mariti , kowiro",
"Jackal, Fox" = "Gava/Fox",
"Dendera" = "Matendera",
"Mariti" = "Riti",
"Kowiro, Owls, Masongano" = "Kowiro, Owls Masongano",
"Owl" = "Owls",
"Hyena" = "Hyenas",
"Baboon" = "Baboons",
"Mukamba, Mukwa, Mutuwa trees" = "Mukamba tree(Mukwa tree),Mutuwa",
"Mutuwa, Mubvumira, Mukamba trees" = "Mutuwa/mubvumira/ mukamba trees"
)
# Keep only the two question columns (yes/no flag + free-text name).
abt <- sam_data_006 %>%
select(1:2)
labels(abt) <-
c(section4c_sec4c1a = "Is there a sacred bird, tree, animal?",
section4c_sec4c1b = "Indicate the name")
# Access to services and programs ---------------------------------------------
# Sheet 7 is a child table: one row per service/program a household accessed.
sam_data_007 <- read_excel("sam_data_001.xlsx", sheet = 7) %>%
  clean_names()
# Join with the key table so every row carries the household head's gender.
sam_data_007 <-
  left_join(pt, sam_data_007, by = c("index" = "parent_index")) %>%
  select(
    genderhousehold,
    section5_sec5q1,
    section5_sec5q2,
    section5_sec5q3,
    section5_sec5q4,
    section5_sec5q5,
    section5_sec5q6,
    section5_sec5q7,
    section5_sec5q8,
    section5_sec5q9
  )
# Harmonize the free-text "who provided support" answers into a small set of
# provider categories (spelling variants and abbreviations collapsed).
sam_data_007$section5_sec5q2 <-
  fct_recode(
    sam_data_007$section5_sec5q2,
    "Social Welfare" = "Social walfare",
    "Government" = "Gvt",
    "BEAM" = "Beam",
    "Social Welfare" = "Social welfare",
    "Agritex" = "Agritex Officers",
    "Vet Services" = "Veterinary Services",
    "Vet Services" = "Vetinary services",
    "Agritex" = "Agritex Officer",
    "Vet Services" = "Veterinary Officers",
    "Vet Services" = "Veterinary",
    "Donors" = "USAID",
    "Agritex" = "Agritex officers",
    "Donors" = "NAC",
    "Agritex" = "Arex",
    "Donors" = "NGO",
    "Agritex" = "Agritex offices",
    "Vet Services" = "Verterinary services",
    "Donors" = "Usaid",
    "Donors" = "WfP",
    "Donors" = "Donor",
    "Government" = "GVT",
    "Donors" = "Dor",
    "Vet Services" = "Vertinary"
  )
sam_data_007 <- sam_data_007 %>%
  rename(
    `Programmes` = section5_sec5q1,
    `Source` = section5_sec5q2,
    `Did_HH_Travel` = section5_sec5q3,
    `HH_Member_Travelled` = section5_sec5q4,
    `Where_travelled` = section5_sec5q5,
    `Form_of_Transport` = section5_sec5q6,
    `Distance_in_km` = section5_sec5q7,
    `Duration_in_minutes` = section5_sec5q8,
    `Cost_of_Transport` = section5_sec5q9
  )
# Fixes: `<-` instead of `=` for the replacement assignment, plus label
# grammar/typos ("travelled to get suppot" -> "travel to get support").
labels(sam_data_007) <- c(
  Programmes = "Services and programmes",
  Source = "Who provided support?",
  Did_HH_Travel = "Did a hh member travel to get support?",
  HH_Member_Travelled = "Who travelled?",
  Where_travelled = "Where did he/she travel?",
  Form_of_Transport = "Form of transport used",
  Distance_in_km = "Distance in kilometres",
  Duration_in_minutes = "Time taken in minutes",
  Cost_of_Transport = "Cost of transport"
)
# Land ------------------------------------------------------------------------
# Sheet 8: one row per plot owned/used by a household.
sam_data_008 <- read_excel("sam_data_001.xlsx", sheet = 8) %>%
  clean_names()
# Drop unused columns by position (bookkeeping / repeat-group metadata).
sam_data_008 <- sam_data_008 %>%
  select(c(-3, -11:-17, -20:-39))
# NOTE(review): labels are attached before the columns are renamed below;
# this relies on per-column label attributes surviving rename() -- confirm,
# or move this assignment after the rename using the new names.
labels(sam_data_008) <- c(
  plot_questions_plot_id = "Plot Number",
  plot_questions_sec6a2 = "Type of Land",
  plot_questions_sec6a4 = "Irrigated",
  plot_questions_sec6a5 = "Tenure",
  plot_questions_sec6a6 = "Land Acquired Through",
  plot_questions_sec6a7 = "Principal Use",
  plot_questions_sec6a8 = "Crops Rotated",
  plot_questions_sec6a9a = "Degradation",
  plot_questions_sec6a9b = "Form of Degradation",
  plot_questions_sec6a10 = "Extent of Degradation",
  plot_questions_sec6a11 = "Manager"
)
sam_data_008 <- sam_data_008 %>%
  rename(
    `Plot_Number` = plot_questions_plot_id,
    `Type_of_Land` = plot_questions_sec6a2,
    `Irrigated` = plot_questions_sec6a4,
    `Tenure` = plot_questions_sec6a5,
    `Land_Acquired_Through` = plot_questions_sec6a6,
    `Principal_Use` = plot_questions_sec6a7,
    `Crops_Rotated` = plot_questions_sec6a8,
    `Degradation` = plot_questions_sec6a9a,
    `Form_of_Degradation` = plot_questions_sec6a9b,
    `Extent_of_Degradation` = plot_questions_sec6a10,
    `Who_Manage_the_field` = plot_questions_sec6a11
  )
# Crop Production - Last Rainy Season (2018/2019) -------------------------
# Sheet 9: one row per crop grown on a plot in the last rainy season.
sam_data_009 <- read_excel("sam_data_001.xlsx", sheet = 9) %>%
clean_names() %>% select(
sec6b_sec6b2,
sec6b_sec6b3,
sec6b_sec6b4,
sec6b_sec6b5,
sec6b_sec6b6,
sec6b_sec6b7,
sec6b_sec6b8,
sec6b_sec6b8a,
sec6b_sec6b8b,
sec6b_sec6b9,
sec6b_sec6b9a,
sec6b_sec6b9b
)
# Give the plot/crop identification columns readable names; the harvest
# quantity columns (sec6b8*, sec6b9*) keep their codes and are recoded below.
sam_data_009 <- sam_data_009 %>%
rename(
`Plot_Id` = sec6b_sec6b2,
`Crop` = sec6b_sec6b3,
`Type_of_Crop_Stand` = sec6b_sec6b4,
`Entire_Plot` = sec6b_sec6b5,
`Area_Under_Crop` = sec6b_sec6b6,
`Seed_Variety` = sec6b_sec6b7
)
# Recoding sam_data_009$sec6b_sec6b8 into sam_data_009$Total_Harve --------
# Map the harvest *unit* ("90Kg Bag", "Kilogram", ...) to its weight in kg;
# the unit count (sec6b8a) is multiplied in at the end to get total kg.
# NOTE(review): any unit value not listed here passes through fct_recode
# unchanged and would become NA (with a warning) at the as.numeric step --
# confirm the questionnaire only allows these five units.
sam_data_009$Total_Harvest_in_kgs <-
fct_recode(
sam_data_009$sec6b_sec6b8,
"90" = "90Kg Bag",
"25" = "25 Kg Bag",
"1" = "Kilogram",
"50" = "50 Kg Bag",
"75" = "75Kg Bag"
)
# Same unit-to-kg mapping for the *expected* harvest of the coming season.
sam_data_009$Expected_Harvest_in_kgs <-
fct_recode(
sam_data_009$sec6b_sec6b9,
"90" = "90Kg Bag",
"25" = "25 Kg Bag",
"1" = "Kilogram",
"50" = "50 Kg Bag",
"75" = "75Kg Bag")
## Recoding sam_data_009$Total_Harvest_in_kgs into sam_data_009$Total_Harvest_in_kgs_rec
# Relabel the levels in "x.0" form before numeric conversion; numerically
# this is a no-op (as.numeric("25") == as.numeric("25.0")).
sam_data_009$Total_Harvest_in_kgs_rec <- fct_recode(sam_data_009$Total_Harvest_in_kgs,
"25.0" = "25",
"50.0" = "50",
"75.0" = "75",
"90.0" = "90",
"1.0" = "1"
)
# Missing harvest unit is treated as zero kg, then the factor is converted
# to numeric via character (factor -> character -> numeric avoids the
# level-index pitfall of as.numeric(factor)).
sam_data_009$Total_Harvest_in_kgs_rec <- fct_explicit_na(sam_data_009$Total_Harvest_in_kgs_rec, "0")
sam_data_009$Total_Harvest_in_kgs_rec <- as.numeric(as.character(sam_data_009$Total_Harvest_in_kgs_rec))
## Recoding sam_data_009$sec6b_sec6b8a into sam_data_009$sec6b_sec6b8a_rec
# sec6b8a = number of harvest units. Same "x.0" relabeling pattern as above;
# values not listed pass through unchanged and still parse with as.numeric.
sam_data_009$sec6b_sec6b8a_rec <- as.character(sam_data_009$sec6b_sec6b8a)
sam_data_009$sec6b_sec6b8a_rec <- fct_recode(sam_data_009$sec6b_sec6b8a_rec,
"1.0" = "1",
"2.0" = "2",
"0.0" = "0",
"3.0" = "3",
"4.0" = "4",
"6.0" = "6",
"7.0" = "7",
"500.0" = "500",
"350.0" = "350",
"260.0" = "260",
"200.0" = "200",
"100.0" = "100",
"50.0" = "50",
"9.0" = "9",
"5.0" = "5",
"20.0" = "20",
"15.0" = "15",
"12.0" = "12"
)
sam_data_009$sec6b_sec6b8a_rec <- fct_explicit_na(sam_data_009$sec6b_sec6b8a_rec, "0.0")
sam_data_009$sec6b_sec6b8a_rec <- as.numeric(as.character(sam_data_009$sec6b_sec6b8a_rec))
# Total harvest in kg = kg-per-unit * number of units.
sam_data_009$Total_Harvest_in_kgs_ <- (sam_data_009$Total_Harvest_in_kgs_rec * sam_data_009$sec6b_sec6b8a_rec)
# Crop production -------------------------------------------------------------
# Sheet 10: crops currently grown, plus trends since 2009.
sam_data_010 <- read_excel("sam_data_001.xlsx", sheet = 10) %>%
  clean_names()
names(sam_data_010) <- gsub("sec6c_", "", names(sam_data_010))
crops <- sam_data_010 %>% select(1:3, 29:30)
# Fix label typo: "croop" -> "crop".
labels(crops) <- c(
  sec6c1 = "Name of crop grown",
  sec6c2 = "Trend in proportion under this crop",
  sec6c3 = "Explain the change in land size",
  sec6c4 = "Production levels in 2009",
  sec6c5 = "Explain the change in production"
)

# Crop production - changing preferences (crops dropped) ----------------------
sam_data_011 <- read_excel("sam_data_001.xlsx", sheet = 11) %>%
  clean_names()
crops_1980 <- sam_data_011 %>%
  select(1:2)
labels(crops_1980) <- c(sec6d_sec6d1 = "Crop dropped since 1980",
                        sec6d_sec6d2 = "Why was the crop dropped")

# Crop production and changing preferences (crops adopted) --------------------
sam_data_012 <- read_excel("sam_data_001.xlsx", sheet = 12) %>%
  clean_names()
crops_1980a <- sam_data_012 %>%
  select(1:2)
# Fix copy/paste defect: this section covers crops ADOPTED since 1980
# (questions sec6d3/sec6d4), but the labels said "dropped".
labels(crops_1980a) <- c(sec6dd_sec6d3 = "Crop adopted since 1980",
                         sec6dd_sec6d4 = "Why was the crop adopted")
# Watershed condition and management -------------------------------------------
# Sheet 13: observed environmental changes and prevention measures.
sam_data_013 <- read_excel("sam_data_001.xlsx", sheet = 13) %>%
  clean_names() %>%
  select(sec7a_sec7a1,
         sec7a_sec7a2,
         sec7a_sec7a3,
         sec7a_sec7a4)
sam_data_013 <- sam_data_013 %>%
  rename(
    `Type_of_change` = sec7a_sec7a1,
    `Observed_in_comm` = sec7a_sec7a2,
    `Observed_in_own_field` = sec7a_sec7a3,
    `Prevention_measure` = sec7a_sec7a4
  )
# Fix: the last label name must match the renamed column `Prevention_measure`
# (the original used the plural "Prevention_measures", which matches no column).
labels(sam_data_013) <- c(
  Type_of_change = "Type of change",
  Observed_in_comm = "Change observed in community",
  Observed_in_own_field = "Change observed in own field",
  Prevention_measure = "Prevention measure"
)
|
library(shiny)
# Server for the datatable-options example: six renderings of the iris data
# set, each demonstrating one DataTables configuration option.
# NOTE(review): the option names below (iDisplayLength, aLengthMenu,
# bPaginate, ...) are the legacy DataTables 1.9.x "Hungarian" names; recent
# shiny/DT versions expect the 1.10+ camelCase names (pageLength, lengthMenu,
# paging, ...) -- confirm the shiny version pinned for this example.
shinyServer(function(input, output) {
# display 10 rows initially
output$ex1 <- renderDataTable(iris, options = list(iDisplayLength = 10))
# -1 means no pagination; the 2nd element contains menu labels
output$ex2 <- renderDataTable(iris, options = list(
aLengthMenu = list(c(5, 15, -1), c('5', '15', 'All')),
iDisplayLength = 15
))
# you can also use bPaginate = FALSE to disable pagination
output$ex3 <- renderDataTable(iris, options = list(bPaginate = FALSE))
# turn off filtering (no searching boxes)
output$ex4 <- renderDataTable(iris, options = list(bFilter = FALSE))
# turn off filtering on individual columns (one list entry per column)
output$ex5 <- renderDataTable(iris, options = list(
aoColumns = list(list(bSearchable = TRUE), list(bSearchable = TRUE),
list(bSearchable = FALSE), list(bSearchable = FALSE),
list(bSearchable = TRUE)),
iDisplayLength = 10
))
# write literal JS code in I() so it is passed through unquoted to DataTables
output$ex6 <- renderDataTable(
iris,
options = list(fnRowCallback = I(
'function(nRow, aData, iDisplayIndex, iDisplayIndexFull) {
// Bold cells for those >= 5 in the first column
if (parseFloat(aData[0]) >= 5.0)
$("td:eq(0)", nRow).css("font-weight", "bold");
}'
))
)
})
| /018-datatable-options/server.R | no_license | joey711/shiny-examples | R | false | false | 1,303 | r | library(shiny)
# Server for the datatable-options example (duplicate copy): six renderings
# of iris, each demonstrating one DataTables configuration option.
# NOTE(review): these option names are the legacy DataTables 1.9.x names;
# DataTables 1.10+ renamed them (pageLength, lengthMenu, paging, searching).
shinyServer(function(input, output) {
# display 10 rows initially
output$ex1 <- renderDataTable(iris, options = list(iDisplayLength = 10))
# -1 means no pagination; the 2nd element contains menu labels
output$ex2 <- renderDataTable(iris, options = list(
aLengthMenu = list(c(5, 15, -1), c('5', '15', 'All')),
iDisplayLength = 15
))
# you can also use bPaginate = FALSE to disable pagination
output$ex3 <- renderDataTable(iris, options = list(bPaginate = FALSE))
# turn off filtering (no searching boxes)
output$ex4 <- renderDataTable(iris, options = list(bFilter = FALSE))
# turn off filtering on individual columns (one list entry per column)
output$ex5 <- renderDataTable(iris, options = list(
aoColumns = list(list(bSearchable = TRUE), list(bSearchable = TRUE),
list(bSearchable = FALSE), list(bSearchable = FALSE),
list(bSearchable = TRUE)),
iDisplayLength = 10
))
# write literal JS code in I() so it is passed through unquoted to DataTables
output$ex6 <- renderDataTable(
iris,
options = list(fnRowCallback = I(
'function(nRow, aData, iDisplayIndex, iDisplayIndexFull) {
// Bold cells for those >= 5 in the first column
if (parseFloat(aData[0]) >= 5.0)
$("td:eq(0)", nRow).css("font-weight", "bold");
}'
))
)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/customer.R
\name{set_risk_action}
\alias{set_risk_action}
\title{Whitelist / Blacklist Customer}
\usage{
set_risk_action(authorization, ...)
}
\arguments{
\item{authorization}{set_keys("", "SECRET_KEY")$secret,
equivalent of "-H Authorization: Bearer SECRET_KEY"}

\item{...}{Body Params}

\item{customer}{string. REQUIRED
Customer's ID, code, or email address}

\item{risk_action}{string.
One of the possible risk actions. allow to whitelist. deny to blacklist.}
}
\value{
The response returned by the Paystack API for the risk-action request.
}
\description{
Whitelist / Blacklist Customer
}
| /man/set_risk_action.Rd | no_license | Wenmeilin/paystack | R | false | true | 628 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/customer.R
\name{set_risk_action}
\alias{set_risk_action}
\title{Whitelist / Blacklist Customer}
\usage{
set_risk_action(authorization, ...)
}
\arguments{
\item{authorization}{set_keys("", "SECRET_KEY")$secret,
equivalent of "-H Authorization: Bearer SECRET_KEY"}

\item{...}{Body Params}

\item{customer}{string. REQUIRED
Customer's ID, code, or email address}

\item{risk_action}{string.
One of the possible risk actions. allow to whitelist. deny to blacklist.}
}
\value{
The response returned by the Paystack API for the risk-action request.
}
\description{
Whitelist / Blacklist Customer
}
|
#######################################################################
## Merge BAM files based on factor and return updated SYSargs object ##
#######################################################################
## Useful prior to peak calling in ChIP-Seq or miRNA gene prediction experiments
## where pooling of replicates maximizes depth of read coverage.
## Note: default factor is "Factor"
## Merge BAM files that share the same value in a targets column (default
## "Factor") and return an updated workflow object pointing to the merged
## files. Pooling replicates maximizes read-coverage depth prior to peak
## calling (ChIP-Seq) or miRNA gene prediction.
##
## Arguments:
##   args        SYSargs, SYSargs2, or named character vector of BAM paths
##               (names are the sample IDs).
##   targetsDF   targets DFrame; required only when 'args' is a character vector.
##   mergefactor name of the targets column whose duplicated values define
##               the replicate groups to merge.
##   overwrite   overwrite existing merged BAM files?
##   silent      suppress progress messages?
##   ...         currently unused.
## Returns: an object of the same class as 'args', subset to the first entry
## of each factor level, with outfile slots pointing to the merged BAM files.
mergeBamByFactor <- function(args, targetsDF = NULL, mergefactor = "Factor",
                             overwrite = FALSE, silent = FALSE, ...) {
    if (!any(inherits(args, "SYSargs"), inherits(args, "SYSargs2"), inherits(args, "character"))) stop("Argument 'x' needs to be assigned an object of class 'SYSargs' OR 'SYSargs2' OR named character vector")
    ## SYSargs class
    if (inherits(args, "SYSargs")) {
        mergefactor <- targetsin(args)[[mergefactor]]
        targetsin <- targetsin(args)
        bampaths <- infile1(args)
        if (!"FileName" %in% colnames(targetsin)) stop("Name of one column in 'targetsin(arg)' is expected to be 'FileName'.")
        ## SYSargs2 class
    } else if (inherits(args, "SYSargs2")) {
        mergefactor <- targets.as.df(targets(args))[[mergefactor]]
        targetsin <- targets.as.df(targets(args))
        bampaths <- infile1(args)
        if (!"FileName" %in% colnames(targetsin)) stop("Name of one column in 'targetsin(arg)' is expected to be 'FileName'.")
    } else if (inherits(args, "character")) {
        if (is.null(names(args))) stop("Please provide a named character vector, where the names elements should be the sampleID")
        bampaths <- args
        if (is.null(targetsDF)) stop("'targets argument is required when a named character vector is provided for 'args' argument'.")
        if (!inherits(targetsDF, "DFrame")) stop("Argument 'targets' needs to be assigned an object of class 'DFrame'")
        targetsDF <- as.data.frame(targetsDF)
        mergefactor <- targetsDF[[mergefactor]]
        targetsin <- targetsDF
    }
    ## Check validity of input.
    ## Fix: anchor the upper-case alternative too ("\\.BAM$"); previously any
    ## file merely *containing* ".BAM" passed the extension check.
    allbam <- !grepl("\\.bam$|\\.BAM$", bampaths)
    if (any(allbam)) stop("The following files lack the extension '.bam': ", paste(basename(bampaths[allbam]), collapse = ", "))
    ## Unique values in Factor column of targetsin(args); sample_index keeps
    ## the first occurrence of each factor level.
    sample_index <- !duplicated(as.character(mergefactor))
    names(sample_index) <- names(bampaths)
    uniorder <- unique(mergefactor)
    unifreq <- table(mergefactor)[uniorder] # Important: in order of targetsin(args)!
    if (!any(unifreq >= 2)) warning("Values in Factor column are all unique. Thus, there are no BAM files to merge.")
    ## Store BAM file paths in a list that have >=2 identical values (replicates) in Factor column of targets file
    filelist <- tapply(bampaths, factor(mergefactor), as.character)
    filelist <- filelist[names(unifreq)] # Assures original order
    ## Create vector containing paths of output BAM files; the merged file is
    ## named after the group's first BAM with the factor level appended.
    filelist_merge <- filelist[names(unifreq[unifreq >= 2])] # Merge only those with >= 2 files
    outname_vec <- character(length(filelist_merge))
    names(outname_vec) <- names(filelist_merge)
    for (i in seq(along = outname_vec)) {
        outname <- gsub("\\.bam$|\\.BAM$", "", filelist_merge[[i]][1])
        outname <- paste(outname, "_", names(filelist_merge[i]), ".bam", sep = "")
        outname_vec[i] <- outname
    }
    ## If any output BAM file exists and 'overwrite=FALSE' then stop
    file_exists <- file.exists(outname_vec)
    names(file_exists) <- names(outname_vec)
    if (any(file_exists) & !overwrite) stop("The following files exist: ", paste(names(file_exists)[file_exists], collapse = ", "), ". Delete/rename them or set 'overwrite=TRUE'")
    ## Generate collapsed BAM files (indexed on write)
    for (i in seq(along = filelist_merge)) {
        mergeBam(filelist_merge[[i]], outname_vec[i], indexDestination = TRUE, overwrite = overwrite)
        if (!silent) {
            cat("Merged BAM files:", basename(filelist_merge[[i]]), "and saved to file", basename(outname_vec[i]), "\n\n")
        }
    }
    ## Generate updated SYSargs and SYSargs2 object
    filelist[names(outname_vec)] <- outname_vec # Assign new file names to proper slots
    outfile_names <- unlist(filelist)
    args_sub <- args[sample_index]
    ## SYSargs class
    if (inherits(args, "SYSargs")) {
        targets_out <- targetsout(args_sub)
        targets_out[, "FileName"] <- outfile_names
        rownames(targets_out) <- NULL
        syslist <- list(
            targetsin = targetsin(args_sub),
            targetsout = targets_out,
            targetsheader = targetsheader(args_sub),
            modules = modules(args_sub),
            software = "mergeBamByFactor",
            cores = cores(args_sub),
            other = other(args_sub),
            reference = reference(args_sub),
            results = results(args_sub),
            infile1 = infile1(args_sub),
            infile2 = infile2(args_sub),
            outfile1 = outfile_names,
            sysargs = sysargs(args_sub),
            outpaths = outfile_names
        )
        args_sub_out <- as(syslist, "SYSargs")
        ## SYSargs2 class
    } else if (inherits(args, "SYSargs2")) {
        out <- sapply(names(outfile_names), function(x) list(outfile_names[[x]]), simplify = FALSE)
        for (i in seq_along(out)) {
            names(out[[i]]) <- files(args)$step
        }
        sys2list <- list(
            targets = targets(args_sub),
            targetsheader = targetsheader(args_sub),
            modules = as.list(modules(args_sub)),
            wf = wf(args_sub),
            clt = clt(args_sub),
            yamlinput = yamlinput(args_sub),
            cmdlist = cmdlist(args_sub),
            input = input(args_sub),
            output = out,
            files = files(args_sub),
            inputvars = inputvars(args_sub),
            cmdToCwl = list(),
            status = data.frame(),
            internal_outfiles = list()
        )
        args_sub_out <- as(sys2list, "SYSargs2")
    } else if (inherits(args, "character")) {
        targetsDF <- targetsDF[, -which(colnames(targetsDF) %in% c("FileName1", "FileName2", "FileName"))]
        targetsDF <- targetsDF[sample_index, ]
        ## NOTE(review): outname_vec only covers the *merged* groups; if some
        ## factor levels are singletons its length differs from nrow(targetsDF).
        ## Possibly 'outfile_names' (one path per level) was intended -- confirm.
        args_sub_out <- cbind(FileName = outname_vec, targetsDF)
    }
    return(args_sub_out)
}
## Usage:
# args <- systemArgs(sysma=NULL, mytargets="targets_bam.txt")
# args_merge <- mergeBamByFactor(args, overwrite=TRUE, silent=FALSE)
# writeTargetsout(x=args_merge, file="targets_mergeBamByFactor.txt", overwrite=TRUE)
############################################################################
## Create targets file with reference sample, e.g. input sample for ChIP-Seq ##
############################################################################
## Create a targets file with a sample-wise reference column (e.g. the input
## sample of a ChIP-Seq experiment). Rows with an empty/NA SampleReference are
## dropped; for the remaining rows the FileName of the referenced sample is
## written to a new FileName2 column while the row's own file moves to
## FileName1. Header comment lines (starting with '#') are preserved.
##
## Arguments:
##   infile    path to an existing tab-delimited targets file.
##   outfile   path for the rearranged targets file.
##   silent    suppress the status message?
##   overwrite overwrite an existing 'outfile'?
##   ...       passed on to writeLines().
writeTargetsRef <- function(infile, outfile, silent = FALSE, overwrite = FALSE, ...) {
    ## Import
    headerlines <- readLines(infile)
    targets <- read.delim(infile, comment.char = "#")
    ## Check for expected input
    if (!"SampleReference" %in% colnames(targets)) stop("Targets file lacks SampleReference column")
    if (!"FileName" %in% colnames(targets)) stop("Targets file lacks FileName column")
    if (all(c("FileName1", "FileName2") %in% colnames(targets))) stop("Targets file is expected to have only one FileName column")
    ## Scalar condition: use '&&' and '!overwrite' (was 'overwrite == FALSE')
    if (file.exists(outfile) && !overwrite) {
        stop(
            "I am not allowed to overwrite files; please delete existing file: ",
            outfile, " or set 'overwrite=TRUE'"
        )
    }
    ## Every non-empty SampleReference must name an existing SampleName
    testv <- as.character(targets$SampleReference)
    testv <- testv[!is.na(testv)]
    testv <- testv[testv != ""]
    myfiles <- as.character(targets$FileName)
    names(myfiles) <- as.character(targets$SampleName)
    if (!all(testv %in% names(myfiles))) {
        stop(
            "Value(s) ", paste(testv[!testv %in% names(myfiles)], collapse = ", "),
            " from SampleReference column have no matches in SampleName column!"
        )
    }
    ## Rearrange targets file: own file -> FileName1, reference's file -> FileName2
    targets <- data.frame(FileName1 = targets$FileName, FileName2 = NA, targets)
    targets[, "FileName2"] <- myfiles[as.character(targets$SampleReference)]
    targets <- targets[!is.na(as.character(targets$SampleReference)), , drop = FALSE]
    targets <- targets[targets$SampleReference != "", , drop = FALSE]
    targets <- targets[, !(names(targets) %in% "FileName")]
    ## Export targets file including header lines
    headerlines <- headerlines[grepl("^#", headerlines)]
    targetslines <- c(paste(colnames(targets), collapse = "\t"), apply(targets, 1, paste, collapse = "\t"))
    writeLines(c(headerlines, targetslines), outfile, ...)
    if (!silent) cat("\t", "Modified", infile, "file with sample-wise reference has been written to outfile", outfile, "\n")
}
## Usage:
# writeTargetsRef(infile="~/targets.txt", outfile="~/targets_refsample.txt", silent=FALSE, overwrite=FALSE)
########################################################
## Iterative read counting over different range files ##
########################################################
## Convenience function to perform read counting over several different
## range sets, e.g. peak ranges or feature types
## Iterative read counting over several range files (e.g. peak ranges or
## feature types). For each input range file, reads from the BamFileList are
## counted per range and the count table is written to the matching outfile.
##
## Arguments:
##   bfl      BamFileList with the alignment files to count.
##   args     SYSargs, SYSargs2, or named character vector of range-file paths
##            (names are the sample IDs).
##   outfiles named character vector of output paths; required only when
##            'args' is a character vector.
##   format   "tabular" (read.delim -> GRanges) or "bed" (rtracklayer import).
##   ...      passed to GenomicAlignments::summarizeOverlaps().
## Returns: character vector of the written count-table paths.
countRangeset <- function(bfl, args, outfiles = NULL, format = "tabular", ...) {
    pkg <- c("GenomeInfoDb")
    checkPkg(pkg, quietly = FALSE)
    ## Input validity checks
    if (!inherits(bfl, "BamFileList")) stop("'bfl' needs to be of class 'BamFileList'.")
    if (!any(inherits(args, "SYSargs"), inherits(args, "SYSargs2"), inherits(args, "character"))) stop("Argument 'x' needs to be assigned an object of class 'SYSargs' OR 'SYSargs2' OR named character vector")
    ## SYSargs or SYSargs2 class
    if (inherits(args, "SYSargs") || inherits(args, "SYSargs2")) {
        missing_files <- infile1(args)[!file.exists(infile1(args))]
        if (length(missing_files) != 0) stop("The following files assigned to 'infile1(args)' do not exist: ", paste(basename(missing_files), collapse = ", "))
        peak_files <- infile1(args)
        if (inherits(args, "SYSargs")) {
            countDFnames <- outpaths(args)
        } else {
            countDFnames <- subsetWF(args, slot = "output", subset = 1, index = 1)
        }
        ## named character vector class
    } else if (inherits(args, "character")) {
        if (is.null(names(args))) stop("Please provide a named character vector, where the names elements should be the sampleID")
        peak_files <- args
        if (is.null(outfiles)) stop("Please provide a named character vector, where the names elements should be the sampleID and the outfiles names.")
        if (!all(names(outfiles) %in% names(peak_files))) stop("names of 'outfiles' argument should be find at names of 'args' argument.")
        countDFnames <- outfiles
    }
    ## Count reads per range for every input range file
    for (i in seq(along = peak_files)) {
        if (format == "tabular") {
            df <- read.delim(peak_files[i], comment.char = "#")
            peaks <- as(df, "GRanges")
        } else if (format == "bed") {
            peaks <- rtracklayer::import.bed(peak_files[i])
        } else {
            stop("Input file format not supported.")
        }
        ## Range IDs of the form "chr_start-end"; splitting by ID keeps one
        ## GRanges per counting unit for summarizeOverlaps().
        names(peaks) <- paste0(as.character(GenomeInfoDb::seqnames(peaks)), "_", start(peaks), "-", end(peaks))
        peaks <- split(peaks, names(peaks))
        countDF <- GenomicAlignments::summarizeOverlaps(peaks, bfl, ...)
        countDF <- SummarizedExperiment::assays(countDF)$counts
        write.table(countDF, countDFnames[i], col.names = NA, quote = FALSE, sep = "\t")
        cat("Wrote count result", i, "to", basename(countDFnames[i]), "\n")
    }
    ## For character input, carry the sample IDs over to the returned paths.
    ## (The previous if/else duplicated the return statement in both branches.)
    if (inherits(args, "character")) {
        names(countDFnames) <- names(args)
    }
    return(countDFnames)
}
## Usage:
# countDFnames <- countRangeset(bfl, args, mode="Union", ignore.strand=TRUE)
################################################################################
## Iterative edgeR/DESeq2 analysis over counts sets from different range sets ##
################################################################################
## Convenience function to iterate over several count sets generated by
## countRangeset() or similar utilities
## Iterative edgeR/DESeq2 analysis over count sets from different range sets.
## For every count table referenced by 'args', runs the supplied DEG function,
## writes the result table, and saves the filterDEGs() summary plot as PDF.
##
## Arguments:
##   args      SYSargs, SYSargs2, or character vector of count-table paths.
##   outfiles  output paths; required only when 'args' is a character vector.
##   diffFct   DEG function to call, e.g. run_edgeR or run_DESeq2.
##   targets   targets data.frame passed to 'diffFct'.
##   cmp       comparison matrix passed to 'diffFct'.
##   dbrfilter filter settings passed to filterDEGs(), e.g. c(Fold=2, FDR=1).
##   ...       passed on to 'diffFct'.
## Returns: named list with one filterDEGs() result per count set.
runDiff <- function(args, outfiles = NULL, diffFct, targets, cmp, dbrfilter, ...) {
    if (!any(inherits(args, "SYSargs"), inherits(args, "SYSargs2"), inherits(args, "character"))) stop("Argument 'x' needs to be assigned an object of class 'SYSargs' OR 'SYSargs2' OR named character vector")
    ## SYSargs class
    if (inherits(args, "SYSargs")) {
        countfiles <- infile1(args)
        dbrDFnames <- outpaths(args)
        ## SYSargs2 class
    } else if (inherits(args, "SYSargs2")) {
        countfiles <- infile1(args)
        dbrDFnames <- subsetWF(args, slot = "output", subset = 1, index = 1)
    } else if (inherits(args, "character")) {
        countfiles <- args
        dbrDFnames <- outfiles
    }
    ## Input validity checks
    absent_count_files <- countfiles[!file.exists(countfiles)]
    if (length(absent_count_files) != 0) stop("The following files assigned to 'countfiles' do not exist: ", paste(basename(absent_count_files), collapse = ", "))
    ## Perform differential analysis; preallocate instead of growing with c()
    dbrlists <- vector("list", length(dbrDFnames))
    for (i in seq(along = dbrDFnames)) {
        countDF <- read.delim(countfiles[i], row.names = 1)
        edgeDF <- diffFct(countDF = countDF, targets, cmp, ...)
        write.table(edgeDF, dbrDFnames[i], quote = FALSE, sep = "\t", col.names = NA)
        grDevices::pdf(paste0(dbrDFnames[i], ".pdf"))
        ## 'finally' guarantees the PDF device is closed even if filterDEGs()
        ## errors; previously a failure here leaked an open graphics device.
        DBR_list <- tryCatch(
            filterDEGs(degDF = edgeDF, filter = dbrfilter),
            finally = grDevices::dev.off()
        )
        dbrlists[[i]] <- DBR_list
        names(dbrlists)[i] <- names(dbrDFnames[i])
        cat("Wrote count result", i, "to", basename(dbrDFnames[i]), "\n")
        cat("Saved plot", i, "to", basename(paste0(dbrDFnames[i], ".pdf")), "\n")
    }
    return(dbrlists)
}
## Usage:
# dbrlist <- runDiff(args, diffFct=run_edgeR, targets=targetsin(args_bam), cmp=cmp[[1]], independent=TRUE, dbrfilter=c(Fold=2, FDR=1))
# dbrlist <- runDiff(args, diffFct=run_edgeR, targets=ttargets.as.df(targets(args_bam)), cmp=cmp[[1]], independent=TRUE, dbrfilter=c(Fold=2, FDR=1))
###########################################################
## (A) olRanges Function for IRanges and GRanges Objects ##
###########################################################
## Identify Range Overlaps
## Identify range overlaps between two GRanges or two IRanges objects.
## For every overlapping query/subject pair, reports the overlap coordinates,
## its absolute length, its length relative to query and subject (percent),
## and the overlap type: "olup" (subject starts upstream), "oldown" (subject
## ends downstream), "inside" (subject within query), "contained" (query
## within subject).
##
## Arguments:
##   query, subject  both GRanges or both IRanges.
##   output          "gr" returns the annotated query ranges, "df" a data.frame.
olRanges <- function(query, subject, output = "gr") {
    pkg <- c("IRanges", "GenomeInfoDb")
    checkPkg(pkg, quietly = FALSE)
    ## Input check: use inherits() rather than class() comparison
    is_gr <- inherits(query, "GRanges") && inherits(subject, "GRanges")
    is_ir <- inherits(query, "IRanges") && inherits(subject, "IRanges")
    if (!(is_gr || is_ir)) {
        stop("Query and subject need to be of same class, either GRanges or IRanges!")
    }
    ## Find overlapping ranges; drop seqlengths so out-of-bound warnings
    ## cannot be triggered by downstream operations
    if (is_gr) {
        GenomeInfoDb::seqlengths(query) <- rep(NA, length(GenomeInfoDb::seqlengths(query)))
        GenomeInfoDb::seqlengths(subject) <- rep(NA, length(GenomeInfoDb::seqlengths(subject)))
    }
    olindex <- as.matrix(GenomicRanges::findOverlaps(query, subject))
    query <- query[olindex[, 1]]
    subject <- subject[olindex[, 2]]
    olma <- cbind(Qstart = start(query), Qend = end(query), Sstart = start(subject), Send = end(subject))
    ## Pre-queries for overlaps (elementwise logical masks)
    startup <- olma[, "Sstart"] < olma[, "Qstart"]
    enddown <- olma[, "Send"] > olma[, "Qend"]
    startin <- olma[, "Sstart"] >= olma[, "Qstart"] & olma[, "Sstart"] <= olma[, "Qend"]
    endin <- olma[, "Send"] >= olma[, "Qstart"] & olma[, "Send"] <= olma[, "Qend"]
    ## Overlap types
    olup <- startup & endin
    oldown <- startin & enddown
    inside <- startin & endin
    contained <- startup & enddown
    ## Overlap types in one vector
    OLtype <- rep("", length(olma[, "Qstart"]))
    OLtype[olup] <- "olup"
    OLtype[oldown] <- "oldown"
    OLtype[inside] <- "inside"
    OLtype[contained] <- "contained"
    ## Overlap positions
    OLstart <- rep(0, length(olma[, "Qstart"]))
    OLend <- rep(0, length(olma[, "Qstart"]))
    OLstart[olup] <- olma[, "Qstart"][olup]
    OLend[olup] <- olma[, "Send"][olup]
    OLstart[oldown] <- olma[, "Sstart"][oldown]
    OLend[oldown] <- olma[, "Qend"][oldown]
    OLstart[inside] <- olma[, "Sstart"][inside]
    OLend[inside] <- olma[, "Send"][inside]
    OLstart[contained] <- olma[, "Qstart"][contained]
    OLend[contained] <- olma[, "Qend"][contained]
    ## Absolute and relative length of overlaps
    OLlength <- (OLend - OLstart) + 1
    OLpercQ <- OLlength / width(query) * 100
    OLpercS <- OLlength / width(subject) * 100
    ## Output type
    oldf <- data.frame(Qindex = olindex[, 1], Sindex = olindex[, 2], olma, OLstart, OLend, OLlength, OLpercQ, OLpercS, OLtype)
    if (is_gr) {
        oldf <- cbind(space = as.character(GenomeInfoDb::seqnames(query)), oldf)
    }
    if (output == "df") {
        return(oldf)
    }
    if (output == "gr") {
        if (is_gr) {
            ## Namespace elementMetadata consistently (inner call was bare)
            S4Vectors::elementMetadata(query) <- cbind(as.data.frame(S4Vectors::elementMetadata(query)), oldf)
        } else {
            ## IRanges input: wrap results in a GRanges on a dummy chromosome
            query <- GRanges(seqnames = S4Vectors::Rle(rep("dummy", length(query))), ranges = IRanges::IRanges(start = oldf[, "Qstart"], end = oldf[, "Qend"]), strand = S4Vectors::Rle(BiocGenerics::strand(rep("+", length(query)))), oldf)
        }
        return(query)
    }
}
## Run olRanges function
## Sample Data Sets
# grq <- GRanges(seqnames = S4Vectors::Rle(c("chr1", "chr2", "chr1", "chr3"), c(1, 3, 2, 4)),
# ranges = IRanges(seq(1, 100, by=10), end = seq(30, 120, by=10)),
# strand = S4Vectors::Rle(BiocGenerics::strand(c("-", "+", "-")), c(1, 7, 2)))
# grs <- shift(grq[c(2,5,6)], 5)
# olRanges(query=grq, subject=grs, output="df")
# olRanges(query=grq, subject=grs, output="gr")
| /R/chipseq.R | no_license | dcassol/systemPipeR | R | false | false | 18,240 | r | #######################################################################
## Merge BAM files based on factor and return updated SYSargs object ##
#######################################################################
## Useful prior to peak calling in ChIP-Seq or miRNA gene prediction experiments
## where pooling of replicates maximizes depth of read coverage.
## Note: default factor is "Factor"
## Merge BAM files of replicates defined by a common factor, e.g. the 'Factor'
## column of the imported targets file. Useful prior to peak calling in
## ChIP-Seq or miRNA gene prediction experiments where pooling of replicates
## maximizes depth of read coverage.
##
## args: SYSargs/SYSargs2 object, or a named character vector of BAM paths
##       (names are the sample IDs; requires 'targetsDF').
## targetsDF: DFrame with targets information; used only for character input.
## mergefactor: name of the targets column defining the replicate groups.
## overwrite: if FALSE, stop when an output BAM file already exists.
## silent: if TRUE, suppress progress messages.
## Returns an updated SYSargs/SYSargs2 object (or a data.frame for character
## input) reduced to one entry per factor level, with the merged BAM paths
## assigned to the output slots.
mergeBamByFactor <- function(args, targetsDF = NULL, mergefactor = "Factor",
                             overwrite = FALSE, silent = FALSE, ...) {
    if (!any(inherits(args, "SYSargs"), inherits(args, "SYSargs2"), inherits(args, "character"))) stop("Argument 'x' needs to be assigned an object of class 'SYSargs' OR 'SYSargs2 OR named character vector")
    ## SYSargs class
    if (inherits(args, "SYSargs")) {
        mergefactor <- targetsin(args)[[mergefactor]]
        targetsin <- targetsin(args)
        bampaths <- infile1(args)
        if (!"FileName" %in% colnames(targetsin)) stop("Name of one column in 'targetsin(arg)' is expected to be 'FileName'.")
        ## SYSargs2 class
    } else if (inherits(args, "SYSargs2")) {
        mergefactor <- targets.as.df(targets(args))[[mergefactor]]
        targetsin <- targets.as.df(targets(args))
        bampaths <- infile1(args)
        if (!"FileName" %in% colnames(targetsin)) stop("Name of one column in 'targetsin(arg)' is expected to be 'FileName'.")
    } else if (inherits(args, "character")) {
        if (is.null(names(args))) stop("Please provide a named character vector, where the names elements should be the sampleID")
        bampaths <- args
        if (is.null(targetsDF)) stop("'targets argument is required when a named character vector is provided for 'args' argument'.")
        if (!inherits(targetsDF, "DFrame")) stop("Argument 'targets' needs to be assigned an object of class 'DFrame'")
        targetsDF <- as.data.frame(targetsDF)
        mergefactor <- targetsDF[[mergefactor]]
        targetsin <- targetsDF
    }
    ## Check validity of input: every path must end with a '.bam'/'.BAM'
    ## extension. Bug fix: the '\\.BAM' alternative was previously unanchored,
    ## so '.BAM' occurring anywhere in a path was accepted; both alternatives
    ## are now anchored with '$' (consistent with the gsub() call below).
    allbam <- !grepl("\\.bam$|\\.BAM$", bampaths)
    if (any(allbam)) stop("The following files lack the extension '.bam': ", paste(basename(bampaths[allbam]), collapse = ", "))
    ## Unique values in Factor column of targetsin(args)
    sample_index <- !duplicated(as.character(mergefactor))
    names(sample_index) <- names(bampaths)
    uniorder <- unique(mergefactor)
    unifreq <- table(mergefactor)[uniorder] # Important: in order of targetsin(args)!
    if (!any(unifreq >= 2)) warning("Values in Factor column are all unique. Thus, there are no BAM files to merge.")
    ## Store BAM file paths in a list that have >=2 identical values (replicates) in Factor column of targets file
    filelist <- tapply(bampaths, factor(mergefactor), as.character)
    filelist <- filelist[names(unifreq)] # Assures original order
    ## Create vector containing paths of output BAM files
    filelist_merge <- filelist[names(unifreq[unifreq >= 2])] # Merge only those with >= 2 files
    outname_vec <- character(length(filelist_merge))
    names(outname_vec) <- names(filelist_merge)
    for (i in seq(along = outname_vec)) {
        ## Output name: first replicate's path with the factor level appended
        outname <- gsub("\\.bam$|\\.BAM$", "", filelist_merge[[i]][1])
        outname <- paste(outname, "_", names(filelist_merge[i]), ".bam", sep = "")
        outname_vec[i] <- outname
    }
    ## If any output BAM file exists and 'overwrite=FALSE' then stop
    file_exists <- file.exists(outname_vec)
    names(file_exists) <- names(outname_vec)
    if (any(file_exists) & overwrite == FALSE) stop("The following files exist: ", paste(names(file_exists)[file_exists], collapse = ", "), ". Delete/rename them or set 'overwrite=TRUE'")
    ## Generate collapsed BAM files (Rsamtools::mergeBam also builds the index)
    for (i in seq(along = filelist_merge)) {
        mergeBam(filelist_merge[[i]], outname_vec[i], indexDestination = TRUE, overwrite = overwrite)
        if (silent != TRUE) {
            cat("Merged BAM files:", basename(filelist_merge[[i]]), "and saved to file", basename(outname_vec[i]), "\n\n")
        }
    }
    ## Generate updated SYSargs and SYSargs2 object
    filelist[names(outname_vec)] <- outname_vec # Assign new file names to proper slots
    outfile_names <- unlist(filelist)
    args_sub <- args[sample_index]
    ## SYSargs class
    if (inherits(args, "SYSargs")) {
        targets_out <- targetsout(args_sub)
        targets_out[, "FileName"] <- outfile_names
        rownames(targets_out) <- NULL
        syslist <- list(
            targetsin = targetsin(args_sub),
            targetsout = targets_out,
            targetsheader = targetsheader(args_sub),
            modules = modules(args_sub),
            software = "mergeBamByFactor",
            cores = cores(args_sub),
            other = other(args_sub),
            reference = reference(args_sub),
            results = results(args_sub),
            infile1 = infile1(args_sub),
            infile2 = infile2(args_sub),
            outfile1 = outfile_names,
            sysargs = sysargs(args_sub),
            outpaths = outfile_names
        )
        args_sub_out <- as(syslist, "SYSargs")
        ## SYSargs2 class
    } else if (inherits(args, "SYSargs2")) {
        out <- sapply(names(outfile_names), function(x) list(outfile_names[[x]]), simplify = F)
        for (i in seq_along(out)) {
            names(out[[i]]) <- files(args)$step
        }
        # out <- sapply(names(out), function(x) names(out[[x]]) <- files(args)$step, simplify = F)
        sys2list <- list(
            targets = targets(args_sub),
            targetsheader = targetsheader(args_sub),
            modules = as.list(modules(args_sub)),
            wf = wf(args_sub),
            clt = clt(args_sub),
            yamlinput = yamlinput(args_sub),
            cmdlist = cmdlist(args_sub),
            input = input(args_sub),
            output = out,
            files = files(args_sub),
            inputvars = inputvars(args_sub),
            cmdToCwl = list(),
            status = data.frame(),
            internal_outfiles = list()
        )
        args_sub_out <- as(sys2list, "SYSargs2")
    } else if (inherits(args, "character")) {
        targetsDF <- targetsDF[, -which(colnames(targetsDF) %in% c("FileName1", "FileName2", "FileName"))]
        targetsDF <- targetsDF[sample_index, ]
        # a <- file.path(.getPath(filelist_merge[[1]]), outfile_names)
        args_sub_out <- cbind(FileName = outname_vec, targetsDF)
    }
    return(args_sub_out)
}
## Usage:
# args <- systemArgs(sysma=NULL, mytargets="targets_bam.txt")
# args_merge <- mergeBamByFactor(args, overwrite=TRUE, silent=FALSE)
# writeTargetsout(x=args_merge, file="targets_mergeBamByFactor.txt", overwrite=TRUE)
############################################################################
## Create targets file with reference sample, e.g. input sample for ChIP-Seq ##
############################################################################
## Create a targets file where each sample row is paired with its reference
## sample (e.g. the ChIP-Seq input sample), as named in the 'SampleReference'
## column. Rows without a reference are dropped; the single 'FileName' column
## is replaced by 'FileName1' (sample) and 'FileName2' (reference).
##
## infile/outfile: paths of input and output targets files.
## silent: if TRUE, suppress the confirmation message.
## overwrite: if FALSE, refuse to replace an existing outfile.
## ...: passed on to writeLines().
writeTargetsRef <- function(infile, outfile, silent = FALSE, overwrite = FALSE, ...) {
    ## Read both the raw lines (to preserve '#' header lines) and the table
    rawlines <- readLines(infile)
    targets <- read.delim(infile, comment.char = "#")
    ## Validate the expected column layout
    if (!c("SampleReference") %in% colnames(targets)) stop("Targets file lacks SampleReference column")
    if (!c("FileName") %in% colnames(targets)) stop("Targets file lacks FileName column")
    if (all(c("FileName1", "FileName2") %in% colnames(targets))) stop("Targets file is expected to have only one FileName column")
    if (file.exists(outfile) & overwrite == FALSE) {
        stop(
            "I am not allowed to overwrite files; please delete existing file: ",
            outfile, " or set 'overwrite=TRUE'"
        )
    }
    ## Every non-empty reference ID must resolve to a SampleName
    refids <- as.character(targets$SampleReference)
    refids <- refids[!is.na(refids)]
    refids <- refids[refids != ""]
    filemap <- as.character(targets$FileName)
    names(filemap) <- as.character(targets$SampleName)
    missing_refs <- refids[!refids %in% names(filemap)]
    if (length(missing_refs) > 0) {
        stop(
            "Value(s) ", paste(missing_refs, collapse = ", "),
            " from SampleReference column have no matches in SampleName column!"
        )
    }
    ## Rearrange targets file: FileName1 = sample file, FileName2 = reference
    refdf <- data.frame(FileName1 = targets$FileName, FileName2 = NA, targets)
    refdf[, "FileName2"] <- filemap[as.character(refdf$SampleReference)]
    ## Keep only rows carrying a usable (non-NA, non-empty) reference
    keep <- !is.na(as.character(refdf$SampleReference)) & refdf$SampleReference != ""
    refdf <- refdf[keep, , drop = FALSE]
    refdf <- refdf[, !(names(refdf) %in% "FileName")]
    ## Export targets file including header lines
    headerlines <- rawlines[grepl("^#", rawlines)]
    bodylines <- c(paste(colnames(refdf), collapse = "\t"), apply(refdf, 1, paste, collapse = "\t"))
    writeLines(c(headerlines, bodylines), outfile, ...)
    if (silent != TRUE) cat("\t", "Modified", infile, "file with sample-wise reference has been written to outfile", outfile, "\n")
}
## Usage:
# writeTargetsRef(infile="~/targets.txt", outfile="~/targets_refsample.txt", silent=FALSE, overwrite=FALSE)
########################################################
## Iterative read counting over different range files ##
########################################################
## Convenience function to perform read counting over several different
## range sets, e.g. peak ranges or feature types
## Convenience function to perform read counting over several different
## range sets, e.g. peak ranges or feature types.
##
## bfl: BamFileList with the alignments to count reads from.
## args: SYSargs/SYSargs2 object, or named character vector of range files
##       (names are the sample IDs).
## outfiles: named character vector of output paths; required only when
##       'args' is a character vector.
## format: "tabular" (tab-delimited file coerced to GRanges) or "bed".
## ...: passed on to GenomicAlignments::summarizeOverlaps(), e.g. 'mode'.
## Returns the character vector of output count-table paths.
countRangeset <- function(bfl, args, outfiles = NULL, format = "tabular", ...) {
    pkg <- c("GenomeInfoDb")
    checkPkg(pkg, quietly = FALSE)
    ## Input validity checks
    if (!inherits(bfl, "BamFileList")) stop("'bfl' needs to be of class 'BamFileList'.")
    if (!any(inherits(args, "SYSargs"), inherits(args, "SYSargs2"), inherits(args, "character"))) stop("Argument 'x' needs to be assigned an object of class 'SYSargs' OR 'SYSargs2 OR named character vector")
    ## SYSargs or SYSargs2 class
    if (inherits(args, "SYSargs") | inherits(args, "SYSargs2")) {
        absent_peak_file <- infile1(args)[!file.exists(infile1(args))]
        if (length(absent_peak_file) != 0) stop("The following files assigned to 'infile1(args)' do not exist: ", paste(basename(absent_peak_file), collapse = ", "))
        ## NOTE(review): from here on 'absent_peak_file' is reused to hold the
        ## full set of input range files, not the missing ones; consider
        ## renaming it in a future refactor.
        if (inherits(args, "SYSargs")) {
            absent_peak_file <- infile1(args)
            countDFnames <- outpaths(args)
            ## SYSargs2 class
        } else if (inherits(args, "SYSargs2")) {
            absent_peak_file <- infile1(args)
            countDFnames <- subsetWF(args, slot = "output", subset = 1, index = 1)
        }
        ## named character vector class
    } else if (inherits(args, "character")) {
        if (is.null(names(args))) stop("Please provide a named character vector, where the names elements should be the sampleID")
        absent_peak_file <- args
        if (is.null(outfiles)) stop("Please provide a named character vector, where the names elements should be the sampleID and the outfiles names.")
        if (!all(names(outfiles) %in% names(absent_peak_file))) stop("names of 'outfiles' argument should be find at names of 'args' argument.")
        countDFnames <- outfiles
    }
    ## Count reads once per input range file and write one table per file
    for (i in seq(along = absent_peak_file)) {
        if (format == "tabular") {
            df <- read.delim(absent_peak_file[i], comment.char = "#")
            peaks <- as(df, "GRanges")
        } else if (format == "bed") {
            peaks <- rtracklayer::import.bed(absent_peak_file[i])
        } else {
            stop("Input file format not supported.")
        }
        ## Name each range '<seqname>_<start>-<end>' and split into a list so
        ## summarizeOverlaps() reports one count row per range
        names(peaks) <- paste0(as.character(GenomeInfoDb::seqnames(peaks)), "_", start(peaks), "-", end(peaks))
        peaks <- split(peaks, names(peaks))
        countDF <- GenomicAlignments::summarizeOverlaps(peaks, bfl, ...)
        countDF <- SummarizedExperiment::assays(countDF)$counts
        write.table(countDF, countDFnames[i], col.names = NA, quote = FALSE, sep = "\t")
        cat("Wrote count result", i, "to", basename(countDFnames[i]), "\n")
    }
    if (inherits(args, "character")) {
        ## For character input, name the returned paths by sample ID
        names(countDFnames) <- names(args)
        return(countDFnames)
    } else {
        return(countDFnames)
    }
}
## Usage:
# countDFnames <- countRangeset(bfl, args, mode="Union", ignore.strand=TRUE)
################################################################################
## Iterative edgeR/DESeq2 analysis over counts sets from different range sets ##
################################################################################
## Convenience function to iterate over several count sets generated by
## countRangeset() or similar utilities
## Iterate a differential-analysis function (e.g. run_edgeR) over several
## count tables, writing one result table and one filter plot (PDF) per set.
##
## args: SYSargs/SYSargs2 object, or character vector of count-table paths.
## outfiles: output paths; used only when 'args' is a character vector.
## diffFct: function performing the analysis; called as
##          diffFct(countDF=..., targets, cmp, ...).
## targets/cmp: forwarded to 'diffFct'.
## dbrfilter: filter settings forwarded to filterDEGs().
## Returns a named list with one filterDEGs() result per count table.
runDiff <- function(args, outfiles = NULL, diffFct, targets, cmp, dbrfilter, ...) {
    if (!any(inherits(args, "SYSargs"), inherits(args, "SYSargs2"), inherits(args, "character"))) stop("Argument 'x' needs to be assigned an object of class 'SYSargs' OR 'SYSargs2 OR named character vector")
    ## Resolve input count tables and result paths from the supported classes
    if (inherits(args, "SYSargs")) {
        count_paths <- infile1(args)
        result_paths <- outpaths(args)
    } else if (inherits(args, "SYSargs2")) {
        count_paths <- infile1(args)
        result_paths <- subsetWF(args, slot = "output", subset = 1, index = 1)
    } else if (inherits(args, "character")) {
        count_paths <- args
        result_paths <- outfiles
    }
    ## All count files must exist before any analysis starts
    missing_files <- count_paths[!file.exists(count_paths)]
    if (length(missing_files) != 0) stop("The following files assigned to 'countfiles' do not exist: ", paste(basename(missing_files), collapse = ", "))
    ## Run the differential analysis once per count table
    result_list <- list()
    for (i in seq_along(result_paths)) {
        count_df <- read.delim(count_paths[i], row.names = 1)
        deg_df <- diffFct(countDF = count_df, targets, cmp, ...)
        write.table(deg_df, result_paths[i], quote = FALSE, sep = "\t", col.names = NA)
        ## Capture the filterDEGs() diagnostic plot in a PDF next to the table
        grDevices::pdf(paste0(result_paths[i], ".pdf"))
        dbr <- filterDEGs(degDF = deg_df, filter = dbrfilter)
        grDevices::dev.off()
        result_list <- c(result_list, list(dbr))
        names(result_list)[i] <- names(result_paths[i])
        cat("Wrote count result", i, "to", basename(result_paths[i]), "\n")
        cat("Saved plot", i, "to", basename(paste0(result_paths[i], ".pdf")), "\n")
    }
    return(result_list)
}
## Usage:
# dbrlist <- runDiff(args, diffFct=run_edgeR, targets=targetsin(args_bam), cmp=cmp[[1]], independent=TRUE, dbrfilter=c(Fold=2, FDR=1))
# dbrlist <- runDiff(args, diffFct=run_edgeR, targets=targets.as.df(targets(args_bam)), cmp=cmp[[1]], independent=TRUE, dbrfilter=c(Fold=2, FDR=1))
###########################################################
## (A) olRanges Function for IRanges and GRanges Objects ##
###########################################################
## Identify Range Overlaps
## Identify overlaps between two range objects and classify each overlap.
##
## query/subject: two GRanges or two IRanges objects (same class family).
## output: "gr" returns the overlapping query ranges with overlap statistics
##         appended as metadata columns; "df" returns a plain data.frame.
## Overlap types: 'olup' (subject starts upstream of query), 'oldown'
## (subject ends downstream of query), 'inside' (subject within query),
## 'contained' (subject spans the entire query).
olRanges <- function(query, subject, output = "gr") {
    pkg <- c("IRanges", "GenomeInfoDb")
    checkPkg(pkg, quietly = FALSE)
    ## Input check. Robustness fix: use inherits() instead of
    ## 'class(x) == "..."', which errors in an if() when class() returns more
    ## than one element and rejects subclasses of GRanges/IRanges.
    is_gr <- inherits(query, "GRanges") && inherits(subject, "GRanges")
    is_ir <- inherits(query, "IRanges") && inherits(subject, "IRanges")
    if (!(is_gr || is_ir)) {
        stop("Query and subject need to be of same class, either GRanges or IRanges!")
    }
    ## Find overlapping ranges; seqlengths are cleared first so that
    ## subsetting below cannot trigger out-of-bound checks
    if (is_gr) {
        GenomeInfoDb::seqlengths(query) <- rep(NA, length(GenomeInfoDb::seqlengths(query)))
        GenomeInfoDb::seqlengths(subject) <- rep(NA, length(GenomeInfoDb::seqlengths(subject)))
    }
    olindex <- as.matrix(GenomicRanges::findOverlaps(query, subject))
    query <- query[olindex[, 1]]
    subject <- subject[olindex[, 2]]
    olma <- cbind(Qstart = start(query), Qend = end(query), Sstart = start(subject), Send = end(subject))
    ## Pre-queries for overlaps
    startup <- olma[, "Sstart"] < olma[, "Qstart"]
    enddown <- olma[, "Send"] > olma[, "Qend"]
    startin <- olma[, "Sstart"] >= olma[, "Qstart"] & olma[, "Sstart"] <= olma[, "Qend"]
    endin <- olma[, "Send"] >= olma[, "Qstart"] & olma[, "Send"] <= olma[, "Qend"]
    ## Overlap types
    olup <- startup & endin
    oldown <- startin & enddown
    inside <- startin & endin
    contained <- startup & enddown
    ## Overlap types in one vector
    OLtype <- rep("", length(olma[, "Qstart"]))
    OLtype[olup] <- "olup"
    OLtype[oldown] <- "oldown"
    OLtype[inside] <- "inside"
    OLtype[contained] <- "contained"
    ## Overlap positions (start/end of the intersecting segment)
    OLstart <- rep(0, length(olma[, "Qstart"]))
    OLend <- rep(0, length(olma[, "Qstart"]))
    OLstart[olup] <- olma[, "Qstart"][olup]
    OLend[olup] <- olma[, "Send"][olup]
    OLstart[oldown] <- olma[, "Sstart"][oldown]
    OLend[oldown] <- olma[, "Qend"][oldown]
    OLstart[inside] <- olma[, "Sstart"][inside]
    OLend[inside] <- olma[, "Send"][inside]
    OLstart[contained] <- olma[, "Qstart"][contained]
    OLend[contained] <- olma[, "Qend"][contained]
    ## Absolute and relative length of overlaps
    OLlength <- (OLend - OLstart) + 1
    OLpercQ <- OLlength / width(query) * 100
    OLpercS <- OLlength / width(subject) * 100
    ## Output type
    oldf <- data.frame(Qindex = olindex[, 1], Sindex = olindex[, 2], olma, OLstart, OLend, OLlength, OLpercQ, OLpercS, OLtype)
    if (is_gr) {
        oldf <- cbind(space = as.character(GenomeInfoDb::seqnames(query)), oldf)
    }
    if (output == "df") {
        return(oldf)
    }
    if (output == "gr") {
        if (is_gr) {
            S4Vectors::elementMetadata(query) <- cbind(as.data.frame(elementMetadata(query)), oldf)
        }
        if (is_ir) {
            ## IRanges carry no seqnames/strand, so wrap them in a GRanges
            ## with placeholder values before attaching the statistics
            query <- GRanges(seqnames = S4Vectors::Rle(rep("dummy", length(query))), ranges = IRanges::IRanges(start = oldf[, "Qstart"], end = oldf[, "Qend"]), strand = S4Vectors::Rle(BiocGenerics::strand(rep("+", length(query)))), oldf)
        }
        return(query)
    }
}
## Run olRanges function
## Sample Data Sets
# grq <- GRanges(seqnames = S4Vectors::Rle(c("chr1", "chr2", "chr1", "chr3"), c(1, 3, 2, 4)),
# ranges = IRanges(seq(1, 100, by=10), end = seq(30, 120, by=10)),
# strand = S4Vectors::Rle(BiocGenerics::strand(c("-", "+", "-")), c(1, 7, 2)))
# grs <- shift(grq[c(2,5,6)], 5)
# olRanges(query=grq, subject=grs, output="df")
# olRanges(query=grq, subject=grs, output="gr")
|
## Shiny server for the gene-expression explorer: builds box/line/scatter
## plots, summary statistics, group tests and Kaplan-Meier survival plots
## for one gene across the selected dataset. Relies on objects defined
## elsewhere in the app (datasets, gene_names, rmNA, %then%, starmaker,
## survivalPlot).
shinyServer(
  function(input, output, session) {
    #' Expression data: subset the global 'datasets' table by the selected
    #' platform or data source, then drop NA rows via rmNA()
    exprs <- reactive({
      exprs <- switch(input$datasetType,
        "Platform" = subset(datasets,Platform %in% input$platform),
        "DataSource" = subset(datasets,DataSource %in% input$dataset)
      )
      exprs <- rmNA(exprs)
    })
    #' Return the available histology, to be used in the updateSelectInput
    histo <- reactive({
      levels(exprs()[,"Pathology"])
    })
    #' Return the available subtype, to be used in the updateSelectInput
    subtype <- reactive({
      if (input$histology == "All"){
        df <- exprs()
        subtype <- levels(df$Subtype)
      } else{
        df <- subset(exprs(), Pathology == input$histology)
        # Keep only subtype levels actually present in the filtered data
        subtype <- intersect(levels(df$Subtype),df$Subtype)
      }
      subtype
    })
    #' When switching datasets if the selected histo is not available it will choose "All"
    histo_selected <- reactive ({
      if (input$histology %in% c("All", histo())){
        input$histology
      } else {
        "All"
      }
    })
    #' When switching datasets if the selected subtype is not available it will choose "All"
    subtype_selected <- reactive ({
      if (input$subtype %in% c("All", subtype())){
        input$subtype
      } else {
        "All"
      }
    })
    # Keep the histology/subtype dropdowns in sync with the current dataset
    observe({
      updateSelectInput(session, inputId = "histology", choices = c("All", histo()), selected = histo_selected())
      updateSelectInput(session, inputId = "subtype", choices = c("All", subtype()), selected = subtype_selected())
    })
    #' Text matching with the gene names list (server-side selectize search)
    updateSelectizeInput(session, inputId = "gene", choices = gene_names, server = TRUE)
    plot_type <- reactive({
      input$plotType
    })
    #' Generate a dataframe with the data to plot
    data <- reactive({
      validate(
        need(input$gene != "", "Please, enter a gene name in the panel on the left")%then%
        # Not all genes are available for all the dataset
        need(input$gene %in% names(exprs()),"Gene not available for this platform")
      )
      mRNA <- exprs()[ ,input$gene]
      # Columns 1:19 are presumably the sample annotation (pData) columns —
      # TODO confirm against the structure of 'datasets'
      data <- cbind(mRNA, exprs()[,1:19]) # To combine with pData
      # Keep only patients with paired samples (PtID occurring more than once)
      samples <- data[which(duplicated(data$PtID)),]$PtID
      data <- data[data$PtID %in% samples,]
      data
    })
    # Reset the plot style whenever the plot type changes
    observeEvent(plot_type(), {
      updateRadioButtons(session, inputId = "point_line", selected = "Box plot")
    })
    #' Populate Xaxis labels
    observe({
      updateTextInput(session, inputId = "myXlab",value = paste0("\n",plot_type()))
    })
    # Tukey plot active only when tukey stat data are shown
    observeEvent(!input$tukeyHSD, {
      updateCheckboxInput(session, inputId = "tukeyPlot", value = FALSE)
    })
    # Scatter plot mode disables the group-comparison options
    observeEvent(input$point_line == 'Scatter plot', {
      updateCheckboxInput(session, inputId = "tukeyPlot", value = FALSE)
      updateCheckboxInput(session, inputId = "paired_tTest", value = FALSE)
      updateCheckboxInput(session, inputId = "tTest", value = FALSE)
    })
    #' Reactive function to generate the box plots (also line and scatter
    #' variants, optionally combined with the Tukey error-bar panel)
    box_Plot <- reactive({
      data <- data()
      xlabel <- paste("\n", input$myXlab)
      ylabel <- paste(input$myYlab,"\n")
      col <- input$colorP
      shape <- input$shapeP
      # "None" means no colour/shape aesthetic at all
      if(input$colorP == "None") {
        col <- NULL
      }
      if(input$shapeP == "None") {
        shape <- NULL
      }
      # Shared text-size/angle theme applied to every plot variant
      theme <- theme(axis.text.x = element_text(size = input$axis_text_size, angle = input$xaxisLabelAngle, hjust = ifelse(input$xaxisLabelAngle == 0,0.5,1)),
        axis.text.y = element_text(size = input$axis_text_size),
        legend.text = element_text(size = input$axis_text_size*0.8),
        legend.title = element_text(size = input$axis_text_size*0.8),
        axis.title.x = element_text(size = input$axis_title_size),
        axis.title.y = element_text(size = input$axis_title_size))
      # Default: box plot with jittered points
      p <- ggplot(data, mapping=aes_string(x=plot_type(), y = "mRNA")) +
        geom_boxplot(outlier.size = 0, outlier.stroke = 0) +
        geom_jitter(position = position_jitter(width = .25), mapping = aes_string(colour = col, shape = shape),
          size = input$point_size, alpha = input$alpha) +
        ylab(ylabel) + xlab(xlabel) + theme_bw() + theme
      # Lines variant: connect the paired samples of each patient
      if(input$point_line == "Lines") {
        p <- ggplot(data, mapping=aes_string(x=plot_type(), y = "mRNA", group="PtID", colour = col, shape = shape)) +
          geom_line() +
          geom_point(size = input$point_size) +
          ylab(ylabel) + xlab(xlabel) + theme_bw() + theme
      }
      # Scatter variant: Initial vs Recurrent expression per patient
      if(input$point_line == "Scatter plot") {
        exp <- data %>% select(PtID,Progression,mRNA) %>% spread(Progression, mRNA)
        if(input$colorP == "None") {
          p <- ggplot(exp, aes(Initial, Recurrent)) +
            geom_point(size = input$point_size) + geom_smooth(method = "lm", se = TRUE) +
            geom_rug() + theme
        } else {
          # Spread the grouping variable the same way, so the rugs can be
          # coloured by the group at each time point
          group <- data %>% select_("PtID","Progression",input$colorP) %>% spread_("Progression",input$colorP)
          names(group)[2:3] <- c(paste0(input$colorP,"_Initial"),paste0(input$colorP,"_Recurrence"))
          data <- merge(exp,group,by="PtID")
          p <- ggplot(data, aes(Initial, Recurrent)) +
            geom_point(size = input$point_size) + geom_smooth(method = "lm", se = TRUE) +
            geom_rug(sides="b", aes_string(colour = paste0(input$colorP,"_Initial"))) +
            geom_rug(sides="l", aes_string(colour = paste0(input$colorP,"_Recurrence"))) +
            theme + theme(legend.title = element_blank())
        }
      }
      # Optionally append the Tukey mean-difference panel on the right
      if (input$tukeyPlot) {
        t <- tukey() %>%
          mutate(comparison = row.names(.)) %>%
          ggplot(aes(reorder(comparison, diff), diff, ymin = lwr, ymax= upr, colour = Significance)) +
          geom_point() + geom_errorbar(width = 0.25) + ylab("\nDifferences in mean levels") + xlab("") +
          geom_hline(yintercept = 0, colour="darkgray", linetype = "longdash") + coord_flip() + theme
        p <- grid.arrange(p, t, ncol=2, widths = c(3,2))
      }
      return(p)
    })
    # Widen the plot area when the Tukey panel is shown
    box_width <- reactive({
      if(input$tukeyPlot)
        input$plot_width* 1.8 else
        input$plot_width
    })
    #' Create the selected plot
    output$plot <- renderPlot({
      box_Plot()
    }, width = box_width, height = function()input$plot_height)
    #' Data for the statistic: expression values plus current grouping column
    stat_data <- reactive({
      mRNA <- data()[ ,"mRNA"]
      group <- data()[ ,plot_type()]
      data <- data.frame(mRNA, group)
      data
    })
    #' Summary statistic: per-group count/median/mad/mean/sd plus a TOTAL row
    output$summary <- renderTable({
      data <- stat_data()
      stat <- data %>%
        group_by(group) %>%
        summarise(Sample_count = paste0(n()," (", round(n()*100/dim(data)[1], 2), "%)" ), # prop.table
          median = median(mRNA, na.rm=T), mad = mad(mRNA, na.rm=T), mean = mean(mRNA, na.rm=T),
          sd = sd(mRNA, na.rm=T)) %>%
        data.frame()
      row.names(stat) <- stat$group
      tot <- data %>%
        summarise(Sample_count = n(), median = median(mRNA, na.rm=T),
          mad = mad(mRNA, na.rm=T), mean = mean(mRNA, na.rm=T), sd = sd(mRNA, na.rm=T))
      stat <- stat[,-1]
      stat <- rbind(stat,TOTAL = tot)
      stat
    }, rownames = TRUE, align='rrrrrr')
    #' Tukey post-hoc test, to combine it with the boxplot and to render in a table
    tukey <- reactive({
      validate(
        need(nlevels(stat_data()$group)>1,message = "There is only one category, group comparison cannot be performed")
      )
      data <- stat_data()
      tukey <- data.frame(TukeyHSD(aov(mRNA ~ group, data = data))[[1]])
      # starmaker() (defined elsewhere) converts adjusted p-values to stars
      tukey$Significance <- as.factor(starmaker(tukey$p.adj, p.levels = c(.001, .01, .05, 1), symbols=c("***", "**", "*", "ns")))
      tukey <- tukey[order(tukey$diff, decreasing = TRUE), ]
      tukey
    })
    #' Render tukey
    output$tukeyTest <- renderTable({
      tukey()
    },rownames = TRUE , digits = c(2,2,2,-1,2))
    #' Pairwise t test (Bonferroni-adjusted, unpaired)
    output$pairwiseTtest <- renderTable({
      validate(
        need(nlevels(stat_data()$group)>1,message = "There is only one category, group comparison cannot be performed")
      )
      data <- stat_data()
      pttest <- pairwise.t.test(data$mRNA, data$group, na.rm= TRUE, p.adj = "bonferroni", paired = FALSE)
      pttest$p.value
    }, rownames = TRUE, digits = -1)
    # Paired t-test active only when Progression plot are shown
    observeEvent(plot_type() != "Progression", {
      updateCheckboxInput(session, inputId = "paired_tTest", value = FALSE)
    })
    #' Paired t-test
    output$pairedTtest <- renderTable({
      req(plot_type() == "Progression")
      data <- stat_data()
      pttest <- broom::tidy(t.test(mRNA ~ group, data, paired=TRUE))
      pttest
    },rownames = TRUE)
    #' Get the selected download file type.
    download_plot_file_type <- reactive({
      input$downloadPlotFileType
    })
    # Adjust the size inputs' units/ranges to the chosen file type
    # (PDF in inches, bitmap formats in pixels)
    observe({
      plotFileType <- input$downloadPlotFileType
      plotFileTypePDF <- plotFileType == "pdf"
      plotUnit <- ifelse(plotFileTypePDF, "inches", "pixels")
      plotUnitDef <- ifelse(plotFileTypePDF, 7, 600)
      plotUnitMin <- ifelse(plotFileTypePDF, 1, 100)
      plotUnitMax <- ifelse(plotFileTypePDF, 12, 2000)
      plotUnitStep <- ifelse(plotFileTypePDF, 0.1, 50)
      updateNumericInput(
        session,
        inputId = "downloadPlotHeight",
        label = sprintf("Height (%s)", plotUnit),
        value = plotUnitDef, min = plotUnitMin, max = plotUnitMax, step = plotUnitStep)
      updateNumericInput(
        session,
        inputId = "downloadPlotWidth",
        label = sprintf("Width (%s)", plotUnit),
        value = plotUnitDef, min = plotUnitMin, max = plotUnitMax, step = plotUnitStep)
    })
    #' Get the download dimensions.
    download_plot_height <- reactive({
      input$downloadPlotHeight
    })
    download_plot_width <- reactive({
      input$downloadPlotWidth
    })
    #' Download the Plot
    output$downloadPlot <- downloadHandler(
      filename = function() {
        paste0(Sys.Date(), "_", input$gene, "_", input$dataset, "_", input$plotTypeSel,
          ".", download_plot_file_type())
      },
      # The argument content below takes filename as a function and returns what's printed to it.
      content = function(file) {
        # Gets the name of the function to use from the downloadFileType reactive element.
        # (e.g. "pdf" -> grDevices::pdf, "png" -> grDevices::png)
        plotFunction <- match.fun(download_plot_file_type())
        plotFunction(file, width = download_plot_width(), height = download_plot_height())
        if (input$tukeyPlot) {
          # grid.arrange() output is a grob, so it must be drawn, not printed
          grid.draw(box_Plot())
        } else {
          print(box_Plot())
        }
        dev.off()
      }
    )
    #' Extract the survival data (initial samples only, optionally filtered
    #' by histology/subtype and excluding G-CIMP)
    surv_data <- reactive({
      df <- data()
      # df <- subset(df, !is.na(df$status))
      df <- subset(df,Progression == "Initial")
      if (input$histology != "All"){
        df <- subset(df, Pathology == input$histology)
      }
      if (input$subtype != "All") {
        df <- subset(df, Subtype == input$subtype)
      }
      # exclude G-CIMP is selected
      if (input$gcimpSurv){
        df <- subset(df, GcimpPrediction != "GCIMP")
      }
      df
    })
    #' Create a slider for the manual cutoff of the Kaplan Meier plot
    mRNA_surv <- reactive({
      surv_need()
      req(input$histology %in% c("All", histo()))
      mRNA <- surv_data()[ ,"mRNA"]
      mRNA.values <- round(mRNA[!is.na(mRNA)],2)
      # Generate a vector of continuous values, excluding the first and last value
      mRNA.values <- sort(mRNA.values[mRNA.values != min(mRNA.values) & mRNA.values != max(mRNA.values)])
    })
    #' Create a rug plot with the mRNA expression value for the manual cutoff
    output$boxmRNA <- renderPlot({
      req(input$mInput)
      mRNA <- round(mRNA_surv(),2)
      q <- quantile(mRNA)
      xrange <-range(mRNA)
      par(mar = c(0,0,0,0))
      plot(0, 0, type = "n", xlim = c(xrange[1] + 0.25, xrange[2]) , ylim = c(-0.1, + 0.1), ylab ="", xlab = "", axes = FALSE)
      points(x = mRNA, y = rep(0, length(mRNA)), pch="|", col=rgb(0, 0, 0, 0.25))
      # Add a red line to show which is the current cutoff.
      points(x = input$mInput, y = 0, pch = "|", col="red", cex = 2.5)
      # Blue marks at the quartiles (25/50/75%)
      points(x = q[2:4], y = rep(0,3), pch = "|", col="blue", cex = 2)
    }, bg = "transparent")
    #' Generate the slider for the manual cutoff
    output$numericCutoff <- renderUI({
      sliderInput(inputId = "mInput",label = NULL, min = min(mRNA_surv()), max = max(mRNA_surv()),
        value = median(mRNA_surv()), step = 0.05, round = -2)
    })
    #' Requirements for all the survival plots
    surv_need <- reactive({
      validate(
        need(input$gene != "", "Please, enter a gene name in the panel on the left")%then%
        need(input$gene %in% names(exprs()),"Gene not available for this dataset")
      )
    })
    #' busy indicator when switching surv tab
    #' http://stackoverflow.com/questions/18237987/show-that-shiny-is-busy-or-loading-when-changing-tab-panels
    output$activeTabSurv <- reactive({
      return(input$tabSurv)
    })
    outputOptions(output, 'activeTabSurv', suspendWhenHidden=FALSE)
    #' Set survival plot height (taller when all subtypes are plotted)
    surv_plot_height <- reactive({
      if(input$allSubSurv){
        ifelse(length(subtype())>4, 1300, 650)
      } else {
        400
      }
    })
    #' Create a Kaplan Meier plot with cutoff based on quantiles or manual selection
    output$survPlot <- renderPlot({
      surv_need ()
      req(input$histology %in% c("All", histo()))
      # Use 'try' to suppress a message throwed the first time manual cutoff is selected
      if(input$allSubSurv) {
        # One panel per subtype, laid out in two columns
        nrow <- ceiling(length(subtype())/2)
        par(mfrow = c(nrow,2), mar=c(3.5,3.5,3.5,1.5), mgp=c(2.2,.95,0))
        try({
          for (i in subtype()) {
            survivalPlot(surv_data(), surv_type = input$surv_type, gene = input$gene, group = input$histology, subtype = i,
              cutoff = input$cutoff, numeric = input$mInput, cex = 1.2)
          }
        }, silent = TRUE)
      } else {
        try(survivalPlot(surv_data(), surv_type = input$surv_type, gene = input$gene, group = input$histology, subtype = input$subtype,
          cutoff = input$cutoff, numeric = input$mInput), silent = TRUE)
      }
    }, height = surv_plot_height, width = function(){if(!input$allSubSurv) {500} else {900}})
    #' Download the survPlot
    output$downloadsurvPlot <- downloadHandler(
      filename = function() {
        paste0(Sys.Date(), "_", input$gene, "_", input$dataset, "_survPlot.", download_plot_file_type())
      },
      content = function(file) {
        plotFunction <- match.fun(download_plot_file_type())
        plotFunction(file, width = download_plot_width(), height = download_plot_height())
        if(input$allSubSurv) {
          nrow <- ceiling(length(subtype())/2)
          par(mfrow = c(nrow,2), mar=c(3.5,3.5,3.5,1.5), mgp=c(2.2,.95,0))
          for (i in subtype()) {
            survivalPlot(surv_data(),surv_type = input$surv_type, gene = input$gene, group = input$histology, subtype = i,
              cutoff = input$cutoff, numeric = input$mInput, cex = 1.2)
          }
        } else {
          survivalPlot(surv_data(), surv_type = input$surv_type, gene =input$gene, group = input$histology, subtype = input$subtype,
            cutoff = input$cutoff, numeric = input$mInput)
        }
        dev.off()
      }
    )
  })
| /server.R | no_license | msquatrito/shiny_RecuR | R | false | false | 15,929 | r | shinyServer(
function(input, output, session) {
#' Expression data
exprs <- reactive({
exprs <- switch(input$datasetType,
"Platform" = subset(datasets,Platform %in% input$platform),
"DataSource" = subset(datasets,DataSource %in% input$dataset)
)
exprs <- rmNA(exprs)
})
#' Return the available histology, to be used in the updateSelectInput
histo <- reactive({
levels(exprs()[,"Pathology"])
})
#' Return the available subtype, to be used in the updateSelectInput
subtype <- reactive({
if (input$histology == "All"){
df <- exprs()
subtype <- levels(df$Subtype)
} else{
df <- subset(exprs(), Pathology == input$histology)
subtype <- intersect(levels(df$Subtype),df$Subtype)
}
subtype
})
#' When switching datasets if the selected histo is not available it will choose "All"
histo_selected <- reactive ({
if (input$histology %in% c("All", histo())){
input$histology
} else {
"All"
}
})
#' When switching datasets if the selected subtype is not available it will choose "All"
subtype_selected <- reactive ({
if (input$subtype %in% c("All", subtype())){
input$subtype
} else {
"All"
}
})
observe({
updateSelectInput(session, inputId = "histology", choices = c("All", histo()), selected = histo_selected())
updateSelectInput(session, inputId = "subtype", choices = c("All", subtype()), selected = subtype_selected())
})
#' Text matching with the gene names list
updateSelectizeInput(session, inputId = "gene", choices = gene_names, server = TRUE)
plot_type <- reactive({
input$plotType
})
#' Generate a dataframe with the data to plot
data <- reactive({
validate(
need(input$gene != "", "Please, enter a gene name in the panel on the left")%then%
# Not all genes are available for all the dataset
need(input$gene %in% names(exprs()),"Gene not available for this platform")
)
mRNA <- exprs()[ ,input$gene]
data <- cbind(mRNA, exprs()[,1:19]) # To combine with pData
samples <- data[which(duplicated(data$PtID)),]$PtID
data <- data[data$PtID %in% samples,]
data
})
observeEvent(plot_type(), {
updateRadioButtons(session, inputId = "point_line", selected = "Box plot")
})
#' Populate Xaxis labels
observe({
updateTextInput(session, inputId = "myXlab",value = paste0("\n",plot_type()))
})
# Tukey plot active only when tukey stat data are shown
observeEvent(!input$tukeyHSD, {
updateCheckboxInput(session, inputId = "tukeyPlot", value = FALSE)
})
observeEvent(input$point_line == 'Scatter plot', {
updateCheckboxInput(session, inputId = "tukeyPlot", value = FALSE)
updateCheckboxInput(session, inputId = "paired_tTest", value = FALSE)
updateCheckboxInput(session, inputId = "tTest", value = FALSE)
})
#' Reactive function to generate the box plots
box_Plot <- reactive({
data <- data()
xlabel <- paste("\n", input$myXlab)
ylabel <- paste(input$myYlab,"\n")
col <- input$colorP
shape <- input$shapeP
if(input$colorP == "None") {
col <- NULL
}
if(input$shapeP == "None") {
shape <- NULL
}
theme <- theme(axis.text.x = element_text(size = input$axis_text_size, angle = input$xaxisLabelAngle, hjust = ifelse(input$xaxisLabelAngle == 0,0.5,1)),
axis.text.y = element_text(size = input$axis_text_size),
legend.text = element_text(size = input$axis_text_size*0.8),
legend.title = element_text(size = input$axis_text_size*0.8),
axis.title.x = element_text(size = input$axis_title_size),
axis.title.y = element_text(size = input$axis_title_size))
p <- ggplot(data, mapping=aes_string(x=plot_type(), y = "mRNA")) +
geom_boxplot(outlier.size = 0, outlier.stroke = 0) +
geom_jitter(position = position_jitter(width = .25), mapping = aes_string(colour = col, shape = shape),
size = input$point_size, alpha = input$alpha) +
ylab(ylabel) + xlab(xlabel) + theme_bw() + theme
if(input$point_line == "Lines") {
p <- ggplot(data, mapping=aes_string(x=plot_type(), y = "mRNA", group="PtID", colour = col, shape = shape)) +
geom_line() +
geom_point(size = input$point_size) +
ylab(ylabel) + xlab(xlabel) + theme_bw() + theme
}
if(input$point_line == "Scatter plot") {
exp <- data %>% select(PtID,Progression,mRNA) %>% spread(Progression, mRNA)
if(input$colorP == "None") {
p <- ggplot(exp, aes(Initial, Recurrent)) +
geom_point(size = input$point_size) + geom_smooth(method = "lm", se = TRUE) +
geom_rug() + theme
} else {
group <- data %>% select_("PtID","Progression",input$colorP) %>% spread_("Progression",input$colorP)
names(group)[2:3] <- c(paste0(input$colorP,"_Initial"),paste0(input$colorP,"_Recurrence"))
data <- merge(exp,group,by="PtID")
p <- ggplot(data, aes(Initial, Recurrent)) +
geom_point(size = input$point_size) + geom_smooth(method = "lm", se = TRUE) +
geom_rug(sides="b", aes_string(colour = paste0(input$colorP,"_Initial"))) +
geom_rug(sides="l", aes_string(colour = paste0(input$colorP,"_Recurrence"))) +
theme + theme(legend.title = element_blank())
}
}
if (input$tukeyPlot) {
t <- tukey() %>%
mutate(comparison = row.names(.)) %>%
ggplot(aes(reorder(comparison, diff), diff, ymin = lwr, ymax= upr, colour = Significance)) +
geom_point() + geom_errorbar(width = 0.25) + ylab("\nDifferences in mean levels") + xlab("") +
geom_hline(yintercept = 0, colour="darkgray", linetype = "longdash") + coord_flip() + theme
p <- grid.arrange(p, t, ncol=2, widths = c(3,2))
}
return(p)
})
#' Width of the boxplot area; widened when the Tukey panel is drawn alongside it.
box_width <- reactive({
if(input$tukeyPlot)
input$plot_width* 1.8 else
input$plot_width
})
#' Create the selected plot
output$plot <- renderPlot({
box_Plot()
}, width = box_width, height = function()input$plot_height)
#' Data for the statistic
#' Two-column data frame: the expression values ("mRNA") and the grouping
#' variable selected via plot_type() (both defined elsewhere in the server).
stat_data <- reactive({
mRNA <- data()[ ,"mRNA"]
group <- data()[ ,plot_type()]
data <- data.frame(mRNA, group)
data
})
#' Summary statistic
#' Per-group count (with percentage of all samples), median, MAD, mean and SD,
#' plus a TOTAL row computed over the whole data set.
output$summary <- renderTable({
data <- stat_data()
stat <- data %>%
group_by(group) %>%
summarise(Sample_count = paste0(n()," (", round(n()*100/dim(data)[1], 2), "%)" ), # prop.table
median = median(mRNA, na.rm=T), mad = mad(mRNA, na.rm=T), mean = mean(mRNA, na.rm=T),
sd = sd(mRNA, na.rm=T)) %>%
data.frame()
# use the group labels as row names, then drop the group column below so
# only the numeric summaries remain in the rendered table
row.names(stat) <- stat$group
tot <- data %>%
summarise(Sample_count = n(), median = median(mRNA, na.rm=T),
mad = mad(mRNA, na.rm=T), mean = mean(mRNA, na.rm=T), sd = sd(mRNA, na.rm=T))
stat <- stat[,-1]
stat <- rbind(stat,TOTAL = tot)
stat
}, rownames = TRUE, align='rrrrrr')
#' Tukey post-hoc test, to combine it with the boxplot and to render in a table
#' One-way ANOVA on mRNA ~ group, annotated with significance stars
#' (starmaker() is defined elsewhere) and sorted by the group difference.
tukey <- reactive({
validate(
need(nlevels(stat_data()$group)>1,message = "There is only one category, group comparison cannot be performed")
)
data <- stat_data()
tukey <- data.frame(TukeyHSD(aov(mRNA ~ group, data = data))[[1]])
tukey$Significance <- as.factor(starmaker(tukey$p.adj, p.levels = c(.001, .01, .05, 1), symbols=c("***", "**", "*", "ns")))
tukey <- tukey[order(tukey$diff, decreasing = TRUE), ]
tukey
})
#' Render tukey
#' digits: two decimals for the estimates, scientific notation (-1) for p.adj.
output$tukeyTest <- renderTable({
tukey()
},rownames = TRUE , digits = c(2,2,2,-1,2))
#' Pairwise t test
#' Bonferroni-adjusted, unpaired pairwise comparisons between the groups.
#' NOTE(review): na.rm is not a formal argument of pairwise.t.test(); it is
#' absorbed by `...` and has no effect -- consider removing it.
output$pairwiseTtest <- renderTable({
validate(
need(nlevels(stat_data()$group)>1,message = "There is only one category, group comparison cannot be performed")
)
data <- stat_data()
pttest <- pairwise.t.test(data$mRNA, data$group, na.rm= TRUE, p.adj = "bonferroni", paired = FALSE)
pttest$p.value
}, rownames = TRUE, digits = -1)
# Paired t-test active only when Progression plot are shown
# NOTE(review): observeEvent() fires whenever the value of the event
# expression changes (in either direction), so the checkbox is also cleared
# when switching TO the "Progression" plot -- confirm this is intended.
observeEvent(plot_type() != "Progression", {
updateCheckboxInput(session, inputId = "paired_tTest", value = FALSE)
})
#' Paired t-test
#' Only computed for the Progression plot; tidied with broom for rendering.
output$pairedTtest <- renderTable({
req(plot_type() == "Progression")
data <- stat_data()
pttest <- broom::tidy(t.test(mRNA ~ group, data, paired=TRUE))
pttest
},rownames = TRUE)
#' Get the selected download file type.
download_plot_file_type <- reactive({
input$downloadPlotFileType
})
# Keep the download-size inputs consistent with the chosen file type:
# PDF devices are sized in inches, bitmap devices in pixels.
observe({
plotFileType <- input$downloadPlotFileType
plotFileTypePDF <- plotFileType == "pdf"
plotUnit <- ifelse(plotFileTypePDF, "inches", "pixels")
plotUnitDef <- ifelse(plotFileTypePDF, 7, 600)
plotUnitMin <- ifelse(plotFileTypePDF, 1, 100)
plotUnitMax <- ifelse(plotFileTypePDF, 12, 2000)
plotUnitStep <- ifelse(plotFileTypePDF, 0.1, 50)
updateNumericInput(
session,
inputId = "downloadPlotHeight",
label = sprintf("Height (%s)", plotUnit),
value = plotUnitDef, min = plotUnitMin, max = plotUnitMax, step = plotUnitStep)
updateNumericInput(
session,
inputId = "downloadPlotWidth",
label = sprintf("Width (%s)", plotUnit),
value = plotUnitDef, min = plotUnitMin, max = plotUnitMax, step = plotUnitStep)
})
#' Get the download dimensions.
download_plot_height <- reactive({
input$downloadPlotHeight
})
download_plot_width <- reactive({
input$downloadPlotWidth
})
#' Download the Plot
#' The graphics device is looked up by name from the selected file type, so
#' the type string must match a device function (e.g. "pdf", "png").
output$downloadPlot <- downloadHandler(
filename = function() {
paste0(Sys.Date(), "_", input$gene, "_", input$dataset, "_", input$plotTypeSel,
".", download_plot_file_type())
},
# The argument content below takes filename as a function and returns what's printed to it.
content = function(file) {
# Gets the name of the function to use from the downloadFileType reactive element.
plotFunction <- match.fun(download_plot_file_type())
plotFunction(file, width = download_plot_width(), height = download_plot_height())
# with the Tukey panel on, box_Plot() returns a grid.arrange() grob which
# must be drawn with grid.draw(); a plain ggplot object is print()ed
if (input$tukeyPlot) {
grid.draw(box_Plot())
} else {
print(box_Plot())
}
dev.off()
}
)
#' Extract the survival data.
#' Initial samples only, optionally restricted by histology/subtype and with
#' G-CIMP cases excluded. Note subset() drops rows where the filter is NA.
surv_data <- reactive({
df <- data()
# df <- subset(df, !is.na(df$status))
df <- subset(df,Progression == "Initial")
if (input$histology != "All"){
df <- subset(df, Pathology == input$histology)
}
if (input$subtype != "All") {
df <- subset(df, Subtype == input$subtype)
}
# exclude G-CIMP is selected
if (input$gcimpSurv){
df <- subset(df, GcimpPrediction != "GCIMP")
}
df
})
#' Create a slider for the manual cutoff of the Kaplan Meier plot
#' Sorted, rounded expression values with the extreme values removed, so any
#' selectable cutoff leaves both groups non-empty.
mRNA_surv <- reactive({
surv_need()
req(input$histology %in% c("All", histo()))
mRNA <- surv_data()[ ,"mRNA"]
mRNA.values <- round(mRNA[!is.na(mRNA)],2)
# Generate a vector of continuous values, excluding the first and last value
mRNA.values <- sort(mRNA.values[mRNA.values != min(mRNA.values) & mRNA.values != max(mRNA.values)])
})
#' Create a rug plot with the mRNA expression value for the manual cutoff
#' Base-graphics strip: grey ticks = samples, red tick = current cutoff,
#' blue ticks = quartiles.
output$boxmRNA <- renderPlot({
req(input$mInput)
mRNA <- round(mRNA_surv(),2)
q <- quantile(mRNA)
xrange <-range(mRNA)
par(mar = c(0,0,0,0))
plot(0, 0, type = "n", xlim = c(xrange[1] + 0.25, xrange[2]) , ylim = c(-0.1, + 0.1), ylab ="", xlab = "", axes = FALSE)
points(x = mRNA, y = rep(0, length(mRNA)), pch="|", col=rgb(0, 0, 0, 0.25))
# Add a red line to show which is the current cutoff.
points(x = input$mInput, y = 0, pch = "|", col="red", cex = 2.5)
points(x = q[2:4], y = rep(0,3), pch = "|", col="blue", cex = 2)
}, bg = "transparent")
#' Generate the slider for the manual cutoff
output$numericCutoff <- renderUI({
sliderInput(inputId = "mInput",label = NULL, min = min(mRNA_surv()), max = max(mRNA_surv()),
value = median(mRNA_surv()), step = 0.05, round = -2)
})
#' Requirements for all the survival plots
#' %then% is a custom operator (defined elsewhere) that chains validations so
#' only the first failing message is shown.
surv_need <- reactive({
validate(
need(input$gene != "", "Please, enter a gene name in the panel on the left")%then%
need(input$gene %in% names(exprs()),"Gene not available for this dataset")
)
})
#' busy indicator when switching surv tab
#' http://stackoverflow.com/questions/18237987/show-that-shiny-is-busy-or-loading-when-changing-tab-panels
output$activeTabSurv <- reactive({
return(input$tabSurv)
})
# keep evaluating even while hidden so the client-side busy indicator works
outputOptions(output, 'activeTabSurv', suspendWhenHidden=FALSE)
#' Set survival plot height
#' Taller canvas when all subtypes are drawn in a multi-panel grid.
surv_plot_height <- reactive({
if(input$allSubSurv){
ifelse(length(subtype())>4, 1300, 650)
} else {
400
}
})
#' Create a Kaplan Meier plot with cutoff based on quantiles or manual selection
#' survivalPlot() is defined elsewhere; one panel per subtype when
#' input$allSubSurv is set, otherwise a single panel.
output$survPlot <- renderPlot({
surv_need ()
req(input$histology %in% c("All", histo()))
# Use 'try' to suppress a message thrown the first time manual cutoff is selected
if(input$allSubSurv) {
nrow <- ceiling(length(subtype())/2)
par(mfrow = c(nrow,2), mar=c(3.5,3.5,3.5,1.5), mgp=c(2.2,.95,0))
try({
for (i in subtype()) {
survivalPlot(surv_data(), surv_type = input$surv_type, gene = input$gene, group = input$histology, subtype = i,
cutoff = input$cutoff, numeric = input$mInput, cex = 1.2)
}
}, silent = TRUE)
} else {
try(survivalPlot(surv_data(), surv_type = input$surv_type, gene = input$gene, group = input$histology, subtype = input$subtype,
cutoff = input$cutoff, numeric = input$mInput), silent = TRUE)
}
}, height = surv_plot_height, width = function(){if(!input$allSubSurv) {500} else {900}})
#' Download the survPlot
#' Same layout logic as the on-screen plot, written to the selected device.
output$downloadsurvPlot <- downloadHandler(
filename = function() {
paste0(Sys.Date(), "_", input$gene, "_", input$dataset, "_survPlot.", download_plot_file_type())
},
content = function(file) {
plotFunction <- match.fun(download_plot_file_type())
plotFunction(file, width = download_plot_width(), height = download_plot_height())
if(input$allSubSurv) {
nrow <- ceiling(length(subtype())/2)
par(mfrow = c(nrow,2), mar=c(3.5,3.5,3.5,1.5), mgp=c(2.2,.95,0))
for (i in subtype()) {
survivalPlot(surv_data(),surv_type = input$surv_type, gene = input$gene, group = input$histology, subtype = i,
cutoff = input$cutoff, numeric = input$mInput, cex = 1.2)
}
} else {
survivalPlot(surv_data(), surv_type = input$surv_type, gene =input$gene, group = input$histology, subtype = input$subtype,
cutoff = input$cutoff, numeric = input$mInput)
}
dev.off()
}
)
})
|
##################################
## models for latent parameters ##
##################################
# Generate AR(1) variates.
#
# Draws `iterations` independent series of length `series_length` from a
# stationary Gaussian AR(1) process with autocorrelation `rho` and marginal
# variance `sigma_sq`, by post-multiplying iid standard normals with the
# Cholesky factor of the AR(1) covariance matrix sigma_sq * rho^|i - j|.
# Returns an iterations x series_length matrix (one series per row).
r_AR1 <- function(iterations, series_length, rho, sigma_sq) {
  white_noise <- matrix(stats::rnorm(iterations * series_length),
                        iterations, series_length)
  lag_mat <- as.matrix(stats::dist(1:series_length))
  cov_chol <- chol(sigma_sq * rho^lag_mat)
  white_noise %*% cov_chol
}
# Link functions used to solve for the conditional prevalence.

# Log-odds transform: maps (0, 1) onto the real line.
logit <- function(x) {
  log(x) - log(1 - x)
}

# Inverse logit (logistic function): maps the real line onto (0, 1).
expit <- function(x) {
  1 / (1 + exp(-x))
}
# Marginal mean of a logit-normal variable.
#
# Computes E[expit(eta_star + v)] with v ~ N(0, sigma_sq) by numerical
# integration against the Gaussian density. Vectorized over `eta_star`;
# `sigma_sq` is a single variance.
# Returns a numeric vector the same length as `eta_star`.
#
# Fixes vs. previous version: vapply() instead of sapply() (type-stable,
# returns numeric(0) rather than list() on empty input), and the inner
# variable no longer shadows the `eta_star` argument.
E_logitnorm <- function(eta_star, sigma_sq)
  vapply(eta_star, function(eta) {
    stats::integrate(function(v)
      expit(eta + v) * exp(-v^2 / (2 * sigma_sq)) / sqrt(2 * pi * sigma_sq),
      lower = -Inf, upper = Inf)$value
  }, numeric(1))
# Invert E_logitnorm(): find the conditional intercept eta_star such that a
# logit-normal variable with variance sigma_sq has the requested marginal
# mean. `marginal_mean` and `sigma_sq` are recycled to a common length; the
# root is searched for inside `interval`. Returns a numeric vector.
eta_star_phi <- function(marginal_mean, sigma_sq, interval = c(-1000, 1000)) {
  len <- max(length(marginal_mean), length(sigma_sq))
  means <- rep(marginal_mean, length.out = len)
  vars <- rep(sigma_sq, length.out = len)
  solve_one <- function(marginal_mean, sigma_sq) {
    objective <- function(x) E_logitnorm(x, sigma_sq) - marginal_mean
    stats::uniroot(objective, interval = interval)$root
  }
  mapply(solve_one, marginal_mean = means, sigma_sq = vars)
}
# Generate random, serially dependent prevalence (phi) values.
# A latent AR(1) process on the logit scale is shifted by the conditional
# intercept that reproduces the marginal mean `phi_marg`, then mapped back to
# (0, 1). Returns a series_length x iterations matrix.
r_phi_star <- function(iterations, series_length, phi_marg, rho, sigma_sq) {
  # conditional intercept(s), recycled along the series
  intercepts <- rep(eta_star_phi(phi_marg, sigma_sq), length.out = series_length)
  # latent AR(1) noise, transposed so rows index time points
  latent <- t(r_AR1(iterations, series_length, rho, sigma_sq))
  expit(intercepts + latent)
}
# Generate random, serially dependent incidence (zeta) values.
# A log-normal AR(1) multiplier with the -sigma_sq / 2 mean correction scales
# the marginal incidence `zeta_marg`.
# Returns a series_length x iterations matrix.
r_zeta_star <- function(iterations, series_length, zeta_marg, rho, sigma_sq) {
  log_noise <- t(r_AR1(iterations, series_length, rho, sigma_sq)) - sigma_sq / 2
  zeta_marg * exp(log_noise)
}
# Smooth a covariance matrix into a stationary (Toeplitz) form.
#
# Each lag-k covariance is replaced by the mean of the k-th off-diagonal of V
# (the corner element V[n, 1] is the single lag-(n-1) value), and the result
# is scaled by the smoothed lag-0 value. Returns an n x n matrix.
#
# Fixes vs. previous version: the scalar ifelse() evaluated BOTH branches, so
# for x == n it computed diag(V[n, 1]), which errors when the corner value is
# negative; plain if/else avoids that. Also uses seq_len()/vapply() and no
# longer shadows the function name with a local variable.
smooth_cov <- function(V) {
  n <- dim(V)[1]
  lag_means <- vapply(seq_len(n), function(x) {
    if (x < n) mean(diag(V[x:n, 1:(n - x + 1)])) else V[x, 1]
  }, numeric(1))
  matrix(lag_means[as.matrix(stats::dist(1:n, diag = TRUE)) + 1], n, n) / lag_means[1]
}
| /R/simulate_latent_parameters.R | no_license | cran/ARPobservation | R | false | false | 1,968 | r |
##################################
## models for latent parameters ##
##################################
# Generate AR(1) variates
# Returns an iterations x series_length matrix; each row is one draw from a
# stationary Gaussian AR(1) process with autocorrelation rho and marginal
# variance sigma_sq (iid normals times the Cholesky factor of the AR(1)
# covariance matrix sigma_sq * rho^|i - j|).
r_AR1 <- function(iterations, series_length, rho, sigma_sq)
matrix(stats::rnorm(iterations * series_length), iterations, series_length) %*% chol(sigma_sq * rho^as.matrix(stats::dist(1:series_length)))
# solve for conditional prevalence
# logit maps (0, 1) onto the real line; expit is its inverse.
logit <- function(x) log(x) - log(1 - x)
expit <- function(x) 1 / (1 + exp(-x))
# Marginal mean of a logit-normal variable.
#
# Computes E[expit(eta_star + v)] with v ~ N(0, sigma_sq) by numerical
# integration against the Gaussian density. Vectorized over `eta_star`;
# `sigma_sq` is a single variance.
# Returns a numeric vector the same length as `eta_star`.
#
# Fixes vs. previous version: vapply() instead of sapply() (type-stable,
# returns numeric(0) rather than list() on empty input), and the inner
# variable no longer shadows the `eta_star` argument.
E_logitnorm <- function(eta_star, sigma_sq)
  vapply(eta_star, function(eta) {
    stats::integrate(function(v)
      expit(eta + v) * exp(-v^2 / (2 * sigma_sq)) / sqrt(2 * pi * sigma_sq),
      lower = -Inf, upper = Inf)$value
  }, numeric(1))
# Invert E_logitnorm(): find the intercept eta_star giving the requested
# marginal mean for a logit-normal variable with variance sigma_sq.
# marginal_mean and sigma_sq are recycled to a common length; the root is
# searched within `interval` by uniroot().
eta_star_phi <- function(marginal_mean, sigma_sq, interval = c(-1000,1000)) {
n <- max(length(marginal_mean), length(sigma_sq))
marginal_mean <- rep(marginal_mean, length.out = n)
sigma_sq <- rep(sigma_sq, length.out = n)
mapply(function(marginal_mean, sigma_sq)
stats::uniroot(function(x) E_logitnorm(x, sigma_sq) - marginal_mean,
interval = interval)$root,
marginal_mean = marginal_mean, sigma_sq = sigma_sq)
}
# generate random, dependent phi values
# AR(1) noise on the logit scale, shifted so the marginal mean is phi_marg,
# then mapped back to (0, 1). Returns a series_length x iterations matrix.
r_phi_star <- function(iterations, series_length, phi_marg, rho, sigma_sq) {
eta_cond <- rep(eta_star_phi(phi_marg, sigma_sq), length.out = series_length)
nu <- t(r_AR1(iterations, series_length, rho, sigma_sq))
expit(eta_cond + nu)
}
# generate random, dependent zeta values
# Log-normal AR(1) multiplier with unit mean (-sigma_sq / 2 correction)
# applied to the marginal incidence zeta_marg.
# Returns a series_length x iterations matrix.
r_zeta_star <- function(iterations, series_length, zeta_marg, rho, sigma_sq) {
nu <- t(r_AR1(iterations, series_length, rho, sigma_sq)) - sigma_sq / 2
zeta_marg * exp(nu)
}
# Smooth a covariance matrix into a stationary (Toeplitz) form.
#
# Each lag-k covariance is replaced by the mean of the k-th off-diagonal of V
# (the corner element V[n, 1] is the single lag-(n-1) value), and the result
# is scaled by the smoothed lag-0 value. Returns an n x n matrix.
#
# Fixes vs. previous version: the scalar ifelse() evaluated BOTH branches, so
# for x == n it computed diag(V[n, 1]), which errors when the corner value is
# negative; plain if/else avoids that. Also uses seq_len()/vapply() and no
# longer shadows the function name with a local variable.
smooth_cov <- function(V) {
  n <- dim(V)[1]
  lag_means <- vapply(seq_len(n), function(x) {
    if (x < n) mean(diag(V[x:n, 1:(n - x + 1)])) else V[x, 1]
  }, numeric(1))
  matrix(lag_means[as.matrix(stats::dist(1:n, diag = TRUE)) + 1], n, n) / lag_means[1]
}
|
# Exploratory plots for the auto price/rating data set.
# NOTE(review): attach() is discouraged because it masks names on the search
# path; consider with() or explicit $-indexing instead.
View(Auto_Price_Rating_Data_replace)
attach(Auto_Price_Rating_Data_replace)
#Boxplot
# price distribution by number of doors / cylinders
boxplot(Price~`Num.of.doors`,xlab="number of doors",ylab="price")
boxplot(Price ~ `Num.of.cylinders`,col=(c("gold","darkgreen")))
#barplot
# one bar per car, showing its length
count <- Auto_Price_Rating_Data_replace$`Length`
barplot(count,xlab = "Length")
#Histogram
hist(Horsepower)
hist(Price)
#Piechart
# NOTE(review): Wheel.base is a continuous measurement, so a pie chart of the
# raw values is of questionable use -- confirm this is intended.
x <- c(`Wheel.base`)
labels <- c(`Fuel.Type`)
pie(x,labels)
#Scatter plot
# price against curb weight, with a loess smooth
scatter.smooth(Curb.weight,Price,xlab = "Curb-Weight",ylab = "Price")
# pair-wise scatterplots colored by class
pairs(Curb.weight~Horsepower, data=Auto_Price_Rating_Data_replace, col=Auto_Price_Rating_Data_replace$Curb.weight)
| /Auto_Price_Rating_Data_replace/M1.2.R | no_license | Kondapsa/R_with_datasets | R | false | false | 665 | r |
# Exploratory plots for the auto price/rating data set.
# NOTE(review): attach() is discouraged because it masks names on the search
# path; consider with() or explicit $-indexing instead.
View(Auto_Price_Rating_Data_replace)
attach(Auto_Price_Rating_Data_replace)
#Boxplot
# price distribution by number of doors / cylinders
boxplot(Price~`Num.of.doors`,xlab="number of doors",ylab="price")
boxplot(Price ~ `Num.of.cylinders`,col=(c("gold","darkgreen")))
#barplot
# one bar per car, showing its length
count <- Auto_Price_Rating_Data_replace$`Length`
barplot(count,xlab = "Length")
#Histogram
hist(Horsepower)
hist(Price)
#Piechart
# NOTE(review): Wheel.base is a continuous measurement, so a pie chart of the
# raw values is of questionable use -- confirm this is intended.
x <- c(`Wheel.base`)
labels <- c(`Fuel.Type`)
pie(x,labels)
#Scatter plot
# price against curb weight, with a loess smooth
scatter.smooth(Curb.weight,Price,xlab = "Curb-Weight",ylab = "Price")
# pair-wise scatterplots colored by class
pairs(Curb.weight~Horsepower, data=Auto_Price_Rating_Data_replace, col=Auto_Price_Rating_Data_replace$Curb.weight)
|
## list all files in your working directory with
list.file() or dir()
## ? before a function will bring up the documentation
?list.files
## use the args() function to see what arguments a function can take
args(list.files)
## getwd() to assign the value of the current working directory
## strings or charctor texts have to be in ""
testdir <- "troyiscool"
## file.create() to make a files.
## list.files() to list all files.
## file.exists() to see if your file exists.
##file.info() to access information about the file.
## file.rename() to change the name of a file.
file.rename("mytest.R", "mytest2.R")
##file.copy to copy a file.
file.copy("mytest.R", "mytest2.R")
##file.path() to construct path to file.
file.path("mytest3.R")
## create a directory in the current working directory called 'testdir2' and a subdirectory for it called 'testdir3', all in one command by using dir.create() and file.path()
## subdirectory ex dir.create(file.path('testdir2', ' testdir3'), recursive = true).
## the part that shows ('testdir', 'testdir3') is the subdirectory.
## unlink() deletes a directory that you createted. every other fucking function sound it should then this comes along and us like nah unlink() not delete() wtf is that swirl??.
## the simplest way to create a sequence of numbers in R is by using the ':' operator.
## ?':' is used to pull up documentation.
## seq() is used to make a sequence of numbers.
## use seq by=0.5 to make it go in incerments of 0.5.
seq(0, 10, by=0.5)
## use length= for a sequence of numbers.
seq(5, 10, length=30)
## say you dont know the length of my_seq, but we want to generate a sequence of intergers frim 1 to N, where N represents the length of my_seq vector
## one way you could do this is by using the operator ':' and the length() function.
1:length(my_seq)
## another way you could acheive this is by using seq(along.with = my_seq).
seq(along.with = my_seq)
## seq_along = generate regular sequences.
## along.with = take the length from the length of this argument.
## rep() stands for replicate
## if you want to do a certain amout of repetitions of the vector and a (c( then state the amount you wish to use using times =
rep(c(0, 1, 2), times = 10)
## c() used to create vectors and objects
## paste() Concatenate vectors after converting to character.
## collapse = "" is an optional character string to separate the results.(number of spaces is how many spaces thw words are appart)
## Sep = A character sting to spearate the terms.
paste(my_char, collapse = " ")
my_name <- c(my_char, "Troy")
## : for generating sequences
1:3
## would look something like "1" "2" "3".
## In R, NA is used to represent any value that is not available
## rep() replicates the values in x.
## rnorm() Density, distribution function, quantile function and random generation with mean equal to mean and standard deviation equal to sd.
## is.na() tells us were our NAs are located in our data.
## '==' is a method of testing for equality between two objects. it is also the same as doing is.na function.
## sum() returns the sum of all the values present in its arguments.
## Inf means infinity.
## [] for selecting particular elements form a vector is by placing an 'index vector' in square brackets immediately following the name of the vector.
## '!' gives you the negation of a logical expression.
##!is.na is saying 'is not NA'
## y <- x[!is.na(x)] takes all the non-NA values from x.
## use -c when dealing will all negitve numbers
## names() set the names of an object.
## dim() function tells you the diemnsions of an object.
## class () will tell you what
## matrix() creates a matrix from the given set of values.
## nrow = sets the number of rows in a matrix.
## ncol = sets the number of columns in a matrix.
my_matrix2 <- matrix( 1:20, nrow = 4, ncol = 5)
## cbind() takes a sequence of vector, matrix or data frame arguments and combine by columns or rows.
## data.frame() allows you to store character vector of names right alongside your matrix of numbers.
## colnames() retrieves or sets the row or column nmaes of a matrix-like object.
## != operator tests whether two values are unequal.
## == operator tests whether two boolean values or numbers are equal.
## ! not operator negates logical expressions so that TRUE expressions become FALSE and FALSE expressions become TRUE.
## Sys.Date() returns a value based on your computer's environment, while other functions manipulate input data in order to compute a return value.
as.Date() ## reference a date.
as.Date("1969-01-01")
## mean() takes a vector of numbers as imput, returns the average of all of the numbers in the imput mi. Imputs to function are often called arguments. Providing arguments to a function is also sometimes called passing arguments to that function. arguments you want to pass to a function go inside the function's parentheses.
## sd this function computes the standard deviation of the values in x. If na.rm is TRUE then missing values are removed before computation proceeds.
evaluate(function(x){x+1}, 6)
##The first argument is a tiny anonymous function that takes one argument `x` and returns `x+1`. We passed the number 6 into this function so the entire expression evaluates to 7.
head() ## by default it shows the first 6 rows of a dataset.
head(x, 10) ## you can alter this behavior by passing a second argument the number of rows you'd like to view.
tail() ## will show the last 6 rows of the dataset.
tail(x, 10) ## will alter the amout of rows you want to show.
## dim() to check the dimensions of the dataset.(rows and columns)
## lapply() function takes a list as imput, applies a function to each element of the list, then returns a list of the same length as the original one. Since a data frame is really just a list of vectors (you can see this with as.list(flags)), we can use lapply() to apply the class() function to each colum of the flags dataset. Let's see it in action!
cls_list <- lapply(flags, class)
## the l in lapply stands for list.
##unique() returns a vector, data frame or array like x but with duplicate elements/rows removed.
## vapply() is similar to sapply, but has a pre-specified type od return value, so it can be safer (and sometimes faster) to use.
## tapply() Apply a function to each cell of a ragged array, that is each (non-empty) group values given by a unique combination of the levels of certain factors.
object.size() ## used to see how much memory your dataset is occupying.
summary() ## provides different output for each variable, depending on its class. for numeric data summary() displays the minimum, 1st quartile, median, mean, 3rd quartile, and maximum. These values help us understand how the data are distributed.
str() ## tells you the class, the oberservations and veriables (rows, columns), name and class of each variable, as well as a preview of its contents.
sample() ## take a sample of the specified size from the elements of x using either with or without replacement.
sample(1:6, 4, replace = TRUE) ## is and example for rolling a six sided die.
## replace means that each number is replaced after it is selected, so that the same number can show up more that once.
flips <- sample(c(0,1), 100, replace = TRUE, prob = c(0.3,0.7))
## this is an example of trying to simulate 100 flips of an unfair two-sided coin. This prticular coin has a 0.3 probability of landing 'tails' and a 0.7 probability of landing 'heads'.
rbinom() ## stands for random binormal variable
## represents the number of 'successes' in a given number of independent 'trials'.
rbinom(1, size = 100, prob = 0.7)
## represents the number of heads in 100 flips of our unfair coin.
rnorm() ## to sumulate random numbers from many other probability distributions.
rpois() ## Density, distribution function, quantile function and random generation for the Poisson distribution with parameter lambda.
lambda argument ## vector of (non-negative) means.
rpois(5, lambda = 10)
my_pois <- replicate(100, rpois(5,10))
## now we can get the mean of each column in my_pois using the colMeans() fucntion
cm <- colMeans(my_pois)
POSIXct ## represents the (signed) number of seconds since the beginning of 1970 (in the UTC time zone) as a numeric vector.
POSIXlt ## is a named list of vectors representing.
weekdays() ## will return the day of week from any date or time object.
months() ## will return the month on any date or time object.
quarters() ## returns the quarter of the year (Q1-Q4).
strptime() ## converts character vectors to POSIXlt.
## similar to as.POSIXlt(), except that the input doesn't have to be in a particular format (YYYY-MM-DD)
%B ## full month name in the current locale.
%d ## day of the month as decimal number.
%Y ## year with century.
%H ## hours as decimal number (00-23).
%M ## minute as a decimal number (00-59).
t3 <- "October 17, 1986 08:24"
t4 <- strftime(t3, "%B %d, %Y %H %M")
t4
[1] "1986-10-17 08:24:00 MDT"
difftime() ## allows you to specify a 'units' parameter.
difftime(Sys.time(), t1, units = 'days')
Time difference of 0.3210728 days
xlab ## to title the x-axis
ylab ## to title the y-axis
col = ## to color the plots
xlim ## to limit the x-axis
xlim = c(10, 15)
ylim ## to limit the y-axis
pch = ## to change the shape of the symbols
boxplot() ## produce a box-and-whisker plots of the given grouped values.
| /R_Programming_T.R | no_license | KyleKubeczka/R_Projects-1 | R | false | false | 9,471 | r | ## list all files in your working directory with
list.file() or dir()
## ? befor a function will bring up the documentation
?list.files
## use the args() function to see what arguments a function can take
args(list.files)
## getwd() to assign the value of the current working directory
## strings or charctor texts have to be in ""
testdir <- "troyiscool"
## file.create() to make a files.
## list.files() to list all files.
## file.exists() to see if your file exists.
##file.info() to access information about the file.
## file.rename() to change the name of a file.
file.rename("mytest.R", "mytest2.R")
##file.copy to copy a file.
file.copy("mytest.R", "mytest2.R")
##file.path() to construct path to file.
file.path("mytest3.R")
## create a directory in the current wirjubg directory called 'testdir2' and subdirectory for it called 'testdir3', all in one command by using dir.create() and filepath
## subdirectory ex dir.create(file.path('testdir2', ' testdir3'), recursive = true).
## the part that shows ('testdir', 'testdir3') is the subdirectory.
## unlink() deletes a directory that you createted. every other fucking function sound it should then this comes along and us like nah unlink() not delete() wtf is that swirl??.
## the simplest way to create a swquence of numbers in R us by using ':' operator.
## ?':' is used to pull up documentation.
## seq() is used to make a sequence of numbers.
## use seq by=0.5 to make it go in incerments of 0.5.
seq(0, 10, by=0.5)
## use length= for a sequence of numbers.
seq(5, 10, length=30)
## say you dont know the length of my_seq, but we want to generate a sequence of intergers frim 1 to N, where N represents the length of my_seq vector
## one way you could do this is by using the operator ':' and the length() function.
1:length(my_seq)
## another way you could acheive this is by using seq(along.with = my_seq).
seq(along.with = my_seq)
## seq_along = generate regular sequences.
## along.with = take the length from the length of this argument.
## rep() stands for replicate
## if you want to do a certain amout of repetitions of the vector and a (c( then state the amount you wish to use using times =
rep(c(0, 1, 2), times = 10)
## c() used to create vectors and objects
## paste() Concatenate vectors after converting to character.
## collapse = "" is an optional character string to separate the results.(number of spaces is how many spaces thw words are appart)
## Sep = A character sting to spearate the terms.
paste(my_char, collapse = " ")
my_name <- c(my_char, "Troy")
## : for generating sequences
1:3
## would look something like "1" "2" "3".
## In R, NA is used to represent any value that is not available
## rep() replicates the values in x.
## rnorm() Density, distribution function, quantile function and random generation with mean equal to mean and standard deviation equal to sd.
## is.na() tells us were our NAs are located in our data.
## '==' is a method of testing for equality between two objects. it is also the same as doing is.na function.
## sum() returns the sum of all the values present in its arguments.
## Inf means infinity.
## [] for selecting particular elements form a vector is by placing an 'index vector' in square brackets immediately following the name of the vector.
## '!' gives you the negation of a logical expression.
##!is.na is saying 'is not NA'
## y <- x[!is.na(x)] takes all the non-NA values from x.
## use -c when dealing will all negitve numbers
## names() set the names of an object.
## dim() function tells you the diemnsions of an object.
## class () will tell you what
## matrix() creates a matrix from the given set of values.
## nrow = sets the number of rows in a matrix.
## ncol = sets the number of columns in a matrix.
my_matrix2 <- matrix( 1:20, nrow = 4, ncol = 5)
## cbind() takes a sequence of vector, matrix or data frame arguments and combine by columns or rows.
## data.frame() allows you to store character vector of names right alongside your matrix of numbers.
## colnames() retrieves or sets the row or column nmaes of a matrix-like object.
## != operator tests whether two values are unequal.
## == operator tests whether two boolean values or numbers are equal.
## ! not operator negates logical expressions so that TRUE expressions become FALSE and FALSE expressions become TRUE.
## Sys.Date() returns a value based on your computer's enviorment, while other functions manipulate imput data in order to compute a retuen value.
as.Date() ## reference a date.
as.Date("1969-01-01")
## mean() takes a vector of numbers as imput, returns the average of all of the numbers in the imput mi. Imputs to function are often called arguments. Providing arguments to a function is also sometimes called passing arguments to that function. arguments you want to pass to a function go inside the function's parentheses.
## sd this function computes the standard deviation of the values in x. If na.rm is TRUE then missing values are removed before computation proceeds.
evaluate(function(x){x+1}, 6)
##The first argument is a tiny anonymous function that takes one argument `x` and returns `x+1`. We passed the number 6 into this function so the entire expression evaluates to 7.
head() ## by default it shows the first 6 rows of a dataset.
head(x, 10) ## you can alter this behavior by passing a second argument the number of rows you'd like to view.
tail() ## will show the last 6 rows of the dataset.
tail(x, 10) ## will alter the amout of rows you want to show.
## dim() to check the dimensions of the dataset.(rows and columns)
## lapply() function takes a list as imput, applies a function to each element of the list, then returns a list of the same length as the original one. Since a data frame is really just a list of vectors (you can see this with as.list(flags)), we can use lapply() to apply the class() function to each colum of the flags dataset. Let's see it in action!
cls_list <- lapply(flags, class)
## the l in lapply stands for list.
##unique() returns a vector, data frame or array like x but with duplicate elements/rows removed.
## vapply() is similar to sapply, but has a pre-specified type od return value, so it can be safer (and sometimes faster) to use.
## tapply() Apply a function to each cell of a ragged array, that is each (non-empty) group values given by a unique combination of the levels of certain factors.
object.size() ## used to see how much memory your dataset is occupying.
summary() ## provides different output for each variable, depending on its class. for numeric data summary() displays the minimum, 1st quartitle, meadian, mean, 3rd quartitle, and maximum. These values help us understand how the data are distributed.
str() ## tells you the class, the oberservations and veriables (rows, columns), name and class of each variable, as well as a preview of its contents.
sample() ## take a sample of the specified size from the elements of x using either with or without replacement.
sample(1:6, 4, replace = TRUE) ## is and example for rolling a six sided die.
## replace means that each number is replaced after it is selected, so that the same number can show up more that once.
flips <- sample(c(0,1), 100, replace = TRUE, prob = c(0.3,0.7))
## this is an example of trying to simulate 100 flips of an unfair two-sided coin. This prticular coin has a 0.3 probability of landing 'tails' and a 0.7 probability of landing 'heads'.
rbinom() ## stands for random binormal variable
## represents the number of 'successes' in a given number of independent 'trials'.
rbinom(1, size = 100, prob = 0.7)
## represents the number of heads in 100 flips of our unfair coin.
rnorm() ## to sumulate random numbers from many other probability distributions.
rpois() ## Density, distrabution function, quantile function and random generation for the Poission distribution with parameter lambda.
lambda argument ## vector of (non-negative) means.
rpois(5, lambda = 10)
my_pois <- replicate(100, rpois(5,10))
## now we can get the mean of each column in my_pois using the colMeans() fucntion
cm <- colMeans(my_pois)
POSIXct ## represents the (signed) number of seconds since the beginning of 1970 (in the UTC time zone) as a numeric vector.
POSIXlt ## is a named list of vectors representing.
weekdays() ## will return the day of week from any date or time object.
months() ## will return the month on any date or time object.
quarters() ## returns the quarter of the year (Q1-Q4).
strptime() ## converts character vectors to POSIXlt.
## similar to as.POSIXlt(), except that the input doesn't have to be in a particular format (YYYY-MM-DD)
%B ## full month name in the current locale.
%d ## day of the month as decimal number.
%Y ## year with century.
%H ## hours as decimal number (00-23).
%M ## minute as a decimal number (00-59).
t3 <- "October 17, 1986 08:24"
t4 <- strftime(t3, "%B %d, %Y %H %M")
t4
[1] "1986-10-17 08:24:00 MDT"
difftime() ## allows you to specify a 'units' parameter.
difftime(Sys.time(), t1, units = 'days')
Time difference of 0.3210728 days
xlab ## to title the x-axis
ylab ## to title the y-axis
col = ## to color the plots
xlim ## to limit the x-axis
xlim = c(10, 15)
ylim ## to limit the y-axis
pch = ## to change the shape of the symbols
boxplot() ## produce a box-and-whisker plots of the given grouped values.
|
# Compare emissions from motor vehicle sources in Baltimore City
# with emissions from motor vehicle sources in Los Angeles County,
# California (fips == "06037"). Which city has seen greater changes
# over time in motor vehicle emissions?
library(ggplot2)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")  # loaded but not used below
# ON-ROAD (motor vehicle) records for Baltimore City (24510) and LA County (06037)
NEI_MD_CA_onroad <- NEI[(NEI$fips=="24510"|NEI$fips=="06037") & NEI$type=="ON-ROAD", ]
# total emissions per year and county
sub_tot <- aggregate(Emissions ~ year + fips, NEI_MD_CA_onroad, sum)
# replace fips codes with readable labels for the legend
sub_tot$fips[sub_tot$fips=="24510"] <- "Baltimore, MD"
sub_tot$fips[sub_tot$fips=="06037"] <- "Los Angeles, CA"
png(filename='../plot6.png', width=700, height=550)
g <- ggplot(sub_tot, aes(year, Emissions, color = fips))
g <- g + geom_line() + geom_point() + xlab("Year") + ylab(expression('Total PM'[2.5]*" Emissions")) + ggtitle('Total Emissions from motor vehicle in Baltimore City, MD and Los Angeles, CA')
print(g)
dev.off()
| /plot6.R | no_license | stemineo/Exploratory_Data_Analysis_Course_Project_2 | R | false | false | 939 | r | # Compare emissions from motor vehicle sources in Baltimore City
# with emissions from motor vehicle sources in Los Angeles County,
# California (fips == "06037"). Which city has seen greater changes
# over time in motor vehicle emissions?
library(ggplot2)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")  # loaded but not used below
# ON-ROAD (motor vehicle) records for Baltimore City (24510) and LA County (06037)
NEI_MD_CA_onroad <- NEI[(NEI$fips=="24510"|NEI$fips=="06037") & NEI$type=="ON-ROAD", ]
# total emissions per year and county
sub_tot <- aggregate(Emissions ~ year + fips, NEI_MD_CA_onroad, sum)
# replace fips codes with readable labels for the legend
sub_tot$fips[sub_tot$fips=="24510"] <- "Baltimore, MD"
sub_tot$fips[sub_tot$fips=="06037"] <- "Los Angeles, CA"
png(filename='../plot6.png', width=700, height=550)
g <- ggplot(sub_tot, aes(year, Emissions, color = fips))
g <- g + geom_line() + geom_point() + xlab("Year") + ylab(expression('Total PM'[2.5]*" Emissions")) + ggtitle('Total Emissions from motor vehicle in Baltimore City, MD and Los Angeles, CA')
print(g)
dev.off()
|
## This is a R script to plot 2D histogram of ice particles
# Load required libraries
library(fields)
library(ncdf4)
library(scales)
# check arguments
# the script takes no command-line arguments; bail out if any are given
args = commandArgs(trailingOnly=TRUE)
if(length(args)!=0) {
cat(sprintf("Use without any arguments\n"))
quit()
}
# set parameters
# sentinel: particle slots with sd_z at/below this value are treated as invalid
VALID2INVALID <- -999.0
# Make the list of files
# file names follow SD_output_NetCDF_<time>.000.pe<rank>; split on the fixed
# parts to recover the output time and the MPI rank for each file
tmp_files = dir("../",pattern="^SD_output_NetCDF_")
tmp = strsplit(tmp_files,"\\SD_output_NetCDF_|\\.000.pe")
alltimes = unique(matrix(unlist(tmp),nrow=3)[2,])
allmpiranks = unique(matrix(unlist(tmp),nrow=3)[3,])
# matrix of file names indexed as [mpirank, time]
allfiles = matrix(tmp_files,ncol=length(alltimes))
rownames(allfiles) = allmpiranks
colnames(allfiles) = alltimes
# loop of time
#for(time in alltimes){
for(time in alltimes[which(as.numeric(alltimes)==3000)]){
cat(sprintf("processing the time = %s [sec]\n",time))
sink(paste("selected_ice.",sprintf("%05d",as.numeric(time)),".dat",sep=""))
sink()
sink(paste("selected_ice.",sprintf("%05d",as.numeric(time)),".dat",sep=""))
cat(sprintf("time[s] x[m] z[m] a[m] c[m] dens[kg/m^3] mult[] tvel[m/s] mrime[kg] nmono[] ice_category\n"))
# for(ice_category in c("snow")){
for(ice_category in c("graupel", "ice", "snow")){
# loop of MPI rank
for(mpirank in allmpiranks){
# for(mpirank in allmpiranks[80]){
# open file
file <- allfiles[mpirank,time]
ncin <- nc_open(paste("../",file,sep=""))
# extract the id of ice particles
all_sd_z <- ncvar_get(ncin,"sd_z")
all_sd_liqice <- ncvar_get(ncin,"sd_liqice")
ice_id <- which(all_sd_liqice==10 & all_sd_z>VALID2INVALID)
## equatorial radius
all_sdi_re <- ncvar_get(ncin,"sdi_re")
equr <- all_sdi_re[ice_id]
## polar radius
all_sdi_rp <- ncvar_get(ncin,"sdi_rp")
polr <- all_sdi_rp[ice_id]
## density of particle
all_sdi_rho <- ncvar_get(ncin,"sdi_rho")
dens <- all_sdi_rho[ice_id]
## terminal velocity of ice particles
all_sdi_tvel <- ncvar_get(ncin,"sd_vz")
icetvel <- all_sdi_tvel[ice_id]
## multiplicity of super-droplet
all_sd_n <- ncvar_get(ncin,"sd_n")
mult <- all_sd_n[ice_id]
## rime mass
all_sdi_mrime <- ncvar_get(ncin,"sdi_mrime")
mrime <- all_sdi_mrime[ice_id]
## number of monomers
all_sdi_nmono <- ncvar_get(ncin,"sdi_nmono")
nmono <- all_sdi_nmono[ice_id]
# x coordinate
all_sd_x <- ncvar_get(ncin,"sd_x")
sd_x <- all_sd_x[ice_id]
# z coordinate
sd_z <- all_sd_z[ice_id]
# close file
nc_close(ncin)
# categorize ice particles
mass <- dens*(4.0/3.0)*pi*equr**2*polr
if( ice_category=="graupel" ){
typed_ice_id <- which( (mass*0.3)<mrime )
}else if( ice_category=="snow" ){
typed_ice_id <- which( ((mass*0.3)>=mrime) & (nmono>10) )
}else if( ice_category=="ice" ){
typed_ice_id <- which( ((mass*0.3)>=mrime) & (nmono<=10) )
}else if( ice_category=="all" ){
typed_ice_id <- which( mass>0.0 )
}else{
cat(sprintf("Wrong ice category \"%s\" specified \n",typed_ice_id))
quit()
}
# if no ice exists, skip to the next MPI rank
if(length(typed_ice_id)==0){ next }
# save categorized ice particles
equr <- equr[typed_ice_id]
polr <- polr[typed_ice_id]
dens <- dens[typed_ice_id]
mult <- mult[typed_ice_id]
icetvel <- icetvel[typed_ice_id]
mrime <- mrime[typed_ice_id]
nmono <- nmono[typed_ice_id]
sd_x <- sd_x[typed_ice_id]
sd_z <- sd_z[typed_ice_id]
### output selected ice
selected_ice_id <- which( ((polr/equr)>10.0) & (2.0*polr>0.03) )
if(length(selected_ice_id)>0){
for(id in selected_ice_id){
cat(sprintf("%d %e %e %e %e %e %e %e %e %d %s\n",as.integer(time),sd_x[id], sd_z[id], equr[id],polr[id],dens[id],mult[id],icetvel[id],mrime[id],nmono[id],ice_category))
}
}
}
}
sink()
}
| /scale-les/test/case/warmbubble/2D_Khain04_sdm/ptl_individual_analysis/ptl_individual_analysis.R | permissive | huangynj/SCALE-SDM_mixed-phase_Shima2019 | R | false | false | 3,762 | r | ## This is a R script to plot 2D histogram of ice particles
# Load required libraries
# This script scans the SD_output_NetCDF_* files of a SCALE-SDM run,
# classifies the super-droplet ice particles into graupel / snow / ice, and
# writes the strongly prolate, large particles to selected_ice.<time>.dat.
library(fields)
library(ncdf4)
library(scales)
# check arguments (this script takes none)
args = commandArgs(trailingOnly=TRUE)
if(length(args)!=0) {
  cat(sprintf("Use without any arguments\n"))
  quit()
}
# set parameters
# Coordinates at or below this sentinel mark invalid/unused super-droplets.
VALID2INVALID <- -999.0
# Make the list of files (names look like SD_output_NetCDF_<time>.000.pe<rank>)
tmp_files = dir("../",pattern="^SD_output_NetCDF_")
tmp = strsplit(tmp_files,"\\SD_output_NetCDF_|\\.000.pe")
alltimes = unique(matrix(unlist(tmp),nrow=3)[2,])
allmpiranks = unique(matrix(unlist(tmp),nrow=3)[3,])
# Arrange the file names as a (MPI rank x time) lookup table.
allfiles = matrix(tmp_files,ncol=length(alltimes))
rownames(allfiles) = allmpiranks
colnames(allfiles) = alltimes
# loop of time (currently restricted to the t = 3000 s snapshot)
#for(time in alltimes){
for(time in alltimes[which(as.numeric(alltimes)==3000)]){
  cat(sprintf("processing the time = %s [sec]\n",time))
  # The first sink/sink() pair truncates any stale output file; the second
  # sink opens it for the output produced below.
  sink(paste("selected_ice.",sprintf("%05d",as.numeric(time)),".dat",sep=""))
  sink()
  sink(paste("selected_ice.",sprintf("%05d",as.numeric(time)),".dat",sep=""))
  cat(sprintf("time[s] x[m] z[m] a[m] c[m] dens[kg/m^3] mult[] tvel[m/s] mrime[kg] nmono[] ice_category\n"))
  # for(ice_category in c("snow")){
  for(ice_category in c("graupel", "ice", "snow")){
    # loop of MPI rank
    for(mpirank in allmpiranks){
      # for(mpirank in allmpiranks[80]){
      # open file (renamed from `file`, which masked base::file)
      ncfile <- allfiles[mpirank,time]
      ncin <- nc_open(paste("../",ncfile,sep=""))
      # extract the id of valid ice particles (liquid/ice flag == 10)
      all_sd_z <- ncvar_get(ncin,"sd_z")
      all_sd_liqice <- ncvar_get(ncin,"sd_liqice")
      ice_id <- which(all_sd_liqice==10 & all_sd_z>VALID2INVALID)
      ## equatorial radius
      all_sdi_re <- ncvar_get(ncin,"sdi_re")
      equr <- all_sdi_re[ice_id]
      ## polar radius
      all_sdi_rp <- ncvar_get(ncin,"sdi_rp")
      polr <- all_sdi_rp[ice_id]
      ## density of particle
      all_sdi_rho <- ncvar_get(ncin,"sdi_rho")
      dens <- all_sdi_rho[ice_id]
      ## terminal velocity of ice particles
      all_sdi_tvel <- ncvar_get(ncin,"sd_vz")
      icetvel <- all_sdi_tvel[ice_id]
      ## multiplicity of super-droplet
      all_sd_n <- ncvar_get(ncin,"sd_n")
      mult <- all_sd_n[ice_id]
      ## rime mass
      all_sdi_mrime <- ncvar_get(ncin,"sdi_mrime")
      mrime <- all_sdi_mrime[ice_id]
      ## number of monomers
      all_sdi_nmono <- ncvar_get(ncin,"sdi_nmono")
      nmono <- all_sdi_nmono[ice_id]
      # x coordinate
      all_sd_x <- ncvar_get(ncin,"sd_x")
      sd_x <- all_sd_x[ice_id]
      # z coordinate
      sd_z <- all_sd_z[ice_id]
      # close file
      nc_close(ncin)
      # categorize ice particles by rime-mass fraction and monomer count;
      # spheroid mass = rho * (4/3) * pi * a^2 * c
      mass <- dens*(4.0/3.0)*pi*equr**2*polr
      if( ice_category=="graupel" ){
        typed_ice_id <- which( (mass*0.3)<mrime )
      }else if( ice_category=="snow" ){
        typed_ice_id <- which( ((mass*0.3)>=mrime) & (nmono>10) )
      }else if( ice_category=="ice" ){
        typed_ice_id <- which( ((mass*0.3)>=mrime) & (nmono<=10) )
      }else if( ice_category=="all" ){
        typed_ice_id <- which( mass>0.0 )
      }else{
        # BUGFIX: report the offending category name; the original passed
        # `typed_ice_id`, which is undefined in this branch and would have
        # raised "object not found" instead of printing the message.
        cat(sprintf("Wrong ice category \"%s\" specified \n",ice_category))
        quit()
      }
      # if no ice exists, skip to the next MPI rank
      if(length(typed_ice_id)==0){ next }
      # save categorized ice particles
      equr <- equr[typed_ice_id]
      polr <- polr[typed_ice_id]
      dens <- dens[typed_ice_id]
      mult <- mult[typed_ice_id]
      icetvel <- icetvel[typed_ice_id]
      mrime <- mrime[typed_ice_id]
      nmono <- nmono[typed_ice_id]
      sd_x <- sd_x[typed_ice_id]
      sd_z <- sd_z[typed_ice_id]
      ### output selected ice: strongly prolate (c/a > 10) and long (2c > 3 cm)
      selected_ice_id <- which( ((polr/equr)>10.0) & (2.0*polr>0.03) )
      if(length(selected_ice_id)>0){
        for(id in selected_ice_id){
          cat(sprintf("%d %e %e %e %e %e %e %e %e %d %s\n",as.integer(time),sd_x[id], sd_z[id], equr[id],polr[id],dens[id],mult[id],icetvel[id],mrime[id],nmono[id],ice_category))
        }
      }
    }
  }
  sink()
}
|
##-------------------------------------------
## server.R
## Shiny server for a coin-toss hypothesis test: the user enters a
## heads/tails sequence and the app compares the observed number of face
## changes against an empirical null distribution simulated from a fair coin.
require(shiny)
## Load the template shared by the iguiR2 project applications
source("../template.R")
## Simulate the number of face changes when tossing a fair coin n times.
moeda <- function(n){
    sum(abs(diff(rbinom(n, 1, 0.5))))
}
## Number of simulations used to build the null distribution
N <- 1000
shinyServer(
    function(input, output){
        ## IGUIR2 header
        output$header <- renderPrint({
            template("TEMA")
        })
        ## Reactive values holding the sequence entered by the user (x)
        ## and whether the plot should currently be shown (show)
        v <- reactiveValues(x = integer(), show = FALSE)
        ## Responds to `input$goCara` (heads button): append a 1.
        observeEvent(input$goCara, {
            v$x <- c(v$x, 1L)
        })
        ## Responds to `input$goCoro` (tails button): append a 0.
        observeEvent(input$goCoro, {
            v$x <- c(v$x, 0L)
        })
        ## Responds to `input$clear`: reset the sequence and hide the plot.
        observeEvent(
            input$clear, {
                v$x <- integer()
                v$show <- FALSE
            })
        ## Responds to `input$goProcess`, returning the list of values
        ## used to build the plot
        process <- eventReactive(
            input$goProcess, {
                x <- v$x
                ## Show the plot from now on
                v$show <- TRUE
                ## Number of tosses.
                n <- length(v$x)
                ## Number of heads.
                k <- sum(v$x)
                ## Observed number of face changes.
                o <- sum(abs(diff(v$x)))
                ## Run many replications of the random experiment.
                r <- replicate(N, moeda(n))
                ## Empirical two-sided p-value (capped at 1).
                p <- min(c(2*min(c(sum(r<=o), sum(r>=o)))/N, 1))
                ## List with all the elements.
                return(list(n=n, k=k, o=o, r=r, p=p, x=x, show=v$show))
            })
        ## Number of tosses entered so far
        output$numx <- renderText({
            n <- length(v$x)
            return(n)
        })
        ## The sequence entered by the user, as a string of 0s and 1s
        output$seqx <- renderText({
            s <- paste0(v$x, collapse = "")
            return(s)
        })
        ## Histogram of the simulated null distribution with the observed
        ## statistic marked; only drawn for sequences of 20+ tosses
        output$hist <- renderPlot({
            with(process(),{
                if(n < 20){
                }
                ## NOTE(review): the empty `if` block above is dead code.
                ## NOTE(review): `&` works here on scalars, but `&&` would be
                ## the conventional choice for a scalar condition.
                if(v$show & n > 19){
                    par(mar = c(5, 4, 1, 2), family = "Palatino",
                        cex = 1.2)
                    ## One histogram bin per integer value of the statistic.
                    bks <- seq(min(c(r,o)), max(c(r, o)) + 1,
                               by = 1) - 0.5
                    ht <- hist(r, breaks = bks, plot = FALSE)
                    plot(ht$mids, ht$density, type = "h", lwd = 2,
                         ylim = c(0, 1.05 * max(ht$density)),
                         xlab = sprintf("Número de trocas em %i lançamentos", n),
                         ylab = "Probabilidade",
                         sub = sprintf("%i simulações", N))
                    grid()
                    segments(ht$mids, 0, ht$mids, ht$density, lwd = 3,
                             col = 1)
                    ## Mark the observed statistic with a vertical blue line.
                    abline(v = o, col = "blue", lwd = 2)
                    axis(1, o, round(o, 2), col = "blue",
                         col.axis = "blue", cex = 1.5)
                    text(x = o, y = par()$usr[4],
                         label = "Estatística observada",
                         srt = 90, adj = c(1.25,-0.25))
                    mtext(side = 3, line = 0, cex = 1.2,
                          text = sprintf(
                              "Número de caras: %i\t Número de coroas: %i",
                              k, n - k))
                }
            })
        })
        ## Warning message shown when the entered sequence is shorter than 20
        output$bloqueio <- renderUI({
            if(process()$n < 20 & v$show){
                HTML("<center><font style='font-weight: bold; color:red'>Lançe ao menos 20 vezes</font></center><br>")
            } else return()
        })
    })
| /shiny/moeda/server.R | no_license | diogoprov/iguir2 | R | false | false | 4,208 | r | ##-------------------------------------------
## server.R
## Shiny server for a coin-toss hypothesis test: the user enters a
## heads/tails sequence and the app compares the observed number of face
## changes against an empirical null distribution simulated from a fair coin.
require(shiny)
## Load the template shared by the iguiR2 project applications
source("../template.R")
## Simulate the number of face changes when tossing a fair coin n times.
moeda <- function(n){
    sum(abs(diff(rbinom(n, 1, 0.5))))
}
## Number of simulations used to build the null distribution
N <- 1000
shinyServer(
    function(input, output){
        ## IGUIR2 header
        output$header <- renderPrint({
            template("TEMA")
        })
        ## Reactive values holding the sequence entered by the user (x)
        ## and whether the plot should currently be shown (show)
        v <- reactiveValues(x = integer(), show = FALSE)
        ## Responds to `input$goCara` (heads button): append a 1.
        observeEvent(input$goCara, {
            v$x <- c(v$x, 1L)
        })
        ## Responds to `input$goCoro` (tails button): append a 0.
        observeEvent(input$goCoro, {
            v$x <- c(v$x, 0L)
        })
        ## Responds to `input$clear`: reset the sequence and hide the plot.
        observeEvent(
            input$clear, {
                v$x <- integer()
                v$show <- FALSE
            })
        ## Responds to `input$goProcess`, returning the list of values
        ## used to build the plot
        process <- eventReactive(
            input$goProcess, {
                x <- v$x
                ## Show the plot from now on
                v$show <- TRUE
                ## Number of tosses.
                n <- length(v$x)
                ## Number of heads.
                k <- sum(v$x)
                ## Observed number of face changes.
                o <- sum(abs(diff(v$x)))
                ## Run many replications of the random experiment.
                r <- replicate(N, moeda(n))
                ## Empirical two-sided p-value (capped at 1).
                p <- min(c(2*min(c(sum(r<=o), sum(r>=o)))/N, 1))
                ## List with all the elements.
                return(list(n=n, k=k, o=o, r=r, p=p, x=x, show=v$show))
            })
        ## Number of tosses entered so far
        output$numx <- renderText({
            n <- length(v$x)
            return(n)
        })
        ## The sequence entered by the user, as a string of 0s and 1s
        output$seqx <- renderText({
            s <- paste0(v$x, collapse = "")
            return(s)
        })
        ## Histogram of the simulated null distribution with the observed
        ## statistic marked; only drawn for sequences of 20+ tosses
        output$hist <- renderPlot({
            with(process(),{
                if(n < 20){
                }
                ## NOTE(review): the empty `if` block above is dead code.
                ## NOTE(review): `&` works here on scalars, but `&&` would be
                ## the conventional choice for a scalar condition.
                if(v$show & n > 19){
                    par(mar = c(5, 4, 1, 2), family = "Palatino",
                        cex = 1.2)
                    ## One histogram bin per integer value of the statistic.
                    bks <- seq(min(c(r,o)), max(c(r, o)) + 1,
                               by = 1) - 0.5
                    ht <- hist(r, breaks = bks, plot = FALSE)
                    plot(ht$mids, ht$density, type = "h", lwd = 2,
                         ylim = c(0, 1.05 * max(ht$density)),
                         xlab = sprintf("Número de trocas em %i lançamentos", n),
                         ylab = "Probabilidade",
                         sub = sprintf("%i simulações", N))
                    grid()
                    segments(ht$mids, 0, ht$mids, ht$density, lwd = 3,
                             col = 1)
                    ## Mark the observed statistic with a vertical blue line.
                    abline(v = o, col = "blue", lwd = 2)
                    axis(1, o, round(o, 2), col = "blue",
                         col.axis = "blue", cex = 1.5)
                    text(x = o, y = par()$usr[4],
                         label = "Estatística observada",
                         srt = 90, adj = c(1.25,-0.25))
                    mtext(side = 3, line = 0, cex = 1.2,
                          text = sprintf(
                              "Número de caras: %i\t Número de coroas: %i",
                              k, n - k))
                }
            })
        })
        ## Warning message shown when the entered sequence is shorter than 20
        output$bloqueio <- renderUI({
            if(process()$n < 20 & v$show){
                HTML("<center><font style='font-weight: bold; color:red'>Lançe ao menos 20 vezes</font></center><br>")
            } else return()
        })
    })
|
# cov19.R - COVID-19 deaths in Germany
# Produces geofaceted per-state plots of cumulative and 7-day infection/death
# rates plus two overlaid line charts. Expects `tbl` (list of per-state
# time-series data frames, dates as row names), `ew` (east/west/Berlin codes)
# and `state` to be provided by the .RData file loaded below.
load("cov19de.RData") # generated by scrape_cov19.R
# Background fill per state: east = light red, west = light blue, Berlin = purple.
bg <- c(e="#FFCCCC", w="#CCCCFF", b="#FFCCFF")[ew]
names(bg) <- state
### Fitting
library(growthcurver)
# Fitted Cum. infections / 100,000 pop. (one growth-curve fit per state)
L <- lapply(tbl[[2]], function(x)
  SummarizeGrowth(seq_along(x), x, bg_correct="none"))
tbl[[5]] <- data.frame(sapply(L, function(x) predict(x$model)))
row.names(tbl[[5]]) <- row.names(tbl[[2]])
# Fitted Cum. deaths / 100,000 pop.
L <- lapply(tbl[[4]], function(x)
  SummarizeGrowth(seq_along(x), x, bg_correct="none"))
tbl[[6]] <- data.frame(sapply(L, function(x) predict(x$model)))
row.names(tbl[[6]]) <- row.names(tbl[[4]])
### Plot
library(ggplot2)
library(geofacet)
library(cowplot)
theme_set(theme_cowplot())
# Long format: one row per (date, state) with observed and fitted values.
ti <- cbind(
  date=rep(row.names(tbl[[2]]), ncol(tbl[[2]])),
  stack(tbl[[2]])[, c(2, 1)], stack(tbl[[5]])[, 1])
colnames(ti) <- c("date", "state", "infection", "fitted")
t2 <- tbl[[2]]
# Cumulative infections per state: observed points plus fitted red curve.
ggplot(ti, aes(as.Date(date), infection)) +
  facet_geo(~ state, grid="de_states_grid1") +
  geom_rect(aes(fill=state), xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf) +
  geom_point(shape=1) +
  geom_line(aes(as.Date(date), fitted), color="red") +
  scale_x_date(date_labels="%-m/%-d") +
  scale_fill_manual("legend", values=bg) +
  labs(
    title = sprintf(
      "COVID-19 infections in Germany (as of %s)", tail(row.names(t2), 1)),
    caption = "Data Source: Wikipedia based on Robert Koch Institute",
    x = "Date",
    y = "Infections / 100,000 pop.") +
  geom_text(
    x=-Inf, y=Inf, aes(label=sprintf("%3.0f", t2[nrow(t2), state])),
    vjust=1.2, hjust=-0.2, size=5, check_overlap=TRUE) +
  theme(legend.position="none")
ggsave("cov19i.png", width=12, height=9, dpi=72)
# 7-day rolling new infections (lagged difference over 7 rows).
t2w <- as.data.frame(lapply(t2, diff, 7))
row.names(t2w) <- row.names(t2)[-(1:7)]
tiw <- cbind(
  date=rep(row.names(t2w), ncol(t2w)), stack(t2w)[, c(2, 1)])
colnames(tiw) <- c("date", "state", "infection")
# Weekly new infections per state; gray line marks the 50-per-100k threshold.
ggplot(tiw, aes(as.Date(date), infection)) +
  facet_geo(~ state, grid="de_states_grid1") +
  geom_rect(aes(fill=state), xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf) +
  geom_hline(yintercept=50, color="gray") +
  geom_point(shape=1) +
  scale_x_date(date_labels="%-m/%-d") +
  scale_fill_manual("legend", values=rep("#F0F0F0", ncol(t2w))) +
  labs(
    title = sprintf(
      "COVID-19 last 7 days new infections in Germany (as of %s)", tail(row.names(t2), 1)),
    caption = "Data Source: Wikipedia based on Robert Koch Institute",
    x = "Date",
    y = "Last 7 days new infections / 100,000 pop.") +
  geom_text(
    x=Inf, y=Inf, aes(label=sprintf("%4.1f", t2w[nrow(t2w), state])),
    vjust=1.2, hjust=1, size=5, check_overlap=TRUE) +
  theme(legend.position="none")
ggsave("cov19iw.png", width=12, height=9, dpi=72)
# Same long-format construction for the deaths series.
td <- cbind(
  date=rep(row.names(tbl[[4]]), ncol(tbl[[4]])),
  stack(tbl[[4]])[, c(2, 1)], stack(tbl[[6]])[, 1])
colnames(td) <- c("date", "state", "death", "fitted")
t4 <- tbl[[4]]
# Cumulative deaths per state with fitted curve.
ggplot(td, aes(as.Date(date), death)) +
  facet_geo(~ state, grid="de_states_grid1") +
  geom_rect(aes(fill=state), xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf) +
  geom_point(shape=1) +
  geom_line(aes(as.Date(date), fitted), color="red") +
  scale_x_date(date_labels="%-m/%-d") +
  scale_fill_manual("legend", values=bg) +
  labs(
    title = sprintf(
      "COVID-19 deaths in Germany (as of %s)", tail(row.names(t4), 1)),
    caption = "Data Source: Wikipedia based on Robert Koch Institute",
    x = "Date",
    y = "Deaths / 100,000 pop.") +
  geom_text(
    x=-Inf, y=Inf, aes(label=sprintf("%4.1f", t4[nrow(t4), state])),
    vjust=1.2, hjust=-0.2, size=5, check_overlap=TRUE) +
  theme(legend.position="none")
ggsave("cov19d.png", width=12, height=9, dpi=72)
# 7-day rolling new deaths.
t4w <- as.data.frame(lapply(t4, diff, 7))
row.names(t4w) <- row.names(t4)[-(1:7)]
tdw <- cbind(
  date=rep(row.names(t4w), ncol(t4w)), stack(t4w)[, c(2, 1)])
# NOTE(review): the value column is named "infection" although it holds death
# counts (copy-paste from the infections block); the aes() below uses the
# same name, so the plot is still consistent.
colnames(tdw) <- c("date", "state", "infection")
ggplot(tdw, aes(as.Date(date), infection)) +
  facet_geo(~ state, grid="de_states_grid1") +
  geom_rect(aes(fill=state), xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf) +
  geom_point(shape=1) +
  scale_x_date(date_labels="%-m/%-d", limits=range(as.Date(row.names(t2w)))) +
  scale_fill_manual("legend", values=rep("#F0F0F0", ncol(t2w))) +
  labs(
    title = sprintf(
      "COVID-19 last 7 days new deaths in Germany (as of %s)", tail(row.names(t2), 1)),
    caption = "Data Source: Wikipedia based on Robert Koch Institute",
    x = "Date",
    y = "Last 7 days new deaths / 100,000 pop.") +
  geom_text(
    x=Inf, y=Inf, aes(label=sprintf("%4.2f", t4w[nrow(t4w), state])),
    vjust=1.2, hjust=1, size=5, check_overlap=TRUE) +
  theme(legend.position="none")
ggsave("cov19dw.png", width=12, height=9, dpi=72)
# matplot-like graph: all states overlaid, colored east/west/Berlin.
ggplot(stack(t2), aes(rep(as.Date(row.names(t2)), ncol(t2)), y=values, group=ind)) +
  geom_line(aes(color=ew[ind])) +
  geom_text(
    data=stack(t2[nrow(t2),]),
    aes(x=as.Date(tail(row.names(t2), 1)), y=values, label=ind, color=ew[ind]),
    hjust=-0.2) +
  scale_color_manual(values=c(e="red", w="blue", b="purple")) +
  scale_x_date(date_labels="%-m/%-d") +
  labs(
    title = sprintf(
      "COVID-19 infections in Germany (as of %s)", rownames(t2)[nrow(t2)]),
    caption = "Data Source: Wikipedia based on Robert Koch Institute",
    x = "Date", y = "Infections / 100,000 pop.") +
  theme(legend.position="none")
# Same overlaid chart for the deaths series.
ggplot(stack(t4), aes(rep(as.Date(row.names(t4)), ncol(t4)), y=values, group=ind)) +
  geom_line(aes(color=ew[ind])) +
  geom_text(
    data=stack(t4[nrow(t4),]),
    aes(x=as.Date(tail(row.names(t4), 1)), y=values, label=ind, color=ew[ind]),
    hjust=-0.2) +
  scale_color_manual(values=c(e="red", w="blue", b="purple")) +
  scale_x_date(date_labels="%-m/%-d") +
  labs(
    title = sprintf(
      "COVID-19 deaths in Germany (as of %s)", rownames(t4)[nrow(t4)]),
    caption = "Data Source: Wikipedia based on Robert Koch Institute",
    x = "Date", y = "Deaths / 100,000 pop.") +
  theme(legend.position="none")
| /cov19.R | permissive | yukia10/Rexamples | R | false | false | 5,995 | r | # cov19.R - COVID-19 deaths in Germany
# Produces geofaceted per-state plots of cumulative and 7-day infection/death
# rates plus two overlaid line charts. Expects `tbl` (list of per-state
# time-series data frames, dates as row names), `ew` (east/west/Berlin codes)
# and `state` to be provided by the .RData file loaded below.
load("cov19de.RData") # generated by scrape_cov19.R
# Background fill per state: east = light red, west = light blue, Berlin = purple.
bg <- c(e="#FFCCCC", w="#CCCCFF", b="#FFCCFF")[ew]
names(bg) <- state
### Fitting
library(growthcurver)
# Fitted Cum. infections / 100,000 pop. (one growth-curve fit per state)
L <- lapply(tbl[[2]], function(x)
  SummarizeGrowth(seq_along(x), x, bg_correct="none"))
tbl[[5]] <- data.frame(sapply(L, function(x) predict(x$model)))
row.names(tbl[[5]]) <- row.names(tbl[[2]])
# Fitted Cum. deaths / 100,000 pop.
L <- lapply(tbl[[4]], function(x)
  SummarizeGrowth(seq_along(x), x, bg_correct="none"))
tbl[[6]] <- data.frame(sapply(L, function(x) predict(x$model)))
row.names(tbl[[6]]) <- row.names(tbl[[4]])
### Plot
library(ggplot2)
library(geofacet)
library(cowplot)
theme_set(theme_cowplot())
# Long format: one row per (date, state) with observed and fitted values.
ti <- cbind(
  date=rep(row.names(tbl[[2]]), ncol(tbl[[2]])),
  stack(tbl[[2]])[, c(2, 1)], stack(tbl[[5]])[, 1])
colnames(ti) <- c("date", "state", "infection", "fitted")
t2 <- tbl[[2]]
# Cumulative infections per state: observed points plus fitted red curve.
ggplot(ti, aes(as.Date(date), infection)) +
  facet_geo(~ state, grid="de_states_grid1") +
  geom_rect(aes(fill=state), xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf) +
  geom_point(shape=1) +
  geom_line(aes(as.Date(date), fitted), color="red") +
  scale_x_date(date_labels="%-m/%-d") +
  scale_fill_manual("legend", values=bg) +
  labs(
    title = sprintf(
      "COVID-19 infections in Germany (as of %s)", tail(row.names(t2), 1)),
    caption = "Data Source: Wikipedia based on Robert Koch Institute",
    x = "Date",
    y = "Infections / 100,000 pop.") +
  geom_text(
    x=-Inf, y=Inf, aes(label=sprintf("%3.0f", t2[nrow(t2), state])),
    vjust=1.2, hjust=-0.2, size=5, check_overlap=TRUE) +
  theme(legend.position="none")
ggsave("cov19i.png", width=12, height=9, dpi=72)
# 7-day rolling new infections (lagged difference over 7 rows).
t2w <- as.data.frame(lapply(t2, diff, 7))
row.names(t2w) <- row.names(t2)[-(1:7)]
tiw <- cbind(
  date=rep(row.names(t2w), ncol(t2w)), stack(t2w)[, c(2, 1)])
colnames(tiw) <- c("date", "state", "infection")
# Weekly new infections per state; gray line marks the 50-per-100k threshold.
ggplot(tiw, aes(as.Date(date), infection)) +
  facet_geo(~ state, grid="de_states_grid1") +
  geom_rect(aes(fill=state), xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf) +
  geom_hline(yintercept=50, color="gray") +
  geom_point(shape=1) +
  scale_x_date(date_labels="%-m/%-d") +
  scale_fill_manual("legend", values=rep("#F0F0F0", ncol(t2w))) +
  labs(
    title = sprintf(
      "COVID-19 last 7 days new infections in Germany (as of %s)", tail(row.names(t2), 1)),
    caption = "Data Source: Wikipedia based on Robert Koch Institute",
    x = "Date",
    y = "Last 7 days new infections / 100,000 pop.") +
  geom_text(
    x=Inf, y=Inf, aes(label=sprintf("%4.1f", t2w[nrow(t2w), state])),
    vjust=1.2, hjust=1, size=5, check_overlap=TRUE) +
  theme(legend.position="none")
ggsave("cov19iw.png", width=12, height=9, dpi=72)
# Same long-format construction for the deaths series.
td <- cbind(
  date=rep(row.names(tbl[[4]]), ncol(tbl[[4]])),
  stack(tbl[[4]])[, c(2, 1)], stack(tbl[[6]])[, 1])
colnames(td) <- c("date", "state", "death", "fitted")
t4 <- tbl[[4]]
# Cumulative deaths per state with fitted curve.
ggplot(td, aes(as.Date(date), death)) +
  facet_geo(~ state, grid="de_states_grid1") +
  geom_rect(aes(fill=state), xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf) +
  geom_point(shape=1) +
  geom_line(aes(as.Date(date), fitted), color="red") +
  scale_x_date(date_labels="%-m/%-d") +
  scale_fill_manual("legend", values=bg) +
  labs(
    title = sprintf(
      "COVID-19 deaths in Germany (as of %s)", tail(row.names(t4), 1)),
    caption = "Data Source: Wikipedia based on Robert Koch Institute",
    x = "Date",
    y = "Deaths / 100,000 pop.") +
  geom_text(
    x=-Inf, y=Inf, aes(label=sprintf("%4.1f", t4[nrow(t4), state])),
    vjust=1.2, hjust=-0.2, size=5, check_overlap=TRUE) +
  theme(legend.position="none")
ggsave("cov19d.png", width=12, height=9, dpi=72)
# 7-day rolling new deaths.
t4w <- as.data.frame(lapply(t4, diff, 7))
row.names(t4w) <- row.names(t4)[-(1:7)]
tdw <- cbind(
  date=rep(row.names(t4w), ncol(t4w)), stack(t4w)[, c(2, 1)])
# NOTE(review): the value column is named "infection" although it holds death
# counts (copy-paste from the infections block); the aes() below uses the
# same name, so the plot is still consistent.
colnames(tdw) <- c("date", "state", "infection")
ggplot(tdw, aes(as.Date(date), infection)) +
  facet_geo(~ state, grid="de_states_grid1") +
  geom_rect(aes(fill=state), xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf) +
  geom_point(shape=1) +
  scale_x_date(date_labels="%-m/%-d", limits=range(as.Date(row.names(t2w)))) +
  scale_fill_manual("legend", values=rep("#F0F0F0", ncol(t2w))) +
  labs(
    title = sprintf(
      "COVID-19 last 7 days new deaths in Germany (as of %s)", tail(row.names(t2), 1)),
    caption = "Data Source: Wikipedia based on Robert Koch Institute",
    x = "Date",
    y = "Last 7 days new deaths / 100,000 pop.") +
  geom_text(
    x=Inf, y=Inf, aes(label=sprintf("%4.2f", t4w[nrow(t4w), state])),
    vjust=1.2, hjust=1, size=5, check_overlap=TRUE) +
  theme(legend.position="none")
ggsave("cov19dw.png", width=12, height=9, dpi=72)
# matplot-like graph: all states overlaid, colored east/west/Berlin.
ggplot(stack(t2), aes(rep(as.Date(row.names(t2)), ncol(t2)), y=values, group=ind)) +
  geom_line(aes(color=ew[ind])) +
  geom_text(
    data=stack(t2[nrow(t2),]),
    aes(x=as.Date(tail(row.names(t2), 1)), y=values, label=ind, color=ew[ind]),
    hjust=-0.2) +
  scale_color_manual(values=c(e="red", w="blue", b="purple")) +
  scale_x_date(date_labels="%-m/%-d") +
  labs(
    title = sprintf(
      "COVID-19 infections in Germany (as of %s)", rownames(t2)[nrow(t2)]),
    caption = "Data Source: Wikipedia based on Robert Koch Institute",
    x = "Date", y = "Infections / 100,000 pop.") +
  theme(legend.position="none")
# Same overlaid chart for the deaths series.
ggplot(stack(t4), aes(rep(as.Date(row.names(t4)), ncol(t4)), y=values, group=ind)) +
  geom_line(aes(color=ew[ind])) +
  geom_text(
    data=stack(t4[nrow(t4),]),
    aes(x=as.Date(tail(row.names(t4), 1)), y=values, label=ind, color=ew[ind]),
    hjust=-0.2) +
  scale_color_manual(values=c(e="red", w="blue", b="purple")) +
  scale_x_date(date_labels="%-m/%-d") +
  labs(
    title = sprintf(
      "COVID-19 deaths in Germany (as of %s)", rownames(t4)[nrow(t4)]),
    caption = "Data Source: Wikipedia based on Robert Koch Institute",
    x = "Date", y = "Deaths / 100,000 pop.") +
  theme(legend.position="none")
|
##
## Return the curvature of a quadratic fit to the given FPKM expression,
## normalized to the baseline level (mean of the first three FPKM values).
## Prints "NormCurv=<value> AdjR-Squared=<value>" and invisibly returns the
## normalized curvature, so callers can actually use the number.
##
curvature <- function(times, values) {
    # `fit` instead of `c`: the original assignment masked base::c().
    fit <- lm(values ~ times + I(times^2))
    # Quadratic coefficient scaled by the baseline expression level.
    norm_curv <- as.numeric(fit$coefficients[3]/mean(values[1:3]))
    print(paste("NormCurv=", norm_curv, "AdjR-Squared=", summary(fit)$adj.r.squared))
    invisible(norm_curv)
}
| /curvature.R | no_license | carnegie-dpb/bartonlab-modeling | R | false | false | 299 | r | ##
## Return the curvature of a quadratic fit to the given FPKM expression,
## normalized to the baseline level (mean of the first three FPKM values).
## Prints "NormCurv=<value> AdjR-Squared=<value>" and invisibly returns the
## normalized curvature, so callers can actually use the number.
curvature <- function(times, values) {
    # `fit` instead of `c`: the original assignment masked base::c().
    fit <- lm(values ~ times + I(times^2))
    # Quadratic coefficient scaled by the baseline expression level.
    norm_curv <- as.numeric(fit$coefficients[3]/mean(values[1:3]))
    print(paste("NormCurv=", norm_curv, "AdjR-Squared=", summary(fit)$adj.r.squared))
    invisible(norm_curv)
}
|
# Return the cube of `x`.
# NOTE(review): the second argument `n` is accepted but never used
# (apparently a course-exercise artifact).
cube <- function(x,n){
        x^3
}
# Lexical-scoping demonstration: the inner helper reads `z` from the
# environment of `f`, even though `z` is assigned after the helper is
# defined. f(x) therefore returns x + (x + 4).
f<-function(x){
        add_z <- function(y) {
                y + z
        }
        z <- 4
        x + add_z(x)
}
# Closure-based container for a numeric vector with a cached mean.
# Returns a list of accessors that share one enclosing environment:
#   set(value)    - replace the stored vector and invalidate the cache
#   get()         - return the stored vector
#   setmean(mean) - store a computed mean in the cache
#   getmean()     - return the cached mean, or NULL when not yet computed
makevector <- function(x = numeric()){
        cached_mean <- NULL
        set <- function(y){
                # `<<-` assigns in the enclosing environment, not the
                # local call frame, so the state survives between calls.
                x <<- y
                cached_mean <<- NULL
        }
        get <- function() x
        setmean <- function(mean) cached_mean <<- mean
        getmean <- function() cached_mean
        list(set = set, get = get, setmean = setmean, getmean = getmean)
}
cachemean <- function(x,...) { ## Return the mean of the vector wrapped by
                               ## `x` (a list built by makevector()),
                               ## reusing the cached value when available.
        m <- x$getmean()       ## fetch any previously cached mean
        if(!is.null(m)) {      ## cache hit: skip recomputation entirely
                message("getting cached data")
                return(m)
        }
        data <- x$get()        ## cache miss: pull the raw vector
        m <- mean(data,...)    ## compute the mean; extra args go to mean()
        x$setmean(m)           ## store it so later calls hit the cache
        m                      ## return the freshly computed mean
} | /week1week2/assorted.R | no_license | diliptmonson/beginnerfunctions | R | false | false | 2,608 | r | cube <- function(x,n){
x^3
}
# Lexical-scoping demonstration: the inner helper reads `z` from the
# environment of `f`, even though `z` is assigned after the helper is
# defined. f(x) therefore returns x + (x + 4).
f<-function(x){
        add_z <- function(y) {
                y + z
        }
        z <- 4
        x + add_z(x)
}
# Closure-based container for a numeric vector with a cached mean.
# Returns a list of accessors that share one enclosing environment:
#   set(value)    - replace the stored vector and invalidate the cache
#   get()         - return the stored vector
#   setmean(mean) - store a computed mean in the cache
#   getmean()     - return the cached mean, or NULL when not yet computed
makevector <- function(x = numeric()){
        cached_mean <- NULL
        set <- function(y){
                # `<<-` assigns in the enclosing environment, not the
                # local call frame, so the state survives between calls.
                x <<- y
                cached_mean <<- NULL
        }
        get <- function() x
        setmean <- function(mean) cached_mean <<- mean
        getmean <- function() cached_mean
        list(set = set, get = get, setmean = setmean, getmean = getmean)
}
cachemean <- function(x,...) { ## Return the mean of the vector wrapped by
                               ## `x` (a list built by makevector()),
                               ## reusing the cached value when available.
        m <- x$getmean()       ## fetch any previously cached mean
        if(!is.null(m)) {      ## cache hit: skip recomputation entirely
                message("getting cached data")
                return(m)
        }
        data <- x$get()        ## cache miss: pull the raw vector
        m <- mean(data,...)    ## compute the mean; extra args go to mean()
        x$setmean(m)           ## store it so later calls hit the cache
        m                      ## return the freshly computed mean
} |
####### NTP-MEM bivariate
source('functions.R')
library(mnormt)
oldw <- getOption("warn")
options(warn = -1)
d<-2
mu.xis <- 1
sigma2.xis <- 1
alphas <- c(0,0)
betas <- c(1,1)
sigma2.us <- 0.5
sigma2.es <- diag(c(0.25,0.25))
gammas <- 0.5
z=sample.MEM(n=100, alphas, betas, mu.xis, sigma2.xis, sigma2.us, sigma2.es, gammas) # generate a random sample
theta=EM.NTP_MEM(z) # estimation via EM algorithm
MI=MIapprox2(theta,z) # information matrix
ep=sqrt(diag(solve(MI))) # standard error of the parameters
| /Example_simulation.R | no_license | ClecioFerreira/TPN-MEM | R | false | false | 545 | r |
####### NTP-MEM bivariate
source('functions.R')
library(mnormt)
oldw <- getOption("warn")
options(warn = -1)
d<-2
mu.xis <- 1
sigma2.xis <- 1
alphas <- c(0,0)
betas <- c(1,1)
sigma2.us <- 0.5
sigma2.es <- diag(c(0.25,0.25))
gammas <- 0.5
z=sample.MEM(n=100, alphas, betas, mu.xis, sigma2.xis, sigma2.us, sigma2.es, gammas) # generate a random sample
theta=EM.NTP_MEM(z) # estimation via EM algorithm
MI=MIapprox2(theta,z) # information matrix
ep=sqrt(diag(solve(MI))) # standard error of the parameters
|
## Plot Global Active Power for 2007-02-01..02 to plot2.png.
## Load the raw household power data.
household_power_consumption <- rio::import('household_power_consumption.txt')
HHPC_Data <- household_power_consumption
## Parse the Date column (stored as day/month/year text).
HHPC_Data$Date <- as.Date(HHPC_Data$Date, '%d/%m/%Y')
## Keep only the observations from the two target days.
library(dplyr)
start <- as.Date('01-02-2007', '%d-%m-%Y')
end <- as.Date('02-02-2007', '%d-%m-%Y')
HHPC <- HHPC_Data %>% filter(Date >= start, Date <= end)
## Combine Date and Time into a single timestamp column.
HHPC$DateTime <- strptime(paste(HHPC$Date, HHPC$Time), "%Y-%m-%d %H:%M:%S")
## Draw the line chart into a 480x480 PNG device.
png(file = "plot2.png", width = 480, height = 480)
plot(HHPC$DateTime, HHPC$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = '')
dev.off()
| /EDA Assignment 1/plot2.R | no_license | s00singla/ExData_Plotting1 | R | false | false | 711 | r | ##import data
household_power_consumption <- rio::import('household_power_consumption.txt')
HHPC_Data <- household_power_consumption
## Parse the Date column (stored as day/month/year text).
HHPC_Data$Date <- as.Date(HHPC_Data$Date, '%d/%m/%Y')
## Keep only the observations from the two target days.
library(dplyr)
start <- as.Date('01-02-2007', '%d-%m-%Y')
end <- as.Date('02-02-2007', '%d-%m-%Y')
HHPC <- HHPC_Data %>% filter(Date >= start, Date <= end)
## Combine Date and Time into a single timestamp column.
HHPC$DateTime <- strptime(paste(HHPC$Date, HHPC$Time), "%Y-%m-%d %H:%M:%S")
## Draw the line chart into a 480x480 PNG device.
png(file = "plot2.png", width = 480, height = 480)
plot(HHPC$DateTime, HHPC$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = '')
dev.off()
|
# MEPS (Medical Expenditure Panel Survey) summary-table script:
# average number of medical events per person, by poverty status and
# age group, using the survey package's design-based estimators.
#
# NOTE(review): this is generated TEMPLATE code. The '.yy.', '.year.'
# and '.FYC..sas7bdat'-style tokens are placeholders that a code
# generator substitutes with a concrete year and file names; the script
# is not runnable as-is.
# NOTE(review): read_sas() comes from the 'haven' package, which is not
# loaded below -- confirm it is attached elsewhere or add library(haven).
# NOTE(review): funs() (used in mutate_at below) is deprecated in modern
# dplyr in favor of list(~ ...) / across().
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Adjust variance estimation for strata containing a single PSU
options(survey.lonely.psu="adjust")
# Load FYC file (full-year consolidated person-level file)
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
# Harmonize variable names across survey years
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Negative age codes are missing; AGELAST = most recent non-missing age
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
  mutate(agegrps = cut(AGELAST,
    breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
    labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
  mutate(agegrps_v2X = cut(AGELAST,
    breaks = c(-1, 17.5 ,64.5, Inf),
    labels = c("Under 18","18-64","65+"))) %>%
  mutate(agegrps_v3X = cut(AGELAST,
    breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
    labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29",
               "30-34", "35-44", "45-54", "55-64", "65+")))
# Poverty status
if(year == 1996)
  FYC <- FYC %>% rename(POVCAT96 = POVCAT)
FYC <- FYC %>%
  mutate(poverty = recode_factor(POVCAT.yy., .default = "Missing", .missing = "Missing",
    "1" = "Negative or poor",
    "2" = "Near-poor",
    "3" = "Low income",
    "4" = "Middle income",
    "5" = "High income"))
# Keep only needed variables from FYC
FYCsub <- FYC %>% select(poverty,agegrps,ind, DUPERSID, PERWT.yy.F, VARSTR, VARPSU)
# Load event files (prescriptions, dental, inpatient, ER, outpatient,
# office-based, home health)
RX <- read_sas('C:/MEPS/.RX..sas7bdat')
DVT <- read_sas('C:/MEPS/.DV..sas7bdat')
IPT <- read_sas('C:/MEPS/.IP..sas7bdat')
ERT <- read_sas('C:/MEPS/.ER..sas7bdat')
OPT <- read_sas('C:/MEPS/.OP..sas7bdat')
OBV <- read_sas('C:/MEPS/.OB..sas7bdat')
HHT <- read_sas('C:/MEPS/.HH..sas7bdat')
# Define sub-levels for office-based and outpatient
# To compute estimates for these sub-events, replace 'event' with 'event_v2X'
# in the 'svyby' statement below, when applicable
OBV <- OBV %>%
  mutate(event_v2X = recode_factor(
    SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OBD', '2' = 'OBO'))
OPT <- OPT %>%
  mutate(event_v2X = recode_factor(
    SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OPY', '2' = 'OPZ'))
# Stack events into one long table; derive combined payment-source columns
# (PR = private+TRICARE, OZ = all other sources)
stacked_events <- stack_events(RX, DVT, IPT, ERT, OPT, OBV, HHT,
  keep.vars = c('SEEDOC','event_v2X'))
stacked_events <- stacked_events %>%
  mutate(event = data,
    PR.yy.X = PV.yy.X + TR.yy.X,
    OZ.yy.X = OF.yy.X + SL.yy.X + OT.yy.X + OR.yy.X + OU.yy.X + WC.yy.X + VA.yy.X) %>%
  select(DUPERSID, event, event_v2X, SEEDOC,
    XP.yy.X, SF.yy.X, MR.yy.X, MD.yy.X, PR.yy.X, OZ.yy.X)
# Per-person event counts: ANY = all events, EXP = events with positive
# expenditure, remaining columns = events paid (in part) by each source
pers_events <- stacked_events %>%
  group_by(DUPERSID) %>%
  summarise(ANY = sum(XP.yy.X >= 0),
    EXP = sum(XP.yy.X > 0),
    SLF = sum(SF.yy.X > 0),
    MCR = sum(MR.yy.X > 0),
    MCD = sum(MD.yy.X > 0),
    PTR = sum(PR.yy.X > 0),
    OTZ = sum(OZ.yy.X > 0)) %>%
  ungroup
# Join back to the person file; persons with no events get zero counts
n_events <- full_join(pers_events,FYCsub,by='DUPERSID') %>%
  mutate_at(vars(ANY,EXP,SLF,MCR,MCD,PTR,OTZ),
    function(x) ifelse(is.na(x),0,x))
# Complex survey design (PSU, strata, person weights)
nEVTdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = n_events,
  nest = TRUE)
# Weighted mean number of events per person, by poverty x age group
results <- svyby(~ANY, FUN=svymean, by = ~poverty + agegrps, design = nEVTdsgn)
print(results)
| /mepstrends/hc_use/json/code/r/avgEVT__poverty__agegrps__.r | permissive | HHS-AHRQ/MEPS-summary-tables | R | false | false | 3,844 | r | # Install and load packages
# MEPS (Medical Expenditure Panel Survey) summary-table script:
# average number of medical events per person, by poverty status and
# age group, using the survey package's design-based estimators.
#
# NOTE(review): this is generated TEMPLATE code. The '.yy.', '.year.'
# and '.FYC..sas7bdat'-style tokens are placeholders that a code
# generator substitutes with a concrete year and file names; the script
# is not runnable as-is.
# NOTE(review): read_sas() comes from the 'haven' package, which is not
# loaded below -- confirm it is attached elsewhere or add library(haven).
# NOTE(review): funs() (used in mutate_at below) is deprecated in modern
# dplyr in favor of list(~ ...) / across().
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Adjust variance estimation for strata containing a single PSU
options(survey.lonely.psu="adjust")
# Load FYC file (full-year consolidated person-level file)
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
# Harmonize variable names across survey years
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Negative age codes are missing; AGELAST = most recent non-missing age
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
  mutate(agegrps = cut(AGELAST,
    breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
    labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
  mutate(agegrps_v2X = cut(AGELAST,
    breaks = c(-1, 17.5 ,64.5, Inf),
    labels = c("Under 18","18-64","65+"))) %>%
  mutate(agegrps_v3X = cut(AGELAST,
    breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
    labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29",
               "30-34", "35-44", "45-54", "55-64", "65+")))
# Poverty status
if(year == 1996)
  FYC <- FYC %>% rename(POVCAT96 = POVCAT)
FYC <- FYC %>%
  mutate(poverty = recode_factor(POVCAT.yy., .default = "Missing", .missing = "Missing",
    "1" = "Negative or poor",
    "2" = "Near-poor",
    "3" = "Low income",
    "4" = "Middle income",
    "5" = "High income"))
# Keep only needed variables from FYC
FYCsub <- FYC %>% select(poverty,agegrps,ind, DUPERSID, PERWT.yy.F, VARSTR, VARPSU)
# Load event files (prescriptions, dental, inpatient, ER, outpatient,
# office-based, home health)
RX <- read_sas('C:/MEPS/.RX..sas7bdat')
DVT <- read_sas('C:/MEPS/.DV..sas7bdat')
IPT <- read_sas('C:/MEPS/.IP..sas7bdat')
ERT <- read_sas('C:/MEPS/.ER..sas7bdat')
OPT <- read_sas('C:/MEPS/.OP..sas7bdat')
OBV <- read_sas('C:/MEPS/.OB..sas7bdat')
HHT <- read_sas('C:/MEPS/.HH..sas7bdat')
# Define sub-levels for office-based and outpatient
# To compute estimates for these sub-events, replace 'event' with 'event_v2X'
# in the 'svyby' statement below, when applicable
OBV <- OBV %>%
  mutate(event_v2X = recode_factor(
    SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OBD', '2' = 'OBO'))
OPT <- OPT %>%
  mutate(event_v2X = recode_factor(
    SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OPY', '2' = 'OPZ'))
# Stack events into one long table; derive combined payment-source columns
# (PR = private+TRICARE, OZ = all other sources)
stacked_events <- stack_events(RX, DVT, IPT, ERT, OPT, OBV, HHT,
  keep.vars = c('SEEDOC','event_v2X'))
stacked_events <- stacked_events %>%
  mutate(event = data,
    PR.yy.X = PV.yy.X + TR.yy.X,
    OZ.yy.X = OF.yy.X + SL.yy.X + OT.yy.X + OR.yy.X + OU.yy.X + WC.yy.X + VA.yy.X) %>%
  select(DUPERSID, event, event_v2X, SEEDOC,
    XP.yy.X, SF.yy.X, MR.yy.X, MD.yy.X, PR.yy.X, OZ.yy.X)
# Per-person event counts: ANY = all events, EXP = events with positive
# expenditure, remaining columns = events paid (in part) by each source
pers_events <- stacked_events %>%
  group_by(DUPERSID) %>%
  summarise(ANY = sum(XP.yy.X >= 0),
    EXP = sum(XP.yy.X > 0),
    SLF = sum(SF.yy.X > 0),
    MCR = sum(MR.yy.X > 0),
    MCD = sum(MD.yy.X > 0),
    PTR = sum(PR.yy.X > 0),
    OTZ = sum(OZ.yy.X > 0)) %>%
  ungroup
# Join back to the person file; persons with no events get zero counts
n_events <- full_join(pers_events,FYCsub,by='DUPERSID') %>%
  mutate_at(vars(ANY,EXP,SLF,MCR,MCD,PTR,OTZ),
    function(x) ifelse(is.na(x),0,x))
# Complex survey design (PSU, strata, person weights)
nEVTdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = n_events,
  nest = TRUE)
# Weighted mean number of events per person, by poverty x age group
results <- svyby(~ANY, FUN=svymean, by = ~poverty + agegrps, design = nEVTdsgn)
print(results)
|
#' Prune a fitted DStree (discrete-time survival tree)
#'
#' Evaluates every candidate subtree in the cp table of \code{tree}
#' under three criteria -- a penalized likelihood criterion (CRIT,
#' with leaf penalty weight \code{gamma}), the deviance (DEV), and an
#' integrated Brier score (BRIER) -- and returns the subtree that
#' minimizes the criterion named in \code{which}.
#'
#' NOTE(review): relies on package-internal helpers (\code{lik},
#' \code{brier}, \code{computeN}) and on rpart/survival machinery
#' defined elsewhere.
#'
#' @param tree a fitted DStree object
#' @param data optional data set on which to evaluate the criteria;
#'   if missing, the responses stored in the tree are used
#' @param gamma penalty weight per leaf and level in CRIT (default 2)
#' @param which criterion to minimize: "CRIT", "DEV" or "BRIER"
#'   (default "BRIER")
#' @param ... currently unused
#' @return a list with the number of splits per candidate subtree, the
#'   three criterion vectors, and the pruned fit (class "DStree")
#'@export
prune.DStree <- function(tree,data,gamma=2,which,...){
  fitr <- tree
  # Temporarily treat the tree as an rpart object so rpart's
  # prune/predict machinery can be reused.
  class(fitr) <-"rpart"
  if (missing(which)){
    which <- "BRIER"
  }
  # Survival times and states come either from the stored responses or
  # from the supplied data (where states are shifted to 1/2 coding).
  if(missing(data)){
  time <- as.numeric(fitr$y[,1])
  states <- fitr$y[,2]
  }else{
    time <- as.numeric(data[,tree$names[1]])
    states <- data[,tree$names[2]]+1
  }
  npred <- length(time)
  z <- sort(unique(time))
  # Candidate cp values; the first row (unsplit tree) is dropped
  cptable<-fitr$cptable[-1,]
  cp<-cptable[,1]
  n.cp <- length(cp)
  n.lev <- length(fitr$parms[[1]])
  # Flip the state coding for the censoring indicator of the KM curve
  states2 <- abs(states-3)
  # Kaplan-Meier survival estimate, used as weights in the Brier score
  KM<-survfit(Surv(as.numeric(time),states2)~1,data.frame(time,states2))$surv
  DEV<-CRIT<-BRIER<-rep(NA,n.cp)
  # Temporarily store node row names in yval so predict() returns node ids;
  # the original yval is restored after the loop.
  fitr$frame$yval <- as.integer(rownames(fitr$frame))
  # Brier score is integrated over times up to the median
  z <- min(time):round(median(time))
  # Shift zero-based time scales to start at 1
  if(min(z)==0){
    z<-z+1
    time<-time+1
  }
  # Evaluate each candidate subtree
  for(i in 1:n.cp){
    prunedfitr <- prune.rpart(fitr,cp=cp[i])
    Pred <- predict(prunedfitr,data,type="matrix")
    nodes <- predict(prunedfitr,data,type="vector")
    # Per-node counts of censored/uncensored observations at each level
    N <- by(cbind(time,states),nodes,FUN=computeN,lev=fitr$parms[[1]])
    unique.nodes <- attributes(N)$dimnames$INDICES
    S <- subset(prunedfitr$frame,var=="<leaf>")
    # Align leaf parameter estimates (hazard pi and survival S columns
    # of yval2) with the node order returned by by()
    S.ord <- S[match(unique.nodes,rownames(S)),]$yval2
    k <-length(unique.nodes)
    lik <- rep(0,k)
    # Per-leaf log-likelihood contribution
    for (j in 1:k){
      lik[j] <- lik(ncens=unlist(N[[j]][1]),
        nuncens = unlist(N[[j]][2]),pi=S.ord[j,1:(n.lev)],S=S.ord[j,(n.lev+1):(2*n.lev)])
    }
    CRIT[i] <- -2*sum(lik)+gamma*k*n.lev
    DEV[i] <- -2*sum(lik)
    # Brier score averaged over the evaluation grid z
    BRIER[i]<- sum(sapply(z,brier,S=Pred[,(n.lev+1):(2*n.lev)],
      time=time,states=states,KM=KM,npred=npred))/length(z)
    M <- cbind(CRIT,DEV,BRIER)
  }
  # Select the subtree minimizing the requested criterion (column name)
  ind <- which.min(M[,which])
  # Restore the original yval before the final prune
  fitr$frame$yval <- tree$frame$yval
  prunedfitr <- prune.rpart(fitr,cp=cp[ind])
  prunedfit <- prunedfitr
  class(prunedfit)<-"DStree"
  return(list(nsplit=as.numeric(cptable[,2]),CRIT=CRIT,DEV=DEV,BRIER=BRIER,prunedfit=prunedfit))
}
| /DStree/R/DStree.prune.R | no_license | akhikolla/InformationHouse | R | false | false | 2,016 | r | #'@export
#' Prune a fitted DStree (discrete-time survival tree)
#'
#' Evaluates every candidate subtree in the cp table of \code{tree}
#' under three criteria -- a penalized likelihood criterion (CRIT,
#' with leaf penalty weight \code{gamma}), the deviance (DEV), and an
#' integrated Brier score (BRIER) -- and returns the subtree that
#' minimizes the criterion named in \code{which}.
#'
#' NOTE(review): relies on package-internal helpers (\code{lik},
#' \code{brier}, \code{computeN}) and on rpart/survival machinery
#' defined elsewhere.
#'
#' @param tree a fitted DStree object
#' @param data optional data set on which to evaluate the criteria;
#'   if missing, the responses stored in the tree are used
#' @param gamma penalty weight per leaf and level in CRIT (default 2)
#' @param which criterion to minimize: "CRIT", "DEV" or "BRIER"
#'   (default "BRIER")
#' @param ... currently unused
#' @return a list with the number of splits per candidate subtree, the
#'   three criterion vectors, and the pruned fit (class "DStree")
prune.DStree <- function(tree,data,gamma=2,which,...){
  fitr <- tree
  # Temporarily treat the tree as an rpart object so rpart's
  # prune/predict machinery can be reused.
  class(fitr) <-"rpart"
  if (missing(which)){
    which <- "BRIER"
  }
  # Survival times and states come either from the stored responses or
  # from the supplied data (where states are shifted to 1/2 coding).
  if(missing(data)){
  time <- as.numeric(fitr$y[,1])
  states <- fitr$y[,2]
  }else{
    time <- as.numeric(data[,tree$names[1]])
    states <- data[,tree$names[2]]+1
  }
  npred <- length(time)
  z <- sort(unique(time))
  # Candidate cp values; the first row (unsplit tree) is dropped
  cptable<-fitr$cptable[-1,]
  cp<-cptable[,1]
  n.cp <- length(cp)
  n.lev <- length(fitr$parms[[1]])
  # Flip the state coding for the censoring indicator of the KM curve
  states2 <- abs(states-3)
  # Kaplan-Meier survival estimate, used as weights in the Brier score
  KM<-survfit(Surv(as.numeric(time),states2)~1,data.frame(time,states2))$surv
  DEV<-CRIT<-BRIER<-rep(NA,n.cp)
  # Temporarily store node row names in yval so predict() returns node ids;
  # the original yval is restored after the loop.
  fitr$frame$yval <- as.integer(rownames(fitr$frame))
  # Brier score is integrated over times up to the median
  z <- min(time):round(median(time))
  # Shift zero-based time scales to start at 1
  if(min(z)==0){
    z<-z+1
    time<-time+1
  }
  # Evaluate each candidate subtree
  for(i in 1:n.cp){
    prunedfitr <- prune.rpart(fitr,cp=cp[i])
    Pred <- predict(prunedfitr,data,type="matrix")
    nodes <- predict(prunedfitr,data,type="vector")
    # Per-node counts of censored/uncensored observations at each level
    N <- by(cbind(time,states),nodes,FUN=computeN,lev=fitr$parms[[1]])
    unique.nodes <- attributes(N)$dimnames$INDICES
    S <- subset(prunedfitr$frame,var=="<leaf>")
    # Align leaf parameter estimates (hazard pi and survival S columns
    # of yval2) with the node order returned by by()
    S.ord <- S[match(unique.nodes,rownames(S)),]$yval2
    k <-length(unique.nodes)
    lik <- rep(0,k)
    # Per-leaf log-likelihood contribution
    for (j in 1:k){
      lik[j] <- lik(ncens=unlist(N[[j]][1]),
        nuncens = unlist(N[[j]][2]),pi=S.ord[j,1:(n.lev)],S=S.ord[j,(n.lev+1):(2*n.lev)])
    }
    CRIT[i] <- -2*sum(lik)+gamma*k*n.lev
    DEV[i] <- -2*sum(lik)
    # Brier score averaged over the evaluation grid z
    BRIER[i]<- sum(sapply(z,brier,S=Pred[,(n.lev+1):(2*n.lev)],
      time=time,states=states,KM=KM,npred=npred))/length(z)
    M <- cbind(CRIT,DEV,BRIER)
  }
  # Select the subtree minimizing the requested criterion (column name)
  ind <- which.min(M[,which])
  # Restore the original yval before the final prune
  fitr$frame$yval <- tree$frame$yval
  prunedfitr <- prune.rpart(fitr,cp=cp[ind])
  prunedfit <- prunedfitr
  class(prunedfit)<-"DStree"
  return(list(nsplit=as.numeric(cptable[,2]),CRIT=CRIT,DEV=DEV,BRIER=BRIER,prunedfit=prunedfit))
}
|
\name{read.hyph.pat}
\alias{read.hyph.pat}
\title{Reading patgen-compatible hyphenation pattern files}
\usage{
read.hyph.pat(file, lang, fileEncoding = "UTF-8")
}
\arguments{
\item{file}{A character string with a valid path to a
file with hyphenation patterns (one pattern per line).}
\item{lang}{A character string, usually two letters
short, naming the language the patterns are meant to be
used with (e.g. "es" for Spanish).}
\item{fileEncoding}{A character string defining the
character encoding of the file to be read. Unless you
have a really good reason to do otherwise, your pattern
files should all be UTF-8 encoded.}
}
\value{
An object of class
\code{\link[koRpus:kRp.hyph.pat-class]{kRp.hyph.pat-class}}.
}
\description{
This function reads hyphenation pattern files, to be used
with \code{\link[koRpus:hyphen]{hyphen}}.
}
\details{
Hyphenation patterns that can be used are available from
CTAN[1]. But actually any file with only the patterns
themselves, one per line, should work.
The language designation is of no direct consequence here,
but if the resulting pattern object is to be used by other
functions in this package, it should resemble the
designation that is used for the same language there.
}
\examples{
\dontrun{
read.hyph.pat("~/patterns/hyph-en-us.pat.txt", lang="en_us")
}
}
\references{
[1]
\url{http://tug.ctan.org/tex-archive/language/hyph-utf8/tex/generic/hyph-utf8/patterns/txt/}
}
\seealso{
\code{\link[koRpus:hyphen]{hyphen}},
\code{\link[koRpus:manage.hyph.pat]{manage.hyph.pat}}
}
\keyword{hyphenation}
| /man/read.hyph.pat.Rd | no_license | Libardo1/koRpus | R | false | false | 1,563 | rd | \name{read.hyph.pat}
\alias{read.hyph.pat}
\title{Reading patgen-compatible hyphenation pattern files}
\usage{
read.hyph.pat(file, lang, fileEncoding = "UTF-8")
}
\arguments{
\item{file}{A character string with a valid path to a
file with hyphenation patterns (one pattern per line).}
\item{lang}{A character string, usually two letters
short, naming the language the patterns are meant to be
used with (e.g. "es" for Spanish).}
\item{fileEncoding}{A character string defining the
character encoding of the file to be read. Unless you
have a really good reason to do otherwise, your pattern
files should all be UTF-8 encoded.}
}
\value{
An object of class
\code{\link[koRpus:kRp.hyph.pat-class]{kRp.hyph.pat-class}}.
}
\description{
This function reads hyphenation pattern files, to be used
with \code{\link[koRpus:hyphen]{hyphen}}.
}
\details{
Hyphenation patterns that can be used are available from
CTAN[1]. But actually any file with only the patterns
themselves, one per line, should work.
The language designation is of no direct consequence here,
but if the resulting pattern object is to be used by other
functions in this package, it should resemble the
designation that is used for the same language there.
}
\examples{
\dontrun{
read.hyph.pat("~/patterns/hyph-en-us.pat.txt", lang="en_us")
}
}
\references{
[1]
\url{http://tug.ctan.org/tex-archive/language/hyph-utf8/tex/generic/hyph-utf8/patterns/txt/}
}
\seealso{
\code{\link[koRpus:hyphen]{hyphen}},
\code{\link[koRpus:manage.hyph.pat]{manage.hyph.pat}}
}
\keyword{hyphenation}
|
# Fuzzing fixture: a crafted argument list for the internal Rcpp routine
# mgss:::MVP_normalfactor_rcpp. A is a 5x9 matrix that is all zeros except
# for a single denormal value; left/right are 0L and x is empty.
testlist <- list(A = structure(c(9.97941197291525e-316, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 9L)), left = 0L, right = 0L, x = numeric(0))
# Invoke the internal function with the fixture and inspect the result
result <- do.call(mgss:::MVP_normalfactor_rcpp,testlist)
str(result) | /mgss/inst/testfiles/MVP_normalfactor_rcpp/AFL_MVP_normalfactor_rcpp/MVP_normalfactor_rcpp_valgrind_files/1615952284-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 318 | r | testlist <- list(A = structure(c(9.97941197291525e-316, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 9L)), left = 0L, right = 0L, x = numeric(0))
result <- do.call(mgss:::MVP_normalfactor_rcpp,testlist)
str(result) |
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/palatte-package.R
\name{triadic}
\alias{triadic}
\title{triadic}
\usage{
triadic(hex)
}
\arguments{
\item{hex}{The base color specified as hex}
}
\description{
This function returns the triadic colors for a given hex color
}
\examples{
triadic("#121314")
}
\keyword{triadic}
| /man/triadic.Rd | no_license | cran/ColorPalette | R | false | false | 362 | rd | % Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/palatte-package.R
\name{triadic}
\alias{triadic}
\title{triadic}
\usage{
triadic(hex)
}
\arguments{
\item{hex}{The base color specified as hex}
}
\description{
This function returns the triadic colors for a given hex color
}
\examples{
triadic("#121314")
}
\keyword{triadic}
|
# Fit an elastic-net model (alpha = 0.01, 10-fold CV on mean absolute
# error) to the soft-tissue training set, then append a summary of the
# fitted path to a log file.
library(glmnet)
mydata <- read.table("./TrainingSet/RF/soft_tissue.csv", header = TRUE, sep = ",")
# Column 1 is the response; columns 4..end are the predictors
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
# NOTE: 'glm' shadows stats::glm() for the rest of the session
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.01,
                 family = "gaussian", standardize = FALSE)
# capture.output() writes and closes the file in one step, so no sink()
# is left open if printing fails (the original sink()/print()/sink()
# sequence leaked the sink on error).
capture.output(print(glm$glmnet.fit),
               file = './Model/EN/Classifier/soft_tissue/soft_tissue_008.txt',
               append = TRUE)
| /Model/EN/Classifier/soft_tissue/soft_tissue_008.R | no_license | leon1003/QSMART | R | false | false | 366 | r | library(glmnet)
# Fit an elastic-net model (alpha = 0.01, 10-fold CV on mean absolute
# error) to the soft-tissue training set, then append a summary of the
# fitted path to a log file.
mydata <- read.table("./TrainingSet/RF/soft_tissue.csv", header = TRUE, sep = ",")
# Column 1 is the response; columns 4..end are the predictors
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
# NOTE: 'glm' shadows stats::glm() for the rest of the session
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.01,
                 family = "gaussian", standardize = FALSE)
# capture.output() writes and closes the file in one step, so no sink()
# is left open if printing fails (the original sink()/print()/sink()
# sequence leaked the sink on error).
capture.output(print(glm$glmnet.fit),
               file = './Model/EN/Classifier/soft_tissue/soft_tissue_008.txt',
               append = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Procedures.R
\name{dospflp1}
\alias{dospflp1}
\title{SupF(l+1|l) test}
\usage{
dospflp1(
y,
z,
x,
m,
eps,
eps1,
maxi,
fixb,
betaini,
printd,
prewhit,
robust,
hetdat,
hetvar
)
}
\arguments{
\item{y}{dependent variables in matrix form}
\item{z}{matrix of independent variables with coefficients are allowed to change across
regimes}
\item{x}{matrix of independent variables with coefficients constant across regimes}
\item{m}{maximum number of breaks}
\item{eps}{convergence criterion for iterative recursive computation}
\item{eps1}{trimming level}
\item{maxi}{maximum number of iterations}
\item{fixb}{option to use fixed initial input \eqn{\beta}. If \code{1},
the model will use values given in \code{betaini}. If \code{0}, betaini is skipped}
\item{betaini}{Initial \eqn{beta_0} to use in estimation}
\item{printd}{option to print results of iterations for partial change model}
\item{prewhit}{option to use AR(1) for prewhitening}
\item{robust, hetdat, hetvar}{options on error terms assumptions}
}
\value{
A list that contains following:
\itemize{
\item{supfl}{SupF(l+1|l) test statistics}
\item{cv}{Critical values for SupF(l+1|l) test}
\item{ndat}{New date (if available)} }
}
\description{
Computes the SupF(l+1|l) test procedure and returns
the supF(l+1|l) test statistic,
where the null hypothesis is that the maximum number of breaks is l
and the alternative hypothesis is l+1 breaks.
The l breaks under the null hypothesis are taken from the
global minimization. Also, new date (if available) and critical values based on
significant levels are returned for plotting and inference
}
| /man/dospflp1.Rd | no_license | Allisterh/StructuralBreak_BP2003 | R | false | true | 1,716 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Procedures.R
\name{dospflp1}
\alias{dospflp1}
\title{SupF(l+1|l) test}
\usage{
dospflp1(
y,
z,
x,
m,
eps,
eps1,
maxi,
fixb,
betaini,
printd,
prewhit,
robust,
hetdat,
hetvar
)
}
\arguments{
\item{y}{dependent variables in matrix form}
\item{z}{matrix of independent variables with coefficients are allowed to change across
regimes}
\item{x}{matrix of independent variables with coefficients constant across regimes}
\item{m}{maximum number of breaks}
\item{eps}{convergence criterion for iterative recursive computation}
\item{eps1}{trimming level}
\item{maxi}{maximum number of iterations}
\item{fixb}{option to use fixed initial input \eqn{\beta}. If \code{1},
the model will use values given in \code{betaini}. If \code{0}, betaini is skipped}
\item{betaini}{Initial \eqn{beta_0} to use in estimation}
\item{printd}{option to print results of iterations for partial change model}
\item{prewhit}{option to use AR(1) for prewhitening}
\item{robust, hetdat, hetvar}{options on error terms assumptions}
}
\value{
A list that contains following:
\itemize{
\item{supfl}{SupF(l+1|l) test statistics}
\item{cv}{Critical values for SupF(l+1|l) test}
\item{ndat}{New date (if available)} }
}
\description{
Computes the SupF(l+1|l) test procedure and returns
the supF(l+1|l) test statistic,
where the null hypothesis is that the maximum number of breaks is l
and the alternative hypothesis is l+1 breaks.
The l breaks under the null hypothesis are taken from the
global minimization. Also, new date (if available) and critical values based on
significant levels are returned for plotting and inference
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logging.R
\name{warn}
\alias{warn}
\title{Warning-level logging function.}
\usage{
warn(...)
}
\arguments{
\item{...}{One or more strings to be logged.}
}
\description{
Warning-level logging function.
}
| /man/warn.Rd | no_license | khughitt/EuPathDB | R | false | true | 281 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logging.R
\name{warn}
\alias{warn}
\title{Warning-level logging function.}
\usage{
warn(...)
}
\arguments{
\item{...}{One or more strings to be logged.}
}
\description{
Warning-level logging function.
}
|
# Plot 4: a 2x2 panel of household power-consumption graphs saved as PNG.
# dataProvider.R supplies the subset data frame (subHPC) together with its
# merged date-time vector (date_time).
source("dataProvider.R")
data_and_time <- give_me_the_data_and_time_dude()
power <- data_and_time$subHPC
dt <- data_and_time$date_time
# Open the PNG device and lay out a 2x2 grid of panels
png(file = "./data/plot4.png", bg = "white", width = 480, height = 480)
par(mfrow = c(2, 2))
# Top-left panel: global active power over time
plot(dt, power$Global_active_power, xlab = NA,
     ylab = "Global active power (kilowatts)", type = "l")
# Top-right panel: voltage over time
plot(dt, power$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# Bottom-left panel: the three sub-metering series overlaid, with legend
plot(dt, power$Sub_metering_1, xlab = NA,
     ylab = "Energy sub metering", type = "l")
lines(dt, power$Sub_metering_2, col = "red")
lines(dt, power$Sub_metering_3, col = "blue")
legend("topright",
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = 1)
# Bottom-right panel: global reactive power over time
plot(dt, power$Global_reactive_power, xlab = "datetime",
     ylab = "Global_reactive_power", type = "l")
dev.off()
| /plot4.R | no_license | dfjkahdfjkash/ExData_Plotting1 | R | false | false | 1,127 | r | # Get data
# Plot 4: a 2x2 panel of household power-consumption graphs saved as PNG.
# dataProvider.R supplies the subset data frame (subHPC) together with its
# merged date-time vector (date_time).
source("dataProvider.R")
data_and_time <- give_me_the_data_and_time_dude()
power <- data_and_time$subHPC
dt <- data_and_time$date_time
# Open the PNG device and lay out a 2x2 grid of panels
png(file = "./data/plot4.png", bg = "white", width = 480, height = 480)
par(mfrow = c(2, 2))
# Top-left panel: global active power over time
plot(dt, power$Global_active_power, xlab = NA,
     ylab = "Global active power (kilowatts)", type = "l")
# Top-right panel: voltage over time
plot(dt, power$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# Bottom-left panel: the three sub-metering series overlaid, with legend
plot(dt, power$Sub_metering_1, xlab = NA,
     ylab = "Energy sub metering", type = "l")
lines(dt, power$Sub_metering_2, col = "red")
lines(dt, power$Sub_metering_3, col = "blue")
legend("topright",
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = 1)
# Bottom-right panel: global reactive power over time
plot(dt, power$Global_reactive_power, xlab = "datetime",
     ylab = "Global_reactive_power", type = "l")
dev.off()
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/becca_plot2.R
\name{becca_plot2.default}
\alias{becca_plot2.default}
\alias{beccaplot2}
\title{Wrapper to becca_plot for non \code{\link{mapvizier}} objects}
\usage{
\method{becca_plot2}{default}(.data, school_name_column = "sch_abbr",
cohort_name_column = "cohort", academic_year_column = "map_year_academic",
grade_level_season_column = "grade_level_season",
measurement_scale_column = "measurementscale",
test_percentile_column = "percentile_2011_norms",
first_and_spring_only = TRUE, auto_justify_x = TRUE,
justify_widths = FALSE, justify_min = NA, justify_max = NA,
entry_grades = c(-0.7, 4.3), color_scheme = "KIPP Report Card",
facets = FALSE, facet_opts = FALSE, title_text = FALSE)
}
\description{
Wrapper to becca_plot for non \code{\link{mapvizier}} objects
}
| /man/becca_plot2.default.Rd | no_license | almartin82/MAP-visuals | R | false | false | 874 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/becca_plot2.R
\name{becca_plot2.default}
\alias{becca_plot2.default}
\alias{beccaplot2}
\title{Wrapper to becca_plot for non \code{\link{mapvizier}} objects}
\usage{
\method{becca_plot2}{default}(.data, school_name_column = "sch_abbr",
cohort_name_column = "cohort", academic_year_column = "map_year_academic",
grade_level_season_column = "grade_level_season",
measurement_scale_column = "measurementscale",
test_percentile_column = "percentile_2011_norms",
first_and_spring_only = TRUE, auto_justify_x = TRUE,
justify_widths = FALSE, justify_min = NA, justify_max = NA,
entry_grades = c(-0.7, 4.3), color_scheme = "KIPP Report Card",
facets = FALSE, facet_opts = FALSE, title_text = FALSE)
}
\description{
Wrapper to becca_plot for non \code{\link{mapvizier}} objects
}
|
# Fuzzing fixture: a crafted argument list for the internal routine
# multivariance:::match_rows. A is a 1x2 matrix; B is a mostly-zero
# 5x7 matrix with a single large entry.
testlist <- list(A = structure(c(2.17107980819812e+205, 4.6157839563816e-310 ), .Dim = 1:2), B = structure(c(2.19477802979261e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# Invoke the internal function with the fixture and inspect the result
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613125056-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 322 | r | testlist <- list(A = structure(c(2.17107980819812e+205, 4.6157839563816e-310 ), .Dim = 1:2), B = structure(c(2.19477802979261e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
#' Fit a Bradley-Terry model on game score data
#'
#' @param games data.frame with the following columns: game.id, home.id,
#'   away.id, home.score, away.score, neutral, ot (matched by output of
#'   scrape.game.results)
#' @return matrix of win probabilities, with rows and columns labeled by
#'   team: the entry in row i, column j gives the probability of team i
#'   beating team j.
#' @examples
#' prob = bradley.terry(games = games.men.2018)
#' @export
#' @author sspowers
bradley.terry <- function(games) {
  required_cols <- c("game.id", "home.id", "away.id", "home.score",
    "away.score", "neutral", "ot")
  # Validate input: must be a data.frame carrying every required column
  if (!("data.frame" %in% class(games)) |
      length(setdiff(required_cols, names(games))) > 0) {
    stop(paste("games much be 'data.frame' with columns", required_cols))
  }
  # Sparse design matrix for the Bradley-Terry model. Column 1 is the
  # home-advantage indicator (1 unless the game was at a neutral site);
  # each remaining column is one team, coded +1 at home and -1 away.
  n_games <- nrow(games)
  team_factor <- as.factor(c(games$home.id, games$away.id))
  design <- Matrix::sparseMatrix(
    i = c(which(games$neutral == 0), rep(1:n_games, 2)),
    j = c(rep(1, sum(games$neutral == 0)), 1 + as.numeric(team_factor)),
    x = c(rep(1, sum(games$neutral == 0)), rep(c(1, -1), each = n_games)))
  # Response: home score minus away score, zeroed out for overtime games
  score_diff <- as.numeric(games$home.score) - as.numeric(games$away.score)
  score_diff[games$ot != ""] <- 0
  # Ridge regression with lambda chosen to minimize cross-validation error
  cv_fit <- glmnet::cv.glmnet(design, score_diff, alpha = 0,
    standardize = FALSE, intercept = FALSE,
    lambda = exp(seq(5, -10, length = 100)))
  coefs <- stats::coef(cv_fit, s = 'lambda.min')[-1, 1]
  names(coefs) <- c("home", sort(unique(games$home.id)))
  # Residual standard deviation of the score differential
  resid_sd <- sqrt(mean((score_diff -
    stats::predict(cv_fit, design, s = 'lambda.min'))^2))
  # Estimated point spread for every possible matchup, labeled by team
  n_teams <- length(coefs) - 1
  spread <- coefs[-1] - matrix(coefs[-1], nrow = n_teams,
    ncol = n_teams, byrow = TRUE)
  rownames(spread) <- colnames(spread) <- names(coefs)[-1]
  # Translate point spreads into win probabilities
  stats::pnorm(spread, sd = resid_sd)
}
}
| /R/bradley.terry.R | no_license | HennenD/mRchmadness | R | false | false | 2,431 | r | #' Fit a Bradley-Terry model on game score data
#'
#' @param games data.frame with the following columns: game.id, home.id,
#' away.id, home.score, away.score, neutral, ot (matched by output of
#' scrape.game.results)
#' @return matrix of win probabilities, with rows and columns labeled by team.
#' Each entry gives the probability of the team corresponding to that row
#' beating the team corresponding to that column.
#' @examples
#' prob = bradley.terry(games = games.men.2018)
#' @export
#' @author sspowers
bradley.terry = function(games) {
columns = c("game.id", "home.id", "away.id", "home.score", "away.score",
"neutral", "ot")
# Check that input is data.frame with necessary columns
if (!("data.frame" %in% class(games)) |
length(setdiff(columns, names(games))) > 0) {
stop(paste("games much be 'data.frame' with columns", columns))
}
# Construct design matrix for Bradley-Terry model:
# First column: 0 if neutral location, 1 otherwise
# All other columns correspond to teams --- +1 in row means that team is home
# for corresponding game, -1 means that team is away.
x = Matrix::sparseMatrix(
i = c(which(games$neutral == 0), rep(1:nrow(games), 2)),
j = c(rep(1, sum(games$neutral == 0)),
1 + as.numeric(as.factor(c(games$home.id, games$away.id)))),
x = c(rep(1, sum(games$neutral == 0)), rep(c(1, -1), each = nrow(games))))
# Build response vector: home score minus away score (0 if game went overtime)
y = as.numeric(games$home.score) - as.numeric(games$away.score)
y[games$ot != ""] = 0
# Fit model via ridge regression
# Choose lambda to minimize cross-validation error
fit = glmnet::cv.glmnet(x, y, alpha = 0, standardize = FALSE,
intercept = FALSE, lambda = exp(seq(5, -10, length = 100)))
beta = stats::coef(fit, s = 'lambda.min')[-1, 1]
names(beta) = c("home", sort(unique(games$home.id)))
# Estimate variance in score differential
sigma = sqrt(mean((y - stats::predict(fit, x, s = 'lambda.min'))^2))
# Get estimated point spread for each possible matchup
point.spread.matrix = beta[-1] - matrix(beta[-1], nrow = length(beta) - 1,
ncol = length(beta) - 1, byrow = TRUE)
# Name the rows and columns of the matrix according to corresponding team
rownames(point.spread.matrix) = colnames(point.spread.matrix) =
names(beta)[-1]
# Convert point spreads into probability of winning
stats::pnorm(point.spread.matrix, sd = sigma)
}
|
# Pull a tracking spreadsheet from Google Sheets as CSV and compute
# running totals of minutes and goals, blanking out future-dated rows.
library(RCurl)  # library() errors loudly if RCurl is missing; require() would not
options(RCurlOptions = list(cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")))
myLink<- "https://docs.google.com/spreadsheets/d/1ckfJLebzPYhJCp595aUn7IXkbDBBB21kZfGREw0l5PE/export?&format=csv"
myCsv <- getURL(myLink)
df <- read.csv(textConnection(myCsv))
df$date <- as.Date(df$Date, "%m/%d/%Y")
# Running totals via cumsum() over NA-zeroed values. This replaces the
# original O(n^2) loops that re-summed rows 1..i with na.rm = TRUE on
# every iteration; treating NA as 0 gives the identical result.
df$tot <- cumsum(ifelse(is.na(df$Minutes), 0, df$Minutes))
df$tot.goal <- cumsum(ifelse(is.na(df$Goal), 0, df$Goal))
# Blank out the running minutes total for rows dated in the future
df$tot[which(df$date > Sys.Date())] <- NA
df$tot[which(df$date > Sys.Date())] <- NA | /htmlColoma/helper.R | no_license | joebrew/ShinyApps | R | false | false | 689 | r | require(RCurl)
# Pull a tracking spreadsheet from Google Sheets as CSV and compute
# running totals of minutes and goals, blanking out future-dated rows.
options(RCurlOptions = list(cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")))
myLink<- "https://docs.google.com/spreadsheets/d/1ckfJLebzPYhJCp595aUn7IXkbDBBB21kZfGREw0l5PE/export?&format=csv"
myCsv <- getURL(myLink)
df <- read.csv(textConnection(myCsv))
df$date <- as.Date(df$Date, "%m/%d/%Y")
# Running totals via cumsum() over NA-zeroed values. This replaces the
# original O(n^2) loops that re-summed rows 1..i with na.rm = TRUE on
# every iteration; treating NA as 0 gives the identical result.
df$tot <- cumsum(ifelse(is.na(df$Minutes), 0, df$Minutes))
df$tot.goal <- cumsum(ifelse(is.na(df$Goal), 0, df$Goal))
# Blank out the running minutes total for rows dated in the future
df$tot[which(df$date > Sys.Date())] <- NA
# Wine classification example: fit a random forest to the UCI wine data,
# chart per-class variable importance, and eyeball two raw predictors.
library("randomForest")
library(ggplot2)

# Download the UCI wine dataset (no header row; class label in column 1).
vinos <- 'http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data'
vinos <- read.table(file = url(vinos), header = FALSE, sep = ",")

columnas <- c(
  'clase', 'alcohol', 'malic_acid', 'ash', 'ash_alcalinidad', 'magnesio',
  'total_phenols', 'flavanoids', 'nonflavanoid_phenols', 'proanthocyanins',
  'color', 'hue', 'od280_od315', 'proline'
)
names(vinos) <- columnas
vinos$clase <- as.factor(vinos$clase)

# Reproducible 50/50 split into held-out (test) and training rows.
set.seed(31)
muestra <- sort(sample(x = 1:nrow(vinos), replace = FALSE, size = nrow(vinos)/2))
muestra_2 <- setdiff(1:nrow(vinos), muestra)
test <- vinos[muestra, ]
train <- vinos[muestra_2, ]

# Fit the forest; importance = TRUE stores per-class importance columns.
randfor <- randomForest(clase ~ ., data = train, importance = TRUE, do.trace = 100)

# One bar chart per class: columns 1..3 of $importance hold the
# class-specific importance scores, with one row per predictor.
clase1_importance <- data.frame(feature = rownames(randfor$importance),
                                importance = randfor$importance[, 1])
ggplot(clase1_importance, aes(x = feature, y = importance)) + geom_bar(stat = "identity")
clase2_importance <- data.frame(feature = rownames(randfor$importance),
                                importance = randfor$importance[, 2])
ggplot(clase2_importance, aes(x = feature, y = importance)) + geom_bar(stat = "identity")
clase3_importance <- data.frame(feature = rownames(randfor$importance),
                                importance = randfor$importance[, 3])
ggplot(clase3_importance, aes(x = feature, y = importance)) + geom_bar(stat = "identity")

# Raw predictor distributions split by class.
boxplot(vinos$color ~ vinos$clase, main="Color segun la clase")
boxplot(vinos$alcohol ~ vinos$clase, main="Alcohol segun la clase")
ggplot(vinos, aes(x = alcohol, y = color, colour = clase)) + geom_point()
| /Sesion-08/Sesion-08/Ejemplo-03/Ejemplo-03.R | no_license | abrownrb/DA_R_BEDU_2 | R | false | false | 1,653 | r |
# Wine classification example: fit a random forest to the UCI wine data,
# chart per-class variable importance, and inspect two raw predictors.
library("randomForest")
library(ggplot2)
# Download the UCI wine dataset (no header row; class label in column 1).
vinos <- 'http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data'
vinos <- read.table(file=url(vinos), header=FALSE, sep=",")
# Human-readable column names for the 13 predictors plus the class label.
columnas <- c('clase',
'alcohol',
'malic_acid',
'ash',
'ash_alcalinidad',
'magnesio',
'total_phenols',
'flavanoids',
'nonflavanoid_phenols',
'proanthocyanins',
'color',
'hue',
'od280_od315',
'proline')
names(vinos) <- columnas
vinos$clase <- as.factor(vinos$clase)
# Reproducible 50/50 split: 'muestra' indexes the held-out rows,
# its complement 'muestra_2' the training rows.
set.seed(31)
muestra <- sort(sample(x = 1:nrow(vinos), replace = FALSE, size = nrow(vinos)/2))
muestra_2 <- setdiff(1:nrow(vinos), muestra)
test <- vinos[muestra, ]
train <- vinos[muestra_2, ]
# Fit the forest; importance=TRUE stores per-class importance columns.
randfor <- randomForest(clase ~ ., data=train, importance=TRUE, do.trace=100)
# Bar chart of variable importance for each class (columns 1..3 of
# $importance are the class-specific scores, one row per predictor).
clase1_importance <- data.frame(feature=names(randfor$importance[,1]), importance=randfor$importance[,1])
ggplot(clase1_importance, aes(x=feature, y=importance)) + geom_bar(stat="identity")
clase2_importance <- data.frame(feature=names(randfor$importance[,2]), importance=randfor$importance[,2])
ggplot(clase2_importance, aes(x=feature, y=importance)) + geom_bar(stat="identity")
clase3_importance <- data.frame(feature=names(randfor$importance[,3]), importance=randfor$importance[,3])
ggplot(clase3_importance, aes(x=feature, y=importance)) + geom_bar(stat="identity")
# Raw predictor distributions split by class.
boxplot(vinos$color ~ vinos$clase, main="Color segun la clase")
boxplot(vinos$alcohol ~ vinos$clase, main="Alcohol segun la clase")
ggplot(vinos, aes(x=alcohol, y=color, colour=clase)) + geom_point()
|
source("./global.R")
# Server Code -------------------------------------------------------------
server <- function(input, output,session) {
#Render ReadMe UI
rmdfiles <- c("MathModel.Rmd","GettingStarted.Rmd")
sapply(rmdfiles, knit, quiet = T)
output$markdownRM <- renderUI({
fluidPage(
withMathJax(HTML(markdown::markdownToHTML(knit("MathModel.Rmd", quiet = TRUE), fragment.only=TRUE,title="Frack Off",header="Mathematical Model for Anaerobic Digestion")))
)
})
#Render GettingStarted UI
output$markdownGS <- renderUI({
fluidPage(
withMathJax(HTML(markdown::markdownToHTML(knit("GettingStarted.Rmd", quiet = TRUE), fragment.only=TRUE,title="Frack Off",header="Getting Started")))
)
})
# Compiled mrgsolve ODE model for the anaerobic-digestion system.
mod <- mrgsolve::mread_cache("AnaerobicDigestionShinyV3.cpp")
# Kinetic Data Import From Bacteria_kinetics.R
# The fit_* objects below are regression fits assumed to be created by
# Bacteria_kinetics.R (sourced via global.R) -- TODO confirm. Each fit's
# coefficients are unpacked into scalars: an intercept plus three terms
# (a, b, c) per bacteria group / Michaelis-Menten parameter. The same
# coefficient values are re-extracted inside the pars() reactive and
# passed to the model as parameters.
# Acetogens
# Vmax
coef_Aceto_vmax <- coef(fit_Aceto_vmax)
Intercept_Aceto_vmax <- as.numeric(coef_Aceto_vmax[1])
a_Aceto_vmax <- as.numeric(coef_Aceto_vmax[2])
b_Aceto_vmax <- as.numeric(coef_Aceto_vmax[3])
c_Aceto_vmax <- as.numeric(coef_Aceto_vmax[4])
# Km
coef_Aceto_km <- coef(fit_Aceto_km)
Intercept_Aceto_km <- as.numeric(coef_Aceto_km[1])
a_Aceto_km <- as.numeric(coef_Aceto_km[2])
b_Aceto_km <- as.numeric(coef_Aceto_km[3])
c_Aceto_km <- as.numeric(coef_Aceto_km[4])
# Methanogens
# Vmax
coef_Meth_vmax <- coef(fit_Meth_vmax)
Intercept_Meth_vmax <- as.numeric(coef_Meth_vmax[1])
a_Meth_vmax <- as.numeric(coef_Meth_vmax[2])
b_Meth_vmax <- as.numeric(coef_Meth_vmax[3])
c_Meth_vmax <- as.numeric(coef_Meth_vmax[4])
# Km
coef_Meth_km <- coef(fit_Meth_km)
Intercept_Meth_km <- as.numeric(coef_Meth_km[1])
a_Meth_km <- as.numeric(coef_Meth_km[2])
b_Meth_km <- as.numeric(coef_Meth_km[3])
c_Meth_km <- as.numeric(coef_Meth_km[4])
# Reactive: sample a PEG oligomer-length distribution around the mean
# molecular weight requested in the UI. Draws 5000 values from a
# skew-normal (sn::rsn), truncates to integer repeat counts, and keeps
# only counts strictly between 4 and 10 (i.e. PEG-5 .. PEG-9).
PegDist <- reactive({
shiny::req(input[["PegMeanMW"]])
meanVal <- as.numeric(input[["PegMeanMW"]])
# Convert mean MW to a mean repeat count via MW = 44*n + 18.
meanVal2 <- (meanVal-18)/44
# NOTE(review): the 1.267 location inflation, omega = -1.3 and alpha = 10
# skew look like empirical tuning constants; their origin is not visible
# here -- confirm before changing.
test <- sn::rsn(5000, (meanVal2*1.267), -1.3, alpha=10) %>% as.vector() %>% as.integer()
test <- test[test>4 & test<10]
test
})
# Render a density plot of the sampled PEG molecular-weight distribution,
# with a dashed vertical line at the realized mean MW.
output$PegPlot <- renderPlot({
test <- PegDist() %>% as.vector() %>% as.integer()
# NOTE(review): %>% binds tighter than +, so this parses as
# test*44 + as.integer(as.vector(18)); numerically still 44*n + 18.
test2 <- test*44+18 %>% as.vector() %>% as.integer()
df <- data.frame(test2,test); names(df) <- c("MW","N")
df$N <- df$N %>% as.character() %>% as.factor()
# x-axis tick marks at the MWs of PEG-5 .. PEG-9 (44*n + 18).
breaks <- c(238, 282, 326, 370, 414)
#Plot
hist2 <- ggplot(df,aes(x=MW)) + labs(x="Molecular Weight",y="Density",caption = paste0("Mean MW: ",round(mean(test2),2))) +
theme(text=element_text(family="Times New Roman", face="bold", size=14),
plot.caption = element_text(color = "red", face = "italic",size=14)) + expand_limits(x = c(min(test2), max(test2))) +
geom_vline(aes(xintercept=mean(test2)),color="blue", linetype="dashed", size=1) + geom_density() +
scale_x_continuous(breaks=breaks)
hist2
})
# Reactive: assemble the full mrgsolve parameter set from the UI inputs.
# UI scaling conventions: bacteria scale factors are divided by 1000 and
# weight percents by 100 before being handed to the model; the PEG-5..PEG-9
# mole fractions are derived from the sampled distribution in PegDist().
pars <- reactive({
#invalidateLater(1000, session)
shiny::req(PegDist())
shiny::req(input[["Bact_ScaleFact_Aceto"]])
shiny::req(input[["Bact_ScaleFact_Acido"]])
shiny::req(input[["Bact_ScaleFact_Meth"]])
shiny::req(input[["Bact_ScaleFact_Bact"]])
shiny::req(input[["WT_Perc_Guar_IN"]])
shiny::req(input[["WT_Perc_PEG_IN"]])
shiny::req(input[["WT_Perc_MeOL_IN"]])
shiny::req(input[["WT_Perc_ISO_IN"]])
shiny::req(input[["wells"]])
shiny::req(input[["Temp"]])
shiny::req(input[["Head_Space_VolRatio"]])
shiny::req(input[["TempSlope"]])
shiny::req(input[["DecayRate"]])
test <- PegDist() %>% as.vector() %>% as.numeric()
# NOTE(review): %>% binds tighter than +, so this parses as
# test*44 + as.numeric(as.vector(18)); numerically still 44*n + 18.
test2 <- test*44+18 %>% as.vector() %>% as.numeric()
# Bin the repeat counts (5..9) with unit-width bins; the resulting
# densities are the PEG-5..PEG-9 mole fractions (yA[1] = PEG-5, ...).
hist1 <- graphics::hist(test,breaks=4:9,plot=F)
yA <- round(hist1$density,3); MW_PEG_In <- mean(test2)
# Build the parameter object consumed by the out() simulation reactive.
# The Intercept/a/b/c values re-extract the regression coefficients
# unpacked at the top of server().
param(Bact_ScaleFact_Aceto = (as.numeric(input[["Bact_ScaleFact_Aceto"]])/1000),
Bact_ScaleFact_Acido = (as.numeric(input[["Bact_ScaleFact_Acido"]])/1000),
Bact_ScaleFact_Meth = (as.numeric(input[["Bact_ScaleFact_Meth"]])/1000),
Bact_ScaleFact_Bact = (as.numeric(input[["Bact_ScaleFact_Bact"]])/1000),
WT_Perc_Guar_IN = (as.numeric(input[["WT_Perc_Guar_IN"]]))/100,
WT_Perc_PEG_IN = (as.numeric(input[["WT_Perc_PEG_IN"]]))/100,
WT_Perc_MeOL_IN = (as.numeric(input[["WT_Perc_MeOL_IN"]]))/100,
WT_Perc_ISO_IN = (as.numeric(input[["WT_Perc_ISO_IN"]]))/100,
wells = as.numeric(input[["wells"]]),
Temp = as.numeric(input[["Temp"]]),
TempSlope = as.numeric(input[["TempSlope"]]),
DecayRate = as.numeric(input[["DecayRate"]]),
Head_Space_VolRatio = as.numeric(input[["Head_Space_VolRatio"]]),
molFracPEG9 = yA[5], molFracPEG8 = yA[4], molFracPEG7 = yA[3],
molFracPEG6 = yA[2], molFracPEG5 = yA[1], MW_PEG_In = MW_PEG_In,
Intercept_Aceto_vmax=as.numeric(coef_Aceto_vmax[1]),a_Aceto_vmax=as.numeric(coef_Aceto_vmax[2]),
b_Aceto_vmax=as.numeric(coef_Aceto_vmax[3]),c_Aceto_vmax=as.numeric(coef_Aceto_vmax[4]),
Intercept_Aceto_km=as.numeric(coef_Aceto_km[1]),a_Aceto_km=as.numeric(coef_Aceto_km[2]),
b_Aceto_km=as.numeric(coef_Aceto_km[3]),c_Aceto_km=as.numeric(coef_Aceto_km[4]),
Intercept_Meth_vmax=as.numeric(coef_Meth_vmax[1]),a_Meth_vmax=as.numeric(coef_Meth_vmax[2]),
b_Meth_vmax=as.numeric(coef_Meth_vmax[3]),c_Meth_vmax=as.numeric(coef_Meth_vmax[4]),
Intercept_Meth_km=as.numeric(coef_Meth_km[1]),a_Meth_km=as.numeric(coef_Meth_km[2]),
b_Meth_km=as.numeric(coef_Meth_km[3]),c_Meth_km=as.numeric(coef_Meth_km[4])
)
})
# Reactive: map the human-readable sensitivity-parameter label chosen in
# the UI to the corresponding model parameter name.
# Returns NULL when the simulation type is "Normal" (no parameter sweep);
# otherwise returns a single character string.
SensParam <- reactive({
shiny::req(input[["simType"]])
simType <- try({as.character(input[["simType"]])},silent = F)
if(simType!="Normal"){
shiny::req(input[["SensParam"]])
# Named lookup table (UI label -> model parameter) replaces the former
# if/else chain, which also errored with an obscure "object not found"
# message when given an unrecognized label.
paramLookup <- c(
"Acidogen Conc." = "Bact_ScaleFact_Acido",
"Acetogen Conc." = "Bact_ScaleFact_Aceto",
"Methanogen Conc." = "Bact_ScaleFact_Meth",
"Bacteroid Conc." = "Bact_ScaleFact_Bact",
"Head Space Ratio" = "Head_Space_VolRatio",
"Temperature" = "Temp",
"Number of Wells" = "wells",
"WT % of Guar Gum" = "WT_Perc_Guar_IN",
"WT % of PEG-400" = "WT_Perc_PEG_IN",
"WT % of Methanol" = "WT_Perc_MeOL_IN",
"WT % of Isopropanol" = "WT_Perc_ISO_IN",
"Bacteria Decay Rate" = "DecayRate"
)
SensParam <- as.character(input[["SensParam"]])
if(!SensParam %in% names(paramLookup)){
stop("Unknown sensitivity parameter: ", SensParam)
}
# [[ ]] drops the name, returning a bare character scalar.
return(paramLookup[[SensParam]])
}else{
return(NULL)
}
})
# Reactive: the vector of parameter values to sweep in a sensitivity
# analysis -- the UI's base value plus offsets of +/- SensRange percent,
# sorted ascending. Values are converted back to model units first.
SensRangeR <- reactive({
shiny::req(input[["simType"]])
simType <- try({as.character(input[["simType"]])},silent = F)
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
shiny::req(input[["SensRange"]])
SensParam <- SensParam() %>% as.character()
shiny::req(input[[SensParam]])
MedVal <- as.numeric(input[[SensParam]])
SensRange <- as.numeric(input[["SensRange"]])/100
# Undo the UI display scaling (same conventions as the pars() reactive):
# weight percents entered as % -> fraction; bacteria scale factors /1000.
if(SensParam=="WT_Perc_Guar_IN"|SensParam=="WT_Perc_PEG_IN"|SensParam=="WT_Perc_MeOL_IN"|SensParam=="WT_Perc_ISO_IN"){
MedVal <- MedVal/100
}else if(SensParam=="Bact_ScaleFact_Acido"|SensParam=="Bact_ScaleFact_Aceto"|SensParam=="Bact_ScaleFact_Meth"|SensParam=="Bact_ScaleFact_Bact"){
MedVal <- MedVal/1000
}else{
MedVal <- MedVal
}
# NOTE(review): this assumes input$SensRange can hold one or more percent
# offsets (e.g. a range slider), each producing MedVal*(1 + offset);
# with a single positive offset only the base and +offset values result.
SensVals <- sort(c(MedVal,SensRange*MedVal+MedVal))
SensVals
}
})
# Reactive: between-individual variability matrix for the bacteria kinetic
# parameters. Takes the model's Bact_kinetics omega block and overwrites
# every diagonal entry with the user-chosen CV (%) converted to a variance.
omega_Kinetics <- reactive({
shiny::req(input[["kinetic_var"]])
# CV% from the UI -> variance: (cv/100)^2.
kinetic_Var <- (as.numeric(input[["kinetic_var"]])/100)^2
omegaMatrix <- omat(mod)@data$Bact_kinetics %>% as.matrix()
# Labels of the first omega block, used to re-name the matrix rows.
omegaNames <- unlist(omat(mod)@labels[[1]]) %>% as.character()
diag(omegaMatrix) <- kinetic_Var
row.names(omegaMatrix) <- omegaNames
return(omegaMatrix)
})
# Reactive: between-individual variability matrix for the bacteria yield
# parameters. Copies the model's Bact_yields omega block, replaces its
# diagonal with the user-chosen CV (%) converted to a variance, and
# restores the block's row labels.
omega_Yields <- reactive({
shiny::req(input[["yield_var"]])
# CV% from the UI -> variance: (cv/100)^2.
yieldVariance <- (as.numeric(input[["yield_var"]])/100)^2
yieldMatrix <- as.matrix(omat(mod)@data$Bact_yields)
diag(yieldMatrix) <- yieldVariance
# Labels of the second omega block name the matrix rows.
row.names(yieldMatrix) <- as.character(unlist(omat(mod)@labels[[2]]))
return(yieldMatrix)
})
# Reactive: simulation time grid. A uniform 0.1 h base grid is augmented
# with extra points that are dense early on (0.25 h, 0.5 h, then hourly)
# and progressively coarser (2.5 h, 5 h, 10 h steps) for long runs.
TimeSolv <- reactive({
shiny::req(input[["cutOff"]])
cutOffTime <- as.numeric(input[["cutOff"]])
delta <- 0.1#cutOffTime
#Times to solve equations
if(cutOffTime<100 & cutOffTime>=80){
addVal <- sort(unique(c(0.25,0.5,1:80,seq(80,cutOffTime,2.5))))
T1 <- tgrid(0,cutOffTime,delta, add=addVal)
}else if(cutOffTime<80){
addVal <- sort(unique(c(0.25,0.5,1:cutOffTime)))
T1 <- tgrid(0,cutOffTime,delta, add=addVal)
}else if(cutOffTime>=100){
addVal <- sort(unique(c(0.25,0.5,1:70,seq(72.5,82.5,by=2.5),seq(85,100,by=5),seq(100,cutOffTime,10))))
T1 <- tgrid(0,cutOffTime,delta, add=addVal)
}
T1
})
out <- reactive({
shiny::req(input[["cutOff"]])
shiny::req(input[["nSim"]])
shiny::req(input[["simType"]])
shiny::req(TimeSolv())
shiny::req(omega_Kinetics())
shiny::req(omega_Yields())
shiny::req(pars())
cutOffTime <- isolate(as.numeric(input[["cutOff"]]))
nSim <- as.numeric(input[["nSim"]])
simType <- as.character(input[["simType"]])
T1 <- isolate(TimeSolv())
# If Normal Simulation
if(simType=="Normal"){
Bact_kinetics <- isolate(omega_Kinetics())
Bact_yields <- isolate(omega_Yields())
prog <- 20; print(paste(prog,"%"))
withProgress(message = 'Compiling and Simulating Model',
detail = paste('This may take a while...',prog,"%"), value = 20, max=100,
{
Normal_Sim <- function(pars,T1,cutOffTime,nSim){
hmax=0.1; maxsteps=80000; prog <- 20
repeat{
# set.seed(10101)
outDat <- try({mod %>% param(pars) %>% omat(Bact_kinetics=Bact_kinetics,Bact_yields=Bact_yields) %>%
mrgsim(nid=nSim,tgrid=T1,end=cutOffTime,atol = 1E-10,maxsteps=maxsteps,hmax = hmax)},silent = T)
if (length(names(outDat)) >10 | maxsteps >= 320000){
prog <- 100
setProgress(value = prog, detail = paste('This may take a while...',prog,"%"))
break}
# hmax <- round(hmax/1.3, 3)
maxsteps <- maxsteps + 60000
prog <- prog + 20
print(paste(prog,"%"))
setProgress(value = prog, detail = paste('This may take a while...',prog,"%"))
}
return(outDat)
}
pars <- as.list(isolate(pars()))
outDat <- Normal_Sim(pars,T1,cutOffTime,nSim) }) #End progress bar
# If Sensitivity Analysis
}else if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
Bact_kinetics <- isolate(omega_Kinetics())
Bact_yields <- isolate(omega_Yields())
SensParam <- SensParam() %>% as.character()
SensVals <- SensRangeR() %>% as.numeric()
prog <- 20
withProgress(message = 'Compiling and Simulating Model',
detail = paste('This may take a while...',prog,"%"), value = 20, max=100,
{
#idata_set function
Sensitivity_Sim <- function(T1,cutOffTime,nSim,SensVals){
hmax=0.1; maxsteps=60000
repeat{
solveModel <- function(SensParam,T1,cutOffTime,nSim,SensVals){
SensParam <- qc(.(SensParam))
wrapr::let(
c(SensParam2=substitute(SensParam)),{
datafile2 <- rlang::expr(expand.ev(SensParam2=sort(rep(SensVals,nSim)))) %>% rlang::eval_tidy()
rlang::expr(mod %>% idata_set(datafile2) %>% omat(Bact_kinetics=Bact_kinetics,Bact_yields=Bact_yields) %>% carry.out(SensParam2) %>%
mrgsim(end=cutOffTime,tgrid=T1,atol = 1E-10,maxsteps=maxsteps,hmax = hmax)) %>% rlang::eval_tidy()})
}
outDat <- try({solveModel(SensParam,T1,cutOffTime,nSim,SensVals)},silent = T)
if (length(names(outDat)) >10 | maxsteps >= 210000){
prog <- 100
setProgress(value = prog,detail = paste('This may take a while...',prog,"%"))
break}
# hmax <- 0.001
maxsteps <- maxsteps + 50000
prog <- prog + 20
setProgress(value = prog,detail = paste('This may take a while...',prog,"%"))
}
return(outDat)
}
outDat <- Sensitivity_Sim(T1,cutOffTime,nSim,SensVals) }) #End progress bar
}
assign("out",outDat,envir = globalenv())
outDat
}) # End reactive
# Show a modal error whenever the last simulation failed: out() yields
# NULL or an object with fewer than 3 named columns (the try() in out()
# returns a try-error in that case, which has no column names).
observe(priority = 4,{
tmpOut <- out()
if(is.null(tmpOut) | length(names(tmpOut)) < 3){
sendSweetAlert(
session = session,
title = "Error: Model could not compile with chosen parameters",
text = tags$div(
p("Please choose a new set of parameters or decrease kinetic variability."),
p("The model will attempt to recompile once any parameter has been changed.")),
type = "error",
html = TRUE
)
}
})
# Reactive: name of the parameter varied across simulations. In a
# sensitivity analysis this is the mapped UI choice (SensParam());
# otherwise it is whatever mrgsolve's knobs() reports for the last
# simulation (wrapped in try() since it may not apply).
varyParam <- reactive({
shiny::req(input[["simType"]])
simType <- try({as.character(input[["simType"]])},silent = F)
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
varyParam <- SensParam() %>% as.character()
}else{
out <- out()
varyParam <- try({knobs(out)},silent=T)
}
return(varyParam)
})
# Reactive: class of the varied-parameter object.
# NOTE(review): varyParam() is coerced with as.character() *before*
# class() is taken, so this always evaluates to "character" and can no
# longer distinguish a try-error from a real parameter name. Confirm
# whether downstream consumers still depend on this value.
varyParamClass <- reactive({
out <- out()
varyParam <- varyParam() %>% as.character()
varyParamClass <- try({
class(varyParam) %>% as.character
},silent=T)
return(varyParamClass)
})
# Reactive: the selected uncertainty display mode -- later code compares
# this against "Error Bars" and "Confidence Band".
confInterval <- reactive({
as.character(input[["confInterval"]])
})
#Standard Deviation Function (For Table)
# Summarize a measurement variable within groups: returns a data frame
# with one row per group holding N, mean, sd, standard error (se), and
# the confidence-interval half-width (ci).
#   data          data frame to summarize
#   measurevar    name (string) of the column to summarize
#   groupvars     character vector of grouping column names
#   na.rm         drop NAs when counting/averaging (TRUE; was the unsafe
#                 reassignable shorthand T)
#   conf.interval confidence level for the ci column (default 95%)
#   .drop         forwarded to plyr::ddply
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=TRUE,
                      conf.interval=.95, .drop=TRUE) {
  # New version of length which can handle NA's: if na.rm==TRUE, don't count them
  length2 <- function (x, na.rm=FALSE) {
    if (na.rm) sum(!is.na(x))
    else length(x)}
  # Per-group N, mean and sd (plyr:: namespaced for consistency with the
  # plyr::rename call below).
  datac <- plyr::ddply(data, groupvars, .drop=.drop,.fun = function(xx, col) {
    c(N = length2(xx[[col]], na.rm=na.rm),
      mean = mean(xx[[col]], na.rm=na.rm),
      sd = stats::sd(xx[[col]], na.rm=na.rm))},measurevar)
  datac <- plyr::rename(datac, c("mean" = measurevar)) # Rename the "mean" column
  datac$se <- datac$sd / sqrt(datac$N)  # Calculate standard error of the mean
  # Confidence interval multiplier for standard error
  # Calculate t-statistic for confidence interval:
  # e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
  ciMult <- stats::qt(conf.interval/2 + .5, datac$N-1)
  datac$ci <- datac$se * ciMult
  return(datac)
}
#### Render Component Data Table
# Summary table of chemical species: an "Input" row (t = 0) and an
# "Output" row (t = cutOff) per simulated ID, with human-readable column
# names. Side effects: assigns the globals ModelErrorPerc (mole-balance
# % error shown by output$modelError) and SumDat_Chem.
output$sum1 <- renderTable({
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
simType <- as.character(input[["simType"]])
out <- out()
# Columns shown on the input (t = 0) and output (t = cutOff) rows.
Inputs_Main <- c("H2O", "GUAR", "GUAR_Conc","Conc_PEG9","Pressure_atm","AVG_PEG_MW","Conc_METHANOL","Conc_ISOPROPANOL")
Outputs_Main <- c("H2O", "GUAR", "GUAR_Conc", "H2_GAS", "CO2_GAS", "CH4_GAS","H2_LIQ", "CO2_LIQ", "CH4_LIQ")
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
# Carry the swept parameter so rows can be identified; weight-percent
# parameters are converted back to % for display.
SumDat <- out %>% dplyr::select(ID,time,varyParam,eval(Inputs_Main),eval(Outputs_Main)) %>% as.data.frame
if(varyParam=="WT_Perc_Guar_IN"|varyParam=="WT_Perc_PEG_IN"|varyParam=="WT_Perc_MeOL_IN"|varyParam=="WT_Perc_ISO_IN"){
SumDat[varyParam] <- round(SumDat[varyParam]*100,3)
}
}else{
SumDat <- out %>% dplyr::select(ID,time,eval(Inputs_Main),eval(Outputs_Main)) %>% as.data.frame
}
# Keep only the first and last time points for each ID.
SumDat <- SumDat %>% dplyr::filter(time==0 | time==cutOffTime)
for(i in seq_len(nrow(SumDat))){
if(SumDat$time[i]==0){
row.names(SumDat)[i] <- paste("Input", " ID =",SumDat$ID[i])
}else if(SumDat$time[i]==cutOffTime){
row.names(SumDat)[i] <- paste("Output", "ID =",SumDat$ID[i])}
# Total moles for the mole-balance check.
# BUG FIX: CH4_LIQ was previously added twice, biasing TotalMol and the
# reported model error.
SumDat$TotalMol[i] <- SumDat$H2O[i]+SumDat$GUAR[i]+SumDat$H2_GAS[i]+SumDat$CO2_GAS[i]+
SumDat$CH4_GAS[i]+SumDat$H2_LIQ[i]+SumDat$CO2_LIQ[i]+SumDat$CH4_LIQ[i]
}
SumDat$GUAR <- round(SumDat$GUAR/6600,3) #divide by n_bonds
SumDat$H2_GAS <- round(SumDat$H2_GAS,3);SumDat$CO2_GAS <- round(SumDat$CO2_GAS,3)
# Global % error between input and output total moles (read elsewhere).
ModelErrorPerc <<- round((abs(SumDat$TotalMol[1]-SumDat$TotalMol[2])/SumDat$TotalMol[1])*100,3)
SumDat2 <- SumDat %>% dplyr::select(everything(),-c(ID,time,H2_LIQ,CO2_LIQ,CH4_LIQ,TotalMol)) %>%
dplyr::rename("Guar Gum (mol)"=GUAR,"Guar Gum (g/L)"=GUAR_Conc, "H2 (mol-gas)"=H2_GAS,
"CO2 (mol-gas)"=CO2_GAS, "CH4 (mol-gas)"=CH4_GAS,"Total Pressure (atm)"=Pressure_atm,
"Average PEG MW (g/mol)"=AVG_PEG_MW,"PEG-9 (mol/L)"=Conc_PEG9,"Methanol (mol/L)"=Conc_METHANOL,
"Isopropanol (mol/L)"=Conc_ISOPROPANOL)
# Convert water from moles to litres (mol * 18 g/mol / 1000 g/L).
SumDat2$H2O <- round(SumDat2$H2O*18/1000,2); SumDat2 <- SumDat2 %>% dplyr::rename("Water (L)"=H2O)
SumDat2$"Guar Gum (g/L)" <- round(SumDat2$"Guar Gum (g/L)",3)
SumDat2$"Total Pressure (atm)" <- round(SumDat2$"Total Pressure (atm)",2)
SumDat2$"Average PEG MW (g/mol)" <- round(SumDat2$"Average PEG MW (g/mol)",2)
if(simType=="Normal" & n_distinct(out$ID)==1){
row.names(SumDat2) <- c("Input","Output")}
SumDat_Chem <- SumDat2; SumDat_Chem <<- SumDat_Chem
SumDat_Chem
},rownames = TRUE)
#### Render Bacteria Data Table
output$sum2 <- renderTable({
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
simType <- as.character(input[["simType"]])
out <- out()
Inputs_Bact <- c("Conc_ACIDOGEN","Conc_ACETOGEN","Conc_METHANOGEN","Conc_BACTEROID")
Outputs_Bact <- c("Conc_ACIDOGEN","Conc_ACETOGEN","Conc_METHANOGEN","Conc_BACTEROID")
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
varyParam <- varyParam %>% as.character()
SumDat <- out %>% dplyr::select(ID,time,all_of(varyParam),eval(Inputs_Bact),eval(Outputs_Bact)) %>% as.data.frame
if(varyParam=="WT_Perc_Guar_IN"|varyParam=="WT_Perc_PEG_IN"|varyParam=="WT_Perc_MeOL_IN"|varyParam=="WT_Perc_ISO_IN"){
SumDat[varyParam] <- round(SumDat[varyParam]*100,3)
}
}else{
SumDat <- out %>% dplyr::select(ID,time,eval(Inputs_Bact),eval(Outputs_Bact)) %>% as.data.frame
}
SumDat <- SumDat %>% dplyr::filter(time==0 | time==cutOffTime)
for(i in 1:dim(SumDat)[1]){
if(SumDat$time[i]==0){
row.names(SumDat)[i] <- paste("Input", " ID =",SumDat$ID[i])
}else if(SumDat$time[i]==cutOffTime){
row.names(SumDat)[i] <- paste("Output", "ID =",SumDat$ID[i])}}
SumDat2 <- SumDat %>% dplyr::select(everything(),-c(ID,time))
SumDat2$Conc_ACIDOGEN <- round(SumDat2$Conc_ACIDOGEN,2); SumDat2$Conc_ACETOGEN <- round(SumDat2$Conc_ACETOGEN,2)
SumDat2$Conc_METHANOGEN <- round(SumDat2$Conc_METHANOGEN,2); SumDat2$Conc_BACTEROID <- round(SumDat2$Conc_BACTEROID,2)
SumDat2 <- SumDat2 %>% dplyr::rename("Acidogens (g/L)"=Conc_ACIDOGEN, "Acetogens (g/L)"=Conc_ACETOGEN,
"Methanogens (g/L)"=Conc_METHANOGEN,"Bacteroides (g/L)"=Conc_BACTEROID)
if(simType=="Normal" & n_distinct(out$ID)==1){
row.names(SumDat2) <- c("Input","Output")
}
SumDat_Bact <- SumDat2
SumDat_Bact
},rownames = TRUE)
# Render Model Error
# Render the mole-balance error banner. Reads the global ModelErrorPerc,
# which is assigned (<<-) inside the output$sum1 renderTable above.
output$modelError <- renderText({
ErrorMessage <- paste("Model Error in Mole Balance: <b>",ModelErrorPerc,"% </b>")
HTML(paste(ErrorMessage))
})
# }) #end observe
# Plots --------------------------------------------------------------
observe(priority=3,{
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
simType <- as.character(input[["simType"]])
out <- out() %>% as.data.frame()
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()#try({knobs(out)},silent=T)
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
#Plot Datasets
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
varyParam <- SensParam() %>% as.character()
withProgress(message = 'Summarizing Data',
detail = 'This may take a while...', value = 0, max=100,
{
MainProdDat <- out %>% dplyr::select(ID,time,all_of(varyParam),GUAR_Conc,AVG_PEG_MW,CH4_GAS, H2_GAS, CO2_GAS,Conc_METHANOL,Conc_ISOPROPANOL) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Guar Gum (g/L)"=GUAR_Conc, "AVG PEG MW (g/mol)"=AVG_PEG_MW,"Methanol (mol/L)"=Conc_METHANOL,
"Isopropanol (mol/L)"=Conc_ISOPROPANOL,"H2 (mol-gas)"=H2_GAS,"CO2 (mol-gas)"=CO2_GAS,
"CH4 (mol-gas)"=CH4_GAS) %>% as.data.frame()
setProgress(20)
IntermediateDat <- out %>% dplyr::select(ID,time,all_of(varyParam),GUAR_Conc,Conc_GLUCOSE,Conc_ETHANOL,Conc_PropAcid,Conc_ACETATE,CH4_GAS) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Guar Gum (g/L)"=GUAR_Conc,"CH4 (mol-gas)"=CH4_GAS, "Glucose (mol/L)"=Conc_GLUCOSE,
"Ethanol (mol/L)"=Conc_ETHANOL,"Propanoic Acid (mol/L)"=Conc_PropAcid,
"Acetate (mol/L)"=Conc_ACETATE) %>% as.data.frame()
setProgress(45)
PEG_Dat <- out %>% dplyr::select(ID,time,all_of(varyParam),Conc_PEG9,Conc_PEG8,Conc_PEG7,Conc_PEG6,Conc_PEG5,Conc_PEG4,Conc_PEG3,Conc_DEG,Conc_EG,Conc_AcetHyde,Conc_ACETATE,CH4_GAS) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("PEG-9 (mol/L)"=Conc_PEG9,"PEG-8 (mol/L)"=Conc_PEG8,"PEG-7 (mol/L)"=Conc_PEG7,
"PEG-6 (mol/L)"=Conc_PEG6,"PEG-5 (mol/L)"=Conc_PEG5,"PEG-4 (mol/L)"=Conc_PEG4,
"PEG-3 (mol/L)"=Conc_PEG3,"DEG (mol/L)"=Conc_DEG,"EG (mol/L)"=Conc_EG,
"Acetaldehyde (mol/L)"=Conc_AcetHyde, "Acetate (mol/L)"=Conc_ACETATE,
"CH4 (mol-gas)"=CH4_GAS) %>% as.data.frame()
setProgress(75)
SystemDat <- out %>% dplyr::select(ID,time,all_of(varyParam),H2O,V_TOT,Pressure_atm,Temp2) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Total Pressure (atm)"=Pressure_atm,
"Liquid Volume (% Change)"=V_TOT,"Water (% Change)"=H2O,"Temperature (C)" = Temp2) %>% as.data.frame()
SystemDat$"Liquid Volume (% Change)" <- (SystemDat$"Liquid Volume (% Change)"/SystemDat$"Liquid Volume (% Change)"[1])*100
SystemDat$"Water (% Change)" <- (SystemDat$"Water (% Change)"/SystemDat$"Water (% Change)"[1])*100
setProgress(85)
BacteriaDat <- out %>% dplyr::select(ID,time,all_of(varyParam),Conc_ACIDOGEN,Conc_ACETOGEN,Conc_METHANOGEN,Conc_BACTEROID) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Acidogen Biomass (g/L)"=Conc_ACIDOGEN, "Acetogen Biomass (g/L)"=Conc_ACETOGEN,
"Methanogen Biomass (g/L)"=Conc_METHANOGEN,"Bacteroid Biomass (g/L)"=Conc_BACTEROID) %>% as.data.frame()
if(varyParam=="WT_Perc_Guar_IN"|varyParam=="WT_Perc_PEG_IN"|varyParam=="WT_Perc_MeOL_IN"|varyParam=="WT_Perc_ISO_IN"){
MainProdDat[varyParam] <- round(MainProdDat[varyParam]*100,3)
IntermediateDat[varyParam] <- round(IntermediateDat[varyParam]*100,3)
PEG_Dat[varyParam] <- round(PEG_Dat[varyParam]*100,3)
SystemDat[varyParam] <- round(SystemDat[varyParam]*100,3)
BacteriaDat[varyParam] <- round(BacteriaDat[varyParam]*100,3)
}
MainProdDat <<- MainProdDat
IntermediateDat <<- IntermediateDat
PEG_Dat <<- PEG_Dat
SystemDat <<- SystemDat
BacteriaDat <<- BacteriaDat
setProgress(100)
})
}else{
withProgress(message = 'Summarizing Data',
detail = 'This may take a while...', value = 0, max=100,
{
MainProdDat <- out %>% dplyr::select(ID,time,GUAR_Conc,AVG_PEG_MW,CH4_GAS, H2_GAS, CO2_GAS,Conc_METHANOL,Conc_ISOPROPANOL) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Guar Gum (g/L)"=GUAR_Conc, "AVG PEG MW (g/mol)"=AVG_PEG_MW,"Methanol (mol/L)"=Conc_METHANOL,
"Isopropanol (mol/L)"=Conc_ISOPROPANOL,"H2 (mol-gas)"=H2_GAS,"CO2 (mol-gas)"=CO2_GAS,
"CH4 (mol-gas)"=CH4_GAS) %>% as.data.frame()
setProgress(20)
IntermediateDat <- out %>% dplyr::select(ID,time,GUAR_Conc,Conc_GLUCOSE,Conc_ETHANOL,Conc_PropAcid,Conc_ACETATE,CH4_GAS) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Guar Gum (g/L)"=GUAR_Conc,"CH4 (mol-gas)"=CH4_GAS, "Glucose (mol/L)"=Conc_GLUCOSE,
"Ethanol (mol/L)"=Conc_ETHANOL,"Propanoic Acid (mol/L)"=Conc_PropAcid,
"Acetate (mol/L)"=Conc_ACETATE) %>% as.data.frame()
setProgress(45)
PEG_Dat <- out %>% dplyr::select(ID,time,Conc_PEG9,Conc_PEG8,Conc_PEG7,Conc_PEG6,Conc_PEG5,Conc_PEG4,Conc_PEG3,Conc_DEG,Conc_EG,Conc_AcetHyde,Conc_ACETATE,CH4_GAS) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("PEG-9 (mol/L)"=Conc_PEG9,"PEG-8 (mol/L)"=Conc_PEG8,"PEG-7 (mol/L)"=Conc_PEG7,
"PEG-6 (mol/L)"=Conc_PEG6,"PEG-5 (mol/L)"=Conc_PEG5,"PEG-4 (mol/L)"=Conc_PEG4,
"PEG-3 (mol/L)"=Conc_PEG3,"DEG (mol/L)"=Conc_DEG,"EG (mol/L)"=Conc_EG,
"Acetaldehyde (mol/L)"=Conc_AcetHyde, "Acetate (mol/L)"=Conc_ACETATE,
"CH4 (mol-gas)"=CH4_GAS) %>% as.data.frame()
setProgress(75)
SystemDat <- out %>% dplyr::select(ID,time,H2O,V_TOT,Pressure_atm,Temp2) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Total Pressure (atm)"=Pressure_atm,
"Liquid Volume (% Change)"=V_TOT,"Water (% Change)"=H2O,"Temperature (C)" = Temp2) %>% as.data.frame()
SystemDat$"Liquid Volume (% Change)" <- (SystemDat$"Liquid Volume (% Change)"/SystemDat$"Liquid Volume (% Change)"[1])*100
SystemDat$"Water (% Change)" <- (SystemDat$"Water (% Change)"/SystemDat$"Water (% Change)"[1])*100
setProgress(85)
BacteriaDat <- out %>% dplyr::select(ID,time,Conc_ACIDOGEN,Conc_ACETOGEN,Conc_METHANOGEN,Conc_BACTEROID) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Acidogen Biomass (g/L)"=Conc_ACIDOGEN, "Acetogen Biomass (g/L)"=Conc_ACETOGEN,
"Methanogen Biomass (g/L)"=Conc_METHANOGEN,"Bacteroid Biomass (g/L)"=Conc_BACTEROID) %>% as.data.frame()
MainProdDat <<- MainProdDat
IntermediateDat <<- IntermediateDat
PEG_Dat <<- PEG_Dat
SystemDat <<- SystemDat
BacteriaDat <<- BacteriaDat
setProgress(100)
}) #End progress bar
}
}) #end observe
### Plot 1 (Reactor Properties)
output$plot1 <- renderPlot({
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
plotPoints <- input[["plotPoints"]] %>% as.logical()
simType <- as.character(input[["simType"]])
#Load Reactive Objects
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
confInterval <- confInterval() %>% as.character()
if(exists("SystemDat")){
prog <- 20
withProgress(message = 'Plotting System Data',
detail = paste('This may take a while...',prog,"%"), value = 20, max=100,
{
#Sensitivity
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
varyParam <- varyParam() %>% as.character()
SensParam <- as.character(input[["SensParam"]])
colNames <- names(SystemDat)[4:length(SystemDat)]
p1 <- vector("list",length = length(colNames)); names(p1) <- names(SystemDat)[4:length(SystemDat)]
colScale <- scale_color_discrete(name = eval(SensParam))
#Make Plot
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p1
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
p1 <- lapply(1:length(colNames),function(i){
stdDevsDat <- summarySE(data=SystemDat,measurevar = colNames[i],groupvars = c("time",varyParam)) %>% as.data.frame()
yminE <- stdDevsDat[,4] - stdDevsDat[,7]
ymaxE <- stdDevsDat[,4] + stdDevsDat[,7]
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
if(confInterval=="Error Bars"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Single Simulation
}else if(simType=="Normal" & n_distinct(SystemDat$ID)==1){
colNames <- names(SystemDat)[3:length(SystemDat)]
p1 <- vector("list",length = length(colNames)); names(p1) <- names(SystemDat)[3:length(SystemDat)]
#Make Plot
for(i in colNames){
if(round(min(SystemDat[,i]))==0){
yminVal <- 0
}else{yminVal <- round(min(SystemDat[,i])-min(SystemDat[,i])*.01,15)}
ymaxVal <- round(max(SystemDat[,i])+max(SystemDat[,i])*.015,15)
p1[[i]] <- ggplot(SystemDat, aes_string(x=SystemDat$time, y = SystemDat[,i])) +
geom_point(color="red") + geom_hline(yintercept=0, size=0.6, color="black") + labs(x="Time (h)",y=i) +
geom_vline(xintercept=0, size=0.6, color="black") + scale_y_continuous(limits = c(yminVal, ymaxVal)) +
theme(text=element_text(family="Times New Roman", face="bold", size=13))}
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Multiple Simulations
}else if(simType=="Normal" & n_distinct(SystemDat$ID)!=1){
colNames <- names(SystemDat)[3:length(SystemDat)]
p1 <- vector("list",length = length(colNames)); names(p1) <- colNames
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p1
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
p1 <- lapply(1:length(colNames),function(i){
stdDevsDat <- summarySE(data=SystemDat,measurevar = colNames[i],groupvars = c("time")) %>% as.data.frame()
yminE <- stdDevsDat[,3] - stdDevsDat[,6]
ymaxE <- stdDevsDat[,3] + stdDevsDat[,6]
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
if(confInterval=="Error Bars"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}
p1 <<- p1; Plot1 <- grid.arrange(grobs=p1,ncol=2); assign("Plot1",Plot1,envir = globalenv()) #; print(Plot1)
grid.arrange(Plot1)
prog <- 100; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}) #End progress bar
}
},res=110)
### Plot 2 (Main Component Data)
# Renders the Main Product panel grid (Plot 2). Three mutually exclusive layouts:
#  - "Sensitivity Analysis": one panel per response column, colored/grouped by the varied parameter
#  - "Normal" with a single simulation ID: raw points per response column
#  - "Normal" with multiple IDs: mean trace per column, with error bars or a confidence band
# Side effects: exports `p2` (panel list) and `Plot2` (arranged grob) to the global
# environment — presumably for reuse by download/export handlers elsewhere; TODO confirm.
output$plot2 <- renderPlot({
# Suspend rendering until the simulation output and the required inputs exist.
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
plotPoints <- input[["plotPoints"]] %>% as.logical()
simType <- as.character(input[["simType"]])
#Load Reactive Objects
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
confInterval <- confInterval() %>% as.character()
# MainProdDat is looked up in the global environment (presumably written there by
# the simulation step — TODO confirm); when absent, nothing is rendered.
if(exists("MainProdDat")){
prog <- 20
withProgress(message = 'Plotting Main Product Data',
detail = paste('This may take a while...',prog,"%"), value = 20, max=100,
{
#Sensitivity
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
varyParam <- varyParam() %>% as.character()
SensParam <- as.character(input[["SensParam"]])
# Response columns start at index 4 in this branch (earlier columns are
# presumably ID/time/parameter bookkeeping — verify against MainProdDat's layout).
colNames <- names(MainProdDat)[4:length(MainProdDat)]
p2 <- vector("list",length = length(colNames)); names(p2) <- names(MainProdDat)[4:length(MainProdDat)]
colScale <- scale_color_discrete(name = eval(SensParam))
#Make Plot
# Pre-size scratch holders to the panel list's shape before the lapply below.
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p2
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
# One panel per response column.
p2 <- lapply(1:length(colNames),function(i){
# summarySE is a project helper not visible in this file; columns [,4] and [,7]
# are assumed to be the group mean and the CI half-width when grouping by
# time + the varied parameter — TODO confirm its column order.
stdDevsDat <- summarySE(data=MainProdDat,measurevar = colNames[i],groupvars = c("time",varyParam)) %>% as.data.frame()
yminE <- stdDevsDat[,4] - stdDevsDat[,7]
ymaxE <- stdDevsDat[,4] + stdDevsDat[,7]
# Pad the y-axis by ~1% below / ~1.5% above the error envelope.
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
# NOTE(review): aes_string() is handed live vectors (stdDevsDat$time,
# stdDevsDat[,4], factor(...)) rather than column-name strings; this relies on
# deparsing the values and on deprecated aes_string() semantics — fragile
# across ggplot2 upgrades, though left untouched here to preserve behavior.
if(confInterval=="Error Bars"){
if(plotPoints){
# Mean line + error bars + points.
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
# Mean line + error bars, no points.
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
# Mean line + shaded band + points.
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
# Mean line + shaded band, no points.
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Single Simulation
}else if(simType=="Normal" & n_distinct(MainProdDat$ID)==1){
# Response columns start at index 3 in the Normal branches (no parameter column).
colNames <- names(MainProdDat)[3:length(MainProdDat)]
p2 <- vector("list",length = length(colNames)); names(p2) <- names(MainProdDat)[3:length(MainProdDat)]
#Make Plot
for(i in colNames){
# Pin the lower limit at 0 when the column's minimum rounds to 0, so the
# axis starts at the baseline rather than a tiny padded negative value.
if(round(min(MainProdDat[,i]))==0){
yminVal <- 0
}else{yminVal <- round(min(MainProdDat[,i])-min(MainProdDat[,i])*.01,15)}
ymaxVal <- round(max(MainProdDat[,i])+max(MainProdDat[,i])*.015,15)
p2[[i]] <- ggplot(MainProdDat, aes_string(x=MainProdDat$time, y = MainProdDat[,i])) +
geom_point(color="red") + geom_hline(yintercept=0, size=0.6, color="black") + labs(x="Time (h)",y=i) +
geom_vline(xintercept=0, size=0.6, color="black") + scale_y_continuous(limits = c(yminVal, ymaxVal)) +
theme(text=element_text(family="Times New Roman", face="bold", size=13))}
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Multiple Simulations
}else if(simType=="Normal" & n_distinct(MainProdDat$ID)!=1){
colNames <- names(MainProdDat)[3:length(MainProdDat)]
p2 <- vector("list",length = length(colNames)); names(p2) <- colNames
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p2
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
p2 <- lapply(1:length(colNames),function(i){
# Grouping by time only here, so the assumed mean/CI columns shift to [,3]/[,6]
# (one fewer grouping column than the sensitivity branch) — TODO confirm.
stdDevsDat <- summarySE(data=MainProdDat,measurevar = colNames[i],groupvars = c("time")) %>% as.data.frame()
yminE <- stdDevsDat[,3] - stdDevsDat[,6]
ymaxE <- stdDevsDat[,3] + stdDevsDat[,6]
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
if(confInterval=="Error Bars"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}
# Remnant of an abandoned plotly rendering path, kept for reference.
# for(i in 1:length(p2)){p2[[i]] <- ggplotly(p2[[i]]) %>% layout(
# xaxis = list(automargin=TRUE), yaxis = list(automargin=TRUE,tickprefix=" "),margin=list(l = 75,pad=4))}
# Plot2 <- subplot(p2,nrows = round(length(p2)/2),titleX=T,shareX = T,titleY = T)
# NOTE(review): `<<-` and assign(..., globalenv()) publish p2/Plot2 globally;
# callers elsewhere apparently depend on these names — do not remove casually.
p2 <<- p2; Plot2 <- grid.arrange(grobs=p2,ncol=2);
assign("Plot2",Plot2,envir = globalenv()) #; print(Plot2)
grid.arrange(Plot2)
prog <- 100; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}) #End progress bar
}
},res=110)
### Plot 3 (Intermediate Component Data for Guar Gum)
# Renders the Guar Gum intermediate panel grid (Plot 3). Same three-way layout
# switch as the other component plots:
#  - "Sensitivity Analysis": one panel per response column, grouped by the varied parameter
#  - "Normal" with a single simulation ID: raw points per response column
#  - "Normal" with multiple IDs: mean trace with error bars or a confidence band
# Side effects: exports `p3` and `Plot3` to the global environment.
output$plot3 <- renderPlot({
# Suspend rendering until the simulation output and the required inputs exist.
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
plotPoints <- input[["plotPoints"]] %>% as.logical()
simType <- as.character(input[["simType"]])
#Load Reactive Objects
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
confInterval <- confInterval() %>% as.character()
# IntermediateDat is looked up in the global environment (presumably produced by
# the simulation run — TODO confirm); nothing is rendered when it is absent.
if(exists("IntermediateDat")){
prog <- 20
withProgress(message = 'Plotting Guar Gum Intermediate Data',
detail = paste('This may take a while...',prog,"%"), value = 20, max=100,
{
#Sensitivity
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
varyParam <- varyParam() %>% as.character()
SensParam <- as.character(input[["SensParam"]])
# Response columns start at index 4 in this branch (earlier columns are
# presumably ID/time/parameter bookkeeping — verify against IntermediateDat).
colNames <- names(IntermediateDat)[4:length(IntermediateDat)]
p3 <- vector("list",length = length(colNames)); names(p3) <- names(IntermediateDat)[4:length(IntermediateDat)]
colScale <- scale_color_discrete(name = eval(SensParam))
#Make Plot
# Pre-size scratch holders to the panel list's shape before the lapply below.
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p3
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
# One panel per response column.
p3 <- lapply(1:length(colNames),function(i){
# summarySE is a project helper not visible in this file; columns [,4]/[,7]
# are assumed to be the group mean and CI half-width — TODO confirm.
stdDevsDat <- summarySE(data=IntermediateDat,measurevar = colNames[i],groupvars = c("time",varyParam)) %>% as.data.frame()
yminE <- stdDevsDat[,4] - stdDevsDat[,7]
ymaxE <- stdDevsDat[,4] + stdDevsDat[,7]
# Pad the y-axis by ~1% below / ~1.5% above the error envelope.
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
# NOTE(review): aes_string() receives live vectors rather than column-name
# strings — deprecated usage, preserved as-is to keep behavior unchanged.
if(confInterval=="Error Bars"){
if(plotPoints){
# Mean line + error bars + points.
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
# Mean line + error bars, no points.
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
# Mean line + shaded band + points.
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
# Mean line + shaded band, no points.
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Single Simulation
}else if(simType=="Normal" & n_distinct(IntermediateDat$ID)==1){
# Response columns start at index 3 in the Normal branches (no parameter column).
colNames <- names(IntermediateDat)[3:length(IntermediateDat)]
p3 <- vector("list",length = length(colNames)); names(p3) <- names(IntermediateDat)[3:length(IntermediateDat)]
#Make Plot
for(i in colNames){
# Pin the lower limit at 0 when the column's minimum rounds to 0.
if(round(min(IntermediateDat[,i]))==0){
yminVal <- 0
}else{yminVal <- round(min(IntermediateDat[,i])-min(IntermediateDat[,i])*.01,15)}
ymaxVal <- round(max(IntermediateDat[,i])+max(IntermediateDat[,i])*.015,15)
p3[[i]] <- ggplot(IntermediateDat, aes_string(x=IntermediateDat$time, y = IntermediateDat[,i])) +
geom_point(color="red") + geom_hline(yintercept=0, size=0.6, color="black") + labs(x="Time (h)",y=i) +
geom_vline(xintercept=0, size=0.6, color="black") + scale_y_continuous(limits = c(yminVal, ymaxVal)) +
theme(text=element_text(family="Times New Roman", face="bold", size=13))}
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Multiple Simulations
}else if(simType=="Normal" & n_distinct(IntermediateDat$ID)!=1){
colNames <- names(IntermediateDat)[3:length(IntermediateDat)]
p3 <- vector("list",length = length(colNames)); names(p3) <- colNames
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p3
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
p3 <- lapply(1:length(colNames),function(i){
# Grouping by time only, so the assumed mean/CI columns shift to [,3]/[,6]
# (one fewer grouping column than the sensitivity branch) — TODO confirm.
stdDevsDat <- summarySE(data=IntermediateDat,measurevar = colNames[i],groupvars = c("time")) %>% as.data.frame()
yminE <- stdDevsDat[,3] - stdDevsDat[,6]
ymaxE <- stdDevsDat[,3] + stdDevsDat[,6]
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
if(confInterval=="Error Bars"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}
# NOTE(review): `<<-` and assign(..., globalenv()) publish p3/Plot3 globally;
# other code apparently depends on these names — do not remove casually.
p3 <<- p3; Plot3 <- grid.arrange(grobs=p3,ncol=2); assign("Plot3",Plot3,envir = globalenv()) #; print(Plot3)
grid.arrange(Plot3)
prog <- 100; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}) #End progress bar
}
},res=110)
### Plot 4 (Intermediate Component Data for PEG)
# Renders the PEG-400 intermediate panel grid (Plot 4). Layout switch:
#  - "Sensitivity Analysis": one panel per response column, grouped by the varied parameter
#  - "Normal" with a single simulation ID: raw points per response column
#  - "Normal" with multiple IDs: a UNIQUE two-panel layout — all intermediate
#    species (except CH4) stacked in one concentration panel, plus an average
#    PEG molecular-weight panel pulled from MainProdDat.
# Side effects: exports `p4` and `Plot4` to the global environment.
output$plot4 <- renderPlot({
# Suspend rendering until the simulation output and the required inputs exist.
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
simType <- as.character(input[["simType"]])
plotPoints <- input[["plotPoints"]] %>% as.logical()
#Load Reactive Objects
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
confInterval <- confInterval() %>% as.character()
# PEG_Dat is looked up in the global environment (presumably produced by the
# simulation run — TODO confirm); nothing is rendered when it is absent.
if(exists("PEG_Dat")){
prog <- 20
withProgress(message = 'Plotting PEG-400 Intermediate Data',
detail = paste('This may take a while...',prog,"%"), value = 20, max=100,
{
#Sensitivity
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
varyParam <- varyParam() %>% as.character()
SensParam <- as.character(input[["SensParam"]])
# Response columns start at index 4 in this branch (earlier columns are
# presumably ID/time/parameter bookkeeping — verify against PEG_Dat).
colNames <- names(PEG_Dat)[4:length(PEG_Dat)]
p4 <- vector("list",length = length(colNames)); names(p4) <- names(PEG_Dat)[4:length(PEG_Dat)]
colScale <- scale_color_discrete(name = eval(SensParam))
#Make Plot
# Pre-size scratch holders to the panel list's shape before the lapply below.
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p4
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
# One panel per response column.
p4 <- lapply(1:length(colNames),function(i){
# summarySE is a project helper not visible in this file; columns [,4]/[,7]
# are assumed to be the group mean and CI half-width — TODO confirm.
stdDevsDat <- summarySE(data=PEG_Dat,measurevar = colNames[i],groupvars = c("time",varyParam)) %>% as.data.frame()
yminE <- stdDevsDat[,4] - stdDevsDat[,7]
ymaxE <- stdDevsDat[,4] + stdDevsDat[,7]
# Pad the y-axis by ~1% below / ~1.5% above the error envelope.
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
# NOTE(review): aes_string() receives live vectors rather than column-name
# strings — deprecated usage, preserved as-is to keep behavior unchanged.
if(confInterval=="Error Bars"){
if(plotPoints){
# Mean line + error bars + points.
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
# Mean line + error bars, no points.
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
# Mean line + shaded band + points.
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
# Mean line + shaded band, no points.
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Single Simulation
}else if(simType=="Normal" & n_distinct(PEG_Dat$ID)==1){
# Response columns start at index 3 in the Normal branches (no parameter column).
colNames <- names(PEG_Dat)[3:length(PEG_Dat)]
p4 <- vector("list",length = length(colNames)); names(p4) <- names(PEG_Dat)[3:length(PEG_Dat)]
#Make Plot
for(i in colNames){
# Pin the lower limit at 0 when the column's minimum rounds to 0.
if(round(min(PEG_Dat[,i]))==0){
yminVal <- 0
}else{yminVal <- round(min(PEG_Dat[,i])-min(PEG_Dat[,i])*.01,15)}
ymaxVal <- round(max(PEG_Dat[,i])+max(PEG_Dat[,i])*.015,15)
p4[[i]] <- ggplot(PEG_Dat, aes_string(x=PEG_Dat$time, y = PEG_Dat[,i])) +
geom_point(color="red") + geom_hline(yintercept=0, size=0.6, color="black") + labs(x="Time (h)",y=i) +
geom_vline(xintercept=0, size=0.6, color="black") + scale_y_continuous(limits = c(yminVal, ymaxVal)) +
theme(text=element_text(family="Times New Roman", face="bold", size=13))}
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Multiple Simulations
}else if(simType=="Normal" & n_distinct(PEG_Dat$ID)!=1){
# Long-format intermediates (CH4 excluded from the concentration panel);
# the average PEG molecular weight is taken from MainProdDat instead.
PEG_Dat1 <- PEG_Dat %>% dplyr::select(everything(),-c("CH4 (mol-gas)")) %>% tidyr::gather(key=key,value=value,-ID,-time)
PEG_Dat2 <- MainProdDat %>% dplyr::select(ID,time,"AVG PEG MW (g/mol)")
# Summary by species+time for panel 1; by time only for panel 2. Assumed
# mean/CI columns are [,4]/[,7] and [,3]/[,6] respectively — TODO confirm.
stdDevsDat <- summarySE(data=PEG_Dat1,measurevar = "value",groupvars = c("key","time")) %>% as.data.frame()
stdDevsDat2 <- summarySE(data=PEG_Dat2,measurevar = "AVG PEG MW (g/mol)",groupvars = c("time")) %>% as.data.frame()
yminE <- stdDevsDat[,4] - stdDevsDat[,7]; yminE2 <- stdDevsDat2[,3] - stdDevsDat2[,6]
ymaxE <- stdDevsDat[,4] + stdDevsDat[,7]; ymaxE2 <- stdDevsDat2[,3] + stdDevsDat2[,6]
yminVal <- round(min(yminE)-min(yminE)*.01,15); yminVal2 <- round(min(yminE2)-min(yminE2)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15); ymaxVal2 <- round(max(ymaxE2)+max(ymaxE2)*.015,15)
yLimits <- c(yminVal, ymaxVal); yLimits2 <- c(yminVal2, ymaxVal2)
# NOTE(review): this overwrites the logical `plotPoints` input flag with a
# character vector of species names; it is only referenced by the commented-out
# geom_point line below, but the shadowing is a latent hazard — consider renaming.
plotPoints <- unique(PEG_Dat1$key)
p4 <- vector("list",length = 2); names(p4) <- c("Intermediate Products","AVG MW Weight")
prog <- 40; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Make Plot
if(confInterval=="Error Bars"){
p4[[1]] <- ggplot(stdDevsDat, aes(y=value, x=time, group=key)) + scale_y_continuous(limits = yLimits) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
geom_line(aes(color=key), size=1) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y="Concentration (mol/L)") + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13)) +
scale_colour_discrete(name="Polyethylene Glycol (PEG) \nIntermediate Product")
p4[[2]] <- ggplot(stdDevsDat2, aes_string(y=stdDevsDat2$"AVG PEG MW (g/mol)", x=stdDevsDat2$time)) +
geom_errorbar(color="black",aes(ymin=yminE2,ymax=ymaxE2),width=2.5) + scale_y_continuous(limits = yLimits2) +
geom_line(size=1) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y="Average PEG MW") + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else if(confInterval=="Confidence Band"){
p4[[1]] <- ggplot(stdDevsDat, aes(y=value, x=time, group=key)) + scale_y_continuous(limits = yLimits) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat$key)),alpha=0.3,show.legend = FALSE) +
geom_line(aes(color=key), size=1) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y="Concentration (mol/L)") + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13)) +
scale_colour_discrete(name="Polyethylene Glycol (PEG) \nIntermediate Product")
p4[[2]] <- ggplot(stdDevsDat2, aes_string(y=stdDevsDat2$"AVG PEG MW (g/mol)", x=stdDevsDat2$time)) +
geom_ribbon(aes_string(ymin=yminE2,ymax=ymaxE2),alpha=0.3,show.legend = FALSE) + scale_y_continuous(limits = yLimits2) +
geom_line(size=1) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y="Average PEG MW") + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#p4[[1]] + geom_point(data=subset(PEG_Dat3[PEG_Dat3$key==plotPoints[1],]), color='black', shape=18, size=2)
}
# NOTE(review): `<<-` and assign(..., globalenv()) publish p4/Plot4 globally;
# other code apparently depends on these names — do not remove casually.
p4 <<- p4;
# The multi-simulation layout stacks its two panels vertically; all other
# layouts use the standard two-column grid.
if(simType=="Normal" & n_distinct(PEG_Dat$ID)!=1){
Plot4 <- grid.arrange(grobs=p4,ncol=1,heights=c(2,1.5)); assign("Plot4",Plot4,envir = globalenv())
}else{
Plot4 <- grid.arrange(grobs=p4,ncol=2); assign("Plot4",Plot4,envir = globalenv())
}
grid.arrange(Plot4)
prog <- 100; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}) #End progress bar
}
},res=110)
### Plot 5 (Bacteria Data)
output$plot5 <- renderPlot({
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
plotPoints <- input[["plotPoints"]] %>% as.logical()
simType <- as.character(input[["simType"]])
#Load Reactive Objects
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
confInterval <- confInterval() %>% as.character()
if(exists("BacteriaDat")){
prog <- 20
withProgress(message = 'Plotting Bacteria Data',
detail = paste('This may take a while...',prog,"%"), value = 20, max=100,
{
#Sensitivity
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
varyParam <- varyParam() %>% as.character()
SensParam <- as.character(input[["SensParam"]])
colNames <- names(BacteriaDat)[4:length(BacteriaDat)]
p5 <- vector("list",length = length(colNames)); names(p5) <- names(BacteriaDat)[4:length(BacteriaDat)]
colScale <- scale_color_discrete(name = eval(SensParam))
#Make Plot
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p5
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
p5 <- lapply(1:length(colNames),function(i){
stdDevsDat <- summarySE(data=BacteriaDat,measurevar = colNames[i],groupvars = c("time",varyParam)) %>% as.data.frame()
yminE <- stdDevsDat[,4] - stdDevsDat[,7]
ymaxE <- stdDevsDat[,4] + stdDevsDat[,7]
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
if(confInterval=="Error Bars"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Single Simulation
}else if(simType=="Normal" & n_distinct(BacteriaDat$ID)==1){
colNames <- names(BacteriaDat)[3:length(BacteriaDat)]
p5 <- vector("list",length = length(colNames)); names(p5) <- names(BacteriaDat)[3:length(BacteriaDat)]
#Make Plot
for(i in colNames){
if(round(min(BacteriaDat[,i]))==0){
yminVal <- 0
}else{yminVal <- round(min(BacteriaDat[,i])-min(BacteriaDat[,i])*.01,15)}
ymaxVal <- round(max(BacteriaDat[,i])+max(BacteriaDat[,i])*.015,15)
p5[[i]] <- ggplot(BacteriaDat, aes_string(x=BacteriaDat$time, y = BacteriaDat[,i])) +
geom_point(color="red") + geom_hline(yintercept=0, size=0.6, color="black") + labs(x="Time (h)",y=i) +
geom_vline(xintercept=0, size=0.6, color="black") + scale_y_continuous(limits = c(yminVal, ymaxVal)) +
theme(text=element_text(family="Times New Roman", face="bold", size=13))}
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Multiple Simulations
}else if(simType=="Normal" & n_distinct(BacteriaDat$ID)!=1){
colNames <- names(BacteriaDat)[3:length(BacteriaDat)]
p5 <- vector("list",length = length(colNames)); names(p5) <- colNames
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p5
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
p5 <- lapply(1:length(colNames),function(i){
stdDevsDat <- summarySE(data=BacteriaDat,measurevar = colNames[i],groupvars = c("time")) %>% as.data.frame()
yminE <- stdDevsDat[,3] - stdDevsDat[,6]
ymaxE <- stdDevsDat[,3] + stdDevsDat[,6]
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
if(confInterval=="Error Bars"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}
p5 <<- p5; Plot5 <- grid.arrange(grobs=p5,ncol=2); assign("Plot5",Plot5,envir = globalenv()) #; print(Plot5)
grid.arrange(Plot5)
prog <- 100; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}) #End progress bar
}
},res=110)
} # End Server
# UI Code -----------------------------------------------------------------
# Dashboard layout: header, a sidebar carrying the parameter drop-downs and
# navigation menus, and a body with one tab per plot / table / document.
ui <- dashboardPage(#theme = shinytheme("slate"),
#shinythemes::themeSelector(),
dashboardHeader(title = "Anaerobic Digestion"),
dashboardSidebar(
#hr(),
sidebarMenu(id="tabs",
menuItem("Getting Started", tabName = "GettingStarted", icon = icon("dashboard"),selected=TRUE),
menuItem("Model Parameters", icon = icon("balance-scale"),
# Drop-down: initial bacteria concentrations (mg/L) and first-order decay rate.
shinyWidgets::dropdown(
shinydashboard::box(width = 12, status = "primary", solidHeader = FALSE,
h2("Initial Bacteria Concentrations (mg/L)", align = "center"),
numericInput("Bact_ScaleFact_Acido", label=p("Acidogens",style="color:#080808"), min=1,step=1,value=300,max=10000),
numericInput("Bact_ScaleFact_Aceto", label=p("Acetogens",style="color:#080808"), min=1,step=1,value=3000,max=10000),
numericInput("Bact_ScaleFact_Meth", label=p("Methanogens",style="color:#080808"), min=1,step=1,value=3500,max=10000),
numericInput("Bact_ScaleFact_Bact", label=p("Bacteroides",style="color:#080808"), min=1,step=1,value=300,max=10000),
sliderInput("DecayRate",label=p("Decay Rate",style="color:#080808"), post=" 1/h", 0,0.002,0.001,0.0002,ticks = F)
),
label = "Bacteria", style = "stretch", size="sm",#up=TRUE,
status = "primary", width = "420px",
#tooltip = tooltipOptions(title = "Click to see options",placement = "bottom"),
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutRightBig)
),
# Drop-down: feed chemical weight percents, with a nested drop-down to tune
# the PEG molecular-weight distribution (rendered by output$PegPlot).
shinyWidgets::dropdown(
shinydashboard::box(width = 12, status = "primary", solidHeader = FALSE,
h2("Initial Weight Percents (%)", align = "center"),
numericInput("WT_Perc_Guar_IN",
label=p("Guar Gum",style="color:#080808"), min=0.1,step=.01,value=0.83,max=2),
numericInput("WT_Perc_PEG_IN",
label=p("PEG 400",style="color:#080808"), min=0.1,step=.01,value=0.42,max=2),
shinyWidgets::dropdown(
shinydashboard::box(width = 12, status = "primary", solidHeader = FALSE,
h2("PEG MW Distribution", align = "center"),
plotOutput("PegPlot",width="100%",height="400px"),
p("Note: The distribution was purposely skewed to more closely match",style="color:#080808"),
p("observations in the literature.",style="color:#080808"),
noUiSliderInput("PegMeanMW",label= p("Adjust Mean PEG MW",style="color:#080808"), 300,400,400,1,tooltips = F)
),
label = "Adjust PEG MW Distribution", style = "stretch",size="sm",
status = "primary", width = "600px",
#tooltip = tooltipOptions(title = "Click to see options",placement = "bottom"),
animate = animateOptions(enter = "fadeInDown", exit = "fadeOutUp",duration = 0.8)
),
numericInput("WT_Perc_MeOL_IN",
label=p("Methanol",style="color:#080808"), min=0.1,step=.01,value=0.63,max=0.8),
numericInput("WT_Perc_ISO_IN",
label=p("Isopropanol",style="color:#080808"), min=0.1,step=.01,value=0.63,max=0.8)
),
label = "Chemical Compounds", style = "stretch",size="sm", #up=TRUE,
status = "primary", width = "420px",
#tooltip = tooltipOptions(title = "Click to see options",placement = "bottom"),
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutRightBig)
),
# Drop-down: reactor/system properties (wells, temperature, headspace).
shinyWidgets::dropdown(
shinydashboard::box(width = 12, status = "primary", solidHeader = FALSE,
h2("System Properties", align = "center"),
sliderInput("wells",label= p("Number of Wells to Pull From",style="color:#080808"), 1,55,55,1),
sliderInput("Temp",label= p("Initial Reactor Temperature (°C)",style="color:#080808"), 20,40,30,1),
knobInput("TempSlope",displayPrevious=T,label= p("Temperature Slope (°C/h)",style="color:#080808"),0,-0.1,0.1,0.01,immediate=F),# ,height = "50%",width="50%"
numericInput("Head_Space_VolRatio",
label=p("Ratio of Headspace to Reactor Volume (L/L)",style="color:#080808"),
min=0.25,max=3,value=2,step=0.05)
),
label = "System Properties", style = "stretch",size="sm", #up=TRUE,
status = "primary", width = "420px",
#tooltip = tooltipOptions(title = "Click to see options",placement = "bottom"),
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutRightBig)
),
# Drop-down: coefficient-of-variation sliders feeding omega_Kinetics /
# omega_Yields, plus the confidence-interval display style.
shinyWidgets::dropdown(
shinydashboard::box(width = 12, status = "primary", solidHeader = FALSE,
h2("Parameter Variability (CV %)", align = "center"),
sliderInput("kinetic_var",label= p("Kinetic Rates",style="color:#080808"), 0,100,30,1),
sliderInput("yield_var",label= p("Bacteria Yields",style="color:#080808"), 0,50,15,1),
radioGroupButtons("confInterval",label=p("Confidence Interval (95%)",style="color:#080808"),justified = TRUE,
checkIcon = list(yes = icon("ok", lib = "glyphicon")),
choices = c("Confidence Band","Error Bars"),selected = "Confidence Band")
),
label = "Variability", style = "stretch",size="sm", #up=TRUE,
status = "primary", width = "420px",
#tooltip = tooltipOptions(title = "Click to see options",placement = "bottom"),
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutRightBig)
),
# Drop-down: simulation type; the sensitivity-analysis controls only show
# when that type is selected (conditionalPanel).
shinyWidgets::dropdown(
shinydashboard::box(width = 12, status = "primary", solidHeader = FALSE,
h2("Simulation Specification", align = "center"),
radioGroupButtons("simType",label=p("Type of Simulation",style="color:#080808"),justified = TRUE,
checkIcon = list(yes = icon("ok", lib = "glyphicon")),
choices = c("Normal","Sensitivity Analysis"),selected = "Normal"),
numericInput("nSim", label=p("Number of Simulations",style="color:#080808"), min=1,step=1,value=20,max=200),
conditionalPanel("input.simType=='Sensitivity Analysis'",
h2("Sensitivity Analysis", align = "center"),
pickerInput(inputId = "SensParam",label = p("Choose Parameter to Vary",style="color:#080808"),
choices = SensChoices,options = list(size = 5,`style` = "btn-info"),selected = "WT % of Guar Gum"),
sliderInput("SensRange", p("Range of Parameter Variance:",style="color:#080808"), min = -80, max = 80, post="%",value = c(-25,25))
)
),
label = "Simulation", style = "stretch",size="sm", #up=TRUE,
status = "primary", width = "420px",
#tooltip = tooltipOptions(title = "Click to see options",placement = "bottom"),
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutRightBig)
)
),
# Navigation: tab names here must match the tabItem names in dashboardBody.
menuItem("Plots", icon = icon("line-chart"),
menuSubItem("Main Components", tabName = "plotTab2", icon = icon("angle-right")),
menuSubItem("Guar Gum Intermediates", tabName = "plotTab3", icon = icon("angle-right")),
menuSubItem("PEG Intermediates", tabName = "plotTab4", icon = icon("angle-right")),
menuSubItem("Bacteria Growth", tabName = "plotTab5", icon = icon("angle-right")),
menuSubItem("Reactor Properties", tabName = "plotTab1", icon = icon("angle-right"))
),
menuItem("Tables", icon = icon("table"), #tabName="tabtable",
menuSubItem("Main Component Data", tabName = "sumTab1", icon = icon("angle-right")),
menuSubItem("Bacteria Data", tabName = "sumTab2", icon = icon("angle-right"))
), hr(),
menuItem("Mathematical Model", tabName = "readme", icon = icon("mortar-board")),
menuItem("Background of Process", tabName = "Background", icon = icon("book")),
menuItem("Codes", icon = icon("file-text-o"),
menuSubItem("Model Code", tabName = "ModelCode", icon = icon("angle-right")),
#menuSubItem("ui.R", tabName = "ui", icon = icon("angle-right")),
#menuSubItem("server.R", tabName = "server", icon = icon("angle-right")),
menuSubItem("app.R", tabName = "app", icon = icon("angle-right"))
)
),
hr(),
# conditionalPanel("input.tabs=='plot1' | input.tabs=='plot2' | input.tabs=='plot3' | input.tabs=='plot4'",
# Global plotting controls: time cutoff and point display toggle.
sidebarMenu(
menuItem("Plotting Parameters", icon = icon("chevron-circle-right"),
fluidRow(column(1),
column(10,
sliderInput("cutOff",label="Truncate Data", post=" h",
25,300,100,5),
awesomeCheckbox(
inputId = "plotPoints",label = "Show Data Points on Graphs?", value = FALSE,status = "info"
)
)))
),
hr()#,
# sidebarMenu(
# br(),
# div(img(src="FrackOff.png",height=160,width=200),style="text-align: center;")
# )
#)
), # End dashboardSidebar
# Body: one tabItem per sidebar entry. MathJax is configured on the two
# markdown-rendered tabs so equations number automatically.
dashboardBody(
tabItems(
tabItem(tabName = "GettingStarted",
fluidPage(
tags$head(HTML("<script type='text/x-mathjax-config'>MathJax.Hub.Config({ TeX: { equationNumbers: {autoNumber: 'all'} } });</script>")),
withMathJax(),
uiOutput('markdownGS')
)
),
# tabItem(tabName = "Input",
# ),
tabItem(tabName = "plotTab1",
fluidRow(
shinydashboard::box(width = 10, status = "primary",
plotOutput("plot1",width="100%",height="800px")
)#,
# shinydashboard::box(width = 2, status = "primary",
# actionBttn(inputId = "resim1",label = "Resimulate",style = "fill",color="primary",size = "lg",block = T)
)#)
),
tabItem(tabName = "plotTab2",
fluidRow(
shinydashboard::box(width = 10, status = "primary",
plotOutput("plot2",width="100%",height="800px")
)#,
# shinydashboard::box(width = 2, status = "primary",
# actionBttn(inputId = "resim2",label = "Resimulate",style = "fill",color="primary",size = "lg",block = T)
)#)
),
tabItem(tabName = "plotTab3",
fluidRow(
shinydashboard::box(width = 10, status = "primary",
plotOutput("plot3",width="100%",height="800px")
)#,
# shinydashboard::box(width = 2, status = "primary",
# actionBttn(inputId = "resim3",label = "Resimulate",style = "fill",color="primary",size = "lg",block = T)
)#)
),
tabItem(tabName = "plotTab4",
fluidRow(
shinydashboard::box(width = 10, status = "primary",
plotOutput("plot4",width="100%",height="1050px")
)#,
# shinydashboard::box(width = 2, status = "primary",
# actionBttn(inputId = "resim4",label = "Resimulate",style = "fill",color="primary",size = "lg",block = T)
)#)
),
tabItem(tabName = "plotTab5",
fluidRow(
shinydashboard::box(width = 10, status = "primary",
plotOutput("plot5",width="100%",height="800px")
)#,
# shinydashboard::box(width = 2, status = "primary",
# actionBttn(inputId = "resim5",label = "Resimulate",style = "fill",color="primary",size = "lg",block = T)
)#)
),
tabItem(tabName = "sumTab1",
fluidRow(
shinydashboard::box(width = 10, status = "primary", solidHeader = TRUE, title="Main Component Data",
tableOutput("sum1"),
htmlOutput(outputId = "modelError")
)#,
# shinydashboard::box(width = 2, status = "primary",
# actionBttn(inputId = "resim6",label = "Resimulate",style = "fill",color="primary",size = "lg",block = T)
)#)
),
tabItem(tabName = "sumTab2",
fluidRow(
shinydashboard::box(width = 10, status = "primary", solidHeader = TRUE, title="Bacteria Data",
tableOutput("sum2")#,
#htmlOutput(outputId = "modelError")
)#,
# shinydashboard::box(width = 2, status = "primary",
# actionBttn(inputId = "resim7",label = "Resimulate",style = "fill",color="primary",size = "lg",block = T)
)#)
),
# tabItem(tabName = "ui",
# shinydashboard::box(width = NULL, status = "primary", solidHeader = TRUE, title="ui.R",
# downloadButton('downloadData2', 'Download'),
# br(),br(),
# pre(includeText("ui.R"))
# )
# ),
# tabItem(tabName = "server",
# shinydashboard::box(width = NULL, status = "primary", solidHeader = TRUE, title="server.R",
# downloadButton('downloadData3', 'Download'),
# br(),br(),
# pre(includeText("server.R"))
# )
# ),
tabItem(tabName = "app",
shinydashboard::box(width = NULL, status = "primary", solidHeader = TRUE, title="app.R",
downloadButton('downloadData4', 'Download'),
br(),br(),
pre(includeText("app.R"))
)
),
tabItem(tabName = "ModelCode",
aceEditor("mod_code", .model,mode="r", height="1000px")
),
tabItem(tabName = "readme",
fluidPage(
tags$head(HTML("<script type='text/x-mathjax-config'>MathJax.Hub.Config({ TeX: { equationNumbers: {autoNumber: 'all'} } });</script>")),
withMathJax(),
uiOutput('markdownRM')
)
),
tabItem(tabName = "Background",
tags$iframe(style="height:820px; width:100%; scrolling=yes",
src="AnaerobicDigestion_ReadMe.pdf")
)
)#,
# conditionalPanel("input.tabs=='plotTab1' | input.tabs=='plotTab2' | input.tabs=='plotTab3' | input.tabs=='plotTab4'| input.tabs=='sumTab1' | input.tabs=='sumTab2'",
# tags$h2("Adjust parameters in dropdown menus:"),
# fluidPage(
# ) #End fluidPage
# ) #End Conditional Panel
), # End dashboardBody
# Custom JS/CSS assets served from www/.
tags$head(
tags$script(src = "js/session.js"),
tags$script(src = "js/modal_vid.js"),
tags$link(rel = "stylesheet", type = "text/css", href = "styles.css"),
tags$style(HTML(".main-sidebar { font-size: 16px; }"))
)#,
#tags$style(".swal-modal {width: 80%;}")
) # End UI
# Launch the app (ui and server are defined above in this file).
shinyApp(ui = ui, server = server)
#runApp(shinyApp(ui = ui, server = server),launch.browser=TRUE)
| /app/app.R | permissive | barrettk/AnaerobicDigestion | R | false | false | 94,599 | r |
source("./global.R")
# Server Code -------------------------------------------------------------
# Shiny server: renders the two markdown info pages, compiles the mrgsolve
# model, builds reactive parameter sets from the UI, runs the simulations,
# and renders the summary tables and plots.
server <- function(input, output,session) {
#Render ReadMe UI
# Pre-knit both Rmd documents once at server start-up so their rendered
# output is available to the tabs below.
rmdfiles <- c("MathModel.Rmd","GettingStarted.Rmd")
sapply(rmdfiles, knit, quiet = T)
# "Mathematical Model" tab: knit the Rmd and inject the HTML with MathJax.
output$markdownRM <- renderUI({
fluidPage(
withMathJax(HTML(markdown::markdownToHTML(knit("MathModel.Rmd", quiet = TRUE), fragment.only=TRUE,title="Frack Off",header="Mathematical Model for Anaerobic Digestion")))
)
})
#Render GettingStarted UI
# "Getting Started" tab: same pipeline for the introductory document.
output$markdownGS <- renderUI({
fluidPage(
withMathJax(HTML(markdown::markdownToHTML(knit("GettingStarted.Rmd", quiet = TRUE), fragment.only=TRUE,title="Frack Off",header="Getting Started")))
)
})
# Compile (or load from cache) the ODE model.
mod <- mrgsolve::mread_cache("AnaerobicDigestionShinyV3.cpp")
# Kinetic Data Import From Bacteria_kinetics.R
# The fit_* objects come from global.R: polynomial fits of Michaelis-Menten
# parameters (Vmax, Km) against temperature for each bacteria group.
# NOTE(review): the Intercept_/a_/b_/c_ scalars below are not referenced in
# the visible code (pars() reads the coef_* vectors directly) -- they may be
# dead assignments; confirm against the rest of the file before removing.
# Acetogens
# Vmax
coef_Aceto_vmax <- coef(fit_Aceto_vmax)
Intercept_Aceto_vmax <- as.numeric(coef_Aceto_vmax[1])
a_Aceto_vmax <- as.numeric(coef_Aceto_vmax[2])
b_Aceto_vmax <- as.numeric(coef_Aceto_vmax[3])
c_Aceto_vmax <- as.numeric(coef_Aceto_vmax[4])
# Km
coef_Aceto_km <- coef(fit_Aceto_km)
Intercept_Aceto_km <- as.numeric(coef_Aceto_km[1])
a_Aceto_km <- as.numeric(coef_Aceto_km[2])
b_Aceto_km <- as.numeric(coef_Aceto_km[3])
c_Aceto_km <- as.numeric(coef_Aceto_km[4])
# Methanogens
# Vmax
coef_Meth_vmax <- coef(fit_Meth_vmax)
Intercept_Meth_vmax <- as.numeric(coef_Meth_vmax[1])
a_Meth_vmax <- as.numeric(coef_Meth_vmax[2])
b_Meth_vmax <- as.numeric(coef_Meth_vmax[3])
c_Meth_vmax <- as.numeric(coef_Meth_vmax[4])
# Km
coef_Meth_km <- coef(fit_Meth_km)
Intercept_Meth_km <- as.numeric(coef_Meth_km[1])
a_Meth_km <- as.numeric(coef_Meth_km[2])
b_Meth_km <- as.numeric(coef_Meth_km[3])
c_Meth_km <- as.numeric(coef_Meth_km[4])
# Reactive: sample a skewed distribution of PEG chain lengths (ethylene
# oxide repeat units) centered on the user-selected mean molecular weight.
PegDist <- reactive({
shiny::req(input[["PegMeanMW"]])
# Convert the requested mean MW into a mean repeat-unit count
# (MW = 44 * n + 18 for the water end groups).
mean_units <- (as.numeric(input[["PegMeanMW"]]) - 18) / 44
# Right-skewed skew-normal draw, then truncate to integer chain
# lengths strictly between 4 and 10 (i.e. n = 5..9).
draws <- as.integer(as.vector(sn::rsn(5000, (mean_units * 1.267), -1.3, alpha = 10)))
draws[draws > 4 & draws < 10]
})
# Density plot of the sampled PEG molecular-weight distribution, with a
# dashed vertical line at the realized mean MW.
output$PegPlot <- renderPlot({
n_units <- as.integer(as.vector(PegDist()))
# MW = 44 * n + 18. FIX: the original `test*44+18 %>% as.integer()` piped
# only the literal 18 because %>% binds tighter than `+`; parenthesizing
# converts the whole expression as intended (values are unchanged).
mw <- as.integer(n_units * 44 + 18)
df <- data.frame(MW = mw, N = factor(as.character(n_units)))
# Fixed x-axis breaks corresponding to n = 5..9 repeat units.
breaks <- c(238, 282, 326, 370, 414)
#Plot
ggplot(df, aes(x = MW)) +
  labs(x="Molecular Weight", y="Density", caption = paste0("Mean MW: ", round(mean(mw), 2))) +
  theme(text=element_text(family="Times New Roman", face="bold", size=14),
        plot.caption = element_text(color = "red", face = "italic", size=14)) +
  expand_limits(x = c(min(mw), max(mw))) +
  geom_vline(aes(xintercept=mean(mw)), color="blue", linetype="dashed", size=1) +
  geom_density() +
  scale_x_continuous(breaks=breaks)
})
# Reactive: assemble the full mrgsolve parameter set from the UI inputs,
# the sampled PEG molecular-weight distribution, and the fitted
# temperature-dependence coefficients extracted above.
pars <- reactive({
#invalidateLater(1000, session)
shiny::req(PegDist())
shiny::req(input[["Bact_ScaleFact_Aceto"]])
shiny::req(input[["Bact_ScaleFact_Acido"]])
shiny::req(input[["Bact_ScaleFact_Meth"]])
shiny::req(input[["Bact_ScaleFact_Bact"]])
shiny::req(input[["WT_Perc_Guar_IN"]])
shiny::req(input[["WT_Perc_PEG_IN"]])
shiny::req(input[["WT_Perc_MeOL_IN"]])
shiny::req(input[["WT_Perc_ISO_IN"]])
shiny::req(input[["wells"]])
shiny::req(input[["Temp"]])
shiny::req(input[["Head_Space_VolRatio"]])
shiny::req(input[["TempSlope"]])
shiny::req(input[["DecayRate"]])
test <- PegDist() %>% as.vector() %>% as.numeric()
# NOTE(review): %>% binds tighter than `+`, so this pipes only the literal
# 18 (a no-op) rather than the whole expression. The numeric result is the
# same either way; the intent is (test*44+18) -- confirm before changing.
test2 <- test*44+18 %>% as.vector() %>% as.numeric()
# Empirical density over unit-width bins for chain lengths 5..9 gives the
# PEG mole fractions fed to the model below.
hist1 <- graphics::hist(test,breaks=4:9,plot=F)
yA <- round(hist1$density,3); MW_PEG_In <- mean(test2)
# UI units are rescaled to model units: mg/L -> g/L (/1000) for bacteria,
# weight percent -> mass fraction (/100) for the chemical feeds.
param(Bact_ScaleFact_Aceto = (as.numeric(input[["Bact_ScaleFact_Aceto"]])/1000),
Bact_ScaleFact_Acido = (as.numeric(input[["Bact_ScaleFact_Acido"]])/1000),
Bact_ScaleFact_Meth = (as.numeric(input[["Bact_ScaleFact_Meth"]])/1000),
Bact_ScaleFact_Bact = (as.numeric(input[["Bact_ScaleFact_Bact"]])/1000),
WT_Perc_Guar_IN = (as.numeric(input[["WT_Perc_Guar_IN"]]))/100,
WT_Perc_PEG_IN = (as.numeric(input[["WT_Perc_PEG_IN"]]))/100,
WT_Perc_MeOL_IN = (as.numeric(input[["WT_Perc_MeOL_IN"]]))/100,
WT_Perc_ISO_IN = (as.numeric(input[["WT_Perc_ISO_IN"]]))/100,
wells = as.numeric(input[["wells"]]),
Temp = as.numeric(input[["Temp"]]),
TempSlope = as.numeric(input[["TempSlope"]]),
DecayRate = as.numeric(input[["DecayRate"]]),
Head_Space_VolRatio = as.numeric(input[["Head_Space_VolRatio"]]),
molFracPEG9 = yA[5], molFracPEG8 = yA[4], molFracPEG7 = yA[3],
molFracPEG6 = yA[2], molFracPEG5 = yA[1], MW_PEG_In = MW_PEG_In,
Intercept_Aceto_vmax=as.numeric(coef_Aceto_vmax[1]),a_Aceto_vmax=as.numeric(coef_Aceto_vmax[2]),
b_Aceto_vmax=as.numeric(coef_Aceto_vmax[3]),c_Aceto_vmax=as.numeric(coef_Aceto_vmax[4]),
Intercept_Aceto_km=as.numeric(coef_Aceto_km[1]),a_Aceto_km=as.numeric(coef_Aceto_km[2]),
b_Aceto_km=as.numeric(coef_Aceto_km[3]),c_Aceto_km=as.numeric(coef_Aceto_km[4]),
Intercept_Meth_vmax=as.numeric(coef_Meth_vmax[1]),a_Meth_vmax=as.numeric(coef_Meth_vmax[2]),
b_Meth_vmax=as.numeric(coef_Meth_vmax[3]),c_Meth_vmax=as.numeric(coef_Meth_vmax[4]),
Intercept_Meth_km=as.numeric(coef_Meth_km[1]),a_Meth_km=as.numeric(coef_Meth_km[2]),
b_Meth_km=as.numeric(coef_Meth_km[3]),c_Meth_km=as.numeric(coef_Meth_km[4])
)
})
# Reactive: map the human-readable sensitivity-parameter choice from the UI
# onto the corresponding mrgsolve model parameter name. Returns NULL for
# normal (non-sensitivity) simulations.
SensParam <- reactive({
shiny::req(input[["simType"]])
simType <- try({as.character(input[["simType"]])},silent = F)
if (simType != "Normal") {
  shiny::req(input[["SensParam"]])
  choice <- as.character(input[["SensParam"]])
  # Display label -> model parameter name (replaces a 12-branch if chain).
  param_map <- c(
    "Acidogen Conc." = "Bact_ScaleFact_Acido",
    "Acetogen Conc." = "Bact_ScaleFact_Aceto",
    "Methanogen Conc." = "Bact_ScaleFact_Meth",
    "Bacteroid Conc." = "Bact_ScaleFact_Bact",
    "Head Space Ratio" = "Head_Space_VolRatio",
    "Temperature" = "Temp",
    "Number of Wells" = "wells",
    "WT % of Guar Gum" = "WT_Perc_Guar_IN",
    "WT % of PEG-400" = "WT_Perc_PEG_IN",
    "WT % of Methanol" = "WT_Perc_MeOL_IN",
    "WT % of Isopropanol" = "WT_Perc_ISO_IN",
    "Bacteria Decay Rate" = "DecayRate"
  )
  # [[ ]] errors on an unknown label, matching the original chain's
  # failure mode (it errored on an unmatched choice too).
  return(param_map[[choice]])
} else {
  return(NULL)
}
})
# Reactive: the low / median / high values of the varied parameter for a
# sensitivity analysis, in model units, sorted ascending.
SensRangeR <- reactive({
shiny::req(input[["simType"]])
simType <- try({as.character(input[["simType"]])},silent = F)
if (simType == "Sensitivity Analysis") {
  shiny::req(input[["SensParam"]])
  shiny::req(input[["SensRange"]])
  SensParam <- SensParam() %>% as.character()
  shiny::req(input[[SensParam]])
  MedVal <- as.numeric(input[[SensParam]])
  # Slider range is in percent; convert to a +/- fraction.
  SensRange <- as.numeric(input[["SensRange"]])/100
  # Rescale the raw UI value to model units (the UI shows weight percents
  # and mg/L; the model uses mass fractions and g/L).
  wt_params <- c("WT_Perc_Guar_IN","WT_Perc_PEG_IN","WT_Perc_MeOL_IN","WT_Perc_ISO_IN")
  bact_params <- c("Bact_ScaleFact_Acido","Bact_ScaleFact_Aceto","Bact_ScaleFact_Meth","Bact_ScaleFact_Bact")
  if (SensParam %in% wt_params) {
    MedVal <- MedVal/100
  } else if (SensParam %in% bact_params) {
    MedVal <- MedVal/1000
  }
  sort(c(MedVal, SensRange*MedVal + MedVal))
}
})
# Reactive: kinetic-rate variability matrix. The CV% slider is converted to
# a variance and written onto the diagonal of the model's Bact_kinetics
# omega matrix; row names are restored from the model labels.
omega_Kinetics <- reactive({
shiny::req(input[["kinetic_var"]])
cv_fraction <- as.numeric(input[["kinetic_var"]]) / 100
mat <- as.matrix(omat(mod)@data$Bact_kinetics)
diag(mat) <- cv_fraction^2
row.names(mat) <- as.character(unlist(omat(mod)@labels[[1]]))
mat
})
# Reactive: bacteria-yield variability matrix, built the same way as the
# kinetics matrix but from the yield CV% slider and the Bact_yields block.
omega_Yields <- reactive({
shiny::req(input[["yield_var"]])
cv_fraction <- as.numeric(input[["yield_var"]]) / 100
mat <- as.matrix(omat(mod)@data$Bact_yields)
diag(mat) <- cv_fraction^2
row.names(mat) <- as.character(unlist(omat(mod)@labels[[2]]))
mat
})
# Reactive: build the mrgsolve time grid for the simulation. A fine base
# step is combined with extra sample times that thin out at later hours to
# keep long simulations fast. (The cutOff slider is bounded at 25-300 h.)
TimeSolv <- reactive({
shiny::req(input[["cutOff"]])
cutOffTime <- as.numeric(input[["cutOff"]])
delta <- 0.1  # base output step (h)
# Extra observation times, denser early where the dynamics are fastest.
# Conditions are scalar, so use && (the original used elementwise &).
if (cutOffTime >= 100) {
  addVal <- sort(unique(c(0.25,0.5,1:70,seq(72.5,82.5,by=2.5),seq(85,100,by=5),seq(100,cutOffTime,10))))
} else if (cutOffTime >= 80) {
  addVal <- sort(unique(c(0.25,0.5,1:80,seq(80,cutOffTime,2.5))))
} else {
  addVal <- sort(unique(c(0.25,0.5,1:cutOffTime)))
}
tgrid(0, cutOffTime, delta, add = addVal)
})
# Reactive: run the simulation(s) and return the mrgsolve output object.
# Both branches retry the ODE solve with a growing maxsteps budget until
# the solver returns a usable object (>10 columns) or the budget is
# exhausted, updating the progress bar on each attempt.
out <- reactive({
shiny::req(input[["cutOff"]])
shiny::req(input[["nSim"]])
shiny::req(input[["simType"]])
shiny::req(TimeSolv())
shiny::req(omega_Kinetics())
shiny::req(omega_Yields())
shiny::req(pars())
cutOffTime <- isolate(as.numeric(input[["cutOff"]]))
nSim <- as.numeric(input[["nSim"]])
simType <- as.character(input[["simType"]])
T1 <- isolate(TimeSolv())
# If Normal Simulation
if(simType=="Normal"){
Bact_kinetics <- isolate(omega_Kinetics())
Bact_yields <- isolate(omega_Yields())
prog <- 20; print(paste(prog,"%"))
withProgress(message = 'Compiling and Simulating Model',
detail = paste('This may take a while...',prog,"%"), value = 20, max=100,
{
# Retry loop: each failed solve adds 60k steps, capped at 320k.
Normal_Sim <- function(pars,T1,cutOffTime,nSim){
hmax=0.1; maxsteps=80000; prog <- 20
repeat{
# set.seed(10101)
outDat <- try({mod %>% param(pars) %>% omat(Bact_kinetics=Bact_kinetics,Bact_yields=Bact_yields) %>%
mrgsim(nid=nSim,tgrid=T1,end=cutOffTime,atol = 1E-10,maxsteps=maxsteps,hmax = hmax)},silent = T)
if (length(names(outDat)) >10 | maxsteps >= 320000){
prog <- 100
setProgress(value = prog, detail = paste('This may take a while...',prog,"%"))
break}
# hmax <- round(hmax/1.3, 3)
maxsteps <- maxsteps + 60000
prog <- prog + 20
print(paste(prog,"%"))
setProgress(value = prog, detail = paste('This may take a while...',prog,"%"))
}
return(outDat)
}
pars <- as.list(isolate(pars()))
outDat <- Normal_Sim(pars,T1,cutOffTime,nSim) }) #End progress bar
# If Sensitivity Analysis
}else if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
Bact_kinetics <- isolate(omega_Kinetics())
Bact_yields <- isolate(omega_Yields())
SensParam <- SensParam() %>% as.character()
SensVals <- SensRangeR() %>% as.numeric()
prog <- 20
withProgress(message = 'Compiling and Simulating Model',
detail = paste('This may take a while...',prog,"%"), value = 20, max=100,
{
#idata_set function
# Same retry pattern as Normal_Sim, but an idata set replicates each
# sensitivity value nSim times. wrapr::let + rlang splice the varied
# parameter's NAME into the expand.ev()/carry.out() calls.
Sensitivity_Sim <- function(T1,cutOffTime,nSim,SensVals){
hmax=0.1; maxsteps=60000
repeat{
solveModel <- function(SensParam,T1,cutOffTime,nSim,SensVals){
SensParam <- qc(.(SensParam))
wrapr::let(
c(SensParam2=substitute(SensParam)),{
datafile2 <- rlang::expr(expand.ev(SensParam2=sort(rep(SensVals,nSim)))) %>% rlang::eval_tidy()
rlang::expr(mod %>% idata_set(datafile2) %>% omat(Bact_kinetics=Bact_kinetics,Bact_yields=Bact_yields) %>% carry.out(SensParam2) %>%
mrgsim(end=cutOffTime,tgrid=T1,atol = 1E-10,maxsteps=maxsteps,hmax = hmax)) %>% rlang::eval_tidy()})
}
outDat <- try({solveModel(SensParam,T1,cutOffTime,nSim,SensVals)},silent = T)
if (length(names(outDat)) >10 | maxsteps >= 210000){
prog <- 100
setProgress(value = prog,detail = paste('This may take a while...',prog,"%"))
break}
# hmax <- 0.001
maxsteps <- maxsteps + 50000
prog <- prog + 20
setProgress(value = prog,detail = paste('This may take a while...',prog,"%"))
}
return(outDat)
}
outDat <- Sensitivity_Sim(T1,cutOffTime,nSim,SensVals) }) #End progress bar
}
# Side effect: also publish the result as a global `out` for debugging /
# downstream code outside this reactive.
assign("out",outDat,envir = globalenv())
outDat
}) # End reactive
# Alert the user when the solver failed on every retry attempt (out()
# returned NULL or an object with too few columns to be a simulation).
observe(priority = 4,{
tmpOut <- out()
# Scalar test: short-circuit with || (original used elementwise |), so
# length(names(...)) is not evaluated when tmpOut is NULL.
if(is.null(tmpOut) || length(names(tmpOut)) < 3){
sendSweetAlert(
session = session,
title = "Error: Model could not compile with chosen parameters",
text = tags$div(
p("Please choose a new set of parameters or decrease kinetic variability."),
p("The model will attempt to recompile once any parameter has been changed.")),
type = "error",
html = TRUE
)
}
})
# Reactive: name of the parameter varied across simulations -- the chosen
# sensitivity parameter, or whatever mrgsolve reports via knobs() for a
# normal run (a try-error if nothing varies).
varyParam <- reactive({
shiny::req(input[["simType"]])
simType <- try({as.character(input[["simType"]])},silent = F)
if (simType == "Sensitivity Analysis") {
shiny::req(input[["SensParam"]])
as.character(SensParam())
} else {
try({knobs(out())}, silent = TRUE)
}
})
# Reactive: class of the varied-parameter value.
# NOTE(review): varyParam is coerced with as.character() first, so class()
# here is always "character" -- possibly the intent was the class of the
# raw knobs()/try() result before coercion; confirm against downstream use.
varyParamClass <- reactive({
# Registers a reactive dependency on out() even though the value itself
# is unused in this body.
out <- out()
varyParam <- varyParam() %>% as.character()
varyParamClass <- try({
class(varyParam) %>% as.character
},silent=T)
return(varyParamClass)
})
# Reactive: selected confidence-interval display style
# ("Confidence Band" or "Error Bars").
confInterval <- reactive({
as.character(input[["confInterval"]])
})
#Standard Deviation Function (For Table)
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=T,
conf.interval=.95, .drop=TRUE) {
# New version of length which can handle NA's: if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)}
# This does the summary. For each group's data frame, return a vector with N, mean, and sd
datac <- ddply(data, groupvars, .drop=.drop,.fun = function(xx, col) {
c(N = length2(xx[[col]], na.rm=na.rm),
mean = mean(xx[[col]], na.rm=na.rm),
sd = stats::sd(xx[[col]], na.rm=na.rm))},measurevar)
datac <- plyr::rename(datac, c("mean" = measurevar)) # Rename the "mean" column
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
#### Render Component Data Table
output$sum1 <- renderTable({
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
simType <- as.character(input[["simType"]])
out <- out()
Inputs_Main <- c("H2O", "GUAR", "GUAR_Conc","Conc_PEG9","Pressure_atm","AVG_PEG_MW","Conc_METHANOL","Conc_ISOPROPANOL")
Outputs_Main <- c("H2O", "GUAR", "GUAR_Conc", "H2_GAS", "CO2_GAS", "CH4_GAS","H2_LIQ", "CO2_LIQ", "CH4_LIQ")
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()#try({knobs(out)},silent=T)
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
SumDat <- out %>% dplyr::select(ID,time,varyParam,eval(Inputs_Main),eval(Outputs_Main)) %>% as.data.frame
if(varyParam=="WT_Perc_Guar_IN"|varyParam=="WT_Perc_PEG_IN"|varyParam=="WT_Perc_MeOL_IN"|varyParam=="WT_Perc_ISO_IN"){
SumDat[varyParam] <- round(SumDat[varyParam]*100,3)
}
}else{
SumDat <- out %>% dplyr::select(ID,time,eval(Inputs_Main),eval(Outputs_Main)) %>% as.data.frame
}
SumDat <- SumDat %>% dplyr::filter(time==0 | time==cutOffTime)
for(i in 1:dim(SumDat)[1]){
if(SumDat$time[i]==0){
row.names(SumDat)[i] <- paste("Input", " ID =",SumDat$ID[i])
}else if(SumDat$time[i]==cutOffTime){
row.names(SumDat)[i] <- paste("Output", "ID =",SumDat$ID[i])}
SumDat$TotalMol[i] <- SumDat$H2O[i]+SumDat$GUAR[i]+SumDat$H2_GAS[i]+SumDat$CO2_GAS[i]+
SumDat$CH4_GAS[i]+SumDat$H2_LIQ[i]+SumDat$CO2_LIQ[i]+SumDat$CH4_LIQ[i]+SumDat$CH4_LIQ[i]
}
SumDat$GUAR <- round(SumDat$GUAR/6600,3) #divide by n_bonds
SumDat$H2_GAS <- round(SumDat$H2_GAS,3);SumDat$CO2_GAS <- round(SumDat$CO2_GAS,3)
ModelErrorPerc <<- round((abs(SumDat$TotalMol[1]-SumDat$TotalMol[2])/SumDat$TotalMol[1])*100,3)
SumDat2 <- SumDat %>% dplyr::select(everything(),-c(ID,time,H2_LIQ,CO2_LIQ,CH4_LIQ,TotalMol)) %>%
dplyr::rename("Guar Gum (mol)"=GUAR,"Guar Gum (g/L)"=GUAR_Conc, "H2 (mol-gas)"=H2_GAS,
"CO2 (mol-gas)"=CO2_GAS, "CH4 (mol-gas)"=CH4_GAS,"Total Pressure (atm)"=Pressure_atm,
"Average PEG MW (g/mol)"=AVG_PEG_MW,"PEG-9 (mol/L)"=Conc_PEG9,"Methanol (mol/L)"=Conc_METHANOL,
"Isopropanol (mol/L)"=Conc_ISOPROPANOL)
SumDat2$H2O <- round(SumDat2$H2O*18/1000,2); SumDat2 <- SumDat2 %>% dplyr::rename("Water (L)"=H2O)
SumDat2$"Guar Gum (g/L)" <- round(SumDat2$"Guar Gum (g/L)",3)
SumDat2$"Total Pressure (atm)" <- round(SumDat2$"Total Pressure (atm)",2)
SumDat2$"Average PEG MW (g/mol)" <- round(SumDat2$"Average PEG MW (g/mol)",2)
if(simType=="Normal" & n_distinct(out$ID)==1){
row.names(SumDat2) <- c("Input","Output")}
SumDat_Chem <- SumDat2; SumDat_Chem <<- SumDat_Chem
SumDat_Chem
},rownames = TRUE)
#### Render Bacteria Data Table
# Bacteria summary table: input (t = 0) vs output (t = cutoff)
# concentrations for each of the four bacteria groups.
output$sum2 <- renderTable({
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
simType <- as.character(input[["simType"]])
out <- out()
# NOTE(review): Inputs_Bact and Outputs_Bact are identical lists; the
# select() below therefore keeps each column once.
Inputs_Bact <- c("Conc_ACIDOGEN","Conc_ACETOGEN","Conc_METHANOGEN","Conc_BACTEROID")
Outputs_Bact <- c("Conc_ACIDOGEN","Conc_ACETOGEN","Conc_METHANOGEN","Conc_BACTEROID")
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
varyParam <- varyParam %>% as.character()
SumDat <- out %>% dplyr::select(ID,time,all_of(varyParam),eval(Inputs_Bact),eval(Outputs_Bact)) %>% as.data.frame
# Weight-percent parameters are stored as fractions; display as percents.
if(varyParam=="WT_Perc_Guar_IN"|varyParam=="WT_Perc_PEG_IN"|varyParam=="WT_Perc_MeOL_IN"|varyParam=="WT_Perc_ISO_IN"){
SumDat[varyParam] <- round(SumDat[varyParam]*100,3)
}
}else{
SumDat <- out %>% dplyr::select(ID,time,eval(Inputs_Bact),eval(Outputs_Bact)) %>% as.data.frame
}
# Keep only the first and last time points, then label rows by role and ID.
SumDat <- SumDat %>% dplyr::filter(time==0 | time==cutOffTime)
for(i in 1:dim(SumDat)[1]){
if(SumDat$time[i]==0){
row.names(SumDat)[i] <- paste("Input", " ID =",SumDat$ID[i])
}else if(SumDat$time[i]==cutOffTime){
row.names(SumDat)[i] <- paste("Output", "ID =",SumDat$ID[i])}}
SumDat2 <- SumDat %>% dplyr::select(everything(),-c(ID,time))
SumDat2$Conc_ACIDOGEN <- round(SumDat2$Conc_ACIDOGEN,2); SumDat2$Conc_ACETOGEN <- round(SumDat2$Conc_ACETOGEN,2)
SumDat2$Conc_METHANOGEN <- round(SumDat2$Conc_METHANOGEN,2); SumDat2$Conc_BACTEROID <- round(SumDat2$Conc_BACTEROID,2)
SumDat2 <- SumDat2 %>% dplyr::rename("Acidogens (g/L)"=Conc_ACIDOGEN, "Acetogens (g/L)"=Conc_ACETOGEN,
"Methanogens (g/L)"=Conc_METHANOGEN,"Bacteroides (g/L)"=Conc_BACTEROID)
# Single deterministic run: simpler two-row labels.
if(simType=="Normal" & n_distinct(out$ID)==1){
row.names(SumDat2) <- c("Input","Output")
}
SumDat_Bact <- SumDat2
SumDat_Bact
},rownames = TRUE)
# Render Model Error
output$modelError <- renderText({
ErrorMessage <- paste("Model Error in Mole Balance: <b>",ModelErrorPerc,"% </b>")
HTML(paste(ErrorMessage))
})
# }) #end observe
# Plots --------------------------------------------------------------
# Data preparation for the plot tabs (priority = 3 so it runs ahead of the
# render blocks). Slices the simulation output `out()` into five
# topic-specific data frames and publishes them as GLOBALS via `<<-`
# (MainProdDat, IntermediateDat, PEG_Dat, SystemDat, BacteriaDat) so the
# renderPlot blocks below can find them with exists().
# NOTE(review): globals assigned from an observer are shared across all
# sessions in a multi-user deployment — confirm this app is single-user.
observe(priority=3,{
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
simType <- as.character(input[["simType"]])
out <- out() %>% as.data.frame()
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()#try({knobs(out)},silent=T)
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
#Plot Datasets
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
# In sensitivity mode the varied parameter column must be carried along in
# every dataset so the plots can group/color by it.
varyParam <- SensParam() %>% as.character()
withProgress(message = 'Summarizing Data',
detail = 'This may take a while...', value = 0, max=100,
{
# Main products: guar gum, average PEG MW, gases, methanol, isopropanol.
MainProdDat <- out %>% dplyr::select(ID,time,all_of(varyParam),GUAR_Conc,AVG_PEG_MW,CH4_GAS, H2_GAS, CO2_GAS,Conc_METHANOL,Conc_ISOPROPANOL) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Guar Gum (g/L)"=GUAR_Conc, "AVG PEG MW (g/mol)"=AVG_PEG_MW,"Methanol (mol/L)"=Conc_METHANOL,
"Isopropanol (mol/L)"=Conc_ISOPROPANOL,"H2 (mol-gas)"=H2_GAS,"CO2 (mol-gas)"=CO2_GAS,
"CH4 (mol-gas)"=CH4_GAS) %>% as.data.frame()
setProgress(20)
# Guar-gum degradation intermediates (glucose, ethanol, acids, acetate).
IntermediateDat <- out %>% dplyr::select(ID,time,all_of(varyParam),GUAR_Conc,Conc_GLUCOSE,Conc_ETHANOL,Conc_PropAcid,Conc_ACETATE,CH4_GAS) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Guar Gum (g/L)"=GUAR_Conc,"CH4 (mol-gas)"=CH4_GAS, "Glucose (mol/L)"=Conc_GLUCOSE,
"Ethanol (mol/L)"=Conc_ETHANOL,"Propanoic Acid (mol/L)"=Conc_PropAcid,
"Acetate (mol/L)"=Conc_ACETATE) %>% as.data.frame()
setProgress(45)
# PEG chain-shortening intermediates PEG-9 down to EG plus acetaldehyde.
PEG_Dat <- out %>% dplyr::select(ID,time,all_of(varyParam),Conc_PEG9,Conc_PEG8,Conc_PEG7,Conc_PEG6,Conc_PEG5,Conc_PEG4,Conc_PEG3,Conc_DEG,Conc_EG,Conc_AcetHyde,Conc_ACETATE,CH4_GAS) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("PEG-9 (mol/L)"=Conc_PEG9,"PEG-8 (mol/L)"=Conc_PEG8,"PEG-7 (mol/L)"=Conc_PEG7,
"PEG-6 (mol/L)"=Conc_PEG6,"PEG-5 (mol/L)"=Conc_PEG5,"PEG-4 (mol/L)"=Conc_PEG4,
"PEG-3 (mol/L)"=Conc_PEG3,"DEG (mol/L)"=Conc_DEG,"EG (mol/L)"=Conc_EG,
"Acetaldehyde (mol/L)"=Conc_AcetHyde, "Acetate (mol/L)"=Conc_ACETATE,
"CH4 (mol-gas)"=CH4_GAS) %>% as.data.frame()
setProgress(75)
# Reactor-level state: water, liquid volume, pressure, temperature.
SystemDat <- out %>% dplyr::select(ID,time,all_of(varyParam),H2O,V_TOT,Pressure_atm,Temp2) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Total Pressure (atm)"=Pressure_atm,
"Liquid Volume (% Change)"=V_TOT,"Water (% Change)"=H2O,"Temperature (C)" = Temp2) %>% as.data.frame()
# Volume and water are normalized to their first row, i.e. percent of the
# initial value (100 = unchanged).
SystemDat$"Liquid Volume (% Change)" <- (SystemDat$"Liquid Volume (% Change)"/SystemDat$"Liquid Volume (% Change)"[1])*100
SystemDat$"Water (% Change)" <- (SystemDat$"Water (% Change)"/SystemDat$"Water (% Change)"[1])*100
setProgress(85)
# Microbial biomass concentrations for the four modeled populations.
BacteriaDat <- out %>% dplyr::select(ID,time,all_of(varyParam),Conc_ACIDOGEN,Conc_ACETOGEN,Conc_METHANOGEN,Conc_BACTEROID) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Acidogen Biomass (g/L)"=Conc_ACIDOGEN, "Acetogen Biomass (g/L)"=Conc_ACETOGEN,
"Methanogen Biomass (g/L)"=Conc_METHANOGEN,"Bacteroid Biomass (g/L)"=Conc_BACTEROID) %>% as.data.frame()
# Weight-percent inputs are stored as fractions; rescale to percent for
# display when the varied parameter is one of the WT_Perc_* inputs.
if(varyParam=="WT_Perc_Guar_IN"|varyParam=="WT_Perc_PEG_IN"|varyParam=="WT_Perc_MeOL_IN"|varyParam=="WT_Perc_ISO_IN"){
MainProdDat[varyParam] <- round(MainProdDat[varyParam]*100,3)
IntermediateDat[varyParam] <- round(IntermediateDat[varyParam]*100,3)
PEG_Dat[varyParam] <- round(PEG_Dat[varyParam]*100,3)
SystemDat[varyParam] <- round(SystemDat[varyParam]*100,3)
BacteriaDat[varyParam] <- round(BacteriaDat[varyParam]*100,3)
}
# Publish the prepared datasets to the global environment for the
# renderPlot blocks (which check exists("...") before plotting).
MainProdDat <<- MainProdDat
IntermediateDat <<- IntermediateDat
PEG_Dat <<- PEG_Dat
SystemDat <<- SystemDat
BacteriaDat <<- BacteriaDat
setProgress(100)
})
}else{
# Normal (non-sensitivity) mode: identical pipelines, minus the varied
# parameter column.
withProgress(message = 'Summarizing Data',
detail = 'This may take a while...', value = 0, max=100,
{
MainProdDat <- out %>% dplyr::select(ID,time,GUAR_Conc,AVG_PEG_MW,CH4_GAS, H2_GAS, CO2_GAS,Conc_METHANOL,Conc_ISOPROPANOL) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Guar Gum (g/L)"=GUAR_Conc, "AVG PEG MW (g/mol)"=AVG_PEG_MW,"Methanol (mol/L)"=Conc_METHANOL,
"Isopropanol (mol/L)"=Conc_ISOPROPANOL,"H2 (mol-gas)"=H2_GAS,"CO2 (mol-gas)"=CO2_GAS,
"CH4 (mol-gas)"=CH4_GAS) %>% as.data.frame()
setProgress(20)
IntermediateDat <- out %>% dplyr::select(ID,time,GUAR_Conc,Conc_GLUCOSE,Conc_ETHANOL,Conc_PropAcid,Conc_ACETATE,CH4_GAS) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Guar Gum (g/L)"=GUAR_Conc,"CH4 (mol-gas)"=CH4_GAS, "Glucose (mol/L)"=Conc_GLUCOSE,
"Ethanol (mol/L)"=Conc_ETHANOL,"Propanoic Acid (mol/L)"=Conc_PropAcid,
"Acetate (mol/L)"=Conc_ACETATE) %>% as.data.frame()
setProgress(45)
PEG_Dat <- out %>% dplyr::select(ID,time,Conc_PEG9,Conc_PEG8,Conc_PEG7,Conc_PEG6,Conc_PEG5,Conc_PEG4,Conc_PEG3,Conc_DEG,Conc_EG,Conc_AcetHyde,Conc_ACETATE,CH4_GAS) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("PEG-9 (mol/L)"=Conc_PEG9,"PEG-8 (mol/L)"=Conc_PEG8,"PEG-7 (mol/L)"=Conc_PEG7,
"PEG-6 (mol/L)"=Conc_PEG6,"PEG-5 (mol/L)"=Conc_PEG5,"PEG-4 (mol/L)"=Conc_PEG4,
"PEG-3 (mol/L)"=Conc_PEG3,"DEG (mol/L)"=Conc_DEG,"EG (mol/L)"=Conc_EG,
"Acetaldehyde (mol/L)"=Conc_AcetHyde, "Acetate (mol/L)"=Conc_ACETATE,
"CH4 (mol-gas)"=CH4_GAS) %>% as.data.frame()
setProgress(75)
SystemDat <- out %>% dplyr::select(ID,time,H2O,V_TOT,Pressure_atm,Temp2) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Total Pressure (atm)"=Pressure_atm,
"Liquid Volume (% Change)"=V_TOT,"Water (% Change)"=H2O,"Temperature (C)" = Temp2) %>% as.data.frame()
# Normalize to percent of the initial (first-row) value.
SystemDat$"Liquid Volume (% Change)" <- (SystemDat$"Liquid Volume (% Change)"/SystemDat$"Liquid Volume (% Change)"[1])*100
SystemDat$"Water (% Change)" <- (SystemDat$"Water (% Change)"/SystemDat$"Water (% Change)"[1])*100
setProgress(85)
BacteriaDat <- out %>% dplyr::select(ID,time,Conc_ACIDOGEN,Conc_ACETOGEN,Conc_METHANOGEN,Conc_BACTEROID) %>%
dplyr::filter(time<=cutOffTime) %>% dplyr::rename("Acidogen Biomass (g/L)"=Conc_ACIDOGEN, "Acetogen Biomass (g/L)"=Conc_ACETOGEN,
"Methanogen Biomass (g/L)"=Conc_METHANOGEN,"Bacteroid Biomass (g/L)"=Conc_BACTEROID) %>% as.data.frame()
# Publish to the global environment (consumed by the plot renderers).
MainProdDat <<- MainProdDat
IntermediateDat <<- IntermediateDat
PEG_Dat <<- PEG_Dat
SystemDat <<- SystemDat
BacteriaDat <<- BacteriaDat
setProgress(100)
}) #End progress bar
}
}) #end observe
### Plot 1 (Reactor Properties)
# Plot 1: reactor system properties (pressure, liquid volume %, water %,
# temperature), one small ggplot per column of the global SystemDat built
# by the data-prep observer. Three modes:
#   - Sensitivity Analysis: mean +/- spread per time point, grouped and
#     colored by the varied parameter (summarySE per column).
#   - Normal, single run: raw points per column.
#   - Normal, multiple runs: mean +/- spread pooled across run IDs.
# The plot list (p1) and arranged grid (Plot1) are also stashed in the
# global environment, presumably for download/export handlers elsewhere.
output$plot1 <- renderPlot({
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
plotPoints <- input[["plotPoints"]] %>% as.logical()
simType <- as.character(input[["simType"]])
#Load Reactive Objects
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
confInterval <- confInterval() %>% as.character()
# SystemDat is a global created by the priority-3 observer; skip plotting
# until it exists.
if(exists("SystemDat")){
prog <- 20
withProgress(message = 'Plotting System Data',
detail = paste('This may take a while...',prog,"%"), value = 20, max=100,
{
#Sensitivity
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
varyParam <- varyParam() %>% as.character()
SensParam <- as.character(input[["SensParam"]])
# Columns 1-3 are ID, time, and the varied parameter; everything after
# is a plottable measurement.
colNames <- names(SystemDat)[4:length(SystemDat)]
p1 <- vector("list",length = length(colNames)); names(p1) <- names(SystemDat)[4:length(SystemDat)]
colScale <- scale_color_discrete(name = eval(SensParam))
#Make Plot
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p1
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
p1 <- lapply(1:length(colNames),function(i){
# summarySE is a project helper (not shown here); columns 4 and 7 are
# assumed to be the mean and the error half-width (se/ci) — TODO confirm
# against the helper's definition.
stdDevsDat <- summarySE(data=SystemDat,measurevar = colNames[i],groupvars = c("time",varyParam)) %>% as.data.frame()
yminE <- stdDevsDat[,4] - stdDevsDat[,7]
ymaxE <- stdDevsDat[,4] + stdDevsDat[,7]
# Pad y-limits by ~1% below and ~1.5% above the error envelope.
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
# NOTE(review): aes_string() is passed live vectors rather than column-name
# strings (and is deprecated in ggplot2 >= 3.0); it works here but aes()
# with .data[[...]] would be the supported form.
if(confInterval=="Error Bars"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Single Simulation
}else if(simType=="Normal" & n_distinct(SystemDat$ID)==1){
# No varied-parameter column in normal mode: measurements start at col 3.
colNames <- names(SystemDat)[3:length(SystemDat)]
p1 <- vector("list",length = length(colNames)); names(p1) <- names(SystemDat)[3:length(SystemDat)]
#Make Plot
for(i in colNames){
# Anchor the axis at zero when the series minimum rounds to zero.
if(round(min(SystemDat[,i]))==0){
yminVal <- 0
}else{yminVal <- round(min(SystemDat[,i])-min(SystemDat[,i])*.01,15)}
ymaxVal <- round(max(SystemDat[,i])+max(SystemDat[,i])*.015,15)
p1[[i]] <- ggplot(SystemDat, aes_string(x=SystemDat$time, y = SystemDat[,i])) +
geom_point(color="red") + geom_hline(yintercept=0, size=0.6, color="black") + labs(x="Time (h)",y=i) +
geom_vline(xintercept=0, size=0.6, color="black") + scale_y_continuous(limits = c(yminVal, ymaxVal)) +
theme(text=element_text(family="Times New Roman", face="bold", size=13))}
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Multiple Simulations
}else if(simType=="Normal" & n_distinct(SystemDat$ID)!=1){
colNames <- names(SystemDat)[3:length(SystemDat)]
p1 <- vector("list",length = length(colNames)); names(p1) <- colNames
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p1
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
p1 <- lapply(1:length(colNames),function(i){
# Grouped by time only (pooled over run IDs); columns 3 and 6 are assumed
# to be mean and error half-width — TODO confirm against summarySE.
stdDevsDat <- summarySE(data=SystemDat,measurevar = colNames[i],groupvars = c("time")) %>% as.data.frame()
yminE <- stdDevsDat[,3] - stdDevsDat[,6]
ymaxE <- stdDevsDat[,3] + stdDevsDat[,6]
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
if(confInterval=="Error Bars"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}
# Arrange the per-column plots in a 2-column grid; export the list and
# the arranged grob to the global environment.
p1 <<- p1; Plot1 <- grid.arrange(grobs=p1,ncol=2); assign("Plot1",Plot1,envir = globalenv()) #; print(Plot1)
grid.arrange(Plot1)
prog <- 100; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}) #End progress bar
}
},res=110)
### Plot 2 (Main Component Data)
# Plot 2: main product/component trajectories (guar gum, average PEG MW,
# CH4/H2/CO2 gas, methanol, isopropanol), one ggplot per column of the
# global MainProdDat built by the data-prep observer. Three modes:
# sensitivity (grouped/colored by the varied parameter, mean +/- spread),
# normal single run (raw points), normal multiple runs (pooled mean +/-
# spread). The plot list (p2) and arranged grid (Plot2) are also stashed
# in the global environment, presumably for export handlers elsewhere.
output$plot2 <- renderPlot({
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
plotPoints <- input[["plotPoints"]] %>% as.logical()
simType <- as.character(input[["simType"]])
#Load Reactive Objects
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
confInterval <- confInterval() %>% as.character()
# MainProdDat is a global created by the priority-3 observer; skip until
# it exists.
if(exists("MainProdDat")){
prog <- 20
withProgress(message = 'Plotting Main Product Data',
detail = paste('This may take a while...',prog,"%"), value = 20, max=100,
{
#Sensitivity
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
varyParam <- varyParam() %>% as.character()
SensParam <- as.character(input[["SensParam"]])
# Columns 1-3 are ID, time, and the varied parameter; the rest are
# measurements to plot.
colNames <- names(MainProdDat)[4:length(MainProdDat)]
p2 <- vector("list",length = length(colNames)); names(p2) <- names(MainProdDat)[4:length(MainProdDat)]
colScale <- scale_color_discrete(name = eval(SensParam))
#Make Plot
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p2
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
p2 <- lapply(1:length(colNames),function(i){
# summarySE is a project helper; columns 4 and 7 are assumed to be the
# mean and the error half-width — TODO confirm against its definition.
stdDevsDat <- summarySE(data=MainProdDat,measurevar = colNames[i],groupvars = c("time",varyParam)) %>% as.data.frame()
yminE <- stdDevsDat[,4] - stdDevsDat[,7]
ymaxE <- stdDevsDat[,4] + stdDevsDat[,7]
# Pad y-limits by ~1% below and ~1.5% above the error envelope.
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
# NOTE(review): aes_string() receives live vectors, not column-name
# strings, and is deprecated in modern ggplot2 — works but fragile.
if(confInterval=="Error Bars"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Single Simulation
}else if(simType=="Normal" & n_distinct(MainProdDat$ID)==1){
# Normal mode has no varied-parameter column: measurements start at col 3.
colNames <- names(MainProdDat)[3:length(MainProdDat)]
p2 <- vector("list",length = length(colNames)); names(p2) <- names(MainProdDat)[3:length(MainProdDat)]
#Make Plot
for(i in colNames){
# Anchor the axis at zero when the series minimum rounds to zero.
if(round(min(MainProdDat[,i]))==0){
yminVal <- 0
}else{yminVal <- round(min(MainProdDat[,i])-min(MainProdDat[,i])*.01,15)}
ymaxVal <- round(max(MainProdDat[,i])+max(MainProdDat[,i])*.015,15)
p2[[i]] <- ggplot(MainProdDat, aes_string(x=MainProdDat$time, y = MainProdDat[,i])) +
geom_point(color="red") + geom_hline(yintercept=0, size=0.6, color="black") + labs(x="Time (h)",y=i) +
geom_vline(xintercept=0, size=0.6, color="black") + scale_y_continuous(limits = c(yminVal, ymaxVal)) +
theme(text=element_text(family="Times New Roman", face="bold", size=13))}
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Multiple Simulations
}else if(simType=="Normal" & n_distinct(MainProdDat$ID)!=1){
colNames <- names(MainProdDat)[3:length(MainProdDat)]
p2 <- vector("list",length = length(colNames)); names(p2) <- colNames
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p2
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
p2 <- lapply(1:length(colNames),function(i){
# Grouped by time only (pooled over run IDs); columns 3 and 6 assumed to
# be mean and error half-width — TODO confirm against summarySE.
stdDevsDat <- summarySE(data=MainProdDat,measurevar = colNames[i],groupvars = c("time")) %>% as.data.frame()
yminE <- stdDevsDat[,3] - stdDevsDat[,6]
ymaxE <- stdDevsDat[,3] + stdDevsDat[,6]
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
if(confInterval=="Error Bars"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}
# Leftover plotly experiment kept for reference.
# for(i in 1:length(p2)){p2[[i]] <- ggplotly(p2[[i]]) %>% layout(
# xaxis = list(automargin=TRUE), yaxis = list(automargin=TRUE,tickprefix=" "),margin=list(l = 75,pad=4))}
# Plot2 <- subplot(p2,nrows = round(length(p2)/2),titleX=T,shareX = T,titleY = T)
# Arrange the per-column plots in a 2-column grid and export the list and
# arranged grob to the global environment.
p2 <<- p2; Plot2 <- grid.arrange(grobs=p2,ncol=2);
assign("Plot2",Plot2,envir = globalenv()) #; print(Plot2)
grid.arrange(Plot2)
prog <- 100; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}) #End progress bar
}
},res=110)
### Plot 3 (Intermediate Component Data for Guar Gum)
# Plot 3: guar-gum degradation intermediates (glucose, ethanol, propanoic
# acid, acetate, CH4), one ggplot per column of the global IntermediateDat
# built by the data-prep observer. Same three modes as the other plot
# renderers: sensitivity (grouped/colored by the varied parameter),
# normal single run (raw points), normal multiple runs (pooled mean +/-
# spread). The plot list (p3) and arranged grid (Plot3) are also exported
# to the global environment.
output$plot3 <- renderPlot({
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
plotPoints <- input[["plotPoints"]] %>% as.logical()
simType <- as.character(input[["simType"]])
#Load Reactive Objects
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
confInterval <- confInterval() %>% as.character()
# IntermediateDat is a global created by the priority-3 observer; skip
# until it exists.
if(exists("IntermediateDat")){
prog <- 20
withProgress(message = 'Plotting Guar Gum Intermediate Data',
detail = paste('This may take a while...',prog,"%"), value = 20, max=100,
{
#Sensitivity
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
varyParam <- varyParam() %>% as.character()
SensParam <- as.character(input[["SensParam"]])
# Columns 1-3 are ID, time, and the varied parameter; the rest are
# measurements to plot.
colNames <- names(IntermediateDat)[4:length(IntermediateDat)]
p3 <- vector("list",length = length(colNames)); names(p3) <- names(IntermediateDat)[4:length(IntermediateDat)]
colScale <- scale_color_discrete(name = eval(SensParam))
#Make Plot
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p3
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
p3 <- lapply(1:length(colNames),function(i){
# summarySE is a project helper; columns 4 and 7 are assumed to be the
# mean and the error half-width — TODO confirm against its definition.
stdDevsDat <- summarySE(data=IntermediateDat,measurevar = colNames[i],groupvars = c("time",varyParam)) %>% as.data.frame()
yminE <- stdDevsDat[,4] - stdDevsDat[,7]
ymaxE <- stdDevsDat[,4] + stdDevsDat[,7]
# Pad y-limits by ~1% below and ~1.5% above the error envelope.
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
# NOTE(review): aes_string() receives live vectors, not column-name
# strings, and is deprecated in modern ggplot2 — works but fragile.
if(confInterval=="Error Bars"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Single Simulation
}else if(simType=="Normal" & n_distinct(IntermediateDat$ID)==1){
# Normal mode has no varied-parameter column: measurements start at col 3.
colNames <- names(IntermediateDat)[3:length(IntermediateDat)]
p3 <- vector("list",length = length(colNames)); names(p3) <- names(IntermediateDat)[3:length(IntermediateDat)]
#Make Plot
for(i in colNames){
# Anchor the axis at zero when the series minimum rounds to zero.
if(round(min(IntermediateDat[,i]))==0){
yminVal <- 0
}else{yminVal <- round(min(IntermediateDat[,i])-min(IntermediateDat[,i])*.01,15)}
ymaxVal <- round(max(IntermediateDat[,i])+max(IntermediateDat[,i])*.015,15)
p3[[i]] <- ggplot(IntermediateDat, aes_string(x=IntermediateDat$time, y = IntermediateDat[,i])) +
geom_point(color="red") + geom_hline(yintercept=0, size=0.6, color="black") + labs(x="Time (h)",y=i) +
geom_vline(xintercept=0, size=0.6, color="black") + scale_y_continuous(limits = c(yminVal, ymaxVal)) +
theme(text=element_text(family="Times New Roman", face="bold", size=13))}
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Multiple Simulations
}else if(simType=="Normal" & n_distinct(IntermediateDat$ID)!=1){
colNames <- names(IntermediateDat)[3:length(IntermediateDat)]
p3 <- vector("list",length = length(colNames)); names(p3) <- colNames
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p3
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
p3 <- lapply(1:length(colNames),function(i){
# Grouped by time only (pooled over run IDs); columns 3 and 6 assumed to
# be mean and error half-width — TODO confirm against summarySE.
stdDevsDat <- summarySE(data=IntermediateDat,measurevar = colNames[i],groupvars = c("time")) %>% as.data.frame()
yminE <- stdDevsDat[,3] - stdDevsDat[,6]
ymaxE <- stdDevsDat[,3] + stdDevsDat[,6]
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
if(confInterval=="Error Bars"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}
# Arrange the per-column plots in a 2-column grid and export the list and
# arranged grob to the global environment.
p3 <<- p3; Plot3 <- grid.arrange(grobs=p3,ncol=2); assign("Plot3",Plot3,envir = globalenv()) #; print(Plot3)
grid.arrange(Plot3)
prog <- 100; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}) #End progress bar
}
},res=110)
### Plot 4 (Intermediate Component Data for PEG)
# Renders the PEG-400 intermediate-product figure. Reads the data frame
# `PEG_Dat` (and `MainProdDat` in the multi-simulation branch) from the
# global environment -- both are created elsewhere by the simulation code.
# Three display modes, selected from the reactive inputs:
#   * "Sensitivity Analysis"      : one panel per PEG column, colored by the varied parameter
#   * "Normal" with a single run  : one scatter panel per PEG column
#   * "Normal" with multiple runs : summarized intermediate concentrations plus average PEG MW
# Side effects: assigns `p4` and `Plot4` into the global environment so they
# can be reused outside this reactive context.
output$plot4 <- renderPlot({
# Block rendering until the simulation output and the required inputs exist
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
simType <- as.character(input[["simType"]])
plotPoints <- input[["plotPoints"]] %>% as.logical()
#Load Reactive Objects
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
confInterval <- confInterval() %>% as.character()
# Only plot once the simulation has populated the global PEG data
if(exists("PEG_Dat")){
prog <- 20
withProgress(message = 'Plotting PEG-400 Intermediate Data',
detail = paste('This may take a while...',prog,"%"), value = 20, max=100,
{
#Sensitivity
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
varyParam <- varyParam() %>% as.character()
SensParam <- as.character(input[["SensParam"]])
# Measurement columns apparently start at position 4 -- TODO confirm PEG_Dat layout
colNames <- names(PEG_Dat)[4:length(PEG_Dat)]
p4 <- vector("list",length = length(colNames)); names(p4) <- names(PEG_Dat)[4:length(PEG_Dat)]
# NOTE(review): eval() on a character scalar is a no-op; `name = SensParam` would suffice
colScale <- scale_color_discrete(name = eval(SensParam))
#Make Plot
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p4
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
# Build one ggplot per measurement column; summarySE() supplies mean and CI half-width
p4 <- lapply(1:length(colNames),function(i){
stdDevsDat <- summarySE(data=PEG_Dat,measurevar = colNames[i],groupvars = c("time",varyParam)) %>% as.data.frame()
# Columns 4 and 7 are taken as the mean and CI half-width -- TODO confirm against summarySE()
yminE <- stdDevsDat[,4] - stdDevsDat[,7]
ymaxE <- stdDevsDat[,4] + stdDevsDat[,7]
# Pad the y limits by ~1-1.5% so the error bars / ribbons are not clipped
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
# NOTE(review): aes_string() is deprecated and is being passed vectors rather than
# column-name strings throughout this block; it happens to behave like aes() here,
# but consider migrating to aes() with .data[[...]].
if(confInterval=="Error Bars"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Single Simulation
}else if(simType=="Normal" & n_distinct(PEG_Dat$ID)==1){
# In single-run mode, measurement columns start at position 3 (no vary-param column)
colNames <- names(PEG_Dat)[3:length(PEG_Dat)]
p4 <- vector("list",length = length(colNames)); names(p4) <- names(PEG_Dat)[3:length(PEG_Dat)]
#Make Plot
# Simple red scatter per column; floor the y axis at 0 when the minimum rounds to 0
for(i in colNames){
if(round(min(PEG_Dat[,i]))==0){
yminVal <- 0
}else{yminVal <- round(min(PEG_Dat[,i])-min(PEG_Dat[,i])*.01,15)}
ymaxVal <- round(max(PEG_Dat[,i])+max(PEG_Dat[,i])*.015,15)
p4[[i]] <- ggplot(PEG_Dat, aes_string(x=PEG_Dat$time, y = PEG_Dat[,i])) +
geom_point(color="red") + geom_hline(yintercept=0, size=0.6, color="black") + labs(x="Time (h)",y=i) +
geom_vline(xintercept=0, size=0.6, color="black") + scale_y_continuous(limits = c(yminVal, ymaxVal)) +
theme(text=element_text(family="Times New Roman", face="bold", size=13))}
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Multiple Simulations
}else if(simType=="Normal" & n_distinct(PEG_Dat$ID)!=1){
# Long format for all intermediates except CH4; PEG MW summarized separately from MainProdDat
PEG_Dat1 <- PEG_Dat %>% dplyr::select(everything(),-c("CH4 (mol-gas)")) %>% tidyr::gather(key=key,value=value,-ID,-time)
PEG_Dat2 <- MainProdDat %>% dplyr::select(ID,time,"AVG PEG MW (g/mol)")
stdDevsDat <- summarySE(data=PEG_Dat1,measurevar = "value",groupvars = c("key","time")) %>% as.data.frame()
stdDevsDat2 <- summarySE(data=PEG_Dat2,measurevar = "AVG PEG MW (g/mol)",groupvars = c("time")) %>% as.data.frame()
yminE <- stdDevsDat[,4] - stdDevsDat[,7]; yminE2 <- stdDevsDat2[,3] - stdDevsDat2[,6]
ymaxE <- stdDevsDat[,4] + stdDevsDat[,7]; ymaxE2 <- stdDevsDat2[,3] + stdDevsDat2[,6]
yminVal <- round(min(yminE)-min(yminE)*.01,15); yminVal2 <- round(min(yminE2)-min(yminE2)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15); ymaxVal2 <- round(max(ymaxE2)+max(ymaxE2)*.015,15)
yLimits <- c(yminVal, ymaxVal); yLimits2 <- c(yminVal2, ymaxVal2)
# NOTE(review): this overwrites the logical `plotPoints` read from input above
# with a character vector of keys; the earlier flag is lost in this branch -- verify intent.
plotPoints <- unique(PEG_Dat1$key)
p4 <- vector("list",length = 2); names(p4) <- c("Intermediate Products","AVG MW Weight")
prog <- 40; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Make Plot
if(confInterval=="Error Bars"){
p4[[1]] <- ggplot(stdDevsDat, aes(y=value, x=time, group=key)) + scale_y_continuous(limits = yLimits) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
geom_line(aes(color=key), size=1) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y="Concentration (mol/L)") + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13)) +
scale_colour_discrete(name="Polyethylene Glycol (PEG) \nIntermediate Product")
p4[[2]] <- ggplot(stdDevsDat2, aes_string(y=stdDevsDat2$"AVG PEG MW (g/mol)", x=stdDevsDat2$time)) +
geom_errorbar(color="black",aes(ymin=yminE2,ymax=ymaxE2),width=2.5) + scale_y_continuous(limits = yLimits2) +
geom_line(size=1) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y="Average PEG MW") + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else if(confInterval=="Confidence Band"){
p4[[1]] <- ggplot(stdDevsDat, aes(y=value, x=time, group=key)) + scale_y_continuous(limits = yLimits) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat$key)),alpha=0.3,show.legend = FALSE) +
geom_line(aes(color=key), size=1) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y="Concentration (mol/L)") + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13)) +
scale_colour_discrete(name="Polyethylene Glycol (PEG) \nIntermediate Product")
p4[[2]] <- ggplot(stdDevsDat2, aes_string(y=stdDevsDat2$"AVG PEG MW (g/mol)", x=stdDevsDat2$time)) +
geom_ribbon(aes_string(ymin=yminE2,ymax=ymaxE2),alpha=0.3,show.legend = FALSE) + scale_y_continuous(limits = yLimits2) +
geom_line(size=1) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y="Average PEG MW") + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#p4[[1]] + geom_point(data=subset(PEG_Dat3[PEG_Dat3$key==plotPoints[1],]), color='black', shape=18, size=2)
}
# Export plot list + arranged grob to the global environment for reuse elsewhere
p4 <<- p4;
if(simType=="Normal" & n_distinct(PEG_Dat$ID)!=1){
Plot4 <- grid.arrange(grobs=p4,ncol=1,heights=c(2,1.5)); assign("Plot4",Plot4,envir = globalenv())
}else{
Plot4 <- grid.arrange(grobs=p4,ncol=2); assign("Plot4",Plot4,envir = globalenv())
}
grid.arrange(Plot4)
prog <- 100; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}) #End progress bar
}
},res=110)
### Plot 5 (Bacteria Data)
# Renders the bacteria-growth figure from the global data frame `BacteriaDat`
# (created elsewhere by the simulation code). Mirrors the structure of the
# other plot outputs: sensitivity mode (colored by varied parameter),
# single-run mode (scatter per column), or multi-run mode (mean +/- CI per column).
# Side effects: assigns `p5` and `Plot5` into the global environment.
output$plot5 <- renderPlot({
# Block rendering until the simulation output and the required inputs exist
shiny::req(out())
shiny::req(input[["cutOff"]])
shiny::req(input[["simType"]])
plotPoints <- input[["plotPoints"]] %>% as.logical()
simType <- as.character(input[["simType"]])
#Load Reactive Objects
varyParamClass <- varyParamClass() %>% as.character()
varyParam <- varyParam() %>% as.character()
cutOffTime <- as.numeric(input[["cutOff"]]) #100 #hours (4.167 days)
confInterval <- confInterval() %>% as.character()
# Only plot once the simulation has populated the global bacteria data
if(exists("BacteriaDat")){
prog <- 20
withProgress(message = 'Plotting Bacteria Data',
detail = paste('This may take a while...',prog,"%"), value = 20, max=100,
{
#Sensitivity
if(simType=="Sensitivity Analysis"){
shiny::req(input[["SensParam"]])
varyParam <- varyParam() %>% as.character()
SensParam <- as.character(input[["SensParam"]])
# Measurement columns apparently start at position 4 -- TODO confirm BacteriaDat layout
colNames <- names(BacteriaDat)[4:length(BacteriaDat)]
p5 <- vector("list",length = length(colNames)); names(p5) <- names(BacteriaDat)[4:length(BacteriaDat)]
# NOTE(review): eval() on a character scalar is a no-op; `name = SensParam` would suffice
colScale <- scale_color_discrete(name = eval(SensParam))
#Make Plot
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p5
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
# Build one ggplot per bacteria column; summarySE() supplies mean and CI half-width
p5 <- lapply(1:length(colNames),function(i){
stdDevsDat <- summarySE(data=BacteriaDat,measurevar = colNames[i],groupvars = c("time",varyParam)) %>% as.data.frame()
# Columns 4 and 7 are taken as the mean and CI half-width -- TODO confirm against summarySE()
yminE <- stdDevsDat[,4] - stdDevsDat[,7]
ymaxE <- stdDevsDat[,4] + stdDevsDat[,7]
# Pad the y limits by ~1-1.5% so the error bars / ribbons are not clipped
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
# NOTE(review): aes_string() is deprecated and is being passed vectors rather than
# column-name strings throughout this block; it happens to behave like aes() here.
if(confInterval=="Error Bars"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) + geom_point(aes_string(color=factor(stdDevsDat[,varyParam])),shape=21,size=1.1,alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,4], group=stdDevsDat[,varyParam])) + geom_line(aes_string(color=factor(stdDevsDat[,varyParam])),size=0.5) +
geom_ribbon(aes_string(ymin=yminE,ymax=ymaxE,fill=factor(stdDevsDat[,varyParam])),alpha=0.3,show.legend = FALSE) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") + colScale +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Single Simulation
}else if(simType=="Normal" & n_distinct(BacteriaDat$ID)==1){
# In single-run mode, measurement columns start at position 3 (no vary-param column)
colNames <- names(BacteriaDat)[3:length(BacteriaDat)]
p5 <- vector("list",length = length(colNames)); names(p5) <- names(BacteriaDat)[3:length(BacteriaDat)]
#Make Plot
# Simple red scatter per column; floor the y axis at 0 when the minimum rounds to 0
for(i in colNames){
if(round(min(BacteriaDat[,i]))==0){
yminVal <- 0
}else{yminVal <- round(min(BacteriaDat[,i])-min(BacteriaDat[,i])*.01,15)}
ymaxVal <- round(max(BacteriaDat[,i])+max(BacteriaDat[,i])*.015,15)
p5[[i]] <- ggplot(BacteriaDat, aes_string(x=BacteriaDat$time, y = BacteriaDat[,i])) +
geom_point(color="red") + geom_hline(yintercept=0, size=0.6, color="black") + labs(x="Time (h)",y=i) +
geom_vline(xintercept=0, size=0.6, color="black") + scale_y_continuous(limits = c(yminVal, ymaxVal)) +
theme(text=element_text(family="Times New Roman", face="bold", size=13))}
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
#Multiple Simulations
}else if(simType=="Normal" & n_distinct(BacteriaDat$ID)!=1){
colNames <- names(BacteriaDat)[3:length(BacteriaDat)]
p5 <- vector("list",length = length(colNames)); names(p5) <- colNames
stdDevs <- yminVal <- ymaxVal <- yminE <- ymaxE <- p5
prog <- 30; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
# One plot per column, summarized across simulation IDs (grouped by time only)
p5 <- lapply(1:length(colNames),function(i){
stdDevsDat <- summarySE(data=BacteriaDat,measurevar = colNames[i],groupvars = c("time")) %>% as.data.frame()
# With a single grouping variable, the mean/CI columns shift to positions 3 and 6
yminE <- stdDevsDat[,3] - stdDevsDat[,6]
ymaxE <- stdDevsDat[,3] + stdDevsDat[,6]
yminVal <- round(min(yminE)-min(yminE)*.01,15)
ymaxVal <- round(max(ymaxE)+max(ymaxE)*.015,15)
yLimits <- c(yminVal, ymaxVal)
#Make Plot
if(confInterval=="Error Bars"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_errorbar(color="black",aes(ymin=yminE,ymax=ymaxE),width=2.5) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}else if(confInterval=="Confidence Band"){
if(plotPoints){
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) + geom_point(color="black",shape=21,size=1.1,fill="white",alpha=0.9) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}else{
ggplot(stdDevsDat, aes_string(x=stdDevsDat$time, y = stdDevsDat[,3])) + geom_line(color="red",size=0.5) +
geom_ribbon(aes(ymin=yminE,ymax=ymaxE),alpha=0.3) +
scale_y_continuous(limits = yLimits) + geom_hline(yintercept=0, size=0.6, color="black") +
labs(x="Time (h)",y=colNames[i]) + geom_vline(xintercept=0, size=0.6, color="black") +
theme(text=element_text(family="Times New Roman", face="bold", size=13))
}
}
})
prog <- 70; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}
# Export plot list + arranged grob to the global environment for reuse elsewhere
p5 <<- p5; Plot5 <- grid.arrange(grobs=p5,ncol=2); assign("Plot5",Plot5,envir = globalenv()) #; print(Plot5)
grid.arrange(Plot5)
prog <- 100; setProgress(value=prog,detail = paste('This may take a while...',prog,"%"))
}) #End progress bar
}
},res=110)
} # End Server
# UI Code -----------------------------------------------------------------
# shinydashboard layout for the Anaerobic Digestion app:
#   * header  - title only
#   * sidebar - navigation menu, parameter dropdowns, and plotting options
#   * body    - one tabItem per plot/table/documentation page
ui <- dashboardPage(#theme = shinytheme("slate"),
#shinythemes::themeSelector(),
dashboardHeader(title = "Anaerobic Digestion"),
dashboardSidebar(
#hr(),
# Main navigation + model-parameter dropdown panels
sidebarMenu(id="tabs",
menuItem("Getting Started", tabName = "GettingStarted", icon = icon("dashboard"),selected=TRUE),
menuItem("Model Parameters", icon = icon("balance-scale"),
# -- Bacteria: initial concentrations and decay rate --
shinyWidgets::dropdown(
shinydashboard::box(width = 12, status = "primary", solidHeader = FALSE,
h2("Initial Bacteria Concentrations (mg/L)", align = "center"),
numericInput("Bact_ScaleFact_Acido", label=p("Acidogens",style="color:#080808"), min=1,step=1,value=300,max=10000),
numericInput("Bact_ScaleFact_Aceto", label=p("Acetogens",style="color:#080808"), min=1,step=1,value=3000,max=10000),
numericInput("Bact_ScaleFact_Meth", label=p("Methanogens",style="color:#080808"), min=1,step=1,value=3500,max=10000),
numericInput("Bact_ScaleFact_Bact", label=p("Bacteroides",style="color:#080808"), min=1,step=1,value=300,max=10000),
sliderInput("DecayRate",label=p("Decay Rate",style="color:#080808"), post=" 1/h", 0,0.002,0.001,0.0002,ticks = F)
),
label = "Bacteria", style = "stretch", size="sm",#up=TRUE,
status = "primary", width = "420px",
#tooltip = tooltipOptions(title = "Click to see options",placement = "bottom"),
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutRightBig)
),
# -- Chemical compounds: feed weight percents, with a nested PEG MW panel --
shinyWidgets::dropdown(
shinydashboard::box(width = 12, status = "primary", solidHeader = FALSE,
h2("Initial Weight Percents (%)", align = "center"),
numericInput("WT_Perc_Guar_IN",
label=p("Guar Gum",style="color:#080808"), min=0.1,step=.01,value=0.83,max=2),
numericInput("WT_Perc_PEG_IN",
label=p("PEG 400",style="color:#080808"), min=0.1,step=.01,value=0.42,max=2),
# Nested dropdown showing/adjusting the PEG molecular-weight distribution
shinyWidgets::dropdown(
shinydashboard::box(width = 12, status = "primary", solidHeader = FALSE,
h2("PEG MW Distribution", align = "center"),
plotOutput("PegPlot",width="100%",height="400px"),
p("Note: The distribution was purposely skewed to more closely match",style="color:#080808"),
p("observations in the literature.",style="color:#080808"),
noUiSliderInput("PegMeanMW",label= p("Adjust Mean PEG MW",style="color:#080808"), 300,400,400,1,tooltips = F)
),
label = "Adjust PEG MW Distribution", style = "stretch",size="sm",
status = "primary", width = "600px",
#tooltip = tooltipOptions(title = "Click to see options",placement = "bottom"),
animate = animateOptions(enter = "fadeInDown", exit = "fadeOutUp",duration = 0.8)
),
numericInput("WT_Perc_MeOL_IN",
label=p("Methanol",style="color:#080808"), min=0.1,step=.01,value=0.63,max=0.8),
numericInput("WT_Perc_ISO_IN",
label=p("Isopropanol",style="color:#080808"), min=0.1,step=.01,value=0.63,max=0.8)
),
label = "Chemical Compounds", style = "stretch",size="sm", #up=TRUE,
status = "primary", width = "420px",
#tooltip = tooltipOptions(title = "Click to see options",placement = "bottom"),
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutRightBig)
),
# -- System properties: wells, temperature, headspace ratio --
shinyWidgets::dropdown(
shinydashboard::box(width = 12, status = "primary", solidHeader = FALSE,
h2("System Properties", align = "center"),
sliderInput("wells",label= p("Number of Wells to Pull From",style="color:#080808"), 1,55,55,1),
sliderInput("Temp",label= p("Initial Reactor Temperature (°C)",style="color:#080808"), 20,40,30,1),
knobInput("TempSlope",displayPrevious=T,label= p("Temperature Slope (°C/h)",style="color:#080808"),0,-0.1,0.1,0.01,immediate=F),# ,height = "50%",width="50%"
numericInput("Head_Space_VolRatio",
label=p("Ratio of Headspace to Reactor Volume (L/L)",style="color:#080808"),
min=0.25,max=3,value=2,step=0.05)
),
label = "System Properties", style = "stretch",size="sm", #up=TRUE,
status = "primary", width = "420px",
#tooltip = tooltipOptions(title = "Click to see options",placement = "bottom"),
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutRightBig)
),
# -- Variability: coefficient of variation for kinetics/yields + CI display style --
shinyWidgets::dropdown(
shinydashboard::box(width = 12, status = "primary", solidHeader = FALSE,
h2("Parameter Variability (CV %)", align = "center"),
sliderInput("kinetic_var",label= p("Kinetic Rates",style="color:#080808"), 0,100,30,1),
sliderInput("yield_var",label= p("Bacteria Yields",style="color:#080808"), 0,50,15,1),
radioGroupButtons("confInterval",label=p("Confidence Interval (95%)",style="color:#080808"),justified = TRUE,
checkIcon = list(yes = icon("ok", lib = "glyphicon")),
choices = c("Confidence Band","Error Bars"),selected = "Confidence Band")
),
label = "Variability", style = "stretch",size="sm", #up=TRUE,
status = "primary", width = "420px",
#tooltip = tooltipOptions(title = "Click to see options",placement = "bottom"),
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutRightBig)
),
# -- Simulation: type, count, and sensitivity-analysis controls --
shinyWidgets::dropdown(
shinydashboard::box(width = 12, status = "primary", solidHeader = FALSE,
h2("Simulation Specification", align = "center"),
radioGroupButtons("simType",label=p("Type of Simulation",style="color:#080808"),justified = TRUE,
checkIcon = list(yes = icon("ok", lib = "glyphicon")),
choices = c("Normal","Sensitivity Analysis"),selected = "Normal"),
numericInput("nSim", label=p("Number of Simulations",style="color:#080808"), min=1,step=1,value=20,max=200),
# Extra inputs shown only for sensitivity runs; SensChoices is defined elsewhere in the app
conditionalPanel("input.simType=='Sensitivity Analysis'",
h2("Sensitivity Analysis", align = "center"),
pickerInput(inputId = "SensParam",label = p("Choose Parameter to Vary",style="color:#080808"),
choices = SensChoices,options = list(size = 5,`style` = "btn-info"),selected = "WT % of Guar Gum"),
sliderInput("SensRange", p("Range of Parameter Variance:",style="color:#080808"), min = -80, max = 80, post="%",value = c(-25,25))
)
),
label = "Simulation", style = "stretch",size="sm", #up=TRUE,
status = "primary", width = "420px",
#tooltip = tooltipOptions(title = "Click to see options",placement = "bottom"),
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutRightBig)
)
),
# Navigation to the plot tabs rendered by output$plot1..plot5
menuItem("Plots", icon = icon("line-chart"),
menuSubItem("Main Components", tabName = "plotTab2", icon = icon("angle-right")),
menuSubItem("Guar Gum Intermediates", tabName = "plotTab3", icon = icon("angle-right")),
menuSubItem("PEG Intermediates", tabName = "plotTab4", icon = icon("angle-right")),
menuSubItem("Bacteria Growth", tabName = "plotTab5", icon = icon("angle-right")),
menuSubItem("Reactor Properties", tabName = "plotTab1", icon = icon("angle-right"))
),
menuItem("Tables", icon = icon("table"), #tabName="tabtable",
menuSubItem("Main Component Data", tabName = "sumTab1", icon = icon("angle-right")),
menuSubItem("Bacteria Data", tabName = "sumTab2", icon = icon("angle-right"))
), hr(),
menuItem("Mathematical Model", tabName = "readme", icon = icon("mortar-board")),
menuItem("Background of Process", tabName = "Background", icon = icon("book")),
menuItem("Codes", icon = icon("file-text-o"),
menuSubItem("Model Code", tabName = "ModelCode", icon = icon("angle-right")),
#menuSubItem("ui.R", tabName = "ui", icon = icon("angle-right")),
#menuSubItem("server.R", tabName = "server", icon = icon("angle-right")),
menuSubItem("app.R", tabName = "app", icon = icon("angle-right"))
)
),
hr(),
# conditionalPanel("input.tabs=='plot1' | input.tabs=='plot2' | input.tabs=='plot3' | input.tabs=='plot4'",
# Plot-wide options: data truncation cutoff and data-point overlay toggle
sidebarMenu(
menuItem("Plotting Parameters", icon = icon("chevron-circle-right"),
fluidRow(column(1),
column(10,
sliderInput("cutOff",label="Truncate Data", post=" h",
25,300,100,5),
awesomeCheckbox(
inputId = "plotPoints",label = "Show Data Points on Graphs?", value = FALSE,status = "info"
)
)))
),
hr()#,
# sidebarMenu(
# br(),
# div(img(src="FrackOff.png",height=160,width=200),style="text-align: center;")
# )
#)
), # End dashboardSidebar
# Body: one tabItem per sidebar destination
dashboardBody(
tabItems(
tabItem(tabName = "GettingStarted",
fluidPage(
# Auto-number MathJax equations in the rendered markdown
tags$head(HTML("<script type='text/x-mathjax-config'>MathJax.Hub.Config({ TeX: { equationNumbers: {autoNumber: 'all'} } });</script>")),
withMathJax(),
uiOutput('markdownGS')
)
),
# tabItem(tabName = "Input",
# ),
tabItem(tabName = "plotTab1",
fluidRow(
shinydashboard::box(width = 10, status = "primary",
plotOutput("plot1",width="100%",height="800px")
)#,
# shinydashboard::box(width = 2, status = "primary",
# actionBttn(inputId = "resim1",label = "Resimulate",style = "fill",color="primary",size = "lg",block = T)
)#)
),
tabItem(tabName = "plotTab2",
fluidRow(
shinydashboard::box(width = 10, status = "primary",
plotOutput("plot2",width="100%",height="800px")
)#,
# shinydashboard::box(width = 2, status = "primary",
# actionBttn(inputId = "resim2",label = "Resimulate",style = "fill",color="primary",size = "lg",block = T)
)#)
),
tabItem(tabName = "plotTab3",
fluidRow(
shinydashboard::box(width = 10, status = "primary",
plotOutput("plot3",width="100%",height="800px")
)#,
# shinydashboard::box(width = 2, status = "primary",
# actionBttn(inputId = "resim3",label = "Resimulate",style = "fill",color="primary",size = "lg",block = T)
)#)
),
tabItem(tabName = "plotTab4",
fluidRow(
shinydashboard::box(width = 10, status = "primary",
plotOutput("plot4",width="100%",height="1050px")
)#,
# shinydashboard::box(width = 2, status = "primary",
# actionBttn(inputId = "resim4",label = "Resimulate",style = "fill",color="primary",size = "lg",block = T)
)#)
),
tabItem(tabName = "plotTab5",
fluidRow(
shinydashboard::box(width = 10, status = "primary",
plotOutput("plot5",width="100%",height="800px")
)#,
# shinydashboard::box(width = 2, status = "primary",
# actionBttn(inputId = "resim5",label = "Resimulate",style = "fill",color="primary",size = "lg",block = T)
)#)
),
tabItem(tabName = "sumTab1",
fluidRow(
shinydashboard::box(width = 10, status = "primary", solidHeader = TRUE, title="Main Component Data",
tableOutput("sum1"),
htmlOutput(outputId = "modelError")
)#,
# shinydashboard::box(width = 2, status = "primary",
# actionBttn(inputId = "resim6",label = "Resimulate",style = "fill",color="primary",size = "lg",block = T)
)#)
),
tabItem(tabName = "sumTab2",
fluidRow(
shinydashboard::box(width = 10, status = "primary", solidHeader = TRUE, title="Bacteria Data",
tableOutput("sum2")#,
#htmlOutput(outputId = "modelError")
)#,
# shinydashboard::box(width = 2, status = "primary",
# actionBttn(inputId = "resim7",label = "Resimulate",style = "fill",color="primary",size = "lg",block = T)
)#)
),
# tabItem(tabName = "ui",
# shinydashboard::box(width = NULL, status = "primary", solidHeader = TRUE, title="ui.R",
# downloadButton('downloadData2', 'Download'),
# br(),br(),
# pre(includeText("ui.R"))
# )
# ),
# tabItem(tabName = "server",
# shinydashboard::box(width = NULL, status = "primary", solidHeader = TRUE, title="server.R",
# downloadButton('downloadData3', 'Download'),
# br(),br(),
# pre(includeText("server.R"))
# )
# ),
# Source-code viewer tabs
tabItem(tabName = "app",
shinydashboard::box(width = NULL, status = "primary", solidHeader = TRUE, title="app.R",
downloadButton('downloadData4', 'Download'),
br(),br(),
pre(includeText("app.R"))
)
),
tabItem(tabName = "ModelCode",
# `.model` (the ODE model source string) is defined elsewhere in the app
aceEditor("mod_code", .model,mode="r", height="1000px")
),
tabItem(tabName = "readme",
fluidPage(
tags$head(HTML("<script type='text/x-mathjax-config'>MathJax.Hub.Config({ TeX: { equationNumbers: {autoNumber: 'all'} } });</script>")),
withMathJax(),
uiOutput('markdownRM')
)
),
tabItem(tabName = "Background",
tags$iframe(style="height:820px; width:100%; scrolling=yes",
src="AnaerobicDigestion_ReadMe.pdf")
)
)#,
# conditionalPanel("input.tabs=='plotTab1' | input.tabs=='plotTab2' | input.tabs=='plotTab3' | input.tabs=='plotTab4'| input.tabs=='sumTab1' | input.tabs=='sumTab2'",
# tags$h2("Adjust parameters in dropdown menus:"),
# fluidPage(
# ) #End fluidPage
# ) #End Conditional Panel
), # End dashboardBody
# Global assets: session/modal JS helpers and stylesheet from www/
tags$head(
tags$script(src = "js/session.js"),
tags$script(src = "js/modal_vid.js"),
tags$link(rel = "stylesheet", type = "text/css", href = "styles.css"),
tags$style(HTML(".main-sidebar { font-size: 16px; }"))
)#,
#tags$style(".swal-modal {width: 80%;}")
) # End UI
# Launch the application: wires the `ui` above to the `server` defined earlier in the file.
shinyApp(ui = ui, server = server)
#runApp(shinyApp(ui = ui, server = server),launch.browser=TRUE)
|
/ejhse2016/EngShepTest.R | no_license | lakshinav/papers | R | false | false | 2,815 | r | ||
# Open this profile for editing with:
# file.edit('~/.Rprofile') or file.edit('.Rprofile')
# blogdown site defaults: author new posts as .Rmd, attribute them to the
# site owner, and create each new post as a Hugo page bundle.
options(blogdown.ext = '.Rmd', blogdown.author = 'Brandon P. Pipher', blogdown.new_bundle = TRUE)
| /.Rprofile | permissive | bppipher/brandonpipher.com | R | false | false | 151 | rprofile | # file.edit('~/.Rprofile') or file.edit('.Rprofile')
options(blogdown.ext = '.Rmd', blogdown.author = 'Brandon P. Pipher', blogdown.new_bundle = TRUE)
|
#******************************************************************************#
# Verify and pre-process inputs #
#******************************************************************************#
# #
# Inputs #
# #
# data.x an object of class data.frame. #
# The structure of the data.frame must be #
# \{patient ID, event time, event indicator\}. #
# Patient IDs must be of class integer or be able to be #
# coerced to class integer without loss of information. #
# Missing values must be indicated as NA. #
# #
# data.z an object of class data.frame. #
# The structure of the data.frame must be #
# \{patient ID, time of measurement, measurement(s)\}. #
# Patient IDs must be of class integer or be able to be #
# coerced to class integer without loss of information. #
# Missing values must be indicated as NA. #
# #
# Outputs #
# #
# Return a list #
# #
# data.x Same as input with: ids coerced to integer; NAs removed; #
# #
# data.z Same as input with: ids coerced to integer; NAs removed; #
# missing data cases set to 0. #
# #
#******************************************************************************#
# Validate and clean the survival data (data.x) and longitudinal covariate
# data (data.z); see the banner comment above for the expected column layouts.
# Returns list(data.x, data.z): IDs coerced to integer, NA event times
# removed, all-NA rows of data.z removed, remaining NAs in data.z set to 0,
# and an error raised if any time is negative.
preprocessInputs <- function(data.x, data.z) {
#--------------------------------------------------------------------------#
# Verify sufficient number of columns in datasets                          #
#--------------------------------------------------------------------------#
nc <- ncol(data.x)
if( nc != 3L ) stop("data.x must include {ID, time, delta}.")
ncz <- ncol(data.z)
if( ncz < 3L ) stop("data.z must include {ID, time, measurement}.")
#--------------------------------------------------------------------------#
# Ensure patient ids are integers (rounded first so 1.9 -> 2L, not 1L)     #
#--------------------------------------------------------------------------#
if( !is.integer(data.z[,1L]) ) {
data.z[,1L] <- as.integer(round(data.z[,1L],0))
cat("Patient IDs in data.z were coerced to integer.\n")
}
if( !is.integer(data.x[,1L]) ) {
data.x[,1L] <- as.integer(round(data.x[,1L],0))
cat("Patient IDs in data.x were coerced to integer.\n")
}
#--------------------------------------------------------------------------#
# Drop rows of data.z in which EVERY entry (ID and time included) is NA    #
#--------------------------------------------------------------------------#
rmRow <- apply(data.z, 1, function(x){all(is.na(x))})
data.z <- data.z[!rmRow,]
#--------------------------------------------------------------------------#
# Remaining missing values (any column of data.z) are set to 0.0           #
#--------------------------------------------------------------------------#
tst <- is.na(data.z)
data.z[tst] <- 0.0
#--------------------------------------------------------------------------#
# Remove any cases for which the response (event time) is NA               #
#--------------------------------------------------------------------------#
tst <- is.na(data.x[,2L])
data.x <- data.x[!tst,]
#--------------------------------------------------------------------------#
# Reject negative times; -1.5e-8 tolerance allows floating point error     #
#--------------------------------------------------------------------------#
if( any(data.z[,2L] < {-1.5e-8}) ) {
stop("Time is negative in data.z.")
}
if( any(data.x[,2L] < {-1.5e-8}) ) {
stop("Time is negative in data.x.")
}
return(list(data.x = data.x,
data.z = data.z))
}
| /SurvLong/R/preprocessInputs.R | no_license | ingted/R-Examples | R | false | false | 4,802 | r | #******************************************************************************#
# Verify and pre-process inputs #
#******************************************************************************#
# #
# Inputs #
# #
# data.x an object of class data.frame. #
# The structure of the data.frame must be #
# \{patient ID, event time, event indicator\}. #
# Patient IDs must be of class integer or be able to be #
# coerced to class integer without loss of information. #
# Missing values must be indicated as NA. #
# #
# data.z an object of class data.frame. #
# The structure of the data.frame must be #
# \{patient ID, time of measurement, measurement(s)\}. #
# Patient IDs must be of class integer or be able to be #
# coerced to class integer without loss of information. #
# Missing values must be indicated as NA. #
# #
# Outputs #
# #
# Return a list #
# #
# data.x Same as input with: ids coerced to integer; NAs removed; #
# #
# data.z Same as input with: ids coerced to integer; NAs removed; #
# missing data cases set to 0. #
# #
#******************************************************************************#
preprocessInputs <- function(data.x, data.z) {
  # Validate and clean the time-to-event data (data.x) and the longitudinal
  # covariate data (data.z) prior to analysis.
  #
  # data.x : data.frame {patient ID, event time, event indicator}.
  # data.z : data.frame {patient ID, measurement time, measurement(s)}.
  #
  # Returns list(data.x, data.z): IDs coerced to integer, NA event times
  # removed, all-NA rows of data.z removed, remaining NAs in data.z set
  # to 0, and an error raised if any time is negative.

  # Both data sets must carry at least {ID, time, <third column>}.
  if (ncol(data.x) != 3L) stop("data.x must include {ID, time, delta}.")
  if (ncol(data.z) < 3L) stop("data.z must include {ID, time, measurement}.")

  # Force patient identifiers to integer; data.z is handled first so the
  # diagnostic messages appear in the same order as before.
  if (!is.integer(data.z[, 1L])) {
    data.z[, 1L] <- as.integer(round(data.z[, 1L], 0))
    cat("Patient IDs in data.z were coerced to integer.\n")
  }
  if (!is.integer(data.x[, 1L])) {
    data.x[, 1L] <- as.integer(round(data.x[, 1L], 0))
    cat("Patient IDs in data.x were coerced to integer.\n")
  }

  # Drop rows of data.z in which every entry (ID and time included) is NA.
  allMissing <- rowSums(is.na(data.z)) == ncol(data.z)
  data.z <- data.z[!allMissing, ]

  # Remaining missing values anywhere in data.z are treated as 0.
  data.z[is.na(data.z)] <- 0.0

  # Remove cases whose response (event time) is missing.
  data.x <- data.x[!is.na(data.x[, 2L]), ]

  # Times must be non-negative; -1.5e-8 tolerance allows floating point error.
  if (any(data.z[, 2L] < {-1.5e-8})) {
    stop("Time is negative in data.z.")
  }
  if (any(data.x[, 2L] < {-1.5e-8})) {
    stop("Time is negative in data.x.")
  }

  list(data.x = data.x,
       data.z = data.z)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R
\name{drugNames<-}
\alias{drugNames<-}
\title{drugNames<- Generic}
\usage{
drugNames(object) <- value
}
\arguments{
\item{object}{The \code{PharmacoSet} to update}
\item{value}{A \code{character} vector of the new drug names}
}
\value{
The [`object`] with updated drug names
}
\description{
A generic for the drugNames replacement method
}
\examples{
data(CCLEsmall)
drugNames(CCLEsmall) <- drugNames(CCLEsmall)
}
| /man/drugNames-set.Rd | no_license | FuChunjin/PharmacoGx | R | false | true | 505 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R
\name{drugNames<-}
\alias{drugNames<-}
\title{drugNames<- Generic}
\usage{
drugNames(object) <- value
}
\arguments{
\item{object}{The \code{PharmacoSet} to update}
\item{value}{A \code{character} vector of the new drug names}
}
\value{
The [`object`] with updated drug names
}
\description{
A generic for the drugNames replacement method
}
\examples{
data(CCLEsmall)
drugNames(CCLEsmall) <- drugNames(CCLEsmall)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataFilter.R
\name{dataFilter}
\alias{dataFilter}
\alias{dataFilterUI}
\alias{dataFilterServer}
\title{Shiny module for filtering data}
\usage{
dataFilterUI(id)
dataFilterServer(id, data = reactive(NULL), hide = FALSE)
}
\arguments{
\item{id}{unique identifier for the module to prevent namespace clashes when
making multiple calls to this shiny module.}
\item{data}{an array wrapped in \code{reactive()} containing the data to be
filtered.}
\item{hide}{logical indicating whether the data filtering user interface
should be hidden from the user, set to FALSE by default.}
}
\value{
a list of reactive objects containing the filtered \code{data} and
indices for filtered \code{rows}.
}
\description{
Shiny module for filtering data
}
\examples{
if (interactive()) {
library(shiny)
library(rhandsontable)
library(shinyjs)
ui <- fluidPage(
useShinyjs(),
dataInputUI("input1"),
dataFilterUI("filter1"),
rHandsontableOutput("data1")
)
server <- function(input,
output,
session) {
data_input <- dataInputServer("input1")
# list with slots data and rows (indices)
data_filter <- dataFilterServer("filter1",
data = data_input
)
output$data1 <- renderRHandsontable({
if (!is.null(data_filter$data())) {
rhandsontable(data_filter$data())
}
})
}
shinyApp(ui, server)
}
}
\author{
Dillon Hammill, \email{Dillon.Hammill@anu.edu.au}
}
| /man/dataFilter.Rd | no_license | bluelou/DataEditR | R | false | true | 1,543 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataFilter.R
\name{dataFilter}
\alias{dataFilter}
\alias{dataFilterUI}
\alias{dataFilterServer}
\title{Shiny module for filtering data}
\usage{
dataFilterUI(id)
dataFilterServer(id, data = reactive(NULL), hide = FALSE)
}
\arguments{
\item{id}{unique identifier for the module to prevent namespace clashes when
making multiple calls to this shiny module.}
\item{data}{an array wrapped in \code{reactive()} containing the data to be
filtered.}
\item{hide}{logical indicating whether the data filtering user interface
should be hidden from the user, set to FALSE by default.}
}
\value{
a list of reactive objects containing the filtered \code{data} and
indices for filtered \code{rows}.
}
\description{
Shiny module for filtering data
}
\examples{
if (interactive()) {
library(shiny)
library(rhandsontable)
library(shinyjs)
ui <- fluidPage(
useShinyjs(),
dataInputUI("input1"),
dataFilterUI("filter1"),
rHandsontableOutput("data1")
)
server <- function(input,
output,
session) {
data_input <- dataInputServer("input1")
# list with slots data and rows (indices)
data_filter <- dataFilterServer("filter1",
data = data_input
)
output$data1 <- renderRHandsontable({
if (!is.null(data_filter$data())) {
rhandsontable(data_filter$data())
}
})
}
shinyApp(ui, server)
}
}
\author{
Dillon Hammill, \email{Dillon.Hammill@anu.edu.au}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/saint-lague-scheppers.R
\name{sls}
\alias{sls}
\title{Seat Distribution by Sainte-Lague/Schepers}
\usage{
sls(votes, parties, n_seats = 598L)
}
\arguments{
\item{votes}{A numeric vector giving the redistributed votes}
\item{parties}{A character vector indicating the names of parties with
respective \code{votes}.}
\item{n_seats}{The total number of seats that can be assigned to the different
parties.}
}
\value{
A numeric vector giving the number of seats each party obtained.
}
\description{
Calculates number of seats for the respective parties that have received more
than 5\% of votes (according to the method of Sainte-Lague/Schepers,
see https://www.wahlrecht.de/verfahren/rangmasszahlen.html).
}
\examples{
library(coalitions)
library(dplyr)
# get the latest survey for a sample of German federal election polls
surveys <- get_latest(surveys_sample) \%>\% tidyr::unnest("survey")
# calculate the seat distribution based on Sainte-Lague/Schepers for a parliament with 300 seats
sls(surveys$votes, surveys$party, n_seats = 300)
}
\seealso{
\code{\link{dHondt}}
}
| /man/sls.Rd | permissive | adibender/coalitions | R | false | true | 1,150 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/saint-lague-scheppers.R
\name{sls}
\alias{sls}
\title{Seat Distribution by Sainte-Lague/Schepers}
\usage{
sls(votes, parties, n_seats = 598L)
}
\arguments{
\item{votes}{A numeric vector giving the redistributed votes}
\item{parties}{A character vector indicating the names of parties with
respective \code{votes}.}
\item{n_seats}{The total number of seats that can be assigned to the different
parties.}
}
\value{
A numeric vector giving the number of seats each party obtained.
}
\description{
Calculates number of seats for the respective parties that have received more
than 5\% of votes (according to the method of Sainte-Lague/Schepers,
see https://www.wahlrecht.de/verfahren/rangmasszahlen.html).
}
\examples{
library(coalitions)
library(dplyr)
# get the latest survey for a sample of German federal election polls
surveys <- get_latest(surveys_sample) \%>\% tidyr::unnest("survey")
# calculate the seat distribution based on Sainte-Lague/Schepers for a parliament with 300 seats
sls(surveys$votes, surveys$party, n_seats = 300)
}
\seealso{
\code{\link{dHondt}}
}
|
##### Benchmark script: base-R CSV import vs. RevoScaleR XDF import.
##### Requires switching the R version to Microsoft Open R first.
library(RevoScaleR)
rxOptions(reportProgress = 1) # reduces the amount of output RevoScaleR produces
# Import the CSV (544498 rows) with the base read.csv and time it.
st <- Sys.time()
input_csv <- 'yellow_tripsample_2016-01.csv'
nyc_sample_df <- read.csv(input_csv)
summary(nyc_sample_df$fare_amount)
Sys.time() - st # prints the elapsed time of the base-R import
# Import the same CSV with the RevoScaleR rxImport function (to XDF) and time it.
st <- Sys.time()
input_csv <- 'yellow_tripsample_2016-01.csv'
input_xdf <- 'yellow_tripdata_201601.xdf'
rxImport(inData=input_csv, outFile=input_xdf, overwrite = TRUE,reportProgress = 1)
rxSummary( ~ fare_amount,input_xdf )
Sys.time() - st
### Switch the R version in RStudio back to regular R afterwards.
| /Importing data with Microsoft Open R.R | no_license | urosgodnov/DataCapture | R | false | false | 734 | r | #####Changing R version to Microsoft Open R
# Benchmark: base-R CSV import vs. RevoScaleR XDF import.
# (Run under Microsoft Open R so that RevoScaleR is available.)
library(RevoScaleR)
rxOptions(reportProgress = 1)  # keep RevoScaleR's progress output terse

# --- base R: read the CSV (544498 rows) and summarise fare_amount ---
t0 <- Sys.time()
input_csv <- 'yellow_tripsample_2016-01.csv'
nyc_sample_df <- read.csv(input_csv)
summary(nyc_sample_df$fare_amount)
Sys.time() - t0  # elapsed time of the base-R import

# --- RevoScaleR: convert the CSV to XDF and summarise fare_amount ---
t0 <- Sys.time()
input_csv <- 'yellow_tripsample_2016-01.csv'
input_xdf <- 'yellow_tripdata_201601.xdf'
rxImport(inData = input_csv, outFile = input_xdf, overwrite = TRUE, reportProgress = 1)
rxSummary(~ fare_amount, input_xdf)
Sys.time() - t0  # elapsed time of the RevoScaleR import
### Switch the R version in RStudio back to regular R afterwards.
|
# Regression test: the mrf data produced for the packaged IFN model must
# match a stored reference file.
library(isismdl)
library(testthat)
library(utils)
# NOTE(review): rm(list = ls()) in a sourced script is generally discouraged;
# kept as-is to preserve the existing behavior.
rm(list = ls())
# Passed to expect_known_output(); set TRUE to regenerate the reference file.
update <- FALSE
context("mrf IFN model")
# Provides read_mrf(), used below to read the model's mrf data.
source("../tools/read_mrf.R")
test_that("check mrf", {
# Work on a throwaway copy of the packaged IFN model file.
mdl_file <- tempfile(pattern = "isismdl_", fileext = ".mdl")
mdl_file_orig <- system.file("models", "ifn.mdl", package = "isismdl")
file.copy(mdl_file_orig, mdl_file)
mdl <- isis_mdl(mdl_file, silent = TRUE)
mrf_data <- read_mrf(mdl_file)
# Compare against the stored reference output (rewritten when update = TRUE).
expect_known_output(cat(mrf_data),
file = "expected_output/mrf_mrf.txt",
update = update, print = TRUE)
})
| /pkg/tests/testthat/ifn/test_mrf.R | no_license | timemod/isismdl | R | false | false | 568 | r | library(isismdl)
library(testthat)
library(utils)

# Start from a clean workspace; `update` controls whether the reference
# output file is rewritten by expect_known_output().
rm(list = ls())
update <- FALSE

context("mrf IFN model")
source("../tools/read_mrf.R")

test_that("check mrf", {
  # Work on a throwaway copy of the packaged IFN model file.
  scratch_mdl <- tempfile(pattern = "isismdl_", fileext = ".mdl")
  packaged_mdl <- system.file("models", "ifn.mdl", package = "isismdl")
  file.copy(packaged_mdl, scratch_mdl)

  # Load the model, then read back its mrf data via read_mrf() (sourced above).
  mdl <- isis_mdl(scratch_mdl, silent = TRUE)
  mrf_contents <- read_mrf(scratch_mdl)

  # The mrf contents must match the stored reference output.
  expect_known_output(cat(mrf_contents),
                      file = "expected_output/mrf_mrf.txt",
                      update = update, print = TRUE)
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.